Lines Matching +full:n +full:- +full:1
1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
51 if (str[n1 - i - 1] != suffix[n2 - i - 1])
67 t = skip_mods_and_typedefs(btf, t->type, res_id);
77 strncpy(file_copy, file, PATH_MAX - 1)[PATH_MAX - 1] = '\0';
78 strncpy(name, basename(file_copy), MAX_OBJ_NAME_LEN - 1)[MAX_OBJ_NAME_LEN - 1] = '\0';
80 name[strlen(name) - 2] = '\0';
97 int i, n;
104 for (i = 0, n = ARRAY_SIZE(sfxs); i < n; i++) {
109 snprintf(buf, buf_sz, "%s", p + 1);
121 int i, n;
124 if (strcmp(sec_name, ".addr_space.1") == 0) {
129 for (i = 0, n = ARRAY_SIZE(pfxs); i < n; i++) {
133 snprintf(buf, buf_sz, "%s", sec_name + 1);
153 const char *sec_name = btf__name_by_offset(btf, sec->name_off);
165 printf(" struct %s__%s {\n", obj_name, sec_ident);
167 const struct btf_type *var = btf__type_by_id(btf, sec_var->type);
168 const char *var_name = btf__name_by_offset(btf, var->name_off);
174 int need_off = sec_var->offset, align_off, align;
175 __u32 var_type_id = var->type;
178 if (btf_var(var)->linkage == BTF_VAR_STATIC)
182 p_err("Something is wrong for %s's variable #%d: need offset %d, already at %d.\n",
184 return -EINVAL;
187 align = btf__align_of(btf, var->type);
191 return -EINVAL;
193 /* Assume 32-bit architectures when generating data section
196 * conservative and assume 32-bit one to ensure enough padding
198 * still work correctly for 64-bit architectures, because in
200 * which on 64-bit architectures is not strictly necessary and
201 * would be handled by natural 8-byte alignment. But it still
208 align_off = (off + align - 1) / align * align;
210 printf("\t\tchar __pad%d[%d];\n",
211 pad_cnt, need_off - off);
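The round-up on line 208 above advances the running offset to the next multiple of align before the pad array is emitted; a quick worked sketch of the arithmetic (values illustrative only):

	int off = 13, align = 4;
	int align_off = (off + align - 1) / align * align;	/* (13 + 3) / 4 * 4 = 16 */
	/* the generator then emits "char __padN[need_off - off];" to close the gap */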
220 strncat(var_ident, var_name, sizeof(var_ident) - 1);
227 printf(";\n");
229 off = sec_var->offset + sec_var->size;
231 printf(" } *%s;\n", sec_ident);
237 int n = btf__type_cnt(btf), i;
240 for (i = 1; i < n; i++) {
247 name = btf__str_by_offset(btf, t->name_off);
286 return -errno;
289 /* only generate definitions for memory-mapped internal maps */
299 * map. It will still be memory-mapped and its contents
300 * accessible from user-space through BPF skeleton.
303 printf(" struct %s__%s {\n", obj_name, map_ident);
304 printf(" } *%s;\n", map_ident);
321 return btf_is_ptr(v) && btf_is_func_proto(btf__type_by_id(btf, v->type));
339 return -errno;
342 /* only generate definitions for memory-mapped internal maps */
350 sec_name = btf__name_by_offset(btf, sec->name_off);
355 printf(" struct %s__%s {\n", obj_name, sec_ident);
367 var = btf__type_by_id(btf, sec_var->type);
368 var_name = btf__name_by_offset(btf, var->name_off);
369 var_type_id = var->type;
372 if (btf_var(var)->linkage == BTF_VAR_STATIC)
378 var = skip_mods_and_typedefs(btf, var->type, NULL);
398 printf(" *%s;\n", var_name);
400 printf(" } %s;\n", sec_ident);
411 int skip_tabs = 0, n;
416 n = strlen(template);
417 s = malloc(n + 1);
419 exit(-1);
427 } else if (c == '\n') {
431 src - template - 1, template, c);
433 exit(-1);
439 for (n = skip_tabs; n > 0; n--, src++) {
442 src - template - 1, template);
444 exit(-1);
448 end = strchrnul(src, '\n');
449 for (n = end - src; n > 0 && isspace(src[n - 1]); n--)
451 memcpy(dst, src, n);
452 dst += n;
454 *dst++ = '\n';
455 src = *end ? end + 1 : end;
461 n = vprintf(s, args);
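These fragments belong to the template printer used throughout the file: the first template line (tabs only) sets the base indentation, that many leading tabs are stripped from every following line, trailing whitespace is trimmed, and the result is handed to vprintf. A hedged usage sketch, with an illustrative body (leading indentation inside the template must be tab characters):

	codegen("\
		\n\
		static inline void				\n\
		%1$s__detach(struct %1$s *skel)			\n\
		{						\n\
		", obj_name);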
476 printf("\\\n");
496 /* Emit type size asserts for all top-level fields in memory-mapped internal maps. */
510 \n\
511 __attribute__((unused)) static void \n\
512 %1$s__assert(struct %1$s *s __attribute__((unused))) \n\
513 { \n\
514 #ifdef __cplusplus \n\
515 #define _Static_assert static_assert \n\
516 #endif \n\
533 const struct btf_type *var = btf__type_by_id(btf, sec_var->type);
534 const char *var_name = btf__name_by_offset(btf, var->name_off);
538 if (btf_var(var)->linkage == BTF_VAR_STATIC)
541 var_size = btf__resolve_size(btf, var->type);
546 strncat(var_ident, var_name, sizeof(var_ident) - 1);
549 printf("\t_Static_assert(sizeof(s->%s->%s) == %ld, \"unexpected size of '%s'\");\n",
554 \n\
555 #ifdef __cplusplus \n\
556 #undef _Static_assert \n\
557 #endif \n\
558 } \n\
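Instantiated, the assert block comes out roughly as below (object "minimal", section "bss", and an 8-byte variable "counter" are hypothetical):

	__attribute__((unused)) static void
	minimal__assert(struct minimal *s __attribute__((unused)))
	{
	#ifdef __cplusplus
	#define _Static_assert static_assert
	#endif
		_Static_assert(sizeof(s->bss->counter) == 8, "unexpected size of 'counter'");
	#ifdef __cplusplus
	#undef _Static_assert
	#endif
	}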
570 \n\
571 \n\
572 static inline int \n\
573 %1$s__%2$s__attach(struct %1$s *skel) \n\
574 { \n\
575 int prog_fd = skel->progs.%2$s.prog_fd; \n\
580 tp_name = strchr(bpf_program__section_name(prog), '/') + 1;
581 printf("\tint fd = skel_raw_tracepoint_open(\"%s\", prog_fd);\n", tp_name);
586 printf("\tint fd = skel_link_create(prog_fd, 0, BPF_TRACE_ITER);\n");
588 printf("\tint fd = skel_raw_tracepoint_open(NULL, prog_fd);\n");
591 printf("\tint fd = ((void)prog_fd, 0); /* auto-attach not supported */\n");
595 \n\
596 \n\
597 if (fd > 0) \n\
598 skel->links.%1$s_fd = fd; \n\
599 return fd; \n\
600 } \n\
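For a raw tracepoint program the emitted per-program attach helper looks roughly like this (object "minimal", program "handle_exec", and tracepoint "sched_process_exec" are hypothetical):

	static inline int
	minimal__handle_exec__attach(struct minimal *skel)
	{
		int prog_fd = skel->progs.handle_exec.prog_fd;
		int fd = skel_raw_tracepoint_open("sched_process_exec", prog_fd);

		if (fd > 0)
			skel->links.handle_exec_fd = fd;
		return fd;
	}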
605 \n\
606 \n\
607 static inline int \n\
608 %1$s__attach(struct %1$s *skel) \n\
609 { \n\
610 int ret = 0; \n\
611 \n\
616 \n\
617 ret = ret < 0 ? ret : %1$s__%2$s__attach(skel); \n\
622 \n\
623 return ret < 0 ? ret : 0; \n\
624 } \n\
625 \n\
626 static inline void \n\
627 %1$s__detach(struct %1$s *skel) \n\
628 { \n\
633 \n\
634 skel_closenz(skel->links.%1$s_fd); \n\
639 \n\
640 } \n\
651 \n\
652 static void \n\
653 %1$s__destroy(struct %1$s *skel) \n\
654 { \n\
655 if (!skel) \n\
656 return; \n\
657 %1$s__detach(skel); \n\
663 \n\
664 skel_closenz(skel->progs.%1$s.prog_fd); \n\
673 printf("\tskel_free_map_data(skel->%1$s, skel->maps.%1$s.initial_value, %2$zu);\n",
676 \n\
677 skel_closenz(skel->maps.%1$s.map_fd); \n\
681 \n\
682 skel_free(skel); \n\
683 } \n\
718 \n\
719 }; \n\
728 \n\
729 static inline struct %1$s * \n\
730 %1$s__open(void) \n\
731 { \n\
732 struct %1$s *skel; \n\
733 \n\
734 skel = skel_alloc(sizeof(*skel)); \n\
735 if (!skel) \n\
736 goto cleanup; \n\
737 skel->ctx.sz = (void *)&skel->links - (void *)skel; \n\
748 \n\
749 { \n\
750 static const char data[] __attribute__((__aligned__(8))) = \"\\\n\
755 \n\
756 \"; \n\
757 \n\
758 skel->%1$s = skel_prep_map_data((void *)data, %2$zd,\n\
759 sizeof(data) - 1);\n\
760 if (!skel->%1$s) \n\
761 goto cleanup; \n\
762 skel->maps.%1$s.initial_value = (__u64) (long) skel->%1$s;\n\
763 } \n\
767 \n\
768 return skel; \n\
769 cleanup: \n\
770 %1$s__destroy(skel); \n\
771 return NULL; \n\
772 } \n\
773 \n\
774 static inline int \n\
775 %1$s__load(struct %1$s *skel) \n\
776 { \n\
777 struct bpf_load_and_run_opts opts = {}; \n\
778 int err; \n\
779 static const char opts_data[] __attribute__((__aligned__(8))) = \"\\\n\
784 \n\
785 \"; \n\
786 static const char opts_insn[] __attribute__((__aligned__(8))) = \"\\\n\
790 \n\
791 \";\n");
808 \n\
809 static const char opts_sig[] __attribute__((__aligned__(8))) = \"\\\n\
813 \n\
814 \";\n");
817 \n\
818 static const char opts_excl_hash[] __attribute__((__aligned__(8))) = \"\\\n\
822 \n\
823 \";\n");
826 \n\
827 opts.signature = (void *)opts_sig; \n\
828 opts.signature_sz = sizeof(opts_sig) - 1; \n\
829 opts.excl_prog_hash = (void *)opts_excl_hash; \n\
830 opts.excl_prog_hash_sz = sizeof(opts_excl_hash) - 1; \n\
831 opts.keyring_id = skel->keyring_id; \n\
836 \n\
837 opts.ctx = (struct bpf_loader_ctx *)skel; \n\
838 opts.data_sz = sizeof(opts_data) - 1; \n\
839 opts.data = (void *)opts_data; \n\
840 opts.insns_sz = sizeof(opts_insn) - 1; \n\
841 opts.insns = (void *)opts_insn; \n\
842 \n\
843 err = bpf_load_and_run(&opts); \n\
844 if (err < 0) \n\
845 return err; \n\
859 \n\
860 skel->%1$s = skel_finalize_map_data(&skel->maps.%1$s.initial_value, \n\
861 %2$zd, %3$s, skel->maps.%1$s.map_fd);\n\
862 if (!skel->%1$s) \n\
863 return -ENOMEM; \n\
868 \n\
869 return 0; \n\
870 } \n\
871 \n\
872 static inline struct %1$s * \n\
873 %1$s__open_and_load(void) \n\
874 { \n\
875 struct %1$s *skel; \n\
876 \n\
877 skel = %1$s__open(); \n\
878 if (!skel) \n\
879 return NULL; \n\
880 if (%1$s__load(skel)) { \n\
881 %1$s__destroy(skel); \n\
882 return NULL; \n\
883 } \n\
884 return skel; \n\
885 } \n\
886 \n\
892 \n\
893 \n\
894 #endif /* %s */ \n\
929 \n\
930 \n\
931 /* maps */ \n\
932 s->map_cnt = %zu; \n\
933 s->map_skel_sz = %zu; \n\
934 s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt,\n\
935 sizeof(*s->maps) > %zu ? sizeof(*s->maps) : %zu);\n\
936 if (!s->maps) { \n\
937 err = -ENOMEM; \n\
938 goto err; \n\
939 } \n\
949 \n\
950 \n\
951 map = (struct bpf_map_skeleton *)((char *)s->maps + %zu * s->map_skel_sz);\n\
952 map->name = \"%s\"; \n\
953 map->map = &obj->maps.%s; \n\
956 /* memory-mapped internal maps */
958 printf("\tmap->mmaped = (void **)&obj->%s;\n", ident);
963 \n\
964 map->link = &obj->links.%s; \n\
981 \n\
982 \n\
983 /* programs */ \n\
984 s->prog_cnt = %zu; \n\
985 s->prog_skel_sz = sizeof(*s->progs); \n\
986 s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz);\n\
987 if (!s->progs) { \n\
988 err = -ENOMEM; \n\
989 goto err; \n\
990 } \n\
997 \n\
998 \n\
999 s->progs[%1$zu].name = \"%2$s\"; \n\
1000 s->progs[%1$zu].prog = &obj->progs.%2$s;\n\
1006 \n\
1007 s->progs[%1$zu].link = &obj->links.%2$s;\n\
1025 int i, err = 0, n;
1030 return -errno;
1032 n = btf_vlen(map_type);
1033 for (i = 0, m = btf_members(map_type); i < n; i++, m++) {
1034 member_type = skip_mods_and_typedefs(btf, m->type, &member_type_id);
1035 member_name = btf__name_by_offset(btf, m->name_off);
1037 offset = m->offset / 8;
1039 printf("\t\t\tchar __padding_%d[%u];\n", i, offset - next_offset);
1054 printf(";\n");
1058 p_err("Failed to resolve size of %s: %d\n", member_name, size);
1067 if (resolve_func_ptr(btf, m->type, NULL)) {
1069 printf("\t\t\tstruct bpf_program *%s;\n", member_name);
1093 p_err("Failed to resolve size of %s: %d\n", member_name, size);
1097 printf("\t\t\tchar __unsupported_%d[%d];\n", i, size);
1107 printf("\t\t\tchar __padding_end[%u];\n", size - next_offset);
1140 return -EINVAL;
1143 return -EINVAL;
1145 type_name = btf__name_by_offset(btf, map_type->name_off);
1147 printf("\t\tstruct %s__%s__%s {\n", obj_name, ident, type_name);
1153 printf("\t\t} *%s;\n", ident);
1177 printf("\tstruct {\n");
1186 printf("\t} struct_ops;\n");
1209 \n\
1210 obj->struct_ops.%1$s = (__typeof__(obj->struct_ops.%1$s))\n\
1211 bpf_map__initial_value(obj->maps.%1$s, NULL);\n\
1212 \n\
1227 int fd, err = -1;
1232 if (!REQ_ARGS(1)) {
1234 return -1;
1240 return -1;
1247 return -1;
1250 strncpy(obj_name, *argv, MAX_OBJ_NAME_LEN - 1);
1251 obj_name[MAX_OBJ_NAME_LEN - 1] = '\0';
1254 return -1;
1262 return -1;
1267 return -1;
1274 return -1;
1287 opts.kernel_log_level = 1 + 2 + 4;
1292 err = -errno;
1317 \n\
1318 /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\
1319 /* THIS FILE IS AUTOGENERATED BY BPFTOOL! */ \n\
1320 #ifndef %2$s \n\
1321 #define %2$s \n\
1322 \n\
1323 #include <bpf/skel_internal.h> \n\
1324 \n\
1325 struct %1$s { \n\
1326 struct bpf_loader_ctx ctx; \n\
1332 \n\
1333 /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\
1334 \n\
1335 /* THIS FILE IS AUTOGENERATED BY BPFTOOL! */ \n\
1336 #ifndef %2$s \n\
1337 #define %2$s \n\
1338 \n\
1339 #include <errno.h> \n\
1340 #include <stdlib.h> \n\
1341 #include <bpf/libbpf.h> \n\
1342 \n\
1343 #define BPF_SKEL_SUPPORTS_MAP_AUTO_ATTACH 1 \n\
1344 \n\
1345 struct %1$s { \n\
1346 struct bpf_object_skeleton *skeleton; \n\
1347 struct bpf_object *obj; \n\
1354 printf("\tstruct {\n");
1359 printf("\t\tstruct bpf_map_desc %s;\n", ident);
1361 printf("\t\tstruct bpf_map *%s;\n", ident);
1363 printf("\t} maps;\n");
1372 printf("\tstruct {\n");
1375 printf("\t\tstruct bpf_prog_desc %s;\n",
1378 printf("\t\tstruct bpf_program *%s;\n",
1381 printf("\t} progs;\n");
1385 printf("\tstruct {\n");
1388 printf("\t\tint %s_fd;\n",
1391 printf("\t\tstruct bpf_link *%s;\n",
1402 printf("t\tint %s_fd;\n", ident);
1404 printf("\t\tstruct bpf_link *%s;\n", ident);
1407 printf("\t} links;\n");
1412 \n\
1413 __s32 keyring_id; \n\
1428 \n\
1429 \n\
1430 #ifdef __cplusplus \n\
1431 static inline struct %1$s *open(const struct bpf_object_open_opts *opts = nullptr);\n\
1432 static inline struct %1$s *open_and_load(); \n\
1433 static inline int load(struct %1$s *skel); \n\
1434 static inline int attach(struct %1$s *skel); \n\
1435 static inline void detach(struct %1$s *skel); \n\
1436 static inline void destroy(struct %1$s *skel); \n\
1437 static inline const void *elf_bytes(size_t *sz); \n\
1438 #endif /* __cplusplus */ \n\
1439 }; \n\
1440 \n\
1441 static void \n\
1442 %1$s__destroy(struct %1$s *obj) \n\
1443 { \n\
1444 if (!obj) \n\
1445 return; \n\
1446 if (obj->skeleton) \n\
1447 bpf_object__destroy_skeleton(obj->skeleton);\n\
1448 free(obj); \n\
1449 } \n\
1450 \n\
1451 static inline int \n\
1452 %1$s__create_skeleton(struct %1$s *obj); \n\
1453 \n\
1454 static inline struct %1$s * \n\
1455 %1$s__open_opts(const struct bpf_object_open_opts *opts) \n\
1456 { \n\
1457 struct %1$s *obj; \n\
1458 int err; \n\
1459 \n\
1460 obj = (struct %1$s *)calloc(1, sizeof(*obj)); \n\
1461 if (!obj) { \n\
1462 errno = ENOMEM; \n\
1463 return NULL; \n\
1464 } \n\
1465 \n\
1466 err = %1$s__create_skeleton(obj); \n\
1467 if (err) \n\
1468 goto err_out; \n\
1469 \n\
1470 err = bpf_object__open_skeleton(obj->skeleton, opts);\n\
1471 if (err) \n\
1472 goto err_out; \n\
1473 \n\
1479 \n\
1480 return obj; \n\
1481 err_out: \n\
1482 %1$s__destroy(obj); \n\
1483 errno = -err; \n\
1484 return NULL; \n\
1485 } \n\
1486 \n\
1487 static inline struct %1$s * \n\
1488 %1$s__open(void) \n\
1489 { \n\
1490 return %1$s__open_opts(NULL); \n\
1491 } \n\
1492 \n\
1493 static inline int \n\
1494 %1$s__load(struct %1$s *obj) \n\
1495 { \n\
1496 return bpf_object__load_skeleton(obj->skeleton); \n\
1497 } \n\
1498 \n\
1499 static inline struct %1$s * \n\
1500 %1$s__open_and_load(void) \n\
1501 { \n\
1502 struct %1$s *obj; \n\
1503 int err; \n\
1504 \n\
1505 obj = %1$s__open(); \n\
1506 if (!obj) \n\
1507 return NULL; \n\
1508 err = %1$s__load(obj); \n\
1509 if (err) { \n\
1510 %1$s__destroy(obj); \n\
1511 errno = -err; \n\
1512 return NULL; \n\
1513 } \n\
1514 return obj; \n\
1515 } \n\
1516 \n\
1517 static inline int \n\
1518 %1$s__attach(struct %1$s *obj) \n\
1519 { \n\
1520 return bpf_object__attach_skeleton(obj->skeleton); \n\
1521 } \n\
1522 \n\
1523 static inline void \n\
1524 %1$s__detach(struct %1$s *obj) \n\
1525 { \n\
1526 bpf_object__detach_skeleton(obj->skeleton); \n\
1527 } \n\
1533 \n\
1534 \n\
1535 static inline const void *%1$s__elf_bytes(size_t *sz); \n\
1536 \n\
1537 static inline int \n\
1538 %1$s__create_skeleton(struct %1$s *obj) \n\
1539 { \n\
1540 struct bpf_object_skeleton *s; \n\
1541 struct bpf_map_skeleton *map __attribute__((unused));\n\
1542 int err; \n\
1543 \n\
1544 s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));\n\
1545 if (!s) { \n\
1546 err = -ENOMEM; \n\
1547 goto err; \n\
1548 } \n\
1549 \n\
1550 s->sz = sizeof(*s); \n\
1551 s->name = \"%1$s\"; \n\
1552 s->obj = &obj->obj; \n\
1561 \n\
1562 \n\
1563 s->data = %1$s__elf_bytes(&s->data_sz); \n\
1564 \n\
1565 obj->skeleton = s; \n\
1566 return 0; \n\
1567 err: \n\
1568 bpf_object__destroy_skeleton(s); \n\
1569 return err; \n\
1570 } \n\
1571 \n\
1572 static inline const void *%1$s__elf_bytes(size_t *sz) \n\
1573 { \n\
1574 static const char data[] __attribute__((__aligned__(8))) = \"\\\n\
1583 \n\
1584 \"; \n\
1585 \n\
1586 *sz = sizeof(data) - 1; \n\
1587 return (const void *)data; \n\
1588 } \n\
1589 \n\
1590 #ifdef __cplusplus \n\
1591 struct %1$s *%1$s::open(const struct bpf_object_open_opts *opts) { return %1$s__open_opts(opts); }\n\
1592 struct %1$s *%1$s::open_and_load() { return %1$s__open_and_load(); } \n\
1593 int %1$s::load(struct %1$s *skel) { return %1$s__load(skel); } \n\
1594 int %1$s::attach(struct %1$s *skel) { return %1$s__attach(skel); } \n\
1595 void %1$s::detach(struct %1$s *skel) { %1$s__detach(skel); } \n\
1596 void %1$s::destroy(struct %1$s *skel) { %1$s__destroy(skel); } \n\
1597 const void *%1$s::elf_bytes(size_t *sz) { return %1$s__elf_bytes(sz); } \n\
1598 #endif /* __cplusplus */ \n\
1599 \n\
1606 \n\
1607 \n\
1608 #endif /* %1$s */ \n\
1626 * This allows for library-like BPF objects to have userspace counterparts
1639 int fd, err = -1, map_type_id;
1647 if (!REQ_ARGS(1)) {
1649 return -1;
1655 return -1;
1662 return -1;
1665 strncpy(obj_name, *argv, MAX_OBJ_NAME_LEN - 1);
1666 obj_name[MAX_OBJ_NAME_LEN - 1] = '\0';
1669 return -1;
1677 return -1;
1682 return -1;
1687 return -1;
1694 return -1;
1721 err = -1;
1754 var_type = btf__type_by_id(btf, var->type);
1756 if (btf_var(var_type)->linkage == BTF_VAR_STATIC)
1765 \n\
1766 /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\
1767 \n\
1768 /* THIS FILE IS AUTOGENERATED! */ \n\
1769 #ifndef %2$s \n\
1770 #define %2$s \n\
1771 \n\
1772 #include <errno.h> \n\
1773 #include <stdlib.h> \n\
1774 #include <bpf/libbpf.h> \n\
1775 \n\
1776 struct %1$s { \n\
1777 struct bpf_object *obj; \n\
1778 struct bpf_object_subskeleton *subskel; \n\
1782 printf("\tstruct {\n");
1786 printf("\t\tstruct bpf_map *%s;\n", ident);
1788 printf("\t} maps;\n");
1796 printf("\tstruct {\n");
1798 printf("\t\tstruct bpf_program *%s;\n",
1801 printf("\t} progs;\n");
1810 \n\
1811 \n\
1812 #ifdef __cplusplus \n\
1813 static inline struct %1$s *open(const struct bpf_object *src);\n\
1814 static inline void destroy(struct %1$s *skel); \n\
1815 #endif /* __cplusplus */ \n\
1816 }; \n\
1817 \n\
1818 static inline void \n\
1819 %1$s__destroy(struct %1$s *skel) \n\
1820 { \n\
1821 if (!skel) \n\
1822 return; \n\
1823 if (skel->subskel) \n\
1824 bpf_object__destroy_subskeleton(skel->subskel);\n\
1825 free(skel); \n\
1826 } \n\
1827 \n\
1828 static inline struct %1$s * \n\
1829 %1$s__open(const struct bpf_object *src) \n\
1830 { \n\
1831 struct %1$s *obj; \n\
1832 struct bpf_object_subskeleton *s; \n\
1833 struct bpf_map_skeleton *map __attribute__((unused));\n\
1834 int err; \n\
1835 \n\
1836 obj = (struct %1$s *)calloc(1, sizeof(*obj)); \n\
1837 if (!obj) { \n\
1838 err = -ENOMEM; \n\
1839 goto err; \n\
1840 } \n\
1841 s = (struct bpf_object_subskeleton *)calloc(1, sizeof(*s));\n\
1842 if (!s) { \n\
1843 err = -ENOMEM; \n\
1844 goto err; \n\
1845 } \n\
1846 s->sz = sizeof(*s); \n\
1847 s->obj = src; \n\
1848 s->var_skel_sz = sizeof(*s->vars); \n\
1849 obj->subskel = s; \n\
1850 \n\
1851 /* vars */ \n\
1852 s->var_cnt = %2$d; \n\
1853 s->vars = (struct bpf_var_skeleton *)calloc(%2$d, sizeof(*s->vars));\n\
1854 if (!s->vars) { \n\
1855 err = -ENOMEM; \n\
1856 goto err; \n\
1857 } \n\
1876 var_type = btf__type_by_id(btf, var->type);
1877 var_name = btf__name_by_offset(btf, var_type->name_off);
1879 if (btf_var(var_type)->linkage == BTF_VAR_STATIC)
1886 \n\
1887 \n\
1888 s->vars[%3$d].name = \"%1$s\"; \n\
1889 s->vars[%3$d].map = &obj->maps.%2$s; \n\
1890 s->vars[%3$d].addr = (void **) &obj->%2$s.%1$s;\n\
1901 \n\
1902 \n\
1903 err = bpf_object__open_subskeleton(s); \n\
1904 if (err) \n\
1905 goto err; \n\
1906 \n\
1912 \n\
1913 return obj; \n\
1914 err: \n\
1915 %1$s__destroy(obj); \n\
1916 errno = -err; \n\
1917 return NULL; \n\
1918 } \n\
1919 \n\
1920 #ifdef __cplusplus \n\
1921 struct %1$s *%1$s::open(const struct bpf_object *src) { return %1$s__open(src); }\n\
1922 void %1$s::destroy(struct %1$s *skel) { %1$s__destroy(skel); }\n\
1923 #endif /* __cplusplus */ \n\
1924 \n\
1925 #endif /* %2$s */ \n\
1945 return -1;
1953 return -1;
1986 "Usage: %1$s %2$s object OUTPUT_FILE INPUT_FILE [INPUT_FILE...]\n"
1987 " %1$s %2$s skeleton FILE [name OBJECT_NAME]\n"
1988 " %1$s %2$s subskeleton FILE [name OBJECT_NAME]\n"
1989 " %1$s %2$s min_core_btf INPUT OUTPUT OBJECT [OBJECT...]\n"
1990 " %1$s %2$s help\n"
1991 "\n"
1992 " " HELP_SPEC_OPTIONS " |\n"
1993 " {-L|--use-loader} | [ {-S|--sign } {-k} <private_key.pem> {-i} <certificate.x509> ]}\n"
2009 return -ENOMEM;
2013 return -errno;
2015 if (fwrite(data, 1, data_sz, f) != data_sz)
2016 err = -errno;
2042 btf__free(info->src_btf);
2043 btf__free(info->marked_btf);
2054 info = calloc(1, sizeof(*info));
2058 info->src_btf = btf__parse(targ_btf_path, NULL);
2059 if (!info->src_btf) {
2060 err = -errno;
2065 info->marked_btf = btf__parse(targ_btf_path, NULL);
2066 if (!info->marked_btf) {
2067 err = -errno;
2076 errno = -err;
2084 const struct btf_type *t = btf__type_by_id(info->marked_btf, type_id);
2087 m->name_off = MARKED;
2093 const struct btf_type *btf_type = btf__type_by_id(info->src_btf, type_id);
2103 cloned_type = (struct btf_type *) btf__type_by_id(info->marked_btf, type_id);
2104 cloned_type->name_off = MARKED;
2118 err = btfgen_mark_type(info, btf_type->type, follow_pointers);
2127 err = btfgen_mark_type(info, btf_type->type, follow_pointers);
2135 err = btfgen_mark_type(info, array->type, follow_pointers);
2137 err = err ? : btfgen_mark_type(info, array->index_type, follow_pointers);
2143 err = btfgen_mark_type(info, btf_type->type, follow_pointers);
2150 err = btfgen_mark_type(info, param->type, follow_pointers);
2159 return -EINVAL;
2167 struct btf *btf = info->src_btf;
2171 unsigned int type_id = targ_spec->root_type_id;
2181 for (int i = 1; i < targ_spec->raw_len; i++) {
2184 type_id = btf_type->type;
2191 idx = targ_spec->raw_spec[i];
2198 type_id = btf_member->type;
2206 type_id = array->type;
2211 btf_kind_str(btf_type), btf_type->type);
2212 return -EINVAL;
2230 struct btf *btf = info->src_btf;
2239 cloned_type = (struct btf_type *)btf__type_by_id(info->marked_btf, type_id);
2240 cloned_type->name_off = MARKED;
2262 err = btfgen_mark_type_match(info, m->type, false);
2273 return btfgen_mark_type_match(info, btf_type->type, behind_ptr);
2275 return btfgen_mark_type_match(info, btf_type->type, true);
2281 err = btfgen_mark_type_match(info, array->type, false);
2283 err = err ? : btfgen_mark_type_match(info, array->index_type, false);
2293 err = btfgen_mark_type_match(info, btf_type->type, false);
2300 err = btfgen_mark_type_match(info, param->type, false);
2310 return -EINVAL;
2322 return btfgen_mark_type_match(info, targ_spec->root_type_id, false);
2327 return btfgen_mark_type(info, targ_spec->root_type_id, true);
2332 return btfgen_mark_type(info, targ_spec->root_type_id, false);
2337 switch (res->relo_kind) {
2357 return -EINVAL;
2376 err = -EINVAL;
2380 local_name = btf__name_by_offset(local_btf, local_type->name_off);
2382 err = -EINVAL;
2387 cands = calloc(1, sizeof(*cands));
2391 err = bpf_core_add_cands(&local_cand, local_essent_len, targ_btf, "vmlinux", 1, cands);
2399 errno = -err;
2419 err = -errno;
2427 err = -EINVAL;
2431 if (btf_ext->core_relo_info.len == 0) {
2442 seg = &btf_ext->core_relo_info;
2448 const char *sec_name = btf__name_by_offset(btf, sec->sec_name_off);
2450 if (relo->kind != BPF_CORE_TYPE_ID_LOCAL &&
2451 !hashmap__find(cand_cache, relo->type_id, &cands)) {
2452 cands = btfgen_find_cands(btf, info->src_btf, relo->type_id);
2454 err = -errno;
2458 err = hashmap__set(cand_cache, relo->type_id, cands,
2482 bpf_core_free_cands(entry->pvalue);
2495 unsigned int i, n = btf__type_cnt(info->marked_btf);
2500 err = -errno;
2504 ids = calloc(n, sizeof(*ids));
2506 err = -errno;
2511 for (i = 1; i < n; i++) {
2516 cloned_type = btf__type_by_id(info->marked_btf, i);
2518 if (cloned_type->name_off != MARKED)
2521 type = btf__type_by_id(info->src_btf, i);
2529 name = btf__str_by_offset(info->src_btf, type->name_off);
2532 err = btf__add_struct(btf_new, name, type->size);
2534 err = btf__add_union(btf_new, name, type->size);
2545 if (cloned_m->name_off != MARKED)
2548 name = btf__str_by_offset(info->src_btf, m->name_off);
2549 err = btf__add_field(btf_new, name, m->type,
2556 err = btf__add_type(btf_new, info->src_btf, type);
2567 for (i = 1; i < btf__type_cnt(btf_new); i++) {
2586 errno = -err;
2592 * The BTFGen algorithm is divided in two main parts: (1) collect the
2602 * relocation. For field-based relocations only the members that are
2604 * BTF file. For type-based relocations empty struct / unions are
2605 * generated and for enum-based relocations the whole type is saved.
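As a minimal sketch of the generation step for a field-based relocation (assumptions: libbpf's BTF write APIs; identifiers are illustrative, and this is not the file's actual btfgen_get_btf(), which also remaps type IDs), only the accessed member is copied into the reduced struct:

	#include <bpf/btf.h>

	static int add_reduced_struct(struct btf *dst, const struct btf *src,
				      const struct btf_type *t,
				      const struct btf_member *m)
	{
		/* empty-shell struct with the original name and size */
		int id = btf__add_struct(dst, btf__str_by_offset(src, t->name_off), t->size);

		if (id < 0)
			return id;
		/* keep only the member the relocation touches; bit offset assumes a
		 * non-bitfield member, and m->type would still need remapping into dst
		 * in the real flow */
		return btf__add_field(dst, btf__str_by_offset(src, m->name_off),
				      m->type, m->offset, 0);
	}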
2621 err = -errno;
2637 err = -errno;
2662 return -1;
2668 objs = (const char **) calloc(argc + 1, sizeof(*objs));
2671 return -ENOMEM;