1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
6 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
71 #pragma GCC diagnostic ignored "-Wformat-nonliteral"
299 if (err != -EPERM || geteuid() != 0)
316 pr_warn("permission error while running as root; try raising 'ulimit -l'? current value: %s\n",
330 fd = -1; \
341 /* as of v1.0 libbpf_set_strict_mode() is a no-op */
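The symbol is still exported for backward compatibility; a sketch of what a caller sees in v1.0+:

#include <bpf/libbpf.h>

/* accepted for backward compatibility, but a no-op since v1.0:
 * all strict behaviors are permanently enabled */
libbpf_set_strict_mode(LIBBPF_STRICT_ALL);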
387 /* stored as sec_def->cookie for all libbpf-supported SEC()s */
406 /* BPF program supports non-linear XDP buffer */
444 * program. For the entry-point (main) BPF program, this is always
445 * zero. For a sub-program, this gets reset before each of main BPF
447 * whether sub-program was already appended to the main program, and
459 * entry-point BPF programs this includes the size of main program
460 * itself plus all the used sub-programs, appended at the end
510 * kern_vdata's size == sizeof(struct bpf_struct_ops_tcp_congestion_ops)
620 /* BTF fd index to be patched in for insn->off, this is
621 * 0 for vmlinux BTF, index in obj->fd_array for module
714 /* Path to the custom BTF to be used for BPF CO-RE relocations as an
718 /* vmlinux BTF override for CO-RE relocations */
763 zclose(prog->fd);
765 zfree(&prog->func_info);
766 zfree(&prog->line_info);
775 zfree(&prog->name);
776 zfree(&prog->sec_name);
777 zfree(&prog->insns);
778 zfree(&prog->reloc_desc);
780 prog->nr_reloc = 0;
781 prog->insns_cnt = 0;
782 prog->sec_idx = -1;
787 return BPF_CLASS(insn->code) == BPF_JMP &&
788 BPF_OP(insn->code) == BPF_CALL &&
789 BPF_SRC(insn->code) == BPF_K &&
790 insn->src_reg == BPF_PSEUDO_CALL &&
791 insn->dst_reg == 0 &&
792 insn->off == 0;
797 return insn->code == (BPF_JMP | BPF_CALL);
802 return is_ldimm64_insn(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
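The predicates above separate BPF pseudo-calls (calls into other BPF subprograms) from helper calls. A hedged sketch of the two encodings involved, with field values per the BPF uapi rather than copied from libbpf:

struct bpf_insn subprog_call = {
        .code = BPF_JMP | BPF_CALL,
        .src_reg = BPF_PSEUDO_CALL,      /* call into another BPF subprog */
        .imm = 5,                        /* relative insn offset (illustrative) */
};
struct bpf_insn helper_call = {
        .code = BPF_JMP | BPF_CALL,      /* src_reg == 0: kernel helper call */
        .imm = BPF_FUNC_map_lookup_elem,
};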
813 return -EINVAL;
817 prog->obj = obj;
819 prog->sec_idx = sec_idx;
820 prog->sec_insn_off = sec_off / BPF_INSN_SZ;
821 prog->sec_insn_cnt = insn_data_sz / BPF_INSN_SZ;
823 prog->insns_cnt = prog->sec_insn_cnt;
825 prog->type = BPF_PROG_TYPE_UNSPEC;
826 prog->fd = -1;
827 prog->exception_cb_idx = -1;
834 prog->autoload = false;
838 prog->autoload = true;
841 prog->autoattach = true;
844 prog->log_level = obj->log_level;
846 prog->sec_name = strdup(sec_name);
847 if (!prog->sec_name)
850 prog->name = strdup(name);
851 if (!prog->name)
854 prog->insns = malloc(insn_data_sz);
855 if (!prog->insns)
857 memcpy(prog->insns, insn_data, insn_data_sz);
863 return -ENOMEM;
870 Elf_Data *symbols = obj->efile.symbols;
872 void *data = sec_data->d_buf;
873 size_t sec_sz = sec_data->d_size, sec_off, prog_sz, nr_syms;
878 progs = obj->programs;
879 nr_progs = obj->nr_programs;
880 nr_syms = symbols->d_size / sizeof(Elf64_Sym);
885 if (sym->st_shndx != sec_idx)
887 if (ELF64_ST_TYPE(sym->st_info) != STT_FUNC)
890 prog_sz = sym->st_size;
891 sec_off = sym->st_value;
893 name = elf_sym_str(obj, sym->st_name);
897 return -LIBBPF_ERRNO__FORMAT;
903 return -LIBBPF_ERRNO__FORMAT;
906 if (sec_idx != obj->efile.text_shndx && ELF64_ST_BIND(sym->st_info) == STB_LOCAL) {
908 return -ENOTSUP;
917 * In this case the original obj->programs
923 return -ENOMEM;
925 obj->programs = progs;
934 if (ELF64_ST_BIND(sym->st_info) != STB_LOCAL)
935 prog->sym_global = true;
942 if (prog->sym_global && (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN
943 || ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL))
944 prog->mark_btf_static = true;
947 obj->nr_programs = nr_progs;
955 struct bpf_program *prog = obj->programs;
959 for (p = 0; p < obj->nr_programs; p++, prog++) {
960 insn = prog->insns;
961 for (i = 0; i < prog->insns_cnt; i++, insn++)
964 pr_debug("converted %zu BPF programs to native byte order\n", obj->nr_programs);
989 if (!strcmp(btf__name_by_offset(btf, m->name_off), name))
1050 if (kern_data_member->type == kern_type_id)
1056 return -EINVAL;
1070 return map->def.type == BPF_MAP_TYPE_STRUCT_OPS;
1078 for (i = 0; i < obj->nr_programs; i++) {
1079 if (&obj->programs[i] == prog)
1080 return prog->type == BPF_PROG_TYPE_STRUCT_OPS;
1097 for (i = 0; i < obj->nr_programs; ++i) {
1101 prog = &obj->programs[i];
1102 if (prog->type != BPF_PROG_TYPE_STRUCT_OPS)
1105 for (j = 0; j < obj->nr_maps; ++j) {
1108 map = &obj->maps[j];
1112 type = btf__type_by_id(obj->btf, map->st_ops->type_id);
1115 slot_prog = map->st_ops->progs[k];
1120 if (map->autocreate)
1125 prog->autoload = should_load;
1137 struct bpf_object *obj = map->obj;
1138 const struct btf *btf = obj->btf;
1146 st_ops = map->st_ops;
1147 type = btf__type_by_id(btf, st_ops->type_id);
1148 tname = btf__name_by_offset(btf, type->name_off);
1156 kern_btf = mod_btf ? mod_btf->btf : obj->btf_vmlinux;
1159 map->name, st_ops->type_id, kern_type_id, kern_vtype_id);
1161 map->mod_btf_fd = mod_btf ? mod_btf->fd : -1;
1162 map->def.value_size = kern_vtype->size;
1163 map->btf_vmlinux_value_type_id = kern_vtype_id;
1165 st_ops->kern_vdata = calloc(1, kern_vtype->size);
1166 if (!st_ops->kern_vdata)
1167 return -ENOMEM;
1169 data = st_ops->data;
1170 kern_data_off = kern_data_member->offset / 8;
1171 kern_data = st_ops->kern_vdata + kern_data_off;
1184 mname = btf__name_by_offset(btf, member->name_off);
1185 moff = member->offset / 8;
1187 msize = btf__resolve_size(btf, member->type);
1190 map->name, mname);
1198 map->name, mname);
1199 return -ENOTSUP;
1202 if (st_ops->progs[i]) {
1210 st_ops->progs[i]->autoload = false;
1211 st_ops->progs[i] = NULL;
1214 /* Skip all-zero/NULL fields if they are not present in the kernel BTF */
1216 map->name, mname);
1220 kern_member_idx = kern_member - btf_members(kern_type);
1224 map->name, mname);
1225 return -ENOTSUP;
1228 kern_moff = kern_member->offset / 8;
1231 mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id);
1232 kern_mtype = skip_mods_and_typedefs(kern_btf, kern_member->type,
1234 if (BTF_INFO_KIND(mtype->info) !=
1235 BTF_INFO_KIND(kern_mtype->info)) {
1237 map->name, mname, BTF_INFO_KIND(mtype->info),
1238 BTF_INFO_KIND(kern_mtype->info));
1239 return -ENOTSUP;
1248 if (st_ops->progs[i] && st_ops->progs[i] != prog)
1249 st_ops->progs[i]->autoload = false;
1252 st_ops->progs[i] = prog;
1258 map->name, mname);
1259 return -ENOTSUP;
1263 kern_mtype->type,
1266 /* mtype->type must be a func_proto which was
1272 map->name, mname);
1273 return -ENOTSUP;
1277 prog->attach_btf_obj_fd = mod_btf->fd;
1282 if (!prog->attach_btf_id) {
1283 prog->attach_btf_id = kern_type_id;
1284 prog->expected_attach_type = kern_member_idx;
1287 /* struct_ops BPF prog can be re-used between multiple
1292 if (prog->attach_btf_id != kern_type_id) {
1294 map->name, mname, prog->name, prog->sec_name, prog->type,
1295 prog->attach_btf_id, kern_type_id);
1296 return -EINVAL;
1298 if (prog->expected_attach_type != kern_member_idx) {
1300 map->name, mname, prog->name, prog->sec_name, prog->type,
1301 prog->expected_attach_type, kern_member_idx);
1302 return -EINVAL;
1305 st_ops->kern_func_off[i] = kern_data_off + kern_moff;
1308 map->name, mname, prog->name, moff,
1317 map->name, mname, (ssize_t)msize,
1319 return -ENOTSUP;
1323 map->name, mname, (unsigned int)msize,
1337 for (i = 0; i < obj->nr_maps; i++) {
1338 map = &obj->maps[i];
1343 if (!map->autocreate)
1366 if (shndx == -1)
1369 btf = obj->btf;
1375 return -EINVAL;
1381 type = btf__type_by_id(obj->btf, vsi->type);
1382 var_name = btf__name_by_offset(obj->btf, type->name_off);
1384 type_id = btf__resolve_type(obj->btf, vsi->type);
1387 vsi->type, sec_name);
1388 return -EINVAL;
1391 type = btf__type_by_id(obj->btf, type_id);
1392 tname = btf__name_by_offset(obj->btf, type->name_off);
1395 return -ENOTSUP;
1399 return -EINVAL;
1406 map->sec_idx = shndx;
1407 map->sec_offset = vsi->offset;
1408 map->name = strdup(var_name);
1409 if (!map->name)
1410 return -ENOMEM;
1411 map->btf_value_type_id = type_id;
1417 map->autocreate = false;
1422 map->def.type = BPF_MAP_TYPE_STRUCT_OPS;
1423 map->def.key_size = sizeof(int);
1424 map->def.value_size = type->size;
1425 map->def.max_entries = 1;
1426 map->def.map_flags = strcmp(sec_name, STRUCT_OPS_LINK_SEC) == 0 ? BPF_F_LINK : 0;
1427 map->autoattach = true;
1429 map->st_ops = calloc(1, sizeof(*map->st_ops));
1430 if (!map->st_ops)
1431 return -ENOMEM;
1432 st_ops = map->st_ops;
1433 st_ops->data = malloc(type->size);
1434 st_ops->progs = calloc(btf_vlen(type), sizeof(*st_ops->progs));
1435 st_ops->kern_func_off = malloc(btf_vlen(type) *
1436 sizeof(*st_ops->kern_func_off));
1437 if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off)
1438 return -ENOMEM;
1440 if (vsi->offset + type->size > data->d_size) {
1443 return -EINVAL;
1446 memcpy(st_ops->data,
1447 data->d_buf + vsi->offset,
1448 type->size);
1449 st_ops->type_id = type_id;
1452 tname, type_id, var_name, vsi->offset);
1463 for (sec_idx = 0; sec_idx < obj->efile.sec_cnt; ++sec_idx) {
1464 struct elf_sec_desc *desc = &obj->efile.secs[sec_idx];
1466 if (desc->sec_type != SEC_ST_OPS)
1471 return -LIBBPF_ERRNO__FORMAT;
1473 err = init_struct_ops_maps(obj, sec_name, sec_idx, desc->data);
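The maps initialized here originate from BPF-side definitions along these lines (names illustrative; see bpf_dctcp in the kernel selftests for a complete example):

SEC("struct_ops/dctcp_init")
void BPF_PROG(dctcp_init, struct sock *sk)
{
        /* ... */
}

SEC(".struct_ops")
struct tcp_congestion_ops dctcp = {
        .init = (void *)dctcp_init,
        .name = "bpf_dctcp",
};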
1492 return ERR_PTR(-ENOMEM);
1495 strcpy(obj->path, path);
1497 libbpf_strlcpy(obj->name, obj_name, sizeof(obj->name));
1500 libbpf_strlcpy(obj->name, basename((void *)path), sizeof(obj->name));
1501 end = strchr(obj->name, '.');
1506 obj->efile.fd = -1;
1513 obj->efile.obj_buf = obj_buf;
1514 obj->efile.obj_buf_sz = obj_buf_sz;
1515 obj->efile.btf_maps_shndx = -1;
1516 obj->kconfig_map_idx = -1;
1517 obj->arena_map_idx = -1;
1519 obj->kern_version = get_kernel_version();
1520 obj->state = OBJ_OPEN;
1527 if (!obj->efile.elf)
1530 elf_end(obj->efile.elf);
1531 obj->efile.elf = NULL;
1532 obj->efile.ehdr = NULL;
1533 obj->efile.symbols = NULL;
1534 obj->efile.arena_data = NULL;
1536 zfree(&obj->efile.secs);
1537 obj->efile.sec_cnt = 0;
1538 zclose(obj->efile.fd);
1539 obj->efile.obj_buf = NULL;
1540 obj->efile.obj_buf_sz = 0;
1549 if (obj->efile.elf) {
1551 return -LIBBPF_ERRNO__LIBELF;
1554 if (obj->efile.obj_buf_sz > 0) {
1556 elf = elf_memory((char *)obj->efile.obj_buf, obj->efile.obj_buf_sz);
1558 obj->efile.fd = open(obj->path, O_RDONLY | O_CLOEXEC);
1559 if (obj->efile.fd < 0) {
1560 err = -errno;
1561 pr_warn("elf: failed to open %s: %s\n", obj->path, errstr(err));
1565 elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL);
1569 pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1));
1570 err = -LIBBPF_ERRNO__LIBELF;
1574 obj->efile.elf = elf;
1577 err = -LIBBPF_ERRNO__FORMAT;
1578 pr_warn("elf: '%s' is not a proper ELF object\n", obj->path);
1583 err = -LIBBPF_ERRNO__FORMAT;
1584 pr_warn("elf: '%s' is not a 64-bit ELF object\n", obj->path);
1588 obj->efile.ehdr = ehdr = elf64_getehdr(elf);
1589 if (!obj->efile.ehdr) {
1590 pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1));
1591 err = -LIBBPF_ERRNO__FORMAT;
1596 if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB &&
1597 ehdr->e_ident[EI_DATA] != ELFDATA2MSB) {
1598 err = -LIBBPF_ERRNO__ENDIAN;
1599 pr_warn("elf: '%s' has unknown byte order\n", obj->path);
1603 obj->byteorder = ehdr->e_ident[EI_DATA];
1605 if (elf_getshdrstrndx(elf, &obj->efile.shstrndx)) {
1607 obj->path, elf_errmsg(-1));
1608 err = -LIBBPF_ERRNO__FORMAT;
1613 if (!elf_rawdata(elf_getscn(elf, obj->efile.shstrndx), NULL)) {
1615 obj->path, elf_errmsg(-1));
1616 err = -LIBBPF_ERRNO__FORMAT;
1621 if (ehdr->e_type != ET_REL || (ehdr->e_machine && ehdr->e_machine != EM_BPF)) {
1622 pr_warn("elf: %s is not a valid eBPF object file\n", obj->path);
1623 err = -LIBBPF_ERRNO__FORMAT;
1636 return obj->byteorder == ELFDATA2LSB;
1638 return obj->byteorder == ELFDATA2MSB;
1648 pr_warn("invalid license section in %s\n", obj->path);
1649 return -LIBBPF_ERRNO__FORMAT;
1651 /* libbpf_strlcpy() only copies first N - 1 bytes, so size + 1 won't
1654 libbpf_strlcpy(obj->license, data, min(size + 1, sizeof(obj->license)));
1655 pr_debug("license of %s is %s\n", obj->path, obj->license);
1665 pr_warn("invalid kver section in %s\n", obj->path);
1666 return -LIBBPF_ERRNO__FORMAT;
1669 obj->kern_version = kver;
1670 pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
1688 return -EINVAL;
1693 *size = data->d_size;
1697 return -ENOENT;
1702 Elf_Data *symbols = obj->efile.symbols;
1706 for (si = 0; si < symbols->d_size / sizeof(Elf64_Sym); si++) {
1709 if (ELF64_ST_TYPE(sym->st_info) != STT_OBJECT)
1712 if (ELF64_ST_BIND(sym->st_info) != STB_GLOBAL &&
1713 ELF64_ST_BIND(sym->st_info) != STB_WEAK)
1716 sname = elf_sym_str(obj, sym->st_name);
1719 return ERR_PTR(-EIO);
1725 return ERR_PTR(-ENOENT);
1738 const char *name = "libbpf-placeholder-fd";
1745 return -errno;
1750 return -errno;
1759 err = libbpf_ensure_mem((void **)&obj->maps, &obj->maps_cap,
1760 sizeof(*obj->maps), obj->nr_maps + 1);
1764 map = &obj->maps[obj->nr_maps++];
1765 map->obj = obj;
1778 map->fd = create_placeholder_fd();
1779 if (map->fd < 0)
1780 return ERR_PTR(map->fd);
1781 map->inner_map_fd = -1;
1782 map->autocreate = true;
1801 switch (map->def.type) {
1803 return array_map_mmap_sz(map->def.value_size, map->def.max_entries);
1805 return page_sz * map->def.max_entries;
1815 if (!map->mmaped)
1816 return -EINVAL;
1821 mmaped = mmap(NULL, new_sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
1823 return -errno;
1825 memcpy(mmaped, map->mmaped, min(old_sz, new_sz));
1826 munmap(map->mmaped, old_sz);
1827 map->mmaped = mmaped;
1861 * '.rodata.abracad' kernel and user-visible name.
1870 sfx_len = BPF_OBJ_NAME_LEN - 1;
1876 pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1, strlen(obj->name));
1878 snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name,
1903 if (!map->btf_value_type_id)
1906 t = btf__type_by_id(obj->btf, map->btf_value_type_id);
1912 vt = btf__type_by_id(obj->btf, vsi->type);
1916 if (btf_var(vt)->linkage != BTF_VAR_STATIC)
1936 map->libbpf_type = type;
1937 map->sec_idx = sec_idx;
1938 map->sec_offset = 0;
1939 map->real_name = strdup(real_name);
1940 map->name = internal_map_name(obj, real_name);
1941 if (!map->real_name || !map->name) {
1942 zfree(&map->real_name);
1943 zfree(&map->name);
1944 return -ENOMEM;
1947 def = &map->def;
1948 def->type = BPF_MAP_TYPE_ARRAY;
1949 def->key_size = sizeof(int);
1950 def->value_size = data_sz;
1951 def->max_entries = 1;
1952 def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG
1959 def->map_flags |= BPF_F_MMAPABLE;
1962 map->name, map->sec_idx, map->sec_offset, def->map_flags);
1965 map->mmaped = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
1966 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
1967 if (map->mmaped == MAP_FAILED) {
1968 err = -errno;
1969 map->mmaped = NULL;
1970 pr_warn("failed to alloc map '%s' content buffer: %s\n", map->name, errstr(err));
1971 zfree(&map->real_name);
1972 zfree(&map->name);
1977 memcpy(map->mmaped, data, data_sz);
1979 pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
1990 * Populate obj->maps with libbpf internal maps.
1992 for (sec_idx = 1; sec_idx < obj->efile.sec_cnt; sec_idx++) {
1993 sec_desc = &obj->efile.secs[sec_idx];
1996 if (!sec_desc->data || sec_desc->data->d_size == 0)
1999 switch (sec_desc->sec_type) {
2004 sec_desc->data->d_buf,
2005 sec_desc->data->d_size);
2008 obj->has_rodata = true;
2012 sec_desc->data->d_buf,
2013 sec_desc->data->d_size);
2020 sec_desc->data->d_size);
2038 for (i = 0; i < obj->nr_extern; i++) {
2039 if (strcmp(obj->externs[i].name, name) == 0)
2040 return &obj->externs[i];
2051 for (i = 0; i < obj->nr_extern; i++) {
2052 ext_name = obj->externs[i].name;
2054 return &obj->externs[i];
2062 switch (ext->kcfg.type) {
2066 ext->name, value);
2067 return -EINVAL;
2087 ext->name, value);
2088 return -EINVAL;
2090 ext->is_set = true;
2099 if (ext->kcfg.type != KCFG_CHAR_ARR) {
2101 ext->name, value);
2102 return -EINVAL;
2106 if (len < 2 || value[len - 1] != '"') {
2108 ext->name, value);
2109 return -EINVAL;
2113 len -= 2;
2114 if (len >= ext->kcfg.sz) {
2116 ext->name, value, len, ext->kcfg.sz - 1);
2117 len = ext->kcfg.sz - 1;
2121 ext->is_set = true;
2133 err = -errno;
2139 return -EINVAL;
2146 int bit_sz = ext->kcfg.sz * 8;
2148 if (ext->kcfg.sz == 8)
2151 /* Validate that value stored in u64 fits in integer of `ext->sz`
2156 * -2^(Y-1) <= X <= 2^(Y-1) - 1
2157 * 0 <= X + 2^(Y-1) <= 2^Y - 1
2158 * 0 <= X + 2^(Y-1) < 2^Y
2160 * For unsigned target integer, check that all the (64 - Y) bits are
2163 if (ext->kcfg.is_signed)
2164 return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz);
2172 if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR &&
2173 ext->kcfg.type != KCFG_BOOL) {
2175 ext->name, (unsigned long long)value);
2176 return -EINVAL;
2178 if (ext->kcfg.type == KCFG_BOOL && value > 1) {
2180 ext->name, (unsigned long long)value);
2181 return -EINVAL;
2186 ext->name, (unsigned long long)value, ext->kcfg.sz);
2187 return -ERANGE;
2189 switch (ext->kcfg.sz) {
2203 return -EINVAL;
2205 ext->is_set = true;
2224 return -EINVAL;
2229 if (buf[len - 1] == '\n')
2230 buf[len - 1] = '\0';
2236 return -EINVAL;
2240 if (!ext || ext->is_set)
2243 ext_val = data + ext->kcfg.data_off;
2257 pr_warn("extern (kcfg) '%s': value '%s' isn't a valid integer\n", ext->name, value);
2260 if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) {
2261 pr_warn("extern (kcfg) '%s': value '%s' implies integer type\n", ext->name, value);
2262 return -EINVAL;
2269 pr_debug("extern (kcfg) '%s': set to %s\n", ext->name, value);
2281 len = snprintf(buf, PATH_MAX, "/boot/config-%s", uts.release);
2283 return -EINVAL;
2285 return -ENAMETOOLONG;
2294 return -ENOENT;
2320 err = -errno;
2321 pr_warn("failed to open in-memory Kconfig: %s\n", errstr(err));
2328 pr_warn("error parsing in-memory Kconfig line '%s': %s\n",
2344 for (i = 0; i < obj->nr_extern; i++) {
2345 ext = &obj->externs[i];
2346 if (ext->type == EXT_KCFG)
2353 map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz;
2355 ".kconfig", obj->efile.symbols_shndx,
2360 obj->kconfig_map_idx = obj->nr_maps - 1;
2375 *res_id = t->type;
2376 t = btf__type_by_id(btf, t->type);
2391 t = skip_mods_and_typedefs(btf, t->type, res_id);
2438 const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
2439 const char *name = btf__name_by_offset(btf, m->name_off);
2449 arr_t = btf__type_by_id(btf, t->type);
2452 map_name, name, t->type);
2461 *res = arr_info->nelems;
2468 const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
2469 const char *name = btf__name_by_offset(btf, m->name_off);
2496 *res = e->val;
2511 return -EINVAL;
2513 return -ENAMETOOLONG;
2552 const char *name = btf__name_by_offset(btf, m->name_off);
2556 return -EINVAL;
2559 if (!get_map_field_int(map_name, btf, m, &map_def->map_type))
2560 return -EINVAL;
2561 map_def->parts |= MAP_DEF_MAP_TYPE;
2563 if (!get_map_field_int(map_name, btf, m, &map_def->max_entries))
2564 return -EINVAL;
2565 map_def->parts |= MAP_DEF_MAX_ENTRIES;
2567 if (!get_map_field_int(map_name, btf, m, &map_def->map_flags))
2568 return -EINVAL;
2569 map_def->parts |= MAP_DEF_MAP_FLAGS;
2571 if (!get_map_field_int(map_name, btf, m, &map_def->numa_node))
2572 return -EINVAL;
2573 map_def->parts |= MAP_DEF_NUMA_NODE;
2578 return -EINVAL;
2579 if (map_def->key_size && map_def->key_size != sz) {
2581 map_name, map_def->key_size, sz);
2582 return -EINVAL;
2584 map_def->key_size = sz;
2585 map_def->parts |= MAP_DEF_KEY_SIZE;
2589 t = btf__type_by_id(btf, m->type);
2592 map_name, m->type);
2593 return -EINVAL;
2598 return -EINVAL;
2600 sz = btf__resolve_size(btf, t->type);
2603 map_name, t->type, (ssize_t)sz);
2606 if (map_def->key_size && map_def->key_size != sz) {
2608 map_name, map_def->key_size, (ssize_t)sz);
2609 return -EINVAL;
2611 map_def->key_size = sz;
2612 map_def->key_type_id = t->type;
2613 map_def->parts |= MAP_DEF_KEY_SIZE | MAP_DEF_KEY_TYPE;
2618 return -EINVAL;
2619 if (map_def->value_size && map_def->value_size != sz) {
2621 map_name, map_def->value_size, sz);
2622 return -EINVAL;
2624 map_def->value_size = sz;
2625 map_def->parts |= MAP_DEF_VALUE_SIZE;
2629 t = btf__type_by_id(btf, m->type);
2632 map_name, m->type);
2633 return -EINVAL;
2638 return -EINVAL;
2640 sz = btf__resolve_size(btf, t->type);
2643 map_name, t->type, (ssize_t)sz);
2646 if (map_def->value_size && map_def->value_size != sz) {
2648 map_name, map_def->value_size, (ssize_t)sz);
2649 return -EINVAL;
2651 map_def->value_size = sz;
2652 map_def->value_type_id = t->type;
2653 map_def->parts |= MAP_DEF_VALUE_SIZE | MAP_DEF_VALUE_TYPE;
2656 bool is_map_in_map = bpf_map_type__is_map_in_map(map_def->map_type);
2657 bool is_prog_array = map_def->map_type == BPF_MAP_TYPE_PROG_ARRAY;
2658 const char *desc = is_map_in_map ? "map-in-map inner" : "prog-array value";
2663 pr_warn("map '%s': multi-level inner maps not supported.\n",
2665 return -ENOTSUP;
2667 if (i != vlen - 1) {
2670 return -EINVAL;
2673 pr_warn("map '%s': should be map-in-map or prog-array.\n",
2675 return -ENOTSUP;
2677 if (map_def->value_size && map_def->value_size != 4) {
2679 map_name, map_def->value_size);
2680 return -EINVAL;
2682 map_def->value_size = 4;
2683 t = btf__type_by_id(btf, m->type);
2686 map_name, desc, m->type);
2687 return -EINVAL;
2689 if (!btf_is_array(t) || btf_array(t)->nelems) {
2690 pr_warn("map '%s': %s spec is not a zero-sized array.\n",
2692 return -EINVAL;
2694 t = skip_mods_and_typedefs(btf, btf_array(t)->type, NULL);
2698 return -EINVAL;
2700 t = skip_mods_and_typedefs(btf, t->type, NULL);
2703 pr_warn("map '%s': prog-array value def is of unexpected kind %s.\n",
2705 return -EINVAL;
2710 pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
2712 return -EINVAL;
2720 map_def->parts |= MAP_DEF_INNER_MAP;
2726 return -EINVAL;
2729 return -EINVAL;
2733 return -EINVAL;
2735 map_def->pinning = val;
2736 map_def->parts |= MAP_DEF_PINNING;
2741 return -EINVAL;
2742 map_def->map_extra = map_extra;
2743 map_def->parts |= MAP_DEF_MAP_EXTRA;
2747 return -ENOTSUP;
2753 if (map_def->map_type == BPF_MAP_TYPE_UNSPEC) {
2755 return -EINVAL;
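All of the MAP_DEF_* parts collected above come from BTF-defined map declarations of this shape ("my_map" is illustrative):

struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(max_entries, 1024);
        __type(key, __u32);
        __type(value, __u64);
} my_map SEC(".maps");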
2770 * a power-of-2 multiple of kernel's page size. If user diligently
2777 * user-set size to satisfy both user size request and kernel
2786 * very close to UINT_MAX but is not a power-of-2 multiple of
2794 return map->def.type == BPF_MAP_TYPE_RINGBUF ||
2795 map->def.type == BPF_MAP_TYPE_USER_RINGBUF;
2800 map->def.type = def->map_type;
2801 map->def.key_size = def->key_size;
2802 map->def.value_size = def->value_size;
2803 map->def.max_entries = def->max_entries;
2804 map->def.map_flags = def->map_flags;
2805 map->map_extra = def->map_extra;
2807 map->numa_node = def->numa_node;
2808 map->btf_key_type_id = def->key_type_id;
2809 map->btf_value_type_id = def->value_type_id;
2811 /* auto-adjust BPF ringbuf map max_entries to be a multiple of page size */
2813 map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries);
2815 if (def->parts & MAP_DEF_MAP_TYPE)
2816 pr_debug("map '%s': found type = %u.\n", map->name, def->map_type);
2818 if (def->parts & MAP_DEF_KEY_TYPE)
2820 map->name, def->key_type_id, def->key_size);
2821 else if (def->parts & MAP_DEF_KEY_SIZE)
2822 pr_debug("map '%s': found key_size = %u.\n", map->name, def->key_size);
2824 if (def->parts & MAP_DEF_VALUE_TYPE)
2826 map->name, def->value_type_id, def->value_size);
2827 else if (def->parts & MAP_DEF_VALUE_SIZE)
2828 pr_debug("map '%s': found value_size = %u.\n", map->name, def->value_size);
2830 if (def->parts & MAP_DEF_MAX_ENTRIES)
2831 pr_debug("map '%s': found max_entries = %u.\n", map->name, def->max_entries);
2832 if (def->parts & MAP_DEF_MAP_FLAGS)
2833 pr_debug("map '%s': found map_flags = 0x%x.\n", map->name, def->map_flags);
2834 if (def->parts & MAP_DEF_MAP_EXTRA)
2835 pr_debug("map '%s': found map_extra = 0x%llx.\n", map->name,
2836 (unsigned long long)def->map_extra);
2837 if (def->parts & MAP_DEF_PINNING)
2838 pr_debug("map '%s': found pinning = %u.\n", map->name, def->pinning);
2839 if (def->parts & MAP_DEF_NUMA_NODE)
2840 pr_debug("map '%s': found numa_node = %u.\n", map->name, def->numa_node);
2842 if (def->parts & MAP_DEF_INNER_MAP)
2843 pr_debug("map '%s': found inner map definition.\n", map->name);
2871 var = btf__type_by_id(obj->btf, vi->type);
2873 map_name = btf__name_by_offset(obj->btf, var->name_off);
2877 return -EINVAL;
2879 if ((__u64)vi->offset + vi->size > data->d_size) {
2881 return -EINVAL;
2886 return -EINVAL;
2888 if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
2890 map_name, btf_var_linkage_str(var_extra->linkage));
2891 return -EOPNOTSUPP;
2894 def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
2898 return -EINVAL;
2900 if (def->size > vi->size) {
2902 return -EINVAL;
2908 map->name = strdup(map_name);
2909 if (!map->name) {
2911 return -ENOMEM;
2913 map->libbpf_type = LIBBPF_MAP_UNSPEC;
2914 map->def.type = BPF_MAP_TYPE_UNSPEC;
2915 map->sec_idx = sec_idx;
2916 map->sec_offset = vi->offset;
2917 map->btf_var_idx = var_idx;
2919 map_name, map->sec_idx, map->sec_offset);
2921 err = parse_btf_map_def(map->name, obj->btf, def, strict, &map_def, &inner_def);
2930 pr_warn("map '%s': couldn't build pin path.\n", map->name);
2936 map->inner_map = calloc(1, sizeof(*map->inner_map));
2937 if (!map->inner_map)
2938 return -ENOMEM;
2939 map->inner_map->fd = create_placeholder_fd();
2940 if (map->inner_map->fd < 0)
2941 return map->inner_map->fd;
2942 map->inner_map->sec_idx = sec_idx;
2943 map->inner_map->name = malloc(strlen(map_name) + sizeof(".inner") + 1);
2944 if (!map->inner_map->name)
2945 return -ENOMEM;
2946 sprintf(map->inner_map->name, "%s.inner", map_name);
2948 fill_map_from_def(map->inner_map, &inner_def);
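The inner definition filled in here comes from the __array() convention in BTF-defined map-in-maps, e.g.:

struct inner {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 1);
        __type(key, int);
        __type(value, int);
};

struct {
        __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
        __uint(max_entries, 16);
        __type(key, int);
        __array(values, struct inner);
} outer SEC(".maps");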
2969 return -E2BIG;
2972 obj->arena_data = malloc(data_sz);
2973 if (!obj->arena_data)
2974 return -ENOMEM;
2975 memcpy(obj->arena_data, data, data_sz);
2976 obj->arena_data_sz = data_sz;
2979 map->mmaped = obj->arena_data;
2994 if (obj->efile.btf_maps_shndx < 0)
2997 scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx);
3001 MAPS_ELF_SEC, obj->path);
3002 return -EINVAL;
3005 nr_types = btf__type_cnt(obj->btf);
3007 t = btf__type_by_id(obj->btf, i);
3010 name = btf__name_by_offset(obj->btf, t->name_off);
3013 obj->efile.btf_maps_sec_btf_id = i;
3020 return -ENOENT;
3026 obj->efile.btf_maps_shndx,
3033 for (i = 0; i < obj->nr_maps; i++) {
3034 struct bpf_map *map = &obj->maps[i];
3036 if (map->def.type != BPF_MAP_TYPE_ARENA)
3039 if (obj->arena_map_idx >= 0) {
3041 map->name, obj->maps[obj->arena_map_idx].name);
3042 return -EINVAL;
3044 obj->arena_map_idx = i;
3046 if (obj->efile.arena_data) {
3047 err = init_arena_map_data(obj, map, ARENA_SEC, obj->efile.arena_data_shndx,
3048 obj->efile.arena_data->d_buf,
3049 obj->efile.arena_data->d_size);
3054 if (obj->efile.arena_data && obj->arena_map_idx < 0) {
3057 return -ENOENT;
3089 return sh->sh_flags & SHF_EXECINSTR;
3131 t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
3137 t->size = 1;
3146 name = (char *)btf__name_by_offset(btf, t->name_off);
3154 t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
3157 m->offset = v->offset * 8;
3158 m->type = v->type;
3160 vt = (void *)btf__type_by_id(btf, v->type);
3161 m->name_off = vt->name_off;
3164 starts_with_qmark(btf__name_by_offset(btf, t->name_off))) {
3168 name = (char *)btf__name_by_offset(btf, t->name_off);
3174 t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
3175 t->size = sizeof(__u32); /* kernel enforced */
3178 t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
3181 t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0);
3183 /* replace FLOAT with an equally-sized empty STRUCT;
3187 t->name_off = 0;
3188 t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0);
3191 t->name_off = 0;
3192 t->info = BTF_INFO_ENC(BTF_KIND_CONST, 0, 0);
3195 t->info = btf_type_info(btf_kind(t), btf_vlen(t), false);
3210 t->info = BTF_INFO_ENC(BTF_KIND_UNION, 0, vlen);
3212 m->type = enum64_placeholder_id;
3213 m->offset = 0;
3223 return obj->efile.btf_maps_shndx >= 0 ||
3224 obj->efile.has_st_ops ||
3225 obj->nr_extern > 0;
3230 return obj->efile.has_st_ops;
3237 int err = -ENOENT;
3240 obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
3241 err = libbpf_get_error(obj->btf);
3243 obj->btf = NULL;
3247 /* enforce 8-byte pointers for BPF-targeted BTFs */
3248 btf__set_pointer_size(obj->btf, 8);
3254 if (!obj->btf) {
3259 obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size);
3260 err = libbpf_get_error(obj->btf_ext);
3264 obj->btf_ext = NULL;
3269 ext_segs[0] = &obj->btf_ext->func_info;
3270 ext_segs[1] = &obj->btf_ext->line_info;
3271 ext_segs[2] = &obj->btf_ext->core_relo_info;
3278 if (seg->sec_cnt == 0)
3281 seg->sec_idxs = calloc(seg->sec_cnt, sizeof(*seg->sec_idxs));
3282 if (!seg->sec_idxs) {
3283 err = -ENOMEM;
3294 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
3301 seg->sec_idxs[sec_num - 1] = elf_ndxscn(scn);
3318 return a->offset - b->offset;
3325 const char *sec_name = btf__name_by_offset(btf, t->name_off);
3332 return -ENOENT;
3335 /* Extern-backing datasecs (.ksyms, .kconfig) have their size and
3347 * to be optional. But the STV_HIDDEN handling is non-optional for any
3348 * non-extern DATASEC, so the variable fixup loop below handles both
3349 * functions at the same time, paying the cost of BTF VAR <-> ELF
3352 if (t->size == 0) {
3357 return -ENOENT;
3360 t->size = size;
3370 t_var = btf__type_by_id(btf, vsi->type);
3372 pr_debug("sec '%s': unexpected non-VAR type found\n", sec_name);
3373 return -EINVAL;
3377 if (var->linkage == BTF_VAR_STATIC || var->linkage == BTF_VAR_GLOBAL_EXTERN)
3380 var_name = btf__name_by_offset(btf, t_var->name_off);
3384 return -ENOENT;
3391 return -ENOENT;
3395 vsi->offset = sym->st_value;
3404 if (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN
3405 || ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL)
3406 var->linkage = BTF_VAR_STATIC;
3418 if (!obj->btf)
3421 n = btf__type_cnt(obj->btf);
3423 struct btf_type *t = btf_type_by_id(obj->btf, i);
3431 err = btf_fixup_datasec(obj, obj->btf, t);
3442 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
3443 prog->type == BPF_PROG_TYPE_LSM)
3449 if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd)
3466 /* CO-RE relocations need kernel BTF, only when btf_custom_path
3469 if (obj->btf_ext && obj->btf_ext->core_relo_info.len && !obj->btf_custom_path)
3473 for (i = 0; i < obj->nr_extern; i++) {
3476 ext = &obj->externs[i];
3477 if (ext->type == EXT_KSYM && ext->ksym.type_id)
3482 if (!prog->autoload)
3501 if (obj->btf_vmlinux || obj->gen_loader)
3507 obj->btf_vmlinux = btf__load_vmlinux_btf();
3508 err = libbpf_get_error(obj->btf_vmlinux);
3511 obj->btf_vmlinux = NULL;
3519 struct btf *kern_btf = obj->btf;
3523 if (!obj->btf)
3528 err = -EOPNOTSUPP;
3543 for (i = 0; i < obj->nr_programs; i++) {
3544 struct bpf_program *prog = &obj->programs[i];
3549 if (!prog->mark_btf_static || !prog_is_subprog(obj, prog))
3552 n = btf__type_cnt(obj->btf);
3554 t = btf_type_by_id(obj->btf, j);
3558 name = btf__str_by_offset(obj->btf, t->name_off);
3559 if (strcmp(name, prog->name) != 0)
3562 t->info = btf_type_info(BTF_KIND_FUNC, BTF_FUNC_STATIC, 0);
3573 raw_data = btf__raw_data(obj->btf, &sz);
3579 /* enforce 8-byte pointers for BPF-targeted BTFs */
3580 btf__set_pointer_size(obj->btf, 8);
3586 if (obj->gen_loader) {
3591 return -ENOMEM;
3592 bpf_gen__load_btf(obj->gen_loader, raw_data, raw_size);
3594 * This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
3599 err = btf_load_into_kernel(kern_btf, obj->log_buf, obj->log_size,
3600 obj->log_level ? 1 : 0, obj->token_fd);
3605 btf__set_fd(obj->btf, btf__fd(kern_btf));
3606 btf__set_fd(kern_btf, -1);
3629 name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off);
3632 off, obj->path, elf_errmsg(-1));
3643 name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off);
3646 off, obj->path, elf_errmsg(-1));
3657 scn = elf_getscn(obj->efile.elf, idx);
3660 idx, obj->path, elf_errmsg(-1));
3669 Elf *elf = obj->efile.elf;
3695 elf_ndxscn(scn), obj->path, elf_errmsg(-1));
3714 name = elf_sec_str(obj, sh->sh_name);
3717 elf_ndxscn(scn), obj->path, elf_errmsg(-1));
3735 obj->path, elf_errmsg(-1));
3744 if (idx >= obj->efile.symbols->d_size / sizeof(Elf64_Sym))
3747 return (Elf64_Sym *)obj->efile.symbols->d_buf + idx;
3752 if (idx >= data->d_size / sizeof(Elf64_Rel))
3755 return (Elf64_Rel *)data->d_buf + idx;
3767 if (hdr->sh_type == SHT_STRTAB)
3771 if (hdr->sh_type == SHT_LLVM_ADDRSIG)
3775 if (hdr->sh_type == SHT_PROGBITS && hdr->sh_size == 0 &&
3784 name += sizeof(".rel") - 1;
3803 if (a->sec_idx != b->sec_idx)
3804 return a->sec_idx < b->sec_idx ? -1 : 1;
3807 return a->sec_insn_off < b->sec_insn_off ? -1 : 1;
3813 Elf *elf = obj->efile.elf;
3822 /* ELF section indices are 0-based, but sec #0 is special "invalid"
3827 if (elf_getshdrnum(obj->efile.elf, &obj->efile.sec_cnt)) {
3829 obj->path, elf_errmsg(-1));
3830 return -LIBBPF_ERRNO__FORMAT;
3832 obj->efile.secs = calloc(obj->efile.sec_cnt, sizeof(*obj->efile.secs));
3833 if (!obj->efile.secs)
3834 return -ENOMEM;
3843 return -LIBBPF_ERRNO__FORMAT;
3845 if (sh->sh_type == SHT_SYMTAB) {
3846 if (obj->efile.symbols) {
3847 pr_warn("elf: multiple symbol tables in %s\n", obj->path);
3848 return -LIBBPF_ERRNO__FORMAT;
3853 return -LIBBPF_ERRNO__FORMAT;
3857 obj->efile.symbols = data;
3858 obj->efile.symbols_shndx = idx;
3859 obj->efile.strtabidx = sh->sh_link;
3863 if (!obj->efile.symbols) {
3865 obj->path);
3866 return -ENOENT;
3872 sec_desc = &obj->efile.secs[idx];
3876 return -LIBBPF_ERRNO__FORMAT;
3878 name = elf_sec_str(obj, sh->sh_name);
3880 return -LIBBPF_ERRNO__FORMAT;
3887 return -LIBBPF_ERRNO__FORMAT;
3890 idx, name, (unsigned long)data->d_size,
3891 (int)sh->sh_link, (unsigned long)sh->sh_flags,
3892 (int)sh->sh_type);
3895 err = bpf_object__init_license(obj, data->d_buf, data->d_size);
3899 err = bpf_object__init_kversion(obj, data->d_buf, data->d_size);
3904 return -ENOTSUP;
3906 obj->efile.btf_maps_shndx = idx;
3908 if (sh->sh_type != SHT_PROGBITS)
3909 return -LIBBPF_ERRNO__FORMAT;
3912 if (sh->sh_type != SHT_PROGBITS)
3913 return -LIBBPF_ERRNO__FORMAT;
3915 } else if (sh->sh_type == SHT_SYMTAB) {
3917 } else if (sh->sh_type == SHT_PROGBITS && data->d_size > 0) {
3918 if (sh->sh_flags & SHF_EXECINSTR) {
3920 obj->efile.text_shndx = idx;
3926 sec_desc->sec_type = SEC_DATA;
3927 sec_desc->shdr = sh;
3928 sec_desc->data = data;
3931 sec_desc->sec_type = SEC_RODATA;
3932 sec_desc->shdr = sh;
3933 sec_desc->data = data;
3938 sec_desc->sec_type = SEC_ST_OPS;
3939 sec_desc->shdr = sh;
3940 sec_desc->data = data;
3941 obj->efile.has_st_ops = true;
3943 obj->efile.arena_data = data;
3944 obj->efile.arena_data_shndx = idx;
3949 } else if (sh->sh_type == SHT_REL) {
3950 int targ_sec_idx = sh->sh_info; /* points to other section */
3952 if (sh->sh_entsize != sizeof(Elf64_Rel) ||
3953 targ_sec_idx >= obj->efile.sec_cnt)
3954 return -LIBBPF_ERRNO__FORMAT;
3969 sec_desc->sec_type = SEC_RELO;
3970 sec_desc->shdr = sh;
3971 sec_desc->data = data;
3972 } else if (sh->sh_type == SHT_NOBITS && (strcmp(name, BSS_SEC) == 0 ||
3974 sec_desc->sec_type = SEC_BSS;
3975 sec_desc->shdr = sh;
3976 sec_desc->data = data;
3979 (size_t)sh->sh_size);
3983 if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) {
3984 pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path);
3985 return -LIBBPF_ERRNO__FORMAT;
3992 /* sort BPF programs by section name and in-section instruction offset
3995 if (obj->nr_programs)
3996 qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs);
4003 int bind = ELF64_ST_BIND(sym->st_info);
4005 return sym->st_shndx == SHN_UNDEF &&
4007 ELF64_ST_TYPE(sym->st_info) == STT_NOTYPE;
4012 int bind = ELF64_ST_BIND(sym->st_info);
4013 int type = ELF64_ST_TYPE(sym->st_info);
4016 if (sym->st_shndx != text_shndx)
4034 return -ESRCH;
4043 tname = btf__name_by_offset(btf, t->name_off);
4048 btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN)
4049 return -EINVAL;
4052 return -EINVAL;
4057 return -ENOENT;
4066 return -ESRCH;
4077 if (vs->type == ext_btf_id)
4082 return -ENOENT;
4092 name = btf__name_by_offset(btf, t->name_off);
4101 return t->size == 1 ? KCFG_BOOL : KCFG_UNKNOWN;
4104 if (t->size == 1)
4106 if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1)))
4111 if (t->size != 4)
4121 if (btf_array(t)->nelems == 0)
4123 if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR)
4136 if (a->type != b->type)
4137 return a->type < b->type ? -1 : 1;
4139 if (a->type == EXT_KCFG) {
4141 if (a->kcfg.align != b->kcfg.align)
4142 return a->kcfg.align > b->kcfg.align ? -1 : 1;
4144 if (a->kcfg.sz != b->kcfg.sz)
4145 return a->kcfg.sz < b->kcfg.sz ? -1 : 1;
4149 return strcmp(a->name, b->name);
4187 vt = btf__type_by_id(btf, vs->type);
4218 if (!obj->efile.symbols)
4221 scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx);
4223 if (!sh || sh->sh_entsize != sizeof(Elf64_Sym))
4224 return -LIBBPF_ERRNO__FORMAT;
4226 dummy_var_btf_id = add_dummy_ksym_var(obj->btf);
4230 n = sh->sh_size / sh->sh_entsize;
4237 return -LIBBPF_ERRNO__FORMAT;
4240 ext_name = elf_sym_str(obj, sym->st_name);
4244 ext = obj->externs;
4245 ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext));
4247 return -ENOMEM;
4248 obj->externs = ext;
4249 ext = &ext[obj->nr_extern];
4251 obj->nr_extern++;
4253 ext->btf_id = find_extern_btf_id(obj->btf, ext_name);
4254 if (ext->btf_id <= 0) {
4256 ext_name, ext->btf_id);
4257 return ext->btf_id;
4259 t = btf__type_by_id(obj->btf, ext->btf_id);
4260 ext->name = strdup(btf__name_by_offset(obj->btf, t->name_off));
4261 if (!ext->name)
4262 return -ENOMEM;
4263 ext->sym_idx = i;
4264 ext->is_weak = ELF64_ST_BIND(sym->st_info) == STB_WEAK;
4266 ext_essent_len = bpf_core_essential_name_len(ext->name);
4267 ext->essent_name = NULL;
4268 if (ext_essent_len != strlen(ext->name)) {
4269 ext->essent_name = strndup(ext->name, ext_essent_len);
4270 if (!ext->essent_name)
4271 return -ENOMEM;
4274 ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id);
4275 if (ext->sec_btf_id <= 0) {
4277 ext_name, ext->btf_id, ext->sec_btf_id);
4278 return ext->sec_btf_id;
4280 sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id);
4281 sec_name = btf__name_by_offset(obj->btf, sec->name_off);
4286 ext->name, KCONFIG_SEC);
4287 return -ENOTSUP;
4290 ext->type = EXT_KCFG;
4291 ext->kcfg.sz = btf__resolve_size(obj->btf, t->type);
4292 if (ext->kcfg.sz <= 0) {
4294 ext_name, ext->kcfg.sz);
4295 return ext->kcfg.sz;
4297 ext->kcfg.align = btf__align_of(obj->btf, t->type);
4298 if (ext->kcfg.align <= 0) {
4300 ext_name, ext->kcfg.align);
4301 return -EINVAL;
4303 ext->kcfg.type = find_kcfg_type(obj->btf, t->type,
4304 &ext->kcfg.is_signed);
4305 if (ext->kcfg.type == KCFG_UNKNOWN) {
4307 return -ENOTSUP;
4311 ext->type = EXT_KSYM;
4312 skip_mods_and_typedefs(obj->btf, t->type,
4313 &ext->ksym.type_id);
4316 return -ENOTSUP;
4319 pr_debug("collected %d externs total\n", obj->nr_extern);
4321 if (!obj->nr_extern)
4325 qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs);
4329 * pretending that each extern is an 8-byte variable
4332 /* find existing 4-byte integer type in BTF to use for fake
4335 int int_btf_id = find_int_btf_id(obj->btf);
4337 * will be used to replace the vs->type and
4343 dummy_var = btf__type_by_id(obj->btf, dummy_var_btf_id);
4344 for (i = 0; i < obj->nr_extern; i++) {
4345 ext = &obj->externs[i];
4346 if (ext->type != EXT_KSYM)
4349 i, ext->sym_idx, ext->name);
4358 vt = (void *)btf__type_by_id(obj->btf, vs->type);
4359 ext_name = btf__name_by_offset(obj->btf, vt->name_off);
4364 return -ESRCH;
4371 func_proto = btf__type_by_id(obj->btf,
4372 vt->type);
4380 dummy_var->name_off;
4381 vs->type = dummy_var_btf_id;
4382 vt->info &= ~0xffff;
4383 vt->info |= BTF_FUNC_GLOBAL;
4385 btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
4386 vt->type = int_btf_id;
4388 vs->offset = off;
4389 vs->size = sizeof(int);
4391 sec->size = off;
4398 for (i = 0; i < obj->nr_extern; i++) {
4399 ext = &obj->externs[i];
4400 if (ext->type != EXT_KCFG)
4403 ext->kcfg.data_off = roundup(off, ext->kcfg.align);
4404 off = ext->kcfg.data_off + ext->kcfg.sz;
4406 i, ext->sym_idx, ext->kcfg.data_off, ext->name);
4408 sec->size = off;
4413 t = btf__type_by_id(obj->btf, vs->type);
4414 ext_name = btf__name_by_offset(obj->btf, t->name_off);
4419 return -ESRCH;
4421 btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
4422 vs->offset = ext->kcfg.data_off;
4430 return prog->sec_idx == obj->efile.text_shndx;
4442 if (!strcmp(prog->name, name))
4451 switch (obj->efile.secs[shndx].sec_type) {
4464 return shndx == obj->efile.btf_maps_shndx;
4470 if (shndx == obj->efile.symbols_shndx)
4473 switch (obj->efile.secs[shndx].sec_type) {
4490 purged = calloc(prog->insns_cnt, BPF_INSN_SZ);
4492 return -ENOMEM;
4497 for (i = 0; i < prog->insns_cnt; i++) {
4498 purged[i] = prog->insns[i];
4504 if (i >= prog->insns_cnt ||
4505 prog->insns[i].code != 0 ||
4506 prog->insns[i].dst_reg != 0 ||
4507 prog->insns[i].src_reg != 0 ||
4508 prog->insns[i].off != 0) {
4509 err = -EINVAL;
4512 purged[i] = prog->insns[i];
4516 libbpf_sha256(purged, prog->insns_cnt * sizeof(struct bpf_insn),
4517 prog->hash);
4528 struct bpf_insn *insn = &prog->insns[insn_idx];
4529 size_t map_idx, nr_maps = prog->obj->nr_maps;
4530 struct bpf_object *obj = prog->obj;
4531 __u32 shdr_idx = sym->st_shndx;
4538 prog->name, sym_name, insn_idx, insn->code);
4539 return -LIBBPF_ERRNO__RELOC;
4543 int sym_idx = ELF64_R_SYM(rel->r_info);
4544 int i, n = obj->nr_extern;
4548 ext = &obj->externs[i];
4549 if (ext->sym_idx == sym_idx)
4554 prog->name, sym_name, sym_idx);
4555 return -LIBBPF_ERRNO__RELOC;
4558 prog->name, i, ext->name, ext->sym_idx, insn_idx);
4559 if (insn->code == (BPF_JMP | BPF_CALL))
4560 reloc_desc->type = RELO_EXTERN_CALL;
4562 reloc_desc->type = RELO_EXTERN_LD64;
4563 reloc_desc->insn_idx = insn_idx;
4564 reloc_desc->ext_idx = i;
4568 /* sub-program call relocation */
4570 if (insn->src_reg != BPF_PSEUDO_CALL) {
4571 pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name);
4572 return -LIBBPF_ERRNO__RELOC;
4575 if (!shdr_idx || shdr_idx != obj->efile.text_shndx) {
4578 prog->name, sym_name, sym_sec_name);
4579 return -LIBBPF_ERRNO__RELOC;
4581 if (sym->st_value % BPF_INSN_SZ) {
4583 prog->name, sym_name, (size_t)sym->st_value);
4584 return -LIBBPF_ERRNO__RELOC;
4586 reloc_desc->type = RELO_CALL;
4587 reloc_desc->insn_idx = insn_idx;
4588 reloc_desc->sym_off = sym->st_value;
4594 prog->name, sym_name, shdr_idx);
4595 return -LIBBPF_ERRNO__RELOC;
4599 if (sym_is_subprog(sym, obj->efile.text_shndx)) {
4600 /* global_func: sym->st_value = offset in the section, insn->imm = 0.
4601 * local_func: sym->st_value = 0, insn->imm = offset in the section.
4603 if ((sym->st_value % BPF_INSN_SZ) || (insn->imm % BPF_INSN_SZ)) {
4605 prog->name, sym_name, (size_t)sym->st_value, insn->imm);
4606 return -LIBBPF_ERRNO__RELOC;
4609 reloc_desc->type = RELO_SUBPROG_ADDR;
4610 reloc_desc->insn_idx = insn_idx;
4611 reloc_desc->sym_off = sym->st_value;
4619 if (shdr_idx == obj->efile.arena_data_shndx) {
4620 if (obj->arena_map_idx < 0) {
4622 prog->name, insn_idx);
4623 return -LIBBPF_ERRNO__RELOC;
4625 reloc_desc->type = RELO_DATA;
4626 reloc_desc->insn_idx = insn_idx;
4627 reloc_desc->map_idx = obj->arena_map_idx;
4628 reloc_desc->sym_off = sym->st_value;
4630 map = &obj->maps[obj->arena_map_idx];
4632 prog->name, obj->arena_map_idx, map->name, map->sec_idx,
4633 map->sec_offset, insn_idx);
4641 prog->name, sym_name, sym_sec_name);
4642 return -LIBBPF_ERRNO__RELOC;
4645 map = &obj->maps[map_idx];
4646 if (map->libbpf_type != type ||
4647 map->sec_idx != sym->st_shndx ||
4648 map->sec_offset != sym->st_value)
4651 prog->name, map_idx, map->name, map->sec_idx,
4652 map->sec_offset, insn_idx);
4657 prog->name, sym_sec_name, (size_t)sym->st_value);
4658 return -LIBBPF_ERRNO__RELOC;
4660 reloc_desc->type = RELO_LD64;
4661 reloc_desc->insn_idx = insn_idx;
4662 reloc_desc->map_idx = map_idx;
4663 reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */
4670 prog->name, sym_sec_name);
4671 return -LIBBPF_ERRNO__RELOC;
4674 map = &obj->maps[map_idx];
4675 if (map->libbpf_type != type || map->sec_idx != sym->st_shndx)
4678 prog->name, map_idx, map->name, map->sec_idx,
4679 map->sec_offset, insn_idx);
4684 prog->name, sym_sec_name);
4685 return -LIBBPF_ERRNO__RELOC;
4688 reloc_desc->type = RELO_DATA;
4689 reloc_desc->insn_idx = insn_idx;
4690 reloc_desc->map_idx = map_idx;
4691 reloc_desc->sym_off = sym->st_value;
4697 return insn_idx >= prog->sec_insn_off &&
4698 insn_idx < prog->sec_insn_off + prog->sec_insn_cnt;
4704 int l = 0, r = obj->nr_programs - 1, m;
4707 if (!obj->nr_programs)
4711 m = l + (r - l + 1) / 2;
4712 prog = &obj->programs[m];
4714 if (prog->sec_idx < sec_idx ||
4715 (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx))
4718 r = m - 1;
4723 prog = &obj->programs[l];
4724 if (prog->sec_idx == sec_idx && prog_contains_insn(prog, insn_idx))
4733 size_t sec_idx = shdr->sh_info, sym_idx;
4744 if (sec_idx >= obj->efile.sec_cnt)
4745 return -EINVAL;
4750 return -LIBBPF_ERRNO__FORMAT;
4752 relo_sec_name = elf_sec_str(obj, shdr->sh_name);
4755 return -EINVAL;
4759 nrels = shdr->sh_size / shdr->sh_entsize;
4765 return -LIBBPF_ERRNO__FORMAT;
4768 sym_idx = ELF64_R_SYM(rel->r_info);
4773 return -LIBBPF_ERRNO__FORMAT;
4776 if (sym->st_shndx >= obj->efile.sec_cnt) {
4778 relo_sec_name, sym_idx, (size_t)sym->st_shndx, i);
4779 return -LIBBPF_ERRNO__FORMAT;
4782 if (rel->r_offset % BPF_INSN_SZ || rel->r_offset >= scn_data->d_size) {
4784 relo_sec_name, (size_t)rel->r_offset, i);
4785 return -LIBBPF_ERRNO__FORMAT;
4788 insn_idx = rel->r_offset / BPF_INSN_SZ;
4795 if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION && sym->st_name == 0)
4796 sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym->st_shndx));
4798 sym_name = elf_sym_str(obj, sym->st_name);
4811 relos = libbpf_reallocarray(prog->reloc_desc,
4812 prog->nr_reloc + 1, sizeof(*relos));
4814 return -ENOMEM;
4815 prog->reloc_desc = relos;
4818 insn_idx -= prog->sec_insn_off;
4819 err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc],
4824 prog->nr_reloc++;
4833 if (!obj->btf)
4834 return -ENOENT;
4836 /* if it's a BTF-defined map, we don't need to search for type IDs.
4840 if (map->sec_idx == obj->efile.btf_maps_shndx || bpf_map__is_struct_ops(map))
4848 return -ENOENT;
4850 id = btf__find_by_name(obj->btf, map->real_name);
4854 map->btf_key_type_id = 0;
4855 map->btf_value_type_id = id;
4871 err = -errno;
4879 info->type = val;
4881 info->key_size = val;
4883 info->value_size = val;
4885 info->max_entries = val;
4887 info->map_flags = val;
4897 return map->obj->state >= OBJ_PREPARED || map->reused;
4902 return map->autocreate;
4908 return libbpf_err(-EBUSY);
4910 map->autocreate = autocreate;
4917 return libbpf_err(-EINVAL);
4919 map->autoattach = autoattach;
4925 return map->autoattach;
4943 if (name_len == BPF_OBJ_NAME_LEN - 1 && strncmp(map->name, info.name, name_len) == 0)
4944 new_name = strdup(map->name);
4949 return libbpf_err(-errno);
4958 err = -errno;
4962 err = reuse_fd(map->fd, new_fd);
4966 free(map->name);
4968 map->name = new_name;
4969 map->def.type = info.type;
4970 map->def.key_size = info.key_size;
4971 map->def.value_size = info.value_size;
4972 map->def.max_entries = info.max_entries;
4973 map->def.map_flags = info.map_flags;
4974 map->btf_key_type_id = info.btf_key_type_id;
4975 map->btf_value_type_id = info.btf_value_type_id;
4976 map->reused = true;
4977 map->map_extra = info.map_extra;
4988 return map->def.max_entries;
4993 if (!bpf_map_type__is_map_in_map(map->def.type))
4996 return map->inner_map;
5002 return libbpf_err(-EBUSY);
5004 map->def.max_entries = max_entries;
5006 /* auto-adjust BPF ringbuf map max_entries to be a multiple of page size */
5008 map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries);
5016 int bpffs_fd = -1, token_fd, err;
5021 if (obj->token_path && obj->token_path[0] == '\0') {
5022 pr_debug("object '%s': token is prevented, skipping...\n", obj->name);
5026 mandatory = obj->token_path != NULL;
5029 bpffs_path = obj->token_path ?: BPF_FS_DEFAULT_PATH;
5032 err = -errno;
5034 obj->name, errstr(err), bpffs_path,
5042 if (!mandatory && token_fd == -ENOENT) {
5044 obj->name, bpffs_path);
5048 obj->name, token_fd, bpffs_path,
5053 obj->feat_cache = calloc(1, sizeof(*obj->feat_cache));
5054 if (!obj->feat_cache) {
5056 return -ENOMEM;
5059 obj->token_fd = token_fd;
5060 obj->feat_cache->token_fd = token_fd;
5074 .token_fd = obj->token_fd,
5075 .prog_flags = obj->token_fd ? BPF_F_TOKEN_FD : 0,
5078 if (obj->gen_loader)
5094 return -ret;
5103 if (obj->gen_loader)
5109 if (obj->token_fd)
5110 return feat_supported(obj->feat_cache, feat_id);
5138 if (map->def.type == BPF_MAP_TYPE_DEVMAP || map->def.type == BPF_MAP_TYPE_DEVMAP_HASH)
5141 return (map_info.type == map->def.type &&
5142 map_info.key_size == map->def.key_size &&
5143 map_info.value_size == map->def.value_size &&
5144 map_info.max_entries == map->def.max_entries &&
5145 map_info.map_flags == map->def.map_flags &&
5146 map_info.map_extra == map->map_extra);
5154 pin_fd = bpf_obj_get(map->pin_path);
5156 err = -errno;
5157 if (err == -ENOENT) {
5159 map->pin_path);
5164 map->pin_path, errstr(err));
5170 map->pin_path);
5172 return -EINVAL;
5180 map->pinned = true;
5181 pr_debug("reused pinned map at '%s'\n", map->pin_path);
5189 enum libbpf_map_type map_type = map->libbpf_type;
5193 if (obj->gen_loader) {
5194 bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps,
5195 map->mmaped, map->def.value_size);
5197 bpf_gen__map_freeze(obj->gen_loader, map - obj->maps);
5201 err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0);
5203 err = -errno;
5209 /* Freeze .rodata and .kconfig map as read-only from syscall side. */
5211 err = bpf_map_freeze(map->fd);
5213 err = -errno;
5214 pr_warn("map '%s': failed to freeze as read-only: %s\n",
5220 /* Remap anonymous mmap()-ed "map initialization image" as
5221 * a BPF map-backed mmap()-ed memory, but preserving the same
5230 if (map->def.map_flags & BPF_F_MMAPABLE) {
5234 if (map->def.map_flags & BPF_F_RDONLY_PROG)
5238 mmaped = mmap(map->mmaped, mmap_sz, prot, MAP_SHARED | MAP_FIXED, map->fd, 0);
5240 err = -errno;
5241 pr_warn("map '%s': failed to re-mmap() contents: %s\n",
5245 map->mmaped = mmaped;
5246 } else if (map->mmaped) {
5247 munmap(map->mmaped, mmap_sz);
5248 map->mmaped = NULL;
5259 struct bpf_map_def *def = &map->def;
5264 map_name = map->name;
5265 create_attr.map_ifindex = map->map_ifindex;
5266 create_attr.map_flags = def->map_flags;
5267 create_attr.numa_node = map->numa_node;
5268 create_attr.map_extra = map->map_extra;
5269 create_attr.token_fd = obj->token_fd;
5270 if (obj->token_fd)
5272 if (map->excl_prog) {
5273 err = bpf_prog_compute_hash(map->excl_prog);
5277 create_attr.excl_prog_hash = map->excl_prog->hash;
5282 create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
5283 if (map->mod_btf_fd >= 0) {
5284 create_attr.value_type_btf_obj_fd = map->mod_btf_fd;
5289 if (obj->btf && btf__fd(obj->btf) >= 0) {
5290 create_attr.btf_fd = btf__fd(obj->btf);
5291 create_attr.btf_key_type_id = map->btf_key_type_id;
5292 create_attr.btf_value_type_id = map->btf_value_type_id;
5295 if (bpf_map_type__is_map_in_map(def->type)) {
5296 if (map->inner_map) {
5297 err = map_set_def_max_entries(map->inner_map);
5300 err = bpf_object__create_map(obj, map->inner_map, true);
5303 map->name, errstr(err));
5306 map->inner_map_fd = map->inner_map->fd;
5308 if (map->inner_map_fd >= 0)
5309 create_attr.inner_map_fd = map->inner_map_fd;
5312 switch (def->type) {
5330 map->btf_key_type_id = 0;
5331 map->btf_value_type_id = 0;
5340 if (obj->gen_loader) {
5341 bpf_gen__map_create(obj->gen_loader, def->type, map_name,
5342 def->key_size, def->value_size, def->max_entries,
5343 &create_attr, is_inner ? -1 : map - obj->maps);
5348 * will be reset to -1 eventually.
5350 map_fd = map->fd;
5352 map_fd = bpf_map_create(def->type, map_name,
5353 def->key_size, def->value_size,
5354 def->max_entries, &create_attr);
5357 err = -errno;
5359 map->name, errstr(err));
5363 map->btf_key_type_id = 0;
5364 map->btf_value_type_id = 0;
5365 map_fd = bpf_map_create(def->type, map_name,
5366 def->key_size, def->value_size,
5367 def->max_entries, &create_attr);
5370 if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) {
5371 if (obj->gen_loader)
5372 map->inner_map->fd = -1;
5373 bpf_map__destroy(map->inner_map);
5374 zfree(&map->inner_map);
5380 /* obj->gen_loader case, prevent reuse_fd() from closing map_fd */
5381 if (map->fd == map_fd)
5387 * map->fd stays valid but now point to what map_fd points to.
5389 return reuse_fd(map->fd, map_fd);
5398 for (i = 0; i < map->init_slots_sz; i++) {
5399 if (!map->init_slots[i])
5402 targ_map = map->init_slots[i];
5403 fd = targ_map->fd;
5405 if (obj->gen_loader) {
5406 bpf_gen__populate_outer_map(obj->gen_loader,
5407 map - obj->maps, i,
5408 targ_map - obj->maps);
5410 err = bpf_map_update_elem(map->fd, &i, &fd, 0);
5413 err = -errno;
5415 map->name, i, targ_map->name, fd, errstr(err));
5419 map->name, i, targ_map->name, fd);
5422 zfree(&map->init_slots);
5423 map->init_slots_sz = 0;
5434 if (obj->gen_loader)
5435 return -ENOTSUP;
5437 for (i = 0; i < map->init_slots_sz; i++) {
5438 if (!map->init_slots[i])
5441 targ_prog = map->init_slots[i];
5444 err = bpf_map_update_elem(map->fd, &i, &fd, 0);
5446 err = -errno;
5448 map->name, i, targ_prog->name, fd, errstr(err));
5452 map->name, i, targ_prog->name, fd);
5455 zfree(&map->init_slots);
5456 map->init_slots_sz = 0;
5466 for (i = 0; i < obj->nr_maps; i++) {
5467 map = &obj->maps[i];
5469 if (!map->init_slots_sz || map->def.type != BPF_MAP_TYPE_PROG_ARRAY)
5481 if (map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !map->def.max_entries) {
5487 map->name, nr_cpus);
5490 pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus);
5491 map->def.max_entries = nr_cpus;
5505 for (i = 0; i < obj->nr_maps; i++) {
5506 map = &obj->maps[i];
5510 * loading, if we detect that at least one of the to-be-loaded
5515 * but also it allows having CO-RE applications that use
5517 * If those global variable-using programs are not loaded at
5523 map->autocreate = false;
5525 if (!map->autocreate) {
5526 pr_debug("map '%s': skipped auto-creating...\n", map->name);
5536 if (map->pin_path) {
5540 map->name);
5543 if (retried && map->fd < 0) {
5545 map->name);
5546 err = -ENOENT;
5551 if (map->reused) {
5553 map->name, map->fd);
5560 map->name, map->fd);
5566 } else if (map->def.type == BPF_MAP_TYPE_ARENA) {
5567 map->mmaped = mmap((void *)(long)map->map_extra,
5569 map->map_extra ? MAP_SHARED | MAP_FIXED : MAP_SHARED,
5570 map->fd, 0);
5571 if (map->mmaped == MAP_FAILED) {
5572 err = -errno;
5573 map->mmaped = NULL;
5575 map->name, errstr(err));
5578 if (obj->arena_data) {
5579 memcpy(map->mmaped, obj->arena_data, obj->arena_data_sz);
5580 zfree(&obj->arena_data);
5583 if (map->init_slots_sz && map->def.type != BPF_MAP_TYPE_PROG_ARRAY) {
5590 if (map->pin_path && !map->pinned) {
5593 if (!retried && err == -EEXIST) {
5597 pr_warn("map '%s': failed to auto-pin at '%s': %s\n",
5598 map->name, map->pin_path, errstr(err));
5607 pr_warn("map '%s': failed to create: %s\n", map->name, errstr(err));
5610 zclose(obj->maps[j].fd);
5624 * underscore is ignored by BPF CO-RE relocation during relocation matching.
5631 for (i = n - 5; i >= 0; i--) {
5643 free(cands->cands);
5660 local_t = btf__type_by_id(local_cand->btf, local_cand->id);
5661 local_name = btf__str_by_offset(local_cand->btf, local_t->name_off);
5669 targ_name = btf__name_by_offset(targ_btf, t->name_off);
5680 pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s in [%s]\n",
5681 local_cand->id, btf_kind_str(local_t),
5684 new_cands = libbpf_reallocarray(cands->cands, cands->len + 1,
5685 sizeof(*cands->cands));
5687 return -ENOMEM;
5689 cand = &new_cands[cands->len];
5690 cand->btf = targ_btf;
5691 cand->id = i;
5693 cands->cands = new_cands;
5694 cands->len++;
5708 if (obj->btf_modules_loaded)
5711 if (obj->gen_loader)
5715 obj->btf_modules_loaded = true;
5730 err = -errno;
5739 err = -errno;
5751 err = -errno;
5756 /* ignore non-module BTFs */
5762 btf = btf_get_from_fd(fd, obj->btf_vmlinux);
5770 err = libbpf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap,
5771 sizeof(*obj->btf_modules), obj->btf_module_cnt + 1);
5775 mod_btf = &obj->btf_modules[obj->btf_module_cnt++];
5777 mod_btf->btf = btf;
5778 mod_btf->id = id;
5779 mod_btf->fd = fd;
5780 mod_btf->name = strdup(name);
5781 if (!mod_btf->name) {
5782 err = -ENOMEM;
5810 return ERR_PTR(-EINVAL);
5812 local_name = btf__name_by_offset(local_btf, local_t->name_off);
5814 return ERR_PTR(-EINVAL);
5819 return ERR_PTR(-ENOMEM);
5822 main_btf = obj->btf_vmlinux_override ?: obj->btf_vmlinux;
5828 if (cands->len)
5832 if (obj->btf_vmlinux_override)
5840 for (i = 0; i < obj->btf_module_cnt; i++) {
5842 obj->btf_modules[i].btf,
5843 obj->btf_modules[i].name,
5844 btf__type_cnt(obj->btf_vmlinux),
5857 * type-based CO-RE relocations and follow slightly different rules than
5858 * field-based relocations. This function assumes that root types were already
5859 * checked for name match. Beyond that initial root-level name check, names
5861 * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
5864 * - for ENUMs, the size is ignored;
5865 * - for INT, size and signedness are ignored;
5866 * - for ARRAY, dimensionality is ignored, element types are checked for
5868 * - CONST/VOLATILE/RESTRICT modifiers are ignored;
5869 * - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
5870 * - FUNC_PROTOs are compatible if they have compatible signature: same
5873 * more experience with using BPF CO-RE relocations.
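/*
 * Illustrative sketch (editorial, not part of libbpf): what a CO-RE
 * relocatable access looks like on the BPF side. BPF_CORE_READ() from
 * bpf_core_read.h records field relocations in .BTF.ext, and the candidate
 * lookup above resolves them against vmlinux/module BTF at load time.
 * Hook and names are arbitrary; assumes a generated vmlinux.h.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

char LICENSE[] SEC("license") = "GPL";

SEC("tp_btf/task_newtask")
int handle_new_task(u64 *ctx)
{
	struct task_struct *task = (struct task_struct *)ctx[0];
	pid_t parent_tgid;

	/* each field dereference below emits one CO-RE relocation */
	parent_tgid = BPF_CORE_READ(task, real_parent, tgid);
	bpf_printk("new task, parent tgid=%d", parent_tgid);
	return 0;
}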
5902 relos = libbpf_reallocarray(prog->reloc_desc,
5903 prog->nr_reloc + 1, sizeof(*relos));
5905 return -ENOMEM;
5906 relo = &relos[prog->nr_reloc];
5907 relo->type = RELO_CORE;
5908 relo->insn_idx = insn_idx;
5909 relo->core_relo = core_relo;
5910 prog->reloc_desc = relos;
5911 prog->nr_reloc++;
5920 for (i = 0; i < prog->nr_reloc; i++) {
5921 relo = &prog->reloc_desc[i];
5922 if (relo->type != RELO_CORE || relo->insn_idx != insn_idx)
5925 return relo->core_relo;
5940 const char *prog_name = prog->name;
5943 __u32 local_id = relo->type_id;
5948 return -EINVAL;
5950 local_name = btf__name_by_offset(local_btf, local_type->name_off);
5952 return -EINVAL;
5954 if (relo->kind != BPF_CORE_TYPE_ID_LOCAL &&
5956 cands = bpf_core_find_cands(prog->obj, local_btf, local_id);
5988 if (obj->btf_ext->core_relo_info.len == 0)
5992 obj->btf_vmlinux_override = btf__parse(targ_btf_path, NULL);
5993 err = libbpf_get_error(obj->btf_vmlinux_override);
6006 seg = &obj->btf_ext->core_relo_info;
6009 sec_idx = seg->sec_idxs[sec_num];
6012 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
6014 err = -EINVAL;
6018 pr_debug("sec '%s': found %d CO-RE relocations\n", sec_name, sec->num_info);
6021 if (rec->insn_off % BPF_INSN_SZ)
6022 return -EINVAL;
6023 insn_idx = rec->insn_off / BPF_INSN_SZ;
6030 * This is similar to what the x86-64 linker does for relocations.
6034 pr_debug("sec '%s': skipping CO-RE relocation #%d for insn #%d belonging to eliminated weak subprogram\n",
6038 /* no need to apply CO-RE relocation if the program is
6041 if (!prog->autoload)
6045 * program's frame of reference; (sub-)program code is not yet
6046 * relocated, so it's enough to just subtract in-section offset
6048 insn_idx = insn_idx - prog->sec_insn_off;
6049 if (insn_idx >= prog->insns_cnt)
6050 return -EINVAL;
6051 insn = &prog->insns[insn_idx];
6056 prog->name, i, errstr(err));
6060 if (prog->obj->gen_loader)
6063 err = bpf_core_resolve_relo(prog, rec, i, obj->btf, cand_cache, &targ_res);
6066 prog->name, i, errstr(err));
6070 err = bpf_core_patch_insn(prog->name, insn, insn_idx, rec, i, &targ_res);
6073 prog->name, i, insn_idx, errstr(err));
6080 /* obj->btf_vmlinux and module BTFs are freed after object load */
6081 btf__free(obj->btf_vmlinux_override);
6082 obj->btf_vmlinux_override = NULL;
6086 bpf_core_free_cands(entry->pvalue);
6104 prog->name, relo_idx, insn_idx, map_idx, map->name);
6108 insn->code = BPF_JMP | BPF_CALL;
6109 insn->dst_reg = 0;
6110 insn->src_reg = 0;
6111 insn->off = 0;
6115 * where the lower 123 is the map index into the obj->maps[] array
6117 insn->imm = POISON_LDIMM64_MAP_BASE + map_idx;
6132 prog->name, relo_idx, insn_idx, ext->name);
6135 insn->code = BPF_JMP | BPF_CALL;
6136 insn->dst_reg = 0;
6137 insn->src_reg = 0;
6138 insn->off = 0;
6142 * where the lower 123 is the extern index into the obj->externs[] array
6144 insn->imm = POISON_CALL_KFUNC_BASE + ext_idx;
6148 * - map references;
6149 * - global variable references;
6150 * - extern references.
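/*
 * Illustrative BPF-side sketch (editorial, assumes bpf_helpers.h): each
 * reference below compiles into an instruction that the relocation switch
 * that follows patches up -- or poisons, if its map/extern is unavailable.
 */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} counters SEC(".maps");			/* ldimm64 map reference */

__u64 total_calls = 0;				/* global variable reference (.bss) */

extern void bpf_rcu_read_lock(void) __ksym;	/* extern kfunc references */
extern void bpf_rcu_read_unlock(void) __ksym;

SEC("kprobe/do_sys_openat2")
int count_open(void *ctx)
{
	__u32 key = 0;
	__u64 *val = bpf_map_lookup_elem(&counters, &key);

	if (val)
		__sync_fetch_and_add(val, 1);
	bpf_rcu_read_lock();
	total_calls++;
	bpf_rcu_read_unlock();
	return 0;
}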
6157 for (i = 0; i < prog->nr_reloc; i++) {
6158 struct reloc_desc *relo = &prog->reloc_desc[i];
6159 struct bpf_insn *insn = &prog->insns[relo->insn_idx];
6163 switch (relo->type) {
6165 map = &obj->maps[relo->map_idx];
6166 if (obj->gen_loader) {
6168 insn[0].imm = relo->map_idx;
6169 } else if (map->autocreate) {
6171 insn[0].imm = map->fd;
6173 poison_map_ldimm64(prog, i, relo->insn_idx, insn,
6174 relo->map_idx, map);
6178 map = &obj->maps[relo->map_idx];
6179 insn[1].imm = insn[0].imm + relo->sym_off;
6180 if (obj->gen_loader) {
6182 insn[0].imm = relo->map_idx;
6183 } else if (map->autocreate) {
6185 insn[0].imm = map->fd;
6187 poison_map_ldimm64(prog, i, relo->insn_idx, insn,
6188 relo->map_idx, map);
6192 ext = &obj->externs[relo->ext_idx];
6193 if (ext->type == EXT_KCFG) {
6194 if (obj->gen_loader) {
6196 insn[0].imm = obj->kconfig_map_idx;
6199 insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
6201 insn[1].imm = ext->kcfg.data_off;
6203 if (ext->ksym.type_id && ext->is_set) { /* typed ksyms */
6205 insn[0].imm = ext->ksym.kernel_btf_id;
6206 insn[1].imm = ext->ksym.kernel_btf_obj_fd;
6208 insn[0].imm = (__u32)ext->ksym.addr;
6209 insn[1].imm = ext->ksym.addr >> 32;
6214 ext = &obj->externs[relo->ext_idx];
6216 if (ext->is_set) {
6217 insn[0].imm = ext->ksym.kernel_btf_id;
6218 insn[0].off = ext->ksym.btf_fd_idx;
6220 poison_kfunc_call(prog, i, relo->insn_idx, insn,
6221 relo->ext_idx, ext);
6227 prog->name, i);
6228 return -EINVAL;
6240 prog->name, i, relo->type);
6241 return -EINVAL;
6262 sec_idx = ext_info->sec_idxs[sec_num];
6264 if (prog->sec_idx != sec_idx)
6270 if (insn_off < prog->sec_insn_off)
6272 if (insn_off >= prog->sec_insn_off + prog->sec_insn_cnt)
6277 copy_end = rec + ext_info->rec_size;
6281 return -ENOENT;
6283 /* append func/line info of a given (sub-)program to the main
6286 old_sz = (size_t)(*prog_rec_cnt) * ext_info->rec_size;
6287 new_sz = old_sz + (copy_end - copy_start);
6290 return -ENOMEM;
6292 *prog_rec_cnt = new_sz / ext_info->rec_size;
6293 memcpy(new_prog_info + old_sz, copy_start, copy_end - copy_start);
6295 /* Kernel instruction offsets are in units of 8-byte
6301 off_adj = prog->sub_insn_off - prog->sec_insn_off;
6304 for (; rec < rec_end; rec += ext_info->rec_size) {
6309 *prog_rec_sz = ext_info->rec_size;
6313 return -ENOENT;
6326 if (!obj->btf_ext || !kernel_supports(obj, FEAT_BTF_FUNC))
6332 if (main_prog != prog && !main_prog->func_info)
6335 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info,
6336 &main_prog->func_info,
6337 &main_prog->func_info_cnt,
6338 &main_prog->func_info_rec_size);
6340 if (err != -ENOENT) {
6342 prog->name, errstr(err));
6345 if (main_prog->func_info) {
6350 pr_warn("prog '%s': missing .BTF.ext function info.\n", prog->name);
6355 prog->name);
6360 if (main_prog != prog && !main_prog->line_info)
6363 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info,
6364 &main_prog->line_info,
6365 &main_prog->line_info_cnt,
6366 &main_prog->line_info_rec_size);
6368 if (err != -ENOENT) {
6370 prog->name, errstr(err));
6373 if (main_prog->line_info) {
6378 pr_warn("prog '%s': missing .BTF.ext line info.\n", prog->name);
6383 prog->name);
6393 if (insn_idx == relo->insn_idx)
6395 return insn_idx < relo->insn_idx ? -1 : 1;
6400 if (!prog->nr_reloc)
6402 return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc,
6403 sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx);
6408 int new_cnt = main_prog->nr_reloc + subprog->nr_reloc;
6414 relos = libbpf_reallocarray(main_prog->reloc_desc, new_cnt, sizeof(*relos));
6420 return -ENOMEM;
6421 if (subprog->nr_reloc)
6422 memcpy(relos + main_prog->nr_reloc, subprog->reloc_desc,
6423 sizeof(*relos) * subprog->nr_reloc);
6425 for (i = main_prog->nr_reloc; i < new_cnt; i++)
6426 relos[i].insn_idx += subprog->sub_insn_off;
6430 main_prog->reloc_desc = relos;
6431 main_prog->nr_reloc = new_cnt;
6443 subprog->sub_insn_off = main_prog->insns_cnt;
6445 new_cnt = main_prog->insns_cnt + subprog->insns_cnt;
6446 insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns));
6448 pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name);
6449 return -ENOMEM;
6451 main_prog->insns = insns;
6452 main_prog->insns_cnt = new_cnt;
6454 memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns,
6455 subprog->insns_cnt * sizeof(*insns));
6457 pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n",
6458 main_prog->name, subprog->insns_cnt, subprog->name);
6481 for (insn_idx = 0; insn_idx < prog->sec_insn_cnt; insn_idx++) {
6482 insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
6487 if (relo && relo->type == RELO_EXTERN_CALL)
6492 if (relo && relo->type != RELO_CALL && relo->type != RELO_SUBPROG_ADDR) {
6494 prog->name, insn_idx, relo->type);
6495 return -LIBBPF_ERRNO__RELOC;
6498 /* sub-program instruction index is a combination of
6501 * call always has imm = -1, but for static functions
6502 * relocation is against STT_SECTION and insn->imm
6505 * for subprog addr relocation, the relo->sym_off + insn->imm is
6508 if (relo->type == RELO_CALL)
6509 sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1;
6511 sub_insn_idx = (relo->sym_off + insn->imm) / BPF_INSN_SZ;
6518 prog->name, insn_idx);
6519 return -LIBBPF_ERRNO__RELOC;
6524 * offset necessary, insn->imm is relative to
6527 sub_insn_idx = prog->sec_insn_off + insn_idx + insn->imm + 1;
6530 /* we enforce that sub-programs are in the .text section */
6531 subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx);
6533 pr_warn("prog '%s': no .text section found yet sub-program call exists\n",
6534 prog->name);
6535 return -LIBBPF_ERRNO__RELOC;
6541 * - append it at the end of the main program's instructions block;
6542 * - process it recursively, while the current program is put on hold;
6543 * - if that subprogram calls some other not-yet-processed
6548 if (subprog->sub_insn_off == 0) {
6557 /* main_prog->insns memory could have been re-allocated, so
6560 insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
6567 insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1;
6570 prog->name, insn_idx, insn->imm, subprog->name, subprog->sub_insn_off);
6577 * Relocate sub-program calls.
6579 * The algorithm operates as follows. Each entry-point BPF program (referred to as
6580 * main prog) is processed separately. For each subprog (non-entry functions,
6589 * is into a subprog that hasn't been processed (i.e., subprog->sub_insn_off
6605 * subprog->sub_insn_off as zero at all times and won't be appended to current
6614 * +--------+ +-------+
6616 * +--+---+ +--+-+-+ +---+--+
6618 * +--+---+ +------+ +---+--+
6621 * +---+-------+ +------+----+
6623 * +-----------+ +-----------+
6628 * +-----------+------+
6630 * +-----------+------+
6635 * +-----------+------+------+
6637 * +-----------+------+------+
6646 * +-----------+------+
6648 * +-----------+------+
6651 * +-----------+------+------+
6653 * +-----------+------+------+
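/*
 * Editorial worked example of the imm fixup used above: a BPF-to-BPF call's
 * imm is relative to the instruction right after the call.
 */
static int rel_call_imm(size_t callee_sub_insn_off,
			size_t caller_sub_insn_off, size_t call_insn_idx)
{
	/* mirrors: insn->imm = subprog->sub_insn_off -
	 *			(prog->sub_insn_off + insn_idx) - 1; */
	return (int)(callee_sub_insn_off - (caller_sub_insn_off + call_insn_idx) - 1);
}

/* e.g. main prog at offset 0, subprog appended at offset 4, call at main
 * insn #2: rel_call_imm(4, 0, 2) == 1 -- skip one instruction past the
 * call to land on the subprog.
 */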
6666 for (i = 0; i < obj->nr_programs; i++) {
6667 subprog = &obj->programs[i];
6671 subprog->sub_insn_off = 0;
6688 for (i = 0; i < obj->nr_programs; i++) {
6689 prog = &obj->programs[i];
6690 zfree(&prog->reloc_desc);
6691 prog->nr_reloc = 0;
6700 if (a->insn_idx != b->insn_idx)
6701 return a->insn_idx < b->insn_idx ? -1 : 1;
6704 if (a->type != b->type)
6705 return a->type < b->type ? -1 : 1;
6714 for (i = 0; i < obj->nr_programs; i++) {
6715 struct bpf_program *p = &obj->programs[i];
6717 if (!p->nr_reloc)
6720 qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs);
6730 if (!obj->btf || !kernel_supports(obj, FEAT_BTF_DECL_TAG))
6733 n = btf__type_cnt(obj->btf);
6738 t = btf_type_by_id(obj->btf, i);
6739 if (!btf_is_decl_tag(t) || btf_decl_tag(t)->component_idx != -1)
6742 name = btf__str_by_offset(obj->btf, t->name_off);
6746 t = btf_type_by_id(obj->btf, t->type);
6749 prog->name);
6750 return -EINVAL;
6752 if (strcmp(prog->name, btf__str_by_offset(obj->btf, t->name_off)) != 0)
6758 if (prog->exception_cb_idx >= 0) {
6759 prog->exception_cb_idx = -1;
6766 prog->name);
6767 return -EINVAL;
6770 for (j = 0; j < obj->nr_programs; j++) {
6771 struct bpf_program *subprog = &obj->programs[j];
6775 if (strcmp(name, subprog->name) != 0)
6777 /* Enforce non-hidden, as from the verifier's point of
6781 if (!subprog->sym_global || subprog->mark_btf_static) {
6782 pr_warn("prog '%s': exception callback %s must be a global non-hidden function\n",
6783 prog->name, subprog->name);
6784 return -EINVAL;
6787 if (prog->exception_cb_idx >= 0) {
6789 prog->name, subprog->name);
6790 return -EINVAL;
6792 prog->exception_cb_idx = j;
6796 if (prog->exception_cb_idx >= 0)
6799 pr_warn("prog '%s': cannot find exception callback '%s'\n", prog->name, name);
6800 return -ENOENT;
6838 /* forward declarations for arch-specific underlying types of bpf_user_pt_regs_t typedef,
6840 * with this approach we don't need any extra arch-specific #ifdef guards
6863 t = btf__type_by_id(btf, t->type);
6865 (prog->type == BPF_PROG_TYPE_KPROBE || prog->type == BPF_PROG_TYPE_PERF_EVENT)) {
6866 tname = btf__str_by_offset(btf, t->name_off) ?: "<anon>";
6872 t = skip_mods_and_typedefs(btf, t->type, NULL);
6879 tname = btf__str_by_offset(btf, t->name_off) ?: "<anon>";
6884 switch (prog->type) {
6904 if (btf_is_int(t) && t->size == 8)
6913 prog->name, subprog_name, arg_idx, ctx_name);
6924 /* caller already validated FUNC -> FUNC_PROTO validity */
6926 fn_proto_t = btf_type_by_id(btf, fn_t->type);
6937 fn_name_off = fn_t->name_off; /* we are about to invalidate fn_t */
6939 orig_proto_id = fn_t->type; /* original FUNC_PROTO ID */
6940 ret_type_id = fn_proto_t->type; /* fn_proto_t will be invalidated */
6946 return -EINVAL;
6954 name_off = p->name_off;
6956 err = btf__add_func_param(btf, "", p->type);
6962 p->name_off = name_off; /* use remembered str offset */
6965 /* clone FUNC now, btf__add_func() enforces non-empty name, so use
6969 fn_id = btf__add_func(btf, prog->name, linkage, fn_proto_id);
6971 return -EINVAL;
6974 fn_t->name_off = fn_name_off; /* reuse original string */
6991 struct btf *btf = obj->btf;
6999 if (!obj->btf_ext || !prog->func_info)
7010 if (global_ctx_map[i].prog_type != prog->type)
7019 orig_ids = calloc(prog->func_info_cnt, sizeof(*orig_ids));
7021 return -ENOMEM;
7022 for (i = 0; i < prog->func_info_cnt; i++) {
7023 func_rec = prog->func_info + prog->func_info_rec_size * i;
7024 orig_ids[i] = func_rec->type_id;
7029 * clone and adjust FUNC -> FUNC_PROTO combo
7036 if (strcmp(btf__str_by_offset(btf, t->name_off), ctx_tag) != 0)
7040 orig_fn_id = t->type;
7045 /* sanity check FUNC -> FUNC_PROTO chain, just in case */
7046 fn_proto_t = btf_type_by_id(btf, fn_t->type);
7052 for (rec_idx = 0; rec_idx < prog->func_info_cnt; rec_idx++) {
7053 if (orig_ids[rec_idx] == t->type) {
7054 func_rec = prog->func_info + prog->func_info_rec_size * rec_idx;
7064 arg_idx = btf_decl_tag(t)->component_idx;
7070 fn_name = btf__str_by_offset(btf, fn_t->name_off) ?: "<anon>";
7071 if (!need_func_arg_type_fixup(btf, prog, fn_name, arg_idx, p->type, ctx_name))
7075 if (func_rec->type_id == orig_fn_id) {
7085 func_rec->type_id = fn_id;
7088 /* create PTR -> STRUCT type chain to mark PTR_TO_CTX argument;
7090 * funcs share the same program type, so need only PTR ->
7097 err = -EINVAL;
7103 tag_id = btf__add_decl_tag(btf, ctx_tag, func_rec->type_id, arg_idx);
7105 err = -EINVAL;
7110 fn_t = btf_type_by_id(btf, func_rec->type_id);
7111 fn_proto_t = btf_type_by_id(btf, fn_t->type);
7115 p->type = ptr_id;
7131 if (obj->btf_ext) {
7134 pr_warn("failed to perform CO-RE relocations: %s\n",
7141 /* Before relocating calls, pre-process relocations and mark
7148 for (i = 0; i < obj->nr_programs; i++) {
7149 prog = &obj->programs[i];
7150 for (j = 0; j < prog->nr_reloc; j++) {
7151 struct reloc_desc *relo = &prog->reloc_desc[j];
7152 struct bpf_insn *insn = &prog->insns[relo->insn_idx];
7155 if (relo->type == RELO_SUBPROG_ADDR)
7167 for (i = 0; i < obj->nr_programs; i++) {
7168 prog = &obj->programs[i];
7169 /* sub-program's sub-calls are relocated within the context of
7174 if (!prog->autoload)
7180 prog->name, errstr(err));
7188 if (prog->exception_cb_idx >= 0) {
7189 struct bpf_program *subprog = &obj->programs[prog->exception_cb_idx];
7196 if (subprog->sub_insn_off == 0) {
7206 for (i = 0; i < obj->nr_programs; i++) {
7207 prog = &obj->programs[i];
7210 if (!prog->autoload)
7217 prog->name, errstr(err));
7225 prog->name, errstr(err));
7253 if (!obj->efile.btf_maps_sec_btf_id || !obj->btf)
7254 return -EINVAL;
7255 sec = btf__type_by_id(obj->btf, obj->efile.btf_maps_sec_btf_id);
7257 return -EINVAL;
7259 nrels = shdr->sh_size / shdr->sh_entsize;
7264 return -LIBBPF_ERRNO__FORMAT;
7267 sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info));
7270 i, (size_t)ELF64_R_SYM(rel->r_info));
7271 return -LIBBPF_ERRNO__FORMAT;
7273 name = elf_sym_str(obj, sym->st_name) ?: "<?>";
7275 pr_debug(".maps relo #%d: for %zd value %zd rel->r_offset %zu name %d ('%s')\n",
7276 i, (ssize_t)(rel->r_info >> 32), (size_t)sym->st_value,
7277 (size_t)rel->r_offset, sym->st_name, name);
7279 for (j = 0; j < obj->nr_maps; j++) {
7280 map = &obj->maps[j];
7281 if (map->sec_idx != obj->efile.btf_maps_shndx)
7284 vi = btf_var_secinfos(sec) + map->btf_var_idx;
7285 if (vi->offset <= rel->r_offset &&
7286 rel->r_offset + bpf_ptr_sz <= vi->offset + vi->size)
7289 if (j == obj->nr_maps) {
7290 pr_warn(".maps relo #%d: cannot find map '%s' at rel->r_offset %zu\n",
7291 i, name, (size_t)rel->r_offset);
7292 return -EINVAL;
7295 is_map_in_map = bpf_map_type__is_map_in_map(map->def.type);
7296 is_prog_array = map->def.type == BPF_MAP_TYPE_PROG_ARRAY;
7299 if (sym->st_shndx != obj->efile.btf_maps_shndx) {
7300 pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n",
7302 return -LIBBPF_ERRNO__RELOC;
7304 if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS &&
7305 map->def.key_size != sizeof(int)) {
7306 pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n",
7307 i, map->name, sizeof(int));
7308 return -EINVAL;
7314 return -ESRCH;
7321 return -ESRCH;
7323 if (targ_prog->sec_idx != sym->st_shndx ||
7324 targ_prog->sec_insn_off * 8 != sym->st_value ||
7326 pr_warn(".maps relo #%d: '%s' isn't an entry-point program\n",
7328 return -LIBBPF_ERRNO__RELOC;
7331 return -EINVAL;
7334 var = btf__type_by_id(obj->btf, vi->type);
7335 def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
7337 return -EINVAL;
7338 member = btf_members(def) + btf_vlen(def) - 1;
7339 mname = btf__name_by_offset(obj->btf, member->name_off);
7341 return -EINVAL;
7343 moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8;
7344 if (rel->r_offset - vi->offset < moff)
7345 return -EINVAL;
7347 moff = rel->r_offset - vi->offset - moff;
7352 return -EINVAL;
7354 if (moff >= map->init_slots_sz) {
7356 tmp = libbpf_reallocarray(map->init_slots, new_sz, host_ptr_sz);
7358 return -ENOMEM;
7359 map->init_slots = tmp;
7360 memset(map->init_slots + map->init_slots_sz, 0,
7361 (new_sz - map->init_slots_sz) * host_ptr_sz);
7362 map->init_slots_sz = new_sz;
7364 map->init_slots[moff] = is_map_in_map ? (void *)targ_map : (void *)targ_prog;
7367 i, map->name, moff, type, name);
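/*
 * Illustrative BPF-side sketch (editorial): the kind of BTF-defined
 * map-in-map declaration whose initializer the .maps relocations above
 * turn into init_slots entries; every &inner in .values is one relocation,
 * and the slot index is recovered from the relocation offset (moff).
 */
struct inner_map {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, int);
} inner_a SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__uint(max_entries, 8);
	__type(key, int);
	__array(values, struct inner_map);
} outer SEC(".maps") = {
	.values = { [0] = &inner_a },
};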
7377 for (i = 0; i < obj->efile.sec_cnt; i++) {
7378 struct elf_sec_desc *sec_desc = &obj->efile.secs[i];
7383 if (sec_desc->sec_type != SEC_RELO)
7386 shdr = sec_desc->shdr;
7387 data = sec_desc->data;
7388 idx = shdr->sh_info;
7390 if (shdr->sh_type != SHT_REL || idx < 0 || idx >= obj->efile.sec_cnt) {
7392 return -LIBBPF_ERRNO__INTERNAL;
7395 if (obj->efile.secs[idx].sec_type == SEC_ST_OPS)
7397 else if (idx == obj->efile.btf_maps_shndx)
7411 if (BPF_CLASS(insn->code) == BPF_JMP &&
7412 BPF_OP(insn->code) == BPF_CALL &&
7413 BPF_SRC(insn->code) == BPF_K &&
7414 insn->src_reg == 0 &&
7415 insn->dst_reg == 0) {
7416 *func_id = insn->imm;
7424 struct bpf_insn *insn = prog->insns;
7428 if (obj->gen_loader)
7431 for (i = 0; i < prog->insns_cnt; i++, insn++) {
7443 insn->imm = BPF_FUNC_probe_read;
7448 insn->imm = BPF_FUNC_probe_read_str;
7460 /* this is called as prog->sec_def->prog_prepare_load_fn for libbpf-supported sec_defs */
7467 if ((def & SEC_EXP_ATTACH_OPT) && !kernel_supports(prog->obj, FEAT_EXP_ATTACH_TYPE))
7468 opts->expected_attach_type = 0;
7471 opts->prog_flags |= BPF_F_SLEEPABLE;
7473 if (prog->type == BPF_PROG_TYPE_XDP && (def & SEC_XDP_FRAGS))
7474 opts->prog_flags |= BPF_F_XDP_HAS_FRAGS;
7477 if ((def & SEC_USDT) && kernel_supports(prog->obj, FEAT_UPROBE_MULTI_LINK)) {
7482 prog->expected_attach_type = BPF_TRACE_UPROBE_MULTI;
7483 opts->expected_attach_type = BPF_TRACE_UPROBE_MULTI;
7486 if ((def & SEC_ATTACH_BTF) && !prog->attach_btf_id) {
7490 attach_name = strchr(prog->sec_name, '/');
7501 pr_warn("prog '%s': no BTF-based attach target is specified, use bpf_program__set_attach_target()\n",
7502 prog->name);
7503 return -EINVAL;
7512 prog->attach_btf_obj_fd = btf_obj_fd;
7513 prog->attach_btf_id = btf_type_id;
7516 * prog->attach_btf_obj_fd/prog->attach_btf_id anymore because
7520 opts->attach_btf_obj_fd = btf_obj_fd;
7521 opts->attach_btf_id = btf_type_id;
7537 __u32 log_level = prog->log_level;
7543 switch (prog->type) {
7550 prog->name, prog->sec_name);
7551 return -EINVAL;
7553 if (prog->attach_btf_id == 0) {
7555 prog->name);
7556 return -EINVAL;
7564 return -EINVAL;
7567 prog_name = prog->name;
7568 load_attr.attach_prog_fd = prog->attach_prog_fd;
7569 load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd;
7570 load_attr.attach_btf_id = prog->attach_btf_id;
7572 load_attr.prog_ifindex = prog->prog_ifindex;
7573 load_attr.expected_attach_type = prog->expected_attach_type;
7576 if (obj->btf && btf__fd(obj->btf) >= 0 && kernel_supports(obj, FEAT_BTF_FUNC)) {
7577 load_attr.prog_btf_fd = btf__fd(obj->btf);
7578 load_attr.func_info = prog->func_info;
7579 load_attr.func_info_rec_size = prog->func_info_rec_size;
7580 load_attr.func_info_cnt = prog->func_info_cnt;
7581 load_attr.line_info = prog->line_info;
7582 load_attr.line_info_rec_size = prog->line_info_rec_size;
7583 load_attr.line_info_cnt = prog->line_info_cnt;
7586 load_attr.prog_flags = prog->prog_flags;
7587 load_attr.fd_array = obj->fd_array;
7589 load_attr.token_fd = obj->token_fd;
7590 if (obj->token_fd)
7594 if (prog->sec_def && prog->sec_def->prog_prepare_load_fn) {
7595 err = prog->sec_def->prog_prepare_load_fn(prog, &load_attr, prog->sec_def->cookie);
7598 prog->name, errstr(err));
7601 insns = prog->insns;
7602 insns_cnt = prog->insns_cnt;
7605 if (obj->gen_loader) {
7606 bpf_gen__prog_load(obj->gen_loader, prog->type, prog->name,
7608 prog - obj->programs);
7609 *prog_fd = -1;
7620 if (prog->log_buf) {
7621 log_buf = prog->log_buf;
7622 log_buf_size = prog->log_size;
7624 } else if (obj->log_buf) {
7625 log_buf = obj->log_buf;
7626 log_buf_size = obj->log_size;
7632 ret = -ENOMEM;
7645 ret = bpf_prog_load(prog->type, prog_name, license, insns, insns_cnt, &load_attr);
7648 pr_debug("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n",
7649 prog->name, log_buf);
7652 if (obj->has_rodata && kernel_supports(obj, FEAT_PROG_BIND_MAP)) {
7656 for (i = 0; i < obj->nr_maps; i++) {
7657 map = &prog->obj->maps[i];
7658 if (map->libbpf_type != LIBBPF_MAP_RODATA)
7661 if (bpf_prog_bind_map(ret, map->fd, NULL)) {
7663 prog->name, map->real_name, errstr(errno));
7683 * Currently, we'll get -EINVAL when we reach (UINT_MAX >> 2).
7688 ret = -errno;
7690 /* post-process verifier log to improve error descriptions */
7693 pr_warn("prog '%s': BPF program load failed: %s\n", prog->name, errstr(errno));
7697 pr_warn("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n",
7698 prog->name, log_buf);
7714 p = cur - 1;
7715 while (p - 1 >= buf && *(p - 1) != '\n')
7716 p--;
7724 /* size of the remaining log content to the right of the to-be-replaced part */
7725 size_t rem_sz = (buf + log_sz) - (orig + orig_sz);
7730 * shift log contents by (patch_sz - orig_sz) bytes to the right
7731 * starting right after the to-be-replaced part of the log.
7734 * shift log contents by (orig_sz - patch_sz) bytes to the left
7735 * starting right after the to-be-replaced part of the log
7744 patch_sz -= (orig + patch_sz) - (buf + buf_sz) + 1;
7746 } else if (patch_sz - orig_sz > buf_sz - log_sz) {
7748 rem_sz -= (patch_sz - orig_sz) - (buf_sz - log_sz);
7762 /* Expected log for failed and not properly guarded CO-RE relocation:
7763 * line1 -> 123: (85) call unknown#195896080
7764 * line2 -> invalid func unknown#195896080
7765 * line3 -> <anything else or end of buffer>
7768 * instruction index to find the corresponding CO-RE relocation and
7770 * failed CO-RE relocation.
7784 err = bpf_core_parse_spec(prog->name, prog->obj->btf, relo, &spec);
7790 "%d: <invalid CO-RE relocation>\n"
7791 "failed to resolve CO-RE relocation %s%s\n",
7794 patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch);
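/*
 * Illustrative BPF-side sketch (editorial): properly guarding a CO-RE
 * access so the poisoned call described above never executes. When the
 * guard is false at load time, the verifier's dead code elimination drops
 * the branch holding the poisoned instruction instead of rejecting the
 * program. Assumes vmlinux.h and bpf_core_read.h.
 */
SEC("tp_btf/task_newtask")
int guarded_read(u64 *ctx)
{
	struct task_struct *task = (struct task_struct *)ctx[0];
	int prio = -1;

	if (bpf_core_field_exists(task->prio))	/* load-time existence check */
		prio = BPF_CORE_READ(task, prio);
	bpf_printk("prio=%d", prio);
	return 0;
}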
7802 * line1 -> 123: (85) call unknown#2001000345
7803 * line2 -> invalid func unknown#2001000345
7804 * line3 -> <anything else or end of buffer>
7807 * "345" in "2001000345" is a map index in obj->maps to fetch map name.
7809 struct bpf_object *obj = prog->obj;
7817 map_idx -= POISON_LDIMM64_MAP_BASE;
7818 if (map_idx < 0 || map_idx >= obj->nr_maps)
7820 map = &obj->maps[map_idx];
7825 insn_idx, map->name);
7827 patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch);
7835 * line1 -> 123: (85) call unknown#2002000345
7836 * line2 -> invalid func unknown#2002000345
7837 * line3 -> <anything else or end of buffer>
7840 * "345" in "2002000345" is an extern index in obj->externs to fetch kfunc name.
7842 struct bpf_object *obj = prog->obj;
7850 ext_idx -= POISON_CALL_KFUNC_BASE;
7851 if (ext_idx < 0 || ext_idx >= obj->nr_extern)
7853 ext = &obj->externs[ext_idx];
7858 insn_idx, ext->name);
7860 patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch);
7875 next_line = buf + log_sz - 1;
7887 /* failed CO-RE relocation case */
7915 struct bpf_object *obj = prog->obj;
7918 for (i = 0; i < prog->nr_reloc; i++) {
7919 struct reloc_desc *relo = &prog->reloc_desc[i];
7920 struct extern_desc *ext = &obj->externs[relo->ext_idx];
7923 switch (relo->type) {
7925 if (ext->type != EXT_KSYM)
7927 kind = btf_is_var(btf__type_by_id(obj->btf, ext->btf_id)) ?
7929 bpf_gen__record_extern(obj->gen_loader, ext->name,
7930 ext->is_weak, !ext->ksym.type_id,
7931 true, kind, relo->insn_idx);
7934 bpf_gen__record_extern(obj->gen_loader, ext->name,
7935 ext->is_weak, false, false, BTF_KIND_FUNC,
7936 relo->insn_idx);
7940 .insn_off = relo->insn_idx * 8,
7941 .type_id = relo->core_relo->type_id,
7942 .access_str_off = relo->core_relo->access_str_off,
7943 .kind = relo->core_relo->kind,
7946 bpf_gen__record_relo_core(obj->gen_loader, &cr);
7963 for (i = 0; i < obj->nr_programs; i++) {
7964 prog = &obj->programs[i];
7967 if (!prog->autoload) {
7968 pr_debug("prog '%s': skipped loading\n", prog->name);
7971 prog->log_level |= log_level;
7973 if (obj->gen_loader)
7976 err = bpf_object_load_prog(obj, prog, prog->insns, prog->insns_cnt,
7977 obj->license, obj->kern_version, &prog->fd);
7979 pr_warn("prog '%s': failed to load: %s\n", prog->name, errstr(err));
7994 for (i = 0; i < obj->nr_programs; i++) {
7995 prog = &obj->programs[i];
8011 prog->sec_def = find_sec_def(prog->sec_name);
8012 if (!prog->sec_def) {
8015 prog->name, prog->sec_name);
8019 prog->type = prog->sec_def->prog_type;
8020 prog->expected_attach_type = prog->sec_def->expected_attach_type;
8025 if (prog->sec_def->prog_setup_fn) {
8026 err = prog->sec_def->prog_setup_fn(prog, prog->sec_def->cookie);
8029 prog->name, errstr(err));
8050 return ERR_PTR(-EINVAL);
8055 return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
8059 return ERR_PTR(-EINVAL);
8073 return ERR_PTR(-EINVAL);
8075 return ERR_PTR(-EINVAL);
8085 return ERR_PTR(-ENAMETOOLONG);
8091 obj->log_buf = log_buf;
8092 obj->log_size = log_size;
8093 obj->log_level = log_level;
8096 obj->token_path = strdup(token_path);
8097 if (!obj->token_path) {
8098 err = -ENOMEM;
8106 err = -ENAMETOOLONG;
8109 obj->btf_custom_path = strdup(btf_tmp_path);
8110 if (!obj->btf_custom_path) {
8111 err = -ENOMEM;
8118 obj->kconfig = strdup(kconfig);
8119 if (!obj->kconfig) {
8120 err = -ENOMEM;
8147 return libbpf_err_ptr(-EINVAL);
8164 return libbpf_err_ptr(-EINVAL);
8167 snprintf(tmp_name, sizeof(tmp_name), "%lx-%zx", (unsigned long)obj_buf, obj_buf_sz);
8177 return libbpf_err(-EINVAL);
8179 for (i = 0; i < obj->nr_maps; i++) {
8180 zclose(obj->maps[i].fd);
8181 if (obj->maps[i].st_ops)
8182 zfree(&obj->maps[i].st_ops->kern_vdata);
8185 for (i = 0; i < obj->nr_programs; i++)
8186 bpf_program__unload(&obj->programs[i]);
8199 m->def.map_flags &= ~BPF_F_MMAPABLE;
8217 err = -errno;
8229 err = -EINVAL;
8252 ext = find_extern_by_name_with_len(obj, sym_name, res - sym_name);
8255 if (!ext || ext->type != EXT_KSYM)
8258 t = btf__type_by_id(obj->btf, ext->btf_id);
8262 if (ext->is_set && ext->ksym.addr != sym_addr) {
8264 sym_name, ext->ksym.addr, sym_addr);
8265 return -EINVAL;
8267 if (!ext->is_set) {
8268 ext->is_set = true;
8269 ext->ksym.addr = sym_addr;
8288 btf = obj->btf_vmlinux;
8292 if (id == -ENOENT) {
8297 for (i = 0; i < obj->btf_module_cnt; i++) {
8299 mod_btf = &obj->btf_modules[i];
8300 btf = mod_btf->btf;
8302 if (id != -ENOENT)
8307 return -ESRCH;
8324 id = find_ksym_btf_id(obj, ext->name, BTF_KIND_VAR, &btf, &mod_btf);
8326 if (id == -ESRCH && ext->is_weak)
8329 ext->name);
8334 local_type_id = ext->ksym.type_id;
8338 targ_var_name = btf__name_by_offset(btf, targ_var->name_off);
8339 targ_type = skip_mods_and_typedefs(btf, targ_var->type, &targ_type_id);
8341 err = bpf_core_types_are_compat(obj->btf, local_type_id,
8347 local_type = btf__type_by_id(obj->btf, local_type_id);
8348 local_name = btf__name_by_offset(obj->btf, local_type->name_off);
8349 targ_name = btf__name_by_offset(btf, targ_type->name_off);
8352 ext->name, local_type_id,
8355 return -EINVAL;
8358 ext->is_set = true;
8359 ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0;
8360 ext->ksym.kernel_btf_id = id;
8362 ext->name, id, btf_kind_str(targ_var), targ_var_name);
8376 local_func_proto_id = ext->ksym.type_id;
8378 kfunc_id = find_ksym_btf_id(obj, ext->essent_name ?: ext->name, BTF_KIND_FUNC, &kern_btf,
8381 if (kfunc_id == -ESRCH && ext->is_weak)
8384 ext->name);
8389 kfunc_proto_id = kern_func->type;
8391 ret = bpf_core_types_are_compat(obj->btf, local_func_proto_id,
8394 if (ext->is_weak)
8398 ext->name, local_func_proto_id,
8399 mod_btf ? mod_btf->name : "vmlinux", kfunc_proto_id);
8400 return -EINVAL;
8404 if (mod_btf && !mod_btf->fd_array_idx) {
8405 /* insn->off is s16 */
8406 if (obj->fd_array_cnt == INT16_MAX) {
8408 ext->name, mod_btf->fd_array_idx);
8409 return -E2BIG;
8412 if (!obj->fd_array_cnt)
8413 obj->fd_array_cnt = 1;
8415 ret = libbpf_ensure_mem((void **)&obj->fd_array, &obj->fd_array_cap, sizeof(int),
8416 obj->fd_array_cnt + 1);
8419 mod_btf->fd_array_idx = obj->fd_array_cnt;
8421 obj->fd_array[obj->fd_array_cnt++] = mod_btf->fd;
8424 ext->is_set = true;
8425 ext->ksym.kernel_btf_id = kfunc_id;
8426 ext->ksym.btf_fd_idx = mod_btf ? mod_btf->fd_array_idx : 0;
8429 * {kernel_btf_id, btf_fd_idx} -> fixup bpf_call.
8430 * {kernel_btf_id, kernel_btf_obj_fd} -> fixup ld_imm64.
8432 ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0;
8434 ext->name, mod_btf ? mod_btf->name : "vmlinux", kfunc_id);
8445 for (i = 0; i < obj->nr_extern; i++) {
8446 ext = &obj->externs[i];
8447 if (ext->type != EXT_KSYM || !ext->ksym.type_id)
8450 if (obj->gen_loader) {
8451 ext->is_set = true;
8452 ext->ksym.kernel_btf_obj_fd = 0;
8453 ext->ksym.kernel_btf_id = 0;
8456 t = btf__type_by_id(obj->btf, ext->btf_id);
8476 if (obj->nr_extern == 0)
8479 if (obj->kconfig_map_idx >= 0)
8480 kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped;
8482 for (i = 0; i < obj->nr_extern; i++) {
8483 ext = &obj->externs[i];
8485 if (ext->type == EXT_KSYM) {
8486 if (ext->ksym.type_id)
8491 } else if (ext->type == EXT_KCFG) {
8492 void *ext_ptr = kcfg_data + ext->kcfg.data_off;
8496 if (str_has_pfx(ext->name, "CONFIG_")) {
8502 if (strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) {
8505 pr_warn("extern (kcfg) '%s': failed to get kernel version\n", ext->name);
8506 return -EINVAL;
8508 } else if (strcmp(ext->name, "LINUX_HAS_BPF_COOKIE") == 0) {
8510 } else if (strcmp(ext->name, "LINUX_HAS_SYSCALL_WRAPPER") == 0) {
8512 } else if (!str_has_pfx(ext->name, "LINUX_") || !ext->is_weak) {
8520 pr_warn("extern (kcfg) '%s': unrecognized virtual extern\n", ext->name);
8521 return -EINVAL;
8528 ext->name, (long long)value);
8530 pr_warn("extern '%s': unrecognized extern kind\n", ext->name);
8531 return -EINVAL;
8537 return -EINVAL;
8539 for (i = 0; i < obj->nr_extern; i++) {
8540 ext = &obj->externs[i];
8541 if (ext->type == EXT_KCFG && !ext->is_set) {
8550 return -EINVAL;
8555 return -EINVAL;
8560 return -EINVAL;
8562 for (i = 0; i < obj->nr_extern; i++) {
8563 ext = &obj->externs[i];
8565 if (!ext->is_set && !ext->is_weak) {
8566 pr_warn("extern '%s' (strong): not resolved\n", ext->name);
8567 return -ESRCH;
8568 } else if (!ext->is_set) {
8570 ext->name);
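/*
 * Illustrative BPF-side sketch (editorial): how the externs resolved above
 * are declared (__kconfig, __ksym and __weak come from bpf_helpers.h).
 */
extern unsigned int LINUX_KERNEL_VERSION __kconfig;	/* virtual kconfig extern */
extern int CONFIG_HZ __kconfig __weak;			/* Kconfig value, may be absent */
extern const struct rq runqueues __ksym __weak;		/* typed ksym variable */
extern void bpf_rcu_read_lock(void) __ksym;		/* kfunc externs */
extern void bpf_rcu_read_unlock(void) __ksym;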
8583 st_ops = map->st_ops;
8584 type = btf__type_by_id(map->obj->btf, st_ops->type_id);
8586 struct bpf_program *prog = st_ops->progs[i];
8594 kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i];
8604 for (i = 0; i < obj->nr_maps; i++) {
8605 map = &obj->maps[i];
8610 if (!map->autocreate)
8623 /* unpin any maps that were auto-pinned during load */
8624 for (i = 0; i < obj->nr_maps; i++)
8625 if (obj->maps[i].pinned && !obj->maps[i].reused)
8626 bpf_map__unpin(&obj->maps[i], NULL);
8634 zfree(&obj->fd_array);
8637 for (i = 0; i < obj->btf_module_cnt; i++) {
8638 close(obj->btf_modules[i].fd);
8639 btf__free(obj->btf_modules[i].btf);
8640 free(obj->btf_modules[i].name);
8642 obj->btf_module_cnt = 0;
8643 zfree(&obj->btf_modules);
8646 btf__free(obj->btf_vmlinux);
8647 obj->btf_vmlinux = NULL;
8654 if (obj->state >= OBJ_PREPARED) {
8655 pr_warn("object '%s': prepare loading can't be attempted twice\n", obj->name);
8656 return -EINVAL;
8662 err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
8666 err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
8674 obj->state = OBJ_LOADED;
8678 obj->state = OBJ_PREPARED;
8687 return libbpf_err(-EINVAL);
8689 if (obj->state >= OBJ_LOADED) {
8690 pr_warn("object '%s': load can't be attempted twice\n", obj->name);
8691 return libbpf_err(-EINVAL);
8694 /* Disallow kernel loading programs of non-native endianness but
8695 * permit cross-endian creation of "light skeleton".
8697 if (obj->gen_loader) {
8698 bpf_gen__init(obj->gen_loader, extra_log_level, obj->nr_programs, obj->nr_maps);
8700 pr_warn("object '%s': loading non-native endianness is unsupported\n", obj->name);
8701 return libbpf_err(-LIBBPF_ERRNO__ENDIAN);
8704 if (obj->state < OBJ_PREPARED) {
8713 if (obj->gen_loader) {
8715 if (obj->btf)
8716 btf__set_fd(obj->btf, -1);
8718 err = bpf_gen__finish(obj->gen_loader, obj->nr_programs, obj->nr_maps);
8722 obj->state = OBJ_LOADED; /* doesn't matter if successfully or not */
8727 pr_warn("failed to load object '%s'\n", obj->path);
8751 return -ENOMEM;
8755 err = -errno;
8771 return -EINVAL;
8775 return -ENOMEM;
8780 err = -errno;
8786 err = -EINVAL;
8796 if (prog->fd < 0) {
8797 pr_warn("prog '%s': can't pin program that wasn't loaded\n", prog->name);
8798 return libbpf_err(-EINVAL);
8809 if (bpf_obj_pin(prog->fd, path)) {
8810 err = -errno;
8811 pr_warn("prog '%s': failed to pin at '%s': %s\n", prog->name, path, errstr(err));
8815 pr_debug("prog '%s': pinned at '%s'\n", prog->name, path);
8823 if (prog->fd < 0) {
8824 pr_warn("prog '%s': can't unpin program that wasn't loaded\n", prog->name);
8825 return libbpf_err(-EINVAL);
8834 return libbpf_err(-errno);
8836 pr_debug("prog '%s': unpinned from '%s'\n", prog->name, path);
8846 return libbpf_err(-EINVAL);
8849 if (map->fd < 0) {
8850 pr_warn("map '%s': can't pin BPF map without FD (was it created?)\n", map->name);
8851 return libbpf_err(-EINVAL);
8854 if (map->pin_path) {
8855 if (path && strcmp(path, map->pin_path)) {
8857 bpf_map__name(map), map->pin_path, path);
8858 return libbpf_err(-EINVAL);
8859 } else if (map->pinned) {
8860 pr_debug("map '%s' already pinned at '%s'; not re-pinning\n",
8861 bpf_map__name(map), map->pin_path);
8868 return libbpf_err(-EINVAL);
8869 } else if (map->pinned) {
8871 return libbpf_err(-EEXIST);
8874 map->pin_path = strdup(path);
8875 if (!map->pin_path) {
8876 err = -errno;
8881 err = make_parent_dir(map->pin_path);
8885 err = check_path(map->pin_path);
8889 if (bpf_obj_pin(map->fd, map->pin_path)) {
8890 err = -errno;
8894 map->pinned = true;
8895 pr_debug("pinned map '%s'\n", map->pin_path);
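/*
 * Illustrative user-space sketch (editorial), assuming an opened object
 * with a map named "counters": a pin path set before load makes
 * bpf_object__load() pin the fresh map, or reuse a compatible map that is
 * already pinned at that path.
 */
struct bpf_map *m = bpf_object__find_map_by_name(obj, "counters");
int err = m ? bpf_map__set_pin_path(m, "/sys/fs/bpf/counters") : -ENOENT;

if (!err)
	err = bpf_object__load(obj);	/* auto-pins (or reuses) "counters" */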
8910 return libbpf_err(-EINVAL);
8913 if (map->pin_path) {
8914 if (path && strcmp(path, map->pin_path)) {
8916 bpf_map__name(map), map->pin_path, path);
8917 return libbpf_err(-EINVAL);
8919 path = map->pin_path;
8923 return libbpf_err(-EINVAL);
8932 return libbpf_err(-errno);
8934 map->pinned = false;
8947 return libbpf_err(-errno);
8950 free(map->pin_path);
8951 map->pin_path = new;
8960 return map->pin_path;
8965 return map->pinned;
8984 return libbpf_err(-ENOENT);
8986 if (obj->state < OBJ_PREPARED) {
8988 return libbpf_err(-ENOENT);
8995 if (!map->autocreate)
9004 } else if (!map->pin_path) {
9017 if (!map->pin_path)
9032 return libbpf_err(-ENOENT);
9044 } else if (!map->pin_path) {
9063 return libbpf_err(-ENOENT);
9065 if (obj->state < OBJ_LOADED) {
9067 return libbpf_err(-ENOENT);
9071 err = pathname_concat(buf, sizeof(buf), path, prog->name);
9084 if (pathname_concat(buf, sizeof(buf), path, prog->name))
9099 return libbpf_err(-ENOENT);
9104 err = pathname_concat(buf, sizeof(buf), path, prog->name);
9150 if (map->inner_map) {
9151 bpf_map__destroy(map->inner_map);
9152 zfree(&map->inner_map);
9155 zfree(&map->init_slots);
9156 map->init_slots_sz = 0;
9158 if (map->mmaped && map->mmaped != map->obj->arena_data)
9159 munmap(map->mmaped, bpf_map_mmap_sz(map));
9160 map->mmaped = NULL;
9162 if (map->st_ops) {
9163 zfree(&map->st_ops->data);
9164 zfree(&map->st_ops->progs);
9165 zfree(&map->st_ops->kern_func_off);
9166 zfree(&map->st_ops);
9169 zfree(&map->name);
9170 zfree(&map->real_name);
9171 zfree(&map->pin_path);
9173 if (map->fd >= 0)
9174 zclose(map->fd);
9191 usdt_manager_free(obj->usdt_man);
9192 obj->usdt_man = NULL;
9194 bpf_gen__free(obj->gen_loader);
9197 btf__free(obj->btf);
9198 btf__free(obj->btf_vmlinux);
9199 btf_ext__free(obj->btf_ext);
9201 for (i = 0; i < obj->nr_maps; i++)
9202 bpf_map__destroy(&obj->maps[i]);
9204 zfree(&obj->btf_custom_path);
9205 zfree(&obj->kconfig);
9207 for (i = 0; i < obj->nr_extern; i++) {
9208 zfree(&obj->externs[i].name);
9209 zfree(&obj->externs[i].essent_name);
9212 zfree(&obj->externs);
9213 obj->nr_extern = 0;
9215 zfree(&obj->maps);
9216 obj->nr_maps = 0;
9218 if (obj->programs && obj->nr_programs) {
9219 for (i = 0; i < obj->nr_programs; i++)
9220 bpf_program__exit(&obj->programs[i]);
9222 zfree(&obj->programs);
9224 zfree(&obj->feat_cache);
9225 zfree(&obj->token_path);
9226 if (obj->token_fd > 0)
9227 close(obj->token_fd);
9229 zfree(&obj->arena_data);
9236 return obj ? obj->name : libbpf_err_ptr(-EINVAL);
9241 return obj ? obj->kern_version : 0;
9246 return obj->token_fd ?: -1;
9251 return obj ? obj->btf : NULL;
9256 return obj->btf ? btf__fd(obj->btf) : -1;
9261 if (obj->state >= OBJ_LOADED)
9262 return libbpf_err(-EINVAL);
9264 obj->kern_version = kern_version;
9274 return libbpf_err(-EFAULT);
9276 return libbpf_err(-EINVAL);
9279 return libbpf_err(-ENOMEM);
9280 gen->opts = opts;
9281 gen->swapped_endian = !is_native_endianness(obj);
9282 obj->gen_loader = gen;
9290 size_t nr_programs = obj->nr_programs;
9298 return forward ? &obj->programs[0] :
9299 &obj->programs[nr_programs - 1];
9301 if (p->obj != obj) {
9306 idx = (p - obj->programs) + (forward ? 1 : -1);
9307 if (idx >= obj->nr_programs || idx < 0)
9309 return &obj->programs[idx];
9338 prog->prog_ifindex = ifindex;
9343 return prog->name;
9348 return prog->sec_name;
9353 return prog->autoload;
9358 if (prog->obj->state >= OBJ_LOADED)
9359 return libbpf_err(-EINVAL);
9361 prog->autoload = autoload;
9367 return prog->autoattach;
9372 prog->autoattach = autoattach;
9377 return prog->insns;
9382 return prog->insns_cnt;
9390 if (prog->obj->state >= OBJ_LOADED)
9391 return libbpf_err(-EBUSY);
9393 insns = libbpf_reallocarray(prog->insns, new_insn_cnt, sizeof(*insns));
9396 pr_warn("prog '%s': failed to realloc prog code\n", prog->name);
9397 return libbpf_err(-ENOMEM);
9401 prog->insns = insns;
9402 prog->insns_cnt = new_insn_cnt;
9409 return libbpf_err(-EINVAL);
9411 if (prog->fd < 0)
9412 return libbpf_err(-ENOENT);
9414 return prog->fd;
9422 return prog->type;
9433 if (prog->obj->state >= OBJ_LOADED)
9434 return libbpf_err(-EBUSY);
9437 if (prog->type == type)
9440 prog->type = type;
9444 * fallback handler, which by definition is program type-agnostic and
9445 * is a catch-all custom handler, optionally set by the application,
9448 if (prog->sec_def != &custom_fallback_def)
9449 prog->sec_def = NULL;
9458 return prog->expected_attach_type;
9464 if (prog->obj->state >= OBJ_LOADED)
9465 return libbpf_err(-EBUSY);
9467 prog->expected_attach_type = type;
9473 return prog->prog_flags;
9478 if (prog->obj->state >= OBJ_LOADED)
9479 return libbpf_err(-EBUSY);
9481 prog->prog_flags = flags;
9487 return prog->log_level;
9492 if (prog->obj->state >= OBJ_LOADED)
9493 return libbpf_err(-EBUSY);
9495 prog->log_level = log_level;
9501 *log_size = prog->log_size;
9502 return prog->log_buf;
9508 return libbpf_err(-EINVAL);
9509 if (prog->log_size > UINT_MAX)
9510 return libbpf_err(-EINVAL);
9511 if (prog->obj->state >= OBJ_LOADED)
9512 return libbpf_err(-EBUSY);
9514 prog->log_buf = log_buf;
9515 prog->log_size = log_size;
9521 if (prog->func_info_rec_size != sizeof(struct bpf_func_info))
9522 return libbpf_err_ptr(-EOPNOTSUPP);
9523 return prog->func_info;
9528 return prog->func_info_cnt;
9533 if (prog->line_info_rec_size != sizeof(struct bpf_line_info))
9534 return libbpf_err_ptr(-EOPNOTSUPP);
9535 return prog->line_info;
9540 return prog->line_info_cnt;
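/*
 * Illustrative user-space sketch (editorial, assumed names): the setters
 * above only work between open and load; once the object is loaded they
 * fail, so configure everything up front.
 */
static char vlog[64 * 1024];
struct bpf_program *p = bpf_object__find_program_by_name(obj, "handler");
int err;

bpf_program__set_type(p, BPF_PROG_TYPE_KPROBE);
bpf_program__set_log_level(p, 1);
bpf_program__set_log_buf(p, vlog, sizeof(vlog));
err = bpf_object__load(obj);	/* verifier log for "handler" lands in vlog */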
9679 return libbpf_err(-EINVAL);
9682 return libbpf_err(-E2BIG);
9688 return libbpf_err(-ENOMEM);
9694 return libbpf_err(-EBUSY);
9699 sec_def->sec = sec ? strdup(sec) : NULL;
9700 if (sec && !sec_def->sec)
9701 return libbpf_err(-ENOMEM);
9703 sec_def->prog_type = prog_type;
9704 sec_def->expected_attach_type = exp_attach_type;
9705 sec_def->cookie = OPTS_GET(opts, cookie, 0);
9707 sec_def->prog_setup_fn = OPTS_GET(opts, prog_setup_fn, NULL);
9708 sec_def->prog_prepare_load_fn = OPTS_GET(opts, prog_prepare_load_fn, NULL);
9709 sec_def->prog_attach_fn = OPTS_GET(opts, prog_attach_fn, NULL);
9711 sec_def->handler_id = ++last_custom_sec_def_handler_id;
9718 return sec_def->handler_id;
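/*
 * Illustrative user-space sketch (editorial): registering a custom section
 * handler so programs in SEC("abc") get a known program type, then
 * unregistering it by handler id.
 */
LIBBPF_OPTS(libbpf_prog_handler_opts, hopts);
int id = libbpf_register_prog_handler("abc", BPF_PROG_TYPE_KPROBE,
				      0 /* expected attach type */, &hopts);

if (id > 0)
	libbpf_unregister_prog_handler(id);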
9727 return libbpf_err(-EINVAL);
9741 return libbpf_err(-ENOENT);
9745 custom_sec_defs[i - 1] = custom_sec_defs[i];
9746 custom_sec_def_cnt--;
9762 size_t len = strlen(sec_def->sec);
9765 if (sec_def->sec[len - 1] == '/') {
9766 if (str_has_pfx(sec_name, sec_def->sec))
9772 * well-formed SEC("type/extras") with proper '/' separator
9774 if (sec_def->sec[len - 1] == '+') {
9775 len--;
9777 if (strncmp(sec_name, sec_def->sec, len) != 0)
9785 return strcmp(sec_name, sec_def->sec) == 0;
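/*
 * Editorial sketch of the matching rules above, assuming typical built-in
 * sec_def patterns ("xdp", "tp_btf/", "kprobe+"):
 *   "xdp"     matches SEC("xdp") exactly;
 *   "tp_btf/" (trailing '/') prefix-matches SEC("tp_btf/task_newtask");
 *   "kprobe+" (trailing '+') matches SEC("kprobe") with no extras as well
 *             as SEC("kprobe/do_unlinkat").
 */
SEC("xdp")
int pass_all(struct xdp_md *xdp) { return XDP_PASS; }

SEC("kprobe/do_unlinkat")
int on_unlink(struct pt_regs *regs) { return 0; }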
9830 if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load)
9833 if (!(sec_def->cookie & SEC_ATTACHABLE))
9855 return libbpf_err(-EINVAL);
9859 *prog_type = sec_def->prog_type;
9860 *expected_attach_type = sec_def->expected_attach_type;
9871 return libbpf_err(-ESRCH);
9913 for (i = 0; i < obj->nr_maps; i++) {
9914 map = &obj->maps[i];
9917 if (map->sec_idx == sec_idx &&
9918 map->sec_offset <= offset &&
9919 offset - map->sec_offset < map->def.value_size)
9926 /* Collect the relocs from ELF, populate st_ops->progs[], and update
9927 * st_ops->data for the shadow type.
9946 btf = obj->btf;
9947 nrels = shdr->sh_size / shdr->sh_entsize;
9952 return -LIBBPF_ERRNO__FORMAT;
9955 sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info));
9958 (size_t)ELF64_R_SYM(rel->r_info));
9959 return -LIBBPF_ERRNO__FORMAT;
9962 name = elf_sym_str(obj, sym->st_name) ?: "<?>";
9963 map = find_struct_ops_map_by_offset(obj, shdr->sh_info, rel->r_offset);
9965 pr_warn("struct_ops reloc: cannot find map at rel->r_offset %zu\n",
9966 (size_t)rel->r_offset);
9967 return -EINVAL;
9970 moff = rel->r_offset - map->sec_offset;
9971 shdr_idx = sym->st_shndx;
9972 st_ops = map->st_ops;
9973 pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel->r_offset %zu map->sec_offset %zu name %d (\'%s\')\n",
9974 map->name,
9975 (long long)(rel->r_info >> 32),
9976 (long long)sym->st_value,
9977 shdr_idx, (size_t)rel->r_offset,
9978 map->sec_offset, sym->st_name, name);
9981 pr_warn("struct_ops reloc %s: rel->r_offset %zu shdr_idx %u unsupported non-static function\n",
9982 map->name, (size_t)rel->r_offset, shdr_idx);
9983 return -LIBBPF_ERRNO__RELOC;
9985 if (sym->st_value % BPF_INSN_SZ) {
9987 map->name, (unsigned long long)sym->st_value);
9988 return -LIBBPF_ERRNO__FORMAT;
9990 insn_idx = sym->st_value / BPF_INSN_SZ;
9992 type = btf__type_by_id(btf, st_ops->type_id);
9996 map->name, moff);
9997 return -EINVAL;
9999 member_idx = member - btf_members(type);
10000 name = btf__name_by_offset(btf, member->name_off);
10002 if (!resolve_func_ptr(btf, member->type, NULL)) {
10004 map->name, name);
10005 return -EINVAL;
10011 map->name, shdr_idx, name);
10012 return -EINVAL;
10016 if (prog->type != BPF_PROG_TYPE_STRUCT_OPS) {
10018 map->name, prog->name);
10019 return -EINVAL;
10022 st_ops->progs[member_idx] = prog;
10024 /* st_ops->data will be exposed to users, being returned by
10030 *((struct bpf_program **)(st_ops->data + moff)) = prog;
10077 return -ENAMETOOLONG;
10127 err = -EINVAL;
10158 mod_len = fn_name - mod_name;
10163 ret = find_attach_btf_id(obj->btf_vmlinux,
10171 if (ret != -ENOENT)
10179 for (i = 0; i < obj->btf_module_cnt; i++) {
10180 const struct module_btf *mod = &obj->btf_modules[i];
10182 if (mod_name && strncmp(mod->name, mod_name, mod_len) != 0)
10185 ret = find_attach_btf_id(mod->btf,
10189 *btf_obj_fd = mod->fd;
10193 if (ret == -ENOENT)
10199 return -ESRCH;
10205 enum bpf_attach_type attach_type = prog->expected_attach_type;
10206 __u32 attach_prog_fd = prog->attach_prog_fd;
10210 if (prog->type == BPF_PROG_TYPE_EXT || attach_prog_fd) {
10212 pr_warn("prog '%s': attach program FD is not set\n", prog->name);
10213 return -EINVAL;
10215 err = libbpf_find_prog_btf_id(attach_name, attach_prog_fd, prog->obj->token_fd);
10218 prog->name, attach_prog_fd, attach_name, errstr(err));
10227 if (prog->obj->gen_loader) {
10228 bpf_gen__record_attach_target(prog->obj->gen_loader, attach_name, attach_type);
10232 err = find_kernel_btf_id(prog->obj, attach_name,
10238 prog->name, attach_name, errstr(err));
10251 return libbpf_err(-EINVAL);
10262 return libbpf_err(-EINVAL);
10265 if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load)
10266 return libbpf_err(-EINVAL);
10267 if (!(sec_def->cookie & SEC_ATTACHABLE))
10268 return libbpf_err(-EINVAL);
10270 *attach_type = sec_def->expected_attach_type;
10277 return libbpf_err(-EINVAL);
10279 return -1;
10280 return map->fd;
10286 * their user-visible name differs from the kernel-visible name. Users see
10291 if (map->libbpf_type == LIBBPF_MAP_DATA && strcmp(map->real_name, DATA_SEC) != 0)
10293 if (map->libbpf_type == LIBBPF_MAP_RODATA && strcmp(map->real_name, RODATA_SEC) != 0)
10304 return map->real_name;
10306 return map->name;
10311 return map->def.type;
10317 return libbpf_err(-EBUSY);
10318 map->def.type = type;
10324 return map->def.map_flags;
10330 return libbpf_err(-EBUSY);
10331 map->def.map_flags = flags;
10337 return map->map_extra;
10343 return libbpf_err(-EBUSY);
10344 map->map_extra = map_extra;
10350 return map->numa_node;
10356 return libbpf_err(-EBUSY);
10357 map->numa_node = numa_node;
10363 return map->def.key_size;
10369 return libbpf_err(-EBUSY);
10370 map->def.key_size = size;
10376 return map->def.value_size;
10390 btf = bpf_object__btf(map->obj);
10392 return -ENOENT;
10399 return -EINVAL;
10407 return -EINVAL;
10411 var = &btf_var_secinfos(datasec_type)[vlen - 1];
10412 var_type = btf_type_by_id(btf, var->type);
10413 array_type = skip_mods_and_typedefs(btf, var_type->type, NULL);
10417 return -EINVAL;
10422 element_sz = btf__resolve_size(btf, array->type);
10423 if (element_sz <= 0 || (size - var->offset) % element_sz != 0) {
10426 return -EINVAL;
10430 nr_elements = (size - var->offset) / element_sz;
10431 new_array_id = btf__add_array(btf, array->index_type, array->type, nr_elements);
10438 datasec_type = btf_type_by_id(btf, map->btf_value_type_id);
10439 var = &btf_var_secinfos(datasec_type)[vlen - 1];
10440 var_type = btf_type_by_id(btf, var->type);
10443 datasec_type->size = size;
10444 var->size = size - var->offset;
10445 var_type->type = new_array_id;
10453 return libbpf_err(-EBUSY);
10455 if (map->mmaped) {
10459 if (map->def.type != BPF_MAP_TYPE_ARRAY)
10460 return libbpf_err(-EOPNOTSUPP);
10463 mmap_new_sz = array_map_mmap_sz(size, map->def.max_entries);
10466 pr_warn("map '%s': failed to resize memory-mapped region: %s\n",
10471 if (err && err != -ENOENT) {
10474 map->btf_value_type_id = 0;
10475 map->btf_key_type_id = 0;
10479 map->def.value_size = size;
10485 return map ? map->btf_key_type_id : 0;
10490 return map ? map->btf_value_type_id : 0;
10499 return libbpf_err(-EBUSY);
10501 if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG)
10502 return libbpf_err(-EINVAL);
10504 if (map->def.type == BPF_MAP_TYPE_ARENA)
10505 actual_sz = map->obj->arena_data_sz;
10507 actual_sz = map->def.value_size;
10509 return libbpf_err(-EINVAL);
10511 memcpy(map->mmaped, data, size);
10519 *psize = map->def.value_size;
10520 return map->st_ops->data;
10523 if (!map->mmaped)
10526 if (map->def.type == BPF_MAP_TYPE_ARENA)
10527 *psize = map->obj->arena_data_sz;
10529 *psize = map->def.value_size;
10531 return map->mmaped;
10536 return map->libbpf_type != LIBBPF_MAP_UNSPEC;
10541 return map->map_ifindex;
10547 return libbpf_err(-EBUSY);
10548 map->map_ifindex = ifindex;
10554 if (!bpf_map_type__is_map_in_map(map->def.type)) {
10556 return libbpf_err(-EINVAL);
10558 if (map->inner_map_fd != -1) {
10560 return libbpf_err(-EINVAL);
10562 if (map->inner_map) {
10563 bpf_map__destroy(map->inner_map);
10564 zfree(&map->inner_map);
10566 map->inner_map_fd = fd;
10574 return libbpf_err(-EINVAL);
10577 if (map->obj != prog->obj) {
10579 return libbpf_err(-EINVAL);
10582 map->excl_prog = prog;
10588 return map->excl_prog;
10597 if (!obj || !obj->maps)
10600 s = obj->maps;
10601 e = obj->maps + obj->nr_maps;
10609 idx = (m - obj->maps) + i;
10610 if (idx >= obj->nr_maps || idx < 0)
10612 return &obj->maps[idx];
10619 return obj->maps;
10628 if (!obj->nr_maps)
10630 return obj->maps + obj->nr_maps - 1;
10633 return __bpf_map__iter(next, obj, -1);
10647 if (pos->real_name && strcmp(pos->real_name, name) == 0)
10653 if (strcmp(pos->real_name, name) == 0)
10657 if (strcmp(pos->name, name) == 0)
10673 return -ENOENT;
10675 if (map->def.key_size != key_sz) {
10677 map->name, key_sz, map->def.key_size);
10678 return -EINVAL;
10681 if (map->fd < 0) {
10682 pr_warn("map '%s': can't use BPF map without FD (was it created?)\n", map->name);
10683 return -EINVAL;
10689 switch (map->def.type) {
10695 size_t elem_sz = roundup(map->def.value_size, 8);
10698 pr_warn("map '%s': unexpected value size %zu provided for per-CPU map, expected %d * %zu = %zd\n",
10699 map->name, value_sz, num_cpu, elem_sz, num_cpu * elem_sz);
10700 return -EINVAL;
10705 if (map->def.value_size != value_sz) {
10707 map->name, value_sz, map->def.value_size);
10708 return -EINVAL;
10725 return bpf_map_lookup_elem_flags(map->fd, key, value, flags);
10738 return bpf_map_update_elem(map->fd, key, value, flags);
10750 return bpf_map_delete_elem_flags(map->fd, key, flags);
10763 return bpf_map_lookup_and_delete_elem_flags(map->fd, key, value, flags);
10775 return bpf_map_get_next_key(map->fd, cur_key, next_key);
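/*
 * Illustrative user-space sketch (editorial, percpu_map assumed) of the
 * per-CPU sizing rule validated above: the value buffer must hold one
 * 8-byte-aligned element per possible CPU.
 */
int ncpus = libbpf_num_possible_cpus();
__u32 key = 0;
__u64 *vals = calloc(ncpus, sizeof(*vals));	/* 8-byte values: already aligned */
int err = vals ? bpf_map__lookup_elem(percpu_map, &key, sizeof(key),
				      vals, ncpus * sizeof(*vals), 0) : -ENOMEM;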
10784 errno = -PTR_ERR(ptr);
10791 return -errno;
10802 prog->name);
10803 return libbpf_err(-EINVAL);
10810 /* Release "ownership" of underlying BPF resource (typically, BPF program
10822 link->disconnected = true;
10832 if (!link->disconnected && link->detach)
10833 err = link->detach(link);
10834 if (link->pin_path)
10835 free(link->pin_path);
10836 if (link->dealloc)
10837 link->dealloc(link);
10846 return link->fd;
10851 return link->pin_path;
10856 return libbpf_err_errno(close(link->fd));
10866 fd = -errno;
10874 return libbpf_err_ptr(-ENOMEM);
10876 link->detach = &bpf_link__detach_fd;
10877 link->fd = fd;
10879 link->pin_path = strdup(path);
10880 if (!link->pin_path) {
10882 return libbpf_err_ptr(-ENOMEM);
10890 return bpf_link_detach(link->fd) ? -errno : 0;
10897 if (link->pin_path)
10898 return libbpf_err(-EBUSY);
10906 link->pin_path = strdup(path);
10907 if (!link->pin_path)
10908 return libbpf_err(-ENOMEM);
10910 if (bpf_obj_pin(link->fd, link->pin_path)) {
10911 err = -errno;
10912 zfree(&link->pin_path);
10916 pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path);
10924 if (!link->pin_path)
10925 return libbpf_err(-EINVAL);
10927 err = unlink(link->pin_path);
10929 return -errno;
10931 pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path);
10932 zfree(&link->pin_path);
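/*
 * Illustrative user-space sketch (editorial): pinning a link keeps the
 * kernel-side attachment alive independently of this process's FDs.
 */
struct bpf_link *link = bpf_program__attach(prog);

if (link && !bpf_link__pin(link, "/sys/fs/bpf/mylink")) {
	/* attachment now survives bpf_link__destroy() and process exit,
	 * until the pin is removed (e.g. via bpf_link__unpin()) */
}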
10953 if (ioctl(perf_link->perf_event_fd, PERF_EVENT_IOC_DISABLE, 0) < 0)
10954 err = -errno;
10956 if (perf_link->perf_event_fd != link->fd)
10957 close(perf_link->perf_event_fd);
10958 close(link->fd);
10961 if (perf_link->legacy_probe_name) {
10962 if (perf_link->legacy_is_kprobe) {
10963 err = remove_kprobe_event_legacy(perf_link->legacy_probe_name,
10964 perf_link->legacy_is_retprobe);
10966 err = remove_uprobe_event_legacy(perf_link->legacy_probe_name,
10967 perf_link->legacy_is_retprobe);
10978 free(perf_link->legacy_probe_name);
10986 int prog_fd, link_fd = -1, err;
10990 return libbpf_err_ptr(-EINVAL);
10994 prog->name, pfd);
10995 return libbpf_err_ptr(-EINVAL);
11000 prog->name);
11001 return libbpf_err_ptr(-EINVAL);
11006 return libbpf_err_ptr(-ENOMEM);
11007 link->link.detach = &bpf_link_perf_detach;
11008 link->link.dealloc = &bpf_link_perf_dealloc;
11009 link->perf_event_fd = pfd;
11012 if (kernel_supports(prog->obj, FEAT_PERF_LINK) && !force_ioctl_attach) {
11018 err = -errno;
11020 prog->name, pfd, errstr(err));
11023 link->link.fd = link_fd;
11026 pr_warn("prog '%s': user context value is not supported\n", prog->name);
11027 err = -EOPNOTSUPP;
11032 err = -errno;
11034 prog->name, pfd, errstr(err));
11035 if (err == -EPROTO)
11037 prog->name, pfd);
11040 link->link.fd = pfd;
11045 err = -errno;
11047 prog->name, pfd, errstr(err));
11052 return &link->link;
11066 * this function is expected to parse an integer in the range [0, 2^31-1] from
11077 err = -errno;
11083 err = err == EOF ? -EIO : -errno;
11131 return -EINVAL;
11163 pid < 0 ? -1 : pid /* pid */,
11164 pid == -1 ? 0 : -1 /* cpu */,
11165 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
11166 return pfd >= 0 ? pfd : -errno;
11180 return -EINVAL;
11184 return -errno;
11187 err = -errno;
11198 static int has_debugfs = -1;
11260 return append_to_file(tracefs_kprobe_events(), "-:%s/%s",
11303 pid < 0 ? -1 : pid, /* pid */
11304 pid == -1 ? 0 : -1, /* cpu */
11305 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
11307 err = -errno;
11333 return "arm64";
11392 return libbpf_err_ptr(-EINVAL);
11407 return libbpf_err_ptr(-ENOTSUP);
11411 if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK))
11412 return libbpf_err_ptr(-ENOTSUP);
11417 return libbpf_err_ptr(-EINVAL);
11423 -1 /* pid */, 0 /* ref_ctr_off */);
11432 return libbpf_err_ptr(-ENOMEM);
11435 offset, -1 /* pid */);
11438 err = -errno;
11440 prog->name, retprobe ? "kretprobe" : "kprobe",
11450 prog->name, retprobe ? "kretprobe" : "kprobe",
11458 perf_link->legacy_probe_name = legacy_probe;
11459 perf_link->legacy_is_kprobe = true;
11460 perf_link->legacy_is_retprobe = retprobe;
11492 return libbpf_err_ptr(-EINVAL);
11494 if (kernel_supports(prog->obj, FEAT_SYSCALL_WRAPPER)) {
11561 struct kprobe_multi_resolve *res = data->res;
11564 if (!glob_match(sym_name, res->pattern))
11567 if (!bsearch(&sym_name, data->syms, data->cnt, sizeof(*data->syms), avail_func_cmp)) {
11572 * make multi-kprobe usability a bit better: if no match is
11586 snprintf(sym_trim, sizeof(sym_trim), "%.*s", (int)(sym_sfx - sym_name), sym_name);
11587 if (!bsearch(&psym_trim, data->syms, data->cnt, sizeof(*data->syms), avail_func_cmp))
11591 err = libbpf_ensure_mem((void **)&res->addrs, &res->cap, sizeof(*res->addrs), res->cnt + 1);
11595 res->addrs[res->cnt++] = (unsigned long)sym_addr;
11611 err = -errno;
11625 err = -EINVAL;
11629 if (!glob_match(sym_name, res->pattern))
11638 err = -errno;
11647 err = -ENOENT;
11659 if (res->cnt == 0)
11660 err = -ENOENT;
11673 return access(tracefs_available_filter_functions_addrs(), R_OK) != -1;
11686 err = -errno;
11699 err = -EINVAL;
11703 if (!glob_match(sym_name, res->pattern))
11706 err = libbpf_ensure_mem((void **)&res->addrs, &res->cap,
11707 sizeof(*res->addrs), res->cnt + 1);
11711 res->addrs[res->cnt++] = (unsigned long)sym_addr;
11714 if (res->cnt == 0)
11715 err = -ENOENT;
11741 return libbpf_err_ptr(-EINVAL);
11746 prog->name);
11747 return libbpf_err_ptr(-EINVAL);
11757 return libbpf_err_ptr(-EINVAL);
11759 return libbpf_err_ptr(-EINVAL);
11761 return libbpf_err_ptr(-EINVAL);
11763 return libbpf_err_ptr(-EINVAL);
11765 return libbpf_err_ptr(-EINVAL);
11777 prog->name, pattern, res.cnt);
11778 err = -EINVAL;
11790 return libbpf_err_ptr(-EINVAL);
11802 err = -ENOMEM;
11805 link->detach = &bpf_link__detach_fd;
11809 err = -errno;
11811 prog->name, errstr(err));
11814 link->fd = link_fd;
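/*
 * [editor's example] Sketch of attaching one program to many kprobes via
 * the glob-pattern resolver above; the pattern is an assumption.
 */
static struct bpf_link *example_attach_kprobe_multi(struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts, .retprobe = false);

	/* pattern is matched against available_filter_functions, as above */
	return bpf_program__attach_kprobe_multi_opts(prog, "tcp_*", &opts);
}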
11834 /* no auto-attach for SEC("kprobe") and SEC("kretprobe") */
11835 if (strcmp(prog->sec_name, "kprobe") == 0 || strcmp(prog->sec_name, "kretprobe") == 0)
11838 opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe/");
11840 func_name = prog->sec_name + sizeof("kretprobe/") - 1;
11842 func_name = prog->sec_name + sizeof("kprobe/") - 1;
11844 n = sscanf(func_name, "%m[a-zA-Z0-9_.]+%li", &func, &offset);
11847 return -EINVAL;
11852 return -EINVAL;
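/*
 * [editor's note] The sscanf above accepts section names of the form
 * "kprobe/<func>[+<offset>]" (and the kretprobe/ equivalent), e.g.:
 *
 *   SEC("kprobe/do_unlinkat")       -- probe at function entry
 *   SEC("kprobe/do_unlinkat+16")    -- probe 16 bytes into the function
 *   SEC("kretprobe/do_unlinkat")    -- return probe, no offset allowed
 */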
11868 /* no auto-attach for SEC("ksyscall") and SEC("kretsyscall") */
11869 if (strcmp(prog->sec_name, "ksyscall") == 0 || strcmp(prog->sec_name, "kretsyscall") == 0)
11872 opts.retprobe = str_has_pfx(prog->sec_name, "kretsyscall/");
11874 syscall_name = prog->sec_name + sizeof("kretsyscall/") - 1;
11876 syscall_name = prog->sec_name + sizeof("ksyscall/") - 1;
11879 return *link ? 0 : -errno;
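/*
 * [editor's example] Sketch of the ksyscall convenience attach: libbpf
 * resolves the arch-specific syscall wrapper (e.g. __x64_sys_close vs
 * __arm64_sys_close) behind the scenes; SEC("ksyscall/close") programs
 * auto-attach the same way.
 */
static struct bpf_link *example_attach_ksyscall(struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_ksyscall_opts, opts, .retprobe = false);

	return bpf_program__attach_ksyscall(prog, "close", &opts);
}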
11891 /* no auto-attach for SEC("kprobe.multi") and SEC("kretprobe.multi") */
11892 if (strcmp(prog->sec_name, "kprobe.multi") == 0 ||
11893 strcmp(prog->sec_name, "kretprobe.multi") == 0)
11896 opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe.multi/");
11898 spec = prog->sec_name + sizeof("kretprobe.multi/") - 1;
11900 spec = prog->sec_name + sizeof("kprobe.multi/") - 1;
11902 n = sscanf(spec, "%m[a-zA-Z0-9_.*?]", &pattern);
11905 return -EINVAL;
11923 /* no auto-attach for SEC("kprobe.session") */
11924 if (strcmp(prog->sec_name, "kprobe.session") == 0)
11927 spec = prog->sec_name + sizeof("kprobe.session/") - 1;
11928 n = sscanf(spec, "%m[a-zA-Z0-9_.*?]", &pattern);
11931 return -EINVAL;
11936 return *link ? 0 : -errno;
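/*
 * [editor's note] Auto-attach section names handled above, for reference:
 *
 *   SEC("kprobe.multi/tcp_*")     -- glob over kernel symbols, one link for all
 *   SEC("kretprobe.multi/tcp_*")  -- same, but on function return
 *   SEC("kprobe.session/tcp_*")   -- entry+return session probes
 *
 * A bare SEC("kprobe.multi") is valid but is not auto-attached.
 */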
11943 int n, ret = -EINVAL;
11947 n = sscanf(prog->sec_name, "%m[^/]/%m[^:]:%m[^\n]",
11951 /* handle SEC("u[ret]probe") - format is valid, but auto-attach is impossible. */
11958 *link = bpf_program__attach_uprobe_multi(prog, -1, binary_path, func_name, &opts);
11962 pr_warn("prog '%s': invalid format of section definition '%s'\n", prog->name,
11963 prog->sec_name);
11983 return append_to_file(tracefs_uprobe_events(), "-:%s/%s",
12024 pid < 0 ? -1 : pid, /* pid */
12025 pid == -1 ? 0 : -1, /* cpu */
12026 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
12028 err = -errno;
12076 ret = -LIBBPF_ERRNO__FORMAT;
12083 elf_errmsg(-1));
12084 ret = -LIBBPF_ERRNO__LIBELF;
12105 * Based on https://packages.debian.org/sid/libc6.
12111 return "/lib/x86_64-linux-gnu";
12113 return "/lib/i386-linux-gnu";
12115 return "/lib/s390x-linux-gnu";
12117 return "/lib/s390-linux-gnu";
12119 return "/lib/arm-linux-gnueabi";
12121 return "/lib/arm-linux-gnueabihf";
12123 return "/lib/aarch64-linux-gnu";
12125 return "/lib/mips64el-linux-gnuabi64";
12127 return "/lib/mipsel-linux-gnu";
12129 return "/lib/powerpc64le-linux-gnu";
12131 return "/lib/sparc64-linux-gnu";
12133 return "/lib/riscv64-linux-gnu";
12168 seg_len = next_path ? next_path - s : strlen(s);
12179 return -ENOENT;
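/*
 * [editor's note] Illustration of the search above: given a bare library
 * name, the resolver walks LD_LIBRARY_PATH plus the arch-specific multiarch
 * directories listed above, so on x86-64 Debian "libc.so.6" typically
 * resolves to "/lib/x86_64-linux-gnu/libc.so.6".
 */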
12202 return libbpf_err_ptr(-EINVAL);
12207 prog->name);
12208 return libbpf_err_ptr(-EINVAL);
12227 * - syms and offsets are mutually exclusive
12228 * - ref_ctr_offsets and cookies are optional
12234 return libbpf_err_ptr(-EINVAL);
12236 return libbpf_err_ptr(-EINVAL);
12240 return libbpf_err_ptr(-EINVAL);
12243 return libbpf_err_ptr(-EINVAL);
12247 return libbpf_err_ptr(-EINVAL);
12254 prog->name, path, errstr(err));
12288 err = -ENOMEM;
12291 link->detach = &bpf_link__detach_fd;
12295 err = -errno;
12296 pr_warn("prog '%s': failed to attach multi-uprobe: %s\n",
12297 prog->name, errstr(err));
12300 link->fd = link_fd;
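/*
 * [editor's example] Usage sketch for the multi-uprobe link created above;
 * the library name and symbol pattern are illustrative assumptions.
 */
static struct bpf_link *example_attach_uprobe_multi(struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts, .retprobe = false);

	/* attach to every malloc-ish symbol in libc, in all processes */
	return bpf_program__attach_uprobe_multi(prog, -1 /* any pid */,
						"libc.so.6", "malloc*", &opts);
}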
12327 return libbpf_err_ptr(-EINVAL);
12335 return libbpf_err_ptr(-EINVAL);
12342 min(sizeof(full_path), (size_t)(archive_sep - binary_path + 1)));
12349 prog->name, binary_path, errstr(err));
12378 return libbpf_err_ptr(-ENOTSUP);
12382 if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK))
12383 return libbpf_err_ptr(-ENOTSUP);
12388 return libbpf_err_ptr(-EINVAL);
12398 return libbpf_err_ptr(-EINVAL);
12406 return libbpf_err_ptr(-ENOMEM);
12412 err = -errno;
12414 prog->name, retprobe ? "uretprobe" : "uprobe",
12425 prog->name, retprobe ? "uretprobe" : "uprobe",
12433 perf_link->legacy_probe_name = legacy_probe;
12434 perf_link->legacy_is_kprobe = false;
12435 perf_link->legacy_is_retprobe = retprobe;
12447 /* Format of u[ret]probe section definition supporting auto-attach:
12454 * specified (and auto-attach is not possible) or the above format is specified for
12455 * auto-attach.
12461 int n, c, ret = -EINVAL;
12466 n = sscanf(prog->sec_name, "%m[^/]/%m[^:]:%m[^\n]",
12470 /* handle SEC("u[ret]probe") - format is valid, but auto-attach is impossible. */
12475 prog->name, prog->sec_name);
12493 prog->name);
12497 *link = bpf_program__attach_uprobe_opts(prog, -1, binary_path, offset, &opts);
12501 pr_warn("prog '%s': invalid format of section definition '%s'\n", prog->name,
12502 prog->sec_name);
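/*
 * [editor's note] Examples of the section format parsed above:
 *
 *   SEC("uprobe//usr/lib/x86_64-linux-gnu/libc.so.6:malloc")
 *   SEC("uretprobe//proc/self/exe:trigger_func+16")
 *   SEC("uprobe")   -- valid, but cannot be auto-attached
 */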
12528 struct bpf_object *obj = prog->obj;
12534 return libbpf_err_ptr(-EINVAL);
12538 prog->name);
12539 return libbpf_err_ptr(-EINVAL);
12543 return libbpf_err_ptr(-EINVAL);
12549 prog->name, binary_path, errstr(err));
12558 if (IS_ERR(obj->usdt_man))
12559 return libbpf_ptr(obj->usdt_man);
12560 if (!obj->usdt_man) {
12561 obj->usdt_man = usdt_manager_new(obj);
12562 if (IS_ERR(obj->usdt_man))
12563 return libbpf_ptr(obj->usdt_man);
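/*
 * [editor's example] Sketch of an explicit USDT attach; the binary path and
 * provider/name pair are assumptions (glibc ships "libc" sdt probes when
 * built with systemtap support). SEC("usdt/libc.so.6:libc:setjmp")-style
 * sections are auto-attached by attach_usdt() below.
 */
static struct bpf_link *example_attach_usdt(struct bpf_program *prog)
{
	return bpf_program__attach_usdt(prog, -1 /* any process */,
					"libc.so.6", "libc", "setjmp",
					NULL /* default opts */);
}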
12567 link = usdt_manager_attach_usdt(obj->usdt_man, prog, pid, binary_path,
12583 /* no auto-attach for just SEC("usdt") */
12592 err = -EINVAL;
12594 *link = bpf_program__attach_usdt(prog, -1 /* any process */, path,
12613 return -errno;
12617 return -E2BIG;
12642 pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
12643 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
12645 err = -errno;
12664 return libbpf_err_ptr(-EINVAL);
12671 prog->name, tp_category, tp_name,
12680 prog->name, tp_category, tp_name,
12700 /* no auto-attach for SEC("tp") or SEC("tracepoint") */
12701 if (strcmp(prog->sec_name, "tp") == 0 || strcmp(prog->sec_name, "tracepoint") == 0)
12704 sec_name = strdup(prog->sec_name);
12706 return -ENOMEM;
12709 if (str_has_pfx(prog->sec_name, "tp/"))
12710 tp_cat = sec_name + sizeof("tp/") - 1;
12712 tp_cat = sec_name + sizeof("tracepoint/") - 1;
12716 return -EINVAL;
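/*
 * [editor's note] The two equivalent ways to get here, for reference:
 *
 *   SEC("tp/syscalls/sys_enter_openat")  -- auto-attached via this parser
 *
 * or an explicit call with category and name split out (illustrative):
 *
 *   link = bpf_program__attach_tracepoint(prog, "syscalls",
 *                                         "sys_enter_openat");
 */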
12736 return libbpf_err_ptr(-EINVAL);
12740 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
12741 return libbpf_err_ptr(-EINVAL);
12746 return libbpf_err_ptr(-ENOMEM);
12747 link->detach = &bpf_link__detach_fd;
12753 pfd = -errno;
12756 prog->name, tp_name, errstr(pfd));
12759 link->fd = pfd;
12785 if (!str_has_pfx(prog->sec_name, prefixes[i]))
12789 /* no auto-attach case for, e.g., plain SEC("raw_tp") */
12790 if (prog->sec_name[pfx_len] == '\0')
12793 if (prog->sec_name[pfx_len] != '/')
12796 tp_name = prog->sec_name + pfx_len + 1;
12802 prog->name, prog->sec_name);
12803 return -EINVAL;
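/*
 * [editor's note] Illustrative raw tracepoint attach matching the prefix
 * parsing above; "sched_switch" is an example tracepoint name:
 *
 *   SEC("raw_tp/sched_switch")  -- auto-attach
 *   link = bpf_program__attach_raw_tracepoint(prog, "sched_switch");
 */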
12819 return libbpf_err_ptr(-EINVAL);
12823 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
12824 return libbpf_err_ptr(-EINVAL);
12829 return libbpf_err_ptr(-ENOMEM);
12830 link->detach = &bpf_link__detach_fd;
12836 pfd = -errno;
12839 prog->name, errstr(pfd));
12842 link->fd = pfd;
12885 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
12886 return libbpf_err_ptr(-EINVAL);
12891 return libbpf_err_ptr(-ENOMEM);
12892 link->detach = &bpf_link__detach_fd;
12897 link_fd = -errno;
12900 prog->name, target_name,
12904 link->fd = link_fd;
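/*
 * [editor's example] The generic FD-target path above backs, e.g., cgroup
 * attaches; a hedged sketch (the cgroup path is an assumption, and
 * <fcntl.h> is required for open()):
 */
static struct bpf_link *example_attach_cgroup(struct bpf_program *prog)
{
	int cg_fd = open("/sys/fs/cgroup/test", O_RDONLY | O_CLOEXEC);

	if (cg_fd < 0)
		return NULL;
	/* the returned link keeps prog attached to the cgroup */
	return bpf_program__attach_cgroup(prog, cg_fd);
}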
12941 return libbpf_err_ptr(-EINVAL);
12948 prog->name);
12949 return libbpf_err_ptr(-EINVAL);
12969 return libbpf_err_ptr(-EINVAL);
12974 /* validate we don't have unexpected combinations of non-zero fields */
12977 prog->name);
12978 return libbpf_err_ptr(-EINVAL);
12982 prog->name);
12983 return libbpf_err_ptr(-EINVAL);
13004 return libbpf_err_ptr(-EINVAL);
13009 /* validate we don't have unexpected combinations of non-zero fields */
13012 prog->name);
13013 return libbpf_err_ptr(-EINVAL);
13017 prog->name);
13018 return libbpf_err_ptr(-EINVAL);
13037 prog->name);
13038 return libbpf_err_ptr(-EINVAL);
13041 if (prog->type != BPF_PROG_TYPE_EXT) {
13043 prog->name);
13044 return libbpf_err_ptr(-EINVAL);
13050 btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd, prog->obj->token_fd);
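/*
 * [editor's example] Sketch of an EXT-program (freplace) attach as validated
 * above; the target prog FD and replaced function name are assumptions.
 */
static struct bpf_link *example_attach_freplace(struct bpf_program *ext_prog,
						int target_prog_fd)
{
	/* ext_prog must be BPF_PROG_TYPE_EXT, per the check above */
	return bpf_program__attach_freplace(ext_prog, target_prog_fd,
					    "xdp_pass_func");
}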
13076 return libbpf_err_ptr(-EINVAL);
13083 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
13084 return libbpf_err_ptr(-EINVAL);
13089 return libbpf_err_ptr(-ENOMEM);
13090 link->detach = &bpf_link__detach_fd;
13095 link_fd = -errno;
13098 prog->name, errstr(link_fd));
13101 link->fd = link_fd;
13119 return libbpf_err_ptr(-EINVAL);
13123 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
13124 return libbpf_err_ptr(-EINVAL);
13129 return libbpf_err_ptr(-ENOMEM);
13131 link->detach = &bpf_link__detach_fd;
13140 link_fd = -errno;
13143 prog->name, errstr(link_fd));
13146 link->fd = link_fd;
13156 if (!prog->sec_def || !prog->sec_def->prog_attach_fn)
13157 return libbpf_err_ptr(-EOPNOTSUPP);
13161 prog->name);
13162 return libbpf_err_ptr(-EINVAL);
13165 err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, &link);
13169 /* When calling bpf_program__attach() explicitly, auto-attach support
13175 return libbpf_err_ptr(-EOPNOTSUPP);
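/*
 * [editor's note] The generic entry point dispatches through
 * sec_def->prog_attach_fn, so most programs need only (sketch):
 *
 *   struct bpf_link *link = bpf_program__attach(prog);
 *
 *   if (!link)          // NULL with errno set, in libbpf >= 1.0
 *           return -errno;
 *   ...
 *   bpf_link__destroy(link);
 */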
13192 if (st_link->map_fd < 0)
13194 return bpf_map_delete_elem(link->fd, &zero);
13196 return close(link->fd);
13206 pr_warn("map '%s': can't attach non-struct_ops map\n", map->name);
13207 return libbpf_err_ptr(-EINVAL);
13210 if (map->fd < 0) {
13211 pr_warn("map '%s': can't attach BPF map without FD (was it created?)\n", map->name);
13212 return libbpf_err_ptr(-EINVAL);
13217 return libbpf_err_ptr(-EINVAL);
13220 err = bpf_map_update_elem(map->fd, &zero, map->st_ops->kern_vdata, 0);
13226 if (err && (!(map->def.map_flags & BPF_F_LINK) || err != -EBUSY)) {
13231 link->link.detach = bpf_link__detach_struct_ops;
13233 if (!(map->def.map_flags & BPF_F_LINK)) {
13235 link->link.fd = map->fd;
13236 link->map_fd = -1;
13237 return &link->link;
13240 fd = bpf_link_create(map->fd, 0, BPF_STRUCT_OPS, NULL);
13246 link->link.fd = fd;
13247 link->map_fd = map->fd;
13249 return &link->link;
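/*
 * [editor's example] Usage sketch for the struct_ops attach above; `map` is
 * an assumed struct_ops map (e.g. a tcp_congestion_ops map) from a loaded
 * object.
 */
static struct bpf_link *example_attach_struct_ops(struct bpf_map *map)
{
	/* with BPF_F_LINK maps this creates a real kernel link that can
	 * later be swapped via bpf_link__update_map(); without the flag,
	 * the "link" merely wraps the registered map
	 */
	return bpf_map__attach_struct_ops(map);
}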
13262 return libbpf_err(-EINVAL);
13264 if (map->fd < 0) {
13265 pr_warn("map '%s': can't use BPF map without FD (was it created?)\n", map->name);
13266 return libbpf_err(-EINVAL);
13271 if (st_ops_link->map_fd < 0)
13272 return libbpf_err(-EINVAL);
13274 err = bpf_map_update_elem(map->fd, &zero, map->st_ops->kern_vdata, 0);
13280 if (err && err != -EBUSY)
13283 err = bpf_link_update(link->fd, map->fd, NULL);
13287 st_ops_link->map_fd = map->fd;
13302 __u64 data_tail = header->data_tail;
13309 ehdr = base + (data_tail & (mmap_size - 1));
13310 ehdr_size = ehdr->size;
13314 size_t len_first = base + mmap_size - copy_start;
13315 size_t len_secnd = ehdr_size - len_first;
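/*
 * [editor's note] The two-part copy above handles a record that wraps the
 * ring: with mmap_size a power of two, (data_tail & (mmap_size - 1)) is the
 * read offset; len_first covers the bytes up to the end of the buffer and
 * len_secnd the remainder starting back at base.
 */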
13349 /* sample_cb and lost_cb are higher-level common-case callbacks */
13388 if (cpu_buf->base &&
13389 munmap(cpu_buf->base, pb->mmap_size + pb->page_size))
13390 pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu);
13391 if (cpu_buf->fd >= 0) {
13392 ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0);
13393 close(cpu_buf->fd);
13395 free(cpu_buf->buf);
13405 if (pb->cpu_bufs) {
13406 for (i = 0; i < pb->cpu_cnt; i++) {
13407 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
13412 bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key);
13415 free(pb->cpu_bufs);
13417 if (pb->epoll_fd >= 0)
13418 close(pb->epoll_fd);
13419 free(pb->events);
13432 return ERR_PTR(-ENOMEM);
13434 cpu_buf->pb = pb;
13435 cpu_buf->cpu = cpu;
13436 cpu_buf->map_key = map_key;
13438 cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu,
13439 -1, PERF_FLAG_FD_CLOEXEC);
13440 if (cpu_buf->fd < 0) {
13441 err = -errno;
13447 cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size,
13449 cpu_buf->fd, 0);
13450 if (cpu_buf->base == MAP_FAILED) {
13451 cpu_buf->base = NULL;
13452 err = -errno;
13458 if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
13459 err = -errno;
13487 return libbpf_err_ptr(-EINVAL);
13516 return libbpf_err_ptr(-EINVAL);
13519 return libbpf_err_ptr(-EINVAL);
13541 if (page_cnt == 0 || (page_cnt & (page_cnt - 1))) {
13544 return ERR_PTR(-EINVAL);
13547 /* best-effort sanity checks */
13552 err = -errno;
13554 * -EBADFD, -EFAULT, or -E2BIG on real error
13556 if (err != -EINVAL) {
13567 return ERR_PTR(-EINVAL);
13573 return ERR_PTR(-ENOMEM);
13575 pb->event_cb = p->event_cb;
13576 pb->sample_cb = p->sample_cb;
13577 pb->lost_cb = p->lost_cb;
13578 pb->ctx = p->ctx;
13580 pb->page_size = getpagesize();
13581 pb->mmap_size = pb->page_size * page_cnt;
13582 pb->map_fd = map_fd;
13584 pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
13585 if (pb->epoll_fd < 0) {
13586 err = -errno;
13592 if (p->cpu_cnt > 0) {
13593 pb->cpu_cnt = p->cpu_cnt;
13595 pb->cpu_cnt = libbpf_num_possible_cpus();
13596 if (pb->cpu_cnt < 0) {
13597 err = pb->cpu_cnt;
13600 if (map.max_entries && map.max_entries < pb->cpu_cnt)
13601 pb->cpu_cnt = map.max_entries;
13604 pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events));
13605 if (!pb->events) {
13606 err = -ENOMEM;
13610 pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs));
13611 if (!pb->cpu_bufs) {
13612 err = -ENOMEM;
13623 for (i = 0, j = 0; i < pb->cpu_cnt; i++) {
13627 cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
13628 map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i;
13633 if (p->cpu_cnt <= 0 && (cpu >= n || !online[cpu]))
13636 cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
13642 pb->cpu_bufs[j] = cpu_buf;
13644 err = bpf_map_update_elem(pb->map_fd, &map_key,
13645 &cpu_buf->fd, 0);
13647 err = -errno;
13648 pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
13649 cpu, map_key, cpu_buf->fd,
13654 pb->events[j].events = EPOLLIN;
13655 pb->events[j].data.ptr = cpu_buf;
13656 if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd,
13657 &pb->events[j]) < 0) {
13658 err = -errno;
13660 cpu, cpu_buf->fd,
13666 pb->cpu_cnt = j;
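/*
 * [editor's example] End-to-end sketch of the setup above; map_fd is assumed
 * to be a BPF_MAP_TYPE_PERF_EVENT_ARRAY map FD from a loaded object, and
 * <stdio.h> is needed for fprintf().
 */
static void example_on_sample(void *ctx, int cpu, void *data, __u32 size)
{
	/* consume one raw sample pushed by bpf_perf_event_output() */
}

static void example_on_lost(void *ctx, int cpu, __u64 cnt)
{
	fprintf(stderr, "lost %llu events on CPU %d\n",
		(unsigned long long)cnt, cpu);
}

static struct perf_buffer *example_perf_buffer(int map_fd)
{
	/* 64 pages per CPU; page_cnt must be a power of two (checked above) */
	return perf_buffer__new(map_fd, 64, example_on_sample,
				example_on_lost, NULL /* ctx */, NULL /* opts */);
}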
13695 struct perf_buffer *pb = cpu_buf->pb;
13699 if (pb->event_cb)
13700 return pb->event_cb(pb->ctx, cpu_buf->cpu, e);
13702 switch (e->type) {
13706 if (pb->sample_cb)
13707 pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size);
13713 if (pb->lost_cb)
13714 pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost);
13718 pr_warn("unknown perf sample type %d\n", e->type);
13729 ret = perf_event_read_simple(cpu_buf->base, pb->mmap_size,
13730 pb->page_size, &cpu_buf->buf,
13731 &cpu_buf->buf_size,
13740 return pb->epoll_fd;
13747 cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
13749 return -errno;
13752 struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;
13768 return pb->cpu_cnt;
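/*
 * [editor's note] Typical consumption loop for the epoll-based poll above
 * (a sketch; timeout is in milliseconds, error handling elided):
 *
 *   while ((err = perf_buffer__poll(pb, 100)) >= 0) {
 *           // callbacks ran for every ready per-CPU buffer
 *   }
 *   perf_buffer__free(pb);
 */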
13780 if (buf_idx >= pb->cpu_cnt)
13781 return libbpf_err(-EINVAL);
13783 cpu_buf = pb->cpu_bufs[buf_idx];
13785 return libbpf_err(-ENOENT);
13787 return cpu_buf->fd;
13794 if (buf_idx >= pb->cpu_cnt)
13795 return libbpf_err(-EINVAL);
13797 cpu_buf = pb->cpu_bufs[buf_idx];
13799 return libbpf_err(-ENOENT);
13801 *buf = cpu_buf->base;
13802 *buf_size = pb->mmap_size;
13811 * - 0 on success;
13812 * - <0 on failure.
13818 if (buf_idx >= pb->cpu_cnt)
13819 return libbpf_err(-EINVAL);
13821 cpu_buf = pb->cpu_bufs[buf_idx];
13823 return libbpf_err(-ENOENT);
13832 for (i = 0; i < pb->cpu_cnt; i++) {
13833 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
13855 return libbpf_err(-EINVAL);
13857 if (prog->obj->state >= OBJ_LOADED)
13858 return libbpf_err(-EINVAL);
13864 prog->attach_prog_fd = attach_prog_fd;
13870 attach_prog_fd, prog->obj->token_fd);
13875 return libbpf_err(-EINVAL);
13878 err = bpf_object__load_vmlinux_btf(prog->obj, true);
13881 err = find_kernel_btf_id(prog->obj, attach_func_name,
13882 prog->expected_attach_type,
13888 prog->attach_btf_id = btf_id;
13889 prog->attach_btf_obj_fd = btf_obj_fd;
13890 prog->attach_prog_fd = attach_prog_fd;
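/*
 * [editor's example] Sketch of re-targeting a tracing program before load,
 * per the function above; the kernel function name is an assumption.
 */
static int example_retarget_fentry(struct bpf_program *prog)
{
	/* attach_prog_fd == 0 means "attach to a kernel function by name" */
	return bpf_program__set_attach_target(prog, 0, "tcp_v4_connect");
}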
13896 int err = 0, n, len, start, end = -1;
13902 /* Each substring separated by ',' has the format \d+-\d+ or \d+ */
13908 n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len);
13911 err = -EINVAL;
13919 err = -EINVAL;
13924 err = -ENOMEM;
13928 memset(tmp + *mask_sz, 0, start - *mask_sz);
13929 memset(tmp + start, 1, end - start + 1);
13935 return -EINVAL;
13951 err = -errno;
13958 err = len ? -errno : -EINVAL;
13964 return -E2BIG;
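/*
 * [editor's note] Example of the mask grammar parsed above: the string
 * "0-2,5" yields a bool array { 1, 1, 1, 0, 0, 1 } with mask_sz == 6; it is
 * applied to /sys/devices/system/cpu/online to skip offline CPUs.
 */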
14005 struct bpf_map **map = map_skel->map;
14006 const char *name = map_skel->name;
14007 void **mmaped = map_skel->mmaped;
14012 return -ESRCH;
14015 /* externs shouldn't be pre-initialized from user code */
14016 if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG)
14017 *mmaped = (*map)->mmaped;
14030 struct bpf_program **prog = prog_skel->prog;
14031 const char *name = prog_skel->name;
14036 return -ESRCH;
14048 obj = bpf_object_open(NULL, s->data, s->data_sz, s->name, opts);
14052 s->name, errstr(err));
14056 *s->obj = obj;
14057 err = populate_skeleton_maps(obj, s->maps, s->map_cnt, s->map_skel_sz);
14059 pr_warn("failed to populate skeleton maps for '%s': %s\n", s->name, errstr(err));
14063 err = populate_skeleton_progs(obj, s->progs, s->prog_cnt, s->prog_skel_sz);
14065 pr_warn("failed to populate skeleton progs for '%s': %s\n", s->name, errstr(err));
14083 if (!s->obj)
14084 return libbpf_err(-EINVAL);
14086 btf = bpf_object__btf(s->obj);
14089 bpf_object__name(s->obj));
14090 return libbpf_err(-errno);
14093 err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt, s->map_skel_sz);
14099 err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt, s->prog_skel_sz);
14105 for (var_idx = 0; var_idx < s->var_cnt; var_idx++) {
14106 var_skel = (void *)s->vars + var_idx * s->var_skel_sz;
14107 map = *var_skel->map;
14115 return libbpf_err(-EINVAL);
14121 var_type = btf__type_by_id(btf, var->type);
14122 var_name = btf__name_by_offset(btf, var_type->name_off);
14123 if (strcmp(var_name, var_skel->name) == 0) {
14124 *var_skel->addr = map->mmaped + var->offset;
14136 free(s->maps);
14137 free(s->progs);
14138 free(s->vars);
14146 err = bpf_object__load(*s->obj);
14148 pr_warn("failed to load BPF skeleton '%s': %s\n", s->name, errstr(err));
14152 for (i = 0; i < s->map_cnt; i++) {
14153 struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz;
14154 struct bpf_map *map = *map_skel->map;
14156 if (!map_skel->mmaped)
14159 *map_skel->mmaped = map->mmaped;
14169 for (i = 0; i < s->prog_cnt; i++) {
14170 struct bpf_prog_skeleton *prog_skel = (void *)s->progs + i * s->prog_skel_sz;
14171 struct bpf_program *prog = *prog_skel->prog;
14172 struct bpf_link **link = prog_skel->link;
14174 if (!prog->autoload || !prog->autoattach)
14177 /* auto-attaching not supported for this program */
14178 if (!prog->sec_def || !prog->sec_def->prog_attach_fn)
14181 /* if user already set the link manually, don't attempt auto-attach */
14185 err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, link);
14187 pr_warn("prog '%s': failed to auto-attach: %s\n",
14192 /* It's possible that for some SEC() definitions auto-attach
14197 * auto-attached. But if not, it shouldn't trigger skeleton's
14205 for (i = 0; i < s->map_cnt; i++) {
14206 struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz;
14207 struct bpf_map *map = *map_skel->map;
14210 if (!map->autocreate || !map->autoattach)
14218 if (s->map_skel_sz < offsetofend(struct bpf_map_skeleton, link)) {
14219 pr_warn("map '%s': BPF skeleton version is old, skipping map auto-attachment...\n",
14224 link = map_skel->link;
14236 err = -errno;
14237 pr_warn("map '%s': failed to auto-attach: %s\n",
14250 for (i = 0; i < s->prog_cnt; i++) {
14251 struct bpf_prog_skeleton *prog_skel = (void *)s->progs + i * s->prog_skel_sz;
14252 struct bpf_link **link = prog_skel->link;
14258 if (s->map_skel_sz < sizeof(struct bpf_map_skeleton))
14261 for (i = 0; i < s->map_cnt; i++) {
14262 struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz;
14263 struct bpf_link **link = map_skel->link;
14278 if (s->obj)
14279 bpf_object__close(*s->obj);
14280 free(s->maps);
14281 free(s->progs);
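/*
 * [editor's note] The open/load/attach/detach/destroy helpers above are what
 * bpftool-generated skeletons call under the hood; typical generated usage
 * (the skeleton name "my_prog" is an assumption):
 *
 *   struct my_prog *skel = my_prog__open();   // bpf_object__open_skeleton()
 *   if (!skel)
 *           return -errno;
 *   err = my_prog__load(skel);                // bpf_object__load_skeleton()
 *   if (!err)
 *           err = my_prog__attach(skel);      // bpf_object__attach_skeleton()
 *   ...
 *   my_prog__destroy(skel);                   // detach + destroy
 */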