1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) 2 3 /* 4 * Common eBPF ELF object loading operations. 5 * 6 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org> 7 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com> 8 * Copyright (C) 2015 Huawei Inc. 9 * Copyright (C) 2017 Nicira, Inc. 10 * Copyright (C) 2019 Isovalent, Inc. 11 */ 12 13 #ifndef _GNU_SOURCE 14 #define _GNU_SOURCE 15 #endif 16 #include <stdlib.h> 17 #include <stdio.h> 18 #include <stdarg.h> 19 #include <libgen.h> 20 #include <inttypes.h> 21 #include <limits.h> 22 #include <string.h> 23 #include <unistd.h> 24 #include <endian.h> 25 #include <fcntl.h> 26 #include <errno.h> 27 #include <ctype.h> 28 #include <asm/unistd.h> 29 #include <linux/err.h> 30 #include <linux/kernel.h> 31 #include <linux/bpf.h> 32 #include <linux/btf.h> 33 #include <linux/filter.h> 34 #include <linux/list.h> 35 #include <linux/limits.h> 36 #include <linux/perf_event.h> 37 #include <linux/ring_buffer.h> 38 #include <linux/version.h> 39 #include <sys/epoll.h> 40 #include <sys/ioctl.h> 41 #include <sys/mman.h> 42 #include <sys/stat.h> 43 #include <sys/types.h> 44 #include <sys/vfs.h> 45 #include <sys/utsname.h> 46 #include <sys/resource.h> 47 #include <libelf.h> 48 #include <gelf.h> 49 #include <zlib.h> 50 51 #include "libbpf.h" 52 #include "bpf.h" 53 #include "btf.h" 54 #include "str_error.h" 55 #include "libbpf_internal.h" 56 #include "hashmap.h" 57 58 #ifndef BPF_FS_MAGIC 59 #define BPF_FS_MAGIC 0xcafe4a11 60 #endif 61 62 #define BPF_INSN_SZ (sizeof(struct bpf_insn)) 63 64 /* vsprintf() in __base_pr() uses nonliteral format string. It may break 65 * compilation if user enables corresponding warning. Disable it explicitly. 66 */ 67 #pragma GCC diagnostic ignored "-Wformat-nonliteral" 68 69 #define __printf(a, b) __attribute__((format(printf, a, b))) 70 71 static struct bpf_map *bpf_object__add_map(struct bpf_object *obj); 72 static const struct btf_type * 73 skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id); 74 75 static int __base_pr(enum libbpf_print_level level, const char *format, 76 va_list args) 77 { 78 if (level == LIBBPF_DEBUG) 79 return 0; 80 81 return vfprintf(stderr, format, args); 82 } 83 84 static libbpf_print_fn_t __libbpf_pr = __base_pr; 85 86 libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn) 87 { 88 libbpf_print_fn_t old_print_fn = __libbpf_pr; 89 90 __libbpf_pr = fn; 91 return old_print_fn; 92 } 93 94 __printf(2, 3) 95 void libbpf_print(enum libbpf_print_level level, const char *format, ...) 96 { 97 va_list args; 98 99 if (!__libbpf_pr) 100 return; 101 102 va_start(args, format); 103 __libbpf_pr(level, format, args); 104 va_end(args); 105 } 106 107 static void pr_perm_msg(int err) 108 { 109 struct rlimit limit; 110 char buf[100]; 111 112 if (err != -EPERM || geteuid() != 0) 113 return; 114 115 err = getrlimit(RLIMIT_MEMLOCK, &limit); 116 if (err) 117 return; 118 119 if (limit.rlim_cur == RLIM_INFINITY) 120 return; 121 122 if (limit.rlim_cur < 1024) 123 snprintf(buf, sizeof(buf), "%zu bytes", (size_t)limit.rlim_cur); 124 else if (limit.rlim_cur < 1024*1024) 125 snprintf(buf, sizeof(buf), "%.1f KiB", (double)limit.rlim_cur / 1024); 126 else 127 snprintf(buf, sizeof(buf), "%.1f MiB", (double)limit.rlim_cur / (1024*1024)); 128 129 pr_warn("permission error while running as root; try raising 'ulimit -l'? 
current value: %s\n", 130 buf); 131 } 132 133 #define STRERR_BUFSIZE 128 134 135 /* Copied from tools/perf/util/util.h */ 136 #ifndef zfree 137 # define zfree(ptr) ({ free(*ptr); *ptr = NULL; }) 138 #endif 139 140 #ifndef zclose 141 # define zclose(fd) ({ \ 142 int ___err = 0; \ 143 if ((fd) >= 0) \ 144 ___err = close((fd)); \ 145 fd = -1; \ 146 ___err; }) 147 #endif 148 149 static inline __u64 ptr_to_u64(const void *ptr) 150 { 151 return (__u64) (unsigned long) ptr; 152 } 153 154 enum kern_feature_id { 155 /* v4.14: kernel support for program & map names. */ 156 FEAT_PROG_NAME, 157 /* v5.2: kernel support for global data sections. */ 158 FEAT_GLOBAL_DATA, 159 /* BTF support */ 160 FEAT_BTF, 161 /* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */ 162 FEAT_BTF_FUNC, 163 /* BTF_KIND_VAR and BTF_KIND_DATASEC support */ 164 FEAT_BTF_DATASEC, 165 /* BTF_FUNC_GLOBAL is supported */ 166 FEAT_BTF_GLOBAL_FUNC, 167 /* BPF_F_MMAPABLE is supported for arrays */ 168 FEAT_ARRAY_MMAP, 169 /* kernel support for expected_attach_type in BPF_PROG_LOAD */ 170 FEAT_EXP_ATTACH_TYPE, 171 /* bpf_probe_read_{kernel,user}[_str] helpers */ 172 FEAT_PROBE_READ_KERN, 173 /* BPF_PROG_BIND_MAP is supported */ 174 FEAT_PROG_BIND_MAP, 175 /* Kernel support for module BTFs */ 176 FEAT_MODULE_BTF, 177 /* BTF_KIND_FLOAT support */ 178 FEAT_BTF_FLOAT, 179 __FEAT_CNT, 180 }; 181 182 static bool kernel_supports(enum kern_feature_id feat_id); 183 184 enum reloc_type { 185 RELO_LD64, 186 RELO_CALL, 187 RELO_DATA, 188 RELO_EXTERN, 189 RELO_SUBPROG_ADDR, 190 }; 191 192 struct reloc_desc { 193 enum reloc_type type; 194 int insn_idx; 195 int map_idx; 196 int sym_off; 197 bool processed; 198 }; 199 200 struct bpf_sec_def; 201 202 typedef struct bpf_link *(*attach_fn_t)(const struct bpf_sec_def *sec, 203 struct bpf_program *prog); 204 205 struct bpf_sec_def { 206 const char *sec; 207 size_t len; 208 enum bpf_prog_type prog_type; 209 enum bpf_attach_type expected_attach_type; 210 bool is_exp_attach_type_optional; 211 bool is_attachable; 212 bool is_attach_btf; 213 bool is_sleepable; 214 attach_fn_t attach_fn; 215 }; 216 217 /* 218 * bpf_prog should be a better name but it has been used in 219 * linux/filter.h. 220 */ 221 struct bpf_program { 222 const struct bpf_sec_def *sec_def; 223 char *sec_name; 224 size_t sec_idx; 225 /* this program's instruction offset (in number of instructions) 226 * within its containing ELF section 227 */ 228 size_t sec_insn_off; 229 /* number of original instructions in ELF section belonging to this 230 * program, not taking into account subprogram instructions possible 231 * appended later during relocation 232 */ 233 size_t sec_insn_cnt; 234 /* Offset (in number of instructions) of the start of instruction 235 * belonging to this BPF program within its containing main BPF 236 * program. For the entry-point (main) BPF program, this is always 237 * zero. For a sub-program, this gets reset before each of main BPF 238 * programs are processed and relocated and is used to determined 239 * whether sub-program was already appended to the main program, and 240 * if yes, at which instruction offset. 
241 */ 242 size_t sub_insn_off; 243 244 char *name; 245 /* sec_name with / replaced by _; makes recursive pinning 246 * in bpf_object__pin_programs easier 247 */ 248 char *pin_name; 249 250 /* instructions that belong to BPF program; insns[0] is located at 251 * sec_insn_off instruction within its ELF section in ELF file, so 252 * when mapping ELF file instruction index to the local instruction, 253 * one needs to subtract sec_insn_off; and vice versa. 254 */ 255 struct bpf_insn *insns; 256 /* actual number of instruction in this BPF program's image; for 257 * entry-point BPF programs this includes the size of main program 258 * itself plus all the used sub-programs, appended at the end 259 */ 260 size_t insns_cnt; 261 262 struct reloc_desc *reloc_desc; 263 int nr_reloc; 264 int log_level; 265 266 struct { 267 int nr; 268 int *fds; 269 } instances; 270 bpf_program_prep_t preprocessor; 271 272 struct bpf_object *obj; 273 void *priv; 274 bpf_program_clear_priv_t clear_priv; 275 276 bool load; 277 enum bpf_prog_type type; 278 enum bpf_attach_type expected_attach_type; 279 int prog_ifindex; 280 __u32 attach_btf_obj_fd; 281 __u32 attach_btf_id; 282 __u32 attach_prog_fd; 283 void *func_info; 284 __u32 func_info_rec_size; 285 __u32 func_info_cnt; 286 287 void *line_info; 288 __u32 line_info_rec_size; 289 __u32 line_info_cnt; 290 __u32 prog_flags; 291 }; 292 293 struct bpf_struct_ops { 294 const char *tname; 295 const struct btf_type *type; 296 struct bpf_program **progs; 297 __u32 *kern_func_off; 298 /* e.g. struct tcp_congestion_ops in bpf_prog's btf format */ 299 void *data; 300 /* e.g. struct bpf_struct_ops_tcp_congestion_ops in 301 * btf_vmlinux's format. 302 * struct bpf_struct_ops_tcp_congestion_ops { 303 * [... some other kernel fields ...] 304 * struct tcp_congestion_ops data; 305 * } 306 * kern_vdata-size == sizeof(struct bpf_struct_ops_tcp_congestion_ops) 307 * bpf_map__init_kern_struct_ops() will populate the "kern_vdata" 308 * from "data". 
309 */ 310 void *kern_vdata; 311 __u32 type_id; 312 }; 313 314 #define DATA_SEC ".data" 315 #define BSS_SEC ".bss" 316 #define RODATA_SEC ".rodata" 317 #define KCONFIG_SEC ".kconfig" 318 #define KSYMS_SEC ".ksyms" 319 #define STRUCT_OPS_SEC ".struct_ops" 320 321 enum libbpf_map_type { 322 LIBBPF_MAP_UNSPEC, 323 LIBBPF_MAP_DATA, 324 LIBBPF_MAP_BSS, 325 LIBBPF_MAP_RODATA, 326 LIBBPF_MAP_KCONFIG, 327 }; 328 329 static const char * const libbpf_type_to_btf_name[] = { 330 [LIBBPF_MAP_DATA] = DATA_SEC, 331 [LIBBPF_MAP_BSS] = BSS_SEC, 332 [LIBBPF_MAP_RODATA] = RODATA_SEC, 333 [LIBBPF_MAP_KCONFIG] = KCONFIG_SEC, 334 }; 335 336 struct bpf_map { 337 char *name; 338 int fd; 339 int sec_idx; 340 size_t sec_offset; 341 int map_ifindex; 342 int inner_map_fd; 343 struct bpf_map_def def; 344 __u32 numa_node; 345 __u32 btf_var_idx; 346 __u32 btf_key_type_id; 347 __u32 btf_value_type_id; 348 __u32 btf_vmlinux_value_type_id; 349 void *priv; 350 bpf_map_clear_priv_t clear_priv; 351 enum libbpf_map_type libbpf_type; 352 void *mmaped; 353 struct bpf_struct_ops *st_ops; 354 struct bpf_map *inner_map; 355 void **init_slots; 356 int init_slots_sz; 357 char *pin_path; 358 bool pinned; 359 bool reused; 360 }; 361 362 enum extern_type { 363 EXT_UNKNOWN, 364 EXT_KCFG, 365 EXT_KSYM, 366 }; 367 368 enum kcfg_type { 369 KCFG_UNKNOWN, 370 KCFG_CHAR, 371 KCFG_BOOL, 372 KCFG_INT, 373 KCFG_TRISTATE, 374 KCFG_CHAR_ARR, 375 }; 376 377 struct extern_desc { 378 enum extern_type type; 379 int sym_idx; 380 int btf_id; 381 int sec_btf_id; 382 const char *name; 383 bool is_set; 384 bool is_weak; 385 union { 386 struct { 387 enum kcfg_type type; 388 int sz; 389 int align; 390 int data_off; 391 bool is_signed; 392 } kcfg; 393 struct { 394 unsigned long long addr; 395 396 /* target btf_id of the corresponding kernel var. */ 397 int kernel_btf_obj_fd; 398 int kernel_btf_id; 399 400 /* local btf_id of the ksym extern's type. */ 401 __u32 type_id; 402 } ksym; 403 }; 404 }; 405 406 static LIST_HEAD(bpf_objects_list); 407 408 struct module_btf { 409 struct btf *btf; 410 char *name; 411 __u32 id; 412 int fd; 413 }; 414 415 struct bpf_object { 416 char name[BPF_OBJ_NAME_LEN]; 417 char license[64]; 418 __u32 kern_version; 419 420 struct bpf_program *programs; 421 size_t nr_programs; 422 struct bpf_map *maps; 423 size_t nr_maps; 424 size_t maps_cap; 425 426 char *kconfig; 427 struct extern_desc *externs; 428 int nr_extern; 429 int kconfig_map_idx; 430 int rodata_map_idx; 431 432 bool loaded; 433 bool has_subcalls; 434 435 /* 436 * Information when doing elf related work. Only valid if fd 437 * is valid. 438 */ 439 struct { 440 int fd; 441 const void *obj_buf; 442 size_t obj_buf_sz; 443 Elf *elf; 444 GElf_Ehdr ehdr; 445 Elf_Data *symbols; 446 Elf_Data *data; 447 Elf_Data *rodata; 448 Elf_Data *bss; 449 Elf_Data *st_ops_data; 450 size_t shstrndx; /* section index for section name strings */ 451 size_t strtabidx; 452 struct { 453 GElf_Shdr shdr; 454 Elf_Data *data; 455 } *reloc_sects; 456 int nr_reloc_sects; 457 int maps_shndx; 458 int btf_maps_shndx; 459 __u32 btf_maps_sec_btf_id; 460 int text_shndx; 461 int symbols_shndx; 462 int data_shndx; 463 int rodata_shndx; 464 int bss_shndx; 465 int st_ops_shndx; 466 } efile; 467 /* 468 * All loaded bpf_object is linked in a list, which is 469 * hidden to caller. bpf_objects__<func> handlers deal with 470 * all objects. 
471 */ 472 struct list_head list; 473 474 struct btf *btf; 475 struct btf_ext *btf_ext; 476 477 /* Parse and load BTF vmlinux if any of the programs in the object need 478 * it at load time. 479 */ 480 struct btf *btf_vmlinux; 481 /* vmlinux BTF override for CO-RE relocations */ 482 struct btf *btf_vmlinux_override; 483 /* Lazily initialized kernel module BTFs */ 484 struct module_btf *btf_modules; 485 bool btf_modules_loaded; 486 size_t btf_module_cnt; 487 size_t btf_module_cap; 488 489 void *priv; 490 bpf_object_clear_priv_t clear_priv; 491 492 char path[]; 493 }; 494 #define obj_elf_valid(o) ((o)->efile.elf) 495 496 static const char *elf_sym_str(const struct bpf_object *obj, size_t off); 497 static const char *elf_sec_str(const struct bpf_object *obj, size_t off); 498 static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx); 499 static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name); 500 static int elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn, GElf_Shdr *hdr); 501 static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn); 502 static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn); 503 static int elf_sym_by_sec_off(const struct bpf_object *obj, size_t sec_idx, 504 size_t off, __u32 sym_type, GElf_Sym *sym); 505 506 void bpf_program__unload(struct bpf_program *prog) 507 { 508 int i; 509 510 if (!prog) 511 return; 512 513 /* 514 * If the object is opened but the program was never loaded, 515 * it is possible that prog->instances.nr == -1. 516 */ 517 if (prog->instances.nr > 0) { 518 for (i = 0; i < prog->instances.nr; i++) 519 zclose(prog->instances.fds[i]); 520 } else if (prog->instances.nr != -1) { 521 pr_warn("Internal error: instances.nr is %d\n", 522 prog->instances.nr); 523 } 524 525 prog->instances.nr = -1; 526 zfree(&prog->instances.fds); 527 528 zfree(&prog->func_info); 529 zfree(&prog->line_info); 530 } 531 532 static void bpf_program__exit(struct bpf_program *prog) 533 { 534 if (!prog) 535 return; 536 537 if (prog->clear_priv) 538 prog->clear_priv(prog, prog->priv); 539 540 prog->priv = NULL; 541 prog->clear_priv = NULL; 542 543 bpf_program__unload(prog); 544 zfree(&prog->name); 545 zfree(&prog->sec_name); 546 zfree(&prog->pin_name); 547 zfree(&prog->insns); 548 zfree(&prog->reloc_desc); 549 550 prog->nr_reloc = 0; 551 prog->insns_cnt = 0; 552 prog->sec_idx = -1; 553 } 554 555 static char *__bpf_program__pin_name(struct bpf_program *prog) 556 { 557 char *name, *p; 558 559 name = p = strdup(prog->sec_name); 560 while ((p = strchr(p, '/'))) 561 *p = '_'; 562 563 return name; 564 } 565 566 static bool insn_is_subprog_call(const struct bpf_insn *insn) 567 { 568 return BPF_CLASS(insn->code) == BPF_JMP && 569 BPF_OP(insn->code) == BPF_CALL && 570 BPF_SRC(insn->code) == BPF_K && 571 insn->src_reg == BPF_PSEUDO_CALL && 572 insn->dst_reg == 0 && 573 insn->off == 0; 574 } 575 576 static bool is_ldimm64(struct bpf_insn *insn) 577 { 578 return insn->code == (BPF_LD | BPF_IMM | BPF_DW); 579 } 580 581 static bool insn_is_pseudo_func(struct bpf_insn *insn) 582 { 583 return is_ldimm64(insn) && insn->src_reg == BPF_PSEUDO_FUNC; 584 } 585 586 static int 587 bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog, 588 const char *name, size_t sec_idx, const char *sec_name, 589 size_t sec_off, void *insn_data, size_t insn_data_sz) 590 { 591 if (insn_data_sz == 0 || insn_data_sz % BPF_INSN_SZ || sec_off % BPF_INSN_SZ) { 592 pr_warn("sec '%s': corrupted program '%s', offset %zu, size %zu\n", 593 
sec_name, name, sec_off, insn_data_sz); 594 return -EINVAL; 595 } 596 597 memset(prog, 0, sizeof(*prog)); 598 prog->obj = obj; 599 600 prog->sec_idx = sec_idx; 601 prog->sec_insn_off = sec_off / BPF_INSN_SZ; 602 prog->sec_insn_cnt = insn_data_sz / BPF_INSN_SZ; 603 /* insns_cnt can later be increased by appending used subprograms */ 604 prog->insns_cnt = prog->sec_insn_cnt; 605 606 prog->type = BPF_PROG_TYPE_UNSPEC; 607 prog->load = true; 608 609 prog->instances.fds = NULL; 610 prog->instances.nr = -1; 611 612 prog->sec_name = strdup(sec_name); 613 if (!prog->sec_name) 614 goto errout; 615 616 prog->name = strdup(name); 617 if (!prog->name) 618 goto errout; 619 620 prog->pin_name = __bpf_program__pin_name(prog); 621 if (!prog->pin_name) 622 goto errout; 623 624 prog->insns = malloc(insn_data_sz); 625 if (!prog->insns) 626 goto errout; 627 memcpy(prog->insns, insn_data, insn_data_sz); 628 629 return 0; 630 errout: 631 pr_warn("sec '%s': failed to allocate memory for prog '%s'\n", sec_name, name); 632 bpf_program__exit(prog); 633 return -ENOMEM; 634 } 635 636 static int 637 bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data, 638 const char *sec_name, int sec_idx) 639 { 640 struct bpf_program *prog, *progs; 641 void *data = sec_data->d_buf; 642 size_t sec_sz = sec_data->d_size, sec_off, prog_sz; 643 int nr_progs, err; 644 const char *name; 645 GElf_Sym sym; 646 647 progs = obj->programs; 648 nr_progs = obj->nr_programs; 649 sec_off = 0; 650 651 while (sec_off < sec_sz) { 652 if (elf_sym_by_sec_off(obj, sec_idx, sec_off, STT_FUNC, &sym)) { 653 pr_warn("sec '%s': failed to find program symbol at offset %zu\n", 654 sec_name, sec_off); 655 return -LIBBPF_ERRNO__FORMAT; 656 } 657 658 prog_sz = sym.st_size; 659 660 name = elf_sym_str(obj, sym.st_name); 661 if (!name) { 662 pr_warn("sec '%s': failed to get symbol name for offset %zu\n", 663 sec_name, sec_off); 664 return -LIBBPF_ERRNO__FORMAT; 665 } 666 667 if (sec_off + prog_sz > sec_sz) { 668 pr_warn("sec '%s': program at offset %zu crosses section boundary\n", 669 sec_name, sec_off); 670 return -LIBBPF_ERRNO__FORMAT; 671 } 672 673 pr_debug("sec '%s': found program '%s' at insn offset %zu (%zu bytes), code size %zu insns (%zu bytes)\n", 674 sec_name, name, sec_off / BPF_INSN_SZ, sec_off, prog_sz / BPF_INSN_SZ, prog_sz); 675 676 progs = libbpf_reallocarray(progs, nr_progs + 1, sizeof(*progs)); 677 if (!progs) { 678 /* 679 * In this case the original obj->programs 680 * is still valid, so don't need special treat for 681 * bpf_close_object(). 
682 */ 683 pr_warn("sec '%s': failed to alloc memory for new program '%s'\n", 684 sec_name, name); 685 return -ENOMEM; 686 } 687 obj->programs = progs; 688 689 prog = &progs[nr_progs]; 690 691 err = bpf_object__init_prog(obj, prog, name, sec_idx, sec_name, 692 sec_off, data + sec_off, prog_sz); 693 if (err) 694 return err; 695 696 nr_progs++; 697 obj->nr_programs = nr_progs; 698 699 sec_off += prog_sz; 700 } 701 702 return 0; 703 } 704 705 static __u32 get_kernel_version(void) 706 { 707 __u32 major, minor, patch; 708 struct utsname info; 709 710 uname(&info); 711 if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3) 712 return 0; 713 return KERNEL_VERSION(major, minor, patch); 714 } 715 716 static const struct btf_member * 717 find_member_by_offset(const struct btf_type *t, __u32 bit_offset) 718 { 719 struct btf_member *m; 720 int i; 721 722 for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) { 723 if (btf_member_bit_offset(t, i) == bit_offset) 724 return m; 725 } 726 727 return NULL; 728 } 729 730 static const struct btf_member * 731 find_member_by_name(const struct btf *btf, const struct btf_type *t, 732 const char *name) 733 { 734 struct btf_member *m; 735 int i; 736 737 for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) { 738 if (!strcmp(btf__name_by_offset(btf, m->name_off), name)) 739 return m; 740 } 741 742 return NULL; 743 } 744 745 #define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_" 746 static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix, 747 const char *name, __u32 kind); 748 749 static int 750 find_struct_ops_kern_types(const struct btf *btf, const char *tname, 751 const struct btf_type **type, __u32 *type_id, 752 const struct btf_type **vtype, __u32 *vtype_id, 753 const struct btf_member **data_member) 754 { 755 const struct btf_type *kern_type, *kern_vtype; 756 const struct btf_member *kern_data_member; 757 __s32 kern_vtype_id, kern_type_id; 758 __u32 i; 759 760 kern_type_id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT); 761 if (kern_type_id < 0) { 762 pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n", 763 tname); 764 return kern_type_id; 765 } 766 kern_type = btf__type_by_id(btf, kern_type_id); 767 768 /* Find the corresponding "map_value" type that will be used 769 * in map_update(BPF_MAP_TYPE_STRUCT_OPS). For example, 770 * find "struct bpf_struct_ops_tcp_congestion_ops" from the 771 * btf_vmlinux. 772 */ 773 kern_vtype_id = find_btf_by_prefix_kind(btf, STRUCT_OPS_VALUE_PREFIX, 774 tname, BTF_KIND_STRUCT); 775 if (kern_vtype_id < 0) { 776 pr_warn("struct_ops init_kern: struct %s%s is not found in kernel BTF\n", 777 STRUCT_OPS_VALUE_PREFIX, tname); 778 return kern_vtype_id; 779 } 780 kern_vtype = btf__type_by_id(btf, kern_vtype_id); 781 782 /* Find "struct tcp_congestion_ops" from 783 * struct bpf_struct_ops_tcp_congestion_ops { 784 * [ ... 
] 785 * struct tcp_congestion_ops data; 786 * } 787 */ 788 kern_data_member = btf_members(kern_vtype); 789 for (i = 0; i < btf_vlen(kern_vtype); i++, kern_data_member++) { 790 if (kern_data_member->type == kern_type_id) 791 break; 792 } 793 if (i == btf_vlen(kern_vtype)) { 794 pr_warn("struct_ops init_kern: struct %s data is not found in struct %s%s\n", 795 tname, STRUCT_OPS_VALUE_PREFIX, tname); 796 return -EINVAL; 797 } 798 799 *type = kern_type; 800 *type_id = kern_type_id; 801 *vtype = kern_vtype; 802 *vtype_id = kern_vtype_id; 803 *data_member = kern_data_member; 804 805 return 0; 806 } 807 808 static bool bpf_map__is_struct_ops(const struct bpf_map *map) 809 { 810 return map->def.type == BPF_MAP_TYPE_STRUCT_OPS; 811 } 812 813 /* Init the map's fields that depend on kern_btf */ 814 static int bpf_map__init_kern_struct_ops(struct bpf_map *map, 815 const struct btf *btf, 816 const struct btf *kern_btf) 817 { 818 const struct btf_member *member, *kern_member, *kern_data_member; 819 const struct btf_type *type, *kern_type, *kern_vtype; 820 __u32 i, kern_type_id, kern_vtype_id, kern_data_off; 821 struct bpf_struct_ops *st_ops; 822 void *data, *kern_data; 823 const char *tname; 824 int err; 825 826 st_ops = map->st_ops; 827 type = st_ops->type; 828 tname = st_ops->tname; 829 err = find_struct_ops_kern_types(kern_btf, tname, 830 &kern_type, &kern_type_id, 831 &kern_vtype, &kern_vtype_id, 832 &kern_data_member); 833 if (err) 834 return err; 835 836 pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n", 837 map->name, st_ops->type_id, kern_type_id, kern_vtype_id); 838 839 map->def.value_size = kern_vtype->size; 840 map->btf_vmlinux_value_type_id = kern_vtype_id; 841 842 st_ops->kern_vdata = calloc(1, kern_vtype->size); 843 if (!st_ops->kern_vdata) 844 return -ENOMEM; 845 846 data = st_ops->data; 847 kern_data_off = kern_data_member->offset / 8; 848 kern_data = st_ops->kern_vdata + kern_data_off; 849 850 member = btf_members(type); 851 for (i = 0; i < btf_vlen(type); i++, member++) { 852 const struct btf_type *mtype, *kern_mtype; 853 __u32 mtype_id, kern_mtype_id; 854 void *mdata, *kern_mdata; 855 __s64 msize, kern_msize; 856 __u32 moff, kern_moff; 857 __u32 kern_member_idx; 858 const char *mname; 859 860 mname = btf__name_by_offset(btf, member->name_off); 861 kern_member = find_member_by_name(kern_btf, kern_type, mname); 862 if (!kern_member) { 863 pr_warn("struct_ops init_kern %s: Cannot find member %s in kernel BTF\n", 864 map->name, mname); 865 return -ENOTSUP; 866 } 867 868 kern_member_idx = kern_member - btf_members(kern_type); 869 if (btf_member_bitfield_size(type, i) || 870 btf_member_bitfield_size(kern_type, kern_member_idx)) { 871 pr_warn("struct_ops init_kern %s: bitfield %s is not supported\n", 872 map->name, mname); 873 return -ENOTSUP; 874 } 875 876 moff = member->offset / 8; 877 kern_moff = kern_member->offset / 8; 878 879 mdata = data + moff; 880 kern_mdata = kern_data + kern_moff; 881 882 mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id); 883 kern_mtype = skip_mods_and_typedefs(kern_btf, kern_member->type, 884 &kern_mtype_id); 885 if (BTF_INFO_KIND(mtype->info) != 886 BTF_INFO_KIND(kern_mtype->info)) { 887 pr_warn("struct_ops init_kern %s: Unmatched member type %s %u != %u(kernel)\n", 888 map->name, mname, BTF_INFO_KIND(mtype->info), 889 BTF_INFO_KIND(kern_mtype->info)); 890 return -ENOTSUP; 891 } 892 893 if (btf_is_ptr(mtype)) { 894 struct bpf_program *prog; 895 896 prog = st_ops->progs[i]; 897 if (!prog) 898 continue; 899 900 kern_mtype 
= skip_mods_and_typedefs(kern_btf, 901 kern_mtype->type, 902 &kern_mtype_id); 903 904 /* mtype->type must be a func_proto which was 905 * guaranteed in bpf_object__collect_st_ops_relos(), 906 * so only check kern_mtype for func_proto here. 907 */ 908 if (!btf_is_func_proto(kern_mtype)) { 909 pr_warn("struct_ops init_kern %s: kernel member %s is not a func ptr\n", 910 map->name, mname); 911 return -ENOTSUP; 912 } 913 914 prog->attach_btf_id = kern_type_id; 915 prog->expected_attach_type = kern_member_idx; 916 917 st_ops->kern_func_off[i] = kern_data_off + kern_moff; 918 919 pr_debug("struct_ops init_kern %s: func ptr %s is set to prog %s from data(+%u) to kern_data(+%u)\n", 920 map->name, mname, prog->name, moff, 921 kern_moff); 922 923 continue; 924 } 925 926 msize = btf__resolve_size(btf, mtype_id); 927 kern_msize = btf__resolve_size(kern_btf, kern_mtype_id); 928 if (msize < 0 || kern_msize < 0 || msize != kern_msize) { 929 pr_warn("struct_ops init_kern %s: Error in size of member %s: %zd != %zd(kernel)\n", 930 map->name, mname, (ssize_t)msize, 931 (ssize_t)kern_msize); 932 return -ENOTSUP; 933 } 934 935 pr_debug("struct_ops init_kern %s: copy %s %u bytes from data(+%u) to kern_data(+%u)\n", 936 map->name, mname, (unsigned int)msize, 937 moff, kern_moff); 938 memcpy(kern_mdata, mdata, msize); 939 } 940 941 return 0; 942 } 943 944 static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj) 945 { 946 struct bpf_map *map; 947 size_t i; 948 int err; 949 950 for (i = 0; i < obj->nr_maps; i++) { 951 map = &obj->maps[i]; 952 953 if (!bpf_map__is_struct_ops(map)) 954 continue; 955 956 err = bpf_map__init_kern_struct_ops(map, obj->btf, 957 obj->btf_vmlinux); 958 if (err) 959 return err; 960 } 961 962 return 0; 963 } 964 965 static int bpf_object__init_struct_ops_maps(struct bpf_object *obj) 966 { 967 const struct btf_type *type, *datasec; 968 const struct btf_var_secinfo *vsi; 969 struct bpf_struct_ops *st_ops; 970 const char *tname, *var_name; 971 __s32 type_id, datasec_id; 972 const struct btf *btf; 973 struct bpf_map *map; 974 __u32 i; 975 976 if (obj->efile.st_ops_shndx == -1) 977 return 0; 978 979 btf = obj->btf; 980 datasec_id = btf__find_by_name_kind(btf, STRUCT_OPS_SEC, 981 BTF_KIND_DATASEC); 982 if (datasec_id < 0) { 983 pr_warn("struct_ops init: DATASEC %s not found\n", 984 STRUCT_OPS_SEC); 985 return -EINVAL; 986 } 987 988 datasec = btf__type_by_id(btf, datasec_id); 989 vsi = btf_var_secinfos(datasec); 990 for (i = 0; i < btf_vlen(datasec); i++, vsi++) { 991 type = btf__type_by_id(obj->btf, vsi->type); 992 var_name = btf__name_by_offset(obj->btf, type->name_off); 993 994 type_id = btf__resolve_type(obj->btf, vsi->type); 995 if (type_id < 0) { 996 pr_warn("struct_ops init: Cannot resolve var type_id %u in DATASEC %s\n", 997 vsi->type, STRUCT_OPS_SEC); 998 return -EINVAL; 999 } 1000 1001 type = btf__type_by_id(obj->btf, type_id); 1002 tname = btf__name_by_offset(obj->btf, type->name_off); 1003 if (!tname[0]) { 1004 pr_warn("struct_ops init: anonymous type is not supported\n"); 1005 return -ENOTSUP; 1006 } 1007 if (!btf_is_struct(type)) { 1008 pr_warn("struct_ops init: %s is not a struct\n", tname); 1009 return -EINVAL; 1010 } 1011 1012 map = bpf_object__add_map(obj); 1013 if (IS_ERR(map)) 1014 return PTR_ERR(map); 1015 1016 map->sec_idx = obj->efile.st_ops_shndx; 1017 map->sec_offset = vsi->offset; 1018 map->name = strdup(var_name); 1019 if (!map->name) 1020 return -ENOMEM; 1021 1022 map->def.type = BPF_MAP_TYPE_STRUCT_OPS; 1023 map->def.key_size = sizeof(int); 1024 
map->def.value_size = type->size; 1025 map->def.max_entries = 1; 1026 1027 map->st_ops = calloc(1, sizeof(*map->st_ops)); 1028 if (!map->st_ops) 1029 return -ENOMEM; 1030 st_ops = map->st_ops; 1031 st_ops->data = malloc(type->size); 1032 st_ops->progs = calloc(btf_vlen(type), sizeof(*st_ops->progs)); 1033 st_ops->kern_func_off = malloc(btf_vlen(type) * 1034 sizeof(*st_ops->kern_func_off)); 1035 if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off) 1036 return -ENOMEM; 1037 1038 if (vsi->offset + type->size > obj->efile.st_ops_data->d_size) { 1039 pr_warn("struct_ops init: var %s is beyond the end of DATASEC %s\n", 1040 var_name, STRUCT_OPS_SEC); 1041 return -EINVAL; 1042 } 1043 1044 memcpy(st_ops->data, 1045 obj->efile.st_ops_data->d_buf + vsi->offset, 1046 type->size); 1047 st_ops->tname = tname; 1048 st_ops->type = type; 1049 st_ops->type_id = type_id; 1050 1051 pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n", 1052 tname, type_id, var_name, vsi->offset); 1053 } 1054 1055 return 0; 1056 } 1057 1058 static struct bpf_object *bpf_object__new(const char *path, 1059 const void *obj_buf, 1060 size_t obj_buf_sz, 1061 const char *obj_name) 1062 { 1063 struct bpf_object *obj; 1064 char *end; 1065 1066 obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1); 1067 if (!obj) { 1068 pr_warn("alloc memory failed for %s\n", path); 1069 return ERR_PTR(-ENOMEM); 1070 } 1071 1072 strcpy(obj->path, path); 1073 if (obj_name) { 1074 strncpy(obj->name, obj_name, sizeof(obj->name) - 1); 1075 obj->name[sizeof(obj->name) - 1] = 0; 1076 } else { 1077 /* Using basename() GNU version which doesn't modify arg. */ 1078 strncpy(obj->name, basename((void *)path), 1079 sizeof(obj->name) - 1); 1080 end = strchr(obj->name, '.'); 1081 if (end) 1082 *end = 0; 1083 } 1084 1085 obj->efile.fd = -1; 1086 /* 1087 * Caller of this function should also call 1088 * bpf_object__elf_finish() after data collection to return 1089 * obj_buf to user. If not, we should duplicate the buffer to 1090 * avoid user freeing them before elf finish. 1091 */ 1092 obj->efile.obj_buf = obj_buf; 1093 obj->efile.obj_buf_sz = obj_buf_sz; 1094 obj->efile.maps_shndx = -1; 1095 obj->efile.btf_maps_shndx = -1; 1096 obj->efile.data_shndx = -1; 1097 obj->efile.rodata_shndx = -1; 1098 obj->efile.bss_shndx = -1; 1099 obj->efile.st_ops_shndx = -1; 1100 obj->kconfig_map_idx = -1; 1101 obj->rodata_map_idx = -1; 1102 1103 obj->kern_version = get_kernel_version(); 1104 obj->loaded = false; 1105 1106 INIT_LIST_HEAD(&obj->list); 1107 list_add(&obj->list, &bpf_objects_list); 1108 return obj; 1109 } 1110 1111 static void bpf_object__elf_finish(struct bpf_object *obj) 1112 { 1113 if (!obj_elf_valid(obj)) 1114 return; 1115 1116 if (obj->efile.elf) { 1117 elf_end(obj->efile.elf); 1118 obj->efile.elf = NULL; 1119 } 1120 obj->efile.symbols = NULL; 1121 obj->efile.data = NULL; 1122 obj->efile.rodata = NULL; 1123 obj->efile.bss = NULL; 1124 obj->efile.st_ops_data = NULL; 1125 1126 zfree(&obj->efile.reloc_sects); 1127 obj->efile.nr_reloc_sects = 0; 1128 zclose(obj->efile.fd); 1129 obj->efile.obj_buf = NULL; 1130 obj->efile.obj_buf_sz = 0; 1131 } 1132 1133 static int bpf_object__elf_init(struct bpf_object *obj) 1134 { 1135 int err = 0; 1136 GElf_Ehdr *ep; 1137 1138 if (obj_elf_valid(obj)) { 1139 pr_warn("elf: init internal error\n"); 1140 return -LIBBPF_ERRNO__LIBELF; 1141 } 1142 1143 if (obj->efile.obj_buf_sz > 0) { 1144 /* 1145 * obj_buf should have been validated by 1146 * bpf_object__open_buffer(). 
1147 */ 1148 obj->efile.elf = elf_memory((char *)obj->efile.obj_buf, 1149 obj->efile.obj_buf_sz); 1150 } else { 1151 obj->efile.fd = open(obj->path, O_RDONLY); 1152 if (obj->efile.fd < 0) { 1153 char errmsg[STRERR_BUFSIZE], *cp; 1154 1155 err = -errno; 1156 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); 1157 pr_warn("elf: failed to open %s: %s\n", obj->path, cp); 1158 return err; 1159 } 1160 1161 obj->efile.elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL); 1162 } 1163 1164 if (!obj->efile.elf) { 1165 pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1)); 1166 err = -LIBBPF_ERRNO__LIBELF; 1167 goto errout; 1168 } 1169 1170 if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) { 1171 pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1)); 1172 err = -LIBBPF_ERRNO__FORMAT; 1173 goto errout; 1174 } 1175 ep = &obj->efile.ehdr; 1176 1177 if (elf_getshdrstrndx(obj->efile.elf, &obj->efile.shstrndx)) { 1178 pr_warn("elf: failed to get section names section index for %s: %s\n", 1179 obj->path, elf_errmsg(-1)); 1180 err = -LIBBPF_ERRNO__FORMAT; 1181 goto errout; 1182 } 1183 1184 /* Elf is corrupted/truncated, avoid calling elf_strptr. */ 1185 if (!elf_rawdata(elf_getscn(obj->efile.elf, obj->efile.shstrndx), NULL)) { 1186 pr_warn("elf: failed to get section names strings from %s: %s\n", 1187 obj->path, elf_errmsg(-1)); 1188 return -LIBBPF_ERRNO__FORMAT; 1189 } 1190 1191 /* Old LLVM set e_machine to EM_NONE */ 1192 if (ep->e_type != ET_REL || 1193 (ep->e_machine && ep->e_machine != EM_BPF)) { 1194 pr_warn("elf: %s is not a valid eBPF object file\n", obj->path); 1195 err = -LIBBPF_ERRNO__FORMAT; 1196 goto errout; 1197 } 1198 1199 return 0; 1200 errout: 1201 bpf_object__elf_finish(obj); 1202 return err; 1203 } 1204 1205 static int bpf_object__check_endianness(struct bpf_object *obj) 1206 { 1207 #if __BYTE_ORDER == __LITTLE_ENDIAN 1208 if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2LSB) 1209 return 0; 1210 #elif __BYTE_ORDER == __BIG_ENDIAN 1211 if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2MSB) 1212 return 0; 1213 #else 1214 # error "Unrecognized __BYTE_ORDER__" 1215 #endif 1216 pr_warn("elf: endianness mismatch in %s.\n", obj->path); 1217 return -LIBBPF_ERRNO__ENDIAN; 1218 } 1219 1220 static int 1221 bpf_object__init_license(struct bpf_object *obj, void *data, size_t size) 1222 { 1223 memcpy(obj->license, data, min(size, sizeof(obj->license) - 1)); 1224 pr_debug("license of %s is %s\n", obj->path, obj->license); 1225 return 0; 1226 } 1227 1228 static int 1229 bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size) 1230 { 1231 __u32 kver; 1232 1233 if (size != sizeof(kver)) { 1234 pr_warn("invalid kver section in %s\n", obj->path); 1235 return -LIBBPF_ERRNO__FORMAT; 1236 } 1237 memcpy(&kver, data, sizeof(kver)); 1238 obj->kern_version = kver; 1239 pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version); 1240 return 0; 1241 } 1242 1243 static bool bpf_map_type__is_map_in_map(enum bpf_map_type type) 1244 { 1245 if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS || 1246 type == BPF_MAP_TYPE_HASH_OF_MAPS) 1247 return true; 1248 return false; 1249 } 1250 1251 int bpf_object__section_size(const struct bpf_object *obj, const char *name, 1252 __u32 *size) 1253 { 1254 int ret = -ENOENT; 1255 1256 *size = 0; 1257 if (!name) { 1258 return -EINVAL; 1259 } else if (!strcmp(name, DATA_SEC)) { 1260 if (obj->efile.data) 1261 *size = obj->efile.data->d_size; 1262 } else if (!strcmp(name, BSS_SEC)) { 1263 if (obj->efile.bss) 1264 *size 
= obj->efile.bss->d_size; 1265 } else if (!strcmp(name, RODATA_SEC)) { 1266 if (obj->efile.rodata) 1267 *size = obj->efile.rodata->d_size; 1268 } else if (!strcmp(name, STRUCT_OPS_SEC)) { 1269 if (obj->efile.st_ops_data) 1270 *size = obj->efile.st_ops_data->d_size; 1271 } else { 1272 Elf_Scn *scn = elf_sec_by_name(obj, name); 1273 Elf_Data *data = elf_sec_data(obj, scn); 1274 1275 if (data) { 1276 ret = 0; /* found it */ 1277 *size = data->d_size; 1278 } 1279 } 1280 1281 return *size ? 0 : ret; 1282 } 1283 1284 int bpf_object__variable_offset(const struct bpf_object *obj, const char *name, 1285 __u32 *off) 1286 { 1287 Elf_Data *symbols = obj->efile.symbols; 1288 const char *sname; 1289 size_t si; 1290 1291 if (!name || !off) 1292 return -EINVAL; 1293 1294 for (si = 0; si < symbols->d_size / sizeof(GElf_Sym); si++) { 1295 GElf_Sym sym; 1296 1297 if (!gelf_getsym(symbols, si, &sym)) 1298 continue; 1299 if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL || 1300 GELF_ST_TYPE(sym.st_info) != STT_OBJECT) 1301 continue; 1302 1303 sname = elf_sym_str(obj, sym.st_name); 1304 if (!sname) { 1305 pr_warn("failed to get sym name string for var %s\n", 1306 name); 1307 return -EIO; 1308 } 1309 if (strcmp(name, sname) == 0) { 1310 *off = sym.st_value; 1311 return 0; 1312 } 1313 } 1314 1315 return -ENOENT; 1316 } 1317 1318 static struct bpf_map *bpf_object__add_map(struct bpf_object *obj) 1319 { 1320 struct bpf_map *new_maps; 1321 size_t new_cap; 1322 int i; 1323 1324 if (obj->nr_maps < obj->maps_cap) 1325 return &obj->maps[obj->nr_maps++]; 1326 1327 new_cap = max((size_t)4, obj->maps_cap * 3 / 2); 1328 new_maps = libbpf_reallocarray(obj->maps, new_cap, sizeof(*obj->maps)); 1329 if (!new_maps) { 1330 pr_warn("alloc maps for object failed\n"); 1331 return ERR_PTR(-ENOMEM); 1332 } 1333 1334 obj->maps_cap = new_cap; 1335 obj->maps = new_maps; 1336 1337 /* zero out new maps */ 1338 memset(obj->maps + obj->nr_maps, 0, 1339 (obj->maps_cap - obj->nr_maps) * sizeof(*obj->maps)); 1340 /* 1341 * fill all fd with -1 so won't close incorrect fd (fd=0 is stdin) 1342 * when failure (zclose won't close negative fd)). 
1343 */ 1344 for (i = obj->nr_maps; i < obj->maps_cap; i++) { 1345 obj->maps[i].fd = -1; 1346 obj->maps[i].inner_map_fd = -1; 1347 } 1348 1349 return &obj->maps[obj->nr_maps++]; 1350 } 1351 1352 static size_t bpf_map_mmap_sz(const struct bpf_map *map) 1353 { 1354 long page_sz = sysconf(_SC_PAGE_SIZE); 1355 size_t map_sz; 1356 1357 map_sz = (size_t)roundup(map->def.value_size, 8) * map->def.max_entries; 1358 map_sz = roundup(map_sz, page_sz); 1359 return map_sz; 1360 } 1361 1362 static char *internal_map_name(struct bpf_object *obj, 1363 enum libbpf_map_type type) 1364 { 1365 char map_name[BPF_OBJ_NAME_LEN], *p; 1366 const char *sfx = libbpf_type_to_btf_name[type]; 1367 int sfx_len = max((size_t)7, strlen(sfx)); 1368 int pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1, 1369 strlen(obj->name)); 1370 1371 snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name, 1372 sfx_len, libbpf_type_to_btf_name[type]); 1373 1374 /* sanitise map name to characters allowed by kernel */ 1375 for (p = map_name; *p && p < map_name + sizeof(map_name); p++) 1376 if (!isalnum(*p) && *p != '_' && *p != '.') 1377 *p = '_'; 1378 1379 return strdup(map_name); 1380 } 1381 1382 static int 1383 bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type, 1384 int sec_idx, void *data, size_t data_sz) 1385 { 1386 struct bpf_map_def *def; 1387 struct bpf_map *map; 1388 int err; 1389 1390 map = bpf_object__add_map(obj); 1391 if (IS_ERR(map)) 1392 return PTR_ERR(map); 1393 1394 map->libbpf_type = type; 1395 map->sec_idx = sec_idx; 1396 map->sec_offset = 0; 1397 map->name = internal_map_name(obj, type); 1398 if (!map->name) { 1399 pr_warn("failed to alloc map name\n"); 1400 return -ENOMEM; 1401 } 1402 1403 def = &map->def; 1404 def->type = BPF_MAP_TYPE_ARRAY; 1405 def->key_size = sizeof(int); 1406 def->value_size = data_sz; 1407 def->max_entries = 1; 1408 def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG 1409 ? BPF_F_RDONLY_PROG : 0; 1410 def->map_flags |= BPF_F_MMAPABLE; 1411 1412 pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n", 1413 map->name, map->sec_idx, map->sec_offset, def->map_flags); 1414 1415 map->mmaped = mmap(NULL, bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE, 1416 MAP_SHARED | MAP_ANONYMOUS, -1, 0); 1417 if (map->mmaped == MAP_FAILED) { 1418 err = -errno; 1419 map->mmaped = NULL; 1420 pr_warn("failed to alloc map '%s' content buffer: %d\n", 1421 map->name, err); 1422 zfree(&map->name); 1423 return err; 1424 } 1425 1426 if (data) 1427 memcpy(map->mmaped, data, data_sz); 1428 1429 pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name); 1430 return 0; 1431 } 1432 1433 static int bpf_object__init_global_data_maps(struct bpf_object *obj) 1434 { 1435 int err; 1436 1437 /* 1438 * Populate obj->maps with libbpf internal maps. 
1439 */ 1440 if (obj->efile.data_shndx >= 0) { 1441 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA, 1442 obj->efile.data_shndx, 1443 obj->efile.data->d_buf, 1444 obj->efile.data->d_size); 1445 if (err) 1446 return err; 1447 } 1448 if (obj->efile.rodata_shndx >= 0) { 1449 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA, 1450 obj->efile.rodata_shndx, 1451 obj->efile.rodata->d_buf, 1452 obj->efile.rodata->d_size); 1453 if (err) 1454 return err; 1455 1456 obj->rodata_map_idx = obj->nr_maps - 1; 1457 } 1458 if (obj->efile.bss_shndx >= 0) { 1459 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS, 1460 obj->efile.bss_shndx, 1461 NULL, 1462 obj->efile.bss->d_size); 1463 if (err) 1464 return err; 1465 } 1466 return 0; 1467 } 1468 1469 1470 static struct extern_desc *find_extern_by_name(const struct bpf_object *obj, 1471 const void *name) 1472 { 1473 int i; 1474 1475 for (i = 0; i < obj->nr_extern; i++) { 1476 if (strcmp(obj->externs[i].name, name) == 0) 1477 return &obj->externs[i]; 1478 } 1479 return NULL; 1480 } 1481 1482 static int set_kcfg_value_tri(struct extern_desc *ext, void *ext_val, 1483 char value) 1484 { 1485 switch (ext->kcfg.type) { 1486 case KCFG_BOOL: 1487 if (value == 'm') { 1488 pr_warn("extern (kcfg) %s=%c should be tristate or char\n", 1489 ext->name, value); 1490 return -EINVAL; 1491 } 1492 *(bool *)ext_val = value == 'y' ? true : false; 1493 break; 1494 case KCFG_TRISTATE: 1495 if (value == 'y') 1496 *(enum libbpf_tristate *)ext_val = TRI_YES; 1497 else if (value == 'm') 1498 *(enum libbpf_tristate *)ext_val = TRI_MODULE; 1499 else /* value == 'n' */ 1500 *(enum libbpf_tristate *)ext_val = TRI_NO; 1501 break; 1502 case KCFG_CHAR: 1503 *(char *)ext_val = value; 1504 break; 1505 case KCFG_UNKNOWN: 1506 case KCFG_INT: 1507 case KCFG_CHAR_ARR: 1508 default: 1509 pr_warn("extern (kcfg) %s=%c should be bool, tristate, or char\n", 1510 ext->name, value); 1511 return -EINVAL; 1512 } 1513 ext->is_set = true; 1514 return 0; 1515 } 1516 1517 static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val, 1518 const char *value) 1519 { 1520 size_t len; 1521 1522 if (ext->kcfg.type != KCFG_CHAR_ARR) { 1523 pr_warn("extern (kcfg) %s=%s should be char array\n", ext->name, value); 1524 return -EINVAL; 1525 } 1526 1527 len = strlen(value); 1528 if (value[len - 1] != '"') { 1529 pr_warn("extern (kcfg) '%s': invalid string config '%s'\n", 1530 ext->name, value); 1531 return -EINVAL; 1532 } 1533 1534 /* strip quotes */ 1535 len -= 2; 1536 if (len >= ext->kcfg.sz) { 1537 pr_warn("extern (kcfg) '%s': long string config %s of (%zu bytes) truncated to %d bytes\n", 1538 ext->name, value, len, ext->kcfg.sz - 1); 1539 len = ext->kcfg.sz - 1; 1540 } 1541 memcpy(ext_val, value + 1, len); 1542 ext_val[len] = '\0'; 1543 ext->is_set = true; 1544 return 0; 1545 } 1546 1547 static int parse_u64(const char *value, __u64 *res) 1548 { 1549 char *value_end; 1550 int err; 1551 1552 errno = 0; 1553 *res = strtoull(value, &value_end, 0); 1554 if (errno) { 1555 err = -errno; 1556 pr_warn("failed to parse '%s' as integer: %d\n", value, err); 1557 return err; 1558 } 1559 if (*value_end) { 1560 pr_warn("failed to parse '%s' as integer completely\n", value); 1561 return -EINVAL; 1562 } 1563 return 0; 1564 } 1565 1566 static bool is_kcfg_value_in_range(const struct extern_desc *ext, __u64 v) 1567 { 1568 int bit_sz = ext->kcfg.sz * 8; 1569 1570 if (ext->kcfg.sz == 8) 1571 return true; 1572 1573 /* Validate that value stored in u64 fits in integer of `ext->sz` 1574 * bytes size without any 
loss of information. If the target integer 1575 * is signed, we rely on the following limits of integer type of 1576 * Y bits and subsequent transformation: 1577 * 1578 * -2^(Y-1) <= X <= 2^(Y-1) - 1 1579 * 0 <= X + 2^(Y-1) <= 2^Y - 1 1580 * 0 <= X + 2^(Y-1) < 2^Y 1581 * 1582 * For unsigned target integer, check that all the (64 - Y) bits are 1583 * zero. 1584 */ 1585 if (ext->kcfg.is_signed) 1586 return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz); 1587 else 1588 return (v >> bit_sz) == 0; 1589 } 1590 1591 static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val, 1592 __u64 value) 1593 { 1594 if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) { 1595 pr_warn("extern (kcfg) %s=%llu should be integer\n", 1596 ext->name, (unsigned long long)value); 1597 return -EINVAL; 1598 } 1599 if (!is_kcfg_value_in_range(ext, value)) { 1600 pr_warn("extern (kcfg) %s=%llu value doesn't fit in %d bytes\n", 1601 ext->name, (unsigned long long)value, ext->kcfg.sz); 1602 return -ERANGE; 1603 } 1604 switch (ext->kcfg.sz) { 1605 case 1: *(__u8 *)ext_val = value; break; 1606 case 2: *(__u16 *)ext_val = value; break; 1607 case 4: *(__u32 *)ext_val = value; break; 1608 case 8: *(__u64 *)ext_val = value; break; 1609 default: 1610 return -EINVAL; 1611 } 1612 ext->is_set = true; 1613 return 0; 1614 } 1615 1616 static int bpf_object__process_kconfig_line(struct bpf_object *obj, 1617 char *buf, void *data) 1618 { 1619 struct extern_desc *ext; 1620 char *sep, *value; 1621 int len, err = 0; 1622 void *ext_val; 1623 __u64 num; 1624 1625 if (strncmp(buf, "CONFIG_", 7)) 1626 return 0; 1627 1628 sep = strchr(buf, '='); 1629 if (!sep) { 1630 pr_warn("failed to parse '%s': no separator\n", buf); 1631 return -EINVAL; 1632 } 1633 1634 /* Trim ending '\n' */ 1635 len = strlen(buf); 1636 if (buf[len - 1] == '\n') 1637 buf[len - 1] = '\0'; 1638 /* Split on '=' and ensure that a value is present. */ 1639 *sep = '\0'; 1640 if (!sep[1]) { 1641 *sep = '='; 1642 pr_warn("failed to parse '%s': no value\n", buf); 1643 return -EINVAL; 1644 } 1645 1646 ext = find_extern_by_name(obj, buf); 1647 if (!ext || ext->is_set) 1648 return 0; 1649 1650 ext_val = data + ext->kcfg.data_off; 1651 value = sep + 1; 1652 1653 switch (*value) { 1654 case 'y': case 'n': case 'm': 1655 err = set_kcfg_value_tri(ext, ext_val, *value); 1656 break; 1657 case '"': 1658 err = set_kcfg_value_str(ext, ext_val, value); 1659 break; 1660 default: 1661 /* assume integer */ 1662 err = parse_u64(value, &num); 1663 if (err) { 1664 pr_warn("extern (kcfg) %s=%s should be integer\n", 1665 ext->name, value); 1666 return err; 1667 } 1668 err = set_kcfg_value_num(ext, ext_val, num); 1669 break; 1670 } 1671 if (err) 1672 return err; 1673 pr_debug("extern (kcfg) %s=%s\n", ext->name, value); 1674 return 0; 1675 } 1676 1677 static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data) 1678 { 1679 char buf[PATH_MAX]; 1680 struct utsname uts; 1681 int len, err = 0; 1682 gzFile file; 1683 1684 uname(&uts); 1685 len = snprintf(buf, PATH_MAX, "/boot/config-%s", uts.release); 1686 if (len < 0) 1687 return -EINVAL; 1688 else if (len >= PATH_MAX) 1689 return -ENAMETOOLONG; 1690 1691 /* gzopen also accepts uncompressed files. 
*/ 1692 file = gzopen(buf, "r"); 1693 if (!file) 1694 file = gzopen("/proc/config.gz", "r"); 1695 1696 if (!file) { 1697 pr_warn("failed to open system Kconfig\n"); 1698 return -ENOENT; 1699 } 1700 1701 while (gzgets(file, buf, sizeof(buf))) { 1702 err = bpf_object__process_kconfig_line(obj, buf, data); 1703 if (err) { 1704 pr_warn("error parsing system Kconfig line '%s': %d\n", 1705 buf, err); 1706 goto out; 1707 } 1708 } 1709 1710 out: 1711 gzclose(file); 1712 return err; 1713 } 1714 1715 static int bpf_object__read_kconfig_mem(struct bpf_object *obj, 1716 const char *config, void *data) 1717 { 1718 char buf[PATH_MAX]; 1719 int err = 0; 1720 FILE *file; 1721 1722 file = fmemopen((void *)config, strlen(config), "r"); 1723 if (!file) { 1724 err = -errno; 1725 pr_warn("failed to open in-memory Kconfig: %d\n", err); 1726 return err; 1727 } 1728 1729 while (fgets(buf, sizeof(buf), file)) { 1730 err = bpf_object__process_kconfig_line(obj, buf, data); 1731 if (err) { 1732 pr_warn("error parsing in-memory Kconfig line '%s': %d\n", 1733 buf, err); 1734 break; 1735 } 1736 } 1737 1738 fclose(file); 1739 return err; 1740 } 1741 1742 static int bpf_object__init_kconfig_map(struct bpf_object *obj) 1743 { 1744 struct extern_desc *last_ext = NULL, *ext; 1745 size_t map_sz; 1746 int i, err; 1747 1748 for (i = 0; i < obj->nr_extern; i++) { 1749 ext = &obj->externs[i]; 1750 if (ext->type == EXT_KCFG) 1751 last_ext = ext; 1752 } 1753 1754 if (!last_ext) 1755 return 0; 1756 1757 map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz; 1758 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG, 1759 obj->efile.symbols_shndx, 1760 NULL, map_sz); 1761 if (err) 1762 return err; 1763 1764 obj->kconfig_map_idx = obj->nr_maps - 1; 1765 1766 return 0; 1767 } 1768 1769 static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict) 1770 { 1771 Elf_Data *symbols = obj->efile.symbols; 1772 int i, map_def_sz = 0, nr_maps = 0, nr_syms; 1773 Elf_Data *data = NULL; 1774 Elf_Scn *scn; 1775 1776 if (obj->efile.maps_shndx < 0) 1777 return 0; 1778 1779 if (!symbols) 1780 return -EINVAL; 1781 1782 1783 scn = elf_sec_by_idx(obj, obj->efile.maps_shndx); 1784 data = elf_sec_data(obj, scn); 1785 if (!scn || !data) { 1786 pr_warn("elf: failed to get legacy map definitions for %s\n", 1787 obj->path); 1788 return -EINVAL; 1789 } 1790 1791 /* 1792 * Count number of maps. Each map has a name. 1793 * Array of maps is not supported: only the first element is 1794 * considered. 1795 * 1796 * TODO: Detect array of map and report error. 1797 */ 1798 nr_syms = symbols->d_size / sizeof(GElf_Sym); 1799 for (i = 0; i < nr_syms; i++) { 1800 GElf_Sym sym; 1801 1802 if (!gelf_getsym(symbols, i, &sym)) 1803 continue; 1804 if (sym.st_shndx != obj->efile.maps_shndx) 1805 continue; 1806 nr_maps++; 1807 } 1808 /* Assume equally sized map definitions */ 1809 pr_debug("elf: found %d legacy map definitions (%zd bytes) in %s\n", 1810 nr_maps, data->d_size, obj->path); 1811 1812 if (!data->d_size || nr_maps == 0 || (data->d_size % nr_maps) != 0) { 1813 pr_warn("elf: unable to determine legacy map definition size in %s\n", 1814 obj->path); 1815 return -EINVAL; 1816 } 1817 map_def_sz = data->d_size / nr_maps; 1818 1819 /* Fill obj->maps using data in "maps" section. 
*/ 1820 for (i = 0; i < nr_syms; i++) { 1821 GElf_Sym sym; 1822 const char *map_name; 1823 struct bpf_map_def *def; 1824 struct bpf_map *map; 1825 1826 if (!gelf_getsym(symbols, i, &sym)) 1827 continue; 1828 if (sym.st_shndx != obj->efile.maps_shndx) 1829 continue; 1830 1831 map = bpf_object__add_map(obj); 1832 if (IS_ERR(map)) 1833 return PTR_ERR(map); 1834 1835 map_name = elf_sym_str(obj, sym.st_name); 1836 if (!map_name) { 1837 pr_warn("failed to get map #%d name sym string for obj %s\n", 1838 i, obj->path); 1839 return -LIBBPF_ERRNO__FORMAT; 1840 } 1841 1842 map->libbpf_type = LIBBPF_MAP_UNSPEC; 1843 map->sec_idx = sym.st_shndx; 1844 map->sec_offset = sym.st_value; 1845 pr_debug("map '%s' (legacy): at sec_idx %d, offset %zu.\n", 1846 map_name, map->sec_idx, map->sec_offset); 1847 if (sym.st_value + map_def_sz > data->d_size) { 1848 pr_warn("corrupted maps section in %s: last map \"%s\" too small\n", 1849 obj->path, map_name); 1850 return -EINVAL; 1851 } 1852 1853 map->name = strdup(map_name); 1854 if (!map->name) { 1855 pr_warn("failed to alloc map name\n"); 1856 return -ENOMEM; 1857 } 1858 pr_debug("map %d is \"%s\"\n", i, map->name); 1859 def = (struct bpf_map_def *)(data->d_buf + sym.st_value); 1860 /* 1861 * If the definition of the map in the object file fits in 1862 * bpf_map_def, copy it. Any extra fields in our version 1863 * of bpf_map_def will default to zero as a result of the 1864 * calloc above. 1865 */ 1866 if (map_def_sz <= sizeof(struct bpf_map_def)) { 1867 memcpy(&map->def, def, map_def_sz); 1868 } else { 1869 /* 1870 * Here the map structure being read is bigger than what 1871 * we expect, truncate if the excess bits are all zero. 1872 * If they are not zero, reject this map as 1873 * incompatible. 1874 */ 1875 char *b; 1876 1877 for (b = ((char *)def) + sizeof(struct bpf_map_def); 1878 b < ((char *)def) + map_def_sz; b++) { 1879 if (*b != 0) { 1880 pr_warn("maps section in %s: \"%s\" has unrecognized, non-zero options\n", 1881 obj->path, map_name); 1882 if (strict) 1883 return -EINVAL; 1884 } 1885 } 1886 memcpy(&map->def, def, sizeof(struct bpf_map_def)); 1887 } 1888 } 1889 return 0; 1890 } 1891 1892 static const struct btf_type * 1893 skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id) 1894 { 1895 const struct btf_type *t = btf__type_by_id(btf, id); 1896 1897 if (res_id) 1898 *res_id = id; 1899 1900 while (btf_is_mod(t) || btf_is_typedef(t)) { 1901 if (res_id) 1902 *res_id = t->type; 1903 t = btf__type_by_id(btf, t->type); 1904 } 1905 1906 return t; 1907 } 1908 1909 static const struct btf_type * 1910 resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id) 1911 { 1912 const struct btf_type *t; 1913 1914 t = skip_mods_and_typedefs(btf, id, NULL); 1915 if (!btf_is_ptr(t)) 1916 return NULL; 1917 1918 t = skip_mods_and_typedefs(btf, t->type, res_id); 1919 1920 return btf_is_func_proto(t) ? 
t : NULL; 1921 } 1922 1923 static const char *btf_kind_str(const struct btf_type *t) 1924 { 1925 switch (btf_kind(t)) { 1926 case BTF_KIND_UNKN: return "void"; 1927 case BTF_KIND_INT: return "int"; 1928 case BTF_KIND_PTR: return "ptr"; 1929 case BTF_KIND_ARRAY: return "array"; 1930 case BTF_KIND_STRUCT: return "struct"; 1931 case BTF_KIND_UNION: return "union"; 1932 case BTF_KIND_ENUM: return "enum"; 1933 case BTF_KIND_FWD: return "fwd"; 1934 case BTF_KIND_TYPEDEF: return "typedef"; 1935 case BTF_KIND_VOLATILE: return "volatile"; 1936 case BTF_KIND_CONST: return "const"; 1937 case BTF_KIND_RESTRICT: return "restrict"; 1938 case BTF_KIND_FUNC: return "func"; 1939 case BTF_KIND_FUNC_PROTO: return "func_proto"; 1940 case BTF_KIND_VAR: return "var"; 1941 case BTF_KIND_DATASEC: return "datasec"; 1942 case BTF_KIND_FLOAT: return "float"; 1943 default: return "unknown"; 1944 } 1945 } 1946 1947 /* 1948 * Fetch integer attribute of BTF map definition. Such attributes are 1949 * represented using a pointer to an array, in which dimensionality of array 1950 * encodes specified integer value. E.g., int (*type)[BPF_MAP_TYPE_ARRAY]; 1951 * encodes `type => BPF_MAP_TYPE_ARRAY` key/value pair completely using BTF 1952 * type definition, while using only sizeof(void *) space in ELF data section. 1953 */ 1954 static bool get_map_field_int(const char *map_name, const struct btf *btf, 1955 const struct btf_member *m, __u32 *res) 1956 { 1957 const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL); 1958 const char *name = btf__name_by_offset(btf, m->name_off); 1959 const struct btf_array *arr_info; 1960 const struct btf_type *arr_t; 1961 1962 if (!btf_is_ptr(t)) { 1963 pr_warn("map '%s': attr '%s': expected PTR, got %s.\n", 1964 map_name, name, btf_kind_str(t)); 1965 return false; 1966 } 1967 1968 arr_t = btf__type_by_id(btf, t->type); 1969 if (!arr_t) { 1970 pr_warn("map '%s': attr '%s': type [%u] not found.\n", 1971 map_name, name, t->type); 1972 return false; 1973 } 1974 if (!btf_is_array(arr_t)) { 1975 pr_warn("map '%s': attr '%s': expected ARRAY, got %s.\n", 1976 map_name, name, btf_kind_str(arr_t)); 1977 return false; 1978 } 1979 arr_info = btf_array(arr_t); 1980 *res = arr_info->nelems; 1981 return true; 1982 } 1983 1984 static int build_map_pin_path(struct bpf_map *map, const char *path) 1985 { 1986 char buf[PATH_MAX]; 1987 int len; 1988 1989 if (!path) 1990 path = "/sys/fs/bpf"; 1991 1992 len = snprintf(buf, PATH_MAX, "%s/%s", path, bpf_map__name(map)); 1993 if (len < 0) 1994 return -EINVAL; 1995 else if (len >= PATH_MAX) 1996 return -ENAMETOOLONG; 1997 1998 return bpf_map__set_pin_path(map, buf); 1999 } 2000 2001 2002 static int parse_btf_map_def(struct bpf_object *obj, 2003 struct bpf_map *map, 2004 const struct btf_type *def, 2005 bool strict, bool is_inner, 2006 const char *pin_root_path) 2007 { 2008 const struct btf_type *t; 2009 const struct btf_member *m; 2010 int vlen, i; 2011 2012 vlen = btf_vlen(def); 2013 m = btf_members(def); 2014 for (i = 0; i < vlen; i++, m++) { 2015 const char *name = btf__name_by_offset(obj->btf, m->name_off); 2016 2017 if (!name) { 2018 pr_warn("map '%s': invalid field #%d.\n", map->name, i); 2019 return -EINVAL; 2020 } 2021 if (strcmp(name, "type") == 0) { 2022 if (!get_map_field_int(map->name, obj->btf, m, 2023 &map->def.type)) 2024 return -EINVAL; 2025 pr_debug("map '%s': found type = %u.\n", 2026 map->name, map->def.type); 2027 } else if (strcmp(name, "max_entries") == 0) { 2028 if (!get_map_field_int(map->name, obj->btf, m, 2029 
&map->def.max_entries)) 2030 return -EINVAL; 2031 pr_debug("map '%s': found max_entries = %u.\n", 2032 map->name, map->def.max_entries); 2033 } else if (strcmp(name, "map_flags") == 0) { 2034 if (!get_map_field_int(map->name, obj->btf, m, 2035 &map->def.map_flags)) 2036 return -EINVAL; 2037 pr_debug("map '%s': found map_flags = %u.\n", 2038 map->name, map->def.map_flags); 2039 } else if (strcmp(name, "numa_node") == 0) { 2040 if (!get_map_field_int(map->name, obj->btf, m, &map->numa_node)) 2041 return -EINVAL; 2042 pr_debug("map '%s': found numa_node = %u.\n", map->name, map->numa_node); 2043 } else if (strcmp(name, "key_size") == 0) { 2044 __u32 sz; 2045 2046 if (!get_map_field_int(map->name, obj->btf, m, &sz)) 2047 return -EINVAL; 2048 pr_debug("map '%s': found key_size = %u.\n", 2049 map->name, sz); 2050 if (map->def.key_size && map->def.key_size != sz) { 2051 pr_warn("map '%s': conflicting key size %u != %u.\n", 2052 map->name, map->def.key_size, sz); 2053 return -EINVAL; 2054 } 2055 map->def.key_size = sz; 2056 } else if (strcmp(name, "key") == 0) { 2057 __s64 sz; 2058 2059 t = btf__type_by_id(obj->btf, m->type); 2060 if (!t) { 2061 pr_warn("map '%s': key type [%d] not found.\n", 2062 map->name, m->type); 2063 return -EINVAL; 2064 } 2065 if (!btf_is_ptr(t)) { 2066 pr_warn("map '%s': key spec is not PTR: %s.\n", 2067 map->name, btf_kind_str(t)); 2068 return -EINVAL; 2069 } 2070 sz = btf__resolve_size(obj->btf, t->type); 2071 if (sz < 0) { 2072 pr_warn("map '%s': can't determine key size for type [%u]: %zd.\n", 2073 map->name, t->type, (ssize_t)sz); 2074 return sz; 2075 } 2076 pr_debug("map '%s': found key [%u], sz = %zd.\n", 2077 map->name, t->type, (ssize_t)sz); 2078 if (map->def.key_size && map->def.key_size != sz) { 2079 pr_warn("map '%s': conflicting key size %u != %zd.\n", 2080 map->name, map->def.key_size, (ssize_t)sz); 2081 return -EINVAL; 2082 } 2083 map->def.key_size = sz; 2084 map->btf_key_type_id = t->type; 2085 } else if (strcmp(name, "value_size") == 0) { 2086 __u32 sz; 2087 2088 if (!get_map_field_int(map->name, obj->btf, m, &sz)) 2089 return -EINVAL; 2090 pr_debug("map '%s': found value_size = %u.\n", 2091 map->name, sz); 2092 if (map->def.value_size && map->def.value_size != sz) { 2093 pr_warn("map '%s': conflicting value size %u != %u.\n", 2094 map->name, map->def.value_size, sz); 2095 return -EINVAL; 2096 } 2097 map->def.value_size = sz; 2098 } else if (strcmp(name, "value") == 0) { 2099 __s64 sz; 2100 2101 t = btf__type_by_id(obj->btf, m->type); 2102 if (!t) { 2103 pr_warn("map '%s': value type [%d] not found.\n", 2104 map->name, m->type); 2105 return -EINVAL; 2106 } 2107 if (!btf_is_ptr(t)) { 2108 pr_warn("map '%s': value spec is not PTR: %s.\n", 2109 map->name, btf_kind_str(t)); 2110 return -EINVAL; 2111 } 2112 sz = btf__resolve_size(obj->btf, t->type); 2113 if (sz < 0) { 2114 pr_warn("map '%s': can't determine value size for type [%u]: %zd.\n", 2115 map->name, t->type, (ssize_t)sz); 2116 return sz; 2117 } 2118 pr_debug("map '%s': found value [%u], sz = %zd.\n", 2119 map->name, t->type, (ssize_t)sz); 2120 if (map->def.value_size && map->def.value_size != sz) { 2121 pr_warn("map '%s': conflicting value size %u != %zd.\n", 2122 map->name, map->def.value_size, (ssize_t)sz); 2123 return -EINVAL; 2124 } 2125 map->def.value_size = sz; 2126 map->btf_value_type_id = t->type; 2127 } 2128 else if (strcmp(name, "values") == 0) { 2129 int err; 2130 2131 if (is_inner) { 2132 pr_warn("map '%s': multi-level inner maps not supported.\n", 2133 map->name); 2134 return -ENOTSUP; 2135 
} 2136 if (i != vlen - 1) { 2137 pr_warn("map '%s': '%s' member should be last.\n", 2138 map->name, name); 2139 return -EINVAL; 2140 } 2141 if (!bpf_map_type__is_map_in_map(map->def.type)) { 2142 pr_warn("map '%s': should be map-in-map.\n", 2143 map->name); 2144 return -ENOTSUP; 2145 } 2146 if (map->def.value_size && map->def.value_size != 4) { 2147 pr_warn("map '%s': conflicting value size %u != 4.\n", 2148 map->name, map->def.value_size); 2149 return -EINVAL; 2150 } 2151 map->def.value_size = 4; 2152 t = btf__type_by_id(obj->btf, m->type); 2153 if (!t) { 2154 pr_warn("map '%s': map-in-map inner type [%d] not found.\n", 2155 map->name, m->type); 2156 return -EINVAL; 2157 } 2158 if (!btf_is_array(t) || btf_array(t)->nelems) { 2159 pr_warn("map '%s': map-in-map inner spec is not a zero-sized array.\n", 2160 map->name); 2161 return -EINVAL; 2162 } 2163 t = skip_mods_and_typedefs(obj->btf, btf_array(t)->type, 2164 NULL); 2165 if (!btf_is_ptr(t)) { 2166 pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n", 2167 map->name, btf_kind_str(t)); 2168 return -EINVAL; 2169 } 2170 t = skip_mods_and_typedefs(obj->btf, t->type, NULL); 2171 if (!btf_is_struct(t)) { 2172 pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n", 2173 map->name, btf_kind_str(t)); 2174 return -EINVAL; 2175 } 2176 2177 map->inner_map = calloc(1, sizeof(*map->inner_map)); 2178 if (!map->inner_map) 2179 return -ENOMEM; 2180 map->inner_map->sec_idx = obj->efile.btf_maps_shndx; 2181 map->inner_map->name = malloc(strlen(map->name) + 2182 sizeof(".inner") + 1); 2183 if (!map->inner_map->name) 2184 return -ENOMEM; 2185 sprintf(map->inner_map->name, "%s.inner", map->name); 2186 2187 err = parse_btf_map_def(obj, map->inner_map, t, strict, 2188 true /* is_inner */, NULL); 2189 if (err) 2190 return err; 2191 } else if (strcmp(name, "pinning") == 0) { 2192 __u32 val; 2193 int err; 2194 2195 if (is_inner) { 2196 pr_debug("map '%s': inner def can't be pinned.\n", 2197 map->name); 2198 return -EINVAL; 2199 } 2200 if (!get_map_field_int(map->name, obj->btf, m, &val)) 2201 return -EINVAL; 2202 pr_debug("map '%s': found pinning = %u.\n", 2203 map->name, val); 2204 2205 if (val != LIBBPF_PIN_NONE && 2206 val != LIBBPF_PIN_BY_NAME) { 2207 pr_warn("map '%s': invalid pinning value %u.\n", 2208 map->name, val); 2209 return -EINVAL; 2210 } 2211 if (val == LIBBPF_PIN_BY_NAME) { 2212 err = build_map_pin_path(map, pin_root_path); 2213 if (err) { 2214 pr_warn("map '%s': couldn't build pin path.\n", 2215 map->name); 2216 return err; 2217 } 2218 } 2219 } else { 2220 if (strict) { 2221 pr_warn("map '%s': unknown field '%s'.\n", 2222 map->name, name); 2223 return -ENOTSUP; 2224 } 2225 pr_debug("map '%s': ignoring unknown field '%s'.\n", 2226 map->name, name); 2227 } 2228 } 2229 2230 if (map->def.type == BPF_MAP_TYPE_UNSPEC) { 2231 pr_warn("map '%s': map type isn't specified.\n", map->name); 2232 return -EINVAL; 2233 } 2234 2235 return 0; 2236 } 2237 2238 static int bpf_object__init_user_btf_map(struct bpf_object *obj, 2239 const struct btf_type *sec, 2240 int var_idx, int sec_idx, 2241 const Elf_Data *data, bool strict, 2242 const char *pin_root_path) 2243 { 2244 const struct btf_type *var, *def; 2245 const struct btf_var_secinfo *vi; 2246 const struct btf_var *var_extra; 2247 const char *map_name; 2248 struct bpf_map *map; 2249 2250 vi = btf_var_secinfos(sec) + var_idx; 2251 var = btf__type_by_id(obj->btf, vi->type); 2252 var_extra = btf_var(var); 2253 map_name = btf__name_by_offset(obj->btf, var->name_off); 2254 2255 if 
(map_name == NULL || map_name[0] == '\0') { 2256 pr_warn("map #%d: empty name.\n", var_idx); 2257 return -EINVAL; 2258 } 2259 if ((__u64)vi->offset + vi->size > data->d_size) { 2260 pr_warn("map '%s' BTF data is corrupted.\n", map_name); 2261 return -EINVAL; 2262 } 2263 if (!btf_is_var(var)) { 2264 pr_warn("map '%s': unexpected var kind %s.\n", 2265 map_name, btf_kind_str(var)); 2266 return -EINVAL; 2267 } 2268 if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED && 2269 var_extra->linkage != BTF_VAR_STATIC) { 2270 pr_warn("map '%s': unsupported var linkage %u.\n", 2271 map_name, var_extra->linkage); 2272 return -EOPNOTSUPP; 2273 } 2274 2275 def = skip_mods_and_typedefs(obj->btf, var->type, NULL); 2276 if (!btf_is_struct(def)) { 2277 pr_warn("map '%s': unexpected def kind %s.\n", 2278 map_name, btf_kind_str(var)); 2279 return -EINVAL; 2280 } 2281 if (def->size > vi->size) { 2282 pr_warn("map '%s': invalid def size.\n", map_name); 2283 return -EINVAL; 2284 } 2285 2286 map = bpf_object__add_map(obj); 2287 if (IS_ERR(map)) 2288 return PTR_ERR(map); 2289 map->name = strdup(map_name); 2290 if (!map->name) { 2291 pr_warn("map '%s': failed to alloc map name.\n", map_name); 2292 return -ENOMEM; 2293 } 2294 map->libbpf_type = LIBBPF_MAP_UNSPEC; 2295 map->def.type = BPF_MAP_TYPE_UNSPEC; 2296 map->sec_idx = sec_idx; 2297 map->sec_offset = vi->offset; 2298 map->btf_var_idx = var_idx; 2299 pr_debug("map '%s': at sec_idx %d, offset %zu.\n", 2300 map_name, map->sec_idx, map->sec_offset); 2301 2302 return parse_btf_map_def(obj, map, def, strict, false, pin_root_path); 2303 } 2304 2305 static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict, 2306 const char *pin_root_path) 2307 { 2308 const struct btf_type *sec = NULL; 2309 int nr_types, i, vlen, err; 2310 const struct btf_type *t; 2311 const char *name; 2312 Elf_Data *data; 2313 Elf_Scn *scn; 2314 2315 if (obj->efile.btf_maps_shndx < 0) 2316 return 0; 2317 2318 scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx); 2319 data = elf_sec_data(obj, scn); 2320 if (!scn || !data) { 2321 pr_warn("elf: failed to get %s map definitions for %s\n", 2322 MAPS_ELF_SEC, obj->path); 2323 return -EINVAL; 2324 } 2325 2326 nr_types = btf__get_nr_types(obj->btf); 2327 for (i = 1; i <= nr_types; i++) { 2328 t = btf__type_by_id(obj->btf, i); 2329 if (!btf_is_datasec(t)) 2330 continue; 2331 name = btf__name_by_offset(obj->btf, t->name_off); 2332 if (strcmp(name, MAPS_ELF_SEC) == 0) { 2333 sec = t; 2334 obj->efile.btf_maps_sec_btf_id = i; 2335 break; 2336 } 2337 } 2338 2339 if (!sec) { 2340 pr_warn("DATASEC '%s' not found.\n", MAPS_ELF_SEC); 2341 return -ENOENT; 2342 } 2343 2344 vlen = btf_vlen(sec); 2345 for (i = 0; i < vlen; i++) { 2346 err = bpf_object__init_user_btf_map(obj, sec, i, 2347 obj->efile.btf_maps_shndx, 2348 data, strict, 2349 pin_root_path); 2350 if (err) 2351 return err; 2352 } 2353 2354 return 0; 2355 } 2356 2357 static int bpf_object__init_maps(struct bpf_object *obj, 2358 const struct bpf_object_open_opts *opts) 2359 { 2360 const char *pin_root_path; 2361 bool strict; 2362 int err; 2363 2364 strict = !OPTS_GET(opts, relaxed_maps, false); 2365 pin_root_path = OPTS_GET(opts, pin_root_path, NULL); 2366 2367 err = bpf_object__init_user_maps(obj, strict); 2368 err = err ?: bpf_object__init_user_btf_maps(obj, strict, pin_root_path); 2369 err = err ?: bpf_object__init_global_data_maps(obj); 2370 err = err ?: bpf_object__init_kconfig_map(obj); 2371 err = err ?: bpf_object__init_struct_ops_maps(obj); 2372 if (err) 2373 return err; 2374 2375 return 
0; 2376 } 2377 2378 static bool section_have_execinstr(struct bpf_object *obj, int idx) 2379 { 2380 GElf_Shdr sh; 2381 2382 if (elf_sec_hdr(obj, elf_sec_by_idx(obj, idx), &sh)) 2383 return false; 2384 2385 return sh.sh_flags & SHF_EXECINSTR; 2386 } 2387 2388 static bool btf_needs_sanitization(struct bpf_object *obj) 2389 { 2390 bool has_func_global = kernel_supports(FEAT_BTF_GLOBAL_FUNC); 2391 bool has_datasec = kernel_supports(FEAT_BTF_DATASEC); 2392 bool has_float = kernel_supports(FEAT_BTF_FLOAT); 2393 bool has_func = kernel_supports(FEAT_BTF_FUNC); 2394 2395 return !has_func || !has_datasec || !has_func_global || !has_float; 2396 } 2397 2398 static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf) 2399 { 2400 bool has_func_global = kernel_supports(FEAT_BTF_GLOBAL_FUNC); 2401 bool has_datasec = kernel_supports(FEAT_BTF_DATASEC); 2402 bool has_float = kernel_supports(FEAT_BTF_FLOAT); 2403 bool has_func = kernel_supports(FEAT_BTF_FUNC); 2404 struct btf_type *t; 2405 int i, j, vlen; 2406 2407 for (i = 1; i <= btf__get_nr_types(btf); i++) { 2408 t = (struct btf_type *)btf__type_by_id(btf, i); 2409 2410 if (!has_datasec && btf_is_var(t)) { 2411 /* replace VAR with INT */ 2412 t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0); 2413 /* 2414 * using size = 1 is the safest choice, 4 will be too 2415 * big and cause kernel BTF validation failure if 2416 * original variable took less than 4 bytes 2417 */ 2418 t->size = 1; 2419 *(int *)(t + 1) = BTF_INT_ENC(0, 0, 8); 2420 } else if (!has_datasec && btf_is_datasec(t)) { 2421 /* replace DATASEC with STRUCT */ 2422 const struct btf_var_secinfo *v = btf_var_secinfos(t); 2423 struct btf_member *m = btf_members(t); 2424 struct btf_type *vt; 2425 char *name; 2426 2427 name = (char *)btf__name_by_offset(btf, t->name_off); 2428 while (*name) { 2429 if (*name == '.') 2430 *name = '_'; 2431 name++; 2432 } 2433 2434 vlen = btf_vlen(t); 2435 t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen); 2436 for (j = 0; j < vlen; j++, v++, m++) { 2437 /* order of field assignments is important */ 2438 m->offset = v->offset * 8; 2439 m->type = v->type; 2440 /* preserve variable name as member name */ 2441 vt = (void *)btf__type_by_id(btf, v->type); 2442 m->name_off = vt->name_off; 2443 } 2444 } else if (!has_func && btf_is_func_proto(t)) { 2445 /* replace FUNC_PROTO with ENUM */ 2446 vlen = btf_vlen(t); 2447 t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen); 2448 t->size = sizeof(__u32); /* kernel enforced */ 2449 } else if (!has_func && btf_is_func(t)) { 2450 /* replace FUNC with TYPEDEF */ 2451 t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0); 2452 } else if (!has_func_global && btf_is_func(t)) { 2453 /* replace BTF_FUNC_GLOBAL with BTF_FUNC_STATIC */ 2454 t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0); 2455 } else if (!has_float && btf_is_float(t)) { 2456 /* replace FLOAT with an equally-sized empty STRUCT; 2457 * since C compilers do not accept e.g. 
"float" as a 2458 * valid struct name, make it anonymous 2459 */ 2460 t->name_off = 0; 2461 t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0); 2462 } 2463 } 2464 } 2465 2466 static bool libbpf_needs_btf(const struct bpf_object *obj) 2467 { 2468 return obj->efile.btf_maps_shndx >= 0 || 2469 obj->efile.st_ops_shndx >= 0 || 2470 obj->nr_extern > 0; 2471 } 2472 2473 static bool kernel_needs_btf(const struct bpf_object *obj) 2474 { 2475 return obj->efile.st_ops_shndx >= 0; 2476 } 2477 2478 static int bpf_object__init_btf(struct bpf_object *obj, 2479 Elf_Data *btf_data, 2480 Elf_Data *btf_ext_data) 2481 { 2482 int err = -ENOENT; 2483 2484 if (btf_data) { 2485 obj->btf = btf__new(btf_data->d_buf, btf_data->d_size); 2486 if (IS_ERR(obj->btf)) { 2487 err = PTR_ERR(obj->btf); 2488 obj->btf = NULL; 2489 pr_warn("Error loading ELF section %s: %d.\n", 2490 BTF_ELF_SEC, err); 2491 goto out; 2492 } 2493 /* enforce 8-byte pointers for BPF-targeted BTFs */ 2494 btf__set_pointer_size(obj->btf, 8); 2495 err = 0; 2496 } 2497 if (btf_ext_data) { 2498 if (!obj->btf) { 2499 pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n", 2500 BTF_EXT_ELF_SEC, BTF_ELF_SEC); 2501 goto out; 2502 } 2503 obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, 2504 btf_ext_data->d_size); 2505 if (IS_ERR(obj->btf_ext)) { 2506 pr_warn("Error loading ELF section %s: %ld. Ignored and continue.\n", 2507 BTF_EXT_ELF_SEC, PTR_ERR(obj->btf_ext)); 2508 obj->btf_ext = NULL; 2509 goto out; 2510 } 2511 } 2512 out: 2513 if (err && libbpf_needs_btf(obj)) { 2514 pr_warn("BTF is required, but is missing or corrupted.\n"); 2515 return err; 2516 } 2517 return 0; 2518 } 2519 2520 static int bpf_object__finalize_btf(struct bpf_object *obj) 2521 { 2522 int err; 2523 2524 if (!obj->btf) 2525 return 0; 2526 2527 err = btf__finalize_data(obj, obj->btf); 2528 if (err) { 2529 pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err); 2530 return err; 2531 } 2532 2533 return 0; 2534 } 2535 2536 static bool prog_needs_vmlinux_btf(struct bpf_program *prog) 2537 { 2538 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS || 2539 prog->type == BPF_PROG_TYPE_LSM) 2540 return true; 2541 2542 /* BPF_PROG_TYPE_TRACING programs which do not attach to other programs 2543 * also need vmlinux BTF 2544 */ 2545 if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd) 2546 return true; 2547 2548 return false; 2549 } 2550 2551 static bool obj_needs_vmlinux_btf(const struct bpf_object *obj) 2552 { 2553 struct bpf_program *prog; 2554 int i; 2555 2556 /* CO-RE relocations need kernel BTF */ 2557 if (obj->btf_ext && obj->btf_ext->core_relo_info.len) 2558 return true; 2559 2560 /* Support for typed ksyms needs kernel BTF */ 2561 for (i = 0; i < obj->nr_extern; i++) { 2562 const struct extern_desc *ext; 2563 2564 ext = &obj->externs[i]; 2565 if (ext->type == EXT_KSYM && ext->ksym.type_id) 2566 return true; 2567 } 2568 2569 bpf_object__for_each_program(prog, obj) { 2570 if (!prog->load) 2571 continue; 2572 if (prog_needs_vmlinux_btf(prog)) 2573 return true; 2574 } 2575 2576 return false; 2577 } 2578 2579 static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force) 2580 { 2581 int err; 2582 2583 /* btf_vmlinux could be loaded earlier */ 2584 if (obj->btf_vmlinux) 2585 return 0; 2586 2587 if (!force && !obj_needs_vmlinux_btf(obj)) 2588 return 0; 2589 2590 obj->btf_vmlinux = libbpf_find_kernel_btf(); 2591 if (IS_ERR(obj->btf_vmlinux)) { 2592 err = PTR_ERR(obj->btf_vmlinux); 2593 pr_warn("Error loading vmlinux BTF: %d\n", err); 2594 
obj->btf_vmlinux = NULL; 2595 return err; 2596 } 2597 return 0; 2598 } 2599 2600 static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj) 2601 { 2602 struct btf *kern_btf = obj->btf; 2603 bool btf_mandatory, sanitize; 2604 int err = 0; 2605 2606 if (!obj->btf) 2607 return 0; 2608 2609 if (!kernel_supports(FEAT_BTF)) { 2610 if (kernel_needs_btf(obj)) { 2611 err = -EOPNOTSUPP; 2612 goto report; 2613 } 2614 pr_debug("Kernel doesn't support BTF, skipping uploading it.\n"); 2615 return 0; 2616 } 2617 2618 sanitize = btf_needs_sanitization(obj); 2619 if (sanitize) { 2620 const void *raw_data; 2621 __u32 sz; 2622 2623 /* clone BTF to sanitize a copy and leave the original intact */ 2624 raw_data = btf__get_raw_data(obj->btf, &sz); 2625 kern_btf = btf__new(raw_data, sz); 2626 if (IS_ERR(kern_btf)) 2627 return PTR_ERR(kern_btf); 2628 2629 /* enforce 8-byte pointers for BPF-targeted BTFs */ 2630 btf__set_pointer_size(obj->btf, 8); 2631 bpf_object__sanitize_btf(obj, kern_btf); 2632 } 2633 2634 err = btf__load(kern_btf); 2635 if (sanitize) { 2636 if (!err) { 2637 /* move fd to libbpf's BTF */ 2638 btf__set_fd(obj->btf, btf__fd(kern_btf)); 2639 btf__set_fd(kern_btf, -1); 2640 } 2641 btf__free(kern_btf); 2642 } 2643 report: 2644 if (err) { 2645 btf_mandatory = kernel_needs_btf(obj); 2646 pr_warn("Error loading .BTF into kernel: %d. %s\n", err, 2647 btf_mandatory ? "BTF is mandatory, can't proceed." 2648 : "BTF is optional, ignoring."); 2649 if (!btf_mandatory) 2650 err = 0; 2651 } 2652 return err; 2653 } 2654 2655 static const char *elf_sym_str(const struct bpf_object *obj, size_t off) 2656 { 2657 const char *name; 2658 2659 name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off); 2660 if (!name) { 2661 pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n", 2662 off, obj->path, elf_errmsg(-1)); 2663 return NULL; 2664 } 2665 2666 return name; 2667 } 2668 2669 static const char *elf_sec_str(const struct bpf_object *obj, size_t off) 2670 { 2671 const char *name; 2672 2673 name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off); 2674 if (!name) { 2675 pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n", 2676 off, obj->path, elf_errmsg(-1)); 2677 return NULL; 2678 } 2679 2680 return name; 2681 } 2682 2683 static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx) 2684 { 2685 Elf_Scn *scn; 2686 2687 scn = elf_getscn(obj->efile.elf, idx); 2688 if (!scn) { 2689 pr_warn("elf: failed to get section(%zu) from %s: %s\n", 2690 idx, obj->path, elf_errmsg(-1)); 2691 return NULL; 2692 } 2693 return scn; 2694 } 2695 2696 static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name) 2697 { 2698 Elf_Scn *scn = NULL; 2699 Elf *elf = obj->efile.elf; 2700 const char *sec_name; 2701 2702 while ((scn = elf_nextscn(elf, scn)) != NULL) { 2703 sec_name = elf_sec_name(obj, scn); 2704 if (!sec_name) 2705 return NULL; 2706 2707 if (strcmp(sec_name, name) != 0) 2708 continue; 2709 2710 return scn; 2711 } 2712 return NULL; 2713 } 2714 2715 static int elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn, GElf_Shdr *hdr) 2716 { 2717 if (!scn) 2718 return -EINVAL; 2719 2720 if (gelf_getshdr(scn, hdr) != hdr) { 2721 pr_warn("elf: failed to get section(%zu) header from %s: %s\n", 2722 elf_ndxscn(scn), obj->path, elf_errmsg(-1)); 2723 return -EINVAL; 2724 } 2725 2726 return 0; 2727 } 2728 2729 static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn) 2730 { 2731 const char *name; 2732 GElf_Shdr sh; 2733 2734 if (!scn) 
2735 return NULL; 2736 2737 if (elf_sec_hdr(obj, scn, &sh)) 2738 return NULL; 2739 2740 name = elf_sec_str(obj, sh.sh_name); 2741 if (!name) { 2742 pr_warn("elf: failed to get section(%zu) name from %s: %s\n", 2743 elf_ndxscn(scn), obj->path, elf_errmsg(-1)); 2744 return NULL; 2745 } 2746 2747 return name; 2748 } 2749 2750 static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn) 2751 { 2752 Elf_Data *data; 2753 2754 if (!scn) 2755 return NULL; 2756 2757 data = elf_getdata(scn, 0); 2758 if (!data) { 2759 pr_warn("elf: failed to get section(%zu) %s data from %s: %s\n", 2760 elf_ndxscn(scn), elf_sec_name(obj, scn) ?: "<?>", 2761 obj->path, elf_errmsg(-1)); 2762 return NULL; 2763 } 2764 2765 return data; 2766 } 2767 2768 static int elf_sym_by_sec_off(const struct bpf_object *obj, size_t sec_idx, 2769 size_t off, __u32 sym_type, GElf_Sym *sym) 2770 { 2771 Elf_Data *symbols = obj->efile.symbols; 2772 size_t n = symbols->d_size / sizeof(GElf_Sym); 2773 int i; 2774 2775 for (i = 0; i < n; i++) { 2776 if (!gelf_getsym(symbols, i, sym)) 2777 continue; 2778 if (sym->st_shndx != sec_idx || sym->st_value != off) 2779 continue; 2780 if (GELF_ST_TYPE(sym->st_info) != sym_type) 2781 continue; 2782 return 0; 2783 } 2784 2785 return -ENOENT; 2786 } 2787 2788 static bool is_sec_name_dwarf(const char *name) 2789 { 2790 /* approximation, but the actual list is too long */ 2791 return strncmp(name, ".debug_", sizeof(".debug_") - 1) == 0; 2792 } 2793 2794 static bool ignore_elf_section(GElf_Shdr *hdr, const char *name) 2795 { 2796 /* no special handling of .strtab */ 2797 if (hdr->sh_type == SHT_STRTAB) 2798 return true; 2799 2800 /* ignore .llvm_addrsig section as well */ 2801 if (hdr->sh_type == SHT_LLVM_ADDRSIG) 2802 return true; 2803 2804 /* no subprograms will lead to an empty .text section, ignore it */ 2805 if (hdr->sh_type == SHT_PROGBITS && hdr->sh_size == 0 && 2806 strcmp(name, ".text") == 0) 2807 return true; 2808 2809 /* DWARF sections */ 2810 if (is_sec_name_dwarf(name)) 2811 return true; 2812 2813 if (strncmp(name, ".rel", sizeof(".rel") - 1) == 0) { 2814 name += sizeof(".rel") - 1; 2815 /* DWARF section relocations */ 2816 if (is_sec_name_dwarf(name)) 2817 return true; 2818 2819 /* .BTF and .BTF.ext don't need relocations */ 2820 if (strcmp(name, BTF_ELF_SEC) == 0 || 2821 strcmp(name, BTF_EXT_ELF_SEC) == 0) 2822 return true; 2823 } 2824 2825 return false; 2826 } 2827 2828 static int cmp_progs(const void *_a, const void *_b) 2829 { 2830 const struct bpf_program *a = _a; 2831 const struct bpf_program *b = _b; 2832 2833 if (a->sec_idx != b->sec_idx) 2834 return a->sec_idx < b->sec_idx ? -1 : 1; 2835 2836 /* sec_insn_off can't be the same within the section */ 2837 return a->sec_insn_off < b->sec_insn_off ? 
-1 : 1; 2838 } 2839 2840 static int bpf_object__elf_collect(struct bpf_object *obj) 2841 { 2842 Elf *elf = obj->efile.elf; 2843 Elf_Data *btf_ext_data = NULL; 2844 Elf_Data *btf_data = NULL; 2845 int idx = 0, err = 0; 2846 const char *name; 2847 Elf_Data *data; 2848 Elf_Scn *scn; 2849 GElf_Shdr sh; 2850 2851 /* a bunch of ELF parsing functionality depends on processing symbols, 2852 * so do the first pass and find the symbol table 2853 */ 2854 scn = NULL; 2855 while ((scn = elf_nextscn(elf, scn)) != NULL) { 2856 if (elf_sec_hdr(obj, scn, &sh)) 2857 return -LIBBPF_ERRNO__FORMAT; 2858 2859 if (sh.sh_type == SHT_SYMTAB) { 2860 if (obj->efile.symbols) { 2861 pr_warn("elf: multiple symbol tables in %s\n", obj->path); 2862 return -LIBBPF_ERRNO__FORMAT; 2863 } 2864 2865 data = elf_sec_data(obj, scn); 2866 if (!data) 2867 return -LIBBPF_ERRNO__FORMAT; 2868 2869 obj->efile.symbols = data; 2870 obj->efile.symbols_shndx = elf_ndxscn(scn); 2871 obj->efile.strtabidx = sh.sh_link; 2872 } 2873 } 2874 2875 scn = NULL; 2876 while ((scn = elf_nextscn(elf, scn)) != NULL) { 2877 idx++; 2878 2879 if (elf_sec_hdr(obj, scn, &sh)) 2880 return -LIBBPF_ERRNO__FORMAT; 2881 2882 name = elf_sec_str(obj, sh.sh_name); 2883 if (!name) 2884 return -LIBBPF_ERRNO__FORMAT; 2885 2886 if (ignore_elf_section(&sh, name)) 2887 continue; 2888 2889 data = elf_sec_data(obj, scn); 2890 if (!data) 2891 return -LIBBPF_ERRNO__FORMAT; 2892 2893 pr_debug("elf: section(%d) %s, size %ld, link %d, flags %lx, type=%d\n", 2894 idx, name, (unsigned long)data->d_size, 2895 (int)sh.sh_link, (unsigned long)sh.sh_flags, 2896 (int)sh.sh_type); 2897 2898 if (strcmp(name, "license") == 0) { 2899 err = bpf_object__init_license(obj, data->d_buf, data->d_size); 2900 if (err) 2901 return err; 2902 } else if (strcmp(name, "version") == 0) { 2903 err = bpf_object__init_kversion(obj, data->d_buf, data->d_size); 2904 if (err) 2905 return err; 2906 } else if (strcmp(name, "maps") == 0) { 2907 obj->efile.maps_shndx = idx; 2908 } else if (strcmp(name, MAPS_ELF_SEC) == 0) { 2909 obj->efile.btf_maps_shndx = idx; 2910 } else if (strcmp(name, BTF_ELF_SEC) == 0) { 2911 btf_data = data; 2912 } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) { 2913 btf_ext_data = data; 2914 } else if (sh.sh_type == SHT_SYMTAB) { 2915 /* already processed during the first pass above */ 2916 } else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) { 2917 if (sh.sh_flags & SHF_EXECINSTR) { 2918 if (strcmp(name, ".text") == 0) 2919 obj->efile.text_shndx = idx; 2920 err = bpf_object__add_programs(obj, data, name, idx); 2921 if (err) 2922 return err; 2923 } else if (strcmp(name, DATA_SEC) == 0) { 2924 obj->efile.data = data; 2925 obj->efile.data_shndx = idx; 2926 } else if (strcmp(name, RODATA_SEC) == 0) { 2927 obj->efile.rodata = data; 2928 obj->efile.rodata_shndx = idx; 2929 } else if (strcmp(name, STRUCT_OPS_SEC) == 0) { 2930 obj->efile.st_ops_data = data; 2931 obj->efile.st_ops_shndx = idx; 2932 } else { 2933 pr_info("elf: skipping unrecognized data section(%d) %s\n", 2934 idx, name); 2935 } 2936 } else if (sh.sh_type == SHT_REL) { 2937 int nr_sects = obj->efile.nr_reloc_sects; 2938 void *sects = obj->efile.reloc_sects; 2939 int sec = sh.sh_info; /* points to other section */ 2940 2941 /* Only do relo for section with exec instructions */ 2942 if (!section_have_execinstr(obj, sec) && 2943 strcmp(name, ".rel" STRUCT_OPS_SEC) && 2944 strcmp(name, ".rel" MAPS_ELF_SEC)) { 2945 pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n", 2946 idx, name, sec, 2947 elf_sec_name(obj, 
elf_sec_by_idx(obj, sec)) ?: "<?>"); 2948 continue; 2949 } 2950 2951 sects = libbpf_reallocarray(sects, nr_sects + 1, 2952 sizeof(*obj->efile.reloc_sects)); 2953 if (!sects) 2954 return -ENOMEM; 2955 2956 obj->efile.reloc_sects = sects; 2957 obj->efile.nr_reloc_sects++; 2958 2959 obj->efile.reloc_sects[nr_sects].shdr = sh; 2960 obj->efile.reloc_sects[nr_sects].data = data; 2961 } else if (sh.sh_type == SHT_NOBITS && strcmp(name, BSS_SEC) == 0) { 2962 obj->efile.bss = data; 2963 obj->efile.bss_shndx = idx; 2964 } else { 2965 pr_info("elf: skipping section(%d) %s (size %zu)\n", idx, name, 2966 (size_t)sh.sh_size); 2967 } 2968 } 2969 2970 if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) { 2971 pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path); 2972 return -LIBBPF_ERRNO__FORMAT; 2973 } 2974 2975 /* sort BPF programs by section name and in-section instruction offset 2976 * for faster search */ 2977 qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs); 2978 2979 return bpf_object__init_btf(obj, btf_data, btf_ext_data); 2980 } 2981 2982 static bool sym_is_extern(const GElf_Sym *sym) 2983 { 2984 int bind = GELF_ST_BIND(sym->st_info); 2985 /* externs are symbols w/ type=NOTYPE, bind=GLOBAL|WEAK, section=UND */ 2986 return sym->st_shndx == SHN_UNDEF && 2987 (bind == STB_GLOBAL || bind == STB_WEAK) && 2988 GELF_ST_TYPE(sym->st_info) == STT_NOTYPE; 2989 } 2990 2991 static bool sym_is_subprog(const GElf_Sym *sym, int text_shndx) 2992 { 2993 int bind = GELF_ST_BIND(sym->st_info); 2994 int type = GELF_ST_TYPE(sym->st_info); 2995 2996 /* in .text section */ 2997 if (sym->st_shndx != text_shndx) 2998 return false; 2999 3000 /* local function */ 3001 if (bind == STB_LOCAL && type == STT_SECTION) 3002 return true; 3003 3004 /* global function */ 3005 return bind == STB_GLOBAL && type == STT_FUNC; 3006 } 3007 3008 static int find_extern_btf_id(const struct btf *btf, const char *ext_name) 3009 { 3010 const struct btf_type *t; 3011 const char *var_name; 3012 int i, n; 3013 3014 if (!btf) 3015 return -ESRCH; 3016 3017 n = btf__get_nr_types(btf); 3018 for (i = 1; i <= n; i++) { 3019 t = btf__type_by_id(btf, i); 3020 3021 if (!btf_is_var(t)) 3022 continue; 3023 3024 var_name = btf__name_by_offset(btf, t->name_off); 3025 if (strcmp(var_name, ext_name)) 3026 continue; 3027 3028 if (btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN) 3029 return -EINVAL; 3030 3031 return i; 3032 } 3033 3034 return -ENOENT; 3035 } 3036 3037 static int find_extern_sec_btf_id(struct btf *btf, int ext_btf_id) { 3038 const struct btf_var_secinfo *vs; 3039 const struct btf_type *t; 3040 int i, j, n; 3041 3042 if (!btf) 3043 return -ESRCH; 3044 3045 n = btf__get_nr_types(btf); 3046 for (i = 1; i <= n; i++) { 3047 t = btf__type_by_id(btf, i); 3048 3049 if (!btf_is_datasec(t)) 3050 continue; 3051 3052 vs = btf_var_secinfos(t); 3053 for (j = 0; j < btf_vlen(t); j++, vs++) { 3054 if (vs->type == ext_btf_id) 3055 return i; 3056 } 3057 } 3058 3059 return -ENOENT; 3060 } 3061 3062 static enum kcfg_type find_kcfg_type(const struct btf *btf, int id, 3063 bool *is_signed) 3064 { 3065 const struct btf_type *t; 3066 const char *name; 3067 3068 t = skip_mods_and_typedefs(btf, id, NULL); 3069 name = btf__name_by_offset(btf, t->name_off); 3070 3071 if (is_signed) 3072 *is_signed = false; 3073 switch (btf_kind(t)) { 3074 case BTF_KIND_INT: { 3075 int enc = btf_int_encoding(t); 3076 3077 if (enc & BTF_INT_BOOL) 3078 return t->size == 1 ? 
KCFG_BOOL : KCFG_UNKNOWN; 3079 if (is_signed) 3080 *is_signed = enc & BTF_INT_SIGNED; 3081 if (t->size == 1) 3082 return KCFG_CHAR; 3083 if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1))) 3084 return KCFG_UNKNOWN; 3085 return KCFG_INT; 3086 } 3087 case BTF_KIND_ENUM: 3088 if (t->size != 4) 3089 return KCFG_UNKNOWN; 3090 if (strcmp(name, "libbpf_tristate")) 3091 return KCFG_UNKNOWN; 3092 return KCFG_TRISTATE; 3093 case BTF_KIND_ARRAY: 3094 if (btf_array(t)->nelems == 0) 3095 return KCFG_UNKNOWN; 3096 if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR) 3097 return KCFG_UNKNOWN; 3098 return KCFG_CHAR_ARR; 3099 default: 3100 return KCFG_UNKNOWN; 3101 } 3102 } 3103 3104 static int cmp_externs(const void *_a, const void *_b) 3105 { 3106 const struct extern_desc *a = _a; 3107 const struct extern_desc *b = _b; 3108 3109 if (a->type != b->type) 3110 return a->type < b->type ? -1 : 1; 3111 3112 if (a->type == EXT_KCFG) { 3113 /* descending order by alignment requirements */ 3114 if (a->kcfg.align != b->kcfg.align) 3115 return a->kcfg.align > b->kcfg.align ? -1 : 1; 3116 /* ascending order by size, within same alignment class */ 3117 if (a->kcfg.sz != b->kcfg.sz) 3118 return a->kcfg.sz < b->kcfg.sz ? -1 : 1; 3119 } 3120 3121 /* resolve ties by name */ 3122 return strcmp(a->name, b->name); 3123 } 3124 3125 static int find_int_btf_id(const struct btf *btf) 3126 { 3127 const struct btf_type *t; 3128 int i, n; 3129 3130 n = btf__get_nr_types(btf); 3131 for (i = 1; i <= n; i++) { 3132 t = btf__type_by_id(btf, i); 3133 3134 if (btf_is_int(t) && btf_int_bits(t) == 32) 3135 return i; 3136 } 3137 3138 return 0; 3139 } 3140 3141 static int bpf_object__collect_externs(struct bpf_object *obj) 3142 { 3143 struct btf_type *sec, *kcfg_sec = NULL, *ksym_sec = NULL; 3144 const struct btf_type *t; 3145 struct extern_desc *ext; 3146 int i, n, off; 3147 const char *ext_name, *sec_name; 3148 Elf_Scn *scn; 3149 GElf_Shdr sh; 3150 3151 if (!obj->efile.symbols) 3152 return 0; 3153 3154 scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx); 3155 if (elf_sec_hdr(obj, scn, &sh)) 3156 return -LIBBPF_ERRNO__FORMAT; 3157 3158 n = sh.sh_size / sh.sh_entsize; 3159 pr_debug("looking for externs among %d symbols...\n", n); 3160 3161 for (i = 0; i < n; i++) { 3162 GElf_Sym sym; 3163 3164 if (!gelf_getsym(obj->efile.symbols, i, &sym)) 3165 return -LIBBPF_ERRNO__FORMAT; 3166 if (!sym_is_extern(&sym)) 3167 continue; 3168 ext_name = elf_sym_str(obj, sym.st_name); 3169 if (!ext_name || !ext_name[0]) 3170 continue; 3171 3172 ext = obj->externs; 3173 ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext)); 3174 if (!ext) 3175 return -ENOMEM; 3176 obj->externs = ext; 3177 ext = &ext[obj->nr_extern]; 3178 memset(ext, 0, sizeof(*ext)); 3179 obj->nr_extern++; 3180 3181 ext->btf_id = find_extern_btf_id(obj->btf, ext_name); 3182 if (ext->btf_id <= 0) { 3183 pr_warn("failed to find BTF for extern '%s': %d\n", 3184 ext_name, ext->btf_id); 3185 return ext->btf_id; 3186 } 3187 t = btf__type_by_id(obj->btf, ext->btf_id); 3188 ext->name = btf__name_by_offset(obj->btf, t->name_off); 3189 ext->sym_idx = i; 3190 ext->is_weak = GELF_ST_BIND(sym.st_info) == STB_WEAK; 3191 3192 ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id); 3193 if (ext->sec_btf_id <= 0) { 3194 pr_warn("failed to find BTF for extern '%s' [%d] section: %d\n", 3195 ext_name, ext->btf_id, ext->sec_btf_id); 3196 return ext->sec_btf_id; 3197 } 3198 sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id); 3199 sec_name = btf__name_by_offset(obj->btf, 
sec->name_off); 3200 3201 if (strcmp(sec_name, KCONFIG_SEC) == 0) { 3202 kcfg_sec = sec; 3203 ext->type = EXT_KCFG; 3204 ext->kcfg.sz = btf__resolve_size(obj->btf, t->type); 3205 if (ext->kcfg.sz <= 0) { 3206 pr_warn("failed to resolve size of extern (kcfg) '%s': %d\n", 3207 ext_name, ext->kcfg.sz); 3208 return ext->kcfg.sz; 3209 } 3210 ext->kcfg.align = btf__align_of(obj->btf, t->type); 3211 if (ext->kcfg.align <= 0) { 3212 pr_warn("failed to determine alignment of extern (kcfg) '%s': %d\n", 3213 ext_name, ext->kcfg.align); 3214 return -EINVAL; 3215 } 3216 ext->kcfg.type = find_kcfg_type(obj->btf, t->type, 3217 &ext->kcfg.is_signed); 3218 if (ext->kcfg.type == KCFG_UNKNOWN) { 3219 pr_warn("extern (kcfg) '%s' type is unsupported\n", ext_name); 3220 return -ENOTSUP; 3221 } 3222 } else if (strcmp(sec_name, KSYMS_SEC) == 0) { 3223 ksym_sec = sec; 3224 ext->type = EXT_KSYM; 3225 skip_mods_and_typedefs(obj->btf, t->type, 3226 &ext->ksym.type_id); 3227 } else { 3228 pr_warn("unrecognized extern section '%s'\n", sec_name); 3229 return -ENOTSUP; 3230 } 3231 } 3232 pr_debug("collected %d externs total\n", obj->nr_extern); 3233 3234 if (!obj->nr_extern) 3235 return 0; 3236 3237 /* sort externs by type, for kcfg ones also by (align, size, name) */ 3238 qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs); 3239 3240 /* for .ksyms section, we need to turn all externs into allocated 3241 * variables in BTF to pass kernel verification; we do this by 3242 * pretending that each extern is a 8-byte variable 3243 */ 3244 if (ksym_sec) { 3245 /* find existing 4-byte integer type in BTF to use for fake 3246 * extern variables in DATASEC 3247 */ 3248 int int_btf_id = find_int_btf_id(obj->btf); 3249 3250 for (i = 0; i < obj->nr_extern; i++) { 3251 ext = &obj->externs[i]; 3252 if (ext->type != EXT_KSYM) 3253 continue; 3254 pr_debug("extern (ksym) #%d: symbol %d, name %s\n", 3255 i, ext->sym_idx, ext->name); 3256 } 3257 3258 sec = ksym_sec; 3259 n = btf_vlen(sec); 3260 for (i = 0, off = 0; i < n; i++, off += sizeof(int)) { 3261 struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i; 3262 struct btf_type *vt; 3263 3264 vt = (void *)btf__type_by_id(obj->btf, vs->type); 3265 ext_name = btf__name_by_offset(obj->btf, vt->name_off); 3266 ext = find_extern_by_name(obj, ext_name); 3267 if (!ext) { 3268 pr_warn("failed to find extern definition for BTF var '%s'\n", 3269 ext_name); 3270 return -ESRCH; 3271 } 3272 btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED; 3273 vt->type = int_btf_id; 3274 vs->offset = off; 3275 vs->size = sizeof(int); 3276 } 3277 sec->size = off; 3278 } 3279 3280 if (kcfg_sec) { 3281 sec = kcfg_sec; 3282 /* for kcfg externs calculate their offsets within a .kconfig map */ 3283 off = 0; 3284 for (i = 0; i < obj->nr_extern; i++) { 3285 ext = &obj->externs[i]; 3286 if (ext->type != EXT_KCFG) 3287 continue; 3288 3289 ext->kcfg.data_off = roundup(off, ext->kcfg.align); 3290 off = ext->kcfg.data_off + ext->kcfg.sz; 3291 pr_debug("extern (kcfg) #%d: symbol %d, off %u, name %s\n", 3292 i, ext->sym_idx, ext->kcfg.data_off, ext->name); 3293 } 3294 sec->size = off; 3295 n = btf_vlen(sec); 3296 for (i = 0; i < n; i++) { 3297 struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i; 3298 3299 t = btf__type_by_id(obj->btf, vs->type); 3300 ext_name = btf__name_by_offset(obj->btf, t->name_off); 3301 ext = find_extern_by_name(obj, ext_name); 3302 if (!ext) { 3303 pr_warn("failed to find extern definition for BTF var '%s'\n", 3304 ext_name); 3305 return -ESRCH; 3306 } 3307 btf_var(t)->linkage = 
BTF_VAR_GLOBAL_ALLOCATED; 3308 vs->offset = ext->kcfg.data_off; 3309 } 3310 } 3311 return 0; 3312 } 3313 3314 struct bpf_program * 3315 bpf_object__find_program_by_title(const struct bpf_object *obj, 3316 const char *title) 3317 { 3318 struct bpf_program *pos; 3319 3320 bpf_object__for_each_program(pos, obj) { 3321 if (pos->sec_name && !strcmp(pos->sec_name, title)) 3322 return pos; 3323 } 3324 return NULL; 3325 } 3326 3327 static bool prog_is_subprog(const struct bpf_object *obj, 3328 const struct bpf_program *prog) 3329 { 3330 /* For legacy reasons, libbpf supports an entry-point BPF programs 3331 * without SEC() attribute, i.e., those in the .text section. But if 3332 * there are 2 or more such programs in the .text section, they all 3333 * must be subprograms called from entry-point BPF programs in 3334 * designated SEC()'tions, otherwise there is no way to distinguish 3335 * which of those programs should be loaded vs which are a subprogram. 3336 * Similarly, if there is a function/program in .text and at least one 3337 * other BPF program with custom SEC() attribute, then we just assume 3338 * .text programs are subprograms (even if they are not called from 3339 * other programs), because libbpf never explicitly supported mixing 3340 * SEC()-designated BPF programs and .text entry-point BPF programs. 3341 */ 3342 return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1; 3343 } 3344 3345 struct bpf_program * 3346 bpf_object__find_program_by_name(const struct bpf_object *obj, 3347 const char *name) 3348 { 3349 struct bpf_program *prog; 3350 3351 bpf_object__for_each_program(prog, obj) { 3352 if (prog_is_subprog(obj, prog)) 3353 continue; 3354 if (!strcmp(prog->name, name)) 3355 return prog; 3356 } 3357 return NULL; 3358 } 3359 3360 static bool bpf_object__shndx_is_data(const struct bpf_object *obj, 3361 int shndx) 3362 { 3363 return shndx == obj->efile.data_shndx || 3364 shndx == obj->efile.bss_shndx || 3365 shndx == obj->efile.rodata_shndx; 3366 } 3367 3368 static bool bpf_object__shndx_is_maps(const struct bpf_object *obj, 3369 int shndx) 3370 { 3371 return shndx == obj->efile.maps_shndx || 3372 shndx == obj->efile.btf_maps_shndx; 3373 } 3374 3375 static enum libbpf_map_type 3376 bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx) 3377 { 3378 if (shndx == obj->efile.data_shndx) 3379 return LIBBPF_MAP_DATA; 3380 else if (shndx == obj->efile.bss_shndx) 3381 return LIBBPF_MAP_BSS; 3382 else if (shndx == obj->efile.rodata_shndx) 3383 return LIBBPF_MAP_RODATA; 3384 else if (shndx == obj->efile.symbols_shndx) 3385 return LIBBPF_MAP_KCONFIG; 3386 else 3387 return LIBBPF_MAP_UNSPEC; 3388 } 3389 3390 static int bpf_program__record_reloc(struct bpf_program *prog, 3391 struct reloc_desc *reloc_desc, 3392 __u32 insn_idx, const char *sym_name, 3393 const GElf_Sym *sym, const GElf_Rel *rel) 3394 { 3395 struct bpf_insn *insn = &prog->insns[insn_idx]; 3396 size_t map_idx, nr_maps = prog->obj->nr_maps; 3397 struct bpf_object *obj = prog->obj; 3398 __u32 shdr_idx = sym->st_shndx; 3399 enum libbpf_map_type type; 3400 const char *sym_sec_name; 3401 struct bpf_map *map; 3402 3403 reloc_desc->processed = false; 3404 3405 /* sub-program call relocation */ 3406 if (insn->code == (BPF_JMP | BPF_CALL)) { 3407 if (insn->src_reg != BPF_PSEUDO_CALL) { 3408 pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name); 3409 return -LIBBPF_ERRNO__RELOC; 3410 } 3411 /* text_shndx can be 0, if no default "main" program exists */ 3412 if (!shdr_idx || shdr_idx != 
obj->efile.text_shndx) { 3413 sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx)); 3414 pr_warn("prog '%s': bad call relo against '%s' in section '%s'\n", 3415 prog->name, sym_name, sym_sec_name); 3416 return -LIBBPF_ERRNO__RELOC; 3417 } 3418 if (sym->st_value % BPF_INSN_SZ) { 3419 pr_warn("prog '%s': bad call relo against '%s' at offset %zu\n", 3420 prog->name, sym_name, (size_t)sym->st_value); 3421 return -LIBBPF_ERRNO__RELOC; 3422 } 3423 reloc_desc->type = RELO_CALL; 3424 reloc_desc->insn_idx = insn_idx; 3425 reloc_desc->sym_off = sym->st_value; 3426 return 0; 3427 } 3428 3429 if (!is_ldimm64(insn)) { 3430 pr_warn("prog '%s': invalid relo against '%s' for insns[%d].code 0x%x\n", 3431 prog->name, sym_name, insn_idx, insn->code); 3432 return -LIBBPF_ERRNO__RELOC; 3433 } 3434 3435 if (sym_is_extern(sym)) { 3436 int sym_idx = GELF_R_SYM(rel->r_info); 3437 int i, n = obj->nr_extern; 3438 struct extern_desc *ext; 3439 3440 for (i = 0; i < n; i++) { 3441 ext = &obj->externs[i]; 3442 if (ext->sym_idx == sym_idx) 3443 break; 3444 } 3445 if (i >= n) { 3446 pr_warn("prog '%s': extern relo failed to find extern for '%s' (%d)\n", 3447 prog->name, sym_name, sym_idx); 3448 return -LIBBPF_ERRNO__RELOC; 3449 } 3450 pr_debug("prog '%s': found extern #%d '%s' (sym %d) for insn #%u\n", 3451 prog->name, i, ext->name, ext->sym_idx, insn_idx); 3452 reloc_desc->type = RELO_EXTERN; 3453 reloc_desc->insn_idx = insn_idx; 3454 reloc_desc->sym_off = i; /* sym_off stores extern index */ 3455 return 0; 3456 } 3457 3458 if (!shdr_idx || shdr_idx >= SHN_LORESERVE) { 3459 pr_warn("prog '%s': invalid relo against '%s' in special section 0x%x; forgot to initialize global var?..\n", 3460 prog->name, sym_name, shdr_idx); 3461 return -LIBBPF_ERRNO__RELOC; 3462 } 3463 3464 /* loading subprog addresses */ 3465 if (sym_is_subprog(sym, obj->efile.text_shndx)) { 3466 /* global_func: sym->st_value = offset in the section, insn->imm = 0. 3467 * local_func: sym->st_value = 0, insn->imm = offset in the section. 
3468 */ 3469 if ((sym->st_value % BPF_INSN_SZ) || (insn->imm % BPF_INSN_SZ)) { 3470 pr_warn("prog '%s': bad subprog addr relo against '%s' at offset %zu+%d\n", 3471 prog->name, sym_name, (size_t)sym->st_value, insn->imm); 3472 return -LIBBPF_ERRNO__RELOC; 3473 } 3474 3475 reloc_desc->type = RELO_SUBPROG_ADDR; 3476 reloc_desc->insn_idx = insn_idx; 3477 reloc_desc->sym_off = sym->st_value; 3478 return 0; 3479 } 3480 3481 type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx); 3482 sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx)); 3483 3484 /* generic map reference relocation */ 3485 if (type == LIBBPF_MAP_UNSPEC) { 3486 if (!bpf_object__shndx_is_maps(obj, shdr_idx)) { 3487 pr_warn("prog '%s': bad map relo against '%s' in section '%s'\n", 3488 prog->name, sym_name, sym_sec_name); 3489 return -LIBBPF_ERRNO__RELOC; 3490 } 3491 for (map_idx = 0; map_idx < nr_maps; map_idx++) { 3492 map = &obj->maps[map_idx]; 3493 if (map->libbpf_type != type || 3494 map->sec_idx != sym->st_shndx || 3495 map->sec_offset != sym->st_value) 3496 continue; 3497 pr_debug("prog '%s': found map %zd (%s, sec %d, off %zu) for insn #%u\n", 3498 prog->name, map_idx, map->name, map->sec_idx, 3499 map->sec_offset, insn_idx); 3500 break; 3501 } 3502 if (map_idx >= nr_maps) { 3503 pr_warn("prog '%s': map relo failed to find map for section '%s', off %zu\n", 3504 prog->name, sym_sec_name, (size_t)sym->st_value); 3505 return -LIBBPF_ERRNO__RELOC; 3506 } 3507 reloc_desc->type = RELO_LD64; 3508 reloc_desc->insn_idx = insn_idx; 3509 reloc_desc->map_idx = map_idx; 3510 reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */ 3511 return 0; 3512 } 3513 3514 /* global data map relocation */ 3515 if (!bpf_object__shndx_is_data(obj, shdr_idx)) { 3516 pr_warn("prog '%s': bad data relo against section '%s'\n", 3517 prog->name, sym_sec_name); 3518 return -LIBBPF_ERRNO__RELOC; 3519 } 3520 for (map_idx = 0; map_idx < nr_maps; map_idx++) { 3521 map = &obj->maps[map_idx]; 3522 if (map->libbpf_type != type) 3523 continue; 3524 pr_debug("prog '%s': found data map %zd (%s, sec %d, off %zu) for insn %u\n", 3525 prog->name, map_idx, map->name, map->sec_idx, 3526 map->sec_offset, insn_idx); 3527 break; 3528 } 3529 if (map_idx >= nr_maps) { 3530 pr_warn("prog '%s': data relo failed to find map for section '%s'\n", 3531 prog->name, sym_sec_name); 3532 return -LIBBPF_ERRNO__RELOC; 3533 } 3534 3535 reloc_desc->type = RELO_DATA; 3536 reloc_desc->insn_idx = insn_idx; 3537 reloc_desc->map_idx = map_idx; 3538 reloc_desc->sym_off = sym->st_value; 3539 return 0; 3540 } 3541 3542 static bool prog_contains_insn(const struct bpf_program *prog, size_t insn_idx) 3543 { 3544 return insn_idx >= prog->sec_insn_off && 3545 insn_idx < prog->sec_insn_off + prog->sec_insn_cnt; 3546 } 3547 3548 static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj, 3549 size_t sec_idx, size_t insn_idx) 3550 { 3551 int l = 0, r = obj->nr_programs - 1, m; 3552 struct bpf_program *prog; 3553 3554 while (l < r) { 3555 m = l + (r - l + 1) / 2; 3556 prog = &obj->programs[m]; 3557 3558 if (prog->sec_idx < sec_idx || 3559 (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx)) 3560 l = m; 3561 else 3562 r = m - 1; 3563 } 3564 /* matching program could be at index l, but it still might be the 3565 * wrong one, so we need to double check conditions for the last time 3566 */ 3567 prog = &obj->programs[l]; 3568 if (prog->sec_idx == sec_idx && prog_contains_insn(prog, insn_idx)) 3569 return prog; 3570 return NULL; 3571 } 3572 3573 static int 
3574 bpf_object__collect_prog_relos(struct bpf_object *obj, GElf_Shdr *shdr, Elf_Data *data) 3575 { 3576 Elf_Data *symbols = obj->efile.symbols; 3577 const char *relo_sec_name, *sec_name; 3578 size_t sec_idx = shdr->sh_info; 3579 struct bpf_program *prog; 3580 struct reloc_desc *relos; 3581 int err, i, nrels; 3582 const char *sym_name; 3583 __u32 insn_idx; 3584 GElf_Sym sym; 3585 GElf_Rel rel; 3586 3587 relo_sec_name = elf_sec_str(obj, shdr->sh_name); 3588 sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx)); 3589 if (!relo_sec_name || !sec_name) 3590 return -EINVAL; 3591 3592 pr_debug("sec '%s': collecting relocation for section(%zu) '%s'\n", 3593 relo_sec_name, sec_idx, sec_name); 3594 nrels = shdr->sh_size / shdr->sh_entsize; 3595 3596 for (i = 0; i < nrels; i++) { 3597 if (!gelf_getrel(data, i, &rel)) { 3598 pr_warn("sec '%s': failed to get relo #%d\n", relo_sec_name, i); 3599 return -LIBBPF_ERRNO__FORMAT; 3600 } 3601 if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) { 3602 pr_warn("sec '%s': symbol 0x%zx not found for relo #%d\n", 3603 relo_sec_name, (size_t)GELF_R_SYM(rel.r_info), i); 3604 return -LIBBPF_ERRNO__FORMAT; 3605 } 3606 if (rel.r_offset % BPF_INSN_SZ) { 3607 pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n", 3608 relo_sec_name, (size_t)GELF_R_SYM(rel.r_info), i); 3609 return -LIBBPF_ERRNO__FORMAT; 3610 } 3611 3612 insn_idx = rel.r_offset / BPF_INSN_SZ; 3613 /* relocations against static functions are recorded as 3614 * relocations against the section that contains a function; 3615 * in such case, symbol will be STT_SECTION and sym.st_name 3616 * will point to empty string (0), so fetch section name 3617 * instead 3618 */ 3619 if (GELF_ST_TYPE(sym.st_info) == STT_SECTION && sym.st_name == 0) 3620 sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym.st_shndx)); 3621 else 3622 sym_name = elf_sym_str(obj, sym.st_name); 3623 sym_name = sym_name ?: "<?"; 3624 3625 pr_debug("sec '%s': relo #%d: insn #%u against '%s'\n", 3626 relo_sec_name, i, insn_idx, sym_name); 3627 3628 prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx); 3629 if (!prog) { 3630 pr_warn("sec '%s': relo #%d: program not found in section '%s' for insn #%u\n", 3631 relo_sec_name, i, sec_name, insn_idx); 3632 return -LIBBPF_ERRNO__RELOC; 3633 } 3634 3635 relos = libbpf_reallocarray(prog->reloc_desc, 3636 prog->nr_reloc + 1, sizeof(*relos)); 3637 if (!relos) 3638 return -ENOMEM; 3639 prog->reloc_desc = relos; 3640 3641 /* adjust insn_idx to local BPF program frame of reference */ 3642 insn_idx -= prog->sec_insn_off; 3643 err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc], 3644 insn_idx, sym_name, &sym, &rel); 3645 if (err) 3646 return err; 3647 3648 prog->nr_reloc++; 3649 } 3650 return 0; 3651 } 3652 3653 static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map) 3654 { 3655 struct bpf_map_def *def = &map->def; 3656 __u32 key_type_id = 0, value_type_id = 0; 3657 int ret; 3658 3659 /* if it's BTF-defined map, we don't need to search for type IDs. 3660 * For struct_ops map, it does not need btf_key_type_id and 3661 * btf_value_type_id. 3662 */ 3663 if (map->sec_idx == obj->efile.btf_maps_shndx || 3664 bpf_map__is_struct_ops(map)) 3665 return 0; 3666 3667 if (!bpf_map__is_internal(map)) { 3668 ret = btf__get_map_kv_tids(obj->btf, map->name, def->key_size, 3669 def->value_size, &key_type_id, 3670 &value_type_id); 3671 } else { 3672 /* 3673 * LLVM annotates global data differently in BTF, that is, 3674 * only as '.data', '.bss' or '.rodata'. 
3675 */ 3676 ret = btf__find_by_name(obj->btf, 3677 libbpf_type_to_btf_name[map->libbpf_type]); 3678 } 3679 if (ret < 0) 3680 return ret; 3681 3682 map->btf_key_type_id = key_type_id; 3683 map->btf_value_type_id = bpf_map__is_internal(map) ? 3684 ret : value_type_id; 3685 return 0; 3686 } 3687 3688 int bpf_map__reuse_fd(struct bpf_map *map, int fd) 3689 { 3690 struct bpf_map_info info = {}; 3691 __u32 len = sizeof(info); 3692 int new_fd, err; 3693 char *new_name; 3694 3695 err = bpf_obj_get_info_by_fd(fd, &info, &len); 3696 if (err) 3697 return err; 3698 3699 new_name = strdup(info.name); 3700 if (!new_name) 3701 return -errno; 3702 3703 new_fd = open("/", O_RDONLY | O_CLOEXEC); 3704 if (new_fd < 0) { 3705 err = -errno; 3706 goto err_free_new_name; 3707 } 3708 3709 new_fd = dup3(fd, new_fd, O_CLOEXEC); 3710 if (new_fd < 0) { 3711 err = -errno; 3712 goto err_close_new_fd; 3713 } 3714 3715 err = zclose(map->fd); 3716 if (err) { 3717 err = -errno; 3718 goto err_close_new_fd; 3719 } 3720 free(map->name); 3721 3722 map->fd = new_fd; 3723 map->name = new_name; 3724 map->def.type = info.type; 3725 map->def.key_size = info.key_size; 3726 map->def.value_size = info.value_size; 3727 map->def.max_entries = info.max_entries; 3728 map->def.map_flags = info.map_flags; 3729 map->btf_key_type_id = info.btf_key_type_id; 3730 map->btf_value_type_id = info.btf_value_type_id; 3731 map->reused = true; 3732 3733 return 0; 3734 3735 err_close_new_fd: 3736 close(new_fd); 3737 err_free_new_name: 3738 free(new_name); 3739 return err; 3740 } 3741 3742 __u32 bpf_map__max_entries(const struct bpf_map *map) 3743 { 3744 return map->def.max_entries; 3745 } 3746 3747 int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries) 3748 { 3749 if (map->fd >= 0) 3750 return -EBUSY; 3751 map->def.max_entries = max_entries; 3752 return 0; 3753 } 3754 3755 int bpf_map__resize(struct bpf_map *map, __u32 max_entries) 3756 { 3757 if (!map || !max_entries) 3758 return -EINVAL; 3759 3760 return bpf_map__set_max_entries(map, max_entries); 3761 } 3762 3763 static int 3764 bpf_object__probe_loading(struct bpf_object *obj) 3765 { 3766 struct bpf_load_program_attr attr; 3767 char *cp, errmsg[STRERR_BUFSIZE]; 3768 struct bpf_insn insns[] = { 3769 BPF_MOV64_IMM(BPF_REG_0, 0), 3770 BPF_EXIT_INSN(), 3771 }; 3772 int ret; 3773 3774 /* make sure basic loading works */ 3775 3776 memset(&attr, 0, sizeof(attr)); 3777 attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER; 3778 attr.insns = insns; 3779 attr.insns_cnt = ARRAY_SIZE(insns); 3780 attr.license = "GPL"; 3781 3782 ret = bpf_load_program_xattr(&attr, NULL, 0); 3783 if (ret < 0) { 3784 ret = errno; 3785 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg)); 3786 pr_warn("Error in %s():%s(%d). Couldn't load trivial BPF " 3787 "program. 
Make sure your kernel supports BPF " 3788 "(CONFIG_BPF_SYSCALL=y) and/or that RLIMIT_MEMLOCK is " 3789 "set to big enough value.\n", __func__, cp, ret); 3790 return -ret; 3791 } 3792 close(ret); 3793 3794 return 0; 3795 } 3796 3797 static int probe_fd(int fd) 3798 { 3799 if (fd >= 0) 3800 close(fd); 3801 return fd >= 0; 3802 } 3803 3804 static int probe_kern_prog_name(void) 3805 { 3806 struct bpf_load_program_attr attr; 3807 struct bpf_insn insns[] = { 3808 BPF_MOV64_IMM(BPF_REG_0, 0), 3809 BPF_EXIT_INSN(), 3810 }; 3811 int ret; 3812 3813 /* make sure loading with name works */ 3814 3815 memset(&attr, 0, sizeof(attr)); 3816 attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER; 3817 attr.insns = insns; 3818 attr.insns_cnt = ARRAY_SIZE(insns); 3819 attr.license = "GPL"; 3820 attr.name = "test"; 3821 ret = bpf_load_program_xattr(&attr, NULL, 0); 3822 return probe_fd(ret); 3823 } 3824 3825 static int probe_kern_global_data(void) 3826 { 3827 struct bpf_load_program_attr prg_attr; 3828 struct bpf_create_map_attr map_attr; 3829 char *cp, errmsg[STRERR_BUFSIZE]; 3830 struct bpf_insn insns[] = { 3831 BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16), 3832 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42), 3833 BPF_MOV64_IMM(BPF_REG_0, 0), 3834 BPF_EXIT_INSN(), 3835 }; 3836 int ret, map; 3837 3838 memset(&map_attr, 0, sizeof(map_attr)); 3839 map_attr.map_type = BPF_MAP_TYPE_ARRAY; 3840 map_attr.key_size = sizeof(int); 3841 map_attr.value_size = 32; 3842 map_attr.max_entries = 1; 3843 3844 map = bpf_create_map_xattr(&map_attr); 3845 if (map < 0) { 3846 ret = -errno; 3847 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg)); 3848 pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n", 3849 __func__, cp, -ret); 3850 return ret; 3851 } 3852 3853 insns[0].imm = map; 3854 3855 memset(&prg_attr, 0, sizeof(prg_attr)); 3856 prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER; 3857 prg_attr.insns = insns; 3858 prg_attr.insns_cnt = ARRAY_SIZE(insns); 3859 prg_attr.license = "GPL"; 3860 3861 ret = bpf_load_program_xattr(&prg_attr, NULL, 0); 3862 close(map); 3863 return probe_fd(ret); 3864 } 3865 3866 static int probe_kern_btf(void) 3867 { 3868 static const char strs[] = "\0int"; 3869 __u32 types[] = { 3870 /* int */ 3871 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), 3872 }; 3873 3874 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types), 3875 strs, sizeof(strs))); 3876 } 3877 3878 static int probe_kern_btf_func(void) 3879 { 3880 static const char strs[] = "\0int\0x\0a"; 3881 /* void x(int a) {} */ 3882 __u32 types[] = { 3883 /* int */ 3884 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ 3885 /* FUNC_PROTO */ /* [2] */ 3886 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0), 3887 BTF_PARAM_ENC(7, 1), 3888 /* FUNC x */ /* [3] */ 3889 BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2), 3890 }; 3891 3892 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types), 3893 strs, sizeof(strs))); 3894 } 3895 3896 static int probe_kern_btf_func_global(void) 3897 { 3898 static const char strs[] = "\0int\0x\0a"; 3899 /* static void x(int a) {} */ 3900 __u32 types[] = { 3901 /* int */ 3902 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ 3903 /* FUNC_PROTO */ /* [2] */ 3904 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0), 3905 BTF_PARAM_ENC(7, 1), 3906 /* FUNC x BTF_FUNC_GLOBAL */ /* [3] */ 3907 BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2), 3908 }; 3909 3910 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types), 3911 strs, sizeof(strs))); 3912 } 3913 3914 
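/* Illustrative aside (not part of libbpf): the BTF_TYPE_*_ENC() macros used by
 * these probes (defined in libbpf's internal headers) emit the raw __u32 words
 * of a struct btf_type. The "int" entry used above,
 * BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), is roughly equivalent to:
 *
 *   __u32 int_words[] = {
 *           1,                                  // name_off -> "int" in strs
 *           BTF_INFO_ENC(BTF_KIND_INT, 0, 0),   // kind, kind_flag, vlen
 *           4,                                  // type size in bytes
 *           BTF_INT_ENC(BTF_INT_SIGNED, 0, 32), // encoding, bits_offset, nr_bits
 *   };
 *
 * Each probe feeds such a minimal type section to the kernel via
 * libbpf__load_raw_btf(); probe_fd() then just reports whether the kernel
 * accepted it.
 */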
static int probe_kern_btf_datasec(void) 3915 { 3916 static const char strs[] = "\0x\0.data"; 3917 /* static int a; */ 3918 __u32 types[] = { 3919 /* int */ 3920 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ 3921 /* VAR x */ /* [2] */ 3922 BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1), 3923 BTF_VAR_STATIC, 3924 /* DATASEC val */ /* [3] */ 3925 BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4), 3926 BTF_VAR_SECINFO_ENC(2, 0, 4), 3927 }; 3928 3929 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types), 3930 strs, sizeof(strs))); 3931 } 3932 3933 static int probe_kern_btf_float(void) 3934 { 3935 static const char strs[] = "\0float"; 3936 __u32 types[] = { 3937 /* float */ 3938 BTF_TYPE_FLOAT_ENC(1, 4), 3939 }; 3940 3941 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types), 3942 strs, sizeof(strs))); 3943 } 3944 3945 static int probe_kern_array_mmap(void) 3946 { 3947 struct bpf_create_map_attr attr = { 3948 .map_type = BPF_MAP_TYPE_ARRAY, 3949 .map_flags = BPF_F_MMAPABLE, 3950 .key_size = sizeof(int), 3951 .value_size = sizeof(int), 3952 .max_entries = 1, 3953 }; 3954 3955 return probe_fd(bpf_create_map_xattr(&attr)); 3956 } 3957 3958 static int probe_kern_exp_attach_type(void) 3959 { 3960 struct bpf_load_program_attr attr; 3961 struct bpf_insn insns[] = { 3962 BPF_MOV64_IMM(BPF_REG_0, 0), 3963 BPF_EXIT_INSN(), 3964 }; 3965 3966 memset(&attr, 0, sizeof(attr)); 3967 /* use any valid combination of program type and (optional) 3968 * non-zero expected attach type (i.e., not a BPF_CGROUP_INET_INGRESS) 3969 * to see if kernel supports expected_attach_type field for 3970 * BPF_PROG_LOAD command 3971 */ 3972 attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCK; 3973 attr.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE; 3974 attr.insns = insns; 3975 attr.insns_cnt = ARRAY_SIZE(insns); 3976 attr.license = "GPL"; 3977 3978 return probe_fd(bpf_load_program_xattr(&attr, NULL, 0)); 3979 } 3980 3981 static int probe_kern_probe_read_kernel(void) 3982 { 3983 struct bpf_load_program_attr attr; 3984 struct bpf_insn insns[] = { 3985 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), /* r1 = r10 (fp) */ 3986 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), /* r1 += -8 */ 3987 BPF_MOV64_IMM(BPF_REG_2, 8), /* r2 = 8 */ 3988 BPF_MOV64_IMM(BPF_REG_3, 0), /* r3 = 0 */ 3989 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel), 3990 BPF_EXIT_INSN(), 3991 }; 3992 3993 memset(&attr, 0, sizeof(attr)); 3994 attr.prog_type = BPF_PROG_TYPE_KPROBE; 3995 attr.insns = insns; 3996 attr.insns_cnt = ARRAY_SIZE(insns); 3997 attr.license = "GPL"; 3998 3999 return probe_fd(bpf_load_program_xattr(&attr, NULL, 0)); 4000 } 4001 4002 static int probe_prog_bind_map(void) 4003 { 4004 struct bpf_load_program_attr prg_attr; 4005 struct bpf_create_map_attr map_attr; 4006 char *cp, errmsg[STRERR_BUFSIZE]; 4007 struct bpf_insn insns[] = { 4008 BPF_MOV64_IMM(BPF_REG_0, 0), 4009 BPF_EXIT_INSN(), 4010 }; 4011 int ret, map, prog; 4012 4013 memset(&map_attr, 0, sizeof(map_attr)); 4014 map_attr.map_type = BPF_MAP_TYPE_ARRAY; 4015 map_attr.key_size = sizeof(int); 4016 map_attr.value_size = 32; 4017 map_attr.max_entries = 1; 4018 4019 map = bpf_create_map_xattr(&map_attr); 4020 if (map < 0) { 4021 ret = -errno; 4022 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg)); 4023 pr_warn("Error in %s():%s(%d). 
Couldn't create simple array map.\n", 4024 __func__, cp, -ret); 4025 return ret; 4026 } 4027 4028 memset(&prg_attr, 0, sizeof(prg_attr)); 4029 prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER; 4030 prg_attr.insns = insns; 4031 prg_attr.insns_cnt = ARRAY_SIZE(insns); 4032 prg_attr.license = "GPL"; 4033 4034 prog = bpf_load_program_xattr(&prg_attr, NULL, 0); 4035 if (prog < 0) { 4036 close(map); 4037 return 0; 4038 } 4039 4040 ret = bpf_prog_bind_map(prog, map, NULL); 4041 4042 close(map); 4043 close(prog); 4044 4045 return ret >= 0; 4046 } 4047 4048 static int probe_module_btf(void) 4049 { 4050 static const char strs[] = "\0int"; 4051 __u32 types[] = { 4052 /* int */ 4053 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), 4054 }; 4055 struct bpf_btf_info info; 4056 __u32 len = sizeof(info); 4057 char name[16]; 4058 int fd, err; 4059 4060 fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs)); 4061 if (fd < 0) 4062 return 0; /* BTF not supported at all */ 4063 4064 memset(&info, 0, sizeof(info)); 4065 info.name = ptr_to_u64(name); 4066 info.name_len = sizeof(name); 4067 4068 /* check that BPF_OBJ_GET_INFO_BY_FD supports specifying name pointer; 4069 * kernel's module BTF support coincides with support for 4070 * name/name_len fields in struct bpf_btf_info. 4071 */ 4072 err = bpf_obj_get_info_by_fd(fd, &info, &len); 4073 close(fd); 4074 return !err; 4075 } 4076 4077 enum kern_feature_result { 4078 FEAT_UNKNOWN = 0, 4079 FEAT_SUPPORTED = 1, 4080 FEAT_MISSING = 2, 4081 }; 4082 4083 typedef int (*feature_probe_fn)(void); 4084 4085 static struct kern_feature_desc { 4086 const char *desc; 4087 feature_probe_fn probe; 4088 enum kern_feature_result res; 4089 } feature_probes[__FEAT_CNT] = { 4090 [FEAT_PROG_NAME] = { 4091 "BPF program name", probe_kern_prog_name, 4092 }, 4093 [FEAT_GLOBAL_DATA] = { 4094 "global variables", probe_kern_global_data, 4095 }, 4096 [FEAT_BTF] = { 4097 "minimal BTF", probe_kern_btf, 4098 }, 4099 [FEAT_BTF_FUNC] = { 4100 "BTF functions", probe_kern_btf_func, 4101 }, 4102 [FEAT_BTF_GLOBAL_FUNC] = { 4103 "BTF global function", probe_kern_btf_func_global, 4104 }, 4105 [FEAT_BTF_DATASEC] = { 4106 "BTF data section and variable", probe_kern_btf_datasec, 4107 }, 4108 [FEAT_ARRAY_MMAP] = { 4109 "ARRAY map mmap()", probe_kern_array_mmap, 4110 }, 4111 [FEAT_EXP_ATTACH_TYPE] = { 4112 "BPF_PROG_LOAD expected_attach_type attribute", 4113 probe_kern_exp_attach_type, 4114 }, 4115 [FEAT_PROBE_READ_KERN] = { 4116 "bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel, 4117 }, 4118 [FEAT_PROG_BIND_MAP] = { 4119 "BPF_PROG_BIND_MAP support", probe_prog_bind_map, 4120 }, 4121 [FEAT_MODULE_BTF] = { 4122 "module BTF support", probe_module_btf, 4123 }, 4124 [FEAT_BTF_FLOAT] = { 4125 "BTF_KIND_FLOAT support", probe_kern_btf_float, 4126 }, 4127 }; 4128 4129 static bool kernel_supports(enum kern_feature_id feat_id) 4130 { 4131 struct kern_feature_desc *feat = &feature_probes[feat_id]; 4132 int ret; 4133 4134 if (READ_ONCE(feat->res) == FEAT_UNKNOWN) { 4135 ret = feat->probe(); 4136 if (ret > 0) { 4137 WRITE_ONCE(feat->res, FEAT_SUPPORTED); 4138 } else if (ret == 0) { 4139 WRITE_ONCE(feat->res, FEAT_MISSING); 4140 } else { 4141 pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret); 4142 WRITE_ONCE(feat->res, FEAT_MISSING); 4143 } 4144 } 4145 4146 return READ_ONCE(feat->res) == FEAT_SUPPORTED; 4147 } 4148 4149 static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd) 4150 { 4151 struct bpf_map_info map_info = {}; 4152 char msg[STRERR_BUFSIZE]; 
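/* Illustrative sketch (not part of libbpf): the reuse path below is typically
 * reached for a map declared with by-name pinning in the BPF program, e.g.:
 *
 *   struct {
 *           __uint(type, BPF_MAP_TYPE_HASH);
 *           __uint(max_entries, 1024);
 *           __type(key, __u32);
 *           __type(value, __u64);
 *           __uint(pinning, LIBBPF_PIN_BY_NAME);
 *   } counters SEC(".maps");
 *
 * On load, bpf_object__reuse_map() opens the existing pin path (if any),
 * map_is_reuse_compat() verifies that type, key/value sizes, max_entries and
 * flags match, and bpf_map__reuse_fd() adopts the pinned map's fd instead of
 * creating a new map.
 */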
4153 __u32 map_info_len; 4154 4155 map_info_len = sizeof(map_info); 4156 4157 if (bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len)) { 4158 pr_warn("failed to get map info for map FD %d: %s\n", 4159 map_fd, libbpf_strerror_r(errno, msg, sizeof(msg))); 4160 return false; 4161 } 4162 4163 return (map_info.type == map->def.type && 4164 map_info.key_size == map->def.key_size && 4165 map_info.value_size == map->def.value_size && 4166 map_info.max_entries == map->def.max_entries && 4167 map_info.map_flags == map->def.map_flags); 4168 } 4169 4170 static int 4171 bpf_object__reuse_map(struct bpf_map *map) 4172 { 4173 char *cp, errmsg[STRERR_BUFSIZE]; 4174 int err, pin_fd; 4175 4176 pin_fd = bpf_obj_get(map->pin_path); 4177 if (pin_fd < 0) { 4178 err = -errno; 4179 if (err == -ENOENT) { 4180 pr_debug("found no pinned map to reuse at '%s'\n", 4181 map->pin_path); 4182 return 0; 4183 } 4184 4185 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); 4186 pr_warn("couldn't retrieve pinned map '%s': %s\n", 4187 map->pin_path, cp); 4188 return err; 4189 } 4190 4191 if (!map_is_reuse_compat(map, pin_fd)) { 4192 pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n", 4193 map->pin_path); 4194 close(pin_fd); 4195 return -EINVAL; 4196 } 4197 4198 err = bpf_map__reuse_fd(map, pin_fd); 4199 if (err) { 4200 close(pin_fd); 4201 return err; 4202 } 4203 map->pinned = true; 4204 pr_debug("reused pinned map at '%s'\n", map->pin_path); 4205 4206 return 0; 4207 } 4208 4209 static int 4210 bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map) 4211 { 4212 enum libbpf_map_type map_type = map->libbpf_type; 4213 char *cp, errmsg[STRERR_BUFSIZE]; 4214 int err, zero = 0; 4215 4216 err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0); 4217 if (err) { 4218 err = -errno; 4219 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); 4220 pr_warn("Error setting initial map(%s) contents: %s\n", 4221 map->name, cp); 4222 return err; 4223 } 4224 4225 /* Freeze .rodata and .kconfig map as read-only from syscall side. 
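	 * For example, a global declared in a BPF program as, say,
	 * 'const volatile int debug_lvl;' lands in the .rodata map; once its
	 * initial value has been written above via bpf_map_update_elem(),
	 * bpf_map_freeze() below rejects any further updates from user space,
	 * while the BPF program keeps read-only access.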
*/ 4226 if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) { 4227 err = bpf_map_freeze(map->fd); 4228 if (err) { 4229 err = -errno; 4230 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); 4231 pr_warn("Error freezing map(%s) as read-only: %s\n", 4232 map->name, cp); 4233 return err; 4234 } 4235 } 4236 return 0; 4237 } 4238 4239 static void bpf_map__destroy(struct bpf_map *map); 4240 4241 static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map) 4242 { 4243 struct bpf_create_map_attr create_attr; 4244 struct bpf_map_def *def = &map->def; 4245 4246 memset(&create_attr, 0, sizeof(create_attr)); 4247 4248 if (kernel_supports(FEAT_PROG_NAME)) 4249 create_attr.name = map->name; 4250 create_attr.map_ifindex = map->map_ifindex; 4251 create_attr.map_type = def->type; 4252 create_attr.map_flags = def->map_flags; 4253 create_attr.key_size = def->key_size; 4254 create_attr.value_size = def->value_size; 4255 create_attr.numa_node = map->numa_node; 4256 4257 if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !def->max_entries) { 4258 int nr_cpus; 4259 4260 nr_cpus = libbpf_num_possible_cpus(); 4261 if (nr_cpus < 0) { 4262 pr_warn("map '%s': failed to determine number of system CPUs: %d\n", 4263 map->name, nr_cpus); 4264 return nr_cpus; 4265 } 4266 pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus); 4267 create_attr.max_entries = nr_cpus; 4268 } else { 4269 create_attr.max_entries = def->max_entries; 4270 } 4271 4272 if (bpf_map__is_struct_ops(map)) 4273 create_attr.btf_vmlinux_value_type_id = 4274 map->btf_vmlinux_value_type_id; 4275 4276 create_attr.btf_fd = 0; 4277 create_attr.btf_key_type_id = 0; 4278 create_attr.btf_value_type_id = 0; 4279 if (obj->btf && btf__fd(obj->btf) >= 0 && !bpf_map_find_btf_info(obj, map)) { 4280 create_attr.btf_fd = btf__fd(obj->btf); 4281 create_attr.btf_key_type_id = map->btf_key_type_id; 4282 create_attr.btf_value_type_id = map->btf_value_type_id; 4283 } 4284 4285 if (bpf_map_type__is_map_in_map(def->type)) { 4286 if (map->inner_map) { 4287 int err; 4288 4289 err = bpf_object__create_map(obj, map->inner_map); 4290 if (err) { 4291 pr_warn("map '%s': failed to create inner map: %d\n", 4292 map->name, err); 4293 return err; 4294 } 4295 map->inner_map_fd = bpf_map__fd(map->inner_map); 4296 } 4297 if (map->inner_map_fd >= 0) 4298 create_attr.inner_map_fd = map->inner_map_fd; 4299 } 4300 4301 map->fd = bpf_create_map_xattr(&create_attr); 4302 if (map->fd < 0 && (create_attr.btf_key_type_id || 4303 create_attr.btf_value_type_id)) { 4304 char *cp, errmsg[STRERR_BUFSIZE]; 4305 int err = -errno; 4306 4307 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); 4308 pr_warn("Error in bpf_create_map_xattr(%s):%s(%d). 
Retrying without BTF.\n", 4309 map->name, cp, err); 4310 create_attr.btf_fd = 0; 4311 create_attr.btf_key_type_id = 0; 4312 create_attr.btf_value_type_id = 0; 4313 map->btf_key_type_id = 0; 4314 map->btf_value_type_id = 0; 4315 map->fd = bpf_create_map_xattr(&create_attr); 4316 } 4317 4318 if (map->fd < 0) 4319 return -errno; 4320 4321 if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) { 4322 bpf_map__destroy(map->inner_map); 4323 zfree(&map->inner_map); 4324 } 4325 4326 return 0; 4327 } 4328 4329 static int init_map_slots(struct bpf_map *map) 4330 { 4331 const struct bpf_map *targ_map; 4332 unsigned int i; 4333 int fd, err; 4334 4335 for (i = 0; i < map->init_slots_sz; i++) { 4336 if (!map->init_slots[i]) 4337 continue; 4338 4339 targ_map = map->init_slots[i]; 4340 fd = bpf_map__fd(targ_map); 4341 err = bpf_map_update_elem(map->fd, &i, &fd, 0); 4342 if (err) { 4343 err = -errno; 4344 pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n", 4345 map->name, i, targ_map->name, 4346 fd, err); 4347 return err; 4348 } 4349 pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n", 4350 map->name, i, targ_map->name, fd); 4351 } 4352 4353 zfree(&map->init_slots); 4354 map->init_slots_sz = 0; 4355 4356 return 0; 4357 } 4358 4359 static int 4360 bpf_object__create_maps(struct bpf_object *obj) 4361 { 4362 struct bpf_map *map; 4363 char *cp, errmsg[STRERR_BUFSIZE]; 4364 unsigned int i, j; 4365 int err; 4366 4367 for (i = 0; i < obj->nr_maps; i++) { 4368 map = &obj->maps[i]; 4369 4370 if (map->pin_path) { 4371 err = bpf_object__reuse_map(map); 4372 if (err) { 4373 pr_warn("map '%s': error reusing pinned map\n", 4374 map->name); 4375 goto err_out; 4376 } 4377 } 4378 4379 if (map->fd >= 0) { 4380 pr_debug("map '%s': skipping creation (preset fd=%d)\n", 4381 map->name, map->fd); 4382 } else { 4383 err = bpf_object__create_map(obj, map); 4384 if (err) 4385 goto err_out; 4386 4387 pr_debug("map '%s': created successfully, fd=%d\n", 4388 map->name, map->fd); 4389 4390 if (bpf_map__is_internal(map)) { 4391 err = bpf_object__populate_internal_map(obj, map); 4392 if (err < 0) { 4393 zclose(map->fd); 4394 goto err_out; 4395 } 4396 } 4397 4398 if (map->init_slots_sz) { 4399 err = init_map_slots(map); 4400 if (err < 0) { 4401 zclose(map->fd); 4402 goto err_out; 4403 } 4404 } 4405 } 4406 4407 if (map->pin_path && !map->pinned) { 4408 err = bpf_map__pin(map, NULL); 4409 if (err) { 4410 pr_warn("map '%s': failed to auto-pin at '%s': %d\n", 4411 map->name, map->pin_path, err); 4412 zclose(map->fd); 4413 goto err_out; 4414 } 4415 } 4416 } 4417 4418 return 0; 4419 4420 err_out: 4421 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); 4422 pr_warn("map '%s': failed to create: %s(%d)\n", map->name, cp, err); 4423 pr_perm_msg(err); 4424 for (j = 0; j < i; j++) 4425 zclose(obj->maps[j].fd); 4426 return err; 4427 } 4428 4429 #define BPF_CORE_SPEC_MAX_LEN 64 4430 4431 /* represents BPF CO-RE field or array element accessor */ 4432 struct bpf_core_accessor { 4433 __u32 type_id; /* struct/union type or array element type */ 4434 __u32 idx; /* field index or array index */ 4435 const char *name; /* field name or NULL for array accessor */ 4436 }; 4437 4438 struct bpf_core_spec { 4439 const struct btf *btf; 4440 /* high-level spec: named fields and array indices only */ 4441 struct bpf_core_accessor spec[BPF_CORE_SPEC_MAX_LEN]; 4442 /* original unresolved (no skip_mods_or_typedefs) root type ID */ 4443 __u32 root_type_id; 4444 /* CO-RE relocation kind */ 4445 enum bpf_core_relo_kind relo_kind; 4446 /* 
high-level spec length */ 4447 int len; 4448 /* raw, low-level spec: 1-to-1 with accessor spec string */ 4449 int raw_spec[BPF_CORE_SPEC_MAX_LEN]; 4450 /* raw spec length */ 4451 int raw_len; 4452 /* field bit offset represented by spec */ 4453 __u32 bit_offset; 4454 }; 4455 4456 static bool str_is_empty(const char *s) 4457 { 4458 return !s || !s[0]; 4459 } 4460 4461 static bool is_flex_arr(const struct btf *btf, 4462 const struct bpf_core_accessor *acc, 4463 const struct btf_array *arr) 4464 { 4465 const struct btf_type *t; 4466 4467 /* not a flexible array, if not inside a struct or has non-zero size */ 4468 if (!acc->name || arr->nelems > 0) 4469 return false; 4470 4471 /* has to be the last member of enclosing struct */ 4472 t = btf__type_by_id(btf, acc->type_id); 4473 return acc->idx == btf_vlen(t) - 1; 4474 } 4475 4476 static const char *core_relo_kind_str(enum bpf_core_relo_kind kind) 4477 { 4478 switch (kind) { 4479 case BPF_FIELD_BYTE_OFFSET: return "byte_off"; 4480 case BPF_FIELD_BYTE_SIZE: return "byte_sz"; 4481 case BPF_FIELD_EXISTS: return "field_exists"; 4482 case BPF_FIELD_SIGNED: return "signed"; 4483 case BPF_FIELD_LSHIFT_U64: return "lshift_u64"; 4484 case BPF_FIELD_RSHIFT_U64: return "rshift_u64"; 4485 case BPF_TYPE_ID_LOCAL: return "local_type_id"; 4486 case BPF_TYPE_ID_TARGET: return "target_type_id"; 4487 case BPF_TYPE_EXISTS: return "type_exists"; 4488 case BPF_TYPE_SIZE: return "type_size"; 4489 case BPF_ENUMVAL_EXISTS: return "enumval_exists"; 4490 case BPF_ENUMVAL_VALUE: return "enumval_value"; 4491 default: return "unknown"; 4492 } 4493 } 4494 4495 static bool core_relo_is_field_based(enum bpf_core_relo_kind kind) 4496 { 4497 switch (kind) { 4498 case BPF_FIELD_BYTE_OFFSET: 4499 case BPF_FIELD_BYTE_SIZE: 4500 case BPF_FIELD_EXISTS: 4501 case BPF_FIELD_SIGNED: 4502 case BPF_FIELD_LSHIFT_U64: 4503 case BPF_FIELD_RSHIFT_U64: 4504 return true; 4505 default: 4506 return false; 4507 } 4508 } 4509 4510 static bool core_relo_is_type_based(enum bpf_core_relo_kind kind) 4511 { 4512 switch (kind) { 4513 case BPF_TYPE_ID_LOCAL: 4514 case BPF_TYPE_ID_TARGET: 4515 case BPF_TYPE_EXISTS: 4516 case BPF_TYPE_SIZE: 4517 return true; 4518 default: 4519 return false; 4520 } 4521 } 4522 4523 static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind) 4524 { 4525 switch (kind) { 4526 case BPF_ENUMVAL_EXISTS: 4527 case BPF_ENUMVAL_VALUE: 4528 return true; 4529 default: 4530 return false; 4531 } 4532 } 4533 4534 /* 4535 * Turn bpf_core_relo into a low- and high-level spec representation, 4536 * validating correctness along the way, as well as calculating resulting 4537 * field bit offset, specified by accessor string. Low-level spec captures 4538 * every single level of nestedness, including traversing anonymous 4539 * struct/union members. High-level one only captures semantically meaningful 4540 * "turning points": named fields and array indicies. 4541 * E.g., for this case: 4542 * 4543 * struct sample { 4544 * int __unimportant; 4545 * struct { 4546 * int __1; 4547 * int __2; 4548 * int a[7]; 4549 * }; 4550 * }; 4551 * 4552 * struct sample *s = ...; 4553 * 4554 * int x = &s->a[3]; // access string = '0:1:2:3' 4555 * 4556 * Low-level spec has 1:1 mapping with each element of access string (it's 4557 * just a parsed access string representation): [0, 1, 2, 3]. 4558 * 4559 * High-level spec will capture only 3 points: 4560 * - intial zero-index access by pointer (&s->... 
is the same as &s[0]...); 4561 * - field 'a' access (corresponds to '2' in low-level spec); 4562 * - array element #3 access (corresponds to '3' in low-level spec). 4563 * 4564 * Type-based relocations (TYPE_EXISTS/TYPE_SIZE, 4565 * TYPE_ID_LOCAL/TYPE_ID_TARGET) don't capture any field information. Their 4566 * spec and raw_spec are kept empty. 4567 * 4568 * Enum value-based relocations (ENUMVAL_EXISTS/ENUMVAL_VALUE) use access 4569 * string to specify enumerator's value index that need to be relocated. 4570 */ 4571 static int bpf_core_parse_spec(const struct btf *btf, 4572 __u32 type_id, 4573 const char *spec_str, 4574 enum bpf_core_relo_kind relo_kind, 4575 struct bpf_core_spec *spec) 4576 { 4577 int access_idx, parsed_len, i; 4578 struct bpf_core_accessor *acc; 4579 const struct btf_type *t; 4580 const char *name; 4581 __u32 id; 4582 __s64 sz; 4583 4584 if (str_is_empty(spec_str) || *spec_str == ':') 4585 return -EINVAL; 4586 4587 memset(spec, 0, sizeof(*spec)); 4588 spec->btf = btf; 4589 spec->root_type_id = type_id; 4590 spec->relo_kind = relo_kind; 4591 4592 /* type-based relocations don't have a field access string */ 4593 if (core_relo_is_type_based(relo_kind)) { 4594 if (strcmp(spec_str, "0")) 4595 return -EINVAL; 4596 return 0; 4597 } 4598 4599 /* parse spec_str="0:1:2:3:4" into array raw_spec=[0, 1, 2, 3, 4] */ 4600 while (*spec_str) { 4601 if (*spec_str == ':') 4602 ++spec_str; 4603 if (sscanf(spec_str, "%d%n", &access_idx, &parsed_len) != 1) 4604 return -EINVAL; 4605 if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN) 4606 return -E2BIG; 4607 spec_str += parsed_len; 4608 spec->raw_spec[spec->raw_len++] = access_idx; 4609 } 4610 4611 if (spec->raw_len == 0) 4612 return -EINVAL; 4613 4614 t = skip_mods_and_typedefs(btf, type_id, &id); 4615 if (!t) 4616 return -EINVAL; 4617 4618 access_idx = spec->raw_spec[0]; 4619 acc = &spec->spec[0]; 4620 acc->type_id = id; 4621 acc->idx = access_idx; 4622 spec->len++; 4623 4624 if (core_relo_is_enumval_based(relo_kind)) { 4625 if (!btf_is_enum(t) || spec->raw_len > 1 || access_idx >= btf_vlen(t)) 4626 return -EINVAL; 4627 4628 /* record enumerator name in a first accessor */ 4629 acc->name = btf__name_by_offset(btf, btf_enum(t)[access_idx].name_off); 4630 return 0; 4631 } 4632 4633 if (!core_relo_is_field_based(relo_kind)) 4634 return -EINVAL; 4635 4636 sz = btf__resolve_size(btf, id); 4637 if (sz < 0) 4638 return sz; 4639 spec->bit_offset = access_idx * sz * 8; 4640 4641 for (i = 1; i < spec->raw_len; i++) { 4642 t = skip_mods_and_typedefs(btf, id, &id); 4643 if (!t) 4644 return -EINVAL; 4645 4646 access_idx = spec->raw_spec[i]; 4647 acc = &spec->spec[spec->len]; 4648 4649 if (btf_is_composite(t)) { 4650 const struct btf_member *m; 4651 __u32 bit_offset; 4652 4653 if (access_idx >= btf_vlen(t)) 4654 return -EINVAL; 4655 4656 bit_offset = btf_member_bit_offset(t, access_idx); 4657 spec->bit_offset += bit_offset; 4658 4659 m = btf_members(t) + access_idx; 4660 if (m->name_off) { 4661 name = btf__name_by_offset(btf, m->name_off); 4662 if (str_is_empty(name)) 4663 return -EINVAL; 4664 4665 acc->type_id = id; 4666 acc->idx = access_idx; 4667 acc->name = name; 4668 spec->len++; 4669 } 4670 4671 id = m->type; 4672 } else if (btf_is_array(t)) { 4673 const struct btf_array *a = btf_array(t); 4674 bool flex; 4675 4676 t = skip_mods_and_typedefs(btf, a->type, &id); 4677 if (!t) 4678 return -EINVAL; 4679 4680 flex = is_flex_arr(btf, acc - 1, a); 4681 if (!flex && access_idx >= a->nelems) 4682 return -EINVAL; 4683 4684 spec->spec[spec->len].type_id = id; 4685 
spec->spec[spec->len].idx = access_idx; 4686 spec->len++; 4687 4688 sz = btf__resolve_size(btf, id); 4689 if (sz < 0) 4690 return sz; 4691 spec->bit_offset += access_idx * sz * 8; 4692 } else { 4693 pr_warn("relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %s\n", 4694 type_id, spec_str, i, id, btf_kind_str(t)); 4695 return -EINVAL; 4696 } 4697 } 4698 4699 return 0; 4700 } 4701 4702 static bool bpf_core_is_flavor_sep(const char *s) 4703 { 4704 /* check X___Y name pattern, where X and Y are not underscores */ 4705 return s[0] != '_' && /* X */ 4706 s[1] == '_' && s[2] == '_' && s[3] == '_' && /* ___ */ 4707 s[4] != '_'; /* Y */ 4708 } 4709 4710 /* Given 'some_struct_name___with_flavor' return the length of a name prefix 4711 * before last triple underscore. Struct name part after last triple 4712 * underscore is ignored by BPF CO-RE relocation during relocation matching. 4713 */ 4714 static size_t bpf_core_essential_name_len(const char *name) 4715 { 4716 size_t n = strlen(name); 4717 int i; 4718 4719 for (i = n - 5; i >= 0; i--) { 4720 if (bpf_core_is_flavor_sep(name + i)) 4721 return i + 1; 4722 } 4723 return n; 4724 } 4725 4726 struct core_cand 4727 { 4728 const struct btf *btf; 4729 const struct btf_type *t; 4730 const char *name; 4731 __u32 id; 4732 }; 4733 4734 /* dynamically sized list of type IDs and its associated struct btf */ 4735 struct core_cand_list { 4736 struct core_cand *cands; 4737 int len; 4738 }; 4739 4740 static void bpf_core_free_cands(struct core_cand_list *cands) 4741 { 4742 free(cands->cands); 4743 free(cands); 4744 } 4745 4746 static int bpf_core_add_cands(struct core_cand *local_cand, 4747 size_t local_essent_len, 4748 const struct btf *targ_btf, 4749 const char *targ_btf_name, 4750 int targ_start_id, 4751 struct core_cand_list *cands) 4752 { 4753 struct core_cand *new_cands, *cand; 4754 const struct btf_type *t; 4755 const char *targ_name; 4756 size_t targ_essent_len; 4757 int n, i; 4758 4759 n = btf__get_nr_types(targ_btf); 4760 for (i = targ_start_id; i <= n; i++) { 4761 t = btf__type_by_id(targ_btf, i); 4762 if (btf_kind(t) != btf_kind(local_cand->t)) 4763 continue; 4764 4765 targ_name = btf__name_by_offset(targ_btf, t->name_off); 4766 if (str_is_empty(targ_name)) 4767 continue; 4768 4769 targ_essent_len = bpf_core_essential_name_len(targ_name); 4770 if (targ_essent_len != local_essent_len) 4771 continue; 4772 4773 if (strncmp(local_cand->name, targ_name, local_essent_len) != 0) 4774 continue; 4775 4776 pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s in [%s]\n", 4777 local_cand->id, btf_kind_str(local_cand->t), 4778 local_cand->name, i, btf_kind_str(t), targ_name, 4779 targ_btf_name); 4780 new_cands = libbpf_reallocarray(cands->cands, cands->len + 1, 4781 sizeof(*cands->cands)); 4782 if (!new_cands) 4783 return -ENOMEM; 4784 4785 cand = &new_cands[cands->len]; 4786 cand->btf = targ_btf; 4787 cand->t = t; 4788 cand->name = targ_name; 4789 cand->id = i; 4790 4791 cands->cands = new_cands; 4792 cands->len++; 4793 } 4794 return 0; 4795 } 4796 4797 static int load_module_btfs(struct bpf_object *obj) 4798 { 4799 struct bpf_btf_info info; 4800 struct module_btf *mod_btf; 4801 struct btf *btf; 4802 char name[64]; 4803 __u32 id = 0, len; 4804 int err, fd; 4805 4806 if (obj->btf_modules_loaded) 4807 return 0; 4808 4809 /* don't do this again, even if we find no module BTFs */ 4810 obj->btf_modules_loaded = true; 4811 4812 /* kernel too old to support module BTFs */ 4813 if (!kernel_supports(FEAT_MODULE_BTF)) 4814 return 0; 4815 
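	/* Iterate over all BTF objects loaded into the kernel using the usual
	 * get_next_id/get_fd_by_id pattern, sketched as:
	 *
	 *	while (!bpf_btf_get_next_id(id, &id)) {
	 *		fd = bpf_btf_get_fd_by_id(id);
	 *		... query bpf_btf_info, keep module BTFs, skip vmlinux ...
	 *	}
	 *
	 * An error with errno ENOENT from bpf_btf_get_next_id() means the end
	 * of the ID list was reached; ENOENT from bpf_btf_get_fd_by_id() means
	 * the BTF object was unloaded between the two calls, which is a benign
	 * race and the ID is simply skipped.
	 */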
4816 while (true) { 4817 err = bpf_btf_get_next_id(id, &id); 4818 if (err && errno == ENOENT) 4819 return 0; 4820 if (err) { 4821 err = -errno; 4822 pr_warn("failed to iterate BTF objects: %d\n", err); 4823 return err; 4824 } 4825 4826 fd = bpf_btf_get_fd_by_id(id); 4827 if (fd < 0) { 4828 if (errno == ENOENT) 4829 continue; /* expected race: BTF was unloaded */ 4830 err = -errno; 4831 pr_warn("failed to get BTF object #%d FD: %d\n", id, err); 4832 return err; 4833 } 4834 4835 len = sizeof(info); 4836 memset(&info, 0, sizeof(info)); 4837 info.name = ptr_to_u64(name); 4838 info.name_len = sizeof(name); 4839 4840 err = bpf_obj_get_info_by_fd(fd, &info, &len); 4841 if (err) { 4842 err = -errno; 4843 pr_warn("failed to get BTF object #%d info: %d\n", id, err); 4844 goto err_out; 4845 } 4846 4847 /* ignore non-module BTFs */ 4848 if (!info.kernel_btf || strcmp(name, "vmlinux") == 0) { 4849 close(fd); 4850 continue; 4851 } 4852 4853 btf = btf_get_from_fd(fd, obj->btf_vmlinux); 4854 if (IS_ERR(btf)) { 4855 pr_warn("failed to load module [%s]'s BTF object #%d: %ld\n", 4856 name, id, PTR_ERR(btf)); 4857 err = PTR_ERR(btf); 4858 goto err_out; 4859 } 4860 4861 err = libbpf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap, 4862 sizeof(*obj->btf_modules), obj->btf_module_cnt + 1); 4863 if (err) 4864 goto err_out; 4865 4866 mod_btf = &obj->btf_modules[obj->btf_module_cnt++]; 4867 4868 mod_btf->btf = btf; 4869 mod_btf->id = id; 4870 mod_btf->fd = fd; 4871 mod_btf->name = strdup(name); 4872 if (!mod_btf->name) { 4873 err = -ENOMEM; 4874 goto err_out; 4875 } 4876 continue; 4877 4878 err_out: 4879 close(fd); 4880 return err; 4881 } 4882 4883 return 0; 4884 } 4885 4886 static struct core_cand_list * 4887 bpf_core_find_cands(struct bpf_object *obj, const struct btf *local_btf, __u32 local_type_id) 4888 { 4889 struct core_cand local_cand = {}; 4890 struct core_cand_list *cands; 4891 const struct btf *main_btf; 4892 size_t local_essent_len; 4893 int err, i; 4894 4895 local_cand.btf = local_btf; 4896 local_cand.t = btf__type_by_id(local_btf, local_type_id); 4897 if (!local_cand.t) 4898 return ERR_PTR(-EINVAL); 4899 4900 local_cand.name = btf__name_by_offset(local_btf, local_cand.t->name_off); 4901 if (str_is_empty(local_cand.name)) 4902 return ERR_PTR(-EINVAL); 4903 local_essent_len = bpf_core_essential_name_len(local_cand.name); 4904 4905 cands = calloc(1, sizeof(*cands)); 4906 if (!cands) 4907 return ERR_PTR(-ENOMEM); 4908 4909 /* Attempt to find target candidates in vmlinux BTF first */ 4910 main_btf = obj->btf_vmlinux_override ?: obj->btf_vmlinux; 4911 err = bpf_core_add_cands(&local_cand, local_essent_len, main_btf, "vmlinux", 1, cands); 4912 if (err) 4913 goto err_out; 4914 4915 /* if vmlinux BTF has any candidate, don't got for module BTFs */ 4916 if (cands->len) 4917 return cands; 4918 4919 /* if vmlinux BTF was overridden, don't attempt to load module BTFs */ 4920 if (obj->btf_vmlinux_override) 4921 return cands; 4922 4923 /* now look through module BTFs, trying to still find candidates */ 4924 err = load_module_btfs(obj); 4925 if (err) 4926 goto err_out; 4927 4928 for (i = 0; i < obj->btf_module_cnt; i++) { 4929 err = bpf_core_add_cands(&local_cand, local_essent_len, 4930 obj->btf_modules[i].btf, 4931 obj->btf_modules[i].name, 4932 btf__get_nr_types(obj->btf_vmlinux) + 1, 4933 cands); 4934 if (err) 4935 goto err_out; 4936 } 4937 4938 return cands; 4939 err_out: 4940 bpf_core_free_cands(cands); 4941 return ERR_PTR(err); 4942 } 4943 4944 /* Check two types for compatibility for the purpose of 
field access 4945 * relocation. const/volatile/restrict and typedefs are skipped to ensure we 4946 * are relocating semantically compatible entities: 4947 * - any two STRUCTs/UNIONs are compatible and can be mixed; 4948 * - any two FWDs are compatible, if their names match (modulo flavor suffix); 4949 * - any two PTRs are always compatible; 4950 * - for ENUMs, names should be the same (ignoring flavor suffix) or at 4951 * least one of enums should be anonymous; 4952 * - for ENUMs, check sizes, names are ignored; 4953 * - for INT, size and signedness are ignored; 4954 * - for ARRAY, dimensionality is ignored, element types are checked for 4955 * compatibility recursively; 4956 * - everything else shouldn't be ever a target of relocation. 4957 * These rules are not set in stone and probably will be adjusted as we get 4958 * more experience with using BPF CO-RE relocations. 4959 */ 4960 static int bpf_core_fields_are_compat(const struct btf *local_btf, 4961 __u32 local_id, 4962 const struct btf *targ_btf, 4963 __u32 targ_id) 4964 { 4965 const struct btf_type *local_type, *targ_type; 4966 4967 recur: 4968 local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id); 4969 targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id); 4970 if (!local_type || !targ_type) 4971 return -EINVAL; 4972 4973 if (btf_is_composite(local_type) && btf_is_composite(targ_type)) 4974 return 1; 4975 if (btf_kind(local_type) != btf_kind(targ_type)) 4976 return 0; 4977 4978 switch (btf_kind(local_type)) { 4979 case BTF_KIND_PTR: 4980 return 1; 4981 case BTF_KIND_FWD: 4982 case BTF_KIND_ENUM: { 4983 const char *local_name, *targ_name; 4984 size_t local_len, targ_len; 4985 4986 local_name = btf__name_by_offset(local_btf, 4987 local_type->name_off); 4988 targ_name = btf__name_by_offset(targ_btf, targ_type->name_off); 4989 local_len = bpf_core_essential_name_len(local_name); 4990 targ_len = bpf_core_essential_name_len(targ_name); 4991 /* one of them is anonymous or both w/ same flavor-less names */ 4992 return local_len == 0 || targ_len == 0 || 4993 (local_len == targ_len && 4994 strncmp(local_name, targ_name, local_len) == 0); 4995 } 4996 case BTF_KIND_INT: 4997 /* just reject deprecated bitfield-like integers; all other 4998 * integers are by default compatible between each other 4999 */ 5000 return btf_int_offset(local_type) == 0 && 5001 btf_int_offset(targ_type) == 0; 5002 case BTF_KIND_ARRAY: 5003 local_id = btf_array(local_type)->type; 5004 targ_id = btf_array(targ_type)->type; 5005 goto recur; 5006 default: 5007 pr_warn("unexpected kind %d relocated, local [%d], target [%d]\n", 5008 btf_kind(local_type), local_id, targ_id); 5009 return 0; 5010 } 5011 } 5012 5013 /* 5014 * Given single high-level named field accessor in local type, find 5015 * corresponding high-level accessor for a target type. Along the way, 5016 * maintain low-level spec for target as well. Also keep updating target 5017 * bit offset. 5018 * 5019 * Searching is performed through recursive exhaustive enumeration of all 5020 * fields of a struct/union. If there are any anonymous (embedded) 5021 * structs/unions, they are recursively searched as well. If field with 5022 * desired name is found, check compatibility between local and target types, 5023 * before returning result. 5024 * 5025 * 1 is returned, if field is found. 5026 * 0 is returned if no compatible field is found. 5027 * <0 is returned on error. 
5028 */ 5029 static int bpf_core_match_member(const struct btf *local_btf, 5030 const struct bpf_core_accessor *local_acc, 5031 const struct btf *targ_btf, 5032 __u32 targ_id, 5033 struct bpf_core_spec *spec, 5034 __u32 *next_targ_id) 5035 { 5036 const struct btf_type *local_type, *targ_type; 5037 const struct btf_member *local_member, *m; 5038 const char *local_name, *targ_name; 5039 __u32 local_id; 5040 int i, n, found; 5041 5042 targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id); 5043 if (!targ_type) 5044 return -EINVAL; 5045 if (!btf_is_composite(targ_type)) 5046 return 0; 5047 5048 local_id = local_acc->type_id; 5049 local_type = btf__type_by_id(local_btf, local_id); 5050 local_member = btf_members(local_type) + local_acc->idx; 5051 local_name = btf__name_by_offset(local_btf, local_member->name_off); 5052 5053 n = btf_vlen(targ_type); 5054 m = btf_members(targ_type); 5055 for (i = 0; i < n; i++, m++) { 5056 __u32 bit_offset; 5057 5058 bit_offset = btf_member_bit_offset(targ_type, i); 5059 5060 /* too deep struct/union/array nesting */ 5061 if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN) 5062 return -E2BIG; 5063 5064 /* speculate this member will be the good one */ 5065 spec->bit_offset += bit_offset; 5066 spec->raw_spec[spec->raw_len++] = i; 5067 5068 targ_name = btf__name_by_offset(targ_btf, m->name_off); 5069 if (str_is_empty(targ_name)) { 5070 /* embedded struct/union, we need to go deeper */ 5071 found = bpf_core_match_member(local_btf, local_acc, 5072 targ_btf, m->type, 5073 spec, next_targ_id); 5074 if (found) /* either found or error */ 5075 return found; 5076 } else if (strcmp(local_name, targ_name) == 0) { 5077 /* matching named field */ 5078 struct bpf_core_accessor *targ_acc; 5079 5080 targ_acc = &spec->spec[spec->len++]; 5081 targ_acc->type_id = targ_id; 5082 targ_acc->idx = i; 5083 targ_acc->name = targ_name; 5084 5085 *next_targ_id = m->type; 5086 found = bpf_core_fields_are_compat(local_btf, 5087 local_member->type, 5088 targ_btf, m->type); 5089 if (!found) 5090 spec->len--; /* pop accessor */ 5091 return found; 5092 } 5093 /* member turned out not to be what we looked for */ 5094 spec->bit_offset -= bit_offset; 5095 spec->raw_len--; 5096 } 5097 5098 return 0; 5099 } 5100 5101 /* Check local and target types for compatibility. This check is used for 5102 * type-based CO-RE relocations and follow slightly different rules than 5103 * field-based relocations. This function assumes that root types were already 5104 * checked for name match. Beyond that initial root-level name check, names 5105 * are completely ignored. Compatibility rules are as follows: 5106 * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but 5107 * kind should match for local and target types (i.e., STRUCT is not 5108 * compatible with UNION); 5109 * - for ENUMs, the size is ignored; 5110 * - for INT, size and signedness are ignored; 5111 * - for ARRAY, dimensionality is ignored, element types are checked for 5112 * compatibility recursively; 5113 * - CONST/VOLATILE/RESTRICT modifiers are ignored; 5114 * - TYPEDEFs/PTRs are compatible if types they pointing to are compatible; 5115 * - FUNC_PROTOs are compatible if they have compatible signature: same 5116 * number of input args and compatible return and argument types. 5117 * These rules are not set in stone and probably will be adjusted as we get 5118 * more experience with using BPF CO-RE relocations. 
5119 */ 5120 static int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id, 5121 const struct btf *targ_btf, __u32 targ_id) 5122 { 5123 const struct btf_type *local_type, *targ_type; 5124 int depth = 32; /* max recursion depth */ 5125 5126 /* caller made sure that names match (ignoring flavor suffix) */ 5127 local_type = btf__type_by_id(local_btf, local_id); 5128 targ_type = btf__type_by_id(targ_btf, targ_id); 5129 if (btf_kind(local_type) != btf_kind(targ_type)) 5130 return 0; 5131 5132 recur: 5133 depth--; 5134 if (depth < 0) 5135 return -EINVAL; 5136 5137 local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id); 5138 targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id); 5139 if (!local_type || !targ_type) 5140 return -EINVAL; 5141 5142 if (btf_kind(local_type) != btf_kind(targ_type)) 5143 return 0; 5144 5145 switch (btf_kind(local_type)) { 5146 case BTF_KIND_UNKN: 5147 case BTF_KIND_STRUCT: 5148 case BTF_KIND_UNION: 5149 case BTF_KIND_ENUM: 5150 case BTF_KIND_FWD: 5151 return 1; 5152 case BTF_KIND_INT: 5153 /* just reject deprecated bitfield-like integers; all other 5154 * integers are by default compatible between each other 5155 */ 5156 return btf_int_offset(local_type) == 0 && btf_int_offset(targ_type) == 0; 5157 case BTF_KIND_PTR: 5158 local_id = local_type->type; 5159 targ_id = targ_type->type; 5160 goto recur; 5161 case BTF_KIND_ARRAY: 5162 local_id = btf_array(local_type)->type; 5163 targ_id = btf_array(targ_type)->type; 5164 goto recur; 5165 case BTF_KIND_FUNC_PROTO: { 5166 struct btf_param *local_p = btf_params(local_type); 5167 struct btf_param *targ_p = btf_params(targ_type); 5168 __u16 local_vlen = btf_vlen(local_type); 5169 __u16 targ_vlen = btf_vlen(targ_type); 5170 int i, err; 5171 5172 if (local_vlen != targ_vlen) 5173 return 0; 5174 5175 for (i = 0; i < local_vlen; i++, local_p++, targ_p++) { 5176 skip_mods_and_typedefs(local_btf, local_p->type, &local_id); 5177 skip_mods_and_typedefs(targ_btf, targ_p->type, &targ_id); 5178 err = bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id); 5179 if (err <= 0) 5180 return err; 5181 } 5182 5183 /* tail recurse for return type check */ 5184 skip_mods_and_typedefs(local_btf, local_type->type, &local_id); 5185 skip_mods_and_typedefs(targ_btf, targ_type->type, &targ_id); 5186 goto recur; 5187 } 5188 default: 5189 pr_warn("unexpected kind %s relocated, local [%d], target [%d]\n", 5190 btf_kind_str(local_type), local_id, targ_id); 5191 return 0; 5192 } 5193 } 5194 5195 /* 5196 * Try to match local spec to a target type and, if successful, produce full 5197 * target spec (high-level, low-level + bit offset). 
5198 */ 5199 static int bpf_core_spec_match(struct bpf_core_spec *local_spec, 5200 const struct btf *targ_btf, __u32 targ_id, 5201 struct bpf_core_spec *targ_spec) 5202 { 5203 const struct btf_type *targ_type; 5204 const struct bpf_core_accessor *local_acc; 5205 struct bpf_core_accessor *targ_acc; 5206 int i, sz, matched; 5207 5208 memset(targ_spec, 0, sizeof(*targ_spec)); 5209 targ_spec->btf = targ_btf; 5210 targ_spec->root_type_id = targ_id; 5211 targ_spec->relo_kind = local_spec->relo_kind; 5212 5213 if (core_relo_is_type_based(local_spec->relo_kind)) { 5214 return bpf_core_types_are_compat(local_spec->btf, 5215 local_spec->root_type_id, 5216 targ_btf, targ_id); 5217 } 5218 5219 local_acc = &local_spec->spec[0]; 5220 targ_acc = &targ_spec->spec[0]; 5221 5222 if (core_relo_is_enumval_based(local_spec->relo_kind)) { 5223 size_t local_essent_len, targ_essent_len; 5224 const struct btf_enum *e; 5225 const char *targ_name; 5226 5227 /* has to resolve to an enum */ 5228 targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id, &targ_id); 5229 if (!btf_is_enum(targ_type)) 5230 return 0; 5231 5232 local_essent_len = bpf_core_essential_name_len(local_acc->name); 5233 5234 for (i = 0, e = btf_enum(targ_type); i < btf_vlen(targ_type); i++, e++) { 5235 targ_name = btf__name_by_offset(targ_spec->btf, e->name_off); 5236 targ_essent_len = bpf_core_essential_name_len(targ_name); 5237 if (targ_essent_len != local_essent_len) 5238 continue; 5239 if (strncmp(local_acc->name, targ_name, local_essent_len) == 0) { 5240 targ_acc->type_id = targ_id; 5241 targ_acc->idx = i; 5242 targ_acc->name = targ_name; 5243 targ_spec->len++; 5244 targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx; 5245 targ_spec->raw_len++; 5246 return 1; 5247 } 5248 } 5249 return 0; 5250 } 5251 5252 if (!core_relo_is_field_based(local_spec->relo_kind)) 5253 return -EINVAL; 5254 5255 for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) { 5256 targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id, 5257 &targ_id); 5258 if (!targ_type) 5259 return -EINVAL; 5260 5261 if (local_acc->name) { 5262 matched = bpf_core_match_member(local_spec->btf, 5263 local_acc, 5264 targ_btf, targ_id, 5265 targ_spec, &targ_id); 5266 if (matched <= 0) 5267 return matched; 5268 } else { 5269 /* for i=0, targ_id is already treated as array element 5270 * type (because it's the original struct), for others 5271 * we should find array element type first 5272 */ 5273 if (i > 0) { 5274 const struct btf_array *a; 5275 bool flex; 5276 5277 if (!btf_is_array(targ_type)) 5278 return 0; 5279 5280 a = btf_array(targ_type); 5281 flex = is_flex_arr(targ_btf, targ_acc - 1, a); 5282 if (!flex && local_acc->idx >= a->nelems) 5283 return 0; 5284 if (!skip_mods_and_typedefs(targ_btf, a->type, 5285 &targ_id)) 5286 return -EINVAL; 5287 } 5288 5289 /* too deep struct/union/array nesting */ 5290 if (targ_spec->raw_len == BPF_CORE_SPEC_MAX_LEN) 5291 return -E2BIG; 5292 5293 targ_acc->type_id = targ_id; 5294 targ_acc->idx = local_acc->idx; 5295 targ_acc->name = NULL; 5296 targ_spec->len++; 5297 targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx; 5298 targ_spec->raw_len++; 5299 5300 sz = btf__resolve_size(targ_btf, targ_id); 5301 if (sz < 0) 5302 return sz; 5303 targ_spec->bit_offset += local_acc->idx * sz * 8; 5304 } 5305 } 5306 5307 return 1; 5308 } 5309 5310 static int bpf_core_calc_field_relo(const struct bpf_program *prog, 5311 const struct bpf_core_relo *relo, 5312 const struct bpf_core_spec *spec, 5313 __u32 *val, __u32 *field_sz, __u32 *type_id, 5314 bool 
*validate) 5315 { 5316 const struct bpf_core_accessor *acc; 5317 const struct btf_type *t; 5318 __u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id; 5319 const struct btf_member *m; 5320 const struct btf_type *mt; 5321 bool bitfield; 5322 __s64 sz; 5323 5324 *field_sz = 0; 5325 5326 if (relo->kind == BPF_FIELD_EXISTS) { 5327 *val = spec ? 1 : 0; 5328 return 0; 5329 } 5330 5331 if (!spec) 5332 return -EUCLEAN; /* request instruction poisoning */ 5333 5334 acc = &spec->spec[spec->len - 1]; 5335 t = btf__type_by_id(spec->btf, acc->type_id); 5336 5337 /* a[n] accessor needs special handling */ 5338 if (!acc->name) { 5339 if (relo->kind == BPF_FIELD_BYTE_OFFSET) { 5340 *val = spec->bit_offset / 8; 5341 /* remember field size for load/store mem size */ 5342 sz = btf__resolve_size(spec->btf, acc->type_id); 5343 if (sz < 0) 5344 return -EINVAL; 5345 *field_sz = sz; 5346 *type_id = acc->type_id; 5347 } else if (relo->kind == BPF_FIELD_BYTE_SIZE) { 5348 sz = btf__resolve_size(spec->btf, acc->type_id); 5349 if (sz < 0) 5350 return -EINVAL; 5351 *val = sz; 5352 } else { 5353 pr_warn("prog '%s': relo %d at insn #%d can't be applied to array access\n", 5354 prog->name, relo->kind, relo->insn_off / 8); 5355 return -EINVAL; 5356 } 5357 if (validate) 5358 *validate = true; 5359 return 0; 5360 } 5361 5362 m = btf_members(t) + acc->idx; 5363 mt = skip_mods_and_typedefs(spec->btf, m->type, &field_type_id); 5364 bit_off = spec->bit_offset; 5365 bit_sz = btf_member_bitfield_size(t, acc->idx); 5366 5367 bitfield = bit_sz > 0; 5368 if (bitfield) { 5369 byte_sz = mt->size; 5370 byte_off = bit_off / 8 / byte_sz * byte_sz; 5371 /* figure out smallest int size necessary for bitfield load */ 5372 while (bit_off + bit_sz - byte_off * 8 > byte_sz * 8) { 5373 if (byte_sz >= 8) { 5374 /* bitfield can't be read with 64-bit read */ 5375 pr_warn("prog '%s': relo %d at insn #%d can't be satisfied for bitfield\n", 5376 prog->name, relo->kind, relo->insn_off / 8); 5377 return -E2BIG; 5378 } 5379 byte_sz *= 2; 5380 byte_off = bit_off / 8 / byte_sz * byte_sz; 5381 } 5382 } else { 5383 sz = btf__resolve_size(spec->btf, field_type_id); 5384 if (sz < 0) 5385 return -EINVAL; 5386 byte_sz = sz; 5387 byte_off = spec->bit_offset / 8; 5388 bit_sz = byte_sz * 8; 5389 } 5390 5391 /* for bitfields, all the relocatable aspects are ambiguous and we 5392 * might disagree with compiler, so turn off validation of expected 5393 * value, except for signedness 5394 */ 5395 if (validate) 5396 *validate = !bitfield; 5397 5398 switch (relo->kind) { 5399 case BPF_FIELD_BYTE_OFFSET: 5400 *val = byte_off; 5401 if (!bitfield) { 5402 *field_sz = byte_sz; 5403 *type_id = field_type_id; 5404 } 5405 break; 5406 case BPF_FIELD_BYTE_SIZE: 5407 *val = byte_sz; 5408 break; 5409 case BPF_FIELD_SIGNED: 5410 /* enums will be assumed unsigned */ 5411 *val = btf_is_enum(mt) || 5412 (btf_int_encoding(mt) & BTF_INT_SIGNED); 5413 if (validate) 5414 *validate = true; /* signedness is never ambiguous */ 5415 break; 5416 case BPF_FIELD_LSHIFT_U64: 5417 #if __BYTE_ORDER == __LITTLE_ENDIAN 5418 *val = 64 - (bit_off + bit_sz - byte_off * 8); 5419 #else 5420 *val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8); 5421 #endif 5422 break; 5423 case BPF_FIELD_RSHIFT_U64: 5424 *val = 64 - bit_sz; 5425 if (validate) 5426 *validate = true; /* right shift is never ambiguous */ 5427 break; 5428 case BPF_FIELD_EXISTS: 5429 default: 5430 return -EOPNOTSUPP; 5431 } 5432 5433 return 0; 5434 } 5435 5436 static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo, 5437 const struct 
bpf_core_spec *spec, 5438 __u32 *val) 5439 { 5440 __s64 sz; 5441 5442 /* type-based relos return zero when target type is not found */ 5443 if (!spec) { 5444 *val = 0; 5445 return 0; 5446 } 5447 5448 switch (relo->kind) { 5449 case BPF_TYPE_ID_TARGET: 5450 *val = spec->root_type_id; 5451 break; 5452 case BPF_TYPE_EXISTS: 5453 *val = 1; 5454 break; 5455 case BPF_TYPE_SIZE: 5456 sz = btf__resolve_size(spec->btf, spec->root_type_id); 5457 if (sz < 0) 5458 return -EINVAL; 5459 *val = sz; 5460 break; 5461 case BPF_TYPE_ID_LOCAL: 5462 /* BPF_TYPE_ID_LOCAL is handled specially and shouldn't get here */ 5463 default: 5464 return -EOPNOTSUPP; 5465 } 5466 5467 return 0; 5468 } 5469 5470 static int bpf_core_calc_enumval_relo(const struct bpf_core_relo *relo, 5471 const struct bpf_core_spec *spec, 5472 __u32 *val) 5473 { 5474 const struct btf_type *t; 5475 const struct btf_enum *e; 5476 5477 switch (relo->kind) { 5478 case BPF_ENUMVAL_EXISTS: 5479 *val = spec ? 1 : 0; 5480 break; 5481 case BPF_ENUMVAL_VALUE: 5482 if (!spec) 5483 return -EUCLEAN; /* request instruction poisoning */ 5484 t = btf__type_by_id(spec->btf, spec->spec[0].type_id); 5485 e = btf_enum(t) + spec->spec[0].idx; 5486 *val = e->val; 5487 break; 5488 default: 5489 return -EOPNOTSUPP; 5490 } 5491 5492 return 0; 5493 } 5494 5495 struct bpf_core_relo_res 5496 { 5497 /* expected value in the instruction, unless validate == false */ 5498 __u32 orig_val; 5499 /* new value that needs to be patched up to */ 5500 __u32 new_val; 5501 /* relocation unsuccessful, poison instruction, but don't fail load */ 5502 bool poison; 5503 /* some relocations can't be validated against orig_val */ 5504 bool validate; 5505 /* for field byte offset relocations or the forms: 5506 * *(T *)(rX + <off>) = rY 5507 * rX = *(T *)(rY + <off>), 5508 * we remember original and resolved field size to adjust direct 5509 * memory loads of pointers and integers; this is necessary for 32-bit 5510 * host kernel architectures, but also allows to automatically 5511 * relocate fields that were resized from, e.g., u32 to u64, etc. 5512 */ 5513 bool fail_memsz_adjust; 5514 __u32 orig_sz; 5515 __u32 orig_type_id; 5516 __u32 new_sz; 5517 __u32 new_type_id; 5518 }; 5519 5520 /* Calculate original and target relocation values, given local and target 5521 * specs and relocation kind. These values are calculated for each candidate. 5522 * If there are multiple candidates, resulting values should all be consistent 5523 * with each other. Otherwise, libbpf will refuse to proceed due to ambiguity. 5524 * If instruction has to be poisoned, *poison will be set to true. 5525 */ 5526 static int bpf_core_calc_relo(const struct bpf_program *prog, 5527 const struct bpf_core_relo *relo, 5528 int relo_idx, 5529 const struct bpf_core_spec *local_spec, 5530 const struct bpf_core_spec *targ_spec, 5531 struct bpf_core_relo_res *res) 5532 { 5533 int err = -EOPNOTSUPP; 5534 5535 res->orig_val = 0; 5536 res->new_val = 0; 5537 res->poison = false; 5538 res->validate = true; 5539 res->fail_memsz_adjust = false; 5540 res->orig_sz = res->new_sz = 0; 5541 res->orig_type_id = res->new_type_id = 0; 5542 5543 if (core_relo_is_field_based(relo->kind)) { 5544 err = bpf_core_calc_field_relo(prog, relo, local_spec, 5545 &res->orig_val, &res->orig_sz, 5546 &res->orig_type_id, &res->validate); 5547 err = err ?: bpf_core_calc_field_relo(prog, relo, targ_spec, 5548 &res->new_val, &res->new_sz, 5549 &res->new_type_id, NULL); 5550 if (err) 5551 goto done; 5552 /* Validate if it's safe to adjust load/store memory size. 
5553 * Adjustments are performed only if original and new memory 5554 * sizes differ. 5555 */ 5556 res->fail_memsz_adjust = false; 5557 if (res->orig_sz != res->new_sz) { 5558 const struct btf_type *orig_t, *new_t; 5559 5560 orig_t = btf__type_by_id(local_spec->btf, res->orig_type_id); 5561 new_t = btf__type_by_id(targ_spec->btf, res->new_type_id); 5562 5563 /* There are two use cases in which it's safe to 5564 * adjust load/store's mem size: 5565 * - reading a 32-bit kernel pointer, while on BPF 5566 * size pointers are always 64-bit; in this case 5567 * it's safe to "downsize" instruction size due to 5568 * pointer being treated as unsigned integer with 5569 * zero-extended upper 32-bits; 5570 * - reading unsigned integers, again due to 5571 * zero-extension is preserving the value correctly. 5572 * 5573 * In all other cases it's incorrect to attempt to 5574 * load/store field because read value will be 5575 * incorrect, so we poison relocated instruction. 5576 */ 5577 if (btf_is_ptr(orig_t) && btf_is_ptr(new_t)) 5578 goto done; 5579 if (btf_is_int(orig_t) && btf_is_int(new_t) && 5580 btf_int_encoding(orig_t) != BTF_INT_SIGNED && 5581 btf_int_encoding(new_t) != BTF_INT_SIGNED) 5582 goto done; 5583 5584 /* mark as invalid mem size adjustment, but this will 5585 * only be checked for LDX/STX/ST insns 5586 */ 5587 res->fail_memsz_adjust = true; 5588 } 5589 } else if (core_relo_is_type_based(relo->kind)) { 5590 err = bpf_core_calc_type_relo(relo, local_spec, &res->orig_val); 5591 err = err ?: bpf_core_calc_type_relo(relo, targ_spec, &res->new_val); 5592 } else if (core_relo_is_enumval_based(relo->kind)) { 5593 err = bpf_core_calc_enumval_relo(relo, local_spec, &res->orig_val); 5594 err = err ?: bpf_core_calc_enumval_relo(relo, targ_spec, &res->new_val); 5595 } 5596 5597 done: 5598 if (err == -EUCLEAN) { 5599 /* EUCLEAN is used to signal instruction poisoning request */ 5600 res->poison = true; 5601 err = 0; 5602 } else if (err == -EOPNOTSUPP) { 5603 /* EOPNOTSUPP means unknown/unsupported relocation */ 5604 pr_warn("prog '%s': relo #%d: unrecognized CO-RE relocation %s (%d) at insn #%d\n", 5605 prog->name, relo_idx, core_relo_kind_str(relo->kind), 5606 relo->kind, relo->insn_off / 8); 5607 } 5608 5609 return err; 5610 } 5611 5612 /* 5613 * Turn instruction for which CO_RE relocation failed into invalid one with 5614 * distinct signature. 5615 */ 5616 static void bpf_core_poison_insn(struct bpf_program *prog, int relo_idx, 5617 int insn_idx, struct bpf_insn *insn) 5618 { 5619 pr_debug("prog '%s': relo #%d: substituting insn #%d w/ invalid insn\n", 5620 prog->name, relo_idx, insn_idx); 5621 insn->code = BPF_JMP | BPF_CALL; 5622 insn->dst_reg = 0; 5623 insn->src_reg = 0; 5624 insn->off = 0; 5625 /* if this instruction is reachable (not a dead code), 5626 * verifier will complain with the following message: 5627 * invalid func unknown#195896080 5628 */ 5629 insn->imm = 195896080; /* => 0xbad2310 => "bad relo" */ 5630 } 5631 5632 static int insn_bpf_size_to_bytes(struct bpf_insn *insn) 5633 { 5634 switch (BPF_SIZE(insn->code)) { 5635 case BPF_DW: return 8; 5636 case BPF_W: return 4; 5637 case BPF_H: return 2; 5638 case BPF_B: return 1; 5639 default: return -1; 5640 } 5641 } 5642 5643 static int insn_bytes_to_bpf_size(__u32 sz) 5644 { 5645 switch (sz) { 5646 case 8: return BPF_DW; 5647 case 4: return BPF_W; 5648 case 2: return BPF_H; 5649 case 1: return BPF_B; 5650 default: return -1; 5651 } 5652 } 5653 5654 /* 5655 * Patch relocatable BPF instruction. 
5656 * 5657 * Patched value is determined by relocation kind and target specification. 5658 * For existence relocations target spec will be NULL if field/type is not found. 5659 * Expected insn->imm value is determined using relocation kind and local 5660 * spec, and is checked before patching instruction. If actual insn->imm value 5661 * is wrong, bail out with error. 5662 * 5663 * Currently supported classes of BPF instruction are: 5664 * 1. rX = <imm> (assignment with immediate operand); 5665 * 2. rX += <imm> (arithmetic operations with immediate operand); 5666 * 3. rX = <imm64> (load with 64-bit immediate value); 5667 * 4. rX = *(T *)(rY + <off>), where T is one of {u8, u16, u32, u64}; 5668 * 5. *(T *)(rX + <off>) = rY, where T is one of {u8, u16, u32, u64}; 5669 * 6. *(T *)(rX + <off>) = <imm>, where T is one of {u8, u16, u32, u64}. 5670 */ 5671 static int bpf_core_patch_insn(struct bpf_program *prog, 5672 const struct bpf_core_relo *relo, 5673 int relo_idx, 5674 const struct bpf_core_relo_res *res) 5675 { 5676 __u32 orig_val, new_val; 5677 struct bpf_insn *insn; 5678 int insn_idx; 5679 __u8 class; 5680 5681 if (relo->insn_off % BPF_INSN_SZ) 5682 return -EINVAL; 5683 insn_idx = relo->insn_off / BPF_INSN_SZ; 5684 /* adjust insn_idx from section frame of reference to the local 5685 * program's frame of reference; (sub-)program code is not yet 5686 * relocated, so it's enough to just subtract in-section offset 5687 */ 5688 insn_idx = insn_idx - prog->sec_insn_off; 5689 insn = &prog->insns[insn_idx]; 5690 class = BPF_CLASS(insn->code); 5691 5692 if (res->poison) { 5693 poison: 5694 /* poison second part of ldimm64 to avoid confusing error from 5695 * verifier about "unknown opcode 00" 5696 */ 5697 if (is_ldimm64(insn)) 5698 bpf_core_poison_insn(prog, relo_idx, insn_idx + 1, insn + 1); 5699 bpf_core_poison_insn(prog, relo_idx, insn_idx, insn); 5700 return 0; 5701 } 5702 5703 orig_val = res->orig_val; 5704 new_val = res->new_val; 5705 5706 switch (class) { 5707 case BPF_ALU: 5708 case BPF_ALU64: 5709 if (BPF_SRC(insn->code) != BPF_K) 5710 return -EINVAL; 5711 if (res->validate && insn->imm != orig_val) { 5712 pr_warn("prog '%s': relo #%d: unexpected insn #%d (ALU/ALU64) value: got %u, exp %u -> %u\n", 5713 prog->name, relo_idx, 5714 insn_idx, insn->imm, orig_val, new_val); 5715 return -EINVAL; 5716 } 5717 orig_val = insn->imm; 5718 insn->imm = new_val; 5719 pr_debug("prog '%s': relo #%d: patched insn #%d (ALU/ALU64) imm %u -> %u\n", 5720 prog->name, relo_idx, insn_idx, 5721 orig_val, new_val); 5722 break; 5723 case BPF_LDX: 5724 case BPF_ST: 5725 case BPF_STX: 5726 if (res->validate && insn->off != orig_val) { 5727 pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDX/ST/STX) value: got %u, exp %u -> %u\n", 5728 prog->name, relo_idx, insn_idx, insn->off, orig_val, new_val); 5729 return -EINVAL; 5730 } 5731 if (new_val > SHRT_MAX) { 5732 pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) value too big: %u\n", 5733 prog->name, relo_idx, insn_idx, new_val); 5734 return -ERANGE; 5735 } 5736 if (res->fail_memsz_adjust) { 5737 pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) accesses field incorrectly. 
" 5738 "Make sure you are accessing pointers, unsigned integers, or fields of matching type and size.\n", 5739 prog->name, relo_idx, insn_idx); 5740 goto poison; 5741 } 5742 5743 orig_val = insn->off; 5744 insn->off = new_val; 5745 pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) off %u -> %u\n", 5746 prog->name, relo_idx, insn_idx, orig_val, new_val); 5747 5748 if (res->new_sz != res->orig_sz) { 5749 int insn_bytes_sz, insn_bpf_sz; 5750 5751 insn_bytes_sz = insn_bpf_size_to_bytes(insn); 5752 if (insn_bytes_sz != res->orig_sz) { 5753 pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) unexpected mem size: got %d, exp %u\n", 5754 prog->name, relo_idx, insn_idx, insn_bytes_sz, res->orig_sz); 5755 return -EINVAL; 5756 } 5757 5758 insn_bpf_sz = insn_bytes_to_bpf_size(res->new_sz); 5759 if (insn_bpf_sz < 0) { 5760 pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) invalid new mem size: %u\n", 5761 prog->name, relo_idx, insn_idx, res->new_sz); 5762 return -EINVAL; 5763 } 5764 5765 insn->code = BPF_MODE(insn->code) | insn_bpf_sz | BPF_CLASS(insn->code); 5766 pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) mem_sz %u -> %u\n", 5767 prog->name, relo_idx, insn_idx, res->orig_sz, res->new_sz); 5768 } 5769 break; 5770 case BPF_LD: { 5771 __u64 imm; 5772 5773 if (!is_ldimm64(insn) || 5774 insn[0].src_reg != 0 || insn[0].off != 0 || 5775 insn_idx + 1 >= prog->insns_cnt || 5776 insn[1].code != 0 || insn[1].dst_reg != 0 || 5777 insn[1].src_reg != 0 || insn[1].off != 0) { 5778 pr_warn("prog '%s': relo #%d: insn #%d (LDIMM64) has unexpected form\n", 5779 prog->name, relo_idx, insn_idx); 5780 return -EINVAL; 5781 } 5782 5783 imm = insn[0].imm + ((__u64)insn[1].imm << 32); 5784 if (res->validate && imm != orig_val) { 5785 pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDIMM64) value: got %llu, exp %u -> %u\n", 5786 prog->name, relo_idx, 5787 insn_idx, (unsigned long long)imm, 5788 orig_val, new_val); 5789 return -EINVAL; 5790 } 5791 5792 insn[0].imm = new_val; 5793 insn[1].imm = 0; /* currently only 32-bit values are supported */ 5794 pr_debug("prog '%s': relo #%d: patched insn #%d (LDIMM64) imm64 %llu -> %u\n", 5795 prog->name, relo_idx, insn_idx, 5796 (unsigned long long)imm, new_val); 5797 break; 5798 } 5799 default: 5800 pr_warn("prog '%s': relo #%d: trying to relocate unrecognized insn #%d, code:0x%x, src:0x%x, dst:0x%x, off:0x%x, imm:0x%x\n", 5801 prog->name, relo_idx, insn_idx, insn->code, 5802 insn->src_reg, insn->dst_reg, insn->off, insn->imm); 5803 return -EINVAL; 5804 } 5805 5806 return 0; 5807 } 5808 5809 /* Output spec definition in the format: 5810 * [<type-id>] (<type-name>) + <raw-spec> => <offset>@<spec>, 5811 * where <spec> is a C-syntax view of recorded field access, e.g.: x.a[3].b 5812 */ 5813 static void bpf_core_dump_spec(int level, const struct bpf_core_spec *spec) 5814 { 5815 const struct btf_type *t; 5816 const struct btf_enum *e; 5817 const char *s; 5818 __u32 type_id; 5819 int i; 5820 5821 type_id = spec->root_type_id; 5822 t = btf__type_by_id(spec->btf, type_id); 5823 s = btf__name_by_offset(spec->btf, t->name_off); 5824 5825 libbpf_print(level, "[%u] %s %s", type_id, btf_kind_str(t), str_is_empty(s) ? 
"<anon>" : s); 5826 5827 if (core_relo_is_type_based(spec->relo_kind)) 5828 return; 5829 5830 if (core_relo_is_enumval_based(spec->relo_kind)) { 5831 t = skip_mods_and_typedefs(spec->btf, type_id, NULL); 5832 e = btf_enum(t) + spec->raw_spec[0]; 5833 s = btf__name_by_offset(spec->btf, e->name_off); 5834 5835 libbpf_print(level, "::%s = %u", s, e->val); 5836 return; 5837 } 5838 5839 if (core_relo_is_field_based(spec->relo_kind)) { 5840 for (i = 0; i < spec->len; i++) { 5841 if (spec->spec[i].name) 5842 libbpf_print(level, ".%s", spec->spec[i].name); 5843 else if (i > 0 || spec->spec[i].idx > 0) 5844 libbpf_print(level, "[%u]", spec->spec[i].idx); 5845 } 5846 5847 libbpf_print(level, " ("); 5848 for (i = 0; i < spec->raw_len; i++) 5849 libbpf_print(level, "%s%d", i == 0 ? "" : ":", spec->raw_spec[i]); 5850 5851 if (spec->bit_offset % 8) 5852 libbpf_print(level, " @ offset %u.%u)", 5853 spec->bit_offset / 8, spec->bit_offset % 8); 5854 else 5855 libbpf_print(level, " @ offset %u)", spec->bit_offset / 8); 5856 return; 5857 } 5858 } 5859 5860 static size_t bpf_core_hash_fn(const void *key, void *ctx) 5861 { 5862 return (size_t)key; 5863 } 5864 5865 static bool bpf_core_equal_fn(const void *k1, const void *k2, void *ctx) 5866 { 5867 return k1 == k2; 5868 } 5869 5870 static void *u32_as_hash_key(__u32 x) 5871 { 5872 return (void *)(uintptr_t)x; 5873 } 5874 5875 /* 5876 * CO-RE relocate single instruction. 5877 * 5878 * The outline and important points of the algorithm: 5879 * 1. For given local type, find corresponding candidate target types. 5880 * Candidate type is a type with the same "essential" name, ignoring 5881 * everything after last triple underscore (___). E.g., `sample`, 5882 * `sample___flavor_one`, `sample___flavor_another_one`, are all candidates 5883 * for each other. Names with triple underscore are referred to as 5884 * "flavors" and are useful, among other things, to allow to 5885 * specify/support incompatible variations of the same kernel struct, which 5886 * might differ between different kernel versions and/or build 5887 * configurations. 5888 * 5889 * N.B. Struct "flavors" could be generated by bpftool's BTF-to-C 5890 * converter, when deduplicated BTF of a kernel still contains more than 5891 * one different types with the same name. In that case, ___2, ___3, etc 5892 * are appended starting from second name conflict. But start flavors are 5893 * also useful to be defined "locally", in BPF program, to extract same 5894 * data from incompatible changes between different kernel 5895 * versions/configurations. For instance, to handle field renames between 5896 * kernel versions, one can use two flavors of the struct name with the 5897 * same common name and use conditional relocations to extract that field, 5898 * depending on target kernel version. 5899 * 2. For each candidate type, try to match local specification to this 5900 * candidate target type. Matching involves finding corresponding 5901 * high-level spec accessors, meaning that all named fields should match, 5902 * as well as all array accesses should be within the actual bounds. Also, 5903 * types should be compatible (see bpf_core_fields_are_compat for details). 5904 * 3. It is supported and expected that there might be multiple flavors 5905 * matching the spec. As long as all the specs resolve to the same set of 5906 * offsets across all candidates, there is no error. If there is any 5907 * ambiguity, CO-RE relocation will fail. 
This is necessary to accommodate 5908 * imperfection of BTF deduplication, which can cause slight duplication of 5909 * the same BTF type, if some directly or indirectly referenced (by 5910 * pointer) type gets resolved to different actual types in different 5911 * object files. If such a situation occurs, deduplicated BTF will end up 5912 * with two (or more) structurally identical types, which differ only in 5913 * the types they refer to through pointer. This should be OK in most cases and 5914 * is not an error. 5915 * 4. Candidate type search is performed by linearly scanning through all 5916 * types in target BTF. It is anticipated that this is overall more 5917 * efficient memory-wise and not significantly worse (if not better) 5918 * CPU-wise compared to prebuilding a map from all local type names to 5919 * a list of candidate type names. It's also sped up by caching the resolved 5920 * list of matching candidates for each local "root" type ID that has at 5921 * least one bpf_core_relo associated with it. This list is shared 5922 * between multiple relocations for the same type ID and is updated as some 5923 * of the candidates are pruned due to structural incompatibility. 5924 */ 5925 static int bpf_core_apply_relo(struct bpf_program *prog, 5926 const struct bpf_core_relo *relo, 5927 int relo_idx, 5928 const struct btf *local_btf, 5929 struct hashmap *cand_cache) 5930 { 5931 struct bpf_core_spec local_spec, cand_spec, targ_spec = {}; 5932 const void *type_key = u32_as_hash_key(relo->type_id); 5933 struct bpf_core_relo_res cand_res, targ_res; 5934 const struct btf_type *local_type; 5935 const char *local_name; 5936 struct core_cand_list *cands = NULL; 5937 __u32 local_id; 5938 const char *spec_str; 5939 int i, j, err; 5940 5941 local_id = relo->type_id; 5942 local_type = btf__type_by_id(local_btf, local_id); 5943 if (!local_type) 5944 return -EINVAL; 5945 5946 local_name = btf__name_by_offset(local_btf, local_type->name_off); 5947 if (!local_name) 5948 return -EINVAL; 5949 5950 spec_str = btf__name_by_offset(local_btf, relo->access_str_off); 5951 if (str_is_empty(spec_str)) 5952 return -EINVAL; 5953 5954 err = bpf_core_parse_spec(local_btf, local_id, spec_str, relo->kind, &local_spec); 5955 if (err) { 5956 pr_warn("prog '%s': relo #%d: parsing [%d] %s %s + %s failed: %d\n", 5957 prog->name, relo_idx, local_id, btf_kind_str(local_type), 5958 str_is_empty(local_name) ?
"<anon>" : local_name, 5959 spec_str, err); 5960 return -EINVAL; 5961 } 5962 5963 pr_debug("prog '%s': relo #%d: kind <%s> (%d), spec is ", prog->name, 5964 relo_idx, core_relo_kind_str(relo->kind), relo->kind); 5965 bpf_core_dump_spec(LIBBPF_DEBUG, &local_spec); 5966 libbpf_print(LIBBPF_DEBUG, "\n"); 5967 5968 /* TYPE_ID_LOCAL relo is special and doesn't need candidate search */ 5969 if (relo->kind == BPF_TYPE_ID_LOCAL) { 5970 targ_res.validate = true; 5971 targ_res.poison = false; 5972 targ_res.orig_val = local_spec.root_type_id; 5973 targ_res.new_val = local_spec.root_type_id; 5974 goto patch_insn; 5975 } 5976 5977 /* libbpf doesn't support candidate search for anonymous types */ 5978 if (str_is_empty(spec_str)) { 5979 pr_warn("prog '%s': relo #%d: <%s> (%d) relocation doesn't support anonymous types\n", 5980 prog->name, relo_idx, core_relo_kind_str(relo->kind), relo->kind); 5981 return -EOPNOTSUPP; 5982 } 5983 5984 if (!hashmap__find(cand_cache, type_key, (void **)&cands)) { 5985 cands = bpf_core_find_cands(prog->obj, local_btf, local_id); 5986 if (IS_ERR(cands)) { 5987 pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld\n", 5988 prog->name, relo_idx, local_id, btf_kind_str(local_type), 5989 local_name, PTR_ERR(cands)); 5990 return PTR_ERR(cands); 5991 } 5992 err = hashmap__set(cand_cache, type_key, cands, NULL, NULL); 5993 if (err) { 5994 bpf_core_free_cands(cands); 5995 return err; 5996 } 5997 } 5998 5999 for (i = 0, j = 0; i < cands->len; i++) { 6000 err = bpf_core_spec_match(&local_spec, cands->cands[i].btf, 6001 cands->cands[i].id, &cand_spec); 6002 if (err < 0) { 6003 pr_warn("prog '%s': relo #%d: error matching candidate #%d ", 6004 prog->name, relo_idx, i); 6005 bpf_core_dump_spec(LIBBPF_WARN, &cand_spec); 6006 libbpf_print(LIBBPF_WARN, ": %d\n", err); 6007 return err; 6008 } 6009 6010 pr_debug("prog '%s': relo #%d: %s candidate #%d ", prog->name, 6011 relo_idx, err == 0 ? "non-matching" : "matching", i); 6012 bpf_core_dump_spec(LIBBPF_DEBUG, &cand_spec); 6013 libbpf_print(LIBBPF_DEBUG, "\n"); 6014 6015 if (err == 0) 6016 continue; 6017 6018 err = bpf_core_calc_relo(prog, relo, relo_idx, &local_spec, &cand_spec, &cand_res); 6019 if (err) 6020 return err; 6021 6022 if (j == 0) { 6023 targ_res = cand_res; 6024 targ_spec = cand_spec; 6025 } else if (cand_spec.bit_offset != targ_spec.bit_offset) { 6026 /* if there are many field relo candidates, they 6027 * should all resolve to the same bit offset 6028 */ 6029 pr_warn("prog '%s': relo #%d: field offset ambiguity: %u != %u\n", 6030 prog->name, relo_idx, cand_spec.bit_offset, 6031 targ_spec.bit_offset); 6032 return -EINVAL; 6033 } else if (cand_res.poison != targ_res.poison || cand_res.new_val != targ_res.new_val) { 6034 /* all candidates should result in the same relocation 6035 * decision and value, otherwise it's dangerous to 6036 * proceed due to ambiguity 6037 */ 6038 pr_warn("prog '%s': relo #%d: relocation decision ambiguity: %s %u != %s %u\n", 6039 prog->name, relo_idx, 6040 cand_res.poison ? "failure" : "success", cand_res.new_val, 6041 targ_res.poison ? "failure" : "success", targ_res.new_val); 6042 return -EINVAL; 6043 } 6044 6045 cands->cands[j++] = cands->cands[i]; 6046 } 6047 6048 /* 6049 * For BPF_FIELD_EXISTS relo or when used BPF program has field 6050 * existence checks or kernel version/config checks, it's expected 6051 * that we might not find any candidates. 
In this case, if field 6052 * wasn't found in any candidate, the list of candidates shouldn't 6053 * change at all, we'll just handle relocating appropriately, 6054 * depending on relo's kind. 6055 */ 6056 if (j > 0) 6057 cands->len = j; 6058 6059 /* 6060 * If no candidates were found, it might be both a programmer error, 6061 * as well as expected case, depending whether instruction w/ 6062 * relocation is guarded in some way that makes it unreachable (dead 6063 * code) if relocation can't be resolved. This is handled in 6064 * bpf_core_patch_insn() uniformly by replacing that instruction with 6065 * BPF helper call insn (using invalid helper ID). If that instruction 6066 * is indeed unreachable, then it will be ignored and eliminated by 6067 * verifier. If it was an error, then verifier will complain and point 6068 * to a specific instruction number in its log. 6069 */ 6070 if (j == 0) { 6071 pr_debug("prog '%s': relo #%d: no matching targets found\n", 6072 prog->name, relo_idx); 6073 6074 /* calculate single target relo result explicitly */ 6075 err = bpf_core_calc_relo(prog, relo, relo_idx, &local_spec, NULL, &targ_res); 6076 if (err) 6077 return err; 6078 } 6079 6080 patch_insn: 6081 /* bpf_core_patch_insn() should know how to handle missing targ_spec */ 6082 err = bpf_core_patch_insn(prog, relo, relo_idx, &targ_res); 6083 if (err) { 6084 pr_warn("prog '%s': relo #%d: failed to patch insn at offset %d: %d\n", 6085 prog->name, relo_idx, relo->insn_off, err); 6086 return -EINVAL; 6087 } 6088 6089 return 0; 6090 } 6091 6092 static int 6093 bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path) 6094 { 6095 const struct btf_ext_info_sec *sec; 6096 const struct bpf_core_relo *rec; 6097 const struct btf_ext_info *seg; 6098 struct hashmap_entry *entry; 6099 struct hashmap *cand_cache = NULL; 6100 struct bpf_program *prog; 6101 const char *sec_name; 6102 int i, err = 0, insn_idx, sec_idx; 6103 6104 if (obj->btf_ext->core_relo_info.len == 0) 6105 return 0; 6106 6107 if (targ_btf_path) { 6108 obj->btf_vmlinux_override = btf__parse(targ_btf_path, NULL); 6109 if (IS_ERR_OR_NULL(obj->btf_vmlinux_override)) { 6110 err = PTR_ERR(obj->btf_vmlinux_override); 6111 pr_warn("failed to parse target BTF: %d\n", err); 6112 return err; 6113 } 6114 } 6115 6116 cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL); 6117 if (IS_ERR(cand_cache)) { 6118 err = PTR_ERR(cand_cache); 6119 goto out; 6120 } 6121 6122 seg = &obj->btf_ext->core_relo_info; 6123 for_each_btf_ext_sec(seg, sec) { 6124 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off); 6125 if (str_is_empty(sec_name)) { 6126 err = -EINVAL; 6127 goto out; 6128 } 6129 /* bpf_object's ELF is gone by now so it's not easy to find 6130 * section index by section name, but we can find *any* 6131 * bpf_program within desired section name and use it's 6132 * prog->sec_idx to do a proper search by section index and 6133 * instruction offset 6134 */ 6135 prog = NULL; 6136 for (i = 0; i < obj->nr_programs; i++) { 6137 prog = &obj->programs[i]; 6138 if (strcmp(prog->sec_name, sec_name) == 0) 6139 break; 6140 } 6141 if (!prog) { 6142 pr_warn("sec '%s': failed to find a BPF program\n", sec_name); 6143 return -ENOENT; 6144 } 6145 sec_idx = prog->sec_idx; 6146 6147 pr_debug("sec '%s': found %d CO-RE relocations\n", 6148 sec_name, sec->num_info); 6149 6150 for_each_btf_ext_rec(seg, sec, i, rec) { 6151 insn_idx = rec->insn_off / BPF_INSN_SZ; 6152 prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx); 6153 if (!prog) { 6154 
pr_warn("sec '%s': failed to find program at insn #%d for CO-RE offset relocation #%d\n", 6155 sec_name, insn_idx, i); 6156 err = -EINVAL; 6157 goto out; 6158 } 6159 /* no need to apply CO-RE relocation if the program is 6160 * not going to be loaded 6161 */ 6162 if (!prog->load) 6163 continue; 6164 6165 err = bpf_core_apply_relo(prog, rec, i, obj->btf, cand_cache); 6166 if (err) { 6167 pr_warn("prog '%s': relo #%d: failed to relocate: %d\n", 6168 prog->name, i, err); 6169 goto out; 6170 } 6171 } 6172 } 6173 6174 out: 6175 /* obj->btf_vmlinux and module BTFs are freed after object load */ 6176 btf__free(obj->btf_vmlinux_override); 6177 obj->btf_vmlinux_override = NULL; 6178 6179 if (!IS_ERR_OR_NULL(cand_cache)) { 6180 hashmap__for_each_entry(cand_cache, entry, i) { 6181 bpf_core_free_cands(entry->value); 6182 } 6183 hashmap__free(cand_cache); 6184 } 6185 return err; 6186 } 6187 6188 /* Relocate data references within program code: 6189 * - map references; 6190 * - global variable references; 6191 * - extern references. 6192 */ 6193 static int 6194 bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog) 6195 { 6196 int i; 6197 6198 for (i = 0; i < prog->nr_reloc; i++) { 6199 struct reloc_desc *relo = &prog->reloc_desc[i]; 6200 struct bpf_insn *insn = &prog->insns[relo->insn_idx]; 6201 struct extern_desc *ext; 6202 6203 switch (relo->type) { 6204 case RELO_LD64: 6205 insn[0].src_reg = BPF_PSEUDO_MAP_FD; 6206 insn[0].imm = obj->maps[relo->map_idx].fd; 6207 relo->processed = true; 6208 break; 6209 case RELO_DATA: 6210 insn[0].src_reg = BPF_PSEUDO_MAP_VALUE; 6211 insn[1].imm = insn[0].imm + relo->sym_off; 6212 insn[0].imm = obj->maps[relo->map_idx].fd; 6213 relo->processed = true; 6214 break; 6215 case RELO_EXTERN: 6216 ext = &obj->externs[relo->sym_off]; 6217 if (ext->type == EXT_KCFG) { 6218 insn[0].src_reg = BPF_PSEUDO_MAP_VALUE; 6219 insn[0].imm = obj->maps[obj->kconfig_map_idx].fd; 6220 insn[1].imm = ext->kcfg.data_off; 6221 } else /* EXT_KSYM */ { 6222 if (ext->ksym.type_id) { /* typed ksyms */ 6223 insn[0].src_reg = BPF_PSEUDO_BTF_ID; 6224 insn[0].imm = ext->ksym.kernel_btf_id; 6225 insn[1].imm = ext->ksym.kernel_btf_obj_fd; 6226 } else { /* typeless ksyms */ 6227 insn[0].imm = (__u32)ext->ksym.addr; 6228 insn[1].imm = ext->ksym.addr >> 32; 6229 } 6230 } 6231 relo->processed = true; 6232 break; 6233 case RELO_SUBPROG_ADDR: 6234 insn[0].src_reg = BPF_PSEUDO_FUNC; 6235 /* will be handled as a follow up pass */ 6236 break; 6237 case RELO_CALL: 6238 /* will be handled as a follow up pass */ 6239 break; 6240 default: 6241 pr_warn("prog '%s': relo #%d: bad relo type %d\n", 6242 prog->name, i, relo->type); 6243 return -EINVAL; 6244 } 6245 } 6246 6247 return 0; 6248 } 6249 6250 static int adjust_prog_btf_ext_info(const struct bpf_object *obj, 6251 const struct bpf_program *prog, 6252 const struct btf_ext_info *ext_info, 6253 void **prog_info, __u32 *prog_rec_cnt, 6254 __u32 *prog_rec_sz) 6255 { 6256 void *copy_start = NULL, *copy_end = NULL; 6257 void *rec, *rec_end, *new_prog_info; 6258 const struct btf_ext_info_sec *sec; 6259 size_t old_sz, new_sz; 6260 const char *sec_name; 6261 int i, off_adj; 6262 6263 for_each_btf_ext_sec(ext_info, sec) { 6264 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off); 6265 if (!sec_name) 6266 return -EINVAL; 6267 if (strcmp(sec_name, prog->sec_name) != 0) 6268 continue; 6269 6270 for_each_btf_ext_rec(ext_info, sec, i, rec) { 6271 __u32 insn_off = *(__u32 *)rec / BPF_INSN_SZ; 6272 6273 if (insn_off < prog->sec_insn_off) 6274 continue; 
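			/* Records are expected to appear in increasing instruction
			 * offset order, so the records that belong to this
			 * (sub-)program form one contiguous run; remember its
			 * [copy_start, copy_end) byte range here and copy it out
			 * with a single memcpy() below.
			 */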
6275 if (insn_off >= prog->sec_insn_off + prog->sec_insn_cnt) 6276 break; 6277 6278 if (!copy_start) 6279 copy_start = rec; 6280 copy_end = rec + ext_info->rec_size; 6281 } 6282 6283 if (!copy_start) 6284 return -ENOENT; 6285 6286 /* append func/line info of a given (sub-)program to the main 6287 * program func/line info 6288 */ 6289 old_sz = (size_t)(*prog_rec_cnt) * ext_info->rec_size; 6290 new_sz = old_sz + (copy_end - copy_start); 6291 new_prog_info = realloc(*prog_info, new_sz); 6292 if (!new_prog_info) 6293 return -ENOMEM; 6294 *prog_info = new_prog_info; 6295 *prog_rec_cnt = new_sz / ext_info->rec_size; 6296 memcpy(new_prog_info + old_sz, copy_start, copy_end - copy_start); 6297 6298 /* Kernel instruction offsets are in units of 8-byte 6299 * instructions, while .BTF.ext instruction offsets generated 6300 * by Clang are in units of bytes. So convert Clang offsets 6301 * into kernel offsets and adjust offset according to program 6302 * relocated position. 6303 */ 6304 off_adj = prog->sub_insn_off - prog->sec_insn_off; 6305 rec = new_prog_info + old_sz; 6306 rec_end = new_prog_info + new_sz; 6307 for (; rec < rec_end; rec += ext_info->rec_size) { 6308 __u32 *insn_off = rec; 6309 6310 *insn_off = *insn_off / BPF_INSN_SZ + off_adj; 6311 } 6312 *prog_rec_sz = ext_info->rec_size; 6313 return 0; 6314 } 6315 6316 return -ENOENT; 6317 } 6318 6319 static int 6320 reloc_prog_func_and_line_info(const struct bpf_object *obj, 6321 struct bpf_program *main_prog, 6322 const struct bpf_program *prog) 6323 { 6324 int err; 6325 6326 /* no .BTF.ext relocation if .BTF.ext is missing or kernel doesn't 6327 * supprot func/line info 6328 */ 6329 if (!obj->btf_ext || !kernel_supports(FEAT_BTF_FUNC)) 6330 return 0; 6331 6332 /* only attempt func info relocation if main program's func_info 6333 * relocation was successful 6334 */ 6335 if (main_prog != prog && !main_prog->func_info) 6336 goto line_info; 6337 6338 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info, 6339 &main_prog->func_info, 6340 &main_prog->func_info_cnt, 6341 &main_prog->func_info_rec_size); 6342 if (err) { 6343 if (err != -ENOENT) { 6344 pr_warn("prog '%s': error relocating .BTF.ext function info: %d\n", 6345 prog->name, err); 6346 return err; 6347 } 6348 if (main_prog->func_info) { 6349 /* 6350 * Some info has already been found but has problem 6351 * in the last btf_ext reloc. Must have to error out. 6352 */ 6353 pr_warn("prog '%s': missing .BTF.ext function info.\n", prog->name); 6354 return err; 6355 } 6356 /* Have problem loading the very first info. Ignore the rest. */ 6357 pr_warn("prog '%s': missing .BTF.ext function info for the main program, skipping all of .BTF.ext func info.\n", 6358 prog->name); 6359 } 6360 6361 line_info: 6362 /* don't relocate line info if main program's relocation failed */ 6363 if (main_prog != prog && !main_prog->line_info) 6364 return 0; 6365 6366 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info, 6367 &main_prog->line_info, 6368 &main_prog->line_info_cnt, 6369 &main_prog->line_info_rec_size); 6370 if (err) { 6371 if (err != -ENOENT) { 6372 pr_warn("prog '%s': error relocating .BTF.ext line info: %d\n", 6373 prog->name, err); 6374 return err; 6375 } 6376 if (main_prog->line_info) { 6377 /* 6378 * Some info has already been found but has problem 6379 * in the last btf_ext reloc. Must have to error out. 6380 */ 6381 pr_warn("prog '%s': missing .BTF.ext line info.\n", prog->name); 6382 return err; 6383 } 6384 /* Have problem loading the very first info. Ignore the rest. 
*/ 6385 pr_warn("prog '%s': missing .BTF.ext line info for the main program, skipping all of .BTF.ext line info.\n", 6386 prog->name); 6387 } 6388 return 0; 6389 } 6390 6391 static int cmp_relo_by_insn_idx(const void *key, const void *elem) 6392 { 6393 size_t insn_idx = *(const size_t *)key; 6394 const struct reloc_desc *relo = elem; 6395 6396 if (insn_idx == relo->insn_idx) 6397 return 0; 6398 return insn_idx < relo->insn_idx ? -1 : 1; 6399 } 6400 6401 static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, size_t insn_idx) 6402 { 6403 return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc, 6404 sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx); 6405 } 6406 6407 static int 6408 bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog, 6409 struct bpf_program *prog) 6410 { 6411 size_t sub_insn_idx, insn_idx, new_cnt; 6412 struct bpf_program *subprog; 6413 struct bpf_insn *insns, *insn; 6414 struct reloc_desc *relo; 6415 int err; 6416 6417 err = reloc_prog_func_and_line_info(obj, main_prog, prog); 6418 if (err) 6419 return err; 6420 6421 for (insn_idx = 0; insn_idx < prog->sec_insn_cnt; insn_idx++) { 6422 insn = &main_prog->insns[prog->sub_insn_off + insn_idx]; 6423 if (!insn_is_subprog_call(insn) && !insn_is_pseudo_func(insn)) 6424 continue; 6425 6426 relo = find_prog_insn_relo(prog, insn_idx); 6427 if (relo && relo->type != RELO_CALL && relo->type != RELO_SUBPROG_ADDR) { 6428 pr_warn("prog '%s': unexpected relo for insn #%zu, type %d\n", 6429 prog->name, insn_idx, relo->type); 6430 return -LIBBPF_ERRNO__RELOC; 6431 } 6432 if (relo) { 6433 /* sub-program instruction index is a combination of 6434 * an offset of a symbol pointed to by relocation and 6435 * call instruction's imm field; for global functions, 6436 * call always has imm = -1, but for static functions 6437 * relocation is against STT_SECTION and insn->imm 6438 * points to a start of a static function 6439 * 6440 * for subprog addr relocation, the relo->sym_off + insn->imm is 6441 * the byte offset in the corresponding section. 6442 */ 6443 if (relo->type == RELO_CALL) 6444 sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1; 6445 else 6446 sub_insn_idx = (relo->sym_off + insn->imm) / BPF_INSN_SZ; 6447 } else if (insn_is_pseudo_func(insn)) { 6448 /* 6449 * RELO_SUBPROG_ADDR relo is always emitted even if both 6450 * functions are in the same section, so it shouldn't reach here. 
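			 *
			 * (ld_imm64 instructions marked with BPF_PSEUDO_FUNC load the
			 * address of a BPF subprogram, e.g. a callback passed to a
			 * helper, so finding one here without a matching relocation
			 * indicates inconsistent ELF/relocation data.)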
6451 */
6452 pr_warn("prog '%s': missing subprog addr relo for insn #%zu\n",
6453 prog->name, insn_idx);
6454 return -LIBBPF_ERRNO__RELOC;
6455 } else {
6456 /* if subprogram call is to a static function within
6457 * the same ELF section, there won't be any relocation
6458 * emitted, but it also means there is no additional
6459 * offset necessary, insn->imm is relative to
6460 * instruction's original position within the section
6461 */
6462 sub_insn_idx = prog->sec_insn_off + insn_idx + insn->imm + 1;
6463 }
6464
6465 /* we enforce that sub-programs should be in .text section */
6466 subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx);
6467 if (!subprog) {
6468 pr_warn("prog '%s': no .text section found yet sub-program call exists\n",
6469 prog->name);
6470 return -LIBBPF_ERRNO__RELOC;
6471 }
6472
6473 /* if it's the first call instruction calling into this
6474 * subprogram (meaning this subprog hasn't been processed
6475 * yet) within the context of current main program:
6476 * - append it at the end of main program's instructions block;
6477 * - process it recursively, while current program is put on hold;
6478 * - if that subprogram calls some other not yet processed
6479 * subprogram, same thing will happen recursively until
6480 * there are no more unprocessed subprograms left to append
6481 * and relocate.
6482 */
6483 if (subprog->sub_insn_off == 0) {
6484 subprog->sub_insn_off = main_prog->insns_cnt;
6485
6486 new_cnt = main_prog->insns_cnt + subprog->insns_cnt;
6487 insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns));
6488 if (!insns) {
6489 pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name);
6490 return -ENOMEM;
6491 }
6492 main_prog->insns = insns;
6493 main_prog->insns_cnt = new_cnt;
6494
6495 memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns,
6496 subprog->insns_cnt * sizeof(*insns));
6497
6498 pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n",
6499 main_prog->name, subprog->insns_cnt, subprog->name);
6500
6501 err = bpf_object__reloc_code(obj, main_prog, subprog);
6502 if (err)
6503 return err;
6504 }
6505
6506 /* main_prog->insns memory could have been re-allocated, so
6507 * calculate pointer again
6508 */
6509 insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
6510 /* calculate correct instruction position within current main
6511 * prog; each main prog can have a different set of
6512 * subprograms appended (potentially in a different order as
6513 * well), so position of any subprog can be different for
6514 * different main programs */
6515 insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1;
6516
6517 if (relo)
6518 relo->processed = true;
6519
6520 pr_debug("prog '%s': insn #%zu relocated, imm %d points to subprog '%s' (now at %zu offset)\n",
6521 prog->name, insn_idx, insn->imm, subprog->name, subprog->sub_insn_off);
6522 }
6523
6524 return 0;
6525 }
6526
6527 /*
6528 * Relocate sub-program calls.
6529 *
6530 * The algorithm operates as follows. Each entry-point BPF program (referred
6531 * to as main prog) is processed separately. Each subprog (a non-entry
6532 * function that can be called from either entry progs or other subprogs)
6533 * gets its sub_insn_off reset to zero. This serves as an indicator that this
6534 * subprogram hasn't yet been appended and relocated within current main prog.
6535 * Once it's relocated, sub_insn_off will point at the position within current
6536 * main prog where the given subprog was appended.
This will further be used to relocate all 6537 * the call instructions jumping into this subprog. 6538 * 6539 * We start with main program and process all call instructions. If the call 6540 * is into a subprog that hasn't been processed (i.e., subprog->sub_insn_off 6541 * is zero), subprog instructions are appended at the end of main program's 6542 * instruction array. Then main program is "put on hold" while we recursively 6543 * process newly appended subprogram. If that subprogram calls into another 6544 * subprogram that hasn't been appended, new subprogram is appended again to 6545 * the *main* prog's instructions (subprog's instructions are always left 6546 * untouched, as they need to be in unmodified state for subsequent main progs 6547 * and subprog instructions are always sent only as part of a main prog) and 6548 * the process continues recursively. Once all the subprogs called from a main 6549 * prog or any of its subprogs are appended (and relocated), all their 6550 * positions within finalized instructions array are known, so it's easy to 6551 * rewrite call instructions with correct relative offsets, corresponding to 6552 * desired target subprog. 6553 * 6554 * Its important to realize that some subprogs might not be called from some 6555 * main prog and any of its called/used subprogs. Those will keep their 6556 * subprog->sub_insn_off as zero at all times and won't be appended to current 6557 * main prog and won't be relocated within the context of current main prog. 6558 * They might still be used from other main progs later. 6559 * 6560 * Visually this process can be shown as below. Suppose we have two main 6561 * programs mainA and mainB and BPF object contains three subprogs: subA, 6562 * subB, and subC. mainA calls only subA, mainB calls only subC, but subA and 6563 * subC both call subB: 6564 * 6565 * +--------+ +-------+ 6566 * | v v | 6567 * +--+---+ +--+-+-+ +---+--+ 6568 * | subA | | subB | | subC | 6569 * +--+---+ +------+ +---+--+ 6570 * ^ ^ 6571 * | | 6572 * +---+-------+ +------+----+ 6573 * | mainA | | mainB | 6574 * +-----------+ +-----------+ 6575 * 6576 * We'll start relocating mainA, will find subA, append it and start 6577 * processing sub A recursively: 6578 * 6579 * +-----------+------+ 6580 * | mainA | subA | 6581 * +-----------+------+ 6582 * 6583 * At this point we notice that subB is used from subA, so we append it and 6584 * relocate (there are no further subcalls from subB): 6585 * 6586 * +-----------+------+------+ 6587 * | mainA | subA | subB | 6588 * +-----------+------+------+ 6589 * 6590 * At this point, we relocate subA calls, then go one level up and finish with 6591 * relocatin mainA calls. mainA is done. 6592 * 6593 * For mainB process is similar but results in different order. We start with 6594 * mainB and skip subA and subB, as mainB never calls them (at least 6595 * directly), but we see subC is needed, so we append and start processing it: 6596 * 6597 * +-----------+------+ 6598 * | mainB | subC | 6599 * +-----------+------+ 6600 * Now we see subC needs subB, so we go back to it, append and relocate it: 6601 * 6602 * +-----------+------+------+ 6603 * | mainB | subC | subB | 6604 * +-----------+------+------+ 6605 * 6606 * At this point we unwind recursion, relocate calls in subC, then in mainB. 
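 *
 * As a concrete illustration of the final call patching (the sizes below are
 * made up): if mainA is 10 instructions long, subA gets appended at
 * subA->sub_insn_off == 10, and a call to subA sitting at instruction #3 of
 * mainA is rewritten with imm = 10 - 3 - 1 = 6, since a BPF call's imm is
 * counted relative to the instruction following the call. This matches the
 * insn->imm computation in bpf_object__reloc_code() above.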
6607 */ 6608 static int 6609 bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog) 6610 { 6611 struct bpf_program *subprog; 6612 int i, j, err; 6613 6614 /* mark all subprogs as not relocated (yet) within the context of 6615 * current main program 6616 */ 6617 for (i = 0; i < obj->nr_programs; i++) { 6618 subprog = &obj->programs[i]; 6619 if (!prog_is_subprog(obj, subprog)) 6620 continue; 6621 6622 subprog->sub_insn_off = 0; 6623 for (j = 0; j < subprog->nr_reloc; j++) 6624 if (subprog->reloc_desc[j].type == RELO_CALL) 6625 subprog->reloc_desc[j].processed = false; 6626 } 6627 6628 err = bpf_object__reloc_code(obj, prog, prog); 6629 if (err) 6630 return err; 6631 6632 6633 return 0; 6634 } 6635 6636 static int 6637 bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path) 6638 { 6639 struct bpf_program *prog; 6640 size_t i; 6641 int err; 6642 6643 if (obj->btf_ext) { 6644 err = bpf_object__relocate_core(obj, targ_btf_path); 6645 if (err) { 6646 pr_warn("failed to perform CO-RE relocations: %d\n", 6647 err); 6648 return err; 6649 } 6650 } 6651 /* relocate data references first for all programs and sub-programs, 6652 * as they don't change relative to code locations, so subsequent 6653 * subprogram processing won't need to re-calculate any of them 6654 */ 6655 for (i = 0; i < obj->nr_programs; i++) { 6656 prog = &obj->programs[i]; 6657 err = bpf_object__relocate_data(obj, prog); 6658 if (err) { 6659 pr_warn("prog '%s': failed to relocate data references: %d\n", 6660 prog->name, err); 6661 return err; 6662 } 6663 } 6664 /* now relocate subprogram calls and append used subprograms to main 6665 * programs; each copy of subprogram code needs to be relocated 6666 * differently for each main program, because its code location might 6667 * have changed 6668 */ 6669 for (i = 0; i < obj->nr_programs; i++) { 6670 prog = &obj->programs[i]; 6671 /* sub-program's sub-calls are relocated within the context of 6672 * its main program only 6673 */ 6674 if (prog_is_subprog(obj, prog)) 6675 continue; 6676 6677 err = bpf_object__relocate_calls(obj, prog); 6678 if (err) { 6679 pr_warn("prog '%s': failed to relocate calls: %d\n", 6680 prog->name, err); 6681 return err; 6682 } 6683 } 6684 /* free up relocation descriptors */ 6685 for (i = 0; i < obj->nr_programs; i++) { 6686 prog = &obj->programs[i]; 6687 zfree(&prog->reloc_desc); 6688 prog->nr_reloc = 0; 6689 } 6690 return 0; 6691 } 6692 6693 static int bpf_object__collect_st_ops_relos(struct bpf_object *obj, 6694 GElf_Shdr *shdr, Elf_Data *data); 6695 6696 static int bpf_object__collect_map_relos(struct bpf_object *obj, 6697 GElf_Shdr *shdr, Elf_Data *data) 6698 { 6699 const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *); 6700 int i, j, nrels, new_sz; 6701 const struct btf_var_secinfo *vi = NULL; 6702 const struct btf_type *sec, *var, *def; 6703 struct bpf_map *map = NULL, *targ_map; 6704 const struct btf_member *member; 6705 const char *name, *mname; 6706 Elf_Data *symbols; 6707 unsigned int moff; 6708 GElf_Sym sym; 6709 GElf_Rel rel; 6710 void *tmp; 6711 6712 if (!obj->efile.btf_maps_sec_btf_id || !obj->btf) 6713 return -EINVAL; 6714 sec = btf__type_by_id(obj->btf, obj->efile.btf_maps_sec_btf_id); 6715 if (!sec) 6716 return -EINVAL; 6717 6718 symbols = obj->efile.symbols; 6719 nrels = shdr->sh_size / shdr->sh_entsize; 6720 for (i = 0; i < nrels; i++) { 6721 if (!gelf_getrel(data, i, &rel)) { 6722 pr_warn(".maps relo #%d: failed to get ELF relo\n", i); 6723 return -LIBBPF_ERRNO__FORMAT; 6724 } 6725 if 
(!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) { 6726 pr_warn(".maps relo #%d: symbol %zx not found\n", 6727 i, (size_t)GELF_R_SYM(rel.r_info)); 6728 return -LIBBPF_ERRNO__FORMAT; 6729 } 6730 name = elf_sym_str(obj, sym.st_name) ?: "<?>"; 6731 if (sym.st_shndx != obj->efile.btf_maps_shndx) { 6732 pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n", 6733 i, name); 6734 return -LIBBPF_ERRNO__RELOC; 6735 } 6736 6737 pr_debug(".maps relo #%d: for %zd value %zd rel.r_offset %zu name %d ('%s')\n", 6738 i, (ssize_t)(rel.r_info >> 32), (size_t)sym.st_value, 6739 (size_t)rel.r_offset, sym.st_name, name); 6740 6741 for (j = 0; j < obj->nr_maps; j++) { 6742 map = &obj->maps[j]; 6743 if (map->sec_idx != obj->efile.btf_maps_shndx) 6744 continue; 6745 6746 vi = btf_var_secinfos(sec) + map->btf_var_idx; 6747 if (vi->offset <= rel.r_offset && 6748 rel.r_offset + bpf_ptr_sz <= vi->offset + vi->size) 6749 break; 6750 } 6751 if (j == obj->nr_maps) { 6752 pr_warn(".maps relo #%d: cannot find map '%s' at rel.r_offset %zu\n", 6753 i, name, (size_t)rel.r_offset); 6754 return -EINVAL; 6755 } 6756 6757 if (!bpf_map_type__is_map_in_map(map->def.type)) 6758 return -EINVAL; 6759 if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS && 6760 map->def.key_size != sizeof(int)) { 6761 pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n", 6762 i, map->name, sizeof(int)); 6763 return -EINVAL; 6764 } 6765 6766 targ_map = bpf_object__find_map_by_name(obj, name); 6767 if (!targ_map) 6768 return -ESRCH; 6769 6770 var = btf__type_by_id(obj->btf, vi->type); 6771 def = skip_mods_and_typedefs(obj->btf, var->type, NULL); 6772 if (btf_vlen(def) == 0) 6773 return -EINVAL; 6774 member = btf_members(def) + btf_vlen(def) - 1; 6775 mname = btf__name_by_offset(obj->btf, member->name_off); 6776 if (strcmp(mname, "values")) 6777 return -EINVAL; 6778 6779 moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8; 6780 if (rel.r_offset - vi->offset < moff) 6781 return -EINVAL; 6782 6783 moff = rel.r_offset - vi->offset - moff; 6784 /* here we use BPF pointer size, which is always 64 bit, as we 6785 * are parsing ELF that was built for BPF target 6786 */ 6787 if (moff % bpf_ptr_sz) 6788 return -EINVAL; 6789 moff /= bpf_ptr_sz; 6790 if (moff >= map->init_slots_sz) { 6791 new_sz = moff + 1; 6792 tmp = libbpf_reallocarray(map->init_slots, new_sz, host_ptr_sz); 6793 if (!tmp) 6794 return -ENOMEM; 6795 map->init_slots = tmp; 6796 memset(map->init_slots + map->init_slots_sz, 0, 6797 (new_sz - map->init_slots_sz) * host_ptr_sz); 6798 map->init_slots_sz = new_sz; 6799 } 6800 map->init_slots[moff] = targ_map; 6801 6802 pr_debug(".maps relo #%d: map '%s' slot [%d] points to map '%s'\n", 6803 i, map->name, moff, name); 6804 } 6805 6806 return 0; 6807 } 6808 6809 static int cmp_relocs(const void *_a, const void *_b) 6810 { 6811 const struct reloc_desc *a = _a; 6812 const struct reloc_desc *b = _b; 6813 6814 if (a->insn_idx != b->insn_idx) 6815 return a->insn_idx < b->insn_idx ? -1 : 1; 6816 6817 /* no two relocations should have the same insn_idx, but ... */ 6818 if (a->type != b->type) 6819 return a->type < b->type ? 
-1 : 1; 6820 6821 return 0; 6822 } 6823 6824 static int bpf_object__collect_relos(struct bpf_object *obj) 6825 { 6826 int i, err; 6827 6828 for (i = 0; i < obj->efile.nr_reloc_sects; i++) { 6829 GElf_Shdr *shdr = &obj->efile.reloc_sects[i].shdr; 6830 Elf_Data *data = obj->efile.reloc_sects[i].data; 6831 int idx = shdr->sh_info; 6832 6833 if (shdr->sh_type != SHT_REL) { 6834 pr_warn("internal error at %d\n", __LINE__); 6835 return -LIBBPF_ERRNO__INTERNAL; 6836 } 6837 6838 if (idx == obj->efile.st_ops_shndx) 6839 err = bpf_object__collect_st_ops_relos(obj, shdr, data); 6840 else if (idx == obj->efile.btf_maps_shndx) 6841 err = bpf_object__collect_map_relos(obj, shdr, data); 6842 else 6843 err = bpf_object__collect_prog_relos(obj, shdr, data); 6844 if (err) 6845 return err; 6846 } 6847 6848 for (i = 0; i < obj->nr_programs; i++) { 6849 struct bpf_program *p = &obj->programs[i]; 6850 6851 if (!p->nr_reloc) 6852 continue; 6853 6854 qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs); 6855 } 6856 return 0; 6857 } 6858 6859 static bool insn_is_helper_call(struct bpf_insn *insn, enum bpf_func_id *func_id) 6860 { 6861 if (BPF_CLASS(insn->code) == BPF_JMP && 6862 BPF_OP(insn->code) == BPF_CALL && 6863 BPF_SRC(insn->code) == BPF_K && 6864 insn->src_reg == 0 && 6865 insn->dst_reg == 0) { 6866 *func_id = insn->imm; 6867 return true; 6868 } 6869 return false; 6870 } 6871 6872 static int bpf_object__sanitize_prog(struct bpf_object* obj, struct bpf_program *prog) 6873 { 6874 struct bpf_insn *insn = prog->insns; 6875 enum bpf_func_id func_id; 6876 int i; 6877 6878 for (i = 0; i < prog->insns_cnt; i++, insn++) { 6879 if (!insn_is_helper_call(insn, &func_id)) 6880 continue; 6881 6882 /* on kernels that don't yet support 6883 * bpf_probe_read_{kernel,user}[_str] helpers, fall back 6884 * to bpf_probe_read() which works well for old kernels 6885 */ 6886 switch (func_id) { 6887 case BPF_FUNC_probe_read_kernel: 6888 case BPF_FUNC_probe_read_user: 6889 if (!kernel_supports(FEAT_PROBE_READ_KERN)) 6890 insn->imm = BPF_FUNC_probe_read; 6891 break; 6892 case BPF_FUNC_probe_read_kernel_str: 6893 case BPF_FUNC_probe_read_user_str: 6894 if (!kernel_supports(FEAT_PROBE_READ_KERN)) 6895 insn->imm = BPF_FUNC_probe_read_str; 6896 break; 6897 default: 6898 break; 6899 } 6900 } 6901 return 0; 6902 } 6903 6904 static int 6905 load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt, 6906 char *license, __u32 kern_version, int *pfd) 6907 { 6908 struct bpf_prog_load_params load_attr = {}; 6909 char *cp, errmsg[STRERR_BUFSIZE]; 6910 size_t log_buf_size = 0; 6911 char *log_buf = NULL; 6912 int btf_fd, ret; 6913 6914 if (prog->type == BPF_PROG_TYPE_UNSPEC) { 6915 /* 6916 * The program type must be set. Most likely we couldn't find a proper 6917 * section definition at load time, and thus we didn't infer the type. 
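		 *
		 * (If the ELF section name is non-standard, the caller can still
		 * set the type explicitly before loading, e.g. with
		 * bpf_program__set_type(prog, BPF_PROG_TYPE_XDP) and, if needed,
		 * bpf_program__set_expected_attach_type(); BPF_PROG_TYPE_XDP here
		 * is just an example value.)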
6918 */ 6919 pr_warn("prog '%s': missing BPF prog type, check ELF section name '%s'\n", 6920 prog->name, prog->sec_name); 6921 return -EINVAL; 6922 } 6923 6924 if (!insns || !insns_cnt) 6925 return -EINVAL; 6926 6927 load_attr.prog_type = prog->type; 6928 /* old kernels might not support specifying expected_attach_type */ 6929 if (!kernel_supports(FEAT_EXP_ATTACH_TYPE) && prog->sec_def && 6930 prog->sec_def->is_exp_attach_type_optional) 6931 load_attr.expected_attach_type = 0; 6932 else 6933 load_attr.expected_attach_type = prog->expected_attach_type; 6934 if (kernel_supports(FEAT_PROG_NAME)) 6935 load_attr.name = prog->name; 6936 load_attr.insns = insns; 6937 load_attr.insn_cnt = insns_cnt; 6938 load_attr.license = license; 6939 load_attr.attach_btf_id = prog->attach_btf_id; 6940 if (prog->attach_prog_fd) 6941 load_attr.attach_prog_fd = prog->attach_prog_fd; 6942 else 6943 load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd; 6944 load_attr.attach_btf_id = prog->attach_btf_id; 6945 load_attr.kern_version = kern_version; 6946 load_attr.prog_ifindex = prog->prog_ifindex; 6947 6948 /* specify func_info/line_info only if kernel supports them */ 6949 btf_fd = bpf_object__btf_fd(prog->obj); 6950 if (btf_fd >= 0 && kernel_supports(FEAT_BTF_FUNC)) { 6951 load_attr.prog_btf_fd = btf_fd; 6952 load_attr.func_info = prog->func_info; 6953 load_attr.func_info_rec_size = prog->func_info_rec_size; 6954 load_attr.func_info_cnt = prog->func_info_cnt; 6955 load_attr.line_info = prog->line_info; 6956 load_attr.line_info_rec_size = prog->line_info_rec_size; 6957 load_attr.line_info_cnt = prog->line_info_cnt; 6958 } 6959 load_attr.log_level = prog->log_level; 6960 load_attr.prog_flags = prog->prog_flags; 6961 6962 retry_load: 6963 if (log_buf_size) { 6964 log_buf = malloc(log_buf_size); 6965 if (!log_buf) 6966 return -ENOMEM; 6967 6968 *log_buf = 0; 6969 } 6970 6971 load_attr.log_buf = log_buf; 6972 load_attr.log_buf_sz = log_buf_size; 6973 ret = libbpf__bpf_prog_load(&load_attr); 6974 6975 if (ret >= 0) { 6976 if (log_buf && load_attr.log_level) 6977 pr_debug("verifier log:\n%s", log_buf); 6978 6979 if (prog->obj->rodata_map_idx >= 0 && 6980 kernel_supports(FEAT_PROG_BIND_MAP)) { 6981 struct bpf_map *rodata_map = 6982 &prog->obj->maps[prog->obj->rodata_map_idx]; 6983 6984 if (bpf_prog_bind_map(ret, bpf_map__fd(rodata_map), NULL)) { 6985 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); 6986 pr_warn("prog '%s': failed to bind .rodata map: %s\n", 6987 prog->name, cp); 6988 /* Don't fail hard if can't bind rodata. */ 6989 } 6990 } 6991 6992 *pfd = ret; 6993 ret = 0; 6994 goto out; 6995 } 6996 6997 if (!log_buf || errno == ENOSPC) { 6998 log_buf_size = max((size_t)BPF_LOG_BUF_SIZE, 6999 log_buf_size << 1); 7000 7001 free(log_buf); 7002 goto retry_load; 7003 } 7004 ret = errno ? -errno : -LIBBPF_ERRNO__LOAD; 7005 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); 7006 pr_warn("load bpf program failed: %s\n", cp); 7007 pr_perm_msg(ret); 7008 7009 if (log_buf && log_buf[0] != '\0') { 7010 ret = -LIBBPF_ERRNO__VERIFY; 7011 pr_warn("-- BEGIN DUMP LOG ---\n"); 7012 pr_warn("\n%s\n", log_buf); 7013 pr_warn("-- END LOG --\n"); 7014 } else if (load_attr.insn_cnt >= BPF_MAXINSNS) { 7015 pr_warn("Program too large (%zu insns), at most %d insns\n", 7016 load_attr.insn_cnt, BPF_MAXINSNS); 7017 ret = -LIBBPF_ERRNO__PROG2BIG; 7018 } else if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) { 7019 /* Wrong program type? 
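If the requested type was wrong, the retry
		 * below will tell: load once more as BPF_PROG_TYPE_KPROBE with the
		 * log buffer disabled, and if that attempt succeeds, report
		 * -LIBBPF_ERRNO__PROGTYPE instead of a generic load error.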
*/ 7020 int fd; 7021 7022 load_attr.prog_type = BPF_PROG_TYPE_KPROBE; 7023 load_attr.expected_attach_type = 0; 7024 load_attr.log_buf = NULL; 7025 load_attr.log_buf_sz = 0; 7026 fd = libbpf__bpf_prog_load(&load_attr); 7027 if (fd >= 0) { 7028 close(fd); 7029 ret = -LIBBPF_ERRNO__PROGTYPE; 7030 goto out; 7031 } 7032 } 7033 7034 out: 7035 free(log_buf); 7036 return ret; 7037 } 7038 7039 static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd, int *btf_type_id); 7040 7041 int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver) 7042 { 7043 int err = 0, fd, i; 7044 7045 if (prog->obj->loaded) { 7046 pr_warn("prog '%s': can't load after object was loaded\n", prog->name); 7047 return -EINVAL; 7048 } 7049 7050 if ((prog->type == BPF_PROG_TYPE_TRACING || 7051 prog->type == BPF_PROG_TYPE_LSM || 7052 prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) { 7053 int btf_obj_fd = 0, btf_type_id = 0; 7054 7055 err = libbpf_find_attach_btf_id(prog, &btf_obj_fd, &btf_type_id); 7056 if (err) 7057 return err; 7058 7059 prog->attach_btf_obj_fd = btf_obj_fd; 7060 prog->attach_btf_id = btf_type_id; 7061 } 7062 7063 if (prog->instances.nr < 0 || !prog->instances.fds) { 7064 if (prog->preprocessor) { 7065 pr_warn("Internal error: can't load program '%s'\n", 7066 prog->name); 7067 return -LIBBPF_ERRNO__INTERNAL; 7068 } 7069 7070 prog->instances.fds = malloc(sizeof(int)); 7071 if (!prog->instances.fds) { 7072 pr_warn("Not enough memory for BPF fds\n"); 7073 return -ENOMEM; 7074 } 7075 prog->instances.nr = 1; 7076 prog->instances.fds[0] = -1; 7077 } 7078 7079 if (!prog->preprocessor) { 7080 if (prog->instances.nr != 1) { 7081 pr_warn("prog '%s': inconsistent nr(%d) != 1\n", 7082 prog->name, prog->instances.nr); 7083 } 7084 err = load_program(prog, prog->insns, prog->insns_cnt, 7085 license, kern_ver, &fd); 7086 if (!err) 7087 prog->instances.fds[0] = fd; 7088 goto out; 7089 } 7090 7091 for (i = 0; i < prog->instances.nr; i++) { 7092 struct bpf_prog_prep_result result; 7093 bpf_program_prep_t preprocessor = prog->preprocessor; 7094 7095 memset(&result, 0, sizeof(result)); 7096 err = preprocessor(prog, i, prog->insns, 7097 prog->insns_cnt, &result); 7098 if (err) { 7099 pr_warn("Preprocessing the %dth instance of program '%s' failed\n", 7100 i, prog->name); 7101 goto out; 7102 } 7103 7104 if (!result.new_insn_ptr || !result.new_insn_cnt) { 7105 pr_debug("Skip loading the %dth instance of program '%s'\n", 7106 i, prog->name); 7107 prog->instances.fds[i] = -1; 7108 if (result.pfd) 7109 *result.pfd = -1; 7110 continue; 7111 } 7112 7113 err = load_program(prog, result.new_insn_ptr, 7114 result.new_insn_cnt, license, kern_ver, &fd); 7115 if (err) { 7116 pr_warn("Loading the %dth instance of program '%s' failed\n", 7117 i, prog->name); 7118 goto out; 7119 } 7120 7121 if (result.pfd) 7122 *result.pfd = fd; 7123 prog->instances.fds[i] = fd; 7124 } 7125 out: 7126 if (err) 7127 pr_warn("failed to load program '%s'\n", prog->name); 7128 zfree(&prog->insns); 7129 prog->insns_cnt = 0; 7130 return err; 7131 } 7132 7133 static int 7134 bpf_object__load_progs(struct bpf_object *obj, int log_level) 7135 { 7136 struct bpf_program *prog; 7137 size_t i; 7138 int err; 7139 7140 for (i = 0; i < obj->nr_programs; i++) { 7141 prog = &obj->programs[i]; 7142 err = bpf_object__sanitize_prog(obj, prog); 7143 if (err) 7144 return err; 7145 } 7146 7147 for (i = 0; i < obj->nr_programs; i++) { 7148 prog = &obj->programs[i]; 7149 if (prog_is_subprog(obj, prog)) 7150 continue; 7151 if (!prog->load) { 
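			/* prog->load is cleared when the user opts this program out
			 * of loading, e.g. via bpf_program__set_autoload(prog, false).
			 */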
7152 pr_debug("prog '%s': skipped loading\n", prog->name); 7153 continue; 7154 } 7155 prog->log_level |= log_level; 7156 err = bpf_program__load(prog, obj->license, obj->kern_version); 7157 if (err) 7158 return err; 7159 } 7160 return 0; 7161 } 7162 7163 static const struct bpf_sec_def *find_sec_def(const char *sec_name); 7164 7165 static struct bpf_object * 7166 __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz, 7167 const struct bpf_object_open_opts *opts) 7168 { 7169 const char *obj_name, *kconfig; 7170 struct bpf_program *prog; 7171 struct bpf_object *obj; 7172 char tmp_name[64]; 7173 int err; 7174 7175 if (elf_version(EV_CURRENT) == EV_NONE) { 7176 pr_warn("failed to init libelf for %s\n", 7177 path ? : "(mem buf)"); 7178 return ERR_PTR(-LIBBPF_ERRNO__LIBELF); 7179 } 7180 7181 if (!OPTS_VALID(opts, bpf_object_open_opts)) 7182 return ERR_PTR(-EINVAL); 7183 7184 obj_name = OPTS_GET(opts, object_name, NULL); 7185 if (obj_buf) { 7186 if (!obj_name) { 7187 snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx", 7188 (unsigned long)obj_buf, 7189 (unsigned long)obj_buf_sz); 7190 obj_name = tmp_name; 7191 } 7192 path = obj_name; 7193 pr_debug("loading object '%s' from buffer\n", obj_name); 7194 } 7195 7196 obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name); 7197 if (IS_ERR(obj)) 7198 return obj; 7199 7200 kconfig = OPTS_GET(opts, kconfig, NULL); 7201 if (kconfig) { 7202 obj->kconfig = strdup(kconfig); 7203 if (!obj->kconfig) 7204 return ERR_PTR(-ENOMEM); 7205 } 7206 7207 err = bpf_object__elf_init(obj); 7208 err = err ? : bpf_object__check_endianness(obj); 7209 err = err ? : bpf_object__elf_collect(obj); 7210 err = err ? : bpf_object__collect_externs(obj); 7211 err = err ? : bpf_object__finalize_btf(obj); 7212 err = err ? : bpf_object__init_maps(obj, opts); 7213 err = err ? 
: bpf_object__collect_relos(obj); 7214 if (err) 7215 goto out; 7216 bpf_object__elf_finish(obj); 7217 7218 bpf_object__for_each_program(prog, obj) { 7219 prog->sec_def = find_sec_def(prog->sec_name); 7220 if (!prog->sec_def) { 7221 /* couldn't guess, but user might manually specify */ 7222 pr_debug("prog '%s': unrecognized ELF section name '%s'\n", 7223 prog->name, prog->sec_name); 7224 continue; 7225 } 7226 7227 if (prog->sec_def->is_sleepable) 7228 prog->prog_flags |= BPF_F_SLEEPABLE; 7229 bpf_program__set_type(prog, prog->sec_def->prog_type); 7230 bpf_program__set_expected_attach_type(prog, 7231 prog->sec_def->expected_attach_type); 7232 7233 if (prog->sec_def->prog_type == BPF_PROG_TYPE_TRACING || 7234 prog->sec_def->prog_type == BPF_PROG_TYPE_EXT) 7235 prog->attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0); 7236 } 7237 7238 return obj; 7239 out: 7240 bpf_object__close(obj); 7241 return ERR_PTR(err); 7242 } 7243 7244 static struct bpf_object * 7245 __bpf_object__open_xattr(struct bpf_object_open_attr *attr, int flags) 7246 { 7247 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, 7248 .relaxed_maps = flags & MAPS_RELAX_COMPAT, 7249 ); 7250 7251 /* param validation */ 7252 if (!attr->file) 7253 return NULL; 7254 7255 pr_debug("loading %s\n", attr->file); 7256 return __bpf_object__open(attr->file, NULL, 0, &opts); 7257 } 7258 7259 struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr) 7260 { 7261 return __bpf_object__open_xattr(attr, 0); 7262 } 7263 7264 struct bpf_object *bpf_object__open(const char *path) 7265 { 7266 struct bpf_object_open_attr attr = { 7267 .file = path, 7268 .prog_type = BPF_PROG_TYPE_UNSPEC, 7269 }; 7270 7271 return bpf_object__open_xattr(&attr); 7272 } 7273 7274 struct bpf_object * 7275 bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts) 7276 { 7277 if (!path) 7278 return ERR_PTR(-EINVAL); 7279 7280 pr_debug("loading %s\n", path); 7281 7282 return __bpf_object__open(path, NULL, 0, opts); 7283 } 7284 7285 struct bpf_object * 7286 bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz, 7287 const struct bpf_object_open_opts *opts) 7288 { 7289 if (!obj_buf || obj_buf_sz == 0) 7290 return ERR_PTR(-EINVAL); 7291 7292 return __bpf_object__open(NULL, obj_buf, obj_buf_sz, opts); 7293 } 7294 7295 struct bpf_object * 7296 bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz, 7297 const char *name) 7298 { 7299 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, 7300 .object_name = name, 7301 /* wrong default, but backwards-compatible */ 7302 .relaxed_maps = true, 7303 ); 7304 7305 /* returning NULL is wrong, but backwards-compatible */ 7306 if (!obj_buf || obj_buf_sz == 0) 7307 return NULL; 7308 7309 return bpf_object__open_mem(obj_buf, obj_buf_sz, &opts); 7310 } 7311 7312 int bpf_object__unload(struct bpf_object *obj) 7313 { 7314 size_t i; 7315 7316 if (!obj) 7317 return -EINVAL; 7318 7319 for (i = 0; i < obj->nr_maps; i++) { 7320 zclose(obj->maps[i].fd); 7321 if (obj->maps[i].st_ops) 7322 zfree(&obj->maps[i].st_ops->kern_vdata); 7323 } 7324 7325 for (i = 0; i < obj->nr_programs; i++) 7326 bpf_program__unload(&obj->programs[i]); 7327 7328 return 0; 7329 } 7330 7331 static int bpf_object__sanitize_maps(struct bpf_object *obj) 7332 { 7333 struct bpf_map *m; 7334 7335 bpf_object__for_each_map(m, obj) { 7336 if (!bpf_map__is_internal(m)) 7337 continue; 7338 if (!kernel_supports(FEAT_GLOBAL_DATA)) { 7339 pr_warn("kernel doesn't support global data\n"); 7340 return -ENOTSUP; 7341 } 7342 if 
(!kernel_supports(FEAT_ARRAY_MMAP)) 7343 m->def.map_flags ^= BPF_F_MMAPABLE; 7344 } 7345 7346 return 0; 7347 } 7348 7349 static int bpf_object__read_kallsyms_file(struct bpf_object *obj) 7350 { 7351 char sym_type, sym_name[500]; 7352 unsigned long long sym_addr; 7353 struct extern_desc *ext; 7354 int ret, err = 0; 7355 FILE *f; 7356 7357 f = fopen("/proc/kallsyms", "r"); 7358 if (!f) { 7359 err = -errno; 7360 pr_warn("failed to open /proc/kallsyms: %d\n", err); 7361 return err; 7362 } 7363 7364 while (true) { 7365 ret = fscanf(f, "%llx %c %499s%*[^\n]\n", 7366 &sym_addr, &sym_type, sym_name); 7367 if (ret == EOF && feof(f)) 7368 break; 7369 if (ret != 3) { 7370 pr_warn("failed to read kallsyms entry: %d\n", ret); 7371 err = -EINVAL; 7372 goto out; 7373 } 7374 7375 ext = find_extern_by_name(obj, sym_name); 7376 if (!ext || ext->type != EXT_KSYM) 7377 continue; 7378 7379 if (ext->is_set && ext->ksym.addr != sym_addr) { 7380 pr_warn("extern (ksym) '%s' resolution is ambiguous: 0x%llx or 0x%llx\n", 7381 sym_name, ext->ksym.addr, sym_addr); 7382 err = -EINVAL; 7383 goto out; 7384 } 7385 if (!ext->is_set) { 7386 ext->is_set = true; 7387 ext->ksym.addr = sym_addr; 7388 pr_debug("extern (ksym) %s=0x%llx\n", sym_name, sym_addr); 7389 } 7390 } 7391 7392 out: 7393 fclose(f); 7394 return err; 7395 } 7396 7397 static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj) 7398 { 7399 struct extern_desc *ext; 7400 struct btf *btf; 7401 int i, j, id, btf_fd, err; 7402 7403 for (i = 0; i < obj->nr_extern; i++) { 7404 const struct btf_type *targ_var, *targ_type; 7405 __u32 targ_type_id, local_type_id; 7406 const char *targ_var_name; 7407 int ret; 7408 7409 ext = &obj->externs[i]; 7410 if (ext->type != EXT_KSYM || !ext->ksym.type_id) 7411 continue; 7412 7413 btf = obj->btf_vmlinux; 7414 btf_fd = 0; 7415 id = btf__find_by_name_kind(btf, ext->name, BTF_KIND_VAR); 7416 if (id == -ENOENT) { 7417 err = load_module_btfs(obj); 7418 if (err) 7419 return err; 7420 7421 for (j = 0; j < obj->btf_module_cnt; j++) { 7422 btf = obj->btf_modules[j].btf; 7423 /* we assume module BTF FD is always >0 */ 7424 btf_fd = obj->btf_modules[j].fd; 7425 id = btf__find_by_name_kind(btf, ext->name, BTF_KIND_VAR); 7426 if (id != -ENOENT) 7427 break; 7428 } 7429 } 7430 if (id <= 0) { 7431 pr_warn("extern (ksym) '%s': failed to find BTF ID in kernel BTF(s).\n", 7432 ext->name); 7433 return -ESRCH; 7434 } 7435 7436 /* find local type_id */ 7437 local_type_id = ext->ksym.type_id; 7438 7439 /* find target type_id */ 7440 targ_var = btf__type_by_id(btf, id); 7441 targ_var_name = btf__name_by_offset(btf, targ_var->name_off); 7442 targ_type = skip_mods_and_typedefs(btf, targ_var->type, &targ_type_id); 7443 7444 ret = bpf_core_types_are_compat(obj->btf, local_type_id, 7445 btf, targ_type_id); 7446 if (ret <= 0) { 7447 const struct btf_type *local_type; 7448 const char *targ_name, *local_name; 7449 7450 local_type = btf__type_by_id(obj->btf, local_type_id); 7451 local_name = btf__name_by_offset(obj->btf, local_type->name_off); 7452 targ_name = btf__name_by_offset(btf, targ_type->name_off); 7453 7454 pr_warn("extern (ksym) '%s': incompatible types, expected [%d] %s %s, but kernel has [%d] %s %s\n", 7455 ext->name, local_type_id, 7456 btf_kind_str(local_type), local_name, targ_type_id, 7457 btf_kind_str(targ_type), targ_name); 7458 return -EINVAL; 7459 } 7460 7461 ext->is_set = true; 7462 ext->ksym.kernel_btf_obj_fd = btf_fd; 7463 ext->ksym.kernel_btf_id = id; 7464 pr_debug("extern (ksym) '%s': resolved to [%d] %s %s\n", 7465 ext->name, id, 
btf_kind_str(targ_var), targ_var_name); 7466 } 7467 return 0; 7468 } 7469 7470 static int bpf_object__resolve_externs(struct bpf_object *obj, 7471 const char *extra_kconfig) 7472 { 7473 bool need_config = false, need_kallsyms = false; 7474 bool need_vmlinux_btf = false; 7475 struct extern_desc *ext; 7476 void *kcfg_data = NULL; 7477 int err, i; 7478 7479 if (obj->nr_extern == 0) 7480 return 0; 7481 7482 if (obj->kconfig_map_idx >= 0) 7483 kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped; 7484 7485 for (i = 0; i < obj->nr_extern; i++) { 7486 ext = &obj->externs[i]; 7487 7488 if (ext->type == EXT_KCFG && 7489 strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) { 7490 void *ext_val = kcfg_data + ext->kcfg.data_off; 7491 __u32 kver = get_kernel_version(); 7492 7493 if (!kver) { 7494 pr_warn("failed to get kernel version\n"); 7495 return -EINVAL; 7496 } 7497 err = set_kcfg_value_num(ext, ext_val, kver); 7498 if (err) 7499 return err; 7500 pr_debug("extern (kcfg) %s=0x%x\n", ext->name, kver); 7501 } else if (ext->type == EXT_KCFG && 7502 strncmp(ext->name, "CONFIG_", 7) == 0) { 7503 need_config = true; 7504 } else if (ext->type == EXT_KSYM) { 7505 if (ext->ksym.type_id) 7506 need_vmlinux_btf = true; 7507 else 7508 need_kallsyms = true; 7509 } else { 7510 pr_warn("unrecognized extern '%s'\n", ext->name); 7511 return -EINVAL; 7512 } 7513 } 7514 if (need_config && extra_kconfig) { 7515 err = bpf_object__read_kconfig_mem(obj, extra_kconfig, kcfg_data); 7516 if (err) 7517 return -EINVAL; 7518 need_config = false; 7519 for (i = 0; i < obj->nr_extern; i++) { 7520 ext = &obj->externs[i]; 7521 if (ext->type == EXT_KCFG && !ext->is_set) { 7522 need_config = true; 7523 break; 7524 } 7525 } 7526 } 7527 if (need_config) { 7528 err = bpf_object__read_kconfig_file(obj, kcfg_data); 7529 if (err) 7530 return -EINVAL; 7531 } 7532 if (need_kallsyms) { 7533 err = bpf_object__read_kallsyms_file(obj); 7534 if (err) 7535 return -EINVAL; 7536 } 7537 if (need_vmlinux_btf) { 7538 err = bpf_object__resolve_ksyms_btf_id(obj); 7539 if (err) 7540 return -EINVAL; 7541 } 7542 for (i = 0; i < obj->nr_extern; i++) { 7543 ext = &obj->externs[i]; 7544 7545 if (!ext->is_set && !ext->is_weak) { 7546 pr_warn("extern %s (strong) not resolved\n", ext->name); 7547 return -ESRCH; 7548 } else if (!ext->is_set) { 7549 pr_debug("extern %s (weak) not resolved, defaulting to zero\n", 7550 ext->name); 7551 } 7552 } 7553 7554 return 0; 7555 } 7556 7557 int bpf_object__load_xattr(struct bpf_object_load_attr *attr) 7558 { 7559 struct bpf_object *obj; 7560 int err, i; 7561 7562 if (!attr) 7563 return -EINVAL; 7564 obj = attr->obj; 7565 if (!obj) 7566 return -EINVAL; 7567 7568 if (obj->loaded) { 7569 pr_warn("object '%s': load can't be attempted twice\n", obj->name); 7570 return -EINVAL; 7571 } 7572 7573 err = bpf_object__probe_loading(obj); 7574 err = err ? : bpf_object__load_vmlinux_btf(obj, false); 7575 err = err ? : bpf_object__resolve_externs(obj, obj->kconfig); 7576 err = err ? : bpf_object__sanitize_and_load_btf(obj); 7577 err = err ? : bpf_object__sanitize_maps(obj); 7578 err = err ? : bpf_object__init_kern_struct_ops_maps(obj); 7579 err = err ? : bpf_object__create_maps(obj); 7580 err = err ? : bpf_object__relocate(obj, attr->target_btf_path); 7581 err = err ? 
: bpf_object__load_progs(obj, attr->log_level); 7582 7583 /* clean up module BTFs */ 7584 for (i = 0; i < obj->btf_module_cnt; i++) { 7585 close(obj->btf_modules[i].fd); 7586 btf__free(obj->btf_modules[i].btf); 7587 free(obj->btf_modules[i].name); 7588 } 7589 free(obj->btf_modules); 7590 7591 /* clean up vmlinux BTF */ 7592 btf__free(obj->btf_vmlinux); 7593 obj->btf_vmlinux = NULL; 7594 7595 obj->loaded = true; /* doesn't matter if successfully or not */ 7596 7597 if (err) 7598 goto out; 7599 7600 return 0; 7601 out: 7602 /* unpin any maps that were auto-pinned during load */ 7603 for (i = 0; i < obj->nr_maps; i++) 7604 if (obj->maps[i].pinned && !obj->maps[i].reused) 7605 bpf_map__unpin(&obj->maps[i], NULL); 7606 7607 bpf_object__unload(obj); 7608 pr_warn("failed to load object '%s'\n", obj->path); 7609 return err; 7610 } 7611 7612 int bpf_object__load(struct bpf_object *obj) 7613 { 7614 struct bpf_object_load_attr attr = { 7615 .obj = obj, 7616 }; 7617 7618 return bpf_object__load_xattr(&attr); 7619 } 7620 7621 static int make_parent_dir(const char *path) 7622 { 7623 char *cp, errmsg[STRERR_BUFSIZE]; 7624 char *dname, *dir; 7625 int err = 0; 7626 7627 dname = strdup(path); 7628 if (dname == NULL) 7629 return -ENOMEM; 7630 7631 dir = dirname(dname); 7632 if (mkdir(dir, 0700) && errno != EEXIST) 7633 err = -errno; 7634 7635 free(dname); 7636 if (err) { 7637 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); 7638 pr_warn("failed to mkdir %s: %s\n", path, cp); 7639 } 7640 return err; 7641 } 7642 7643 static int check_path(const char *path) 7644 { 7645 char *cp, errmsg[STRERR_BUFSIZE]; 7646 struct statfs st_fs; 7647 char *dname, *dir; 7648 int err = 0; 7649 7650 if (path == NULL) 7651 return -EINVAL; 7652 7653 dname = strdup(path); 7654 if (dname == NULL) 7655 return -ENOMEM; 7656 7657 dir = dirname(dname); 7658 if (statfs(dir, &st_fs)) { 7659 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); 7660 pr_warn("failed to statfs %s: %s\n", dir, cp); 7661 err = -errno; 7662 } 7663 free(dname); 7664 7665 if (!err && st_fs.f_type != BPF_FS_MAGIC) { 7666 pr_warn("specified path %s is not on BPF FS\n", path); 7667 err = -EINVAL; 7668 } 7669 7670 return err; 7671 } 7672 7673 int bpf_program__pin_instance(struct bpf_program *prog, const char *path, 7674 int instance) 7675 { 7676 char *cp, errmsg[STRERR_BUFSIZE]; 7677 int err; 7678 7679 err = make_parent_dir(path); 7680 if (err) 7681 return err; 7682 7683 err = check_path(path); 7684 if (err) 7685 return err; 7686 7687 if (prog == NULL) { 7688 pr_warn("invalid program pointer\n"); 7689 return -EINVAL; 7690 } 7691 7692 if (instance < 0 || instance >= prog->instances.nr) { 7693 pr_warn("invalid prog instance %d of prog %s (max %d)\n", 7694 instance, prog->name, prog->instances.nr); 7695 return -EINVAL; 7696 } 7697 7698 if (bpf_obj_pin(prog->instances.fds[instance], path)) { 7699 err = -errno; 7700 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); 7701 pr_warn("failed to pin program: %s\n", cp); 7702 return err; 7703 } 7704 pr_debug("pinned program '%s'\n", path); 7705 7706 return 0; 7707 } 7708 7709 int bpf_program__unpin_instance(struct bpf_program *prog, const char *path, 7710 int instance) 7711 { 7712 int err; 7713 7714 err = check_path(path); 7715 if (err) 7716 return err; 7717 7718 if (prog == NULL) { 7719 pr_warn("invalid program pointer\n"); 7720 return -EINVAL; 7721 } 7722 7723 if (instance < 0 || instance >= prog->instances.nr) { 7724 pr_warn("invalid prog instance %d of prog %s (max %d)\n", 7725 instance, prog->name, prog->instances.nr); 
7726 return -EINVAL; 7727 } 7728 7729 err = unlink(path); 7730 if (err != 0) 7731 return -errno; 7732 pr_debug("unpinned program '%s'\n", path); 7733 7734 return 0; 7735 } 7736 7737 int bpf_program__pin(struct bpf_program *prog, const char *path) 7738 { 7739 int i, err; 7740 7741 err = make_parent_dir(path); 7742 if (err) 7743 return err; 7744 7745 err = check_path(path); 7746 if (err) 7747 return err; 7748 7749 if (prog == NULL) { 7750 pr_warn("invalid program pointer\n"); 7751 return -EINVAL; 7752 } 7753 7754 if (prog->instances.nr <= 0) { 7755 pr_warn("no instances of prog %s to pin\n", prog->name); 7756 return -EINVAL; 7757 } 7758 7759 if (prog->instances.nr == 1) { 7760 /* don't create subdirs when pinning single instance */ 7761 return bpf_program__pin_instance(prog, path, 0); 7762 } 7763 7764 for (i = 0; i < prog->instances.nr; i++) { 7765 char buf[PATH_MAX]; 7766 int len; 7767 7768 len = snprintf(buf, PATH_MAX, "%s/%d", path, i); 7769 if (len < 0) { 7770 err = -EINVAL; 7771 goto err_unpin; 7772 } else if (len >= PATH_MAX) { 7773 err = -ENAMETOOLONG; 7774 goto err_unpin; 7775 } 7776 7777 err = bpf_program__pin_instance(prog, buf, i); 7778 if (err) 7779 goto err_unpin; 7780 } 7781 7782 return 0; 7783 7784 err_unpin: 7785 for (i = i - 1; i >= 0; i--) { 7786 char buf[PATH_MAX]; 7787 int len; 7788 7789 len = snprintf(buf, PATH_MAX, "%s/%d", path, i); 7790 if (len < 0) 7791 continue; 7792 else if (len >= PATH_MAX) 7793 continue; 7794 7795 bpf_program__unpin_instance(prog, buf, i); 7796 } 7797 7798 rmdir(path); 7799 7800 return err; 7801 } 7802 7803 int bpf_program__unpin(struct bpf_program *prog, const char *path) 7804 { 7805 int i, err; 7806 7807 err = check_path(path); 7808 if (err) 7809 return err; 7810 7811 if (prog == NULL) { 7812 pr_warn("invalid program pointer\n"); 7813 return -EINVAL; 7814 } 7815 7816 if (prog->instances.nr <= 0) { 7817 pr_warn("no instances of prog %s to pin\n", prog->name); 7818 return -EINVAL; 7819 } 7820 7821 if (prog->instances.nr == 1) { 7822 /* don't create subdirs when pinning single instance */ 7823 return bpf_program__unpin_instance(prog, path, 0); 7824 } 7825 7826 for (i = 0; i < prog->instances.nr; i++) { 7827 char buf[PATH_MAX]; 7828 int len; 7829 7830 len = snprintf(buf, PATH_MAX, "%s/%d", path, i); 7831 if (len < 0) 7832 return -EINVAL; 7833 else if (len >= PATH_MAX) 7834 return -ENAMETOOLONG; 7835 7836 err = bpf_program__unpin_instance(prog, buf, i); 7837 if (err) 7838 return err; 7839 } 7840 7841 err = rmdir(path); 7842 if (err) 7843 return -errno; 7844 7845 return 0; 7846 } 7847 7848 int bpf_map__pin(struct bpf_map *map, const char *path) 7849 { 7850 char *cp, errmsg[STRERR_BUFSIZE]; 7851 int err; 7852 7853 if (map == NULL) { 7854 pr_warn("invalid map pointer\n"); 7855 return -EINVAL; 7856 } 7857 7858 if (map->pin_path) { 7859 if (path && strcmp(path, map->pin_path)) { 7860 pr_warn("map '%s' already has pin path '%s' different from '%s'\n", 7861 bpf_map__name(map), map->pin_path, path); 7862 return -EINVAL; 7863 } else if (map->pinned) { 7864 pr_debug("map '%s' already pinned at '%s'; not re-pinning\n", 7865 bpf_map__name(map), map->pin_path); 7866 return 0; 7867 } 7868 } else { 7869 if (!path) { 7870 pr_warn("missing a path to pin map '%s' at\n", 7871 bpf_map__name(map)); 7872 return -EINVAL; 7873 } else if (map->pinned) { 7874 pr_warn("map '%s' already pinned\n", bpf_map__name(map)); 7875 return -EEXIST; 7876 } 7877 7878 map->pin_path = strdup(path); 7879 if (!map->pin_path) { 7880 err = -errno; 7881 goto out_err; 7882 } 7883 } 7884 7885 err 
= make_parent_dir(map->pin_path); 7886 if (err) 7887 return err; 7888 7889 err = check_path(map->pin_path); 7890 if (err) 7891 return err; 7892 7893 if (bpf_obj_pin(map->fd, map->pin_path)) { 7894 err = -errno; 7895 goto out_err; 7896 } 7897 7898 map->pinned = true; 7899 pr_debug("pinned map '%s'\n", map->pin_path); 7900 7901 return 0; 7902 7903 out_err: 7904 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); 7905 pr_warn("failed to pin map: %s\n", cp); 7906 return err; 7907 } 7908 7909 int bpf_map__unpin(struct bpf_map *map, const char *path) 7910 { 7911 int err; 7912 7913 if (map == NULL) { 7914 pr_warn("invalid map pointer\n"); 7915 return -EINVAL; 7916 } 7917 7918 if (map->pin_path) { 7919 if (path && strcmp(path, map->pin_path)) { 7920 pr_warn("map '%s' already has pin path '%s' different from '%s'\n", 7921 bpf_map__name(map), map->pin_path, path); 7922 return -EINVAL; 7923 } 7924 path = map->pin_path; 7925 } else if (!path) { 7926 pr_warn("no path to unpin map '%s' from\n", 7927 bpf_map__name(map)); 7928 return -EINVAL; 7929 } 7930 7931 err = check_path(path); 7932 if (err) 7933 return err; 7934 7935 err = unlink(path); 7936 if (err != 0) 7937 return -errno; 7938 7939 map->pinned = false; 7940 pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path); 7941 7942 return 0; 7943 } 7944 7945 int bpf_map__set_pin_path(struct bpf_map *map, const char *path) 7946 { 7947 char *new = NULL; 7948 7949 if (path) { 7950 new = strdup(path); 7951 if (!new) 7952 return -errno; 7953 } 7954 7955 free(map->pin_path); 7956 map->pin_path = new; 7957 return 0; 7958 } 7959 7960 const char *bpf_map__get_pin_path(const struct bpf_map *map) 7961 { 7962 return map->pin_path; 7963 } 7964 7965 bool bpf_map__is_pinned(const struct bpf_map *map) 7966 { 7967 return map->pinned; 7968 } 7969 7970 static void sanitize_pin_path(char *s) 7971 { 7972 /* bpffs disallows periods in path names */ 7973 while (*s) { 7974 if (*s == '.') 7975 *s = '_'; 7976 s++; 7977 } 7978 } 7979 7980 int bpf_object__pin_maps(struct bpf_object *obj, const char *path) 7981 { 7982 struct bpf_map *map; 7983 int err; 7984 7985 if (!obj) 7986 return -ENOENT; 7987 7988 if (!obj->loaded) { 7989 pr_warn("object not yet loaded; load it first\n"); 7990 return -ENOENT; 7991 } 7992 7993 bpf_object__for_each_map(map, obj) { 7994 char *pin_path = NULL; 7995 char buf[PATH_MAX]; 7996 7997 if (path) { 7998 int len; 7999 8000 len = snprintf(buf, PATH_MAX, "%s/%s", path, 8001 bpf_map__name(map)); 8002 if (len < 0) { 8003 err = -EINVAL; 8004 goto err_unpin_maps; 8005 } else if (len >= PATH_MAX) { 8006 err = -ENAMETOOLONG; 8007 goto err_unpin_maps; 8008 } 8009 sanitize_pin_path(buf); 8010 pin_path = buf; 8011 } else if (!map->pin_path) { 8012 continue; 8013 } 8014 8015 err = bpf_map__pin(map, pin_path); 8016 if (err) 8017 goto err_unpin_maps; 8018 } 8019 8020 return 0; 8021 8022 err_unpin_maps: 8023 while ((map = bpf_map__prev(map, obj))) { 8024 if (!map->pin_path) 8025 continue; 8026 8027 bpf_map__unpin(map, NULL); 8028 } 8029 8030 return err; 8031 } 8032 8033 int bpf_object__unpin_maps(struct bpf_object *obj, const char *path) 8034 { 8035 struct bpf_map *map; 8036 int err; 8037 8038 if (!obj) 8039 return -ENOENT; 8040 8041 bpf_object__for_each_map(map, obj) { 8042 char *pin_path = NULL; 8043 char buf[PATH_MAX]; 8044 8045 if (path) { 8046 int len; 8047 8048 len = snprintf(buf, PATH_MAX, "%s/%s", path, 8049 bpf_map__name(map)); 8050 if (len < 0) 8051 return -EINVAL; 8052 else if (len >= PATH_MAX) 8053 return -ENAMETOOLONG; 8054 sanitize_pin_path(buf); 
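			/* unpin from the same sanitized "<path>/<map name>"
			 * location that bpf_object__pin_maps() pins to, so the
			 * two calls stay symmetric when an explicit path is given
			 */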
8055 pin_path = buf; 8056 } else if (!map->pin_path) { 8057 continue; 8058 } 8059 8060 err = bpf_map__unpin(map, pin_path); 8061 if (err) 8062 return err; 8063 } 8064 8065 return 0; 8066 } 8067 8068 int bpf_object__pin_programs(struct bpf_object *obj, const char *path) 8069 { 8070 struct bpf_program *prog; 8071 int err; 8072 8073 if (!obj) 8074 return -ENOENT; 8075 8076 if (!obj->loaded) { 8077 pr_warn("object not yet loaded; load it first\n"); 8078 return -ENOENT; 8079 } 8080 8081 bpf_object__for_each_program(prog, obj) { 8082 char buf[PATH_MAX]; 8083 int len; 8084 8085 len = snprintf(buf, PATH_MAX, "%s/%s", path, 8086 prog->pin_name); 8087 if (len < 0) { 8088 err = -EINVAL; 8089 goto err_unpin_programs; 8090 } else if (len >= PATH_MAX) { 8091 err = -ENAMETOOLONG; 8092 goto err_unpin_programs; 8093 } 8094 8095 err = bpf_program__pin(prog, buf); 8096 if (err) 8097 goto err_unpin_programs; 8098 } 8099 8100 return 0; 8101 8102 err_unpin_programs: 8103 while ((prog = bpf_program__prev(prog, obj))) { 8104 char buf[PATH_MAX]; 8105 int len; 8106 8107 len = snprintf(buf, PATH_MAX, "%s/%s", path, 8108 prog->pin_name); 8109 if (len < 0) 8110 continue; 8111 else if (len >= PATH_MAX) 8112 continue; 8113 8114 bpf_program__unpin(prog, buf); 8115 } 8116 8117 return err; 8118 } 8119 8120 int bpf_object__unpin_programs(struct bpf_object *obj, const char *path) 8121 { 8122 struct bpf_program *prog; 8123 int err; 8124 8125 if (!obj) 8126 return -ENOENT; 8127 8128 bpf_object__for_each_program(prog, obj) { 8129 char buf[PATH_MAX]; 8130 int len; 8131 8132 len = snprintf(buf, PATH_MAX, "%s/%s", path, 8133 prog->pin_name); 8134 if (len < 0) 8135 return -EINVAL; 8136 else if (len >= PATH_MAX) 8137 return -ENAMETOOLONG; 8138 8139 err = bpf_program__unpin(prog, buf); 8140 if (err) 8141 return err; 8142 } 8143 8144 return 0; 8145 } 8146 8147 int bpf_object__pin(struct bpf_object *obj, const char *path) 8148 { 8149 int err; 8150 8151 err = bpf_object__pin_maps(obj, path); 8152 if (err) 8153 return err; 8154 8155 err = bpf_object__pin_programs(obj, path); 8156 if (err) { 8157 bpf_object__unpin_maps(obj, path); 8158 return err; 8159 } 8160 8161 return 0; 8162 } 8163 8164 static void bpf_map__destroy(struct bpf_map *map) 8165 { 8166 if (map->clear_priv) 8167 map->clear_priv(map, map->priv); 8168 map->priv = NULL; 8169 map->clear_priv = NULL; 8170 8171 if (map->inner_map) { 8172 bpf_map__destroy(map->inner_map); 8173 zfree(&map->inner_map); 8174 } 8175 8176 zfree(&map->init_slots); 8177 map->init_slots_sz = 0; 8178 8179 if (map->mmaped) { 8180 munmap(map->mmaped, bpf_map_mmap_sz(map)); 8181 map->mmaped = NULL; 8182 } 8183 8184 if (map->st_ops) { 8185 zfree(&map->st_ops->data); 8186 zfree(&map->st_ops->progs); 8187 zfree(&map->st_ops->kern_func_off); 8188 zfree(&map->st_ops); 8189 } 8190 8191 zfree(&map->name); 8192 zfree(&map->pin_path); 8193 8194 if (map->fd >= 0) 8195 zclose(map->fd); 8196 } 8197 8198 void bpf_object__close(struct bpf_object *obj) 8199 { 8200 size_t i; 8201 8202 if (IS_ERR_OR_NULL(obj)) 8203 return; 8204 8205 if (obj->clear_priv) 8206 obj->clear_priv(obj, obj->priv); 8207 8208 bpf_object__elf_finish(obj); 8209 bpf_object__unload(obj); 8210 btf__free(obj->btf); 8211 btf_ext__free(obj->btf_ext); 8212 8213 for (i = 0; i < obj->nr_maps; i++) 8214 bpf_map__destroy(&obj->maps[i]); 8215 8216 zfree(&obj->kconfig); 8217 zfree(&obj->externs); 8218 obj->nr_extern = 0; 8219 8220 zfree(&obj->maps); 8221 obj->nr_maps = 0; 8222 8223 if (obj->programs && obj->nr_programs) { 8224 for (i = 0; i < obj->nr_programs; i++) 
8225 bpf_program__exit(&obj->programs[i]); 8226 } 8227 zfree(&obj->programs); 8228 8229 list_del(&obj->list); 8230 free(obj); 8231 } 8232 8233 struct bpf_object * 8234 bpf_object__next(struct bpf_object *prev) 8235 { 8236 struct bpf_object *next; 8237 8238 if (!prev) 8239 next = list_first_entry(&bpf_objects_list, 8240 struct bpf_object, 8241 list); 8242 else 8243 next = list_next_entry(prev, list); 8244 8245 /* Empty list is noticed here so don't need checking on entry. */ 8246 if (&next->list == &bpf_objects_list) 8247 return NULL; 8248 8249 return next; 8250 } 8251 8252 const char *bpf_object__name(const struct bpf_object *obj) 8253 { 8254 return obj ? obj->name : ERR_PTR(-EINVAL); 8255 } 8256 8257 unsigned int bpf_object__kversion(const struct bpf_object *obj) 8258 { 8259 return obj ? obj->kern_version : 0; 8260 } 8261 8262 struct btf *bpf_object__btf(const struct bpf_object *obj) 8263 { 8264 return obj ? obj->btf : NULL; 8265 } 8266 8267 int bpf_object__btf_fd(const struct bpf_object *obj) 8268 { 8269 return obj->btf ? btf__fd(obj->btf) : -1; 8270 } 8271 8272 int bpf_object__set_priv(struct bpf_object *obj, void *priv, 8273 bpf_object_clear_priv_t clear_priv) 8274 { 8275 if (obj->priv && obj->clear_priv) 8276 obj->clear_priv(obj, obj->priv); 8277 8278 obj->priv = priv; 8279 obj->clear_priv = clear_priv; 8280 return 0; 8281 } 8282 8283 void *bpf_object__priv(const struct bpf_object *obj) 8284 { 8285 return obj ? obj->priv : ERR_PTR(-EINVAL); 8286 } 8287 8288 static struct bpf_program * 8289 __bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj, 8290 bool forward) 8291 { 8292 size_t nr_programs = obj->nr_programs; 8293 ssize_t idx; 8294 8295 if (!nr_programs) 8296 return NULL; 8297 8298 if (!p) 8299 /* Iter from the beginning */ 8300 return forward ? &obj->programs[0] : 8301 &obj->programs[nr_programs - 1]; 8302 8303 if (p->obj != obj) { 8304 pr_warn("error: program handler doesn't match object\n"); 8305 return NULL; 8306 } 8307 8308 idx = (p - obj->programs) + (forward ? 1 : -1); 8309 if (idx >= obj->nr_programs || idx < 0) 8310 return NULL; 8311 return &obj->programs[idx]; 8312 } 8313 8314 struct bpf_program * 8315 bpf_program__next(struct bpf_program *prev, const struct bpf_object *obj) 8316 { 8317 struct bpf_program *prog = prev; 8318 8319 do { 8320 prog = __bpf_program__iter(prog, obj, true); 8321 } while (prog && prog_is_subprog(obj, prog)); 8322 8323 return prog; 8324 } 8325 8326 struct bpf_program * 8327 bpf_program__prev(struct bpf_program *next, const struct bpf_object *obj) 8328 { 8329 struct bpf_program *prog = next; 8330 8331 do { 8332 prog = __bpf_program__iter(prog, obj, false); 8333 } while (prog && prog_is_subprog(obj, prog)); 8334 8335 return prog; 8336 } 8337 8338 int bpf_program__set_priv(struct bpf_program *prog, void *priv, 8339 bpf_program_clear_priv_t clear_priv) 8340 { 8341 if (prog->priv && prog->clear_priv) 8342 prog->clear_priv(prog, prog->priv); 8343 8344 prog->priv = priv; 8345 prog->clear_priv = clear_priv; 8346 return 0; 8347 } 8348 8349 void *bpf_program__priv(const struct bpf_program *prog) 8350 { 8351 return prog ? 
prog->priv : ERR_PTR(-EINVAL); 8352 } 8353 8354 void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex) 8355 { 8356 prog->prog_ifindex = ifindex; 8357 } 8358 8359 const char *bpf_program__name(const struct bpf_program *prog) 8360 { 8361 return prog->name; 8362 } 8363 8364 const char *bpf_program__section_name(const struct bpf_program *prog) 8365 { 8366 return prog->sec_name; 8367 } 8368 8369 const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy) 8370 { 8371 const char *title; 8372 8373 title = prog->sec_name; 8374 if (needs_copy) { 8375 title = strdup(title); 8376 if (!title) { 8377 pr_warn("failed to strdup program title\n"); 8378 return ERR_PTR(-ENOMEM); 8379 } 8380 } 8381 8382 return title; 8383 } 8384 8385 bool bpf_program__autoload(const struct bpf_program *prog) 8386 { 8387 return prog->load; 8388 } 8389 8390 int bpf_program__set_autoload(struct bpf_program *prog, bool autoload) 8391 { 8392 if (prog->obj->loaded) 8393 return -EINVAL; 8394 8395 prog->load = autoload; 8396 return 0; 8397 } 8398 8399 int bpf_program__fd(const struct bpf_program *prog) 8400 { 8401 return bpf_program__nth_fd(prog, 0); 8402 } 8403 8404 size_t bpf_program__size(const struct bpf_program *prog) 8405 { 8406 return prog->insns_cnt * BPF_INSN_SZ; 8407 } 8408 8409 int bpf_program__set_prep(struct bpf_program *prog, int nr_instances, 8410 bpf_program_prep_t prep) 8411 { 8412 int *instances_fds; 8413 8414 if (nr_instances <= 0 || !prep) 8415 return -EINVAL; 8416 8417 if (prog->instances.nr > 0 || prog->instances.fds) { 8418 pr_warn("Can't set pre-processor after loading\n"); 8419 return -EINVAL; 8420 } 8421 8422 instances_fds = malloc(sizeof(int) * nr_instances); 8423 if (!instances_fds) { 8424 pr_warn("alloc memory failed for fds\n"); 8425 return -ENOMEM; 8426 } 8427 8428 /* fill all fd with -1 */ 8429 memset(instances_fds, -1, sizeof(int) * nr_instances); 8430 8431 prog->instances.nr = nr_instances; 8432 prog->instances.fds = instances_fds; 8433 prog->preprocessor = prep; 8434 return 0; 8435 } 8436 8437 int bpf_program__nth_fd(const struct bpf_program *prog, int n) 8438 { 8439 int fd; 8440 8441 if (!prog) 8442 return -EINVAL; 8443 8444 if (n >= prog->instances.nr || n < 0) { 8445 pr_warn("Can't get the %dth fd from program %s: only %d instances\n", 8446 n, prog->name, prog->instances.nr); 8447 return -EINVAL; 8448 } 8449 8450 fd = prog->instances.fds[n]; 8451 if (fd < 0) { 8452 pr_warn("%dth instance of program '%s' is invalid\n", 8453 n, prog->name); 8454 return -ENOENT; 8455 } 8456 8457 return fd; 8458 } 8459 8460 enum bpf_prog_type bpf_program__get_type(struct bpf_program *prog) 8461 { 8462 return prog->type; 8463 } 8464 8465 void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type) 8466 { 8467 prog->type = type; 8468 } 8469 8470 static bool bpf_program__is_type(const struct bpf_program *prog, 8471 enum bpf_prog_type type) 8472 { 8473 return prog ? 
(prog->type == type) : false; 8474 } 8475 8476 #define BPF_PROG_TYPE_FNS(NAME, TYPE) \ 8477 int bpf_program__set_##NAME(struct bpf_program *prog) \ 8478 { \ 8479 if (!prog) \ 8480 return -EINVAL; \ 8481 bpf_program__set_type(prog, TYPE); \ 8482 return 0; \ 8483 } \ 8484 \ 8485 bool bpf_program__is_##NAME(const struct bpf_program *prog) \ 8486 { \ 8487 return bpf_program__is_type(prog, TYPE); \ 8488 } \ 8489 8490 BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER); 8491 BPF_PROG_TYPE_FNS(lsm, BPF_PROG_TYPE_LSM); 8492 BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE); 8493 BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS); 8494 BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT); 8495 BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT); 8496 BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT); 8497 BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP); 8498 BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT); 8499 BPF_PROG_TYPE_FNS(tracing, BPF_PROG_TYPE_TRACING); 8500 BPF_PROG_TYPE_FNS(struct_ops, BPF_PROG_TYPE_STRUCT_OPS); 8501 BPF_PROG_TYPE_FNS(extension, BPF_PROG_TYPE_EXT); 8502 BPF_PROG_TYPE_FNS(sk_lookup, BPF_PROG_TYPE_SK_LOOKUP); 8503 8504 enum bpf_attach_type 8505 bpf_program__get_expected_attach_type(struct bpf_program *prog) 8506 { 8507 return prog->expected_attach_type; 8508 } 8509 8510 void bpf_program__set_expected_attach_type(struct bpf_program *prog, 8511 enum bpf_attach_type type) 8512 { 8513 prog->expected_attach_type = type; 8514 } 8515 8516 #define BPF_PROG_SEC_IMPL(string, ptype, eatype, eatype_optional, \ 8517 attachable, attach_btf) \ 8518 { \ 8519 .sec = string, \ 8520 .len = sizeof(string) - 1, \ 8521 .prog_type = ptype, \ 8522 .expected_attach_type = eatype, \ 8523 .is_exp_attach_type_optional = eatype_optional, \ 8524 .is_attachable = attachable, \ 8525 .is_attach_btf = attach_btf, \ 8526 } 8527 8528 /* Programs that can NOT be attached. */ 8529 #define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0, 0) 8530 8531 /* Programs that can be attached. */ 8532 #define BPF_APROG_SEC(string, ptype, atype) \ 8533 BPF_PROG_SEC_IMPL(string, ptype, atype, true, 1, 0) 8534 8535 /* Programs that must specify expected attach type at load time. */ 8536 #define BPF_EAPROG_SEC(string, ptype, eatype) \ 8537 BPF_PROG_SEC_IMPL(string, ptype, eatype, false, 1, 0) 8538 8539 /* Programs that use BTF to identify attach point */ 8540 #define BPF_PROG_BTF(string, ptype, eatype) \ 8541 BPF_PROG_SEC_IMPL(string, ptype, eatype, false, 0, 1) 8542 8543 /* Programs that can be attached but attach type can't be identified by section 8544 * name. Kept for backward compatibility. 8545 */ 8546 #define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype) 8547 8548 #define SEC_DEF(sec_pfx, ptype, ...) 
{ \ 8549 .sec = sec_pfx, \ 8550 .len = sizeof(sec_pfx) - 1, \ 8551 .prog_type = BPF_PROG_TYPE_##ptype, \ 8552 __VA_ARGS__ \ 8553 } 8554 8555 static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec, 8556 struct bpf_program *prog); 8557 static struct bpf_link *attach_tp(const struct bpf_sec_def *sec, 8558 struct bpf_program *prog); 8559 static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec, 8560 struct bpf_program *prog); 8561 static struct bpf_link *attach_trace(const struct bpf_sec_def *sec, 8562 struct bpf_program *prog); 8563 static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec, 8564 struct bpf_program *prog); 8565 static struct bpf_link *attach_iter(const struct bpf_sec_def *sec, 8566 struct bpf_program *prog); 8567 8568 static const struct bpf_sec_def section_defs[] = { 8569 BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER), 8570 BPF_PROG_SEC("sk_reuseport", BPF_PROG_TYPE_SK_REUSEPORT), 8571 SEC_DEF("kprobe/", KPROBE, 8572 .attach_fn = attach_kprobe), 8573 BPF_PROG_SEC("uprobe/", BPF_PROG_TYPE_KPROBE), 8574 SEC_DEF("kretprobe/", KPROBE, 8575 .attach_fn = attach_kprobe), 8576 BPF_PROG_SEC("uretprobe/", BPF_PROG_TYPE_KPROBE), 8577 BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS), 8578 BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT), 8579 SEC_DEF("tracepoint/", TRACEPOINT, 8580 .attach_fn = attach_tp), 8581 SEC_DEF("tp/", TRACEPOINT, 8582 .attach_fn = attach_tp), 8583 SEC_DEF("raw_tracepoint/", RAW_TRACEPOINT, 8584 .attach_fn = attach_raw_tp), 8585 SEC_DEF("raw_tp/", RAW_TRACEPOINT, 8586 .attach_fn = attach_raw_tp), 8587 SEC_DEF("tp_btf/", TRACING, 8588 .expected_attach_type = BPF_TRACE_RAW_TP, 8589 .is_attach_btf = true, 8590 .attach_fn = attach_trace), 8591 SEC_DEF("fentry/", TRACING, 8592 .expected_attach_type = BPF_TRACE_FENTRY, 8593 .is_attach_btf = true, 8594 .attach_fn = attach_trace), 8595 SEC_DEF("fmod_ret/", TRACING, 8596 .expected_attach_type = BPF_MODIFY_RETURN, 8597 .is_attach_btf = true, 8598 .attach_fn = attach_trace), 8599 SEC_DEF("fexit/", TRACING, 8600 .expected_attach_type = BPF_TRACE_FEXIT, 8601 .is_attach_btf = true, 8602 .attach_fn = attach_trace), 8603 SEC_DEF("fentry.s/", TRACING, 8604 .expected_attach_type = BPF_TRACE_FENTRY, 8605 .is_attach_btf = true, 8606 .is_sleepable = true, 8607 .attach_fn = attach_trace), 8608 SEC_DEF("fmod_ret.s/", TRACING, 8609 .expected_attach_type = BPF_MODIFY_RETURN, 8610 .is_attach_btf = true, 8611 .is_sleepable = true, 8612 .attach_fn = attach_trace), 8613 SEC_DEF("fexit.s/", TRACING, 8614 .expected_attach_type = BPF_TRACE_FEXIT, 8615 .is_attach_btf = true, 8616 .is_sleepable = true, 8617 .attach_fn = attach_trace), 8618 SEC_DEF("freplace/", EXT, 8619 .is_attach_btf = true, 8620 .attach_fn = attach_trace), 8621 SEC_DEF("lsm/", LSM, 8622 .is_attach_btf = true, 8623 .expected_attach_type = BPF_LSM_MAC, 8624 .attach_fn = attach_lsm), 8625 SEC_DEF("lsm.s/", LSM, 8626 .is_attach_btf = true, 8627 .is_sleepable = true, 8628 .expected_attach_type = BPF_LSM_MAC, 8629 .attach_fn = attach_lsm), 8630 SEC_DEF("iter/", TRACING, 8631 .expected_attach_type = BPF_TRACE_ITER, 8632 .is_attach_btf = true, 8633 .attach_fn = attach_iter), 8634 BPF_EAPROG_SEC("xdp_devmap/", BPF_PROG_TYPE_XDP, 8635 BPF_XDP_DEVMAP), 8636 BPF_EAPROG_SEC("xdp_cpumap/", BPF_PROG_TYPE_XDP, 8637 BPF_XDP_CPUMAP), 8638 BPF_APROG_SEC("xdp", BPF_PROG_TYPE_XDP, 8639 BPF_XDP), 8640 BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT), 8641 BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN), 8642 BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT), 8643 
BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT), 8644 BPF_PROG_SEC("lwt_seg6local", BPF_PROG_TYPE_LWT_SEG6LOCAL), 8645 BPF_APROG_SEC("cgroup_skb/ingress", BPF_PROG_TYPE_CGROUP_SKB, 8646 BPF_CGROUP_INET_INGRESS), 8647 BPF_APROG_SEC("cgroup_skb/egress", BPF_PROG_TYPE_CGROUP_SKB, 8648 BPF_CGROUP_INET_EGRESS), 8649 BPF_APROG_COMPAT("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB), 8650 BPF_EAPROG_SEC("cgroup/sock_create", BPF_PROG_TYPE_CGROUP_SOCK, 8651 BPF_CGROUP_INET_SOCK_CREATE), 8652 BPF_EAPROG_SEC("cgroup/sock_release", BPF_PROG_TYPE_CGROUP_SOCK, 8653 BPF_CGROUP_INET_SOCK_RELEASE), 8654 BPF_APROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK, 8655 BPF_CGROUP_INET_SOCK_CREATE), 8656 BPF_EAPROG_SEC("cgroup/post_bind4", BPF_PROG_TYPE_CGROUP_SOCK, 8657 BPF_CGROUP_INET4_POST_BIND), 8658 BPF_EAPROG_SEC("cgroup/post_bind6", BPF_PROG_TYPE_CGROUP_SOCK, 8659 BPF_CGROUP_INET6_POST_BIND), 8660 BPF_APROG_SEC("cgroup/dev", BPF_PROG_TYPE_CGROUP_DEVICE, 8661 BPF_CGROUP_DEVICE), 8662 BPF_APROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS, 8663 BPF_CGROUP_SOCK_OPS), 8664 BPF_APROG_SEC("sk_skb/stream_parser", BPF_PROG_TYPE_SK_SKB, 8665 BPF_SK_SKB_STREAM_PARSER), 8666 BPF_APROG_SEC("sk_skb/stream_verdict", BPF_PROG_TYPE_SK_SKB, 8667 BPF_SK_SKB_STREAM_VERDICT), 8668 BPF_APROG_COMPAT("sk_skb", BPF_PROG_TYPE_SK_SKB), 8669 BPF_APROG_SEC("sk_msg", BPF_PROG_TYPE_SK_MSG, 8670 BPF_SK_MSG_VERDICT), 8671 BPF_APROG_SEC("lirc_mode2", BPF_PROG_TYPE_LIRC_MODE2, 8672 BPF_LIRC_MODE2), 8673 BPF_APROG_SEC("flow_dissector", BPF_PROG_TYPE_FLOW_DISSECTOR, 8674 BPF_FLOW_DISSECTOR), 8675 BPF_EAPROG_SEC("cgroup/bind4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, 8676 BPF_CGROUP_INET4_BIND), 8677 BPF_EAPROG_SEC("cgroup/bind6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, 8678 BPF_CGROUP_INET6_BIND), 8679 BPF_EAPROG_SEC("cgroup/connect4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, 8680 BPF_CGROUP_INET4_CONNECT), 8681 BPF_EAPROG_SEC("cgroup/connect6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, 8682 BPF_CGROUP_INET6_CONNECT), 8683 BPF_EAPROG_SEC("cgroup/sendmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, 8684 BPF_CGROUP_UDP4_SENDMSG), 8685 BPF_EAPROG_SEC("cgroup/sendmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, 8686 BPF_CGROUP_UDP6_SENDMSG), 8687 BPF_EAPROG_SEC("cgroup/recvmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, 8688 BPF_CGROUP_UDP4_RECVMSG), 8689 BPF_EAPROG_SEC("cgroup/recvmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, 8690 BPF_CGROUP_UDP6_RECVMSG), 8691 BPF_EAPROG_SEC("cgroup/getpeername4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, 8692 BPF_CGROUP_INET4_GETPEERNAME), 8693 BPF_EAPROG_SEC("cgroup/getpeername6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, 8694 BPF_CGROUP_INET6_GETPEERNAME), 8695 BPF_EAPROG_SEC("cgroup/getsockname4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, 8696 BPF_CGROUP_INET4_GETSOCKNAME), 8697 BPF_EAPROG_SEC("cgroup/getsockname6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, 8698 BPF_CGROUP_INET6_GETSOCKNAME), 8699 BPF_EAPROG_SEC("cgroup/sysctl", BPF_PROG_TYPE_CGROUP_SYSCTL, 8700 BPF_CGROUP_SYSCTL), 8701 BPF_EAPROG_SEC("cgroup/getsockopt", BPF_PROG_TYPE_CGROUP_SOCKOPT, 8702 BPF_CGROUP_GETSOCKOPT), 8703 BPF_EAPROG_SEC("cgroup/setsockopt", BPF_PROG_TYPE_CGROUP_SOCKOPT, 8704 BPF_CGROUP_SETSOCKOPT), 8705 BPF_PROG_SEC("struct_ops", BPF_PROG_TYPE_STRUCT_OPS), 8706 BPF_EAPROG_SEC("sk_lookup/", BPF_PROG_TYPE_SK_LOOKUP, 8707 BPF_SK_LOOKUP), 8708 }; 8709 8710 #undef BPF_PROG_SEC_IMPL 8711 #undef BPF_PROG_SEC 8712 #undef BPF_APROG_SEC 8713 #undef BPF_EAPROG_SEC 8714 #undef BPF_APROG_COMPAT 8715 #undef SEC_DEF 8716 8717 #define MAX_TYPE_NAME_SIZE 32 8718 8719 static const struct bpf_sec_def *find_sec_def(const char *sec_name) 8720 { 8721 int i, n = 
ARRAY_SIZE(section_defs);

	for (i = 0; i < n; i++) {
		if (strncmp(sec_name,
			    section_defs[i].sec, section_defs[i].len))
			continue;
		return &section_defs[i];
	}
	return NULL;
}

static char *libbpf_get_type_names(bool attach_type)
{
	int i, len = ARRAY_SIZE(section_defs) * MAX_TYPE_NAME_SIZE;
	char *buf;

	buf = malloc(len);
	if (!buf)
		return NULL;

	buf[0] = '\0';
	/* Forge string buf with all available names */
	for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
		if (attach_type && !section_defs[i].is_attachable)
			continue;

		if (strlen(buf) + strlen(section_defs[i].sec) + 2 > len) {
			free(buf);
			return NULL;
		}
		strcat(buf, " ");
		strcat(buf, section_defs[i].sec);
	}

	return buf;
}

int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
			     enum bpf_attach_type *expected_attach_type)
{
	const struct bpf_sec_def *sec_def;
	char *type_names;

	if (!name)
		return -EINVAL;

	sec_def = find_sec_def(name);
	if (sec_def) {
		*prog_type = sec_def->prog_type;
		*expected_attach_type = sec_def->expected_attach_type;
		return 0;
	}

	pr_debug("failed to guess program type from ELF section '%s'\n", name);
	type_names = libbpf_get_type_names(false);
	if (type_names != NULL) {
		pr_debug("supported section(type) names are:%s\n", type_names);
		free(type_names);
	}

	return -ESRCH;
}

static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
						     size_t offset)
{
	struct bpf_map *map;
	size_t i;

	for (i = 0; i < obj->nr_maps; i++) {
		map = &obj->maps[i];
		if (!bpf_map__is_struct_ops(map))
			continue;
		if (map->sec_offset <= offset &&
		    offset - map->sec_offset < map->def.value_size)
			return map;
	}

	return NULL;
}

/* Collect the reloc from ELF and populate the st_ops->progs[] */
static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
					    GElf_Shdr *shdr, Elf_Data *data)
{
	const struct btf_member *member;
	struct bpf_struct_ops *st_ops;
	struct bpf_program *prog;
	unsigned int shdr_idx;
	const struct btf *btf;
	struct bpf_map *map;
	Elf_Data *symbols;
	unsigned int moff, insn_idx;
	const char *name;
	__u32 member_idx;
	GElf_Sym sym;
	GElf_Rel rel;
	int i, nrels;

	symbols = obj->efile.symbols;
	btf = obj->btf;
	nrels = shdr->sh_size / shdr->sh_entsize;
	for (i = 0; i < nrels; i++) {
		if (!gelf_getrel(data, i, &rel)) {
			pr_warn("struct_ops reloc: failed to get %d reloc\n", i);
			return -LIBBPF_ERRNO__FORMAT;
		}

		if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
			pr_warn("struct_ops reloc: symbol %zx not found\n",
				(size_t)GELF_R_SYM(rel.r_info));
			return -LIBBPF_ERRNO__FORMAT;
		}

		name = elf_sym_str(obj, sym.st_name) ?: "<?>";
		map = find_struct_ops_map_by_offset(obj, rel.r_offset);
		if (!map) {
			pr_warn("struct_ops reloc: cannot find map at rel.r_offset %zu\n",
				(size_t)rel.r_offset);
			return -EINVAL;
		}

		moff = rel.r_offset - map->sec_offset;
		shdr_idx = sym.st_shndx;
		st_ops = map->st_ops;
		pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel.r_offset %zu map->sec_offset %zu name %d (\'%s\')\n",
			 map->name,
			 (long
long)(rel.r_info >> 32), 8849 (long long)sym.st_value, 8850 shdr_idx, (size_t)rel.r_offset, 8851 map->sec_offset, sym.st_name, name); 8852 8853 if (shdr_idx >= SHN_LORESERVE) { 8854 pr_warn("struct_ops reloc %s: rel.r_offset %zu shdr_idx %u unsupported non-static function\n", 8855 map->name, (size_t)rel.r_offset, shdr_idx); 8856 return -LIBBPF_ERRNO__RELOC; 8857 } 8858 if (sym.st_value % BPF_INSN_SZ) { 8859 pr_warn("struct_ops reloc %s: invalid target program offset %llu\n", 8860 map->name, (unsigned long long)sym.st_value); 8861 return -LIBBPF_ERRNO__FORMAT; 8862 } 8863 insn_idx = sym.st_value / BPF_INSN_SZ; 8864 8865 member = find_member_by_offset(st_ops->type, moff * 8); 8866 if (!member) { 8867 pr_warn("struct_ops reloc %s: cannot find member at moff %u\n", 8868 map->name, moff); 8869 return -EINVAL; 8870 } 8871 member_idx = member - btf_members(st_ops->type); 8872 name = btf__name_by_offset(btf, member->name_off); 8873 8874 if (!resolve_func_ptr(btf, member->type, NULL)) { 8875 pr_warn("struct_ops reloc %s: cannot relocate non func ptr %s\n", 8876 map->name, name); 8877 return -EINVAL; 8878 } 8879 8880 prog = find_prog_by_sec_insn(obj, shdr_idx, insn_idx); 8881 if (!prog) { 8882 pr_warn("struct_ops reloc %s: cannot find prog at shdr_idx %u to relocate func ptr %s\n", 8883 map->name, shdr_idx, name); 8884 return -EINVAL; 8885 } 8886 8887 if (prog->type == BPF_PROG_TYPE_UNSPEC) { 8888 const struct bpf_sec_def *sec_def; 8889 8890 sec_def = find_sec_def(prog->sec_name); 8891 if (sec_def && 8892 sec_def->prog_type != BPF_PROG_TYPE_STRUCT_OPS) { 8893 /* for pr_warn */ 8894 prog->type = sec_def->prog_type; 8895 goto invalid_prog; 8896 } 8897 8898 prog->type = BPF_PROG_TYPE_STRUCT_OPS; 8899 prog->attach_btf_id = st_ops->type_id; 8900 prog->expected_attach_type = member_idx; 8901 } else if (prog->type != BPF_PROG_TYPE_STRUCT_OPS || 8902 prog->attach_btf_id != st_ops->type_id || 8903 prog->expected_attach_type != member_idx) { 8904 goto invalid_prog; 8905 } 8906 st_ops->progs[member_idx] = prog; 8907 } 8908 8909 return 0; 8910 8911 invalid_prog: 8912 pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n", 8913 map->name, prog->name, prog->sec_name, prog->type, 8914 prog->attach_btf_id, prog->expected_attach_type, name); 8915 return -EINVAL; 8916 } 8917 8918 #define BTF_TRACE_PREFIX "btf_trace_" 8919 #define BTF_LSM_PREFIX "bpf_lsm_" 8920 #define BTF_ITER_PREFIX "bpf_iter_" 8921 #define BTF_MAX_NAME_SIZE 128 8922 8923 static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix, 8924 const char *name, __u32 kind) 8925 { 8926 char btf_type_name[BTF_MAX_NAME_SIZE]; 8927 int ret; 8928 8929 ret = snprintf(btf_type_name, sizeof(btf_type_name), 8930 "%s%s", prefix, name); 8931 /* snprintf returns the number of characters written excluding the 8932 * the terminating null. So, if >= BTF_MAX_NAME_SIZE are written, it 8933 * indicates truncation. 
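 * For example, with BTF_MAX_NAME_SIZE of 128 and the 10-character
 * "btf_trace_" prefix, a target name of up to 117 characters still fits
 * together with the terminating null; anything longer makes snprintf()
 * return >= 128 and the lookup bails out with -ENAMETOOLONG.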
8934 */ 8935 if (ret < 0 || ret >= sizeof(btf_type_name)) 8936 return -ENAMETOOLONG; 8937 return btf__find_by_name_kind(btf, btf_type_name, kind); 8938 } 8939 8940 static inline int find_attach_btf_id(struct btf *btf, const char *name, 8941 enum bpf_attach_type attach_type) 8942 { 8943 int err; 8944 8945 if (attach_type == BPF_TRACE_RAW_TP) 8946 err = find_btf_by_prefix_kind(btf, BTF_TRACE_PREFIX, name, 8947 BTF_KIND_TYPEDEF); 8948 else if (attach_type == BPF_LSM_MAC) 8949 err = find_btf_by_prefix_kind(btf, BTF_LSM_PREFIX, name, 8950 BTF_KIND_FUNC); 8951 else if (attach_type == BPF_TRACE_ITER) 8952 err = find_btf_by_prefix_kind(btf, BTF_ITER_PREFIX, name, 8953 BTF_KIND_FUNC); 8954 else 8955 err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC); 8956 8957 return err; 8958 } 8959 8960 int libbpf_find_vmlinux_btf_id(const char *name, 8961 enum bpf_attach_type attach_type) 8962 { 8963 struct btf *btf; 8964 int err; 8965 8966 btf = libbpf_find_kernel_btf(); 8967 if (IS_ERR(btf)) { 8968 pr_warn("vmlinux BTF is not found\n"); 8969 return -EINVAL; 8970 } 8971 8972 err = find_attach_btf_id(btf, name, attach_type); 8973 if (err <= 0) 8974 pr_warn("%s is not found in vmlinux BTF\n", name); 8975 8976 btf__free(btf); 8977 return err; 8978 } 8979 8980 static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd) 8981 { 8982 struct bpf_prog_info_linear *info_linear; 8983 struct bpf_prog_info *info; 8984 struct btf *btf = NULL; 8985 int err = -EINVAL; 8986 8987 info_linear = bpf_program__get_prog_info_linear(attach_prog_fd, 0); 8988 if (IS_ERR_OR_NULL(info_linear)) { 8989 pr_warn("failed get_prog_info_linear for FD %d\n", 8990 attach_prog_fd); 8991 return -EINVAL; 8992 } 8993 info = &info_linear->info; 8994 if (!info->btf_id) { 8995 pr_warn("The target program doesn't have BTF\n"); 8996 goto out; 8997 } 8998 if (btf__get_from_id(info->btf_id, &btf)) { 8999 pr_warn("Failed to get BTF of the program\n"); 9000 goto out; 9001 } 9002 err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC); 9003 btf__free(btf); 9004 if (err <= 0) { 9005 pr_warn("%s is not found in prog's BTF\n", name); 9006 goto out; 9007 } 9008 out: 9009 free(info_linear); 9010 return err; 9011 } 9012 9013 static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name, 9014 enum bpf_attach_type attach_type, 9015 int *btf_obj_fd, int *btf_type_id) 9016 { 9017 int ret, i; 9018 9019 ret = find_attach_btf_id(obj->btf_vmlinux, attach_name, attach_type); 9020 if (ret > 0) { 9021 *btf_obj_fd = 0; /* vmlinux BTF */ 9022 *btf_type_id = ret; 9023 return 0; 9024 } 9025 if (ret != -ENOENT) 9026 return ret; 9027 9028 ret = load_module_btfs(obj); 9029 if (ret) 9030 return ret; 9031 9032 for (i = 0; i < obj->btf_module_cnt; i++) { 9033 const struct module_btf *mod = &obj->btf_modules[i]; 9034 9035 ret = find_attach_btf_id(mod->btf, attach_name, attach_type); 9036 if (ret > 0) { 9037 *btf_obj_fd = mod->fd; 9038 *btf_type_id = ret; 9039 return 0; 9040 } 9041 if (ret == -ENOENT) 9042 continue; 9043 9044 return ret; 9045 } 9046 9047 return -ESRCH; 9048 } 9049 9050 static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd, int *btf_type_id) 9051 { 9052 enum bpf_attach_type attach_type = prog->expected_attach_type; 9053 __u32 attach_prog_fd = prog->attach_prog_fd; 9054 const char *name = prog->sec_name, *attach_name; 9055 const struct bpf_sec_def *sec = NULL; 9056 int i, err; 9057 9058 if (!name) 9059 return -EINVAL; 9060 9061 for (i = 0; i < ARRAY_SIZE(section_defs); i++) { 9062 if (!section_defs[i].is_attach_btf) 
			continue;
		if (strncmp(name, section_defs[i].sec, section_defs[i].len))
			continue;

		sec = &section_defs[i];
		break;
	}

	if (!sec) {
		pr_warn("failed to identify BTF ID based on ELF section name '%s'\n", name);
		return -ESRCH;
	}
	attach_name = name + sec->len;

	/* BPF program's BTF ID */
	if (attach_prog_fd) {
		err = libbpf_find_prog_btf_id(attach_name, attach_prog_fd);
		if (err < 0) {
			pr_warn("failed to find BPF program (FD %d) BTF ID for '%s': %d\n",
				attach_prog_fd, attach_name, err);
			return err;
		}
		*btf_obj_fd = 0;
		*btf_type_id = err;
		return 0;
	}

	/* kernel/module BTF ID */
	err = find_kernel_btf_id(prog->obj, attach_name, attach_type, btf_obj_fd, btf_type_id);
	if (err) {
		pr_warn("failed to find kernel BTF type ID of '%s': %d\n", attach_name, err);
		return err;
	}
	return 0;
}

int libbpf_attach_type_by_name(const char *name,
			       enum bpf_attach_type *attach_type)
{
	char *type_names;
	int i;

	if (!name)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
		if (strncmp(name, section_defs[i].sec, section_defs[i].len))
			continue;
		if (!section_defs[i].is_attachable)
			return -EINVAL;
		*attach_type = section_defs[i].expected_attach_type;
		return 0;
	}
	pr_debug("failed to guess attach type based on ELF section name '%s'\n", name);
	type_names = libbpf_get_type_names(true);
	if (type_names != NULL) {
		pr_debug("attachable section(type) names are:%s\n", type_names);
		free(type_names);
	}

	return -EINVAL;
}

int bpf_map__fd(const struct bpf_map *map)
{
	return map ? map->fd : -EINVAL;
}

const struct bpf_map_def *bpf_map__def(const struct bpf_map *map)
{
	return map ? &map->def : ERR_PTR(-EINVAL);
}

const char *bpf_map__name(const struct bpf_map *map)
{
	return map ?
map->name : NULL; 9139 } 9140 9141 enum bpf_map_type bpf_map__type(const struct bpf_map *map) 9142 { 9143 return map->def.type; 9144 } 9145 9146 int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type) 9147 { 9148 if (map->fd >= 0) 9149 return -EBUSY; 9150 map->def.type = type; 9151 return 0; 9152 } 9153 9154 __u32 bpf_map__map_flags(const struct bpf_map *map) 9155 { 9156 return map->def.map_flags; 9157 } 9158 9159 int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags) 9160 { 9161 if (map->fd >= 0) 9162 return -EBUSY; 9163 map->def.map_flags = flags; 9164 return 0; 9165 } 9166 9167 __u32 bpf_map__numa_node(const struct bpf_map *map) 9168 { 9169 return map->numa_node; 9170 } 9171 9172 int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node) 9173 { 9174 if (map->fd >= 0) 9175 return -EBUSY; 9176 map->numa_node = numa_node; 9177 return 0; 9178 } 9179 9180 __u32 bpf_map__key_size(const struct bpf_map *map) 9181 { 9182 return map->def.key_size; 9183 } 9184 9185 int bpf_map__set_key_size(struct bpf_map *map, __u32 size) 9186 { 9187 if (map->fd >= 0) 9188 return -EBUSY; 9189 map->def.key_size = size; 9190 return 0; 9191 } 9192 9193 __u32 bpf_map__value_size(const struct bpf_map *map) 9194 { 9195 return map->def.value_size; 9196 } 9197 9198 int bpf_map__set_value_size(struct bpf_map *map, __u32 size) 9199 { 9200 if (map->fd >= 0) 9201 return -EBUSY; 9202 map->def.value_size = size; 9203 return 0; 9204 } 9205 9206 __u32 bpf_map__btf_key_type_id(const struct bpf_map *map) 9207 { 9208 return map ? map->btf_key_type_id : 0; 9209 } 9210 9211 __u32 bpf_map__btf_value_type_id(const struct bpf_map *map) 9212 { 9213 return map ? map->btf_value_type_id : 0; 9214 } 9215 9216 int bpf_map__set_priv(struct bpf_map *map, void *priv, 9217 bpf_map_clear_priv_t clear_priv) 9218 { 9219 if (!map) 9220 return -EINVAL; 9221 9222 if (map->priv) { 9223 if (map->clear_priv) 9224 map->clear_priv(map, map->priv); 9225 } 9226 9227 map->priv = priv; 9228 map->clear_priv = clear_priv; 9229 return 0; 9230 } 9231 9232 void *bpf_map__priv(const struct bpf_map *map) 9233 { 9234 return map ? 
map->priv : ERR_PTR(-EINVAL); 9235 } 9236 9237 int bpf_map__set_initial_value(struct bpf_map *map, 9238 const void *data, size_t size) 9239 { 9240 if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG || 9241 size != map->def.value_size || map->fd >= 0) 9242 return -EINVAL; 9243 9244 memcpy(map->mmaped, data, size); 9245 return 0; 9246 } 9247 9248 bool bpf_map__is_offload_neutral(const struct bpf_map *map) 9249 { 9250 return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY; 9251 } 9252 9253 bool bpf_map__is_internal(const struct bpf_map *map) 9254 { 9255 return map->libbpf_type != LIBBPF_MAP_UNSPEC; 9256 } 9257 9258 __u32 bpf_map__ifindex(const struct bpf_map *map) 9259 { 9260 return map->map_ifindex; 9261 } 9262 9263 int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex) 9264 { 9265 if (map->fd >= 0) 9266 return -EBUSY; 9267 map->map_ifindex = ifindex; 9268 return 0; 9269 } 9270 9271 int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd) 9272 { 9273 if (!bpf_map_type__is_map_in_map(map->def.type)) { 9274 pr_warn("error: unsupported map type\n"); 9275 return -EINVAL; 9276 } 9277 if (map->inner_map_fd != -1) { 9278 pr_warn("error: inner_map_fd already specified\n"); 9279 return -EINVAL; 9280 } 9281 map->inner_map_fd = fd; 9282 return 0; 9283 } 9284 9285 static struct bpf_map * 9286 __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i) 9287 { 9288 ssize_t idx; 9289 struct bpf_map *s, *e; 9290 9291 if (!obj || !obj->maps) 9292 return NULL; 9293 9294 s = obj->maps; 9295 e = obj->maps + obj->nr_maps; 9296 9297 if ((m < s) || (m >= e)) { 9298 pr_warn("error in %s: map handler doesn't belong to object\n", 9299 __func__); 9300 return NULL; 9301 } 9302 9303 idx = (m - obj->maps) + i; 9304 if (idx >= obj->nr_maps || idx < 0) 9305 return NULL; 9306 return &obj->maps[idx]; 9307 } 9308 9309 struct bpf_map * 9310 bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj) 9311 { 9312 if (prev == NULL) 9313 return obj->maps; 9314 9315 return __bpf_map__iter(prev, obj, 1); 9316 } 9317 9318 struct bpf_map * 9319 bpf_map__prev(const struct bpf_map *next, const struct bpf_object *obj) 9320 { 9321 if (next == NULL) { 9322 if (!obj->nr_maps) 9323 return NULL; 9324 return obj->maps + obj->nr_maps - 1; 9325 } 9326 9327 return __bpf_map__iter(next, obj, -1); 9328 } 9329 9330 struct bpf_map * 9331 bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name) 9332 { 9333 struct bpf_map *pos; 9334 9335 bpf_object__for_each_map(pos, obj) { 9336 if (pos->name && !strcmp(pos->name, name)) 9337 return pos; 9338 } 9339 return NULL; 9340 } 9341 9342 int 9343 bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name) 9344 { 9345 return bpf_map__fd(bpf_object__find_map_by_name(obj, name)); 9346 } 9347 9348 struct bpf_map * 9349 bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset) 9350 { 9351 return ERR_PTR(-ENOTSUP); 9352 } 9353 9354 long libbpf_get_error(const void *ptr) 9355 { 9356 return PTR_ERR_OR_ZERO(ptr); 9357 } 9358 9359 int bpf_prog_load(const char *file, enum bpf_prog_type type, 9360 struct bpf_object **pobj, int *prog_fd) 9361 { 9362 struct bpf_prog_load_attr attr; 9363 9364 memset(&attr, 0, sizeof(struct bpf_prog_load_attr)); 9365 attr.file = file; 9366 attr.prog_type = type; 9367 attr.expected_attach_type = 0; 9368 9369 return bpf_prog_load_xattr(&attr, pobj, prog_fd); 9370 } 9371 9372 int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr, 9373 struct bpf_object **pobj, int *prog_fd) 9374 { 9375 struct 
bpf_object_open_attr open_attr = {}; 9376 struct bpf_program *prog, *first_prog = NULL; 9377 struct bpf_object *obj; 9378 struct bpf_map *map; 9379 int err; 9380 9381 if (!attr) 9382 return -EINVAL; 9383 if (!attr->file) 9384 return -EINVAL; 9385 9386 open_attr.file = attr->file; 9387 open_attr.prog_type = attr->prog_type; 9388 9389 obj = bpf_object__open_xattr(&open_attr); 9390 if (IS_ERR_OR_NULL(obj)) 9391 return -ENOENT; 9392 9393 bpf_object__for_each_program(prog, obj) { 9394 enum bpf_attach_type attach_type = attr->expected_attach_type; 9395 /* 9396 * to preserve backwards compatibility, bpf_prog_load treats 9397 * attr->prog_type, if specified, as an override to whatever 9398 * bpf_object__open guessed 9399 */ 9400 if (attr->prog_type != BPF_PROG_TYPE_UNSPEC) { 9401 bpf_program__set_type(prog, attr->prog_type); 9402 bpf_program__set_expected_attach_type(prog, 9403 attach_type); 9404 } 9405 if (bpf_program__get_type(prog) == BPF_PROG_TYPE_UNSPEC) { 9406 /* 9407 * we haven't guessed from section name and user 9408 * didn't provide a fallback type, too bad... 9409 */ 9410 bpf_object__close(obj); 9411 return -EINVAL; 9412 } 9413 9414 prog->prog_ifindex = attr->ifindex; 9415 prog->log_level = attr->log_level; 9416 prog->prog_flags |= attr->prog_flags; 9417 if (!first_prog) 9418 first_prog = prog; 9419 } 9420 9421 bpf_object__for_each_map(map, obj) { 9422 if (!bpf_map__is_offload_neutral(map)) 9423 map->map_ifindex = attr->ifindex; 9424 } 9425 9426 if (!first_prog) { 9427 pr_warn("object file doesn't contain bpf program\n"); 9428 bpf_object__close(obj); 9429 return -ENOENT; 9430 } 9431 9432 err = bpf_object__load(obj); 9433 if (err) { 9434 bpf_object__close(obj); 9435 return err; 9436 } 9437 9438 *pobj = obj; 9439 *prog_fd = bpf_program__fd(first_prog); 9440 return 0; 9441 } 9442 9443 struct bpf_link { 9444 int (*detach)(struct bpf_link *link); 9445 int (*destroy)(struct bpf_link *link); 9446 char *pin_path; /* NULL, if not pinned */ 9447 int fd; /* hook FD, -1 if not applicable */ 9448 bool disconnected; 9449 }; 9450 9451 /* Replace link's underlying BPF program with the new one */ 9452 int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog) 9453 { 9454 return bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), NULL); 9455 } 9456 9457 /* Release "ownership" of underlying BPF resource (typically, BPF program 9458 * attached to some BPF hook, e.g., tracepoint, kprobe, etc). Disconnected 9459 * link, when destructed through bpf_link__destroy() call won't attempt to 9460 * detach/unregisted that BPF resource. This is useful in situations where, 9461 * say, attached BPF program has to outlive userspace program that attached it 9462 * in the system. Depending on type of BPF program, though, there might be 9463 * additional steps (like pinning BPF program in BPF FS) necessary to ensure 9464 * exit of userspace program doesn't trigger automatic detachment and clean up 9465 * inside the kernel. 
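 *
 * A minimal illustrative sketch (the pin path below is hypothetical, not
 * something libbpf mandates): pin the link so the attachment survives
 * process exit, then disconnect it before dropping the local handle, so
 * that bpf_link__destroy() only frees memory and does not detach:
 *
 *	err = bpf_link__pin(link, "/sys/fs/bpf/my_link");
 *	if (!err) {
 *		bpf_link__disconnect(link);
 *		bpf_link__destroy(link);
 *	}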
9466 */ 9467 void bpf_link__disconnect(struct bpf_link *link) 9468 { 9469 link->disconnected = true; 9470 } 9471 9472 int bpf_link__destroy(struct bpf_link *link) 9473 { 9474 int err = 0; 9475 9476 if (IS_ERR_OR_NULL(link)) 9477 return 0; 9478 9479 if (!link->disconnected && link->detach) 9480 err = link->detach(link); 9481 if (link->destroy) 9482 link->destroy(link); 9483 if (link->pin_path) 9484 free(link->pin_path); 9485 free(link); 9486 9487 return err; 9488 } 9489 9490 int bpf_link__fd(const struct bpf_link *link) 9491 { 9492 return link->fd; 9493 } 9494 9495 const char *bpf_link__pin_path(const struct bpf_link *link) 9496 { 9497 return link->pin_path; 9498 } 9499 9500 static int bpf_link__detach_fd(struct bpf_link *link) 9501 { 9502 return close(link->fd); 9503 } 9504 9505 struct bpf_link *bpf_link__open(const char *path) 9506 { 9507 struct bpf_link *link; 9508 int fd; 9509 9510 fd = bpf_obj_get(path); 9511 if (fd < 0) { 9512 fd = -errno; 9513 pr_warn("failed to open link at %s: %d\n", path, fd); 9514 return ERR_PTR(fd); 9515 } 9516 9517 link = calloc(1, sizeof(*link)); 9518 if (!link) { 9519 close(fd); 9520 return ERR_PTR(-ENOMEM); 9521 } 9522 link->detach = &bpf_link__detach_fd; 9523 link->fd = fd; 9524 9525 link->pin_path = strdup(path); 9526 if (!link->pin_path) { 9527 bpf_link__destroy(link); 9528 return ERR_PTR(-ENOMEM); 9529 } 9530 9531 return link; 9532 } 9533 9534 int bpf_link__detach(struct bpf_link *link) 9535 { 9536 return bpf_link_detach(link->fd) ? -errno : 0; 9537 } 9538 9539 int bpf_link__pin(struct bpf_link *link, const char *path) 9540 { 9541 int err; 9542 9543 if (link->pin_path) 9544 return -EBUSY; 9545 err = make_parent_dir(path); 9546 if (err) 9547 return err; 9548 err = check_path(path); 9549 if (err) 9550 return err; 9551 9552 link->pin_path = strdup(path); 9553 if (!link->pin_path) 9554 return -ENOMEM; 9555 9556 if (bpf_obj_pin(link->fd, link->pin_path)) { 9557 err = -errno; 9558 zfree(&link->pin_path); 9559 return err; 9560 } 9561 9562 pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path); 9563 return 0; 9564 } 9565 9566 int bpf_link__unpin(struct bpf_link *link) 9567 { 9568 int err; 9569 9570 if (!link->pin_path) 9571 return -EINVAL; 9572 9573 err = unlink(link->pin_path); 9574 if (err != 0) 9575 return -errno; 9576 9577 pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path); 9578 zfree(&link->pin_path); 9579 return 0; 9580 } 9581 9582 static int bpf_link__detach_perf_event(struct bpf_link *link) 9583 { 9584 int err; 9585 9586 err = ioctl(link->fd, PERF_EVENT_IOC_DISABLE, 0); 9587 if (err) 9588 err = -errno; 9589 9590 close(link->fd); 9591 return err; 9592 } 9593 9594 struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog, 9595 int pfd) 9596 { 9597 char errmsg[STRERR_BUFSIZE]; 9598 struct bpf_link *link; 9599 int prog_fd, err; 9600 9601 if (pfd < 0) { 9602 pr_warn("prog '%s': invalid perf event FD %d\n", 9603 prog->name, pfd); 9604 return ERR_PTR(-EINVAL); 9605 } 9606 prog_fd = bpf_program__fd(prog); 9607 if (prog_fd < 0) { 9608 pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n", 9609 prog->name); 9610 return ERR_PTR(-EINVAL); 9611 } 9612 9613 link = calloc(1, sizeof(*link)); 9614 if (!link) 9615 return ERR_PTR(-ENOMEM); 9616 link->detach = &bpf_link__detach_perf_event; 9617 link->fd = pfd; 9618 9619 if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) { 9620 err = -errno; 9621 free(link); 9622 pr_warn("prog '%s': failed to attach to pfd %d: %s\n", 9623 prog->name, pfd, libbpf_strerror_r(err, 
errmsg, sizeof(errmsg))); 9624 if (err == -EPROTO) 9625 pr_warn("prog '%s': try add PERF_SAMPLE_CALLCHAIN to or remove exclude_callchain_[kernel|user] from pfd %d\n", 9626 prog->name, pfd); 9627 return ERR_PTR(err); 9628 } 9629 if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) { 9630 err = -errno; 9631 free(link); 9632 pr_warn("prog '%s': failed to enable pfd %d: %s\n", 9633 prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); 9634 return ERR_PTR(err); 9635 } 9636 return link; 9637 } 9638 9639 /* 9640 * this function is expected to parse integer in the range of [0, 2^31-1] from 9641 * given file using scanf format string fmt. If actual parsed value is 9642 * negative, the result might be indistinguishable from error 9643 */ 9644 static int parse_uint_from_file(const char *file, const char *fmt) 9645 { 9646 char buf[STRERR_BUFSIZE]; 9647 int err, ret; 9648 FILE *f; 9649 9650 f = fopen(file, "r"); 9651 if (!f) { 9652 err = -errno; 9653 pr_debug("failed to open '%s': %s\n", file, 9654 libbpf_strerror_r(err, buf, sizeof(buf))); 9655 return err; 9656 } 9657 err = fscanf(f, fmt, &ret); 9658 if (err != 1) { 9659 err = err == EOF ? -EIO : -errno; 9660 pr_debug("failed to parse '%s': %s\n", file, 9661 libbpf_strerror_r(err, buf, sizeof(buf))); 9662 fclose(f); 9663 return err; 9664 } 9665 fclose(f); 9666 return ret; 9667 } 9668 9669 static int determine_kprobe_perf_type(void) 9670 { 9671 const char *file = "/sys/bus/event_source/devices/kprobe/type"; 9672 9673 return parse_uint_from_file(file, "%d\n"); 9674 } 9675 9676 static int determine_uprobe_perf_type(void) 9677 { 9678 const char *file = "/sys/bus/event_source/devices/uprobe/type"; 9679 9680 return parse_uint_from_file(file, "%d\n"); 9681 } 9682 9683 static int determine_kprobe_retprobe_bit(void) 9684 { 9685 const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe"; 9686 9687 return parse_uint_from_file(file, "config:%d\n"); 9688 } 9689 9690 static int determine_uprobe_retprobe_bit(void) 9691 { 9692 const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe"; 9693 9694 return parse_uint_from_file(file, "config:%d\n"); 9695 } 9696 9697 static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name, 9698 uint64_t offset, int pid) 9699 { 9700 struct perf_event_attr attr = {}; 9701 char errmsg[STRERR_BUFSIZE]; 9702 int type, pfd, err; 9703 9704 type = uprobe ? determine_uprobe_perf_type() 9705 : determine_kprobe_perf_type(); 9706 if (type < 0) { 9707 pr_warn("failed to determine %s perf type: %s\n", 9708 uprobe ? "uprobe" : "kprobe", 9709 libbpf_strerror_r(type, errmsg, sizeof(errmsg))); 9710 return type; 9711 } 9712 if (retprobe) { 9713 int bit = uprobe ? determine_uprobe_retprobe_bit() 9714 : determine_kprobe_retprobe_bit(); 9715 9716 if (bit < 0) { 9717 pr_warn("failed to determine %s retprobe bit: %s\n", 9718 uprobe ? "uprobe" : "kprobe", 9719 libbpf_strerror_r(bit, errmsg, sizeof(errmsg))); 9720 return bit; 9721 } 9722 attr.config |= 1 << bit; 9723 } 9724 attr.size = sizeof(attr); 9725 attr.type = type; 9726 attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */ 9727 attr.config2 = offset; /* kprobe_addr or probe_offset */ 9728 9729 /* pid filter is meaningful only for uprobes */ 9730 pfd = syscall(__NR_perf_event_open, &attr, 9731 pid < 0 ? -1 : pid /* pid */, 9732 pid == -1 ? 0 : -1 /* cpu */, 9733 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); 9734 if (pfd < 0) { 9735 err = -errno; 9736 pr_warn("%s perf_event_open() failed: %s\n", 9737 uprobe ? 
"uprobe" : "kprobe", 9738 libbpf_strerror_r(err, errmsg, sizeof(errmsg))); 9739 return err; 9740 } 9741 return pfd; 9742 } 9743 9744 struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog, 9745 bool retprobe, 9746 const char *func_name) 9747 { 9748 char errmsg[STRERR_BUFSIZE]; 9749 struct bpf_link *link; 9750 int pfd, err; 9751 9752 pfd = perf_event_open_probe(false /* uprobe */, retprobe, func_name, 9753 0 /* offset */, -1 /* pid */); 9754 if (pfd < 0) { 9755 pr_warn("prog '%s': failed to create %s '%s' perf event: %s\n", 9756 prog->name, retprobe ? "kretprobe" : "kprobe", func_name, 9757 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); 9758 return ERR_PTR(pfd); 9759 } 9760 link = bpf_program__attach_perf_event(prog, pfd); 9761 if (IS_ERR(link)) { 9762 close(pfd); 9763 err = PTR_ERR(link); 9764 pr_warn("prog '%s': failed to attach to %s '%s': %s\n", 9765 prog->name, retprobe ? "kretprobe" : "kprobe", func_name, 9766 libbpf_strerror_r(err, errmsg, sizeof(errmsg))); 9767 return link; 9768 } 9769 return link; 9770 } 9771 9772 static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec, 9773 struct bpf_program *prog) 9774 { 9775 const char *func_name; 9776 bool retprobe; 9777 9778 func_name = prog->sec_name + sec->len; 9779 retprobe = strcmp(sec->sec, "kretprobe/") == 0; 9780 9781 return bpf_program__attach_kprobe(prog, retprobe, func_name); 9782 } 9783 9784 struct bpf_link *bpf_program__attach_uprobe(struct bpf_program *prog, 9785 bool retprobe, pid_t pid, 9786 const char *binary_path, 9787 size_t func_offset) 9788 { 9789 char errmsg[STRERR_BUFSIZE]; 9790 struct bpf_link *link; 9791 int pfd, err; 9792 9793 pfd = perf_event_open_probe(true /* uprobe */, retprobe, 9794 binary_path, func_offset, pid); 9795 if (pfd < 0) { 9796 pr_warn("prog '%s': failed to create %s '%s:0x%zx' perf event: %s\n", 9797 prog->name, retprobe ? "uretprobe" : "uprobe", 9798 binary_path, func_offset, 9799 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); 9800 return ERR_PTR(pfd); 9801 } 9802 link = bpf_program__attach_perf_event(prog, pfd); 9803 if (IS_ERR(link)) { 9804 close(pfd); 9805 err = PTR_ERR(link); 9806 pr_warn("prog '%s': failed to attach to %s '%s:0x%zx': %s\n", 9807 prog->name, retprobe ? 
"uretprobe" : "uprobe", 9808 binary_path, func_offset, 9809 libbpf_strerror_r(err, errmsg, sizeof(errmsg))); 9810 return link; 9811 } 9812 return link; 9813 } 9814 9815 static int determine_tracepoint_id(const char *tp_category, 9816 const char *tp_name) 9817 { 9818 char file[PATH_MAX]; 9819 int ret; 9820 9821 ret = snprintf(file, sizeof(file), 9822 "/sys/kernel/debug/tracing/events/%s/%s/id", 9823 tp_category, tp_name); 9824 if (ret < 0) 9825 return -errno; 9826 if (ret >= sizeof(file)) { 9827 pr_debug("tracepoint %s/%s path is too long\n", 9828 tp_category, tp_name); 9829 return -E2BIG; 9830 } 9831 return parse_uint_from_file(file, "%d\n"); 9832 } 9833 9834 static int perf_event_open_tracepoint(const char *tp_category, 9835 const char *tp_name) 9836 { 9837 struct perf_event_attr attr = {}; 9838 char errmsg[STRERR_BUFSIZE]; 9839 int tp_id, pfd, err; 9840 9841 tp_id = determine_tracepoint_id(tp_category, tp_name); 9842 if (tp_id < 0) { 9843 pr_warn("failed to determine tracepoint '%s/%s' perf event ID: %s\n", 9844 tp_category, tp_name, 9845 libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg))); 9846 return tp_id; 9847 } 9848 9849 attr.type = PERF_TYPE_TRACEPOINT; 9850 attr.size = sizeof(attr); 9851 attr.config = tp_id; 9852 9853 pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */, 9854 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); 9855 if (pfd < 0) { 9856 err = -errno; 9857 pr_warn("tracepoint '%s/%s' perf_event_open() failed: %s\n", 9858 tp_category, tp_name, 9859 libbpf_strerror_r(err, errmsg, sizeof(errmsg))); 9860 return err; 9861 } 9862 return pfd; 9863 } 9864 9865 struct bpf_link *bpf_program__attach_tracepoint(struct bpf_program *prog, 9866 const char *tp_category, 9867 const char *tp_name) 9868 { 9869 char errmsg[STRERR_BUFSIZE]; 9870 struct bpf_link *link; 9871 int pfd, err; 9872 9873 pfd = perf_event_open_tracepoint(tp_category, tp_name); 9874 if (pfd < 0) { 9875 pr_warn("prog '%s': failed to create tracepoint '%s/%s' perf event: %s\n", 9876 prog->name, tp_category, tp_name, 9877 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); 9878 return ERR_PTR(pfd); 9879 } 9880 link = bpf_program__attach_perf_event(prog, pfd); 9881 if (IS_ERR(link)) { 9882 close(pfd); 9883 err = PTR_ERR(link); 9884 pr_warn("prog '%s': failed to attach to tracepoint '%s/%s': %s\n", 9885 prog->name, tp_category, tp_name, 9886 libbpf_strerror_r(err, errmsg, sizeof(errmsg))); 9887 return link; 9888 } 9889 return link; 9890 } 9891 9892 static struct bpf_link *attach_tp(const struct bpf_sec_def *sec, 9893 struct bpf_program *prog) 9894 { 9895 char *sec_name, *tp_cat, *tp_name; 9896 struct bpf_link *link; 9897 9898 sec_name = strdup(prog->sec_name); 9899 if (!sec_name) 9900 return ERR_PTR(-ENOMEM); 9901 9902 /* extract "tp/<category>/<name>" */ 9903 tp_cat = sec_name + sec->len; 9904 tp_name = strchr(tp_cat, '/'); 9905 if (!tp_name) { 9906 link = ERR_PTR(-EINVAL); 9907 goto out; 9908 } 9909 *tp_name = '\0'; 9910 tp_name++; 9911 9912 link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name); 9913 out: 9914 free(sec_name); 9915 return link; 9916 } 9917 9918 struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog, 9919 const char *tp_name) 9920 { 9921 char errmsg[STRERR_BUFSIZE]; 9922 struct bpf_link *link; 9923 int prog_fd, pfd; 9924 9925 prog_fd = bpf_program__fd(prog); 9926 if (prog_fd < 0) { 9927 pr_warn("prog '%s': can't attach before loaded\n", prog->name); 9928 return ERR_PTR(-EINVAL); 9929 } 9930 9931 link = calloc(1, sizeof(*link)); 9932 if (!link) 9933 return ERR_PTR(-ENOMEM); 
9934 link->detach = &bpf_link__detach_fd; 9935 9936 pfd = bpf_raw_tracepoint_open(tp_name, prog_fd); 9937 if (pfd < 0) { 9938 pfd = -errno; 9939 free(link); 9940 pr_warn("prog '%s': failed to attach to raw tracepoint '%s': %s\n", 9941 prog->name, tp_name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); 9942 return ERR_PTR(pfd); 9943 } 9944 link->fd = pfd; 9945 return link; 9946 } 9947 9948 static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec, 9949 struct bpf_program *prog) 9950 { 9951 const char *tp_name = prog->sec_name + sec->len; 9952 9953 return bpf_program__attach_raw_tracepoint(prog, tp_name); 9954 } 9955 9956 /* Common logic for all BPF program types that attach to a btf_id */ 9957 static struct bpf_link *bpf_program__attach_btf_id(struct bpf_program *prog) 9958 { 9959 char errmsg[STRERR_BUFSIZE]; 9960 struct bpf_link *link; 9961 int prog_fd, pfd; 9962 9963 prog_fd = bpf_program__fd(prog); 9964 if (prog_fd < 0) { 9965 pr_warn("prog '%s': can't attach before loaded\n", prog->name); 9966 return ERR_PTR(-EINVAL); 9967 } 9968 9969 link = calloc(1, sizeof(*link)); 9970 if (!link) 9971 return ERR_PTR(-ENOMEM); 9972 link->detach = &bpf_link__detach_fd; 9973 9974 pfd = bpf_raw_tracepoint_open(NULL, prog_fd); 9975 if (pfd < 0) { 9976 pfd = -errno; 9977 free(link); 9978 pr_warn("prog '%s': failed to attach: %s\n", 9979 prog->name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); 9980 return ERR_PTR(pfd); 9981 } 9982 link->fd = pfd; 9983 return (struct bpf_link *)link; 9984 } 9985 9986 struct bpf_link *bpf_program__attach_trace(struct bpf_program *prog) 9987 { 9988 return bpf_program__attach_btf_id(prog); 9989 } 9990 9991 struct bpf_link *bpf_program__attach_lsm(struct bpf_program *prog) 9992 { 9993 return bpf_program__attach_btf_id(prog); 9994 } 9995 9996 static struct bpf_link *attach_trace(const struct bpf_sec_def *sec, 9997 struct bpf_program *prog) 9998 { 9999 return bpf_program__attach_trace(prog); 10000 } 10001 10002 static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec, 10003 struct bpf_program *prog) 10004 { 10005 return bpf_program__attach_lsm(prog); 10006 } 10007 10008 static struct bpf_link *attach_iter(const struct bpf_sec_def *sec, 10009 struct bpf_program *prog) 10010 { 10011 return bpf_program__attach_iter(prog, NULL); 10012 } 10013 10014 static struct bpf_link * 10015 bpf_program__attach_fd(struct bpf_program *prog, int target_fd, int btf_id, 10016 const char *target_name) 10017 { 10018 DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts, 10019 .target_btf_id = btf_id); 10020 enum bpf_attach_type attach_type; 10021 char errmsg[STRERR_BUFSIZE]; 10022 struct bpf_link *link; 10023 int prog_fd, link_fd; 10024 10025 prog_fd = bpf_program__fd(prog); 10026 if (prog_fd < 0) { 10027 pr_warn("prog '%s': can't attach before loaded\n", prog->name); 10028 return ERR_PTR(-EINVAL); 10029 } 10030 10031 link = calloc(1, sizeof(*link)); 10032 if (!link) 10033 return ERR_PTR(-ENOMEM); 10034 link->detach = &bpf_link__detach_fd; 10035 10036 attach_type = bpf_program__get_expected_attach_type(prog); 10037 link_fd = bpf_link_create(prog_fd, target_fd, attach_type, &opts); 10038 if (link_fd < 0) { 10039 link_fd = -errno; 10040 free(link); 10041 pr_warn("prog '%s': failed to attach to %s: %s\n", 10042 prog->name, target_name, 10043 libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg))); 10044 return ERR_PTR(link_fd); 10045 } 10046 link->fd = link_fd; 10047 return link; 10048 } 10049 10050 struct bpf_link * 10051 bpf_program__attach_cgroup(struct bpf_program *prog, int cgroup_fd) 
struct bpf_link *
bpf_program__attach_cgroup(struct bpf_program *prog, int cgroup_fd)
{
	return bpf_program__attach_fd(prog, cgroup_fd, 0, "cgroup");
}

struct bpf_link *
bpf_program__attach_netns(struct bpf_program *prog, int netns_fd)
{
	return bpf_program__attach_fd(prog, netns_fd, 0, "netns");
}

struct bpf_link *bpf_program__attach_xdp(struct bpf_program *prog, int ifindex)
{
	/* target_fd/target_ifindex use the same field in LINK_CREATE */
	return bpf_program__attach_fd(prog, ifindex, 0, "xdp");
}

struct bpf_link *bpf_program__attach_freplace(struct bpf_program *prog,
					      int target_fd,
					      const char *attach_func_name)
{
	int btf_id;

	if (!!target_fd != !!attach_func_name) {
		pr_warn("prog '%s': supply none or both of target_fd and attach_func_name\n",
			prog->name);
		return ERR_PTR(-EINVAL);
	}

	if (prog->type != BPF_PROG_TYPE_EXT) {
		pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace\n",
			prog->name);
		return ERR_PTR(-EINVAL);
	}

	if (target_fd) {
		btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd);
		if (btf_id < 0)
			return ERR_PTR(btf_id);

		return bpf_program__attach_fd(prog, target_fd, btf_id, "freplace");
	} else {
		/* no target, so use raw_tracepoint_open for compatibility
		 * with old kernels
		 */
		return bpf_program__attach_trace(prog);
	}
}

struct bpf_link *
bpf_program__attach_iter(struct bpf_program *prog,
			 const struct bpf_iter_attach_opts *opts)
{
	DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link *link;
	int prog_fd, link_fd;
	__u32 target_fd = 0;

	if (!OPTS_VALID(opts, bpf_iter_attach_opts))
		return ERR_PTR(-EINVAL);

	link_create_opts.iter_info = OPTS_GET(opts, link_info, (void *)0);
	link_create_opts.iter_info_len = OPTS_GET(opts, link_info_len, 0);

	prog_fd = bpf_program__fd(prog);
	if (prog_fd < 0) {
		pr_warn("prog '%s': can't attach before loaded\n", prog->name);
		return ERR_PTR(-EINVAL);
	}

	link = calloc(1, sizeof(*link));
	if (!link)
		return ERR_PTR(-ENOMEM);
	link->detach = &bpf_link__detach_fd;

	link_fd = bpf_link_create(prog_fd, target_fd, BPF_TRACE_ITER,
				  &link_create_opts);
	if (link_fd < 0) {
		link_fd = -errno;
		free(link);
		pr_warn("prog '%s': failed to attach to iterator: %s\n",
			prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
		return ERR_PTR(link_fd);
	}
	link->fd = link_fd;
	return link;
}

struct bpf_link *bpf_program__attach(struct bpf_program *prog)
{
	const struct bpf_sec_def *sec_def;

	sec_def = find_sec_def(prog->sec_name);
	if (!sec_def || !sec_def->attach_fn)
		return ERR_PTR(-ESRCH);

	return sec_def->attach_fn(sec_def, prog);
}

static int bpf_link__detach_struct_ops(struct bpf_link *link)
{
	__u32 zero = 0;

	if (bpf_map_delete_elem(link->fd, &zero))
		return -errno;

	return 0;
}

struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map)
{
	struct bpf_struct_ops *st_ops;
	struct bpf_link *link;
	__u32 i, zero = 0;
	int err;

	if (!bpf_map__is_struct_ops(map) || map->fd == -1)
		return ERR_PTR(-EINVAL);

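	/* A struct_ops "link" wraps the map itself: attaching fills the
	 * kernel-side vdata with program FDs and updates the map's single
	 * element, while detaching deletes that element again.
	 */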
	link = calloc(1, sizeof(*link));
	if (!link)
		return ERR_PTR(-ENOMEM);

	st_ops = map->st_ops;
	for (i = 0; i < btf_vlen(st_ops->type); i++) {
		struct bpf_program *prog = st_ops->progs[i];
		void *kern_data;
		int prog_fd;

		if (!prog)
			continue;

		prog_fd = bpf_program__fd(prog);
		kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i];
		*(unsigned long *)kern_data = prog_fd;
	}

	err = bpf_map_update_elem(map->fd, &zero, st_ops->kern_vdata, 0);
	if (err) {
		err = -errno;
		free(link);
		return ERR_PTR(err);
	}

	link->detach = bpf_link__detach_struct_ops;
	link->fd = map->fd;

	return link;
}

enum bpf_perf_event_ret
bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
			   void **copy_mem, size_t *copy_size,
			   bpf_perf_event_print_t fn, void *private_data)
{
	struct perf_event_mmap_page *header = mmap_mem;
	__u64 data_head = ring_buffer_read_head(header);
	__u64 data_tail = header->data_tail;
	void *base = ((__u8 *)header) + page_size;
	int ret = LIBBPF_PERF_EVENT_CONT;
	struct perf_event_header *ehdr;
	size_t ehdr_size;

	while (data_head != data_tail) {
		ehdr = base + (data_tail & (mmap_size - 1));
		ehdr_size = ehdr->size;

		if (((void *)ehdr) + ehdr_size > base + mmap_size) {
			void *copy_start = ehdr;
			size_t len_first = base + mmap_size - copy_start;
			size_t len_secnd = ehdr_size - len_first;

			if (*copy_size < ehdr_size) {
				free(*copy_mem);
				*copy_mem = malloc(ehdr_size);
				if (!*copy_mem) {
					*copy_size = 0;
					ret = LIBBPF_PERF_EVENT_ERROR;
					break;
				}
				*copy_size = ehdr_size;
			}

			memcpy(*copy_mem, copy_start, len_first);
			memcpy(*copy_mem + len_first, base, len_secnd);
			ehdr = *copy_mem;
		}

		ret = fn(ehdr, private_data);
		data_tail += ehdr_size;
		if (ret != LIBBPF_PERF_EVENT_CONT)
			break;
	}

	ring_buffer_write_tail(header, data_tail);
	return ret;
}

struct perf_buffer;

struct perf_buffer_params {
	struct perf_event_attr *attr;
	/* if event_cb is specified, it takes precedence */
	perf_buffer_event_fn event_cb;
	/* sample_cb and lost_cb are higher-level common-case callbacks */
	perf_buffer_sample_fn sample_cb;
	perf_buffer_lost_fn lost_cb;
	void *ctx;
	int cpu_cnt;
	int *cpus;
	int *map_keys;
};

struct perf_cpu_buf {
	struct perf_buffer *pb;
	void *base; /* mmap()'ed memory */
	void *buf; /* for reconstructing segmented data */
	size_t buf_size;
	int fd;
	int cpu;
	int map_key;
};

struct perf_buffer {
	perf_buffer_event_fn event_cb;
	perf_buffer_sample_fn sample_cb;
	perf_buffer_lost_fn lost_cb;
	void *ctx; /* passed into callbacks */

	size_t page_size;
	size_t mmap_size;
	struct perf_cpu_buf **cpu_bufs;
	struct epoll_event *events;
	int cpu_cnt; /* number of allocated CPU buffers */
	int epoll_fd; /* epoll instance FD for polling perf buffers */
	int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */
};

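/* One perf_cpu_buf is created per online CPU (or per user-requested
 * CPU/map-key pair for the "raw" variant); all of them are polled through
 * the single epoll instance owned by struct perf_buffer.
 */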
static void perf_buffer__free_cpu_buf(struct perf_buffer *pb,
				      struct perf_cpu_buf *cpu_buf)
{
	if (!cpu_buf)
		return;
	if (cpu_buf->base &&
	    munmap(cpu_buf->base, pb->mmap_size + pb->page_size))
		pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu);
	if (cpu_buf->fd >= 0) {
		ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0);
		close(cpu_buf->fd);
	}
	free(cpu_buf->buf);
	free(cpu_buf);
}

void perf_buffer__free(struct perf_buffer *pb)
{
	int i;

	if (IS_ERR_OR_NULL(pb))
		return;
	if (pb->cpu_bufs) {
		for (i = 0; i < pb->cpu_cnt; i++) {
			struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];

			if (!cpu_buf)
				continue;

			bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key);
			perf_buffer__free_cpu_buf(pb, cpu_buf);
		}
		free(pb->cpu_bufs);
	}
	if (pb->epoll_fd >= 0)
		close(pb->epoll_fd);
	free(pb->events);
	free(pb);
}

static struct perf_cpu_buf *
perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
			  int cpu, int map_key)
{
	struct perf_cpu_buf *cpu_buf;
	char msg[STRERR_BUFSIZE];
	int err;

	cpu_buf = calloc(1, sizeof(*cpu_buf));
	if (!cpu_buf)
		return ERR_PTR(-ENOMEM);

	cpu_buf->pb = pb;
	cpu_buf->cpu = cpu;
	cpu_buf->map_key = map_key;

	cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu,
			      -1, PERF_FLAG_FD_CLOEXEC);
	if (cpu_buf->fd < 0) {
		err = -errno;
		pr_warn("failed to open perf buffer event on cpu #%d: %s\n",
			cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
		goto error;
	}

	cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size,
			     PROT_READ | PROT_WRITE, MAP_SHARED,
			     cpu_buf->fd, 0);
	if (cpu_buf->base == MAP_FAILED) {
		cpu_buf->base = NULL;
		err = -errno;
		pr_warn("failed to mmap perf buffer on cpu #%d: %s\n",
			cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
		goto error;
	}

	if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
		err = -errno;
		pr_warn("failed to enable perf buffer event on cpu #%d: %s\n",
			cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
		goto error;
	}

	return cpu_buf;

error:
	perf_buffer__free_cpu_buf(pb, cpu_buf);
	return (struct perf_cpu_buf *)ERR_PTR(err);
}

static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
					      struct perf_buffer_params *p);

struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
				     const struct perf_buffer_opts *opts)
{
	struct perf_buffer_params p = {};
	struct perf_event_attr attr = { 0, };

	attr.config = PERF_COUNT_SW_BPF_OUTPUT;
	attr.type = PERF_TYPE_SOFTWARE;
	attr.sample_type = PERF_SAMPLE_RAW;
	attr.sample_period = 1;
	attr.wakeup_events = 1;

	p.attr = &attr;
	p.sample_cb = opts ? opts->sample_cb : NULL;
	p.lost_cb = opts ? opts->lost_cb : NULL;
	p.ctx = opts ? opts->ctx : NULL;

	return __perf_buffer__new(map_fd, page_cnt, &p);
}

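/* Example (sketch): a typical perf_buffer__new() consumer loop; the map FD,
 * page count (8, must be a power of two), and handle_sample() callback are
 * placeholders and error handling is abbreviated:
 *
 *	struct perf_buffer_opts pb_opts = {};
 *	struct perf_buffer *pb;
 *
 *	pb_opts.sample_cb = handle_sample;
 *	pb = perf_buffer__new(map_fd, 8, &pb_opts);
 *	if (IS_ERR(pb))
 *		return PTR_ERR(pb);
 *	while (perf_buffer__poll(pb, 100) >= 0)
 *		;
 *	perf_buffer__free(pb);
 */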
struct perf_buffer *
perf_buffer__new_raw(int map_fd, size_t page_cnt,
		     const struct perf_buffer_raw_opts *opts)
{
	struct perf_buffer_params p = {};

	p.attr = opts->attr;
	p.event_cb = opts->event_cb;
	p.ctx = opts->ctx;
	p.cpu_cnt = opts->cpu_cnt;
	p.cpus = opts->cpus;
	p.map_keys = opts->map_keys;

	return __perf_buffer__new(map_fd, page_cnt, &p);
}

static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
					      struct perf_buffer_params *p)
{
	const char *online_cpus_file = "/sys/devices/system/cpu/online";
	struct bpf_map_info map;
	char msg[STRERR_BUFSIZE];
	struct perf_buffer *pb;
	bool *online = NULL;
	__u32 map_info_len;
	int err, i, j, n;

	if (page_cnt & (page_cnt - 1)) {
		pr_warn("page count should be power of two, but is %zu\n",
			page_cnt);
		return ERR_PTR(-EINVAL);
	}

	/* best-effort sanity checks */
	memset(&map, 0, sizeof(map));
	map_info_len = sizeof(map);
	err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len);
	if (err) {
		err = -errno;
		/* if BPF_OBJ_GET_INFO_BY_FD is supported, will return
		 * -EBADFD, -EFAULT, or -E2BIG on real error
		 */
		if (err != -EINVAL) {
			pr_warn("failed to get map info for map FD %d: %s\n",
				map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
			return ERR_PTR(err);
		}
		pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n",
			 map_fd);
	} else {
		if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
			pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
				map.name);
			return ERR_PTR(-EINVAL);
		}
	}

	pb = calloc(1, sizeof(*pb));
	if (!pb)
		return ERR_PTR(-ENOMEM);

	pb->event_cb = p->event_cb;
	pb->sample_cb = p->sample_cb;
	pb->lost_cb = p->lost_cb;
	pb->ctx = p->ctx;

	pb->page_size = getpagesize();
	pb->mmap_size = pb->page_size * page_cnt;
	pb->map_fd = map_fd;

	pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
	if (pb->epoll_fd < 0) {
		err = -errno;
		pr_warn("failed to create epoll instance: %s\n",
			libbpf_strerror_r(err, msg, sizeof(msg)));
		goto error;
	}

	if (p->cpu_cnt > 0) {
		pb->cpu_cnt = p->cpu_cnt;
	} else {
		pb->cpu_cnt = libbpf_num_possible_cpus();
		if (pb->cpu_cnt < 0) {
			err = pb->cpu_cnt;
			goto error;
		}
		if (map.max_entries && map.max_entries < pb->cpu_cnt)
			pb->cpu_cnt = map.max_entries;
	}

	pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events));
	if (!pb->events) {
		err = -ENOMEM;
		pr_warn("failed to allocate events: out of memory\n");
		goto error;
	}
	pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs));
	if (!pb->cpu_bufs) {
		err = -ENOMEM;
		pr_warn("failed to allocate buffers: out of memory\n");
		goto error;
	}

	err = parse_cpu_mask_file(online_cpus_file, &online, &n);
	if (err) {
		pr_warn("failed to get online CPU mask: %d\n", err);
		goto error;
	}

	for (i = 0, j = 0; i < pb->cpu_cnt; i++) {
		struct perf_cpu_buf *cpu_buf;
		int cpu, map_key;

		cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
		map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i;

		/* in case user didn't explicitly request particular CPUs to
		 * be attached to, skip offline/not present CPUs
		 */
		if (p->cpu_cnt <= 0 && (cpu >= n || !online[cpu]))
			continue;

		cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
		if (IS_ERR(cpu_buf)) {
			err = PTR_ERR(cpu_buf);
			goto error;
		}

		pb->cpu_bufs[j] = cpu_buf;

		err = bpf_map_update_elem(pb->map_fd, &map_key,
					  &cpu_buf->fd, 0);
		if (err) {
			err = -errno;
			pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
				cpu, map_key, cpu_buf->fd,
				libbpf_strerror_r(err, msg, sizeof(msg)));
			goto error;
		}

		pb->events[j].events = EPOLLIN;
		pb->events[j].data.ptr = cpu_buf;
		if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd,
			      &pb->events[j]) < 0) {
			err = -errno;
			pr_warn("failed to epoll_ctl cpu #%d perf FD %d: %s\n",
				cpu, cpu_buf->fd,
				libbpf_strerror_r(err, msg, sizeof(msg)));
			goto error;
		}
		j++;
	}
	pb->cpu_cnt = j;
	free(online);

	return pb;

error:
	free(online);
	if (pb)
		perf_buffer__free(pb);
	return ERR_PTR(err);
}

struct perf_sample_raw {
	struct perf_event_header header;
	uint32_t size;
	char data[];
};

struct perf_sample_lost {
	struct perf_event_header header;
	uint64_t id;
	uint64_t lost;
	uint64_t sample_id;
};

static enum bpf_perf_event_ret
perf_buffer__process_record(struct perf_event_header *e, void *ctx)
{
	struct perf_cpu_buf *cpu_buf = ctx;
	struct perf_buffer *pb = cpu_buf->pb;
	void *data = e;

	/* user wants full control over parsing perf event */
	if (pb->event_cb)
		return pb->event_cb(pb->ctx, cpu_buf->cpu, e);

	switch (e->type) {
	case PERF_RECORD_SAMPLE: {
		struct perf_sample_raw *s = data;

		if (pb->sample_cb)
			pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size);
		break;
	}
	case PERF_RECORD_LOST: {
		struct perf_sample_lost *s = data;

		if (pb->lost_cb)
			pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost);
		break;
	}
	default:
		pr_warn("unknown perf sample type %d\n", e->type);
		return LIBBPF_PERF_EVENT_ERROR;
	}
	return LIBBPF_PERF_EVENT_CONT;
}

static int perf_buffer__process_records(struct perf_buffer *pb,
					 struct perf_cpu_buf *cpu_buf)
{
	enum bpf_perf_event_ret ret;

	ret = bpf_perf_event_read_simple(cpu_buf->base, pb->mmap_size,
					 pb->page_size, &cpu_buf->buf,
					 &cpu_buf->buf_size,
					 perf_buffer__process_record, cpu_buf);
	if (ret != LIBBPF_PERF_EVENT_CONT)
		return ret;
	return 0;
}

int perf_buffer__epoll_fd(const struct perf_buffer *pb)
{
	return pb->epoll_fd;
}

int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
{
	int i, cnt, err;

	cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
	for (i = 0; i < cnt; i++) {
		struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;

		err = perf_buffer__process_records(pb, cpu_buf);
		if (err) {
			pr_warn("error while processing records: %d\n", err);
			return err;
		}
	}
	return cnt < 0 ? -errno : cnt;
}

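/* Note: perf_buffer__poll() returns the number of buffers that had data
 * (with per-record callbacks already invoked) or a negative errno.
 */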
/* Return number of PERF_EVENT_ARRAY map slots set up by this perf_buffer
 * manager.
 */
size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb)
{
	return pb->cpu_cnt;
}

/*
 * Return perf_event FD of a ring buffer in *buf_idx* slot of
 * PERF_EVENT_ARRAY BPF map. This FD can be polled for new data using
 * select()/poll()/epoll() Linux syscalls.
 */
int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx)
{
	struct perf_cpu_buf *cpu_buf;

	if (buf_idx >= pb->cpu_cnt)
		return -EINVAL;

	cpu_buf = pb->cpu_bufs[buf_idx];
	if (!cpu_buf)
		return -ENOENT;

	return cpu_buf->fd;
}

/*
 * Consume data from perf ring buffer corresponding to slot *buf_idx* in
 * PERF_EVENT_ARRAY BPF map without waiting/polling. If there is no data to
 * consume, do nothing and return success.
 * Returns:
 *   - 0 on success;
 *   - <0 on failure.
 */
int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx)
{
	struct perf_cpu_buf *cpu_buf;

	if (buf_idx >= pb->cpu_cnt)
		return -EINVAL;

	cpu_buf = pb->cpu_bufs[buf_idx];
	if (!cpu_buf)
		return -ENOENT;

	return perf_buffer__process_records(pb, cpu_buf);
}

int perf_buffer__consume(struct perf_buffer *pb)
{
	int i, err;

	for (i = 0; i < pb->cpu_cnt; i++) {
		struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];

		if (!cpu_buf)
			continue;

		err = perf_buffer__process_records(pb, cpu_buf);
		if (err) {
			pr_warn("perf_buffer: failed to process records in buffer #%d: %d\n", i, err);
			return err;
		}
	}
	return 0;
}

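/* The descriptor table below records, for each variable-length array that
 * struct bpf_prog_info can return, where to find its data pointer, its
 * element count, and its record size; it drives
 * bpf_program__get_prog_info_linear() further down.
 */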
struct bpf_prog_info_array_desc {
	int array_offset; /* e.g. offset of jited_prog_insns */
	int count_offset; /* e.g. offset of jited_prog_len */
	int size_offset;  /* > 0: offset of rec size,
			   * < 0: fix size of -size_offset
			   */
};

static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
	[BPF_PROG_INFO_JITED_INSNS] = {
		offsetof(struct bpf_prog_info, jited_prog_insns),
		offsetof(struct bpf_prog_info, jited_prog_len),
		-1,
	},
	[BPF_PROG_INFO_XLATED_INSNS] = {
		offsetof(struct bpf_prog_info, xlated_prog_insns),
		offsetof(struct bpf_prog_info, xlated_prog_len),
		-1,
	},
	[BPF_PROG_INFO_MAP_IDS] = {
		offsetof(struct bpf_prog_info, map_ids),
		offsetof(struct bpf_prog_info, nr_map_ids),
		-(int)sizeof(__u32),
	},
	[BPF_PROG_INFO_JITED_KSYMS] = {
		offsetof(struct bpf_prog_info, jited_ksyms),
		offsetof(struct bpf_prog_info, nr_jited_ksyms),
		-(int)sizeof(__u64),
	},
	[BPF_PROG_INFO_JITED_FUNC_LENS] = {
		offsetof(struct bpf_prog_info, jited_func_lens),
		offsetof(struct bpf_prog_info, nr_jited_func_lens),
		-(int)sizeof(__u32),
	},
	[BPF_PROG_INFO_FUNC_INFO] = {
		offsetof(struct bpf_prog_info, func_info),
		offsetof(struct bpf_prog_info, nr_func_info),
		offsetof(struct bpf_prog_info, func_info_rec_size),
	},
	[BPF_PROG_INFO_LINE_INFO] = {
		offsetof(struct bpf_prog_info, line_info),
		offsetof(struct bpf_prog_info, nr_line_info),
		offsetof(struct bpf_prog_info, line_info_rec_size),
	},
	[BPF_PROG_INFO_JITED_LINE_INFO] = {
		offsetof(struct bpf_prog_info, jited_line_info),
		offsetof(struct bpf_prog_info, nr_jited_line_info),
		offsetof(struct bpf_prog_info, jited_line_info_rec_size),
	},
	[BPF_PROG_INFO_PROG_TAGS] = {
		offsetof(struct bpf_prog_info, prog_tags),
		offsetof(struct bpf_prog_info, nr_prog_tags),
		-(int)sizeof(__u8) * BPF_TAG_SIZE,
	},

};

static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info,
					   int offset)
{
	__u32 *array = (__u32 *)info;

	if (offset >= 0)
		return array[offset / sizeof(__u32)];
	return -(int)offset;
}

static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info,
					   int offset)
{
	__u64 *array = (__u64 *)info;

	if (offset >= 0)
		return array[offset / sizeof(__u64)];
	return -(int)offset;
}

static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset,
					 __u32 val)
{
	__u32 *array = (__u32 *)info;

	if (offset >= 0)
		array[offset / sizeof(__u32)] = val;
}

static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset,
					 __u64 val)
{
	__u64 *array = (__u64 *)info;

	if (offset >= 0)
		array[offset / sizeof(__u64)] = val;
}

struct bpf_prog_info_linear *
bpf_program__get_prog_info_linear(int fd, __u64 arrays)
{
	struct bpf_prog_info_linear *info_linear;
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	__u32 data_len = 0;
	int i, err;
	void *ptr;

	if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
		return ERR_PTR(-EINVAL);

	/* step 1: get array dimensions */
	err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
	if (err) {
		pr_debug("can't get prog info: %s", strerror(errno));
		return ERR_PTR(-EFAULT);
	}

	/* step 2: calculate total size of all arrays */
	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		bool include_array = (arrays & (1UL << i)) > 0;
		struct bpf_prog_info_array_desc *desc;
		__u32 count, size;

		desc = bpf_prog_info_array_desc + i;

		/* kernel is too old to support this field */
		if (info_len < desc->array_offset + sizeof(__u32) ||
		    info_len < desc->count_offset + sizeof(__u32) ||
		    (desc->size_offset > 0 && info_len < desc->size_offset))
			include_array = false;

		if (!include_array) {
			arrays &= ~(1UL << i);	/* clear the bit */
			continue;
		}

		count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);

		data_len += count * size;
	}

	/* step 3: allocate contiguous memory */
	data_len = roundup(data_len, sizeof(__u64));
	info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
	if (!info_linear)
		return ERR_PTR(-ENOMEM);

	/* step 4: fill data to info_linear->info */
	info_linear->arrays = arrays;
	memset(&info_linear->info, 0, sizeof(info));
	ptr = info_linear->data;

	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u32 count, size;

		if ((arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
		bpf_prog_info_set_offset_u32(&info_linear->info,
					     desc->count_offset, count);
		bpf_prog_info_set_offset_u32(&info_linear->info,
					     desc->size_offset, size);
		bpf_prog_info_set_offset_u64(&info_linear->info,
					     desc->array_offset,
					     ptr_to_u64(ptr));
		ptr += count * size;
	}

	/* step 5: call syscall again to get required arrays */
	err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
	if (err) {
		pr_debug("can't get prog info: %s", strerror(errno));
		free(info_linear);
		return ERR_PTR(-EFAULT);
	}

	/* step 6: verify the data */
	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u32 v1, v2;

		if ((arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
						   desc->count_offset);
		if (v1 != v2)
			pr_warn("%s: mismatch in element count\n", __func__);

		v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
		v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
						   desc->size_offset);
		if (v1 != v2)
			pr_warn("%s: mismatch in rec size\n", __func__);
	}

	/* step 7: update info_len and data_len */
	info_linear->info_len = sizeof(struct bpf_prog_info);
	info_linear->data_len = data_len;

	return info_linear;
}

void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear)
{
	int i;

	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u64 addr, offs;

		if ((info_linear->arrays & (1UL << i)) == 0)
			continue;

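		/* convert the kernel-provided absolute data pointer into an
		 * offset relative to info_linear->data
		 */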
		desc = bpf_prog_info_array_desc + i;
		addr = bpf_prog_info_read_offset_u64(&info_linear->info,
						     desc->array_offset);
		offs = addr - ptr_to_u64(info_linear->data);
		bpf_prog_info_set_offset_u64(&info_linear->info,
					     desc->array_offset, offs);
	}
}

void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
{
	int i;

	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u64 addr, offs;

		if ((info_linear->arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		offs = bpf_prog_info_read_offset_u64(&info_linear->info,
						     desc->array_offset);
		addr = offs + ptr_to_u64(info_linear->data);
		bpf_prog_info_set_offset_u64(&info_linear->info,
					     desc->array_offset, addr);
	}
}

int bpf_program__set_attach_target(struct bpf_program *prog,
				   int attach_prog_fd,
				   const char *attach_func_name)
{
	int btf_obj_fd = 0, btf_id = 0, err;

	if (!prog || attach_prog_fd < 0 || !attach_func_name)
		return -EINVAL;

	if (prog->obj->loaded)
		return -EINVAL;

	if (attach_prog_fd) {
		btf_id = libbpf_find_prog_btf_id(attach_func_name,
						 attach_prog_fd);
		if (btf_id < 0)
			return btf_id;
	} else {
		/* load btf_vmlinux, if not yet */
		err = bpf_object__load_vmlinux_btf(prog->obj, true);
		if (err)
			return err;
		err = find_kernel_btf_id(prog->obj, attach_func_name,
					 prog->expected_attach_type,
					 &btf_obj_fd, &btf_id);
		if (err)
			return err;
	}

	prog->attach_btf_id = btf_id;
	prog->attach_btf_obj_fd = btf_obj_fd;
	prog->attach_prog_fd = attach_prog_fd;
	return 0;
}

int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz)
{
	int err = 0, n, len, start, end = -1;
	bool *tmp;

	*mask = NULL;
	*mask_sz = 0;

	/* Each sub string separated by ',' has format \d+-\d+ or \d+ */
	while (*s) {
		if (*s == ',' || *s == '\n') {
			s++;
			continue;
		}
		n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len);
		if (n <= 0 || n > 2) {
			pr_warn("Failed to get CPU range %s: %d\n", s, n);
			err = -EINVAL;
			goto cleanup;
		} else if (n == 1) {
			end = start;
		}
		if (start < 0 || start > end) {
			pr_warn("Invalid CPU range [%d,%d] in %s\n",
				start, end, s);
			err = -EINVAL;
			goto cleanup;
		}
		tmp = realloc(*mask, end + 1);
		if (!tmp) {
			err = -ENOMEM;
			goto cleanup;
		}
		*mask = tmp;
		memset(tmp + *mask_sz, 0, start - *mask_sz);
		memset(tmp + start, 1, end - start + 1);
		*mask_sz = end + 1;
		s += len;
	}
	if (!*mask_sz) {
		pr_warn("Empty CPU range\n");
		return -EINVAL;
	}
	return 0;
cleanup:
	free(*mask);
	*mask = NULL;
	return err;
}

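/* Example (illustrative): for the string "0-2,4", parse_cpu_mask_str()
 * produces mask = {true, true, true, false, true} and *mask_sz = 5.
 */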
int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz)
{
	int fd, err = 0, len;
	char buf[128];

	fd = open(fcpu, O_RDONLY);
	if (fd < 0) {
		err = -errno;
		pr_warn("Failed to open cpu mask file %s: %d\n", fcpu, err);
		return err;
	}
	len = read(fd, buf, sizeof(buf));
	close(fd);
	if (len <= 0) {
		err = len ? -errno : -EINVAL;
		pr_warn("Failed to read cpu mask from %s: %d\n", fcpu, err);
		return err;
	}
	if (len >= sizeof(buf)) {
		pr_warn("CPU mask is too big in file %s\n", fcpu);
		return -E2BIG;
	}
	buf[len] = '\0';

	return parse_cpu_mask_str(buf, mask, mask_sz);
}

int libbpf_num_possible_cpus(void)
{
	static const char *fcpu = "/sys/devices/system/cpu/possible";
	static int cpus;
	int err, n, i, tmp_cpus;
	bool *mask;

	tmp_cpus = READ_ONCE(cpus);
	if (tmp_cpus > 0)
		return tmp_cpus;

	err = parse_cpu_mask_file(fcpu, &mask, &n);
	if (err)
		return err;

	tmp_cpus = 0;
	for (i = 0; i < n; i++) {
		if (mask[i])
			tmp_cpus++;
	}
	free(mask);

	WRITE_ONCE(cpus, tmp_cpus);
	return tmp_cpus;
}

int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
			      const struct bpf_object_open_opts *opts)
{
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, skel_opts,
		.object_name = s->name,
	);
	struct bpf_object *obj;
	int i;

	/* Attempt to preserve opts->object_name, unless overridden by user
	 * explicitly. Overwriting object name for skeletons is discouraged,
	 * as it breaks global data maps, because they contain object name
	 * prefix as their own map name prefix. When skeleton is generated,
	 * bpftool is making an assumption that this name will stay the same.
	 */
	if (opts) {
		memcpy(&skel_opts, opts, sizeof(*opts));
		if (!opts->object_name)
			skel_opts.object_name = s->name;
	}

	obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts);
	if (IS_ERR(obj)) {
		pr_warn("failed to initialize skeleton BPF object '%s': %ld\n",
			s->name, PTR_ERR(obj));
		return PTR_ERR(obj);
	}

	*s->obj = obj;

	for (i = 0; i < s->map_cnt; i++) {
		struct bpf_map **map = s->maps[i].map;
		const char *name = s->maps[i].name;
		void **mmaped = s->maps[i].mmaped;

		*map = bpf_object__find_map_by_name(obj, name);
		if (!*map) {
			pr_warn("failed to find skeleton map '%s'\n", name);
			return -ESRCH;
		}

		/* externs shouldn't be pre-setup from user code */
		if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG)
			*mmaped = (*map)->mmaped;
	}

	for (i = 0; i < s->prog_cnt; i++) {
		struct bpf_program **prog = s->progs[i].prog;
		const char *name = s->progs[i].name;

		*prog = bpf_object__find_program_by_name(obj, name);
		if (!*prog) {
			pr_warn("failed to find skeleton program '%s'\n", name);
			return -ESRCH;
		}
	}

	return 0;
}

int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
{
	int i, err;

	err = bpf_object__load(*s->obj);
	if (err) {
		pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err);
		return err;
	}

	for (i = 0; i < s->map_cnt; i++) {
		struct bpf_map *map = *s->maps[i].map;
		size_t mmap_sz = bpf_map_mmap_sz(map);
		int prot, map_fd = bpf_map__fd(map);
		void **mmaped = s->maps[i].mmaped;

		if (!mmaped)
			continue;

		if (!(map->def.map_flags & BPF_F_MMAPABLE)) {
			*mmaped = NULL;
			continue;
		}

		if (map->def.map_flags & BPF_F_RDONLY_PROG)
			prot = PROT_READ;
		else
			prot = PROT_READ | PROT_WRITE;

		/* Remap anonymous mmap()-ed "map initialization image" as
		 * a BPF map-backed mmap()-ed memory, but preserving the same
		 * memory address. This will cause kernel to change process'
		 * page table to point to a different piece of kernel memory,
		 * but from userspace point of view memory address (and its
		 * contents, being identical at this point) will stay the
		 * same. This mapping will be released by bpf_object__close()
		 * as per normal clean up procedure, so we don't need to worry
		 * about it from skeleton's clean up perspective.
		 */
		*mmaped = mmap(map->mmaped, mmap_sz, prot,
			       MAP_SHARED | MAP_FIXED, map_fd, 0);
		if (*mmaped == MAP_FAILED) {
			err = -errno;
			*mmaped = NULL;
			pr_warn("failed to re-mmap() map '%s': %d\n",
				bpf_map__name(map), err);
			return err;
		}
	}

	return 0;
}

int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
{
	int i;

	for (i = 0; i < s->prog_cnt; i++) {
		struct bpf_program *prog = *s->progs[i].prog;
		struct bpf_link **link = s->progs[i].link;
		const struct bpf_sec_def *sec_def;

		if (!prog->load)
			continue;

		sec_def = find_sec_def(prog->sec_name);
		if (!sec_def || !sec_def->attach_fn)
			continue;

		*link = sec_def->attach_fn(sec_def, prog);
		if (IS_ERR(*link)) {
			pr_warn("failed to auto-attach program '%s': %ld\n",
				bpf_program__name(prog), PTR_ERR(*link));
			return PTR_ERR(*link);
		}
	}

	return 0;
}

void bpf_object__detach_skeleton(struct bpf_object_skeleton *s)
{
	int i;

	for (i = 0; i < s->prog_cnt; i++) {
		struct bpf_link **link = s->progs[i].link;

		bpf_link__destroy(*link);
		*link = NULL;
	}
}

void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
{
	if (s->progs)
		bpf_object__detach_skeleton(s);
	if (s->obj)
		bpf_object__close(*s->obj);
	free(s->maps);
	free(s->progs);
	free(s);
}
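
/* Example (sketch): the skeleton helpers above back the bpftool-generated
 * per-object wrappers; for a hypothetical "myprog" skeleton the typical
 * lifecycle looks like this (error handling abbreviated):
 *
 *	struct myprog_bpf *skel = myprog_bpf__open_and_load();
 *
 *	if (!skel)
 *		return -1;
 *	if (myprog_bpf__attach(skel))
 *		goto cleanup;
 *	// ... use skel->bss, skel->maps, skel->links ...
 * cleanup:
 *	myprog_bpf__destroy(skel);
 */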