1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) 2 3 /* 4 * Common eBPF ELF object loading operations. 5 * 6 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org> 7 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com> 8 * Copyright (C) 2015 Huawei Inc. 9 * Copyright (C) 2017 Nicira, Inc. 10 */ 11 12 #ifndef _GNU_SOURCE 13 #define _GNU_SOURCE 14 #endif 15 #include <stdlib.h> 16 #include <stdio.h> 17 #include <stdarg.h> 18 #include <libgen.h> 19 #include <inttypes.h> 20 #include <string.h> 21 #include <unistd.h> 22 #include <fcntl.h> 23 #include <errno.h> 24 #include <asm/unistd.h> 25 #include <linux/err.h> 26 #include <linux/kernel.h> 27 #include <linux/bpf.h> 28 #include <linux/btf.h> 29 #include <linux/filter.h> 30 #include <linux/list.h> 31 #include <linux/limits.h> 32 #include <linux/perf_event.h> 33 #include <linux/ring_buffer.h> 34 #include <sys/stat.h> 35 #include <sys/types.h> 36 #include <sys/vfs.h> 37 #include <tools/libc_compat.h> 38 #include <libelf.h> 39 #include <gelf.h> 40 41 #include "libbpf.h" 42 #include "bpf.h" 43 #include "btf.h" 44 #include "str_error.h" 45 #include "libbpf_util.h" 46 47 #ifndef EM_BPF 48 #define EM_BPF 247 49 #endif 50 51 #ifndef BPF_FS_MAGIC 52 #define BPF_FS_MAGIC 0xcafe4a11 53 #endif 54 55 #define __printf(a, b) __attribute__((format(printf, a, b))) 56 57 static int __base_pr(enum libbpf_print_level level, const char *format, 58 va_list args) 59 { 60 if (level == LIBBPF_DEBUG) 61 return 0; 62 63 return vfprintf(stderr, format, args); 64 } 65 66 static libbpf_print_fn_t __libbpf_pr = __base_pr; 67 68 void libbpf_set_print(libbpf_print_fn_t fn) 69 { 70 __libbpf_pr = fn; 71 } 72 73 __printf(2, 3) 74 void libbpf_print(enum libbpf_print_level level, const char *format, ...) 
75 { 76 va_list args; 77 78 if (!__libbpf_pr) 79 return; 80 81 va_start(args, format); 82 __libbpf_pr(level, format, args); 83 va_end(args); 84 } 85 86 #define STRERR_BUFSIZE 128 87 88 #define CHECK_ERR(action, err, out) do { \ 89 err = action; \ 90 if (err) \ 91 goto out; \ 92 } while(0) 93 94 95 /* Copied from tools/perf/util/util.h */ 96 #ifndef zfree 97 # define zfree(ptr) ({ free(*ptr); *ptr = NULL; }) 98 #endif 99 100 #ifndef zclose 101 # define zclose(fd) ({ \ 102 int ___err = 0; \ 103 if ((fd) >= 0) \ 104 ___err = close((fd)); \ 105 fd = -1; \ 106 ___err; }) 107 #endif 108 109 #ifdef HAVE_LIBELF_MMAP_SUPPORT 110 # define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP 111 #else 112 # define LIBBPF_ELF_C_READ_MMAP ELF_C_READ 113 #endif 114 115 static inline __u64 ptr_to_u64(const void *ptr) 116 { 117 return (__u64) (unsigned long) ptr; 118 } 119 120 struct bpf_capabilities { 121 /* v4.14: kernel support for program & map names. */ 122 __u32 name:1; 123 }; 124 125 /* 126 * bpf_prog should be a better name but it has been used in 127 * linux/filter.h. 128 */ 129 struct bpf_program { 130 /* Index in elf obj file, for relocation use. 
*/ 131 int idx; 132 char *name; 133 int prog_ifindex; 134 char *section_name; 135 /* section_name with / replaced by _; makes recursive pinning 136 * in bpf_object__pin_programs easier 137 */ 138 char *pin_name; 139 struct bpf_insn *insns; 140 size_t insns_cnt, main_prog_cnt; 141 enum bpf_prog_type type; 142 143 struct reloc_desc { 144 enum { 145 RELO_LD64, 146 RELO_CALL, 147 } type; 148 int insn_idx; 149 union { 150 int map_idx; 151 int text_off; 152 }; 153 } *reloc_desc; 154 int nr_reloc; 155 156 struct { 157 int nr; 158 int *fds; 159 } instances; 160 bpf_program_prep_t preprocessor; 161 162 struct bpf_object *obj; 163 void *priv; 164 bpf_program_clear_priv_t clear_priv; 165 166 enum bpf_attach_type expected_attach_type; 167 int btf_fd; 168 void *func_info; 169 __u32 func_info_rec_size; 170 __u32 func_info_cnt; 171 172 struct bpf_capabilities *caps; 173 174 void *line_info; 175 __u32 line_info_rec_size; 176 __u32 line_info_cnt; 177 }; 178 179 struct bpf_map { 180 int fd; 181 char *name; 182 size_t offset; 183 int map_ifindex; 184 int inner_map_fd; 185 struct bpf_map_def def; 186 __u32 btf_key_type_id; 187 __u32 btf_value_type_id; 188 void *priv; 189 bpf_map_clear_priv_t clear_priv; 190 }; 191 192 static LIST_HEAD(bpf_objects_list); 193 194 struct bpf_object { 195 char license[64]; 196 __u32 kern_version; 197 198 struct bpf_program *programs; 199 size_t nr_programs; 200 struct bpf_map *maps; 201 size_t nr_maps; 202 203 bool loaded; 204 bool has_pseudo_calls; 205 206 /* 207 * Information when doing elf related work. Only valid if fd 208 * is valid. 209 */ 210 struct { 211 int fd; 212 void *obj_buf; 213 size_t obj_buf_sz; 214 Elf *elf; 215 GElf_Ehdr ehdr; 216 Elf_Data *symbols; 217 size_t strtabidx; 218 struct { 219 GElf_Shdr shdr; 220 Elf_Data *data; 221 } *reloc; 222 int nr_reloc; 223 int maps_shndx; 224 int text_shndx; 225 } efile; 226 /* 227 * All loaded bpf_object is linked in a list, which is 228 * hidden to caller. 
bpf_objects__<func> handlers deal with 229 * all objects. 230 */ 231 struct list_head list; 232 233 struct btf *btf; 234 struct btf_ext *btf_ext; 235 236 void *priv; 237 bpf_object_clear_priv_t clear_priv; 238 239 struct bpf_capabilities caps; 240 241 char path[]; 242 }; 243 #define obj_elf_valid(o) ((o)->efile.elf) 244 245 void bpf_program__unload(struct bpf_program *prog) 246 { 247 int i; 248 249 if (!prog) 250 return; 251 252 /* 253 * If the object is opened but the program was never loaded, 254 * it is possible that prog->instances.nr == -1. 255 */ 256 if (prog->instances.nr > 0) { 257 for (i = 0; i < prog->instances.nr; i++) 258 zclose(prog->instances.fds[i]); 259 } else if (prog->instances.nr != -1) { 260 pr_warning("Internal error: instances.nr is %d\n", 261 prog->instances.nr); 262 } 263 264 prog->instances.nr = -1; 265 zfree(&prog->instances.fds); 266 267 zclose(prog->btf_fd); 268 zfree(&prog->func_info); 269 zfree(&prog->line_info); 270 } 271 272 static void bpf_program__exit(struct bpf_program *prog) 273 { 274 if (!prog) 275 return; 276 277 if (prog->clear_priv) 278 prog->clear_priv(prog, prog->priv); 279 280 prog->priv = NULL; 281 prog->clear_priv = NULL; 282 283 bpf_program__unload(prog); 284 zfree(&prog->name); 285 zfree(&prog->section_name); 286 zfree(&prog->pin_name); 287 zfree(&prog->insns); 288 zfree(&prog->reloc_desc); 289 290 prog->nr_reloc = 0; 291 prog->insns_cnt = 0; 292 prog->idx = -1; 293 } 294 295 static char *__bpf_program__pin_name(struct bpf_program *prog) 296 { 297 char *name, *p; 298 299 name = p = strdup(prog->section_name); 300 while ((p = strchr(p, '/'))) 301 *p = '_'; 302 303 return name; 304 } 305 306 static int 307 bpf_program__init(void *data, size_t size, char *section_name, int idx, 308 struct bpf_program *prog) 309 { 310 if (size < sizeof(struct bpf_insn)) { 311 pr_warning("corrupted section '%s'\n", section_name); 312 return -EINVAL; 313 } 314 315 memset(prog, 0, sizeof(*prog)); 316 317 prog->section_name = 
strdup(section_name); 318 if (!prog->section_name) { 319 pr_warning("failed to alloc name for prog under section(%d) %s\n", 320 idx, section_name); 321 goto errout; 322 } 323 324 prog->pin_name = __bpf_program__pin_name(prog); 325 if (!prog->pin_name) { 326 pr_warning("failed to alloc pin name for prog under section(%d) %s\n", 327 idx, section_name); 328 goto errout; 329 } 330 331 prog->insns = malloc(size); 332 if (!prog->insns) { 333 pr_warning("failed to alloc insns for prog under section %s\n", 334 section_name); 335 goto errout; 336 } 337 prog->insns_cnt = size / sizeof(struct bpf_insn); 338 memcpy(prog->insns, data, 339 prog->insns_cnt * sizeof(struct bpf_insn)); 340 prog->idx = idx; 341 prog->instances.fds = NULL; 342 prog->instances.nr = -1; 343 prog->type = BPF_PROG_TYPE_UNSPEC; 344 prog->btf_fd = -1; 345 346 return 0; 347 errout: 348 bpf_program__exit(prog); 349 return -ENOMEM; 350 } 351 352 static int 353 bpf_object__add_program(struct bpf_object *obj, void *data, size_t size, 354 char *section_name, int idx) 355 { 356 struct bpf_program prog, *progs; 357 int nr_progs, err; 358 359 err = bpf_program__init(data, size, section_name, idx, &prog); 360 if (err) 361 return err; 362 363 prog.caps = &obj->caps; 364 progs = obj->programs; 365 nr_progs = obj->nr_programs; 366 367 progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0])); 368 if (!progs) { 369 /* 370 * In this case the original obj->programs 371 * is still valid, so don't need special treat for 372 * bpf_close_object(). 
373 */ 374 pr_warning("failed to alloc a new program under section '%s'\n", 375 section_name); 376 bpf_program__exit(&prog); 377 return -ENOMEM; 378 } 379 380 pr_debug("found program %s\n", prog.section_name); 381 obj->programs = progs; 382 obj->nr_programs = nr_progs + 1; 383 prog.obj = obj; 384 progs[nr_progs] = prog; 385 return 0; 386 } 387 388 static int 389 bpf_object__init_prog_names(struct bpf_object *obj) 390 { 391 Elf_Data *symbols = obj->efile.symbols; 392 struct bpf_program *prog; 393 size_t pi, si; 394 395 for (pi = 0; pi < obj->nr_programs; pi++) { 396 const char *name = NULL; 397 398 prog = &obj->programs[pi]; 399 400 for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name; 401 si++) { 402 GElf_Sym sym; 403 404 if (!gelf_getsym(symbols, si, &sym)) 405 continue; 406 if (sym.st_shndx != prog->idx) 407 continue; 408 if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL) 409 continue; 410 411 name = elf_strptr(obj->efile.elf, 412 obj->efile.strtabidx, 413 sym.st_name); 414 if (!name) { 415 pr_warning("failed to get sym name string for prog %s\n", 416 prog->section_name); 417 return -LIBBPF_ERRNO__LIBELF; 418 } 419 } 420 421 if (!name && prog->idx == obj->efile.text_shndx) 422 name = ".text"; 423 424 if (!name) { 425 pr_warning("failed to find sym for prog %s\n", 426 prog->section_name); 427 return -EINVAL; 428 } 429 430 prog->name = strdup(name); 431 if (!prog->name) { 432 pr_warning("failed to allocate memory for prog sym %s\n", 433 name); 434 return -ENOMEM; 435 } 436 } 437 438 return 0; 439 } 440 441 static struct bpf_object *bpf_object__new(const char *path, 442 void *obj_buf, 443 size_t obj_buf_sz) 444 { 445 struct bpf_object *obj; 446 447 obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1); 448 if (!obj) { 449 pr_warning("alloc memory failed for %s\n", path); 450 return ERR_PTR(-ENOMEM); 451 } 452 453 strcpy(obj->path, path); 454 obj->efile.fd = -1; 455 456 /* 457 * Caller of this function should also calls 458 * bpf_object__elf_finish() after 
data collection to return 459 * obj_buf to user. If not, we should duplicate the buffer to 460 * avoid user freeing them before elf finish. 461 */ 462 obj->efile.obj_buf = obj_buf; 463 obj->efile.obj_buf_sz = obj_buf_sz; 464 obj->efile.maps_shndx = -1; 465 466 obj->loaded = false; 467 468 INIT_LIST_HEAD(&obj->list); 469 list_add(&obj->list, &bpf_objects_list); 470 return obj; 471 } 472 473 static void bpf_object__elf_finish(struct bpf_object *obj) 474 { 475 if (!obj_elf_valid(obj)) 476 return; 477 478 if (obj->efile.elf) { 479 elf_end(obj->efile.elf); 480 obj->efile.elf = NULL; 481 } 482 obj->efile.symbols = NULL; 483 484 zfree(&obj->efile.reloc); 485 obj->efile.nr_reloc = 0; 486 zclose(obj->efile.fd); 487 obj->efile.obj_buf = NULL; 488 obj->efile.obj_buf_sz = 0; 489 } 490 491 static int bpf_object__elf_init(struct bpf_object *obj) 492 { 493 int err = 0; 494 GElf_Ehdr *ep; 495 496 if (obj_elf_valid(obj)) { 497 pr_warning("elf init: internal error\n"); 498 return -LIBBPF_ERRNO__LIBELF; 499 } 500 501 if (obj->efile.obj_buf_sz > 0) { 502 /* 503 * obj_buf should have been validated by 504 * bpf_object__open_buffer(). 
505 */ 506 obj->efile.elf = elf_memory(obj->efile.obj_buf, 507 obj->efile.obj_buf_sz); 508 } else { 509 obj->efile.fd = open(obj->path, O_RDONLY); 510 if (obj->efile.fd < 0) { 511 char errmsg[STRERR_BUFSIZE]; 512 char *cp = libbpf_strerror_r(errno, errmsg, 513 sizeof(errmsg)); 514 515 pr_warning("failed to open %s: %s\n", obj->path, cp); 516 return -errno; 517 } 518 519 obj->efile.elf = elf_begin(obj->efile.fd, 520 LIBBPF_ELF_C_READ_MMAP, 521 NULL); 522 } 523 524 if (!obj->efile.elf) { 525 pr_warning("failed to open %s as ELF file\n", 526 obj->path); 527 err = -LIBBPF_ERRNO__LIBELF; 528 goto errout; 529 } 530 531 if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) { 532 pr_warning("failed to get EHDR from %s\n", 533 obj->path); 534 err = -LIBBPF_ERRNO__FORMAT; 535 goto errout; 536 } 537 ep = &obj->efile.ehdr; 538 539 /* Old LLVM set e_machine to EM_NONE */ 540 if ((ep->e_type != ET_REL) || (ep->e_machine && (ep->e_machine != EM_BPF))) { 541 pr_warning("%s is not an eBPF object file\n", 542 obj->path); 543 err = -LIBBPF_ERRNO__FORMAT; 544 goto errout; 545 } 546 547 return 0; 548 errout: 549 bpf_object__elf_finish(obj); 550 return err; 551 } 552 553 static int 554 bpf_object__check_endianness(struct bpf_object *obj) 555 { 556 static unsigned int const endian = 1; 557 558 switch (obj->efile.ehdr.e_ident[EI_DATA]) { 559 case ELFDATA2LSB: 560 /* We are big endian, BPF obj is little endian. */ 561 if (*(unsigned char const *)&endian != 1) 562 goto mismatch; 563 break; 564 565 case ELFDATA2MSB: 566 /* We are little endian, BPF obj is big endian. 
*/ 567 if (*(unsigned char const *)&endian != 0) 568 goto mismatch; 569 break; 570 default: 571 return -LIBBPF_ERRNO__ENDIAN; 572 } 573 574 return 0; 575 576 mismatch: 577 pr_warning("Error: endianness mismatch.\n"); 578 return -LIBBPF_ERRNO__ENDIAN; 579 } 580 581 static int 582 bpf_object__init_license(struct bpf_object *obj, 583 void *data, size_t size) 584 { 585 memcpy(obj->license, data, 586 min(size, sizeof(obj->license) - 1)); 587 pr_debug("license of %s is %s\n", obj->path, obj->license); 588 return 0; 589 } 590 591 static int 592 bpf_object__init_kversion(struct bpf_object *obj, 593 void *data, size_t size) 594 { 595 __u32 kver; 596 597 if (size != sizeof(kver)) { 598 pr_warning("invalid kver section in %s\n", obj->path); 599 return -LIBBPF_ERRNO__FORMAT; 600 } 601 memcpy(&kver, data, sizeof(kver)); 602 obj->kern_version = kver; 603 pr_debug("kernel version of %s is %x\n", obj->path, 604 obj->kern_version); 605 return 0; 606 } 607 608 static int compare_bpf_map(const void *_a, const void *_b) 609 { 610 const struct bpf_map *a = _a; 611 const struct bpf_map *b = _b; 612 613 return a->offset - b->offset; 614 } 615 616 static bool bpf_map_type__is_map_in_map(enum bpf_map_type type) 617 { 618 if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS || 619 type == BPF_MAP_TYPE_HASH_OF_MAPS) 620 return true; 621 return false; 622 } 623 624 static int 625 bpf_object__init_maps(struct bpf_object *obj, int flags) 626 { 627 bool strict = !(flags & MAPS_RELAX_COMPAT); 628 int i, map_idx, map_def_sz, nr_maps = 0; 629 Elf_Scn *scn; 630 Elf_Data *data = NULL; 631 Elf_Data *symbols = obj->efile.symbols; 632 633 if (obj->efile.maps_shndx < 0) 634 return -EINVAL; 635 if (!symbols) 636 return -EINVAL; 637 638 scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx); 639 if (scn) 640 data = elf_getdata(scn, NULL); 641 if (!scn || !data) { 642 pr_warning("failed to get Elf_Data from map section %d\n", 643 obj->efile.maps_shndx); 644 return -EINVAL; 645 } 646 647 /* 648 * Count number of maps. 
Each map has a name. 649 * Array of maps is not supported: only the first element is 650 * considered. 651 * 652 * TODO: Detect array of map and report error. 653 */ 654 for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) { 655 GElf_Sym sym; 656 657 if (!gelf_getsym(symbols, i, &sym)) 658 continue; 659 if (sym.st_shndx != obj->efile.maps_shndx) 660 continue; 661 nr_maps++; 662 } 663 664 /* Alloc obj->maps and fill nr_maps. */ 665 pr_debug("maps in %s: %d maps in %zd bytes\n", obj->path, 666 nr_maps, data->d_size); 667 668 if (!nr_maps) 669 return 0; 670 671 /* Assume equally sized map definitions */ 672 map_def_sz = data->d_size / nr_maps; 673 if (!data->d_size || (data->d_size % nr_maps) != 0) { 674 pr_warning("unable to determine map definition size " 675 "section %s, %d maps in %zd bytes\n", 676 obj->path, nr_maps, data->d_size); 677 return -EINVAL; 678 } 679 680 obj->maps = calloc(nr_maps, sizeof(obj->maps[0])); 681 if (!obj->maps) { 682 pr_warning("alloc maps for object failed\n"); 683 return -ENOMEM; 684 } 685 obj->nr_maps = nr_maps; 686 687 for (i = 0; i < nr_maps; i++) { 688 /* 689 * fill all fd with -1 so won't close incorrect 690 * fd (fd=0 is stdin) when failure (zclose won't close 691 * negative fd)). 692 */ 693 obj->maps[i].fd = -1; 694 obj->maps[i].inner_map_fd = -1; 695 } 696 697 /* 698 * Fill obj->maps using data in "maps" section. 
699 */ 700 for (i = 0, map_idx = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) { 701 GElf_Sym sym; 702 const char *map_name; 703 struct bpf_map_def *def; 704 705 if (!gelf_getsym(symbols, i, &sym)) 706 continue; 707 if (sym.st_shndx != obj->efile.maps_shndx) 708 continue; 709 710 map_name = elf_strptr(obj->efile.elf, 711 obj->efile.strtabidx, 712 sym.st_name); 713 obj->maps[map_idx].offset = sym.st_value; 714 if (sym.st_value + map_def_sz > data->d_size) { 715 pr_warning("corrupted maps section in %s: last map \"%s\" too small\n", 716 obj->path, map_name); 717 return -EINVAL; 718 } 719 720 obj->maps[map_idx].name = strdup(map_name); 721 if (!obj->maps[map_idx].name) { 722 pr_warning("failed to alloc map name\n"); 723 return -ENOMEM; 724 } 725 pr_debug("map %d is \"%s\"\n", map_idx, 726 obj->maps[map_idx].name); 727 def = (struct bpf_map_def *)(data->d_buf + sym.st_value); 728 /* 729 * If the definition of the map in the object file fits in 730 * bpf_map_def, copy it. Any extra fields in our version 731 * of bpf_map_def will default to zero as a result of the 732 * calloc above. 733 */ 734 if (map_def_sz <= sizeof(struct bpf_map_def)) { 735 memcpy(&obj->maps[map_idx].def, def, map_def_sz); 736 } else { 737 /* 738 * Here the map structure being read is bigger than what 739 * we expect, truncate if the excess bits are all zero. 740 * If they are not zero, reject this map as 741 * incompatible. 
742 */ 743 char *b; 744 for (b = ((char *)def) + sizeof(struct bpf_map_def); 745 b < ((char *)def) + map_def_sz; b++) { 746 if (*b != 0) { 747 pr_warning("maps section in %s: \"%s\" " 748 "has unrecognized, non-zero " 749 "options\n", 750 obj->path, map_name); 751 if (strict) 752 return -EINVAL; 753 } 754 } 755 memcpy(&obj->maps[map_idx].def, def, 756 sizeof(struct bpf_map_def)); 757 } 758 map_idx++; 759 } 760 761 qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]), compare_bpf_map); 762 return 0; 763 } 764 765 static bool section_have_execinstr(struct bpf_object *obj, int idx) 766 { 767 Elf_Scn *scn; 768 GElf_Shdr sh; 769 770 scn = elf_getscn(obj->efile.elf, idx); 771 if (!scn) 772 return false; 773 774 if (gelf_getshdr(scn, &sh) != &sh) 775 return false; 776 777 if (sh.sh_flags & SHF_EXECINSTR) 778 return true; 779 780 return false; 781 } 782 783 static int bpf_object__elf_collect(struct bpf_object *obj, int flags) 784 { 785 Elf *elf = obj->efile.elf; 786 GElf_Ehdr *ep = &obj->efile.ehdr; 787 Elf_Data *btf_ext_data = NULL; 788 Elf_Scn *scn = NULL; 789 int idx = 0, err = 0; 790 791 /* Elf is corrupted/truncated, avoid calling elf_strptr. 
*/ 792 if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) { 793 pr_warning("failed to get e_shstrndx from %s\n", 794 obj->path); 795 return -LIBBPF_ERRNO__FORMAT; 796 } 797 798 while ((scn = elf_nextscn(elf, scn)) != NULL) { 799 char *name; 800 GElf_Shdr sh; 801 Elf_Data *data; 802 803 idx++; 804 if (gelf_getshdr(scn, &sh) != &sh) { 805 pr_warning("failed to get section(%d) header from %s\n", 806 idx, obj->path); 807 err = -LIBBPF_ERRNO__FORMAT; 808 goto out; 809 } 810 811 name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name); 812 if (!name) { 813 pr_warning("failed to get section(%d) name from %s\n", 814 idx, obj->path); 815 err = -LIBBPF_ERRNO__FORMAT; 816 goto out; 817 } 818 819 data = elf_getdata(scn, 0); 820 if (!data) { 821 pr_warning("failed to get section(%d) data from %s(%s)\n", 822 idx, name, obj->path); 823 err = -LIBBPF_ERRNO__FORMAT; 824 goto out; 825 } 826 pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n", 827 idx, name, (unsigned long)data->d_size, 828 (int)sh.sh_link, (unsigned long)sh.sh_flags, 829 (int)sh.sh_type); 830 831 if (strcmp(name, "license") == 0) 832 err = bpf_object__init_license(obj, 833 data->d_buf, 834 data->d_size); 835 else if (strcmp(name, "version") == 0) 836 err = bpf_object__init_kversion(obj, 837 data->d_buf, 838 data->d_size); 839 else if (strcmp(name, "maps") == 0) 840 obj->efile.maps_shndx = idx; 841 else if (strcmp(name, BTF_ELF_SEC) == 0) { 842 obj->btf = btf__new(data->d_buf, data->d_size); 843 if (IS_ERR(obj->btf) || btf__load(obj->btf)) { 844 pr_warning("Error loading ELF section %s: %ld. 
Ignored and continue.\n", 845 BTF_ELF_SEC, PTR_ERR(obj->btf)); 846 if (!IS_ERR(obj->btf)) 847 btf__free(obj->btf); 848 obj->btf = NULL; 849 } 850 } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) { 851 btf_ext_data = data; 852 } else if (sh.sh_type == SHT_SYMTAB) { 853 if (obj->efile.symbols) { 854 pr_warning("bpf: multiple SYMTAB in %s\n", 855 obj->path); 856 err = -LIBBPF_ERRNO__FORMAT; 857 } else { 858 obj->efile.symbols = data; 859 obj->efile.strtabidx = sh.sh_link; 860 } 861 } else if ((sh.sh_type == SHT_PROGBITS) && 862 (sh.sh_flags & SHF_EXECINSTR) && 863 (data->d_size > 0)) { 864 if (strcmp(name, ".text") == 0) 865 obj->efile.text_shndx = idx; 866 err = bpf_object__add_program(obj, data->d_buf, 867 data->d_size, name, idx); 868 if (err) { 869 char errmsg[STRERR_BUFSIZE]; 870 char *cp = libbpf_strerror_r(-err, errmsg, 871 sizeof(errmsg)); 872 873 pr_warning("failed to alloc program %s (%s): %s", 874 name, obj->path, cp); 875 } 876 } else if (sh.sh_type == SHT_REL) { 877 void *reloc = obj->efile.reloc; 878 int nr_reloc = obj->efile.nr_reloc + 1; 879 int sec = sh.sh_info; /* points to other section */ 880 881 /* Only do relo for section with exec instructions */ 882 if (!section_have_execinstr(obj, sec)) { 883 pr_debug("skip relo %s(%d) for section(%d)\n", 884 name, idx, sec); 885 continue; 886 } 887 888 reloc = reallocarray(reloc, nr_reloc, 889 sizeof(*obj->efile.reloc)); 890 if (!reloc) { 891 pr_warning("realloc failed\n"); 892 err = -ENOMEM; 893 } else { 894 int n = nr_reloc - 1; 895 896 obj->efile.reloc = reloc; 897 obj->efile.nr_reloc = nr_reloc; 898 899 obj->efile.reloc[n].shdr = sh; 900 obj->efile.reloc[n].data = data; 901 } 902 } else { 903 pr_debug("skip section(%d) %s\n", idx, name); 904 } 905 if (err) 906 goto out; 907 } 908 909 if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) { 910 pr_warning("Corrupted ELF file: index of strtab invalid\n"); 911 return LIBBPF_ERRNO__FORMAT; 912 } 913 if (btf_ext_data) { 914 if (!obj->btf) { 915 
pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n", 916 BTF_EXT_ELF_SEC, BTF_ELF_SEC); 917 } else { 918 obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, 919 btf_ext_data->d_size); 920 if (IS_ERR(obj->btf_ext)) { 921 pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n", 922 BTF_EXT_ELF_SEC, 923 PTR_ERR(obj->btf_ext)); 924 obj->btf_ext = NULL; 925 } 926 } 927 } 928 if (obj->efile.maps_shndx >= 0) { 929 err = bpf_object__init_maps(obj, flags); 930 if (err) 931 goto out; 932 } 933 err = bpf_object__init_prog_names(obj); 934 out: 935 return err; 936 } 937 938 static struct bpf_program * 939 bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx) 940 { 941 struct bpf_program *prog; 942 size_t i; 943 944 for (i = 0; i < obj->nr_programs; i++) { 945 prog = &obj->programs[i]; 946 if (prog->idx == idx) 947 return prog; 948 } 949 return NULL; 950 } 951 952 struct bpf_program * 953 bpf_object__find_program_by_title(struct bpf_object *obj, const char *title) 954 { 955 struct bpf_program *pos; 956 957 bpf_object__for_each_program(pos, obj) { 958 if (pos->section_name && !strcmp(pos->section_name, title)) 959 return pos; 960 } 961 return NULL; 962 } 963 964 static int 965 bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr, 966 Elf_Data *data, struct bpf_object *obj) 967 { 968 Elf_Data *symbols = obj->efile.symbols; 969 int text_shndx = obj->efile.text_shndx; 970 int maps_shndx = obj->efile.maps_shndx; 971 struct bpf_map *maps = obj->maps; 972 size_t nr_maps = obj->nr_maps; 973 int i, nrels; 974 975 pr_debug("collecting relocating info for: '%s'\n", 976 prog->section_name); 977 nrels = shdr->sh_size / shdr->sh_entsize; 978 979 prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels); 980 if (!prog->reloc_desc) { 981 pr_warning("failed to alloc memory in relocation\n"); 982 return -ENOMEM; 983 } 984 prog->nr_reloc = nrels; 985 986 for (i = 0; i < nrels; i++) { 987 GElf_Sym sym; 988 GElf_Rel rel; 989 
unsigned int insn_idx; 990 struct bpf_insn *insns = prog->insns; 991 size_t map_idx; 992 993 if (!gelf_getrel(data, i, &rel)) { 994 pr_warning("relocation: failed to get %d reloc\n", i); 995 return -LIBBPF_ERRNO__FORMAT; 996 } 997 998 if (!gelf_getsym(symbols, 999 GELF_R_SYM(rel.r_info), 1000 &sym)) { 1001 pr_warning("relocation: symbol %"PRIx64" not found\n", 1002 GELF_R_SYM(rel.r_info)); 1003 return -LIBBPF_ERRNO__FORMAT; 1004 } 1005 pr_debug("relo for %lld value %lld name %d\n", 1006 (long long) (rel.r_info >> 32), 1007 (long long) sym.st_value, sym.st_name); 1008 1009 if (sym.st_shndx != maps_shndx && sym.st_shndx != text_shndx) { 1010 pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n", 1011 prog->section_name, sym.st_shndx); 1012 return -LIBBPF_ERRNO__RELOC; 1013 } 1014 1015 insn_idx = rel.r_offset / sizeof(struct bpf_insn); 1016 pr_debug("relocation: insn_idx=%u\n", insn_idx); 1017 1018 if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) { 1019 if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) { 1020 pr_warning("incorrect bpf_call opcode\n"); 1021 return -LIBBPF_ERRNO__RELOC; 1022 } 1023 prog->reloc_desc[i].type = RELO_CALL; 1024 prog->reloc_desc[i].insn_idx = insn_idx; 1025 prog->reloc_desc[i].text_off = sym.st_value; 1026 obj->has_pseudo_calls = true; 1027 continue; 1028 } 1029 1030 if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) { 1031 pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n", 1032 insn_idx, insns[insn_idx].code); 1033 return -LIBBPF_ERRNO__RELOC; 1034 } 1035 1036 /* TODO: 'maps' is sorted. We can use bsearch to make it faster. 
*/ 1037 for (map_idx = 0; map_idx < nr_maps; map_idx++) { 1038 if (maps[map_idx].offset == sym.st_value) { 1039 pr_debug("relocation: find map %zd (%s) for insn %u\n", 1040 map_idx, maps[map_idx].name, insn_idx); 1041 break; 1042 } 1043 } 1044 1045 if (map_idx >= nr_maps) { 1046 pr_warning("bpf relocation: map_idx %d large than %d\n", 1047 (int)map_idx, (int)nr_maps - 1); 1048 return -LIBBPF_ERRNO__RELOC; 1049 } 1050 1051 prog->reloc_desc[i].type = RELO_LD64; 1052 prog->reloc_desc[i].insn_idx = insn_idx; 1053 prog->reloc_desc[i].map_idx = map_idx; 1054 } 1055 return 0; 1056 } 1057 1058 static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf) 1059 { 1060 struct bpf_map_def *def = &map->def; 1061 __u32 key_type_id, value_type_id; 1062 int ret; 1063 1064 ret = btf__get_map_kv_tids(btf, map->name, def->key_size, 1065 def->value_size, &key_type_id, 1066 &value_type_id); 1067 if (ret) 1068 return ret; 1069 1070 map->btf_key_type_id = key_type_id; 1071 map->btf_value_type_id = value_type_id; 1072 1073 return 0; 1074 } 1075 1076 int bpf_map__reuse_fd(struct bpf_map *map, int fd) 1077 { 1078 struct bpf_map_info info = {}; 1079 __u32 len = sizeof(info); 1080 int new_fd, err; 1081 char *new_name; 1082 1083 err = bpf_obj_get_info_by_fd(fd, &info, &len); 1084 if (err) 1085 return err; 1086 1087 new_name = strdup(info.name); 1088 if (!new_name) 1089 return -errno; 1090 1091 new_fd = open("/", O_RDONLY | O_CLOEXEC); 1092 if (new_fd < 0) 1093 goto err_free_new_name; 1094 1095 new_fd = dup3(fd, new_fd, O_CLOEXEC); 1096 if (new_fd < 0) 1097 goto err_close_new_fd; 1098 1099 err = zclose(map->fd); 1100 if (err) 1101 goto err_close_new_fd; 1102 free(map->name); 1103 1104 map->fd = new_fd; 1105 map->name = new_name; 1106 map->def.type = info.type; 1107 map->def.key_size = info.key_size; 1108 map->def.value_size = info.value_size; 1109 map->def.max_entries = info.max_entries; 1110 map->def.map_flags = info.map_flags; 1111 map->btf_key_type_id = info.btf_key_type_id; 
1112 map->btf_value_type_id = info.btf_value_type_id; 1113 1114 return 0; 1115 1116 err_close_new_fd: 1117 close(new_fd); 1118 err_free_new_name: 1119 free(new_name); 1120 return -errno; 1121 } 1122 1123 int bpf_map__resize(struct bpf_map *map, __u32 max_entries) 1124 { 1125 if (!map || !max_entries) 1126 return -EINVAL; 1127 1128 /* If map already created, its attributes can't be changed. */ 1129 if (map->fd >= 0) 1130 return -EBUSY; 1131 1132 map->def.max_entries = max_entries; 1133 1134 return 0; 1135 } 1136 1137 static int 1138 bpf_object__probe_name(struct bpf_object *obj) 1139 { 1140 struct bpf_load_program_attr attr; 1141 char *cp, errmsg[STRERR_BUFSIZE]; 1142 struct bpf_insn insns[] = { 1143 BPF_MOV64_IMM(BPF_REG_0, 0), 1144 BPF_EXIT_INSN(), 1145 }; 1146 int ret; 1147 1148 /* make sure basic loading works */ 1149 1150 memset(&attr, 0, sizeof(attr)); 1151 attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER; 1152 attr.insns = insns; 1153 attr.insns_cnt = ARRAY_SIZE(insns); 1154 attr.license = "GPL"; 1155 1156 ret = bpf_load_program_xattr(&attr, NULL, 0); 1157 if (ret < 0) { 1158 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); 1159 pr_warning("Error in %s():%s(%d). 
Couldn't load basic 'r0 = 0' BPF program.\n", 1160 __func__, cp, errno); 1161 return -errno; 1162 } 1163 close(ret); 1164 1165 /* now try the same program, but with the name */ 1166 1167 attr.name = "test"; 1168 ret = bpf_load_program_xattr(&attr, NULL, 0); 1169 if (ret >= 0) { 1170 obj->caps.name = 1; 1171 close(ret); 1172 } 1173 1174 return 0; 1175 } 1176 1177 static int 1178 bpf_object__probe_caps(struct bpf_object *obj) 1179 { 1180 return bpf_object__probe_name(obj); 1181 } 1182 1183 static int 1184 bpf_object__create_maps(struct bpf_object *obj) 1185 { 1186 struct bpf_create_map_attr create_attr = {}; 1187 unsigned int i; 1188 int err; 1189 1190 for (i = 0; i < obj->nr_maps; i++) { 1191 struct bpf_map *map = &obj->maps[i]; 1192 struct bpf_map_def *def = &map->def; 1193 char *cp, errmsg[STRERR_BUFSIZE]; 1194 int *pfd = &map->fd; 1195 1196 if (map->fd >= 0) { 1197 pr_debug("skip map create (preset) %s: fd=%d\n", 1198 map->name, map->fd); 1199 continue; 1200 } 1201 1202 if (obj->caps.name) 1203 create_attr.name = map->name; 1204 create_attr.map_ifindex = map->map_ifindex; 1205 create_attr.map_type = def->type; 1206 create_attr.map_flags = def->map_flags; 1207 create_attr.key_size = def->key_size; 1208 create_attr.value_size = def->value_size; 1209 create_attr.max_entries = def->max_entries; 1210 create_attr.btf_fd = 0; 1211 create_attr.btf_key_type_id = 0; 1212 create_attr.btf_value_type_id = 0; 1213 if (bpf_map_type__is_map_in_map(def->type) && 1214 map->inner_map_fd >= 0) 1215 create_attr.inner_map_fd = map->inner_map_fd; 1216 1217 if (obj->btf && !bpf_map_find_btf_info(map, obj->btf)) { 1218 create_attr.btf_fd = btf__fd(obj->btf); 1219 create_attr.btf_key_type_id = map->btf_key_type_id; 1220 create_attr.btf_value_type_id = map->btf_value_type_id; 1221 } 1222 1223 *pfd = bpf_create_map_xattr(&create_attr); 1224 if (*pfd < 0 && create_attr.btf_key_type_id) { 1225 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); 1226 pr_warning("Error in 
bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n", 1227 map->name, cp, errno); 1228 create_attr.btf_fd = 0; 1229 create_attr.btf_key_type_id = 0; 1230 create_attr.btf_value_type_id = 0; 1231 map->btf_key_type_id = 0; 1232 map->btf_value_type_id = 0; 1233 *pfd = bpf_create_map_xattr(&create_attr); 1234 } 1235 1236 if (*pfd < 0) { 1237 size_t j; 1238 1239 err = *pfd; 1240 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); 1241 pr_warning("failed to create map (name: '%s'): %s\n", 1242 map->name, cp); 1243 for (j = 0; j < i; j++) 1244 zclose(obj->maps[j].fd); 1245 return err; 1246 } 1247 pr_debug("create map %s: fd=%d\n", map->name, *pfd); 1248 } 1249 1250 return 0; 1251 } 1252 1253 static int 1254 check_btf_ext_reloc_err(struct bpf_program *prog, int err, 1255 void *btf_prog_info, const char *info_name) 1256 { 1257 if (err != -ENOENT) { 1258 pr_warning("Error in loading %s for sec %s.\n", 1259 info_name, prog->section_name); 1260 return err; 1261 } 1262 1263 /* err == -ENOENT (i.e. prog->section_name not found in btf_ext) */ 1264 1265 if (btf_prog_info) { 1266 /* 1267 * Some info has already been found but has problem 1268 * in the last btf_ext reloc. Must have to error 1269 * out. 1270 */ 1271 pr_warning("Error in relocating %s for sec %s.\n", 1272 info_name, prog->section_name); 1273 return err; 1274 } 1275 1276 /* 1277 * Have problem loading the very first info. Ignore 1278 * the rest. 1279 */ 1280 pr_warning("Cannot find %s for main program sec %s. Ignore all %s.\n", 1281 info_name, prog->section_name, info_name); 1282 return 0; 1283 } 1284 1285 static int 1286 bpf_program_reloc_btf_ext(struct bpf_program *prog, struct bpf_object *obj, 1287 const char *section_name, __u32 insn_offset) 1288 { 1289 int err; 1290 1291 if (!insn_offset || prog->func_info) { 1292 /* 1293 * !insn_offset => main program 1294 * 1295 * For sub prog, the main program's func_info has to 1296 * be loaded first (i.e. 
/*
 * Handle a RELO_CALL relocation in @prog: on the first such relocation
 * the whole .text section (sub-program instructions) is appended to
 * @prog's instruction array, then the call's immediate is rewritten to
 * target the appended copy.
 */
static int
bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
			struct reloc_desc *relo)
{
	struct bpf_insn *insn, *new_insn;
	struct bpf_program *text;
	size_t new_cnt;
	int err;

	if (relo->type != RELO_CALL)
		return -LIBBPF_ERRNO__RELOC;

	/* .text itself must not contain call relocations into .text */
	if (prog->idx == obj->efile.text_shndx) {
		pr_warning("relo in .text insn %d into off %d\n",
			   relo->insn_idx, relo->text_off);
		return -LIBBPF_ERRNO__RELOC;
	}

	if (prog->main_prog_cnt == 0) {
		/* first call relo: pull in the .text section once */
		text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
		if (!text) {
			pr_warning("no .text section found yet relo into text exist\n");
			return -LIBBPF_ERRNO__RELOC;
		}
		new_cnt = prog->insns_cnt + text->insns_cnt;
		new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn));
		if (!new_insn) {
			pr_warning("oom in prog realloc\n");
			return -ENOMEM;
		}

		if (obj->btf_ext) {
			/* pull in func/line info for the appended .text */
			err = bpf_program_reloc_btf_ext(prog, obj,
							text->section_name,
							prog->insns_cnt);
			if (err)
				return err;
		}

		memcpy(new_insn + prog->insns_cnt, text->insns,
		       text->insns_cnt * sizeof(*insn));
		prog->insns = new_insn;
		prog->main_prog_cnt = prog->insns_cnt;
		prog->insns_cnt = new_cnt;
		pr_debug("added %zd insn from %s to prog %s\n",
			 text->insns_cnt, text->section_name,
			 prog->section_name);
	}
	insn = &prog->insns[relo->insn_idx];
	/* make imm a pc-relative offset into the appended .text copy */
	insn->imm += prog->main_prog_cnt - relo->insn_idx;
	return 0;
}

/*
 * Apply all collected relocations of @prog: RELO_LD64 entries patch
 * map-load instructions with real map fds; everything else is treated
 * as a bpf-to-bpf call and resolved via bpf_program__reloc_text().
 * Relocation descriptors are freed on success.
 */
static int
bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
{
	int i, err;

	if (!prog)
		return 0;

	if (obj->btf_ext) {
		/* insn_offset 0 == load func/line info for main program */
		err = bpf_program_reloc_btf_ext(prog, obj,
						prog->section_name, 0);
		if (err)
			return err;
	}

	if (!prog->reloc_desc)
		return 0;

	for (i = 0; i < prog->nr_reloc; i++) {
		if (prog->reloc_desc[i].type == RELO_LD64) {
			struct bpf_insn *insns = prog->insns;
			int insn_idx, map_idx;

			insn_idx = prog->reloc_desc[i].insn_idx;
			map_idx = prog->reloc_desc[i].map_idx;

			if (insn_idx >= (int)prog->insns_cnt) {
				pr_warning("relocation out of range: '%s'\n",
					   prog->section_name);
				return -LIBBPF_ERRNO__RELOC;
			}
			insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
			insns[insn_idx].imm = obj->maps[map_idx].fd;
		} else {
			err = bpf_program__reloc_text(prog, obj,
						      &prog->reloc_desc[i]);
			if (err)
				return err;
		}
	}

	zfree(&prog->reloc_desc);
	prog->nr_reloc = 0;
	return 0;
}


/* Relocate every program of @obj; stop at the first failure. */
static int
bpf_object__relocate(struct bpf_object *obj)
{
	struct bpf_program *prog;
	size_t i;
	int err;

	for (i = 0; i < obj->nr_programs; i++) {
		prog = &obj->programs[i];

		err = bpf_program__relocate(prog, obj);
		if (err) {
			pr_warning("failed to relocate '%s'\n",
				   prog->section_name);
			return err;
		}
	}
	return 0;
}
/*
 * Load one instruction buffer into the kernel via BPF_PROG_LOAD.
 *
 * On success, *pfd receives the new program fd and 0 is returned.  On
 * failure, the error is classified by probing further:
 *  - verifier log non-empty        -> -LIBBPF_ERRNO__VERIFY
 *  - too many instructions         -> -LIBBPF_ERRNO__PROG2BIG
 *  - reloading as kprobe succeeds  -> -LIBBPF_ERRNO__PROGTYPE
 *  - otherwise                     -> -LIBBPF_ERRNO__KVER / __LOAD
 */
static int
load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
	     char *license, __u32 kern_version, int *pfd)
{
	struct bpf_load_program_attr load_attr;
	char *cp, errmsg[STRERR_BUFSIZE];
	char *log_buf;
	int ret;

	memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
	load_attr.prog_type = prog->type;
	load_attr.expected_attach_type = prog->expected_attach_type;
	/* only pass a name if the kernel supports it */
	if (prog->caps->name)
		load_attr.name = prog->name;
	load_attr.insns = insns;
	load_attr.insns_cnt = insns_cnt;
	load_attr.license = license;
	load_attr.kern_version = kern_version;
	load_attr.prog_ifindex = prog->prog_ifindex;
	load_attr.prog_btf_fd = prog->btf_fd >= 0 ? prog->btf_fd : 0;
	load_attr.func_info = prog->func_info;
	load_attr.func_info_rec_size = prog->func_info_rec_size;
	load_attr.func_info_cnt = prog->func_info_cnt;
	load_attr.line_info = prog->line_info;
	load_attr.line_info_rec_size = prog->line_info_rec_size;
	load_attr.line_info_cnt = prog->line_info_cnt;
	if (!load_attr.insns || !load_attr.insns_cnt)
		return -EINVAL;

	/* log buffer is best-effort; proceed without it on OOM */
	log_buf = malloc(BPF_LOG_BUF_SIZE);
	if (!log_buf)
		pr_warning("Alloc log buffer for bpf loader error, continue without log\n");

	ret = bpf_load_program_xattr(&load_attr, log_buf, BPF_LOG_BUF_SIZE);

	if (ret >= 0) {
		*pfd = ret;
		ret = 0;
		goto out;
	}

	ret = -LIBBPF_ERRNO__LOAD;
	cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
	pr_warning("load bpf program failed: %s\n", cp);

	if (log_buf && log_buf[0] != '\0') {
		/* verifier produced output: the program was rejected */
		ret = -LIBBPF_ERRNO__VERIFY;
		pr_warning("-- BEGIN DUMP LOG ---\n");
		pr_warning("\n%s\n", log_buf);
		pr_warning("-- END LOG --\n");
	} else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
		pr_warning("Program too large (%zu insns), at most %d insns\n",
			   load_attr.insns_cnt, BPF_MAXINSNS);
		ret = -LIBBPF_ERRNO__PROG2BIG;
	} else {
		/* Wrong program type? */
		if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
			int fd;

			/* if it loads as a kprobe, the type was wrong */
			load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
			load_attr.expected_attach_type = 0;
			fd = bpf_load_program_xattr(&load_attr, NULL, 0);
			if (fd >= 0) {
				close(fd);
				ret = -LIBBPF_ERRNO__PROGTYPE;
				goto out;
			}
		}

		if (log_buf)
			ret = -LIBBPF_ERRNO__KVER;
	}

out:
	free(log_buf);
	return ret;
}
/*
 * .text is only instruction storage for sub-programs when the object
 * uses bpf-to-bpf (pseudo) calls; such a "program" is never loaded on
 * its own.
 */
static bool bpf_program__is_function_storage(struct bpf_program *prog,
					     struct bpf_object *obj)
{
	return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls;
}

/* Load every real program of @obj; stop at the first failure. */
static int
bpf_object__load_progs(struct bpf_object *obj)
{
	size_t i;
	int err;

	for (i = 0; i < obj->nr_programs; i++) {
		if (bpf_program__is_function_storage(&obj->programs[i], obj))
			continue;
		err = bpf_program__load(&obj->programs[i],
					obj->license,
					obj->kern_version);
		if (err)
			return err;
	}
	return 0;
}

/*
 * Whether programs of @type must provide a kernel version at load
 * time.  Only kprobes need it; unknown/future types default to true
 * so the stricter check applies.
 */
static bool bpf_prog_type__needs_kver(enum bpf_prog_type type)
{
	switch (type) {
	case BPF_PROG_TYPE_SOCKET_FILTER:
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
	case BPF_PROG_TYPE_XDP:
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_SK_MSG:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
	case BPF_PROG_TYPE_LIRC_MODE2:
	case BPF_PROG_TYPE_SK_REUSEPORT:
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
	case BPF_PROG_TYPE_UNSPEC:
	case BPF_PROG_TYPE_TRACEPOINT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT:
	case BPF_PROG_TYPE_PERF_EVENT:
		return false;
	case BPF_PROG_TYPE_KPROBE:
	default:
		return true;
	}
}

/* Reject objects that need a kernel version but do not carry one. */
static int bpf_object__validate(struct bpf_object *obj, bool needs_kver)
{
	if (needs_kver && obj->kern_version == 0) {
		pr_warning("%s doesn't provide kernel version\n",
			   obj->path);
		return -LIBBPF_ERRNO__KVERSION;
	}
	return 0;
}

/*
 * Common open path: parse the ELF (from @path or from the
 * @obj_buf/@obj_buf_sz memory buffer), collect sections and
 * relocations, validate, and release the libelf state.  Returns the
 * new object or ERR_PTR(-errcode).
 */
static struct bpf_object *
__bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz,
		   bool needs_kver, int flags)
{
	struct bpf_object *obj;
	int err;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		pr_warning("failed to init libelf for %s\n", path);
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
	}

	obj = bpf_object__new(path, obj_buf, obj_buf_sz);
	if (IS_ERR(obj))
		return obj;

	CHECK_ERR(bpf_object__elf_init(obj), err, out);
	CHECK_ERR(bpf_object__check_endianness(obj), err, out);
	CHECK_ERR(bpf_object__elf_collect(obj, flags), err, out);
	CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
	CHECK_ERR(bpf_object__validate(obj, needs_kver), err, out);

	/* ELF state is no longer needed once everything is collected */
	bpf_object__elf_finish(obj);
	return obj;
out:
	bpf_object__close(obj);
	return ERR_PTR(err);
}

/* Open with explicit attributes and internal @flags. */
struct bpf_object *__bpf_object__open_xattr(struct bpf_object_open_attr *attr,
					    int flags)
{
	/* param validation */
	if (!attr->file)
		return NULL;

	pr_debug("loading %s\n", attr->file);

	return __bpf_object__open(attr->file, NULL, 0,
				  bpf_prog_type__needs_kver(attr->prog_type),
				  flags);
}

/* Public xattr open: no extra flags. */
struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
{
	return __bpf_object__open_xattr(attr, 0);
}
BPF_PROG_TYPE_UNSPEC, 1760 }; 1761 1762 return bpf_object__open_xattr(&attr); 1763 } 1764 1765 struct bpf_object *bpf_object__open_buffer(void *obj_buf, 1766 size_t obj_buf_sz, 1767 const char *name) 1768 { 1769 char tmp_name[64]; 1770 1771 /* param validation */ 1772 if (!obj_buf || obj_buf_sz <= 0) 1773 return NULL; 1774 1775 if (!name) { 1776 snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx", 1777 (unsigned long)obj_buf, 1778 (unsigned long)obj_buf_sz); 1779 tmp_name[sizeof(tmp_name) - 1] = '\0'; 1780 name = tmp_name; 1781 } 1782 pr_debug("loading object '%s' from buffer\n", 1783 name); 1784 1785 return __bpf_object__open(name, obj_buf, obj_buf_sz, true, true); 1786 } 1787 1788 int bpf_object__unload(struct bpf_object *obj) 1789 { 1790 size_t i; 1791 1792 if (!obj) 1793 return -EINVAL; 1794 1795 for (i = 0; i < obj->nr_maps; i++) 1796 zclose(obj->maps[i].fd); 1797 1798 for (i = 0; i < obj->nr_programs; i++) 1799 bpf_program__unload(&obj->programs[i]); 1800 1801 return 0; 1802 } 1803 1804 int bpf_object__load(struct bpf_object *obj) 1805 { 1806 int err; 1807 1808 if (!obj) 1809 return -EINVAL; 1810 1811 if (obj->loaded) { 1812 pr_warning("object should not be loaded twice\n"); 1813 return -EINVAL; 1814 } 1815 1816 obj->loaded = true; 1817 1818 CHECK_ERR(bpf_object__probe_caps(obj), err, out); 1819 CHECK_ERR(bpf_object__create_maps(obj), err, out); 1820 CHECK_ERR(bpf_object__relocate(obj), err, out); 1821 CHECK_ERR(bpf_object__load_progs(obj), err, out); 1822 1823 return 0; 1824 out: 1825 bpf_object__unload(obj); 1826 pr_warning("failed to load object '%s'\n", obj->path); 1827 return err; 1828 } 1829 1830 static int check_path(const char *path) 1831 { 1832 char *cp, errmsg[STRERR_BUFSIZE]; 1833 struct statfs st_fs; 1834 char *dname, *dir; 1835 int err = 0; 1836 1837 if (path == NULL) 1838 return -EINVAL; 1839 1840 dname = strdup(path); 1841 if (dname == NULL) 1842 return -ENOMEM; 1843 1844 dir = dirname(dname); 1845 if (statfs(dir, &st_fs)) { 1846 cp = 
libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); 1847 pr_warning("failed to statfs %s: %s\n", dir, cp); 1848 err = -errno; 1849 } 1850 free(dname); 1851 1852 if (!err && st_fs.f_type != BPF_FS_MAGIC) { 1853 pr_warning("specified path %s is not on BPF FS\n", path); 1854 err = -EINVAL; 1855 } 1856 1857 return err; 1858 } 1859 1860 int bpf_program__pin_instance(struct bpf_program *prog, const char *path, 1861 int instance) 1862 { 1863 char *cp, errmsg[STRERR_BUFSIZE]; 1864 int err; 1865 1866 err = check_path(path); 1867 if (err) 1868 return err; 1869 1870 if (prog == NULL) { 1871 pr_warning("invalid program pointer\n"); 1872 return -EINVAL; 1873 } 1874 1875 if (instance < 0 || instance >= prog->instances.nr) { 1876 pr_warning("invalid prog instance %d of prog %s (max %d)\n", 1877 instance, prog->section_name, prog->instances.nr); 1878 return -EINVAL; 1879 } 1880 1881 if (bpf_obj_pin(prog->instances.fds[instance], path)) { 1882 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); 1883 pr_warning("failed to pin program: %s\n", cp); 1884 return -errno; 1885 } 1886 pr_debug("pinned program '%s'\n", path); 1887 1888 return 0; 1889 } 1890 1891 int bpf_program__unpin_instance(struct bpf_program *prog, const char *path, 1892 int instance) 1893 { 1894 int err; 1895 1896 err = check_path(path); 1897 if (err) 1898 return err; 1899 1900 if (prog == NULL) { 1901 pr_warning("invalid program pointer\n"); 1902 return -EINVAL; 1903 } 1904 1905 if (instance < 0 || instance >= prog->instances.nr) { 1906 pr_warning("invalid prog instance %d of prog %s (max %d)\n", 1907 instance, prog->section_name, prog->instances.nr); 1908 return -EINVAL; 1909 } 1910 1911 err = unlink(path); 1912 if (err != 0) 1913 return -errno; 1914 pr_debug("unpinned program '%s'\n", path); 1915 1916 return 0; 1917 } 1918 1919 static int make_dir(const char *path) 1920 { 1921 char *cp, errmsg[STRERR_BUFSIZE]; 1922 int err = 0; 1923 1924 if (mkdir(path, 0700) && errno != EEXIST) 1925 err = -errno; 1926 1927 if 
(err) { 1928 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); 1929 pr_warning("failed to mkdir %s: %s\n", path, cp); 1930 } 1931 return err; 1932 } 1933 1934 int bpf_program__pin(struct bpf_program *prog, const char *path) 1935 { 1936 int i, err; 1937 1938 err = check_path(path); 1939 if (err) 1940 return err; 1941 1942 if (prog == NULL) { 1943 pr_warning("invalid program pointer\n"); 1944 return -EINVAL; 1945 } 1946 1947 if (prog->instances.nr <= 0) { 1948 pr_warning("no instances of prog %s to pin\n", 1949 prog->section_name); 1950 return -EINVAL; 1951 } 1952 1953 if (prog->instances.nr == 1) { 1954 /* don't create subdirs when pinning single instance */ 1955 return bpf_program__pin_instance(prog, path, 0); 1956 } 1957 1958 err = make_dir(path); 1959 if (err) 1960 return err; 1961 1962 for (i = 0; i < prog->instances.nr; i++) { 1963 char buf[PATH_MAX]; 1964 int len; 1965 1966 len = snprintf(buf, PATH_MAX, "%s/%d", path, i); 1967 if (len < 0) { 1968 err = -EINVAL; 1969 goto err_unpin; 1970 } else if (len >= PATH_MAX) { 1971 err = -ENAMETOOLONG; 1972 goto err_unpin; 1973 } 1974 1975 err = bpf_program__pin_instance(prog, buf, i); 1976 if (err) 1977 goto err_unpin; 1978 } 1979 1980 return 0; 1981 1982 err_unpin: 1983 for (i = i - 1; i >= 0; i--) { 1984 char buf[PATH_MAX]; 1985 int len; 1986 1987 len = snprintf(buf, PATH_MAX, "%s/%d", path, i); 1988 if (len < 0) 1989 continue; 1990 else if (len >= PATH_MAX) 1991 continue; 1992 1993 bpf_program__unpin_instance(prog, buf, i); 1994 } 1995 1996 rmdir(path); 1997 1998 return err; 1999 } 2000 2001 int bpf_program__unpin(struct bpf_program *prog, const char *path) 2002 { 2003 int i, err; 2004 2005 err = check_path(path); 2006 if (err) 2007 return err; 2008 2009 if (prog == NULL) { 2010 pr_warning("invalid program pointer\n"); 2011 return -EINVAL; 2012 } 2013 2014 if (prog->instances.nr <= 0) { 2015 pr_warning("no instances of prog %s to pin\n", 2016 prog->section_name); 2017 return -EINVAL; 2018 } 2019 2020 if 
(prog->instances.nr == 1) { 2021 /* don't create subdirs when pinning single instance */ 2022 return bpf_program__unpin_instance(prog, path, 0); 2023 } 2024 2025 for (i = 0; i < prog->instances.nr; i++) { 2026 char buf[PATH_MAX]; 2027 int len; 2028 2029 len = snprintf(buf, PATH_MAX, "%s/%d", path, i); 2030 if (len < 0) 2031 return -EINVAL; 2032 else if (len >= PATH_MAX) 2033 return -ENAMETOOLONG; 2034 2035 err = bpf_program__unpin_instance(prog, buf, i); 2036 if (err) 2037 return err; 2038 } 2039 2040 err = rmdir(path); 2041 if (err) 2042 return -errno; 2043 2044 return 0; 2045 } 2046 2047 int bpf_map__pin(struct bpf_map *map, const char *path) 2048 { 2049 char *cp, errmsg[STRERR_BUFSIZE]; 2050 int err; 2051 2052 err = check_path(path); 2053 if (err) 2054 return err; 2055 2056 if (map == NULL) { 2057 pr_warning("invalid map pointer\n"); 2058 return -EINVAL; 2059 } 2060 2061 if (bpf_obj_pin(map->fd, path)) { 2062 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); 2063 pr_warning("failed to pin map: %s\n", cp); 2064 return -errno; 2065 } 2066 2067 pr_debug("pinned map '%s'\n", path); 2068 2069 return 0; 2070 } 2071 2072 int bpf_map__unpin(struct bpf_map *map, const char *path) 2073 { 2074 int err; 2075 2076 err = check_path(path); 2077 if (err) 2078 return err; 2079 2080 if (map == NULL) { 2081 pr_warning("invalid map pointer\n"); 2082 return -EINVAL; 2083 } 2084 2085 err = unlink(path); 2086 if (err != 0) 2087 return -errno; 2088 pr_debug("unpinned map '%s'\n", path); 2089 2090 return 0; 2091 } 2092 2093 int bpf_object__pin_maps(struct bpf_object *obj, const char *path) 2094 { 2095 struct bpf_map *map; 2096 int err; 2097 2098 if (!obj) 2099 return -ENOENT; 2100 2101 if (!obj->loaded) { 2102 pr_warning("object not yet loaded; load it first\n"); 2103 return -ENOENT; 2104 } 2105 2106 err = make_dir(path); 2107 if (err) 2108 return err; 2109 2110 bpf_object__for_each_map(map, obj) { 2111 char buf[PATH_MAX]; 2112 int len; 2113 2114 len = snprintf(buf, PATH_MAX, 
/* Unpin every map of @obj from "<path>/<map-name>". */
int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
{
	struct bpf_map *map;
	int err;

	if (!obj)
		return -ENOENT;

	bpf_object__for_each_map(map, obj) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       bpf_map__name(map));
		if (len < 0)
			return -EINVAL;
		else if (len >= PATH_MAX)
			return -ENAMETOOLONG;

		err = bpf_map__unpin(map, buf);
		if (err)
			return err;
	}

	return 0;
}

/*
 * Pin every program of @obj at "<path>/<pin-name>".  The object must
 * already be loaded.  On failure, programs pinned so far are unpinned
 * (walking backwards from the failing one).
 */
int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
{
	struct bpf_program *prog;
	int err;

	if (!obj)
		return -ENOENT;

	if (!obj->loaded) {
		pr_warning("object not yet loaded; load it first\n");
		return -ENOENT;
	}

	err = make_dir(path);
	if (err)
		return err;

	bpf_object__for_each_program(prog, obj) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       prog->pin_name);
		if (len < 0) {
			err = -EINVAL;
			goto err_unpin_programs;
		} else if (len >= PATH_MAX) {
			err = -ENAMETOOLONG;
			goto err_unpin_programs;
		}

		err = bpf_program__pin(prog, buf);
		if (err)
			goto err_unpin_programs;
	}

	return 0;

err_unpin_programs:
	/* roll back: unpin the programs pinned before the failure */
	while ((prog = bpf_program__prev(prog, obj))) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       prog->pin_name);
		if (len < 0)
			continue;
		else if (len >= PATH_MAX)
			continue;

		bpf_program__unpin(prog, buf);
	}

	return err;
}

/* Unpin every program of @obj from "<path>/<pin-name>". */
int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
{
	struct bpf_program *prog;
	int err;

	if (!obj)
		return -ENOENT;

	bpf_object__for_each_program(prog, obj) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       prog->pin_name);
		if (len < 0)
			return -EINVAL;
		else if (len >= PATH_MAX)
			return -ENAMETOOLONG;

		err = bpf_program__unpin(prog, buf);
		if (err)
			return err;
	}

	return 0;
}

/* Pin all maps, then all programs, of @obj; maps are unpinned if
 * program pinning fails.
 */
int bpf_object__pin(struct bpf_object *obj, const char *path)
{
	int err;

	err = bpf_object__pin_maps(obj, path);
	if (err)
		return err;

	err = bpf_object__pin_programs(obj, path);
	if (err) {
		bpf_object__unpin_maps(obj, path);
		return err;
	}

	return 0;
}
2306 zfree(&obj->programs); 2307 2308 list_del(&obj->list); 2309 free(obj); 2310 } 2311 2312 struct bpf_object * 2313 bpf_object__next(struct bpf_object *prev) 2314 { 2315 struct bpf_object *next; 2316 2317 if (!prev) 2318 next = list_first_entry(&bpf_objects_list, 2319 struct bpf_object, 2320 list); 2321 else 2322 next = list_next_entry(prev, list); 2323 2324 /* Empty list is noticed here so don't need checking on entry. */ 2325 if (&next->list == &bpf_objects_list) 2326 return NULL; 2327 2328 return next; 2329 } 2330 2331 const char *bpf_object__name(struct bpf_object *obj) 2332 { 2333 return obj ? obj->path : ERR_PTR(-EINVAL); 2334 } 2335 2336 unsigned int bpf_object__kversion(struct bpf_object *obj) 2337 { 2338 return obj ? obj->kern_version : 0; 2339 } 2340 2341 struct btf *bpf_object__btf(struct bpf_object *obj) 2342 { 2343 return obj ? obj->btf : NULL; 2344 } 2345 2346 int bpf_object__btf_fd(const struct bpf_object *obj) 2347 { 2348 return obj->btf ? btf__fd(obj->btf) : -1; 2349 } 2350 2351 int bpf_object__set_priv(struct bpf_object *obj, void *priv, 2352 bpf_object_clear_priv_t clear_priv) 2353 { 2354 if (obj->priv && obj->clear_priv) 2355 obj->clear_priv(obj, obj->priv); 2356 2357 obj->priv = priv; 2358 obj->clear_priv = clear_priv; 2359 return 0; 2360 } 2361 2362 void *bpf_object__priv(struct bpf_object *obj) 2363 { 2364 return obj ? obj->priv : ERR_PTR(-EINVAL); 2365 } 2366 2367 static struct bpf_program * 2368 __bpf_program__iter(struct bpf_program *p, struct bpf_object *obj, bool forward) 2369 { 2370 size_t nr_programs = obj->nr_programs; 2371 ssize_t idx; 2372 2373 if (!nr_programs) 2374 return NULL; 2375 2376 if (!p) 2377 /* Iter from the beginning */ 2378 return forward ? &obj->programs[0] : 2379 &obj->programs[nr_programs - 1]; 2380 2381 if (p->obj != obj) { 2382 pr_warning("error: program handler doesn't match object\n"); 2383 return NULL; 2384 } 2385 2386 idx = (p - obj->programs) + (forward ? 
1 : -1); 2387 if (idx >= obj->nr_programs || idx < 0) 2388 return NULL; 2389 return &obj->programs[idx]; 2390 } 2391 2392 struct bpf_program * 2393 bpf_program__next(struct bpf_program *prev, struct bpf_object *obj) 2394 { 2395 struct bpf_program *prog = prev; 2396 2397 do { 2398 prog = __bpf_program__iter(prog, obj, true); 2399 } while (prog && bpf_program__is_function_storage(prog, obj)); 2400 2401 return prog; 2402 } 2403 2404 struct bpf_program * 2405 bpf_program__prev(struct bpf_program *next, struct bpf_object *obj) 2406 { 2407 struct bpf_program *prog = next; 2408 2409 do { 2410 prog = __bpf_program__iter(prog, obj, false); 2411 } while (prog && bpf_program__is_function_storage(prog, obj)); 2412 2413 return prog; 2414 } 2415 2416 int bpf_program__set_priv(struct bpf_program *prog, void *priv, 2417 bpf_program_clear_priv_t clear_priv) 2418 { 2419 if (prog->priv && prog->clear_priv) 2420 prog->clear_priv(prog, prog->priv); 2421 2422 prog->priv = priv; 2423 prog->clear_priv = clear_priv; 2424 return 0; 2425 } 2426 2427 void *bpf_program__priv(struct bpf_program *prog) 2428 { 2429 return prog ? 
/* Request offload of @prog to the netdev with ifindex @ifindex. */
void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
{
	prog->prog_ifindex = ifindex;
}

/*
 * Section name of @prog.  With @needs_copy the caller receives a
 * strdup()ed copy it must free; ERR_PTR(-ENOMEM) on allocation
 * failure.
 */
const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
{
	const char *title;

	title = prog->section_name;
	if (needs_copy) {
		title = strdup(title);
		if (!title) {
			pr_warning("failed to strdup program title\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	return title;
}

/* fd of the first (or only) instance of @prog. */
int bpf_program__fd(struct bpf_program *prog)
{
	return bpf_program__nth_fd(prog, 0);
}

/*
 * Register a preprocessor that will rewrite @prog's instructions into
 * @nr_instances variants at load time.  Must be called before
 * bpf_program__load(); all instance fds start out as -1.
 */
int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
			  bpf_program_prep_t prep)
{
	int *instances_fds;

	if (nr_instances <= 0 || !prep)
		return -EINVAL;

	if (prog->instances.nr > 0 || prog->instances.fds) {
		pr_warning("Can't set pre-processor after loading\n");
		return -EINVAL;
	}

	instances_fds = malloc(sizeof(int) * nr_instances);
	if (!instances_fds) {
		pr_warning("alloc memory failed for fds\n");
		return -ENOMEM;
	}

	/* fill all fd with -1 */
	memset(instances_fds, -1, sizeof(int) * nr_instances);

	prog->instances.nr = nr_instances;
	prog->instances.fds = instances_fds;
	prog->preprocessor = prep;
	return 0;
}

/*
 * fd of the @n-th instance of @prog; -EINVAL for bad @n, -ENOENT when
 * that instance was skipped at load time (fd still -1).
 */
int bpf_program__nth_fd(struct bpf_program *prog, int n)
{
	int fd;

	if (!prog)
		return -EINVAL;

	if (n >= prog->instances.nr || n < 0) {
		pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
			   n, prog->section_name, prog->instances.nr);
		return -EINVAL;
	}

	fd = prog->instances.fds[n];
	if (fd < 0) {
		pr_warning("%dth instance of program '%s' is invalid\n",
			   n, prog->section_name);
		return -ENOENT;
	}

	return fd;
}

/* Force the program type of @prog (overrides section-name guessing). */
void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
{
	prog->type = type;
}

/* NULL-safe program-type comparison used by the is_<type> helpers. */
static bool bpf_program__is_type(struct bpf_program *prog,
				 enum bpf_prog_type type)
{
	return prog ? (prog->type == type) : false;
}

/*
 * Generate the public bpf_program__set_<name>() and
 * bpf_program__is_<name>() pairs for each supported program type.
 */
#define BPF_PROG_TYPE_FNS(NAME, TYPE)			\
int bpf_program__set_##NAME(struct bpf_program *prog)	\
{							\
	if (!prog)					\
		return -EINVAL;				\
	bpf_program__set_type(prog, TYPE);		\
	return 0;					\
}							\
							\
bool bpf_program__is_##NAME(struct bpf_program *prog)	\
{							\
	return bpf_program__is_type(prog, TYPE);	\
}							\

BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);

/* Force the expected attach type passed to the kernel at load time. */
void bpf_program__set_expected_attach_type(struct bpf_program *prog,
					   enum bpf_attach_type type)
{
	prog->expected_attach_type = type;
}

/* One section_names[] row: prefix + program/attach type metadata. */
#define BPF_PROG_SEC_IMPL(string, ptype, eatype, is_attachable, atype) \
	{ string, sizeof(string) - 1, ptype, eatype, is_attachable, atype }

/* Programs that can NOT be attached. */
#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0)

/* Programs that can be attached. */
#define BPF_APROG_SEC(string, ptype, atype) \
	BPF_PROG_SEC_IMPL(string, ptype, 0, 1, atype)

/* Programs that must specify expected attach type at load time. */
#define BPF_EAPROG_SEC(string, ptype, eatype) \
	BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, eatype)
2565 */ 2566 #define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype) 2567 2568 static const struct { 2569 const char *sec; 2570 size_t len; 2571 enum bpf_prog_type prog_type; 2572 enum bpf_attach_type expected_attach_type; 2573 int is_attachable; 2574 enum bpf_attach_type attach_type; 2575 } section_names[] = { 2576 BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER), 2577 BPF_PROG_SEC("kprobe/", BPF_PROG_TYPE_KPROBE), 2578 BPF_PROG_SEC("kretprobe/", BPF_PROG_TYPE_KPROBE), 2579 BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS), 2580 BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT), 2581 BPF_PROG_SEC("tracepoint/", BPF_PROG_TYPE_TRACEPOINT), 2582 BPF_PROG_SEC("raw_tracepoint/", BPF_PROG_TYPE_RAW_TRACEPOINT), 2583 BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP), 2584 BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT), 2585 BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN), 2586 BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT), 2587 BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT), 2588 BPF_PROG_SEC("lwt_seg6local", BPF_PROG_TYPE_LWT_SEG6LOCAL), 2589 BPF_APROG_SEC("cgroup_skb/ingress", BPF_PROG_TYPE_CGROUP_SKB, 2590 BPF_CGROUP_INET_INGRESS), 2591 BPF_APROG_SEC("cgroup_skb/egress", BPF_PROG_TYPE_CGROUP_SKB, 2592 BPF_CGROUP_INET_EGRESS), 2593 BPF_APROG_COMPAT("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB), 2594 BPF_APROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK, 2595 BPF_CGROUP_INET_SOCK_CREATE), 2596 BPF_EAPROG_SEC("cgroup/post_bind4", BPF_PROG_TYPE_CGROUP_SOCK, 2597 BPF_CGROUP_INET4_POST_BIND), 2598 BPF_EAPROG_SEC("cgroup/post_bind6", BPF_PROG_TYPE_CGROUP_SOCK, 2599 BPF_CGROUP_INET6_POST_BIND), 2600 BPF_APROG_SEC("cgroup/dev", BPF_PROG_TYPE_CGROUP_DEVICE, 2601 BPF_CGROUP_DEVICE), 2602 BPF_APROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS, 2603 BPF_CGROUP_SOCK_OPS), 2604 BPF_APROG_SEC("sk_skb/stream_parser", BPF_PROG_TYPE_SK_SKB, 2605 BPF_SK_SKB_STREAM_PARSER), 2606 BPF_APROG_SEC("sk_skb/stream_verdict", BPF_PROG_TYPE_SK_SKB, 2607 BPF_SK_SKB_STREAM_VERDICT), 2608 
BPF_APROG_COMPAT("sk_skb", BPF_PROG_TYPE_SK_SKB), 2609 BPF_APROG_SEC("sk_msg", BPF_PROG_TYPE_SK_MSG, 2610 BPF_SK_MSG_VERDICT), 2611 BPF_APROG_SEC("lirc_mode2", BPF_PROG_TYPE_LIRC_MODE2, 2612 BPF_LIRC_MODE2), 2613 BPF_APROG_SEC("flow_dissector", BPF_PROG_TYPE_FLOW_DISSECTOR, 2614 BPF_FLOW_DISSECTOR), 2615 BPF_EAPROG_SEC("cgroup/bind4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, 2616 BPF_CGROUP_INET4_BIND), 2617 BPF_EAPROG_SEC("cgroup/bind6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, 2618 BPF_CGROUP_INET6_BIND), 2619 BPF_EAPROG_SEC("cgroup/connect4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, 2620 BPF_CGROUP_INET4_CONNECT), 2621 BPF_EAPROG_SEC("cgroup/connect6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, 2622 BPF_CGROUP_INET6_CONNECT), 2623 BPF_EAPROG_SEC("cgroup/sendmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, 2624 BPF_CGROUP_UDP4_SENDMSG), 2625 BPF_EAPROG_SEC("cgroup/sendmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, 2626 BPF_CGROUP_UDP6_SENDMSG), 2627 }; 2628 2629 #undef BPF_PROG_SEC_IMPL 2630 #undef BPF_PROG_SEC 2631 #undef BPF_APROG_SEC 2632 #undef BPF_EAPROG_SEC 2633 #undef BPF_APROG_COMPAT 2634 2635 #define MAX_TYPE_NAME_SIZE 32 2636 2637 static char *libbpf_get_type_names(bool attach_type) 2638 { 2639 int i, len = ARRAY_SIZE(section_names) * MAX_TYPE_NAME_SIZE; 2640 char *buf; 2641 2642 buf = malloc(len); 2643 if (!buf) 2644 return NULL; 2645 2646 buf[0] = '\0'; 2647 /* Forge string buf with all available names */ 2648 for (i = 0; i < ARRAY_SIZE(section_names); i++) { 2649 if (attach_type && !section_names[i].is_attachable) 2650 continue; 2651 2652 if (strlen(buf) + strlen(section_names[i].sec) + 2 > len) { 2653 free(buf); 2654 return NULL; 2655 } 2656 strcat(buf, " "); 2657 strcat(buf, section_names[i].sec); 2658 } 2659 2660 return buf; 2661 } 2662 2663 int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type, 2664 enum bpf_attach_type *expected_attach_type) 2665 { 2666 char *type_names; 2667 int i; 2668 2669 if (!name) 2670 return -EINVAL; 2671 2672 for (i = 0; i < ARRAY_SIZE(section_names); 
i++) { 2673 if (strncmp(name, section_names[i].sec, section_names[i].len)) 2674 continue; 2675 *prog_type = section_names[i].prog_type; 2676 *expected_attach_type = section_names[i].expected_attach_type; 2677 return 0; 2678 } 2679 pr_warning("failed to guess program type based on ELF section name '%s'\n", name); 2680 type_names = libbpf_get_type_names(false); 2681 if (type_names != NULL) { 2682 pr_info("supported section(type) names are:%s\n", type_names); 2683 free(type_names); 2684 } 2685 2686 return -EINVAL; 2687 } 2688 2689 int libbpf_attach_type_by_name(const char *name, 2690 enum bpf_attach_type *attach_type) 2691 { 2692 char *type_names; 2693 int i; 2694 2695 if (!name) 2696 return -EINVAL; 2697 2698 for (i = 0; i < ARRAY_SIZE(section_names); i++) { 2699 if (strncmp(name, section_names[i].sec, section_names[i].len)) 2700 continue; 2701 if (!section_names[i].is_attachable) 2702 return -EINVAL; 2703 *attach_type = section_names[i].attach_type; 2704 return 0; 2705 } 2706 pr_warning("failed to guess attach type based on ELF section name '%s'\n", name); 2707 type_names = libbpf_get_type_names(true); 2708 if (type_names != NULL) { 2709 pr_info("attachable section(type) names are:%s\n", type_names); 2710 free(type_names); 2711 } 2712 2713 return -EINVAL; 2714 } 2715 2716 static int 2717 bpf_program__identify_section(struct bpf_program *prog, 2718 enum bpf_prog_type *prog_type, 2719 enum bpf_attach_type *expected_attach_type) 2720 { 2721 return libbpf_prog_type_by_name(prog->section_name, prog_type, 2722 expected_attach_type); 2723 } 2724 2725 int bpf_map__fd(struct bpf_map *map) 2726 { 2727 return map ? map->fd : -EINVAL; 2728 } 2729 2730 const struct bpf_map_def *bpf_map__def(struct bpf_map *map) 2731 { 2732 return map ? &map->def : ERR_PTR(-EINVAL); 2733 } 2734 2735 const char *bpf_map__name(struct bpf_map *map) 2736 { 2737 return map ? map->name : NULL; 2738 } 2739 2740 __u32 bpf_map__btf_key_type_id(const struct bpf_map *map) 2741 { 2742 return map ? 
map->btf_key_type_id : 0; 2743 } 2744 2745 __u32 bpf_map__btf_value_type_id(const struct bpf_map *map) 2746 { 2747 return map ? map->btf_value_type_id : 0; 2748 } 2749 2750 int bpf_map__set_priv(struct bpf_map *map, void *priv, 2751 bpf_map_clear_priv_t clear_priv) 2752 { 2753 if (!map) 2754 return -EINVAL; 2755 2756 if (map->priv) { 2757 if (map->clear_priv) 2758 map->clear_priv(map, map->priv); 2759 } 2760 2761 map->priv = priv; 2762 map->clear_priv = clear_priv; 2763 return 0; 2764 } 2765 2766 void *bpf_map__priv(struct bpf_map *map) 2767 { 2768 return map ? map->priv : ERR_PTR(-EINVAL); 2769 } 2770 2771 bool bpf_map__is_offload_neutral(struct bpf_map *map) 2772 { 2773 return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY; 2774 } 2775 2776 void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex) 2777 { 2778 map->map_ifindex = ifindex; 2779 } 2780 2781 int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd) 2782 { 2783 if (!bpf_map_type__is_map_in_map(map->def.type)) { 2784 pr_warning("error: unsupported map type\n"); 2785 return -EINVAL; 2786 } 2787 if (map->inner_map_fd != -1) { 2788 pr_warning("error: inner_map_fd already specified\n"); 2789 return -EINVAL; 2790 } 2791 map->inner_map_fd = fd; 2792 return 0; 2793 } 2794 2795 static struct bpf_map * 2796 __bpf_map__iter(struct bpf_map *m, struct bpf_object *obj, int i) 2797 { 2798 ssize_t idx; 2799 struct bpf_map *s, *e; 2800 2801 if (!obj || !obj->maps) 2802 return NULL; 2803 2804 s = obj->maps; 2805 e = obj->maps + obj->nr_maps; 2806 2807 if ((m < s) || (m >= e)) { 2808 pr_warning("error in %s: map handler doesn't belong to object\n", 2809 __func__); 2810 return NULL; 2811 } 2812 2813 idx = (m - obj->maps) + i; 2814 if (idx >= obj->nr_maps || idx < 0) 2815 return NULL; 2816 return &obj->maps[idx]; 2817 } 2818 2819 struct bpf_map * 2820 bpf_map__next(struct bpf_map *prev, struct bpf_object *obj) 2821 { 2822 if (prev == NULL) 2823 return obj->maps; 2824 2825 return __bpf_map__iter(prev, obj, 1); 2826 } 
2827 2828 struct bpf_map * 2829 bpf_map__prev(struct bpf_map *next, struct bpf_object *obj) 2830 { 2831 if (next == NULL) { 2832 if (!obj->nr_maps) 2833 return NULL; 2834 return obj->maps + obj->nr_maps - 1; 2835 } 2836 2837 return __bpf_map__iter(next, obj, -1); 2838 } 2839 2840 struct bpf_map * 2841 bpf_object__find_map_by_name(struct bpf_object *obj, const char *name) 2842 { 2843 struct bpf_map *pos; 2844 2845 bpf_object__for_each_map(pos, obj) { 2846 if (pos->name && !strcmp(pos->name, name)) 2847 return pos; 2848 } 2849 return NULL; 2850 } 2851 2852 int 2853 bpf_object__find_map_fd_by_name(struct bpf_object *obj, const char *name) 2854 { 2855 return bpf_map__fd(bpf_object__find_map_by_name(obj, name)); 2856 } 2857 2858 struct bpf_map * 2859 bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset) 2860 { 2861 int i; 2862 2863 for (i = 0; i < obj->nr_maps; i++) { 2864 if (obj->maps[i].offset == offset) 2865 return &obj->maps[i]; 2866 } 2867 return ERR_PTR(-ENOENT); 2868 } 2869 2870 long libbpf_get_error(const void *ptr) 2871 { 2872 if (IS_ERR(ptr)) 2873 return PTR_ERR(ptr); 2874 return 0; 2875 } 2876 2877 int bpf_prog_load(const char *file, enum bpf_prog_type type, 2878 struct bpf_object **pobj, int *prog_fd) 2879 { 2880 struct bpf_prog_load_attr attr; 2881 2882 memset(&attr, 0, sizeof(struct bpf_prog_load_attr)); 2883 attr.file = file; 2884 attr.prog_type = type; 2885 attr.expected_attach_type = 0; 2886 2887 return bpf_prog_load_xattr(&attr, pobj, prog_fd); 2888 } 2889 2890 int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr, 2891 struct bpf_object **pobj, int *prog_fd) 2892 { 2893 struct bpf_object_open_attr open_attr = { 2894 .file = attr->file, 2895 .prog_type = attr->prog_type, 2896 }; 2897 struct bpf_program *prog, *first_prog = NULL; 2898 enum bpf_attach_type expected_attach_type; 2899 enum bpf_prog_type prog_type; 2900 struct bpf_object *obj; 2901 struct bpf_map *map; 2902 int err; 2903 2904 if (!attr) 2905 return -EINVAL; 2906 if 
(!attr->file) 2907 return -EINVAL; 2908 2909 obj = bpf_object__open_xattr(&open_attr); 2910 if (IS_ERR_OR_NULL(obj)) 2911 return -ENOENT; 2912 2913 bpf_object__for_each_program(prog, obj) { 2914 /* 2915 * If type is not specified, try to guess it based on 2916 * section name. 2917 */ 2918 prog_type = attr->prog_type; 2919 prog->prog_ifindex = attr->ifindex; 2920 expected_attach_type = attr->expected_attach_type; 2921 if (prog_type == BPF_PROG_TYPE_UNSPEC) { 2922 err = bpf_program__identify_section(prog, &prog_type, 2923 &expected_attach_type); 2924 if (err < 0) { 2925 bpf_object__close(obj); 2926 return -EINVAL; 2927 } 2928 } 2929 2930 bpf_program__set_type(prog, prog_type); 2931 bpf_program__set_expected_attach_type(prog, 2932 expected_attach_type); 2933 2934 if (!first_prog) 2935 first_prog = prog; 2936 } 2937 2938 bpf_object__for_each_map(map, obj) { 2939 if (!bpf_map__is_offload_neutral(map)) 2940 map->map_ifindex = attr->ifindex; 2941 } 2942 2943 if (!first_prog) { 2944 pr_warning("object file doesn't contain bpf program\n"); 2945 bpf_object__close(obj); 2946 return -ENOENT; 2947 } 2948 2949 err = bpf_object__load(obj); 2950 if (err) { 2951 bpf_object__close(obj); 2952 return -EINVAL; 2953 } 2954 2955 *pobj = obj; 2956 *prog_fd = bpf_program__fd(first_prog); 2957 return 0; 2958 } 2959 2960 enum bpf_perf_event_ret 2961 bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size, 2962 void **copy_mem, size_t *copy_size, 2963 bpf_perf_event_print_t fn, void *private_data) 2964 { 2965 struct perf_event_mmap_page *header = mmap_mem; 2966 __u64 data_head = ring_buffer_read_head(header); 2967 __u64 data_tail = header->data_tail; 2968 void *base = ((__u8 *)header) + page_size; 2969 int ret = LIBBPF_PERF_EVENT_CONT; 2970 struct perf_event_header *ehdr; 2971 size_t ehdr_size; 2972 2973 while (data_head != data_tail) { 2974 ehdr = base + (data_tail & (mmap_size - 1)); 2975 ehdr_size = ehdr->size; 2976 2977 if (((void *)ehdr) + ehdr_size > base + 
mmap_size) { 2978 void *copy_start = ehdr; 2979 size_t len_first = base + mmap_size - copy_start; 2980 size_t len_secnd = ehdr_size - len_first; 2981 2982 if (*copy_size < ehdr_size) { 2983 free(*copy_mem); 2984 *copy_mem = malloc(ehdr_size); 2985 if (!*copy_mem) { 2986 *copy_size = 0; 2987 ret = LIBBPF_PERF_EVENT_ERROR; 2988 break; 2989 } 2990 *copy_size = ehdr_size; 2991 } 2992 2993 memcpy(*copy_mem, copy_start, len_first); 2994 memcpy(*copy_mem + len_first, base, len_secnd); 2995 ehdr = *copy_mem; 2996 } 2997 2998 ret = fn(ehdr, private_data); 2999 data_tail += ehdr_size; 3000 if (ret != LIBBPF_PERF_EVENT_CONT) 3001 break; 3002 } 3003 3004 ring_buffer_write_tail(header, data_tail); 3005 return ret; 3006 } 3007 3008 struct bpf_prog_info_array_desc { 3009 int array_offset; /* e.g. offset of jited_prog_insns */ 3010 int count_offset; /* e.g. offset of jited_prog_len */ 3011 int size_offset; /* > 0: offset of rec size, 3012 * < 0: fix size of -size_offset 3013 */ 3014 }; 3015 3016 static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = { 3017 [BPF_PROG_INFO_JITED_INSNS] = { 3018 offsetof(struct bpf_prog_info, jited_prog_insns), 3019 offsetof(struct bpf_prog_info, jited_prog_len), 3020 -1, 3021 }, 3022 [BPF_PROG_INFO_XLATED_INSNS] = { 3023 offsetof(struct bpf_prog_info, xlated_prog_insns), 3024 offsetof(struct bpf_prog_info, xlated_prog_len), 3025 -1, 3026 }, 3027 [BPF_PROG_INFO_MAP_IDS] = { 3028 offsetof(struct bpf_prog_info, map_ids), 3029 offsetof(struct bpf_prog_info, nr_map_ids), 3030 -(int)sizeof(__u32), 3031 }, 3032 [BPF_PROG_INFO_JITED_KSYMS] = { 3033 offsetof(struct bpf_prog_info, jited_ksyms), 3034 offsetof(struct bpf_prog_info, nr_jited_ksyms), 3035 -(int)sizeof(__u64), 3036 }, 3037 [BPF_PROG_INFO_JITED_FUNC_LENS] = { 3038 offsetof(struct bpf_prog_info, jited_func_lens), 3039 offsetof(struct bpf_prog_info, nr_jited_func_lens), 3040 -(int)sizeof(__u32), 3041 }, 3042 [BPF_PROG_INFO_FUNC_INFO] = { 3043 offsetof(struct bpf_prog_info, func_info), 
3044 offsetof(struct bpf_prog_info, nr_func_info), 3045 offsetof(struct bpf_prog_info, func_info_rec_size), 3046 }, 3047 [BPF_PROG_INFO_LINE_INFO] = { 3048 offsetof(struct bpf_prog_info, line_info), 3049 offsetof(struct bpf_prog_info, nr_line_info), 3050 offsetof(struct bpf_prog_info, line_info_rec_size), 3051 }, 3052 [BPF_PROG_INFO_JITED_LINE_INFO] = { 3053 offsetof(struct bpf_prog_info, jited_line_info), 3054 offsetof(struct bpf_prog_info, nr_jited_line_info), 3055 offsetof(struct bpf_prog_info, jited_line_info_rec_size), 3056 }, 3057 [BPF_PROG_INFO_PROG_TAGS] = { 3058 offsetof(struct bpf_prog_info, prog_tags), 3059 offsetof(struct bpf_prog_info, nr_prog_tags), 3060 -(int)sizeof(__u8) * BPF_TAG_SIZE, 3061 }, 3062 3063 }; 3064 3065 static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info, int offset) 3066 { 3067 __u32 *array = (__u32 *)info; 3068 3069 if (offset >= 0) 3070 return array[offset / sizeof(__u32)]; 3071 return -(int)offset; 3072 } 3073 3074 static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info, int offset) 3075 { 3076 __u64 *array = (__u64 *)info; 3077 3078 if (offset >= 0) 3079 return array[offset / sizeof(__u64)]; 3080 return -(int)offset; 3081 } 3082 3083 static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset, 3084 __u32 val) 3085 { 3086 __u32 *array = (__u32 *)info; 3087 3088 if (offset >= 0) 3089 array[offset / sizeof(__u32)] = val; 3090 } 3091 3092 static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset, 3093 __u64 val) 3094 { 3095 __u64 *array = (__u64 *)info; 3096 3097 if (offset >= 0) 3098 array[offset / sizeof(__u64)] = val; 3099 } 3100 3101 struct bpf_prog_info_linear * 3102 bpf_program__get_prog_info_linear(int fd, __u64 arrays) 3103 { 3104 struct bpf_prog_info_linear *info_linear; 3105 struct bpf_prog_info info = {}; 3106 __u32 info_len = sizeof(info); 3107 __u32 data_len = 0; 3108 int i, err; 3109 void *ptr; 3110 3111 if (arrays >> BPF_PROG_INFO_LAST_ARRAY) 3112 
return ERR_PTR(-EINVAL); 3113 3114 /* step 1: get array dimensions */ 3115 err = bpf_obj_get_info_by_fd(fd, &info, &info_len); 3116 if (err) { 3117 pr_debug("can't get prog info: %s", strerror(errno)); 3118 return ERR_PTR(-EFAULT); 3119 } 3120 3121 /* step 2: calculate total size of all arrays */ 3122 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) { 3123 bool include_array = (arrays & (1UL << i)) > 0; 3124 struct bpf_prog_info_array_desc *desc; 3125 __u32 count, size; 3126 3127 desc = bpf_prog_info_array_desc + i; 3128 3129 /* kernel is too old to support this field */ 3130 if (info_len < desc->array_offset + sizeof(__u32) || 3131 info_len < desc->count_offset + sizeof(__u32) || 3132 (desc->size_offset > 0 && info_len < desc->size_offset)) 3133 include_array = false; 3134 3135 if (!include_array) { 3136 arrays &= ~(1UL << i); /* clear the bit */ 3137 continue; 3138 } 3139 3140 count = bpf_prog_info_read_offset_u32(&info, desc->count_offset); 3141 size = bpf_prog_info_read_offset_u32(&info, desc->size_offset); 3142 3143 data_len += count * size; 3144 } 3145 3146 /* step 3: allocate continuous memory */ 3147 data_len = roundup(data_len, sizeof(__u64)); 3148 info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len); 3149 if (!info_linear) 3150 return ERR_PTR(-ENOMEM); 3151 3152 /* step 4: fill data to info_linear->info */ 3153 info_linear->arrays = arrays; 3154 memset(&info_linear->info, 0, sizeof(info)); 3155 ptr = info_linear->data; 3156 3157 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) { 3158 struct bpf_prog_info_array_desc *desc; 3159 __u32 count, size; 3160 3161 if ((arrays & (1UL << i)) == 0) 3162 continue; 3163 3164 desc = bpf_prog_info_array_desc + i; 3165 count = bpf_prog_info_read_offset_u32(&info, desc->count_offset); 3166 size = bpf_prog_info_read_offset_u32(&info, desc->size_offset); 3167 bpf_prog_info_set_offset_u32(&info_linear->info, 3168 desc->count_offset, count); 3169 
bpf_prog_info_set_offset_u32(&info_linear->info, 3170 desc->size_offset, size); 3171 bpf_prog_info_set_offset_u64(&info_linear->info, 3172 desc->array_offset, 3173 ptr_to_u64(ptr)); 3174 ptr += count * size; 3175 } 3176 3177 /* step 5: call syscall again to get required arrays */ 3178 err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len); 3179 if (err) { 3180 pr_debug("can't get prog info: %s", strerror(errno)); 3181 free(info_linear); 3182 return ERR_PTR(-EFAULT); 3183 } 3184 3185 /* step 6: verify the data */ 3186 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) { 3187 struct bpf_prog_info_array_desc *desc; 3188 __u32 v1, v2; 3189 3190 if ((arrays & (1UL << i)) == 0) 3191 continue; 3192 3193 desc = bpf_prog_info_array_desc + i; 3194 v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset); 3195 v2 = bpf_prog_info_read_offset_u32(&info_linear->info, 3196 desc->count_offset); 3197 if (v1 != v2) 3198 pr_warning("%s: mismatch in element count\n", __func__); 3199 3200 v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset); 3201 v2 = bpf_prog_info_read_offset_u32(&info_linear->info, 3202 desc->size_offset); 3203 if (v1 != v2) 3204 pr_warning("%s: mismatch in rec size\n", __func__); 3205 } 3206 3207 /* step 7: update info_len and data_len */ 3208 info_linear->info_len = sizeof(struct bpf_prog_info); 3209 info_linear->data_len = data_len; 3210 3211 return info_linear; 3212 } 3213 3214 void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear) 3215 { 3216 int i; 3217 3218 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) { 3219 struct bpf_prog_info_array_desc *desc; 3220 __u64 addr, offs; 3221 3222 if ((info_linear->arrays & (1UL << i)) == 0) 3223 continue; 3224 3225 desc = bpf_prog_info_array_desc + i; 3226 addr = bpf_prog_info_read_offset_u64(&info_linear->info, 3227 desc->array_offset); 3228 offs = addr - ptr_to_u64(info_linear->data); 3229 
bpf_prog_info_set_offset_u64(&info_linear->info, 3230 desc->array_offset, offs); 3231 } 3232 } 3233 3234 void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear) 3235 { 3236 int i; 3237 3238 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) { 3239 struct bpf_prog_info_array_desc *desc; 3240 __u64 addr, offs; 3241 3242 if ((info_linear->arrays & (1UL << i)) == 0) 3243 continue; 3244 3245 desc = bpf_prog_info_array_desc + i; 3246 offs = bpf_prog_info_read_offset_u64(&info_linear->info, 3247 desc->array_offset); 3248 addr = offs + ptr_to_u64(info_linear->data); 3249 bpf_prog_info_set_offset_u64(&info_linear->info, 3250 desc->array_offset, addr); 3251 } 3252 } 3253