1 // SPDX-License-Identifier: LGPL-2.1 2 3 /* 4 * Common eBPF ELF object loading operations. 5 * 6 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org> 7 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com> 8 * Copyright (C) 2015 Huawei Inc. 9 * Copyright (C) 2017 Nicira, Inc. 10 * 11 * This program is free software; you can redistribute it and/or 12 * modify it under the terms of the GNU Lesser General Public 13 * License as published by the Free Software Foundation; 14 * version 2.1 of the License (not later!) 15 * 16 * This program is distributed in the hope that it will be useful, 17 * but WITHOUT ANY WARRANTY; without even the implied warranty of 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 * GNU Lesser General Public License for more details. 20 * 21 * You should have received a copy of the GNU Lesser General Public 22 * License along with this program; if not, see <http://www.gnu.org/licenses> 23 */ 24 25 #include <stdlib.h> 26 #include <stdio.h> 27 #include <stdarg.h> 28 #include <libgen.h> 29 #include <inttypes.h> 30 #include <string.h> 31 #include <unistd.h> 32 #include <fcntl.h> 33 #include <errno.h> 34 #include <perf-sys.h> 35 #include <asm/unistd.h> 36 #include <linux/err.h> 37 #include <linux/kernel.h> 38 #include <linux/bpf.h> 39 #include <linux/list.h> 40 #include <linux/limits.h> 41 #include <sys/stat.h> 42 #include <sys/types.h> 43 #include <sys/vfs.h> 44 #include <libelf.h> 45 #include <gelf.h> 46 47 #include "libbpf.h" 48 #include "bpf.h" 49 #include "btf.h" 50 51 #ifndef EM_BPF 52 #define EM_BPF 247 53 #endif 54 55 #ifndef BPF_FS_MAGIC 56 #define BPF_FS_MAGIC 0xcafe4a11 57 #endif 58 59 #define __printf(a, b) __attribute__((format(printf, a, b))) 60 61 __printf(1, 2) 62 static int __base_pr(const char *format, ...) 
{
	va_list args;
	int err;

	va_start(args, format);
	err = vfprintf(stderr, format, args);
	va_end(args);
	return err;
}

/*
 * Active print callbacks, one per verbosity level.  Warnings and info
 * default to stderr via __base_pr(); debug output is off until a
 * caller installs a handler through libbpf_set_print().
 */
static __printf(1, 2) libbpf_print_fn_t __pr_warning = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_info = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_debug;

/* Print through @func only if a callback is installed; prefix "libbpf: ". */
#define __pr(func, fmt, ...)	\
do {				\
	if ((func))		\
		(func)("libbpf: " fmt, ##__VA_ARGS__); \
} while (0)

#define pr_warning(fmt, ...)	__pr(__pr_warning, fmt, ##__VA_ARGS__)
#define pr_info(fmt, ...)	__pr(__pr_info, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...)	__pr(__pr_debug, fmt, ##__VA_ARGS__)

/*
 * Replace all three per-level print callbacks.  Passing NULL for a
 * level silences it (see __pr()).
 */
void libbpf_set_print(libbpf_print_fn_t warn,
		      libbpf_print_fn_t info,
		      libbpf_print_fn_t debug)
{
	__pr_warning = warn;
	__pr_info = info;
	__pr_debug = debug;
}

#define STRERR_BUFSIZE  128

/* Translate a LIBBPF_ERRNO__* constant into a libbpf_strerror_table index. */
#define ERRNO_OFFSET(e)		((e) - __LIBBPF_ERRNO__START)
#define ERRCODE_OFFSET(c)	ERRNO_OFFSET(LIBBPF_ERRNO__##c)
#define NR_ERRNO	(__LIBBPF_ERRNO__END - __LIBBPF_ERRNO__START)

static const char *libbpf_strerror_table[NR_ERRNO] = {
	[ERRCODE_OFFSET(LIBELF)]	= "Something wrong in libelf",
	[ERRCODE_OFFSET(FORMAT)]	= "BPF object format invalid",
	[ERRCODE_OFFSET(KVERSION)]	= "'version' section incorrect or lost",
	[ERRCODE_OFFSET(ENDIAN)]	= "Endian mismatch",
	[ERRCODE_OFFSET(INTERNAL)]	= "Internal error in libbpf",
	[ERRCODE_OFFSET(RELOC)]		= "Relocation failed",
	[ERRCODE_OFFSET(VERIFY)]	= "Kernel verifier blocks program loading",
	[ERRCODE_OFFSET(PROG2BIG)]	= "Program too big",
	[ERRCODE_OFFSET(KVER)]		= "Incorrect kernel version",
	[ERRCODE_OFFSET(PROGTYPE)]	= "Kernel doesn't support this program type",
	[ERRCODE_OFFSET(WRNGPID)]	= "Wrong pid in netlink message",
	[ERRCODE_OFFSET(INVSEQ)]	= "Invalid netlink sequence",
};

/*
 * Render @err (positive or negative; plain errno or LIBBPF_ERRNO__*)
 * as a NUL-terminated message in @buf of @size bytes.  Returns 0 on
 * success, strerror_r()'s result for plain errno values, or -1 on bad
 * arguments / unknown libbpf error code.
 */
int libbpf_strerror(int err, char *buf, size_t size)
{
	if (!buf || !size)
		return -1;

	/* Accept negated error codes as well. */
	err = err > 0 ? err : -err;

	if (err < __LIBBPF_ERRNO__START) {
		int ret;

		ret = strerror_r(err, buf, size);
		/* strerror_r() may leave buf unterminated on truncation. */
		buf[size - 1] = '\0';
		return ret;
	}

	if (err < __LIBBPF_ERRNO__END) {
		const char *msg;

		msg = libbpf_strerror_table[ERRNO_OFFSET(err)];
		snprintf(buf, size, "%s", msg);
		buf[size - 1] = '\0';
		return 0;
	}

	snprintf(buf, size, "Unknown libbpf error %d", err);
	buf[size - 1] = '\0';
	return -1;
}

/* Run @action, stash its result in @err, jump to @out label on failure. */
#define CHECK_ERR(action, err, out) do {	\
	err = action;			\
	if (err)			\
		goto out;		\
} while(0)


/* Copied from tools/perf/util/util.h */
#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

#ifndef zclose
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

#ifdef HAVE_LIBELF_MMAP_SUPPORT
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
#else
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
#endif

/*
 * bpf_prog should be a better name but it has been used in
 * linux/filter.h.
 */
struct bpf_program {
	/* Index in elf obj file, for relocation use.
	 */
	int idx;
	char *name;
	int prog_ifindex;		/* ifindex for HW offload, 0 if none */
	char *section_name;
	struct bpf_insn *insns;		/* owned copy of the section's insns */
	size_t insns_cnt, main_prog_cnt;
	enum bpf_prog_type type;

	/* One record per relocation entry collected for this program. */
	struct reloc_desc {
		enum {
			RELO_LD64,	/* map fd load (ld_imm64) */
			RELO_CALL,	/* bpf-to-bpf call into .text */
		} type;
		int insn_idx;		/* instruction to patch */
		union {
			int map_idx;	/* RELO_LD64: index into obj->maps */
			int text_off;	/* RELO_CALL: offset into .text */
		};
	} *reloc_desc;
	int nr_reloc;

	/* Loaded instances (fds); nr == -1 means never loaded. */
	struct {
		int nr;
		int *fds;
	} instances;
	bpf_program_prep_t preprocessor;

	struct bpf_object *obj;		/* back pointer to owning object */
	void *priv;
	bpf_program_clear_priv_t clear_priv;

	enum bpf_attach_type expected_attach_type;
};

struct bpf_map {
	int fd;				/* -1 until the map is created */
	char *name;
	size_t offset;			/* symbol value in the "maps" section */
	int map_ifindex;
	struct bpf_map_def def;
	uint32_t btf_key_type_id;
	uint32_t btf_value_type_id;
	void *priv;
	bpf_map_clear_priv_t clear_priv;
};

/* All live bpf_objects, linked through bpf_object::list. */
static LIST_HEAD(bpf_objects_list);

struct bpf_object {
	char license[64];
	u32 kern_version;

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;

	bool loaded;

	/*
	 * Information when doing elf related work. Only valid if fd
	 * is valid.
	 */
	struct {
		int fd;
		void *obj_buf;		/* caller-owned in-memory ELF image */
		size_t obj_buf_sz;
		Elf *elf;
		GElf_Ehdr ehdr;
		Elf_Data *symbols;
		size_t strtabidx;
		struct {
			GElf_Shdr shdr;
			Elf_Data *data;
		} *reloc;
		int nr_reloc;
		int maps_shndx;		/* "maps" section index, -1 if absent */
		int text_shndx;
	} efile;
	/*
	 * All loaded bpf_object is linked in a list, which is
	 * hidden to caller. bpf_objects__<func> handlers deal with
	 * all objects.
	 */
	struct list_head list;

	struct btf *btf;

	void *priv;
	bpf_object_clear_priv_t clear_priv;

	char path[];			/* flexible array member: file path */
};
#define obj_elf_valid(o)	((o)->efile.elf)

/* Close all loaded instances of @prog and reset it to "never loaded". */
static void bpf_program__unload(struct bpf_program *prog)
{
	int i;

	if (!prog)
		return;

	/*
	 * If the object is opened but the program was never loaded,
	 * it is possible that prog->instances.nr == -1.
	 */
	if (prog->instances.nr > 0) {
		for (i = 0; i < prog->instances.nr; i++)
			zclose(prog->instances.fds[i]);
	} else if (prog->instances.nr != -1) {
		pr_warning("Internal error: instances.nr is %d\n",
			   prog->instances.nr);
	}

	prog->instances.nr = -1;
	zfree(&prog->instances.fds);
}

/* Release everything owned by @prog: fds, private data, buffers. */
static void bpf_program__exit(struct bpf_program *prog)
{
	if (!prog)
		return;

	if (prog->clear_priv)
		prog->clear_priv(prog, prog->priv);

	prog->priv = NULL;
	prog->clear_priv = NULL;

	bpf_program__unload(prog);
	zfree(&prog->name);
	zfree(&prog->section_name);
	zfree(&prog->insns);
	zfree(&prog->reloc_desc);

	prog->nr_reloc = 0;
	prog->insns_cnt = 0;
	prog->idx = -1;
}

/*
 * Initialize @prog from a program section's raw bytes.  @data/@size is
 * the section contents, @section_name its name, @idx its section
 * index.  Returns 0, -EINVAL on a too-small section, or -ENOMEM; on
 * allocation failure @prog is cleaned up via bpf_program__exit().
 */
static int
bpf_program__init(void *data, size_t size, char *section_name, int idx,
		  struct bpf_program *prog)
{
	if (size < sizeof(struct bpf_insn)) {
		pr_warning("corrupted section '%s'\n", section_name);
		return -EINVAL;
	}

	bzero(prog, sizeof(*prog));

	prog->section_name = strdup(section_name);
	if (!prog->section_name) {
		pr_warning("failed to alloc name for prog under section(%d) %s\n",
			   idx, section_name);
		goto errout;
	}

	prog->insns = malloc(size);
	if (!prog->insns) {
		pr_warning("failed to alloc insns for prog under section %s\n",
			   section_name);
		goto errout;
	}
	prog->insns_cnt = size / sizeof(struct bpf_insn);
	/* Keep a private copy: the ELF data is released after parsing. */
	memcpy(prog->insns, data,
	       prog->insns_cnt * sizeof(struct bpf_insn));
	prog->idx = idx;
	prog->instances.fds = NULL;
	prog->instances.nr = -1;		/* not loaded yet */
	prog->type = BPF_PROG_TYPE_KPROBE;	/* default; caller may override */

	return 0;
errout:
	bpf_program__exit(prog);
	return -ENOMEM;
}

/*
 * Build a bpf_program from section contents and append it to
 * obj->programs.  On realloc failure the existing array stays valid.
 */
static int
bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
			char *section_name, int idx)
{
	struct bpf_program prog, *progs;
	int nr_progs, err;

	err = bpf_program__init(data, size, section_name, idx, &prog);
	if (err)
		return err;

	progs = obj->programs;
	nr_progs = obj->nr_programs;

	progs = realloc(progs, sizeof(progs[0]) * (nr_progs + 1));
	if (!progs) {
		/*
		 * In this case the original obj->programs
		 * is still valid, so don't need special treat for
		 * bpf_close_object().
		 */
		pr_warning("failed to alloc a new program under section '%s'\n",
			   section_name);
		bpf_program__exit(&prog);
		return -ENOMEM;
	}

	pr_debug("found program %s\n", prog.section_name);
	obj->programs = progs;
	obj->nr_programs = nr_progs + 1;
	prog.obj = obj;
	progs[nr_progs] = prog;	/* struct copy; prog's buffers move over */
	return 0;
}

/*
 * Give each program a name: ".text" for the text-section program,
 * otherwise the global symbol defined in the program's section.
 */
static int
bpf_object__init_prog_names(struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	struct bpf_program *prog;
	size_t pi, si;

	for (pi = 0; pi < obj->nr_programs; pi++) {
		const char *name = NULL;

		prog = &obj->programs[pi];
		if (prog->idx == obj->efile.text_shndx) {
			name = ".text";
			goto skip_search;
		}

		/* Scan the symtab for a global symbol in prog's section. */
		for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
		     si++) {
			GElf_Sym sym;

			if (!gelf_getsym(symbols, si, &sym))
				continue;
			if (sym.st_shndx != prog->idx)
				continue;
			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
				continue;

			name = elf_strptr(obj->efile.elf,
					  obj->efile.strtabidx,
					  sym.st_name);
			if (!name) {
				pr_warning("failed to get sym name string for prog %s\n",
					   prog->section_name);
				return -LIBBPF_ERRNO__LIBELF;
			}
		}

		if (!name) {
			pr_warning("failed to find sym for prog %s\n",
				   prog->section_name);
			return -EINVAL;
		}
skip_search:
		prog->name = strdup(name);
		if (!prog->name) {
			pr_warning("failed to allocate memory for prog sym %s\n",
				   name);
			return -ENOMEM;
		}
	}

	return 0;
}

/*
 * Allocate a bpf_object for @path and link it into bpf_objects_list.
 * @obj_buf/@obj_buf_sz may carry an in-memory ELF image (see the
 * ownership note below).  Returns ERR_PTR(-ENOMEM) on failure.
 */
static struct bpf_object *bpf_object__new(const char *path,
					  void *obj_buf,
					  size_t obj_buf_sz)
{
	struct bpf_object *obj;

	/* One allocation covers the struct plus the trailing path[]. */
	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
	if (!obj) {
		pr_warning("alloc memory failed for %s\n", path);
		return ERR_PTR(-ENOMEM);
	}

	strcpy(obj->path, path);
	obj->efile.fd = -1;

	/*
	 * Caller of this function should also calls
	 * bpf_object__elf_finish() after data collection to return
	 * obj_buf to user. If not, we should duplicate the buffer to
	 * avoid user freeing them before elf finish.
	 */
	obj->efile.obj_buf = obj_buf;
	obj->efile.obj_buf_sz = obj_buf_sz;
	obj->efile.maps_shndx = -1;

	obj->loaded = false;

	INIT_LIST_HEAD(&obj->list);
	list_add(&obj->list, &bpf_objects_list);
	return obj;
}

/* Drop all ELF parsing state; safe to call when already finished. */
static void bpf_object__elf_finish(struct bpf_object *obj)
{
	if (!obj_elf_valid(obj))
		return;

	if (obj->efile.elf) {
		elf_end(obj->efile.elf);
		obj->efile.elf = NULL;
	}
	obj->efile.symbols = NULL;	/* owned by libelf, freed by elf_end */

	zfree(&obj->efile.reloc);
	obj->efile.nr_reloc = 0;
	zclose(obj->efile.fd);
	obj->efile.obj_buf = NULL;
	obj->efile.obj_buf_sz = 0;
}

/*
 * Open obj->path (or the caller-provided in-memory buffer) with libelf
 * and validate that it is a relocatable eBPF object.
 */
static int bpf_object__elf_init(struct bpf_object *obj)
{
	int err = 0;
	GElf_Ehdr *ep;

	if (obj_elf_valid(obj)) {
		pr_warning("elf init: internal error\n");
		return -LIBBPF_ERRNO__LIBELF;
	}

	if (obj->efile.obj_buf_sz > 0) {
		/*
		 * obj_buf should have been validated by
		 * bpf_object__open_buffer().
510 */ 511 obj->efile.elf = elf_memory(obj->efile.obj_buf, 512 obj->efile.obj_buf_sz); 513 } else { 514 obj->efile.fd = open(obj->path, O_RDONLY); 515 if (obj->efile.fd < 0) { 516 pr_warning("failed to open %s: %s\n", obj->path, 517 strerror(errno)); 518 return -errno; 519 } 520 521 obj->efile.elf = elf_begin(obj->efile.fd, 522 LIBBPF_ELF_C_READ_MMAP, 523 NULL); 524 } 525 526 if (!obj->efile.elf) { 527 pr_warning("failed to open %s as ELF file\n", 528 obj->path); 529 err = -LIBBPF_ERRNO__LIBELF; 530 goto errout; 531 } 532 533 if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) { 534 pr_warning("failed to get EHDR from %s\n", 535 obj->path); 536 err = -LIBBPF_ERRNO__FORMAT; 537 goto errout; 538 } 539 ep = &obj->efile.ehdr; 540 541 /* Old LLVM set e_machine to EM_NONE */ 542 if ((ep->e_type != ET_REL) || (ep->e_machine && (ep->e_machine != EM_BPF))) { 543 pr_warning("%s is not an eBPF object file\n", 544 obj->path); 545 err = -LIBBPF_ERRNO__FORMAT; 546 goto errout; 547 } 548 549 return 0; 550 errout: 551 bpf_object__elf_finish(obj); 552 return err; 553 } 554 555 static int 556 bpf_object__check_endianness(struct bpf_object *obj) 557 { 558 static unsigned int const endian = 1; 559 560 switch (obj->efile.ehdr.e_ident[EI_DATA]) { 561 case ELFDATA2LSB: 562 /* We are big endian, BPF obj is little endian. */ 563 if (*(unsigned char const *)&endian != 1) 564 goto mismatch; 565 break; 566 567 case ELFDATA2MSB: 568 /* We are little endian, BPF obj is big endian. 
*/ 569 if (*(unsigned char const *)&endian != 0) 570 goto mismatch; 571 break; 572 default: 573 return -LIBBPF_ERRNO__ENDIAN; 574 } 575 576 return 0; 577 578 mismatch: 579 pr_warning("Error: endianness mismatch.\n"); 580 return -LIBBPF_ERRNO__ENDIAN; 581 } 582 583 static int 584 bpf_object__init_license(struct bpf_object *obj, 585 void *data, size_t size) 586 { 587 memcpy(obj->license, data, 588 min(size, sizeof(obj->license) - 1)); 589 pr_debug("license of %s is %s\n", obj->path, obj->license); 590 return 0; 591 } 592 593 static int 594 bpf_object__init_kversion(struct bpf_object *obj, 595 void *data, size_t size) 596 { 597 u32 kver; 598 599 if (size != sizeof(kver)) { 600 pr_warning("invalid kver section in %s\n", obj->path); 601 return -LIBBPF_ERRNO__FORMAT; 602 } 603 memcpy(&kver, data, sizeof(kver)); 604 obj->kern_version = kver; 605 pr_debug("kernel version of %s is %x\n", obj->path, 606 obj->kern_version); 607 return 0; 608 } 609 610 static int compare_bpf_map(const void *_a, const void *_b) 611 { 612 const struct bpf_map *a = _a; 613 const struct bpf_map *b = _b; 614 615 return a->offset - b->offset; 616 } 617 618 static int 619 bpf_object__init_maps(struct bpf_object *obj) 620 { 621 int i, map_idx, map_def_sz, nr_maps = 0; 622 Elf_Scn *scn; 623 Elf_Data *data; 624 Elf_Data *symbols = obj->efile.symbols; 625 626 if (obj->efile.maps_shndx < 0) 627 return -EINVAL; 628 if (!symbols) 629 return -EINVAL; 630 631 scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx); 632 if (scn) 633 data = elf_getdata(scn, NULL); 634 if (!scn || !data) { 635 pr_warning("failed to get Elf_Data from map section %d\n", 636 obj->efile.maps_shndx); 637 return -EINVAL; 638 } 639 640 /* 641 * Count number of maps. Each map has a name. 642 * Array of maps is not supported: only the first element is 643 * considered. 644 * 645 * TODO: Detect array of map and report error. 
646 */ 647 for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) { 648 GElf_Sym sym; 649 650 if (!gelf_getsym(symbols, i, &sym)) 651 continue; 652 if (sym.st_shndx != obj->efile.maps_shndx) 653 continue; 654 nr_maps++; 655 } 656 657 /* Alloc obj->maps and fill nr_maps. */ 658 pr_debug("maps in %s: %d maps in %zd bytes\n", obj->path, 659 nr_maps, data->d_size); 660 661 if (!nr_maps) 662 return 0; 663 664 /* Assume equally sized map definitions */ 665 map_def_sz = data->d_size / nr_maps; 666 if (!data->d_size || (data->d_size % nr_maps) != 0) { 667 pr_warning("unable to determine map definition size " 668 "section %s, %d maps in %zd bytes\n", 669 obj->path, nr_maps, data->d_size); 670 return -EINVAL; 671 } 672 673 obj->maps = calloc(nr_maps, sizeof(obj->maps[0])); 674 if (!obj->maps) { 675 pr_warning("alloc maps for object failed\n"); 676 return -ENOMEM; 677 } 678 obj->nr_maps = nr_maps; 679 680 /* 681 * fill all fd with -1 so won't close incorrect 682 * fd (fd=0 is stdin) when failure (zclose won't close 683 * negative fd)). 684 */ 685 for (i = 0; i < nr_maps; i++) 686 obj->maps[i].fd = -1; 687 688 /* 689 * Fill obj->maps using data in "maps" section. 
690 */ 691 for (i = 0, map_idx = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) { 692 GElf_Sym sym; 693 const char *map_name; 694 struct bpf_map_def *def; 695 696 if (!gelf_getsym(symbols, i, &sym)) 697 continue; 698 if (sym.st_shndx != obj->efile.maps_shndx) 699 continue; 700 701 map_name = elf_strptr(obj->efile.elf, 702 obj->efile.strtabidx, 703 sym.st_name); 704 obj->maps[map_idx].offset = sym.st_value; 705 if (sym.st_value + map_def_sz > data->d_size) { 706 pr_warning("corrupted maps section in %s: last map \"%s\" too small\n", 707 obj->path, map_name); 708 return -EINVAL; 709 } 710 711 obj->maps[map_idx].name = strdup(map_name); 712 if (!obj->maps[map_idx].name) { 713 pr_warning("failed to alloc map name\n"); 714 return -ENOMEM; 715 } 716 pr_debug("map %d is \"%s\"\n", map_idx, 717 obj->maps[map_idx].name); 718 def = (struct bpf_map_def *)(data->d_buf + sym.st_value); 719 /* 720 * If the definition of the map in the object file fits in 721 * bpf_map_def, copy it. Any extra fields in our version 722 * of bpf_map_def will default to zero as a result of the 723 * calloc above. 724 */ 725 if (map_def_sz <= sizeof(struct bpf_map_def)) { 726 memcpy(&obj->maps[map_idx].def, def, map_def_sz); 727 } else { 728 /* 729 * Here the map structure being read is bigger than what 730 * we expect, truncate if the excess bits are all zero. 731 * If they are not zero, reject this map as 732 * incompatible. 
733 */ 734 char *b; 735 for (b = ((char *)def) + sizeof(struct bpf_map_def); 736 b < ((char *)def) + map_def_sz; b++) { 737 if (*b != 0) { 738 pr_warning("maps section in %s: \"%s\" " 739 "has unrecognized, non-zero " 740 "options\n", 741 obj->path, map_name); 742 return -EINVAL; 743 } 744 } 745 memcpy(&obj->maps[map_idx].def, def, 746 sizeof(struct bpf_map_def)); 747 } 748 map_idx++; 749 } 750 751 qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]), compare_bpf_map); 752 return 0; 753 } 754 755 static bool section_have_execinstr(struct bpf_object *obj, int idx) 756 { 757 Elf_Scn *scn; 758 GElf_Shdr sh; 759 760 scn = elf_getscn(obj->efile.elf, idx); 761 if (!scn) 762 return false; 763 764 if (gelf_getshdr(scn, &sh) != &sh) 765 return false; 766 767 if (sh.sh_flags & SHF_EXECINSTR) 768 return true; 769 770 return false; 771 } 772 773 static int bpf_object__elf_collect(struct bpf_object *obj) 774 { 775 Elf *elf = obj->efile.elf; 776 GElf_Ehdr *ep = &obj->efile.ehdr; 777 Elf_Scn *scn = NULL; 778 int idx = 0, err = 0; 779 780 /* Elf is corrupted/truncated, avoid calling elf_strptr. 
*/ 781 if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) { 782 pr_warning("failed to get e_shstrndx from %s\n", 783 obj->path); 784 return -LIBBPF_ERRNO__FORMAT; 785 } 786 787 while ((scn = elf_nextscn(elf, scn)) != NULL) { 788 char *name; 789 GElf_Shdr sh; 790 Elf_Data *data; 791 792 idx++; 793 if (gelf_getshdr(scn, &sh) != &sh) { 794 pr_warning("failed to get section(%d) header from %s\n", 795 idx, obj->path); 796 err = -LIBBPF_ERRNO__FORMAT; 797 goto out; 798 } 799 800 name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name); 801 if (!name) { 802 pr_warning("failed to get section(%d) name from %s\n", 803 idx, obj->path); 804 err = -LIBBPF_ERRNO__FORMAT; 805 goto out; 806 } 807 808 data = elf_getdata(scn, 0); 809 if (!data) { 810 pr_warning("failed to get section(%d) data from %s(%s)\n", 811 idx, name, obj->path); 812 err = -LIBBPF_ERRNO__FORMAT; 813 goto out; 814 } 815 pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n", 816 idx, name, (unsigned long)data->d_size, 817 (int)sh.sh_link, (unsigned long)sh.sh_flags, 818 (int)sh.sh_type); 819 820 if (strcmp(name, "license") == 0) 821 err = bpf_object__init_license(obj, 822 data->d_buf, 823 data->d_size); 824 else if (strcmp(name, "version") == 0) 825 err = bpf_object__init_kversion(obj, 826 data->d_buf, 827 data->d_size); 828 else if (strcmp(name, "maps") == 0) 829 obj->efile.maps_shndx = idx; 830 else if (strcmp(name, BTF_ELF_SEC) == 0) { 831 obj->btf = btf__new(data->d_buf, data->d_size, 832 __pr_debug); 833 if (IS_ERR(obj->btf)) { 834 pr_warning("Error loading ELF section %s: %ld. 
Ignored and continue.\n", 835 BTF_ELF_SEC, PTR_ERR(obj->btf)); 836 obj->btf = NULL; 837 } 838 } else if (sh.sh_type == SHT_SYMTAB) { 839 if (obj->efile.symbols) { 840 pr_warning("bpf: multiple SYMTAB in %s\n", 841 obj->path); 842 err = -LIBBPF_ERRNO__FORMAT; 843 } else { 844 obj->efile.symbols = data; 845 obj->efile.strtabidx = sh.sh_link; 846 } 847 } else if ((sh.sh_type == SHT_PROGBITS) && 848 (sh.sh_flags & SHF_EXECINSTR) && 849 (data->d_size > 0)) { 850 if (strcmp(name, ".text") == 0) 851 obj->efile.text_shndx = idx; 852 err = bpf_object__add_program(obj, data->d_buf, 853 data->d_size, name, idx); 854 if (err) { 855 char errmsg[STRERR_BUFSIZE]; 856 857 strerror_r(-err, errmsg, sizeof(errmsg)); 858 pr_warning("failed to alloc program %s (%s): %s", 859 name, obj->path, errmsg); 860 } 861 } else if (sh.sh_type == SHT_REL) { 862 void *reloc = obj->efile.reloc; 863 int nr_reloc = obj->efile.nr_reloc + 1; 864 int sec = sh.sh_info; /* points to other section */ 865 866 /* Only do relo for section with exec instructions */ 867 if (!section_have_execinstr(obj, sec)) { 868 pr_debug("skip relo %s(%d) for section(%d)\n", 869 name, idx, sec); 870 continue; 871 } 872 873 reloc = realloc(reloc, 874 sizeof(*obj->efile.reloc) * nr_reloc); 875 if (!reloc) { 876 pr_warning("realloc failed\n"); 877 err = -ENOMEM; 878 } else { 879 int n = nr_reloc - 1; 880 881 obj->efile.reloc = reloc; 882 obj->efile.nr_reloc = nr_reloc; 883 884 obj->efile.reloc[n].shdr = sh; 885 obj->efile.reloc[n].data = data; 886 } 887 } else { 888 pr_debug("skip section(%d) %s\n", idx, name); 889 } 890 if (err) 891 goto out; 892 } 893 894 if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) { 895 pr_warning("Corrupted ELF file: index of strtab invalid\n"); 896 return LIBBPF_ERRNO__FORMAT; 897 } 898 if (obj->efile.maps_shndx >= 0) { 899 err = bpf_object__init_maps(obj); 900 if (err) 901 goto out; 902 } 903 err = bpf_object__init_prog_names(obj); 904 out: 905 return err; 906 } 907 908 static struct 
bpf_program * 909 bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx) 910 { 911 struct bpf_program *prog; 912 size_t i; 913 914 for (i = 0; i < obj->nr_programs; i++) { 915 prog = &obj->programs[i]; 916 if (prog->idx == idx) 917 return prog; 918 } 919 return NULL; 920 } 921 922 static int 923 bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr, 924 Elf_Data *data, struct bpf_object *obj) 925 { 926 Elf_Data *symbols = obj->efile.symbols; 927 int text_shndx = obj->efile.text_shndx; 928 int maps_shndx = obj->efile.maps_shndx; 929 struct bpf_map *maps = obj->maps; 930 size_t nr_maps = obj->nr_maps; 931 int i, nrels; 932 933 pr_debug("collecting relocating info for: '%s'\n", 934 prog->section_name); 935 nrels = shdr->sh_size / shdr->sh_entsize; 936 937 prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels); 938 if (!prog->reloc_desc) { 939 pr_warning("failed to alloc memory in relocation\n"); 940 return -ENOMEM; 941 } 942 prog->nr_reloc = nrels; 943 944 for (i = 0; i < nrels; i++) { 945 GElf_Sym sym; 946 GElf_Rel rel; 947 unsigned int insn_idx; 948 struct bpf_insn *insns = prog->insns; 949 size_t map_idx; 950 951 if (!gelf_getrel(data, i, &rel)) { 952 pr_warning("relocation: failed to get %d reloc\n", i); 953 return -LIBBPF_ERRNO__FORMAT; 954 } 955 956 if (!gelf_getsym(symbols, 957 GELF_R_SYM(rel.r_info), 958 &sym)) { 959 pr_warning("relocation: symbol %"PRIx64" not found\n", 960 GELF_R_SYM(rel.r_info)); 961 return -LIBBPF_ERRNO__FORMAT; 962 } 963 pr_debug("relo for %lld value %lld name %d\n", 964 (long long) (rel.r_info >> 32), 965 (long long) sym.st_value, sym.st_name); 966 967 if (sym.st_shndx != maps_shndx && sym.st_shndx != text_shndx) { 968 pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n", 969 prog->section_name, sym.st_shndx); 970 return -LIBBPF_ERRNO__RELOC; 971 } 972 973 insn_idx = rel.r_offset / sizeof(struct bpf_insn); 974 pr_debug("relocation: insn_idx=%u\n", insn_idx); 975 976 if 
(insns[insn_idx].code == (BPF_JMP | BPF_CALL)) { 977 if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) { 978 pr_warning("incorrect bpf_call opcode\n"); 979 return -LIBBPF_ERRNO__RELOC; 980 } 981 prog->reloc_desc[i].type = RELO_CALL; 982 prog->reloc_desc[i].insn_idx = insn_idx; 983 prog->reloc_desc[i].text_off = sym.st_value; 984 continue; 985 } 986 987 if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) { 988 pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n", 989 insn_idx, insns[insn_idx].code); 990 return -LIBBPF_ERRNO__RELOC; 991 } 992 993 /* TODO: 'maps' is sorted. We can use bsearch to make it faster. */ 994 for (map_idx = 0; map_idx < nr_maps; map_idx++) { 995 if (maps[map_idx].offset == sym.st_value) { 996 pr_debug("relocation: find map %zd (%s) for insn %u\n", 997 map_idx, maps[map_idx].name, insn_idx); 998 break; 999 } 1000 } 1001 1002 if (map_idx >= nr_maps) { 1003 pr_warning("bpf relocation: map_idx %d large than %d\n", 1004 (int)map_idx, (int)nr_maps - 1); 1005 return -LIBBPF_ERRNO__RELOC; 1006 } 1007 1008 prog->reloc_desc[i].type = RELO_LD64; 1009 prog->reloc_desc[i].insn_idx = insn_idx; 1010 prog->reloc_desc[i].map_idx = map_idx; 1011 } 1012 return 0; 1013 } 1014 1015 static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf) 1016 { 1017 struct bpf_map_def *def = &map->def; 1018 const size_t max_name = 256; 1019 int64_t key_size, value_size; 1020 int32_t key_id, value_id; 1021 char name[max_name]; 1022 1023 /* Find key type by name from BTF */ 1024 if (snprintf(name, max_name, "%s_key", map->name) == max_name) { 1025 pr_warning("map:%s length of BTF key_type:%s_key is too long\n", 1026 map->name, map->name); 1027 return -EINVAL; 1028 } 1029 1030 key_id = btf__find_by_name(btf, name); 1031 if (key_id < 0) { 1032 pr_debug("map:%s key_type:%s cannot be found in BTF\n", 1033 map->name, name); 1034 return key_id; 1035 } 1036 1037 key_size = btf__resolve_size(btf, key_id); 1038 if (key_size < 0) { 1039 
pr_warning("map:%s key_type:%s cannot get the BTF type_size\n", 1040 map->name, name); 1041 return key_size; 1042 } 1043 1044 if (def->key_size != key_size) { 1045 pr_warning("map:%s key_type:%s has BTF type_size:%u != key_size:%u\n", 1046 map->name, name, (unsigned int)key_size, def->key_size); 1047 return -EINVAL; 1048 } 1049 1050 /* Find value type from BTF */ 1051 if (snprintf(name, max_name, "%s_value", map->name) == max_name) { 1052 pr_warning("map:%s length of BTF value_type:%s_value is too long\n", 1053 map->name, map->name); 1054 return -EINVAL; 1055 } 1056 1057 value_id = btf__find_by_name(btf, name); 1058 if (value_id < 0) { 1059 pr_debug("map:%s value_type:%s cannot be found in BTF\n", 1060 map->name, name); 1061 return value_id; 1062 } 1063 1064 value_size = btf__resolve_size(btf, value_id); 1065 if (value_size < 0) { 1066 pr_warning("map:%s value_type:%s cannot get the BTF type_size\n", 1067 map->name, name); 1068 return value_size; 1069 } 1070 1071 if (def->value_size != value_size) { 1072 pr_warning("map:%s value_type:%s has BTF type_size:%u != value_size:%u\n", 1073 map->name, name, (unsigned int)value_size, def->value_size); 1074 return -EINVAL; 1075 } 1076 1077 map->btf_key_type_id = key_id; 1078 map->btf_value_type_id = value_id; 1079 1080 return 0; 1081 } 1082 1083 static int 1084 bpf_object__create_maps(struct bpf_object *obj) 1085 { 1086 struct bpf_create_map_attr create_attr = {}; 1087 unsigned int i; 1088 int err; 1089 1090 for (i = 0; i < obj->nr_maps; i++) { 1091 struct bpf_map *map = &obj->maps[i]; 1092 struct bpf_map_def *def = &map->def; 1093 int *pfd = &map->fd; 1094 1095 create_attr.name = map->name; 1096 create_attr.map_ifindex = map->map_ifindex; 1097 create_attr.map_type = def->type; 1098 create_attr.map_flags = def->map_flags; 1099 create_attr.key_size = def->key_size; 1100 create_attr.value_size = def->value_size; 1101 create_attr.max_entries = def->max_entries; 1102 create_attr.btf_fd = 0; 1103 create_attr.btf_key_type_id = 0; 
1104 create_attr.btf_value_type_id = 0; 1105 1106 if (obj->btf && !bpf_map_find_btf_info(map, obj->btf)) { 1107 create_attr.btf_fd = btf__fd(obj->btf); 1108 create_attr.btf_key_type_id = map->btf_key_type_id; 1109 create_attr.btf_value_type_id = map->btf_value_type_id; 1110 } 1111 1112 *pfd = bpf_create_map_xattr(&create_attr); 1113 if (*pfd < 0 && create_attr.btf_key_type_id) { 1114 pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n", 1115 map->name, strerror(errno), errno); 1116 create_attr.btf_fd = 0; 1117 create_attr.btf_key_type_id = 0; 1118 create_attr.btf_value_type_id = 0; 1119 map->btf_key_type_id = 0; 1120 map->btf_value_type_id = 0; 1121 *pfd = bpf_create_map_xattr(&create_attr); 1122 } 1123 1124 if (*pfd < 0) { 1125 size_t j; 1126 1127 err = *pfd; 1128 pr_warning("failed to create map (name: '%s'): %s\n", 1129 map->name, 1130 strerror(errno)); 1131 for (j = 0; j < i; j++) 1132 zclose(obj->maps[j].fd); 1133 return err; 1134 } 1135 pr_debug("create map %s: fd=%d\n", map->name, *pfd); 1136 } 1137 1138 return 0; 1139 } 1140 1141 static int 1142 bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj, 1143 struct reloc_desc *relo) 1144 { 1145 struct bpf_insn *insn, *new_insn; 1146 struct bpf_program *text; 1147 size_t new_cnt; 1148 1149 if (relo->type != RELO_CALL) 1150 return -LIBBPF_ERRNO__RELOC; 1151 1152 if (prog->idx == obj->efile.text_shndx) { 1153 pr_warning("relo in .text insn %d into off %d\n", 1154 relo->insn_idx, relo->text_off); 1155 return -LIBBPF_ERRNO__RELOC; 1156 } 1157 1158 if (prog->main_prog_cnt == 0) { 1159 text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx); 1160 if (!text) { 1161 pr_warning("no .text section found yet relo into text exist\n"); 1162 return -LIBBPF_ERRNO__RELOC; 1163 } 1164 new_cnt = prog->insns_cnt + text->insns_cnt; 1165 new_insn = realloc(prog->insns, new_cnt * sizeof(*insn)); 1166 if (!new_insn) { 1167 pr_warning("oom in prog realloc\n"); 1168 return -ENOMEM; 
		}
		memcpy(new_insn + prog->insns_cnt, text->insns,
		       text->insns_cnt * sizeof(*insn));
		prog->insns = new_insn;
		prog->main_prog_cnt = prog->insns_cnt;
		prog->insns_cnt = new_cnt;
		pr_debug("added %zd insn from %s to prog %s\n",
			 text->insns_cnt, text->section_name,
			 prog->section_name);
	}
	insn = &prog->insns[relo->insn_idx];
	/* Retarget the call at the .text copy appended above. */
	insn->imm += prog->main_prog_cnt - relo->insn_idx;
	return 0;
}

/*
 * Apply all collected relocations to @prog: patch map fd loads
 * (RELO_LD64) and resolve calls into .text, then drop the consumed
 * relocation descriptors.
 */
static int
bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
{
	int i, err;

	if (!prog || !prog->reloc_desc)
		return 0;

	for (i = 0; i < prog->nr_reloc; i++) {
		if (prog->reloc_desc[i].type == RELO_LD64) {
			struct bpf_insn *insns = prog->insns;
			int insn_idx, map_idx;

			insn_idx = prog->reloc_desc[i].insn_idx;
			map_idx = prog->reloc_desc[i].map_idx;

			if (insn_idx >= (int)prog->insns_cnt) {
				pr_warning("relocation out of range: '%s'\n",
					   prog->section_name);
				return -LIBBPF_ERRNO__RELOC;
			}
			insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
			insns[insn_idx].imm = obj->maps[map_idx].fd;
		} else {
			err = bpf_program__reloc_text(prog, obj,
						      &prog->reloc_desc[i]);
			if (err)
				return err;
		}
	}

	/* Relocation data has been consumed; release it. */
	zfree(&prog->reloc_desc);
	prog->nr_reloc = 0;
	return 0;
}


/* Relocate every program in @obj; stops at the first failure. */
static int
bpf_object__relocate(struct bpf_object *obj)
{
	struct bpf_program *prog;
	size_t i;
	int err;

	for (i = 0; i < obj->nr_programs; i++) {
		prog = &obj->programs[i];

		err = bpf_program__relocate(prog, obj);
		if (err) {
			pr_warning("failed to relocate '%s'\n",
				   prog->section_name);
			return err;
		}
	}
	return 0;
}

/*
 * Walk all recorded relocation sections and collect their entries
 * into the matching programs.
 */
static int bpf_object__collect_reloc(struct bpf_object *obj)
{
	int i, err;

	if (!obj_elf_valid(obj)) {
		pr_warning("Internal error: elf object is closed\n");
		return
-LIBBPF_ERRNO__INTERNAL;
	}

	for (i = 0; i < obj->efile.nr_reloc; i++) {
		GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
		Elf_Data *data = obj->efile.reloc[i].data;
		/* sh_info of a SHT_REL section names the section it patches. */
		int idx = shdr->sh_info;
		struct bpf_program *prog;

		if (shdr->sh_type != SHT_REL) {
			pr_warning("internal error at %d\n", __LINE__);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog = bpf_object__find_prog_by_idx(obj, idx);
		if (!prog) {
			pr_warning("relocation failed: no section(%d)\n", idx);
			return -LIBBPF_ERRNO__RELOC;
		}

		err = bpf_program__collect_reloc(prog,
						 shdr, data,
						 obj);
		if (err)
			return err;
	}
	return 0;
}

/*
 * Load one instruction array into the kernel via BPF_PROG_LOAD.
 * On success stores the new program fd in *pfd and returns 0.
 * On failure classifies the error into a LIBBPF_ERRNO__* code:
 * verifier rejection (log captured), program too large, wrong program
 * type (probed by retrying as a kprobe), or kernel-version mismatch.
 */
static int
load_program(enum bpf_prog_type type, enum bpf_attach_type expected_attach_type,
	     const char *name, struct bpf_insn *insns, int insns_cnt,
	     char *license, u32 kern_version, int *pfd, int prog_ifindex)
{
	struct bpf_load_program_attr load_attr;
	char *log_buf;
	int ret;

	memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
	load_attr.prog_type = type;
	load_attr.expected_attach_type = expected_attach_type;
	load_attr.name = name;
	load_attr.insns = insns;
	load_attr.insns_cnt = insns_cnt;
	load_attr.license = license;
	load_attr.kern_version = kern_version;
	load_attr.prog_ifindex = prog_ifindex;

	if (!load_attr.insns || !load_attr.insns_cnt)
		return -EINVAL;

	/* Log buffer is best-effort: load proceeds without one on OOM. */
	log_buf = malloc(BPF_LOG_BUF_SIZE);
	if (!log_buf)
		pr_warning("Alloc log buffer for bpf loader error, continue without log\n");

	ret = bpf_load_program_xattr(&load_attr, log_buf, BPF_LOG_BUF_SIZE);

	if (ret >= 0) {
		*pfd = ret;
		ret = 0;
		goto out;
	}

	ret = -LIBBPF_ERRNO__LOAD;
	pr_warning("load bpf program failed: %s\n", strerror(errno));

	if (log_buf && log_buf[0] != '\0') {
		/* Non-empty log means the verifier rejected the program. */
		ret = -LIBBPF_ERRNO__VERIFY;
		pr_warning("-- BEGIN DUMP LOG ---\n");
		pr_warning("\n%s\n", log_buf);
		pr_warning("-- END LOG --\n");
	} else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
		pr_warning("Program too large (%zu insns), at most %d insns\n",
			   load_attr.insns_cnt, BPF_MAXINSNS);
		ret = -LIBBPF_ERRNO__PROG2BIG;
	} else {
		/* Wrong program type? */
		if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
			int fd;

			/* Probe: if the same insns load as a kprobe, the
			 * original failure was a program-type mismatch.
			 */
			load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
			load_attr.expected_attach_type = 0;
			fd = bpf_load_program_xattr(&load_attr, NULL, 0);
			if (fd >= 0) {
				close(fd);
				ret = -LIBBPF_ERRNO__PROGTYPE;
				goto out;
			}
		}

		if (log_buf)
			ret = -LIBBPF_ERRNO__KVER;
	}

out:
	free(log_buf);
	return ret;
}

/*
 * Load @prog into the kernel, producing one fd per instance.
 * Without a preprocessor there is exactly one instance; with one, the
 * preprocessor may rewrite the instructions (or skip loading) per
 * instance. Frees the instruction buffer afterwards either way.
 */
static int
bpf_program__load(struct bpf_program *prog,
		  char *license, u32 kern_version)
{
	int err = 0, fd, i;

	/* Lazily set up the single-instance fd array for plain programs. */
	if (prog->instances.nr < 0 || !prog->instances.fds) {
		if (prog->preprocessor) {
			pr_warning("Internal error: can't load program '%s'\n",
				   prog->section_name);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog->instances.fds = malloc(sizeof(int));
		if (!prog->instances.fds) {
			pr_warning("Not enough memory for BPF fds\n");
			return -ENOMEM;
		}
		prog->instances.nr = 1;
		prog->instances.fds[0] = -1;
	}

	if (!prog->preprocessor) {
		if (prog->instances.nr != 1) {
			pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
				   prog->section_name, prog->instances.nr);
		}
		err = load_program(prog->type, prog->expected_attach_type,
				   prog->name, prog->insns, prog->insns_cnt,
				   license, kern_version, &fd,
				   prog->prog_ifindex);
		if (!err)
			prog->instances.fds[0] = fd;
		goto out;
	}

	for (i = 0; i < prog->instances.nr; i++) {
		struct bpf_prog_prep_result result;
		bpf_program_prep_t preprocessor = prog->preprocessor;

		bzero(&result, sizeof(result));
		err = preprocessor(prog, i, prog->insns,
				   prog->insns_cnt, &result);
		if (err) {
			pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		/* Preprocessor may decline this instance entirely. */
		if (!result.new_insn_ptr || !result.new_insn_cnt) {
			pr_debug("Skip loading the %dth instance of program '%s'\n",
				 i, prog->section_name);
			prog->instances.fds[i] = -1;
			if (result.pfd)
				*result.pfd = -1;
			continue;
		}

		err = load_program(prog->type, prog->expected_attach_type,
				   prog->name, result.new_insn_ptr,
				   result.new_insn_cnt,
				   license, kern_version, &fd,
				   prog->prog_ifindex);

		if (err) {
			pr_warning("Loading the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		if (result.pfd)
			*result.pfd = fd;
		prog->instances.fds[i] = fd;
	}
out:
	if (err)
		pr_warning("failed to load program '%s'\n",
			   prog->section_name);
	/* Instructions are no longer needed once loaded (or failed). */
	zfree(&prog->insns);
	prog->insns_cnt = 0;
	return err;
}

/*
 * Load every program in the object except the .text section, which is
 * only a container for bpf-to-bpf call targets and is inlined into its
 * callers during relocation.
 */
static int
bpf_object__load_progs(struct bpf_object *obj)
{
	size_t i;
	int err;

	for (i = 0; i < obj->nr_programs; i++) {
		if (obj->programs[i].idx == obj->efile.text_shndx)
			continue;
		err = bpf_program__load(&obj->programs[i],
					obj->license,
					obj->kern_version);
		if (err)
			return err;
	}
	return 0;
}

/*
 * Whether the kernel requires a matching kern_version for this program
 * type. Unknown/unspecified types conservatively require it (default).
 */
static bool bpf_prog_type__needs_kver(enum bpf_prog_type type)
{
	switch (type) {
	case BPF_PROG_TYPE_SOCKET_FILTER:
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
	case BPF_PROG_TYPE_XDP:
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_SK_MSG:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
	case BPF_PROG_TYPE_LIRC_MODE2:
		return false;
	case BPF_PROG_TYPE_UNSPEC:
	case BPF_PROG_TYPE_KPROBE:
	case BPF_PROG_TYPE_TRACEPOINT:
	case BPF_PROG_TYPE_PERF_EVENT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT:
	default:
		return true;
	}
}

/* Reject objects that need a kernel version but don't carry one. */
static int bpf_object__validate(struct bpf_object *obj, bool needs_kver)
{
	if (needs_kver && obj->kern_version == 0) {
		pr_warning("%s doesn't provide kernel version\n",
			   obj->path);
		return -LIBBPF_ERRNO__KVERSION;
	}
	return 0;
}

/*
 * Common open path for file- and buffer-backed objects: parse the ELF,
 * check endianness, collect sections and relocations, validate, then
 * release the ELF handle. Returns the object or an ERR_PTR.
 */
static struct bpf_object *
__bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz,
		   bool needs_kver)
{
	struct bpf_object *obj;
	int err;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		pr_warning("failed to init libelf for %s\n", path);
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
	}

	obj = bpf_object__new(path, obj_buf, obj_buf_sz);
	if (IS_ERR(obj))
		return obj;

	CHECK_ERR(bpf_object__elf_init(obj), err, out);
	CHECK_ERR(bpf_object__check_endianness(obj), err, out);
	CHECK_ERR(bpf_object__elf_collect(obj), err, out);
	CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
	CHECK_ERR(bpf_object__validate(obj, needs_kver), err, out);

	/* ELF data is fully consumed at this point; drop the handle. */
	bpf_object__elf_finish(obj);
	return obj;
out:
	bpf_object__close(obj);
	return ERR_PTR(err);
}

/* Open a BPF ELF object file; kernel version section is required. */
struct bpf_object *bpf_object__open(const char *path)
{
	/* param validation */
	if (!path)
		return NULL;

	pr_debug("loading %s\n", path);

	return __bpf_object__open(path, NULL, 0, true);
}

/*
 * Open a BPF ELF object from an in-memory buffer. If @name is NULL, a
 * name is synthesized from the buffer address and size.
 */
struct bpf_object *bpf_object__open_buffer(void *obj_buf,
					   size_t obj_buf_sz,
					   const char *name)
{
	char tmp_name[64];

	/* param validation */
	if (!obj_buf || obj_buf_sz <= 0)
		return
NULL;

	if (!name) {
		/* Synthesize a unique name from buffer address and size. */
		snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
			 (unsigned long)obj_buf,
			 (unsigned long)obj_buf_sz);
		tmp_name[sizeof(tmp_name) - 1] = '\0';
		name = tmp_name;
	}
	pr_debug("loading object '%s' from buffer\n",
		 name);

	return __bpf_object__open(name, obj_buf, obj_buf_sz, true);
}

/*
 * Release all kernel resources held by the object: close every map fd
 * and unload every program. The object itself stays usable for close().
 */
int bpf_object__unload(struct bpf_object *obj)
{
	size_t i;

	if (!obj)
		return -EINVAL;

	for (i = 0; i < obj->nr_maps; i++)
		zclose(obj->maps[i].fd);

	for (i = 0; i < obj->nr_programs; i++)
		bpf_program__unload(&obj->programs[i]);

	return 0;
}

/*
 * Load the opened object into the kernel: create maps, apply
 * relocations, then load programs. On any failure everything already
 * loaded is torn down again. Loading twice is rejected.
 */
int bpf_object__load(struct bpf_object *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	if (obj->loaded) {
		pr_warning("object should not be loaded twice\n");
		return -EINVAL;
	}

	obj->loaded = true;

	CHECK_ERR(bpf_object__create_maps(obj), err, out);
	CHECK_ERR(bpf_object__relocate(obj), err, out);
	CHECK_ERR(bpf_object__load_progs(obj), err, out);

	return 0;
out:
	bpf_object__unload(obj);
	pr_warning("failed to load object '%s'\n", obj->path);
	return err;
}

/*
 * Verify that the directory containing @path lives on a BPF filesystem
 * (bpffs) — pinning anywhere else would fail in the kernel anyway.
 */
static int check_path(const char *path)
{
	struct statfs st_fs;
	char *dname, *dir;
	int err = 0;

	if (path == NULL)
		return -EINVAL;

	/* dirname() may modify its argument, so work on a copy. */
	dname = strdup(path);
	if (dname == NULL)
		return -ENOMEM;

	dir = dirname(dname);
	if (statfs(dir, &st_fs)) {
		pr_warning("failed to statfs %s: %s\n", dir, strerror(errno));
		err = -errno;
	}
	free(dname);

	if (!err && st_fs.f_type != BPF_FS_MAGIC) {
		pr_warning("specified path %s is not on BPF FS\n", path);
		err = -EINVAL;
	}

	return err;
}

/*
 * Pin one loaded instance of @prog at @path on bpffs.
 * @instance must be within [0, prog->instances.nr).
 */
int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
			      int instance)
{
	int err;

	err = check_path(path);
	if (err)
		return err;

	if (prog == NULL) {
		pr_warning("invalid program pointer\n");
		return -EINVAL;
	}

	if (instance < 0 || instance >= prog->instances.nr) {
		pr_warning("invalid prog instance %d of prog %s (max %d)\n",
			   instance, prog->section_name, prog->instances.nr);
		return -EINVAL;
	}

	if (bpf_obj_pin(prog->instances.fds[instance], path)) {
		pr_warning("failed to pin program: %s\n", strerror(errno));
		return -errno;
	}
	pr_debug("pinned program '%s'\n", path);

	return 0;
}

/* mkdir that tolerates an already-existing directory. */
static int make_dir(const char *path)
{
	int err = 0;

	if (mkdir(path, 0700) && errno != EEXIST)
		err = -errno;

	if (err)
		pr_warning("failed to mkdir %s: %s\n", path, strerror(-err));
	return err;
}

/*
 * Pin all instances of @prog under directory @path as "<path>/<i>".
 * NOTE(review): instances pinned before a mid-loop failure are not
 * unpinned on the error path — callers may want to clean up.
 */
int bpf_program__pin(struct bpf_program *prog, const char *path)
{
	int i, err;

	err = check_path(path);
	if (err)
		return err;

	if (prog == NULL) {
		pr_warning("invalid program pointer\n");
		return -EINVAL;
	}

	if (prog->instances.nr <= 0) {
		pr_warning("no instances of prog %s to pin\n",
			   prog->section_name);
		return -EINVAL;
	}

	err = make_dir(path);
	if (err)
		return err;

	for (i = 0; i < prog->instances.nr; i++) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
		if (len < 0)
			return -EINVAL;
		else if (len >= PATH_MAX)
			return -ENAMETOOLONG;

		err = bpf_program__pin_instance(prog, buf, i);
		if (err)
			return err;
	}

	return 0;
}

/* Pin @map's fd at @path on bpffs. */
int bpf_map__pin(struct bpf_map *map, const char *path)
{
	int err;

	err = check_path(path);
	if (err)
		return err;

	if (map == NULL) {
		pr_warning("invalid map pointer\n");
		return -EINVAL;
	}

	if (bpf_obj_pin(map->fd, path)) {
pr_warning("failed to pin map: %s\n", strerror(errno)); 1716 return -errno; 1717 } 1718 1719 pr_debug("pinned map '%s'\n", path); 1720 return 0; 1721 } 1722 1723 int bpf_object__pin(struct bpf_object *obj, const char *path) 1724 { 1725 struct bpf_program *prog; 1726 struct bpf_map *map; 1727 int err; 1728 1729 if (!obj) 1730 return -ENOENT; 1731 1732 if (!obj->loaded) { 1733 pr_warning("object not yet loaded; load it first\n"); 1734 return -ENOENT; 1735 } 1736 1737 err = make_dir(path); 1738 if (err) 1739 return err; 1740 1741 bpf_map__for_each(map, obj) { 1742 char buf[PATH_MAX]; 1743 int len; 1744 1745 len = snprintf(buf, PATH_MAX, "%s/%s", path, 1746 bpf_map__name(map)); 1747 if (len < 0) 1748 return -EINVAL; 1749 else if (len >= PATH_MAX) 1750 return -ENAMETOOLONG; 1751 1752 err = bpf_map__pin(map, buf); 1753 if (err) 1754 return err; 1755 } 1756 1757 bpf_object__for_each_program(prog, obj) { 1758 char buf[PATH_MAX]; 1759 int len; 1760 1761 len = snprintf(buf, PATH_MAX, "%s/%s", path, 1762 prog->section_name); 1763 if (len < 0) 1764 return -EINVAL; 1765 else if (len >= PATH_MAX) 1766 return -ENAMETOOLONG; 1767 1768 err = bpf_program__pin(prog, buf); 1769 if (err) 1770 return err; 1771 } 1772 1773 return 0; 1774 } 1775 1776 void bpf_object__close(struct bpf_object *obj) 1777 { 1778 size_t i; 1779 1780 if (!obj) 1781 return; 1782 1783 if (obj->clear_priv) 1784 obj->clear_priv(obj, obj->priv); 1785 1786 bpf_object__elf_finish(obj); 1787 bpf_object__unload(obj); 1788 btf__free(obj->btf); 1789 1790 for (i = 0; i < obj->nr_maps; i++) { 1791 zfree(&obj->maps[i].name); 1792 if (obj->maps[i].clear_priv) 1793 obj->maps[i].clear_priv(&obj->maps[i], 1794 obj->maps[i].priv); 1795 obj->maps[i].priv = NULL; 1796 obj->maps[i].clear_priv = NULL; 1797 } 1798 zfree(&obj->maps); 1799 obj->nr_maps = 0; 1800 1801 if (obj->programs && obj->nr_programs) { 1802 for (i = 0; i < obj->nr_programs; i++) 1803 bpf_program__exit(&obj->programs[i]); 1804 } 1805 zfree(&obj->programs); 1806 1807 
list_del(&obj->list); 1808 free(obj); 1809 } 1810 1811 struct bpf_object * 1812 bpf_object__next(struct bpf_object *prev) 1813 { 1814 struct bpf_object *next; 1815 1816 if (!prev) 1817 next = list_first_entry(&bpf_objects_list, 1818 struct bpf_object, 1819 list); 1820 else 1821 next = list_next_entry(prev, list); 1822 1823 /* Empty list is noticed here so don't need checking on entry. */ 1824 if (&next->list == &bpf_objects_list) 1825 return NULL; 1826 1827 return next; 1828 } 1829 1830 const char *bpf_object__name(struct bpf_object *obj) 1831 { 1832 return obj ? obj->path : ERR_PTR(-EINVAL); 1833 } 1834 1835 unsigned int bpf_object__kversion(struct bpf_object *obj) 1836 { 1837 return obj ? obj->kern_version : 0; 1838 } 1839 1840 int bpf_object__btf_fd(const struct bpf_object *obj) 1841 { 1842 return obj->btf ? btf__fd(obj->btf) : -1; 1843 } 1844 1845 int bpf_object__set_priv(struct bpf_object *obj, void *priv, 1846 bpf_object_clear_priv_t clear_priv) 1847 { 1848 if (obj->priv && obj->clear_priv) 1849 obj->clear_priv(obj, obj->priv); 1850 1851 obj->priv = priv; 1852 obj->clear_priv = clear_priv; 1853 return 0; 1854 } 1855 1856 void *bpf_object__priv(struct bpf_object *obj) 1857 { 1858 return obj ? 
obj->priv : ERR_PTR(-EINVAL);
}

/*
 * Iterate the programs of @obj: NULL @prev yields the first program,
 * otherwise the successor of @prev; NULL marks the end. @prev must
 * belong to @obj.
 */
struct bpf_program *
bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
{
	size_t idx;

	if (!obj->programs)
		return NULL;
	/* First handler */
	if (prev == NULL)
		return &obj->programs[0];

	if (prev->obj != obj) {
		pr_warning("error: program handler doesn't match object\n");
		return NULL;
	}

	idx = (prev - obj->programs) + 1;
	if (idx >= obj->nr_programs)
		return NULL;
	return &obj->programs[idx];
}

/*
 * Attach caller-private data to the program; any previous private data
 * is released through its registered destructor first.
 */
int bpf_program__set_priv(struct bpf_program *prog, void *priv,
			  bpf_program_clear_priv_t clear_priv)
{
	if (prog->priv && prog->clear_priv)
		prog->clear_priv(prog, prog->priv);

	prog->priv = priv;
	prog->clear_priv = clear_priv;
	return 0;
}

/* Caller-private data previously set, ERR_PTR(-EINVAL) on NULL prog. */
void *bpf_program__priv(struct bpf_program *prog)
{
	return prog ? prog->priv : ERR_PTR(-EINVAL);
}

/*
 * The program's ELF section name. With @needs_copy the caller receives
 * a strdup()ed copy it must free; ERR_PTR(-ENOMEM) if that fails.
 */
const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
{
	const char *title;

	title = prog->section_name;
	if (needs_copy) {
		title = strdup(title);
		if (!title) {
			pr_warning("failed to strdup program title\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	return title;
}

/* fd of the first (usually only) instance of the program. */
int bpf_program__fd(struct bpf_program *prog)
{
	return bpf_program__nth_fd(prog, 0);
}

/*
 * Register a preprocessor that rewrites the program into
 * @nr_instances variants at load time. Must be called before loading;
 * fails once instances already exist.
 */
int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
			  bpf_program_prep_t prep)
{
	int *instances_fds;

	if (nr_instances <= 0 || !prep)
		return -EINVAL;

	if (prog->instances.nr > 0 || prog->instances.fds) {
		pr_warning("Can't set pre-processor after loading\n");
		return -EINVAL;
	}

	instances_fds = malloc(sizeof(int) * nr_instances);
	if (!instances_fds) {
		pr_warning("alloc memory failed for fds\n");
		return -ENOMEM;
	}

	/* fill all fd with -1 */
	memset(instances_fds, -1, sizeof(int) * nr_instances);

	prog->instances.nr = nr_instances;
	prog->instances.fds = instances_fds;
	prog->preprocessor = prep;
	return 0;
}

/*
 * fd of the @n-th loaded instance; -EINVAL for an out-of-range index,
 * -ENOENT if that instance was skipped by the preprocessor.
 */
int bpf_program__nth_fd(struct bpf_program *prog, int n)
{
	int fd;

	if (n >= prog->instances.nr || n < 0) {
		pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
			   n, prog->section_name, prog->instances.nr);
		return -EINVAL;
	}

	fd = prog->instances.fds[n];
	if (fd < 0) {
		pr_warning("%dth instance of program '%s' is invalid\n",
			   n, prog->section_name);
		return -ENOENT;
	}

	return fd;
}

/* Override the program type used at load time. */
void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
{
	prog->type = type;
}

/* Whether @prog currently has program type @type; false for NULL. */
static bool bpf_program__is_type(struct bpf_program *prog,
				 enum bpf_prog_type type)
{
	return prog ? (prog->type == type) : false;
}

/*
 * Generate the public bpf_program__set_<NAME>() / bpf_program__is_<NAME>()
 * accessor pair for one program type.
 */
#define BPF_PROG_TYPE_FNS(NAME, TYPE)			\
int bpf_program__set_##NAME(struct bpf_program *prog)	\
{							\
	if (!prog)					\
		return -EINVAL;				\
	bpf_program__set_type(prog, TYPE);		\
	return 0;					\
}							\
							\
bool bpf_program__is_##NAME(struct bpf_program *prog)	\
{							\
	return bpf_program__is_type(prog, TYPE);	\
}							\

BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);

/* Override the expected attach type used at load time. */
void bpf_program__set_expected_attach_type(struct bpf_program *prog,
					   enum bpf_attach_type type)
{
	prog->expected_attach_type = type;
}
/*
 * Table entry builders: each entry maps an ELF section-name prefix to a
 * program type and (optionally) an expected attach type.
 */
#define BPF_PROG_SEC_FULL(string, ptype, atype) \
	{ string, sizeof(string) - 1, ptype, atype }

#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_FULL(string, ptype, 0)

#define BPF_S_PROG_SEC(string, ptype) \
	BPF_PROG_SEC_FULL(string, BPF_PROG_TYPE_CGROUP_SOCK, ptype)

#define BPF_SA_PROG_SEC(string, ptype) \
	BPF_PROG_SEC_FULL(string, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, ptype)

/* Section-name prefix -> program type lookup table (prefix match). */
static const struct {
	const char *sec;
	size_t len;
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
} section_names[] = {
	BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER),
	BPF_PROG_SEC("kprobe/", BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("kretprobe/", BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS),
	BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT),
	BPF_PROG_SEC("tracepoint/", BPF_PROG_TYPE_TRACEPOINT),
	BPF_PROG_SEC("raw_tracepoint/", BPF_PROG_TYPE_RAW_TRACEPOINT),
	BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP),
	BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
	BPF_PROG_SEC("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB),
	BPF_PROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK),
	BPF_PROG_SEC("cgroup/dev", BPF_PROG_TYPE_CGROUP_DEVICE),
	BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
	BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT),
	BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT),
	BPF_PROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS),
	BPF_PROG_SEC("sk_skb", BPF_PROG_TYPE_SK_SKB),
	BPF_PROG_SEC("sk_msg", BPF_PROG_TYPE_SK_MSG),
	BPF_SA_PROG_SEC("cgroup/bind4", BPF_CGROUP_INET4_BIND),
	BPF_SA_PROG_SEC("cgroup/bind6", BPF_CGROUP_INET6_BIND),
	BPF_SA_PROG_SEC("cgroup/connect4", BPF_CGROUP_INET4_CONNECT),
	BPF_SA_PROG_SEC("cgroup/connect6", BPF_CGROUP_INET6_CONNECT),
	BPF_SA_PROG_SEC("cgroup/sendmsg4", BPF_CGROUP_UDP4_SENDMSG),
	BPF_SA_PROG_SEC("cgroup/sendmsg6", BPF_CGROUP_UDP6_SENDMSG),
	BPF_S_PROG_SEC("cgroup/post_bind4", BPF_CGROUP_INET4_POST_BIND),
	BPF_S_PROG_SEC("cgroup/post_bind6", BPF_CGROUP_INET6_POST_BIND),
};

#undef BPF_PROG_SEC
#undef BPF_PROG_SEC_FULL
#undef BPF_S_PROG_SEC
#undef BPF_SA_PROG_SEC

/*
 * Guess the program type from the section name: returns the index of
 * the first prefix-matching section_names entry, or -1 if none matches.
 */
static int bpf_program__identify_section(struct bpf_program *prog)
{
	int i;

	if (!prog->section_name)
		goto err;

	for (i = 0; i < ARRAY_SIZE(section_names); i++)
		if (strncmp(prog->section_name, section_names[i].sec,
			    section_names[i].len) == 0)
			return i;

err:
	pr_warning("failed to guess program type based on section name %s\n",
		   prog->section_name);

	return -1;
}

/* Map fd (valid after load), or -EINVAL for a NULL map. */
int bpf_map__fd(struct bpf_map *map)
{
	return map ? map->fd : -EINVAL;
}

/* The map definition parsed from the ELF, ERR_PTR(-EINVAL) on NULL. */
const struct bpf_map_def *bpf_map__def(struct bpf_map *map)
{
	return map ? &map->def : ERR_PTR(-EINVAL);
}

/* Map name as named in the ELF, or NULL. */
const char *bpf_map__name(struct bpf_map *map)
{
	return map ? map->name : NULL;
}

/* BTF type id of the map key; 0 when no BTF info is attached. */
uint32_t bpf_map__btf_key_type_id(const struct bpf_map *map)
{
	return map ? map->btf_key_type_id : 0;
}

/* BTF type id of the map value; 0 when no BTF info is attached. */
uint32_t bpf_map__btf_value_type_id(const struct bpf_map *map)
{
	return map ? map->btf_value_type_id : 0;
}

/*
 * Attach caller-private data to the map; any previous private data is
 * released through its registered destructor first.
 */
int bpf_map__set_priv(struct bpf_map *map, void *priv,
		      bpf_map_clear_priv_t clear_priv)
{
	if (!map)
		return -EINVAL;

	if (map->priv) {
		if (map->clear_priv)
			map->clear_priv(map, map->priv);
	}

	map->priv = priv;
	map->clear_priv = clear_priv;
	return 0;
}

/* Caller-private data previously set, ERR_PTR(-EINVAL) on NULL map. */
void *bpf_map__priv(struct bpf_map *map)
{
	return map ? map->priv : ERR_PTR(-EINVAL);
}

/*
 * Iterate the maps of @obj: NULL @prev yields the first map, otherwise
 * the successor of @prev; NULL marks the end. @prev must point into
 * @obj's map array.
 */
struct bpf_map *
bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
{
	size_t idx;
	struct bpf_map *s, *e;

	if (!obj || !obj->maps)
		return NULL;

	s = obj->maps;
	e = obj->maps + obj->nr_maps;

	if (prev == NULL)
		return s;

	if ((prev < s) || (prev >= e)) {
		pr_warning("error in %s: map handler doesn't belong to object\n",
			   __func__);
		return NULL;
	}

	idx = (prev - obj->maps) + 1;
	if (idx >= obj->nr_maps)
		return NULL;
	return &obj->maps[idx];
}

/* Linear search for a map by name; NULL if not found. */
struct bpf_map *
bpf_object__find_map_by_name(struct bpf_object *obj, const char *name)
{
	struct bpf_map *pos;

	bpf_map__for_each(pos, obj) {
		if (pos->name && !strcmp(pos->name, name))
			return pos;
	}
	return NULL;
}

/*
 * Find a map by its offset in the ELF 'maps' section;
 * ERR_PTR(-ENOENT) if no map starts at @offset.
 */
struct bpf_map *
bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
{
	int i;

	for (i = 0; i < obj->nr_maps; i++) {
		if (obj->maps[i].offset == offset)
			return &obj->maps[i];
	}
	return ERR_PTR(-ENOENT);
}

/* Extract the error code from an ERR_PTR-style return, 0 if valid. */
long libbpf_get_error(const void *ptr)
{
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	return 0;
}

/* Convenience wrapper: open + load @file with an explicit program type. */
int bpf_prog_load(const char *file, enum bpf_prog_type type,
		  struct bpf_object **pobj, int *prog_fd)
{
	struct bpf_prog_load_attr attr;

	memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
	attr.file = file;
	attr.prog_type = type;
	attr.expected_attach_type = 0;

	return bpf_prog_load_xattr(&attr, pobj, prog_fd);
}

/*
 * Open and load a BPF object file, guessing each program's type from
 * its section name when the caller left it unspecified. On success
 * returns the object in *pobj and the fd of the first non-.text
 * program in *prog_fd.
 */
int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
			struct bpf_object **pobj, int *prog_fd)
{
	struct bpf_program *prog, *first_prog = NULL;
	enum bpf_attach_type expected_attach_type;
	enum bpf_prog_type prog_type;
	struct bpf_object *obj;
	struct bpf_map *map;
	int
section_idx; 2203 int err; 2204 2205 if (!attr) 2206 return -EINVAL; 2207 if (!attr->file) 2208 return -EINVAL; 2209 2210 obj = __bpf_object__open(attr->file, NULL, 0, 2211 bpf_prog_type__needs_kver(attr->prog_type)); 2212 if (IS_ERR_OR_NULL(obj)) 2213 return -ENOENT; 2214 2215 bpf_object__for_each_program(prog, obj) { 2216 /* 2217 * If type is not specified, try to guess it based on 2218 * section name. 2219 */ 2220 prog_type = attr->prog_type; 2221 prog->prog_ifindex = attr->ifindex; 2222 expected_attach_type = attr->expected_attach_type; 2223 if (prog_type == BPF_PROG_TYPE_UNSPEC) { 2224 section_idx = bpf_program__identify_section(prog); 2225 if (section_idx < 0) { 2226 bpf_object__close(obj); 2227 return -EINVAL; 2228 } 2229 prog_type = section_names[section_idx].prog_type; 2230 expected_attach_type = 2231 section_names[section_idx].expected_attach_type; 2232 } 2233 2234 bpf_program__set_type(prog, prog_type); 2235 bpf_program__set_expected_attach_type(prog, 2236 expected_attach_type); 2237 2238 if (prog->idx != obj->efile.text_shndx && !first_prog) 2239 first_prog = prog; 2240 } 2241 2242 bpf_map__for_each(map, obj) { 2243 map->map_ifindex = attr->ifindex; 2244 } 2245 2246 if (!first_prog) { 2247 pr_warning("object file doesn't contain bpf program\n"); 2248 bpf_object__close(obj); 2249 return -ENOENT; 2250 } 2251 2252 err = bpf_object__load(obj); 2253 if (err) { 2254 bpf_object__close(obj); 2255 return -EINVAL; 2256 } 2257 2258 *pobj = obj; 2259 *prog_fd = bpf_program__fd(first_prog); 2260 return 0; 2261 } 2262 2263 enum bpf_perf_event_ret 2264 bpf_perf_event_read_simple(void *mem, unsigned long size, 2265 unsigned long page_size, void **buf, size_t *buf_len, 2266 bpf_perf_event_print_t fn, void *priv) 2267 { 2268 volatile struct perf_event_mmap_page *header = mem; 2269 __u64 data_tail = header->data_tail; 2270 __u64 data_head = header->data_head; 2271 void *base, *begin, *end; 2272 int ret; 2273 2274 asm volatile("" ::: "memory"); /* in real code it should be 
smp_rmb() */ 2275 if (data_head == data_tail) 2276 return LIBBPF_PERF_EVENT_CONT; 2277 2278 base = ((char *)header) + page_size; 2279 2280 begin = base + data_tail % size; 2281 end = base + data_head % size; 2282 2283 while (begin != end) { 2284 struct perf_event_header *ehdr; 2285 2286 ehdr = begin; 2287 if (begin + ehdr->size > base + size) { 2288 long len = base + size - begin; 2289 2290 if (*buf_len < ehdr->size) { 2291 free(*buf); 2292 *buf = malloc(ehdr->size); 2293 if (!*buf) { 2294 ret = LIBBPF_PERF_EVENT_ERROR; 2295 break; 2296 } 2297 *buf_len = ehdr->size; 2298 } 2299 2300 memcpy(*buf, begin, len); 2301 memcpy(*buf + len, base, ehdr->size - len); 2302 ehdr = (void *)*buf; 2303 begin = base + ehdr->size - len; 2304 } else if (begin + ehdr->size == base + size) { 2305 begin = base; 2306 } else { 2307 begin += ehdr->size; 2308 } 2309 2310 ret = fn(ehdr, priv); 2311 if (ret != LIBBPF_PERF_EVENT_CONT) 2312 break; 2313 2314 data_tail += ehdr->size; 2315 } 2316 2317 __sync_synchronize(); /* smp_mb() */ 2318 header->data_tail = data_tail; 2319 2320 return ret; 2321 } 2322