// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * common eBPF ELF operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License (not later!)
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses>
 */

#include <stdlib.h>
#include <string.h>
#include <memory.h>
#include <unistd.h>
#include <asm/unistd.h>
#include <errno.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <limits.h>
#include <sys/resource.h>
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"

/*
 * When building perf, unistd.h is overridden. __NR_bpf is
 * required to be defined explicitly.
 */
#ifndef __NR_bpf
# if defined(__i386__)
#  define __NR_bpf 357
# elif defined(__x86_64__)
#  define __NR_bpf 321
# elif defined(__aarch64__)
#  define __NR_bpf 280
# elif defined(__sparc__)
#  define __NR_bpf 349
# elif defined(__s390__)
#  define __NR_bpf 351
# elif defined(__arc__)
#  define __NR_bpf 280
# elif defined(__mips__) && defined(_ABIO32)
#  define __NR_bpf 4355
# elif defined(__mips__) && defined(_ABIN32)
#  define __NR_bpf 6319
# elif defined(__mips__) && defined(_ABI64)
#  define __NR_bpf 5315
# else
#  error __NR_bpf not defined. libbpf does not support your arch.
# endif
#endif

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
			  unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}

static inline int sys_bpf_fd(enum bpf_cmd cmd, union bpf_attr *attr,
			     unsigned int size)
{
	int fd;

	fd = sys_bpf(cmd, attr, size);
	return ensure_good_fd(fd);
}

#define PROG_LOAD_ATTEMPTS 5

static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts)
{
	int fd;

	do {
		fd = sys_bpf_fd(BPF_PROG_LOAD, attr, size);
	} while (fd < 0 && errno == EAGAIN && --attempts > 0);

	return fd;
}
/* Probe whether kernel switched from memlock-based (RLIMIT_MEMLOCK) to
 * memcg-based memory accounting for BPF maps and progs. This was done in [0].
 * We use the support for bpf_ktime_get_coarse_ns() helper, which was added in
 * the same 5.11 Linux release ([1]), to detect memcg-based accounting for BPF.
 *
 *   [0] https://lore.kernel.org/bpf/20201201215900.3569844-1-guro@fb.com/
 *   [1] d05512618056 ("bpf: Add bpf_ktime_get_coarse_ns helper")
 */
int probe_memcg_account(void)
{
	const size_t prog_load_attr_sz = offsetofend(union bpf_attr, attach_btf_obj_fd);
	struct bpf_insn insns[] = {
		BPF_EMIT_CALL(BPF_FUNC_ktime_get_coarse_ns),
		BPF_EXIT_INSN(),
	};
	size_t insn_cnt = ARRAY_SIZE(insns);
	union bpf_attr attr;
	int prog_fd;

	/* attempt loading a trivial socket filter that calls the
	 * bpf_ktime_get_coarse_ns() helper; if it loads, the kernel uses
	 * memcg-based memory accounting
	 */
	memset(&attr, 0, prog_load_attr_sz);
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns = ptr_to_u64(insns);
	attr.insn_cnt = insn_cnt;
	attr.license = ptr_to_u64("GPL");

	prog_fd = sys_bpf_fd(BPF_PROG_LOAD, &attr, prog_load_attr_sz);
	if (prog_fd >= 0) {
		close(prog_fd);
		return 1;
	}
	return 0;
}

static bool memlock_bumped;
static rlim_t memlock_rlim = RLIM_INFINITY;

int libbpf_set_memlock_rlim(size_t memlock_bytes)
{
	if (memlock_bumped)
		return libbpf_err(-EBUSY);

	memlock_rlim = memlock_bytes;
	return 0;
}

int bump_rlimit_memlock(void)
{
	struct rlimit rlim;

	/* if kernel supports memcg-based accounting, skip bumping RLIMIT_MEMLOCK */
	if (memlock_bumped || kernel_supports(NULL, FEAT_MEMCG_ACCOUNT))
		return 0;

	memlock_bumped = true;

	/* zero memlock_rlim disables auto-bumping RLIMIT_MEMLOCK */
	if (memlock_rlim == 0)
		return 0;

	rlim.rlim_cur = rlim.rlim_max = memlock_rlim;
	if (setrlimit(RLIMIT_MEMLOCK, &rlim))
		return -errno;

	return 0;
}

int bpf_map_create(enum bpf_map_type map_type,
		   const char *map_name,
		   __u32 key_size,
		   __u32 value_size,
		   __u32 max_entries,
		   const struct bpf_map_create_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, map_extra);
	union bpf_attr attr;
	int fd;

	bump_rlimit_memlock();

	memset(&attr, 0, attr_sz);

	if (!OPTS_VALID(opts, bpf_map_create_opts))
		return libbpf_err(-EINVAL);

	attr.map_type = map_type;
	if (map_name)
		libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name));
	attr.key_size = key_size;
	attr.value_size = value_size;
	attr.max_entries = max_entries;

	attr.btf_fd = OPTS_GET(opts, btf_fd, 0);
	attr.btf_key_type_id = OPTS_GET(opts, btf_key_type_id, 0);
	attr.btf_value_type_id = OPTS_GET(opts, btf_value_type_id, 0);
	attr.btf_vmlinux_value_type_id = OPTS_GET(opts, btf_vmlinux_value_type_id, 0);

	attr.inner_map_fd = OPTS_GET(opts, inner_map_fd, 0);
	attr.map_flags = OPTS_GET(opts, map_flags, 0);
	attr.map_extra = OPTS_GET(opts, map_extra, 0);
	attr.numa_node = OPTS_GET(opts, numa_node, 0);
	attr.map_ifindex = OPTS_GET(opts, map_ifindex, 0);

	fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, attr_sz);
	return libbpf_err_errno(fd);
}
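
/* Example: creating a small hash map with bpf_map_create(), as a minimal
 * sketch (not part of libbpf itself); the map name, sizes, and flags are
 * arbitrary. On failure the result is -errno (errno is also set):
 *
 *	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);
 *	int map_fd;
 *
 *	map_fd = bpf_map_create(BPF_MAP_TYPE_HASH, "example_map",
 *				sizeof(__u32), sizeof(__u64), 1024, &opts);
 *	if (map_fd < 0)
 *		return map_fd;
 */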

static void *
alloc_zero_tailing_info(const void *orecord, __u32 cnt,
			__u32 actual_rec_size, __u32 expected_rec_size)
{
	__u64 info_len = (__u64)actual_rec_size * cnt;
	void *info, *nrecord;
	int i;

	info = malloc(info_len);
	if (!info)
		return NULL;

	/* zero out bytes kernel does not understand */
	nrecord = info;
	for (i = 0; i < cnt; i++) {
		memcpy(nrecord, orecord, expected_rec_size);
		memset(nrecord + expected_rec_size, 0,
		       actual_rec_size - expected_rec_size);
		orecord += actual_rec_size;
		nrecord += actual_rec_size;
	}

	return info;
}

int bpf_prog_load(enum bpf_prog_type prog_type,
		  const char *prog_name, const char *license,
		  const struct bpf_insn *insns, size_t insn_cnt,
		  const struct bpf_prog_load_opts *opts)
{
	void *finfo = NULL, *linfo = NULL;
	const char *func_info, *line_info;
	__u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd;
	__u32 func_info_rec_size, line_info_rec_size;
	int fd, attempts;
	union bpf_attr attr;
	char *log_buf;

	bump_rlimit_memlock();

	if (!OPTS_VALID(opts, bpf_prog_load_opts))
		return libbpf_err(-EINVAL);

	attempts = OPTS_GET(opts, attempts, 0);
	if (attempts < 0)
		return libbpf_err(-EINVAL);
	if (attempts == 0)
		attempts = PROG_LOAD_ATTEMPTS;

	memset(&attr, 0, sizeof(attr));

	attr.prog_type = prog_type;
	attr.expected_attach_type = OPTS_GET(opts, expected_attach_type, 0);

	attr.prog_btf_fd = OPTS_GET(opts, prog_btf_fd, 0);
	attr.prog_flags = OPTS_GET(opts, prog_flags, 0);
	attr.prog_ifindex = OPTS_GET(opts, prog_ifindex, 0);
	attr.kern_version = OPTS_GET(opts, kern_version, 0);

	if (prog_name)
		libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name));
	attr.license = ptr_to_u64(license);

	if (insn_cnt > UINT_MAX)
		return libbpf_err(-E2BIG);

	attr.insns = ptr_to_u64(insns);
	attr.insn_cnt = (__u32)insn_cnt;

	attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
	attach_btf_obj_fd = OPTS_GET(opts, attach_btf_obj_fd, 0);

	if (attach_prog_fd && attach_btf_obj_fd)
		return libbpf_err(-EINVAL);

	attr.attach_btf_id = OPTS_GET(opts, attach_btf_id, 0);
	if (attach_prog_fd)
		attr.attach_prog_fd = attach_prog_fd;
	else
		attr.attach_btf_obj_fd = attach_btf_obj_fd;

	log_buf = OPTS_GET(opts, log_buf, NULL);
	log_size = OPTS_GET(opts, log_size, 0);
	log_level = OPTS_GET(opts, log_level, 0);

	if (!!log_buf != !!log_size)
		return libbpf_err(-EINVAL);
	if (log_level > (4 | 2 | 1))
		return libbpf_err(-EINVAL);
	if (log_level && !log_buf)
		return libbpf_err(-EINVAL);

	func_info_rec_size = OPTS_GET(opts, func_info_rec_size, 0);
	func_info = OPTS_GET(opts, func_info, NULL);
	attr.func_info_rec_size = func_info_rec_size;
	attr.func_info = ptr_to_u64(func_info);
	attr.func_info_cnt = OPTS_GET(opts, func_info_cnt, 0);

	line_info_rec_size = OPTS_GET(opts, line_info_rec_size, 0);
	line_info = OPTS_GET(opts, line_info, NULL);
	attr.line_info_rec_size = line_info_rec_size;
	attr.line_info = ptr_to_u64(line_info);
	attr.line_info_cnt = OPTS_GET(opts, line_info_cnt, 0);

	attr.fd_array = ptr_to_u64(OPTS_GET(opts, fd_array, NULL));

	if (log_level) {
		attr.log_buf = ptr_to_u64(log_buf);
		attr.log_size = log_size;
		attr.log_level = log_level;
	}

	fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts);
	if (fd >= 0)
		return fd;

	/* After bpf_prog_load, the kernel may modify certain attributes
	 * to give user space a hint how to deal with loading failure.
	 * Check to see whether we can make some changes and load again.
	 */
	while (errno == E2BIG && (!finfo || !linfo)) {
		if (!finfo && attr.func_info_cnt &&
		    attr.func_info_rec_size < func_info_rec_size) {
			/* try with corrected func info records */
			finfo = alloc_zero_tailing_info(func_info,
							attr.func_info_cnt,
							func_info_rec_size,
							attr.func_info_rec_size);
			if (!finfo) {
				errno = E2BIG;
				goto done;
			}

			attr.func_info = ptr_to_u64(finfo);
			attr.func_info_rec_size = func_info_rec_size;
		} else if (!linfo && attr.line_info_cnt &&
			   attr.line_info_rec_size < line_info_rec_size) {
			linfo = alloc_zero_tailing_info(line_info,
							attr.line_info_cnt,
							line_info_rec_size,
							attr.line_info_rec_size);
			if (!linfo) {
				errno = E2BIG;
				goto done;
			}

			attr.line_info = ptr_to_u64(linfo);
			attr.line_info_rec_size = line_info_rec_size;
		} else {
			break;
		}

		fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts);
		if (fd >= 0)
			goto done;
	}

	if (log_level == 0 && log_buf) {
		/* log_level == 0 with non-NULL log_buf requires retrying on error
		 * with log_level == 1 and log_buf/log_buf_size set, to get details of
		 * failure
		 */
		attr.log_buf = ptr_to_u64(log_buf);
		attr.log_size = log_size;
		attr.log_level = 1;

		fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts);
	}
done:
	/* free() doesn't affect errno, so we don't need to restore it */
	free(finfo);
	free(linfo);
	return libbpf_err_errno(fd);
}
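
/* Example: loading a trivial "return 0" socket filter with bpf_prog_load(),
 * as a minimal sketch (not part of libbpf); the program name and log buffer
 * size are arbitrary. Passing log_buf with log_level == 0 relies on the
 * retry-with-log_level=1 behavior above, so the buffer is filled in only if
 * loading fails:
 *
 *	struct bpf_insn prog[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),
 *		BPF_EXIT_INSN(),
 *	};
 *	char log[4096];
 *	LIBBPF_OPTS(bpf_prog_load_opts, opts,
 *		.log_buf = log,
 *		.log_size = sizeof(log),
 *	);
 *	int prog_fd;
 *
 *	prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "example_prog",
 *				"GPL", prog, ARRAY_SIZE(prog), &opts);
 *	if (prog_fd < 0)
 *		fprintf(stderr, "load failed: %s\n%s", strerror(-prog_fd), log);
 */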

int bpf_map_update_elem(int fd, const void *key, const void *value,
			__u64 flags)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	ret = sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}

int bpf_map_lookup_elem(int fd, const void *key, void *value)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}

int bpf_map_lookup_elem_flags(int fd, const void *key, void *value, __u64 flags)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}

int bpf_map_lookup_and_delete_elem(int fd, const void *key, void *value)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}

int bpf_map_lookup_and_delete_elem_flags(int fd, const void *key, void *value, __u64 flags)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}

int bpf_map_delete_elem(int fd, const void *key)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);

	ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}

int bpf_map_delete_elem_flags(int fd, const void *key, __u64 flags)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.flags = flags;

	ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}

int bpf_map_get_next_key(int fd, const void *key, void *next_key)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.next_key = ptr_to_u64(next_key);

	ret = sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}

int bpf_map_freeze(int fd)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;

	ret = sys_bpf(BPF_MAP_FREEZE, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}
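
/* Example: basic single-element operations, shown as a minimal sketch (not
 * part of libbpf). map_fd is assumed to refer to a BPF_MAP_TYPE_HASH with
 * __u32 keys and __u64 values, as in the bpf_map_create() example above;
 * error handling is trimmed:
 *
 *	__u32 key = 1, cur, next;
 *	__u64 value = 42;
 *	int err;
 *
 *	bpf_map_update_elem(map_fd, &key, &value, BPF_ANY);
 *	bpf_map_lookup_elem(map_fd, &key, &value);
 *
 *	// iterate all keys; a NULL key asks for the first one
 *	err = bpf_map_get_next_key(map_fd, NULL, &next);
 *	while (!err) {
 *		cur = next;
 *		err = bpf_map_get_next_key(map_fd, &cur, &next);
 *	}
 */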

static int bpf_map_batch_common(int cmd, int fd, void *in_batch,
				void *out_batch, void *keys, void *values,
				__u32 *count,
				const struct bpf_map_batch_opts *opts)
{
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_map_batch_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, sizeof(attr));
	attr.batch.map_fd = fd;
	attr.batch.in_batch = ptr_to_u64(in_batch);
	attr.batch.out_batch = ptr_to_u64(out_batch);
	attr.batch.keys = ptr_to_u64(keys);
	attr.batch.values = ptr_to_u64(values);
	attr.batch.count = *count;
	attr.batch.elem_flags = OPTS_GET(opts, elem_flags, 0);
	attr.batch.flags = OPTS_GET(opts, flags, 0);

	ret = sys_bpf(cmd, &attr, sizeof(attr));
	*count = attr.batch.count;

	return libbpf_err_errno(ret);
}

int bpf_map_delete_batch(int fd, const void *keys, __u32 *count,
			 const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_DELETE_BATCH, fd, NULL,
				    NULL, (void *)keys, NULL, count, opts);
}

int bpf_map_lookup_batch(int fd, void *in_batch, void *out_batch, void *keys,
			 void *values, __u32 *count,
			 const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_LOOKUP_BATCH, fd, in_batch,
				    out_batch, keys, values, count, opts);
}

int bpf_map_lookup_and_delete_batch(int fd, void *in_batch, void *out_batch,
				    void *keys, void *values, __u32 *count,
				    const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_LOOKUP_AND_DELETE_BATCH,
				    fd, in_batch, out_batch, keys, values,
				    count, opts);
}

int bpf_map_update_batch(int fd, const void *keys, const void *values, __u32 *count,
			 const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_UPDATE_BATCH, fd, NULL, NULL,
				    (void *)keys, (void *)values, count, opts);
}
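
/* Example: draining a whole map with bpf_map_lookup_batch(), as a sketch
 * under the same assumptions as the element-wise example above (a
 * __u32/__u64 hash map behind map_fd; the batch token for hash maps is a
 * __u32). A NULL in_batch starts at the beginning, out_batch seeds the next
 * call, and -ENOENT marks the final batch; a robust loop would also consume
 * the elements returned alongside that -ENOENT:
 *
 *	__u32 in_batch, out_batch, count = 128;
 *	__u32 keys[128];
 *	__u64 values[128];
 *	LIBBPF_OPTS(bpf_map_batch_opts, opts);
 *	int err;
 *
 *	err = bpf_map_lookup_batch(map_fd, NULL, &out_batch,
 *				   keys, values, &count, &opts);
 *	while (!err) {
 *		// count now says how many key/value pairs were filled in
 *		in_batch = out_batch;
 *		count = 128;
 *		err = bpf_map_lookup_batch(map_fd, &in_batch, &out_batch,
 *					   keys, values, &count, &opts);
 *	}
 */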

int bpf_obj_pin(int fd, const char *pathname)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.pathname = ptr_to_u64((void *)pathname);
	attr.bpf_fd = fd;

	ret = sys_bpf(BPF_OBJ_PIN, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}

int bpf_obj_get(const char *pathname)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.pathname = ptr_to_u64((void *)pathname);

	fd = sys_bpf_fd(BPF_OBJ_GET, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}

int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
		    unsigned int flags)
{
	DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts, opts,
		.flags = flags,
	);

	return bpf_prog_attach_opts(prog_fd, target_fd, type, &opts);
}

int bpf_prog_attach_opts(int prog_fd, int target_fd,
			 enum bpf_attach_type type,
			 const struct bpf_prog_attach_opts *opts)
{
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_prog_attach_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, sizeof(attr));
	attr.target_fd = target_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = type;
	attr.attach_flags = OPTS_GET(opts, flags, 0);
	attr.replace_bpf_fd = OPTS_GET(opts, replace_prog_fd, 0);

	ret = sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}

__attribute__((alias("bpf_prog_attach_opts")))
int bpf_prog_attach_xattr(int prog_fd, int target_fd,
			  enum bpf_attach_type type,
			  const struct bpf_prog_attach_opts *opts);

int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd = target_fd;
	attr.attach_type = type;

	ret = sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}

int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd = target_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = type;

	ret = sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}

int bpf_link_create(int prog_fd, int target_fd,
		    enum bpf_attach_type attach_type,
		    const struct bpf_link_create_opts *opts)
{
	__u32 target_btf_id, iter_info_len;
	union bpf_attr attr;
	int fd, err;

	if (!OPTS_VALID(opts, bpf_link_create_opts))
		return libbpf_err(-EINVAL);

	iter_info_len = OPTS_GET(opts, iter_info_len, 0);
	target_btf_id = OPTS_GET(opts, target_btf_id, 0);

	/* validate we don't have unexpected combinations of non-zero fields */
	if (iter_info_len || target_btf_id) {
		if (iter_info_len && target_btf_id)
			return libbpf_err(-EINVAL);
		if (!OPTS_ZEROED(opts, target_btf_id))
			return libbpf_err(-EINVAL);
	}

	memset(&attr, 0, sizeof(attr));
	attr.link_create.prog_fd = prog_fd;
	attr.link_create.target_fd = target_fd;
	attr.link_create.attach_type = attach_type;
	attr.link_create.flags = OPTS_GET(opts, flags, 0);

	if (target_btf_id) {
		attr.link_create.target_btf_id = target_btf_id;
		goto proceed;
	}

	switch (attach_type) {
	case BPF_TRACE_ITER:
		attr.link_create.iter_info = ptr_to_u64(OPTS_GET(opts, iter_info, (void *)0));
		attr.link_create.iter_info_len = iter_info_len;
		break;
	case BPF_PERF_EVENT:
		attr.link_create.perf_event.bpf_cookie = OPTS_GET(opts, perf_event.bpf_cookie, 0);
		if (!OPTS_ZEROED(opts, perf_event))
			return libbpf_err(-EINVAL);
		break;
	case BPF_TRACE_KPROBE_MULTI:
		attr.link_create.kprobe_multi.flags = OPTS_GET(opts, kprobe_multi.flags, 0);
		attr.link_create.kprobe_multi.cnt = OPTS_GET(opts, kprobe_multi.cnt, 0);
		attr.link_create.kprobe_multi.syms = ptr_to_u64(OPTS_GET(opts, kprobe_multi.syms, 0));
		attr.link_create.kprobe_multi.addrs = ptr_to_u64(OPTS_GET(opts, kprobe_multi.addrs, 0));
		attr.link_create.kprobe_multi.cookies = ptr_to_u64(OPTS_GET(opts, kprobe_multi.cookies, 0));
		if (!OPTS_ZEROED(opts, kprobe_multi))
			return libbpf_err(-EINVAL);
		break;
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
	case BPF_MODIFY_RETURN:
	case BPF_LSM_MAC:
		attr.link_create.tracing.cookie = OPTS_GET(opts, tracing.cookie, 0);
		if (!OPTS_ZEROED(opts, tracing))
			return libbpf_err(-EINVAL);
		break;
	default:
		if (!OPTS_ZEROED(opts, flags))
			return libbpf_err(-EINVAL);
		break;
	}
proceed:
	fd = sys_bpf_fd(BPF_LINK_CREATE, &attr, sizeof(attr));
	if (fd >= 0)
		return fd;
	/* we'll get EINVAL if LINK_CREATE doesn't support attaching fentry
	 * and other similar programs
	 */
	err = -errno;
	if (err != -EINVAL)
		return libbpf_err(err);

	/* if user used features not supported by
	 * BPF_RAW_TRACEPOINT_OPEN command, then just give up immediately
	 */
	if (attr.link_create.target_fd || attr.link_create.target_btf_id)
		return libbpf_err(err);
	if (!OPTS_ZEROED(opts, sz))
		return libbpf_err(err);

	/* otherwise, for a few select kinds of programs that can be
	 * attached using BPF_RAW_TRACEPOINT_OPEN command, try that as
	 * a fallback for older kernels
	 */
	switch (attach_type) {
	case BPF_TRACE_RAW_TP:
	case BPF_LSM_MAC:
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
	case BPF_MODIFY_RETURN:
		return bpf_raw_tracepoint_open(NULL, prog_fd);
	default:
		return libbpf_err(err);
	}
}
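
/* Example: attaching an already-loaded tracing program with
 * bpf_link_create(), as a minimal sketch; prog_fd is assumed to be a
 * BPF_PROG_TYPE_TRACING program loaded with expected_attach_type ==
 * BPF_TRACE_FENTRY. With NULL opts, older kernels that lack LINK_CREATE
 * support for this type are handled by the BPF_RAW_TRACEPOINT_OPEN fallback
 * above:
 *
 *	int link_fd;
 *
 *	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_FENTRY, NULL);
 *	if (link_fd < 0)
 *		return link_fd;  // -errno on failure
 */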

int bpf_link_detach(int link_fd)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.link_detach.link_fd = link_fd;

	ret = sys_bpf(BPF_LINK_DETACH, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}

int bpf_link_update(int link_fd, int new_prog_fd,
		    const struct bpf_link_update_opts *opts)
{
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_link_update_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, sizeof(attr));
	attr.link_update.link_fd = link_fd;
	attr.link_update.new_prog_fd = new_prog_fd;
	attr.link_update.flags = OPTS_GET(opts, flags, 0);
	attr.link_update.old_prog_fd = OPTS_GET(opts, old_prog_fd, 0);

	ret = sys_bpf(BPF_LINK_UPDATE, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}

int bpf_iter_create(int link_fd)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.iter_create.link_fd = link_fd;

	fd = sys_bpf_fd(BPF_ITER_CREATE, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}

int bpf_prog_query_opts(int target_fd,
			enum bpf_attach_type type,
			struct bpf_prog_query_opts *opts)
{
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_prog_query_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, sizeof(attr));

	attr.query.target_fd = target_fd;
	attr.query.attach_type = type;
	attr.query.query_flags = OPTS_GET(opts, query_flags, 0);
	attr.query.prog_cnt = OPTS_GET(opts, prog_cnt, 0);
	attr.query.prog_ids = ptr_to_u64(OPTS_GET(opts, prog_ids, NULL));
	attr.query.prog_attach_flags = ptr_to_u64(OPTS_GET(opts, prog_attach_flags, NULL));

	ret = sys_bpf(BPF_PROG_QUERY, &attr, sizeof(attr));

	OPTS_SET(opts, attach_flags, attr.query.attach_flags);
	OPTS_SET(opts, prog_cnt, attr.query.prog_cnt);

	return libbpf_err_errno(ret);
}

int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
		   __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt)
{
	LIBBPF_OPTS(bpf_prog_query_opts, opts);
	int ret;

	opts.query_flags = query_flags;
	opts.prog_ids = prog_ids;
	opts.prog_cnt = *prog_cnt;

	ret = bpf_prog_query_opts(target_fd, type, &opts);

	if (attach_flags)
		*attach_flags = opts.attach_flags;
	*prog_cnt = opts.prog_cnt;

	return libbpf_err_errno(ret);
}
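
/* Example: listing program IDs attached to a cgroup with
 * bpf_prog_query_opts(), as a sketch; cgroup_fd is assumed to be an open fd
 * of a cgroup directory. On success the kernel writes back how many IDs it
 * filled in:
 *
 *	__u32 ids[64];
 *	LIBBPF_OPTS(bpf_prog_query_opts, opts,
 *		.prog_ids = ids,
 *		.prog_cnt = ARRAY_SIZE(ids),
 *	);
 *	int err;
 *
 *	err = bpf_prog_query_opts(cgroup_fd, BPF_CGROUP_INET_INGRESS, &opts);
 *	if (!err)
 *		printf("%u programs attached\n", opts.prog_cnt);
 */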

int bpf_prog_test_run_opts(int prog_fd, struct bpf_test_run_opts *opts)
{
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_test_run_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = prog_fd;
	attr.test.batch_size = OPTS_GET(opts, batch_size, 0);
	attr.test.cpu = OPTS_GET(opts, cpu, 0);
	attr.test.flags = OPTS_GET(opts, flags, 0);
	attr.test.repeat = OPTS_GET(opts, repeat, 0);
	attr.test.duration = OPTS_GET(opts, duration, 0);
	attr.test.ctx_size_in = OPTS_GET(opts, ctx_size_in, 0);
	attr.test.ctx_size_out = OPTS_GET(opts, ctx_size_out, 0);
	attr.test.data_size_in = OPTS_GET(opts, data_size_in, 0);
	attr.test.data_size_out = OPTS_GET(opts, data_size_out, 0);
	attr.test.ctx_in = ptr_to_u64(OPTS_GET(opts, ctx_in, NULL));
	attr.test.ctx_out = ptr_to_u64(OPTS_GET(opts, ctx_out, NULL));
	attr.test.data_in = ptr_to_u64(OPTS_GET(opts, data_in, NULL));
	attr.test.data_out = ptr_to_u64(OPTS_GET(opts, data_out, NULL));

	ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));

	OPTS_SET(opts, data_size_out, attr.test.data_size_out);
	OPTS_SET(opts, ctx_size_out, attr.test.ctx_size_out);
	OPTS_SET(opts, duration, attr.test.duration);
	OPTS_SET(opts, retval, attr.test.retval);

	return libbpf_err_errno(ret);
}
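
/* Example: a single test run of a loaded socket filter via
 * bpf_prog_test_run_opts(), as a sketch; prog_fd is assumed to come from
 * the bpf_prog_load() example above, and the input packet bytes are
 * arbitrary:
 *
 *	char pkt_in[64] = {};
 *	char pkt_out[64];
 *	LIBBPF_OPTS(bpf_test_run_opts, opts,
 *		.data_in = pkt_in,
 *		.data_size_in = sizeof(pkt_in),
 *		.data_out = pkt_out,
 *		.data_size_out = sizeof(pkt_out),
 *		.repeat = 1,
 *	);
 *	int err;
 *
 *	err = bpf_prog_test_run_opts(prog_fd, &opts);
 *	if (!err)
 *		printf("retval %u in %u ns\n", opts.retval, opts.duration);
 */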

static int bpf_obj_get_next_id(__u32 start_id, __u32 *next_id, int cmd)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.start_id = start_id;

	err = sys_bpf(cmd, &attr, sizeof(attr));
	if (!err)
		*next_id = attr.next_id;

	return libbpf_err_errno(err);
}

int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_PROG_GET_NEXT_ID);
}

int bpf_map_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_MAP_GET_NEXT_ID);
}

int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_BTF_GET_NEXT_ID);
}

int bpf_link_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_LINK_GET_NEXT_ID);
}

int bpf_prog_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.prog_id = id;

	fd = sys_bpf_fd(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}

int bpf_map_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.map_id = id;

	fd = sys_bpf_fd(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}

int bpf_btf_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.btf_id = id;

	fd = sys_bpf_fd(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}

int bpf_link_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.link_id = id;

	fd = sys_bpf_fd(BPF_LINK_GET_FD_BY_ID, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}

int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.info.bpf_fd = bpf_fd;
	attr.info.info_len = *info_len;
	attr.info.info = ptr_to_u64(info);

	err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));

	if (!err)
		*info_len = attr.info.info_len;

	return libbpf_err_errno(err);
}
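
/* Example: walking all loaded BPF programs by combining
 * bpf_prog_get_next_id(), bpf_prog_get_fd_by_id() and
 * bpf_obj_get_info_by_fd(). A sketch only: it typically needs CAP_SYS_ADMIN
 * and skips the race handling (a program may disappear between next_id and
 * fd_by_id) that a robust loop would have:
 *
 *	struct bpf_prog_info info;
 *	__u32 id = 0, info_len;
 *	int fd;
 *
 *	while (!bpf_prog_get_next_id(id, &id)) {
 *		fd = bpf_prog_get_fd_by_id(id);
 *		if (fd < 0)
 *			continue;
 *		memset(&info, 0, sizeof(info));
 *		info_len = sizeof(info);
 *		if (!bpf_obj_get_info_by_fd(fd, &info, &info_len))
 *			printf("prog %u: %s\n", id, info.name);
 *		close(fd);
 *	}
 */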

int bpf_raw_tracepoint_open(const char *name, int prog_fd)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.raw_tracepoint.name = ptr_to_u64(name);
	attr.raw_tracepoint.prog_fd = prog_fd;

	fd = sys_bpf_fd(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}

int bpf_btf_load(const void *btf_data, size_t btf_size, const struct bpf_btf_load_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, btf_log_level);
	union bpf_attr attr;
	char *log_buf;
	size_t log_size;
	__u32 log_level;
	int fd;

	bump_rlimit_memlock();

	memset(&attr, 0, attr_sz);

	if (!OPTS_VALID(opts, bpf_btf_load_opts))
		return libbpf_err(-EINVAL);

	log_buf = OPTS_GET(opts, log_buf, NULL);
	log_size = OPTS_GET(opts, log_size, 0);
	log_level = OPTS_GET(opts, log_level, 0);

	if (log_size > UINT_MAX)
		return libbpf_err(-EINVAL);
	if (log_size && !log_buf)
		return libbpf_err(-EINVAL);

	attr.btf = ptr_to_u64(btf_data);
	attr.btf_size = btf_size;
	/* log_level == 0 and log_buf != NULL means "try loading without
	 * log_buf, but retry with log_buf and log_level=1 on error", which is
	 * consistent across low-level and high-level BTF and program loading
	 * APIs within libbpf and provides a sensible behavior in practice
	 */
	if (log_level) {
		attr.btf_log_buf = ptr_to_u64(log_buf);
		attr.btf_log_size = (__u32)log_size;
		attr.btf_log_level = log_level;
	}

	fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz);
	if (fd < 0 && log_buf && log_level == 0) {
		attr.btf_log_buf = ptr_to_u64(log_buf);
		attr.btf_log_size = (__u32)log_size;
		attr.btf_log_level = 1;
		fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz);
	}
	return libbpf_err_errno(fd);
}

int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len,
		      __u32 *prog_id, __u32 *fd_type, __u64 *probe_offset,
		      __u64 *probe_addr)
{
	union bpf_attr attr = {};
	int err;

	attr.task_fd_query.pid = pid;
	attr.task_fd_query.fd = fd;
	attr.task_fd_query.flags = flags;
	attr.task_fd_query.buf = ptr_to_u64(buf);
	attr.task_fd_query.buf_len = *buf_len;

	err = sys_bpf(BPF_TASK_FD_QUERY, &attr, sizeof(attr));

	*buf_len = attr.task_fd_query.buf_len;
	*prog_id = attr.task_fd_query.prog_id;
	*fd_type = attr.task_fd_query.fd_type;
	*probe_offset = attr.task_fd_query.probe_offset;
	*probe_addr = attr.task_fd_query.probe_addr;

	return libbpf_err_errno(err);
}

int bpf_enable_stats(enum bpf_stats_type type)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.enable_stats.type = type;

	fd = sys_bpf_fd(BPF_ENABLE_STATS, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}

int bpf_prog_bind_map(int prog_fd, int map_fd,
		      const struct bpf_prog_bind_opts *opts)
{
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_prog_bind_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, sizeof(attr));
	attr.prog_bind_map.prog_fd = prog_fd;
	attr.prog_bind_map.map_fd = map_fd;
	attr.prog_bind_map.flags = OPTS_GET(opts, flags, 0);

	ret = sys_bpf(BPF_PROG_BIND_MAP, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}
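
/* Example: loading a raw BTF blob with bpf_btf_load(), as a sketch;
 * btf_data/btf_size are assumed to hold valid .BTF bytes (e.g. read from an
 * ELF section). Mirroring bpf_prog_load(), a log_buf passed with log_level
 * == 0 is only filled in on failure, via the automatic log_level=1 retry:
 *
 *	char log[64 * 1024];
 *	LIBBPF_OPTS(bpf_btf_load_opts, opts,
 *		.log_buf = log,
 *		.log_size = sizeof(log),
 *	);
 *	int btf_fd;
 *
 *	btf_fd = bpf_btf_load(btf_data, btf_size, &opts);
 *	if (btf_fd < 0)
 *		fprintf(stderr, "BTF load failed: %s\n%s", strerror(-btf_fd), log);
 */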