// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * common eBPF ELF operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License (not later!)
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses>
 */

#include <stdlib.h>
#include <string.h>
#include <memory.h>
#include <unistd.h>
#include <asm/unistd.h>
#include <errno.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <limits.h>
#include <sys/resource.h>
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"

/*
 * When building perf, unistd.h is overridden. __NR_bpf is
 * required to be defined explicitly.
 */
#ifndef __NR_bpf
# if defined(__i386__)
#  define __NR_bpf 357
# elif defined(__x86_64__)
#  define __NR_bpf 321
# elif defined(__aarch64__)
#  define __NR_bpf 280
# elif defined(__sparc__)
#  define __NR_bpf 349
# elif defined(__s390__)
#  define __NR_bpf 351
# elif defined(__arc__)
#  define __NR_bpf 280
# elif defined(__mips__) && defined(_ABIO32)
#  define __NR_bpf 4355
# elif defined(__mips__) && defined(_ABIN32)
#  define __NR_bpf 6319
# elif defined(__mips__) && defined(_ABI64)
#  define __NR_bpf 5315
# else
#  error __NR_bpf not defined. libbpf does not support your arch.
# endif
#endif

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
			  unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}

static inline int sys_bpf_fd(enum bpf_cmd cmd, union bpf_attr *attr,
			     unsigned int size)
{
	int fd;

	fd = sys_bpf(cmd, attr, size);
	return ensure_good_fd(fd);
}

int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts)
{
	int fd;

	do {
		fd = sys_bpf_fd(BPF_PROG_LOAD, attr, size);
	} while (fd < 0 && errno == EAGAIN && --attempts > 0);

	return fd;
}

/* Probe whether kernel switched from memlock-based (RLIMIT_MEMLOCK) to
 * memcg-based memory accounting for BPF maps and progs. This was done in [0].
 * We use the support for bpf_ktime_get_coarse_ns() helper, which was added in
 * the same 5.11 Linux release ([1]), to detect memcg-based accounting for BPF.
 *
 * [0] https://lore.kernel.org/bpf/20201201215900.3569844-1-guro@fb.com/
 * [1] d05512618056 ("bpf: Add bpf_ktime_get_coarse_ns helper")
 */
int probe_memcg_account(int token_fd)
{
	const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd);
	struct bpf_insn insns[] = {
		BPF_EMIT_CALL(BPF_FUNC_ktime_get_coarse_ns),
		BPF_EXIT_INSN(),
	};
	size_t insn_cnt = ARRAY_SIZE(insns);
	union bpf_attr attr;
	int prog_fd;

	/* attempt loading a trivial socket filter program that uses the
	 * bpf_ktime_get_coarse_ns() helper
	 */
	memset(&attr, 0, attr_sz);
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns = ptr_to_u64(insns);
	attr.insn_cnt = insn_cnt;
	attr.license = ptr_to_u64("GPL");
	attr.prog_token_fd = token_fd;
	if (token_fd)
		attr.prog_flags |= BPF_F_TOKEN_FD;

	prog_fd = sys_bpf_fd(BPF_PROG_LOAD, &attr, attr_sz);
	if (prog_fd >= 0) {
		close(prog_fd);
		return 1;
	}
	return 0;
}

static bool memlock_bumped;
static rlim_t memlock_rlim = RLIM_INFINITY;

int libbpf_set_memlock_rlim(size_t memlock_bytes)
{
	if (memlock_bumped)
		return libbpf_err(-EBUSY);

	memlock_rlim = memlock_bytes;
	return 0;
}

int bump_rlimit_memlock(void)
{
	struct rlimit rlim;

	/* if kernel supports memcg-based accounting, skip bumping RLIMIT_MEMLOCK */
	if (memlock_bumped || feat_supported(NULL, FEAT_MEMCG_ACCOUNT))
		return 0;

	memlock_bumped = true;

	/* zero memlock_rlim disables auto-bumping RLIMIT_MEMLOCK */
	if (memlock_rlim == 0)
		return 0;

	rlim.rlim_cur = rlim.rlim_max = memlock_rlim;
	if (setrlimit(RLIMIT_MEMLOCK, &rlim))
		return -errno;

	return 0;
}

int bpf_map_create(enum bpf_map_type map_type,
		   const char *map_name,
		   __u32 key_size,
		   __u32 value_size,
		   __u32 max_entries,
		   const struct bpf_map_create_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, map_token_fd);
	union bpf_attr attr;
	int fd;

	bump_rlimit_memlock();

	memset(&attr, 0, attr_sz);

	if (!OPTS_VALID(opts, bpf_map_create_opts))
		return libbpf_err(-EINVAL);

	attr.map_type = map_type;
	if (map_name && feat_supported(NULL, FEAT_PROG_NAME))
		libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name));
	attr.key_size = key_size;
	attr.value_size = value_size;
	attr.max_entries = max_entries;

	attr.btf_fd = OPTS_GET(opts, btf_fd, 0);
	attr.btf_key_type_id = OPTS_GET(opts, btf_key_type_id, 0);
	attr.btf_value_type_id = OPTS_GET(opts, btf_value_type_id, 0);
	attr.btf_vmlinux_value_type_id = OPTS_GET(opts, btf_vmlinux_value_type_id, 0);
	attr.value_type_btf_obj_fd = OPTS_GET(opts, value_type_btf_obj_fd, 0);

	attr.inner_map_fd = OPTS_GET(opts, inner_map_fd, 0);
	attr.map_flags = OPTS_GET(opts, map_flags, 0);
	attr.map_extra = OPTS_GET(opts, map_extra, 0);
	attr.numa_node = OPTS_GET(opts, numa_node, 0);
	attr.map_ifindex = OPTS_GET(opts, map_ifindex, 0);

	attr.map_token_fd = OPTS_GET(opts, token_fd, 0);

	fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, attr_sz);
	return libbpf_err_errno(fd);
}

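/* Usage sketch for bpf_map_create() (illustrative caller-side code, not part
 * of this file; the map name, sizes, and flags below are made-up examples):
 *
 *	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);
 *	int map_fd;
 *
 *	map_fd = bpf_map_create(BPF_MAP_TYPE_HASH, "example_map",
 *				sizeof(__u32), sizeof(__u64), 1024, &opts);
 *	if (map_fd < 0)
 *		return map_fd; // negative error code on failure
 */
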
static void *
alloc_zero_tailing_info(const void *orecord, __u32 cnt,
			__u32 actual_rec_size, __u32 expected_rec_size)
{
	__u64 info_len = (__u64)actual_rec_size * cnt;
	void *info, *nrecord;
	int i;

	info = malloc(info_len);
	if (!info)
		return NULL;

	/* zero out bytes kernel does not understand */
	nrecord = info;
	for (i = 0; i < cnt; i++) {
		memcpy(nrecord, orecord, expected_rec_size);
		memset(nrecord + expected_rec_size, 0,
		       actual_rec_size - expected_rec_size);
		orecord += actual_rec_size;
		nrecord += actual_rec_size;
	}

	return info;
}

int bpf_prog_load(enum bpf_prog_type prog_type,
		  const char *prog_name, const char *license,
		  const struct bpf_insn *insns, size_t insn_cnt,
		  struct bpf_prog_load_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, fd_array_cnt);
	void *finfo = NULL, *linfo = NULL;
	const char *func_info, *line_info;
	__u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd;
	__u32 func_info_rec_size, line_info_rec_size;
	int fd, attempts;
	union bpf_attr attr;
	char *log_buf;

	bump_rlimit_memlock();

	if (!OPTS_VALID(opts, bpf_prog_load_opts))
		return libbpf_err(-EINVAL);

	attempts = OPTS_GET(opts, attempts, 0);
	if (attempts < 0)
		return libbpf_err(-EINVAL);
	if (attempts == 0)
		attempts = PROG_LOAD_ATTEMPTS;

	memset(&attr, 0, attr_sz);

	attr.prog_type = prog_type;
	attr.expected_attach_type = OPTS_GET(opts, expected_attach_type, 0);

	attr.prog_btf_fd = OPTS_GET(opts, prog_btf_fd, 0);
	attr.prog_flags = OPTS_GET(opts, prog_flags, 0);
	attr.prog_ifindex = OPTS_GET(opts, prog_ifindex, 0);
	attr.kern_version = OPTS_GET(opts, kern_version, 0);
	attr.prog_token_fd = OPTS_GET(opts, token_fd, 0);

	if (prog_name && feat_supported(NULL, FEAT_PROG_NAME))
		libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name));
	attr.license = ptr_to_u64(license);

	if (insn_cnt > UINT_MAX)
		return libbpf_err(-E2BIG);

	attr.insns = ptr_to_u64(insns);
	attr.insn_cnt = (__u32)insn_cnt;

	attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
	attach_btf_obj_fd = OPTS_GET(opts, attach_btf_obj_fd, 0);

	if (attach_prog_fd && attach_btf_obj_fd)
		return libbpf_err(-EINVAL);

	attr.attach_btf_id = OPTS_GET(opts, attach_btf_id, 0);
	if (attach_prog_fd)
		attr.attach_prog_fd = attach_prog_fd;
	else
		attr.attach_btf_obj_fd = attach_btf_obj_fd;

	log_buf = OPTS_GET(opts, log_buf, NULL);
	log_size = OPTS_GET(opts, log_size, 0);
	log_level = OPTS_GET(opts, log_level, 0);

	if (!!log_buf != !!log_size)
		return libbpf_err(-EINVAL);

	func_info_rec_size = OPTS_GET(opts, func_info_rec_size, 0);
	func_info = OPTS_GET(opts, func_info, NULL);
	attr.func_info_rec_size = func_info_rec_size;
	attr.func_info = ptr_to_u64(func_info);
	attr.func_info_cnt = OPTS_GET(opts, func_info_cnt, 0);

	line_info_rec_size = OPTS_GET(opts, line_info_rec_size, 0);
	line_info = OPTS_GET(opts, line_info, NULL);
	attr.line_info_rec_size = line_info_rec_size;
	attr.line_info = ptr_to_u64(line_info);
	attr.line_info_cnt = OPTS_GET(opts, line_info_cnt, 0);

	attr.fd_array = ptr_to_u64(OPTS_GET(opts, fd_array, NULL));
	attr.fd_array_cnt = OPTS_GET(opts, fd_array_cnt, 0);

	if (log_level) {
		attr.log_buf = ptr_to_u64(log_buf);
		attr.log_size = log_size;
		attr.log_level = log_level;
	}

	fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
	OPTS_SET(opts, log_true_size, attr.log_true_size);
	if (fd >= 0)
		return fd;

	/* After bpf_prog_load, the kernel may modify certain attributes
	 * to give user space a hint how to deal with loading failure.
	 * Check to see whether we can make some changes and load again.
	 */
	while (errno == E2BIG && (!finfo || !linfo)) {
		if (!finfo && attr.func_info_cnt &&
		    attr.func_info_rec_size < func_info_rec_size) {
			/* try with corrected func info records */
			finfo = alloc_zero_tailing_info(func_info,
							attr.func_info_cnt,
							func_info_rec_size,
							attr.func_info_rec_size);
			if (!finfo) {
				errno = E2BIG;
				goto done;
			}

			attr.func_info = ptr_to_u64(finfo);
			attr.func_info_rec_size = func_info_rec_size;
		} else if (!linfo && attr.line_info_cnt &&
			   attr.line_info_rec_size < line_info_rec_size) {
			linfo = alloc_zero_tailing_info(line_info,
							attr.line_info_cnt,
							line_info_rec_size,
							attr.line_info_rec_size);
			if (!linfo) {
				errno = E2BIG;
				goto done;
			}

			attr.line_info = ptr_to_u64(linfo);
			attr.line_info_rec_size = line_info_rec_size;
		} else {
			break;
		}

		fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
		OPTS_SET(opts, log_true_size, attr.log_true_size);
		if (fd >= 0)
			goto done;
	}

	if (log_level == 0 && log_buf) {
		/* log_level == 0 with non-NULL log_buf requires retrying on error
		 * with log_level == 1 and log_buf/log_buf_size set, to get details of
		 * failure
		 */
		attr.log_buf = ptr_to_u64(log_buf);
		attr.log_size = log_size;
		attr.log_level = 1;

		fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
		OPTS_SET(opts, log_true_size, attr.log_true_size);
	}
done:
	/* free() doesn't affect errno, so we don't need to restore it */
	free(finfo);
	free(linfo);
	return libbpf_err_errno(fd);
}

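/* Usage sketch for bpf_prog_load() (illustrative, not part of this file):
 * load a minimal "return 0" socket filter and capture the verifier log on
 * failure. The program name and buffer size are made-up example values; note
 * that with log_level == 0 and a log_buf set, a failed load is automatically
 * retried with log_level == 1, so the buffer gets filled on error.
 *
 *	struct bpf_insn prog[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),
 *		BPF_EXIT_INSN(),
 *	};
 *	char log[4096];
 *	LIBBPF_OPTS(bpf_prog_load_opts, opts,
 *		.log_buf = log,
 *		.log_size = sizeof(log),
 *	);
 *	int prog_fd;
 *
 *	prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "example_prog",
 *				"GPL", prog, ARRAY_SIZE(prog), &opts);
 *	if (prog_fd < 0)
 *		fprintf(stderr, "load failed: %s\n", log);
 */
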
int bpf_map_update_elem(int fd, const void *key, const void *value,
			__u64 flags)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	ret = sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_map_lookup_elem(int fd, const void *key, void *value)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_map_lookup_elem_flags(int fd, const void *key, void *value, __u64 flags)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_map_lookup_and_delete_elem(int fd, const void *key, void *value)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

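/* Usage sketch for the single-element map operations above (illustrative;
 * assumes map_fd refers to a map with __u32 keys and __u64 values):
 *
 *	__u32 key = 1;
 *	__u64 value = 42, out;
 *
 *	if (bpf_map_update_elem(map_fd, &key, &value, BPF_ANY))
 *		return -1;
 *	if (bpf_map_lookup_elem(map_fd, &key, &out))
 *		return -1;
 *	// out == 42 here
 */
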
int bpf_map_lookup_and_delete_elem_flags(int fd, const void *key, void *value, __u64 flags)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_map_delete_elem(int fd, const void *key)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);

	ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_map_delete_elem_flags(int fd, const void *key, __u64 flags)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.flags = flags;

	ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_map_get_next_key(int fd, const void *key, void *next_key)
{
	const size_t attr_sz = offsetofend(union bpf_attr, next_key);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.next_key = ptr_to_u64(next_key);

	ret = sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_map_freeze(int fd)
{
	const size_t attr_sz = offsetofend(union bpf_attr, map_fd);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;

	ret = sys_bpf(BPF_MAP_FREEZE, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

static int bpf_map_batch_common(int cmd, int fd, void *in_batch,
				void *out_batch, void *keys, void *values,
				__u32 *count,
				const struct bpf_map_batch_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, batch);
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_map_batch_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.batch.map_fd = fd;
	attr.batch.in_batch = ptr_to_u64(in_batch);
	attr.batch.out_batch = ptr_to_u64(out_batch);
	attr.batch.keys = ptr_to_u64(keys);
	attr.batch.values = ptr_to_u64(values);
	attr.batch.count = *count;
	attr.batch.elem_flags = OPTS_GET(opts, elem_flags, 0);
	attr.batch.flags = OPTS_GET(opts, flags, 0);

	ret = sys_bpf(cmd, &attr, attr_sz);
	*count = attr.batch.count;

	return libbpf_err_errno(ret);
}

int bpf_map_delete_batch(int fd, const void *keys, __u32 *count,
			 const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_DELETE_BATCH, fd, NULL,
				    NULL, (void *)keys, NULL, count, opts);
}

int bpf_map_lookup_batch(int fd, void *in_batch, void *out_batch, void *keys,
			 void *values, __u32 *count,
			 const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_LOOKUP_BATCH, fd, in_batch,
				    out_batch, keys, values, count, opts);
}

int bpf_map_lookup_and_delete_batch(int fd, void *in_batch, void *out_batch,
				    void *keys, void *values, __u32 *count,
				    const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_LOOKUP_AND_DELETE_BATCH,
				    fd, in_batch, out_batch, keys, values,
				    count, opts);
}

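/* Usage sketch for bpf_map_lookup_batch() (illustrative; assumes map_fd
 * refers to a map with __u32 keys and __u64 values). Passing NULL in_batch
 * starts iteration from the beginning; the out_batch token feeds the next
 * call. count is in/out, and -ENOENT signals end of iteration.
 *
 *	__u32 keys[64];
 *	__u64 values[64];
 *	__u32 batch, count = 64;
 *	int err;
 *
 *	err = bpf_map_lookup_batch(map_fd, NULL, &batch, keys, values,
 *				   &count, NULL);
 *	// on success, count holds the number of elements actually returned
 */
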
int bpf_map_update_batch(int fd, const void *keys, const void *values, __u32 *count,
			 const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_UPDATE_BATCH, fd, NULL, NULL,
				    (void *)keys, (void *)values, count, opts);
}

int bpf_obj_pin_opts(int fd, const char *pathname, const struct bpf_obj_pin_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, path_fd);
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_obj_pin_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.path_fd = OPTS_GET(opts, path_fd, 0);
	attr.pathname = ptr_to_u64((void *)pathname);
	attr.file_flags = OPTS_GET(opts, file_flags, 0);
	attr.bpf_fd = fd;

	ret = sys_bpf(BPF_OBJ_PIN, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_obj_pin(int fd, const char *pathname)
{
	return bpf_obj_pin_opts(fd, pathname, NULL);
}

int bpf_obj_get(const char *pathname)
{
	return bpf_obj_get_opts(pathname, NULL);
}

int bpf_obj_get_opts(const char *pathname, const struct bpf_obj_get_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, path_fd);
	union bpf_attr attr;
	int fd;

	if (!OPTS_VALID(opts, bpf_obj_get_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.path_fd = OPTS_GET(opts, path_fd, 0);
	attr.pathname = ptr_to_u64((void *)pathname);
	attr.file_flags = OPTS_GET(opts, file_flags, 0);

	fd = sys_bpf_fd(BPF_OBJ_GET, &attr, attr_sz);
	return libbpf_err_errno(fd);
}

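/* Usage sketch for BPF FS pinning (illustrative; the pin path is made up and
 * assumes a bpffs instance is mounted at /sys/fs/bpf):
 *
 *	if (bpf_obj_pin(map_fd, "/sys/fs/bpf/example_map"))
 *		return -1;
 *	// ... possibly in another process ...
 *	int fd = bpf_obj_get("/sys/fs/bpf/example_map");
 *	if (fd < 0)
 *		return fd;
 */
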
int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
		    unsigned int flags)
{
	DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts, opts,
		.flags = flags,
	);

	return bpf_prog_attach_opts(prog_fd, target_fd, type, &opts);
}

int bpf_prog_attach_opts(int prog_fd, int target, enum bpf_attach_type type,
			 const struct bpf_prog_attach_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, expected_revision);
	__u32 relative_id, flags;
	int ret, relative_fd;
	union bpf_attr attr;

	if (!OPTS_VALID(opts, bpf_prog_attach_opts))
		return libbpf_err(-EINVAL);

	relative_id = OPTS_GET(opts, relative_id, 0);
	relative_fd = OPTS_GET(opts, relative_fd, 0);
	flags = OPTS_GET(opts, flags, 0);

	/* validate we don't have unexpected combinations of non-zero fields */
	if (relative_fd && relative_id)
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.target_fd = target;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = type;
	attr.replace_bpf_fd = OPTS_GET(opts, replace_fd, 0);
	attr.expected_revision = OPTS_GET(opts, expected_revision, 0);

	if (relative_id) {
		attr.attach_flags = flags | BPF_F_ID;
		attr.relative_id = relative_id;
	} else {
		attr.attach_flags = flags;
		attr.relative_fd = relative_fd;
	}

	ret = sys_bpf(BPF_PROG_ATTACH, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_prog_detach_opts(int prog_fd, int target, enum bpf_attach_type type,
			 const struct bpf_prog_detach_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, expected_revision);
	__u32 relative_id, flags;
	int ret, relative_fd;
	union bpf_attr attr;

	if (!OPTS_VALID(opts, bpf_prog_detach_opts))
		return libbpf_err(-EINVAL);

	relative_id = OPTS_GET(opts, relative_id, 0);
	relative_fd = OPTS_GET(opts, relative_fd, 0);
	flags = OPTS_GET(opts, flags, 0);

	/* validate we don't have unexpected combinations of non-zero fields */
	if (relative_fd && relative_id)
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.target_fd = target;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = type;
	attr.expected_revision = OPTS_GET(opts, expected_revision, 0);

	if (relative_id) {
		attr.attach_flags = flags | BPF_F_ID;
		attr.relative_id = relative_id;
	} else {
		attr.attach_flags = flags;
		attr.relative_fd = relative_fd;
	}

	ret = sys_bpf(BPF_PROG_DETACH, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
{
	return bpf_prog_detach_opts(0, target_fd, type, NULL);
}

int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type)
{
	return bpf_prog_detach_opts(prog_fd, target_fd, type, NULL);
}

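/* Usage sketch for bpf_prog_attach()/bpf_prog_detach2() (illustrative;
 * assumes cgroup_fd is an open cgroup directory fd and prog_fd is a loaded
 * BPF_PROG_TYPE_CGROUP_SKB program):
 *
 *	if (bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_INGRESS,
 *			    BPF_F_ALLOW_MULTI))
 *		return -1;
 *	// ... later ...
 *	bpf_prog_detach2(prog_fd, cgroup_fd, BPF_CGROUP_INET_INGRESS);
 */
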
int bpf_link_create(int prog_fd, int target_fd,
		    enum bpf_attach_type attach_type,
		    const struct bpf_link_create_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, link_create);
	__u32 target_btf_id, iter_info_len, relative_id;
	int fd, err, relative_fd;
	union bpf_attr attr;

	if (!OPTS_VALID(opts, bpf_link_create_opts))
		return libbpf_err(-EINVAL);

	iter_info_len = OPTS_GET(opts, iter_info_len, 0);
	target_btf_id = OPTS_GET(opts, target_btf_id, 0);

	/* validate we don't have unexpected combinations of non-zero fields */
	if (iter_info_len || target_btf_id) {
		if (iter_info_len && target_btf_id)
			return libbpf_err(-EINVAL);
		if (!OPTS_ZEROED(opts, target_btf_id))
			return libbpf_err(-EINVAL);
	}

	memset(&attr, 0, attr_sz);
	attr.link_create.prog_fd = prog_fd;
	attr.link_create.target_fd = target_fd;
	attr.link_create.attach_type = attach_type;
	attr.link_create.flags = OPTS_GET(opts, flags, 0);

	if (target_btf_id) {
		attr.link_create.target_btf_id = target_btf_id;
		goto proceed;
	}

	switch (attach_type) {
	case BPF_TRACE_ITER:
		attr.link_create.iter_info = ptr_to_u64(OPTS_GET(opts, iter_info, (void *)0));
		attr.link_create.iter_info_len = iter_info_len;
		break;
	case BPF_PERF_EVENT:
		attr.link_create.perf_event.bpf_cookie = OPTS_GET(opts, perf_event.bpf_cookie, 0);
		if (!OPTS_ZEROED(opts, perf_event))
			return libbpf_err(-EINVAL);
		break;
	case BPF_TRACE_KPROBE_MULTI:
	case BPF_TRACE_KPROBE_SESSION:
		attr.link_create.kprobe_multi.flags = OPTS_GET(opts, kprobe_multi.flags, 0);
		attr.link_create.kprobe_multi.cnt = OPTS_GET(opts, kprobe_multi.cnt, 0);
		attr.link_create.kprobe_multi.syms = ptr_to_u64(OPTS_GET(opts, kprobe_multi.syms, 0));
		attr.link_create.kprobe_multi.addrs = ptr_to_u64(OPTS_GET(opts, kprobe_multi.addrs, 0));
		attr.link_create.kprobe_multi.cookies = ptr_to_u64(OPTS_GET(opts, kprobe_multi.cookies, 0));
		if (!OPTS_ZEROED(opts, kprobe_multi))
			return libbpf_err(-EINVAL);
		break;
	case BPF_TRACE_UPROBE_MULTI:
	case BPF_TRACE_UPROBE_SESSION:
		attr.link_create.uprobe_multi.flags = OPTS_GET(opts, uprobe_multi.flags, 0);
		attr.link_create.uprobe_multi.cnt = OPTS_GET(opts, uprobe_multi.cnt, 0);
		attr.link_create.uprobe_multi.path = ptr_to_u64(OPTS_GET(opts, uprobe_multi.path, 0));
		attr.link_create.uprobe_multi.offsets = ptr_to_u64(OPTS_GET(opts, uprobe_multi.offsets, 0));
		attr.link_create.uprobe_multi.ref_ctr_offsets = ptr_to_u64(OPTS_GET(opts, uprobe_multi.ref_ctr_offsets, 0));
		attr.link_create.uprobe_multi.cookies = ptr_to_u64(OPTS_GET(opts, uprobe_multi.cookies, 0));
		attr.link_create.uprobe_multi.pid = OPTS_GET(opts, uprobe_multi.pid, 0);
		if (!OPTS_ZEROED(opts, uprobe_multi))
			return libbpf_err(-EINVAL);
		break;
	case BPF_TRACE_RAW_TP:
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
	case BPF_MODIFY_RETURN:
	case BPF_LSM_MAC:
		attr.link_create.tracing.cookie = OPTS_GET(opts, tracing.cookie, 0);
		if (!OPTS_ZEROED(opts, tracing))
			return libbpf_err(-EINVAL);
		break;
	case BPF_NETFILTER:
		attr.link_create.netfilter.pf = OPTS_GET(opts, netfilter.pf, 0);
		attr.link_create.netfilter.hooknum = OPTS_GET(opts, netfilter.hooknum, 0);
		attr.link_create.netfilter.priority = OPTS_GET(opts, netfilter.priority, 0);
		attr.link_create.netfilter.flags = OPTS_GET(opts, netfilter.flags, 0);
		if (!OPTS_ZEROED(opts, netfilter))
			return libbpf_err(-EINVAL);
		break;
	case BPF_TCX_INGRESS:
	case BPF_TCX_EGRESS:
		relative_fd = OPTS_GET(opts, tcx.relative_fd, 0);
		relative_id = OPTS_GET(opts, tcx.relative_id, 0);
		if (relative_fd && relative_id)
			return libbpf_err(-EINVAL);
		if (relative_id) {
			attr.link_create.tcx.relative_id = relative_id;
			attr.link_create.flags |= BPF_F_ID;
		} else {
			attr.link_create.tcx.relative_fd = relative_fd;
		}
		attr.link_create.tcx.expected_revision = OPTS_GET(opts, tcx.expected_revision, 0);
		if (!OPTS_ZEROED(opts, tcx))
			return libbpf_err(-EINVAL);
		break;
	case BPF_NETKIT_PRIMARY:
	case BPF_NETKIT_PEER:
		relative_fd = OPTS_GET(opts, netkit.relative_fd, 0);
		relative_id = OPTS_GET(opts, netkit.relative_id, 0);
		if (relative_fd && relative_id)
			return libbpf_err(-EINVAL);
		if (relative_id) {
			attr.link_create.netkit.relative_id = relative_id;
			attr.link_create.flags |= BPF_F_ID;
		} else {
			attr.link_create.netkit.relative_fd = relative_fd;
		}
		attr.link_create.netkit.expected_revision = OPTS_GET(opts, netkit.expected_revision, 0);
		if (!OPTS_ZEROED(opts, netkit))
			return libbpf_err(-EINVAL);
		break;
	default:
		if (!OPTS_ZEROED(opts, flags))
			return libbpf_err(-EINVAL);
		break;
	}
proceed:
	fd = sys_bpf_fd(BPF_LINK_CREATE, &attr, attr_sz);
	if (fd >= 0)
		return fd;
	/* we'll get EINVAL if LINK_CREATE doesn't support attaching fentry
	 * and other similar programs
	 */
	err = -errno;
	if (err != -EINVAL)
		return libbpf_err(err);

	/* if the user used features not supported by the
	 * BPF_RAW_TRACEPOINT_OPEN command, then just give up immediately
	 */
	if (attr.link_create.target_fd || attr.link_create.target_btf_id)
		return libbpf_err(err);
	if (!OPTS_ZEROED(opts, sz))
		return libbpf_err(err);

	/* otherwise, for a few select kinds of programs that can be
	 * attached using the BPF_RAW_TRACEPOINT_OPEN command, try that as
	 * a fallback for older kernels
	 */
	switch (attach_type) {
	case BPF_TRACE_RAW_TP:
	case BPF_LSM_MAC:
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
	case BPF_MODIFY_RETURN:
		return bpf_raw_tracepoint_open(NULL, prog_fd);
	default:
		return libbpf_err(err);
	}
}

int bpf_link_detach(int link_fd)
{
	const size_t attr_sz = offsetofend(union bpf_attr, link_detach);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.link_detach.link_fd = link_fd;

	ret = sys_bpf(BPF_LINK_DETACH, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

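/* Usage sketch for bpf_link_create() (illustrative; assumes prog_fd is a
 * loaded BPF_PROG_TYPE_SCHED_CLS program and ifindex identifies the target
 * network device):
 *
 *	LIBBPF_OPTS(bpf_link_create_opts, opts);
 *	int link_fd;
 *
 *	link_fd = bpf_link_create(prog_fd, ifindex, BPF_TCX_INGRESS, &opts);
 *	if (link_fd < 0)
 *		return link_fd;
 *	// the attachment lives as long as link_fd (unless the link is
 *	// pinned); bpf_link_detach(link_fd) force-detaches it early
 */
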
int bpf_link_update(int link_fd, int new_prog_fd,
		    const struct bpf_link_update_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, link_update);
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_link_update_opts))
		return libbpf_err(-EINVAL);

	if (OPTS_GET(opts, old_prog_fd, 0) && OPTS_GET(opts, old_map_fd, 0))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.link_update.link_fd = link_fd;
	attr.link_update.new_prog_fd = new_prog_fd;
	attr.link_update.flags = OPTS_GET(opts, flags, 0);
	if (OPTS_GET(opts, old_prog_fd, 0))
		attr.link_update.old_prog_fd = OPTS_GET(opts, old_prog_fd, 0);
	else if (OPTS_GET(opts, old_map_fd, 0))
		attr.link_update.old_map_fd = OPTS_GET(opts, old_map_fd, 0);

	ret = sys_bpf(BPF_LINK_UPDATE, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_iter_create(int link_fd)
{
	const size_t attr_sz = offsetofend(union bpf_attr, iter_create);
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, attr_sz);
	attr.iter_create.link_fd = link_fd;

	fd = sys_bpf_fd(BPF_ITER_CREATE, &attr, attr_sz);
	return libbpf_err_errno(fd);
}

int bpf_prog_query_opts(int target, enum bpf_attach_type type,
			struct bpf_prog_query_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, query);
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_prog_query_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.query.target_fd = target;
	attr.query.attach_type = type;
	attr.query.query_flags = OPTS_GET(opts, query_flags, 0);
	attr.query.count = OPTS_GET(opts, count, 0);
	attr.query.prog_ids = ptr_to_u64(OPTS_GET(opts, prog_ids, NULL));
	attr.query.link_ids = ptr_to_u64(OPTS_GET(opts, link_ids, NULL));
	attr.query.prog_attach_flags = ptr_to_u64(OPTS_GET(opts, prog_attach_flags, NULL));
	attr.query.link_attach_flags = ptr_to_u64(OPTS_GET(opts, link_attach_flags, NULL));

	ret = sys_bpf(BPF_PROG_QUERY, &attr, attr_sz);

	OPTS_SET(opts, attach_flags, attr.query.attach_flags);
	OPTS_SET(opts, revision, attr.query.revision);
	OPTS_SET(opts, count, attr.query.count);

	return libbpf_err_errno(ret);
}

int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
		   __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt)
{
	LIBBPF_OPTS(bpf_prog_query_opts, opts);
	int ret;

	opts.query_flags = query_flags;
	opts.prog_ids = prog_ids;
	opts.prog_cnt = *prog_cnt;

	ret = bpf_prog_query_opts(target_fd, type, &opts);

	if (attach_flags)
		*attach_flags = opts.attach_flags;
	*prog_cnt = opts.prog_cnt;

	return libbpf_err_errno(ret);
}

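/* Usage sketch for bpf_prog_query() (illustrative; assumes cgroup_fd is an
 * open cgroup directory fd):
 *
 *	__u32 prog_ids[16], prog_cnt = 16;
 *
 *	if (bpf_prog_query(cgroup_fd, BPF_CGROUP_INET_INGRESS, 0, NULL,
 *			   prog_ids, &prog_cnt))
 *		return -1;
 *	// prog_cnt now holds the number of attached programs reported
 */
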
int bpf_prog_test_run_opts(int prog_fd, struct bpf_test_run_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, test);
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_test_run_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.test.prog_fd = prog_fd;
	attr.test.batch_size = OPTS_GET(opts, batch_size, 0);
	attr.test.cpu = OPTS_GET(opts, cpu, 0);
	attr.test.flags = OPTS_GET(opts, flags, 0);
	attr.test.repeat = OPTS_GET(opts, repeat, 0);
	attr.test.duration = OPTS_GET(opts, duration, 0);
	attr.test.ctx_size_in = OPTS_GET(opts, ctx_size_in, 0);
	attr.test.ctx_size_out = OPTS_GET(opts, ctx_size_out, 0);
	attr.test.data_size_in = OPTS_GET(opts, data_size_in, 0);
	attr.test.data_size_out = OPTS_GET(opts, data_size_out, 0);
	attr.test.ctx_in = ptr_to_u64(OPTS_GET(opts, ctx_in, NULL));
	attr.test.ctx_out = ptr_to_u64(OPTS_GET(opts, ctx_out, NULL));
	attr.test.data_in = ptr_to_u64(OPTS_GET(opts, data_in, NULL));
	attr.test.data_out = ptr_to_u64(OPTS_GET(opts, data_out, NULL));

	ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, attr_sz);

	OPTS_SET(opts, data_size_out, attr.test.data_size_out);
	OPTS_SET(opts, ctx_size_out, attr.test.ctx_size_out);
	OPTS_SET(opts, duration, attr.test.duration);
	OPTS_SET(opts, retval, attr.test.retval);

	return libbpf_err_errno(ret);
}

static int bpf_obj_get_next_id(__u32 start_id, __u32 *next_id, int cmd)
{
	const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
	union bpf_attr attr;
	int err;

	memset(&attr, 0, attr_sz);
	attr.start_id = start_id;

	err = sys_bpf(cmd, &attr, attr_sz);
	if (!err)
		*next_id = attr.next_id;

	return libbpf_err_errno(err);
}

int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_PROG_GET_NEXT_ID);
}

int bpf_map_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_MAP_GET_NEXT_ID);
}

int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_BTF_GET_NEXT_ID);
}

int bpf_link_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_LINK_GET_NEXT_ID);
}

int bpf_prog_get_fd_by_id_opts(__u32 id,
			       const struct bpf_get_fd_by_id_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
	union bpf_attr attr;
	int fd;

	if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.prog_id = id;
	attr.open_flags = OPTS_GET(opts, open_flags, 0);

	fd = sys_bpf_fd(BPF_PROG_GET_FD_BY_ID, &attr, attr_sz);
	return libbpf_err_errno(fd);
}

int bpf_prog_get_fd_by_id(__u32 id)
{
	return bpf_prog_get_fd_by_id_opts(id, NULL);
}

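/* Usage sketch for the *_get_next_id()/*_get_fd_by_id() pairs (illustrative):
 * walk all BPF programs loaded on the system. Iteration ends when the
 * next-id call fails (with -ENOENT once the last id has been visited).
 *
 *	__u32 id = 0;
 *	int fd;
 *
 *	while (!bpf_prog_get_next_id(id, &id)) {
 *		fd = bpf_prog_get_fd_by_id(id);
 *		if (fd < 0)
 *			continue; // e.g. program went away, or -EPERM
 *		// ... inspect the program via fd ...
 *		close(fd);
 *	}
 */
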
int bpf_map_get_fd_by_id_opts(__u32 id,
			      const struct bpf_get_fd_by_id_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
	union bpf_attr attr;
	int fd;

	if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.map_id = id;
	attr.open_flags = OPTS_GET(opts, open_flags, 0);

	fd = sys_bpf_fd(BPF_MAP_GET_FD_BY_ID, &attr, attr_sz);
	return libbpf_err_errno(fd);
}

int bpf_map_get_fd_by_id(__u32 id)
{
	return bpf_map_get_fd_by_id_opts(id, NULL);
}

int bpf_btf_get_fd_by_id_opts(__u32 id,
			      const struct bpf_get_fd_by_id_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
	union bpf_attr attr;
	int fd;

	if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.btf_id = id;
	attr.open_flags = OPTS_GET(opts, open_flags, 0);

	fd = sys_bpf_fd(BPF_BTF_GET_FD_BY_ID, &attr, attr_sz);
	return libbpf_err_errno(fd);
}

int bpf_btf_get_fd_by_id(__u32 id)
{
	return bpf_btf_get_fd_by_id_opts(id, NULL);
}

int bpf_link_get_fd_by_id_opts(__u32 id,
			       const struct bpf_get_fd_by_id_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
	union bpf_attr attr;
	int fd;

	if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.link_id = id;
	attr.open_flags = OPTS_GET(opts, open_flags, 0);

	fd = sys_bpf_fd(BPF_LINK_GET_FD_BY_ID, &attr, attr_sz);
	return libbpf_err_errno(fd);
}

int bpf_link_get_fd_by_id(__u32 id)
{
	return bpf_link_get_fd_by_id_opts(id, NULL);
}

int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len)
{
	const size_t attr_sz = offsetofend(union bpf_attr, info);
	union bpf_attr attr;
	int err;

	memset(&attr, 0, attr_sz);
	attr.info.bpf_fd = bpf_fd;
	attr.info.info_len = *info_len;
	attr.info.info = ptr_to_u64(info);

	err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, attr_sz);
	if (!err)
		*info_len = attr.info.info_len;
	return libbpf_err_errno(err);
}

int bpf_prog_get_info_by_fd(int prog_fd, struct bpf_prog_info *info, __u32 *info_len)
{
	return bpf_obj_get_info_by_fd(prog_fd, info, info_len);
}

int bpf_map_get_info_by_fd(int map_fd, struct bpf_map_info *info, __u32 *info_len)
{
	return bpf_obj_get_info_by_fd(map_fd, info, info_len);
}

int bpf_btf_get_info_by_fd(int btf_fd, struct bpf_btf_info *info, __u32 *info_len)
{
	return bpf_obj_get_info_by_fd(btf_fd, info, info_len);
}

int bpf_link_get_info_by_fd(int link_fd, struct bpf_link_info *info, __u32 *info_len)
{
	return bpf_obj_get_info_by_fd(link_fd, info, info_len);
}

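/* Usage sketch for bpf_prog_get_info_by_fd() (illustrative): fetch basic
 * program metadata. info_len is an in/out argument; the kernel reports how
 * much of the struct it actually filled in.
 *
 *	struct bpf_prog_info info = {};
 *	__u32 info_len = sizeof(info);
 *
 *	if (bpf_prog_get_info_by_fd(prog_fd, &info, &info_len))
 *		return -1;
 *	printf("prog id %u, name %s\n", info.id, info.name);
 */
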
int bpf_raw_tracepoint_open_opts(int prog_fd, struct bpf_raw_tp_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, raw_tracepoint);
	union bpf_attr attr;
	int fd;

	if (!OPTS_VALID(opts, bpf_raw_tp_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.raw_tracepoint.prog_fd = prog_fd;
	attr.raw_tracepoint.name = ptr_to_u64(OPTS_GET(opts, tp_name, NULL));
	attr.raw_tracepoint.cookie = OPTS_GET(opts, cookie, 0);

	fd = sys_bpf_fd(BPF_RAW_TRACEPOINT_OPEN, &attr, attr_sz);
	return libbpf_err_errno(fd);
}

int bpf_raw_tracepoint_open(const char *name, int prog_fd)
{
	LIBBPF_OPTS(bpf_raw_tp_opts, opts, .tp_name = name);

	return bpf_raw_tracepoint_open_opts(prog_fd, &opts);
}

int bpf_btf_load(const void *btf_data, size_t btf_size, struct bpf_btf_load_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, btf_token_fd);
	union bpf_attr attr;
	char *log_buf;
	size_t log_size;
	__u32 log_level;
	int fd;

	bump_rlimit_memlock();

	memset(&attr, 0, attr_sz);

	if (!OPTS_VALID(opts, bpf_btf_load_opts))
		return libbpf_err(-EINVAL);

	log_buf = OPTS_GET(opts, log_buf, NULL);
	log_size = OPTS_GET(opts, log_size, 0);
	log_level = OPTS_GET(opts, log_level, 0);

	if (log_size > UINT_MAX)
		return libbpf_err(-EINVAL);
	if (log_size && !log_buf)
		return libbpf_err(-EINVAL);

	attr.btf = ptr_to_u64(btf_data);
	attr.btf_size = btf_size;

	attr.btf_flags = OPTS_GET(opts, btf_flags, 0);
	attr.btf_token_fd = OPTS_GET(opts, token_fd, 0);

	/* log_level == 0 and log_buf != NULL means "try loading without
	 * log_buf, but retry with log_buf and log_level=1 on error", which is
	 * consistent across low-level and high-level BTF and program loading
	 * APIs within libbpf and provides a sensible behavior in practice
	 */
	if (log_level) {
		attr.btf_log_buf = ptr_to_u64(log_buf);
		attr.btf_log_size = (__u32)log_size;
		attr.btf_log_level = log_level;
	}

	fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz);
	if (fd < 0 && log_buf && log_level == 0) {
		attr.btf_log_buf = ptr_to_u64(log_buf);
		attr.btf_log_size = (__u32)log_size;
		attr.btf_log_level = 1;
		fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz);
	}

	OPTS_SET(opts, log_true_size, attr.btf_log_true_size);
	return libbpf_err_errno(fd);
}

int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len,
		      __u32 *prog_id, __u32 *fd_type, __u64 *probe_offset,
		      __u64 *probe_addr)
{
	const size_t attr_sz = offsetofend(union bpf_attr, task_fd_query);
	union bpf_attr attr;
	int err;

	memset(&attr, 0, attr_sz);
	attr.task_fd_query.pid = pid;
	attr.task_fd_query.fd = fd;
	attr.task_fd_query.flags = flags;
	attr.task_fd_query.buf = ptr_to_u64(buf);
	attr.task_fd_query.buf_len = *buf_len;

	err = sys_bpf(BPF_TASK_FD_QUERY, &attr, attr_sz);

	*buf_len = attr.task_fd_query.buf_len;
	*prog_id = attr.task_fd_query.prog_id;
	*fd_type = attr.task_fd_query.fd_type;
	*probe_offset = attr.task_fd_query.probe_offset;
	*probe_addr = attr.task_fd_query.probe_addr;

	return libbpf_err_errno(err);
}

int bpf_enable_stats(enum bpf_stats_type type)
{
	const size_t attr_sz = offsetofend(union bpf_attr, enable_stats);
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, attr_sz);
	attr.enable_stats.type = type;

	fd = sys_bpf_fd(BPF_ENABLE_STATS, &attr, attr_sz);
	return libbpf_err_errno(fd);
}

int bpf_prog_bind_map(int prog_fd, int map_fd,
		      const struct bpf_prog_bind_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, prog_bind_map);
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_prog_bind_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.prog_bind_map.prog_fd = prog_fd;
	attr.prog_bind_map.map_fd = map_fd;
	attr.prog_bind_map.flags = OPTS_GET(opts, flags, 0);

	ret = sys_bpf(BPF_PROG_BIND_MAP, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_token_create(int bpffs_fd, struct bpf_token_create_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, token_create);
	union bpf_attr attr;
	int fd;

	if (!OPTS_VALID(opts, bpf_token_create_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.token_create.bpffs_fd = bpffs_fd;
	attr.token_create.flags = OPTS_GET(opts, flags, 0);

	fd = sys_bpf_fd(BPF_TOKEN_CREATE, &attr, attr_sz);
	return libbpf_err_errno(fd);
}
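
/* Usage sketch for bpf_token_create() (illustrative; assumes a BPF FS
 * instance mounted with delegation mount options, here at /sys/fs/bpf):
 *
 *	int bpffs_fd = open("/sys/fs/bpf", O_RDONLY);
 *	int token_fd;
 *
 *	if (bpffs_fd < 0)
 *		return -errno;
 *	token_fd = bpf_token_create(bpffs_fd, NULL);
 *	close(bpffs_fd);
 *	// on success, token_fd can be passed via the token_fd option of
 *	// bpf_map_create(), bpf_prog_load(), or bpf_btf_load()
 */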