1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) 2 3 /* 4 * common eBPF ELF operations. 5 * 6 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org> 7 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com> 8 * Copyright (C) 2015 Huawei Inc. 9 * 10 * This program is free software; you can redistribute it and/or 11 * modify it under the terms of the GNU Lesser General Public 12 * License as published by the Free Software Foundation; 13 * version 2.1 of the License (not later!) 14 * 15 * This program is distributed in the hope that it will be useful, 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * GNU Lesser General Public License for more details. 19 * 20 * You should have received a copy of the GNU Lesser General Public 21 * License along with this program; if not, see <http://www.gnu.org/licenses> 22 */ 23 24 #include <stdlib.h> 25 #include <string.h> 26 #include <memory.h> 27 #include <unistd.h> 28 #include <asm/unistd.h> 29 #include <errno.h> 30 #include <linux/bpf.h> 31 #include <linux/filter.h> 32 #include <linux/kernel.h> 33 #include <limits.h> 34 #include <sys/resource.h> 35 #include "bpf.h" 36 #include "libbpf.h" 37 #include "libbpf_internal.h" 38 39 /* 40 * When building perf, unistd.h is overridden. __NR_bpf is 41 * required to be defined explicitly. 42 */ 43 #ifndef __NR_bpf 44 # if defined(__i386__) 45 # define __NR_bpf 357 46 # elif defined(__x86_64__) 47 # define __NR_bpf 321 48 # elif defined(__aarch64__) 49 # define __NR_bpf 280 50 # elif defined(__sparc__) 51 # define __NR_bpf 349 52 # elif defined(__s390__) 53 # define __NR_bpf 351 54 # elif defined(__arc__) 55 # define __NR_bpf 280 56 # elif defined(__mips__) && defined(_ABIO32) 57 # define __NR_bpf 4355 58 # elif defined(__mips__) && defined(_ABIN32) 59 # define __NR_bpf 6319 60 # elif defined(__mips__) && defined(_ABI64) 61 # define __NR_bpf 5315 62 # else 63 # error __NR_bpf not defined. libbpf does not support your arch. 64 # endif 65 #endif 66 67 static inline __u64 ptr_to_u64(const void *ptr) 68 { 69 return (__u64) (unsigned long) ptr; 70 } 71 72 static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, 73 unsigned int size) 74 { 75 return syscall(__NR_bpf, cmd, attr, size); 76 } 77 78 static inline int sys_bpf_fd(enum bpf_cmd cmd, union bpf_attr *attr, 79 unsigned int size) 80 { 81 int fd; 82 83 fd = sys_bpf(cmd, attr, size); 84 return ensure_good_fd(fd); 85 } 86 87 int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts) 88 { 89 int fd; 90 91 do { 92 fd = sys_bpf_fd(BPF_PROG_LOAD, attr, size); 93 } while (fd < 0 && errno == EAGAIN && --attempts > 0); 94 95 return fd; 96 } 97 98 /* Probe whether kernel switched from memlock-based (RLIMIT_MEMLOCK) to 99 * memcg-based memory accounting for BPF maps and progs. This was done in [0]. 100 * We use the support for bpf_ktime_get_coarse_ns() helper, which was added in 101 * the same 5.11 Linux release ([1]), to detect memcg-based accounting for BPF. 102 * 103 * [0] https://lore.kernel.org/bpf/20201201215900.3569844-1-guro@fb.com/ 104 * [1] d05512618056 ("bpf: Add bpf_ktime_get_coarse_ns helper") 105 */ 106 int probe_memcg_account(int token_fd) 107 { 108 const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd); 109 struct bpf_insn insns[] = { 110 BPF_EMIT_CALL(BPF_FUNC_ktime_get_coarse_ns), 111 BPF_EXIT_INSN(), 112 }; 113 size_t insn_cnt = ARRAY_SIZE(insns); 114 union bpf_attr attr; 115 int prog_fd; 116 117 /* attempt loading freplace trying to use custom BTF */ 118 memset(&attr, 0, attr_sz); 119 attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER; 120 attr.insns = ptr_to_u64(insns); 121 attr.insn_cnt = insn_cnt; 122 attr.license = ptr_to_u64("GPL"); 123 attr.prog_token_fd = token_fd; 124 if (token_fd) 125 attr.prog_flags |= BPF_F_TOKEN_FD; 126 127 prog_fd = sys_bpf_fd(BPF_PROG_LOAD, &attr, attr_sz); 128 if (prog_fd >= 0) { 129 close(prog_fd); 130 return 1; 131 } 132 return 0; 133 } 134 135 static bool memlock_bumped; 136 static rlim_t memlock_rlim = RLIM_INFINITY; 137 138 int libbpf_set_memlock_rlim(size_t memlock_bytes) 139 { 140 if (memlock_bumped) 141 return libbpf_err(-EBUSY); 142 143 memlock_rlim = memlock_bytes; 144 return 0; 145 } 146 147 int bump_rlimit_memlock(void) 148 { 149 struct rlimit rlim; 150 151 /* if kernel supports memcg-based accounting, skip bumping RLIMIT_MEMLOCK */ 152 if (memlock_bumped || feat_supported(NULL, FEAT_MEMCG_ACCOUNT)) 153 return 0; 154 155 memlock_bumped = true; 156 157 /* zero memlock_rlim_max disables auto-bumping RLIMIT_MEMLOCK */ 158 if (memlock_rlim == 0) 159 return 0; 160 161 rlim.rlim_cur = rlim.rlim_max = memlock_rlim; 162 if (setrlimit(RLIMIT_MEMLOCK, &rlim)) 163 return -errno; 164 165 return 0; 166 } 167 168 int bpf_map_create(enum bpf_map_type map_type, 169 const char *map_name, 170 __u32 key_size, 171 __u32 value_size, 172 __u32 max_entries, 173 const struct bpf_map_create_opts *opts) 174 { 175 const size_t attr_sz = offsetofend(union bpf_attr, map_token_fd); 176 union bpf_attr attr; 177 int fd; 178 179 bump_rlimit_memlock(); 180 181 memset(&attr, 0, attr_sz); 182 183 if (!OPTS_VALID(opts, bpf_map_create_opts)) 184 return libbpf_err(-EINVAL); 185 186 attr.map_type = map_type; 187 if (map_name && feat_supported(NULL, FEAT_PROG_NAME)) 188 libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name)); 189 attr.key_size = key_size; 190 attr.value_size = value_size; 191 attr.max_entries = max_entries; 192 193 attr.btf_fd = OPTS_GET(opts, btf_fd, 0); 194 attr.btf_key_type_id = OPTS_GET(opts, btf_key_type_id, 0); 195 attr.btf_value_type_id = OPTS_GET(opts, btf_value_type_id, 0); 196 attr.btf_vmlinux_value_type_id = OPTS_GET(opts, btf_vmlinux_value_type_id, 0); 197 attr.value_type_btf_obj_fd = OPTS_GET(opts, value_type_btf_obj_fd, 0); 198 199 attr.inner_map_fd = OPTS_GET(opts, inner_map_fd, 0); 200 attr.map_flags = OPTS_GET(opts, map_flags, 0); 201 attr.map_extra = OPTS_GET(opts, map_extra, 0); 202 attr.numa_node = OPTS_GET(opts, numa_node, 0); 203 attr.map_ifindex = OPTS_GET(opts, map_ifindex, 0); 204 205 attr.map_token_fd = OPTS_GET(opts, token_fd, 0); 206 207 fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, attr_sz); 208 return libbpf_err_errno(fd); 209 } 210 211 static void * 212 alloc_zero_tailing_info(const void *orecord, __u32 cnt, 213 __u32 actual_rec_size, __u32 expected_rec_size) 214 { 215 __u64 info_len = (__u64)actual_rec_size * cnt; 216 void *info, *nrecord; 217 int i; 218 219 info = malloc(info_len); 220 if (!info) 221 return NULL; 222 223 /* zero out bytes kernel does not understand */ 224 nrecord = info; 225 for (i = 0; i < cnt; i++) { 226 memcpy(nrecord, orecord, expected_rec_size); 227 memset(nrecord + expected_rec_size, 0, 228 actual_rec_size - expected_rec_size); 229 orecord += actual_rec_size; 230 nrecord += actual_rec_size; 231 } 232 233 return info; 234 } 235 236 int bpf_prog_load(enum bpf_prog_type prog_type, 237 const char *prog_name, const char *license, 238 const struct bpf_insn *insns, size_t insn_cnt, 239 struct bpf_prog_load_opts *opts) 240 { 241 const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd); 242 void *finfo = NULL, *linfo = NULL; 243 const char *func_info, *line_info; 244 __u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd; 245 __u32 func_info_rec_size, line_info_rec_size; 246 int fd, attempts; 247 union bpf_attr attr; 248 char *log_buf; 249 250 bump_rlimit_memlock(); 251 252 if (!OPTS_VALID(opts, bpf_prog_load_opts)) 253 return libbpf_err(-EINVAL); 254 255 attempts = OPTS_GET(opts, attempts, 0); 256 if (attempts < 0) 257 return libbpf_err(-EINVAL); 258 if (attempts == 0) 259 attempts = PROG_LOAD_ATTEMPTS; 260 261 memset(&attr, 0, attr_sz); 262 263 attr.prog_type = prog_type; 264 attr.expected_attach_type = OPTS_GET(opts, expected_attach_type, 0); 265 266 attr.prog_btf_fd = OPTS_GET(opts, prog_btf_fd, 0); 267 attr.prog_flags = OPTS_GET(opts, prog_flags, 0); 268 attr.prog_ifindex = OPTS_GET(opts, prog_ifindex, 0); 269 attr.kern_version = OPTS_GET(opts, kern_version, 0); 270 attr.prog_token_fd = OPTS_GET(opts, token_fd, 0); 271 272 if (prog_name && feat_supported(NULL, FEAT_PROG_NAME)) 273 libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name)); 274 attr.license = ptr_to_u64(license); 275 276 if (insn_cnt > UINT_MAX) 277 return libbpf_err(-E2BIG); 278 279 attr.insns = ptr_to_u64(insns); 280 attr.insn_cnt = (__u32)insn_cnt; 281 282 attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0); 283 attach_btf_obj_fd = OPTS_GET(opts, attach_btf_obj_fd, 0); 284 285 if (attach_prog_fd && attach_btf_obj_fd) 286 return libbpf_err(-EINVAL); 287 288 attr.attach_btf_id = OPTS_GET(opts, attach_btf_id, 0); 289 if (attach_prog_fd) 290 attr.attach_prog_fd = attach_prog_fd; 291 else 292 attr.attach_btf_obj_fd = attach_btf_obj_fd; 293 294 log_buf = OPTS_GET(opts, log_buf, NULL); 295 log_size = OPTS_GET(opts, log_size, 0); 296 log_level = OPTS_GET(opts, log_level, 0); 297 298 if (!!log_buf != !!log_size) 299 return libbpf_err(-EINVAL); 300 301 func_info_rec_size = OPTS_GET(opts, func_info_rec_size, 0); 302 func_info = OPTS_GET(opts, func_info, NULL); 303 attr.func_info_rec_size = func_info_rec_size; 304 attr.func_info = ptr_to_u64(func_info); 305 attr.func_info_cnt = OPTS_GET(opts, func_info_cnt, 0); 306 307 line_info_rec_size = OPTS_GET(opts, line_info_rec_size, 0); 308 line_info = OPTS_GET(opts, line_info, NULL); 309 attr.line_info_rec_size = line_info_rec_size; 310 attr.line_info = ptr_to_u64(line_info); 311 attr.line_info_cnt = OPTS_GET(opts, line_info_cnt, 0); 312 313 attr.fd_array = ptr_to_u64(OPTS_GET(opts, fd_array, NULL)); 314 315 if (log_level) { 316 attr.log_buf = ptr_to_u64(log_buf); 317 attr.log_size = log_size; 318 attr.log_level = log_level; 319 } 320 321 fd = sys_bpf_prog_load(&attr, attr_sz, attempts); 322 OPTS_SET(opts, log_true_size, attr.log_true_size); 323 if (fd >= 0) 324 return fd; 325 326 /* After bpf_prog_load, the kernel may modify certain attributes 327 * to give user space a hint how to deal with loading failure. 328 * Check to see whether we can make some changes and load again. 329 */ 330 while (errno == E2BIG && (!finfo || !linfo)) { 331 if (!finfo && attr.func_info_cnt && 332 attr.func_info_rec_size < func_info_rec_size) { 333 /* try with corrected func info records */ 334 finfo = alloc_zero_tailing_info(func_info, 335 attr.func_info_cnt, 336 func_info_rec_size, 337 attr.func_info_rec_size); 338 if (!finfo) { 339 errno = E2BIG; 340 goto done; 341 } 342 343 attr.func_info = ptr_to_u64(finfo); 344 attr.func_info_rec_size = func_info_rec_size; 345 } else if (!linfo && attr.line_info_cnt && 346 attr.line_info_rec_size < line_info_rec_size) { 347 linfo = alloc_zero_tailing_info(line_info, 348 attr.line_info_cnt, 349 line_info_rec_size, 350 attr.line_info_rec_size); 351 if (!linfo) { 352 errno = E2BIG; 353 goto done; 354 } 355 356 attr.line_info = ptr_to_u64(linfo); 357 attr.line_info_rec_size = line_info_rec_size; 358 } else { 359 break; 360 } 361 362 fd = sys_bpf_prog_load(&attr, attr_sz, attempts); 363 OPTS_SET(opts, log_true_size, attr.log_true_size); 364 if (fd >= 0) 365 goto done; 366 } 367 368 if (log_level == 0 && log_buf) { 369 /* log_level == 0 with non-NULL log_buf requires retrying on error 370 * with log_level == 1 and log_buf/log_buf_size set, to get details of 371 * failure 372 */ 373 attr.log_buf = ptr_to_u64(log_buf); 374 attr.log_size = log_size; 375 attr.log_level = 1; 376 377 fd = sys_bpf_prog_load(&attr, attr_sz, attempts); 378 OPTS_SET(opts, log_true_size, attr.log_true_size); 379 } 380 done: 381 /* free() doesn't affect errno, so we don't need to restore it */ 382 free(finfo); 383 free(linfo); 384 return libbpf_err_errno(fd); 385 } 386 387 int bpf_map_update_elem(int fd, const void *key, const void *value, 388 __u64 flags) 389 { 390 const size_t attr_sz = offsetofend(union bpf_attr, flags); 391 union bpf_attr attr; 392 int ret; 393 394 memset(&attr, 0, attr_sz); 395 attr.map_fd = fd; 396 attr.key = ptr_to_u64(key); 397 attr.value = ptr_to_u64(value); 398 attr.flags = flags; 399 400 ret = sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, attr_sz); 401 return libbpf_err_errno(ret); 402 } 403 404 int bpf_map_lookup_elem(int fd, const void *key, void *value) 405 { 406 const size_t attr_sz = offsetofend(union bpf_attr, flags); 407 union bpf_attr attr; 408 int ret; 409 410 memset(&attr, 0, attr_sz); 411 attr.map_fd = fd; 412 attr.key = ptr_to_u64(key); 413 attr.value = ptr_to_u64(value); 414 415 ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, attr_sz); 416 return libbpf_err_errno(ret); 417 } 418 419 int bpf_map_lookup_elem_flags(int fd, const void *key, void *value, __u64 flags) 420 { 421 const size_t attr_sz = offsetofend(union bpf_attr, flags); 422 union bpf_attr attr; 423 int ret; 424 425 memset(&attr, 0, attr_sz); 426 attr.map_fd = fd; 427 attr.key = ptr_to_u64(key); 428 attr.value = ptr_to_u64(value); 429 attr.flags = flags; 430 431 ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, attr_sz); 432 return libbpf_err_errno(ret); 433 } 434 435 int bpf_map_lookup_and_delete_elem(int fd, const void *key, void *value) 436 { 437 const size_t attr_sz = offsetofend(union bpf_attr, flags); 438 union bpf_attr attr; 439 int ret; 440 441 memset(&attr, 0, attr_sz); 442 attr.map_fd = fd; 443 attr.key = ptr_to_u64(key); 444 attr.value = ptr_to_u64(value); 445 446 ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, attr_sz); 447 return libbpf_err_errno(ret); 448 } 449 450 int bpf_map_lookup_and_delete_elem_flags(int fd, const void *key, void *value, __u64 flags) 451 { 452 const size_t attr_sz = offsetofend(union bpf_attr, flags); 453 union bpf_attr attr; 454 int ret; 455 456 memset(&attr, 0, attr_sz); 457 attr.map_fd = fd; 458 attr.key = ptr_to_u64(key); 459 attr.value = ptr_to_u64(value); 460 attr.flags = flags; 461 462 ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, attr_sz); 463 return libbpf_err_errno(ret); 464 } 465 466 int bpf_map_delete_elem(int fd, const void *key) 467 { 468 const size_t attr_sz = offsetofend(union bpf_attr, flags); 469 union bpf_attr attr; 470 int ret; 471 472 memset(&attr, 0, attr_sz); 473 attr.map_fd = fd; 474 attr.key = ptr_to_u64(key); 475 476 ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, attr_sz); 477 return libbpf_err_errno(ret); 478 } 479 480 int bpf_map_delete_elem_flags(int fd, const void *key, __u64 flags) 481 { 482 const size_t attr_sz = offsetofend(union bpf_attr, flags); 483 union bpf_attr attr; 484 int ret; 485 486 memset(&attr, 0, attr_sz); 487 attr.map_fd = fd; 488 attr.key = ptr_to_u64(key); 489 attr.flags = flags; 490 491 ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, attr_sz); 492 return libbpf_err_errno(ret); 493 } 494 495 int bpf_map_get_next_key(int fd, const void *key, void *next_key) 496 { 497 const size_t attr_sz = offsetofend(union bpf_attr, next_key); 498 union bpf_attr attr; 499 int ret; 500 501 memset(&attr, 0, attr_sz); 502 attr.map_fd = fd; 503 attr.key = ptr_to_u64(key); 504 attr.next_key = ptr_to_u64(next_key); 505 506 ret = sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, attr_sz); 507 return libbpf_err_errno(ret); 508 } 509 510 int bpf_map_freeze(int fd) 511 { 512 const size_t attr_sz = offsetofend(union bpf_attr, map_fd); 513 union bpf_attr attr; 514 int ret; 515 516 memset(&attr, 0, attr_sz); 517 attr.map_fd = fd; 518 519 ret = sys_bpf(BPF_MAP_FREEZE, &attr, attr_sz); 520 return libbpf_err_errno(ret); 521 } 522 523 static int bpf_map_batch_common(int cmd, int fd, void *in_batch, 524 void *out_batch, void *keys, void *values, 525 __u32 *count, 526 const struct bpf_map_batch_opts *opts) 527 { 528 const size_t attr_sz = offsetofend(union bpf_attr, batch); 529 union bpf_attr attr; 530 int ret; 531 532 if (!OPTS_VALID(opts, bpf_map_batch_opts)) 533 return libbpf_err(-EINVAL); 534 535 memset(&attr, 0, attr_sz); 536 attr.batch.map_fd = fd; 537 attr.batch.in_batch = ptr_to_u64(in_batch); 538 attr.batch.out_batch = ptr_to_u64(out_batch); 539 attr.batch.keys = ptr_to_u64(keys); 540 attr.batch.values = ptr_to_u64(values); 541 attr.batch.count = *count; 542 attr.batch.elem_flags = OPTS_GET(opts, elem_flags, 0); 543 attr.batch.flags = OPTS_GET(opts, flags, 0); 544 545 ret = sys_bpf(cmd, &attr, attr_sz); 546 *count = attr.batch.count; 547 548 return libbpf_err_errno(ret); 549 } 550 551 int bpf_map_delete_batch(int fd, const void *keys, __u32 *count, 552 const struct bpf_map_batch_opts *opts) 553 { 554 return bpf_map_batch_common(BPF_MAP_DELETE_BATCH, fd, NULL, 555 NULL, (void *)keys, NULL, count, opts); 556 } 557 558 int bpf_map_lookup_batch(int fd, void *in_batch, void *out_batch, void *keys, 559 void *values, __u32 *count, 560 const struct bpf_map_batch_opts *opts) 561 { 562 return bpf_map_batch_common(BPF_MAP_LOOKUP_BATCH, fd, in_batch, 563 out_batch, keys, values, count, opts); 564 } 565 566 int bpf_map_lookup_and_delete_batch(int fd, void *in_batch, void *out_batch, 567 void *keys, void *values, __u32 *count, 568 const struct bpf_map_batch_opts *opts) 569 { 570 return bpf_map_batch_common(BPF_MAP_LOOKUP_AND_DELETE_BATCH, 571 fd, in_batch, out_batch, keys, values, 572 count, opts); 573 } 574 575 int bpf_map_update_batch(int fd, const void *keys, const void *values, __u32 *count, 576 const struct bpf_map_batch_opts *opts) 577 { 578 return bpf_map_batch_common(BPF_MAP_UPDATE_BATCH, fd, NULL, NULL, 579 (void *)keys, (void *)values, count, opts); 580 } 581 582 int bpf_obj_pin_opts(int fd, const char *pathname, const struct bpf_obj_pin_opts *opts) 583 { 584 const size_t attr_sz = offsetofend(union bpf_attr, path_fd); 585 union bpf_attr attr; 586 int ret; 587 588 if (!OPTS_VALID(opts, bpf_obj_pin_opts)) 589 return libbpf_err(-EINVAL); 590 591 memset(&attr, 0, attr_sz); 592 attr.path_fd = OPTS_GET(opts, path_fd, 0); 593 attr.pathname = ptr_to_u64((void *)pathname); 594 attr.file_flags = OPTS_GET(opts, file_flags, 0); 595 attr.bpf_fd = fd; 596 597 ret = sys_bpf(BPF_OBJ_PIN, &attr, attr_sz); 598 return libbpf_err_errno(ret); 599 } 600 601 int bpf_obj_pin(int fd, const char *pathname) 602 { 603 return bpf_obj_pin_opts(fd, pathname, NULL); 604 } 605 606 int bpf_obj_get(const char *pathname) 607 { 608 return bpf_obj_get_opts(pathname, NULL); 609 } 610 611 int bpf_obj_get_opts(const char *pathname, const struct bpf_obj_get_opts *opts) 612 { 613 const size_t attr_sz = offsetofend(union bpf_attr, path_fd); 614 union bpf_attr attr; 615 int fd; 616 617 if (!OPTS_VALID(opts, bpf_obj_get_opts)) 618 return libbpf_err(-EINVAL); 619 620 memset(&attr, 0, attr_sz); 621 attr.path_fd = OPTS_GET(opts, path_fd, 0); 622 attr.pathname = ptr_to_u64((void *)pathname); 623 attr.file_flags = OPTS_GET(opts, file_flags, 0); 624 625 fd = sys_bpf_fd(BPF_OBJ_GET, &attr, attr_sz); 626 return libbpf_err_errno(fd); 627 } 628 629 int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type, 630 unsigned int flags) 631 { 632 DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts, opts, 633 .flags = flags, 634 ); 635 636 return bpf_prog_attach_opts(prog_fd, target_fd, type, &opts); 637 } 638 639 int bpf_prog_attach_opts(int prog_fd, int target, enum bpf_attach_type type, 640 const struct bpf_prog_attach_opts *opts) 641 { 642 const size_t attr_sz = offsetofend(union bpf_attr, expected_revision); 643 __u32 relative_id, flags; 644 int ret, relative_fd; 645 union bpf_attr attr; 646 647 if (!OPTS_VALID(opts, bpf_prog_attach_opts)) 648 return libbpf_err(-EINVAL); 649 650 relative_id = OPTS_GET(opts, relative_id, 0); 651 relative_fd = OPTS_GET(opts, relative_fd, 0); 652 flags = OPTS_GET(opts, flags, 0); 653 654 /* validate we don't have unexpected combinations of non-zero fields */ 655 if (relative_fd && relative_id) 656 return libbpf_err(-EINVAL); 657 658 memset(&attr, 0, attr_sz); 659 attr.target_fd = target; 660 attr.attach_bpf_fd = prog_fd; 661 attr.attach_type = type; 662 attr.replace_bpf_fd = OPTS_GET(opts, replace_fd, 0); 663 attr.expected_revision = OPTS_GET(opts, expected_revision, 0); 664 665 if (relative_id) { 666 attr.attach_flags = flags | BPF_F_ID; 667 attr.relative_id = relative_id; 668 } else { 669 attr.attach_flags = flags; 670 attr.relative_fd = relative_fd; 671 } 672 673 ret = sys_bpf(BPF_PROG_ATTACH, &attr, attr_sz); 674 return libbpf_err_errno(ret); 675 } 676 677 int bpf_prog_detach_opts(int prog_fd, int target, enum bpf_attach_type type, 678 const struct bpf_prog_detach_opts *opts) 679 { 680 const size_t attr_sz = offsetofend(union bpf_attr, expected_revision); 681 __u32 relative_id, flags; 682 int ret, relative_fd; 683 union bpf_attr attr; 684 685 if (!OPTS_VALID(opts, bpf_prog_detach_opts)) 686 return libbpf_err(-EINVAL); 687 688 relative_id = OPTS_GET(opts, relative_id, 0); 689 relative_fd = OPTS_GET(opts, relative_fd, 0); 690 flags = OPTS_GET(opts, flags, 0); 691 692 /* validate we don't have unexpected combinations of non-zero fields */ 693 if (relative_fd && relative_id) 694 return libbpf_err(-EINVAL); 695 696 memset(&attr, 0, attr_sz); 697 attr.target_fd = target; 698 attr.attach_bpf_fd = prog_fd; 699 attr.attach_type = type; 700 attr.expected_revision = OPTS_GET(opts, expected_revision, 0); 701 702 if (relative_id) { 703 attr.attach_flags = flags | BPF_F_ID; 704 attr.relative_id = relative_id; 705 } else { 706 attr.attach_flags = flags; 707 attr.relative_fd = relative_fd; 708 } 709 710 ret = sys_bpf(BPF_PROG_DETACH, &attr, attr_sz); 711 return libbpf_err_errno(ret); 712 } 713 714 int bpf_prog_detach(int target_fd, enum bpf_attach_type type) 715 { 716 return bpf_prog_detach_opts(0, target_fd, type, NULL); 717 } 718 719 int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type) 720 { 721 return bpf_prog_detach_opts(prog_fd, target_fd, type, NULL); 722 } 723 724 int bpf_link_create(int prog_fd, int target_fd, 725 enum bpf_attach_type attach_type, 726 const struct bpf_link_create_opts *opts) 727 { 728 const size_t attr_sz = offsetofend(union bpf_attr, link_create); 729 __u32 target_btf_id, iter_info_len, relative_id; 730 int fd, err, relative_fd; 731 union bpf_attr attr; 732 733 if (!OPTS_VALID(opts, bpf_link_create_opts)) 734 return libbpf_err(-EINVAL); 735 736 iter_info_len = OPTS_GET(opts, iter_info_len, 0); 737 target_btf_id = OPTS_GET(opts, target_btf_id, 0); 738 739 /* validate we don't have unexpected combinations of non-zero fields */ 740 if (iter_info_len || target_btf_id) { 741 if (iter_info_len && target_btf_id) 742 return libbpf_err(-EINVAL); 743 if (!OPTS_ZEROED(opts, target_btf_id)) 744 return libbpf_err(-EINVAL); 745 } 746 747 memset(&attr, 0, attr_sz); 748 attr.link_create.prog_fd = prog_fd; 749 attr.link_create.target_fd = target_fd; 750 attr.link_create.attach_type = attach_type; 751 attr.link_create.flags = OPTS_GET(opts, flags, 0); 752 753 if (target_btf_id) { 754 attr.link_create.target_btf_id = target_btf_id; 755 goto proceed; 756 } 757 758 switch (attach_type) { 759 case BPF_TRACE_ITER: 760 attr.link_create.iter_info = ptr_to_u64(OPTS_GET(opts, iter_info, (void *)0)); 761 attr.link_create.iter_info_len = iter_info_len; 762 break; 763 case BPF_PERF_EVENT: 764 attr.link_create.perf_event.bpf_cookie = OPTS_GET(opts, perf_event.bpf_cookie, 0); 765 if (!OPTS_ZEROED(opts, perf_event)) 766 return libbpf_err(-EINVAL); 767 break; 768 case BPF_TRACE_KPROBE_MULTI: 769 case BPF_TRACE_KPROBE_SESSION: 770 attr.link_create.kprobe_multi.flags = OPTS_GET(opts, kprobe_multi.flags, 0); 771 attr.link_create.kprobe_multi.cnt = OPTS_GET(opts, kprobe_multi.cnt, 0); 772 attr.link_create.kprobe_multi.syms = ptr_to_u64(OPTS_GET(opts, kprobe_multi.syms, 0)); 773 attr.link_create.kprobe_multi.addrs = ptr_to_u64(OPTS_GET(opts, kprobe_multi.addrs, 0)); 774 attr.link_create.kprobe_multi.cookies = ptr_to_u64(OPTS_GET(opts, kprobe_multi.cookies, 0)); 775 if (!OPTS_ZEROED(opts, kprobe_multi)) 776 return libbpf_err(-EINVAL); 777 break; 778 case BPF_TRACE_UPROBE_MULTI: 779 case BPF_TRACE_UPROBE_SESSION: 780 attr.link_create.uprobe_multi.flags = OPTS_GET(opts, uprobe_multi.flags, 0); 781 attr.link_create.uprobe_multi.cnt = OPTS_GET(opts, uprobe_multi.cnt, 0); 782 attr.link_create.uprobe_multi.path = ptr_to_u64(OPTS_GET(opts, uprobe_multi.path, 0)); 783 attr.link_create.uprobe_multi.offsets = ptr_to_u64(OPTS_GET(opts, uprobe_multi.offsets, 0)); 784 attr.link_create.uprobe_multi.ref_ctr_offsets = ptr_to_u64(OPTS_GET(opts, uprobe_multi.ref_ctr_offsets, 0)); 785 attr.link_create.uprobe_multi.cookies = ptr_to_u64(OPTS_GET(opts, uprobe_multi.cookies, 0)); 786 attr.link_create.uprobe_multi.pid = OPTS_GET(opts, uprobe_multi.pid, 0); 787 if (!OPTS_ZEROED(opts, uprobe_multi)) 788 return libbpf_err(-EINVAL); 789 break; 790 case BPF_TRACE_RAW_TP: 791 case BPF_TRACE_FENTRY: 792 case BPF_TRACE_FEXIT: 793 case BPF_MODIFY_RETURN: 794 case BPF_LSM_MAC: 795 attr.link_create.tracing.cookie = OPTS_GET(opts, tracing.cookie, 0); 796 if (!OPTS_ZEROED(opts, tracing)) 797 return libbpf_err(-EINVAL); 798 break; 799 case BPF_NETFILTER: 800 attr.link_create.netfilter.pf = OPTS_GET(opts, netfilter.pf, 0); 801 attr.link_create.netfilter.hooknum = OPTS_GET(opts, netfilter.hooknum, 0); 802 attr.link_create.netfilter.priority = OPTS_GET(opts, netfilter.priority, 0); 803 attr.link_create.netfilter.flags = OPTS_GET(opts, netfilter.flags, 0); 804 if (!OPTS_ZEROED(opts, netfilter)) 805 return libbpf_err(-EINVAL); 806 break; 807 case BPF_TCX_INGRESS: 808 case BPF_TCX_EGRESS: 809 relative_fd = OPTS_GET(opts, tcx.relative_fd, 0); 810 relative_id = OPTS_GET(opts, tcx.relative_id, 0); 811 if (relative_fd && relative_id) 812 return libbpf_err(-EINVAL); 813 if (relative_id) { 814 attr.link_create.tcx.relative_id = relative_id; 815 attr.link_create.flags |= BPF_F_ID; 816 } else { 817 attr.link_create.tcx.relative_fd = relative_fd; 818 } 819 attr.link_create.tcx.expected_revision = OPTS_GET(opts, tcx.expected_revision, 0); 820 if (!OPTS_ZEROED(opts, tcx)) 821 return libbpf_err(-EINVAL); 822 break; 823 case BPF_NETKIT_PRIMARY: 824 case BPF_NETKIT_PEER: 825 relative_fd = OPTS_GET(opts, netkit.relative_fd, 0); 826 relative_id = OPTS_GET(opts, netkit.relative_id, 0); 827 if (relative_fd && relative_id) 828 return libbpf_err(-EINVAL); 829 if (relative_id) { 830 attr.link_create.netkit.relative_id = relative_id; 831 attr.link_create.flags |= BPF_F_ID; 832 } else { 833 attr.link_create.netkit.relative_fd = relative_fd; 834 } 835 attr.link_create.netkit.expected_revision = OPTS_GET(opts, netkit.expected_revision, 0); 836 if (!OPTS_ZEROED(opts, netkit)) 837 return libbpf_err(-EINVAL); 838 break; 839 default: 840 if (!OPTS_ZEROED(opts, flags)) 841 return libbpf_err(-EINVAL); 842 break; 843 } 844 proceed: 845 fd = sys_bpf_fd(BPF_LINK_CREATE, &attr, attr_sz); 846 if (fd >= 0) 847 return fd; 848 /* we'll get EINVAL if LINK_CREATE doesn't support attaching fentry 849 * and other similar programs 850 */ 851 err = -errno; 852 if (err != -EINVAL) 853 return libbpf_err(err); 854 855 /* if user used features not supported by 856 * BPF_RAW_TRACEPOINT_OPEN command, then just give up immediately 857 */ 858 if (attr.link_create.target_fd || attr.link_create.target_btf_id) 859 return libbpf_err(err); 860 if (!OPTS_ZEROED(opts, sz)) 861 return libbpf_err(err); 862 863 /* otherwise, for few select kinds of programs that can be 864 * attached using BPF_RAW_TRACEPOINT_OPEN command, try that as 865 * a fallback for older kernels 866 */ 867 switch (attach_type) { 868 case BPF_TRACE_RAW_TP: 869 case BPF_LSM_MAC: 870 case BPF_TRACE_FENTRY: 871 case BPF_TRACE_FEXIT: 872 case BPF_MODIFY_RETURN: 873 return bpf_raw_tracepoint_open(NULL, prog_fd); 874 default: 875 return libbpf_err(err); 876 } 877 } 878 879 int bpf_link_detach(int link_fd) 880 { 881 const size_t attr_sz = offsetofend(union bpf_attr, link_detach); 882 union bpf_attr attr; 883 int ret; 884 885 memset(&attr, 0, attr_sz); 886 attr.link_detach.link_fd = link_fd; 887 888 ret = sys_bpf(BPF_LINK_DETACH, &attr, attr_sz); 889 return libbpf_err_errno(ret); 890 } 891 892 int bpf_link_update(int link_fd, int new_prog_fd, 893 const struct bpf_link_update_opts *opts) 894 { 895 const size_t attr_sz = offsetofend(union bpf_attr, link_update); 896 union bpf_attr attr; 897 int ret; 898 899 if (!OPTS_VALID(opts, bpf_link_update_opts)) 900 return libbpf_err(-EINVAL); 901 902 if (OPTS_GET(opts, old_prog_fd, 0) && OPTS_GET(opts, old_map_fd, 0)) 903 return libbpf_err(-EINVAL); 904 905 memset(&attr, 0, attr_sz); 906 attr.link_update.link_fd = link_fd; 907 attr.link_update.new_prog_fd = new_prog_fd; 908 attr.link_update.flags = OPTS_GET(opts, flags, 0); 909 if (OPTS_GET(opts, old_prog_fd, 0)) 910 attr.link_update.old_prog_fd = OPTS_GET(opts, old_prog_fd, 0); 911 else if (OPTS_GET(opts, old_map_fd, 0)) 912 attr.link_update.old_map_fd = OPTS_GET(opts, old_map_fd, 0); 913 914 ret = sys_bpf(BPF_LINK_UPDATE, &attr, attr_sz); 915 return libbpf_err_errno(ret); 916 } 917 918 int bpf_iter_create(int link_fd) 919 { 920 const size_t attr_sz = offsetofend(union bpf_attr, iter_create); 921 union bpf_attr attr; 922 int fd; 923 924 memset(&attr, 0, attr_sz); 925 attr.iter_create.link_fd = link_fd; 926 927 fd = sys_bpf_fd(BPF_ITER_CREATE, &attr, attr_sz); 928 return libbpf_err_errno(fd); 929 } 930 931 int bpf_prog_query_opts(int target, enum bpf_attach_type type, 932 struct bpf_prog_query_opts *opts) 933 { 934 const size_t attr_sz = offsetofend(union bpf_attr, query); 935 union bpf_attr attr; 936 int ret; 937 938 if (!OPTS_VALID(opts, bpf_prog_query_opts)) 939 return libbpf_err(-EINVAL); 940 941 memset(&attr, 0, attr_sz); 942 attr.query.target_fd = target; 943 attr.query.attach_type = type; 944 attr.query.query_flags = OPTS_GET(opts, query_flags, 0); 945 attr.query.count = OPTS_GET(opts, count, 0); 946 attr.query.prog_ids = ptr_to_u64(OPTS_GET(opts, prog_ids, NULL)); 947 attr.query.link_ids = ptr_to_u64(OPTS_GET(opts, link_ids, NULL)); 948 attr.query.prog_attach_flags = ptr_to_u64(OPTS_GET(opts, prog_attach_flags, NULL)); 949 attr.query.link_attach_flags = ptr_to_u64(OPTS_GET(opts, link_attach_flags, NULL)); 950 951 ret = sys_bpf(BPF_PROG_QUERY, &attr, attr_sz); 952 953 OPTS_SET(opts, attach_flags, attr.query.attach_flags); 954 OPTS_SET(opts, revision, attr.query.revision); 955 OPTS_SET(opts, count, attr.query.count); 956 957 return libbpf_err_errno(ret); 958 } 959 960 int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags, 961 __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt) 962 { 963 LIBBPF_OPTS(bpf_prog_query_opts, opts); 964 int ret; 965 966 opts.query_flags = query_flags; 967 opts.prog_ids = prog_ids; 968 opts.prog_cnt = *prog_cnt; 969 970 ret = bpf_prog_query_opts(target_fd, type, &opts); 971 972 if (attach_flags) 973 *attach_flags = opts.attach_flags; 974 *prog_cnt = opts.prog_cnt; 975 976 return libbpf_err_errno(ret); 977 } 978 979 int bpf_prog_test_run_opts(int prog_fd, struct bpf_test_run_opts *opts) 980 { 981 const size_t attr_sz = offsetofend(union bpf_attr, test); 982 union bpf_attr attr; 983 int ret; 984 985 if (!OPTS_VALID(opts, bpf_test_run_opts)) 986 return libbpf_err(-EINVAL); 987 988 memset(&attr, 0, attr_sz); 989 attr.test.prog_fd = prog_fd; 990 attr.test.batch_size = OPTS_GET(opts, batch_size, 0); 991 attr.test.cpu = OPTS_GET(opts, cpu, 0); 992 attr.test.flags = OPTS_GET(opts, flags, 0); 993 attr.test.repeat = OPTS_GET(opts, repeat, 0); 994 attr.test.duration = OPTS_GET(opts, duration, 0); 995 attr.test.ctx_size_in = OPTS_GET(opts, ctx_size_in, 0); 996 attr.test.ctx_size_out = OPTS_GET(opts, ctx_size_out, 0); 997 attr.test.data_size_in = OPTS_GET(opts, data_size_in, 0); 998 attr.test.data_size_out = OPTS_GET(opts, data_size_out, 0); 999 attr.test.ctx_in = ptr_to_u64(OPTS_GET(opts, ctx_in, NULL)); 1000 attr.test.ctx_out = ptr_to_u64(OPTS_GET(opts, ctx_out, NULL)); 1001 attr.test.data_in = ptr_to_u64(OPTS_GET(opts, data_in, NULL)); 1002 attr.test.data_out = ptr_to_u64(OPTS_GET(opts, data_out, NULL)); 1003 1004 ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, attr_sz); 1005 1006 OPTS_SET(opts, data_size_out, attr.test.data_size_out); 1007 OPTS_SET(opts, ctx_size_out, attr.test.ctx_size_out); 1008 OPTS_SET(opts, duration, attr.test.duration); 1009 OPTS_SET(opts, retval, attr.test.retval); 1010 1011 return libbpf_err_errno(ret); 1012 } 1013 1014 static int bpf_obj_get_next_id(__u32 start_id, __u32 *next_id, int cmd) 1015 { 1016 const size_t attr_sz = offsetofend(union bpf_attr, open_flags); 1017 union bpf_attr attr; 1018 int err; 1019 1020 memset(&attr, 0, attr_sz); 1021 attr.start_id = start_id; 1022 1023 err = sys_bpf(cmd, &attr, attr_sz); 1024 if (!err) 1025 *next_id = attr.next_id; 1026 1027 return libbpf_err_errno(err); 1028 } 1029 1030 int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id) 1031 { 1032 return bpf_obj_get_next_id(start_id, next_id, BPF_PROG_GET_NEXT_ID); 1033 } 1034 1035 int bpf_map_get_next_id(__u32 start_id, __u32 *next_id) 1036 { 1037 return bpf_obj_get_next_id(start_id, next_id, BPF_MAP_GET_NEXT_ID); 1038 } 1039 1040 int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id) 1041 { 1042 return bpf_obj_get_next_id(start_id, next_id, BPF_BTF_GET_NEXT_ID); 1043 } 1044 1045 int bpf_link_get_next_id(__u32 start_id, __u32 *next_id) 1046 { 1047 return bpf_obj_get_next_id(start_id, next_id, BPF_LINK_GET_NEXT_ID); 1048 } 1049 1050 int bpf_prog_get_fd_by_id_opts(__u32 id, 1051 const struct bpf_get_fd_by_id_opts *opts) 1052 { 1053 const size_t attr_sz = offsetofend(union bpf_attr, open_flags); 1054 union bpf_attr attr; 1055 int fd; 1056 1057 if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts)) 1058 return libbpf_err(-EINVAL); 1059 1060 memset(&attr, 0, attr_sz); 1061 attr.prog_id = id; 1062 attr.open_flags = OPTS_GET(opts, open_flags, 0); 1063 1064 fd = sys_bpf_fd(BPF_PROG_GET_FD_BY_ID, &attr, attr_sz); 1065 return libbpf_err_errno(fd); 1066 } 1067 1068 int bpf_prog_get_fd_by_id(__u32 id) 1069 { 1070 return bpf_prog_get_fd_by_id_opts(id, NULL); 1071 } 1072 1073 int bpf_map_get_fd_by_id_opts(__u32 id, 1074 const struct bpf_get_fd_by_id_opts *opts) 1075 { 1076 const size_t attr_sz = offsetofend(union bpf_attr, open_flags); 1077 union bpf_attr attr; 1078 int fd; 1079 1080 if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts)) 1081 return libbpf_err(-EINVAL); 1082 1083 memset(&attr, 0, attr_sz); 1084 attr.map_id = id; 1085 attr.open_flags = OPTS_GET(opts, open_flags, 0); 1086 1087 fd = sys_bpf_fd(BPF_MAP_GET_FD_BY_ID, &attr, attr_sz); 1088 return libbpf_err_errno(fd); 1089 } 1090 1091 int bpf_map_get_fd_by_id(__u32 id) 1092 { 1093 return bpf_map_get_fd_by_id_opts(id, NULL); 1094 } 1095 1096 int bpf_btf_get_fd_by_id_opts(__u32 id, 1097 const struct bpf_get_fd_by_id_opts *opts) 1098 { 1099 const size_t attr_sz = offsetofend(union bpf_attr, open_flags); 1100 union bpf_attr attr; 1101 int fd; 1102 1103 if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts)) 1104 return libbpf_err(-EINVAL); 1105 1106 memset(&attr, 0, attr_sz); 1107 attr.btf_id = id; 1108 attr.open_flags = OPTS_GET(opts, open_flags, 0); 1109 1110 fd = sys_bpf_fd(BPF_BTF_GET_FD_BY_ID, &attr, attr_sz); 1111 return libbpf_err_errno(fd); 1112 } 1113 1114 int bpf_btf_get_fd_by_id(__u32 id) 1115 { 1116 return bpf_btf_get_fd_by_id_opts(id, NULL); 1117 } 1118 1119 int bpf_link_get_fd_by_id_opts(__u32 id, 1120 const struct bpf_get_fd_by_id_opts *opts) 1121 { 1122 const size_t attr_sz = offsetofend(union bpf_attr, open_flags); 1123 union bpf_attr attr; 1124 int fd; 1125 1126 if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts)) 1127 return libbpf_err(-EINVAL); 1128 1129 memset(&attr, 0, attr_sz); 1130 attr.link_id = id; 1131 attr.open_flags = OPTS_GET(opts, open_flags, 0); 1132 1133 fd = sys_bpf_fd(BPF_LINK_GET_FD_BY_ID, &attr, attr_sz); 1134 return libbpf_err_errno(fd); 1135 } 1136 1137 int bpf_link_get_fd_by_id(__u32 id) 1138 { 1139 return bpf_link_get_fd_by_id_opts(id, NULL); 1140 } 1141 1142 int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len) 1143 { 1144 const size_t attr_sz = offsetofend(union bpf_attr, info); 1145 union bpf_attr attr; 1146 int err; 1147 1148 memset(&attr, 0, attr_sz); 1149 attr.info.bpf_fd = bpf_fd; 1150 attr.info.info_len = *info_len; 1151 attr.info.info = ptr_to_u64(info); 1152 1153 err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, attr_sz); 1154 if (!err) 1155 *info_len = attr.info.info_len; 1156 return libbpf_err_errno(err); 1157 } 1158 1159 int bpf_prog_get_info_by_fd(int prog_fd, struct bpf_prog_info *info, __u32 *info_len) 1160 { 1161 return bpf_obj_get_info_by_fd(prog_fd, info, info_len); 1162 } 1163 1164 int bpf_map_get_info_by_fd(int map_fd, struct bpf_map_info *info, __u32 *info_len) 1165 { 1166 return bpf_obj_get_info_by_fd(map_fd, info, info_len); 1167 } 1168 1169 int bpf_btf_get_info_by_fd(int btf_fd, struct bpf_btf_info *info, __u32 *info_len) 1170 { 1171 return bpf_obj_get_info_by_fd(btf_fd, info, info_len); 1172 } 1173 1174 int bpf_link_get_info_by_fd(int link_fd, struct bpf_link_info *info, __u32 *info_len) 1175 { 1176 return bpf_obj_get_info_by_fd(link_fd, info, info_len); 1177 } 1178 1179 int bpf_raw_tracepoint_open_opts(int prog_fd, struct bpf_raw_tp_opts *opts) 1180 { 1181 const size_t attr_sz = offsetofend(union bpf_attr, raw_tracepoint); 1182 union bpf_attr attr; 1183 int fd; 1184 1185 if (!OPTS_VALID(opts, bpf_raw_tp_opts)) 1186 return libbpf_err(-EINVAL); 1187 1188 memset(&attr, 0, attr_sz); 1189 attr.raw_tracepoint.prog_fd = prog_fd; 1190 attr.raw_tracepoint.name = ptr_to_u64(OPTS_GET(opts, tp_name, NULL)); 1191 attr.raw_tracepoint.cookie = OPTS_GET(opts, cookie, 0); 1192 1193 fd = sys_bpf_fd(BPF_RAW_TRACEPOINT_OPEN, &attr, attr_sz); 1194 return libbpf_err_errno(fd); 1195 } 1196 1197 int bpf_raw_tracepoint_open(const char *name, int prog_fd) 1198 { 1199 LIBBPF_OPTS(bpf_raw_tp_opts, opts, .tp_name = name); 1200 1201 return bpf_raw_tracepoint_open_opts(prog_fd, &opts); 1202 } 1203 1204 int bpf_btf_load(const void *btf_data, size_t btf_size, struct bpf_btf_load_opts *opts) 1205 { 1206 const size_t attr_sz = offsetofend(union bpf_attr, btf_token_fd); 1207 union bpf_attr attr; 1208 char *log_buf; 1209 size_t log_size; 1210 __u32 log_level; 1211 int fd; 1212 1213 bump_rlimit_memlock(); 1214 1215 memset(&attr, 0, attr_sz); 1216 1217 if (!OPTS_VALID(opts, bpf_btf_load_opts)) 1218 return libbpf_err(-EINVAL); 1219 1220 log_buf = OPTS_GET(opts, log_buf, NULL); 1221 log_size = OPTS_GET(opts, log_size, 0); 1222 log_level = OPTS_GET(opts, log_level, 0); 1223 1224 if (log_size > UINT_MAX) 1225 return libbpf_err(-EINVAL); 1226 if (log_size && !log_buf) 1227 return libbpf_err(-EINVAL); 1228 1229 attr.btf = ptr_to_u64(btf_data); 1230 attr.btf_size = btf_size; 1231 1232 attr.btf_flags = OPTS_GET(opts, btf_flags, 0); 1233 attr.btf_token_fd = OPTS_GET(opts, token_fd, 0); 1234 1235 /* log_level == 0 and log_buf != NULL means "try loading without 1236 * log_buf, but retry with log_buf and log_level=1 on error", which is 1237 * consistent across low-level and high-level BTF and program loading 1238 * APIs within libbpf and provides a sensible behavior in practice 1239 */ 1240 if (log_level) { 1241 attr.btf_log_buf = ptr_to_u64(log_buf); 1242 attr.btf_log_size = (__u32)log_size; 1243 attr.btf_log_level = log_level; 1244 } 1245 1246 fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz); 1247 if (fd < 0 && log_buf && log_level == 0) { 1248 attr.btf_log_buf = ptr_to_u64(log_buf); 1249 attr.btf_log_size = (__u32)log_size; 1250 attr.btf_log_level = 1; 1251 fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz); 1252 } 1253 1254 OPTS_SET(opts, log_true_size, attr.btf_log_true_size); 1255 return libbpf_err_errno(fd); 1256 } 1257 1258 int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len, 1259 __u32 *prog_id, __u32 *fd_type, __u64 *probe_offset, 1260 __u64 *probe_addr) 1261 { 1262 const size_t attr_sz = offsetofend(union bpf_attr, task_fd_query); 1263 union bpf_attr attr; 1264 int err; 1265 1266 memset(&attr, 0, attr_sz); 1267 attr.task_fd_query.pid = pid; 1268 attr.task_fd_query.fd = fd; 1269 attr.task_fd_query.flags = flags; 1270 attr.task_fd_query.buf = ptr_to_u64(buf); 1271 attr.task_fd_query.buf_len = *buf_len; 1272 1273 err = sys_bpf(BPF_TASK_FD_QUERY, &attr, attr_sz); 1274 1275 *buf_len = attr.task_fd_query.buf_len; 1276 *prog_id = attr.task_fd_query.prog_id; 1277 *fd_type = attr.task_fd_query.fd_type; 1278 *probe_offset = attr.task_fd_query.probe_offset; 1279 *probe_addr = attr.task_fd_query.probe_addr; 1280 1281 return libbpf_err_errno(err); 1282 } 1283 1284 int bpf_enable_stats(enum bpf_stats_type type) 1285 { 1286 const size_t attr_sz = offsetofend(union bpf_attr, enable_stats); 1287 union bpf_attr attr; 1288 int fd; 1289 1290 memset(&attr, 0, attr_sz); 1291 attr.enable_stats.type = type; 1292 1293 fd = sys_bpf_fd(BPF_ENABLE_STATS, &attr, attr_sz); 1294 return libbpf_err_errno(fd); 1295 } 1296 1297 int bpf_prog_bind_map(int prog_fd, int map_fd, 1298 const struct bpf_prog_bind_opts *opts) 1299 { 1300 const size_t attr_sz = offsetofend(union bpf_attr, prog_bind_map); 1301 union bpf_attr attr; 1302 int ret; 1303 1304 if (!OPTS_VALID(opts, bpf_prog_bind_opts)) 1305 return libbpf_err(-EINVAL); 1306 1307 memset(&attr, 0, attr_sz); 1308 attr.prog_bind_map.prog_fd = prog_fd; 1309 attr.prog_bind_map.map_fd = map_fd; 1310 attr.prog_bind_map.flags = OPTS_GET(opts, flags, 0); 1311 1312 ret = sys_bpf(BPF_PROG_BIND_MAP, &attr, attr_sz); 1313 return libbpf_err_errno(ret); 1314 } 1315 1316 int bpf_token_create(int bpffs_fd, struct bpf_token_create_opts *opts) 1317 { 1318 const size_t attr_sz = offsetofend(union bpf_attr, token_create); 1319 union bpf_attr attr; 1320 int fd; 1321 1322 if (!OPTS_VALID(opts, bpf_token_create_opts)) 1323 return libbpf_err(-EINVAL); 1324 1325 memset(&attr, 0, attr_sz); 1326 attr.token_create.bpffs_fd = bpffs_fd; 1327 attr.token_create.flags = OPTS_GET(opts, flags, 0); 1328 1329 fd = sys_bpf_fd(BPF_TOKEN_CREATE, &attr, attr_sz); 1330 return libbpf_err_errno(fd); 1331 } 1332