// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * common eBPF ELF operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License (not later!)
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses>
 */

#include <stdlib.h>
#include <string.h>
#include <memory.h>
#include <unistd.h>
#include <asm/unistd.h>
#include <errno.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <limits.h>
#include <sys/resource.h>
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"

/*
 * When building perf, unistd.h is overridden. __NR_bpf is
 * required to be defined explicitly.
 */
#ifndef __NR_bpf
# if defined(__i386__)
#  define __NR_bpf 357
# elif defined(__x86_64__)
#  define __NR_bpf 321
# elif defined(__aarch64__)
#  define __NR_bpf 280
# elif defined(__sparc__)
#  define __NR_bpf 349
# elif defined(__s390__)
#  define __NR_bpf 351
# elif defined(__arc__)
#  define __NR_bpf 280
# elif defined(__mips__) && defined(_ABIO32)
#  define __NR_bpf 4355
# elif defined(__mips__) && defined(_ABIN32)
#  define __NR_bpf 6319
# elif defined(__mips__) && defined(_ABI64)
#  define __NR_bpf 5315
# else
#  error __NR_bpf not defined. libbpf does not support your arch.
# endif
#endif

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
			  unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}

static inline int sys_bpf_fd(enum bpf_cmd cmd, union bpf_attr *attr,
			     unsigned int size)
{
	int fd;

	fd = sys_bpf(cmd, attr, size);
	return ensure_good_fd(fd);
}

int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts)
{
	int fd;

	do {
		fd = sys_bpf_fd(BPF_PROG_LOAD, attr, size);
	} while (fd < 0 && errno == EAGAIN && --attempts > 0);

	return fd;
}
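/* Illustrative note (not used by this file): every wrapper below funnels
 * into the bpf(2) syscall via sys_bpf(). A hand-rolled equivalent of, say,
 * freezing a map would look like this sketch (assumes a kernel recent
 * enough to accept the full attr size):
 *
 *	union bpf_attr attr;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.map_fd = map_fd;
 *	err = syscall(__NR_bpf, BPF_MAP_FREEZE, &attr, sizeof(attr));
 */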
/* Probe whether kernel switched from memlock-based (RLIMIT_MEMLOCK) to
 * memcg-based memory accounting for BPF maps and progs. This was done in [0].
 * We use the support for bpf_ktime_get_coarse_ns() helper, which was added in
 * the same 5.11 Linux release ([1]), to detect memcg-based accounting for BPF.
 *
 *   [0] https://lore.kernel.org/bpf/20201201215900.3569844-1-guro@fb.com/
 *   [1] d05512618056 ("bpf: Add bpf_ktime_get_coarse_ns helper")
 */
int probe_memcg_account(void)
{
	const size_t attr_sz = offsetofend(union bpf_attr, attach_btf_obj_fd);
	struct bpf_insn insns[] = {
		BPF_EMIT_CALL(BPF_FUNC_ktime_get_coarse_ns),
		BPF_EXIT_INSN(),
	};
	size_t insn_cnt = ARRAY_SIZE(insns);
	union bpf_attr attr;
	int prog_fd;

	/* attempt loading a program that uses the bpf_ktime_get_coarse_ns()
	 * helper; success implies a 5.11+ kernel with memcg-based accounting
	 */
	memset(&attr, 0, attr_sz);
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns = ptr_to_u64(insns);
	attr.insn_cnt = insn_cnt;
	attr.license = ptr_to_u64("GPL");

	prog_fd = sys_bpf_fd(BPF_PROG_LOAD, &attr, attr_sz);
	if (prog_fd >= 0) {
		close(prog_fd);
		return 1;
	}
	return 0;
}

static bool memlock_bumped;
static rlim_t memlock_rlim = RLIM_INFINITY;

int libbpf_set_memlock_rlim(size_t memlock_bytes)
{
	if (memlock_bumped)
		return libbpf_err(-EBUSY);

	memlock_rlim = memlock_bytes;
	return 0;
}

int bump_rlimit_memlock(void)
{
	struct rlimit rlim;

	/* if kernel supports memcg-based accounting, skip bumping RLIMIT_MEMLOCK */
	if (memlock_bumped || kernel_supports(NULL, FEAT_MEMCG_ACCOUNT))
		return 0;

	memlock_bumped = true;

	/* zero memlock_rlim disables auto-bumping RLIMIT_MEMLOCK */
	if (memlock_rlim == 0)
		return 0;

	rlim.rlim_cur = rlim.rlim_max = memlock_rlim;
	if (setrlimit(RLIMIT_MEMLOCK, &rlim))
		return -errno;

	return 0;
}

int bpf_map_create(enum bpf_map_type map_type,
		   const char *map_name,
		   __u32 key_size,
		   __u32 value_size,
		   __u32 max_entries,
		   const struct bpf_map_create_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, map_extra);
	union bpf_attr attr;
	int fd;

	bump_rlimit_memlock();

	memset(&attr, 0, attr_sz);

	if (!OPTS_VALID(opts, bpf_map_create_opts))
		return libbpf_err(-EINVAL);

	attr.map_type = map_type;
	if (map_name && kernel_supports(NULL, FEAT_PROG_NAME))
		libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name));
	attr.key_size = key_size;
	attr.value_size = value_size;
	attr.max_entries = max_entries;

	attr.btf_fd = OPTS_GET(opts, btf_fd, 0);
	attr.btf_key_type_id = OPTS_GET(opts, btf_key_type_id, 0);
	attr.btf_value_type_id = OPTS_GET(opts, btf_value_type_id, 0);
	attr.btf_vmlinux_value_type_id = OPTS_GET(opts, btf_vmlinux_value_type_id, 0);

	attr.inner_map_fd = OPTS_GET(opts, inner_map_fd, 0);
	attr.map_flags = OPTS_GET(opts, map_flags, 0);
	attr.map_extra = OPTS_GET(opts, map_extra, 0);
	attr.numa_node = OPTS_GET(opts, numa_node, 0);
	attr.map_ifindex = OPTS_GET(opts, map_ifindex, 0);

	fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, attr_sz);
	return libbpf_err_errno(fd);
}
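/* Usage sketch for bpf_map_create() (illustrative only; map name, element
 * sizes, and max_entries are arbitrary):
 *
 *	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);
 *	int map_fd;
 *
 *	map_fd = bpf_map_create(BPF_MAP_TYPE_HASH, "example_map",
 *				sizeof(__u32), sizeof(__u64), 1024, &opts);
 *	if (map_fd < 0)
 *		return map_fd;	(negative errno on failure)
 */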
static void *
alloc_zero_tailing_info(const void *orecord, __u32 cnt,
			__u32 actual_rec_size, __u32 expected_rec_size)
{
	__u64 info_len = (__u64)actual_rec_size * cnt;
	void *info, *nrecord;
	int i;

	info = malloc(info_len);
	if (!info)
		return NULL;

	/* zero out bytes kernel does not understand */
	nrecord = info;
	for (i = 0; i < cnt; i++) {
		memcpy(nrecord, orecord, expected_rec_size);
		memset(nrecord + expected_rec_size, 0,
		       actual_rec_size - expected_rec_size);
		orecord += actual_rec_size;
		nrecord += actual_rec_size;
	}

	return info;
}

int bpf_prog_load(enum bpf_prog_type prog_type,
		  const char *prog_name, const char *license,
		  const struct bpf_insn *insns, size_t insn_cnt,
		  struct bpf_prog_load_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, log_true_size);
	void *finfo = NULL, *linfo = NULL;
	const char *func_info, *line_info;
	__u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd;
	__u32 func_info_rec_size, line_info_rec_size;
	int fd, attempts;
	union bpf_attr attr;
	char *log_buf;

	bump_rlimit_memlock();

	if (!OPTS_VALID(opts, bpf_prog_load_opts))
		return libbpf_err(-EINVAL);

	attempts = OPTS_GET(opts, attempts, 0);
	if (attempts < 0)
		return libbpf_err(-EINVAL);
	if (attempts == 0)
		attempts = PROG_LOAD_ATTEMPTS;

	memset(&attr, 0, attr_sz);

	attr.prog_type = prog_type;
	attr.expected_attach_type = OPTS_GET(opts, expected_attach_type, 0);

	attr.prog_btf_fd = OPTS_GET(opts, prog_btf_fd, 0);
	attr.prog_flags = OPTS_GET(opts, prog_flags, 0);
	attr.prog_ifindex = OPTS_GET(opts, prog_ifindex, 0);
	attr.kern_version = OPTS_GET(opts, kern_version, 0);

	if (prog_name && kernel_supports(NULL, FEAT_PROG_NAME))
		libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name));
	attr.license = ptr_to_u64(license);

	if (insn_cnt > UINT_MAX)
		return libbpf_err(-E2BIG);

	attr.insns = ptr_to_u64(insns);
	attr.insn_cnt = (__u32)insn_cnt;

	attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
	attach_btf_obj_fd = OPTS_GET(opts, attach_btf_obj_fd, 0);

	if (attach_prog_fd && attach_btf_obj_fd)
		return libbpf_err(-EINVAL);

	attr.attach_btf_id = OPTS_GET(opts, attach_btf_id, 0);
	if (attach_prog_fd)
		attr.attach_prog_fd = attach_prog_fd;
	else
		attr.attach_btf_obj_fd = attach_btf_obj_fd;

	log_buf = OPTS_GET(opts, log_buf, NULL);
	log_size = OPTS_GET(opts, log_size, 0);
	log_level = OPTS_GET(opts, log_level, 0);

	if (!!log_buf != !!log_size)
		return libbpf_err(-EINVAL);

	func_info_rec_size = OPTS_GET(opts, func_info_rec_size, 0);
	func_info = OPTS_GET(opts, func_info, NULL);
	attr.func_info_rec_size = func_info_rec_size;
	attr.func_info = ptr_to_u64(func_info);
	attr.func_info_cnt = OPTS_GET(opts, func_info_cnt, 0);

	line_info_rec_size = OPTS_GET(opts, line_info_rec_size, 0);
	line_info = OPTS_GET(opts, line_info, NULL);
	attr.line_info_rec_size = line_info_rec_size;
	attr.line_info = ptr_to_u64(line_info);
	attr.line_info_cnt = OPTS_GET(opts, line_info_cnt, 0);

	attr.fd_array = ptr_to_u64(OPTS_GET(opts, fd_array, NULL));

	if (log_level) {
		attr.log_buf = ptr_to_u64(log_buf);
		attr.log_size = log_size;
		attr.log_level = log_level;
	}

	fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
	OPTS_SET(opts, log_true_size, attr.log_true_size);
	if (fd >= 0)
		return fd;

	/* After bpf_prog_load, the kernel may modify certain attributes
	 * to give user space a hint how to deal with loading failure.
	 * Check to see whether we can make some changes and load again.
	 */
	while (errno == E2BIG && (!finfo || !linfo)) {
		if (!finfo && attr.func_info_cnt &&
		    attr.func_info_rec_size < func_info_rec_size) {
			/* try with corrected func info records */
			finfo = alloc_zero_tailing_info(func_info,
							attr.func_info_cnt,
							func_info_rec_size,
							attr.func_info_rec_size);
			if (!finfo) {
				errno = E2BIG;
				goto done;
			}

			attr.func_info = ptr_to_u64(finfo);
			attr.func_info_rec_size = func_info_rec_size;
		} else if (!linfo && attr.line_info_cnt &&
			   attr.line_info_rec_size < line_info_rec_size) {
			linfo = alloc_zero_tailing_info(line_info,
							attr.line_info_cnt,
							line_info_rec_size,
							attr.line_info_rec_size);
			if (!linfo) {
				errno = E2BIG;
				goto done;
			}

			attr.line_info = ptr_to_u64(linfo);
			attr.line_info_rec_size = line_info_rec_size;
		} else {
			break;
		}

		fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
		OPTS_SET(opts, log_true_size, attr.log_true_size);
		if (fd >= 0)
			goto done;
	}

	if (log_level == 0 && log_buf) {
		/* log_level == 0 with non-NULL log_buf requires retrying on
		 * error with log_level == 1 and log_buf/log_size set, to get
		 * details of the failure
		 */
		attr.log_buf = ptr_to_u64(log_buf);
		attr.log_size = log_size;
		attr.log_level = 1;

		fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
		OPTS_SET(opts, log_true_size, attr.log_true_size);
	}
done:
	/* free() doesn't affect errno, so we don't need to restore it */
	free(finfo);
	free(linfo);
	return libbpf_err_errno(fd);
}
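/* Usage sketch for bpf_prog_load() (illustrative only; the program name and
 * the trivial "return 0" instruction stream are placeholders, and the insn
 * macros come from linux/filter.h):
 *
 *	struct bpf_insn insns[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),
 *		BPF_EXIT_INSN(),
 *	};
 *	int prog_fd;
 *
 *	prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "example_prog",
 *				"GPL", insns, ARRAY_SIZE(insns), NULL);
 */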
int bpf_map_update_elem(int fd, const void *key, const void *value,
			__u64 flags)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	ret = sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_map_lookup_elem(int fd, const void *key, void *value)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_map_lookup_elem_flags(int fd, const void *key, void *value, __u64 flags)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_map_lookup_and_delete_elem(int fd, const void *key, void *value)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_map_lookup_and_delete_elem_flags(int fd, const void *key, void *value, __u64 flags)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, attr_sz);
	return libbpf_err_errno(ret);
}
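/* Usage sketch for the single-element operations above (illustrative;
 * assumes a map with 4-byte keys and 8-byte values):
 *
 *	__u32 key = 1;
 *	__u64 val = 42, out;
 *	int err;
 *
 *	err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
 *	if (!err)
 *		err = bpf_map_lookup_elem(map_fd, &key, &out);
 */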
int bpf_map_delete_elem(int fd, const void *key)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);

	ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_map_delete_elem_flags(int fd, const void *key, __u64 flags)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.flags = flags;

	ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_map_get_next_key(int fd, const void *key, void *next_key)
{
	const size_t attr_sz = offsetofend(union bpf_attr, next_key);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.next_key = ptr_to_u64(next_key);

	ret = sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_map_freeze(int fd)
{
	const size_t attr_sz = offsetofend(union bpf_attr, map_fd);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;

	ret = sys_bpf(BPF_MAP_FREEZE, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

static int bpf_map_batch_common(int cmd, int fd, void *in_batch,
				void *out_batch, void *keys, void *values,
				__u32 *count,
				const struct bpf_map_batch_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, batch);
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_map_batch_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.batch.map_fd = fd;
	attr.batch.in_batch = ptr_to_u64(in_batch);
	attr.batch.out_batch = ptr_to_u64(out_batch);
	attr.batch.keys = ptr_to_u64(keys);
	attr.batch.values = ptr_to_u64(values);
	attr.batch.count = *count;
	attr.batch.elem_flags = OPTS_GET(opts, elem_flags, 0);
	attr.batch.flags = OPTS_GET(opts, flags, 0);

	ret = sys_bpf(cmd, &attr, attr_sz);
	*count = attr.batch.count;

	return libbpf_err_errno(ret);
}

int bpf_map_delete_batch(int fd, const void *keys, __u32 *count,
			 const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_DELETE_BATCH, fd, NULL,
				    NULL, (void *)keys, NULL, count, opts);
}

int bpf_map_lookup_batch(int fd, void *in_batch, void *out_batch, void *keys,
			 void *values, __u32 *count,
			 const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_LOOKUP_BATCH, fd, in_batch,
				    out_batch, keys, values, count, opts);
}

int bpf_map_lookup_and_delete_batch(int fd, void *in_batch, void *out_batch,
				    void *keys, void *values, __u32 *count,
				    const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_LOOKUP_AND_DELETE_BATCH,
				    fd, in_batch, out_batch, keys, values,
				    count, opts);
}

int bpf_map_update_batch(int fd, const void *keys, const void *values, __u32 *count,
			 const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_UPDATE_BATCH, fd, NULL, NULL,
				    (void *)keys, (void *)values, count, opts);
}
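/* Usage sketch for the batch operations above (illustrative; assumes
 * 4-byte keys, 8-byte values, and an arbitrary batch size of 64):
 *
 *	__u32 out_batch, count = 64;
 *	__u32 keys[64];
 *	__u64 vals[64];
 *	int err;
 *
 *	(in_batch must be NULL on the first call; pass &out_batch as in_batch
 *	on subsequent calls; an -ENOENT result marks the end of the map)
 *	err = bpf_map_lookup_batch(map_fd, NULL, &out_batch,
 *				   keys, vals, &count, NULL);
 */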
int bpf_obj_pin(int fd, const char *pathname)
{
	const size_t attr_sz = offsetofend(union bpf_attr, file_flags);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.pathname = ptr_to_u64((void *)pathname);
	attr.bpf_fd = fd;

	ret = sys_bpf(BPF_OBJ_PIN, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_obj_get(const char *pathname)
{
	return bpf_obj_get_opts(pathname, NULL);
}

int bpf_obj_get_opts(const char *pathname, const struct bpf_obj_get_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, file_flags);
	union bpf_attr attr;
	int fd;

	if (!OPTS_VALID(opts, bpf_obj_get_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.pathname = ptr_to_u64((void *)pathname);
	attr.file_flags = OPTS_GET(opts, file_flags, 0);

	fd = sys_bpf_fd(BPF_OBJ_GET, &attr, attr_sz);
	return libbpf_err_errno(fd);
}

int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
		    unsigned int flags)
{
	DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts, opts,
		.flags = flags,
	);

	return bpf_prog_attach_opts(prog_fd, target_fd, type, &opts);
}

int bpf_prog_attach_opts(int prog_fd, int target_fd,
			 enum bpf_attach_type type,
			 const struct bpf_prog_attach_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, replace_bpf_fd);
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_prog_attach_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.target_fd = target_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = type;
	attr.attach_flags = OPTS_GET(opts, flags, 0);
	attr.replace_bpf_fd = OPTS_GET(opts, replace_prog_fd, 0);

	ret = sys_bpf(BPF_PROG_ATTACH, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
{
	const size_t attr_sz = offsetofend(union bpf_attr, replace_bpf_fd);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.target_fd = target_fd;
	attr.attach_type = type;

	ret = sys_bpf(BPF_PROG_DETACH, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type)
{
	const size_t attr_sz = offsetofend(union bpf_attr, replace_bpf_fd);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.target_fd = target_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = type;

	ret = sys_bpf(BPF_PROG_DETACH, &attr, attr_sz);
	return libbpf_err_errno(ret);
}
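/* Usage sketch for BPF FS pinning (illustrative; the pin path is a
 * placeholder and must live on a mounted bpffs):
 *
 *	err = bpf_obj_pin(map_fd, "/sys/fs/bpf/example_map");
 *	...
 *	fd = bpf_obj_get("/sys/fs/bpf/example_map");
 */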
int bpf_link_create(int prog_fd, int target_fd,
		    enum bpf_attach_type attach_type,
		    const struct bpf_link_create_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, link_create);
	__u32 target_btf_id, iter_info_len;
	union bpf_attr attr;
	int fd, err;

	if (!OPTS_VALID(opts, bpf_link_create_opts))
		return libbpf_err(-EINVAL);

	iter_info_len = OPTS_GET(opts, iter_info_len, 0);
	target_btf_id = OPTS_GET(opts, target_btf_id, 0);

	/* validate we don't have unexpected combinations of non-zero fields */
	if (iter_info_len || target_btf_id) {
		if (iter_info_len && target_btf_id)
			return libbpf_err(-EINVAL);
		if (!OPTS_ZEROED(opts, target_btf_id))
			return libbpf_err(-EINVAL);
	}

	memset(&attr, 0, attr_sz);
	attr.link_create.prog_fd = prog_fd;
	attr.link_create.target_fd = target_fd;
	attr.link_create.attach_type = attach_type;
	attr.link_create.flags = OPTS_GET(opts, flags, 0);

	if (target_btf_id) {
		attr.link_create.target_btf_id = target_btf_id;
		goto proceed;
	}

	switch (attach_type) {
	case BPF_TRACE_ITER:
		attr.link_create.iter_info = ptr_to_u64(OPTS_GET(opts, iter_info, (void *)0));
		attr.link_create.iter_info_len = iter_info_len;
		break;
	case BPF_PERF_EVENT:
		attr.link_create.perf_event.bpf_cookie = OPTS_GET(opts, perf_event.bpf_cookie, 0);
		if (!OPTS_ZEROED(opts, perf_event))
			return libbpf_err(-EINVAL);
		break;
	case BPF_TRACE_KPROBE_MULTI:
		attr.link_create.kprobe_multi.flags = OPTS_GET(opts, kprobe_multi.flags, 0);
		attr.link_create.kprobe_multi.cnt = OPTS_GET(opts, kprobe_multi.cnt, 0);
		attr.link_create.kprobe_multi.syms = ptr_to_u64(OPTS_GET(opts, kprobe_multi.syms, 0));
		attr.link_create.kprobe_multi.addrs = ptr_to_u64(OPTS_GET(opts, kprobe_multi.addrs, 0));
		attr.link_create.kprobe_multi.cookies = ptr_to_u64(OPTS_GET(opts, kprobe_multi.cookies, 0));
		if (!OPTS_ZEROED(opts, kprobe_multi))
			return libbpf_err(-EINVAL);
		break;
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
	case BPF_MODIFY_RETURN:
	case BPF_LSM_MAC:
		attr.link_create.tracing.cookie = OPTS_GET(opts, tracing.cookie, 0);
		if (!OPTS_ZEROED(opts, tracing))
			return libbpf_err(-EINVAL);
		break;
	default:
		if (!OPTS_ZEROED(opts, flags))
			return libbpf_err(-EINVAL);
		break;
	}
proceed:
	fd = sys_bpf_fd(BPF_LINK_CREATE, &attr, attr_sz);
	if (fd >= 0)
		return fd;
	/* we'll get EINVAL if LINK_CREATE doesn't support attaching fentry
	 * and other similar programs
	 */
	err = -errno;
	if (err != -EINVAL)
		return libbpf_err(err);

	/* if user used features not supported by the
	 * BPF_RAW_TRACEPOINT_OPEN command, then just give up immediately
	 */
	if (attr.link_create.target_fd || attr.link_create.target_btf_id)
		return libbpf_err(err);
	if (!OPTS_ZEROED(opts, sz))
		return libbpf_err(err);

	/* otherwise, for a few select kinds of programs that can be
	 * attached using the BPF_RAW_TRACEPOINT_OPEN command, try that as
	 * a fallback for older kernels
	 */
	switch (attach_type) {
	case BPF_TRACE_RAW_TP:
	case BPF_LSM_MAC:
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
	case BPF_MODIFY_RETURN:
		return bpf_raw_tracepoint_open(NULL, prog_fd);
	default:
		return libbpf_err(err);
	}
}

int bpf_link_detach(int link_fd)
{
	const size_t attr_sz = offsetofend(union bpf_attr, link_detach);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.link_detach.link_fd = link_fd;

	ret = sys_bpf(BPF_LINK_DETACH, &attr, attr_sz);
	return libbpf_err_errno(ret);
}
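/* Usage sketch for bpf_link_create() (illustrative; perf_fd is assumed to
 * come from perf_event_open() and the cookie value is arbitrary):
 *
 *	DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts,
 *			    .perf_event.bpf_cookie = 0x1234);
 *	int link_fd;
 *
 *	link_fd = bpf_link_create(prog_fd, perf_fd, BPF_PERF_EVENT, &opts);
 */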
int bpf_link_update(int link_fd, int new_prog_fd,
		    const struct bpf_link_update_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, link_update);
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_link_update_opts))
		return libbpf_err(-EINVAL);

	if (OPTS_GET(opts, old_prog_fd, 0) && OPTS_GET(opts, old_map_fd, 0))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.link_update.link_fd = link_fd;
	attr.link_update.new_prog_fd = new_prog_fd;
	attr.link_update.flags = OPTS_GET(opts, flags, 0);
	if (OPTS_GET(opts, old_prog_fd, 0))
		attr.link_update.old_prog_fd = OPTS_GET(opts, old_prog_fd, 0);
	else if (OPTS_GET(opts, old_map_fd, 0))
		attr.link_update.old_map_fd = OPTS_GET(opts, old_map_fd, 0);

	ret = sys_bpf(BPF_LINK_UPDATE, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_iter_create(int link_fd)
{
	const size_t attr_sz = offsetofend(union bpf_attr, iter_create);
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, attr_sz);
	attr.iter_create.link_fd = link_fd;

	fd = sys_bpf_fd(BPF_ITER_CREATE, &attr, attr_sz);
	return libbpf_err_errno(fd);
}

int bpf_prog_query_opts(int target_fd,
			enum bpf_attach_type type,
			struct bpf_prog_query_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, query);
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_prog_query_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);

	attr.query.target_fd = target_fd;
	attr.query.attach_type = type;
	attr.query.query_flags = OPTS_GET(opts, query_flags, 0);
	attr.query.prog_cnt = OPTS_GET(opts, prog_cnt, 0);
	attr.query.prog_ids = ptr_to_u64(OPTS_GET(opts, prog_ids, NULL));
	attr.query.prog_attach_flags = ptr_to_u64(OPTS_GET(opts, prog_attach_flags, NULL));

	ret = sys_bpf(BPF_PROG_QUERY, &attr, attr_sz);

	OPTS_SET(opts, attach_flags, attr.query.attach_flags);
	OPTS_SET(opts, prog_cnt, attr.query.prog_cnt);

	return libbpf_err_errno(ret);
}

int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
		   __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt)
{
	LIBBPF_OPTS(bpf_prog_query_opts, opts);
	int ret;

	opts.query_flags = query_flags;
	opts.prog_ids = prog_ids;
	opts.prog_cnt = *prog_cnt;

	ret = bpf_prog_query_opts(target_fd, type, &opts);

	if (attach_flags)
		*attach_flags = opts.attach_flags;
	*prog_cnt = opts.prog_cnt;

	return libbpf_err_errno(ret);
}

int bpf_prog_test_run_opts(int prog_fd, struct bpf_test_run_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, test);
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_test_run_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.test.prog_fd = prog_fd;
	attr.test.batch_size = OPTS_GET(opts, batch_size, 0);
	attr.test.cpu = OPTS_GET(opts, cpu, 0);
	attr.test.flags = OPTS_GET(opts, flags, 0);
	attr.test.repeat = OPTS_GET(opts, repeat, 0);
	attr.test.duration = OPTS_GET(opts, duration, 0);
	attr.test.ctx_size_in = OPTS_GET(opts, ctx_size_in, 0);
	attr.test.ctx_size_out = OPTS_GET(opts, ctx_size_out, 0);
	attr.test.data_size_in = OPTS_GET(opts, data_size_in, 0);
	attr.test.data_size_out = OPTS_GET(opts, data_size_out, 0);
	attr.test.ctx_in = ptr_to_u64(OPTS_GET(opts, ctx_in, NULL));
	attr.test.ctx_out = ptr_to_u64(OPTS_GET(opts, ctx_out, NULL));
	attr.test.data_in = ptr_to_u64(OPTS_GET(opts, data_in, NULL));
	attr.test.data_out = ptr_to_u64(OPTS_GET(opts, data_out, NULL));

	ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, attr_sz);

	OPTS_SET(opts, data_size_out, attr.test.data_size_out);
	OPTS_SET(opts, ctx_size_out, attr.test.ctx_size_out);
	OPTS_SET(opts, duration, attr.test.duration);
	OPTS_SET(opts, retval, attr.test.retval);

	return libbpf_err_errno(ret);
}
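/* Usage sketch for bpf_prog_test_run_opts() (illustrative; pkt is an
 * assumed caller-provided input packet buffer):
 *
 *	LIBBPF_OPTS(bpf_test_run_opts, topts,
 *		    .data_in = pkt,
 *		    .data_size_in = sizeof(pkt),
 *		    .repeat = 1);
 *	int err;
 *
 *	err = bpf_prog_test_run_opts(prog_fd, &topts);
 *	(on success, topts.retval holds the program's return value)
 */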
static int bpf_obj_get_next_id(__u32 start_id, __u32 *next_id, int cmd)
{
	const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
	union bpf_attr attr;
	int err;

	memset(&attr, 0, attr_sz);
	attr.start_id = start_id;

	err = sys_bpf(cmd, &attr, attr_sz);
	if (!err)
		*next_id = attr.next_id;

	return libbpf_err_errno(err);
}

int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_PROG_GET_NEXT_ID);
}

int bpf_map_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_MAP_GET_NEXT_ID);
}

int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_BTF_GET_NEXT_ID);
}

int bpf_link_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_LINK_GET_NEXT_ID);
}

int bpf_prog_get_fd_by_id_opts(__u32 id,
			       const struct bpf_get_fd_by_id_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
	union bpf_attr attr;
	int fd;

	if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.prog_id = id;
	attr.open_flags = OPTS_GET(opts, open_flags, 0);

	fd = sys_bpf_fd(BPF_PROG_GET_FD_BY_ID, &attr, attr_sz);
	return libbpf_err_errno(fd);
}

int bpf_prog_get_fd_by_id(__u32 id)
{
	return bpf_prog_get_fd_by_id_opts(id, NULL);
}

int bpf_map_get_fd_by_id_opts(__u32 id,
			      const struct bpf_get_fd_by_id_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
	union bpf_attr attr;
	int fd;

	if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.map_id = id;
	attr.open_flags = OPTS_GET(opts, open_flags, 0);

	fd = sys_bpf_fd(BPF_MAP_GET_FD_BY_ID, &attr, attr_sz);
	return libbpf_err_errno(fd);
}

int bpf_map_get_fd_by_id(__u32 id)
{
	return bpf_map_get_fd_by_id_opts(id, NULL);
}

int bpf_btf_get_fd_by_id_opts(__u32 id,
			      const struct bpf_get_fd_by_id_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
	union bpf_attr attr;
	int fd;

	if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.btf_id = id;
	attr.open_flags = OPTS_GET(opts, open_flags, 0);

	fd = sys_bpf_fd(BPF_BTF_GET_FD_BY_ID, &attr, attr_sz);
	return libbpf_err_errno(fd);
}

int bpf_btf_get_fd_by_id(__u32 id)
{
	return bpf_btf_get_fd_by_id_opts(id, NULL);
}

int bpf_link_get_fd_by_id_opts(__u32 id,
			       const struct bpf_get_fd_by_id_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
	union bpf_attr attr;
	int fd;

	if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.link_id = id;
	attr.open_flags = OPTS_GET(opts, open_flags, 0);

	fd = sys_bpf_fd(BPF_LINK_GET_FD_BY_ID, &attr, attr_sz);
	return libbpf_err_errno(fd);
}

int bpf_link_get_fd_by_id(__u32 id)
{
	return bpf_link_get_fd_by_id_opts(id, NULL);
}
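/* Usage sketch for ID iteration (illustrative): walk all loaded programs.
 * bpf_prog_get_fd_by_id() can race with program unload, so a failed
 * lookup is simply skipped:
 *
 *	__u32 id = 0;
 *	int fd;
 *
 *	while (!bpf_prog_get_next_id(id, &id)) {
 *		fd = bpf_prog_get_fd_by_id(id);
 *		if (fd < 0)
 *			continue;
 *		...
 *		close(fd);
 *	}
 */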
int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len)
{
	const size_t attr_sz = offsetofend(union bpf_attr, info);
	union bpf_attr attr;
	int err;

	memset(&attr, 0, attr_sz);
	attr.info.bpf_fd = bpf_fd;
	attr.info.info_len = *info_len;
	attr.info.info = ptr_to_u64(info);

	err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, attr_sz);
	if (!err)
		*info_len = attr.info.info_len;
	return libbpf_err_errno(err);
}

int bpf_prog_get_info_by_fd(int prog_fd, struct bpf_prog_info *info, __u32 *info_len)
{
	return bpf_obj_get_info_by_fd(prog_fd, info, info_len);
}

int bpf_map_get_info_by_fd(int map_fd, struct bpf_map_info *info, __u32 *info_len)
{
	return bpf_obj_get_info_by_fd(map_fd, info, info_len);
}

int bpf_btf_get_info_by_fd(int btf_fd, struct bpf_btf_info *info, __u32 *info_len)
{
	return bpf_obj_get_info_by_fd(btf_fd, info, info_len);
}

int bpf_link_get_info_by_fd(int link_fd, struct bpf_link_info *info, __u32 *info_len)
{
	return bpf_obj_get_info_by_fd(link_fd, info, info_len);
}

int bpf_raw_tracepoint_open(const char *name, int prog_fd)
{
	const size_t attr_sz = offsetofend(union bpf_attr, raw_tracepoint);
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, attr_sz);
	attr.raw_tracepoint.name = ptr_to_u64(name);
	attr.raw_tracepoint.prog_fd = prog_fd;

	fd = sys_bpf_fd(BPF_RAW_TRACEPOINT_OPEN, &attr, attr_sz);
	return libbpf_err_errno(fd);
}

int bpf_btf_load(const void *btf_data, size_t btf_size, struct bpf_btf_load_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, btf_log_true_size);
	union bpf_attr attr;
	char *log_buf;
	size_t log_size;
	__u32 log_level;
	int fd;

	bump_rlimit_memlock();

	memset(&attr, 0, attr_sz);

	if (!OPTS_VALID(opts, bpf_btf_load_opts))
		return libbpf_err(-EINVAL);

	log_buf = OPTS_GET(opts, log_buf, NULL);
	log_size = OPTS_GET(opts, log_size, 0);
	log_level = OPTS_GET(opts, log_level, 0);

	if (log_size > UINT_MAX)
		return libbpf_err(-EINVAL);
	if (log_size && !log_buf)
		return libbpf_err(-EINVAL);

	attr.btf = ptr_to_u64(btf_data);
	attr.btf_size = btf_size;
	/* log_level == 0 and log_buf != NULL means "try loading without
	 * log_buf, but retry with log_buf and log_level=1 on error", which is
	 * consistent across low-level and high-level BTF and program loading
	 * APIs within libbpf and provides a sensible behavior in practice
	 */
	if (log_level) {
		attr.btf_log_buf = ptr_to_u64(log_buf);
		attr.btf_log_size = (__u32)log_size;
		attr.btf_log_level = log_level;
	}

	fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz);
	if (fd < 0 && log_buf && log_level == 0) {
		attr.btf_log_buf = ptr_to_u64(log_buf);
		attr.btf_log_size = (__u32)log_size;
		attr.btf_log_level = 1;
		fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz);
	}

	OPTS_SET(opts, log_true_size, attr.btf_log_true_size);
	return libbpf_err_errno(fd);
}
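/* Usage sketch for the info getters above (illustrative):
 *
 *	struct bpf_prog_info info;
 *	__u32 len = sizeof(info);
 *	int err;
 *
 *	memset(&info, 0, len);
 *	err = bpf_prog_get_info_by_fd(prog_fd, &info, &len);
 *	(on success, info.id, info.name, etc. are filled in, and len is
 *	updated to the length the kernel actually wrote)
 */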
int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len,
		      __u32 *prog_id, __u32 *fd_type, __u64 *probe_offset,
		      __u64 *probe_addr)
{
	const size_t attr_sz = offsetofend(union bpf_attr, task_fd_query);
	union bpf_attr attr;
	int err;

	memset(&attr, 0, attr_sz);
	attr.task_fd_query.pid = pid;
	attr.task_fd_query.fd = fd;
	attr.task_fd_query.flags = flags;
	attr.task_fd_query.buf = ptr_to_u64(buf);
	attr.task_fd_query.buf_len = *buf_len;

	err = sys_bpf(BPF_TASK_FD_QUERY, &attr, attr_sz);

	*buf_len = attr.task_fd_query.buf_len;
	*prog_id = attr.task_fd_query.prog_id;
	*fd_type = attr.task_fd_query.fd_type;
	*probe_offset = attr.task_fd_query.probe_offset;
	*probe_addr = attr.task_fd_query.probe_addr;

	return libbpf_err_errno(err);
}

int bpf_enable_stats(enum bpf_stats_type type)
{
	const size_t attr_sz = offsetofend(union bpf_attr, enable_stats);
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, attr_sz);
	attr.enable_stats.type = type;

	fd = sys_bpf_fd(BPF_ENABLE_STATS, &attr, attr_sz);
	return libbpf_err_errno(fd);
}

int bpf_prog_bind_map(int prog_fd, int map_fd,
		      const struct bpf_prog_bind_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, prog_bind_map);
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_prog_bind_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.prog_bind_map.prog_fd = prog_fd;
	attr.prog_bind_map.map_fd = map_fd;
	attr.prog_bind_map.flags = OPTS_GET(opts, flags, 0);

	ret = sys_bpf(BPF_PROG_BIND_MAP, &attr, attr_sz);
	return libbpf_err_errno(ret);
}
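/* Usage sketch for bpf_enable_stats() (illustrative): run-time statistics
 * accumulate only while the returned fd stays open:
 *
 *	int stats_fd = bpf_enable_stats(BPF_STATS_RUN_TIME);
 *
 *	... workload runs, stats accumulate ...
 *
 *	close(stats_fd);
 */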