// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * common eBPF ELF operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License (not later!)
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses>
 */

#include <stdlib.h>
#include <string.h>
#include <memory.h>
#include <unistd.h>
#include <asm/unistd.h>
#include <errno.h>
#include <linux/bpf.h>
#include <limits.h>
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"

/*
 * When building perf, unistd.h is overridden. __NR_bpf is
 * required to be defined explicitly.
 */
#ifndef __NR_bpf
# if defined(__i386__)
#  define __NR_bpf 357
# elif defined(__x86_64__)
#  define __NR_bpf 321
# elif defined(__aarch64__)
#  define __NR_bpf 280
# elif defined(__sparc__)
#  define __NR_bpf 349
# elif defined(__s390__)
#  define __NR_bpf 351
# elif defined(__arc__)
#  define __NR_bpf 280
# else
#  error __NR_bpf not defined. libbpf does not support your arch.
# endif
#endif

static inline __u64 ptr_to_u64(const void *ptr)
{
        return (__u64) (unsigned long) ptr;
}

static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
                          unsigned int size)
{
        return syscall(__NR_bpf, cmd, attr, size);
}

static inline int sys_bpf_fd(enum bpf_cmd cmd, union bpf_attr *attr,
                             unsigned int size)
{
        int fd;

        fd = sys_bpf(cmd, attr, size);
        return ensure_good_fd(fd);
}

#define PROG_LOAD_ATTEMPTS 5

static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts)
{
        int fd;

        do {
                fd = sys_bpf_fd(BPF_PROG_LOAD, attr, size);
        } while (fd < 0 && errno == EAGAIN && --attempts > 0);

        return fd;
}
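/*
 * The map-creation wrappers below all funnel into BPF_MAP_CREATE via a
 * single union bpf_attr. Illustrative caller-side sketch (not part of
 * libbpf itself; error handling elided):
 *
 *      int map_fd;
 *
 *      map_fd = bpf_create_map_name(BPF_MAP_TYPE_HASH, "my_map",
 *                                   sizeof(__u32), sizeof(__u64), 16, 0);
 *      if (map_fd < 0)
 *              return map_fd;  // negative error; errno is also set
 */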
int libbpf__bpf_create_map_xattr(const struct bpf_create_map_params *create_attr)
{
        union bpf_attr attr;
        int fd;

        memset(&attr, '\0', sizeof(attr));

        attr.map_type = create_attr->map_type;
        attr.key_size = create_attr->key_size;
        attr.value_size = create_attr->value_size;
        attr.max_entries = create_attr->max_entries;
        attr.map_flags = create_attr->map_flags;
        if (create_attr->name)
                memcpy(attr.map_name, create_attr->name,
                       min(strlen(create_attr->name), BPF_OBJ_NAME_LEN - 1));
        attr.numa_node = create_attr->numa_node;
        attr.btf_fd = create_attr->btf_fd;
        attr.btf_key_type_id = create_attr->btf_key_type_id;
        attr.btf_value_type_id = create_attr->btf_value_type_id;
        attr.map_ifindex = create_attr->map_ifindex;
        if (attr.map_type == BPF_MAP_TYPE_STRUCT_OPS)
                attr.btf_vmlinux_value_type_id =
                        create_attr->btf_vmlinux_value_type_id;
        else
                attr.inner_map_fd = create_attr->inner_map_fd;
        attr.map_extra = create_attr->map_extra;

        fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, sizeof(attr));
        return libbpf_err_errno(fd);
}

int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
{
        struct bpf_create_map_params p = {};

        p.map_type = create_attr->map_type;
        p.key_size = create_attr->key_size;
        p.value_size = create_attr->value_size;
        p.max_entries = create_attr->max_entries;
        p.map_flags = create_attr->map_flags;
        p.name = create_attr->name;
        p.numa_node = create_attr->numa_node;
        p.btf_fd = create_attr->btf_fd;
        p.btf_key_type_id = create_attr->btf_key_type_id;
        p.btf_value_type_id = create_attr->btf_value_type_id;
        p.map_ifindex = create_attr->map_ifindex;
        if (p.map_type == BPF_MAP_TYPE_STRUCT_OPS)
                p.btf_vmlinux_value_type_id =
                        create_attr->btf_vmlinux_value_type_id;
        else
                p.inner_map_fd = create_attr->inner_map_fd;

        return libbpf__bpf_create_map_xattr(&p);
}

int bpf_create_map_node(enum bpf_map_type map_type, const char *name,
                        int key_size, int value_size, int max_entries,
                        __u32 map_flags, int node)
{
        struct bpf_create_map_attr map_attr = {};

        map_attr.name = name;
        map_attr.map_type = map_type;
        map_attr.map_flags = map_flags;
        map_attr.key_size = key_size;
        map_attr.value_size = value_size;
        map_attr.max_entries = max_entries;
        if (node >= 0) {
                map_attr.numa_node = node;
                map_attr.map_flags |= BPF_F_NUMA_NODE;
        }

        return bpf_create_map_xattr(&map_attr);
}

int bpf_create_map(enum bpf_map_type map_type, int key_size,
                   int value_size, int max_entries, __u32 map_flags)
{
        struct bpf_create_map_attr map_attr = {};

        map_attr.map_type = map_type;
        map_attr.map_flags = map_flags;
        map_attr.key_size = key_size;
        map_attr.value_size = value_size;
        map_attr.max_entries = max_entries;

        return bpf_create_map_xattr(&map_attr);
}

int bpf_create_map_name(enum bpf_map_type map_type, const char *name,
                        int key_size, int value_size, int max_entries,
                        __u32 map_flags)
{
        struct bpf_create_map_attr map_attr = {};

        map_attr.name = name;
        map_attr.map_type = map_type;
        map_attr.map_flags = map_flags;
        map_attr.key_size = key_size;
        map_attr.value_size = value_size;
        map_attr.max_entries = max_entries;

        return bpf_create_map_xattr(&map_attr);
}

int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name,
                               int key_size, int inner_map_fd, int max_entries,
                               __u32 map_flags, int node)
{
        union bpf_attr attr;
        int fd;

        memset(&attr, '\0', sizeof(attr));

        attr.map_type = map_type;
        attr.key_size = key_size;
        attr.value_size = 4;
        attr.inner_map_fd = inner_map_fd;
        attr.max_entries = max_entries;
        attr.map_flags = map_flags;
        if (name)
                memcpy(attr.map_name, name,
                       min(strlen(name), BPF_OBJ_NAME_LEN - 1));

        if (node >= 0) {
                attr.map_flags |= BPF_F_NUMA_NODE;
                attr.numa_node = node;
        }

        fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, sizeof(attr));
        return libbpf_err_errno(fd);
}

int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name,
                          int key_size, int inner_map_fd, int max_entries,
                          __u32 map_flags)
{
        return bpf_create_map_in_map_node(map_type, name, key_size,
                                          inner_map_fd, max_entries, map_flags,
                                          -1);
}
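/*
 * Illustrative map-in-map sketch (not part of libbpf; error handling
 * elided). The outer map's value size is fixed at 4 bytes above because
 * outer maps store inner-map FDs/IDs as a u32:
 *
 *      int inner_fd, outer_fd;
 *
 *      inner_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 64, 0);
 *      outer_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS,
 *                                       "outer", 4, inner_fd, 8, 0);
 */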
static void *
alloc_zero_tailing_info(const void *orecord, __u32 cnt,
                        __u32 actual_rec_size, __u32 expected_rec_size)
{
        __u64 info_len = (__u64)actual_rec_size * cnt;
        void *info, *nrecord;
        int i;

        info = malloc(info_len);
        if (!info)
                return NULL;

        /* zero out bytes kernel does not understand */
        nrecord = info;
        for (i = 0; i < cnt; i++) {
                memcpy(nrecord, orecord, expected_rec_size);
                memset(nrecord + expected_rec_size, 0,
                       actual_rec_size - expected_rec_size);
                orecord += actual_rec_size;
                nrecord += actual_rec_size;
        }

        return info;
}

DEFAULT_VERSION(bpf_prog_load_v0_6_0, bpf_prog_load, LIBBPF_0.6.0)
int bpf_prog_load_v0_6_0(enum bpf_prog_type prog_type,
                         const char *prog_name, const char *license,
                         const struct bpf_insn *insns, size_t insn_cnt,
                         const struct bpf_prog_load_opts *opts)
{
        void *finfo = NULL, *linfo = NULL;
        const char *func_info, *line_info;
        __u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd;
        __u32 func_info_rec_size, line_info_rec_size;
        int fd, attempts;
        union bpf_attr attr;
        char *log_buf;

        if (!OPTS_VALID(opts, bpf_prog_load_opts))
                return libbpf_err(-EINVAL);

        attempts = OPTS_GET(opts, attempts, 0);
        if (attempts < 0)
                return libbpf_err(-EINVAL);
        if (attempts == 0)
                attempts = PROG_LOAD_ATTEMPTS;

        memset(&attr, 0, sizeof(attr));

        attr.prog_type = prog_type;
        attr.expected_attach_type = OPTS_GET(opts, expected_attach_type, 0);

        attr.prog_btf_fd = OPTS_GET(opts, prog_btf_fd, 0);
        attr.prog_flags = OPTS_GET(opts, prog_flags, 0);
        attr.prog_ifindex = OPTS_GET(opts, prog_ifindex, 0);
        attr.kern_version = OPTS_GET(opts, kern_version, 0);

        if (prog_name)
                strncat(attr.prog_name, prog_name, sizeof(attr.prog_name) - 1);
        attr.license = ptr_to_u64(license);

        if (insn_cnt > UINT_MAX)
                return libbpf_err(-E2BIG);

        attr.insns = ptr_to_u64(insns);
        attr.insn_cnt = (__u32)insn_cnt;

        attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
        attach_btf_obj_fd = OPTS_GET(opts, attach_btf_obj_fd, 0);

        if (attach_prog_fd && attach_btf_obj_fd)
                return libbpf_err(-EINVAL);

        attr.attach_btf_id = OPTS_GET(opts, attach_btf_id, 0);
        if (attach_prog_fd)
                attr.attach_prog_fd = attach_prog_fd;
        else
                attr.attach_btf_obj_fd = attach_btf_obj_fd;

        log_buf = OPTS_GET(opts, log_buf, NULL);
        log_size = OPTS_GET(opts, log_size, 0);
        log_level = OPTS_GET(opts, log_level, 0);

        if (!!log_buf != !!log_size)
                return libbpf_err(-EINVAL);
        if (log_level > (4 | 2 | 1))
                return libbpf_err(-EINVAL);
        if (log_level && !log_buf)
                return libbpf_err(-EINVAL);

        attr.log_level = log_level;
        attr.log_buf = ptr_to_u64(log_buf);
        attr.log_size = log_size;

        func_info_rec_size = OPTS_GET(opts, func_info_rec_size, 0);
        func_info = OPTS_GET(opts, func_info, NULL);
        attr.func_info_rec_size = func_info_rec_size;
        attr.func_info = ptr_to_u64(func_info);
        attr.func_info_cnt = OPTS_GET(opts, func_info_cnt, 0);

        line_info_rec_size = OPTS_GET(opts, line_info_rec_size, 0);
        line_info = OPTS_GET(opts, line_info, NULL);
        attr.line_info_rec_size = line_info_rec_size;
        attr.line_info = ptr_to_u64(line_info);
        attr.line_info_cnt = OPTS_GET(opts, line_info_cnt, 0);

        attr.fd_array = ptr_to_u64(OPTS_GET(opts, fd_array, NULL));

        fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts);
        if (fd >= 0)
                return fd;

        /* After bpf_prog_load, the kernel may modify certain attributes
         * to give user space a hint on how to deal with the loading
         * failure. Check to see whether we can make some changes and
         * load again.
         */
        while (errno == E2BIG && (!finfo || !linfo)) {
                if (!finfo && attr.func_info_cnt &&
                    attr.func_info_rec_size < func_info_rec_size) {
                        /* try with corrected func info records */
                        finfo = alloc_zero_tailing_info(func_info,
                                                        attr.func_info_cnt,
                                                        func_info_rec_size,
                                                        attr.func_info_rec_size);
                        if (!finfo) {
                                errno = E2BIG;
                                goto done;
                        }

                        attr.func_info = ptr_to_u64(finfo);
                        attr.func_info_rec_size = func_info_rec_size;
                } else if (!linfo && attr.line_info_cnt &&
                           attr.line_info_rec_size < line_info_rec_size) {
                        linfo = alloc_zero_tailing_info(line_info,
                                                        attr.line_info_cnt,
                                                        line_info_rec_size,
                                                        attr.line_info_rec_size);
                        if (!linfo) {
                                errno = E2BIG;
                                goto done;
                        }

                        attr.line_info = ptr_to_u64(linfo);
                        attr.line_info_rec_size = line_info_rec_size;
                } else {
                        break;
                }

                fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts);
                if (fd >= 0)
                        goto done;
        }

        if (log_level || !log_buf)
                goto done;

        /* Try again with log */
        log_buf[0] = 0;
        attr.log_buf = ptr_to_u64(log_buf);
        attr.log_size = log_size;
        attr.log_level = 1;

        fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts);
done:
        /* free() doesn't affect errno, so we don't need to restore it */
        free(finfo);
        free(linfo);
        return libbpf_err_errno(fd);
}
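/*
 * Illustrative bpf_prog_load() sketch (not part of libbpf; caller-side
 * names are hypothetical). Loads a trivial "return 0" socket filter.
 * With log_level left at 0 and a log buffer supplied, the verifier log
 * is filled in only on the automatic retry above:
 *
 *      struct bpf_insn insns[] = {
 *              { .code = BPF_ALU64 | BPF_MOV | BPF_K,
 *                .dst_reg = BPF_REG_0, .imm = 0 },     // r0 = 0
 *              { .code = BPF_JMP | BPF_EXIT },         // exit
 *      };
 *      char log[4096];
 *      LIBBPF_OPTS(bpf_prog_load_opts, opts,
 *              .log_buf = log,
 *              .log_size = sizeof(log),
 *      );
 *      int prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "noop",
 *                                  "GPL", insns, 2, &opts);
 */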
__attribute__((alias("bpf_load_program_xattr2")))
int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
                           char *log_buf, size_t log_buf_sz);

static int bpf_load_program_xattr2(const struct bpf_load_program_attr *load_attr,
                                   char *log_buf, size_t log_buf_sz)
{
        LIBBPF_OPTS(bpf_prog_load_opts, p);

        if (!load_attr || !log_buf != !log_buf_sz)
                return libbpf_err(-EINVAL);

        p.expected_attach_type = load_attr->expected_attach_type;
        switch (load_attr->prog_type) {
        case BPF_PROG_TYPE_STRUCT_OPS:
        case BPF_PROG_TYPE_LSM:
                p.attach_btf_id = load_attr->attach_btf_id;
                break;
        case BPF_PROG_TYPE_TRACING:
        case BPF_PROG_TYPE_EXT:
                p.attach_btf_id = load_attr->attach_btf_id;
                p.attach_prog_fd = load_attr->attach_prog_fd;
                break;
        default:
                p.prog_ifindex = load_attr->prog_ifindex;
                p.kern_version = load_attr->kern_version;
        }
        p.log_level = load_attr->log_level;
        p.log_buf = log_buf;
        p.log_size = log_buf_sz;
        p.prog_btf_fd = load_attr->prog_btf_fd;
        p.func_info_rec_size = load_attr->func_info_rec_size;
        p.func_info_cnt = load_attr->func_info_cnt;
        p.func_info = load_attr->func_info;
        p.line_info_rec_size = load_attr->line_info_rec_size;
        p.line_info_cnt = load_attr->line_info_cnt;
        p.line_info = load_attr->line_info;
        p.prog_flags = load_attr->prog_flags;

        return bpf_prog_load(load_attr->prog_type, load_attr->name, load_attr->license,
                             load_attr->insns, load_attr->insns_cnt, &p);
}

int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
                     size_t insns_cnt, const char *license,
                     __u32 kern_version, char *log_buf,
                     size_t log_buf_sz)
{
        struct bpf_load_program_attr load_attr;

        memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
        load_attr.prog_type = type;
        load_attr.expected_attach_type = 0;
        load_attr.name = NULL;
        load_attr.insns = insns;
        load_attr.insns_cnt = insns_cnt;
        load_attr.license = license;
        load_attr.kern_version = kern_version;

        return bpf_load_program_xattr2(&load_attr, log_buf, log_buf_sz);
}

int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
                       size_t insns_cnt, __u32 prog_flags, const char *license,
                       __u32 kern_version, char *log_buf, size_t log_buf_sz,
                       int log_level)
{
        union bpf_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.prog_type = type;
        attr.insn_cnt = (__u32)insns_cnt;
        attr.insns = ptr_to_u64(insns);
        attr.license = ptr_to_u64(license);
        attr.log_buf = ptr_to_u64(log_buf);
        attr.log_size = log_buf_sz;
        attr.log_level = log_level;
        log_buf[0] = 0;
        attr.kern_version = kern_version;
        attr.prog_flags = prog_flags;

        fd = sys_bpf_prog_load(&attr, sizeof(attr), PROG_LOAD_ATTEMPTS);
        return libbpf_err_errno(fd);
}
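/* Note: bpf_verify_program() above unconditionally writes log_buf[0],
 * so callers must pass a valid, non-empty log buffer.
 */

/*
 * Single-element map operations follow. Illustrative sketch (not part
 * of libbpf; map_fd is a caller-side FD from an earlier map creation,
 * here for a map with 4-byte keys and 8-byte values):
 *
 *      __u32 key = 1;
 *      __u64 value = 42;
 *
 *      if (!bpf_map_update_elem(map_fd, &key, &value, BPF_NOEXIST))
 *              bpf_map_lookup_elem(map_fd, &key, &value);
 */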
int bpf_map_update_elem(int fd, const void *key, const void *value,
                        __u64 flags)
{
        union bpf_attr attr;
        int ret;

        memset(&attr, 0, sizeof(attr));
        attr.map_fd = fd;
        attr.key = ptr_to_u64(key);
        attr.value = ptr_to_u64(value);
        attr.flags = flags;

        ret = sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
        return libbpf_err_errno(ret);
}

int bpf_map_lookup_elem(int fd, const void *key, void *value)
{
        union bpf_attr attr;
        int ret;

        memset(&attr, 0, sizeof(attr));
        attr.map_fd = fd;
        attr.key = ptr_to_u64(key);
        attr.value = ptr_to_u64(value);

        ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
        return libbpf_err_errno(ret);
}

int bpf_map_lookup_elem_flags(int fd, const void *key, void *value, __u64 flags)
{
        union bpf_attr attr;
        int ret;

        memset(&attr, 0, sizeof(attr));
        attr.map_fd = fd;
        attr.key = ptr_to_u64(key);
        attr.value = ptr_to_u64(value);
        attr.flags = flags;

        ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
        return libbpf_err_errno(ret);
}

int bpf_map_lookup_and_delete_elem(int fd, const void *key, void *value)
{
        union bpf_attr attr;
        int ret;

        memset(&attr, 0, sizeof(attr));
        attr.map_fd = fd;
        attr.key = ptr_to_u64(key);
        attr.value = ptr_to_u64(value);

        ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, sizeof(attr));
        return libbpf_err_errno(ret);
}

int bpf_map_lookup_and_delete_elem_flags(int fd, const void *key, void *value, __u64 flags)
{
        union bpf_attr attr;
        int ret;

        memset(&attr, 0, sizeof(attr));
        attr.map_fd = fd;
        attr.key = ptr_to_u64(key);
        attr.value = ptr_to_u64(value);
        attr.flags = flags;

        ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, sizeof(attr));
        return libbpf_err_errno(ret);
}

int bpf_map_delete_elem(int fd, const void *key)
{
        union bpf_attr attr;
        int ret;

        memset(&attr, 0, sizeof(attr));
        attr.map_fd = fd;
        attr.key = ptr_to_u64(key);

        ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
        return libbpf_err_errno(ret);
}

int bpf_map_get_next_key(int fd, const void *key, void *next_key)
{
        union bpf_attr attr;
        int ret;

        memset(&attr, 0, sizeof(attr));
        attr.map_fd = fd;
        attr.key = ptr_to_u64(key);
        attr.next_key = ptr_to_u64(next_key);

        ret = sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
        return libbpf_err_errno(ret);
}

int bpf_map_freeze(int fd)
{
        union bpf_attr attr;
        int ret;

        memset(&attr, 0, sizeof(attr));
        attr.map_fd = fd;

        ret = sys_bpf(BPF_MAP_FREEZE, &attr, sizeof(attr));
        return libbpf_err_errno(ret);
}
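/*
 * Illustrative key-iteration sketch using the helpers above (not part
 * of libbpf). Passing NULL as the current key makes BPF_MAP_GET_NEXT_KEY
 * return the map's first key:
 *
 *      __u32 key, next_key;
 *      void *prev = NULL;
 *
 *      while (!bpf_map_get_next_key(map_fd, prev, &next_key)) {
 *              // process next_key ...
 *              key = next_key;
 *              prev = &key;
 *      }
 */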
static int bpf_map_batch_common(int cmd, int fd, void *in_batch,
                                void *out_batch, void *keys, void *values,
                                __u32 *count,
                                const struct bpf_map_batch_opts *opts)
{
        union bpf_attr attr;
        int ret;

        if (!OPTS_VALID(opts, bpf_map_batch_opts))
                return libbpf_err(-EINVAL);

        memset(&attr, 0, sizeof(attr));
        attr.batch.map_fd = fd;
        attr.batch.in_batch = ptr_to_u64(in_batch);
        attr.batch.out_batch = ptr_to_u64(out_batch);
        attr.batch.keys = ptr_to_u64(keys);
        attr.batch.values = ptr_to_u64(values);
        attr.batch.count = *count;
        attr.batch.elem_flags = OPTS_GET(opts, elem_flags, 0);
        attr.batch.flags = OPTS_GET(opts, flags, 0);

        ret = sys_bpf(cmd, &attr, sizeof(attr));
        *count = attr.batch.count;

        return libbpf_err_errno(ret);
}

int bpf_map_delete_batch(int fd, void *keys, __u32 *count,
                         const struct bpf_map_batch_opts *opts)
{
        return bpf_map_batch_common(BPF_MAP_DELETE_BATCH, fd, NULL,
                                    NULL, keys, NULL, count, opts);
}

int bpf_map_lookup_batch(int fd, void *in_batch, void *out_batch, void *keys,
                         void *values, __u32 *count,
                         const struct bpf_map_batch_opts *opts)
{
        return bpf_map_batch_common(BPF_MAP_LOOKUP_BATCH, fd, in_batch,
                                    out_batch, keys, values, count, opts);
}

int bpf_map_lookup_and_delete_batch(int fd, void *in_batch, void *out_batch,
                                    void *keys, void *values, __u32 *count,
                                    const struct bpf_map_batch_opts *opts)
{
        return bpf_map_batch_common(BPF_MAP_LOOKUP_AND_DELETE_BATCH,
                                    fd, in_batch, out_batch, keys, values,
                                    count, opts);
}

int bpf_map_update_batch(int fd, void *keys, void *values, __u32 *count,
                         const struct bpf_map_batch_opts *opts)
{
        return bpf_map_batch_common(BPF_MAP_UPDATE_BATCH, fd, NULL, NULL,
                                    keys, values, count, opts);
}
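/*
 * Illustrative batched-lookup sketch (not part of libbpf; map_fd is a
 * caller-side FD for a map with 4-byte keys and 8-byte values). *count
 * is in/out: callers pass the buffer capacity and get back the number
 * of elements actually processed. in_batch == NULL starts from the
 * beginning; out_batch receives the cursor to pass as in_batch on the
 * next call, and ENOENT signals the last batch was returned:
 *
 *      __u32 keys[64], batch;
 *      __u64 values[64];
 *      __u32 count = 64;
 *      LIBBPF_OPTS(bpf_map_batch_opts, opts);
 *
 *      int err = bpf_map_lookup_batch(map_fd, NULL, &batch, keys,
 *                                     values, &count, &opts);
 */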
int bpf_obj_pin(int fd, const char *pathname)
{
        union bpf_attr attr;
        int ret;

        memset(&attr, 0, sizeof(attr));
        attr.pathname = ptr_to_u64((void *)pathname);
        attr.bpf_fd = fd;

        ret = sys_bpf(BPF_OBJ_PIN, &attr, sizeof(attr));
        return libbpf_err_errno(ret);
}

int bpf_obj_get(const char *pathname)
{
        union bpf_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.pathname = ptr_to_u64((void *)pathname);

        fd = sys_bpf_fd(BPF_OBJ_GET, &attr, sizeof(attr));
        return libbpf_err_errno(fd);
}

int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
                    unsigned int flags)
{
        DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts, opts,
                .flags = flags,
        );

        return bpf_prog_attach_xattr(prog_fd, target_fd, type, &opts);
}

int bpf_prog_attach_xattr(int prog_fd, int target_fd,
                          enum bpf_attach_type type,
                          const struct bpf_prog_attach_opts *opts)
{
        union bpf_attr attr;
        int ret;

        if (!OPTS_VALID(opts, bpf_prog_attach_opts))
                return libbpf_err(-EINVAL);

        memset(&attr, 0, sizeof(attr));
        attr.target_fd = target_fd;
        attr.attach_bpf_fd = prog_fd;
        attr.attach_type = type;
        attr.attach_flags = OPTS_GET(opts, flags, 0);
        attr.replace_bpf_fd = OPTS_GET(opts, replace_prog_fd, 0);

        ret = sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
        return libbpf_err_errno(ret);
}

int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
{
        union bpf_attr attr;
        int ret;

        memset(&attr, 0, sizeof(attr));
        attr.target_fd = target_fd;
        attr.attach_type = type;

        ret = sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
        return libbpf_err_errno(ret);
}

int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type)
{
        union bpf_attr attr;
        int ret;

        memset(&attr, 0, sizeof(attr));
        attr.target_fd = target_fd;
        attr.attach_bpf_fd = prog_fd;
        attr.attach_type = type;

        ret = sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
        return libbpf_err_errno(ret);
}
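/*
 * Illustrative pin-and-attach sketch (not part of libbpf; the paths and
 * FDs are hypothetical, error handling elided). Pins a loaded program
 * into bpffs and attaches it to a cgroup:
 *
 *      int cg_fd = open("/sys/fs/cgroup/test", O_RDONLY);
 *
 *      bpf_obj_pin(prog_fd, "/sys/fs/bpf/my_prog");
 *      bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_INET_INGRESS, 0);
 */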
int bpf_link_create(int prog_fd, int target_fd,
                    enum bpf_attach_type attach_type,
                    const struct bpf_link_create_opts *opts)
{
        __u32 target_btf_id, iter_info_len;
        union bpf_attr attr;
        int fd;

        if (!OPTS_VALID(opts, bpf_link_create_opts))
                return libbpf_err(-EINVAL);

        iter_info_len = OPTS_GET(opts, iter_info_len, 0);
        target_btf_id = OPTS_GET(opts, target_btf_id, 0);

        /* validate we don't have unexpected combinations of non-zero fields */
        if (iter_info_len || target_btf_id) {
                if (iter_info_len && target_btf_id)
                        return libbpf_err(-EINVAL);
                if (!OPTS_ZEROED(opts, target_btf_id))
                        return libbpf_err(-EINVAL);
        }

        memset(&attr, 0, sizeof(attr));
        attr.link_create.prog_fd = prog_fd;
        attr.link_create.target_fd = target_fd;
        attr.link_create.attach_type = attach_type;
        attr.link_create.flags = OPTS_GET(opts, flags, 0);

        if (target_btf_id) {
                attr.link_create.target_btf_id = target_btf_id;
                goto proceed;
        }

        switch (attach_type) {
        case BPF_TRACE_ITER:
                attr.link_create.iter_info = ptr_to_u64(OPTS_GET(opts, iter_info, (void *)0));
                attr.link_create.iter_info_len = iter_info_len;
                break;
        case BPF_PERF_EVENT:
                attr.link_create.perf_event.bpf_cookie = OPTS_GET(opts, perf_event.bpf_cookie, 0);
                if (!OPTS_ZEROED(opts, perf_event))
                        return libbpf_err(-EINVAL);
                break;
        default:
                if (!OPTS_ZEROED(opts, flags))
                        return libbpf_err(-EINVAL);
                break;
        }
proceed:
        fd = sys_bpf_fd(BPF_LINK_CREATE, &attr, sizeof(attr));
        return libbpf_err_errno(fd);
}

int bpf_link_detach(int link_fd)
{
        union bpf_attr attr;
        int ret;

        memset(&attr, 0, sizeof(attr));
        attr.link_detach.link_fd = link_fd;

        ret = sys_bpf(BPF_LINK_DETACH, &attr, sizeof(attr));
        return libbpf_err_errno(ret);
}

int bpf_link_update(int link_fd, int new_prog_fd,
                    const struct bpf_link_update_opts *opts)
{
        union bpf_attr attr;
        int ret;

        if (!OPTS_VALID(opts, bpf_link_update_opts))
                return libbpf_err(-EINVAL);

        memset(&attr, 0, sizeof(attr));
        attr.link_update.link_fd = link_fd;
        attr.link_update.new_prog_fd = new_prog_fd;
        attr.link_update.flags = OPTS_GET(opts, flags, 0);
        attr.link_update.old_prog_fd = OPTS_GET(opts, old_prog_fd, 0);

        ret = sys_bpf(BPF_LINK_UPDATE, &attr, sizeof(attr));
        return libbpf_err_errno(ret);
}

int bpf_iter_create(int link_fd)
{
        union bpf_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.iter_create.link_fd = link_fd;

        fd = sys_bpf_fd(BPF_ITER_CREATE, &attr, sizeof(attr));
        return libbpf_err_errno(fd);
}

int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
                   __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt)
{
        union bpf_attr attr;
        int ret;

        memset(&attr, 0, sizeof(attr));
        attr.query.target_fd = target_fd;
        attr.query.attach_type = type;
        attr.query.query_flags = query_flags;
        attr.query.prog_cnt = *prog_cnt;
        attr.query.prog_ids = ptr_to_u64(prog_ids);

        ret = sys_bpf(BPF_PROG_QUERY, &attr, sizeof(attr));

        if (attach_flags)
                *attach_flags = attr.query.attach_flags;
        *prog_cnt = attr.query.prog_cnt;

        return libbpf_err_errno(ret);
}
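/*
 * Illustrative bpf_prog_query() sketch (not part of libbpf; cg_fd is a
 * caller-side cgroup FD). *prog_cnt is in/out: it holds the prog_ids
 * capacity on entry and the number of attached programs on return:
 *
 *      __u32 prog_ids[16];
 *      __u32 prog_cnt = 16, attach_flags;
 *
 *      bpf_prog_query(cg_fd, BPF_CGROUP_INET_INGRESS, 0,
 *                     &attach_flags, prog_ids, &prog_cnt);
 */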
int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size,
                      void *data_out, __u32 *size_out, __u32 *retval,
                      __u32 *duration)
{
        union bpf_attr attr;
        int ret;

        memset(&attr, 0, sizeof(attr));
        attr.test.prog_fd = prog_fd;
        attr.test.data_in = ptr_to_u64(data);
        attr.test.data_out = ptr_to_u64(data_out);
        attr.test.data_size_in = size;
        attr.test.repeat = repeat;

        ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));

        if (size_out)
                *size_out = attr.test.data_size_out;
        if (retval)
                *retval = attr.test.retval;
        if (duration)
                *duration = attr.test.duration;

        return libbpf_err_errno(ret);
}

int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr)
{
        union bpf_attr attr;
        int ret;

        if (!test_attr->data_out && test_attr->data_size_out > 0)
                return libbpf_err(-EINVAL);

        memset(&attr, 0, sizeof(attr));
        attr.test.prog_fd = test_attr->prog_fd;
        attr.test.data_in = ptr_to_u64(test_attr->data_in);
        attr.test.data_out = ptr_to_u64(test_attr->data_out);
        attr.test.data_size_in = test_attr->data_size_in;
        attr.test.data_size_out = test_attr->data_size_out;
        attr.test.ctx_in = ptr_to_u64(test_attr->ctx_in);
        attr.test.ctx_out = ptr_to_u64(test_attr->ctx_out);
        attr.test.ctx_size_in = test_attr->ctx_size_in;
        attr.test.ctx_size_out = test_attr->ctx_size_out;
        attr.test.repeat = test_attr->repeat;

        ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));

        test_attr->data_size_out = attr.test.data_size_out;
        test_attr->ctx_size_out = attr.test.ctx_size_out;
        test_attr->retval = attr.test.retval;
        test_attr->duration = attr.test.duration;

        return libbpf_err_errno(ret);
}

int bpf_prog_test_run_opts(int prog_fd, struct bpf_test_run_opts *opts)
{
        union bpf_attr attr;
        int ret;

        if (!OPTS_VALID(opts, bpf_test_run_opts))
                return libbpf_err(-EINVAL);

        memset(&attr, 0, sizeof(attr));
        attr.test.prog_fd = prog_fd;
        attr.test.cpu = OPTS_GET(opts, cpu, 0);
        attr.test.flags = OPTS_GET(opts, flags, 0);
        attr.test.repeat = OPTS_GET(opts, repeat, 0);
        attr.test.duration = OPTS_GET(opts, duration, 0);
        attr.test.ctx_size_in = OPTS_GET(opts, ctx_size_in, 0);
        attr.test.ctx_size_out = OPTS_GET(opts, ctx_size_out, 0);
        attr.test.data_size_in = OPTS_GET(opts, data_size_in, 0);
        attr.test.data_size_out = OPTS_GET(opts, data_size_out, 0);
        attr.test.ctx_in = ptr_to_u64(OPTS_GET(opts, ctx_in, NULL));
        attr.test.ctx_out = ptr_to_u64(OPTS_GET(opts, ctx_out, NULL));
        attr.test.data_in = ptr_to_u64(OPTS_GET(opts, data_in, NULL));
        attr.test.data_out = ptr_to_u64(OPTS_GET(opts, data_out, NULL));

        ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));

        OPTS_SET(opts, data_size_out, attr.test.data_size_out);
        OPTS_SET(opts, ctx_size_out, attr.test.ctx_size_out);
        OPTS_SET(opts, duration, attr.test.duration);
        OPTS_SET(opts, retval, attr.test.retval);

        return libbpf_err_errno(ret);
}
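/*
 * Illustrative bpf_prog_test_run_opts() sketch (not part of libbpf;
 * prog_fd is a caller-side FD for a loaded XDP program). Runs the
 * program once over a dummy frame and reads the verdict back out of
 * opts.retval:
 *
 *      char pkt_in[64] = {};
 *      char pkt_out[64];
 *      LIBBPF_OPTS(bpf_test_run_opts, topts,
 *              .data_in = pkt_in,
 *              .data_size_in = sizeof(pkt_in),
 *              .data_out = pkt_out,
 *              .data_size_out = sizeof(pkt_out),
 *              .repeat = 1,
 *      );
 *
 *      if (!bpf_prog_test_run_opts(prog_fd, &topts))
 *              printf("retval=%u duration=%uns\n", topts.retval,
 *                     topts.duration);
 */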
static int bpf_obj_get_next_id(__u32 start_id, __u32 *next_id, int cmd)
{
        union bpf_attr attr;
        int err;

        memset(&attr, 0, sizeof(attr));
        attr.start_id = start_id;

        err = sys_bpf(cmd, &attr, sizeof(attr));
        if (!err)
                *next_id = attr.next_id;

        return libbpf_err_errno(err);
}

int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
{
        return bpf_obj_get_next_id(start_id, next_id, BPF_PROG_GET_NEXT_ID);
}

int bpf_map_get_next_id(__u32 start_id, __u32 *next_id)
{
        return bpf_obj_get_next_id(start_id, next_id, BPF_MAP_GET_NEXT_ID);
}

int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id)
{
        return bpf_obj_get_next_id(start_id, next_id, BPF_BTF_GET_NEXT_ID);
}

int bpf_link_get_next_id(__u32 start_id, __u32 *next_id)
{
        return bpf_obj_get_next_id(start_id, next_id, BPF_LINK_GET_NEXT_ID);
}

int bpf_prog_get_fd_by_id(__u32 id)
{
        union bpf_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.prog_id = id;

        fd = sys_bpf_fd(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
        return libbpf_err_errno(fd);
}

int bpf_map_get_fd_by_id(__u32 id)
{
        union bpf_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.map_id = id;

        fd = sys_bpf_fd(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
        return libbpf_err_errno(fd);
}

int bpf_btf_get_fd_by_id(__u32 id)
{
        union bpf_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.btf_id = id;

        fd = sys_bpf_fd(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr));
        return libbpf_err_errno(fd);
}

int bpf_link_get_fd_by_id(__u32 id)
{
        union bpf_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.link_id = id;

        fd = sys_bpf_fd(BPF_LINK_GET_FD_BY_ID, &attr, sizeof(attr));
        return libbpf_err_errno(fd);
}

int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len)
{
        union bpf_attr attr;
        int err;

        memset(&attr, 0, sizeof(attr));
        attr.info.bpf_fd = bpf_fd;
        attr.info.info_len = *info_len;
        attr.info.info = ptr_to_u64(info);

        err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));

        if (!err)
                *info_len = attr.info.info_len;

        return libbpf_err_errno(err);
}

int bpf_raw_tracepoint_open(const char *name, int prog_fd)
{
        union bpf_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.raw_tracepoint.name = ptr_to_u64(name);
        attr.raw_tracepoint.prog_fd = prog_fd;

        fd = sys_bpf_fd(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
        return libbpf_err_errno(fd);
}

int bpf_load_btf(const void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_size,
                 bool do_log)
{
        union bpf_attr attr = {};
        int fd;

        attr.btf = ptr_to_u64(btf);
        attr.btf_size = btf_size;

retry:
        if (do_log && log_buf && log_buf_size) {
                attr.btf_log_level = 1;
                attr.btf_log_size = log_buf_size;
                attr.btf_log_buf = ptr_to_u64(log_buf);
        }

        fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, sizeof(attr));

        if (fd < 0 && !do_log && log_buf && log_buf_size) {
                do_log = true;
                goto retry;
        }

        return libbpf_err_errno(fd);
}

int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len,
                      __u32 *prog_id, __u32 *fd_type, __u64 *probe_offset,
                      __u64 *probe_addr)
{
        union bpf_attr attr = {};
        int err;

        attr.task_fd_query.pid = pid;
        attr.task_fd_query.fd = fd;
        attr.task_fd_query.flags = flags;
        attr.task_fd_query.buf = ptr_to_u64(buf);
        attr.task_fd_query.buf_len = *buf_len;

        err = sys_bpf(BPF_TASK_FD_QUERY, &attr, sizeof(attr));

        *buf_len = attr.task_fd_query.buf_len;
        *prog_id = attr.task_fd_query.prog_id;
        *fd_type = attr.task_fd_query.fd_type;
        *probe_offset = attr.task_fd_query.probe_offset;
        *probe_addr = attr.task_fd_query.probe_addr;

        return libbpf_err_errno(err);
}

int bpf_enable_stats(enum bpf_stats_type type)
{
        union bpf_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.enable_stats.type = type;

        fd = sys_bpf_fd(BPF_ENABLE_STATS, &attr, sizeof(attr));
        return libbpf_err_errno(fd);
}

int bpf_prog_bind_map(int prog_fd, int map_fd,
                      const struct bpf_prog_bind_opts *opts)
{
        union bpf_attr attr;
        int ret;

        if (!OPTS_VALID(opts, bpf_prog_bind_opts))
                return libbpf_err(-EINVAL);

        memset(&attr, 0, sizeof(attr));
        attr.prog_bind_map.prog_fd = prog_fd;
        attr.prog_bind_map.map_fd = map_fd;
        attr.prog_bind_map.flags = OPTS_GET(opts, flags, 0);

        ret = sys_bpf(BPF_PROG_BIND_MAP, &attr, sizeof(attr));
        return libbpf_err_errno(ret);
}
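/*
 * Illustrative object-introspection sketch using the ID iterators above
 * (not part of libbpf). Walks every loaded program and fetches its
 * bpf_prog_info; iteration ends when no next ID exists:
 *
 *      __u32 id = 0;
 *
 *      while (!bpf_prog_get_next_id(id, &id)) {
 *              struct bpf_prog_info info = {};
 *              __u32 len = sizeof(info);
 *              int fd = bpf_prog_get_fd_by_id(id);
 *
 *              if (fd < 0)
 *                      continue;
 *              if (!bpf_obj_get_info_by_fd(fd, &info, &len))
 *                      printf("prog id=%u name=%s\n", id, info.name);
 *              close(fd);
 *      }
 */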