/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */

/*
 * Common eBPF ELF object loading operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 */
#ifndef __LIBBPF_LIBBPF_H
#define __LIBBPF_LIBBPF_H

#include <stdarg.h>
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <sys/types.h>  // for size_t
#include <linux/bpf.h>

#include "libbpf_common.h"
#include "libbpf_legacy.h"

#ifdef __cplusplus
extern "C" {
#endif

LIBBPF_API __u32 libbpf_major_version(void);
LIBBPF_API __u32 libbpf_minor_version(void);
LIBBPF_API const char *libbpf_version_string(void);

enum libbpf_errno {
	__LIBBPF_ERRNO__START = 4000,

	/* Something wrong in libelf */
	LIBBPF_ERRNO__LIBELF = __LIBBPF_ERRNO__START,
	LIBBPF_ERRNO__FORMAT,	/* BPF object format invalid */
	LIBBPF_ERRNO__KVERSION,	/* Incorrect or no 'version' section */
	LIBBPF_ERRNO__ENDIAN,	/* Endian mismatch */
	LIBBPF_ERRNO__INTERNAL,	/* Internal error in libbpf */
	LIBBPF_ERRNO__RELOC,	/* Relocation failed */
	LIBBPF_ERRNO__LOAD,	/* Load program failure for unknown reason */
	LIBBPF_ERRNO__VERIFY,	/* Kernel verifier blocks program loading */
	LIBBPF_ERRNO__PROG2BIG,	/* Program too big */
	LIBBPF_ERRNO__KVER,	/* Incorrect kernel version */
	LIBBPF_ERRNO__PROGTYPE,	/* Kernel doesn't support this program type */
	LIBBPF_ERRNO__WRNGPID,	/* Wrong pid in netlink message */
	LIBBPF_ERRNO__INVSEQ,	/* Invalid netlink sequence */
	LIBBPF_ERRNO__NLPARSE,	/* netlink parsing error */
	__LIBBPF_ERRNO__END,
};

LIBBPF_API int libbpf_strerror(int err, char *buf, size_t size);

/**
 * @brief **libbpf_bpf_attach_type_str()** converts the provided attach type
 * value into a textual representation.
 * @param t The attach type.
 * @return Pointer to a static string identifying the attach type. NULL is
 * returned for unknown **bpf_attach_type** values.
 */
LIBBPF_API const char *libbpf_bpf_attach_type_str(enum bpf_attach_type t);

/**
 * @brief **libbpf_bpf_link_type_str()** converts the provided link type value
 * into a textual representation.
 * @param t The link type.
 * @return Pointer to a static string identifying the link type. NULL is
 * returned for unknown **bpf_link_type** values.
 */
LIBBPF_API const char *libbpf_bpf_link_type_str(enum bpf_link_type t);

/**
 * @brief **libbpf_bpf_map_type_str()** converts the provided map type value
 * into a textual representation.
 * @param t The map type.
 * @return Pointer to a static string identifying the map type. NULL is
 * returned for unknown **bpf_map_type** values.
 */
LIBBPF_API const char *libbpf_bpf_map_type_str(enum bpf_map_type t);

/**
 * @brief **libbpf_bpf_prog_type_str()** converts the provided program type
 * value into a textual representation.
 * @param t The program type.
 * @return Pointer to a static string identifying the program type. NULL is
 * returned for unknown **bpf_prog_type** values.
 */
LIBBPF_API const char *libbpf_bpf_prog_type_str(enum bpf_prog_type t);
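/* Usage sketch (illustrative only, not part of the API): turning libbpf
 * error codes and BPF enum values into readable strings with the helpers
 * declared above.
 *
 *	char buf[128];
 *
 *	libbpf_strerror(-LIBBPF_ERRNO__VERIFY, buf, sizeof(buf));
 *	fprintf(stderr, "load failed: %s\n", buf);
 *	printf("prog type: %s\n", libbpf_bpf_prog_type_str(BPF_PROG_TYPE_XDP));
 */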
enum libbpf_print_level {
	LIBBPF_WARN,
	LIBBPF_INFO,
	LIBBPF_DEBUG,
};

typedef int (*libbpf_print_fn_t)(enum libbpf_print_level level,
				 const char *, va_list ap);

/**
 * @brief **libbpf_set_print()** sets user-provided log callback function to
 * be used for libbpf warnings and informational messages.
 * @param fn The log print function. If NULL, libbpf won't print anything.
 * @return Pointer to old print function.
 *
 * This function is thread-safe.
 */
LIBBPF_API libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn);
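/* Usage sketch (illustrative only): a print callback that matches
 * libbpf_print_fn_t and suppresses debug output; vfprintf() is standard C.
 *
 *	static int my_print(enum libbpf_print_level level,
 *			    const char *fmt, va_list args)
 *	{
 *		if (level == LIBBPF_DEBUG)
 *			return 0;
 *		return vfprintf(stderr, fmt, args);
 *	}
 *
 *	libbpf_set_print(my_print);
 */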
/* Hide internal to user */
struct bpf_object;

struct bpf_object_open_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* object name override, if provided:
	 * - for object open from file, this will override setting object
	 *   name from file path's base name;
	 * - for object open from memory buffer, this will specify an object
	 *   name and will override default "<addr>-<buf-size>" name;
	 */
	const char *object_name;
	/* parse map definitions non-strictly, allowing extra attributes/data */
	bool relaxed_maps;
	/* maps that set the 'pinning' attribute in their definition will have
	 * their pin_path attribute set to a file in this directory, and be
	 * auto-pinned to that path on load; defaults to "/sys/fs/bpf".
	 */
	const char *pin_root_path;

	__u32 :32; /* stub out now-removed attach_prog_fd */

	/* Additional kernel config content that augments and overrides
	 * system Kconfig for CONFIG_xxx externs.
	 */
	const char *kconfig;
	/* Path to the custom BTF to be used for BPF CO-RE relocations.
	 * This custom BTF completely replaces the use of vmlinux BTF
	 * for the purpose of CO-RE relocations.
	 * NOTE: any other BPF feature (e.g., fentry/fexit programs,
	 * struct_ops, etc) will need actual kernel BTF at /sys/kernel/btf/vmlinux.
	 */
	const char *btf_custom_path;
	/* Pointer to a buffer for storing kernel logs for applicable BPF
	 * commands. A valid kernel_log_size has to be specified as well; both
	 * are passed through to the bpf() syscall. Keep in mind that the
	 * kernel might fail the operation with -ENOSPC error if the provided
	 * buffer is too small to contain the entire log output.
	 * See the comment below for kernel_log_level for interaction between
	 * log_buf and log_level settings.
	 *
	 * If specified, this log buffer will be passed for:
	 *   - each BPF program load (BPF_PROG_LOAD) attempt, unless overridden
	 *     with bpf_program__set_log_buf() on per-program level, to get
	 *     BPF verifier log output.
	 *   - during BPF object's BTF load into kernel (BPF_BTF_LOAD) to get
	 *     BTF sanity checking log.
	 *
	 * Each BPF command (BPF_BTF_LOAD or BPF_PROG_LOAD) will overwrite
	 * previous contents, so if you need more fine-grained control, set
	 * per-program buffer with bpf_program__set_log_buf() to preserve each
	 * individual program's verification log. Keep using kernel_log_buf
	 * for BTF verification log, if necessary.
	 */
	char *kernel_log_buf;
	size_t kernel_log_size;
	/*
	 * Log level can be set independently from log buffer. log_level=0
	 * means that libbpf will attempt loading BTF or program without any
	 * logging requested, but will retry with either its own or custom log
	 * buffer, if provided, and log_level=1 on any error.
	 * And vice versa, setting log_level>0 will request BTF or prog
	 * loading with verbose log from the first attempt (and as such also
	 * for successfully loaded BTF or program), and the actual log buffer
	 * could be either libbpf's own auto-allocated log buffer, if
	 * kernel_log_buf is NULL, or user-provided custom kernel_log_buf.
	 * If user didn't provide custom log buffer, libbpf will emit captured
	 * logs through its print callback.
	 */
	__u32 kernel_log_level;
	/* Path to BPF FS mount point to derive BPF token from.
	 *
	 * Created BPF token will be used for all bpf() syscall operations
	 * that accept BPF token (e.g., map creation, BTF and program loads,
	 * etc) automatically within instantiated BPF object.
	 *
	 * If bpf_token_path is not specified, libbpf will consult
	 * LIBBPF_BPF_TOKEN_PATH environment variable. If set, it will be
	 * taken as a value of bpf_token_path option and will force libbpf to
	 * either create BPF token from provided custom BPF FS path, or will
	 * disable implicit BPF token creation, if envvar value is an empty
	 * string. bpf_token_path overrides LIBBPF_BPF_TOKEN_PATH, if both are
	 * set at the same time.
	 *
	 * Setting bpf_token_path option to empty string disables libbpf's
	 * automatic attempt to create BPF token from default BPF FS mount
	 * point (/sys/fs/bpf), in case this default behavior is undesirable.
	 */
	const char *bpf_token_path;

	size_t :0;
};
#define bpf_object_open_opts__last_field bpf_token_path

/**
 * @brief **bpf_object__open()** creates a bpf_object by opening
 * the BPF ELF object file pointed to by the passed path and loading it
 * into memory.
 * @param path BPF object file path.
 * @return pointer to the new bpf_object; or NULL is returned on error,
 * error code is stored in errno
 */
LIBBPF_API struct bpf_object *bpf_object__open(const char *path);

/**
 * @brief **bpf_object__open_file()** creates a bpf_object by opening
 * the BPF ELF object file pointed to by the passed path and loading it
 * into memory.
 * @param path BPF object file path
 * @param opts options for how to load the bpf object, this parameter is
 * optional and can be set to NULL
 * @return pointer to the new bpf_object; or NULL is returned on error,
 * error code is stored in errno
 */
LIBBPF_API struct bpf_object *
bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts);

/**
 * @brief **bpf_object__open_mem()** creates a bpf_object by reading
 * the BPF object's raw bytes from a memory buffer containing a valid
 * BPF ELF object file.
 * @param obj_buf pointer to the buffer containing ELF file bytes
 * @param obj_buf_sz number of bytes in the buffer
 * @param opts options for how to load the bpf object
 * @return pointer to the new bpf_object; or NULL is returned on error,
 * error code is stored in errno
 */
LIBBPF_API struct bpf_object *
bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
		     const struct bpf_object_open_opts *opts);
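/* Usage sketch (illustrative only; "prog.bpf.o" and "my_obj" are
 * hypothetical): opening an object with options via the LIBBPF_OPTS()
 * helper from libbpf_common.h.
 *
 *	LIBBPF_OPTS(bpf_object_open_opts, opts,
 *		.object_name = "my_obj",
 *		.pin_root_path = "/sys/fs/bpf/my_app",
 *	);
 *	struct bpf_object *obj;
 *
 *	obj = bpf_object__open_file("prog.bpf.o", &opts);
 *	if (!obj)
 *		fprintf(stderr, "open failed: %d\n", -errno);
 */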
/**
 * @brief **bpf_object__load()** loads BPF object into kernel.
 * @param obj Pointer to a valid BPF object instance returned by
 * **bpf_object__open*()** APIs
 * @return 0, on success; negative error code, otherwise (error code is
 * also stored in errno)
 */
LIBBPF_API int bpf_object__load(struct bpf_object *obj);

/**
 * @brief **bpf_object__close()** closes a BPF object and releases all
 * resources.
 * @param obj Pointer to a valid BPF object
 */
LIBBPF_API void bpf_object__close(struct bpf_object *obj);

/**
 * @brief **bpf_object__pin_maps()** pins each map contained within
 * the BPF object at the passed directory.
 * @param obj Pointer to a valid BPF object
 * @param path A directory where maps should be pinned.
 * @return 0, on success; negative error code, otherwise
 *
 * If `path` is NULL `bpf_map__pin` (which is being used on each map)
 * will use the pin_path attribute of each map. In this case, maps that
 * don't have a pin_path set will be ignored.
 */
LIBBPF_API int bpf_object__pin_maps(struct bpf_object *obj, const char *path);

/**
 * @brief **bpf_object__unpin_maps()** unpins each map contained within
 * the BPF object found in the passed directory.
 * @param obj Pointer to a valid BPF object
 * @param path A directory where pinned maps should be searched for.
 * @return 0, on success; negative error code, otherwise
 *
 * If `path` is NULL `bpf_map__unpin` (which is being used on each map)
 * will use the pin_path attribute of each map. In this case, maps that
 * don't have a pin_path set will be ignored.
 */
LIBBPF_API int bpf_object__unpin_maps(struct bpf_object *obj,
				      const char *path);
LIBBPF_API int bpf_object__pin_programs(struct bpf_object *obj,
					const char *path);
LIBBPF_API int bpf_object__unpin_programs(struct bpf_object *obj,
					  const char *path);
LIBBPF_API int bpf_object__pin(struct bpf_object *object, const char *path);
LIBBPF_API int bpf_object__unpin(struct bpf_object *object, const char *path);

LIBBPF_API const char *bpf_object__name(const struct bpf_object *obj);
LIBBPF_API unsigned int bpf_object__kversion(const struct bpf_object *obj);
LIBBPF_API int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version);

struct btf;
LIBBPF_API struct btf *bpf_object__btf(const struct bpf_object *obj);
LIBBPF_API int bpf_object__btf_fd(const struct bpf_object *obj);

LIBBPF_API struct bpf_program *
bpf_object__find_program_by_name(const struct bpf_object *obj,
				 const char *name);

LIBBPF_API int
libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
			 enum bpf_attach_type *expected_attach_type);
LIBBPF_API int libbpf_attach_type_by_name(const char *name,
					  enum bpf_attach_type *attach_type);
LIBBPF_API int libbpf_find_vmlinux_btf_id(const char *name,
					  enum bpf_attach_type attach_type);

/* Accessors of bpf_program */
struct bpf_program;

LIBBPF_API struct bpf_program *
bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prog);

#define bpf_object__for_each_program(pos, obj)			\
	for ((pos) = bpf_object__next_program((obj), NULL);	\
	     (pos) != NULL;					\
	     (pos) = bpf_object__next_program((obj), (pos)))

LIBBPF_API struct bpf_program *
bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *prog);

LIBBPF_API void bpf_program__set_ifindex(struct bpf_program *prog,
					 __u32 ifindex);
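/* Usage sketch (illustrative only; "prog.bpf.o" is a hypothetical path):
 * the typical open/load/close lifecycle, iterating programs with the
 * bpf_object__for_each_program() macro defined above.
 *
 *	struct bpf_object *obj = bpf_object__open("prog.bpf.o");
 *	struct bpf_program *prog;
 *
 *	if (!obj)
 *		return -errno;
 *	if (bpf_object__load(obj)) {
 *		bpf_object__close(obj);
 *		return -1;
 *	}
 *	bpf_object__for_each_program(prog, obj)
 *		printf("loaded %s (fd=%d)\n", bpf_program__name(prog),
 *		       bpf_program__fd(prog));
 *	bpf_object__close(obj);
 */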
LIBBPF_API const char *bpf_program__name(const struct bpf_program *prog);
LIBBPF_API const char *bpf_program__section_name(const struct bpf_program *prog);
LIBBPF_API bool bpf_program__autoload(const struct bpf_program *prog);
LIBBPF_API int bpf_program__set_autoload(struct bpf_program *prog, bool autoload);
LIBBPF_API bool bpf_program__autoattach(const struct bpf_program *prog);
LIBBPF_API void bpf_program__set_autoattach(struct bpf_program *prog, bool autoattach);

struct bpf_insn;

/**
 * @brief **bpf_program__insns()** gives read-only access to BPF program's
 * underlying BPF instructions.
 * @param prog BPF program for which to return instructions
 * @return a pointer to an array of BPF instructions that belong to the
 * specified BPF program
 *
 * Returned pointer is always valid and not NULL. Number of `struct bpf_insn`
 * pointed to can be fetched using **bpf_program__insn_cnt()** API.
 *
 * Keep in mind, libbpf can modify and append/delete BPF program's
 * instructions as it processes BPF object file and prepares everything for
 * uploading into the kernel. So depending on the point in BPF object
 * lifetime, **bpf_program__insns()** can return different sets of
 * instructions. As an example, during BPF object load phase BPF program
 * instructions will be CO-RE-relocated, BPF subprogram instructions will be
 * appended, ldimm64 instructions will have FDs embedded, etc. So instructions
 * returned before **bpf_object__load()** and after it might be quite
 * different.
 */
LIBBPF_API const struct bpf_insn *bpf_program__insns(const struct bpf_program *prog);

/**
 * @brief **bpf_program__set_insns()** can set BPF program's underlying
 * BPF instructions.
 *
 * WARNING: This is a very advanced libbpf API and users need to know
 * what they are doing. This should be used from prog_prepare_load_fn
 * callback only.
 *
 * @param prog BPF program whose instructions are to be replaced
 * @param new_insns a pointer to an array of BPF instructions
 * @param new_insn_cnt number of `struct bpf_insn`'s that form
 * specified BPF program
 * @return 0, on success; negative error code, otherwise
 */
LIBBPF_API int bpf_program__set_insns(struct bpf_program *prog,
				      struct bpf_insn *new_insns, size_t new_insn_cnt);

/**
 * @brief **bpf_program__insn_cnt()** returns number of `struct bpf_insn`'s
 * that form specified BPF program.
 * @param prog BPF program for which to return number of BPF instructions
 *
 * See **bpf_program__insns()** documentation for notes on how libbpf can
 * change instructions and their count during different phases of
 * **bpf_object** lifetime.
 */
LIBBPF_API size_t bpf_program__insn_cnt(const struct bpf_program *prog);

LIBBPF_API int bpf_program__fd(const struct bpf_program *prog);
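/* Usage sketch (illustrative only): inspecting a program's instructions.
 * Note that the returned array may differ before and after
 * bpf_object__load(), as described above.
 *
 *	const struct bpf_insn *insns = bpf_program__insns(prog);
 *	size_t i, cnt = bpf_program__insn_cnt(prog);
 *
 *	for (i = 0; i < cnt; i++)
 *		printf("insn %zu: code=0x%02x\n", i, insns[i].code);
 */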
/**
 * @brief **bpf_program__pin()** pins the BPF program to a file
 * in the BPF FS specified by a path. This increments the program's
 * reference count, allowing it to stay loaded after the process
 * which loaded it has exited.
 *
 * @param prog BPF program to pin, must already be loaded
 * @param path file path in a BPF file system
 * @return 0, on success; negative error code, otherwise
 */
LIBBPF_API int bpf_program__pin(struct bpf_program *prog, const char *path);

/**
 * @brief **bpf_program__unpin()** unpins the BPF program from a file
 * in the BPFFS specified by a path. This decrements the program's
 * reference count.
 *
 * The file pinning the BPF program can also be unlinked by a different
 * process in which case this function will return an error.
 *
 * @param prog BPF program to unpin
 * @param path file path to the pin in a BPF file system
 * @return 0, on success; negative error code, otherwise
 */
LIBBPF_API int bpf_program__unpin(struct bpf_program *prog, const char *path);
LIBBPF_API void bpf_program__unload(struct bpf_program *prog);

struct bpf_link;

LIBBPF_API struct bpf_link *bpf_link__open(const char *path);
LIBBPF_API int bpf_link__fd(const struct bpf_link *link);
LIBBPF_API const char *bpf_link__pin_path(const struct bpf_link *link);

/**
 * @brief **bpf_link__pin()** pins the BPF link to a file
 * in the BPF FS specified by a path. This increments the link's
 * reference count, allowing it to stay loaded after the process
 * which loaded it has exited.
 *
 * @param link BPF link to pin, must already be loaded
 * @param path file path in a BPF file system
 * @return 0, on success; negative error code, otherwise
 */
LIBBPF_API int bpf_link__pin(struct bpf_link *link, const char *path);

/**
 * @brief **bpf_link__unpin()** unpins the BPF link from the file
 * in the BPFFS that it was pinned to. This decrements the link's
 * reference count.
 *
 * The file pinning the BPF link can also be unlinked by a different
 * process in which case this function will return an error.
 *
 * @param link BPF link to unpin
 * @return 0, on success; negative error code, otherwise
 */
LIBBPF_API int bpf_link__unpin(struct bpf_link *link);
LIBBPF_API int bpf_link__update_program(struct bpf_link *link,
					struct bpf_program *prog);
LIBBPF_API void bpf_link__disconnect(struct bpf_link *link);
LIBBPF_API int bpf_link__detach(struct bpf_link *link);
LIBBPF_API int bpf_link__destroy(struct bpf_link *link);
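/* Usage sketch (illustrative only; the pin path is hypothetical): pinning
 * a loaded program so it survives process exit, then unpinning it.
 *
 *	if (bpf_program__pin(prog, "/sys/fs/bpf/my_prog"))
 *		fprintf(stderr, "pin failed: %d\n", -errno);
 *	bpf_program__unpin(prog, "/sys/fs/bpf/my_prog");
 */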
/**
 * @brief **bpf_program__attach()** is a generic function for attaching
 * a BPF program based on auto-detection of program type, attach type,
 * and extra parameters, where applicable.
 *
 * @param prog BPF program to attach
 * @return Reference to the newly created BPF link; or NULL is returned on error,
 * error code is stored in errno
 *
 * This is supported for:
 *   - kprobe/kretprobe (depends on SEC() definition)
 *   - uprobe/uretprobe (depends on SEC() definition)
 *   - tracepoint
 *   - raw tracepoint
 *   - tracing programs (typed raw TP/fentry/fexit/fmod_ret)
 */
LIBBPF_API struct bpf_link *
bpf_program__attach(const struct bpf_program *prog);

struct bpf_perf_event_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* custom user-provided value fetchable through bpf_get_attach_cookie() */
	__u64 bpf_cookie;
	/* don't use BPF link when attaching BPF program */
	bool force_ioctl_attach;
	size_t :0;
};
#define bpf_perf_event_opts__last_field force_ioctl_attach

LIBBPF_API struct bpf_link *
bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd);

LIBBPF_API struct bpf_link *
bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd,
				    const struct bpf_perf_event_opts *opts);

/**
 * enum probe_attach_mode - the mode to attach kprobe/uprobe
 *
 * force libbpf to attach kprobe/uprobe in specific mode, -ENOTSUP will
 * be returned if it is not supported by the kernel.
 */
enum probe_attach_mode {
	/* attach probe in latest supported mode by kernel */
	PROBE_ATTACH_MODE_DEFAULT = 0,
	/* attach probe in legacy mode, using debugfs/tracefs */
	PROBE_ATTACH_MODE_LEGACY,
	/* create perf event with perf_event_open() syscall */
	PROBE_ATTACH_MODE_PERF,
	/* attach probe with BPF link */
	PROBE_ATTACH_MODE_LINK,
};

struct bpf_kprobe_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* custom user-provided value fetchable through bpf_get_attach_cookie() */
	__u64 bpf_cookie;
	/* function's offset to install kprobe to */
	size_t offset;
	/* kprobe is return probe */
	bool retprobe;
	/* kprobe attach mode */
	enum probe_attach_mode attach_mode;
	size_t :0;
};
#define bpf_kprobe_opts__last_field attach_mode

LIBBPF_API struct bpf_link *
bpf_program__attach_kprobe(const struct bpf_program *prog, bool retprobe,
			   const char *func_name);
LIBBPF_API struct bpf_link *
bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
				const char *func_name,
				const struct bpf_kprobe_opts *opts);

struct bpf_kprobe_multi_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* array of function symbols to attach */
	const char **syms;
	/* array of function addresses to attach */
	const unsigned long *addrs;
	/* array of user-provided values fetchable through bpf_get_attach_cookie */
	const __u64 *cookies;
	/* number of elements in syms/addrs/cookies arrays */
	size_t cnt;
	/* create return kprobes */
	bool retprobe;
	/* create session kprobes */
	bool session;
	size_t :0;
};

#define bpf_kprobe_multi_opts__last_field session

LIBBPF_API struct bpf_link *
bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
				      const char *pattern,
				      const struct bpf_kprobe_multi_opts *opts);
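/* Usage sketch (illustrative only; the kernel function name is just an
 * example): attaching a kprobe with a BPF cookie via the options variant.
 *
 *	LIBBPF_OPTS(bpf_kprobe_opts, opts,
 *		.bpf_cookie = 0x1234,
 *		.retprobe = false,
 *	);
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_kprobe_opts(prog, "do_sys_openat2", &opts);
 *	if (!link)
 *		fprintf(stderr, "attach failed: %d\n", -errno);
 */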
struct bpf_uprobe_multi_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* array of function symbols to attach to */
	const char **syms;
	/* array of function offsets to attach to */
	const unsigned long *offsets;
	/* optional, array of associated ref counter offsets */
	const unsigned long *ref_ctr_offsets;
	/* optional, array of associated BPF cookies */
	const __u64 *cookies;
	/* number of elements in syms/offsets/ref_ctr_offsets/cookies arrays */
	size_t cnt;
	/* create return uprobes */
	bool retprobe;
	size_t :0;
};

#define bpf_uprobe_multi_opts__last_field retprobe

/**
 * @brief **bpf_program__attach_uprobe_multi()** attaches a BPF program
 * to multiple uprobes with uprobe_multi link.
 *
 * User can specify 2 mutually exclusive sets of inputs:
 *
 * 1) use only path/func_pattern/pid arguments
 *
 * 2) use path/pid with allowed combinations of
 *    syms/offsets/ref_ctr_offsets/cookies/cnt
 *
 *    - syms and offsets are mutually exclusive
 *    - ref_ctr_offsets and cookies are optional
 *
 * @param prog BPF program to attach
 * @param pid Process ID to attach the uprobe to, 0 for self (own process),
 * -1 for all processes
 * @param binary_path Path to binary
 * @param func_pattern Regular expression to specify functions to attach
 * BPF program to
 * @param opts Additional options (see **struct bpf_uprobe_multi_opts**)
 * @return Reference to the newly created BPF link; or NULL is returned on
 * error, error code is stored in errno
 */
LIBBPF_API struct bpf_link *
bpf_program__attach_uprobe_multi(const struct bpf_program *prog,
				 pid_t pid,
				 const char *binary_path,
				 const char *func_pattern,
				 const struct bpf_uprobe_multi_opts *opts);
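/* Usage sketch (illustrative only; binary path and pattern are
 * hypothetical): attaching one program to all matching functions across
 * all processes using the path/func_pattern/pid form.
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_uprobe_multi(prog, -1,
 *						"/usr/lib/libc.so.6",
 *						"malloc*", NULL);
 *	if (!link)
 *		fprintf(stderr, "attach failed: %d\n", -errno);
 */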
struct bpf_ksyscall_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* custom user-provided value fetchable through bpf_get_attach_cookie() */
	__u64 bpf_cookie;
	/* attach as return probe? */
	bool retprobe;
	size_t :0;
};
#define bpf_ksyscall_opts__last_field retprobe

/**
 * @brief **bpf_program__attach_ksyscall()** attaches a BPF program
 * to kernel syscall handler of a specified syscall. Optionally it's possible
 * to request to install retprobe that will be triggered at syscall exit. It's
 * also possible to associate BPF cookie (through options).
 *
 * Libbpf will automatically determine the correct full kernel function name,
 * which depending on system architecture and kernel version/configuration
 * could be of the form __<arch>_sys_<syscall> or __se_sys_<syscall>, and will
 * attach specified program using kprobe/kretprobe mechanism.
 *
 * **bpf_program__attach_ksyscall()** is an API counterpart of declarative
 * **SEC("ksyscall/<syscall>")** annotation of BPF programs.
 *
 * At the moment **SEC("ksyscall")** and **bpf_program__attach_ksyscall()** do
 * not handle all the calling convention quirks for mmap(), clone() and compat
 * syscalls. It also only attaches to "native" syscall interfaces. If host
 * system supports compat syscalls or defines 32-bit syscalls in 64-bit
 * kernel, such syscall interfaces won't be attached to by libbpf.
 *
 * These limitations may or may not change in the future. Therefore it is
 * recommended to use SEC("kprobe") for these syscalls or if working with
 * compat and 32-bit interfaces is required.
 *
 * @param prog BPF program to attach
 * @param syscall_name Symbolic name of the syscall (e.g., "bpf")
 * @param opts Additional options (see **struct bpf_ksyscall_opts**)
 * @return Reference to the newly created BPF link; or NULL is returned on
 * error, error code is stored in errno
 */
LIBBPF_API struct bpf_link *
bpf_program__attach_ksyscall(const struct bpf_program *prog,
			     const char *syscall_name,
			     const struct bpf_ksyscall_opts *opts);

struct bpf_uprobe_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* offset of kernel reference counted USDT semaphore, added in
	 * a6ca88b241d5 ("trace_uprobe: support reference counter in fd-based uprobe")
	 */
	size_t ref_ctr_offset;
	/* custom user-provided value fetchable through bpf_get_attach_cookie() */
	__u64 bpf_cookie;
	/* uprobe is return probe, invoked at function return time */
	bool retprobe;
	/* Function name to attach to. Could be an unqualified ("abc") or
	 * library-qualified ("abc@LIBXYZ") name. To specify function entry,
	 * func_name should be set while func_offset argument to
	 * bpf_program__attach_uprobe_opts() should be 0. To trace an offset
	 * within a function, specify func_name and use func_offset argument
	 * to specify offset within the function. Shared library functions
	 * must specify the shared library binary_path.
	 */
	const char *func_name;
	/* uprobe attach mode */
	enum probe_attach_mode attach_mode;
	size_t :0;
};
#define bpf_uprobe_opts__last_field attach_mode

/**
 * @brief **bpf_program__attach_uprobe()** attaches a BPF program
 * to the userspace function which is found by binary path and
 * offset. You can optionally specify a particular process to attach
 * to. You can also optionally attach the program to the function
 * exit instead of entry.
 *
 * @param prog BPF program to attach
 * @param retprobe Attach to function exit
 * @param pid Process ID to attach the uprobe to, 0 for self (own process),
 * -1 for all processes
 * @param binary_path Path to binary that contains the function symbol
 * @param func_offset Offset within the binary of the function symbol
 * @return Reference to the newly created BPF link; or NULL is returned on error,
 * error code is stored in errno
 */
LIBBPF_API struct bpf_link *
bpf_program__attach_uprobe(const struct bpf_program *prog, bool retprobe,
			   pid_t pid, const char *binary_path,
			   size_t func_offset);
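/* Usage sketch (illustrative only; the path and offset are hypothetical):
 * attaching to function entry at a known offset in a binary, across all
 * processes.
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_uprobe(prog, false, -1,
 *					  "/usr/bin/myapp", 0x1234);
 *	if (!link)
 *		fprintf(stderr, "attach failed: %d\n", -errno);
 */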
/**
 * @brief **bpf_program__attach_uprobe_opts()** is just like
 * bpf_program__attach_uprobe() except with an options struct
 * for various configurations.
 *
 * @param prog BPF program to attach
 * @param pid Process ID to attach the uprobe to, 0 for self (own process),
 * -1 for all processes
 * @param binary_path Path to binary that contains the function symbol
 * @param func_offset Offset within the binary of the function symbol
 * @param opts Options for altering program attachment
 * @return Reference to the newly created BPF link; or NULL is returned on error,
 * error code is stored in errno
 */
LIBBPF_API struct bpf_link *
bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
				const char *binary_path, size_t func_offset,
				const struct bpf_uprobe_opts *opts);

struct bpf_usdt_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* custom user-provided value accessible through bpf_usdt_cookie() */
	__u64 usdt_cookie;
	size_t :0;
};
#define bpf_usdt_opts__last_field usdt_cookie

/**
 * @brief **bpf_program__attach_usdt()** is just like
 * bpf_program__attach_uprobe_opts() except it covers USDT (User-space
 * Statically Defined Tracepoint) attachment, instead of attaching to
 * user-space function entry or exit.
 *
 * @param prog BPF program to attach
 * @param pid Process ID to attach the uprobe to, 0 for self (own process),
 * -1 for all processes
 * @param binary_path Path to binary that contains provided USDT probe
 * @param usdt_provider USDT provider name
 * @param usdt_name USDT probe name
 * @param opts Options for altering program attachment
 * @return Reference to the newly created BPF link; or NULL is returned on error,
 * error code is stored in errno
 */
LIBBPF_API struct bpf_link *
bpf_program__attach_usdt(const struct bpf_program *prog,
			 pid_t pid, const char *binary_path,
			 const char *usdt_provider, const char *usdt_name,
			 const struct bpf_usdt_opts *opts);

struct bpf_tracepoint_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* custom user-provided value fetchable through bpf_get_attach_cookie() */
	__u64 bpf_cookie;
};
#define bpf_tracepoint_opts__last_field bpf_cookie

LIBBPF_API struct bpf_link *
bpf_program__attach_tracepoint(const struct bpf_program *prog,
			       const char *tp_category,
			       const char *tp_name);
LIBBPF_API struct bpf_link *
bpf_program__attach_tracepoint_opts(const struct bpf_program *prog,
				    const char *tp_category,
				    const char *tp_name,
				    const struct bpf_tracepoint_opts *opts);

struct bpf_raw_tracepoint_opts {
	size_t sz; /* size of this struct for forward/backward compatibility */
	__u64 cookie;
	size_t :0;
};
#define bpf_raw_tracepoint_opts__last_field cookie

LIBBPF_API struct bpf_link *
bpf_program__attach_raw_tracepoint(const struct bpf_program *prog,
				   const char *tp_name);
LIBBPF_API struct bpf_link *
bpf_program__attach_raw_tracepoint_opts(const struct bpf_program *prog,
					const char *tp_name,
					struct bpf_raw_tracepoint_opts *opts);

struct bpf_trace_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* custom user-provided value fetchable through bpf_get_attach_cookie() */
	__u64 cookie;
};
#define bpf_trace_opts__last_field cookie

LIBBPF_API struct bpf_link *
bpf_program__attach_trace(const struct bpf_program *prog);
LIBBPF_API struct bpf_link *
bpf_program__attach_trace_opts(const struct bpf_program *prog,
			       const struct bpf_trace_opts *opts);
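/* Usage sketch (illustrative only; the tracepoint shown is just an
 * example): attaching to a classic tracepoint by category and name.
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_tracepoint(prog, "syscalls",
 *					      "sys_enter_openat");
 *	if (!link)
 *		fprintf(stderr, "attach failed: %d\n", -errno);
 */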
LIBBPF_API struct bpf_link *
bpf_program__attach_lsm(const struct bpf_program *prog);
LIBBPF_API struct bpf_link *
bpf_program__attach_cgroup(const struct bpf_program *prog, int cgroup_fd);
LIBBPF_API struct bpf_link *
bpf_program__attach_netns(const struct bpf_program *prog, int netns_fd);
LIBBPF_API struct bpf_link *
bpf_program__attach_sockmap(const struct bpf_program *prog, int map_fd);
LIBBPF_API struct bpf_link *
bpf_program__attach_xdp(const struct bpf_program *prog, int ifindex);
LIBBPF_API struct bpf_link *
bpf_program__attach_freplace(const struct bpf_program *prog,
			     int target_fd, const char *attach_func_name);

struct bpf_netfilter_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;

	__u32 pf;
	__u32 hooknum;
	__s32 priority;
	__u32 flags;
};
#define bpf_netfilter_opts__last_field flags

LIBBPF_API struct bpf_link *
bpf_program__attach_netfilter(const struct bpf_program *prog,
			      const struct bpf_netfilter_opts *opts);

struct bpf_tcx_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	__u32 flags;
	__u32 relative_fd;
	__u32 relative_id;
	__u64 expected_revision;
	size_t :0;
};
#define bpf_tcx_opts__last_field expected_revision

LIBBPF_API struct bpf_link *
bpf_program__attach_tcx(const struct bpf_program *prog, int ifindex,
			const struct bpf_tcx_opts *opts);

struct bpf_netkit_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	__u32 flags;
	__u32 relative_fd;
	__u32 relative_id;
	__u64 expected_revision;
	size_t :0;
};
#define bpf_netkit_opts__last_field expected_revision

LIBBPF_API struct bpf_link *
bpf_program__attach_netkit(const struct bpf_program *prog, int ifindex,
			   const struct bpf_netkit_opts *opts);

struct bpf_map;

LIBBPF_API struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map);
LIBBPF_API int bpf_link__update_map(struct bpf_link *link, const struct bpf_map *map);

struct bpf_iter_attach_opts {
	size_t sz; /* size of this struct for forward/backward compatibility */
	union bpf_iter_link_info *link_info;
	__u32 link_info_len;
};
#define bpf_iter_attach_opts__last_field link_info_len

LIBBPF_API struct bpf_link *
bpf_program__attach_iter(const struct bpf_program *prog,
			 const struct bpf_iter_attach_opts *opts);

LIBBPF_API enum bpf_prog_type bpf_program__type(const struct bpf_program *prog);

/**
 * @brief **bpf_program__set_type()** sets the program
 * type of the passed BPF program.
 * @param prog BPF program to set the program type for
 * @param type program type to set the BPF program to have
 * @return error code; or 0 if no error. An error occurs
 * if the object is already loaded.
 *
 * This must be called before the BPF object is loaded,
 * otherwise it has no effect and an error is returned.
 */
LIBBPF_API int bpf_program__set_type(struct bpf_program *prog,
				     enum bpf_prog_type type);

LIBBPF_API enum bpf_attach_type
bpf_program__expected_attach_type(const struct bpf_program *prog);

/**
 * @brief **bpf_program__set_expected_attach_type()** sets the
 * attach type of the passed BPF program. This is used for
 * auto-detection of attachment when programs are loaded.
 * @param prog BPF program to set the attach type for
 * @param type attach type to set the BPF program to have
 * @return error code; or 0 if no error. An error occurs
 * if the object is already loaded.
 *
 * This must be called before the BPF object is loaded,
 * otherwise it has no effect and an error is returned.
 */
LIBBPF_API int
bpf_program__set_expected_attach_type(struct bpf_program *prog,
				      enum bpf_attach_type type);
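/* Usage sketch (illustrative only): overriding program and attach types
 * before load, e.g. when the SEC() name alone does not determine them.
 * Both calls must happen before bpf_object__load().
 *
 *	bpf_program__set_type(prog, BPF_PROG_TYPE_CGROUP_SKB);
 *	bpf_program__set_expected_attach_type(prog, BPF_CGROUP_INET_EGRESS);
 */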
LIBBPF_API __u32 bpf_program__flags(const struct bpf_program *prog);
LIBBPF_API int bpf_program__set_flags(struct bpf_program *prog, __u32 flags);

/* Per-program log level and log buffer getters/setters.
 * See bpf_object_open_opts comments regarding log_level and log_buf
 * interactions.
 */
LIBBPF_API __u32 bpf_program__log_level(const struct bpf_program *prog);
LIBBPF_API int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_level);
LIBBPF_API const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size);
LIBBPF_API int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size);

/**
 * @brief **bpf_program__set_attach_target()** sets BTF-based attach target
 * for supported BPF program types:
 *   - BTF-aware raw tracepoints (tp_btf);
 *   - fentry/fexit/fmod_ret;
 *   - lsm;
 *   - freplace.
 * @param prog BPF program to set the attach target for
 * @param attach_prog_fd FD of the target BPF program, for freplace; or zero
 * to attach to a kernel entity identified by BTF
 * @param attach_func_name name of the function to attach to
 * @return 0 if no error occurred; negative error code, otherwise
 */
LIBBPF_API int
bpf_program__set_attach_target(struct bpf_program *prog, int attach_prog_fd,
			       const char *attach_func_name);

/**
 * @brief **bpf_object__find_map_by_name()** returns BPF map of
 * the given name, if it exists within the passed BPF object
 * @param obj BPF object
 * @param name name of the BPF map
 * @return BPF map instance, if such map exists within the BPF object;
 * or NULL otherwise.
 */
LIBBPF_API struct bpf_map *
bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name);

LIBBPF_API int
bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name);

LIBBPF_API struct bpf_map *
bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *map);

#define bpf_object__for_each_map(pos, obj)		\
	for ((pos) = bpf_object__next_map((obj), NULL);	\
	     (pos) != NULL;				\
	     (pos) = bpf_object__next_map((obj), (pos)))
#define bpf_map__for_each bpf_object__for_each_map

LIBBPF_API struct bpf_map *
bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *map);
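/* Usage sketch (illustrative only; "my_map" is hypothetical): looking up a
 * map by name and iterating all maps in an object.
 *
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, "my_map");
 *	struct bpf_map *m;
 *
 *	bpf_object__for_each_map(m, obj)
 *		printf("map: %s\n", bpf_map__name(m));
 */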
/**
 * @brief **bpf_map__set_autocreate()** sets whether libbpf has to auto-create
 * BPF map during BPF object load phase.
 * @param map the BPF map instance
 * @param autocreate whether to create BPF map during BPF object load
 * @return 0 on success; -EBUSY if BPF object was already loaded
 *
 * **bpf_map__set_autocreate()** allows the caller to opt out from libbpf
 * auto-creating a BPF map. By default, libbpf will attempt to create every
 * single BPF map defined in BPF object file using BPF_MAP_CREATE command of
 * bpf() syscall and fill in map FD in BPF instructions.
 *
 * This API allows opting out of this process for a specific map instance.
 * This can be useful if the host kernel doesn't support such a BPF map type
 * or the used combination of flags, and the user application wants to avoid
 * creating such a map in the first place. The user is still responsible for
 * making sure that their BPF-side code that expects to use such a missing
 * BPF map is recognized by BPF verifier as dead code, otherwise BPF verifier
 * will reject such a BPF program.
 */
LIBBPF_API int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate);
LIBBPF_API bool bpf_map__autocreate(const struct bpf_map *map);

/**
 * @brief **bpf_map__fd()** gets the file descriptor of the passed
 * BPF map
 * @param map the BPF map instance
 * @return the file descriptor; or -EINVAL in case of an error
 */
LIBBPF_API int bpf_map__fd(const struct bpf_map *map);
LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd);
/* get map name */
LIBBPF_API const char *bpf_map__name(const struct bpf_map *map);
/* get/set map type */
LIBBPF_API enum bpf_map_type bpf_map__type(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type);
/* get/set map size (max_entries) */
LIBBPF_API __u32 bpf_map__max_entries(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries);
/* get/set map flags */
LIBBPF_API __u32 bpf_map__map_flags(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags);
/* get/set map NUMA node */
LIBBPF_API __u32 bpf_map__numa_node(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node);
/* get/set map key size */
LIBBPF_API __u32 bpf_map__key_size(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_key_size(struct bpf_map *map, __u32 size);
/* get map value size */
LIBBPF_API __u32 bpf_map__value_size(const struct bpf_map *map);
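/* Usage sketch (illustrative only; "opt_map" is hypothetical): skipping
 * creation of a map the running kernel may not support; must be done
 * before bpf_object__load().
 *
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, "opt_map");
 *
 *	if (map)
 *		bpf_map__set_autocreate(map, false);
 */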
/**
 * @brief **bpf_map__set_value_size()** sets map value size.
 * @param map the BPF map instance
 * @return 0, on success; negative error, otherwise
 *
 * There is a special case for maps with associated memory-mapped regions, like
 * the global data section maps (bss, data, rodata). When this function is used
 * on such a map, the mapped region is resized. Afterward, an attempt is made to
 * adjust the corresponding BTF info. This attempt is best-effort and can only
 * succeed if the last variable of the data section map is an array. The array
 * BTF type is replaced by a new BTF array type with a different length.
 * Any previously existing pointers returned from bpf_map__initial_value() or
 * corresponding data section skeleton pointer must be reinitialized.
 */
LIBBPF_API int bpf_map__set_value_size(struct bpf_map *map, __u32 size);
/* get map key/value BTF type IDs */
LIBBPF_API __u32 bpf_map__btf_key_type_id(const struct bpf_map *map);
LIBBPF_API __u32 bpf_map__btf_value_type_id(const struct bpf_map *map);
/* get/set map ifindex */
LIBBPF_API __u32 bpf_map__ifindex(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex);
/* get/set map map_extra flags */
LIBBPF_API __u64 bpf_map__map_extra(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra);

LIBBPF_API int bpf_map__set_initial_value(struct bpf_map *map,
					  const void *data, size_t size);
LIBBPF_API void *bpf_map__initial_value(const struct bpf_map *map, size_t *psize);

/**
 * @brief **bpf_map__is_internal()** tells the caller whether or not the
 * passed map is a special map created by libbpf automatically for things like
 * global variables, __ksym externs, Kconfig values, etc.
 * @param map the bpf_map
 * @return true, if the map is an internal map; false, otherwise
 */
LIBBPF_API bool bpf_map__is_internal(const struct bpf_map *map);

/**
 * @brief **bpf_map__set_pin_path()** sets the path attribute that tells where the
 * BPF map should be pinned. This does not actually create the 'pin'.
 * @param map The bpf_map
 * @param path The path
 * @return 0, on success; negative error, otherwise
 */
LIBBPF_API int bpf_map__set_pin_path(struct bpf_map *map, const char *path);

/**
 * @brief **bpf_map__pin_path()** gets the path attribute that tells where the
 * BPF map should be pinned.
 * @param map The bpf_map
 * @return The path string; which can be NULL
 */
LIBBPF_API const char *bpf_map__pin_path(const struct bpf_map *map);

/**
 * @brief **bpf_map__is_pinned()** tells the caller whether or not the
 * passed map has been pinned via a 'pin' file.
 * @param map The bpf_map
 * @return true, if the map is pinned; false, otherwise
 */
LIBBPF_API bool bpf_map__is_pinned(const struct bpf_map *map);

/**
 * @brief **bpf_map__pin()** creates a file that serves as a 'pin'
 * for the BPF map. This increments the reference count on the
 * BPF map which will keep the BPF map loaded even after the
 * userspace process which loaded it has exited.
 * @param map The bpf_map to pin
 * @param path A file path for the 'pin'
 * @return 0, on success; negative error, otherwise
 *
 * If `path` is NULL the map's `pin_path` attribute will be used. If this is
 * also NULL, an error will be returned and the map will not be pinned.
 */
LIBBPF_API int bpf_map__pin(struct bpf_map *map, const char *path);
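/* Usage sketch (illustrative only; the pin path is hypothetical). Passing
 * NULL to bpf_map__pin() makes it fall back to the pin_path set earlier:
 *
 *	bpf_map__set_pin_path(map, "/sys/fs/bpf/my_map");
 *	if (bpf_map__pin(map, NULL))
 *		fprintf(stderr, "pin failed: %d\n", -errno);
 */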
/**
 * @brief **bpf_map__unpin()** removes the file that serves as a
 * 'pin' for the BPF map.
 * @param map The bpf_map to unpin
 * @param path A file path for the 'pin'
 * @return 0, on success; negative error, otherwise
 *
 * The `path` parameter can be NULL, in which case the `pin_path`
 * map attribute is unpinned. If both the `path` parameter and
 * `pin_path` map attribute are set, they must be equal.
 */
LIBBPF_API int bpf_map__unpin(struct bpf_map *map, const char *path);

LIBBPF_API int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd);
LIBBPF_API struct bpf_map *bpf_map__inner_map(struct bpf_map *map);

/**
 * @brief **bpf_map__lookup_elem()** looks up the BPF map value
 * corresponding to the provided key.
 * @param map BPF map to lookup element in
 * @param key pointer to memory containing bytes of the key used for lookup
 * @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size**
 * @param value pointer to memory in which looked up value will be stored
 * @param value_sz size in bytes of value data memory; it has to match BPF map
 * definition's **value_size**. For per-CPU BPF maps value size has to be
 * a product of BPF map value size and number of possible CPUs in the system
 * (could be fetched with **libbpf_num_possible_cpus()**). Note also that for
 * per-CPU values value size has to be aligned up to closest 8 bytes for
 * alignment reasons, so expected size is: `round_up(value_size, 8)
 * * libbpf_num_possible_cpus()`.
 * @param flags extra flags passed to kernel for this operation
 * @return 0, on success; negative error, otherwise
 *
 * **bpf_map__lookup_elem()** is high-level equivalent of
 * **bpf_map_lookup_elem()** API with added check for key and value size.
 */
LIBBPF_API int bpf_map__lookup_elem(const struct bpf_map *map,
				    const void *key, size_t key_sz,
				    void *value, size_t value_sz, __u64 flags);

/**
 * @brief **bpf_map__update_elem()** inserts or updates the value in the BPF
 * map that corresponds to the provided key.
 * @param map BPF map to insert to or update element in
 * @param key pointer to memory containing bytes of the key
 * @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size**
 * @param value pointer to memory containing bytes of the value
 * @param value_sz size in bytes of value data memory; it has to match BPF map
 * definition's **value_size**. For per-CPU BPF maps value size has to be
 * a product of BPF map value size and number of possible CPUs in the system
 * (could be fetched with **libbpf_num_possible_cpus()**). Note also that for
 * per-CPU values value size has to be aligned up to closest 8 bytes for
 * alignment reasons, so expected size is: `round_up(value_size, 8)
 * * libbpf_num_possible_cpus()`.
 * @param flags extra flags passed to kernel for this operation
 * @return 0, on success; negative error, otherwise
 *
 * **bpf_map__update_elem()** is high-level equivalent of
 * **bpf_map_update_elem()** API with added check for key and value size.
 */
LIBBPF_API int bpf_map__update_elem(const struct bpf_map *map,
				    const void *key, size_t key_sz,
				    const void *value, size_t value_sz, __u64 flags);
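/* Usage sketch (illustrative only): sized update and lookup against a map
 * with 4-byte keys and 8-byte values; the size checks described above make
 * mismatched sizes fail cleanly instead of corrupting memory.
 *
 *	__u32 key = 1;
 *	__u64 value = 42;
 *	int err;
 *
 *	err = bpf_map__update_elem(map, &key, sizeof(key),
 *				   &value, sizeof(value), BPF_ANY);
 *	if (!err)
 *		err = bpf_map__lookup_elem(map, &key, sizeof(key),
 *					   &value, sizeof(value), 0);
 */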
/**
 * @brief **bpf_map__delete_elem()** deletes the element in the BPF map that
 * corresponds to the provided key.
 * @param map BPF map to delete element from
 * @param key pointer to memory containing bytes of the key
 * @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size**
 * @param flags extra flags passed to kernel for this operation
 * @return 0, on success; negative error, otherwise
 *
 * **bpf_map__delete_elem()** is high-level equivalent of
 * **bpf_map_delete_elem()** API with added check for key size.
 */
LIBBPF_API int bpf_map__delete_elem(const struct bpf_map *map,
				    const void *key, size_t key_sz, __u64 flags);

/**
 * @brief **bpf_map__lookup_and_delete_elem()** looks up the BPF map value
 * corresponding to the provided key and atomically deletes it afterwards.
 * @param map BPF map to lookup element in
 * @param key pointer to memory containing bytes of the key used for lookup
 * @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size**
 * @param value pointer to memory in which looked up value will be stored
 * @param value_sz size in bytes of value data memory; it has to match BPF map
 * definition's **value_size**. For per-CPU BPF maps value size has to be
 * a product of BPF map value size and number of possible CPUs in the system
 * (could be fetched with **libbpf_num_possible_cpus()**). Note also that for
 * per-CPU values value size has to be aligned up to closest 8 bytes for
 * alignment reasons, so expected size is: `round_up(value_size, 8)
 * * libbpf_num_possible_cpus()`.
 * @param flags extra flags passed to kernel for this operation
 * @return 0, on success; negative error, otherwise
 *
 * **bpf_map__lookup_and_delete_elem()** is high-level equivalent of
 * **bpf_map_lookup_and_delete_elem()** API with added check for key and value size.
 */
LIBBPF_API int bpf_map__lookup_and_delete_elem(const struct bpf_map *map,
					       const void *key, size_t key_sz,
					       void *value, size_t value_sz, __u64 flags);

/**
 * @brief **bpf_map__get_next_key()** iterates BPF map keys by
 * fetching the next key that follows the current key.
 * @param map BPF map to fetch next key from
 * @param cur_key pointer to memory containing bytes of current key or NULL to
 * fetch the first key
 * @param next_key pointer to memory to write next key into
 * @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size**
 * @return 0, on success; -ENOENT if **cur_key** is the last key in BPF map;
 * negative error, otherwise
 *
 * **bpf_map__get_next_key()** is high-level equivalent of
 * **bpf_map_get_next_key()** API with added check for key size.
 */
LIBBPF_API int bpf_map__get_next_key(const struct bpf_map *map,
				     const void *cur_key, void *next_key, size_t key_sz);
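/* Usage sketch (illustrative only): walking all keys of a map with 4-byte
 * keys, starting from the first key (cur_key == NULL) until -ENOENT.
 *
 *	__u32 cur, next;
 *	int err = bpf_map__get_next_key(map, NULL, &next, sizeof(next));
 *
 *	while (!err) {
 *		cur = next;
 *		err = bpf_map__get_next_key(map, &cur, &next, sizeof(next));
 *	}
 */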
struct bpf_xdp_set_link_opts {
	size_t sz;
	int old_fd;
	size_t :0;
};
#define bpf_xdp_set_link_opts__last_field old_fd

struct bpf_xdp_attach_opts {
	size_t sz;
	int old_prog_fd;
	size_t :0;
};
#define bpf_xdp_attach_opts__last_field old_prog_fd

struct bpf_xdp_query_opts {
	size_t sz;
	__u32 prog_id;		/* output */
	__u32 drv_prog_id;	/* output */
	__u32 hw_prog_id;	/* output */
	__u32 skb_prog_id;	/* output */
	__u8 attach_mode;	/* output */
	__u64 feature_flags;	/* output */
	__u32 xdp_zc_max_segs;	/* output */
	size_t :0;
};
#define bpf_xdp_query_opts__last_field xdp_zc_max_segs

LIBBPF_API int bpf_xdp_attach(int ifindex, int prog_fd, __u32 flags,
			      const struct bpf_xdp_attach_opts *opts);
LIBBPF_API int bpf_xdp_detach(int ifindex, __u32 flags,
			      const struct bpf_xdp_attach_opts *opts);
LIBBPF_API int bpf_xdp_query(int ifindex, int flags, struct bpf_xdp_query_opts *opts);
LIBBPF_API int bpf_xdp_query_id(int ifindex, int flags, __u32 *prog_id);

/* TC related API */
enum bpf_tc_attach_point {
	BPF_TC_INGRESS = 1 << 0,
	BPF_TC_EGRESS = 1 << 1,
	BPF_TC_CUSTOM = 1 << 2,
};

#define BPF_TC_PARENT(a, b)	\
	((((a) << 16) & 0xFFFF0000U) | ((b) & 0x0000FFFFU))

enum bpf_tc_flags {
	BPF_TC_F_REPLACE = 1 << 0,
};

struct bpf_tc_hook {
	size_t sz;
	int ifindex;
	enum bpf_tc_attach_point attach_point;
	__u32 parent;
	size_t :0;
};
#define bpf_tc_hook__last_field parent

struct bpf_tc_opts {
	size_t sz;
	int prog_fd;
	__u32 flags;
	__u32 prog_id;
	__u32 handle;
	__u32 priority;
	size_t :0;
};
#define bpf_tc_opts__last_field priority

LIBBPF_API int bpf_tc_hook_create(struct bpf_tc_hook *hook);
LIBBPF_API int bpf_tc_hook_destroy(struct bpf_tc_hook *hook);
LIBBPF_API int bpf_tc_attach(const struct bpf_tc_hook *hook,
			     struct bpf_tc_opts *opts);
LIBBPF_API int bpf_tc_detach(const struct bpf_tc_hook *hook,
			     const struct bpf_tc_opts *opts);
LIBBPF_API int bpf_tc_query(const struct bpf_tc_hook *hook,
			    struct bpf_tc_opts *opts);

/* Ring buffer APIs */
struct ring_buffer;
struct ring;
struct user_ring_buffer;

typedef int (*ring_buffer_sample_fn)(void *ctx, void *data, size_t size);

struct ring_buffer_opts {
	size_t sz; /* size of this struct, for forward/backward compatibility */
};

#define ring_buffer_opts__last_field sz

LIBBPF_API struct ring_buffer *
ring_buffer__new(int map_fd, ring_buffer_sample_fn sample_cb, void *ctx,
		 const struct ring_buffer_opts *opts);
LIBBPF_API void ring_buffer__free(struct ring_buffer *rb);
LIBBPF_API int ring_buffer__add(struct ring_buffer *rb, int map_fd,
				ring_buffer_sample_fn sample_cb, void *ctx);
LIBBPF_API int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms);
LIBBPF_API int ring_buffer__consume(struct ring_buffer *rb);
LIBBPF_API int ring_buffer__consume_n(struct ring_buffer *rb, size_t n);
LIBBPF_API int ring_buffer__epoll_fd(const struct ring_buffer *rb);
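/* Usage sketch (illustrative only; handle_event is a hypothetical callback
 * matching ring_buffer_sample_fn): polling a BPF_MAP_TYPE_RINGBUF map.
 *
 *	struct ring_buffer *rb;
 *	int err = 0;
 *
 *	rb = ring_buffer__new(bpf_map__fd(map), handle_event, NULL, NULL);
 *	if (!rb)
 *		return -errno;
 *	while (err >= 0)
 *		err = ring_buffer__poll(rb, 100);
 *	ring_buffer__free(rb);
 */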
/**
 * @brief **ring_buffer__ring()** returns the ringbuffer object inside a given
 * ringbuffer manager representing a single BPF_MAP_TYPE_RINGBUF map instance.
 *
 * @param rb A ringbuffer manager object.
 * @param idx An index into the ringbuffers contained within the ringbuffer
 * manager object. The index is 0-based and corresponds to the order in which
 * ring_buffer__add was called.
 * @return A ringbuffer object on success; NULL and errno set if the index is
 * invalid.
 */
LIBBPF_API struct ring *ring_buffer__ring(struct ring_buffer *rb,
					  unsigned int idx);

/**
 * @brief **ring__consumer_pos()** returns the current consumer position in the
 * given ringbuffer.
 *
 * @param r A ringbuffer object.
 * @return The current consumer position.
 */
LIBBPF_API unsigned long ring__consumer_pos(const struct ring *r);

/**
 * @brief **ring__producer_pos()** returns the current producer position in the
 * given ringbuffer.
 *
 * @param r A ringbuffer object.
 * @return The current producer position.
 */
LIBBPF_API unsigned long ring__producer_pos(const struct ring *r);

/**
 * @brief **ring__avail_data_size()** returns the number of bytes in the
 * ringbuffer not yet consumed. This has no locking associated with it, so it
 * can be inaccurate if operations are ongoing while this is called. However, it
 * should still show the correct trend over the long-term.
 *
 * @param r A ringbuffer object.
 * @return The number of bytes not yet consumed.
 */
LIBBPF_API size_t ring__avail_data_size(const struct ring *r);

/**
 * @brief **ring__size()** returns the total size of the ringbuffer's map data
 * area (excluding special producer/consumer pages). Effectively this gives the
 * amount of usable bytes of data inside the ringbuffer.
 *
 * @param r A ringbuffer object.
 * @return The total size of the ringbuffer map data area.
 */
LIBBPF_API size_t ring__size(const struct ring *r);

/**
 * @brief **ring__map_fd()** returns the file descriptor underlying the given
 * ringbuffer.
 *
 * @param r A ringbuffer object.
 * @return The underlying ringbuffer file descriptor
 */
LIBBPF_API int ring__map_fd(const struct ring *r);

/**
 * @brief **ring__consume()** consumes available ringbuffer data without event
 * polling.
 *
 * @param r A ringbuffer object.
 * @return The number of records consumed (or INT_MAX, whichever is less), or
 * a negative number if any of the callbacks return an error.
 */
LIBBPF_API int ring__consume(struct ring *r);

/**
 * @brief **ring__consume_n()** consumes up to a requested amount of items from
 * a ringbuffer without event polling.
 *
 * @param r A ringbuffer object.
 * @param n Maximum amount of items to consume.
 * @return The number of items consumed, or a negative number if any of the
 * callbacks return an error.
 */
LIBBPF_API int ring__consume_n(struct ring *r, size_t n);

struct user_ring_buffer_opts {
	size_t sz; /* size of this struct, for forward/backward compatibility */
};

#define user_ring_buffer_opts__last_field sz
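/* Usage sketch (illustrative only): inspecting the first ring inside a
 * ringbuffer manager; keep in mind the size reported by
 * ring__avail_data_size() is only a point-in-time estimate.
 *
 *	struct ring *r = ring_buffer__ring(rb, 0);
 *
 *	if (r)
 *		printf("unconsumed: %zu of %zu bytes\n",
 *		       ring__avail_data_size(r), ring__size(r));
 */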
struct user_ring_buffer_opts {
	size_t sz; /* size of this struct, for forward/backward compatibility */
};

#define user_ring_buffer_opts__last_field sz

/**
 * @brief **user_ring_buffer__new()** creates a new instance of a user ring
 * buffer.
 *
 * @param map_fd A file descriptor to a BPF_MAP_TYPE_USER_RINGBUF map.
 * @param opts Options for how the ring buffer should be created.
 * @return A user ring buffer on success; NULL with errno set on failure.
 */
LIBBPF_API struct user_ring_buffer *
user_ring_buffer__new(int map_fd, const struct user_ring_buffer_opts *opts);

/**
 * @brief **user_ring_buffer__reserve()** reserves a pointer to a sample in the
 * user ring buffer.
 * @param rb A pointer to a user ring buffer.
 * @param size The size of the sample, in bytes.
 * @return A pointer to an 8-byte aligned reserved region of the user ring
 * buffer; NULL with errno set if a sample could not be reserved.
 *
 * This function is *not* thread safe, and callers must synchronize accessing
 * this function if there are multiple producers. If a size is requested that
 * is larger than the size of the entire ring buffer, errno will be set to
 * E2BIG and NULL is returned. If the ring buffer could accommodate the size,
 * but currently does not have enough space, errno is set to ENOSPC and NULL is
 * returned.
 *
 * After initializing the sample, callers must invoke
 * **user_ring_buffer__submit()** to post the sample to the kernel. Otherwise,
 * the sample must be freed with **user_ring_buffer__discard()**.
 */
LIBBPF_API void *user_ring_buffer__reserve(struct user_ring_buffer *rb, __u32 size);

/**
 * @brief **user_ring_buffer__reserve_blocking()** reserves a record in the
 * ring buffer, possibly blocking for up to **timeout_ms** until a sample
 * becomes available.
 * @param rb The user ring buffer.
 * @param size The size of the sample, in bytes.
 * @param timeout_ms The amount of time, in milliseconds, for which the caller
 * should block when waiting for a sample. -1 causes the caller to block
 * indefinitely.
 * @return A pointer to an 8-byte aligned reserved region of the user ring
 * buffer; NULL with errno set if a sample could not be reserved.
 *
 * This function is *not* thread safe, and callers must synchronize accessing
 * this function if there are multiple producers.
 *
 * If **timeout_ms** is -1, the function will block indefinitely until a sample
 * becomes available. Otherwise, **timeout_ms** must be non-negative, or errno
 * is set to EINVAL and NULL is returned. If **timeout_ms** is 0, no blocking
 * will occur and the function will return immediately after attempting to
 * reserve a sample.
 *
 * If **size** is larger than the size of the entire ring buffer, errno is set
 * to E2BIG and NULL is returned. If the ring buffer could accommodate
 * **size**, but currently does not have enough space, the caller will block
 * until at most **timeout_ms** has elapsed. If insufficient space is available
 * at that time, errno is set to ENOSPC and NULL is returned.
 *
 * The kernel guarantees that it will wake up this thread to check if
 * sufficient space is available in the ring buffer at least once per
 * invocation of the **bpf_ringbuf_drain()** helper function, provided that at
 * least one sample is consumed, and the BPF program did not invoke the
 * function with BPF_RB_NO_WAKEUP. A wakeup may occur sooner than that, but the
 * kernel does not guarantee this. If the helper function is invoked with
 * BPF_RB_FORCE_WAKEUP, a wakeup event will be sent even if no sample is
 * consumed.
 *
 * When a sample of size **size** is found within **timeout_ms**, a pointer to
 * the sample is returned. After initializing the sample, callers must invoke
 * **user_ring_buffer__submit()** to post the sample to the ring buffer.
 * Otherwise, the sample must be freed with **user_ring_buffer__discard()**.
 */
LIBBPF_API void *user_ring_buffer__reserve_blocking(struct user_ring_buffer *rb,
						    __u32 size,
						    int timeout_ms);
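
/*
 * Example: producing a sample from user space (a minimal sketch; 'rb' is
 * assumed to come from user_ring_buffer__new() and 'struct my_sample' is a
 * hypothetical application-defined record type):
 *
 *	struct my_sample *s;
 *
 *	s = user_ring_buffer__reserve_blocking(rb, sizeof(*s), 1000);
 *	if (!s)
 *		return -errno;
 *	s->value = 42; // initialize the reserved sample before submitting
 *	user_ring_buffer__submit(rb, s);
 */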
/**
 * @brief **user_ring_buffer__submit()** submits a previously reserved sample
 * into the ring buffer.
 * @param rb The user ring buffer.
 * @param sample A reserved sample.
 *
 * It is not necessary to synchronize amongst multiple producers when invoking
 * this function.
 */
LIBBPF_API void user_ring_buffer__submit(struct user_ring_buffer *rb, void *sample);

/**
 * @brief **user_ring_buffer__discard()** discards a previously reserved sample.
 * @param rb The user ring buffer.
 * @param sample A reserved sample.
 *
 * It is not necessary to synchronize amongst multiple producers when invoking
 * this function.
 */
LIBBPF_API void user_ring_buffer__discard(struct user_ring_buffer *rb, void *sample);

/**
 * @brief **user_ring_buffer__free()** frees a ring buffer that was previously
 * created with **user_ring_buffer__new()**.
 * @param rb The user ring buffer being freed.
 */
LIBBPF_API void user_ring_buffer__free(struct user_ring_buffer *rb);

/* Perf buffer APIs */
struct perf_buffer;

typedef void (*perf_buffer_sample_fn)(void *ctx, int cpu,
				      void *data, __u32 size);
typedef void (*perf_buffer_lost_fn)(void *ctx, int cpu, __u64 cnt);

/* common use perf buffer options */
struct perf_buffer_opts {
	size_t sz;
	__u32 sample_period;
	size_t :0;
};
#define perf_buffer_opts__last_field sample_period

/**
 * @brief **perf_buffer__new()** creates BPF perfbuf manager for a specified
 * BPF_PERF_EVENT_ARRAY map
 * @param map_fd FD of BPF_PERF_EVENT_ARRAY BPF map that will be used by BPF
 * code to send data over to user-space
 * @param page_cnt number of memory pages allocated for each per-CPU buffer
 * @param sample_cb function called on each received data record
 * @param lost_cb function called when record loss has occurred
 * @param ctx user-provided extra context passed into *sample_cb* and *lost_cb*
 * @return a new instance of struct perf_buffer on success, NULL on error with
 * *errno* containing an error code
 */
LIBBPF_API struct perf_buffer *
perf_buffer__new(int map_fd, size_t page_cnt,
		 perf_buffer_sample_fn sample_cb, perf_buffer_lost_fn lost_cb, void *ctx,
		 const struct perf_buffer_opts *opts);
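
/*
 * Example: consuming perf buffer data (a minimal sketch; 'map_fd' is
 * assumed to be an FD of a BPF_MAP_TYPE_PERF_EVENT_ARRAY map, and
 * 'on_sample'/'on_lost' are user-supplied callbacks):
 *
 *	struct perf_buffer *pb;
 *
 *	pb = perf_buffer__new(map_fd, 8, on_sample, on_lost, NULL, NULL);
 *	if (!pb)
 *		return -errno;
 *	while (perf_buffer__poll(pb, 100) >= 0) {
 *		// records are delivered to on_sample()/on_lost()
 *	}
 *	perf_buffer__free(pb);
 */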
enum bpf_perf_event_ret {
	LIBBPF_PERF_EVENT_DONE	= 0,
	LIBBPF_PERF_EVENT_ERROR	= -1,
	LIBBPF_PERF_EVENT_CONT	= -2,
};

struct perf_event_header;

typedef enum bpf_perf_event_ret
(*perf_buffer_event_fn)(void *ctx, int cpu, struct perf_event_header *event);

/* raw perf buffer options, giving most power and control */
struct perf_buffer_raw_opts {
	size_t sz;
	long :0;
	long :0;
	/* if cpu_cnt == 0, open all on all possible CPUs (up to the number of
	 * max_entries of given PERF_EVENT_ARRAY map)
	 */
	int cpu_cnt;
	/* if cpu_cnt > 0, cpus is an array of CPUs to open ring buffers on */
	int *cpus;
	/* if cpu_cnt > 0, map_keys specify map keys to set per-CPU FDs for */
	int *map_keys;
};
#define perf_buffer_raw_opts__last_field map_keys

struct perf_event_attr;

LIBBPF_API struct perf_buffer *
perf_buffer__new_raw(int map_fd, size_t page_cnt, struct perf_event_attr *attr,
		     perf_buffer_event_fn event_cb, void *ctx,
		     const struct perf_buffer_raw_opts *opts);

LIBBPF_API void perf_buffer__free(struct perf_buffer *pb);
LIBBPF_API int perf_buffer__epoll_fd(const struct perf_buffer *pb);
LIBBPF_API int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms);
LIBBPF_API int perf_buffer__consume(struct perf_buffer *pb);
LIBBPF_API int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx);
LIBBPF_API size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb);
LIBBPF_API int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx);

/**
 * @brief **perf_buffer__buffer()** returns the per-CPU raw mmap()'ed underlying
 * memory region of the ring buffer.
 * This ring buffer can be used to implement a custom events consumer.
 * The ring buffer starts with the *struct perf_event_mmap_page*, which
 * holds the ring buffer management fields; when accessing the header
 * structure it's important to be SMP aware.
 * You can refer to *perf_event_read_simple* for a simple example.
 * @param pb the perf buffer structure
 * @param buf_idx the buffer index to retrieve
 * @param buf (out) gets the base pointer of the mmap()'ed memory
 * @param buf_size (out) gets the size of the mmap()'ed region
 * @return 0 on success, negative error code for failure
 */
LIBBPF_API int perf_buffer__buffer(struct perf_buffer *pb, int buf_idx, void **buf,
				   size_t *buf_size);

struct bpf_prog_linfo;
struct bpf_prog_info;

LIBBPF_API void bpf_prog_linfo__free(struct bpf_prog_linfo *prog_linfo);
LIBBPF_API struct bpf_prog_linfo *
bpf_prog_linfo__new(const struct bpf_prog_info *info);
LIBBPF_API const struct bpf_line_info *
bpf_prog_linfo__lfind_addr_func(const struct bpf_prog_linfo *prog_linfo,
				__u64 addr, __u32 func_idx, __u32 nr_skip);
LIBBPF_API const struct bpf_line_info *
bpf_prog_linfo__lfind(const struct bpf_prog_linfo *prog_linfo,
		      __u32 insn_off, __u32 nr_skip);

/*
 * Probe for supported system features
 *
 * Note that running many of these probes in a short amount of time can cause
 * the kernel to reach the maximal size of lockable memory allowed for the
 * user, causing subsequent probes to fail. In this case, the caller may want
 * to adjust that limit with setrlimit().
 */

/**
 * @brief **libbpf_probe_bpf_prog_type()** detects if host kernel supports
 * BPF programs of a given type.
 * @param prog_type BPF program type to detect kernel support for
 * @param opts reserved for future extensibility, should be NULL
 * @return 1, if given program type is supported; 0, if given program type is
 * not supported; negative error code if feature detection failed or can't be
 * performed
 *
 * Make sure the process has the required set of CAP_* permissions (or runs as
 * root) when performing feature checking.
 */
LIBBPF_API int libbpf_probe_bpf_prog_type(enum bpf_prog_type prog_type, const void *opts);
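
/*
 * Example: feature-probing a program type (a minimal sketch):
 *
 *	int ret = libbpf_probe_bpf_prog_type(BPF_PROG_TYPE_XDP, NULL);
 *
 *	if (ret < 0)
 *		return ret; // feature detection failed
 *	printf("XDP programs %s supported\n", ret ? "are" : "are not");
 */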
/**
 * @brief **libbpf_probe_bpf_map_type()** detects if host kernel supports
 * BPF maps of a given type.
 * @param map_type BPF map type to detect kernel support for
 * @param opts reserved for future extensibility, should be NULL
 * @return 1, if given map type is supported; 0, if given map type is
 * not supported; negative error code if feature detection failed or can't be
 * performed
 *
 * Make sure the process has the required set of CAP_* permissions (or runs as
 * root) when performing feature checking.
 */
LIBBPF_API int libbpf_probe_bpf_map_type(enum bpf_map_type map_type, const void *opts);

/**
 * @brief **libbpf_probe_bpf_helper()** detects if host kernel supports the
 * use of a given BPF helper from specified BPF program type.
 * @param prog_type BPF program type used to check the support of BPF helper
 * @param helper_id BPF helper ID (enum bpf_func_id) to check support for
 * @param opts reserved for future extensibility, should be NULL
 * @return 1, if given combination of program type and helper is supported; 0,
 * if the combination is not supported; negative error code if feature
 * detection for provided input arguments failed or can't be performed
 *
 * Make sure the process has the required set of CAP_* permissions (or runs as
 * root) when performing feature checking.
 */
LIBBPF_API int libbpf_probe_bpf_helper(enum bpf_prog_type prog_type,
				       enum bpf_func_id helper_id, const void *opts);
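
/*
 * Example: checking helper availability for a program type (a minimal
 * sketch; BPF_FUNC_get_current_comm is one of the enum bpf_func_id values
 * from linux/bpf.h):
 *
 *	int ret = libbpf_probe_bpf_helper(BPF_PROG_TYPE_KPROBE,
 *					  BPF_FUNC_get_current_comm, NULL);
 *
 *	if (ret == 1) {
 *		// kprobe programs may call bpf_get_current_comm()
 *	}
 */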
/**
 * @brief **libbpf_num_possible_cpus()** is a helper function to get the
 * number of possible CPUs that the host kernel supports and expects.
 * @return number of possible CPUs; or error code on failure
 *
 * Example usage:
 *
 *     int ncpus = libbpf_num_possible_cpus();
 *     if (ncpus < 0) {
 *          // error handling
 *     }
 *     long values[ncpus];
 *     bpf_map_lookup_elem(per_cpu_map_fd, key, values);
 */
LIBBPF_API int libbpf_num_possible_cpus(void);

struct bpf_map_skeleton {
	const char *name;
	struct bpf_map **map;
	void **mmaped;
};

struct bpf_prog_skeleton {
	const char *name;
	struct bpf_program **prog;
	struct bpf_link **link;
};

struct bpf_object_skeleton {
	size_t sz; /* size of this struct, for forward/backward compatibility */

	const char *name;
	const void *data;
	size_t data_sz;

	struct bpf_object **obj;

	int map_cnt;
	int map_skel_sz; /* sizeof(struct bpf_map_skeleton) */
	struct bpf_map_skeleton *maps;

	int prog_cnt;
	int prog_skel_sz; /* sizeof(struct bpf_prog_skeleton) */
	struct bpf_prog_skeleton *progs;
};

LIBBPF_API int
bpf_object__open_skeleton(struct bpf_object_skeleton *s,
			  const struct bpf_object_open_opts *opts);
LIBBPF_API int bpf_object__load_skeleton(struct bpf_object_skeleton *s);
LIBBPF_API int bpf_object__attach_skeleton(struct bpf_object_skeleton *s);
LIBBPF_API void bpf_object__detach_skeleton(struct bpf_object_skeleton *s);
LIBBPF_API void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s);

struct bpf_var_skeleton {
	const char *name;
	struct bpf_map **map;
	void **addr;
};

struct bpf_object_subskeleton {
	size_t sz; /* size of this struct, for forward/backward compatibility */

	const struct bpf_object *obj;

	int map_cnt;
	int map_skel_sz; /* sizeof(struct bpf_map_skeleton) */
	struct bpf_map_skeleton *maps;

	int prog_cnt;
	int prog_skel_sz; /* sizeof(struct bpf_prog_skeleton) */
	struct bpf_prog_skeleton *progs;

	int var_cnt;
	int var_skel_sz; /* sizeof(struct bpf_var_skeleton) */
	struct bpf_var_skeleton *vars;
};

LIBBPF_API int
bpf_object__open_subskeleton(struct bpf_object_subskeleton *s);
LIBBPF_API void
bpf_object__destroy_subskeleton(struct bpf_object_subskeleton *s);

struct gen_loader_opts {
	size_t sz; /* size of this struct, for forward/backward compatibility */
	const char *data;
	const char *insns;
	__u32 data_sz;
	__u32 insns_sz;
};

#define gen_loader_opts__last_field insns_sz
LIBBPF_API int bpf_object__gen_loader(struct bpf_object *obj,
				      struct gen_loader_opts *opts);

enum libbpf_tristate {
	TRI_NO = 0,
	TRI_YES = 1,
	TRI_MODULE = 2,
};

struct bpf_linker_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
};
#define bpf_linker_opts__last_field sz

struct bpf_linker_file_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
};
#define bpf_linker_file_opts__last_field sz

struct bpf_linker;

LIBBPF_API struct bpf_linker *bpf_linker__new(const char *filename, struct bpf_linker_opts *opts);
LIBBPF_API int bpf_linker__add_file(struct bpf_linker *linker,
				    const char *filename,
				    const struct bpf_linker_file_opts *opts);
LIBBPF_API int bpf_linker__finalize(struct bpf_linker *linker);
LIBBPF_API void bpf_linker__free(struct bpf_linker *linker);
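
/*
 * Example: statically linking BPF object files (a minimal sketch; the file
 * names are illustrative):
 *
 *	struct bpf_linker *linker;
 *	int err;
 *
 *	linker = bpf_linker__new("combined.bpf.o", NULL);
 *	if (!linker)
 *		return -errno;
 *	err = bpf_linker__add_file(linker, "a.bpf.o", NULL);
 *	if (!err)
 *		err = bpf_linker__add_file(linker, "b.bpf.o", NULL);
 *	if (!err)
 *		err = bpf_linker__finalize(linker);
 *	bpf_linker__free(linker);
 *	return err;
 */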

/*
 * Custom handling of BPF program's SEC() definitions
 */

struct bpf_prog_load_opts; /* defined in bpf.h */

/* Called during bpf_object__open() for each recognized BPF program. Callback
 * can use various bpf_program__set_*() setters to adjust whatever properties
 * are necessary.
 */
typedef int (*libbpf_prog_setup_fn_t)(struct bpf_program *prog, long cookie);

/* Called right before libbpf performs bpf_prog_load() to load BPF program
 * into the kernel. Callback can adjust opts as necessary.
 */
typedef int (*libbpf_prog_prepare_load_fn_t)(struct bpf_program *prog,
					     struct bpf_prog_load_opts *opts, long cookie);

/* Called during skeleton attach or through bpf_program__attach(). If
 * auto-attach is not supported, callback should return 0 and set link to
 * NULL (it's not considered an error during skeleton attach, but it will be
 * an error for bpf_program__attach() calls). On error, error should be
 * returned directly and link set to NULL. On success, return 0 and set link
 * to a valid struct bpf_link.
 */
typedef int (*libbpf_prog_attach_fn_t)(const struct bpf_program *prog, long cookie,
				       struct bpf_link **link);

struct libbpf_prog_handler_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* User-provided value that is passed to prog_setup_fn,
	 * prog_prepare_load_fn, and prog_attach_fn callbacks. Allows user to
	 * register one set of callbacks for multiple SEC() definitions and
	 * still be able to distinguish them, if necessary. For example,
	 * libbpf itself is using this to pass necessary flags (e.g.,
	 * sleepable flag) to a common internal SEC() handler.
	 */
	long cookie;
	/* BPF program initialization callback (see libbpf_prog_setup_fn_t).
	 * Callback is optional, pass NULL if it's not necessary.
	 */
	libbpf_prog_setup_fn_t prog_setup_fn;
	/* BPF program loading callback (see libbpf_prog_prepare_load_fn_t).
	 * Callback is optional, pass NULL if it's not necessary.
	 */
	libbpf_prog_prepare_load_fn_t prog_prepare_load_fn;
	/* BPF program attach callback (see libbpf_prog_attach_fn_t).
	 * Callback is optional, pass NULL if it's not necessary.
	 */
	libbpf_prog_attach_fn_t prog_attach_fn;
};
#define libbpf_prog_handler_opts__last_field prog_attach_fn

/**
 * @brief **libbpf_register_prog_handler()** registers a custom BPF program
 * SEC() handler.
 * @param sec section prefix for which custom handler is registered
 * @param prog_type BPF program type associated with specified section
 * @param exp_attach_type Expected BPF attach type associated with specified section
 * @param opts optional cookie, callbacks, and other extra options
 * @return Non-negative handler ID is returned on success. This handler ID has
 * to be passed to *libbpf_unregister_prog_handler()* to unregister such
 * custom handler. Negative error code is returned on error.
 *
 * *sec* defines which SEC() definitions are handled by this custom handler
 * registration. *sec* can have a few different forms:
 *   - if *sec* is just a plain string (e.g., "abc"), it will match only
 *     SEC("abc"). If BPF program specifies SEC("abc/whatever"), it will result
 *     in an error;
 *   - if *sec* is of the form "abc/", proper SEC() form is
 *     SEC("abc/something"), where acceptable "something" should be checked by
 *     *prog_setup_fn* callback, if there are additional restrictions;
 *   - if *sec* is of the form "abc+", it will successfully match both
 *     SEC("abc") and SEC("abc/whatever") forms;
 *   - if *sec* is NULL, custom handler is registered for any BPF program that
 *     doesn't match any of the registered (custom or libbpf's own) SEC()
 *     handlers. There can be only one such generic custom handler registered
 *     at any given time.
 *
 * All custom handlers (except the one with *sec* == NULL) are processed
 * before libbpf's own SEC() handlers. It is allowed to "override" libbpf's
 * SEC() handlers by registering custom ones for the same section prefix
 * (i.e., it's possible to have a custom SEC("perf_event/LLC-load-misses")
 * handler).
 *
 * Note that, like most global libbpf APIs (e.g., libbpf_set_print(),
 * libbpf_set_strict_mode(), etc.), these APIs are not thread-safe. The user
 * needs to ensure synchronization if there is a risk of running this API from
 * multiple threads simultaneously.
 */
LIBBPF_API int libbpf_register_prog_handler(const char *sec,
					    enum bpf_prog_type prog_type,
					    enum bpf_attach_type exp_attach_type,
					    const struct libbpf_prog_handler_opts *opts);
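
/*
 * Example: registering a custom handler for a hypothetical "myprobe/"
 * section prefix (a minimal sketch; 'my_prog_setup' would be a
 * user-supplied libbpf_prog_setup_fn_t callback):
 *
 *	LIBBPF_OPTS(libbpf_prog_handler_opts, opts,
 *		    .prog_setup_fn = my_prog_setup);
 *	int handler_id;
 *
 *	handler_id = libbpf_register_prog_handler("myprobe/",
 *						  BPF_PROG_TYPE_KPROBE,
 *						  0, &opts);
 *	if (handler_id < 0)
 *		return handler_id;
 */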
/**
 * @brief **libbpf_unregister_prog_handler()** unregisters previously registered
 * custom BPF program SEC() handler.
 * @param handler_id handler ID returned by *libbpf_register_prog_handler()*
 * after successful registration
 * @return 0 on success, negative error code if handler isn't found
 *
 * Note that, like most global libbpf APIs (e.g., libbpf_set_print(),
 * libbpf_set_strict_mode(), etc.), these APIs are not thread-safe. The user
 * needs to ensure synchronization if there is a risk of running this API from
 * multiple threads simultaneously.
 */
LIBBPF_API int libbpf_unregister_prog_handler(int handler_id);

#ifdef __cplusplus
} /* extern "C" */
#endif

#endif /* __LIBBPF_LIBBPF_H */