/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */

/*
 * Common eBPF ELF object loading operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 */
#ifndef __LIBBPF_LIBBPF_H
#define __LIBBPF_LIBBPF_H

#include <stdarg.h>
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <sys/types.h>  // for size_t
#include <linux/bpf.h>

#include "libbpf_common.h"
#include "libbpf_legacy.h"

#ifdef __cplusplus
extern "C" {
#endif

LIBBPF_API __u32 libbpf_major_version(void);
LIBBPF_API __u32 libbpf_minor_version(void);
LIBBPF_API const char *libbpf_version_string(void);

enum libbpf_errno {
	__LIBBPF_ERRNO__START = 4000,

	/* Something wrong in libelf */
	LIBBPF_ERRNO__LIBELF = __LIBBPF_ERRNO__START,
	LIBBPF_ERRNO__FORMAT,	/* BPF object format invalid */
	LIBBPF_ERRNO__KVERSION,	/* Incorrect or no 'version' section */
	LIBBPF_ERRNO__ENDIAN,	/* Endian mismatch */
	LIBBPF_ERRNO__INTERNAL,	/* Internal error in libbpf */
	LIBBPF_ERRNO__RELOC,	/* Relocation failed */
	LIBBPF_ERRNO__LOAD,	/* Load program failure for unknown reason */
	LIBBPF_ERRNO__VERIFY,	/* Kernel verifier blocks program loading */
	LIBBPF_ERRNO__PROG2BIG,	/* Program too big */
	LIBBPF_ERRNO__KVER,	/* Incorrect kernel version */
	LIBBPF_ERRNO__PROGTYPE,	/* Kernel doesn't support this program type */
	LIBBPF_ERRNO__WRNGPID,	/* Wrong pid in netlink message */
	LIBBPF_ERRNO__INVSEQ,	/* Invalid netlink sequence */
	LIBBPF_ERRNO__NLPARSE,	/* netlink parsing error */
	__LIBBPF_ERRNO__END,
};

LIBBPF_API int libbpf_strerror(int err, char *buf, size_t size);

/**
 * @brief **libbpf_bpf_attach_type_str()** converts the provided attach type
 * value into a textual representation.
 * @param t The attach type.
 * @return Pointer to a static string identifying the attach type. NULL is
 * returned for unknown **bpf_attach_type** values.
 */
LIBBPF_API const char *libbpf_bpf_attach_type_str(enum bpf_attach_type t);

/**
 * @brief **libbpf_bpf_link_type_str()** converts the provided link type value
 * into a textual representation.
 * @param t The link type.
 * @return Pointer to a static string identifying the link type. NULL is
 * returned for unknown **bpf_link_type** values.
 */
LIBBPF_API const char *libbpf_bpf_link_type_str(enum bpf_link_type t);

/**
 * @brief **libbpf_bpf_map_type_str()** converts the provided map type value
 * into a textual representation.
 * @param t The map type.
 * @return Pointer to a static string identifying the map type. NULL is
 * returned for unknown **bpf_map_type** values.
 */
LIBBPF_API const char *libbpf_bpf_map_type_str(enum bpf_map_type t);

/**
 * @brief **libbpf_bpf_prog_type_str()** converts the provided program type
 * value into a textual representation.
 * @param t The program type.
 * @return Pointer to a static string identifying the program type. NULL is
 * returned for unknown **bpf_prog_type** values.
 */
LIBBPF_API const char *libbpf_bpf_prog_type_str(enum bpf_prog_type t);

enum libbpf_print_level {
	LIBBPF_WARN,
	LIBBPF_INFO,
	LIBBPF_DEBUG,
};

typedef int (*libbpf_print_fn_t)(enum libbpf_print_level level,
				 const char *, va_list ap);

/**
 * @brief **libbpf_set_print()** sets user-provided log callback function to
 * be used for libbpf warnings and informational messages.
 * @param fn The log print function. If NULL, libbpf won't print anything.
 * @return Pointer to old print function.
 *
 * This function is thread-safe.
 */
LIBBPF_API libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn);
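/*
 * Illustrative sketch (editor's example, not part of the libbpf API): a
 * minimal print callback that forwards libbpf messages to stderr while
 * suppressing debug-level output. The function name my_print_fn is
 * hypothetical; <stdio.h> and <stdarg.h> are already included above.
 *
 *	static int my_print_fn(enum libbpf_print_level level,
 *			       const char *format, va_list args)
 *	{
 *		if (level == LIBBPF_DEBUG)
 *			return 0;
 *		return vfprintf(stderr, format, args);
 *	}
 *
 *	libbpf_set_print(my_print_fn);
 */
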
/* Hide internal to user */
struct bpf_object;

struct bpf_object_open_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* object name override, if provided:
	 * - for object open from file, this will override setting object
	 *   name from file path's base name;
	 * - for object open from memory buffer, this will specify an object
	 *   name and will override default "<addr>-<buf-size>" name;
	 */
	const char *object_name;
	/* parse map definitions non-strictly, allowing extra attributes/data */
	bool relaxed_maps;
	/* maps that set the 'pinning' attribute in their definition will have
	 * their pin_path attribute set to a file in this directory, and be
	 * auto-pinned to that path on load; defaults to "/sys/fs/bpf".
	 */
	const char *pin_root_path;

	__u32 :32; /* stub out now removed attach_prog_fd */

	/* Additional kernel config content that augments and overrides
	 * system Kconfig for CONFIG_xxx externs.
	 */
	const char *kconfig;
	/* Path to the custom BTF to be used for BPF CO-RE relocations.
	 * This custom BTF completely replaces the use of vmlinux BTF
	 * for the purpose of CO-RE relocations.
	 * NOTE: any other BPF feature (e.g., fentry/fexit programs,
	 * struct_ops, etc) will need actual kernel BTF at /sys/kernel/btf/vmlinux.
	 */
	const char *btf_custom_path;
	/* Pointer to a buffer for storing kernel logs for applicable BPF
	 * commands. A valid kernel_log_size has to be specified as well and is
	 * passed through to the bpf() syscall. Keep in mind that the kernel
	 * might fail the operation with -ENOSPC error if the provided buffer
	 * is too small to contain the entire log output.
	 * See the comment below for kernel_log_level for interaction between
	 * log_buf and log_level settings.
	 *
	 * If specified, this log buffer will be passed for:
	 *   - each BPF program load (BPF_PROG_LOAD) attempt, unless overridden
	 *     with bpf_program__set_log_buf() on per-program level, to get
	 *     BPF verifier log output.
	 *   - during BPF object's BTF load into kernel (BPF_BTF_LOAD) to get
	 *     BTF sanity checking log.
	 *
	 * Each BPF command (BPF_BTF_LOAD or BPF_PROG_LOAD) will overwrite
	 * previous contents, so if you need more fine-grained control, set
	 * per-program buffer with bpf_program__set_log_buf() to preserve each
	 * individual program's verification log. Keep using kernel_log_buf
	 * for BTF verification log, if necessary.
	 */
	char *kernel_log_buf;
	size_t kernel_log_size;
	/*
	 * Log level can be set independently from log buffer. Log_level=0
	 * means that libbpf will attempt loading BTF or program without any
	 * logging requested, but will retry with either its own or custom log
	 * buffer, if provided, and log_level=1 on any error.
	 * And vice versa, setting log_level>0 will request BTF or prog
	 * loading with verbose log from the first attempt (and as such also
	 * for successfully loaded BTF or program), and the actual log buffer
	 * could be either libbpf's own auto-allocated log buffer, if
	 * kernel_log_buf is NULL, or user-provided custom kernel_log_buf.
	 * If user didn't provide custom log buffer, libbpf will emit captured
	 * logs through its print callback.
	 */
	__u32 kernel_log_level;
	/* Path to BPF FS mount point to derive BPF token from.
	 *
	 * Created BPF token will be used for all bpf() syscall operations
	 * that accept BPF token (e.g., map creation, BTF and program loads,
	 * etc) automatically within instantiated BPF object.
	 *
	 * If bpf_token_path is not specified, libbpf will consult
	 * LIBBPF_BPF_TOKEN_PATH environment variable. If set, it will be
	 * taken as a value of bpf_token_path option and will force libbpf to
	 * either create BPF token from provided custom BPF FS path, or will
	 * disable implicit BPF token creation, if envvar value is an empty
	 * string. bpf_token_path overrides LIBBPF_BPF_TOKEN_PATH, if both are
	 * set at the same time.
	 *
	 * Setting bpf_token_path option to empty string disables libbpf's
	 * automatic attempt to create BPF token from default BPF FS mount
	 * point (/sys/fs/bpf), in case this default behavior is undesirable.
	 */
	const char *bpf_token_path;

	size_t :0;
};
#define bpf_object_open_opts__last_field bpf_token_path

/**
 * @brief **bpf_object__open()** creates a bpf_object by opening
 * the BPF ELF object file pointed to by the passed path and loading it
 * into memory.
 * @param path BPF object file path.
 * @return pointer to the new bpf_object; or NULL is returned on error,
 * error code is stored in errno
 */
LIBBPF_API struct bpf_object *bpf_object__open(const char *path);

/**
 * @brief **bpf_object__open_file()** creates a bpf_object by opening
 * the BPF ELF object file pointed to by the passed path and loading it
 * into memory.
 * @param path BPF object file path
 * @param opts options for how to load the bpf object, this parameter is
 * optional and can be set to NULL
 * @return pointer to the new bpf_object; or NULL is returned on error,
 * error code is stored in errno
 */
LIBBPF_API struct bpf_object *
bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts);

/**
 * @brief **bpf_object__open_mem()** creates a bpf_object by reading
 * the BPF object's raw bytes from a memory buffer containing a valid
 * BPF ELF object file.
 * @param obj_buf pointer to the buffer containing ELF file bytes
 * @param obj_buf_sz number of bytes in the buffer
 * @param opts options for how to load the bpf object
 * @return pointer to the new bpf_object; or NULL is returned on error,
 * error code is stored in errno
 */
LIBBPF_API struct bpf_object *
bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
		     const struct bpf_object_open_opts *opts);

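/*
 * Illustrative sketch (editor's example): opening a BPF object with custom
 * open options. The object file name "prog.bpf.o" and the pin root path are
 * hypothetical; LIBBPF_OPTS() comes from libbpf_common.h included above.
 *
 *	LIBBPF_OPTS(bpf_object_open_opts, opts,
 *		.pin_root_path = "/sys/fs/bpf/myapp",
 *	);
 *	struct bpf_object *obj;
 *
 *	obj = bpf_object__open_file("prog.bpf.o", &opts);
 *	if (!obj)
 *		fprintf(stderr, "failed to open object: %d\n", -errno);
 */
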
/**
 * @brief **bpf_object__load()** loads BPF object into kernel.
 * @param obj Pointer to a valid BPF object instance returned by
 * **bpf_object__open*()** APIs
 * @return 0, on success; negative error code, otherwise, error code is
 * stored in errno
 */
LIBBPF_API int bpf_object__load(struct bpf_object *obj);

/**
 * @brief **bpf_object__close()** closes a BPF object and releases all
 * resources.
 * @param obj Pointer to a valid BPF object
 */
LIBBPF_API void bpf_object__close(struct bpf_object *obj);

/**
 * @brief **bpf_object__pin_maps()** pins each map contained within
 * the BPF object at the passed directory.
 * @param obj Pointer to a valid BPF object
 * @param path A directory where maps should be pinned.
 * @return 0, on success; negative error code, otherwise
 *
 * If `path` is NULL `bpf_map__pin` (which is being used on each map)
 * will use the pin_path attribute of each map. In this case, maps that
 * don't have a pin_path set will be ignored.
 */
LIBBPF_API int bpf_object__pin_maps(struct bpf_object *obj, const char *path);

/**
 * @brief **bpf_object__unpin_maps()** unpins each map contained within
 * the BPF object found in the passed directory.
 * @param obj Pointer to a valid BPF object
 * @param path A directory where pinned maps should be searched for.
 * @return 0, on success; negative error code, otherwise
 *
 * If `path` is NULL `bpf_map__unpin` (which is being used on each map)
 * will use the pin_path attribute of each map. In this case, maps that
 * don't have a pin_path set will be ignored.
 */
LIBBPF_API int bpf_object__unpin_maps(struct bpf_object *obj,
				      const char *path);
LIBBPF_API int bpf_object__pin_programs(struct bpf_object *obj,
					const char *path);
LIBBPF_API int bpf_object__unpin_programs(struct bpf_object *obj,
					   const char *path);
LIBBPF_API int bpf_object__pin(struct bpf_object *object, const char *path);
LIBBPF_API int bpf_object__unpin(struct bpf_object *object, const char *path);

LIBBPF_API const char *bpf_object__name(const struct bpf_object *obj);
LIBBPF_API unsigned int bpf_object__kversion(const struct bpf_object *obj);
LIBBPF_API int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version);

struct btf;
LIBBPF_API struct btf *bpf_object__btf(const struct bpf_object *obj);
LIBBPF_API int bpf_object__btf_fd(const struct bpf_object *obj);

LIBBPF_API struct bpf_program *
bpf_object__find_program_by_name(const struct bpf_object *obj,
				 const char *name);

LIBBPF_API int
libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
			 enum bpf_attach_type *expected_attach_type);
LIBBPF_API int libbpf_attach_type_by_name(const char *name,
					   enum bpf_attach_type *attach_type);
LIBBPF_API int libbpf_find_vmlinux_btf_id(const char *name,
					   enum bpf_attach_type attach_type);

/* Accessors of bpf_program */
struct bpf_program;

LIBBPF_API struct bpf_program *
bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prog);

#define bpf_object__for_each_program(pos, obj)			\
	for ((pos) = bpf_object__next_program((obj), NULL);	\
	     (pos) != NULL;					\
	     (pos) = bpf_object__next_program((obj), (pos)))

LIBBPF_API struct bpf_program *
bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *prog);

LIBBPF_API void bpf_program__set_ifindex(struct bpf_program *prog,
					 __u32 ifindex);

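/*
 * Illustrative sketch (editor's example): enumerating programs in an opened
 * object and looking one up by name. The program name "handle_exec" is
 * hypothetical; obj is assumed to come from a prior bpf_object__open*() call.
 *
 *	struct bpf_program *prog;
 *
 *	bpf_object__for_each_program(prog, obj)
 *		printf("prog: %s (section %s)\n",
 *		       bpf_program__name(prog),
 *		       bpf_program__section_name(prog));
 *
 *	prog = bpf_object__find_program_by_name(obj, "handle_exec");
 *	if (!prog)
 *		fprintf(stderr, "no such program\n");
 */
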
LIBBPF_API const char *bpf_program__name(const struct bpf_program *prog);
LIBBPF_API const char *bpf_program__section_name(const struct bpf_program *prog);
LIBBPF_API bool bpf_program__autoload(const struct bpf_program *prog);
LIBBPF_API int bpf_program__set_autoload(struct bpf_program *prog, bool autoload);
LIBBPF_API bool bpf_program__autoattach(const struct bpf_program *prog);
LIBBPF_API void bpf_program__set_autoattach(struct bpf_program *prog, bool autoattach);

struct bpf_insn;

/**
 * @brief **bpf_program__insns()** gives read-only access to BPF program's
 * underlying BPF instructions.
 * @param prog BPF program for which to return instructions
 * @return a pointer to an array of BPF instructions that belong to the
 * specified BPF program
 *
 * Returned pointer is always valid and not NULL. Number of `struct bpf_insn`
 * pointed to can be fetched using **bpf_program__insn_cnt()** API.
 *
 * Keep in mind, libbpf can modify and append/delete BPF program's
 * instructions as it processes BPF object file and prepares everything for
 * uploading into the kernel. So depending on the point in BPF object
 * lifetime, **bpf_program__insns()** can return different sets of
 * instructions. As an example, during BPF object load phase BPF program
 * instructions will be CO-RE-relocated, BPF subprogram instructions will be
 * appended, ldimm64 instructions will have FDs embedded, etc. So instructions
 * returned before **bpf_object__load()** and after it might be quite
 * different.
 */
LIBBPF_API const struct bpf_insn *bpf_program__insns(const struct bpf_program *prog);

/**
 * @brief **bpf_program__set_insns()** can set BPF program's underlying
 * BPF instructions.
 *
 * WARNING: This is a very advanced libbpf API and users need to know
 * what they are doing. This should be used from prog_prepare_load_fn
 * callback only.
 *
 * @param prog BPF program for which to set instructions
 * @param new_insns a pointer to an array of BPF instructions
 * @param new_insn_cnt number of `struct bpf_insn`'s that form
 * specified BPF program
 * @return 0, on success; negative error code, otherwise
 */
LIBBPF_API int bpf_program__set_insns(struct bpf_program *prog,
				      struct bpf_insn *new_insns, size_t new_insn_cnt);

/**
 * @brief **bpf_program__insn_cnt()** returns number of `struct bpf_insn`'s
 * that form specified BPF program.
 * @param prog BPF program for which to return number of BPF instructions
 *
 * See **bpf_program__insns()** documentation for notes on how libbpf can
 * change instructions and their count during different phases of
 * **bpf_object** lifetime.
 */
LIBBPF_API size_t bpf_program__insn_cnt(const struct bpf_program *prog);

LIBBPF_API int bpf_program__fd(const struct bpf_program *prog);

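/*
 * Illustrative sketch (editor's example): inspecting a program's instructions
 * with the accessors above. Keep in mind the returned array changes across
 * the object's lifetime (see bpf_program__insns() notes).
 *
 *	size_t cnt = bpf_program__insn_cnt(prog);
 *	const struct bpf_insn *insns = bpf_program__insns(prog);
 *
 *	printf("%s has %zu instructions (first opcode 0x%x)\n",
 *	       bpf_program__name(prog), cnt, insns[0].code);
 */
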
/**
 * @brief **bpf_program__pin()** pins the BPF program to a file
 * in the BPF FS specified by a path. This increments the program's
 * reference count, allowing it to stay loaded after the process
 * which loaded it has exited.
 *
 * @param prog BPF program to pin, must already be loaded
 * @param path file path in a BPF file system
 * @return 0, on success; negative error code, otherwise
 */
LIBBPF_API int bpf_program__pin(struct bpf_program *prog, const char *path);

/**
 * @brief **bpf_program__unpin()** unpins the BPF program from a file
 * in the BPFFS specified by a path. This decrements the program's
 * reference count.
 *
 * The file pinning the BPF program can also be unlinked by a different
 * process in which case this function will return an error.
 *
 * @param prog BPF program to unpin
 * @param path file path to the pin in a BPF file system
 * @return 0, on success; negative error code, otherwise
 */
LIBBPF_API int bpf_program__unpin(struct bpf_program *prog, const char *path);
LIBBPF_API void bpf_program__unload(struct bpf_program *prog);

struct bpf_link;

LIBBPF_API struct bpf_link *bpf_link__open(const char *path);
LIBBPF_API int bpf_link__fd(const struct bpf_link *link);
LIBBPF_API const char *bpf_link__pin_path(const struct bpf_link *link);
/**
 * @brief **bpf_link__pin()** pins the BPF link to a file
 * in the BPF FS specified by a path. This increments the link's
 * reference count, allowing it to stay loaded after the process
 * which loaded it has exited.
 *
 * @param link BPF link to pin, must already be loaded
 * @param path file path in a BPF file system
 * @return 0, on success; negative error code, otherwise
 */
LIBBPF_API int bpf_link__pin(struct bpf_link *link, const char *path);

/**
 * @brief **bpf_link__unpin()** unpins the BPF link from a file
 * in the BPFFS specified by a path. This decrements the link's
 * reference count.
 *
 * The file pinning the BPF link can also be unlinked by a different
 * process in which case this function will return an error.
 *
 * @param link BPF link to unpin
 * @return 0, on success; negative error code, otherwise
 */
LIBBPF_API int bpf_link__unpin(struct bpf_link *link);
LIBBPF_API int bpf_link__update_program(struct bpf_link *link,
					struct bpf_program *prog);
LIBBPF_API void bpf_link__disconnect(struct bpf_link *link);
LIBBPF_API int bpf_link__detach(struct bpf_link *link);
LIBBPF_API int bpf_link__destroy(struct bpf_link *link);

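/*
 * Illustrative sketch (editor's example): auto-attaching a program and
 * pinning the resulting link so it survives the loading process. The pin
 * path is hypothetical; err is assumed to be declared by the caller.
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach(prog);
 *	if (!link)
 *		return -errno;
 *	err = bpf_link__pin(link, "/sys/fs/bpf/myapp_link");
 *	...
 *	bpf_link__destroy(link); // detaches the program unless the link is pinned
 */
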
/**
 * @brief **bpf_program__attach()** is a generic function for attaching
 * a BPF program based on auto-detection of program type, attach type,
 * and extra parameters, where applicable.
 *
 * @param prog BPF program to attach
 * @return Reference to the newly created BPF link; or NULL is returned on error,
 * error code is stored in errno
 *
 * This is supported for:
 *   - kprobe/kretprobe (depends on SEC() definition)
 *   - uprobe/uretprobe (depends on SEC() definition)
 *   - tracepoint
 *   - raw tracepoint
 *   - tracing programs (typed raw TP/fentry/fexit/fmod_ret)
 */
LIBBPF_API struct bpf_link *
bpf_program__attach(const struct bpf_program *prog);

struct bpf_perf_event_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* custom user-provided value fetchable through bpf_get_attach_cookie() */
	__u64 bpf_cookie;
	/* don't use BPF link when attaching BPF program */
	bool force_ioctl_attach;
	size_t :0;
};
#define bpf_perf_event_opts__last_field force_ioctl_attach

LIBBPF_API struct bpf_link *
bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd);

LIBBPF_API struct bpf_link *
bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd,
				    const struct bpf_perf_event_opts *opts);

/**
 * enum probe_attach_mode - the mode to attach kprobe/uprobe
 *
 * force libbpf to attach kprobe/uprobe in specific mode, -ENOTSUP will
 * be returned if it is not supported by the kernel.
 */
enum probe_attach_mode {
	/* attach probe in latest supported mode by kernel */
	PROBE_ATTACH_MODE_DEFAULT = 0,
	/* attach probe in legacy mode, using debugfs/tracefs */
	PROBE_ATTACH_MODE_LEGACY,
	/* create perf event with perf_event_open() syscall */
	PROBE_ATTACH_MODE_PERF,
	/* attach probe with BPF link */
	PROBE_ATTACH_MODE_LINK,
};

struct bpf_kprobe_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* custom user-provided value fetchable through bpf_get_attach_cookie() */
	__u64 bpf_cookie;
	/* function's offset to install kprobe to */
	size_t offset;
	/* kprobe is return probe */
	bool retprobe;
	/* kprobe attach mode */
	enum probe_attach_mode attach_mode;
	size_t :0;
};
#define bpf_kprobe_opts__last_field attach_mode

LIBBPF_API struct bpf_link *
bpf_program__attach_kprobe(const struct bpf_program *prog, bool retprobe,
			   const char *func_name);
LIBBPF_API struct bpf_link *
bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
				const char *func_name,
				const struct bpf_kprobe_opts *opts);

struct bpf_kprobe_multi_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* array of function symbols to attach */
	const char **syms;
	/* array of function addresses to attach */
	const unsigned long *addrs;
	/* array of user-provided values fetchable through bpf_get_attach_cookie */
	const __u64 *cookies;
	/* number of elements in syms/addrs/cookies arrays */
	size_t cnt;
	/* create return kprobes */
	bool retprobe;
	size_t :0;
};

#define bpf_kprobe_multi_opts__last_field retprobe

LIBBPF_API struct bpf_link *
bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
				      const char *pattern,
				      const struct bpf_kprobe_multi_opts *opts);

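/*
 * Illustrative sketch (editor's example): attaching one program to every
 * kernel function matching a glob pattern via a multi-kprobe link. The
 * pattern "tcp_*" is just an example.
 *
 *	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts,
 *		.retprobe = false,
 *	);
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_kprobe_multi_opts(prog, "tcp_*", &opts);
 *	if (!link)
 *		fprintf(stderr, "multi-kprobe attach failed: %d\n", -errno);
 */
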
struct bpf_uprobe_multi_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* array of function symbols to attach to */
	const char **syms;
	/* array of function addresses to attach to */
	const unsigned long *offsets;
	/* optional, array of associated ref counter offsets */
	const unsigned long *ref_ctr_offsets;
	/* optional, array of associated BPF cookies */
	const __u64 *cookies;
	/* number of elements in syms/offsets/cookies arrays */
	size_t cnt;
	/* create return uprobes */
	bool retprobe;
	size_t :0;
};

#define bpf_uprobe_multi_opts__last_field retprobe

/**
 * @brief **bpf_program__attach_uprobe_multi()** attaches a BPF program
 * to multiple uprobes with uprobe_multi link.
 *
 * User can specify 2 mutually exclusive sets of inputs:
 *
 * 1) use only path/func_pattern/pid arguments
 *
 * 2) use path/pid with allowed combinations of
 *    syms/offsets/ref_ctr_offsets/cookies/cnt
 *
 *    - syms and offsets are mutually exclusive
 *    - ref_ctr_offsets and cookies are optional
 *
 * @param prog BPF program to attach
 * @param pid Process ID to attach the uprobe to, 0 for self (own process),
 * -1 for all processes
 * @param binary_path Path to binary
 * @param func_pattern Regular expression to specify functions to attach
 * BPF program to
 * @param opts Additional options (see **struct bpf_uprobe_multi_opts**)
 * @return Reference to the newly created BPF link; or NULL is returned on
 * error, error code is stored in errno
 */
LIBBPF_API struct bpf_link *
bpf_program__attach_uprobe_multi(const struct bpf_program *prog,
				 pid_t pid,
				 const char *binary_path,
				 const char *func_pattern,
				 const struct bpf_uprobe_multi_opts *opts);

struct bpf_ksyscall_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* custom user-provided value fetchable through bpf_get_attach_cookie() */
	__u64 bpf_cookie;
	/* attach as return probe? */
	bool retprobe;
	size_t :0;
};
#define bpf_ksyscall_opts__last_field retprobe

/**
 * @brief **bpf_program__attach_ksyscall()** attaches a BPF program
 * to kernel syscall handler of a specified syscall. Optionally it's possible
 * to request to install retprobe that will be triggered at syscall exit. It's
 * also possible to associate BPF cookie (through options).
 *
 * Libbpf will automatically determine correct full kernel function name,
 * which depending on system architecture and kernel version/configuration
 * could be of the form __<arch>_sys_<syscall> or __se_sys_<syscall>, and will
 * attach specified program using kprobe/kretprobe mechanism.
 *
 * **bpf_program__attach_ksyscall()** is an API counterpart of declarative
 * **SEC("ksyscall/<syscall>")** annotation of BPF programs.
 *
 * At the moment **SEC("ksyscall")** and **bpf_program__attach_ksyscall()** do
 * not handle all the calling convention quirks for mmap(), clone() and compat
 * syscalls. It also only attaches to "native" syscall interfaces. If host
 * system supports compat syscalls or defines 32-bit syscalls in 64-bit
 * kernel, such syscall interfaces won't be attached to by libbpf.
 *
 * These limitations may or may not change in the future. Therefore it is
 * recommended to use SEC("kprobe") for these syscalls or if working with
 * compat and 32-bit interfaces is required.
 *
 * @param prog BPF program to attach
 * @param syscall_name Symbolic name of the syscall (e.g., "bpf")
 * @param opts Additional options (see **struct bpf_ksyscall_opts**)
 * @return Reference to the newly created BPF link; or NULL is returned on
 * error, error code is stored in errno
 */
LIBBPF_API struct bpf_link *
bpf_program__attach_ksyscall(const struct bpf_program *prog,
			     const char *syscall_name,
			     const struct bpf_ksyscall_opts *opts);

struct bpf_uprobe_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* offset of kernel reference counted USDT semaphore, added in
	 * a6ca88b241d5 ("trace_uprobe: support reference counter in fd-based uprobe")
	 */
	size_t ref_ctr_offset;
	/* custom user-provided value fetchable through bpf_get_attach_cookie() */
	__u64 bpf_cookie;
	/* uprobe is return probe, invoked at function return time */
	bool retprobe;
	/* Function name to attach to. Could be an unqualified ("abc") or library-qualified
	 * "abc@LIBXYZ" name. To specify function entry, func_name should be set while
	 * func_offset argument to bpf_program__attach_uprobe_opts() should be 0. To trace an
	 * offset within a function, specify func_name and use func_offset argument to specify
	 * offset within the function. Shared library functions must specify the shared library
	 * binary_path.
	 */
	const char *func_name;
	/* uprobe attach mode */
	enum probe_attach_mode attach_mode;
	size_t :0;
};
#define bpf_uprobe_opts__last_field attach_mode

/**
 * @brief **bpf_program__attach_uprobe()** attaches a BPF program
 * to the userspace function which is found by binary path and
 * offset. You can optionally specify a particular process to attach
 * to. You can also optionally attach the program to the function
 * exit instead of entry.
 *
 * @param prog BPF program to attach
 * @param retprobe Attach to function exit
 * @param pid Process ID to attach the uprobe to, 0 for self (own process),
 * -1 for all processes
 * @param binary_path Path to binary that contains the function symbol
 * @param func_offset Offset within the binary of the function symbol
 * @return Reference to the newly created BPF link; or NULL is returned on error,
 * error code is stored in errno
 */
LIBBPF_API struct bpf_link *
bpf_program__attach_uprobe(const struct bpf_program *prog, bool retprobe,
			   pid_t pid, const char *binary_path,
			   size_t func_offset);

/**
 * @brief **bpf_program__attach_uprobe_opts()** is just like
 * bpf_program__attach_uprobe() except with an options struct
 * for various configurations.
 *
 * @param prog BPF program to attach
 * @param pid Process ID to attach the uprobe to, 0 for self (own process),
 * -1 for all processes
 * @param binary_path Path to binary that contains the function symbol
 * @param func_offset Offset within the binary of the function symbol
 * @param opts Options for altering program attachment
 * @return Reference to the newly created BPF link; or NULL is returned on error,
 * error code is stored in errno
 */
LIBBPF_API struct bpf_link *
bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
				const char *binary_path, size_t func_offset,
				const struct bpf_uprobe_opts *opts);

struct bpf_usdt_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* custom user-provided value accessible through bpf_usdt_cookie() */
	__u64 usdt_cookie;
	size_t :0;
};
#define bpf_usdt_opts__last_field usdt_cookie

/**
 * @brief **bpf_program__attach_usdt()** is just like
 * bpf_program__attach_uprobe_opts() except it covers USDT (User-space
 * Statically Defined Tracepoint) attachment, instead of attaching to
 * user-space function entry or exit.
 *
 * @param prog BPF program to attach
 * @param pid Process ID to attach the uprobe to, 0 for self (own process),
 * -1 for all processes
 * @param binary_path Path to binary that contains provided USDT probe
 * @param usdt_provider USDT provider name
 * @param usdt_name USDT probe name
 * @param opts Options for altering program attachment
 * @return Reference to the newly created BPF link; or NULL is returned on error,
 * error code is stored in errno
 */
LIBBPF_API struct bpf_link *
bpf_program__attach_usdt(const struct bpf_program *prog,
			 pid_t pid, const char *binary_path,
			 const char *usdt_provider, const char *usdt_name,
			 const struct bpf_usdt_opts *opts);

struct bpf_tracepoint_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* custom user-provided value fetchable through bpf_get_attach_cookie() */
	__u64 bpf_cookie;
};
#define bpf_tracepoint_opts__last_field bpf_cookie

LIBBPF_API struct bpf_link *
bpf_program__attach_tracepoint(const struct bpf_program *prog,
			       const char *tp_category,
			       const char *tp_name);
LIBBPF_API struct bpf_link *
bpf_program__attach_tracepoint_opts(const struct bpf_program *prog,
				    const char *tp_category,
				    const char *tp_name,
				    const struct bpf_tracepoint_opts *opts);

struct bpf_raw_tracepoint_opts {
	size_t sz; /* size of this struct for forward/backward compatibility */
	__u64 cookie;
	size_t :0;
};
#define bpf_raw_tracepoint_opts__last_field cookie

LIBBPF_API struct bpf_link *
bpf_program__attach_raw_tracepoint(const struct bpf_program *prog,
				   const char *tp_name);
LIBBPF_API struct bpf_link *
bpf_program__attach_raw_tracepoint_opts(const struct bpf_program *prog,
					const char *tp_name,
					struct bpf_raw_tracepoint_opts *opts);

struct bpf_trace_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* custom user-provided value fetchable through bpf_get_attach_cookie() */
	__u64 cookie;
};
#define bpf_trace_opts__last_field cookie

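/*
 * Illustrative sketch (editor's example): attaching a tracepoint program to
 * the sched:sched_switch tracepoint. The category/name pair is just an
 * example; prog is assumed to be a loaded tracepoint program.
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_tracepoint(prog, "sched", "sched_switch");
 *	if (!link)
 *		fprintf(stderr, "tracepoint attach failed: %d\n", -errno);
 */
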
LIBBPF_API struct bpf_link *
bpf_program__attach_trace(const struct bpf_program *prog);
LIBBPF_API struct bpf_link *
bpf_program__attach_trace_opts(const struct bpf_program *prog, const struct bpf_trace_opts *opts);

LIBBPF_API struct bpf_link *
bpf_program__attach_lsm(const struct bpf_program *prog);
LIBBPF_API struct bpf_link *
bpf_program__attach_cgroup(const struct bpf_program *prog, int cgroup_fd);
LIBBPF_API struct bpf_link *
bpf_program__attach_netns(const struct bpf_program *prog, int netns_fd);
LIBBPF_API struct bpf_link *
bpf_program__attach_sockmap(const struct bpf_program *prog, int map_fd);
LIBBPF_API struct bpf_link *
bpf_program__attach_xdp(const struct bpf_program *prog, int ifindex);
LIBBPF_API struct bpf_link *
bpf_program__attach_freplace(const struct bpf_program *prog,
			     int target_fd, const char *attach_func_name);

struct bpf_netfilter_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;

	__u32 pf;
	__u32 hooknum;
	__s32 priority;
	__u32 flags;
};
#define bpf_netfilter_opts__last_field flags

LIBBPF_API struct bpf_link *
bpf_program__attach_netfilter(const struct bpf_program *prog,
			      const struct bpf_netfilter_opts *opts);

struct bpf_tcx_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	__u32 flags;
	__u32 relative_fd;
	__u32 relative_id;
	__u64 expected_revision;
	size_t :0;
};
#define bpf_tcx_opts__last_field expected_revision

LIBBPF_API struct bpf_link *
bpf_program__attach_tcx(const struct bpf_program *prog, int ifindex,
			const struct bpf_tcx_opts *opts);

struct bpf_netkit_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	__u32 flags;
	__u32 relative_fd;
	__u32 relative_id;
	__u64 expected_revision;
	size_t :0;
};
#define bpf_netkit_opts__last_field expected_revision

LIBBPF_API struct bpf_link *
bpf_program__attach_netkit(const struct bpf_program *prog, int ifindex,
			   const struct bpf_netkit_opts *opts);

struct bpf_map;

LIBBPF_API struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map);
LIBBPF_API int bpf_link__update_map(struct bpf_link *link, const struct bpf_map *map);

struct bpf_iter_attach_opts {
	size_t sz; /* size of this struct for forward/backward compatibility */
	union bpf_iter_link_info *link_info;
	__u32 link_info_len;
};
#define bpf_iter_attach_opts__last_field link_info_len

LIBBPF_API struct bpf_link *
bpf_program__attach_iter(const struct bpf_program *prog,
			 const struct bpf_iter_attach_opts *opts);

LIBBPF_API enum bpf_prog_type bpf_program__type(const struct bpf_program *prog);

/**
 * @brief **bpf_program__set_type()** sets the program
 * type of the passed BPF program.
 * @param prog BPF program to set the program type for
 * @param type program type to set the BPF program to have
 * @return error code; or 0 if no error. An error occurs
 * if the object is already loaded.
 *
 * This must be called before the BPF object is loaded,
 * otherwise it has no effect and an error is returned.
 */
LIBBPF_API int bpf_program__set_type(struct bpf_program *prog,
				     enum bpf_prog_type type);

LIBBPF_API enum bpf_attach_type
bpf_program__expected_attach_type(const struct bpf_program *prog);

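/*
 * Illustrative sketch (editor's example): forcing program and attach types
 * before load, e.g. when the ELF section name alone is ambiguous. obj and
 * prog are assumed to come from earlier open/lookup calls.
 *
 *	bpf_program__set_type(prog, BPF_PROG_TYPE_CGROUP_SKB);
 *	bpf_program__set_expected_attach_type(prog, BPF_CGROUP_INET_EGRESS);
 *	err = bpf_object__load(obj);
 */
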
/**
 * @brief **bpf_program__set_expected_attach_type()** sets the
 * attach type of the passed BPF program. This is used for
 * auto-detection of attachment when programs are loaded.
 * @param prog BPF program to set the attach type for
 * @param type attach type to set the BPF program to have
 * @return error code; or 0 if no error. An error occurs
 * if the object is already loaded.
 *
 * This must be called before the BPF object is loaded,
 * otherwise it has no effect and an error is returned.
 */
LIBBPF_API int
bpf_program__set_expected_attach_type(struct bpf_program *prog,
				      enum bpf_attach_type type);

LIBBPF_API __u32 bpf_program__flags(const struct bpf_program *prog);
LIBBPF_API int bpf_program__set_flags(struct bpf_program *prog, __u32 flags);

/* Per-program log level and log buffer getters/setters.
 * See bpf_object_open_opts comments regarding log_level and log_buf
 * interactions.
 */
LIBBPF_API __u32 bpf_program__log_level(const struct bpf_program *prog);
LIBBPF_API int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_level);
LIBBPF_API const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size);
LIBBPF_API int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size);

/**
 * @brief **bpf_program__set_attach_target()** sets BTF-based attach target
 * for supported BPF program types:
 *   - BTF-aware raw tracepoints (tp_btf);
 *   - fentry/fexit/fmod_ret;
 *   - lsm;
 *   - freplace.
 * @param prog BPF program to set the attach target for
 * @param attach_prog_fd FD of the target BPF program (for freplace programs),
 * or 0 to resolve the target against kernel BTF
 * @param attach_func_name name of the kernel or target program function to
 * attach to
 * @return error code; or 0 if no error occurred.
 */
LIBBPF_API int
bpf_program__set_attach_target(struct bpf_program *prog, int attach_prog_fd,
			       const char *attach_func_name);

/**
 * @brief **bpf_object__find_map_by_name()** returns BPF map of
 * the given name, if it exists within the passed BPF object
 * @param obj BPF object
 * @param name name of the BPF map
 * @return BPF map instance, if such map exists within the BPF object;
 * or NULL otherwise.
 */
LIBBPF_API struct bpf_map *
bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name);

LIBBPF_API int
bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name);

LIBBPF_API struct bpf_map *
bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *map);

#define bpf_object__for_each_map(pos, obj)		\
	for ((pos) = bpf_object__next_map((obj), NULL);	\
	     (pos) != NULL;				\
	     (pos) = bpf_object__next_map((obj), (pos)))
#define bpf_map__for_each bpf_object__for_each_map

LIBBPF_API struct bpf_map *
bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *map);

/**
 * @brief **bpf_map__set_autocreate()** sets whether libbpf has to auto-create
 * BPF map during BPF object load phase.
 * @param map the BPF map instance
 * @param autocreate whether to create BPF map during BPF object load
 * @return 0 on success; -EBUSY if BPF object was already loaded
 *
 * **bpf_map__set_autocreate()** allows opting out of libbpf auto-creating
 * the BPF map. By default, libbpf will attempt to create every single BPF map
 * defined in BPF object file using BPF_MAP_CREATE command of bpf() syscall
 * and fill in map FD in BPF instructions.
 *
 * This API allows opting out of this process for a specific map instance. This
 * can be useful if the host kernel doesn't support such BPF map type or used
 * combination of flags and the user application wants to avoid creating such
 * a map in the first place. The user is still responsible for making sure
 * that their BPF-side code that expects to use such missing BPF map is
 * recognized by BPF verifier as dead code, otherwise BPF verifier will reject
 * such BPF program.
 */
LIBBPF_API int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate);
LIBBPF_API bool bpf_map__autocreate(const struct bpf_map *map);

/**
 * @brief **bpf_map__fd()** gets the file descriptor of the passed
 * BPF map
 * @param map the BPF map instance
 * @return the file descriptor; or -EINVAL in case of an error
 */
LIBBPF_API int bpf_map__fd(const struct bpf_map *map);
LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd);
/* get map name */
LIBBPF_API const char *bpf_map__name(const struct bpf_map *map);
/* get/set map type */
LIBBPF_API enum bpf_map_type bpf_map__type(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type);
/* get/set map size (max_entries) */
LIBBPF_API __u32 bpf_map__max_entries(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries);
/* get/set map flags */
LIBBPF_API __u32 bpf_map__map_flags(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags);
/* get/set map NUMA node */
LIBBPF_API __u32 bpf_map__numa_node(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node);
/* get/set map key size */
LIBBPF_API __u32 bpf_map__key_size(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_key_size(struct bpf_map *map, __u32 size);
/* get map value size */
LIBBPF_API __u32 bpf_map__value_size(const struct bpf_map *map);

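/*
 * Illustrative sketch (editor's example): tuning a map's size before load,
 * e.g. scaling it with the number of possible CPUs. The map name "conn_map"
 * is hypothetical; err is assumed to be declared by the caller.
 *
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, "conn_map");
 *
 *	if (map)
 *		bpf_map__set_max_entries(map, 1024 * libbpf_num_possible_cpus());
 *	err = bpf_object__load(obj);
 */
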
/**
 * @brief **bpf_map__set_value_size()** sets map value size.
 * @param map the BPF map instance
 * @param size the new map value size
 * @return 0, on success; negative error, otherwise
 *
 * There is a special case for maps with associated memory-mapped regions, like
 * the global data section maps (bss, data, rodata). When this function is used
 * on such a map, the mapped region is resized. Afterward, an attempt is made to
 * adjust the corresponding BTF info. This attempt is best-effort and can only
 * succeed if the last variable of the data section map is an array. The array
 * BTF type is replaced by a new BTF array type with a different length.
 * Any previously existing pointers returned from bpf_map__initial_value() or
 * corresponding data section skeleton pointer must be reinitialized.
 */
LIBBPF_API int bpf_map__set_value_size(struct bpf_map *map, __u32 size);
/* get map key/value BTF type IDs */
LIBBPF_API __u32 bpf_map__btf_key_type_id(const struct bpf_map *map);
LIBBPF_API __u32 bpf_map__btf_value_type_id(const struct bpf_map *map);
/* get/set map if_index */
LIBBPF_API __u32 bpf_map__ifindex(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex);
/* get/set map map_extra flags */
LIBBPF_API __u64 bpf_map__map_extra(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra);

LIBBPF_API int bpf_map__set_initial_value(struct bpf_map *map,
					  const void *data, size_t size);
LIBBPF_API void *bpf_map__initial_value(const struct bpf_map *map, size_t *psize);

/**
 * @brief **bpf_map__is_internal()** tells the caller whether or not the
 * passed map is a special map created by libbpf automatically for things like
 * global variables, __ksym externs, Kconfig values, etc
 * @param map the bpf_map
 * @return true, if the map is an internal map; false, otherwise
 */
LIBBPF_API bool bpf_map__is_internal(const struct bpf_map *map);

/**
 * @brief **bpf_map__set_pin_path()** sets the path attribute that tells where the
 * BPF map should be pinned. This does not actually create the 'pin'.
 * @param map The bpf_map
 * @param path The path
 * @return 0, on success; negative error, otherwise
 */
LIBBPF_API int bpf_map__set_pin_path(struct bpf_map *map, const char *path);

/**
 * @brief **bpf_map__pin_path()** gets the path attribute that tells where the
 * BPF map should be pinned.
 * @param map The bpf_map
 * @return The path string; which can be NULL
 */
LIBBPF_API const char *bpf_map__pin_path(const struct bpf_map *map);

/**
 * @brief **bpf_map__is_pinned()** tells the caller whether or not the
 * passed map has been pinned via a 'pin' file.
 * @param map The bpf_map
 * @return true, if the map is pinned; false, otherwise
 */
LIBBPF_API bool bpf_map__is_pinned(const struct bpf_map *map);

/**
 * @brief **bpf_map__pin()** creates a file that serves as a 'pin'
 * for the BPF map. This increments the reference count on the
 * BPF map which will keep the BPF map loaded even after the
 * userspace process which loaded it has exited.
 * @param map The bpf_map to pin
 * @param path A file path for the 'pin'
 * @return 0, on success; negative error, otherwise
 *
 * If `path` is NULL the map's `pin_path` attribute will be used. If this is
 * also NULL, an error will be returned and the map will not be pinned.
 */
LIBBPF_API int bpf_map__pin(struct bpf_map *map, const char *path);

/**
 * @brief **bpf_map__unpin()** removes the file that serves as a
 * 'pin' for the BPF map.
 * @param map The bpf_map to unpin
 * @param path A file path for the 'pin'
 * @return 0, on success; negative error, otherwise
 *
 * The `path` parameter can be NULL, in which case the `pin_path`
 * map attribute is unpinned. If both the `path` parameter and
 * `pin_path` map attribute are set, they must be equal.
 */
LIBBPF_API int bpf_map__unpin(struct bpf_map *map, const char *path);

LIBBPF_API int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd);
LIBBPF_API struct bpf_map *bpf_map__inner_map(struct bpf_map *map);

/**
 * @brief **bpf_map__lookup_elem()** allows to lookup BPF map value
 * corresponding to provided key.
 * @param map BPF map to lookup element in
 * @param key pointer to memory containing bytes of the key used for lookup
 * @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size**
 * @param value pointer to memory in which looked up value will be stored
 * @param value_sz size in bytes of value data memory; it has to match BPF map
 * definition's **value_size**. For per-CPU BPF maps value size has to be
 * a product of BPF map value size and number of possible CPUs in the system
 * (could be fetched with **libbpf_num_possible_cpus()**). Note also that for
 * per-CPU values value size has to be aligned up to closest 8 bytes for
 * alignment reasons, so expected size is: `round_up(value_size, 8)
 * * libbpf_num_possible_cpus()`.
 * @param flags extra flags passed to kernel for this operation
 * @return 0, on success; negative error, otherwise
 *
 * **bpf_map__lookup_elem()** is high-level equivalent of
 * **bpf_map_lookup_elem()** API with added check for key and value size.
 */
LIBBPF_API int bpf_map__lookup_elem(const struct bpf_map *map,
				    const void *key, size_t key_sz,
				    void *value, size_t value_sz, __u64 flags);

/**
 * @brief **bpf_map__update_elem()** allows to insert or update value in BPF
 * map that corresponds to provided key.
 * @param map BPF map to insert to or update element in
 * @param key pointer to memory containing bytes of the key
 * @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size**
 * @param value pointer to memory containing bytes of the value
 * @param value_sz size in bytes of value data memory; it has to match BPF map
 * definition's **value_size**. For per-CPU BPF maps value size has to be
 * a product of BPF map value size and number of possible CPUs in the system
 * (could be fetched with **libbpf_num_possible_cpus()**). Note also that for
 * per-CPU values value size has to be aligned up to closest 8 bytes for
 * alignment reasons, so expected size is: `round_up(value_size, 8)
 * * libbpf_num_possible_cpus()`.
 * @param flags extra flags passed to kernel for this operation
 * @return 0, on success; negative error, otherwise
 *
 * **bpf_map__update_elem()** is high-level equivalent of
 * **bpf_map_update_elem()** API with added check for key and value size.
 */
LIBBPF_API int bpf_map__update_elem(const struct bpf_map *map,
				    const void *key, size_t key_sz,
				    const void *value, size_t value_sz, __u64 flags);

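/*
 * Illustrative sketch (editor's example): updating and reading back an entry
 * in a hash map whose key is __u32 and value is __u64, letting libbpf verify
 * the sizes against the map definition. err is assumed to be declared.
 *
 *	__u32 key = 42;
 *	__u64 val = 1, out = 0;
 *
 *	err = bpf_map__update_elem(map, &key, sizeof(key), &val, sizeof(val), BPF_ANY);
 *	if (!err)
 *		err = bpf_map__lookup_elem(map, &key, sizeof(key), &out, sizeof(out), 0);
 */
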
/**
 * @brief **bpf_map__delete_elem()** allows to delete element in BPF map that
 * corresponds to provided key.
 * @param map BPF map to delete element from
 * @param key pointer to memory containing bytes of the key
 * @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size**
 * @param flags extra flags passed to kernel for this operation
 * @return 0, on success; negative error, otherwise
 *
 * **bpf_map__delete_elem()** is high-level equivalent of
 * **bpf_map_delete_elem()** API with added check for key size.
 */
LIBBPF_API int bpf_map__delete_elem(const struct bpf_map *map,
				    const void *key, size_t key_sz, __u64 flags);

/**
 * @brief **bpf_map__lookup_and_delete_elem()** allows to lookup BPF map value
 * corresponding to provided key and atomically delete it afterwards.
 * @param map BPF map to lookup element in
 * @param key pointer to memory containing bytes of the key used for lookup
 * @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size**
 * @param value pointer to memory in which looked up value will be stored
 * @param value_sz size in bytes of value data memory; it has to match BPF map
 * definition's **value_size**. For per-CPU BPF maps value size has to be
 * a product of BPF map value size and number of possible CPUs in the system
 * (could be fetched with **libbpf_num_possible_cpus()**). Note also that for
 * per-CPU values value size has to be aligned up to closest 8 bytes for
 * alignment reasons, so expected size is: `round_up(value_size, 8)
 * * libbpf_num_possible_cpus()`.
 * @param flags extra flags passed to kernel for this operation
 * @return 0, on success; negative error, otherwise
 *
 * **bpf_map__lookup_and_delete_elem()** is high-level equivalent of
 * **bpf_map_lookup_and_delete_elem()** API with added check for key and value size.
 */
LIBBPF_API int bpf_map__lookup_and_delete_elem(const struct bpf_map *map,
					       const void *key, size_t key_sz,
					       void *value, size_t value_sz, __u64 flags);

/**
 * @brief **bpf_map__get_next_key()** allows to iterate BPF map keys by
 * fetching next key that follows current key.
 * @param map BPF map to fetch next key from
 * @param cur_key pointer to memory containing bytes of current key or NULL to
 * fetch the first key
 * @param next_key pointer to memory to write next key into
 * @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size**
 * @return 0, on success; -ENOENT if **cur_key** is the last key in BPF map;
 * negative error, otherwise
 *
 * **bpf_map__get_next_key()** is high-level equivalent of
 * **bpf_map_get_next_key()** API with added check for key size.
 */
LIBBPF_API int bpf_map__get_next_key(const struct bpf_map *map,
				     const void *cur_key, void *next_key, size_t key_sz);

struct bpf_xdp_set_link_opts {
	size_t sz;
	int old_fd;
	size_t :0;
};
#define bpf_xdp_set_link_opts__last_field old_fd

struct bpf_xdp_attach_opts {
	size_t sz;
	int old_prog_fd;
	size_t :0;
};
#define bpf_xdp_attach_opts__last_field old_prog_fd

struct bpf_xdp_query_opts {
	size_t sz;
	__u32 prog_id;		/* output */
	__u32 drv_prog_id;	/* output */
	__u32 hw_prog_id;	/* output */
	__u32 skb_prog_id;	/* output */
	__u8 attach_mode;	/* output */
	__u64 feature_flags;	/* output */
	__u32 xdp_zc_max_segs;	/* output */
	size_t :0;
};
#define bpf_xdp_query_opts__last_field xdp_zc_max_segs

LIBBPF_API int bpf_xdp_attach(int ifindex, int prog_fd, __u32 flags,
			      const struct bpf_xdp_attach_opts *opts);
LIBBPF_API int bpf_xdp_detach(int ifindex, __u32 flags,
			      const struct bpf_xdp_attach_opts *opts);
LIBBPF_API int bpf_xdp_query(int ifindex, int flags, struct bpf_xdp_query_opts *opts);
LIBBPF_API int bpf_xdp_query_id(int ifindex, int flags, __u32 *prog_id);

/* TC related API */
enum bpf_tc_attach_point {
	BPF_TC_INGRESS = 1 << 0,
	BPF_TC_EGRESS  = 1 << 1,
	BPF_TC_CUSTOM  = 1 << 2,
};

#define BPF_TC_PARENT(a, b)	\
	((((a) << 16) & 0xFFFF0000U) | ((b) & 0x0000FFFFU))

enum bpf_tc_flags {
	BPF_TC_F_REPLACE = 1 << 0,
};

struct bpf_tc_hook {
	size_t sz;
	int ifindex;
	enum bpf_tc_attach_point attach_point;
	__u32 parent;
	size_t :0;
};
#define bpf_tc_hook__last_field parent

struct bpf_tc_opts {
	size_t sz;
	int prog_fd;
	__u32 flags;
	__u32 prog_id;
	__u32 handle;
	__u32 priority;
	size_t :0;
};
#define bpf_tc_opts__last_field priority

LIBBPF_API int bpf_tc_hook_create(struct bpf_tc_hook *hook);
LIBBPF_API int bpf_tc_hook_destroy(struct bpf_tc_hook *hook);
LIBBPF_API int bpf_tc_attach(const struct bpf_tc_hook *hook,
			     struct bpf_tc_opts *opts);
LIBBPF_API int bpf_tc_detach(const struct bpf_tc_hook *hook,
			     const struct bpf_tc_opts *opts);
LIBBPF_API int bpf_tc_query(const struct bpf_tc_hook *hook,
			    struct bpf_tc_opts *opts);

/* Ring buffer APIs */
struct ring_buffer;
struct ring;
struct user_ring_buffer;

typedef int (*ring_buffer_sample_fn)(void *ctx, void *data, size_t size);

struct ring_buffer_opts {
	size_t sz; /* size of this struct, for forward/backward compatibility */
};

#define ring_buffer_opts__last_field sz

LIBBPF_API struct ring_buffer *
ring_buffer__new(int map_fd, ring_buffer_sample_fn sample_cb, void *ctx,
		 const struct ring_buffer_opts *opts);
LIBBPF_API void ring_buffer__free(struct ring_buffer *rb);
LIBBPF_API int ring_buffer__add(struct ring_buffer *rb, int map_fd,
				ring_buffer_sample_fn sample_cb, void *ctx);
LIBBPF_API int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms);
LIBBPF_API int ring_buffer__consume(struct ring_buffer *rb);
LIBBPF_API int ring_buffer__consume_n(struct ring_buffer *rb, size_t n);
LIBBPF_API int ring_buffer__epoll_fd(const struct ring_buffer *rb);

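/*
 * Illustrative sketch (editor's example): consuming records from a
 * BPF_MAP_TYPE_RINGBUF map with a callback. The map variable, the
 * handle_event callback, and the stop flag are hypothetical.
 *
 *	static int handle_event(void *ctx, void *data, size_t size)
 *	{
 *		// process one record; return 0 to keep consuming
 *		return 0;
 *	}
 *
 *	struct ring_buffer *rb;
 *
 *	rb = ring_buffer__new(bpf_map__fd(map), handle_event, NULL, NULL);
 *	if (!rb)
 *		return -errno;
 *	while (!stop)
 *		ring_buffer__poll(rb, 100); // timeout in ms
 *	ring_buffer__free(rb);
 */
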
/**
 * @brief **ring_buffer__ring()** returns the ringbuffer object inside a given
 * ringbuffer manager representing a single BPF_MAP_TYPE_RINGBUF map instance.
 *
 * @param rb A ringbuffer manager object.
 * @param idx An index into the ringbuffers contained within the ringbuffer
 * manager object. The index is 0-based and corresponds to the order in which
 * ring_buffer__add was called.
 * @return A ringbuffer object on success; NULL and errno set if the index is
 * invalid.
 */
LIBBPF_API struct ring *ring_buffer__ring(struct ring_buffer *rb,
					  unsigned int idx);

/**
 * @brief **ring__consumer_pos()** returns the current consumer position in the
 * given ringbuffer.
 *
 * @param r A ringbuffer object.
 * @return The current consumer position.
 */
LIBBPF_API unsigned long ring__consumer_pos(const struct ring *r);

/**
 * @brief **ring__producer_pos()** returns the current producer position in the
 * given ringbuffer.
 *
 * @param r A ringbuffer object.
 * @return The current producer position.
 */
LIBBPF_API unsigned long ring__producer_pos(const struct ring *r);

/**
 * @brief **ring__avail_data_size()** returns the number of bytes in the
 * ringbuffer not yet consumed. This has no locking associated with it, so it
 * can be inaccurate if operations are ongoing while this is called. However, it
 * should still show the correct trend over the long-term.
 *
 * @param r A ringbuffer object.
 * @return The number of bytes not yet consumed.
 */
LIBBPF_API size_t ring__avail_data_size(const struct ring *r);

/**
 * @brief **ring__size()** returns the total size of the ringbuffer's map data
 * area (excluding special producer/consumer pages). Effectively this gives the
 * amount of usable bytes of data inside the ringbuffer.
 *
 * @param r A ringbuffer object.
 * @return The total size of the ringbuffer map data area.
 */
LIBBPF_API size_t ring__size(const struct ring *r);

/**
 * @brief **ring__map_fd()** returns the file descriptor underlying the given
 * ringbuffer.
 *
 * @param r A ringbuffer object.
 * @return The underlying ringbuffer file descriptor
 */
LIBBPF_API int ring__map_fd(const struct ring *r);

/**
 * @brief **ring__consume()** consumes available ringbuffer data without event
 * polling.
 *
 * @param r A ringbuffer object.
 * @return The number of records consumed (or INT_MAX, whichever is less), or
 * a negative number if any of the callbacks return an error.
 */
LIBBPF_API int ring__consume(struct ring *r);

/**
 * @brief **ring__consume_n()** consumes up to a requested amount of items from
 * a ringbuffer without event polling.
 *
 * @param r A ringbuffer object.
 * @param n Maximum amount of items to consume.
 * @return The number of items consumed, or a negative number if any of the
 * callbacks return an error.
 */
LIBBPF_API int ring__consume_n(struct ring *r, size_t n);

struct user_ring_buffer_opts {
	size_t sz; /* size of this struct, for forward/backward compatibility */
};

#define user_ring_buffer_opts__last_field sz

/**
 * @brief **user_ring_buffer__new()** creates a new instance of a user ring
 * buffer.
 *
 * @param map_fd A file descriptor to a BPF_MAP_TYPE_USER_RINGBUF map.
 * @param opts Options for how the ring buffer should be created.
struct user_ring_buffer_opts {
	size_t sz; /* size of this struct, for forward/backward compatibility */
};

#define user_ring_buffer_opts__last_field sz

/**
 * @brief **user_ring_buffer__new()** creates a new instance of a user ring
 * buffer.
 *
 * @param map_fd A file descriptor to a BPF_MAP_TYPE_USER_RINGBUF map.
 * @param opts Options for how the ring buffer should be created.
 * @return A user ring buffer on success; NULL with errno set on failure.
 */
LIBBPF_API struct user_ring_buffer *
user_ring_buffer__new(int map_fd, const struct user_ring_buffer_opts *opts);

/**
 * @brief **user_ring_buffer__reserve()** reserves a pointer to a sample in the
 * user ring buffer.
 * @param rb A pointer to a user ring buffer.
 * @param size The size of the sample, in bytes.
 * @return A pointer to an 8-byte aligned reserved region of the user ring
 * buffer; NULL with errno set if a sample could not be reserved.
 *
 * This function is *not* thread safe, and callers must synchronize accessing
 * this function if there are multiple producers. If a size is requested that
 * is larger than the size of the entire ring buffer, errno will be set to
 * E2BIG and NULL is returned. If the ring buffer could accommodate the size,
 * but currently does not have enough space, errno is set to ENOSPC and NULL is
 * returned.
 *
 * After initializing the sample, callers must invoke
 * **user_ring_buffer__submit()** to post the sample to the kernel. Otherwise,
 * the sample must be freed with **user_ring_buffer__discard()**.
 */
LIBBPF_API void *user_ring_buffer__reserve(struct user_ring_buffer *rb, __u32 size);

/**
 * @brief **user_ring_buffer__reserve_blocking()** reserves a record in the
 * ring buffer, possibly blocking for up to @timeout_ms until a sample becomes
 * available.
 * @param rb The user ring buffer.
 * @param size The size of the sample, in bytes.
 * @param timeout_ms The amount of time, in milliseconds, for which the caller
 * should block when waiting for a sample. -1 causes the caller to block
 * indefinitely.
 * @return A pointer to an 8-byte aligned reserved region of the user ring
 * buffer; NULL with errno set if a sample could not be reserved.
 *
 * This function is *not* thread safe, and callers must synchronize accessing
 * this function if there are multiple producers.
 *
 * If **timeout_ms** is -1, the function will block indefinitely until a sample
 * becomes available. Otherwise, **timeout_ms** must be non-negative, or errno
 * is set to EINVAL, and NULL is returned. If **timeout_ms** is 0, no blocking
 * will occur and the function will return immediately after attempting to
 * reserve a sample.
 *
 * If **size** is larger than the size of the entire ring buffer, errno is set
 * to E2BIG and NULL is returned. If the ring buffer could accommodate
 * **size**, but currently does not have enough space, the caller will block
 * until at most **timeout_ms** has elapsed. If insufficient space is available
 * at that time, errno is set to ENOSPC, and NULL is returned.
 *
 * The kernel guarantees that it will wake up this thread to check if
 * sufficient space is available in the ring buffer at least once per
 * invocation of the **bpf_ringbuf_drain()** helper function, provided that at
 * least one sample is consumed, and the BPF program did not invoke the
 * function with BPF_RB_NO_WAKEUP. A wakeup may occur sooner than that, but the
 * kernel does not guarantee this. If the helper function is invoked with
 * BPF_RB_FORCE_WAKEUP, a wakeup event will be sent even if no sample is
 * consumed.
 *
 * When a sample of size **size** is found within **timeout_ms**, a pointer to
 * the sample is returned. After initializing the sample, callers must invoke
 * **user_ring_buffer__submit()** to post the sample to the ring buffer.
 * Otherwise, the sample must be freed with **user_ring_buffer__discard()**.
 */
LIBBPF_API void *user_ring_buffer__reserve_blocking(struct user_ring_buffer *rb,
						    __u32 size,
						    int timeout_ms);

/**
 * @brief **user_ring_buffer__submit()** submits a previously reserved sample
 * into the ring buffer.
 * @param rb The user ring buffer.
 * @param sample A reserved sample.
 *
 * It is not necessary to synchronize amongst multiple producers when invoking
 * this function.
 */
LIBBPF_API void user_ring_buffer__submit(struct user_ring_buffer *rb, void *sample);

/**
 * @brief **user_ring_buffer__discard()** discards a previously reserved sample.
 * @param rb The user ring buffer.
 * @param sample A reserved sample.
 *
 * It is not necessary to synchronize amongst multiple producers when invoking
 * this function.
 */
LIBBPF_API void user_ring_buffer__discard(struct user_ring_buffer *rb, void *sample);

/**
 * @brief **user_ring_buffer__free()** frees a ring buffer that was previously
 * created with **user_ring_buffer__new()**.
 * @param rb The user ring buffer being freed.
 */
LIBBPF_API void user_ring_buffer__free(struct user_ring_buffer *rb);
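/* Example usage for the user ring buffer producer APIs above (a minimal
 * sketch; "urb_map_fd" is assumed to be the FD of a BPF_MAP_TYPE_USER_RINGBUF
 * map, and "struct my_msg" a hypothetical sample layout shared with the BPF
 * program):
 *
 *	struct user_ring_buffer *urb;
 *	struct my_msg *msg;
 *
 *	urb = user_ring_buffer__new(urb_map_fd, NULL);
 *	if (!urb)
 *		// error handling
 *
 *	msg = user_ring_buffer__reserve(urb, sizeof(*msg));
 *	if (!msg) {
 *		// errno is E2BIG or ENOSPC, see above
 *	} else {
 *		msg->value = 42;			// initialize the sample...
 *		user_ring_buffer__submit(urb, msg);	// ...then post it
 *	}
 *	user_ring_buffer__free(urb);
 */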
/* Perf buffer APIs */
struct perf_buffer;

typedef void (*perf_buffer_sample_fn)(void *ctx, int cpu,
				      void *data, __u32 size);
typedef void (*perf_buffer_lost_fn)(void *ctx, int cpu, __u64 cnt);

/* common use perf buffer options */
struct perf_buffer_opts {
	size_t sz;
	__u32 sample_period;
	size_t :0;
};
#define perf_buffer_opts__last_field sample_period

/**
 * @brief **perf_buffer__new()** creates a BPF perfbuf manager for a specified
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY map.
 * @param map_fd FD of the BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map that will be
 * used by BPF code to send data over to user-space
 * @param page_cnt number of memory pages allocated for each per-CPU buffer
 * @param sample_cb function called on each received data record
 * @param lost_cb function called when record loss has occurred
 * @param ctx user-provided extra context passed into *sample_cb* and *lost_cb*
 * @param opts optional perf buffer options, can be NULL
 * @return a new instance of struct perf_buffer on success, NULL on error with
 * *errno* containing an error code
 */
LIBBPF_API struct perf_buffer *
perf_buffer__new(int map_fd, size_t page_cnt,
		 perf_buffer_sample_fn sample_cb, perf_buffer_lost_fn lost_cb, void *ctx,
		 const struct perf_buffer_opts *opts);
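/* Example usage for perf_buffer__new() (a minimal sketch; "map_fd" is assumed
 * to be the FD of a BPF_MAP_TYPE_PERF_EVENT_ARRAY map, and "exiting" a
 * caller-owned stop flag):
 *
 *	static void on_sample(void *ctx, int cpu, void *data, __u32 size)
 *	{
 *		// process one record received from the given CPU's buffer
 *	}
 *
 *	static void on_lost(void *ctx, int cpu, __u64 cnt)
 *	{
 *		// cnt records were dropped on this CPU
 *	}
 *
 *	struct perf_buffer *pb;
 *
 *	// 8 pages per CPU; page_cnt is expected to be a power of 2
 *	pb = perf_buffer__new(map_fd, 8, on_sample, on_lost, NULL, NULL);
 *	if (!pb)
 *		// error handling
 *	while (!exiting) {
 *		if (perf_buffer__poll(pb, 100) < 0)	// 100 ms timeout
 *			// error handling
 *	}
 *	perf_buffer__free(pb);
 */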
enum bpf_perf_event_ret {
	LIBBPF_PERF_EVENT_DONE = 0,
	LIBBPF_PERF_EVENT_ERROR = -1,
	LIBBPF_PERF_EVENT_CONT = -2,
};

struct perf_event_header;

typedef enum bpf_perf_event_ret
(*perf_buffer_event_fn)(void *ctx, int cpu, struct perf_event_header *event);

/* raw perf buffer options, giving most power and control */
struct perf_buffer_raw_opts {
	size_t sz;
	long :0;
	long :0;
	/* if cpu_cnt == 0, open all ring buffers on all possible CPUs (up to
	 * the number of max_entries of given PERF_EVENT_ARRAY map)
	 */
	int cpu_cnt;
	/* if cpu_cnt > 0, cpus is an array of CPUs to open ring buffers on */
	int *cpus;
	/* if cpu_cnt > 0, map_keys specify map keys to set per-CPU FDs for */
	int *map_keys;
};
#define perf_buffer_raw_opts__last_field map_keys

struct perf_event_attr;

LIBBPF_API struct perf_buffer *
perf_buffer__new_raw(int map_fd, size_t page_cnt, struct perf_event_attr *attr,
		     perf_buffer_event_fn event_cb, void *ctx,
		     const struct perf_buffer_raw_opts *opts);

LIBBPF_API void perf_buffer__free(struct perf_buffer *pb);
LIBBPF_API int perf_buffer__epoll_fd(const struct perf_buffer *pb);
LIBBPF_API int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms);
LIBBPF_API int perf_buffer__consume(struct perf_buffer *pb);
LIBBPF_API int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx);
LIBBPF_API size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb);
LIBBPF_API int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx);

/**
 * @brief **perf_buffer__buffer()** returns the per-CPU raw mmap()'ed
 * underlying memory region of the ring buffer.
 * This ring buffer can be used to implement a custom events consumer.
 * The ring buffer starts with the *struct perf_event_mmap_page*, which
 * holds the ring buffer management fields; when accessing the header
 * structure it's important to be SMP aware.
 * You can refer to *perf_event_read_simple* for a simple example.
 * @param pb the perf buffer structure
 * @param buf_idx the buffer index to retrieve
 * @param buf (out) gets the base pointer of the mmap()'ed memory
 * @param buf_size (out) gets the size of the mmap()'ed region
 * @return 0 on success, negative error code for failure
 */
LIBBPF_API int perf_buffer__buffer(struct perf_buffer *pb, int buf_idx, void **buf,
				   size_t *buf_size);
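/* Example usage for perf_buffer__buffer() (a minimal sketch of a custom
 * consumer; assumes <linux/perf_event.h> for struct perf_event_mmap_page):
 *
 *	struct perf_event_mmap_page *header;
 *	void *buf;
 *	size_t buf_size;
 *	__u64 head;
 *
 *	if (perf_buffer__buffer(pb, 0, &buf, &buf_size))
 *		// error handling
 *	header = buf;
 *	// acquire-load pairs with the kernel's store-release of data_head
 *	// (the SMP awareness mentioned above)
 *	head = __atomic_load_n(&header->data_head, __ATOMIC_ACQUIRE);
 *	// ... read records between header->data_tail and head, then publish
 *	// the new tail so the kernel can reuse the space:
 *	__atomic_store_n(&header->data_tail, head, __ATOMIC_RELEASE);
 */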
struct bpf_prog_linfo;
struct bpf_prog_info;

LIBBPF_API void bpf_prog_linfo__free(struct bpf_prog_linfo *prog_linfo);
LIBBPF_API struct bpf_prog_linfo *
bpf_prog_linfo__new(const struct bpf_prog_info *info);
LIBBPF_API const struct bpf_line_info *
bpf_prog_linfo__lfind_addr_func(const struct bpf_prog_linfo *prog_linfo,
				__u64 addr, __u32 func_idx, __u32 nr_skip);
LIBBPF_API const struct bpf_line_info *
bpf_prog_linfo__lfind(const struct bpf_prog_linfo *prog_linfo,
		      __u32 insn_off, __u32 nr_skip);

/*
 * Probe for supported system features
 *
 * Note that running many of these probes in a short amount of time can cause
 * the kernel to reach the maximal size of lockable memory allowed for the
 * user, causing subsequent probes to fail. In this case, the caller may want
 * to adjust that limit with setrlimit().
 */

/**
 * @brief **libbpf_probe_bpf_prog_type()** detects if the host kernel supports
 * BPF programs of a given type.
 * @param prog_type BPF program type to detect kernel support for
 * @param opts reserved for future extensibility, should be NULL
 * @return 1, if given program type is supported; 0, if given program type is
 * not supported; negative error code if feature detection failed or can't be
 * performed
 *
 * Make sure the process has the required set of CAP_* permissions (or runs as
 * root) when performing feature checking.
 */
LIBBPF_API int libbpf_probe_bpf_prog_type(enum bpf_prog_type prog_type, const void *opts);
/**
 * @brief **libbpf_probe_bpf_map_type()** detects if the host kernel supports
 * BPF maps of a given type.
 * @param map_type BPF map type to detect kernel support for
 * @param opts reserved for future extensibility, should be NULL
 * @return 1, if given map type is supported; 0, if given map type is
 * not supported; negative error code if feature detection failed or can't be
 * performed
 *
 * Make sure the process has the required set of CAP_* permissions (or runs as
 * root) when performing feature checking.
 */
LIBBPF_API int libbpf_probe_bpf_map_type(enum bpf_map_type map_type, const void *opts);
/**
 * @brief **libbpf_probe_bpf_helper()** detects if the host kernel supports the
 * use of a given BPF helper from a specified BPF program type.
 * @param prog_type BPF program type used to check the support of BPF helper
 * @param helper_id BPF helper ID (enum bpf_func_id) to check support for
 * @param opts reserved for future extensibility, should be NULL
 * @return 1, if given combination of program type and helper is supported; 0,
 * if the combination is not supported; negative error code if feature
 * detection for provided input arguments failed or can't be performed
 *
 * Make sure the process has the required set of CAP_* permissions (or runs as
 * root) when performing feature checking.
 */
LIBBPF_API int libbpf_probe_bpf_helper(enum bpf_prog_type prog_type,
				       enum bpf_func_id helper_id, const void *opts);
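/* Example usage for the feature probes above (a minimal sketch):
 *
 *	int ret;
 *
 *	ret = libbpf_probe_bpf_prog_type(BPF_PROG_TYPE_KPROBE, NULL);
 *	if (ret < 0)
 *		// error handling: the probe itself failed
 *	else if (ret == 0)
 *		// kprobe programs are not supported by this kernel
 *	else
 *		// supported, safe to load BPF_PROG_TYPE_KPROBE programs
 */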
/**
 * @brief **libbpf_num_possible_cpus()** is a helper function to get the
 * number of possible CPUs that the host kernel supports and expects.
 * @return number of possible CPUs; or error code on failure
 *
 * Example usage:
 *
 *     int ncpus = libbpf_num_possible_cpus();
 *     if (ncpus < 0) {
 *         // error handling
 *     }
 *     long values[ncpus];
 *     bpf_map_lookup_elem(per_cpu_map_fd, key, values);
 */
LIBBPF_API int libbpf_num_possible_cpus(void);

struct bpf_map_skeleton {
	const char *name;
	struct bpf_map **map;
	void **mmaped;
};

struct bpf_prog_skeleton {
	const char *name;
	struct bpf_program **prog;
	struct bpf_link **link;
};

struct bpf_object_skeleton {
	size_t sz; /* size of this struct, for forward/backward compatibility */

	const char *name;
	const void *data;
	size_t data_sz;

	struct bpf_object **obj;

	int map_cnt;
	int map_skel_sz; /* sizeof(struct bpf_map_skeleton) */
	struct bpf_map_skeleton *maps;

	int prog_cnt;
	int prog_skel_sz; /* sizeof(struct bpf_prog_skeleton) */
	struct bpf_prog_skeleton *progs;
};

LIBBPF_API int
bpf_object__open_skeleton(struct bpf_object_skeleton *s,
			  const struct bpf_object_open_opts *opts);
LIBBPF_API int bpf_object__load_skeleton(struct bpf_object_skeleton *s);
LIBBPF_API int bpf_object__attach_skeleton(struct bpf_object_skeleton *s);
LIBBPF_API void bpf_object__detach_skeleton(struct bpf_object_skeleton *s);
LIBBPF_API void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s);
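/* Example usage for the skeleton APIs above (a minimal sketch; assumes a
 * hypothetical "myprog.skel.h" generated with "bpftool gen skeleton", whose
 * myprog_bpf__*() helpers wrap the bpf_object__*_skeleton() calls above):
 *
 *	struct myprog_bpf *skel;
 *
 *	skel = myprog_bpf__open_and_load();
 *	if (!skel)
 *		// error handling
 *	if (myprog_bpf__attach(skel))
 *		// error handling
 *	// ... interact with skel->maps / skel->bss / skel->progs ...
 *	myprog_bpf__destroy(skel);
 */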
struct bpf_var_skeleton {
	const char *name;
	struct bpf_map **map;
	void **addr;
};

struct bpf_object_subskeleton {
	size_t sz; /* size of this struct, for forward/backward compatibility */

	const struct bpf_object *obj;

	int map_cnt;
	int map_skel_sz; /* sizeof(struct bpf_map_skeleton) */
	struct bpf_map_skeleton *maps;

	int prog_cnt;
	int prog_skel_sz; /* sizeof(struct bpf_prog_skeleton) */
	struct bpf_prog_skeleton *progs;

	int var_cnt;
	int var_skel_sz; /* sizeof(struct bpf_var_skeleton) */
	struct bpf_var_skeleton *vars;
};

LIBBPF_API int
bpf_object__open_subskeleton(struct bpf_object_subskeleton *s);
LIBBPF_API void
bpf_object__destroy_subskeleton(struct bpf_object_subskeleton *s);

struct gen_loader_opts {
	size_t sz; /* size of this struct, for forward/backward compatibility */
	const char *data;
	const char *insns;
	__u32 data_sz;
	__u32 insns_sz;
};

#define gen_loader_opts__last_field insns_sz
LIBBPF_API int bpf_object__gen_loader(struct bpf_object *obj,
				      struct gen_loader_opts *opts);

enum libbpf_tristate {
	TRI_NO = 0,
	TRI_YES = 1,
	TRI_MODULE = 2,
};

struct bpf_linker_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
};
#define bpf_linker_opts__last_field sz

struct bpf_linker_file_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
};
#define bpf_linker_file_opts__last_field sz

struct bpf_linker;

LIBBPF_API struct bpf_linker *bpf_linker__new(const char *filename, struct bpf_linker_opts *opts);
LIBBPF_API int bpf_linker__add_file(struct bpf_linker *linker,
				    const char *filename,
				    const struct bpf_linker_file_opts *opts);
LIBBPF_API int bpf_linker__finalize(struct bpf_linker *linker);
LIBBPF_API void bpf_linker__free(struct bpf_linker *linker);

/*
 * Custom handling of BPF program's SEC() definitions
 */

struct bpf_prog_load_opts; /* defined in bpf.h */

/* Called during bpf_object__open() for each recognized BPF program. Callback
 * can use various bpf_program__set_*() setters to adjust whatever properties
 * are necessary.
 */
typedef int (*libbpf_prog_setup_fn_t)(struct bpf_program *prog, long cookie);

/* Called right before libbpf performs bpf_prog_load() to load BPF program
 * into the kernel. Callback can adjust opts as necessary.
 */
typedef int (*libbpf_prog_prepare_load_fn_t)(struct bpf_program *prog,
					     struct bpf_prog_load_opts *opts, long cookie);

/* Called during skeleton attach or through bpf_program__attach(). If
 * auto-attach is not supported, callback should return 0 and set link to
 * NULL (it's not considered an error during skeleton attach, but it will be
 * an error for bpf_program__attach() calls). On error, error should be
 * returned directly and link set to NULL. On success, return 0 and set link
 * to a valid struct bpf_link.
 */
typedef int (*libbpf_prog_attach_fn_t)(const struct bpf_program *prog, long cookie,
				       struct bpf_link **link);

struct libbpf_prog_handler_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* User-provided value that is passed to prog_setup_fn,
	 * prog_prepare_load_fn, and prog_attach_fn callbacks. Allows user to
	 * register one set of callbacks for multiple SEC() definitions and
	 * still be able to distinguish them, if necessary. For example,
	 * libbpf itself is using this to pass necessary flags (e.g.,
	 * sleepable flag) to a common internal SEC() handler.
	 */
	long cookie;
	/* BPF program initialization callback (see libbpf_prog_setup_fn_t).
	 * Callback is optional, pass NULL if it's not necessary.
	 */
	libbpf_prog_setup_fn_t prog_setup_fn;
	/* BPF program loading callback (see libbpf_prog_prepare_load_fn_t).
	 * Callback is optional, pass NULL if it's not necessary.
	 */
	libbpf_prog_prepare_load_fn_t prog_prepare_load_fn;
	/* BPF program attach callback (see libbpf_prog_attach_fn_t).
	 * Callback is optional, pass NULL if it's not necessary.
	 */
	libbpf_prog_attach_fn_t prog_attach_fn;
};
#define libbpf_prog_handler_opts__last_field prog_attach_fn

/**
 * @brief **libbpf_register_prog_handler()** registers a custom BPF program
 * SEC() handler.
 * @param sec section prefix for which custom handler is registered
 * @param prog_type BPF program type associated with specified section
 * @param exp_attach_type Expected BPF attach type associated with specified section
 * @param opts optional cookie, callbacks, and other extra options
 * @return Non-negative handler ID is returned on success. This handler ID has
 * to be passed to *libbpf_unregister_prog_handler()* to unregister such
 * custom handler. Negative error code is returned on error.
 *
 * *sec* defines which SEC() definitions are handled by this custom handler
 * registration. *sec* can have a few different forms:
 *   - if *sec* is just a plain string (e.g., "abc"), it will match only
 *     SEC("abc"). If BPF program specifies SEC("abc/whatever") it will result
 *     in an error;
 *   - if *sec* is of the form "abc/", proper SEC() form is
 *     SEC("abc/something"), where acceptable "something" should be checked by
 *     *prog_setup_fn* callback, if there are additional restrictions;
 *   - if *sec* is of the form "abc+", it will successfully match both
 *     SEC("abc") and SEC("abc/whatever") forms;
 *   - if *sec* is NULL, custom handler is registered for any BPF program that
 *     doesn't match any of the registered (custom or libbpf's own) SEC()
 *     handlers. There can be only one such generic custom handler registered
 *     at any given time.
 *
 * All custom handlers (except the one with *sec* == NULL) are processed
 * before libbpf's own SEC() handlers. It is allowed to "override" libbpf's
 * SEC() handlers by registering custom ones for the same section prefix
 * (i.e., it's possible to have custom SEC("perf_event/LLC-load-misses")
 * handler).
 *
 * Note, like most global libbpf APIs (e.g., libbpf_set_print(),
 * libbpf_set_strict_mode(), etc.) these APIs are not thread-safe. User needs
 * to ensure synchronization if there is a risk of running this API from
 * multiple threads simultaneously.
 */
LIBBPF_API int libbpf_register_prog_handler(const char *sec,
					    enum bpf_prog_type prog_type,
					    enum bpf_attach_type exp_attach_type,
					    const struct libbpf_prog_handler_opts *opts);
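/* Example usage for libbpf_register_prog_handler() (a minimal sketch;
 * registers a handler for hypothetical SEC("myprobe/...") programs and loads
 * them as kprobe programs):
 *
 *	static int myprobe_setup(struct bpf_program *prog, long cookie)
 *	{
 *		// optionally adjust program properties here via
 *		// bpf_program__set_*() setters
 *		return 0;
 *	}
 *
 *	LIBBPF_OPTS(libbpf_prog_handler_opts, opts,
 *		    .prog_setup_fn = myprobe_setup);
 *	int handler_id;
 *
 *	handler_id = libbpf_register_prog_handler("myprobe/",
 *						  BPF_PROG_TYPE_KPROBE, 0, &opts);
 *	if (handler_id < 0)
 *		// error handling
 *	// ... bpf_object__open() will now route SEC("myprobe/...") programs
 *	// through this handler ...
 *	libbpf_unregister_prog_handler(handler_id);
 */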
/**
 * @brief **libbpf_unregister_prog_handler()** unregisters previously
 * registered custom BPF program SEC() handler.
 * @param handler_id handler ID returned by *libbpf_register_prog_handler()*
 * after successful registration
 * @return 0 on success, negative error code if handler isn't found
 *
 * Note, like most global libbpf APIs (e.g., libbpf_set_print(),
 * libbpf_set_strict_mode(), etc.) these APIs are not thread-safe. User needs
 * to ensure synchronization if there is a risk of running this API from
 * multiple threads simultaneously.
 */
LIBBPF_API int libbpf_unregister_prog_handler(int handler_id);

#ifdef __cplusplus
} /* extern "C" */
#endif

#endif /* __LIBBPF_LIBBPF_H */