1 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com 3 * 4 * This program is free software; you can redistribute it and/or 5 * modify it under the terms of version 2 of the GNU General Public 6 * License as published by the Free Software Foundation. 7 */ 8 #ifndef _UAPI__LINUX_BPF_H__ 9 #define _UAPI__LINUX_BPF_H__ 10 11 #include <linux/types.h> 12 #include <linux/bpf_common.h> 13 14 /* Extended instruction set based on top of classic BPF */ 15 16 /* instruction classes */ 17 #define BPF_JMP32 0x06 /* jmp mode in word width */ 18 #define BPF_ALU64 0x07 /* alu mode in double word width */ 19 20 /* ld/ldx fields */ 21 #define BPF_DW 0x18 /* double word (64-bit) */ 22 #define BPF_MEMSX 0x80 /* load with sign extension */ 23 #define BPF_ATOMIC 0xc0 /* atomic memory ops - op type in immediate */ 24 #define BPF_XADD 0xc0 /* exclusive add - legacy name */ 25 26 /* alu/jmp fields */ 27 #define BPF_MOV 0xb0 /* mov reg to reg */ 28 #define BPF_ARSH 0xc0 /* sign extending arithmetic shift right */ 29 30 /* change endianness of a register */ 31 #define BPF_END 0xd0 /* flags for endianness conversion: */ 32 #define BPF_TO_LE 0x00 /* convert to little-endian */ 33 #define BPF_TO_BE 0x08 /* convert to big-endian */ 34 #define BPF_FROM_LE BPF_TO_LE 35 #define BPF_FROM_BE BPF_TO_BE 36 37 /* jmp encodings */ 38 #define BPF_JNE 0x50 /* jump != */ 39 #define BPF_JLT 0xa0 /* LT is unsigned, '<' */ 40 #define BPF_JLE 0xb0 /* LE is unsigned, '<=' */ 41 #define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */ 42 #define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */ 43 #define BPF_JSLT 0xc0 /* SLT is signed, '<' */ 44 #define BPF_JSLE 0xd0 /* SLE is signed, '<=' */ 45 #define BPF_JCOND 0xe0 /* conditional pseudo jumps: may_goto, goto_or_nop */ 46 #define BPF_CALL 0x80 /* function call */ 47 #define BPF_EXIT 0x90 /* function return */ 48 49 /* atomic op type fields (stored in immediate) */ 50 #define BPF_FETCH 0x01 /* not an opcode on its own, used to build others */ 51 #define BPF_XCHG (0xe0 | BPF_FETCH) /* atomic exchange */ 52 #define BPF_CMPXCHG (0xf0 | BPF_FETCH) /* atomic compare-and-write */ 53 54 #define BPF_LOAD_ACQ 0x100 /* load-acquire */ 55 #define BPF_STORE_REL 0x110 /* store-release */ 56 57 enum bpf_cond_pseudo_jmp { 58 BPF_MAY_GOTO = 0, 59 }; 60 61 /* Register numbers */ 62 enum { 63 BPF_REG_0 = 0, 64 BPF_REG_1, 65 BPF_REG_2, 66 BPF_REG_3, 67 BPF_REG_4, 68 BPF_REG_5, 69 BPF_REG_6, 70 BPF_REG_7, 71 BPF_REG_8, 72 BPF_REG_9, 73 BPF_REG_10, 74 __MAX_BPF_REG, 75 }; 76 77 /* BPF has 10 general purpose 64-bit registers and stack frame. */ 78 #define MAX_BPF_REG __MAX_BPF_REG 79 80 struct bpf_insn { 81 __u8 code; /* opcode */ 82 __u8 dst_reg:4; /* dest register */ 83 __u8 src_reg:4; /* source register */ 84 __s16 off; /* signed offset */ 85 __s32 imm; /* signed immediate constant */ 86 }; 87 88 /* Deprecated: use struct bpf_lpm_trie_key_u8 (when the "data" member is needed for 89 * byte access) or struct bpf_lpm_trie_key_hdr (when using an alternative type for 90 * the trailing flexible array member) instead. 91 */ 92 struct bpf_lpm_trie_key { 93 __u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */ 94 __u8 data[0]; /* Arbitrary size */ 95 }; 96 97 /* Header for bpf_lpm_trie_key structs */ 98 struct bpf_lpm_trie_key_hdr { 99 __u32 prefixlen; 100 }; 101 102 /* Key of an a BPF_MAP_TYPE_LPM_TRIE entry, with trailing byte array. */ 103 struct bpf_lpm_trie_key_u8 { 104 union { 105 struct bpf_lpm_trie_key_hdr hdr; 106 __u32 prefixlen; 107 }; 108 __u8 data[]; /* Arbitrary size */ 109 }; 110 111 struct bpf_cgroup_storage_key { 112 __u64 cgroup_inode_id; /* cgroup inode id */ 113 __u32 attach_type; /* program attach type (enum bpf_attach_type) */ 114 }; 115 116 enum bpf_cgroup_iter_order { 117 BPF_CGROUP_ITER_ORDER_UNSPEC = 0, 118 BPF_CGROUP_ITER_SELF_ONLY, /* process only a single object. */ 119 BPF_CGROUP_ITER_DESCENDANTS_PRE, /* walk descendants in pre-order. */ 120 BPF_CGROUP_ITER_DESCENDANTS_POST, /* walk descendants in post-order. */ 121 BPF_CGROUP_ITER_ANCESTORS_UP, /* walk ancestors upward. */ 122 }; 123 124 union bpf_iter_link_info { 125 struct { 126 __u32 map_fd; 127 } map; 128 struct { 129 enum bpf_cgroup_iter_order order; 130 131 /* At most one of cgroup_fd and cgroup_id can be non-zero. If 132 * both are zero, the walk starts from the default cgroup v2 133 * root. For walking v1 hierarchy, one should always explicitly 134 * specify cgroup_fd. 135 */ 136 __u32 cgroup_fd; 137 __u64 cgroup_id; 138 } cgroup; 139 /* Parameters of task iterators. */ 140 struct { 141 __u32 tid; 142 __u32 pid; 143 __u32 pid_fd; 144 } task; 145 }; 146 147 /* BPF syscall commands, see bpf(2) man-page for more details. */ 148 /** 149 * DOC: eBPF Syscall Preamble 150 * 151 * The operation to be performed by the **bpf**\ () system call is determined 152 * by the *cmd* argument. Each operation takes an accompanying argument, 153 * provided via *attr*, which is a pointer to a union of type *bpf_attr* (see 154 * below). The size argument is the size of the union pointed to by *attr*. 155 */ 156 /** 157 * DOC: eBPF Syscall Commands 158 * 159 * BPF_MAP_CREATE 160 * Description 161 * Create a map and return a file descriptor that refers to the 162 * map. The close-on-exec file descriptor flag (see **fcntl**\ (2)) 163 * is automatically enabled for the new file descriptor. 164 * 165 * Applying **close**\ (2) to the file descriptor returned by 166 * **BPF_MAP_CREATE** will delete the map (but see NOTES). 167 * 168 * Return 169 * A new file descriptor (a nonnegative integer), or -1 if an 170 * error occurred (in which case, *errno* is set appropriately). 171 * 172 * BPF_MAP_LOOKUP_ELEM 173 * Description 174 * Look up an element with a given *key* in the map referred to 175 * by the file descriptor *map_fd*. 176 * 177 * The *flags* argument may be specified as one of the 178 * following: 179 * 180 * **BPF_F_LOCK** 181 * Look up the value of a spin-locked map without 182 * returning the lock. This must be specified if the 183 * elements contain a spinlock. 184 * 185 * Return 186 * Returns zero on success. On error, -1 is returned and *errno* 187 * is set appropriately. 188 * 189 * BPF_MAP_UPDATE_ELEM 190 * Description 191 * Create or update an element (key/value pair) in a specified map. 192 * 193 * The *flags* argument should be specified as one of the 194 * following: 195 * 196 * **BPF_ANY** 197 * Create a new element or update an existing element. 198 * **BPF_NOEXIST** 199 * Create a new element only if it did not exist. 200 * **BPF_EXIST** 201 * Update an existing element. 202 * **BPF_F_LOCK** 203 * Update a spin_lock-ed map element. 204 * 205 * Return 206 * Returns zero on success. On error, -1 is returned and *errno* 207 * is set appropriately. 208 * 209 * May set *errno* to **EINVAL**, **EPERM**, **ENOMEM**, 210 * **E2BIG**, **EEXIST**, or **ENOENT**. 211 * 212 * **E2BIG** 213 * The number of elements in the map reached the 214 * *max_entries* limit specified at map creation time. 215 * **EEXIST** 216 * If *flags* specifies **BPF_NOEXIST** and the element 217 * with *key* already exists in the map. 218 * **ENOENT** 219 * If *flags* specifies **BPF_EXIST** and the element with 220 * *key* does not exist in the map. 221 * 222 * BPF_MAP_DELETE_ELEM 223 * Description 224 * Look up and delete an element by key in a specified map. 225 * 226 * Return 227 * Returns zero on success. On error, -1 is returned and *errno* 228 * is set appropriately. 229 * 230 * BPF_MAP_GET_NEXT_KEY 231 * Description 232 * Look up an element by key in a specified map and return the key 233 * of the next element. Can be used to iterate over all elements 234 * in the map. 235 * 236 * Return 237 * Returns zero on success. On error, -1 is returned and *errno* 238 * is set appropriately. 239 * 240 * The following cases can be used to iterate over all elements of 241 * the map: 242 * 243 * * If *key* is not found, the operation returns zero and sets 244 * the *next_key* pointer to the key of the first element. 245 * * If *key* is found, the operation returns zero and sets the 246 * *next_key* pointer to the key of the next element. 247 * * If *key* is the last element, returns -1 and *errno* is set 248 * to **ENOENT**. 249 * 250 * May set *errno* to **ENOMEM**, **EFAULT**, **EPERM**, or 251 * **EINVAL** on error. 252 * 253 * BPF_PROG_LOAD 254 * Description 255 * Verify and load an eBPF program, returning a new file 256 * descriptor associated with the program. 257 * 258 * Applying **close**\ (2) to the file descriptor returned by 259 * **BPF_PROG_LOAD** will unload the eBPF program (but see NOTES). 260 * 261 * The close-on-exec file descriptor flag (see **fcntl**\ (2)) is 262 * automatically enabled for the new file descriptor. 263 * 264 * Return 265 * A new file descriptor (a nonnegative integer), or -1 if an 266 * error occurred (in which case, *errno* is set appropriately). 267 * 268 * BPF_OBJ_PIN 269 * Description 270 * Pin an eBPF program or map referred by the specified *bpf_fd* 271 * to the provided *pathname* on the filesystem. 272 * 273 * The *pathname* argument must not contain a dot ("."). 274 * 275 * On success, *pathname* retains a reference to the eBPF object, 276 * preventing deallocation of the object when the original 277 * *bpf_fd* is closed. This allow the eBPF object to live beyond 278 * **close**\ (\ *bpf_fd*\ ), and hence the lifetime of the parent 279 * process. 280 * 281 * Applying **unlink**\ (2) or similar calls to the *pathname* 282 * unpins the object from the filesystem, removing the reference. 283 * If no other file descriptors or filesystem nodes refer to the 284 * same object, it will be deallocated (see NOTES). 285 * 286 * The filesystem type for the parent directory of *pathname* must 287 * be **BPF_FS_MAGIC**. 288 * 289 * Return 290 * Returns zero on success. On error, -1 is returned and *errno* 291 * is set appropriately. 292 * 293 * BPF_OBJ_GET 294 * Description 295 * Open a file descriptor for the eBPF object pinned to the 296 * specified *pathname*. 297 * 298 * Return 299 * A new file descriptor (a nonnegative integer), or -1 if an 300 * error occurred (in which case, *errno* is set appropriately). 301 * 302 * BPF_PROG_ATTACH 303 * Description 304 * Attach an eBPF program to a *target_fd* at the specified 305 * *attach_type* hook. 306 * 307 * The *attach_type* specifies the eBPF attachment point to 308 * attach the program to, and must be one of *bpf_attach_type* 309 * (see below). 310 * 311 * The *attach_bpf_fd* must be a valid file descriptor for a 312 * loaded eBPF program of a cgroup, flow dissector, LIRC, sockmap 313 * or sock_ops type corresponding to the specified *attach_type*. 314 * 315 * The *target_fd* must be a valid file descriptor for a kernel 316 * object which depends on the attach type of *attach_bpf_fd*: 317 * 318 * **BPF_PROG_TYPE_CGROUP_DEVICE**, 319 * **BPF_PROG_TYPE_CGROUP_SKB**, 320 * **BPF_PROG_TYPE_CGROUP_SOCK**, 321 * **BPF_PROG_TYPE_CGROUP_SOCK_ADDR**, 322 * **BPF_PROG_TYPE_CGROUP_SOCKOPT**, 323 * **BPF_PROG_TYPE_CGROUP_SYSCTL**, 324 * **BPF_PROG_TYPE_SOCK_OPS** 325 * 326 * Control Group v2 hierarchy with the eBPF controller 327 * enabled. Requires the kernel to be compiled with 328 * **CONFIG_CGROUP_BPF**. 329 * 330 * **BPF_PROG_TYPE_FLOW_DISSECTOR** 331 * 332 * Network namespace (eg /proc/self/ns/net). 333 * 334 * **BPF_PROG_TYPE_LIRC_MODE2** 335 * 336 * LIRC device path (eg /dev/lircN). Requires the kernel 337 * to be compiled with **CONFIG_BPF_LIRC_MODE2**. 338 * 339 * **BPF_PROG_TYPE_SK_SKB**, 340 * **BPF_PROG_TYPE_SK_MSG** 341 * 342 * eBPF map of socket type (eg **BPF_MAP_TYPE_SOCKHASH**). 343 * 344 * Return 345 * Returns zero on success. On error, -1 is returned and *errno* 346 * is set appropriately. 347 * 348 * BPF_PROG_DETACH 349 * Description 350 * Detach the eBPF program associated with the *target_fd* at the 351 * hook specified by *attach_type*. The program must have been 352 * previously attached using **BPF_PROG_ATTACH**. 353 * 354 * Return 355 * Returns zero on success. On error, -1 is returned and *errno* 356 * is set appropriately. 357 * 358 * BPF_PROG_TEST_RUN 359 * Description 360 * Run the eBPF program associated with the *prog_fd* a *repeat* 361 * number of times against a provided program context *ctx_in* and 362 * data *data_in*, and return the modified program context 363 * *ctx_out*, *data_out* (for example, packet data), result of the 364 * execution *retval*, and *duration* of the test run. 365 * 366 * The sizes of the buffers provided as input and output 367 * parameters *ctx_in*, *ctx_out*, *data_in*, and *data_out* must 368 * be provided in the corresponding variables *ctx_size_in*, 369 * *ctx_size_out*, *data_size_in*, and/or *data_size_out*. If any 370 * of these parameters are not provided (ie set to NULL), the 371 * corresponding size field must be zero. 372 * 373 * Some program types have particular requirements: 374 * 375 * **BPF_PROG_TYPE_SK_LOOKUP** 376 * *data_in* and *data_out* must be NULL. 377 * 378 * **BPF_PROG_TYPE_RAW_TRACEPOINT**, 379 * **BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE** 380 * 381 * *ctx_out*, *data_in* and *data_out* must be NULL. 382 * *repeat* must be zero. 383 * 384 * BPF_PROG_RUN is an alias for BPF_PROG_TEST_RUN. 385 * 386 * Return 387 * Returns zero on success. On error, -1 is returned and *errno* 388 * is set appropriately. 389 * 390 * **ENOSPC** 391 * Either *data_size_out* or *ctx_size_out* is too small. 392 * **ENOTSUPP** 393 * This command is not supported by the program type of 394 * the program referred to by *prog_fd*. 395 * 396 * BPF_PROG_GET_NEXT_ID 397 * Description 398 * Fetch the next eBPF program currently loaded into the kernel. 399 * 400 * Looks for the eBPF program with an id greater than *start_id* 401 * and updates *next_id* on success. If no other eBPF programs 402 * remain with ids higher than *start_id*, returns -1 and sets 403 * *errno* to **ENOENT**. 404 * 405 * Return 406 * Returns zero on success. On error, or when no id remains, -1 407 * is returned and *errno* is set appropriately. 408 * 409 * BPF_MAP_GET_NEXT_ID 410 * Description 411 * Fetch the next eBPF map currently loaded into the kernel. 412 * 413 * Looks for the eBPF map with an id greater than *start_id* 414 * and updates *next_id* on success. If no other eBPF maps 415 * remain with ids higher than *start_id*, returns -1 and sets 416 * *errno* to **ENOENT**. 417 * 418 * Return 419 * Returns zero on success. On error, or when no id remains, -1 420 * is returned and *errno* is set appropriately. 421 * 422 * BPF_PROG_GET_FD_BY_ID 423 * Description 424 * Open a file descriptor for the eBPF program corresponding to 425 * *prog_id*. 426 * 427 * Return 428 * A new file descriptor (a nonnegative integer), or -1 if an 429 * error occurred (in which case, *errno* is set appropriately). 430 * 431 * BPF_MAP_GET_FD_BY_ID 432 * Description 433 * Open a file descriptor for the eBPF map corresponding to 434 * *map_id*. 435 * 436 * Return 437 * A new file descriptor (a nonnegative integer), or -1 if an 438 * error occurred (in which case, *errno* is set appropriately). 439 * 440 * BPF_OBJ_GET_INFO_BY_FD 441 * Description 442 * Obtain information about the eBPF object corresponding to 443 * *bpf_fd*. 444 * 445 * Populates up to *info_len* bytes of *info*, which will be in 446 * one of the following formats depending on the eBPF object type 447 * of *bpf_fd*: 448 * 449 * * **struct bpf_prog_info** 450 * * **struct bpf_map_info** 451 * * **struct bpf_btf_info** 452 * * **struct bpf_link_info** 453 * * **struct bpf_token_info** 454 * 455 * Return 456 * Returns zero on success. On error, -1 is returned and *errno* 457 * is set appropriately. 458 * 459 * BPF_PROG_QUERY 460 * Description 461 * Obtain information about eBPF programs associated with the 462 * specified *attach_type* hook. 463 * 464 * The *target_fd* must be a valid file descriptor for a kernel 465 * object which depends on the attach type of *attach_bpf_fd*: 466 * 467 * **BPF_PROG_TYPE_CGROUP_DEVICE**, 468 * **BPF_PROG_TYPE_CGROUP_SKB**, 469 * **BPF_PROG_TYPE_CGROUP_SOCK**, 470 * **BPF_PROG_TYPE_CGROUP_SOCK_ADDR**, 471 * **BPF_PROG_TYPE_CGROUP_SOCKOPT**, 472 * **BPF_PROG_TYPE_CGROUP_SYSCTL**, 473 * **BPF_PROG_TYPE_SOCK_OPS** 474 * 475 * Control Group v2 hierarchy with the eBPF controller 476 * enabled. Requires the kernel to be compiled with 477 * **CONFIG_CGROUP_BPF**. 478 * 479 * **BPF_PROG_TYPE_FLOW_DISSECTOR** 480 * 481 * Network namespace (eg /proc/self/ns/net). 482 * 483 * **BPF_PROG_TYPE_LIRC_MODE2** 484 * 485 * LIRC device path (eg /dev/lircN). Requires the kernel 486 * to be compiled with **CONFIG_BPF_LIRC_MODE2**. 487 * 488 * **BPF_PROG_QUERY** always fetches the number of programs 489 * attached and the *attach_flags* which were used to attach those 490 * programs. Additionally, if *prog_ids* is nonzero and the number 491 * of attached programs is less than *prog_cnt*, populates 492 * *prog_ids* with the eBPF program ids of the programs attached 493 * at *target_fd*. 494 * 495 * The following flags may alter the result: 496 * 497 * **BPF_F_QUERY_EFFECTIVE** 498 * Only return information regarding programs which are 499 * currently effective at the specified *target_fd*. 500 * 501 * Return 502 * Returns zero on success. On error, -1 is returned and *errno* 503 * is set appropriately. 504 * 505 * BPF_RAW_TRACEPOINT_OPEN 506 * Description 507 * Attach an eBPF program to a tracepoint *name* to access kernel 508 * internal arguments of the tracepoint in their raw form. 509 * 510 * The *prog_fd* must be a valid file descriptor associated with 511 * a loaded eBPF program of type **BPF_PROG_TYPE_RAW_TRACEPOINT**. 512 * 513 * No ABI guarantees are made about the content of tracepoint 514 * arguments exposed to the corresponding eBPF program. 515 * 516 * Applying **close**\ (2) to the file descriptor returned by 517 * **BPF_RAW_TRACEPOINT_OPEN** will delete the map (but see NOTES). 518 * 519 * Return 520 * A new file descriptor (a nonnegative integer), or -1 if an 521 * error occurred (in which case, *errno* is set appropriately). 522 * 523 * BPF_BTF_LOAD 524 * Description 525 * Verify and load BPF Type Format (BTF) metadata into the kernel, 526 * returning a new file descriptor associated with the metadata. 527 * BTF is described in more detail at 528 * https://www.kernel.org/doc/html/latest/bpf/btf.html. 529 * 530 * The *btf* parameter must point to valid memory providing 531 * *btf_size* bytes of BTF binary metadata. 532 * 533 * The returned file descriptor can be passed to other **bpf**\ () 534 * subcommands such as **BPF_PROG_LOAD** or **BPF_MAP_CREATE** to 535 * associate the BTF with those objects. 536 * 537 * Similar to **BPF_PROG_LOAD**, **BPF_BTF_LOAD** has optional 538 * parameters to specify a *btf_log_buf*, *btf_log_size* and 539 * *btf_log_level* which allow the kernel to return freeform log 540 * output regarding the BTF verification process. 541 * 542 * Return 543 * A new file descriptor (a nonnegative integer), or -1 if an 544 * error occurred (in which case, *errno* is set appropriately). 545 * 546 * BPF_BTF_GET_FD_BY_ID 547 * Description 548 * Open a file descriptor for the BPF Type Format (BTF) 549 * corresponding to *btf_id*. 550 * 551 * Return 552 * A new file descriptor (a nonnegative integer), or -1 if an 553 * error occurred (in which case, *errno* is set appropriately). 554 * 555 * BPF_TASK_FD_QUERY 556 * Description 557 * Obtain information about eBPF programs associated with the 558 * target process identified by *pid* and *fd*. 559 * 560 * If the *pid* and *fd* are associated with a tracepoint, kprobe 561 * or uprobe perf event, then the *prog_id* and *fd_type* will 562 * be populated with the eBPF program id and file descriptor type 563 * of type **bpf_task_fd_type**. If associated with a kprobe or 564 * uprobe, the *probe_offset* and *probe_addr* will also be 565 * populated. Optionally, if *buf* is provided, then up to 566 * *buf_len* bytes of *buf* will be populated with the name of 567 * the tracepoint, kprobe or uprobe. 568 * 569 * The resulting *prog_id* may be introspected in deeper detail 570 * using **BPF_PROG_GET_FD_BY_ID** and **BPF_OBJ_GET_INFO_BY_FD**. 571 * 572 * Return 573 * Returns zero on success. On error, -1 is returned and *errno* 574 * is set appropriately. 575 * 576 * BPF_MAP_LOOKUP_AND_DELETE_ELEM 577 * Description 578 * Look up an element with the given *key* in the map referred to 579 * by the file descriptor *fd*, and if found, delete the element. 580 * 581 * For **BPF_MAP_TYPE_QUEUE** and **BPF_MAP_TYPE_STACK** map 582 * types, the *flags* argument needs to be set to 0, but for other 583 * map types, it may be specified as: 584 * 585 * **BPF_F_LOCK** 586 * Look up and delete the value of a spin-locked map 587 * without returning the lock. This must be specified if 588 * the elements contain a spinlock. 589 * 590 * The **BPF_MAP_TYPE_QUEUE** and **BPF_MAP_TYPE_STACK** map types 591 * implement this command as a "pop" operation, deleting the top 592 * element rather than one corresponding to *key*. 593 * The *key* and *key_len* parameters should be zeroed when 594 * issuing this operation for these map types. 595 * 596 * This command is only valid for the following map types: 597 * * **BPF_MAP_TYPE_QUEUE** 598 * * **BPF_MAP_TYPE_STACK** 599 * * **BPF_MAP_TYPE_HASH** 600 * * **BPF_MAP_TYPE_PERCPU_HASH** 601 * * **BPF_MAP_TYPE_LRU_HASH** 602 * * **BPF_MAP_TYPE_LRU_PERCPU_HASH** 603 * 604 * Return 605 * Returns zero on success. On error, -1 is returned and *errno* 606 * is set appropriately. 607 * 608 * BPF_MAP_FREEZE 609 * Description 610 * Freeze the permissions of the specified map. 611 * 612 * Write permissions may be frozen by passing zero *flags*. 613 * Upon success, no future syscall invocations may alter the 614 * map state of *map_fd*. Write operations from eBPF programs 615 * are still possible for a frozen map. 616 * 617 * Not supported for maps of type **BPF_MAP_TYPE_STRUCT_OPS**. 618 * 619 * Return 620 * Returns zero on success. On error, -1 is returned and *errno* 621 * is set appropriately. 622 * 623 * BPF_BTF_GET_NEXT_ID 624 * Description 625 * Fetch the next BPF Type Format (BTF) object currently loaded 626 * into the kernel. 627 * 628 * Looks for the BTF object with an id greater than *start_id* 629 * and updates *next_id* on success. If no other BTF objects 630 * remain with ids higher than *start_id*, returns -1 and sets 631 * *errno* to **ENOENT**. 632 * 633 * Return 634 * Returns zero on success. On error, or when no id remains, -1 635 * is returned and *errno* is set appropriately. 636 * 637 * BPF_MAP_LOOKUP_BATCH 638 * Description 639 * Iterate and fetch multiple elements in a map. 640 * 641 * Two opaque values are used to manage batch operations, 642 * *in_batch* and *out_batch*. Initially, *in_batch* must be set 643 * to NULL to begin the batched operation. After each subsequent 644 * **BPF_MAP_LOOKUP_BATCH**, the caller should pass the resultant 645 * *out_batch* as the *in_batch* for the next operation to 646 * continue iteration from the current point. Both *in_batch* and 647 * *out_batch* must point to memory large enough to hold a key, 648 * except for maps of type **BPF_MAP_TYPE_{HASH, PERCPU_HASH, 649 * LRU_HASH, LRU_PERCPU_HASH}**, for which batch parameters 650 * must be at least 4 bytes wide regardless of key size. 651 * 652 * The *keys* and *values* are output parameters which must point 653 * to memory large enough to hold *count* items based on the key 654 * and value size of the map *map_fd*. The *keys* buffer must be 655 * of *key_size* * *count*. The *values* buffer must be of 656 * *value_size* * *count*. 657 * 658 * The *elem_flags* argument may be specified as one of the 659 * following: 660 * 661 * **BPF_F_LOCK** 662 * Look up the value of a spin-locked map without 663 * returning the lock. This must be specified if the 664 * elements contain a spinlock. 665 * 666 * On success, *count* elements from the map are copied into the 667 * user buffer, with the keys copied into *keys* and the values 668 * copied into the corresponding indices in *values*. 669 * 670 * If an error is returned and *errno* is not **EFAULT**, *count* 671 * is set to the number of successfully processed elements. 672 * 673 * Return 674 * Returns zero on success. On error, -1 is returned and *errno* 675 * is set appropriately. 676 * 677 * May set *errno* to **ENOSPC** to indicate that *keys* or 678 * *values* is too small to dump an entire bucket during 679 * iteration of a hash-based map type. 680 * 681 * BPF_MAP_LOOKUP_AND_DELETE_BATCH 682 * Description 683 * Iterate and delete all elements in a map. 684 * 685 * This operation has the same behavior as 686 * **BPF_MAP_LOOKUP_BATCH** with two exceptions: 687 * 688 * * Every element that is successfully returned is also deleted 689 * from the map. This is at least *count* elements. Note that 690 * *count* is both an input and an output parameter. 691 * * Upon returning with *errno* set to **EFAULT**, up to 692 * *count* elements may be deleted without returning the keys 693 * and values of the deleted elements. 694 * 695 * Return 696 * Returns zero on success. On error, -1 is returned and *errno* 697 * is set appropriately. 698 * 699 * BPF_MAP_UPDATE_BATCH 700 * Description 701 * Update multiple elements in a map by *key*. 702 * 703 * The *keys* and *values* are input parameters which must point 704 * to memory large enough to hold *count* items based on the key 705 * and value size of the map *map_fd*. The *keys* buffer must be 706 * of *key_size* * *count*. The *values* buffer must be of 707 * *value_size* * *count*. 708 * 709 * Each element specified in *keys* is sequentially updated to the 710 * value in the corresponding index in *values*. The *in_batch* 711 * and *out_batch* parameters are ignored and should be zeroed. 712 * 713 * The *elem_flags* argument should be specified as one of the 714 * following: 715 * 716 * **BPF_ANY** 717 * Create new elements or update a existing elements. 718 * **BPF_NOEXIST** 719 * Create new elements only if they do not exist. 720 * **BPF_EXIST** 721 * Update existing elements. 722 * **BPF_F_LOCK** 723 * Update spin_lock-ed map elements. This must be 724 * specified if the map value contains a spinlock. 725 * 726 * On success, *count* elements from the map are updated. 727 * 728 * If an error is returned and *errno* is not **EFAULT**, *count* 729 * is set to the number of successfully processed elements. 730 * 731 * Return 732 * Returns zero on success. On error, -1 is returned and *errno* 733 * is set appropriately. 734 * 735 * May set *errno* to **EINVAL**, **EPERM**, **ENOMEM**, or 736 * **E2BIG**. **E2BIG** indicates that the number of elements in 737 * the map reached the *max_entries* limit specified at map 738 * creation time. 739 * 740 * May set *errno* to one of the following error codes under 741 * specific circumstances: 742 * 743 * **EEXIST** 744 * If *flags* specifies **BPF_NOEXIST** and the element 745 * with *key* already exists in the map. 746 * **ENOENT** 747 * If *flags* specifies **BPF_EXIST** and the element with 748 * *key* does not exist in the map. 749 * 750 * BPF_MAP_DELETE_BATCH 751 * Description 752 * Delete multiple elements in a map by *key*. 753 * 754 * The *keys* parameter is an input parameter which must point 755 * to memory large enough to hold *count* items based on the key 756 * size of the map *map_fd*, that is, *key_size* * *count*. 757 * 758 * Each element specified in *keys* is sequentially deleted. The 759 * *in_batch*, *out_batch*, and *values* parameters are ignored 760 * and should be zeroed. 761 * 762 * The *elem_flags* argument may be specified as one of the 763 * following: 764 * 765 * **BPF_F_LOCK** 766 * Look up the value of a spin-locked map without 767 * returning the lock. This must be specified if the 768 * elements contain a spinlock. 769 * 770 * On success, *count* elements from the map are updated. 771 * 772 * If an error is returned and *errno* is not **EFAULT**, *count* 773 * is set to the number of successfully processed elements. If 774 * *errno* is **EFAULT**, up to *count* elements may be been 775 * deleted. 776 * 777 * Return 778 * Returns zero on success. On error, -1 is returned and *errno* 779 * is set appropriately. 780 * 781 * BPF_LINK_CREATE 782 * Description 783 * Attach an eBPF program to a *target_fd* at the specified 784 * *attach_type* hook and return a file descriptor handle for 785 * managing the link. 786 * 787 * Return 788 * A new file descriptor (a nonnegative integer), or -1 if an 789 * error occurred (in which case, *errno* is set appropriately). 790 * 791 * BPF_LINK_UPDATE 792 * Description 793 * Update the eBPF program in the specified *link_fd* to 794 * *new_prog_fd*. 795 * 796 * Return 797 * Returns zero on success. On error, -1 is returned and *errno* 798 * is set appropriately. 799 * 800 * BPF_LINK_GET_FD_BY_ID 801 * Description 802 * Open a file descriptor for the eBPF Link corresponding to 803 * *link_id*. 804 * 805 * Return 806 * A new file descriptor (a nonnegative integer), or -1 if an 807 * error occurred (in which case, *errno* is set appropriately). 808 * 809 * BPF_LINK_GET_NEXT_ID 810 * Description 811 * Fetch the next eBPF link currently loaded into the kernel. 812 * 813 * Looks for the eBPF link with an id greater than *start_id* 814 * and updates *next_id* on success. If no other eBPF links 815 * remain with ids higher than *start_id*, returns -1 and sets 816 * *errno* to **ENOENT**. 817 * 818 * Return 819 * Returns zero on success. On error, or when no id remains, -1 820 * is returned and *errno* is set appropriately. 821 * 822 * BPF_ENABLE_STATS 823 * Description 824 * Enable eBPF runtime statistics gathering. 825 * 826 * Runtime statistics gathering for the eBPF runtime is disabled 827 * by default to minimize the corresponding performance overhead. 828 * This command enables statistics globally. 829 * 830 * Multiple programs may independently enable statistics. 831 * After gathering the desired statistics, eBPF runtime statistics 832 * may be disabled again by calling **close**\ (2) for the file 833 * descriptor returned by this function. Statistics will only be 834 * disabled system-wide when all outstanding file descriptors 835 * returned by prior calls for this subcommand are closed. 836 * 837 * Return 838 * A new file descriptor (a nonnegative integer), or -1 if an 839 * error occurred (in which case, *errno* is set appropriately). 840 * 841 * BPF_ITER_CREATE 842 * Description 843 * Create an iterator on top of the specified *link_fd* (as 844 * previously created using **BPF_LINK_CREATE**) and return a 845 * file descriptor that can be used to trigger the iteration. 846 * 847 * If the resulting file descriptor is pinned to the filesystem 848 * using **BPF_OBJ_PIN**, then subsequent **read**\ (2) syscalls 849 * for that path will trigger the iterator to read kernel state 850 * using the eBPF program attached to *link_fd*. 851 * 852 * Return 853 * A new file descriptor (a nonnegative integer), or -1 if an 854 * error occurred (in which case, *errno* is set appropriately). 855 * 856 * BPF_LINK_DETACH 857 * Description 858 * Forcefully detach the specified *link_fd* from its 859 * corresponding attachment point. 860 * 861 * Return 862 * Returns zero on success. On error, -1 is returned and *errno* 863 * is set appropriately. 864 * 865 * BPF_PROG_BIND_MAP 866 * Description 867 * Bind a map to the lifetime of an eBPF program. 868 * 869 * The map identified by *map_fd* is bound to the program 870 * identified by *prog_fd* and only released when *prog_fd* is 871 * released. This may be used in cases where metadata should be 872 * associated with a program which otherwise does not contain any 873 * references to the map (for example, embedded in the eBPF 874 * program instructions). 875 * 876 * Return 877 * Returns zero on success. On error, -1 is returned and *errno* 878 * is set appropriately. 879 * 880 * BPF_TOKEN_CREATE 881 * Description 882 * Create BPF token with embedded information about what 883 * BPF-related functionality it allows: 884 * - a set of allowed bpf() syscall commands; 885 * - a set of allowed BPF map types to be created with 886 * BPF_MAP_CREATE command, if BPF_MAP_CREATE itself is allowed; 887 * - a set of allowed BPF program types and BPF program attach 888 * types to be loaded with BPF_PROG_LOAD command, if 889 * BPF_PROG_LOAD itself is allowed. 890 * 891 * BPF token is created (derived) from an instance of BPF FS, 892 * assuming it has necessary delegation mount options specified. 893 * This BPF token can be passed as an extra parameter to various 894 * bpf() syscall commands to grant BPF subsystem functionality to 895 * unprivileged processes. 896 * 897 * When created, BPF token is "associated" with the owning 898 * user namespace of BPF FS instance (super block) that it was 899 * derived from, and subsequent BPF operations performed with 900 * BPF token would be performing capabilities checks (i.e., 901 * CAP_BPF, CAP_PERFMON, CAP_NET_ADMIN, CAP_SYS_ADMIN) within 902 * that user namespace. Without BPF token, such capabilities 903 * have to be granted in init user namespace, making bpf() 904 * syscall incompatible with user namespace, for the most part. 905 * 906 * Return 907 * A new file descriptor (a nonnegative integer), or -1 if an 908 * error occurred (in which case, *errno* is set appropriately). 909 * 910 * BPF_PROG_STREAM_READ_BY_FD 911 * Description 912 * Read data of a program's BPF stream. The program is identified 913 * by *prog_fd*, and the stream is identified by the *stream_id*. 914 * The data is copied to a buffer pointed to by *stream_buf*, and 915 * filled less than or equal to *stream_buf_len* bytes. 916 * 917 * Return 918 * Number of bytes read from the stream on success, or -1 if an 919 * error occurred (in which case, *errno* is set appropriately). 920 * 921 * NOTES 922 * eBPF objects (maps and programs) can be shared between processes. 923 * 924 * * After **fork**\ (2), the child inherits file descriptors 925 * referring to the same eBPF objects. 926 * * File descriptors referring to eBPF objects can be transferred over 927 * **unix**\ (7) domain sockets. 928 * * File descriptors referring to eBPF objects can be duplicated in the 929 * usual way, using **dup**\ (2) and similar calls. 930 * * File descriptors referring to eBPF objects can be pinned to the 931 * filesystem using the **BPF_OBJ_PIN** command of **bpf**\ (2). 932 * 933 * An eBPF object is deallocated only after all file descriptors referring 934 * to the object have been closed and no references remain pinned to the 935 * filesystem or attached (for example, bound to a program or device). 936 */ 937 enum bpf_cmd { 938 BPF_MAP_CREATE, 939 BPF_MAP_LOOKUP_ELEM, 940 BPF_MAP_UPDATE_ELEM, 941 BPF_MAP_DELETE_ELEM, 942 BPF_MAP_GET_NEXT_KEY, 943 BPF_PROG_LOAD, 944 BPF_OBJ_PIN, 945 BPF_OBJ_GET, 946 BPF_PROG_ATTACH, 947 BPF_PROG_DETACH, 948 BPF_PROG_TEST_RUN, 949 BPF_PROG_RUN = BPF_PROG_TEST_RUN, 950 BPF_PROG_GET_NEXT_ID, 951 BPF_MAP_GET_NEXT_ID, 952 BPF_PROG_GET_FD_BY_ID, 953 BPF_MAP_GET_FD_BY_ID, 954 BPF_OBJ_GET_INFO_BY_FD, 955 BPF_PROG_QUERY, 956 BPF_RAW_TRACEPOINT_OPEN, 957 BPF_BTF_LOAD, 958 BPF_BTF_GET_FD_BY_ID, 959 BPF_TASK_FD_QUERY, 960 BPF_MAP_LOOKUP_AND_DELETE_ELEM, 961 BPF_MAP_FREEZE, 962 BPF_BTF_GET_NEXT_ID, 963 BPF_MAP_LOOKUP_BATCH, 964 BPF_MAP_LOOKUP_AND_DELETE_BATCH, 965 BPF_MAP_UPDATE_BATCH, 966 BPF_MAP_DELETE_BATCH, 967 BPF_LINK_CREATE, 968 BPF_LINK_UPDATE, 969 BPF_LINK_GET_FD_BY_ID, 970 BPF_LINK_GET_NEXT_ID, 971 BPF_ENABLE_STATS, 972 BPF_ITER_CREATE, 973 BPF_LINK_DETACH, 974 BPF_PROG_BIND_MAP, 975 BPF_TOKEN_CREATE, 976 BPF_PROG_STREAM_READ_BY_FD, 977 __MAX_BPF_CMD, 978 }; 979 980 enum bpf_map_type { 981 BPF_MAP_TYPE_UNSPEC, 982 BPF_MAP_TYPE_HASH, 983 BPF_MAP_TYPE_ARRAY, 984 BPF_MAP_TYPE_PROG_ARRAY, 985 BPF_MAP_TYPE_PERF_EVENT_ARRAY, 986 BPF_MAP_TYPE_PERCPU_HASH, 987 BPF_MAP_TYPE_PERCPU_ARRAY, 988 BPF_MAP_TYPE_STACK_TRACE, 989 BPF_MAP_TYPE_CGROUP_ARRAY, 990 BPF_MAP_TYPE_LRU_HASH, 991 BPF_MAP_TYPE_LRU_PERCPU_HASH, 992 BPF_MAP_TYPE_LPM_TRIE, 993 BPF_MAP_TYPE_ARRAY_OF_MAPS, 994 BPF_MAP_TYPE_HASH_OF_MAPS, 995 BPF_MAP_TYPE_DEVMAP, 996 BPF_MAP_TYPE_SOCKMAP, 997 BPF_MAP_TYPE_CPUMAP, 998 BPF_MAP_TYPE_XSKMAP, 999 BPF_MAP_TYPE_SOCKHASH, 1000 BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED, 1001 /* BPF_MAP_TYPE_CGROUP_STORAGE is available to bpf programs attaching 1002 * to a cgroup. The newer BPF_MAP_TYPE_CGRP_STORAGE is available to 1003 * both cgroup-attached and other progs and supports all functionality 1004 * provided by BPF_MAP_TYPE_CGROUP_STORAGE. So mark 1005 * BPF_MAP_TYPE_CGROUP_STORAGE deprecated. 1006 */ 1007 BPF_MAP_TYPE_CGROUP_STORAGE = BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED, 1008 BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, 1009 BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED, 1010 /* BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE is available to bpf programs 1011 * attaching to a cgroup. The new mechanism (BPF_MAP_TYPE_CGRP_STORAGE + 1012 * local percpu kptr) supports all BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE 1013 * functionality and more. So mark * BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE 1014 * deprecated. 1015 */ 1016 BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED, 1017 BPF_MAP_TYPE_QUEUE, 1018 BPF_MAP_TYPE_STACK, 1019 BPF_MAP_TYPE_SK_STORAGE, 1020 BPF_MAP_TYPE_DEVMAP_HASH, 1021 BPF_MAP_TYPE_STRUCT_OPS, 1022 BPF_MAP_TYPE_RINGBUF, 1023 BPF_MAP_TYPE_INODE_STORAGE, 1024 BPF_MAP_TYPE_TASK_STORAGE, 1025 BPF_MAP_TYPE_BLOOM_FILTER, 1026 BPF_MAP_TYPE_USER_RINGBUF, 1027 BPF_MAP_TYPE_CGRP_STORAGE, 1028 BPF_MAP_TYPE_ARENA, 1029 BPF_MAP_TYPE_INSN_ARRAY, 1030 __MAX_BPF_MAP_TYPE 1031 }; 1032 1033 /* Note that tracing related programs such as 1034 * BPF_PROG_TYPE_{KPROBE,TRACEPOINT,PERF_EVENT,RAW_TRACEPOINT} 1035 * are not subject to a stable API since kernel internal data 1036 * structures can change from release to release and may 1037 * therefore break existing tracing BPF programs. Tracing BPF 1038 * programs correspond to /a/ specific kernel which is to be 1039 * analyzed, and not /a/ specific kernel /and/ all future ones. 1040 */ 1041 enum bpf_prog_type { 1042 BPF_PROG_TYPE_UNSPEC, 1043 BPF_PROG_TYPE_SOCKET_FILTER, 1044 BPF_PROG_TYPE_KPROBE, 1045 BPF_PROG_TYPE_SCHED_CLS, 1046 BPF_PROG_TYPE_SCHED_ACT, 1047 BPF_PROG_TYPE_TRACEPOINT, 1048 BPF_PROG_TYPE_XDP, 1049 BPF_PROG_TYPE_PERF_EVENT, 1050 BPF_PROG_TYPE_CGROUP_SKB, 1051 BPF_PROG_TYPE_CGROUP_SOCK, 1052 BPF_PROG_TYPE_LWT_IN, 1053 BPF_PROG_TYPE_LWT_OUT, 1054 BPF_PROG_TYPE_LWT_XMIT, 1055 BPF_PROG_TYPE_SOCK_OPS, 1056 BPF_PROG_TYPE_SK_SKB, 1057 BPF_PROG_TYPE_CGROUP_DEVICE, 1058 BPF_PROG_TYPE_SK_MSG, 1059 BPF_PROG_TYPE_RAW_TRACEPOINT, 1060 BPF_PROG_TYPE_CGROUP_SOCK_ADDR, 1061 BPF_PROG_TYPE_LWT_SEG6LOCAL, 1062 BPF_PROG_TYPE_LIRC_MODE2, 1063 BPF_PROG_TYPE_SK_REUSEPORT, 1064 BPF_PROG_TYPE_FLOW_DISSECTOR, 1065 BPF_PROG_TYPE_CGROUP_SYSCTL, 1066 BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, 1067 BPF_PROG_TYPE_CGROUP_SOCKOPT, 1068 BPF_PROG_TYPE_TRACING, 1069 BPF_PROG_TYPE_STRUCT_OPS, 1070 BPF_PROG_TYPE_EXT, 1071 BPF_PROG_TYPE_LSM, 1072 BPF_PROG_TYPE_SK_LOOKUP, 1073 BPF_PROG_TYPE_SYSCALL, /* a program that can execute syscalls */ 1074 BPF_PROG_TYPE_NETFILTER, 1075 __MAX_BPF_PROG_TYPE 1076 }; 1077 1078 enum bpf_attach_type { 1079 BPF_CGROUP_INET_INGRESS, 1080 BPF_CGROUP_INET_EGRESS, 1081 BPF_CGROUP_INET_SOCK_CREATE, 1082 BPF_CGROUP_SOCK_OPS, 1083 BPF_SK_SKB_STREAM_PARSER, 1084 BPF_SK_SKB_STREAM_VERDICT, 1085 BPF_CGROUP_DEVICE, 1086 BPF_SK_MSG_VERDICT, 1087 BPF_CGROUP_INET4_BIND, 1088 BPF_CGROUP_INET6_BIND, 1089 BPF_CGROUP_INET4_CONNECT, 1090 BPF_CGROUP_INET6_CONNECT, 1091 BPF_CGROUP_INET4_POST_BIND, 1092 BPF_CGROUP_INET6_POST_BIND, 1093 BPF_CGROUP_UDP4_SENDMSG, 1094 BPF_CGROUP_UDP6_SENDMSG, 1095 BPF_LIRC_MODE2, 1096 BPF_FLOW_DISSECTOR, 1097 BPF_CGROUP_SYSCTL, 1098 BPF_CGROUP_UDP4_RECVMSG, 1099 BPF_CGROUP_UDP6_RECVMSG, 1100 BPF_CGROUP_GETSOCKOPT, 1101 BPF_CGROUP_SETSOCKOPT, 1102 BPF_TRACE_RAW_TP, 1103 BPF_TRACE_FENTRY, 1104 BPF_TRACE_FEXIT, 1105 BPF_MODIFY_RETURN, 1106 BPF_LSM_MAC, 1107 BPF_TRACE_ITER, 1108 BPF_CGROUP_INET4_GETPEERNAME, 1109 BPF_CGROUP_INET6_GETPEERNAME, 1110 BPF_CGROUP_INET4_GETSOCKNAME, 1111 BPF_CGROUP_INET6_GETSOCKNAME, 1112 BPF_XDP_DEVMAP, 1113 BPF_CGROUP_INET_SOCK_RELEASE, 1114 BPF_XDP_CPUMAP, 1115 BPF_SK_LOOKUP, 1116 BPF_XDP, 1117 BPF_SK_SKB_VERDICT, 1118 BPF_SK_REUSEPORT_SELECT, 1119 BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, 1120 BPF_PERF_EVENT, 1121 BPF_TRACE_KPROBE_MULTI, 1122 BPF_LSM_CGROUP, 1123 BPF_STRUCT_OPS, 1124 BPF_NETFILTER, 1125 BPF_TCX_INGRESS, 1126 BPF_TCX_EGRESS, 1127 BPF_TRACE_UPROBE_MULTI, 1128 BPF_CGROUP_UNIX_CONNECT, 1129 BPF_CGROUP_UNIX_SENDMSG, 1130 BPF_CGROUP_UNIX_RECVMSG, 1131 BPF_CGROUP_UNIX_GETPEERNAME, 1132 BPF_CGROUP_UNIX_GETSOCKNAME, 1133 BPF_NETKIT_PRIMARY, 1134 BPF_NETKIT_PEER, 1135 BPF_TRACE_KPROBE_SESSION, 1136 BPF_TRACE_UPROBE_SESSION, 1137 __MAX_BPF_ATTACH_TYPE 1138 }; 1139 1140 #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE 1141 1142 /* Add BPF_LINK_TYPE(type, name) in bpf_types.h to keep bpf_link_type_strs[] 1143 * in sync with the definitions below. 1144 */ 1145 enum bpf_link_type { 1146 BPF_LINK_TYPE_UNSPEC = 0, 1147 BPF_LINK_TYPE_RAW_TRACEPOINT = 1, 1148 BPF_LINK_TYPE_TRACING = 2, 1149 BPF_LINK_TYPE_CGROUP = 3, 1150 BPF_LINK_TYPE_ITER = 4, 1151 BPF_LINK_TYPE_NETNS = 5, 1152 BPF_LINK_TYPE_XDP = 6, 1153 BPF_LINK_TYPE_PERF_EVENT = 7, 1154 BPF_LINK_TYPE_KPROBE_MULTI = 8, 1155 BPF_LINK_TYPE_STRUCT_OPS = 9, 1156 BPF_LINK_TYPE_NETFILTER = 10, 1157 BPF_LINK_TYPE_TCX = 11, 1158 BPF_LINK_TYPE_UPROBE_MULTI = 12, 1159 BPF_LINK_TYPE_NETKIT = 13, 1160 BPF_LINK_TYPE_SOCKMAP = 14, 1161 __MAX_BPF_LINK_TYPE, 1162 }; 1163 1164 #define MAX_BPF_LINK_TYPE __MAX_BPF_LINK_TYPE 1165 1166 enum bpf_perf_event_type { 1167 BPF_PERF_EVENT_UNSPEC = 0, 1168 BPF_PERF_EVENT_UPROBE = 1, 1169 BPF_PERF_EVENT_URETPROBE = 2, 1170 BPF_PERF_EVENT_KPROBE = 3, 1171 BPF_PERF_EVENT_KRETPROBE = 4, 1172 BPF_PERF_EVENT_TRACEPOINT = 5, 1173 BPF_PERF_EVENT_EVENT = 6, 1174 }; 1175 1176 /* cgroup-bpf attach flags used in BPF_PROG_ATTACH command 1177 * 1178 * NONE(default): No further bpf programs allowed in the subtree. 1179 * 1180 * BPF_F_ALLOW_OVERRIDE: If a sub-cgroup installs some bpf program, 1181 * the program in this cgroup yields to sub-cgroup program. 1182 * 1183 * BPF_F_ALLOW_MULTI: If a sub-cgroup installs some bpf program, 1184 * that cgroup program gets run in addition to the program in this cgroup. 1185 * 1186 * Only one program is allowed to be attached to a cgroup with 1187 * NONE or BPF_F_ALLOW_OVERRIDE flag. 1188 * Attaching another program on top of NONE or BPF_F_ALLOW_OVERRIDE will 1189 * release old program and attach the new one. Attach flags has to match. 1190 * 1191 * Multiple programs are allowed to be attached to a cgroup with 1192 * BPF_F_ALLOW_MULTI flag. They are executed in FIFO order 1193 * (those that were attached first, run first) 1194 * The programs of sub-cgroup are executed first, then programs of 1195 * this cgroup and then programs of parent cgroup. 1196 * When children program makes decision (like picking TCP CA or sock bind) 1197 * parent program has a chance to override it. 1198 * 1199 * With BPF_F_ALLOW_MULTI a new program is added to the end of the list of 1200 * programs for a cgroup. Though it's possible to replace an old program at 1201 * any position by also specifying BPF_F_REPLACE flag and position itself in 1202 * replace_bpf_fd attribute. Old program at this position will be released. 1203 * 1204 * A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups. 1205 * A cgroup with NONE doesn't allow any programs in sub-cgroups. 1206 * Ex1: 1207 * cgrp1 (MULTI progs A, B) -> 1208 * cgrp2 (OVERRIDE prog C) -> 1209 * cgrp3 (MULTI prog D) -> 1210 * cgrp4 (OVERRIDE prog E) -> 1211 * cgrp5 (NONE prog F) 1212 * the event in cgrp5 triggers execution of F,D,A,B in that order. 1213 * if prog F is detached, the execution is E,D,A,B 1214 * if prog F and D are detached, the execution is E,A,B 1215 * if prog F, E and D are detached, the execution is C,A,B 1216 * 1217 * All eligible programs are executed regardless of return code from 1218 * earlier programs. 1219 */ 1220 #define BPF_F_ALLOW_OVERRIDE (1U << 0) 1221 #define BPF_F_ALLOW_MULTI (1U << 1) 1222 /* Generic attachment flags. */ 1223 #define BPF_F_REPLACE (1U << 2) 1224 #define BPF_F_BEFORE (1U << 3) 1225 #define BPF_F_AFTER (1U << 4) 1226 #define BPF_F_ID (1U << 5) 1227 #define BPF_F_PREORDER (1U << 6) 1228 #define BPF_F_LINK BPF_F_LINK /* 1 << 13 */ 1229 1230 /* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the 1231 * verifier will perform strict alignment checking as if the kernel 1232 * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set, 1233 * and NET_IP_ALIGN defined to 2. 1234 */ 1235 #define BPF_F_STRICT_ALIGNMENT (1U << 0) 1236 1237 /* If BPF_F_ANY_ALIGNMENT is used in BPF_PROG_LOAD command, the 1238 * verifier will allow any alignment whatsoever. On platforms 1239 * with strict alignment requirements for loads ands stores (such 1240 * as sparc and mips) the verifier validates that all loads and 1241 * stores provably follow this requirement. This flag turns that 1242 * checking and enforcement off. 1243 * 1244 * It is mostly used for testing when we want to validate the 1245 * context and memory access aspects of the verifier, but because 1246 * of an unaligned access the alignment check would trigger before 1247 * the one we are interested in. 1248 */ 1249 #define BPF_F_ANY_ALIGNMENT (1U << 1) 1250 1251 /* BPF_F_TEST_RND_HI32 is used in BPF_PROG_LOAD command for testing purpose. 1252 * Verifier does sub-register def/use analysis and identifies instructions whose 1253 * def only matters for low 32-bit, high 32-bit is never referenced later 1254 * through implicit zero extension. Therefore verifier notifies JIT back-ends 1255 * that it is safe to ignore clearing high 32-bit for these instructions. This 1256 * saves some back-ends a lot of code-gen. However such optimization is not 1257 * necessary on some arches, for example x86_64, arm64 etc, whose JIT back-ends 1258 * hence hasn't used verifier's analysis result. But, we really want to have a 1259 * way to be able to verify the correctness of the described optimization on 1260 * x86_64 on which testsuites are frequently exercised. 1261 * 1262 * So, this flag is introduced. Once it is set, verifier will randomize high 1263 * 32-bit for those instructions who has been identified as safe to ignore them. 1264 * Then, if verifier is not doing correct analysis, such randomization will 1265 * regress tests to expose bugs. 1266 */ 1267 #define BPF_F_TEST_RND_HI32 (1U << 2) 1268 1269 /* The verifier internal test flag. Behavior is undefined */ 1270 #define BPF_F_TEST_STATE_FREQ (1U << 3) 1271 1272 /* If BPF_F_SLEEPABLE is used in BPF_PROG_LOAD command, the verifier will 1273 * restrict map and helper usage for such programs. Sleepable BPF programs can 1274 * only be attached to hooks where kernel execution context allows sleeping. 1275 * Such programs are allowed to use helpers that may sleep like 1276 * bpf_copy_from_user(). 1277 */ 1278 #define BPF_F_SLEEPABLE (1U << 4) 1279 1280 /* If BPF_F_XDP_HAS_FRAGS is used in BPF_PROG_LOAD command, the loaded program 1281 * fully support xdp frags. 1282 */ 1283 #define BPF_F_XDP_HAS_FRAGS (1U << 5) 1284 1285 /* If BPF_F_XDP_DEV_BOUND_ONLY is used in BPF_PROG_LOAD command, the loaded 1286 * program becomes device-bound but can access XDP metadata. 1287 */ 1288 #define BPF_F_XDP_DEV_BOUND_ONLY (1U << 6) 1289 1290 /* The verifier internal test flag. Behavior is undefined */ 1291 #define BPF_F_TEST_REG_INVARIANTS (1U << 7) 1292 1293 /* link_create.kprobe_multi.flags used in LINK_CREATE command for 1294 * BPF_TRACE_KPROBE_MULTI attach type to create return probe. 1295 */ 1296 enum { 1297 BPF_F_KPROBE_MULTI_RETURN = (1U << 0) 1298 }; 1299 1300 /* link_create.uprobe_multi.flags used in LINK_CREATE command for 1301 * BPF_TRACE_UPROBE_MULTI attach type to create return probe. 1302 */ 1303 enum { 1304 BPF_F_UPROBE_MULTI_RETURN = (1U << 0) 1305 }; 1306 1307 /* link_create.netfilter.flags used in LINK_CREATE command for 1308 * BPF_PROG_TYPE_NETFILTER to enable IP packet defragmentation. 1309 */ 1310 #define BPF_F_NETFILTER_IP_DEFRAG (1U << 0) 1311 1312 /* When BPF ldimm64's insn[0].src_reg != 0 then this can have 1313 * the following extensions: 1314 * 1315 * insn[0].src_reg: BPF_PSEUDO_MAP_[FD|IDX] 1316 * insn[0].imm: map fd or fd_idx 1317 * insn[1].imm: 0 1318 * insn[0].off: 0 1319 * insn[1].off: 0 1320 * ldimm64 rewrite: address of map 1321 * verifier type: CONST_PTR_TO_MAP 1322 */ 1323 #define BPF_PSEUDO_MAP_FD 1 1324 #define BPF_PSEUDO_MAP_IDX 5 1325 1326 /* insn[0].src_reg: BPF_PSEUDO_MAP_[IDX_]VALUE 1327 * insn[0].imm: map fd or fd_idx 1328 * insn[1].imm: offset into value 1329 * insn[0].off: 0 1330 * insn[1].off: 0 1331 * ldimm64 rewrite: address of map[0]+offset 1332 * verifier type: PTR_TO_MAP_VALUE 1333 */ 1334 #define BPF_PSEUDO_MAP_VALUE 2 1335 #define BPF_PSEUDO_MAP_IDX_VALUE 6 1336 1337 /* insn[0].src_reg: BPF_PSEUDO_BTF_ID 1338 * insn[0].imm: kernel btd id of VAR 1339 * insn[1].imm: 0 1340 * insn[0].off: 0 1341 * insn[1].off: 0 1342 * ldimm64 rewrite: address of the kernel variable 1343 * verifier type: PTR_TO_BTF_ID or PTR_TO_MEM, depending on whether the var 1344 * is struct/union. 1345 */ 1346 #define BPF_PSEUDO_BTF_ID 3 1347 /* insn[0].src_reg: BPF_PSEUDO_FUNC 1348 * insn[0].imm: insn offset to the func 1349 * insn[1].imm: 0 1350 * insn[0].off: 0 1351 * insn[1].off: 0 1352 * ldimm64 rewrite: address of the function 1353 * verifier type: PTR_TO_FUNC. 1354 */ 1355 #define BPF_PSEUDO_FUNC 4 1356 1357 /* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative 1358 * offset to another bpf function 1359 */ 1360 #define BPF_PSEUDO_CALL 1 1361 /* when bpf_call->src_reg == BPF_PSEUDO_KFUNC_CALL, 1362 * bpf_call->imm == btf_id of a BTF_KIND_FUNC in the running kernel 1363 */ 1364 #define BPF_PSEUDO_KFUNC_CALL 2 1365 1366 enum bpf_addr_space_cast { 1367 BPF_ADDR_SPACE_CAST = 1, 1368 }; 1369 1370 /* flags for BPF_MAP_UPDATE_ELEM command */ 1371 enum { 1372 BPF_ANY = 0, /* create new element or update existing */ 1373 BPF_NOEXIST = 1, /* create new element if it didn't exist */ 1374 BPF_EXIST = 2, /* update existing element */ 1375 BPF_F_LOCK = 4, /* spin_lock-ed map_lookup/map_update */ 1376 }; 1377 1378 /* flags for BPF_MAP_CREATE command */ 1379 enum { 1380 BPF_F_NO_PREALLOC = (1U << 0), 1381 /* Instead of having one common LRU list in the 1382 * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list 1383 * which can scale and perform better. 1384 * Note, the LRU nodes (including free nodes) cannot be moved 1385 * across different LRU lists. 1386 */ 1387 BPF_F_NO_COMMON_LRU = (1U << 1), 1388 /* Specify numa node during map creation */ 1389 BPF_F_NUMA_NODE = (1U << 2), 1390 1391 /* Flags for accessing BPF object from syscall side. */ 1392 BPF_F_RDONLY = (1U << 3), 1393 BPF_F_WRONLY = (1U << 4), 1394 1395 /* Flag for stack_map, store build_id+offset instead of pointer */ 1396 BPF_F_STACK_BUILD_ID = (1U << 5), 1397 1398 /* Zero-initialize hash function seed. This should only be used for testing. */ 1399 BPF_F_ZERO_SEED = (1U << 6), 1400 1401 /* Flags for accessing BPF object from program side. */ 1402 BPF_F_RDONLY_PROG = (1U << 7), 1403 BPF_F_WRONLY_PROG = (1U << 8), 1404 1405 /* Clone map from listener for newly accepted socket */ 1406 BPF_F_CLONE = (1U << 9), 1407 1408 /* Enable memory-mapping BPF map */ 1409 BPF_F_MMAPABLE = (1U << 10), 1410 1411 /* Share perf_event among processes */ 1412 BPF_F_PRESERVE_ELEMS = (1U << 11), 1413 1414 /* Create a map that is suitable to be an inner map with dynamic max entries */ 1415 BPF_F_INNER_MAP = (1U << 12), 1416 1417 /* Create a map that will be registered/unregesitered by the backed bpf_link */ 1418 BPF_F_LINK = (1U << 13), 1419 1420 /* Get path from provided FD in BPF_OBJ_PIN/BPF_OBJ_GET commands */ 1421 BPF_F_PATH_FD = (1U << 14), 1422 1423 /* Flag for value_type_btf_obj_fd, the fd is available */ 1424 BPF_F_VTYPE_BTF_OBJ_FD = (1U << 15), 1425 1426 /* BPF token FD is passed in a corresponding command's token_fd field */ 1427 BPF_F_TOKEN_FD = (1U << 16), 1428 1429 /* When user space page faults in bpf_arena send SIGSEGV instead of inserting new page */ 1430 BPF_F_SEGV_ON_FAULT = (1U << 17), 1431 1432 /* Do not translate kernel bpf_arena pointers to user pointers */ 1433 BPF_F_NO_USER_CONV = (1U << 18), 1434 1435 /* Enable BPF ringbuf overwrite mode */ 1436 BPF_F_RB_OVERWRITE = (1U << 19), 1437 }; 1438 1439 /* Flags for BPF_PROG_QUERY. */ 1440 1441 /* Query effective (directly attached + inherited from ancestor cgroups) 1442 * programs that will be executed for events within a cgroup. 1443 * attach_flags with this flag are always returned 0. 1444 */ 1445 #define BPF_F_QUERY_EFFECTIVE (1U << 0) 1446 1447 /* Flags for BPF_PROG_TEST_RUN */ 1448 1449 /* If set, run the test on the cpu specified by bpf_attr.test.cpu */ 1450 #define BPF_F_TEST_RUN_ON_CPU (1U << 0) 1451 /* If set, XDP frames will be transmitted after processing */ 1452 #define BPF_F_TEST_XDP_LIVE_FRAMES (1U << 1) 1453 /* If set, apply CHECKSUM_COMPLETE to skb and validate the checksum */ 1454 #define BPF_F_TEST_SKB_CHECKSUM_COMPLETE (1U << 2) 1455 1456 /* type for BPF_ENABLE_STATS */ 1457 enum bpf_stats_type { 1458 /* enabled run_time_ns and run_cnt */ 1459 BPF_STATS_RUN_TIME = 0, 1460 }; 1461 1462 enum bpf_stack_build_id_status { 1463 /* user space need an empty entry to identify end of a trace */ 1464 BPF_STACK_BUILD_ID_EMPTY = 0, 1465 /* with valid build_id and offset */ 1466 BPF_STACK_BUILD_ID_VALID = 1, 1467 /* couldn't get build_id, fallback to ip */ 1468 BPF_STACK_BUILD_ID_IP = 2, 1469 }; 1470 1471 #define BPF_BUILD_ID_SIZE 20 1472 struct bpf_stack_build_id { 1473 __s32 status; 1474 unsigned char build_id[BPF_BUILD_ID_SIZE]; 1475 union { 1476 __u64 offset; 1477 __u64 ip; 1478 }; 1479 }; 1480 1481 #define BPF_OBJ_NAME_LEN 16U 1482 1483 enum { 1484 BPF_STREAM_STDOUT = 1, 1485 BPF_STREAM_STDERR = 2, 1486 }; 1487 1488 union bpf_attr { 1489 struct { /* anonymous struct used by BPF_MAP_CREATE command */ 1490 __u32 map_type; /* one of enum bpf_map_type */ 1491 __u32 key_size; /* size of key in bytes */ 1492 __u32 value_size; /* size of value in bytes */ 1493 __u32 max_entries; /* max number of entries in a map */ 1494 __u32 map_flags; /* BPF_MAP_CREATE related 1495 * flags defined above. 1496 */ 1497 __u32 inner_map_fd; /* fd pointing to the inner map */ 1498 __u32 numa_node; /* numa node (effective only if 1499 * BPF_F_NUMA_NODE is set). 1500 */ 1501 char map_name[BPF_OBJ_NAME_LEN]; 1502 __u32 map_ifindex; /* ifindex of netdev to create on */ 1503 __u32 btf_fd; /* fd pointing to a BTF type data */ 1504 __u32 btf_key_type_id; /* BTF type_id of the key */ 1505 __u32 btf_value_type_id; /* BTF type_id of the value */ 1506 __u32 btf_vmlinux_value_type_id;/* BTF type_id of a kernel- 1507 * struct stored as the 1508 * map value 1509 */ 1510 /* Any per-map-type extra fields 1511 * 1512 * BPF_MAP_TYPE_BLOOM_FILTER - the lowest 4 bits indicate the 1513 * number of hash functions (if 0, the bloom filter will default 1514 * to using 5 hash functions). 1515 * 1516 * BPF_MAP_TYPE_ARENA - contains the address where user space 1517 * is going to mmap() the arena. It has to be page aligned. 1518 */ 1519 __u64 map_extra; 1520 1521 __s32 value_type_btf_obj_fd; /* fd pointing to a BTF 1522 * type data for 1523 * btf_vmlinux_value_type_id. 1524 */ 1525 /* BPF token FD to use with BPF_MAP_CREATE operation. 1526 * If provided, map_flags should have BPF_F_TOKEN_FD flag set. 1527 */ 1528 __s32 map_token_fd; 1529 1530 /* Hash of the program that has exclusive access to the map. 1531 */ 1532 __aligned_u64 excl_prog_hash; 1533 /* Size of the passed excl_prog_hash. */ 1534 __u32 excl_prog_hash_size; 1535 }; 1536 1537 struct { /* anonymous struct used by BPF_MAP_*_ELEM and BPF_MAP_FREEZE commands */ 1538 __u32 map_fd; 1539 __aligned_u64 key; 1540 union { 1541 __aligned_u64 value; 1542 __aligned_u64 next_key; 1543 }; 1544 __u64 flags; 1545 }; 1546 1547 struct { /* struct used by BPF_MAP_*_BATCH commands */ 1548 __aligned_u64 in_batch; /* start batch, 1549 * NULL to start from beginning 1550 */ 1551 __aligned_u64 out_batch; /* output: next start batch */ 1552 __aligned_u64 keys; 1553 __aligned_u64 values; 1554 __u32 count; /* input/output: 1555 * input: # of key/value 1556 * elements 1557 * output: # of filled elements 1558 */ 1559 __u32 map_fd; 1560 __u64 elem_flags; 1561 __u64 flags; 1562 } batch; 1563 1564 struct { /* anonymous struct used by BPF_PROG_LOAD command */ 1565 __u32 prog_type; /* one of enum bpf_prog_type */ 1566 __u32 insn_cnt; 1567 __aligned_u64 insns; 1568 __aligned_u64 license; 1569 __u32 log_level; /* verbosity level of verifier */ 1570 __u32 log_size; /* size of user buffer */ 1571 __aligned_u64 log_buf; /* user supplied buffer */ 1572 __u32 kern_version; /* not used */ 1573 __u32 prog_flags; 1574 char prog_name[BPF_OBJ_NAME_LEN]; 1575 __u32 prog_ifindex; /* ifindex of netdev to prep for */ 1576 /* For some prog types expected attach type must be known at 1577 * load time to verify attach type specific parts of prog 1578 * (context accesses, allowed helpers, etc). 1579 */ 1580 __u32 expected_attach_type; 1581 __u32 prog_btf_fd; /* fd pointing to BTF type data */ 1582 __u32 func_info_rec_size; /* userspace bpf_func_info size */ 1583 __aligned_u64 func_info; /* func info */ 1584 __u32 func_info_cnt; /* number of bpf_func_info records */ 1585 __u32 line_info_rec_size; /* userspace bpf_line_info size */ 1586 __aligned_u64 line_info; /* line info */ 1587 __u32 line_info_cnt; /* number of bpf_line_info records */ 1588 __u32 attach_btf_id; /* in-kernel BTF type id to attach to */ 1589 union { 1590 /* valid prog_fd to attach to bpf prog */ 1591 __u32 attach_prog_fd; 1592 /* or valid module BTF object fd or 0 to attach to vmlinux */ 1593 __u32 attach_btf_obj_fd; 1594 }; 1595 __u32 core_relo_cnt; /* number of bpf_core_relo */ 1596 __aligned_u64 fd_array; /* array of FDs */ 1597 __aligned_u64 core_relos; 1598 __u32 core_relo_rec_size; /* sizeof(struct bpf_core_relo) */ 1599 /* output: actual total log contents size (including termintaing zero). 1600 * It could be both larger than original log_size (if log was 1601 * truncated), or smaller (if log buffer wasn't filled completely). 1602 */ 1603 __u32 log_true_size; 1604 /* BPF token FD to use with BPF_PROG_LOAD operation. 1605 * If provided, prog_flags should have BPF_F_TOKEN_FD flag set. 1606 */ 1607 __s32 prog_token_fd; 1608 /* The fd_array_cnt can be used to pass the length of the 1609 * fd_array array. In this case all the [map] file descriptors 1610 * passed in this array will be bound to the program, even if 1611 * the maps are not referenced directly. The functionality is 1612 * similar to the BPF_PROG_BIND_MAP syscall, but maps can be 1613 * used by the verifier during the program load. If provided, 1614 * then the fd_array[0,...,fd_array_cnt-1] is expected to be 1615 * continuous. 1616 */ 1617 __u32 fd_array_cnt; 1618 /* Pointer to a buffer containing the signature of the BPF 1619 * program. 1620 */ 1621 __aligned_u64 signature; 1622 /* Size of the signature buffer in bytes. */ 1623 __u32 signature_size; 1624 /* ID of the kernel keyring to be used for signature 1625 * verification. 1626 */ 1627 __s32 keyring_id; 1628 }; 1629 1630 struct { /* anonymous struct used by BPF_OBJ_* commands */ 1631 __aligned_u64 pathname; 1632 __u32 bpf_fd; 1633 __u32 file_flags; 1634 /* Same as dirfd in openat() syscall; see openat(2) 1635 * manpage for details of path FD and pathname semantics; 1636 * path_fd should accompanied by BPF_F_PATH_FD flag set in 1637 * file_flags field, otherwise it should be set to zero; 1638 * if BPF_F_PATH_FD flag is not set, AT_FDCWD is assumed. 1639 */ 1640 __s32 path_fd; 1641 }; 1642 1643 struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */ 1644 union { 1645 __u32 target_fd; /* target object to attach to or ... */ 1646 __u32 target_ifindex; /* target ifindex */ 1647 }; 1648 __u32 attach_bpf_fd; 1649 __u32 attach_type; 1650 __u32 attach_flags; 1651 __u32 replace_bpf_fd; 1652 union { 1653 __u32 relative_fd; 1654 __u32 relative_id; 1655 }; 1656 __u64 expected_revision; 1657 }; 1658 1659 struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */ 1660 __u32 prog_fd; 1661 __u32 retval; 1662 __u32 data_size_in; /* input: len of data_in */ 1663 __u32 data_size_out; /* input/output: len of data_out 1664 * returns ENOSPC if data_out 1665 * is too small. 1666 */ 1667 __aligned_u64 data_in; 1668 __aligned_u64 data_out; 1669 __u32 repeat; 1670 __u32 duration; 1671 __u32 ctx_size_in; /* input: len of ctx_in */ 1672 __u32 ctx_size_out; /* input/output: len of ctx_out 1673 * returns ENOSPC if ctx_out 1674 * is too small. 1675 */ 1676 __aligned_u64 ctx_in; 1677 __aligned_u64 ctx_out; 1678 __u32 flags; 1679 __u32 cpu; 1680 __u32 batch_size; 1681 } test; 1682 1683 struct { /* anonymous struct used by BPF_*_GET_*_ID */ 1684 union { 1685 __u32 start_id; 1686 __u32 prog_id; 1687 __u32 map_id; 1688 __u32 btf_id; 1689 __u32 link_id; 1690 }; 1691 __u32 next_id; 1692 __u32 open_flags; 1693 __s32 fd_by_id_token_fd; 1694 }; 1695 1696 struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */ 1697 __u32 bpf_fd; 1698 __u32 info_len; 1699 __aligned_u64 info; 1700 } info; 1701 1702 struct { /* anonymous struct used by BPF_PROG_QUERY command */ 1703 union { 1704 __u32 target_fd; /* target object to query or ... */ 1705 __u32 target_ifindex; /* target ifindex */ 1706 }; 1707 __u32 attach_type; 1708 __u32 query_flags; 1709 __u32 attach_flags; 1710 __aligned_u64 prog_ids; 1711 union { 1712 __u32 prog_cnt; 1713 __u32 count; 1714 }; 1715 __u32 :32; 1716 /* output: per-program attach_flags. 1717 * not allowed to be set during effective query. 1718 */ 1719 __aligned_u64 prog_attach_flags; 1720 __aligned_u64 link_ids; 1721 __aligned_u64 link_attach_flags; 1722 __u64 revision; 1723 } query; 1724 1725 struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */ 1726 __u64 name; 1727 __u32 prog_fd; 1728 __u32 :32; 1729 __aligned_u64 cookie; 1730 } raw_tracepoint; 1731 1732 struct { /* anonymous struct for BPF_BTF_LOAD */ 1733 __aligned_u64 btf; 1734 __aligned_u64 btf_log_buf; 1735 __u32 btf_size; 1736 __u32 btf_log_size; 1737 __u32 btf_log_level; 1738 /* output: actual total log contents size (including termintaing zero). 1739 * It could be both larger than original log_size (if log was 1740 * truncated), or smaller (if log buffer wasn't filled completely). 1741 */ 1742 __u32 btf_log_true_size; 1743 __u32 btf_flags; 1744 /* BPF token FD to use with BPF_BTF_LOAD operation. 1745 * If provided, btf_flags should have BPF_F_TOKEN_FD flag set. 1746 */ 1747 __s32 btf_token_fd; 1748 }; 1749 1750 struct { 1751 __u32 pid; /* input: pid */ 1752 __u32 fd; /* input: fd */ 1753 __u32 flags; /* input: flags */ 1754 __u32 buf_len; /* input/output: buf len */ 1755 __aligned_u64 buf; /* input/output: 1756 * tp_name for tracepoint 1757 * symbol for kprobe 1758 * filename for uprobe 1759 */ 1760 __u32 prog_id; /* output: prod_id */ 1761 __u32 fd_type; /* output: BPF_FD_TYPE_* */ 1762 __u64 probe_offset; /* output: probe_offset */ 1763 __u64 probe_addr; /* output: probe_addr */ 1764 } task_fd_query; 1765 1766 struct { /* struct used by BPF_LINK_CREATE command */ 1767 union { 1768 __u32 prog_fd; /* eBPF program to attach */ 1769 __u32 map_fd; /* struct_ops to attach */ 1770 }; 1771 union { 1772 __u32 target_fd; /* target object to attach to or ... */ 1773 __u32 target_ifindex; /* target ifindex */ 1774 }; 1775 __u32 attach_type; /* attach type */ 1776 __u32 flags; /* extra flags */ 1777 union { 1778 __u32 target_btf_id; /* btf_id of target to attach to */ 1779 struct { 1780 __aligned_u64 iter_info; /* extra bpf_iter_link_info */ 1781 __u32 iter_info_len; /* iter_info length */ 1782 }; 1783 struct { 1784 /* black box user-provided value passed through 1785 * to BPF program at the execution time and 1786 * accessible through bpf_get_attach_cookie() BPF helper 1787 */ 1788 __u64 bpf_cookie; 1789 } perf_event; 1790 struct { 1791 __u32 flags; 1792 __u32 cnt; 1793 __aligned_u64 syms; 1794 __aligned_u64 addrs; 1795 __aligned_u64 cookies; 1796 } kprobe_multi; 1797 struct { 1798 /* this is overlaid with the target_btf_id above. */ 1799 __u32 target_btf_id; 1800 /* black box user-provided value passed through 1801 * to BPF program at the execution time and 1802 * accessible through bpf_get_attach_cookie() BPF helper 1803 */ 1804 __u64 cookie; 1805 } tracing; 1806 struct { 1807 __u32 pf; 1808 __u32 hooknum; 1809 __s32 priority; 1810 __u32 flags; 1811 } netfilter; 1812 struct { 1813 union { 1814 __u32 relative_fd; 1815 __u32 relative_id; 1816 }; 1817 __u64 expected_revision; 1818 } tcx; 1819 struct { 1820 __aligned_u64 path; 1821 __aligned_u64 offsets; 1822 __aligned_u64 ref_ctr_offsets; 1823 __aligned_u64 cookies; 1824 __u32 cnt; 1825 __u32 flags; 1826 __u32 pid; 1827 } uprobe_multi; 1828 struct { 1829 union { 1830 __u32 relative_fd; 1831 __u32 relative_id; 1832 }; 1833 __u64 expected_revision; 1834 } netkit; 1835 struct { 1836 union { 1837 __u32 relative_fd; 1838 __u32 relative_id; 1839 }; 1840 __u64 expected_revision; 1841 } cgroup; 1842 }; 1843 } link_create; 1844 1845 struct { /* struct used by BPF_LINK_UPDATE command */ 1846 __u32 link_fd; /* link fd */ 1847 union { 1848 /* new program fd to update link with */ 1849 __u32 new_prog_fd; 1850 /* new struct_ops map fd to update link with */ 1851 __u32 new_map_fd; 1852 }; 1853 __u32 flags; /* extra flags */ 1854 union { 1855 /* expected link's program fd; is specified only if 1856 * BPF_F_REPLACE flag is set in flags. 1857 */ 1858 __u32 old_prog_fd; 1859 /* expected link's map fd; is specified only 1860 * if BPF_F_REPLACE flag is set. 1861 */ 1862 __u32 old_map_fd; 1863 }; 1864 } link_update; 1865 1866 struct { 1867 __u32 link_fd; 1868 } link_detach; 1869 1870 struct { /* struct used by BPF_ENABLE_STATS command */ 1871 __u32 type; 1872 } enable_stats; 1873 1874 struct { /* struct used by BPF_ITER_CREATE command */ 1875 __u32 link_fd; 1876 __u32 flags; 1877 } iter_create; 1878 1879 struct { /* struct used by BPF_PROG_BIND_MAP command */ 1880 __u32 prog_fd; 1881 __u32 map_fd; 1882 __u32 flags; /* extra flags */ 1883 } prog_bind_map; 1884 1885 struct { /* struct used by BPF_TOKEN_CREATE command */ 1886 __u32 flags; 1887 __u32 bpffs_fd; 1888 } token_create; 1889 1890 struct { 1891 __aligned_u64 stream_buf; 1892 __u32 stream_buf_len; 1893 __u32 stream_id; 1894 __u32 prog_fd; 1895 } prog_stream_read; 1896 1897 } __attribute__((aligned(8))); 1898 1899 /* The description below is an attempt at providing documentation to eBPF 1900 * developers about the multiple available eBPF helper functions. It can be 1901 * parsed and used to produce a manual page. The workflow is the following, 1902 * and requires the rst2man utility: 1903 * 1904 * $ ./scripts/bpf_doc.py \ 1905 * --filename include/uapi/linux/bpf.h > /tmp/bpf-helpers.rst 1906 * $ rst2man /tmp/bpf-helpers.rst > /tmp/bpf-helpers.7 1907 * $ man /tmp/bpf-helpers.7 1908 * 1909 * Note that in order to produce this external documentation, some RST 1910 * formatting is used in the descriptions to get "bold" and "italics" in 1911 * manual pages. Also note that the few trailing white spaces are 1912 * intentional, removing them would break paragraphs for rst2man. 1913 * 1914 * Start of BPF helper function descriptions: 1915 * 1916 * void *bpf_map_lookup_elem(struct bpf_map *map, const void *key) 1917 * Description 1918 * Perform a lookup in *map* for an entry associated to *key*. 1919 * Return 1920 * Map value associated to *key*, or **NULL** if no entry was 1921 * found. 1922 * 1923 * long bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags) 1924 * Description 1925 * Add or update the value of the entry associated to *key* in 1926 * *map* with *value*. *flags* is one of: 1927 * 1928 * **BPF_NOEXIST** 1929 * The entry for *key* must not exist in the map. 1930 * **BPF_EXIST** 1931 * The entry for *key* must already exist in the map. 1932 * **BPF_ANY** 1933 * No condition on the existence of the entry for *key*. 1934 * 1935 * Flag value **BPF_NOEXIST** cannot be used for maps of types 1936 * **BPF_MAP_TYPE_ARRAY** or **BPF_MAP_TYPE_PERCPU_ARRAY** (all 1937 * elements always exist), the helper would return an error. 1938 * Return 1939 * 0 on success, or a negative error in case of failure. 1940 * 1941 * long bpf_map_delete_elem(struct bpf_map *map, const void *key) 1942 * Description 1943 * Delete entry with *key* from *map*. 1944 * Return 1945 * 0 on success, or a negative error in case of failure. 1946 * 1947 * long bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr) 1948 * Description 1949 * For tracing programs, safely attempt to read *size* bytes from 1950 * kernel space address *unsafe_ptr* and store the data in *dst*. 1951 * 1952 * Generally, use **bpf_probe_read_user**\ () or 1953 * **bpf_probe_read_kernel**\ () instead. 1954 * Return 1955 * 0 on success, or a negative error in case of failure. 1956 * 1957 * u64 bpf_ktime_get_ns(void) 1958 * Description 1959 * Return the time elapsed since system boot, in nanoseconds. 1960 * Does not include time the system was suspended. 1961 * See: **clock_gettime**\ (**CLOCK_MONOTONIC**) 1962 * Return 1963 * Current *ktime*. 1964 * 1965 * long bpf_trace_printk(const char *fmt, u32 fmt_size, ...) 1966 * Description 1967 * This helper is a "printk()-like" facility for debugging. It 1968 * prints a message defined by format *fmt* (of size *fmt_size*) 1969 * to file *\/sys/kernel/tracing/trace* from TraceFS, if 1970 * available. It can take up to three additional **u64** 1971 * arguments (as an eBPF helpers, the total number of arguments is 1972 * limited to five). 1973 * 1974 * Each time the helper is called, it appends a line to the trace. 1975 * Lines are discarded while *\/sys/kernel/tracing/trace* is 1976 * open, use *\/sys/kernel/tracing/trace_pipe* to avoid this. 1977 * The format of the trace is customizable, and the exact output 1978 * one will get depends on the options set in 1979 * *\/sys/kernel/tracing/trace_options* (see also the 1980 * *README* file under the same directory). However, it usually 1981 * defaults to something like: 1982 * 1983 * :: 1984 * 1985 * telnet-470 [001] .N.. 419421.045894: 0x00000001: <formatted msg> 1986 * 1987 * In the above: 1988 * 1989 * * ``telnet`` is the name of the current task. 1990 * * ``470`` is the PID of the current task. 1991 * * ``001`` is the CPU number on which the task is 1992 * running. 1993 * * In ``.N..``, each character refers to a set of 1994 * options (whether irqs are enabled, scheduling 1995 * options, whether hard/softirqs are running, level of 1996 * preempt_disabled respectively). **N** means that 1997 * **TIF_NEED_RESCHED** and **PREEMPT_NEED_RESCHED** 1998 * are set. 1999 * * ``419421.045894`` is a timestamp. 2000 * * ``0x00000001`` is a fake value used by BPF for the 2001 * instruction pointer register. 2002 * * ``<formatted msg>`` is the message formatted with 2003 * *fmt*. 2004 * 2005 * The conversion specifiers supported by *fmt* are similar, but 2006 * more limited than for printk(). They are **%d**, **%i**, 2007 * **%u**, **%x**, **%ld**, **%li**, **%lu**, **%lx**, **%lld**, 2008 * **%lli**, **%llu**, **%llx**, **%p**, **%s**. No modifier (size 2009 * of field, padding with zeroes, etc.) is available, and the 2010 * helper will return **-EINVAL** (but print nothing) if it 2011 * encounters an unknown specifier. 2012 * 2013 * Also, note that **bpf_trace_printk**\ () is slow, and should 2014 * only be used for debugging purposes. For this reason, a notice 2015 * block (spanning several lines) is printed to kernel logs and 2016 * states that the helper should not be used "for production use" 2017 * the first time this helper is used (or more precisely, when 2018 * **trace_printk**\ () buffers are allocated). For passing values 2019 * to user space, perf events should be preferred. 2020 * Return 2021 * The number of bytes written to the buffer, or a negative error 2022 * in case of failure. 2023 * 2024 * u32 bpf_get_prandom_u32(void) 2025 * Description 2026 * Get a pseudo-random number. 2027 * 2028 * From a security point of view, this helper uses its own 2029 * pseudo-random internal state, and cannot be used to infer the 2030 * seed of other random functions in the kernel. However, it is 2031 * essential to note that the generator used by the helper is not 2032 * cryptographically secure. 2033 * Return 2034 * A random 32-bit unsigned value. 2035 * 2036 * u32 bpf_get_smp_processor_id(void) 2037 * Description 2038 * Get the SMP (symmetric multiprocessing) processor id. Note that 2039 * all programs run with migration disabled, which means that the 2040 * SMP processor id is stable during all the execution of the 2041 * program. 2042 * Return 2043 * The SMP id of the processor running the program. 2044 * Attributes 2045 * __bpf_fastcall 2046 * 2047 * long bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags) 2048 * Description 2049 * Store *len* bytes from address *from* into the packet 2050 * associated to *skb*, at *offset*. The *flags* are a combination 2051 * of the following values: 2052 * 2053 * **BPF_F_RECOMPUTE_CSUM** 2054 * Automatically update *skb*\ **->csum** after storing the 2055 * bytes. 2056 * **BPF_F_INVALIDATE_HASH** 2057 * Set *skb*\ **->hash**, *skb*\ **->swhash** and *skb*\ 2058 * **->l4hash** to 0. 2059 * 2060 * A call to this helper is susceptible to change the underlying 2061 * packet buffer. Therefore, at load time, all checks on pointers 2062 * previously done by the verifier are invalidated and must be 2063 * performed again, if the helper is used in combination with 2064 * direct packet access. 2065 * Return 2066 * 0 on success, or a negative error in case of failure. 2067 * 2068 * long bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size) 2069 * Description 2070 * Recompute the layer 3 (e.g. IP) checksum for the packet 2071 * associated to *skb*. Computation is incremental, so the helper 2072 * must know the former value of the header field that was 2073 * modified (*from*), the new value of this field (*to*), and the 2074 * number of bytes (2 or 4) for this field, stored in *size*. 2075 * Alternatively, it is possible to store the difference between 2076 * the previous and the new values of the header field in *to*, by 2077 * setting *from* and *size* to 0. For both methods, *offset* 2078 * indicates the location of the IP checksum within the packet. 2079 * 2080 * This helper works in combination with **bpf_csum_diff**\ (), 2081 * which does not update the checksum in-place, but offers more 2082 * flexibility and can handle sizes larger than 2 or 4 for the 2083 * checksum to update. 2084 * 2085 * A call to this helper is susceptible to change the underlying 2086 * packet buffer. Therefore, at load time, all checks on pointers 2087 * previously done by the verifier are invalidated and must be 2088 * performed again, if the helper is used in combination with 2089 * direct packet access. 2090 * Return 2091 * 0 on success, or a negative error in case of failure. 2092 * 2093 * long bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags) 2094 * Description 2095 * Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the 2096 * packet associated to *skb*. Computation is incremental, so the 2097 * helper must know the former value of the header field that was 2098 * modified (*from*), the new value of this field (*to*), and the 2099 * number of bytes (2 or 4) for this field, stored on the lowest 2100 * four bits of *flags*. Alternatively, it is possible to store 2101 * the difference between the previous and the new values of the 2102 * header field in *to*, by setting *from* and the four lowest 2103 * bits of *flags* to 0. For both methods, *offset* indicates the 2104 * location of the IP checksum within the packet. In addition to 2105 * the size of the field, *flags* can be added (bitwise OR) actual 2106 * flags. With **BPF_F_MARK_MANGLED_0**, a null checksum is left 2107 * untouched (unless **BPF_F_MARK_ENFORCE** is added as well), and 2108 * for updates resulting in a null checksum the value is set to 2109 * **CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates 2110 * that the modified header field is part of the pseudo-header. 2111 * Flag **BPF_F_IPV6** should be set for IPv6 packets. 2112 * 2113 * This helper works in combination with **bpf_csum_diff**\ (), 2114 * which does not update the checksum in-place, but offers more 2115 * flexibility and can handle sizes larger than 2 or 4 for the 2116 * checksum to update. 2117 * 2118 * A call to this helper is susceptible to change the underlying 2119 * packet buffer. Therefore, at load time, all checks on pointers 2120 * previously done by the verifier are invalidated and must be 2121 * performed again, if the helper is used in combination with 2122 * direct packet access. 2123 * Return 2124 * 0 on success, or a negative error in case of failure. 2125 * 2126 * long bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index) 2127 * Description 2128 * This special helper is used to trigger a "tail call", or in 2129 * other words, to jump into another eBPF program. The same stack 2130 * frame is used (but values on stack and in registers for the 2131 * caller are not accessible to the callee). This mechanism allows 2132 * for program chaining, either for raising the maximum number of 2133 * available eBPF instructions, or to execute given programs in 2134 * conditional blocks. For security reasons, there is an upper 2135 * limit to the number of successive tail calls that can be 2136 * performed. 2137 * 2138 * Upon call of this helper, the program attempts to jump into a 2139 * program referenced at index *index* in *prog_array_map*, a 2140 * special map of type **BPF_MAP_TYPE_PROG_ARRAY**, and passes 2141 * *ctx*, a pointer to the context. 2142 * 2143 * If the call succeeds, the kernel immediately runs the first 2144 * instruction of the new program. This is not a function call, 2145 * and it never returns to the previous program. If the call 2146 * fails, then the helper has no effect, and the caller continues 2147 * to run its subsequent instructions. A call can fail if the 2148 * destination program for the jump does not exist (i.e. *index* 2149 * is superior to the number of entries in *prog_array_map*), or 2150 * if the maximum number of tail calls has been reached for this 2151 * chain of programs. This limit is defined in the kernel by the 2152 * macro **MAX_TAIL_CALL_CNT** (not accessible to user space), 2153 * which is currently set to 33. 2154 * Return 2155 * 0 on success, or a negative error in case of failure. 2156 * 2157 * long bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags) 2158 * Description 2159 * Clone and redirect the packet associated to *skb* to another 2160 * net device of index *ifindex*. Both ingress and egress 2161 * interfaces can be used for redirection. The **BPF_F_INGRESS** 2162 * value in *flags* is used to make the distinction (ingress path 2163 * is selected if the flag is present, egress path otherwise). 2164 * This is the only flag supported for now. 2165 * 2166 * In comparison with **bpf_redirect**\ () helper, 2167 * **bpf_clone_redirect**\ () has the associated cost of 2168 * duplicating the packet buffer, but this can be executed out of 2169 * the eBPF program. Conversely, **bpf_redirect**\ () is more 2170 * efficient, but it is handled through an action code where the 2171 * redirection happens only after the eBPF program has returned. 2172 * 2173 * A call to this helper is susceptible to change the underlying 2174 * packet buffer. Therefore, at load time, all checks on pointers 2175 * previously done by the verifier are invalidated and must be 2176 * performed again, if the helper is used in combination with 2177 * direct packet access. 2178 * Return 2179 * 0 on success, or a negative error in case of failure. Positive 2180 * error indicates a potential drop or congestion in the target 2181 * device. The particular positive error codes are not defined. 2182 * 2183 * u64 bpf_get_current_pid_tgid(void) 2184 * Description 2185 * Get the current pid and tgid. 2186 * Return 2187 * A 64-bit integer containing the current tgid and pid, and 2188 * created as such: 2189 * *current_task*\ **->tgid << 32 \|** 2190 * *current_task*\ **->pid**. 2191 * 2192 * u64 bpf_get_current_uid_gid(void) 2193 * Description 2194 * Get the current uid and gid. 2195 * Return 2196 * A 64-bit integer containing the current GID and UID, and 2197 * created as such: *current_gid* **<< 32 \|** *current_uid*. 2198 * 2199 * long bpf_get_current_comm(void *buf, u32 size_of_buf) 2200 * Description 2201 * Copy the **comm** attribute of the current task into *buf* of 2202 * *size_of_buf*. The **comm** attribute contains the name of 2203 * the executable (excluding the path) for the current task. The 2204 * *size_of_buf* must be strictly positive. On success, the 2205 * helper makes sure that the *buf* is NUL-terminated. On failure, 2206 * it is filled with zeroes. 2207 * Return 2208 * 0 on success, or a negative error in case of failure. 2209 * 2210 * u32 bpf_get_cgroup_classid(struct sk_buff *skb) 2211 * Description 2212 * Retrieve the classid for the current task, i.e. for the net_cls 2213 * cgroup to which *skb* belongs. 2214 * 2215 * This helper can be used on TC egress path, but not on ingress. 2216 * 2217 * The net_cls cgroup provides an interface to tag network packets 2218 * based on a user-provided identifier for all traffic coming from 2219 * the tasks belonging to the related cgroup. See also the related 2220 * kernel documentation, available from the Linux sources in file 2221 * *Documentation/admin-guide/cgroup-v1/net_cls.rst*. 2222 * 2223 * The Linux kernel has two versions for cgroups: there are 2224 * cgroups v1 and cgroups v2. Both are available to users, who can 2225 * use a mixture of them, but note that the net_cls cgroup is for 2226 * cgroup v1 only. This makes it incompatible with BPF programs 2227 * run on cgroups, which is a cgroup-v2-only feature (a socket can 2228 * only hold data for one version of cgroups at a time). 2229 * 2230 * This helper is only available is the kernel was compiled with 2231 * the **CONFIG_CGROUP_NET_CLASSID** configuration option set to 2232 * "**y**" or to "**m**". 2233 * Return 2234 * The classid, or 0 for the default unconfigured classid. 2235 * 2236 * long bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) 2237 * Description 2238 * Push a *vlan_tci* (VLAN tag control information) of protocol 2239 * *vlan_proto* to the packet associated to *skb*, then update 2240 * the checksum. Note that if *vlan_proto* is different from 2241 * **ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to 2242 * be **ETH_P_8021Q**. 2243 * 2244 * A call to this helper is susceptible to change the underlying 2245 * packet buffer. Therefore, at load time, all checks on pointers 2246 * previously done by the verifier are invalidated and must be 2247 * performed again, if the helper is used in combination with 2248 * direct packet access. 2249 * Return 2250 * 0 on success, or a negative error in case of failure. 2251 * 2252 * long bpf_skb_vlan_pop(struct sk_buff *skb) 2253 * Description 2254 * Pop a VLAN header from the packet associated to *skb*. 2255 * 2256 * A call to this helper is susceptible to change the underlying 2257 * packet buffer. Therefore, at load time, all checks on pointers 2258 * previously done by the verifier are invalidated and must be 2259 * performed again, if the helper is used in combination with 2260 * direct packet access. 2261 * Return 2262 * 0 on success, or a negative error in case of failure. 2263 * 2264 * long bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) 2265 * Description 2266 * Get tunnel metadata. This helper takes a pointer *key* to an 2267 * empty **struct bpf_tunnel_key** of **size**, that will be 2268 * filled with tunnel metadata for the packet associated to *skb*. 2269 * The *flags* can be set to **BPF_F_TUNINFO_IPV6**, which 2270 * indicates that the tunnel is based on IPv6 protocol instead of 2271 * IPv4. 2272 * 2273 * The **struct bpf_tunnel_key** is an object that generalizes the 2274 * principal parameters used by various tunneling protocols into a 2275 * single struct. This way, it can be used to easily make a 2276 * decision based on the contents of the encapsulation header, 2277 * "summarized" in this struct. In particular, it holds the IP 2278 * address of the remote end (IPv4 or IPv6, depending on the case) 2279 * in *key*\ **->remote_ipv4** or *key*\ **->remote_ipv6**. Also, 2280 * this struct exposes the *key*\ **->tunnel_id**, which is 2281 * generally mapped to a VNI (Virtual Network Identifier), making 2282 * it programmable together with the **bpf_skb_set_tunnel_key**\ 2283 * () helper. 2284 * 2285 * Let's imagine that the following code is part of a program 2286 * attached to the TC ingress interface, on one end of a GRE 2287 * tunnel, and is supposed to filter out all messages coming from 2288 * remote ends with IPv4 address other than 10.0.0.1: 2289 * 2290 * :: 2291 * 2292 * int ret; 2293 * struct bpf_tunnel_key key = {}; 2294 * 2295 * ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0); 2296 * if (ret < 0) 2297 * return TC_ACT_SHOT; // drop packet 2298 * 2299 * if (key.remote_ipv4 != 0x0a000001) 2300 * return TC_ACT_SHOT; // drop packet 2301 * 2302 * return TC_ACT_OK; // accept packet 2303 * 2304 * This interface can also be used with all encapsulation devices 2305 * that can operate in "collect metadata" mode: instead of having 2306 * one network device per specific configuration, the "collect 2307 * metadata" mode only requires a single device where the 2308 * configuration can be extracted from this helper. 2309 * 2310 * This can be used together with various tunnels such as VXLan, 2311 * Geneve, GRE or IP in IP (IPIP). 2312 * Return 2313 * 0 on success, or a negative error in case of failure. 2314 * 2315 * long bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) 2316 * Description 2317 * Populate tunnel metadata for packet associated to *skb.* The 2318 * tunnel metadata is set to the contents of *key*, of *size*. The 2319 * *flags* can be set to a combination of the following values: 2320 * 2321 * **BPF_F_TUNINFO_IPV6** 2322 * Indicate that the tunnel is based on IPv6 protocol 2323 * instead of IPv4. 2324 * **BPF_F_ZERO_CSUM_TX** 2325 * For IPv4 packets, add a flag to tunnel metadata 2326 * indicating that checksum computation should be skipped 2327 * and checksum set to zeroes. 2328 * **BPF_F_DONT_FRAGMENT** 2329 * Add a flag to tunnel metadata indicating that the 2330 * packet should not be fragmented. 2331 * **BPF_F_SEQ_NUMBER** 2332 * Add a flag to tunnel metadata indicating that a 2333 * sequence number should be added to tunnel header before 2334 * sending the packet. This flag was added for GRE 2335 * encapsulation, but might be used with other protocols 2336 * as well in the future. 2337 * **BPF_F_NO_TUNNEL_KEY** 2338 * Add a flag to tunnel metadata indicating that no tunnel 2339 * key should be set in the resulting tunnel header. 2340 * 2341 * Here is a typical usage on the transmit path: 2342 * 2343 * :: 2344 * 2345 * struct bpf_tunnel_key key; 2346 * populate key ... 2347 * bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0); 2348 * bpf_clone_redirect(skb, vxlan_dev_ifindex, 0); 2349 * 2350 * See also the description of the **bpf_skb_get_tunnel_key**\ () 2351 * helper for additional information. 2352 * Return 2353 * 0 on success, or a negative error in case of failure. 2354 * 2355 * u64 bpf_perf_event_read(struct bpf_map *map, u64 flags) 2356 * Description 2357 * Read the value of a perf event counter. This helper relies on a 2358 * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of 2359 * the perf event counter is selected when *map* is updated with 2360 * perf event file descriptors. The *map* is an array whose size 2361 * is the number of available CPUs, and each cell contains a value 2362 * relative to one CPU. The value to retrieve is indicated by 2363 * *flags*, that contains the index of the CPU to look up, masked 2364 * with **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to 2365 * **BPF_F_CURRENT_CPU** to indicate that the value for the 2366 * current CPU should be retrieved. 2367 * 2368 * Note that before Linux 4.13, only hardware perf event can be 2369 * retrieved. 2370 * 2371 * Also, be aware that the newer helper 2372 * **bpf_perf_event_read_value**\ () is recommended over 2373 * **bpf_perf_event_read**\ () in general. The latter has some ABI 2374 * quirks where error and counter value are used as a return code 2375 * (which is wrong to do since ranges may overlap). This issue is 2376 * fixed with **bpf_perf_event_read_value**\ (), which at the same 2377 * time provides more features over the **bpf_perf_event_read**\ 2378 * () interface. Please refer to the description of 2379 * **bpf_perf_event_read_value**\ () for details. 2380 * Return 2381 * The value of the perf event counter read from the map, or a 2382 * negative error code in case of failure. 2383 * 2384 * long bpf_redirect(u32 ifindex, u64 flags) 2385 * Description 2386 * Redirect the packet to another net device of index *ifindex*. 2387 * This helper is somewhat similar to **bpf_clone_redirect**\ 2388 * (), except that the packet is not cloned, which provides 2389 * increased performance. 2390 * 2391 * Except for XDP, both ingress and egress interfaces can be used 2392 * for redirection. The **BPF_F_INGRESS** value in *flags* is used 2393 * to make the distinction (ingress path is selected if the flag 2394 * is present, egress path otherwise). Currently, XDP only 2395 * supports redirection to the egress interface, and accepts no 2396 * flag at all. 2397 * 2398 * The same effect can also be attained with the more generic 2399 * **bpf_redirect_map**\ (), which uses a BPF map to store the 2400 * redirect target instead of providing it directly to the helper. 2401 * Return 2402 * For XDP, the helper returns **XDP_REDIRECT** on success or 2403 * **XDP_ABORTED** on error. For other program types, the values 2404 * are **TC_ACT_REDIRECT** on success or **TC_ACT_SHOT** on 2405 * error. 2406 * 2407 * u32 bpf_get_route_realm(struct sk_buff *skb) 2408 * Description 2409 * Retrieve the realm or the route, that is to say the 2410 * **tclassid** field of the destination for the *skb*. The 2411 * identifier retrieved is a user-provided tag, similar to the 2412 * one used with the net_cls cgroup (see description for 2413 * **bpf_get_cgroup_classid**\ () helper), but here this tag is 2414 * held by a route (a destination entry), not by a task. 2415 * 2416 * Retrieving this identifier works with the clsact TC egress hook 2417 * (see also **tc-bpf(8)**), or alternatively on conventional 2418 * classful egress qdiscs, but not on TC ingress path. In case of 2419 * clsact TC egress hook, this has the advantage that, internally, 2420 * the destination entry has not been dropped yet in the transmit 2421 * path. Therefore, the destination entry does not need to be 2422 * artificially held via **netif_keep_dst**\ () for a classful 2423 * qdisc until the *skb* is freed. 2424 * 2425 * This helper is available only if the kernel was compiled with 2426 * **CONFIG_IP_ROUTE_CLASSID** configuration option. 2427 * Return 2428 * The realm of the route for the packet associated to *skb*, or 0 2429 * if none was found. 2430 * 2431 * long bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) 2432 * Description 2433 * Write raw *data* blob into a special BPF perf event held by 2434 * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf 2435 * event must have the following attributes: **PERF_SAMPLE_RAW** 2436 * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and 2437 * **PERF_COUNT_SW_BPF_OUTPUT** as **config**. 2438 * 2439 * The *flags* are used to indicate the index in *map* for which 2440 * the value must be put, masked with **BPF_F_INDEX_MASK**. 2441 * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** 2442 * to indicate that the index of the current CPU core should be 2443 * used. 2444 * 2445 * The value to write, of *size*, is passed through eBPF stack and 2446 * pointed by *data*. 2447 * 2448 * The context of the program *ctx* needs also be passed to the 2449 * helper. 2450 * 2451 * On user space, a program willing to read the values needs to 2452 * call **perf_event_open**\ () on the perf event (either for 2453 * one or for all CPUs) and to store the file descriptor into the 2454 * *map*. This must be done before the eBPF program can send data 2455 * into it. An example is available in file 2456 * *samples/bpf/trace_output_user.c* in the Linux kernel source 2457 * tree (the eBPF program counterpart is in 2458 * *samples/bpf/trace_output.bpf.c*). 2459 * 2460 * **bpf_perf_event_output**\ () achieves better performance 2461 * than **bpf_trace_printk**\ () for sharing data with user 2462 * space, and is much better suitable for streaming data from eBPF 2463 * programs. 2464 * 2465 * Note that this helper is not restricted to tracing use cases 2466 * and can be used with programs attached to TC or XDP as well, 2467 * where it allows for passing data to user space listeners. Data 2468 * can be: 2469 * 2470 * * Only custom structs, 2471 * * Only the packet payload, or 2472 * * A combination of both. 2473 * Return 2474 * 0 on success, or a negative error in case of failure. 2475 * 2476 * long bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len) 2477 * Description 2478 * This helper was provided as an easy way to load data from a 2479 * packet. It can be used to load *len* bytes from *offset* from 2480 * the packet associated to *skb*, into the buffer pointed by 2481 * *to*. 2482 * 2483 * Since Linux 4.7, usage of this helper has mostly been replaced 2484 * by "direct packet access", enabling packet data to be 2485 * manipulated with *skb*\ **->data** and *skb*\ **->data_end** 2486 * pointing respectively to the first byte of packet data and to 2487 * the byte after the last byte of packet data. However, it 2488 * remains useful if one wishes to read large quantities of data 2489 * at once from a packet into the eBPF stack. 2490 * Return 2491 * 0 on success, or a negative error in case of failure. 2492 * 2493 * long bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags) 2494 * Description 2495 * Walk a user or a kernel stack and return its id. To achieve 2496 * this, the helper needs *ctx*, which is a pointer to the context 2497 * on which the tracing program is executed, and a pointer to a 2498 * *map* of type **BPF_MAP_TYPE_STACK_TRACE**. 2499 * 2500 * The last argument, *flags*, holds the number of stack frames to 2501 * skip (from 0 to 255), masked with 2502 * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set 2503 * a combination of the following flags: 2504 * 2505 * **BPF_F_USER_STACK** 2506 * Collect a user space stack instead of a kernel stack. 2507 * **BPF_F_FAST_STACK_CMP** 2508 * Compare stacks by hash only. 2509 * **BPF_F_REUSE_STACKID** 2510 * If two different stacks hash into the same *stackid*, 2511 * discard the old one. 2512 * 2513 * The stack id retrieved is a 32 bit long integer handle which 2514 * can be further combined with other data (including other stack 2515 * ids) and used as a key into maps. This can be useful for 2516 * generating a variety of graphs (such as flame graphs or off-cpu 2517 * graphs). 2518 * 2519 * For walking a stack, this helper is an improvement over 2520 * **bpf_probe_read**\ (), which can be used with unrolled loops 2521 * but is not efficient and consumes a lot of eBPF instructions. 2522 * Instead, **bpf_get_stackid**\ () can collect up to 2523 * **PERF_MAX_STACK_DEPTH** both kernel and user frames. Note that 2524 * this limit can be controlled with the **sysctl** program, and 2525 * that it should be manually increased in order to profile long 2526 * user stacks (such as stacks for Java programs). To do so, use: 2527 * 2528 * :: 2529 * 2530 * # sysctl kernel.perf_event_max_stack=<new value> 2531 * Return 2532 * The positive or null stack id on success, or a negative error 2533 * in case of failure. 2534 * 2535 * s64 bpf_csum_diff(__be32 *from, u32 from_size, __be32 *to, u32 to_size, __wsum seed) 2536 * Description 2537 * Compute a checksum difference, from the raw buffer pointed by 2538 * *from*, of length *from_size* (that must be a multiple of 4), 2539 * towards the raw buffer pointed by *to*, of size *to_size* 2540 * (same remark). An optional *seed* can be added to the value 2541 * (this can be cascaded, the seed may come from a previous call 2542 * to the helper). 2543 * 2544 * This is flexible enough to be used in several ways: 2545 * 2546 * * With *from_size* == 0, *to_size* > 0 and *seed* set to 2547 * checksum, it can be used when pushing new data. 2548 * * With *from_size* > 0, *to_size* == 0 and *seed* set to 2549 * checksum, it can be used when removing data from a packet. 2550 * * With *from_size* > 0, *to_size* > 0 and *seed* set to 0, it 2551 * can be used to compute a diff. Note that *from_size* and 2552 * *to_size* do not need to be equal. 2553 * 2554 * This helper can be used in combination with 2555 * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ (), to 2556 * which one can feed in the difference computed with 2557 * **bpf_csum_diff**\ (). 2558 * Return 2559 * The checksum result, or a negative error code in case of 2560 * failure. 2561 * 2562 * long bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size) 2563 * Description 2564 * Retrieve tunnel options metadata for the packet associated to 2565 * *skb*, and store the raw tunnel option data to the buffer *opt* 2566 * of *size*. 2567 * 2568 * This helper can be used with encapsulation devices that can 2569 * operate in "collect metadata" mode (please refer to the related 2570 * note in the description of **bpf_skb_get_tunnel_key**\ () for 2571 * more details). A particular example where this can be used is 2572 * in combination with the Geneve encapsulation protocol, where it 2573 * allows for pushing (with **bpf_skb_get_tunnel_opt**\ () helper) 2574 * and retrieving arbitrary TLVs (Type-Length-Value headers) from 2575 * the eBPF program. This allows for full customization of these 2576 * headers. 2577 * Return 2578 * The size of the option data retrieved. 2579 * 2580 * long bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size) 2581 * Description 2582 * Set tunnel options metadata for the packet associated to *skb* 2583 * to the option data contained in the raw buffer *opt* of *size*. 2584 * 2585 * See also the description of the **bpf_skb_get_tunnel_opt**\ () 2586 * helper for additional information. 2587 * Return 2588 * 0 on success, or a negative error in case of failure. 2589 * 2590 * long bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags) 2591 * Description 2592 * Change the protocol of the *skb* to *proto*. Currently 2593 * supported are transition from IPv4 to IPv6, and from IPv6 to 2594 * IPv4. The helper takes care of the groundwork for the 2595 * transition, including resizing the socket buffer. The eBPF 2596 * program is expected to fill the new headers, if any, via 2597 * **skb_store_bytes**\ () and to recompute the checksums with 2598 * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ 2599 * (). The main case for this helper is to perform NAT64 2600 * operations out of an eBPF program. 2601 * 2602 * Internally, the GSO type is marked as dodgy so that headers are 2603 * checked and segments are recalculated by the GSO/GRO engine. 2604 * The size for GSO target is adapted as well. 2605 * 2606 * All values for *flags* are reserved for future usage, and must 2607 * be left at zero. 2608 * 2609 * A call to this helper is susceptible to change the underlying 2610 * packet buffer. Therefore, at load time, all checks on pointers 2611 * previously done by the verifier are invalidated and must be 2612 * performed again, if the helper is used in combination with 2613 * direct packet access. 2614 * Return 2615 * 0 on success, or a negative error in case of failure. 2616 * 2617 * long bpf_skb_change_type(struct sk_buff *skb, u32 type) 2618 * Description 2619 * Change the packet type for the packet associated to *skb*. This 2620 * comes down to setting *skb*\ **->pkt_type** to *type*, except 2621 * the eBPF program does not have a write access to *skb*\ 2622 * **->pkt_type** beside this helper. Using a helper here allows 2623 * for graceful handling of errors. 2624 * 2625 * The major use case is to change incoming *skb*s to 2626 * **PACKET_HOST** in a programmatic way instead of having to 2627 * recirculate via **redirect**\ (..., **BPF_F_INGRESS**), for 2628 * example. 2629 * 2630 * Note that *type* only allows certain values. At this time, they 2631 * are: 2632 * 2633 * **PACKET_HOST** 2634 * Packet is for us. 2635 * **PACKET_BROADCAST** 2636 * Send packet to all. 2637 * **PACKET_MULTICAST** 2638 * Send packet to group. 2639 * **PACKET_OTHERHOST** 2640 * Send packet to someone else. 2641 * Return 2642 * 0 on success, or a negative error in case of failure. 2643 * 2644 * long bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index) 2645 * Description 2646 * Check whether *skb* is a descendant of the cgroup2 held by 2647 * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*. 2648 * Return 2649 * The return value depends on the result of the test, and can be: 2650 * 2651 * * 0, if the *skb* failed the cgroup2 descendant test. 2652 * * 1, if the *skb* succeeded the cgroup2 descendant test. 2653 * * A negative error code, if an error occurred. 2654 * 2655 * u32 bpf_get_hash_recalc(struct sk_buff *skb) 2656 * Description 2657 * Retrieve the hash of the packet, *skb*\ **->hash**. If it is 2658 * not set, in particular if the hash was cleared due to mangling, 2659 * recompute this hash. Later accesses to the hash can be done 2660 * directly with *skb*\ **->hash**. 2661 * 2662 * Calling **bpf_set_hash_invalid**\ (), changing a packet 2663 * prototype with **bpf_skb_change_proto**\ (), or calling 2664 * **bpf_skb_store_bytes**\ () with the 2665 * **BPF_F_INVALIDATE_HASH** are actions susceptible to clear 2666 * the hash and to trigger a new computation for the next call to 2667 * **bpf_get_hash_recalc**\ (). 2668 * Return 2669 * The 32-bit hash. 2670 * 2671 * u64 bpf_get_current_task(void) 2672 * Description 2673 * Get the current task. 2674 * Return 2675 * A pointer to the current task struct. 2676 * 2677 * long bpf_probe_write_user(void *dst, const void *src, u32 len) 2678 * Description 2679 * Attempt in a safe way to write *len* bytes from the buffer 2680 * *src* to *dst* in memory. It only works for threads that are in 2681 * user context, and *dst* must be a valid user space address. 2682 * 2683 * This helper should not be used to implement any kind of 2684 * security mechanism because of TOC-TOU attacks, but rather to 2685 * debug, divert, and manipulate execution of semi-cooperative 2686 * processes. 2687 * 2688 * Keep in mind that this feature is meant for experiments, and it 2689 * has a risk of crashing the system and running programs. 2690 * Therefore, when an eBPF program using this helper is attached, 2691 * a warning including PID and process name is printed to kernel 2692 * logs. 2693 * Return 2694 * 0 on success, or a negative error in case of failure. 2695 * 2696 * long bpf_current_task_under_cgroup(struct bpf_map *map, u32 index) 2697 * Description 2698 * Check whether the probe is being run is the context of a given 2699 * subset of the cgroup2 hierarchy. The cgroup2 to test is held by 2700 * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*. 2701 * Return 2702 * The return value depends on the result of the test, and can be: 2703 * 2704 * * 1, if current task belongs to the cgroup2. 2705 * * 0, if current task does not belong to the cgroup2. 2706 * * A negative error code, if an error occurred. 2707 * 2708 * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags) 2709 * Description 2710 * Resize (trim or grow) the packet associated to *skb* to the 2711 * new *len*. The *flags* are reserved for future usage, and must 2712 * be left at zero. 2713 * 2714 * The basic idea is that the helper performs the needed work to 2715 * change the size of the packet, then the eBPF program rewrites 2716 * the rest via helpers like **bpf_skb_store_bytes**\ (), 2717 * **bpf_l3_csum_replace**\ (), **bpf_l3_csum_replace**\ () 2718 * and others. This helper is a slow path utility intended for 2719 * replies with control messages. And because it is targeted for 2720 * slow path, the helper itself can afford to be slow: it 2721 * implicitly linearizes, unclones and drops offloads from the 2722 * *skb*. 2723 * 2724 * A call to this helper is susceptible to change the underlying 2725 * packet buffer. Therefore, at load time, all checks on pointers 2726 * previously done by the verifier are invalidated and must be 2727 * performed again, if the helper is used in combination with 2728 * direct packet access. 2729 * Return 2730 * 0 on success, or a negative error in case of failure. 2731 * 2732 * long bpf_skb_pull_data(struct sk_buff *skb, u32 len) 2733 * Description 2734 * Pull in non-linear data in case the *skb* is non-linear and not 2735 * all of *len* are part of the linear section. Make *len* bytes 2736 * from *skb* readable and writable. If a zero value is passed for 2737 * *len*, then all bytes in the linear part of *skb* will be made 2738 * readable and writable. 2739 * 2740 * This helper is only needed for reading and writing with direct 2741 * packet access. 2742 * 2743 * For direct packet access, testing that offsets to access 2744 * are within packet boundaries (test on *skb*\ **->data_end**) is 2745 * susceptible to fail if offsets are invalid, or if the requested 2746 * data is in non-linear parts of the *skb*. On failure the 2747 * program can just bail out, or in the case of a non-linear 2748 * buffer, use a helper to make the data available. The 2749 * **bpf_skb_load_bytes**\ () helper is a first solution to access 2750 * the data. Another one consists in using **bpf_skb_pull_data** 2751 * to pull in once the non-linear parts, then retesting and 2752 * eventually access the data. 2753 * 2754 * At the same time, this also makes sure the *skb* is uncloned, 2755 * which is a necessary condition for direct write. As this needs 2756 * to be an invariant for the write part only, the verifier 2757 * detects writes and adds a prologue that is calling 2758 * **bpf_skb_pull_data()** to effectively unclone the *skb* from 2759 * the very beginning in case it is indeed cloned. 2760 * 2761 * A call to this helper is susceptible to change the underlying 2762 * packet buffer. Therefore, at load time, all checks on pointers 2763 * previously done by the verifier are invalidated and must be 2764 * performed again, if the helper is used in combination with 2765 * direct packet access. 2766 * Return 2767 * 0 on success, or a negative error in case of failure. 2768 * 2769 * s64 bpf_csum_update(struct sk_buff *skb, __wsum csum) 2770 * Description 2771 * Add the checksum *csum* into *skb*\ **->csum** in case the 2772 * driver has supplied a checksum for the entire packet into that 2773 * field. Return an error otherwise. This helper is intended to be 2774 * used in combination with **bpf_csum_diff**\ (), in particular 2775 * when the checksum needs to be updated after data has been 2776 * written into the packet through direct packet access. 2777 * Return 2778 * The checksum on success, or a negative error code in case of 2779 * failure. 2780 * 2781 * void bpf_set_hash_invalid(struct sk_buff *skb) 2782 * Description 2783 * Invalidate the current *skb*\ **->hash**. It can be used after 2784 * mangling on headers through direct packet access, in order to 2785 * indicate that the hash is outdated and to trigger a 2786 * recalculation the next time the kernel tries to access this 2787 * hash or when the **bpf_get_hash_recalc**\ () helper is called. 2788 * Return 2789 * void. 2790 * 2791 * long bpf_get_numa_node_id(void) 2792 * Description 2793 * Return the id of the current NUMA node. The primary use case 2794 * for this helper is the selection of sockets for the local NUMA 2795 * node, when the program is attached to sockets using the 2796 * **SO_ATTACH_REUSEPORT_EBPF** option (see also **socket(7)**), 2797 * but the helper is also available to other eBPF program types, 2798 * similarly to **bpf_get_smp_processor_id**\ (). 2799 * Return 2800 * The id of current NUMA node. 2801 * 2802 * long bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags) 2803 * Description 2804 * Grows headroom of packet associated to *skb* and adjusts the 2805 * offset of the MAC header accordingly, adding *len* bytes of 2806 * space. It automatically extends and reallocates memory as 2807 * required. 2808 * 2809 * This helper can be used on a layer 3 *skb* to push a MAC header 2810 * for redirection into a layer 2 device. 2811 * 2812 * All values for *flags* are reserved for future usage, and must 2813 * be left at zero. 2814 * 2815 * A call to this helper is susceptible to change the underlying 2816 * packet buffer. Therefore, at load time, all checks on pointers 2817 * previously done by the verifier are invalidated and must be 2818 * performed again, if the helper is used in combination with 2819 * direct packet access. 2820 * Return 2821 * 0 on success, or a negative error in case of failure. 2822 * 2823 * long bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta) 2824 * Description 2825 * Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that 2826 * it is possible to use a negative value for *delta*. This helper 2827 * can be used to prepare the packet for pushing or popping 2828 * headers. 2829 * 2830 * A call to this helper is susceptible to change the underlying 2831 * packet buffer. Therefore, at load time, all checks on pointers 2832 * previously done by the verifier are invalidated and must be 2833 * performed again, if the helper is used in combination with 2834 * direct packet access. 2835 * Return 2836 * 0 on success, or a negative error in case of failure. 2837 * 2838 * long bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr) 2839 * Description 2840 * Copy a NUL terminated string from an unsafe kernel address 2841 * *unsafe_ptr* to *dst*. See **bpf_probe_read_kernel_str**\ () for 2842 * more details. 2843 * 2844 * Generally, use **bpf_probe_read_user_str**\ () or 2845 * **bpf_probe_read_kernel_str**\ () instead. 2846 * Return 2847 * On success, the strictly positive length of the string, 2848 * including the trailing NUL character. On error, a negative 2849 * value. 2850 * 2851 * u64 bpf_get_socket_cookie(struct sk_buff *skb) 2852 * Description 2853 * If the **struct sk_buff** pointed by *skb* has a known socket, 2854 * retrieve the cookie (generated by the kernel) of this socket. 2855 * If no cookie has been set yet, generate a new cookie. Once 2856 * generated, the socket cookie remains stable for the life of the 2857 * socket. This helper can be useful for monitoring per socket 2858 * networking traffic statistics as it provides a global socket 2859 * identifier that can be assumed unique. 2860 * Return 2861 * A 8-byte long unique number on success, or 0 if the socket 2862 * field is missing inside *skb*. 2863 * 2864 * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx) 2865 * Description 2866 * Equivalent to bpf_get_socket_cookie() helper that accepts 2867 * *skb*, but gets socket from **struct bpf_sock_addr** context. 2868 * Return 2869 * A 8-byte long unique number. 2870 * 2871 * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx) 2872 * Description 2873 * Equivalent to **bpf_get_socket_cookie**\ () helper that accepts 2874 * *skb*, but gets socket from **struct bpf_sock_ops** context. 2875 * Return 2876 * A 8-byte long unique number. 2877 * 2878 * u64 bpf_get_socket_cookie(struct sock *sk) 2879 * Description 2880 * Equivalent to **bpf_get_socket_cookie**\ () helper that accepts 2881 * *sk*, but gets socket from a BTF **struct sock**. This helper 2882 * also works for sleepable programs. 2883 * Return 2884 * A 8-byte long unique number or 0 if *sk* is NULL. 2885 * 2886 * u32 bpf_get_socket_uid(struct sk_buff *skb) 2887 * Description 2888 * Get the owner UID of the socked associated to *skb*. 2889 * Return 2890 * The owner UID of the socket associated to *skb*. If the socket 2891 * is **NULL**, or if it is not a full socket (i.e. if it is a 2892 * time-wait or a request socket instead), **overflowuid** value 2893 * is returned (note that **overflowuid** might also be the actual 2894 * UID value for the socket). 2895 * 2896 * long bpf_set_hash(struct sk_buff *skb, u32 hash) 2897 * Description 2898 * Set the full hash for *skb* (set the field *skb*\ **->hash**) 2899 * to value *hash*. 2900 * Return 2901 * 0 2902 * 2903 * long bpf_setsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) 2904 * Description 2905 * Emulate a call to **setsockopt()** on the socket associated to 2906 * *bpf_socket*, which must be a full socket. The *level* at 2907 * which the option resides and the name *optname* of the option 2908 * must be specified, see **setsockopt(2)** for more information. 2909 * The option value of length *optlen* is pointed by *optval*. 2910 * 2911 * *bpf_socket* should be one of the following: 2912 * 2913 * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**. 2914 * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**, 2915 * **BPF_CGROUP_INET6_CONNECT** and **BPF_CGROUP_UNIX_CONNECT**. 2916 * 2917 * This helper actually implements a subset of **setsockopt()**. 2918 * It supports the following *level*\ s: 2919 * 2920 * * **SOL_SOCKET**, which supports the following *optname*\ s: 2921 * **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**, 2922 * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**, 2923 * **SO_BINDTODEVICE**, **SO_KEEPALIVE**, **SO_REUSEADDR**, 2924 * **SO_REUSEPORT**, **SO_BINDTOIFINDEX**, **SO_TXREHASH**. 2925 * * **IPPROTO_TCP**, which supports the following *optname*\ s: 2926 * **TCP_CONGESTION**, **TCP_BPF_IW**, 2927 * **TCP_BPF_SNDCWND_CLAMP**, **TCP_SAVE_SYN**, 2928 * **TCP_KEEPIDLE**, **TCP_KEEPINTVL**, **TCP_KEEPCNT**, 2929 * **TCP_SYNCNT**, **TCP_USER_TIMEOUT**, **TCP_NOTSENT_LOWAT**, 2930 * **TCP_NODELAY**, **TCP_MAXSEG**, **TCP_WINDOW_CLAMP**, 2931 * **TCP_THIN_LINEAR_TIMEOUTS**, **TCP_BPF_DELACK_MAX**, 2932 * **TCP_BPF_RTO_MIN**, **TCP_BPF_SOCK_OPS_CB_FLAGS**. 2933 * * **IPPROTO_IP**, which supports *optname* **IP_TOS**. 2934 * * **IPPROTO_IPV6**, which supports the following *optname*\ s: 2935 * **IPV6_TCLASS**, **IPV6_AUTOFLOWLABEL**. 2936 * Return 2937 * 0 on success, or a negative error in case of failure. 2938 * 2939 * long bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags) 2940 * Description 2941 * Grow or shrink the room for data in the packet associated to 2942 * *skb* by *len_diff*, and according to the selected *mode*. 2943 * 2944 * By default, the helper will reset any offloaded checksum 2945 * indicator of the skb to CHECKSUM_NONE. This can be avoided 2946 * by the following flag: 2947 * 2948 * * **BPF_F_ADJ_ROOM_NO_CSUM_RESET**: Do not reset offloaded 2949 * checksum data of the skb to CHECKSUM_NONE. 2950 * 2951 * There are two supported modes at this time: 2952 * 2953 * * **BPF_ADJ_ROOM_MAC**: Adjust room at the mac layer 2954 * (room space is added or removed between the layer 2 and 2955 * layer 3 headers). 2956 * 2957 * * **BPF_ADJ_ROOM_NET**: Adjust room at the network layer 2958 * (room space is added or removed between the layer 3 and 2959 * layer 4 headers). 2960 * 2961 * The following flags are supported at this time: 2962 * 2963 * * **BPF_F_ADJ_ROOM_FIXED_GSO**: Do not adjust gso_size. 2964 * Adjusting mss in this way is not allowed for datagrams. 2965 * 2966 * * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV4**, 2967 * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV6**: 2968 * Any new space is reserved to hold a tunnel header. 2969 * Configure skb offsets and other fields accordingly. 2970 * 2971 * * **BPF_F_ADJ_ROOM_ENCAP_L4_GRE**, 2972 * **BPF_F_ADJ_ROOM_ENCAP_L4_UDP**: 2973 * Use with ENCAP_L3 flags to further specify the tunnel type. 2974 * 2975 * * **BPF_F_ADJ_ROOM_ENCAP_L2**\ (*len*): 2976 * Use with ENCAP_L3/L4 flags to further specify the tunnel 2977 * type; *len* is the length of the inner MAC header. 2978 * 2979 * * **BPF_F_ADJ_ROOM_ENCAP_L2_ETH**: 2980 * Use with BPF_F_ADJ_ROOM_ENCAP_L2 flag to further specify the 2981 * L2 type as Ethernet. 2982 * 2983 * * **BPF_F_ADJ_ROOM_DECAP_L3_IPV4**, 2984 * **BPF_F_ADJ_ROOM_DECAP_L3_IPV6**: 2985 * Indicate the new IP header version after decapsulating the outer 2986 * IP header. Used when the inner and outer IP versions are different. 2987 * 2988 * A call to this helper is susceptible to change the underlying 2989 * packet buffer. Therefore, at load time, all checks on pointers 2990 * previously done by the verifier are invalidated and must be 2991 * performed again, if the helper is used in combination with 2992 * direct packet access. 2993 * Return 2994 * 0 on success, or a negative error in case of failure. 2995 * 2996 * long bpf_redirect_map(struct bpf_map *map, u64 key, u64 flags) 2997 * Description 2998 * Redirect the packet to the endpoint referenced by *map* at 2999 * index *key*. Depending on its type, this *map* can contain 3000 * references to net devices (for forwarding packets through other 3001 * ports), or to CPUs (for redirecting XDP frames to another CPU; 3002 * but this is only implemented for native XDP (with driver 3003 * support) as of this writing). 3004 * 3005 * The lower two bits of *flags* are used as the return code if 3006 * the map lookup fails. This is so that the return value can be 3007 * one of the XDP program return codes up to **XDP_TX**, as chosen 3008 * by the caller. The higher bits of *flags* can be set to 3009 * BPF_F_BROADCAST or BPF_F_EXCLUDE_INGRESS as defined below. 3010 * 3011 * With BPF_F_BROADCAST the packet will be broadcasted to all the 3012 * interfaces in the map, with BPF_F_EXCLUDE_INGRESS the ingress 3013 * interface will be excluded when do broadcasting. 3014 * 3015 * See also **bpf_redirect**\ (), which only supports redirecting 3016 * to an ifindex, but doesn't require a map to do so. 3017 * Return 3018 * **XDP_REDIRECT** on success, or the value of the two lower bits 3019 * of the *flags* argument on error. 3020 * 3021 * long bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags) 3022 * Description 3023 * Redirect the packet to the socket referenced by *map* (of type 3024 * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and 3025 * egress interfaces can be used for redirection. The 3026 * **BPF_F_INGRESS** value in *flags* is used to make the 3027 * distinction (ingress path is selected if the flag is present, 3028 * egress path otherwise). This is the only flag supported for now. 3029 * Return 3030 * **SK_PASS** on success, or **SK_DROP** on error. 3031 * 3032 * long bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) 3033 * Description 3034 * Add an entry to, or update a *map* referencing sockets. The 3035 * *skops* is used as a new value for the entry associated to 3036 * *key*. *flags* is one of: 3037 * 3038 * **BPF_NOEXIST** 3039 * The entry for *key* must not exist in the map. 3040 * **BPF_EXIST** 3041 * The entry for *key* must already exist in the map. 3042 * **BPF_ANY** 3043 * No condition on the existence of the entry for *key*. 3044 * 3045 * If the *map* has eBPF programs (parser and verdict), those will 3046 * be inherited by the socket being added. If the socket is 3047 * already attached to eBPF programs, this results in an error. 3048 * Return 3049 * 0 on success, or a negative error in case of failure. 3050 * 3051 * long bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta) 3052 * Description 3053 * Adjust the address pointed by *xdp_md*\ **->data_meta** by 3054 * *delta* (which can be positive or negative). Note that this 3055 * operation modifies the address stored in *xdp_md*\ **->data**, 3056 * so the latter must be loaded only after the helper has been 3057 * called. 3058 * 3059 * The use of *xdp_md*\ **->data_meta** is optional and programs 3060 * are not required to use it. The rationale is that when the 3061 * packet is processed with XDP (e.g. as DoS filter), it is 3062 * possible to push further meta data along with it before passing 3063 * to the stack, and to give the guarantee that an ingress eBPF 3064 * program attached as a TC classifier on the same device can pick 3065 * this up for further post-processing. Since TC works with socket 3066 * buffers, it remains possible to set from XDP the **mark** or 3067 * **priority** pointers, or other pointers for the socket buffer. 3068 * Having this scratch space generic and programmable allows for 3069 * more flexibility as the user is free to store whatever meta 3070 * data they need. 3071 * 3072 * A call to this helper is susceptible to change the underlying 3073 * packet buffer. Therefore, at load time, all checks on pointers 3074 * previously done by the verifier are invalidated and must be 3075 * performed again, if the helper is used in combination with 3076 * direct packet access. 3077 * Return 3078 * 0 on success, or a negative error in case of failure. 3079 * 3080 * long bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size) 3081 * Description 3082 * Read the value of a perf event counter, and store it into *buf* 3083 * of size *buf_size*. This helper relies on a *map* of type 3084 * **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of the perf event 3085 * counter is selected when *map* is updated with perf event file 3086 * descriptors. The *map* is an array whose size is the number of 3087 * available CPUs, and each cell contains a value relative to one 3088 * CPU. The value to retrieve is indicated by *flags*, that 3089 * contains the index of the CPU to look up, masked with 3090 * **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to 3091 * **BPF_F_CURRENT_CPU** to indicate that the value for the 3092 * current CPU should be retrieved. 3093 * 3094 * This helper behaves in a way close to 3095 * **bpf_perf_event_read**\ () helper, save that instead of 3096 * just returning the value observed, it fills the *buf* 3097 * structure. This allows for additional data to be retrieved: in 3098 * particular, the enabled and running times (in *buf*\ 3099 * **->enabled** and *buf*\ **->running**, respectively) are 3100 * copied. In general, **bpf_perf_event_read_value**\ () is 3101 * recommended over **bpf_perf_event_read**\ (), which has some 3102 * ABI issues and provides fewer functionalities. 3103 * 3104 * These values are interesting, because hardware PMU (Performance 3105 * Monitoring Unit) counters are limited resources. When there are 3106 * more PMU based perf events opened than available counters, 3107 * kernel will multiplex these events so each event gets certain 3108 * percentage (but not all) of the PMU time. In case that 3109 * multiplexing happens, the number of samples or counter value 3110 * will not reflect the case compared to when no multiplexing 3111 * occurs. This makes comparison between different runs difficult. 3112 * Typically, the counter value should be normalized before 3113 * comparing to other experiments. The usual normalization is done 3114 * as follows. 3115 * 3116 * :: 3117 * 3118 * normalized_counter = counter * t_enabled / t_running 3119 * 3120 * Where t_enabled is the time enabled for event and t_running is 3121 * the time running for event since last normalization. The 3122 * enabled and running times are accumulated since the perf event 3123 * open. To achieve scaling factor between two invocations of an 3124 * eBPF program, users can use CPU id as the key (which is 3125 * typical for perf array usage model) to remember the previous 3126 * value and do the calculation inside the eBPF program. 3127 * Return 3128 * 0 on success, or a negative error in case of failure. 3129 * 3130 * long bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size) 3131 * Description 3132 * For an eBPF program attached to a perf event, retrieve the 3133 * value of the event counter associated to *ctx* and store it in 3134 * the structure pointed by *buf* and of size *buf_size*. Enabled 3135 * and running times are also stored in the structure (see 3136 * description of helper **bpf_perf_event_read_value**\ () for 3137 * more details). 3138 * Return 3139 * 0 on success, or a negative error in case of failure. 3140 * 3141 * long bpf_getsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) 3142 * Description 3143 * Emulate a call to **getsockopt()** on the socket associated to 3144 * *bpf_socket*, which must be a full socket. The *level* at 3145 * which the option resides and the name *optname* of the option 3146 * must be specified, see **getsockopt(2)** for more information. 3147 * The retrieved value is stored in the structure pointed by 3148 * *opval* and of length *optlen*. 3149 * 3150 * *bpf_socket* should be one of the following: 3151 * 3152 * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**. 3153 * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**, 3154 * **BPF_CGROUP_INET6_CONNECT** and **BPF_CGROUP_UNIX_CONNECT**. 3155 * 3156 * This helper actually implements a subset of **getsockopt()**. 3157 * It supports the same set of *optname*\ s that is supported by 3158 * the **bpf_setsockopt**\ () helper. The exceptions are 3159 * **TCP_BPF_*** is **bpf_setsockopt**\ () only and 3160 * **TCP_SAVED_SYN** is **bpf_getsockopt**\ () only. 3161 * Return 3162 * 0 on success, or a negative error in case of failure. 3163 * 3164 * long bpf_override_return(struct pt_regs *regs, u64 rc) 3165 * Description 3166 * Used for error injection, this helper uses kprobes to override 3167 * the return value of the probed function, and to set it to *rc*. 3168 * The first argument is the context *regs* on which the kprobe 3169 * works. 3170 * 3171 * This helper works by setting the PC (program counter) 3172 * to an override function which is run in place of the original 3173 * probed function. This means the probed function is not run at 3174 * all. The replacement function just returns with the required 3175 * value. 3176 * 3177 * This helper has security implications, and thus is subject to 3178 * restrictions. It is only available if the kernel was compiled 3179 * with the **CONFIG_BPF_KPROBE_OVERRIDE** configuration 3180 * option, and in this case it only works on functions tagged with 3181 * **ALLOW_ERROR_INJECTION** in the kernel code. 3182 * Return 3183 * 0 3184 * 3185 * long bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval) 3186 * Description 3187 * Attempt to set the value of the **bpf_sock_ops_cb_flags** field 3188 * for the full TCP socket associated to *bpf_sock_ops* to 3189 * *argval*. 3190 * 3191 * The primary use of this field is to determine if there should 3192 * be calls to eBPF programs of type 3193 * **BPF_PROG_TYPE_SOCK_OPS** at various points in the TCP 3194 * code. A program of the same type can change its value, per 3195 * connection and as necessary, when the connection is 3196 * established. This field is directly accessible for reading, but 3197 * this helper must be used for updates in order to return an 3198 * error if an eBPF program tries to set a callback that is not 3199 * supported in the current kernel. 3200 * 3201 * *argval* is a flag array which can combine these flags: 3202 * 3203 * * **BPF_SOCK_OPS_RTO_CB_FLAG** (retransmission time out) 3204 * * **BPF_SOCK_OPS_RETRANS_CB_FLAG** (retransmission) 3205 * * **BPF_SOCK_OPS_STATE_CB_FLAG** (TCP state change) 3206 * * **BPF_SOCK_OPS_RTT_CB_FLAG** (every RTT) 3207 * 3208 * Therefore, this function can be used to clear a callback flag by 3209 * setting the appropriate bit to zero. e.g. to disable the RTO 3210 * callback: 3211 * 3212 * **bpf_sock_ops_cb_flags_set(bpf_sock,** 3213 * **bpf_sock->bpf_sock_ops_cb_flags & ~BPF_SOCK_OPS_RTO_CB_FLAG)** 3214 * 3215 * Here are some examples of where one could call such eBPF 3216 * program: 3217 * 3218 * * When RTO fires. 3219 * * When a packet is retransmitted. 3220 * * When the connection terminates. 3221 * * When a packet is sent. 3222 * * When a packet is received. 3223 * Return 3224 * Code **-EINVAL** if the socket is not a full TCP socket; 3225 * otherwise, a positive number containing the bits that could not 3226 * be set is returned (which comes down to 0 if all bits were set 3227 * as required). 3228 * 3229 * long bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags) 3230 * Description 3231 * This helper is used in programs implementing policies at the 3232 * socket level. If the message *msg* is allowed to pass (i.e. if 3233 * the verdict eBPF program returns **SK_PASS**), redirect it to 3234 * the socket referenced by *map* (of type 3235 * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and 3236 * egress interfaces can be used for redirection. The 3237 * **BPF_F_INGRESS** value in *flags* is used to make the 3238 * distinction (ingress path is selected if the flag is present, 3239 * egress path otherwise). This is the only flag supported for now. 3240 * Return 3241 * **SK_PASS** on success, or **SK_DROP** on error. 3242 * 3243 * long bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes) 3244 * Description 3245 * For socket policies, apply the verdict of the eBPF program to 3246 * the next *bytes* (number of bytes) of message *msg*. 3247 * 3248 * For example, this helper can be used in the following cases: 3249 * 3250 * * A single **sendmsg**\ () or **sendfile**\ () system call 3251 * contains multiple logical messages that the eBPF program is 3252 * supposed to read and for which it should apply a verdict. 3253 * * An eBPF program only cares to read the first *bytes* of a 3254 * *msg*. If the message has a large payload, then setting up 3255 * and calling the eBPF program repeatedly for all bytes, even 3256 * though the verdict is already known, would create unnecessary 3257 * overhead. 3258 * 3259 * When called from within an eBPF program, the helper sets a 3260 * counter internal to the BPF infrastructure, that is used to 3261 * apply the last verdict to the next *bytes*. If *bytes* is 3262 * smaller than the current data being processed from a 3263 * **sendmsg**\ () or **sendfile**\ () system call, the first 3264 * *bytes* will be sent and the eBPF program will be re-run with 3265 * the pointer for start of data pointing to byte number *bytes* 3266 * **+ 1**. If *bytes* is larger than the current data being 3267 * processed, then the eBPF verdict will be applied to multiple 3268 * **sendmsg**\ () or **sendfile**\ () calls until *bytes* are 3269 * consumed. 3270 * 3271 * Note that if a socket closes with the internal counter holding 3272 * a non-zero value, this is not a problem because data is not 3273 * being buffered for *bytes* and is sent as it is received. 3274 * Return 3275 * 0 3276 * 3277 * long bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes) 3278 * Description 3279 * For socket policies, prevent the execution of the verdict eBPF 3280 * program for message *msg* until *bytes* (byte number) have been 3281 * accumulated. 3282 * 3283 * This can be used when one needs a specific number of bytes 3284 * before a verdict can be assigned, even if the data spans 3285 * multiple **sendmsg**\ () or **sendfile**\ () calls. The extreme 3286 * case would be a user calling **sendmsg**\ () repeatedly with 3287 * 1-byte long message segments. Obviously, this is bad for 3288 * performance, but it is still valid. If the eBPF program needs 3289 * *bytes* bytes to validate a header, this helper can be used to 3290 * prevent the eBPF program to be called again until *bytes* have 3291 * been accumulated. 3292 * Return 3293 * 0 3294 * 3295 * long bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags) 3296 * Description 3297 * For socket policies, pull in non-linear data from user space 3298 * for *msg* and set pointers *msg*\ **->data** and *msg*\ 3299 * **->data_end** to *start* and *end* bytes offsets into *msg*, 3300 * respectively. 3301 * 3302 * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a 3303 * *msg* it can only parse data that the (**data**, **data_end**) 3304 * pointers have already consumed. For **sendmsg**\ () hooks this 3305 * is likely the first scatterlist element. But for calls relying 3306 * on the **sendpage** handler (e.g. **sendfile**\ ()) this will 3307 * be the range (**0**, **0**) because the data is shared with 3308 * user space and by default the objective is to avoid allowing 3309 * user space to modify data while (or after) eBPF verdict is 3310 * being decided. This helper can be used to pull in data and to 3311 * set the start and end pointer to given values. Data will be 3312 * copied if necessary (i.e. if data was not linear and if start 3313 * and end pointers do not point to the same chunk). 3314 * 3315 * A call to this helper is susceptible to change the underlying 3316 * packet buffer. Therefore, at load time, all checks on pointers 3317 * previously done by the verifier are invalidated and must be 3318 * performed again, if the helper is used in combination with 3319 * direct packet access. 3320 * 3321 * All values for *flags* are reserved for future usage, and must 3322 * be left at zero. 3323 * Return 3324 * 0 on success, or a negative error in case of failure. 3325 * 3326 * long bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len) 3327 * Description 3328 * Bind the socket associated to *ctx* to the address pointed by 3329 * *addr*, of length *addr_len*. This allows for making outgoing 3330 * connection from the desired IP address, which can be useful for 3331 * example when all processes inside a cgroup should use one 3332 * single IP address on a host that has multiple IP configured. 3333 * 3334 * This helper works for IPv4 and IPv6, TCP and UDP sockets. The 3335 * domain (*addr*\ **->sa_family**) must be **AF_INET** (or 3336 * **AF_INET6**). It's advised to pass zero port (**sin_port** 3337 * or **sin6_port**) which triggers IP_BIND_ADDRESS_NO_PORT-like 3338 * behavior and lets the kernel efficiently pick up an unused 3339 * port as long as 4-tuple is unique. Passing non-zero port might 3340 * lead to degraded performance. 3341 * Return 3342 * 0 on success, or a negative error in case of failure. 3343 * 3344 * long bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta) 3345 * Description 3346 * Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is 3347 * possible to both shrink and grow the packet tail. 3348 * Shrink done via *delta* being a negative integer. 3349 * 3350 * A call to this helper is susceptible to change the underlying 3351 * packet buffer. Therefore, at load time, all checks on pointers 3352 * previously done by the verifier are invalidated and must be 3353 * performed again, if the helper is used in combination with 3354 * direct packet access. 3355 * Return 3356 * 0 on success, or a negative error in case of failure. 3357 * 3358 * long bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags) 3359 * Description 3360 * Retrieve the XFRM state (IP transform framework, see also 3361 * **ip-xfrm(8)**) at *index* in XFRM "security path" for *skb*. 3362 * 3363 * The retrieved value is stored in the **struct bpf_xfrm_state** 3364 * pointed by *xfrm_state* and of length *size*. 3365 * 3366 * All values for *flags* are reserved for future usage, and must 3367 * be left at zero. 3368 * 3369 * This helper is available only if the kernel was compiled with 3370 * **CONFIG_XFRM** configuration option. 3371 * Return 3372 * 0 on success, or a negative error in case of failure. 3373 * 3374 * long bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags) 3375 * Description 3376 * Return a user or a kernel stack in bpf program provided buffer. 3377 * To achieve this, the helper needs *ctx*, which is a pointer 3378 * to the context on which the tracing program is executed. 3379 * To store the stacktrace, the bpf program provides *buf* with 3380 * a nonnegative *size*. 3381 * 3382 * The last argument, *flags*, holds the number of stack frames to 3383 * skip (from 0 to 255), masked with 3384 * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set 3385 * the following flags: 3386 * 3387 * **BPF_F_USER_STACK** 3388 * Collect a user space stack instead of a kernel stack. 3389 * **BPF_F_USER_BUILD_ID** 3390 * Collect (build_id, file_offset) instead of ips for user 3391 * stack, only valid if **BPF_F_USER_STACK** is also 3392 * specified. 3393 * 3394 * *file_offset* is an offset relative to the beginning 3395 * of the executable or shared object file backing the vma 3396 * which the *ip* falls in. It is *not* an offset relative 3397 * to that object's base address. Accordingly, it must be 3398 * adjusted by adding (sh_addr - sh_offset), where 3399 * sh_{addr,offset} correspond to the executable section 3400 * containing *file_offset* in the object, for comparisons 3401 * to symbols' st_value to be valid. 3402 * 3403 * **bpf_get_stack**\ () can collect up to 3404 * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject 3405 * to sufficient large buffer size. Note that 3406 * this limit can be controlled with the **sysctl** program, and 3407 * that it should be manually increased in order to profile long 3408 * user stacks (such as stacks for Java programs). To do so, use: 3409 * 3410 * :: 3411 * 3412 * # sysctl kernel.perf_event_max_stack=<new value> 3413 * Return 3414 * The non-negative copied *buf* length equal to or less than 3415 * *size* on success, or a negative error in case of failure. 3416 * 3417 * long bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header) 3418 * Description 3419 * This helper is similar to **bpf_skb_load_bytes**\ () in that 3420 * it provides an easy way to load *len* bytes from *offset* 3421 * from the packet associated to *skb*, into the buffer pointed 3422 * by *to*. The difference to **bpf_skb_load_bytes**\ () is that 3423 * a fifth argument *start_header* exists in order to select a 3424 * base offset to start from. *start_header* can be one of: 3425 * 3426 * **BPF_HDR_START_MAC** 3427 * Base offset to load data from is *skb*'s mac header. 3428 * **BPF_HDR_START_NET** 3429 * Base offset to load data from is *skb*'s network header. 3430 * 3431 * In general, "direct packet access" is the preferred method to 3432 * access packet data, however, this helper is in particular useful 3433 * in socket filters where *skb*\ **->data** does not always point 3434 * to the start of the mac header and where "direct packet access" 3435 * is not available. 3436 * Return 3437 * 0 on success, or a negative error in case of failure. 3438 * 3439 * long bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags) 3440 * Description 3441 * Do FIB lookup in kernel tables using parameters in *params*. 3442 * If lookup is successful and result shows packet is to be 3443 * forwarded, the neighbor tables are searched for the nexthop. 3444 * If successful (ie., FIB lookup shows forwarding and nexthop 3445 * is resolved), the nexthop address is returned in ipv4_dst 3446 * or ipv6_dst based on family, smac is set to mac address of 3447 * egress device, dmac is set to nexthop mac address, rt_metric 3448 * is set to metric from route (IPv4/IPv6 only), and ifindex 3449 * is set to the device index of the nexthop from the FIB lookup. 3450 * 3451 * *plen* argument is the size of the passed in struct. 3452 * *flags* argument can be a combination of one or more of the 3453 * following values: 3454 * 3455 * **BPF_FIB_LOOKUP_DIRECT** 3456 * Do a direct table lookup vs full lookup using FIB 3457 * rules. 3458 * **BPF_FIB_LOOKUP_TBID** 3459 * Used with BPF_FIB_LOOKUP_DIRECT. 3460 * Use the routing table ID present in *params*->tbid 3461 * for the fib lookup. 3462 * **BPF_FIB_LOOKUP_OUTPUT** 3463 * Perform lookup from an egress perspective (default is 3464 * ingress). 3465 * **BPF_FIB_LOOKUP_SKIP_NEIGH** 3466 * Skip the neighbour table lookup. *params*->dmac 3467 * and *params*->smac will not be set as output. A common 3468 * use case is to call **bpf_redirect_neigh**\ () after 3469 * doing **bpf_fib_lookup**\ (). 3470 * **BPF_FIB_LOOKUP_SRC** 3471 * Derive and set source IP addr in *params*->ipv{4,6}_src 3472 * for the nexthop. If the src addr cannot be derived, 3473 * **BPF_FIB_LKUP_RET_NO_SRC_ADDR** is returned. In this 3474 * case, *params*->dmac and *params*->smac are not set either. 3475 * **BPF_FIB_LOOKUP_MARK** 3476 * Use the mark present in *params*->mark for the fib lookup. 3477 * This option should not be used with BPF_FIB_LOOKUP_DIRECT, 3478 * as it only has meaning for full lookups. 3479 * 3480 * *ctx* is either **struct xdp_md** for XDP programs or 3481 * **struct sk_buff** tc cls_act programs. 3482 * Return 3483 * * < 0 if any input argument is invalid 3484 * * 0 on success (packet is forwarded, nexthop neighbor exists) 3485 * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the 3486 * packet is not forwarded or needs assist from full stack 3487 * 3488 * If lookup fails with BPF_FIB_LKUP_RET_FRAG_NEEDED, then the MTU 3489 * was exceeded and output params->mtu_result contains the MTU. 3490 * 3491 * long bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) 3492 * Description 3493 * Add an entry to, or update a sockhash *map* referencing sockets. 3494 * The *skops* is used as a new value for the entry associated to 3495 * *key*. *flags* is one of: 3496 * 3497 * **BPF_NOEXIST** 3498 * The entry for *key* must not exist in the map. 3499 * **BPF_EXIST** 3500 * The entry for *key* must already exist in the map. 3501 * **BPF_ANY** 3502 * No condition on the existence of the entry for *key*. 3503 * 3504 * If the *map* has eBPF programs (parser and verdict), those will 3505 * be inherited by the socket being added. If the socket is 3506 * already attached to eBPF programs, this results in an error. 3507 * Return 3508 * 0 on success, or a negative error in case of failure. 3509 * 3510 * long bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags) 3511 * Description 3512 * This helper is used in programs implementing policies at the 3513 * socket level. If the message *msg* is allowed to pass (i.e. if 3514 * the verdict eBPF program returns **SK_PASS**), redirect it to 3515 * the socket referenced by *map* (of type 3516 * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and 3517 * egress interfaces can be used for redirection. The 3518 * **BPF_F_INGRESS** value in *flags* is used to make the 3519 * distinction (ingress path is selected if the flag is present, 3520 * egress path otherwise). This is the only flag supported for now. 3521 * Return 3522 * **SK_PASS** on success, or **SK_DROP** on error. 3523 * 3524 * long bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags) 3525 * Description 3526 * This helper is used in programs implementing policies at the 3527 * skb socket level. If the sk_buff *skb* is allowed to pass (i.e. 3528 * if the verdict eBPF program returns **SK_PASS**), redirect it 3529 * to the socket referenced by *map* (of type 3530 * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and 3531 * egress interfaces can be used for redirection. The 3532 * **BPF_F_INGRESS** value in *flags* is used to make the 3533 * distinction (ingress path is selected if the flag is present, 3534 * egress otherwise). This is the only flag supported for now. 3535 * Return 3536 * **SK_PASS** on success, or **SK_DROP** on error. 3537 * 3538 * long bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len) 3539 * Description 3540 * Encapsulate the packet associated to *skb* within a Layer 3 3541 * protocol header. This header is provided in the buffer at 3542 * address *hdr*, with *len* its size in bytes. *type* indicates 3543 * the protocol of the header and can be one of: 3544 * 3545 * **BPF_LWT_ENCAP_SEG6** 3546 * IPv6 encapsulation with Segment Routing Header 3547 * (**struct ipv6_sr_hdr**). *hdr* only contains the SRH, 3548 * the IPv6 header is computed by the kernel. 3549 * **BPF_LWT_ENCAP_SEG6_INLINE** 3550 * Only works if *skb* contains an IPv6 packet. Insert a 3551 * Segment Routing Header (**struct ipv6_sr_hdr**) inside 3552 * the IPv6 header. 3553 * **BPF_LWT_ENCAP_IP** 3554 * IP encapsulation (GRE/GUE/IPIP/etc). The outer header 3555 * must be IPv4 or IPv6, followed by zero or more 3556 * additional headers, up to **LWT_BPF_MAX_HEADROOM** 3557 * total bytes in all prepended headers. Please note that 3558 * if **skb_is_gso**\ (*skb*) is true, no more than two 3559 * headers can be prepended, and the inner header, if 3560 * present, should be either GRE or UDP/GUE. 3561 * 3562 * **BPF_LWT_ENCAP_SEG6**\ \* types can be called by BPF programs 3563 * of type **BPF_PROG_TYPE_LWT_IN**; **BPF_LWT_ENCAP_IP** type can 3564 * be called by bpf programs of types **BPF_PROG_TYPE_LWT_IN** and 3565 * **BPF_PROG_TYPE_LWT_XMIT**. 3566 * 3567 * A call to this helper is susceptible to change the underlying 3568 * packet buffer. Therefore, at load time, all checks on pointers 3569 * previously done by the verifier are invalidated and must be 3570 * performed again, if the helper is used in combination with 3571 * direct packet access. 3572 * Return 3573 * 0 on success, or a negative error in case of failure. 3574 * 3575 * long bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len) 3576 * Description 3577 * Store *len* bytes from address *from* into the packet 3578 * associated to *skb*, at *offset*. Only the flags, tag and TLVs 3579 * inside the outermost IPv6 Segment Routing Header can be 3580 * modified through this helper. 3581 * 3582 * A call to this helper is susceptible to change the underlying 3583 * packet buffer. Therefore, at load time, all checks on pointers 3584 * previously done by the verifier are invalidated and must be 3585 * performed again, if the helper is used in combination with 3586 * direct packet access. 3587 * Return 3588 * 0 on success, or a negative error in case of failure. 3589 * 3590 * long bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta) 3591 * Description 3592 * Adjust the size allocated to TLVs in the outermost IPv6 3593 * Segment Routing Header contained in the packet associated to 3594 * *skb*, at position *offset* by *delta* bytes. Only offsets 3595 * after the segments are accepted. *delta* can be as well 3596 * positive (growing) as negative (shrinking). 3597 * 3598 * A call to this helper is susceptible to change the underlying 3599 * packet buffer. Therefore, at load time, all checks on pointers 3600 * previously done by the verifier are invalidated and must be 3601 * performed again, if the helper is used in combination with 3602 * direct packet access. 3603 * Return 3604 * 0 on success, or a negative error in case of failure. 3605 * 3606 * long bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len) 3607 * Description 3608 * Apply an IPv6 Segment Routing action of type *action* to the 3609 * packet associated to *skb*. Each action takes a parameter 3610 * contained at address *param*, and of length *param_len* bytes. 3611 * *action* can be one of: 3612 * 3613 * **SEG6_LOCAL_ACTION_END_X** 3614 * End.X action: Endpoint with Layer-3 cross-connect. 3615 * Type of *param*: **struct in6_addr**. 3616 * **SEG6_LOCAL_ACTION_END_T** 3617 * End.T action: Endpoint with specific IPv6 table lookup. 3618 * Type of *param*: **int**. 3619 * **SEG6_LOCAL_ACTION_END_B6** 3620 * End.B6 action: Endpoint bound to an SRv6 policy. 3621 * Type of *param*: **struct ipv6_sr_hdr**. 3622 * **SEG6_LOCAL_ACTION_END_B6_ENCAP** 3623 * End.B6.Encap action: Endpoint bound to an SRv6 3624 * encapsulation policy. 3625 * Type of *param*: **struct ipv6_sr_hdr**. 3626 * 3627 * A call to this helper is susceptible to change the underlying 3628 * packet buffer. Therefore, at load time, all checks on pointers 3629 * previously done by the verifier are invalidated and must be 3630 * performed again, if the helper is used in combination with 3631 * direct packet access. 3632 * Return 3633 * 0 on success, or a negative error in case of failure. 3634 * 3635 * long bpf_rc_repeat(void *ctx) 3636 * Description 3637 * This helper is used in programs implementing IR decoding, to 3638 * report a successfully decoded repeat key message. This delays 3639 * the generation of a key up event for previously generated 3640 * key down event. 3641 * 3642 * Some IR protocols like NEC have a special IR message for 3643 * repeating last button, for when a button is held down. 3644 * 3645 * The *ctx* should point to the lirc sample as passed into 3646 * the program. 3647 * 3648 * This helper is only available is the kernel was compiled with 3649 * the **CONFIG_BPF_LIRC_MODE2** configuration option set to 3650 * "**y**". 3651 * Return 3652 * 0 3653 * 3654 * long bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle) 3655 * Description 3656 * This helper is used in programs implementing IR decoding, to 3657 * report a successfully decoded key press with *scancode*, 3658 * *toggle* value in the given *protocol*. The scancode will be 3659 * translated to a keycode using the rc keymap, and reported as 3660 * an input key down event. After a period a key up event is 3661 * generated. This period can be extended by calling either 3662 * **bpf_rc_keydown**\ () again with the same values, or calling 3663 * **bpf_rc_repeat**\ (). 3664 * 3665 * Some protocols include a toggle bit, in case the button was 3666 * released and pressed again between consecutive scancodes. 3667 * 3668 * The *ctx* should point to the lirc sample as passed into 3669 * the program. 3670 * 3671 * The *protocol* is the decoded protocol number (see 3672 * **enum rc_proto** for some predefined values). 3673 * 3674 * This helper is only available is the kernel was compiled with 3675 * the **CONFIG_BPF_LIRC_MODE2** configuration option set to 3676 * "**y**". 3677 * Return 3678 * 0 3679 * 3680 * u64 bpf_skb_cgroup_id(struct sk_buff *skb) 3681 * Description 3682 * Return the cgroup v2 id of the socket associated with the *skb*. 3683 * This is roughly similar to the **bpf_get_cgroup_classid**\ () 3684 * helper for cgroup v1 by providing a tag resp. identifier that 3685 * can be matched on or used for map lookups e.g. to implement 3686 * policy. The cgroup v2 id of a given path in the hierarchy is 3687 * exposed in user space through the f_handle API in order to get 3688 * to the same 64-bit id. 3689 * 3690 * This helper can be used on TC egress path, but not on ingress, 3691 * and is available only if the kernel was compiled with the 3692 * **CONFIG_SOCK_CGROUP_DATA** configuration option. 3693 * Return 3694 * The id is returned or 0 in case the id could not be retrieved. 3695 * 3696 * u64 bpf_get_current_cgroup_id(void) 3697 * Description 3698 * Get the current cgroup id based on the cgroup within which 3699 * the current task is running. 3700 * Return 3701 * A 64-bit integer containing the current cgroup id based 3702 * on the cgroup within which the current task is running. 3703 * 3704 * void *bpf_get_local_storage(void *map, u64 flags) 3705 * Description 3706 * Get the pointer to the local storage area. 3707 * The type and the size of the local storage is defined 3708 * by the *map* argument. 3709 * The *flags* meaning is specific for each map type, 3710 * and has to be 0 for cgroup local storage. 3711 * 3712 * Depending on the BPF program type, a local storage area 3713 * can be shared between multiple instances of the BPF program, 3714 * running simultaneously. 3715 * 3716 * A user should care about the synchronization by himself. 3717 * For example, by using the **BPF_ATOMIC** instructions to alter 3718 * the shared data. 3719 * Return 3720 * A pointer to the local storage area. 3721 * 3722 * long bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags) 3723 * Description 3724 * Select a **SO_REUSEPORT** socket from a 3725 * **BPF_MAP_TYPE_REUSEPORT_SOCKARRAY** *map*. 3726 * It checks the selected socket is matching the incoming 3727 * request in the socket buffer. 3728 * Return 3729 * 0 on success, or a negative error in case of failure. 3730 * 3731 * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level) 3732 * Description 3733 * Return id of cgroup v2 that is ancestor of cgroup associated 3734 * with the *skb* at the *ancestor_level*. The root cgroup is at 3735 * *ancestor_level* zero and each step down the hierarchy 3736 * increments the level. If *ancestor_level* == level of cgroup 3737 * associated with *skb*, then return value will be same as that 3738 * of **bpf_skb_cgroup_id**\ (). 3739 * 3740 * The helper is useful to implement policies based on cgroups 3741 * that are upper in hierarchy than immediate cgroup associated 3742 * with *skb*. 3743 * 3744 * The format of returned id and helper limitations are same as in 3745 * **bpf_skb_cgroup_id**\ (). 3746 * Return 3747 * The id is returned or 0 in case the id could not be retrieved. 3748 * 3749 * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) 3750 * Description 3751 * Look for TCP socket matching *tuple*, optionally in a child 3752 * network namespace *netns*. The return value must be checked, 3753 * and if non-**NULL**, released via **bpf_sk_release**\ (). 3754 * 3755 * The *ctx* should point to the context of the program, such as 3756 * the skb or socket (depending on the hook in use). This is used 3757 * to determine the base network namespace for the lookup. 3758 * 3759 * *tuple_size* must be one of: 3760 * 3761 * **sizeof**\ (*tuple*\ **->ipv4**) 3762 * Look for an IPv4 socket. 3763 * **sizeof**\ (*tuple*\ **->ipv6**) 3764 * Look for an IPv6 socket. 3765 * 3766 * If the *netns* is a negative signed 32-bit integer, then the 3767 * socket lookup table in the netns associated with the *ctx* 3768 * will be used. For the TC hooks, this is the netns of the device 3769 * in the skb. For socket hooks, this is the netns of the socket. 3770 * If *netns* is any other signed 32-bit value greater than or 3771 * equal to zero then it specifies the ID of the netns relative to 3772 * the netns associated with the *ctx*. *netns* values beyond the 3773 * range of 32-bit integers are reserved for future use. 3774 * 3775 * All values for *flags* are reserved for future usage, and must 3776 * be left at zero. 3777 * 3778 * This helper is available only if the kernel was compiled with 3779 * **CONFIG_NET** configuration option. 3780 * Return 3781 * Pointer to **struct bpf_sock**, or **NULL** in case of failure. 3782 * For sockets with reuseport option, the **struct bpf_sock** 3783 * result is from *reuse*\ **->socks**\ [] using the hash of the 3784 * tuple. 3785 * 3786 * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) 3787 * Description 3788 * Look for UDP socket matching *tuple*, optionally in a child 3789 * network namespace *netns*. The return value must be checked, 3790 * and if non-**NULL**, released via **bpf_sk_release**\ (). 3791 * 3792 * The *ctx* should point to the context of the program, such as 3793 * the skb or socket (depending on the hook in use). This is used 3794 * to determine the base network namespace for the lookup. 3795 * 3796 * *tuple_size* must be one of: 3797 * 3798 * **sizeof**\ (*tuple*\ **->ipv4**) 3799 * Look for an IPv4 socket. 3800 * **sizeof**\ (*tuple*\ **->ipv6**) 3801 * Look for an IPv6 socket. 3802 * 3803 * If the *netns* is a negative signed 32-bit integer, then the 3804 * socket lookup table in the netns associated with the *ctx* 3805 * will be used. For the TC hooks, this is the netns of the device 3806 * in the skb. For socket hooks, this is the netns of the socket. 3807 * If *netns* is any other signed 32-bit value greater than or 3808 * equal to zero then it specifies the ID of the netns relative to 3809 * the netns associated with the *ctx*. *netns* values beyond the 3810 * range of 32-bit integers are reserved for future use. 3811 * 3812 * All values for *flags* are reserved for future usage, and must 3813 * be left at zero. 3814 * 3815 * This helper is available only if the kernel was compiled with 3816 * **CONFIG_NET** configuration option. 3817 * Return 3818 * Pointer to **struct bpf_sock**, or **NULL** in case of failure. 3819 * For sockets with reuseport option, the **struct bpf_sock** 3820 * result is from *reuse*\ **->socks**\ [] using the hash of the 3821 * tuple. 3822 * 3823 * long bpf_sk_release(void *sock) 3824 * Description 3825 * Release the reference held by *sock*. *sock* must be a 3826 * non-**NULL** pointer that was returned from 3827 * **bpf_sk_lookup_xxx**\ (). 3828 * Return 3829 * 0 on success, or a negative error in case of failure. 3830 * 3831 * long bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags) 3832 * Description 3833 * Push an element *value* in *map*. *flags* is one of: 3834 * 3835 * **BPF_EXIST** 3836 * If the queue/stack is full, the oldest element is 3837 * removed to make room for this. 3838 * Return 3839 * 0 on success, or a negative error in case of failure. 3840 * 3841 * long bpf_map_pop_elem(struct bpf_map *map, void *value) 3842 * Description 3843 * Pop an element from *map*. 3844 * Return 3845 * 0 on success, or a negative error in case of failure. 3846 * 3847 * long bpf_map_peek_elem(struct bpf_map *map, void *value) 3848 * Description 3849 * Get an element from *map* without removing it. 3850 * Return 3851 * 0 on success, or a negative error in case of failure. 3852 * 3853 * long bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) 3854 * Description 3855 * For socket policies, insert *len* bytes into *msg* at offset 3856 * *start*. 3857 * 3858 * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a 3859 * *msg* it may want to insert metadata or options into the *msg*. 3860 * This can later be read and used by any of the lower layer BPF 3861 * hooks. 3862 * 3863 * This helper may fail if under memory pressure (a malloc 3864 * fails) in these cases BPF programs will get an appropriate 3865 * error and BPF programs will need to handle them. 3866 * Return 3867 * 0 on success, or a negative error in case of failure. 3868 * 3869 * long bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) 3870 * Description 3871 * Will remove *len* bytes from a *msg* starting at byte *start*. 3872 * This may result in **ENOMEM** errors under certain situations if 3873 * an allocation and copy are required due to a full ring buffer. 3874 * However, the helper will try to avoid doing the allocation 3875 * if possible. Other errors can occur if input parameters are 3876 * invalid either due to *start* byte not being valid part of *msg* 3877 * payload and/or *pop* value being to large. 3878 * Return 3879 * 0 on success, or a negative error in case of failure. 3880 * 3881 * long bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y) 3882 * Description 3883 * This helper is used in programs implementing IR decoding, to 3884 * report a successfully decoded pointer movement. 3885 * 3886 * The *ctx* should point to the lirc sample as passed into 3887 * the program. 3888 * 3889 * This helper is only available is the kernel was compiled with 3890 * the **CONFIG_BPF_LIRC_MODE2** configuration option set to 3891 * "**y**". 3892 * Return 3893 * 0 3894 * 3895 * long bpf_spin_lock(struct bpf_spin_lock *lock) 3896 * Description 3897 * Acquire a spinlock represented by the pointer *lock*, which is 3898 * stored as part of a value of a map. Taking the lock allows to 3899 * safely update the rest of the fields in that value. The 3900 * spinlock can (and must) later be released with a call to 3901 * **bpf_spin_unlock**\ (\ *lock*\ ). 3902 * 3903 * Spinlocks in BPF programs come with a number of restrictions 3904 * and constraints: 3905 * 3906 * * **bpf_spin_lock** objects are only allowed inside maps of 3907 * types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this 3908 * list could be extended in the future). 3909 * * BTF description of the map is mandatory. 3910 * * The BPF program can take ONE lock at a time, since taking two 3911 * or more could cause dead locks. 3912 * * Only one **struct bpf_spin_lock** is allowed per map element. 3913 * * When the lock is taken, calls (either BPF to BPF or helpers) 3914 * are not allowed. 3915 * * The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not 3916 * allowed inside a spinlock-ed region. 3917 * * The BPF program MUST call **bpf_spin_unlock**\ () to release 3918 * the lock, on all execution paths, before it returns. 3919 * * The BPF program can access **struct bpf_spin_lock** only via 3920 * the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ () 3921 * helpers. Loading or storing data into the **struct 3922 * bpf_spin_lock** *lock*\ **;** field of a map is not allowed. 3923 * * To use the **bpf_spin_lock**\ () helper, the BTF description 3924 * of the map value must be a struct and have **struct 3925 * bpf_spin_lock** *anyname*\ **;** field at the top level. 3926 * Nested lock inside another struct is not allowed. 3927 * * The **struct bpf_spin_lock** *lock* field in a map value must 3928 * be aligned on a multiple of 4 bytes in that value. 3929 * * Syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy 3930 * the **bpf_spin_lock** field to user space. 3931 * * Syscall with command **BPF_MAP_UPDATE_ELEM**, or update from 3932 * a BPF program, do not update the **bpf_spin_lock** field. 3933 * * **bpf_spin_lock** cannot be on the stack or inside a 3934 * networking packet (it can only be inside of a map values). 3935 * * **bpf_spin_lock** is available to root only. 3936 * * Tracing programs and socket filter programs cannot use 3937 * **bpf_spin_lock**\ () due to insufficient preemption checks 3938 * (but this may change in the future). 3939 * * **bpf_spin_lock** is not allowed in inner maps of map-in-map. 3940 * Return 3941 * 0 3942 * 3943 * long bpf_spin_unlock(struct bpf_spin_lock *lock) 3944 * Description 3945 * Release the *lock* previously locked by a call to 3946 * **bpf_spin_lock**\ (\ *lock*\ ). 3947 * Return 3948 * 0 3949 * 3950 * struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk) 3951 * Description 3952 * This helper gets a **struct bpf_sock** pointer such 3953 * that all the fields in this **bpf_sock** can be accessed. 3954 * Return 3955 * A **struct bpf_sock** pointer on success, or **NULL** in 3956 * case of failure. 3957 * 3958 * struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk) 3959 * Description 3960 * This helper gets a **struct bpf_tcp_sock** pointer from a 3961 * **struct bpf_sock** pointer. 3962 * Return 3963 * A **struct bpf_tcp_sock** pointer on success, or **NULL** in 3964 * case of failure. 3965 * 3966 * long bpf_skb_ecn_set_ce(struct sk_buff *skb) 3967 * Description 3968 * Set ECN (Explicit Congestion Notification) field of IP header 3969 * to **CE** (Congestion Encountered) if current value is **ECT** 3970 * (ECN Capable Transport). Otherwise, do nothing. Works with IPv6 3971 * and IPv4. 3972 * Return 3973 * 1 if the **CE** flag is set (either by the current helper call 3974 * or because it was already present), 0 if it is not set. 3975 * 3976 * struct bpf_sock *bpf_get_listener_sock(struct bpf_sock *sk) 3977 * Description 3978 * Return a **struct bpf_sock** pointer in **TCP_LISTEN** state. 3979 * **bpf_sk_release**\ () is unnecessary and not allowed. 3980 * Return 3981 * A **struct bpf_sock** pointer on success, or **NULL** in 3982 * case of failure. 3983 * 3984 * struct bpf_sock *bpf_skc_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) 3985 * Description 3986 * Look for TCP socket matching *tuple*, optionally in a child 3987 * network namespace *netns*. The return value must be checked, 3988 * and if non-**NULL**, released via **bpf_sk_release**\ (). 3989 * 3990 * This function is identical to **bpf_sk_lookup_tcp**\ (), except 3991 * that it also returns timewait or request sockets. Use 3992 * **bpf_sk_fullsock**\ () or **bpf_tcp_sock**\ () to access the 3993 * full structure. 3994 * 3995 * This helper is available only if the kernel was compiled with 3996 * **CONFIG_NET** configuration option. 3997 * Return 3998 * Pointer to **struct bpf_sock**, or **NULL** in case of failure. 3999 * For sockets with reuseport option, the **struct bpf_sock** 4000 * result is from *reuse*\ **->socks**\ [] using the hash of the 4001 * tuple. 4002 * 4003 * long bpf_tcp_check_syncookie(void *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) 4004 * Description 4005 * Check whether *iph* and *th* contain a valid SYN cookie ACK for 4006 * the listening socket in *sk*. 4007 * 4008 * *iph* points to the start of the IPv4 or IPv6 header, while 4009 * *iph_len* contains **sizeof**\ (**struct iphdr**) or 4010 * **sizeof**\ (**struct ipv6hdr**). 4011 * 4012 * *th* points to the start of the TCP header, while *th_len* 4013 * contains the length of the TCP header (at least 4014 * **sizeof**\ (**struct tcphdr**)). 4015 * Return 4016 * 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative 4017 * error otherwise. 4018 * 4019 * long bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags) 4020 * Description 4021 * Get name of sysctl in /proc/sys/ and copy it into provided by 4022 * program buffer *buf* of size *buf_len*. 4023 * 4024 * The buffer is always NUL terminated, unless it's zero-sized. 4025 * 4026 * If *flags* is zero, full name (e.g. "net/ipv4/tcp_mem") is 4027 * copied. Use **BPF_F_SYSCTL_BASE_NAME** flag to copy base name 4028 * only (e.g. "tcp_mem"). 4029 * Return 4030 * Number of character copied (not including the trailing NUL). 4031 * 4032 * **-E2BIG** if the buffer wasn't big enough (*buf* will contain 4033 * truncated name in this case). 4034 * 4035 * long bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) 4036 * Description 4037 * Get current value of sysctl as it is presented in /proc/sys 4038 * (incl. newline, etc), and copy it as a string into provided 4039 * by program buffer *buf* of size *buf_len*. 4040 * 4041 * The whole value is copied, no matter what file position user 4042 * space issued e.g. sys_read at. 4043 * 4044 * The buffer is always NUL terminated, unless it's zero-sized. 4045 * Return 4046 * Number of character copied (not including the trailing NUL). 4047 * 4048 * **-E2BIG** if the buffer wasn't big enough (*buf* will contain 4049 * truncated name in this case). 4050 * 4051 * **-EINVAL** if current value was unavailable, e.g. because 4052 * sysctl is uninitialized and read returns -EIO for it. 4053 * 4054 * long bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) 4055 * Description 4056 * Get new value being written by user space to sysctl (before 4057 * the actual write happens) and copy it as a string into 4058 * provided by program buffer *buf* of size *buf_len*. 4059 * 4060 * User space may write new value at file position > 0. 4061 * 4062 * The buffer is always NUL terminated, unless it's zero-sized. 4063 * Return 4064 * Number of character copied (not including the trailing NUL). 4065 * 4066 * **-E2BIG** if the buffer wasn't big enough (*buf* will contain 4067 * truncated name in this case). 4068 * 4069 * **-EINVAL** if sysctl is being read. 4070 * 4071 * long bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len) 4072 * Description 4073 * Override new value being written by user space to sysctl with 4074 * value provided by program in buffer *buf* of size *buf_len*. 4075 * 4076 * *buf* should contain a string in same form as provided by user 4077 * space on sysctl write. 4078 * 4079 * User space may write new value at file position > 0. To override 4080 * the whole sysctl value file position should be set to zero. 4081 * Return 4082 * 0 on success. 4083 * 4084 * **-E2BIG** if the *buf_len* is too big. 4085 * 4086 * **-EINVAL** if sysctl is being read. 4087 * 4088 * long bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res) 4089 * Description 4090 * Convert the initial part of the string from buffer *buf* of 4091 * size *buf_len* to a long integer according to the given base 4092 * and save the result in *res*. 4093 * 4094 * The string may begin with an arbitrary amount of white space 4095 * (as determined by **isspace**\ (3)) followed by a single 4096 * optional '**-**' sign. 4097 * 4098 * Five least significant bits of *flags* encode base, other bits 4099 * are currently unused. 4100 * 4101 * Base must be either 8, 10, 16 or 0 to detect it automatically 4102 * similar to user space **strtol**\ (3). 4103 * Return 4104 * Number of characters consumed on success. Must be positive but 4105 * no more than *buf_len*. 4106 * 4107 * **-EINVAL** if no valid digits were found or unsupported base 4108 * was provided. 4109 * 4110 * **-ERANGE** if resulting value was out of range. 4111 * 4112 * long bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res) 4113 * Description 4114 * Convert the initial part of the string from buffer *buf* of 4115 * size *buf_len* to an unsigned long integer according to the 4116 * given base and save the result in *res*. 4117 * 4118 * The string may begin with an arbitrary amount of white space 4119 * (as determined by **isspace**\ (3)). 4120 * 4121 * Five least significant bits of *flags* encode base, other bits 4122 * are currently unused. 4123 * 4124 * Base must be either 8, 10, 16 or 0 to detect it automatically 4125 * similar to user space **strtoul**\ (3). 4126 * Return 4127 * Number of characters consumed on success. Must be positive but 4128 * no more than *buf_len*. 4129 * 4130 * **-EINVAL** if no valid digits were found or unsupported base 4131 * was provided. 4132 * 4133 * **-ERANGE** if resulting value was out of range. 4134 * 4135 * void *bpf_sk_storage_get(struct bpf_map *map, void *sk, void *value, u64 flags) 4136 * Description 4137 * Get a bpf-local-storage from a *sk*. 4138 * 4139 * Logically, it could be thought of getting the value from 4140 * a *map* with *sk* as the **key**. From this 4141 * perspective, the usage is not much different from 4142 * **bpf_map_lookup_elem**\ (*map*, **&**\ *sk*) except this 4143 * helper enforces the key must be a full socket and the map must 4144 * be a **BPF_MAP_TYPE_SK_STORAGE** also. 4145 * 4146 * Underneath, the value is stored locally at *sk* instead of 4147 * the *map*. The *map* is used as the bpf-local-storage 4148 * "type". The bpf-local-storage "type" (i.e. the *map*) is 4149 * searched against all bpf-local-storages residing at *sk*. 4150 * 4151 * *sk* is a kernel **struct sock** pointer for LSM program. 4152 * *sk* is a **struct bpf_sock** pointer for other program types. 4153 * 4154 * An optional *flags* (**BPF_SK_STORAGE_GET_F_CREATE**) can be 4155 * used such that a new bpf-local-storage will be 4156 * created if one does not exist. *value* can be used 4157 * together with **BPF_SK_STORAGE_GET_F_CREATE** to specify 4158 * the initial value of a bpf-local-storage. If *value* is 4159 * **NULL**, the new bpf-local-storage will be zero initialized. 4160 * Return 4161 * A bpf-local-storage pointer is returned on success. 4162 * 4163 * **NULL** if not found or there was an error in adding 4164 * a new bpf-local-storage. 4165 * 4166 * long bpf_sk_storage_delete(struct bpf_map *map, void *sk) 4167 * Description 4168 * Delete a bpf-local-storage from a *sk*. 4169 * Return 4170 * 0 on success. 4171 * 4172 * **-ENOENT** if the bpf-local-storage cannot be found. 4173 * **-EINVAL** if sk is not a fullsock (e.g. a request_sock). 4174 * 4175 * long bpf_send_signal(u32 sig) 4176 * Description 4177 * Send signal *sig* to the process of the current task. 4178 * The signal may be delivered to any of this process's threads. 4179 * Return 4180 * 0 on success or successfully queued. 4181 * 4182 * **-EBUSY** if work queue under nmi is full. 4183 * 4184 * **-EINVAL** if *sig* is invalid. 4185 * 4186 * **-EPERM** if no permission to send the *sig*. 4187 * 4188 * **-EAGAIN** if bpf program can try again. 4189 * 4190 * s64 bpf_tcp_gen_syncookie(void *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) 4191 * Description 4192 * Try to issue a SYN cookie for the packet with corresponding 4193 * IP/TCP headers, *iph* and *th*, on the listening socket in *sk*. 4194 * 4195 * *iph* points to the start of the IPv4 or IPv6 header, while 4196 * *iph_len* contains **sizeof**\ (**struct iphdr**) or 4197 * **sizeof**\ (**struct ipv6hdr**). 4198 * 4199 * *th* points to the start of the TCP header, while *th_len* 4200 * contains the length of the TCP header with options (at least 4201 * **sizeof**\ (**struct tcphdr**)). 4202 * Return 4203 * On success, lower 32 bits hold the generated SYN cookie in 4204 * followed by 16 bits which hold the MSS value for that cookie, 4205 * and the top 16 bits are unused. 4206 * 4207 * On failure, the returned value is one of the following: 4208 * 4209 * **-EINVAL** SYN cookie cannot be issued due to error 4210 * 4211 * **-ENOENT** SYN cookie should not be issued (no SYN flood) 4212 * 4213 * **-EOPNOTSUPP** kernel configuration does not enable SYN cookies 4214 * 4215 * **-EPROTONOSUPPORT** IP packet version is not 4 or 6 4216 * 4217 * long bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) 4218 * Description 4219 * Write raw *data* blob into a special BPF perf event held by 4220 * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf 4221 * event must have the following attributes: **PERF_SAMPLE_RAW** 4222 * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and 4223 * **PERF_COUNT_SW_BPF_OUTPUT** as **config**. 4224 * 4225 * The *flags* are used to indicate the index in *map* for which 4226 * the value must be put, masked with **BPF_F_INDEX_MASK**. 4227 * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** 4228 * to indicate that the index of the current CPU core should be 4229 * used. 4230 * 4231 * The value to write, of *size*, is passed through eBPF stack and 4232 * pointed by *data*. 4233 * 4234 * *ctx* is a pointer to in-kernel struct sk_buff. 4235 * 4236 * This helper is similar to **bpf_perf_event_output**\ () but 4237 * restricted to raw_tracepoint bpf programs. 4238 * Return 4239 * 0 on success, or a negative error in case of failure. 4240 * 4241 * long bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr) 4242 * Description 4243 * Safely attempt to read *size* bytes from user space address 4244 * *unsafe_ptr* and store the data in *dst*. 4245 * Return 4246 * 0 on success, or a negative error in case of failure. 4247 * 4248 * long bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr) 4249 * Description 4250 * Safely attempt to read *size* bytes from kernel space address 4251 * *unsafe_ptr* and store the data in *dst*. 4252 * Return 4253 * 0 on success, or a negative error in case of failure. 4254 * 4255 * long bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr) 4256 * Description 4257 * Copy a NUL terminated string from an unsafe user address 4258 * *unsafe_ptr* to *dst*. The *size* should include the 4259 * terminating NUL byte. In case the string length is smaller than 4260 * *size*, the target is not padded with further NUL bytes. If the 4261 * string length is larger than *size*, just *size*-1 bytes are 4262 * copied and the last byte is set to NUL. 4263 * 4264 * On success, returns the number of bytes that were written, 4265 * including the terminal NUL. This makes this helper useful in 4266 * tracing programs for reading strings, and more importantly to 4267 * get its length at runtime. See the following snippet: 4268 * 4269 * :: 4270 * 4271 * SEC("kprobe/sys_open") 4272 * void bpf_sys_open(struct pt_regs *ctx) 4273 * { 4274 * char buf[PATHLEN]; // PATHLEN is defined to 256 4275 * int res = bpf_probe_read_user_str(buf, sizeof(buf), 4276 * ctx->di); 4277 * 4278 * // Consume buf, for example push it to 4279 * // userspace via bpf_perf_event_output(); we 4280 * // can use res (the string length) as event 4281 * // size, after checking its boundaries. 4282 * } 4283 * 4284 * In comparison, using **bpf_probe_read_user**\ () helper here 4285 * instead to read the string would require to estimate the length 4286 * at compile time, and would often result in copying more memory 4287 * than necessary. 4288 * 4289 * Another useful use case is when parsing individual process 4290 * arguments or individual environment variables navigating 4291 * *current*\ **->mm->arg_start** and *current*\ 4292 * **->mm->env_start**: using this helper and the return value, 4293 * one can quickly iterate at the right offset of the memory area. 4294 * Return 4295 * On success, the strictly positive length of the output string, 4296 * including the trailing NUL character. On error, a negative 4297 * value. 4298 * 4299 * long bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr) 4300 * Description 4301 * Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr* 4302 * to *dst*. Same semantics as with **bpf_probe_read_user_str**\ () apply. 4303 * Return 4304 * On success, the strictly positive length of the string, including 4305 * the trailing NUL character. On error, a negative value. 4306 * 4307 * long bpf_tcp_send_ack(void *tp, u32 rcv_nxt) 4308 * Description 4309 * Send out a tcp-ack. *tp* is the in-kernel struct **tcp_sock**. 4310 * *rcv_nxt* is the ack_seq to be sent out. 4311 * Return 4312 * 0 on success, or a negative error in case of failure. 4313 * 4314 * long bpf_send_signal_thread(u32 sig) 4315 * Description 4316 * Send signal *sig* to the thread corresponding to the current task. 4317 * Return 4318 * 0 on success or successfully queued. 4319 * 4320 * **-EBUSY** if work queue under nmi is full. 4321 * 4322 * **-EINVAL** if *sig* is invalid. 4323 * 4324 * **-EPERM** if no permission to send the *sig*. 4325 * 4326 * **-EAGAIN** if bpf program can try again. 4327 * 4328 * u64 bpf_jiffies64(void) 4329 * Description 4330 * Obtain the 64bit jiffies 4331 * Return 4332 * The 64 bit jiffies 4333 * 4334 * long bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags) 4335 * Description 4336 * For an eBPF program attached to a perf event, retrieve the 4337 * branch records (**struct perf_branch_entry**) associated to *ctx* 4338 * and store it in the buffer pointed by *buf* up to size 4339 * *size* bytes. 4340 * Return 4341 * On success, number of bytes written to *buf*. On error, a 4342 * negative value. 4343 * 4344 * The *flags* can be set to **BPF_F_GET_BRANCH_RECORDS_SIZE** to 4345 * instead return the number of bytes required to store all the 4346 * branch entries. If this flag is set, *buf* may be NULL. 4347 * 4348 * **-EINVAL** if arguments invalid or **size** not a multiple 4349 * of **sizeof**\ (**struct perf_branch_entry**\ ). 4350 * 4351 * **-ENOENT** if architecture does not support branch records. 4352 * 4353 * long bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size) 4354 * Description 4355 * Returns 0 on success, values for *pid* and *tgid* as seen from the current 4356 * *namespace* will be returned in *nsdata*. 4357 * Return 4358 * 0 on success, or one of the following in case of failure: 4359 * 4360 * **-EINVAL** if dev and inum supplied don't match dev_t and inode number 4361 * with nsfs of current task, or if dev conversion to dev_t lost high bits. 4362 * 4363 * **-ENOENT** if pidns does not exists for the current task. 4364 * 4365 * long bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) 4366 * Description 4367 * Write raw *data* blob into a special BPF perf event held by 4368 * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf 4369 * event must have the following attributes: **PERF_SAMPLE_RAW** 4370 * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and 4371 * **PERF_COUNT_SW_BPF_OUTPUT** as **config**. 4372 * 4373 * The *flags* are used to indicate the index in *map* for which 4374 * the value must be put, masked with **BPF_F_INDEX_MASK**. 4375 * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** 4376 * to indicate that the index of the current CPU core should be 4377 * used. 4378 * 4379 * The value to write, of *size*, is passed through eBPF stack and 4380 * pointed by *data*. 4381 * 4382 * *ctx* is a pointer to in-kernel struct xdp_buff. 4383 * 4384 * This helper is similar to **bpf_perf_eventoutput**\ () but 4385 * restricted to raw_tracepoint bpf programs. 4386 * Return 4387 * 0 on success, or a negative error in case of failure. 4388 * 4389 * u64 bpf_get_netns_cookie(void *ctx) 4390 * Description 4391 * Retrieve the cookie (generated by the kernel) of the network 4392 * namespace the input *ctx* is associated with. The network 4393 * namespace cookie remains stable for its lifetime and provides 4394 * a global identifier that can be assumed unique. If *ctx* is 4395 * NULL, then the helper returns the cookie for the initial 4396 * network namespace. The cookie itself is very similar to that 4397 * of **bpf_get_socket_cookie**\ () helper, but for network 4398 * namespaces instead of sockets. 4399 * Return 4400 * A 8-byte long opaque number. 4401 * 4402 * u64 bpf_get_current_ancestor_cgroup_id(int ancestor_level) 4403 * Description 4404 * Return id of cgroup v2 that is ancestor of the cgroup associated 4405 * with the current task at the *ancestor_level*. The root cgroup 4406 * is at *ancestor_level* zero and each step down the hierarchy 4407 * increments the level. If *ancestor_level* == level of cgroup 4408 * associated with the current task, then return value will be the 4409 * same as that of **bpf_get_current_cgroup_id**\ (). 4410 * 4411 * The helper is useful to implement policies based on cgroups 4412 * that are upper in hierarchy than immediate cgroup associated 4413 * with the current task. 4414 * 4415 * The format of returned id and helper limitations are same as in 4416 * **bpf_get_current_cgroup_id**\ (). 4417 * Return 4418 * The id is returned or 0 in case the id could not be retrieved. 4419 * 4420 * long bpf_sk_assign(struct sk_buff *skb, void *sk, u64 flags) 4421 * Description 4422 * Helper is overloaded depending on BPF program type. This 4423 * description applies to **BPF_PROG_TYPE_SCHED_CLS** and 4424 * **BPF_PROG_TYPE_SCHED_ACT** programs. 4425 * 4426 * Assign the *sk* to the *skb*. When combined with appropriate 4427 * routing configuration to receive the packet towards the socket, 4428 * will cause *skb* to be delivered to the specified socket. 4429 * Subsequent redirection of *skb* via **bpf_redirect**\ (), 4430 * **bpf_clone_redirect**\ () or other methods outside of BPF may 4431 * interfere with successful delivery to the socket. 4432 * 4433 * This operation is only valid from TC ingress path. 4434 * 4435 * The *flags* argument must be zero. 4436 * Return 4437 * 0 on success, or a negative error in case of failure: 4438 * 4439 * **-EINVAL** if specified *flags* are not supported. 4440 * 4441 * **-ENOENT** if the socket is unavailable for assignment. 4442 * 4443 * **-ENETUNREACH** if the socket is unreachable (wrong netns). 4444 * 4445 * **-EOPNOTSUPP** if the operation is not supported, for example 4446 * a call from outside of TC ingress. 4447 * 4448 * long bpf_sk_assign(struct bpf_sk_lookup *ctx, struct bpf_sock *sk, u64 flags) 4449 * Description 4450 * Helper is overloaded depending on BPF program type. This 4451 * description applies to **BPF_PROG_TYPE_SK_LOOKUP** programs. 4452 * 4453 * Select the *sk* as a result of a socket lookup. 4454 * 4455 * For the operation to succeed passed socket must be compatible 4456 * with the packet description provided by the *ctx* object. 4457 * 4458 * L4 protocol (**IPPROTO_TCP** or **IPPROTO_UDP**) must 4459 * be an exact match. While IP family (**AF_INET** or 4460 * **AF_INET6**) must be compatible, that is IPv6 sockets 4461 * that are not v6-only can be selected for IPv4 packets. 4462 * 4463 * Only TCP listeners and UDP unconnected sockets can be 4464 * selected. *sk* can also be NULL to reset any previous 4465 * selection. 4466 * 4467 * *flags* argument can combination of following values: 4468 * 4469 * * **BPF_SK_LOOKUP_F_REPLACE** to override the previous 4470 * socket selection, potentially done by a BPF program 4471 * that ran before us. 4472 * 4473 * * **BPF_SK_LOOKUP_F_NO_REUSEPORT** to skip 4474 * load-balancing within reuseport group for the socket 4475 * being selected. 4476 * 4477 * On success *ctx->sk* will point to the selected socket. 4478 * 4479 * Return 4480 * 0 on success, or a negative errno in case of failure. 4481 * 4482 * * **-EAFNOSUPPORT** if socket family (*sk->family*) is 4483 * not compatible with packet family (*ctx->family*). 4484 * 4485 * * **-EEXIST** if socket has been already selected, 4486 * potentially by another program, and 4487 * **BPF_SK_LOOKUP_F_REPLACE** flag was not specified. 4488 * 4489 * * **-EINVAL** if unsupported flags were specified. 4490 * 4491 * * **-EPROTOTYPE** if socket L4 protocol 4492 * (*sk->protocol*) doesn't match packet protocol 4493 * (*ctx->protocol*). 4494 * 4495 * * **-ESOCKTNOSUPPORT** if socket is not in allowed 4496 * state (TCP listening or UDP unconnected). 4497 * 4498 * u64 bpf_ktime_get_boot_ns(void) 4499 * Description 4500 * Return the time elapsed since system boot, in nanoseconds. 4501 * Does include the time the system was suspended. 4502 * See: **clock_gettime**\ (**CLOCK_BOOTTIME**) 4503 * Return 4504 * Current *ktime*. 4505 * 4506 * long bpf_seq_printf(struct seq_file *m, const char *fmt, u32 fmt_size, const void *data, u32 data_len) 4507 * Description 4508 * **bpf_seq_printf**\ () uses seq_file **seq_printf**\ () to print 4509 * out the format string. 4510 * The *m* represents the seq_file. The *fmt* and *fmt_size* are for 4511 * the format string itself. The *data* and *data_len* are format string 4512 * arguments. The *data* are a **u64** array and corresponding format string 4513 * values are stored in the array. For strings and pointers where pointees 4514 * are accessed, only the pointer values are stored in the *data* array. 4515 * The *data_len* is the size of *data* in bytes - must be a multiple of 8. 4516 * 4517 * Formats **%s**, **%p{i,I}{4,6}** requires to read kernel memory. 4518 * Reading kernel memory may fail due to either invalid address or 4519 * valid address but requiring a major memory fault. If reading kernel memory 4520 * fails, the string for **%s** will be an empty string, and the ip 4521 * address for **%p{i,I}{4,6}** will be 0. Not returning error to 4522 * bpf program is consistent with what **bpf_trace_printk**\ () does for now. 4523 * Return 4524 * 0 on success, or a negative error in case of failure: 4525 * 4526 * **-EBUSY** if per-CPU memory copy buffer is busy, can try again 4527 * by returning 1 from bpf program. 4528 * 4529 * **-EINVAL** if arguments are invalid, or if *fmt* is invalid/unsupported. 4530 * 4531 * **-E2BIG** if *fmt* contains too many format specifiers. 4532 * 4533 * **-EOVERFLOW** if an overflow happened: The same object will be tried again. 4534 * 4535 * long bpf_seq_write(struct seq_file *m, const void *data, u32 len) 4536 * Description 4537 * **bpf_seq_write**\ () uses seq_file **seq_write**\ () to write the data. 4538 * The *m* represents the seq_file. The *data* and *len* represent the 4539 * data to write in bytes. 4540 * Return 4541 * 0 on success, or a negative error in case of failure: 4542 * 4543 * **-EOVERFLOW** if an overflow happened: The same object will be tried again. 4544 * 4545 * u64 bpf_sk_cgroup_id(void *sk) 4546 * Description 4547 * Return the cgroup v2 id of the socket *sk*. 4548 * 4549 * *sk* must be a non-**NULL** pointer to a socket, e.g. one 4550 * returned from **bpf_sk_lookup_xxx**\ (), 4551 * **bpf_sk_fullsock**\ (), etc. The format of returned id is 4552 * same as in **bpf_skb_cgroup_id**\ (). 4553 * 4554 * This helper is available only if the kernel was compiled with 4555 * the **CONFIG_SOCK_CGROUP_DATA** configuration option. 4556 * Return 4557 * The id is returned or 0 in case the id could not be retrieved. 4558 * 4559 * u64 bpf_sk_ancestor_cgroup_id(void *sk, int ancestor_level) 4560 * Description 4561 * Return id of cgroup v2 that is ancestor of cgroup associated 4562 * with the *sk* at the *ancestor_level*. The root cgroup is at 4563 * *ancestor_level* zero and each step down the hierarchy 4564 * increments the level. If *ancestor_level* == level of cgroup 4565 * associated with *sk*, then return value will be same as that 4566 * of **bpf_sk_cgroup_id**\ (). 4567 * 4568 * The helper is useful to implement policies based on cgroups 4569 * that are upper in hierarchy than immediate cgroup associated 4570 * with *sk*. 4571 * 4572 * The format of returned id and helper limitations are same as in 4573 * **bpf_sk_cgroup_id**\ (). 4574 * Return 4575 * The id is returned or 0 in case the id could not be retrieved. 4576 * 4577 * long bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags) 4578 * Description 4579 * Copy *size* bytes from *data* into a ring buffer *ringbuf*. 4580 * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification 4581 * of new data availability is sent. 4582 * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification 4583 * of new data availability is sent unconditionally. 4584 * If **0** is specified in *flags*, an adaptive notification 4585 * of new data availability is sent. 4586 * 4587 * An adaptive notification is a notification sent whenever the user-space 4588 * process has caught up and consumed all available payloads. In case the user-space 4589 * process is still processing a previous payload, then no notification is needed 4590 * as it will process the newly added payload automatically. 4591 * Return 4592 * 0 on success, or a negative error in case of failure. 4593 * 4594 * void *bpf_ringbuf_reserve(void *ringbuf, u64 size, u64 flags) 4595 * Description 4596 * Reserve *size* bytes of payload in a ring buffer *ringbuf*. 4597 * *flags* must be 0. 4598 * Return 4599 * Valid pointer with *size* bytes of memory available; NULL, 4600 * otherwise. 4601 * 4602 * void bpf_ringbuf_submit(void *data, u64 flags) 4603 * Description 4604 * Submit reserved ring buffer sample, pointed to by *data*. 4605 * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification 4606 * of new data availability is sent. 4607 * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification 4608 * of new data availability is sent unconditionally. 4609 * If **0** is specified in *flags*, an adaptive notification 4610 * of new data availability is sent. 4611 * 4612 * See 'bpf_ringbuf_output()' for the definition of adaptive notification. 4613 * Return 4614 * Nothing. Always succeeds. 4615 * 4616 * void bpf_ringbuf_discard(void *data, u64 flags) 4617 * Description 4618 * Discard reserved ring buffer sample, pointed to by *data*. 4619 * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification 4620 * of new data availability is sent. 4621 * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification 4622 * of new data availability is sent unconditionally. 4623 * If **0** is specified in *flags*, an adaptive notification 4624 * of new data availability is sent. 4625 * 4626 * See 'bpf_ringbuf_output()' for the definition of adaptive notification. 4627 * Return 4628 * Nothing. Always succeeds. 4629 * 4630 * u64 bpf_ringbuf_query(void *ringbuf, u64 flags) 4631 * Description 4632 * Query various characteristics of provided ring buffer. What 4633 * exactly is queries is determined by *flags*: 4634 * 4635 * * **BPF_RB_AVAIL_DATA**: Amount of data not yet consumed. 4636 * * **BPF_RB_RING_SIZE**: The size of ring buffer. 4637 * * **BPF_RB_CONS_POS**: Consumer position (can wrap around). 4638 * * **BPF_RB_PROD_POS**: Producer(s) position (can wrap around). 4639 * 4640 * Data returned is just a momentary snapshot of actual values 4641 * and could be inaccurate, so this facility should be used to 4642 * power heuristics and for reporting, not to make 100% correct 4643 * calculation. 4644 * Return 4645 * Requested value, or 0, if *flags* are not recognized. 4646 * 4647 * long bpf_csum_level(struct sk_buff *skb, u64 level) 4648 * Description 4649 * Change the skbs checksum level by one layer up or down, or 4650 * reset it entirely to none in order to have the stack perform 4651 * checksum validation. The level is applicable to the following 4652 * protocols: TCP, UDP, GRE, SCTP, FCOE. For example, a decap of 4653 * | ETH | IP | UDP | GUE | IP | TCP | into | ETH | IP | TCP | 4654 * through **bpf_skb_adjust_room**\ () helper with passing in 4655 * **BPF_F_ADJ_ROOM_NO_CSUM_RESET** flag would require one call 4656 * to **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_DEC** since 4657 * the UDP header is removed. Similarly, an encap of the latter 4658 * into the former could be accompanied by a helper call to 4659 * **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_INC** if the 4660 * skb is still intended to be processed in higher layers of the 4661 * stack instead of just egressing at tc. 4662 * 4663 * There are three supported level settings at this time: 4664 * 4665 * * **BPF_CSUM_LEVEL_INC**: Increases skb->csum_level for skbs 4666 * with CHECKSUM_UNNECESSARY. 4667 * * **BPF_CSUM_LEVEL_DEC**: Decreases skb->csum_level for skbs 4668 * with CHECKSUM_UNNECESSARY. 4669 * * **BPF_CSUM_LEVEL_RESET**: Resets skb->csum_level to 0 and 4670 * sets CHECKSUM_NONE to force checksum validation by the stack. 4671 * * **BPF_CSUM_LEVEL_QUERY**: No-op, returns the current 4672 * skb->csum_level. 4673 * Return 4674 * 0 on success, or a negative error in case of failure. In the 4675 * case of **BPF_CSUM_LEVEL_QUERY**, the current skb->csum_level 4676 * is returned or the error code -EACCES in case the skb is not 4677 * subject to CHECKSUM_UNNECESSARY. 4678 * 4679 * struct tcp6_sock *bpf_skc_to_tcp6_sock(void *sk) 4680 * Description 4681 * Dynamically cast a *sk* pointer to a *tcp6_sock* pointer. 4682 * Return 4683 * *sk* if casting is valid, or **NULL** otherwise. 4684 * 4685 * struct tcp_sock *bpf_skc_to_tcp_sock(void *sk) 4686 * Description 4687 * Dynamically cast a *sk* pointer to a *tcp_sock* pointer. 4688 * Return 4689 * *sk* if casting is valid, or **NULL** otherwise. 4690 * 4691 * struct tcp_timewait_sock *bpf_skc_to_tcp_timewait_sock(void *sk) 4692 * Description 4693 * Dynamically cast a *sk* pointer to a *tcp_timewait_sock* pointer. 4694 * Return 4695 * *sk* if casting is valid, or **NULL** otherwise. 4696 * 4697 * struct tcp_request_sock *bpf_skc_to_tcp_request_sock(void *sk) 4698 * Description 4699 * Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer. 4700 * Return 4701 * *sk* if casting is valid, or **NULL** otherwise. 4702 * 4703 * struct udp6_sock *bpf_skc_to_udp6_sock(void *sk) 4704 * Description 4705 * Dynamically cast a *sk* pointer to a *udp6_sock* pointer. 4706 * Return 4707 * *sk* if casting is valid, or **NULL** otherwise. 4708 * 4709 * long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags) 4710 * Description 4711 * Return a user or a kernel stack in bpf program provided buffer. 4712 * Note: the user stack will only be populated if the *task* is 4713 * the current task; all other tasks will return -EOPNOTSUPP. 4714 * To achieve this, the helper needs *task*, which is a valid 4715 * pointer to **struct task_struct**. To store the stacktrace, the 4716 * bpf program provides *buf* with a nonnegative *size*. 4717 * 4718 * The last argument, *flags*, holds the number of stack frames to 4719 * skip (from 0 to 255), masked with 4720 * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set 4721 * the following flags: 4722 * 4723 * **BPF_F_USER_STACK** 4724 * Collect a user space stack instead of a kernel stack. 4725 * The *task* must be the current task. 4726 * **BPF_F_USER_BUILD_ID** 4727 * Collect buildid+offset instead of ips for user stack, 4728 * only valid if **BPF_F_USER_STACK** is also specified. 4729 * 4730 * **bpf_get_task_stack**\ () can collect up to 4731 * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject 4732 * to sufficient large buffer size. Note that 4733 * this limit can be controlled with the **sysctl** program, and 4734 * that it should be manually increased in order to profile long 4735 * user stacks (such as stacks for Java programs). To do so, use: 4736 * 4737 * :: 4738 * 4739 * # sysctl kernel.perf_event_max_stack=<new value> 4740 * Return 4741 * The non-negative copied *buf* length equal to or less than 4742 * *size* on success, or a negative error in case of failure. 4743 * 4744 * long bpf_load_hdr_opt(struct bpf_sock_ops *skops, void *searchby_res, u32 len, u64 flags) 4745 * Description 4746 * Load header option. Support reading a particular TCP header 4747 * option for bpf program (**BPF_PROG_TYPE_SOCK_OPS**). 4748 * 4749 * If *flags* is 0, it will search the option from the 4750 * *skops*\ **->skb_data**. The comment in **struct bpf_sock_ops** 4751 * has details on what skb_data contains under different 4752 * *skops*\ **->op**. 4753 * 4754 * The first byte of the *searchby_res* specifies the 4755 * kind that it wants to search. 4756 * 4757 * If the searching kind is an experimental kind 4758 * (i.e. 253 or 254 according to RFC6994). It also 4759 * needs to specify the "magic" which is either 4760 * 2 bytes or 4 bytes. It then also needs to 4761 * specify the size of the magic by using 4762 * the 2nd byte which is "kind-length" of a TCP 4763 * header option and the "kind-length" also 4764 * includes the first 2 bytes "kind" and "kind-length" 4765 * itself as a normal TCP header option also does. 4766 * 4767 * For example, to search experimental kind 254 with 4768 * 2 byte magic 0xeB9F, the searchby_res should be 4769 * [ 254, 4, 0xeB, 0x9F, 0, 0, .... 0 ]. 4770 * 4771 * To search for the standard window scale option (3), 4772 * the *searchby_res* should be [ 3, 0, 0, .... 0 ]. 4773 * Note, kind-length must be 0 for regular option. 4774 * 4775 * Searching for No-Op (0) and End-of-Option-List (1) are 4776 * not supported. 4777 * 4778 * *len* must be at least 2 bytes which is the minimal size 4779 * of a header option. 4780 * 4781 * Supported flags: 4782 * 4783 * * **BPF_LOAD_HDR_OPT_TCP_SYN** to search from the 4784 * saved_syn packet or the just-received syn packet. 4785 * 4786 * Return 4787 * > 0 when found, the header option is copied to *searchby_res*. 4788 * The return value is the total length copied. On failure, a 4789 * negative error code is returned: 4790 * 4791 * **-EINVAL** if a parameter is invalid. 4792 * 4793 * **-ENOMSG** if the option is not found. 4794 * 4795 * **-ENOENT** if no syn packet is available when 4796 * **BPF_LOAD_HDR_OPT_TCP_SYN** is used. 4797 * 4798 * **-ENOSPC** if there is not enough space. Only *len* number of 4799 * bytes are copied. 4800 * 4801 * **-EFAULT** on failure to parse the header options in the 4802 * packet. 4803 * 4804 * **-EPERM** if the helper cannot be used under the current 4805 * *skops*\ **->op**. 4806 * 4807 * long bpf_store_hdr_opt(struct bpf_sock_ops *skops, const void *from, u32 len, u64 flags) 4808 * Description 4809 * Store header option. The data will be copied 4810 * from buffer *from* with length *len* to the TCP header. 4811 * 4812 * The buffer *from* should have the whole option that 4813 * includes the kind, kind-length, and the actual 4814 * option data. The *len* must be at least kind-length 4815 * long. The kind-length does not have to be 4 byte 4816 * aligned. The kernel will take care of the padding 4817 * and setting the 4 bytes aligned value to th->doff. 4818 * 4819 * This helper will check for duplicated option 4820 * by searching the same option in the outgoing skb. 4821 * 4822 * This helper can only be called during 4823 * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**. 4824 * 4825 * Return 4826 * 0 on success, or negative error in case of failure: 4827 * 4828 * **-EINVAL** If param is invalid. 4829 * 4830 * **-ENOSPC** if there is not enough space in the header. 4831 * Nothing has been written 4832 * 4833 * **-EEXIST** if the option already exists. 4834 * 4835 * **-EFAULT** on failure to parse the existing header options. 4836 * 4837 * **-EPERM** if the helper cannot be used under the current 4838 * *skops*\ **->op**. 4839 * 4840 * long bpf_reserve_hdr_opt(struct bpf_sock_ops *skops, u32 len, u64 flags) 4841 * Description 4842 * Reserve *len* bytes for the bpf header option. The 4843 * space will be used by **bpf_store_hdr_opt**\ () later in 4844 * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**. 4845 * 4846 * If **bpf_reserve_hdr_opt**\ () is called multiple times, 4847 * the total number of bytes will be reserved. 4848 * 4849 * This helper can only be called during 4850 * **BPF_SOCK_OPS_HDR_OPT_LEN_CB**. 4851 * 4852 * Return 4853 * 0 on success, or negative error in case of failure: 4854 * 4855 * **-EINVAL** if a parameter is invalid. 4856 * 4857 * **-ENOSPC** if there is not enough space in the header. 4858 * 4859 * **-EPERM** if the helper cannot be used under the current 4860 * *skops*\ **->op**. 4861 * 4862 * void *bpf_inode_storage_get(struct bpf_map *map, void *inode, void *value, u64 flags) 4863 * Description 4864 * Get a bpf_local_storage from an *inode*. 4865 * 4866 * Logically, it could be thought of as getting the value from 4867 * a *map* with *inode* as the **key**. From this 4868 * perspective, the usage is not much different from 4869 * **bpf_map_lookup_elem**\ (*map*, **&**\ *inode*) except this 4870 * helper enforces the key must be an inode and the map must also 4871 * be a **BPF_MAP_TYPE_INODE_STORAGE**. 4872 * 4873 * Underneath, the value is stored locally at *inode* instead of 4874 * the *map*. The *map* is used as the bpf-local-storage 4875 * "type". The bpf-local-storage "type" (i.e. the *map*) is 4876 * searched against all bpf_local_storage residing at *inode*. 4877 * 4878 * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be 4879 * used such that a new bpf_local_storage will be 4880 * created if one does not exist. *value* can be used 4881 * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify 4882 * the initial value of a bpf_local_storage. If *value* is 4883 * **NULL**, the new bpf_local_storage will be zero initialized. 4884 * Return 4885 * A bpf_local_storage pointer is returned on success. 4886 * 4887 * **NULL** if not found or there was an error in adding 4888 * a new bpf_local_storage. 4889 * 4890 * int bpf_inode_storage_delete(struct bpf_map *map, void *inode) 4891 * Description 4892 * Delete a bpf_local_storage from an *inode*. 4893 * Return 4894 * 0 on success. 4895 * 4896 * **-ENOENT** if the bpf_local_storage cannot be found. 4897 * 4898 * long bpf_d_path(const struct path *path, char *buf, u32 sz) 4899 * Description 4900 * Return full path for given **struct path** object, which 4901 * needs to be the kernel BTF *path* object. The path is 4902 * returned in the provided buffer *buf* of size *sz* and 4903 * is zero terminated. 4904 * 4905 * Return 4906 * On success, the strictly positive length of the string, 4907 * including the trailing NUL character. On error, a negative 4908 * value. 4909 * 4910 * long bpf_copy_from_user(void *dst, u32 size, const void *user_ptr) 4911 * Description 4912 * Read *size* bytes from user space address *user_ptr* and store 4913 * the data in *dst*. This is a wrapper of **copy_from_user**\ (). 4914 * Return 4915 * 0 on success, or a negative error in case of failure. 4916 * 4917 * long bpf_snprintf_btf(char *str, u32 str_size, struct btf_ptr *ptr, u32 btf_ptr_size, u64 flags) 4918 * Description 4919 * Use BTF to store a string representation of *ptr*->ptr in *str*, 4920 * using *ptr*->type_id. This value should specify the type 4921 * that *ptr*->ptr points to. LLVM __builtin_btf_type_id(type, 1) 4922 * can be used to look up vmlinux BTF type ids. Traversing the 4923 * data structure using BTF, the type information and values are 4924 * stored in the first *str_size* - 1 bytes of *str*. Safe copy of 4925 * the pointer data is carried out to avoid kernel crashes during 4926 * operation. Smaller types can use string space on the stack; 4927 * larger programs can use map data to store the string 4928 * representation. 4929 * 4930 * The string can be subsequently shared with userspace via 4931 * bpf_perf_event_output() or ring buffer interfaces. 4932 * bpf_trace_printk() is to be avoided as it places too small 4933 * a limit on string size to be useful. 4934 * 4935 * *flags* is a combination of 4936 * 4937 * **BTF_F_COMPACT** 4938 * no formatting around type information 4939 * **BTF_F_NONAME** 4940 * no struct/union member names/types 4941 * **BTF_F_PTR_RAW** 4942 * show raw (unobfuscated) pointer values; 4943 * equivalent to printk specifier %px. 4944 * **BTF_F_ZERO** 4945 * show zero-valued struct/union members; they 4946 * are not displayed by default 4947 * 4948 * Return 4949 * The number of bytes that were written (or would have been 4950 * written if output had to be truncated due to string size), 4951 * or a negative error in cases of failure. 4952 * 4953 * long bpf_seq_printf_btf(struct seq_file *m, struct btf_ptr *ptr, u32 ptr_size, u64 flags) 4954 * Description 4955 * Use BTF to write to seq_write a string representation of 4956 * *ptr*->ptr, using *ptr*->type_id as per bpf_snprintf_btf(). 4957 * *flags* are identical to those used for bpf_snprintf_btf. 4958 * Return 4959 * 0 on success or a negative error in case of failure. 4960 * 4961 * u64 bpf_skb_cgroup_classid(struct sk_buff *skb) 4962 * Description 4963 * See **bpf_get_cgroup_classid**\ () for the main description. 4964 * This helper differs from **bpf_get_cgroup_classid**\ () in that 4965 * the cgroup v1 net_cls class is retrieved only from the *skb*'s 4966 * associated socket instead of the current process. 4967 * Return 4968 * The id is returned or 0 in case the id could not be retrieved. 4969 * 4970 * long bpf_redirect_neigh(u32 ifindex, struct bpf_redir_neigh *params, int plen, u64 flags) 4971 * Description 4972 * Redirect the packet to another net device of index *ifindex* 4973 * and fill in L2 addresses from neighboring subsystem. This helper 4974 * is somewhat similar to **bpf_redirect**\ (), except that it 4975 * populates L2 addresses as well, meaning, internally, the helper 4976 * relies on the neighbor lookup for the L2 address of the nexthop. 4977 * 4978 * The helper will perform a FIB lookup based on the skb's 4979 * networking header to get the address of the next hop, unless 4980 * this is supplied by the caller in the *params* argument. The 4981 * *plen* argument indicates the len of *params* and should be set 4982 * to 0 if *params* is NULL. 4983 * 4984 * The *flags* argument is reserved and must be 0. The helper is 4985 * currently only supported for tc BPF program types, and enabled 4986 * for IPv4 and IPv6 protocols. 4987 * Return 4988 * The helper returns **TC_ACT_REDIRECT** on success or 4989 * **TC_ACT_SHOT** on error. 4990 * 4991 * void *bpf_per_cpu_ptr(const void *percpu_ptr, u32 cpu) 4992 * Description 4993 * Take a pointer to a percpu ksym, *percpu_ptr*, and return a 4994 * pointer to the percpu kernel variable on *cpu*. A ksym is an 4995 * extern variable decorated with '__ksym'. For ksym, there is a 4996 * global var (either static or global) defined of the same name 4997 * in the kernel. The ksym is percpu if the global var is percpu. 4998 * The returned pointer points to the global percpu var on *cpu*. 4999 * 5000 * bpf_per_cpu_ptr() has the same semantic as per_cpu_ptr() in the 5001 * kernel, except that bpf_per_cpu_ptr() may return NULL. This 5002 * happens if *cpu* is larger than nr_cpu_ids. The caller of 5003 * bpf_per_cpu_ptr() must check the returned value. 5004 * Return 5005 * A pointer pointing to the kernel percpu variable on *cpu*, or 5006 * NULL, if *cpu* is invalid. 5007 * 5008 * void *bpf_this_cpu_ptr(const void *percpu_ptr) 5009 * Description 5010 * Take a pointer to a percpu ksym, *percpu_ptr*, and return a 5011 * pointer to the percpu kernel variable on this cpu. See the 5012 * description of 'ksym' in **bpf_per_cpu_ptr**\ (). 5013 * 5014 * bpf_this_cpu_ptr() has the same semantic as this_cpu_ptr() in 5015 * the kernel. Different from **bpf_per_cpu_ptr**\ (), it would 5016 * never return NULL. 5017 * Return 5018 * A pointer pointing to the kernel percpu variable on this cpu. 5019 * 5020 * long bpf_redirect_peer(u32 ifindex, u64 flags) 5021 * Description 5022 * Redirect the packet to another net device of index *ifindex*. 5023 * This helper is somewhat similar to **bpf_redirect**\ (), except 5024 * that the redirection happens to the *ifindex*' peer device and 5025 * the netns switch takes place from ingress to ingress without 5026 * going through the CPU's backlog queue. 5027 * 5028 * *skb*\ **->mark** and *skb*\ **->tstamp** are not cleared during 5029 * the netns switch. 5030 * 5031 * The *flags* argument is reserved and must be 0. The helper is 5032 * currently only supported for tc BPF program types at the 5033 * ingress hook and for veth and netkit target device types. The 5034 * peer device must reside in a different network namespace. 5035 * Return 5036 * The helper returns **TC_ACT_REDIRECT** on success or 5037 * **TC_ACT_SHOT** on error. 5038 * 5039 * void *bpf_task_storage_get(struct bpf_map *map, struct task_struct *task, void *value, u64 flags) 5040 * Description 5041 * Get a bpf_local_storage from the *task*. 5042 * 5043 * Logically, it could be thought of as getting the value from 5044 * a *map* with *task* as the **key**. From this 5045 * perspective, the usage is not much different from 5046 * **bpf_map_lookup_elem**\ (*map*, **&**\ *task*) except this 5047 * helper enforces the key must be a task_struct and the map must also 5048 * be a **BPF_MAP_TYPE_TASK_STORAGE**. 5049 * 5050 * Underneath, the value is stored locally at *task* instead of 5051 * the *map*. The *map* is used as the bpf-local-storage 5052 * "type". The bpf-local-storage "type" (i.e. the *map*) is 5053 * searched against all bpf_local_storage residing at *task*. 5054 * 5055 * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be 5056 * used such that a new bpf_local_storage will be 5057 * created if one does not exist. *value* can be used 5058 * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify 5059 * the initial value of a bpf_local_storage. If *value* is 5060 * **NULL**, the new bpf_local_storage will be zero initialized. 5061 * Return 5062 * A bpf_local_storage pointer is returned on success. 5063 * 5064 * **NULL** if not found or there was an error in adding 5065 * a new bpf_local_storage. 5066 * 5067 * long bpf_task_storage_delete(struct bpf_map *map, struct task_struct *task) 5068 * Description 5069 * Delete a bpf_local_storage from a *task*. 5070 * Return 5071 * 0 on success. 5072 * 5073 * **-ENOENT** if the bpf_local_storage cannot be found. 5074 * 5075 * struct task_struct *bpf_get_current_task_btf(void) 5076 * Description 5077 * Return a BTF pointer to the "current" task. 5078 * This pointer can also be used in helpers that accept an 5079 * *ARG_PTR_TO_BTF_ID* of type *task_struct*. 5080 * Return 5081 * Pointer to the current task. 5082 * 5083 * long bpf_bprm_opts_set(struct linux_binprm *bprm, u64 flags) 5084 * Description 5085 * Set or clear certain options on *bprm*: 5086 * 5087 * **BPF_F_BPRM_SECUREEXEC** Set the secureexec bit 5088 * which sets the **AT_SECURE** auxv for glibc. The bit 5089 * is cleared if the flag is not specified. 5090 * Return 5091 * **-EINVAL** if invalid *flags* are passed, zero otherwise. 5092 * 5093 * u64 bpf_ktime_get_coarse_ns(void) 5094 * Description 5095 * Return a coarse-grained version of the time elapsed since 5096 * system boot, in nanoseconds. Does not include time the system 5097 * was suspended. 5098 * 5099 * See: **clock_gettime**\ (**CLOCK_MONOTONIC_COARSE**) 5100 * Return 5101 * Current *ktime*. 5102 * 5103 * long bpf_ima_inode_hash(struct inode *inode, void *dst, u32 size) 5104 * Description 5105 * Returns the stored IMA hash of the *inode* (if it's available). 5106 * If the hash is larger than *size*, then only *size* 5107 * bytes will be copied to *dst* 5108 * Return 5109 * The **hash_algo** is returned on success, 5110 * **-EOPNOTSUPP** if IMA is disabled or **-EINVAL** if 5111 * invalid arguments are passed. 5112 * 5113 * struct socket *bpf_sock_from_file(struct file *file) 5114 * Description 5115 * If the given file represents a socket, returns the associated 5116 * socket. 5117 * Return 5118 * A pointer to a struct socket on success or NULL if the file is 5119 * not a socket. 5120 * 5121 * long bpf_check_mtu(void *ctx, u32 ifindex, u32 *mtu_len, s32 len_diff, u64 flags) 5122 * Description 5123 * Check packet size against exceeding MTU of net device (based 5124 * on *ifindex*). This helper will likely be used in combination 5125 * with helpers that adjust/change the packet size. 5126 * 5127 * The argument *len_diff* can be used for querying with a planned 5128 * size change. This allows to check MTU prior to changing packet 5129 * ctx. Providing a *len_diff* adjustment that is larger than the 5130 * actual packet size (resulting in negative packet size) will in 5131 * principle not exceed the MTU, which is why it is not considered 5132 * a failure. Other BPF helpers are needed for performing the 5133 * planned size change; therefore the responsibility for catching 5134 * a negative packet size belongs in those helpers. 5135 * 5136 * Specifying *ifindex* zero means the MTU check is performed 5137 * against the current net device. This is practical if this isn't 5138 * used prior to redirect. 5139 * 5140 * On input *mtu_len* must be a valid pointer, else verifier will 5141 * reject BPF program. If the value *mtu_len* is initialized to 5142 * zero then the ctx packet size is use. When value *mtu_len* is 5143 * provided as input this specify the L3 length that the MTU check 5144 * is done against. Remember XDP and TC length operate at L2, but 5145 * this value is L3 as this correlate to MTU and IP-header tot_len 5146 * values which are L3 (similar behavior as bpf_fib_lookup). 5147 * 5148 * The Linux kernel route table can configure MTUs on a more 5149 * specific per route level, which is not provided by this helper. 5150 * For route level MTU checks use the **bpf_fib_lookup**\ () 5151 * helper. 5152 * 5153 * *ctx* is either **struct xdp_md** for XDP programs or 5154 * **struct sk_buff** for tc cls_act programs. 5155 * 5156 * The *flags* argument can be a combination of one or more of the 5157 * following values: 5158 * 5159 * **BPF_MTU_CHK_SEGS** 5160 * This flag will only works for *ctx* **struct sk_buff**. 5161 * If packet context contains extra packet segment buffers 5162 * (often knows as GSO skb), then MTU check is harder to 5163 * check at this point, because in transmit path it is 5164 * possible for the skb packet to get re-segmented 5165 * (depending on net device features). This could still be 5166 * a MTU violation, so this flag enables performing MTU 5167 * check against segments, with a different violation 5168 * return code to tell it apart. Check cannot use len_diff. 5169 * 5170 * On return *mtu_len* pointer contains the MTU value of the net 5171 * device. Remember the net device configured MTU is the L3 size, 5172 * which is returned here and XDP and TC length operate at L2. 5173 * Helper take this into account for you, but remember when using 5174 * MTU value in your BPF-code. 5175 * 5176 * Return 5177 * * 0 on success, and populate MTU value in *mtu_len* pointer. 5178 * 5179 * * < 0 if any input argument is invalid (*mtu_len* not updated) 5180 * 5181 * MTU violations return positive values, but also populate MTU 5182 * value in *mtu_len* pointer, as this can be needed for 5183 * implementing PMTU handing: 5184 * 5185 * * **BPF_MTU_CHK_RET_FRAG_NEEDED** 5186 * * **BPF_MTU_CHK_RET_SEGS_TOOBIG** 5187 * 5188 * long bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn, void *callback_ctx, u64 flags) 5189 * Description 5190 * For each element in **map**, call **callback_fn** function with 5191 * **map**, **callback_ctx** and other map-specific parameters. 5192 * The **callback_fn** should be a static function and 5193 * the **callback_ctx** should be a pointer to the stack. 5194 * The **flags** is used to control certain aspects of the helper. 5195 * Currently, the **flags** must be 0. 5196 * 5197 * The following are a list of supported map types and their 5198 * respective expected callback signatures: 5199 * 5200 * BPF_MAP_TYPE_HASH, BPF_MAP_TYPE_PERCPU_HASH, 5201 * BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH, 5202 * BPF_MAP_TYPE_ARRAY, BPF_MAP_TYPE_PERCPU_ARRAY 5203 * 5204 * long (\*callback_fn)(struct bpf_map \*map, const void \*key, void \*value, void \*ctx); 5205 * 5206 * For per_cpu maps, the map_value is the value on the cpu where the 5207 * bpf_prog is running. 5208 * 5209 * If **callback_fn** return 0, the helper will continue to the next 5210 * element. If return value is 1, the helper will skip the rest of 5211 * elements and return. Other return values are not used now. 5212 * 5213 * Return 5214 * The number of traversed map elements for success, **-EINVAL** for 5215 * invalid **flags**. 5216 * 5217 * long bpf_snprintf(char *str, u32 str_size, const char *fmt, u64 *data, u32 data_len) 5218 * Description 5219 * Outputs a string into the **str** buffer of size **str_size** 5220 * based on a format string stored in a read-only map pointed by 5221 * **fmt**. 5222 * 5223 * Each format specifier in **fmt** corresponds to one u64 element 5224 * in the **data** array. For strings and pointers where pointees 5225 * are accessed, only the pointer values are stored in the *data* 5226 * array. The *data_len* is the size of *data* in bytes - must be 5227 * a multiple of 8. 5228 * 5229 * Formats **%s** and **%p{i,I}{4,6}** require to read kernel 5230 * memory. Reading kernel memory may fail due to either invalid 5231 * address or valid address but requiring a major memory fault. If 5232 * reading kernel memory fails, the string for **%s** will be an 5233 * empty string, and the ip address for **%p{i,I}{4,6}** will be 0. 5234 * Not returning error to bpf program is consistent with what 5235 * **bpf_trace_printk**\ () does for now. 5236 * 5237 * Return 5238 * The strictly positive length of the formatted string, including 5239 * the trailing zero character. If the return value is greater than 5240 * **str_size**, **str** contains a truncated string, guaranteed to 5241 * be zero-terminated except when **str_size** is 0. 5242 * 5243 * Or **-EBUSY** if the per-CPU memory copy buffer is busy. 5244 * 5245 * long bpf_sys_bpf(u32 cmd, void *attr, u32 attr_size) 5246 * Description 5247 * Execute bpf syscall with given arguments. 5248 * Return 5249 * A syscall result. 5250 * 5251 * long bpf_btf_find_by_name_kind(char *name, int name_sz, u32 kind, int flags) 5252 * Description 5253 * Find BTF type with given name and kind in vmlinux BTF or in module's BTFs. 5254 * Return 5255 * Returns btf_id and btf_obj_fd in lower and upper 32 bits. 5256 * 5257 * long bpf_sys_close(u32 fd) 5258 * Description 5259 * Execute close syscall for given FD. 5260 * Return 5261 * A syscall result. 5262 * 5263 * long bpf_timer_init(struct bpf_timer *timer, struct bpf_map *map, u64 flags) 5264 * Description 5265 * Initialize the timer. 5266 * First 4 bits of *flags* specify clockid. 5267 * Only CLOCK_MONOTONIC, CLOCK_REALTIME, CLOCK_BOOTTIME are allowed. 5268 * All other bits of *flags* are reserved. 5269 * The verifier will reject the program if *timer* is not from 5270 * the same *map*. 5271 * Return 5272 * 0 on success. 5273 * **-EBUSY** if *timer* is already initialized. 5274 * **-EINVAL** if invalid *flags* are passed. 5275 * **-EPERM** if *timer* is in a map that doesn't have any user references. 5276 * The user space should either hold a file descriptor to a map with timers 5277 * or pin such map in bpffs. When map is unpinned or file descriptor is 5278 * closed all timers in the map will be cancelled and freed. 5279 * 5280 * long bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn) 5281 * Description 5282 * Configure the timer to call *callback_fn* static function. 5283 * Return 5284 * 0 on success. 5285 * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier. 5286 * **-EPERM** if *timer* is in a map that doesn't have any user references. 5287 * The user space should either hold a file descriptor to a map with timers 5288 * or pin such map in bpffs. When map is unpinned or file descriptor is 5289 * closed all timers in the map will be cancelled and freed. 5290 * 5291 * long bpf_timer_start(struct bpf_timer *timer, u64 nsecs, u64 flags) 5292 * Description 5293 * Set timer expiration N nanoseconds from the current time. The 5294 * configured callback will be invoked in soft irq context on some cpu 5295 * and will not repeat unless another bpf_timer_start() is made. 5296 * In such case the next invocation can migrate to a different cpu. 5297 * Since struct bpf_timer is a field inside map element the map 5298 * owns the timer. The bpf_timer_set_callback() will increment refcnt 5299 * of BPF program to make sure that callback_fn code stays valid. 5300 * When user space reference to a map reaches zero all timers 5301 * in a map are cancelled and corresponding program's refcnts are 5302 * decremented. This is done to make sure that Ctrl-C of a user 5303 * process doesn't leave any timers running. If map is pinned in 5304 * bpffs the callback_fn can re-arm itself indefinitely. 5305 * bpf_map_update/delete_elem() helpers and user space sys_bpf commands 5306 * cancel and free the timer in the given map element. 5307 * The map can contain timers that invoke callback_fn-s from different 5308 * programs. The same callback_fn can serve different timers from 5309 * different maps if key/value layout matches across maps. 5310 * Every bpf_timer_set_callback() can have different callback_fn. 5311 * 5312 * *flags* can be one of: 5313 * 5314 * **BPF_F_TIMER_ABS** 5315 * Start the timer in absolute expire value instead of the 5316 * default relative one. 5317 * **BPF_F_TIMER_CPU_PIN** 5318 * Timer will be pinned to the CPU of the caller. 5319 * 5320 * Return 5321 * 0 on success. 5322 * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier 5323 * or invalid *flags* are passed. 5324 * 5325 * long bpf_timer_cancel(struct bpf_timer *timer) 5326 * Description 5327 * Cancel the timer and wait for callback_fn to finish if it was running. 5328 * Return 5329 * 0 if the timer was not active. 5330 * 1 if the timer was active. 5331 * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier. 5332 * **-EDEADLK** if callback_fn tried to call bpf_timer_cancel() on its 5333 * own timer which would have led to a deadlock otherwise. 5334 * 5335 * u64 bpf_get_func_ip(void *ctx) 5336 * Description 5337 * Get address of the traced function (for tracing and kprobe programs). 5338 * 5339 * When called for kprobe program attached as uprobe it returns 5340 * probe address for both entry and return uprobe. 5341 * 5342 * Return 5343 * Address of the traced function for kprobe. 5344 * 0 for kprobes placed within the function (not at the entry). 5345 * Address of the probe for uprobe and return uprobe. 5346 * 5347 * u64 bpf_get_attach_cookie(void *ctx) 5348 * Description 5349 * Get bpf_cookie value provided (optionally) during the program 5350 * attachment. It might be different for each individual 5351 * attachment, even if BPF program itself is the same. 5352 * Expects BPF program context *ctx* as a first argument. 5353 * 5354 * Supported for the following program types: 5355 * - kprobe/uprobe; 5356 * - tracepoint; 5357 * - perf_event. 5358 * Return 5359 * Value specified by user at BPF link creation/attachment time 5360 * or 0, if it was not specified. 5361 * 5362 * long bpf_task_pt_regs(struct task_struct *task) 5363 * Description 5364 * Get the struct pt_regs associated with **task**. 5365 * Return 5366 * A pointer to struct pt_regs. 5367 * 5368 * long bpf_get_branch_snapshot(void *entries, u32 size, u64 flags) 5369 * Description 5370 * Get branch trace from hardware engines like Intel LBR. The 5371 * hardware engine is stopped shortly after the helper is 5372 * called. Therefore, the user need to filter branch entries 5373 * based on the actual use case. To capture branch trace 5374 * before the trigger point of the BPF program, the helper 5375 * should be called at the beginning of the BPF program. 5376 * 5377 * The data is stored as struct perf_branch_entry into output 5378 * buffer *entries*. *size* is the size of *entries* in bytes. 5379 * *flags* is reserved for now and must be zero. 5380 * 5381 * Return 5382 * On success, number of bytes written to *buf*. On error, a 5383 * negative value. 5384 * 5385 * **-EINVAL** if *flags* is not zero. 5386 * 5387 * **-ENOENT** if architecture does not support branch records. 5388 * 5389 * long bpf_trace_vprintk(const char *fmt, u32 fmt_size, const void *data, u32 data_len) 5390 * Description 5391 * Behaves like **bpf_trace_printk**\ () helper, but takes an array of u64 5392 * to format and can handle more format args as a result. 5393 * 5394 * Arguments are to be used as in **bpf_seq_printf**\ () helper. 5395 * Return 5396 * The number of bytes written to the buffer, or a negative error 5397 * in case of failure. 5398 * 5399 * struct unix_sock *bpf_skc_to_unix_sock(void *sk) 5400 * Description 5401 * Dynamically cast a *sk* pointer to a *unix_sock* pointer. 5402 * Return 5403 * *sk* if casting is valid, or **NULL** otherwise. 5404 * 5405 * long bpf_kallsyms_lookup_name(const char *name, int name_sz, int flags, u64 *res) 5406 * Description 5407 * Get the address of a kernel symbol, returned in *res*. *res* is 5408 * set to 0 if the symbol is not found. 5409 * Return 5410 * On success, zero. On error, a negative value. 5411 * 5412 * **-EINVAL** if *flags* is not zero. 5413 * 5414 * **-EINVAL** if string *name* is not the same size as *name_sz*. 5415 * 5416 * **-ENOENT** if symbol is not found. 5417 * 5418 * **-EPERM** if caller does not have permission to obtain kernel address. 5419 * 5420 * long bpf_find_vma(struct task_struct *task, u64 addr, void *callback_fn, void *callback_ctx, u64 flags) 5421 * Description 5422 * Find vma of *task* that contains *addr*, call *callback_fn* 5423 * function with *task*, *vma*, and *callback_ctx*. 5424 * The *callback_fn* should be a static function and 5425 * the *callback_ctx* should be a pointer to the stack. 5426 * The *flags* is used to control certain aspects of the helper. 5427 * Currently, the *flags* must be 0. 5428 * 5429 * The expected callback signature is 5430 * 5431 * long (\*callback_fn)(struct task_struct \*task, struct vm_area_struct \*vma, void \*callback_ctx); 5432 * 5433 * Return 5434 * 0 on success. 5435 * **-ENOENT** if *task->mm* is NULL, or no vma contains *addr*. 5436 * **-EBUSY** if failed to try lock mmap_lock. 5437 * **-EINVAL** for invalid **flags**. 5438 * 5439 * long bpf_loop(u32 nr_loops, void *callback_fn, void *callback_ctx, u64 flags) 5440 * Description 5441 * For **nr_loops**, call **callback_fn** function 5442 * with **callback_ctx** as the context parameter. 5443 * The **callback_fn** should be a static function and 5444 * the **callback_ctx** should be a pointer to the stack. 5445 * The **flags** is used to control certain aspects of the helper. 5446 * Currently, the **flags** must be 0. Currently, nr_loops is 5447 * limited to 1 << 23 (~8 million) loops. 5448 * 5449 * long (\*callback_fn)(u64 index, void \*ctx); 5450 * 5451 * where **index** is the current index in the loop. The index 5452 * is zero-indexed. 5453 * 5454 * If **callback_fn** returns 0, the helper will continue to the next 5455 * loop. If return value is 1, the helper will skip the rest of 5456 * the loops and return. Other return values are not used now, 5457 * and will be rejected by the verifier. 5458 * 5459 * Return 5460 * The number of loops performed, **-EINVAL** for invalid **flags**, 5461 * **-E2BIG** if **nr_loops** exceeds the maximum number of loops. 5462 * 5463 * long bpf_strncmp(const char *s1, u32 s1_sz, const char *s2) 5464 * Description 5465 * Do strncmp() between **s1** and **s2**. **s1** doesn't need 5466 * to be null-terminated and **s1_sz** is the maximum storage 5467 * size of **s1**. **s2** must be a read-only string. 5468 * Return 5469 * An integer less than, equal to, or greater than zero 5470 * if the first **s1_sz** bytes of **s1** is found to be 5471 * less than, to match, or be greater than **s2**. 5472 * 5473 * long bpf_get_func_arg(void *ctx, u32 n, u64 *value) 5474 * Description 5475 * Get **n**-th argument register (zero based) of the traced function (for tracing programs) 5476 * returned in **value**. 5477 * 5478 * Return 5479 * 0 on success. 5480 * **-EINVAL** if n >= argument register count of traced function. 5481 * 5482 * long bpf_get_func_ret(void *ctx, u64 *value) 5483 * Description 5484 * Get return value of the traced function (for tracing programs) 5485 * in **value**. 5486 * 5487 * Return 5488 * 0 on success. 5489 * **-EOPNOTSUPP** for tracing programs other than BPF_TRACE_FEXIT or BPF_MODIFY_RETURN. 5490 * 5491 * long bpf_get_func_arg_cnt(void *ctx) 5492 * Description 5493 * Get number of registers of the traced function (for tracing programs) where 5494 * function arguments are stored in these registers. 5495 * 5496 * Return 5497 * The number of argument registers of the traced function. 5498 * 5499 * int bpf_get_retval(void) 5500 * Description 5501 * Get the BPF program's return value that will be returned to the upper layers. 5502 * 5503 * This helper is currently supported by cgroup programs and only by the hooks 5504 * where BPF program's return value is returned to the userspace via errno. 5505 * Return 5506 * The BPF program's return value. 5507 * 5508 * int bpf_set_retval(int retval) 5509 * Description 5510 * Set the BPF program's return value that will be returned to the upper layers. 5511 * 5512 * This helper is currently supported by cgroup programs and only by the hooks 5513 * where BPF program's return value is returned to the userspace via errno. 5514 * 5515 * Note that there is the following corner case where the program exports an error 5516 * via bpf_set_retval but signals success via 'return 1': 5517 * 5518 * bpf_set_retval(-EPERM); 5519 * return 1; 5520 * 5521 * In this case, the BPF program's return value will use helper's -EPERM. This 5522 * still holds true for cgroup/bind{4,6} which supports extra 'return 3' success case. 5523 * 5524 * Return 5525 * 0 on success, or a negative error in case of failure. 5526 * 5527 * u64 bpf_xdp_get_buff_len(struct xdp_buff *xdp_md) 5528 * Description 5529 * Get the total size of a given xdp buff (linear and paged area) 5530 * Return 5531 * The total size of a given xdp buffer. 5532 * 5533 * long bpf_xdp_load_bytes(struct xdp_buff *xdp_md, u32 offset, void *buf, u32 len) 5534 * Description 5535 * This helper is provided as an easy way to load data from a 5536 * xdp buffer. It can be used to load *len* bytes from *offset* from 5537 * the frame associated to *xdp_md*, into the buffer pointed by 5538 * *buf*. 5539 * Return 5540 * 0 on success, or a negative error in case of failure. 5541 * 5542 * long bpf_xdp_store_bytes(struct xdp_buff *xdp_md, u32 offset, void *buf, u32 len) 5543 * Description 5544 * Store *len* bytes from buffer *buf* into the frame 5545 * associated to *xdp_md*, at *offset*. 5546 * Return 5547 * 0 on success, or a negative error in case of failure. 5548 * 5549 * long bpf_copy_from_user_task(void *dst, u32 size, const void *user_ptr, struct task_struct *tsk, u64 flags) 5550 * Description 5551 * Read *size* bytes from user space address *user_ptr* in *tsk*'s 5552 * address space, and stores the data in *dst*. *flags* is not 5553 * used yet and is provided for future extensibility. This helper 5554 * can only be used by sleepable programs. 5555 * Return 5556 * 0 on success, or a negative error in case of failure. On error 5557 * *dst* buffer is zeroed out. 5558 * 5559 * long bpf_skb_set_tstamp(struct sk_buff *skb, u64 tstamp, u32 tstamp_type) 5560 * Description 5561 * Change the __sk_buff->tstamp_type to *tstamp_type* 5562 * and set *tstamp* to the __sk_buff->tstamp together. 5563 * 5564 * If there is no need to change the __sk_buff->tstamp_type, 5565 * the tstamp value can be directly written to __sk_buff->tstamp 5566 * instead. 5567 * 5568 * BPF_SKB_TSTAMP_DELIVERY_MONO is the only tstamp that 5569 * will be kept during bpf_redirect_*(). A non zero 5570 * *tstamp* must be used with the BPF_SKB_TSTAMP_DELIVERY_MONO 5571 * *tstamp_type*. 5572 * 5573 * A BPF_SKB_TSTAMP_UNSPEC *tstamp_type* can only be used 5574 * with a zero *tstamp*. 5575 * 5576 * Only IPv4 and IPv6 skb->protocol are supported. 5577 * 5578 * This function is most useful when it needs to set a 5579 * mono delivery time to __sk_buff->tstamp and then 5580 * bpf_redirect_*() to the egress of an iface. For example, 5581 * changing the (rcv) timestamp in __sk_buff->tstamp at 5582 * ingress to a mono delivery time and then bpf_redirect_*() 5583 * to sch_fq@phy-dev. 5584 * Return 5585 * 0 on success. 5586 * **-EINVAL** for invalid input 5587 * **-EOPNOTSUPP** for unsupported protocol 5588 * 5589 * long bpf_ima_file_hash(struct file *file, void *dst, u32 size) 5590 * Description 5591 * Returns a calculated IMA hash of the *file*. 5592 * If the hash is larger than *size*, then only *size* 5593 * bytes will be copied to *dst* 5594 * Return 5595 * The **hash_algo** is returned on success, 5596 * **-EOPNOTSUPP** if the hash calculation failed or **-EINVAL** if 5597 * invalid arguments are passed. 5598 * 5599 * void *bpf_kptr_xchg(void *dst, void *ptr) 5600 * Description 5601 * Exchange kptr at pointer *dst* with *ptr*, and return the old value. 5602 * *dst* can be map value or local kptr. *ptr* can be NULL, otherwise 5603 * it must be a referenced pointer which will be released when this helper 5604 * is called. 5605 * Return 5606 * The old value of kptr (which can be NULL). The returned pointer 5607 * if not NULL, is a reference which must be released using its 5608 * corresponding release function, or moved into a BPF map before 5609 * program exit. 5610 * 5611 * void *bpf_map_lookup_percpu_elem(struct bpf_map *map, const void *key, u32 cpu) 5612 * Description 5613 * Perform a lookup in *percpu map* for an entry associated to 5614 * *key* on *cpu*. 5615 * Return 5616 * Map value associated to *key* on *cpu*, or **NULL** if no entry 5617 * was found or *cpu* is invalid. 5618 * 5619 * struct mptcp_sock *bpf_skc_to_mptcp_sock(void *sk) 5620 * Description 5621 * Dynamically cast a *sk* pointer to a *mptcp_sock* pointer. 5622 * Return 5623 * *sk* if casting is valid, or **NULL** otherwise. 5624 * 5625 * long bpf_dynptr_from_mem(void *data, u64 size, u64 flags, struct bpf_dynptr *ptr) 5626 * Description 5627 * Get a dynptr to local memory *data*. 5628 * 5629 * *data* must be a ptr to a map value. 5630 * The maximum *size* supported is DYNPTR_MAX_SIZE. 5631 * *flags* is currently unused. 5632 * Return 5633 * 0 on success, -E2BIG if the size exceeds DYNPTR_MAX_SIZE, 5634 * -EINVAL if flags is not 0. 5635 * 5636 * long bpf_ringbuf_reserve_dynptr(void *ringbuf, u32 size, u64 flags, struct bpf_dynptr *ptr) 5637 * Description 5638 * Reserve *size* bytes of payload in a ring buffer *ringbuf* 5639 * through the dynptr interface. *flags* must be 0. 5640 * 5641 * Please note that a corresponding bpf_ringbuf_submit_dynptr or 5642 * bpf_ringbuf_discard_dynptr must be called on *ptr*, even if the 5643 * reservation fails. This is enforced by the verifier. 5644 * Return 5645 * 0 on success, or a negative error in case of failure. 5646 * 5647 * void bpf_ringbuf_submit_dynptr(struct bpf_dynptr *ptr, u64 flags) 5648 * Description 5649 * Submit reserved ring buffer sample, pointed to by *data*, 5650 * through the dynptr interface. This is a no-op if the dynptr is 5651 * invalid/null. 5652 * 5653 * For more information on *flags*, please see 5654 * 'bpf_ringbuf_submit'. 5655 * Return 5656 * Nothing. Always succeeds. 5657 * 5658 * void bpf_ringbuf_discard_dynptr(struct bpf_dynptr *ptr, u64 flags) 5659 * Description 5660 * Discard reserved ring buffer sample through the dynptr 5661 * interface. This is a no-op if the dynptr is invalid/null. 5662 * 5663 * For more information on *flags*, please see 5664 * 'bpf_ringbuf_discard'. 5665 * Return 5666 * Nothing. Always succeeds. 5667 * 5668 * long bpf_dynptr_read(void *dst, u64 len, const struct bpf_dynptr *src, u64 offset, u64 flags) 5669 * Description 5670 * Read *len* bytes from *src* into *dst*, starting from *offset* 5671 * into *src*. 5672 * *flags* is currently unused. 5673 * Return 5674 * 0 on success, -E2BIG if *offset* + *len* exceeds the length 5675 * of *src*'s data, -EINVAL if *src* is an invalid dynptr or if 5676 * *flags* is not 0. 5677 * 5678 * long bpf_dynptr_write(const struct bpf_dynptr *dst, u64 offset, void *src, u64 len, u64 flags) 5679 * Description 5680 * Write *len* bytes from *src* into *dst*, starting from *offset* 5681 * into *dst*. 5682 * 5683 * *flags* must be 0 except for skb-type dynptrs. 5684 * 5685 * For skb-type dynptrs: 5686 * * All data slices of the dynptr are automatically 5687 * invalidated after **bpf_dynptr_write**\ (). This is 5688 * because writing may pull the skb and change the 5689 * underlying packet buffer. 5690 * 5691 * * For *flags*, please see the flags accepted by 5692 * **bpf_skb_store_bytes**\ (). 5693 * Return 5694 * 0 on success, -E2BIG if *offset* + *len* exceeds the length 5695 * of *dst*'s data, -EINVAL if *dst* is an invalid dynptr or if *dst* 5696 * is a read-only dynptr or if *flags* is not correct. For skb-type dynptrs, 5697 * other errors correspond to errors returned by **bpf_skb_store_bytes**\ (). 5698 * 5699 * void *bpf_dynptr_data(const struct bpf_dynptr *ptr, u64 offset, u64 len) 5700 * Description 5701 * Get a pointer to the underlying dynptr data. 5702 * 5703 * *len* must be a statically known value. The returned data slice 5704 * is invalidated whenever the dynptr is invalidated. 5705 * 5706 * skb and xdp type dynptrs may not use bpf_dynptr_data. They should 5707 * instead use bpf_dynptr_slice and bpf_dynptr_slice_rdwr. 5708 * Return 5709 * Pointer to the underlying dynptr data, NULL if the dynptr is 5710 * read-only, if the dynptr is invalid, or if the offset and length 5711 * is out of bounds. 5712 * 5713 * s64 bpf_tcp_raw_gen_syncookie_ipv4(struct iphdr *iph, struct tcphdr *th, u32 th_len) 5714 * Description 5715 * Try to issue a SYN cookie for the packet with corresponding 5716 * IPv4/TCP headers, *iph* and *th*, without depending on a 5717 * listening socket. 5718 * 5719 * *iph* points to the IPv4 header. 5720 * 5721 * *th* points to the start of the TCP header, while *th_len* 5722 * contains the length of the TCP header (at least 5723 * **sizeof**\ (**struct tcphdr**)). 5724 * Return 5725 * On success, lower 32 bits hold the generated SYN cookie in 5726 * followed by 16 bits which hold the MSS value for that cookie, 5727 * and the top 16 bits are unused. 5728 * 5729 * On failure, the returned value is one of the following: 5730 * 5731 * **-EINVAL** if *th_len* is invalid. 5732 * 5733 * s64 bpf_tcp_raw_gen_syncookie_ipv6(struct ipv6hdr *iph, struct tcphdr *th, u32 th_len) 5734 * Description 5735 * Try to issue a SYN cookie for the packet with corresponding 5736 * IPv6/TCP headers, *iph* and *th*, without depending on a 5737 * listening socket. 5738 * 5739 * *iph* points to the IPv6 header. 5740 * 5741 * *th* points to the start of the TCP header, while *th_len* 5742 * contains the length of the TCP header (at least 5743 * **sizeof**\ (**struct tcphdr**)). 5744 * Return 5745 * On success, lower 32 bits hold the generated SYN cookie in 5746 * followed by 16 bits which hold the MSS value for that cookie, 5747 * and the top 16 bits are unused. 5748 * 5749 * On failure, the returned value is one of the following: 5750 * 5751 * **-EINVAL** if *th_len* is invalid. 5752 * 5753 * **-EPROTONOSUPPORT** if CONFIG_IPV6 is not builtin. 5754 * 5755 * long bpf_tcp_raw_check_syncookie_ipv4(struct iphdr *iph, struct tcphdr *th) 5756 * Description 5757 * Check whether *iph* and *th* contain a valid SYN cookie ACK 5758 * without depending on a listening socket. 5759 * 5760 * *iph* points to the IPv4 header. 5761 * 5762 * *th* points to the TCP header. 5763 * Return 5764 * 0 if *iph* and *th* are a valid SYN cookie ACK. 5765 * 5766 * On failure, the returned value is one of the following: 5767 * 5768 * **-EACCES** if the SYN cookie is not valid. 5769 * 5770 * long bpf_tcp_raw_check_syncookie_ipv6(struct ipv6hdr *iph, struct tcphdr *th) 5771 * Description 5772 * Check whether *iph* and *th* contain a valid SYN cookie ACK 5773 * without depending on a listening socket. 5774 * 5775 * *iph* points to the IPv6 header. 5776 * 5777 * *th* points to the TCP header. 5778 * Return 5779 * 0 if *iph* and *th* are a valid SYN cookie ACK. 5780 * 5781 * On failure, the returned value is one of the following: 5782 * 5783 * **-EACCES** if the SYN cookie is not valid. 5784 * 5785 * **-EPROTONOSUPPORT** if CONFIG_IPV6 is not builtin. 5786 * 5787 * u64 bpf_ktime_get_tai_ns(void) 5788 * Description 5789 * A nonsettable system-wide clock derived from wall-clock time but 5790 * ignoring leap seconds. This clock does not experience 5791 * discontinuities and backwards jumps caused by NTP inserting leap 5792 * seconds as CLOCK_REALTIME does. 5793 * 5794 * See: **clock_gettime**\ (**CLOCK_TAI**) 5795 * Return 5796 * Current *ktime*. 5797 * 5798 * long bpf_user_ringbuf_drain(struct bpf_map *map, void *callback_fn, void *ctx, u64 flags) 5799 * Description 5800 * Drain samples from the specified user ring buffer, and invoke 5801 * the provided callback for each such sample: 5802 * 5803 * long (\*callback_fn)(const struct bpf_dynptr \*dynptr, void \*ctx); 5804 * 5805 * If **callback_fn** returns 0, the helper will continue to try 5806 * and drain the next sample, up to a maximum of 5807 * BPF_MAX_USER_RINGBUF_SAMPLES samples. If the return value is 1, 5808 * the helper will skip the rest of the samples and return. Other 5809 * return values are not used now, and will be rejected by the 5810 * verifier. 5811 * Return 5812 * The number of drained samples if no error was encountered while 5813 * draining samples, or 0 if no samples were present in the ring 5814 * buffer. If a user-space producer was epoll-waiting on this map, 5815 * and at least one sample was drained, they will receive an event 5816 * notification notifying them of available space in the ring 5817 * buffer. If the BPF_RB_NO_WAKEUP flag is passed to this 5818 * function, no wakeup notification will be sent. If the 5819 * BPF_RB_FORCE_WAKEUP flag is passed, a wakeup notification will 5820 * be sent even if no sample was drained. 5821 * 5822 * On failure, the returned value is one of the following: 5823 * 5824 * **-EBUSY** if the ring buffer is contended, and another calling 5825 * context was concurrently draining the ring buffer. 5826 * 5827 * **-EINVAL** if user-space is not properly tracking the ring 5828 * buffer due to the producer position not being aligned to 8 5829 * bytes, a sample not being aligned to 8 bytes, or the producer 5830 * position not matching the advertised length of a sample. 5831 * 5832 * **-E2BIG** if user-space has tried to publish a sample which is 5833 * larger than the size of the ring buffer, or which cannot fit 5834 * within a struct bpf_dynptr. 5835 * 5836 * void *bpf_cgrp_storage_get(struct bpf_map *map, struct cgroup *cgroup, void *value, u64 flags) 5837 * Description 5838 * Get a bpf_local_storage from the *cgroup*. 5839 * 5840 * Logically, it could be thought of as getting the value from 5841 * a *map* with *cgroup* as the **key**. From this 5842 * perspective, the usage is not much different from 5843 * **bpf_map_lookup_elem**\ (*map*, **&**\ *cgroup*) except this 5844 * helper enforces the key must be a cgroup struct and the map must also 5845 * be a **BPF_MAP_TYPE_CGRP_STORAGE**. 5846 * 5847 * In reality, the local-storage value is embedded directly inside of the 5848 * *cgroup* object itself, rather than being located in the 5849 * **BPF_MAP_TYPE_CGRP_STORAGE** map. When the local-storage value is 5850 * queried for some *map* on a *cgroup* object, the kernel will perform an 5851 * O(n) iteration over all of the live local-storage values for that 5852 * *cgroup* object until the local-storage value for the *map* is found. 5853 * 5854 * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be 5855 * used such that a new bpf_local_storage will be 5856 * created if one does not exist. *value* can be used 5857 * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify 5858 * the initial value of a bpf_local_storage. If *value* is 5859 * **NULL**, the new bpf_local_storage will be zero initialized. 5860 * Return 5861 * A bpf_local_storage pointer is returned on success. 5862 * 5863 * **NULL** if not found or there was an error in adding 5864 * a new bpf_local_storage. 5865 * 5866 * long bpf_cgrp_storage_delete(struct bpf_map *map, struct cgroup *cgroup) 5867 * Description 5868 * Delete a bpf_local_storage from a *cgroup*. 5869 * Return 5870 * 0 on success. 5871 * 5872 * **-ENOENT** if the bpf_local_storage cannot be found. 5873 */ 5874 #define ___BPF_FUNC_MAPPER(FN, ctx...) \ 5875 FN(unspec, 0, ##ctx) \ 5876 FN(map_lookup_elem, 1, ##ctx) \ 5877 FN(map_update_elem, 2, ##ctx) \ 5878 FN(map_delete_elem, 3, ##ctx) \ 5879 FN(probe_read, 4, ##ctx) \ 5880 FN(ktime_get_ns, 5, ##ctx) \ 5881 FN(trace_printk, 6, ##ctx) \ 5882 FN(get_prandom_u32, 7, ##ctx) \ 5883 FN(get_smp_processor_id, 8, ##ctx) \ 5884 FN(skb_store_bytes, 9, ##ctx) \ 5885 FN(l3_csum_replace, 10, ##ctx) \ 5886 FN(l4_csum_replace, 11, ##ctx) \ 5887 FN(tail_call, 12, ##ctx) \ 5888 FN(clone_redirect, 13, ##ctx) \ 5889 FN(get_current_pid_tgid, 14, ##ctx) \ 5890 FN(get_current_uid_gid, 15, ##ctx) \ 5891 FN(get_current_comm, 16, ##ctx) \ 5892 FN(get_cgroup_classid, 17, ##ctx) \ 5893 FN(skb_vlan_push, 18, ##ctx) \ 5894 FN(skb_vlan_pop, 19, ##ctx) \ 5895 FN(skb_get_tunnel_key, 20, ##ctx) \ 5896 FN(skb_set_tunnel_key, 21, ##ctx) \ 5897 FN(perf_event_read, 22, ##ctx) \ 5898 FN(redirect, 23, ##ctx) \ 5899 FN(get_route_realm, 24, ##ctx) \ 5900 FN(perf_event_output, 25, ##ctx) \ 5901 FN(skb_load_bytes, 26, ##ctx) \ 5902 FN(get_stackid, 27, ##ctx) \ 5903 FN(csum_diff, 28, ##ctx) \ 5904 FN(skb_get_tunnel_opt, 29, ##ctx) \ 5905 FN(skb_set_tunnel_opt, 30, ##ctx) \ 5906 FN(skb_change_proto, 31, ##ctx) \ 5907 FN(skb_change_type, 32, ##ctx) \ 5908 FN(skb_under_cgroup, 33, ##ctx) \ 5909 FN(get_hash_recalc, 34, ##ctx) \ 5910 FN(get_current_task, 35, ##ctx) \ 5911 FN(probe_write_user, 36, ##ctx) \ 5912 FN(current_task_under_cgroup, 37, ##ctx) \ 5913 FN(skb_change_tail, 38, ##ctx) \ 5914 FN(skb_pull_data, 39, ##ctx) \ 5915 FN(csum_update, 40, ##ctx) \ 5916 FN(set_hash_invalid, 41, ##ctx) \ 5917 FN(get_numa_node_id, 42, ##ctx) \ 5918 FN(skb_change_head, 43, ##ctx) \ 5919 FN(xdp_adjust_head, 44, ##ctx) \ 5920 FN(probe_read_str, 45, ##ctx) \ 5921 FN(get_socket_cookie, 46, ##ctx) \ 5922 FN(get_socket_uid, 47, ##ctx) \ 5923 FN(set_hash, 48, ##ctx) \ 5924 FN(setsockopt, 49, ##ctx) \ 5925 FN(skb_adjust_room, 50, ##ctx) \ 5926 FN(redirect_map, 51, ##ctx) \ 5927 FN(sk_redirect_map, 52, ##ctx) \ 5928 FN(sock_map_update, 53, ##ctx) \ 5929 FN(xdp_adjust_meta, 54, ##ctx) \ 5930 FN(perf_event_read_value, 55, ##ctx) \ 5931 FN(perf_prog_read_value, 56, ##ctx) \ 5932 FN(getsockopt, 57, ##ctx) \ 5933 FN(override_return, 58, ##ctx) \ 5934 FN(sock_ops_cb_flags_set, 59, ##ctx) \ 5935 FN(msg_redirect_map, 60, ##ctx) \ 5936 FN(msg_apply_bytes, 61, ##ctx) \ 5937 FN(msg_cork_bytes, 62, ##ctx) \ 5938 FN(msg_pull_data, 63, ##ctx) \ 5939 FN(bind, 64, ##ctx) \ 5940 FN(xdp_adjust_tail, 65, ##ctx) \ 5941 FN(skb_get_xfrm_state, 66, ##ctx) \ 5942 FN(get_stack, 67, ##ctx) \ 5943 FN(skb_load_bytes_relative, 68, ##ctx) \ 5944 FN(fib_lookup, 69, ##ctx) \ 5945 FN(sock_hash_update, 70, ##ctx) \ 5946 FN(msg_redirect_hash, 71, ##ctx) \ 5947 FN(sk_redirect_hash, 72, ##ctx) \ 5948 FN(lwt_push_encap, 73, ##ctx) \ 5949 FN(lwt_seg6_store_bytes, 74, ##ctx) \ 5950 FN(lwt_seg6_adjust_srh, 75, ##ctx) \ 5951 FN(lwt_seg6_action, 76, ##ctx) \ 5952 FN(rc_repeat, 77, ##ctx) \ 5953 FN(rc_keydown, 78, ##ctx) \ 5954 FN(skb_cgroup_id, 79, ##ctx) \ 5955 FN(get_current_cgroup_id, 80, ##ctx) \ 5956 FN(get_local_storage, 81, ##ctx) \ 5957 FN(sk_select_reuseport, 82, ##ctx) \ 5958 FN(skb_ancestor_cgroup_id, 83, ##ctx) \ 5959 FN(sk_lookup_tcp, 84, ##ctx) \ 5960 FN(sk_lookup_udp, 85, ##ctx) \ 5961 FN(sk_release, 86, ##ctx) \ 5962 FN(map_push_elem, 87, ##ctx) \ 5963 FN(map_pop_elem, 88, ##ctx) \ 5964 FN(map_peek_elem, 89, ##ctx) \ 5965 FN(msg_push_data, 90, ##ctx) \ 5966 FN(msg_pop_data, 91, ##ctx) \ 5967 FN(rc_pointer_rel, 92, ##ctx) \ 5968 FN(spin_lock, 93, ##ctx) \ 5969 FN(spin_unlock, 94, ##ctx) \ 5970 FN(sk_fullsock, 95, ##ctx) \ 5971 FN(tcp_sock, 96, ##ctx) \ 5972 FN(skb_ecn_set_ce, 97, ##ctx) \ 5973 FN(get_listener_sock, 98, ##ctx) \ 5974 FN(skc_lookup_tcp, 99, ##ctx) \ 5975 FN(tcp_check_syncookie, 100, ##ctx) \ 5976 FN(sysctl_get_name, 101, ##ctx) \ 5977 FN(sysctl_get_current_value, 102, ##ctx) \ 5978 FN(sysctl_get_new_value, 103, ##ctx) \ 5979 FN(sysctl_set_new_value, 104, ##ctx) \ 5980 FN(strtol, 105, ##ctx) \ 5981 FN(strtoul, 106, ##ctx) \ 5982 FN(sk_storage_get, 107, ##ctx) \ 5983 FN(sk_storage_delete, 108, ##ctx) \ 5984 FN(send_signal, 109, ##ctx) \ 5985 FN(tcp_gen_syncookie, 110, ##ctx) \ 5986 FN(skb_output, 111, ##ctx) \ 5987 FN(probe_read_user, 112, ##ctx) \ 5988 FN(probe_read_kernel, 113, ##ctx) \ 5989 FN(probe_read_user_str, 114, ##ctx) \ 5990 FN(probe_read_kernel_str, 115, ##ctx) \ 5991 FN(tcp_send_ack, 116, ##ctx) \ 5992 FN(send_signal_thread, 117, ##ctx) \ 5993 FN(jiffies64, 118, ##ctx) \ 5994 FN(read_branch_records, 119, ##ctx) \ 5995 FN(get_ns_current_pid_tgid, 120, ##ctx) \ 5996 FN(xdp_output, 121, ##ctx) \ 5997 FN(get_netns_cookie, 122, ##ctx) \ 5998 FN(get_current_ancestor_cgroup_id, 123, ##ctx) \ 5999 FN(sk_assign, 124, ##ctx) \ 6000 FN(ktime_get_boot_ns, 125, ##ctx) \ 6001 FN(seq_printf, 126, ##ctx) \ 6002 FN(seq_write, 127, ##ctx) \ 6003 FN(sk_cgroup_id, 128, ##ctx) \ 6004 FN(sk_ancestor_cgroup_id, 129, ##ctx) \ 6005 FN(ringbuf_output, 130, ##ctx) \ 6006 FN(ringbuf_reserve, 131, ##ctx) \ 6007 FN(ringbuf_submit, 132, ##ctx) \ 6008 FN(ringbuf_discard, 133, ##ctx) \ 6009 FN(ringbuf_query, 134, ##ctx) \ 6010 FN(csum_level, 135, ##ctx) \ 6011 FN(skc_to_tcp6_sock, 136, ##ctx) \ 6012 FN(skc_to_tcp_sock, 137, ##ctx) \ 6013 FN(skc_to_tcp_timewait_sock, 138, ##ctx) \ 6014 FN(skc_to_tcp_request_sock, 139, ##ctx) \ 6015 FN(skc_to_udp6_sock, 140, ##ctx) \ 6016 FN(get_task_stack, 141, ##ctx) \ 6017 FN(load_hdr_opt, 142, ##ctx) \ 6018 FN(store_hdr_opt, 143, ##ctx) \ 6019 FN(reserve_hdr_opt, 144, ##ctx) \ 6020 FN(inode_storage_get, 145, ##ctx) \ 6021 FN(inode_storage_delete, 146, ##ctx) \ 6022 FN(d_path, 147, ##ctx) \ 6023 FN(copy_from_user, 148, ##ctx) \ 6024 FN(snprintf_btf, 149, ##ctx) \ 6025 FN(seq_printf_btf, 150, ##ctx) \ 6026 FN(skb_cgroup_classid, 151, ##ctx) \ 6027 FN(redirect_neigh, 152, ##ctx) \ 6028 FN(per_cpu_ptr, 153, ##ctx) \ 6029 FN(this_cpu_ptr, 154, ##ctx) \ 6030 FN(redirect_peer, 155, ##ctx) \ 6031 FN(task_storage_get, 156, ##ctx) \ 6032 FN(task_storage_delete, 157, ##ctx) \ 6033 FN(get_current_task_btf, 158, ##ctx) \ 6034 FN(bprm_opts_set, 159, ##ctx) \ 6035 FN(ktime_get_coarse_ns, 160, ##ctx) \ 6036 FN(ima_inode_hash, 161, ##ctx) \ 6037 FN(sock_from_file, 162, ##ctx) \ 6038 FN(check_mtu, 163, ##ctx) \ 6039 FN(for_each_map_elem, 164, ##ctx) \ 6040 FN(snprintf, 165, ##ctx) \ 6041 FN(sys_bpf, 166, ##ctx) \ 6042 FN(btf_find_by_name_kind, 167, ##ctx) \ 6043 FN(sys_close, 168, ##ctx) \ 6044 FN(timer_init, 169, ##ctx) \ 6045 FN(timer_set_callback, 170, ##ctx) \ 6046 FN(timer_start, 171, ##ctx) \ 6047 FN(timer_cancel, 172, ##ctx) \ 6048 FN(get_func_ip, 173, ##ctx) \ 6049 FN(get_attach_cookie, 174, ##ctx) \ 6050 FN(task_pt_regs, 175, ##ctx) \ 6051 FN(get_branch_snapshot, 176, ##ctx) \ 6052 FN(trace_vprintk, 177, ##ctx) \ 6053 FN(skc_to_unix_sock, 178, ##ctx) \ 6054 FN(kallsyms_lookup_name, 179, ##ctx) \ 6055 FN(find_vma, 180, ##ctx) \ 6056 FN(loop, 181, ##ctx) \ 6057 FN(strncmp, 182, ##ctx) \ 6058 FN(get_func_arg, 183, ##ctx) \ 6059 FN(get_func_ret, 184, ##ctx) \ 6060 FN(get_func_arg_cnt, 185, ##ctx) \ 6061 FN(get_retval, 186, ##ctx) \ 6062 FN(set_retval, 187, ##ctx) \ 6063 FN(xdp_get_buff_len, 188, ##ctx) \ 6064 FN(xdp_load_bytes, 189, ##ctx) \ 6065 FN(xdp_store_bytes, 190, ##ctx) \ 6066 FN(copy_from_user_task, 191, ##ctx) \ 6067 FN(skb_set_tstamp, 192, ##ctx) \ 6068 FN(ima_file_hash, 193, ##ctx) \ 6069 FN(kptr_xchg, 194, ##ctx) \ 6070 FN(map_lookup_percpu_elem, 195, ##ctx) \ 6071 FN(skc_to_mptcp_sock, 196, ##ctx) \ 6072 FN(dynptr_from_mem, 197, ##ctx) \ 6073 FN(ringbuf_reserve_dynptr, 198, ##ctx) \ 6074 FN(ringbuf_submit_dynptr, 199, ##ctx) \ 6075 FN(ringbuf_discard_dynptr, 200, ##ctx) \ 6076 FN(dynptr_read, 201, ##ctx) \ 6077 FN(dynptr_write, 202, ##ctx) \ 6078 FN(dynptr_data, 203, ##ctx) \ 6079 FN(tcp_raw_gen_syncookie_ipv4, 204, ##ctx) \ 6080 FN(tcp_raw_gen_syncookie_ipv6, 205, ##ctx) \ 6081 FN(tcp_raw_check_syncookie_ipv4, 206, ##ctx) \ 6082 FN(tcp_raw_check_syncookie_ipv6, 207, ##ctx) \ 6083 FN(ktime_get_tai_ns, 208, ##ctx) \ 6084 FN(user_ringbuf_drain, 209, ##ctx) \ 6085 FN(cgrp_storage_get, 210, ##ctx) \ 6086 FN(cgrp_storage_delete, 211, ##ctx) \ 6087 /* This helper list is effectively frozen. If you are trying to \ 6088 * add a new helper, you should add a kfunc instead which has \ 6089 * less stability guarantees. See Documentation/bpf/kfuncs.rst \ 6090 */ 6091 6092 /* backwards-compatibility macros for users of __BPF_FUNC_MAPPER that don't 6093 * know or care about integer value that is now passed as second argument 6094 */ 6095 #define __BPF_FUNC_MAPPER_APPLY(name, value, FN) FN(name), 6096 #define __BPF_FUNC_MAPPER(FN) ___BPF_FUNC_MAPPER(__BPF_FUNC_MAPPER_APPLY, FN) 6097 6098 /* integer value in 'imm' field of BPF_CALL instruction selects which helper 6099 * function eBPF program intends to call 6100 */ 6101 #define __BPF_ENUM_FN(x, y) BPF_FUNC_ ## x = y, 6102 enum bpf_func_id { 6103 ___BPF_FUNC_MAPPER(__BPF_ENUM_FN) 6104 __BPF_FUNC_MAX_ID, 6105 }; 6106 #undef __BPF_ENUM_FN 6107 6108 /* All flags used by eBPF helper functions, placed here. */ 6109 6110 /* BPF_FUNC_skb_store_bytes flags. */ 6111 enum { 6112 BPF_F_RECOMPUTE_CSUM = (1ULL << 0), 6113 BPF_F_INVALIDATE_HASH = (1ULL << 1), 6114 }; 6115 6116 /* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags. 6117 * First 4 bits are for passing the header field size. 6118 */ 6119 enum { 6120 BPF_F_HDR_FIELD_MASK = 0xfULL, 6121 }; 6122 6123 /* BPF_FUNC_l4_csum_replace flags. */ 6124 enum { 6125 BPF_F_PSEUDO_HDR = (1ULL << 4), 6126 BPF_F_MARK_MANGLED_0 = (1ULL << 5), 6127 BPF_F_MARK_ENFORCE = (1ULL << 6), 6128 BPF_F_IPV6 = (1ULL << 7), 6129 }; 6130 6131 /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */ 6132 enum { 6133 BPF_F_TUNINFO_IPV6 = (1ULL << 0), 6134 }; 6135 6136 /* flags for both BPF_FUNC_get_stackid and BPF_FUNC_get_stack. */ 6137 enum { 6138 BPF_F_SKIP_FIELD_MASK = 0xffULL, 6139 BPF_F_USER_STACK = (1ULL << 8), 6140 /* flags used by BPF_FUNC_get_stackid only. */ 6141 BPF_F_FAST_STACK_CMP = (1ULL << 9), 6142 BPF_F_REUSE_STACKID = (1ULL << 10), 6143 /* flags used by BPF_FUNC_get_stack only. */ 6144 BPF_F_USER_BUILD_ID = (1ULL << 11), 6145 }; 6146 6147 /* BPF_FUNC_skb_set_tunnel_key flags. */ 6148 enum { 6149 BPF_F_ZERO_CSUM_TX = (1ULL << 1), 6150 BPF_F_DONT_FRAGMENT = (1ULL << 2), 6151 BPF_F_SEQ_NUMBER = (1ULL << 3), 6152 BPF_F_NO_TUNNEL_KEY = (1ULL << 4), 6153 }; 6154 6155 /* BPF_FUNC_skb_get_tunnel_key flags. */ 6156 enum { 6157 BPF_F_TUNINFO_FLAGS = (1ULL << 4), 6158 }; 6159 6160 /* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and 6161 * BPF_FUNC_perf_event_read_value flags. 6162 */ 6163 enum { 6164 BPF_F_INDEX_MASK = 0xffffffffULL, 6165 BPF_F_CURRENT_CPU = BPF_F_INDEX_MASK, 6166 /* BPF_FUNC_perf_event_output for sk_buff input context. */ 6167 BPF_F_CTXLEN_MASK = (0xfffffULL << 32), 6168 }; 6169 6170 /* Current network namespace */ 6171 enum { 6172 BPF_F_CURRENT_NETNS = (-1L), 6173 }; 6174 6175 /* BPF_FUNC_csum_level level values. */ 6176 enum { 6177 BPF_CSUM_LEVEL_QUERY, 6178 BPF_CSUM_LEVEL_INC, 6179 BPF_CSUM_LEVEL_DEC, 6180 BPF_CSUM_LEVEL_RESET, 6181 }; 6182 6183 /* BPF_FUNC_skb_adjust_room flags. */ 6184 enum { 6185 BPF_F_ADJ_ROOM_FIXED_GSO = (1ULL << 0), 6186 BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = (1ULL << 1), 6187 BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = (1ULL << 2), 6188 BPF_F_ADJ_ROOM_ENCAP_L4_GRE = (1ULL << 3), 6189 BPF_F_ADJ_ROOM_ENCAP_L4_UDP = (1ULL << 4), 6190 BPF_F_ADJ_ROOM_NO_CSUM_RESET = (1ULL << 5), 6191 BPF_F_ADJ_ROOM_ENCAP_L2_ETH = (1ULL << 6), 6192 BPF_F_ADJ_ROOM_DECAP_L3_IPV4 = (1ULL << 7), 6193 BPF_F_ADJ_ROOM_DECAP_L3_IPV6 = (1ULL << 8), 6194 }; 6195 6196 enum { 6197 BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff, 6198 BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 56, 6199 }; 6200 6201 #define BPF_F_ADJ_ROOM_ENCAP_L2(len) (((__u64)len & \ 6202 BPF_ADJ_ROOM_ENCAP_L2_MASK) \ 6203 << BPF_ADJ_ROOM_ENCAP_L2_SHIFT) 6204 6205 /* BPF_FUNC_sysctl_get_name flags. */ 6206 enum { 6207 BPF_F_SYSCTL_BASE_NAME = (1ULL << 0), 6208 }; 6209 6210 /* BPF_FUNC_<kernel_obj>_storage_get flags */ 6211 enum { 6212 BPF_LOCAL_STORAGE_GET_F_CREATE = (1ULL << 0), 6213 /* BPF_SK_STORAGE_GET_F_CREATE is only kept for backward compatibility 6214 * and BPF_LOCAL_STORAGE_GET_F_CREATE must be used instead. 6215 */ 6216 BPF_SK_STORAGE_GET_F_CREATE = BPF_LOCAL_STORAGE_GET_F_CREATE, 6217 }; 6218 6219 /* BPF_FUNC_read_branch_records flags. */ 6220 enum { 6221 BPF_F_GET_BRANCH_RECORDS_SIZE = (1ULL << 0), 6222 }; 6223 6224 /* BPF_FUNC_bpf_ringbuf_commit, BPF_FUNC_bpf_ringbuf_discard, and 6225 * BPF_FUNC_bpf_ringbuf_output flags. 6226 */ 6227 enum { 6228 BPF_RB_NO_WAKEUP = (1ULL << 0), 6229 BPF_RB_FORCE_WAKEUP = (1ULL << 1), 6230 }; 6231 6232 /* BPF_FUNC_bpf_ringbuf_query flags */ 6233 enum { 6234 BPF_RB_AVAIL_DATA = 0, 6235 BPF_RB_RING_SIZE = 1, 6236 BPF_RB_CONS_POS = 2, 6237 BPF_RB_PROD_POS = 3, 6238 BPF_RB_OVERWRITE_POS = 4, 6239 }; 6240 6241 /* BPF ring buffer constants */ 6242 enum { 6243 BPF_RINGBUF_BUSY_BIT = (1U << 31), 6244 BPF_RINGBUF_DISCARD_BIT = (1U << 30), 6245 BPF_RINGBUF_HDR_SZ = 8, 6246 }; 6247 6248 /* BPF_FUNC_sk_assign flags in bpf_sk_lookup context. */ 6249 enum { 6250 BPF_SK_LOOKUP_F_REPLACE = (1ULL << 0), 6251 BPF_SK_LOOKUP_F_NO_REUSEPORT = (1ULL << 1), 6252 }; 6253 6254 /* Mode for BPF_FUNC_skb_adjust_room helper. */ 6255 enum bpf_adj_room_mode { 6256 BPF_ADJ_ROOM_NET, 6257 BPF_ADJ_ROOM_MAC, 6258 }; 6259 6260 /* Mode for BPF_FUNC_skb_load_bytes_relative helper. */ 6261 enum bpf_hdr_start_off { 6262 BPF_HDR_START_MAC, 6263 BPF_HDR_START_NET, 6264 }; 6265 6266 /* Encapsulation type for BPF_FUNC_lwt_push_encap helper. */ 6267 enum bpf_lwt_encap_mode { 6268 BPF_LWT_ENCAP_SEG6, 6269 BPF_LWT_ENCAP_SEG6_INLINE, 6270 BPF_LWT_ENCAP_IP, 6271 }; 6272 6273 /* Flags for bpf_bprm_opts_set helper */ 6274 enum { 6275 BPF_F_BPRM_SECUREEXEC = (1ULL << 0), 6276 }; 6277 6278 /* Flags for bpf_redirect and bpf_redirect_map helpers */ 6279 enum { 6280 BPF_F_INGRESS = (1ULL << 0), /* used for skb path */ 6281 BPF_F_BROADCAST = (1ULL << 3), /* used for XDP path */ 6282 BPF_F_EXCLUDE_INGRESS = (1ULL << 4), /* used for XDP path */ 6283 #define BPF_F_REDIRECT_FLAGS (BPF_F_INGRESS | BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS) 6284 }; 6285 6286 #define __bpf_md_ptr(type, name) \ 6287 union { \ 6288 type name; \ 6289 __u64 :64; \ 6290 } __attribute__((aligned(8))) 6291 6292 /* The enum used in skb->tstamp_type. It specifies the clock type 6293 * of the time stored in the skb->tstamp. 6294 */ 6295 enum { 6296 BPF_SKB_TSTAMP_UNSPEC = 0, /* DEPRECATED */ 6297 BPF_SKB_TSTAMP_DELIVERY_MONO = 1, /* DEPRECATED */ 6298 BPF_SKB_CLOCK_REALTIME = 0, 6299 BPF_SKB_CLOCK_MONOTONIC = 1, 6300 BPF_SKB_CLOCK_TAI = 2, 6301 /* For any future BPF_SKB_CLOCK_* that the bpf prog cannot handle, 6302 * the bpf prog can try to deduce it by ingress/egress/skb->sk->sk_clockid. 6303 */ 6304 }; 6305 6306 /* user accessible mirror of in-kernel sk_buff. 6307 * new fields can only be added to the end of this structure 6308 */ 6309 struct __sk_buff { 6310 __u32 len; 6311 __u32 pkt_type; 6312 __u32 mark; 6313 __u32 queue_mapping; 6314 __u32 protocol; 6315 __u32 vlan_present; 6316 __u32 vlan_tci; 6317 __u32 vlan_proto; 6318 __u32 priority; 6319 __u32 ingress_ifindex; 6320 __u32 ifindex; 6321 __u32 tc_index; 6322 __u32 cb[5]; 6323 __u32 hash; 6324 __u32 tc_classid; 6325 __u32 data; 6326 __u32 data_end; 6327 __u32 napi_id; 6328 6329 /* Accessed by BPF_PROG_TYPE_sk_skb types from here to ... */ 6330 __u32 family; 6331 __u32 remote_ip4; /* Stored in network byte order */ 6332 __u32 local_ip4; /* Stored in network byte order */ 6333 __u32 remote_ip6[4]; /* Stored in network byte order */ 6334 __u32 local_ip6[4]; /* Stored in network byte order */ 6335 __u32 remote_port; /* Stored in network byte order */ 6336 __u32 local_port; /* stored in host byte order */ 6337 /* ... here. */ 6338 6339 __u32 data_meta; 6340 __bpf_md_ptr(struct bpf_flow_keys *, flow_keys); 6341 __u64 tstamp; 6342 __u32 wire_len; 6343 __u32 gso_segs; 6344 __bpf_md_ptr(struct bpf_sock *, sk); 6345 __u32 gso_size; 6346 __u8 tstamp_type; 6347 __u32 :24; /* Padding, future use. */ 6348 __u64 hwtstamp; 6349 }; 6350 6351 struct bpf_tunnel_key { 6352 __u32 tunnel_id; 6353 union { 6354 __u32 remote_ipv4; 6355 __u32 remote_ipv6[4]; 6356 }; 6357 __u8 tunnel_tos; 6358 __u8 tunnel_ttl; 6359 union { 6360 __u16 tunnel_ext; /* compat */ 6361 __be16 tunnel_flags; 6362 }; 6363 __u32 tunnel_label; 6364 union { 6365 __u32 local_ipv4; 6366 __u32 local_ipv6[4]; 6367 }; 6368 }; 6369 6370 /* user accessible mirror of in-kernel xfrm_state. 6371 * new fields can only be added to the end of this structure 6372 */ 6373 struct bpf_xfrm_state { 6374 __u32 reqid; 6375 __u32 spi; /* Stored in network byte order */ 6376 __u16 family; 6377 __u16 ext; /* Padding, future use. */ 6378 union { 6379 __u32 remote_ipv4; /* Stored in network byte order */ 6380 __u32 remote_ipv6[4]; /* Stored in network byte order */ 6381 }; 6382 }; 6383 6384 /* Generic BPF return codes which all BPF program types may support. 6385 * The values are binary compatible with their TC_ACT_* counter-part to 6386 * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT 6387 * programs. 6388 * 6389 * XDP is handled seprately, see XDP_*. 6390 */ 6391 enum bpf_ret_code { 6392 BPF_OK = 0, 6393 /* 1 reserved */ 6394 BPF_DROP = 2, 6395 /* 3-6 reserved */ 6396 BPF_REDIRECT = 7, 6397 /* >127 are reserved for prog type specific return codes. 6398 * 6399 * BPF_LWT_REROUTE: used by BPF_PROG_TYPE_LWT_IN and 6400 * BPF_PROG_TYPE_LWT_XMIT to indicate that skb had been 6401 * changed and should be routed based on its new L3 header. 6402 * (This is an L3 redirect, as opposed to L2 redirect 6403 * represented by BPF_REDIRECT above). 6404 */ 6405 BPF_LWT_REROUTE = 128, 6406 /* BPF_FLOW_DISSECTOR_CONTINUE: used by BPF_PROG_TYPE_FLOW_DISSECTOR 6407 * to indicate that no custom dissection was performed, and 6408 * fallback to standard dissector is requested. 6409 */ 6410 BPF_FLOW_DISSECTOR_CONTINUE = 129, 6411 }; 6412 6413 struct bpf_sock { 6414 __u32 bound_dev_if; 6415 __u32 family; 6416 __u32 type; 6417 __u32 protocol; 6418 __u32 mark; 6419 __u32 priority; 6420 /* IP address also allows 1 and 2 bytes access */ 6421 __u32 src_ip4; 6422 __u32 src_ip6[4]; 6423 __u32 src_port; /* host byte order */ 6424 __be16 dst_port; /* network byte order */ 6425 __u16 :16; /* zero padding */ 6426 __u32 dst_ip4; 6427 __u32 dst_ip6[4]; 6428 __u32 state; 6429 __s32 rx_queue_mapping; 6430 }; 6431 6432 struct bpf_tcp_sock { 6433 __u32 snd_cwnd; /* Sending congestion window */ 6434 __u32 srtt_us; /* smoothed round trip time << 3 in usecs */ 6435 __u32 rtt_min; 6436 __u32 snd_ssthresh; /* Slow start size threshold */ 6437 __u32 rcv_nxt; /* What we want to receive next */ 6438 __u32 snd_nxt; /* Next sequence we send */ 6439 __u32 snd_una; /* First byte we want an ack for */ 6440 __u32 mss_cache; /* Cached effective mss, not including SACKS */ 6441 __u32 ecn_flags; /* ECN status bits. */ 6442 __u32 rate_delivered; /* saved rate sample: packets delivered */ 6443 __u32 rate_interval_us; /* saved rate sample: time elapsed */ 6444 __u32 packets_out; /* Packets which are "in flight" */ 6445 __u32 retrans_out; /* Retransmitted packets out */ 6446 __u32 total_retrans; /* Total retransmits for entire connection */ 6447 __u32 segs_in; /* RFC4898 tcpEStatsPerfSegsIn 6448 * total number of segments in. 6449 */ 6450 __u32 data_segs_in; /* RFC4898 tcpEStatsPerfDataSegsIn 6451 * total number of data segments in. 6452 */ 6453 __u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut 6454 * The total number of segments sent. 6455 */ 6456 __u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut 6457 * total number of data segments sent. 6458 */ 6459 __u32 lost_out; /* Lost packets */ 6460 __u32 sacked_out; /* SACK'd packets */ 6461 __u64 bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived 6462 * sum(delta(rcv_nxt)), or how many bytes 6463 * were acked. 6464 */ 6465 __u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked 6466 * sum(delta(snd_una)), or how many bytes 6467 * were acked. 6468 */ 6469 __u32 dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups 6470 * total number of DSACK blocks received 6471 */ 6472 __u32 delivered; /* Total data packets delivered incl. rexmits */ 6473 __u32 delivered_ce; /* Like the above but only ECE marked packets */ 6474 __u32 icsk_retransmits; /* Number of unrecovered [RTO] timeouts */ 6475 }; 6476 6477 struct bpf_sock_tuple { 6478 union { 6479 struct { 6480 __be32 saddr; 6481 __be32 daddr; 6482 __be16 sport; 6483 __be16 dport; 6484 } ipv4; 6485 struct { 6486 __be32 saddr[4]; 6487 __be32 daddr[4]; 6488 __be16 sport; 6489 __be16 dport; 6490 } ipv6; 6491 }; 6492 }; 6493 6494 /* (Simplified) user return codes for tcx prog type. 6495 * A valid tcx program must return one of these defined values. All other 6496 * return codes are reserved for future use. Must remain compatible with 6497 * their TC_ACT_* counter-parts. For compatibility in behavior, unknown 6498 * return codes are mapped to TCX_NEXT. 6499 */ 6500 enum tcx_action_base { 6501 TCX_NEXT = -1, 6502 TCX_PASS = 0, 6503 TCX_DROP = 2, 6504 TCX_REDIRECT = 7, 6505 }; 6506 6507 struct bpf_xdp_sock { 6508 __u32 queue_id; 6509 }; 6510 6511 #define XDP_PACKET_HEADROOM 256 6512 6513 /* User return codes for XDP prog type. 6514 * A valid XDP program must return one of these defined values. All other 6515 * return codes are reserved for future use. Unknown return codes will 6516 * result in packet drops and a warning via bpf_warn_invalid_xdp_action(). 6517 */ 6518 enum xdp_action { 6519 XDP_ABORTED = 0, 6520 XDP_DROP, 6521 XDP_PASS, 6522 XDP_TX, 6523 XDP_REDIRECT, 6524 }; 6525 6526 /* user accessible metadata for XDP packet hook 6527 * new fields must be added to the end of this structure 6528 */ 6529 struct xdp_md { 6530 __u32 data; 6531 __u32 data_end; 6532 __u32 data_meta; 6533 /* Below access go through struct xdp_rxq_info */ 6534 __u32 ingress_ifindex; /* rxq->dev->ifindex */ 6535 __u32 rx_queue_index; /* rxq->queue_index */ 6536 6537 __u32 egress_ifindex; /* txq->dev->ifindex */ 6538 }; 6539 6540 /* DEVMAP map-value layout 6541 * 6542 * The struct data-layout of map-value is a configuration interface. 6543 * New members can only be added to the end of this structure. 6544 */ 6545 struct bpf_devmap_val { 6546 __u32 ifindex; /* device index */ 6547 union { 6548 int fd; /* prog fd on map write */ 6549 __u32 id; /* prog id on map read */ 6550 } bpf_prog; 6551 }; 6552 6553 /* CPUMAP map-value layout 6554 * 6555 * The struct data-layout of map-value is a configuration interface. 6556 * New members can only be added to the end of this structure. 6557 */ 6558 struct bpf_cpumap_val { 6559 __u32 qsize; /* queue size to remote target CPU */ 6560 union { 6561 int fd; /* prog fd on map write */ 6562 __u32 id; /* prog id on map read */ 6563 } bpf_prog; 6564 }; 6565 6566 enum sk_action { 6567 SK_DROP = 0, 6568 SK_PASS, 6569 }; 6570 6571 /* user accessible metadata for SK_MSG packet hook, new fields must 6572 * be added to the end of this structure 6573 */ 6574 struct sk_msg_md { 6575 __bpf_md_ptr(void *, data); 6576 __bpf_md_ptr(void *, data_end); 6577 6578 __u32 family; 6579 __u32 remote_ip4; /* Stored in network byte order */ 6580 __u32 local_ip4; /* Stored in network byte order */ 6581 __u32 remote_ip6[4]; /* Stored in network byte order */ 6582 __u32 local_ip6[4]; /* Stored in network byte order */ 6583 __u32 remote_port; /* Stored in network byte order */ 6584 __u32 local_port; /* stored in host byte order */ 6585 __u32 size; /* Total size of sk_msg */ 6586 6587 __bpf_md_ptr(struct bpf_sock *, sk); /* current socket */ 6588 }; 6589 6590 struct sk_reuseport_md { 6591 /* 6592 * Start of directly accessible data. It begins from 6593 * the tcp/udp header. 6594 */ 6595 __bpf_md_ptr(void *, data); 6596 /* End of directly accessible data */ 6597 __bpf_md_ptr(void *, data_end); 6598 /* 6599 * Total length of packet (starting from the tcp/udp header). 6600 * Note that the directly accessible bytes (data_end - data) 6601 * could be less than this "len". Those bytes could be 6602 * indirectly read by a helper "bpf_skb_load_bytes()". 6603 */ 6604 __u32 len; 6605 /* 6606 * Eth protocol in the mac header (network byte order). e.g. 6607 * ETH_P_IP(0x0800) and ETH_P_IPV6(0x86DD) 6608 */ 6609 __u32 eth_protocol; 6610 __u32 ip_protocol; /* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */ 6611 __u32 bind_inany; /* Is sock bound to an INANY address? */ 6612 __u32 hash; /* A hash of the packet 4 tuples */ 6613 /* When reuse->migrating_sk is NULL, it is selecting a sk for the 6614 * new incoming connection request (e.g. selecting a listen sk for 6615 * the received SYN in the TCP case). reuse->sk is one of the sk 6616 * in the reuseport group. The bpf prog can use reuse->sk to learn 6617 * the local listening ip/port without looking into the skb. 6618 * 6619 * When reuse->migrating_sk is not NULL, reuse->sk is closed and 6620 * reuse->migrating_sk is the socket that needs to be migrated 6621 * to another listening socket. migrating_sk could be a fullsock 6622 * sk that is fully established or a reqsk that is in-the-middle 6623 * of 3-way handshake. 6624 */ 6625 __bpf_md_ptr(struct bpf_sock *, sk); 6626 __bpf_md_ptr(struct bpf_sock *, migrating_sk); 6627 }; 6628 6629 #define BPF_TAG_SIZE 8 6630 6631 struct bpf_prog_info { 6632 __u32 type; 6633 __u32 id; 6634 __u8 tag[BPF_TAG_SIZE]; 6635 __u32 jited_prog_len; 6636 __u32 xlated_prog_len; 6637 __aligned_u64 jited_prog_insns; 6638 __aligned_u64 xlated_prog_insns; 6639 __u64 load_time; /* ns since boottime */ 6640 __u32 created_by_uid; 6641 __u32 nr_map_ids; 6642 __aligned_u64 map_ids; 6643 char name[BPF_OBJ_NAME_LEN]; 6644 __u32 ifindex; 6645 __u32 gpl_compatible:1; 6646 __u32 :31; /* alignment pad */ 6647 __u64 netns_dev; 6648 __u64 netns_ino; 6649 __u32 nr_jited_ksyms; 6650 __u32 nr_jited_func_lens; 6651 __aligned_u64 jited_ksyms; 6652 __aligned_u64 jited_func_lens; 6653 __u32 btf_id; 6654 __u32 func_info_rec_size; 6655 __aligned_u64 func_info; 6656 __u32 nr_func_info; 6657 __u32 nr_line_info; 6658 __aligned_u64 line_info; 6659 __aligned_u64 jited_line_info; 6660 __u32 nr_jited_line_info; 6661 __u32 line_info_rec_size; 6662 __u32 jited_line_info_rec_size; 6663 __u32 nr_prog_tags; 6664 __aligned_u64 prog_tags; 6665 __u64 run_time_ns; 6666 __u64 run_cnt; 6667 __u64 recursion_misses; 6668 __u32 verified_insns; 6669 __u32 attach_btf_obj_id; 6670 __u32 attach_btf_id; 6671 } __attribute__((aligned(8))); 6672 6673 struct bpf_map_info { 6674 __u32 type; 6675 __u32 id; 6676 __u32 key_size; 6677 __u32 value_size; 6678 __u32 max_entries; 6679 __u32 map_flags; 6680 char name[BPF_OBJ_NAME_LEN]; 6681 __u32 ifindex; 6682 __u32 btf_vmlinux_value_type_id; 6683 __u64 netns_dev; 6684 __u64 netns_ino; 6685 __u32 btf_id; 6686 __u32 btf_key_type_id; 6687 __u32 btf_value_type_id; 6688 __u32 btf_vmlinux_id; 6689 __u64 map_extra; 6690 __aligned_u64 hash; 6691 __u32 hash_size; 6692 } __attribute__((aligned(8))); 6693 6694 struct bpf_btf_info { 6695 __aligned_u64 btf; 6696 __u32 btf_size; 6697 __u32 id; 6698 __aligned_u64 name; 6699 __u32 name_len; 6700 __u32 kernel_btf; 6701 } __attribute__((aligned(8))); 6702 6703 struct bpf_link_info { 6704 __u32 type; 6705 __u32 id; 6706 __u32 prog_id; 6707 union { 6708 struct { 6709 __aligned_u64 tp_name; /* in/out: tp_name buffer ptr */ 6710 __u32 tp_name_len; /* in/out: tp_name buffer len */ 6711 __u32 :32; 6712 __u64 cookie; 6713 } raw_tracepoint; 6714 struct { 6715 __u32 attach_type; 6716 __u32 target_obj_id; /* prog_id for PROG_EXT, otherwise btf object id */ 6717 __u32 target_btf_id; /* BTF type id inside the object */ 6718 __u32 :32; 6719 __u64 cookie; 6720 } tracing; 6721 struct { 6722 __u64 cgroup_id; 6723 __u32 attach_type; 6724 } cgroup; 6725 struct { 6726 __aligned_u64 target_name; /* in/out: target_name buffer ptr */ 6727 __u32 target_name_len; /* in/out: target_name buffer len */ 6728 6729 /* If the iter specific field is 32 bits, it can be put 6730 * in the first or second union. Otherwise it should be 6731 * put in the second union. 6732 */ 6733 union { 6734 struct { 6735 __u32 map_id; 6736 } map; 6737 }; 6738 union { 6739 struct { 6740 __u64 cgroup_id; 6741 __u32 order; 6742 } cgroup; 6743 struct { 6744 __u32 tid; 6745 __u32 pid; 6746 } task; 6747 }; 6748 } iter; 6749 struct { 6750 __u32 netns_ino; 6751 __u32 attach_type; 6752 } netns; 6753 struct { 6754 __u32 ifindex; 6755 } xdp; 6756 struct { 6757 __u32 map_id; 6758 } struct_ops; 6759 struct { 6760 __u32 pf; 6761 __u32 hooknum; 6762 __s32 priority; 6763 __u32 flags; 6764 } netfilter; 6765 struct { 6766 __aligned_u64 addrs; 6767 __u32 count; /* in/out: kprobe_multi function count */ 6768 __u32 flags; 6769 __u64 missed; 6770 __aligned_u64 cookies; 6771 } kprobe_multi; 6772 struct { 6773 __aligned_u64 path; 6774 __aligned_u64 offsets; 6775 __aligned_u64 ref_ctr_offsets; 6776 __aligned_u64 cookies; 6777 __u32 path_size; /* in/out: real path size on success, including zero byte */ 6778 __u32 count; /* in/out: uprobe_multi offsets/ref_ctr_offsets/cookies count */ 6779 __u32 flags; 6780 __u32 pid; 6781 } uprobe_multi; 6782 struct { 6783 __u32 type; /* enum bpf_perf_event_type */ 6784 __u32 :32; 6785 union { 6786 struct { 6787 __aligned_u64 file_name; /* in/out */ 6788 __u32 name_len; 6789 __u32 offset; /* offset from file_name */ 6790 __u64 cookie; 6791 __u64 ref_ctr_offset; 6792 } uprobe; /* BPF_PERF_EVENT_UPROBE, BPF_PERF_EVENT_URETPROBE */ 6793 struct { 6794 __aligned_u64 func_name; /* in/out */ 6795 __u32 name_len; 6796 __u32 offset; /* offset from func_name */ 6797 __u64 addr; 6798 __u64 missed; 6799 __u64 cookie; 6800 } kprobe; /* BPF_PERF_EVENT_KPROBE, BPF_PERF_EVENT_KRETPROBE */ 6801 struct { 6802 __aligned_u64 tp_name; /* in/out */ 6803 __u32 name_len; 6804 __u32 :32; 6805 __u64 cookie; 6806 } tracepoint; /* BPF_PERF_EVENT_TRACEPOINT */ 6807 struct { 6808 __u64 config; 6809 __u32 type; 6810 __u32 :32; 6811 __u64 cookie; 6812 } event; /* BPF_PERF_EVENT_EVENT */ 6813 }; 6814 } perf_event; 6815 struct { 6816 __u32 ifindex; 6817 __u32 attach_type; 6818 } tcx; 6819 struct { 6820 __u32 ifindex; 6821 __u32 attach_type; 6822 } netkit; 6823 struct { 6824 __u32 map_id; 6825 __u32 attach_type; 6826 } sockmap; 6827 }; 6828 } __attribute__((aligned(8))); 6829 6830 struct bpf_token_info { 6831 __u64 allowed_cmds; 6832 __u64 allowed_maps; 6833 __u64 allowed_progs; 6834 __u64 allowed_attachs; 6835 } __attribute__((aligned(8))); 6836 6837 /* User bpf_sock_addr struct to access socket fields and sockaddr struct passed 6838 * by user and intended to be used by socket (e.g. to bind to, depends on 6839 * attach type). 6840 */ 6841 struct bpf_sock_addr { 6842 __u32 user_family; /* Allows 4-byte read, but no write. */ 6843 __u32 user_ip4; /* Allows 1,2,4-byte read and 4-byte write. 6844 * Stored in network byte order. 6845 */ 6846 __u32 user_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write. 6847 * Stored in network byte order. 6848 */ 6849 __u32 user_port; /* Allows 1,2,4-byte read and 4-byte write. 6850 * Stored in network byte order 6851 */ 6852 __u32 family; /* Allows 4-byte read, but no write */ 6853 __u32 type; /* Allows 4-byte read, but no write */ 6854 __u32 protocol; /* Allows 4-byte read, but no write */ 6855 __u32 msg_src_ip4; /* Allows 1,2,4-byte read and 4-byte write. 6856 * Stored in network byte order. 6857 */ 6858 __u32 msg_src_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write. 6859 * Stored in network byte order. 6860 */ 6861 __bpf_md_ptr(struct bpf_sock *, sk); 6862 }; 6863 6864 /* User bpf_sock_ops struct to access socket values and specify request ops 6865 * and their replies. 6866 * Some of this fields are in network (bigendian) byte order and may need 6867 * to be converted before use (bpf_ntohl() defined in samples/bpf/bpf_endian.h). 6868 * New fields can only be added at the end of this structure 6869 */ 6870 struct bpf_sock_ops { 6871 __u32 op; 6872 union { 6873 __u32 args[4]; /* Optionally passed to bpf program */ 6874 __u32 reply; /* Returned by bpf program */ 6875 __u32 replylong[4]; /* Optionally returned by bpf prog */ 6876 }; 6877 __u32 family; 6878 __u32 remote_ip4; /* Stored in network byte order */ 6879 __u32 local_ip4; /* Stored in network byte order */ 6880 __u32 remote_ip6[4]; /* Stored in network byte order */ 6881 __u32 local_ip6[4]; /* Stored in network byte order */ 6882 __u32 remote_port; /* Stored in network byte order */ 6883 __u32 local_port; /* stored in host byte order */ 6884 __u32 is_fullsock; /* Some TCP fields are only valid if 6885 * there is a full socket. If not, the 6886 * fields read as zero. 6887 */ 6888 __u32 snd_cwnd; 6889 __u32 srtt_us; /* Averaged RTT << 3 in usecs */ 6890 __u32 bpf_sock_ops_cb_flags; /* flags defined in uapi/linux/tcp.h */ 6891 __u32 state; 6892 __u32 rtt_min; 6893 __u32 snd_ssthresh; 6894 __u32 rcv_nxt; 6895 __u32 snd_nxt; 6896 __u32 snd_una; 6897 __u32 mss_cache; 6898 __u32 ecn_flags; 6899 __u32 rate_delivered; 6900 __u32 rate_interval_us; 6901 __u32 packets_out; 6902 __u32 retrans_out; 6903 __u32 total_retrans; 6904 __u32 segs_in; 6905 __u32 data_segs_in; 6906 __u32 segs_out; 6907 __u32 data_segs_out; 6908 __u32 lost_out; 6909 __u32 sacked_out; 6910 __u32 sk_txhash; 6911 __u64 bytes_received; 6912 __u64 bytes_acked; 6913 __bpf_md_ptr(struct bpf_sock *, sk); 6914 /* [skb_data, skb_data_end) covers the whole TCP header. 6915 * 6916 * BPF_SOCK_OPS_PARSE_HDR_OPT_CB: The packet received 6917 * BPF_SOCK_OPS_HDR_OPT_LEN_CB: Not useful because the 6918 * header has not been written. 6919 * BPF_SOCK_OPS_WRITE_HDR_OPT_CB: The header and options have 6920 * been written so far. 6921 * BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB: The SYNACK that concludes 6922 * the 3WHS. 6923 * BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB: The ACK that concludes 6924 * the 3WHS. 6925 * 6926 * bpf_load_hdr_opt() can also be used to read a particular option. 6927 */ 6928 __bpf_md_ptr(void *, skb_data); 6929 __bpf_md_ptr(void *, skb_data_end); 6930 __u32 skb_len; /* The total length of a packet. 6931 * It includes the header, options, 6932 * and payload. 6933 */ 6934 __u32 skb_tcp_flags; /* tcp_flags of the header. It provides 6935 * an easy way to check for tcp_flags 6936 * without parsing skb_data. 6937 * 6938 * In particular, the skb_tcp_flags 6939 * will still be available in 6940 * BPF_SOCK_OPS_HDR_OPT_LEN even though 6941 * the outgoing header has not 6942 * been written yet. 6943 */ 6944 __u64 skb_hwtstamp; 6945 }; 6946 6947 /* Definitions for bpf_sock_ops_cb_flags */ 6948 enum { 6949 BPF_SOCK_OPS_RTO_CB_FLAG = (1<<0), 6950 BPF_SOCK_OPS_RETRANS_CB_FLAG = (1<<1), 6951 BPF_SOCK_OPS_STATE_CB_FLAG = (1<<2), 6952 BPF_SOCK_OPS_RTT_CB_FLAG = (1<<3), 6953 /* Call bpf for all received TCP headers. The bpf prog will be 6954 * called under sock_ops->op == BPF_SOCK_OPS_PARSE_HDR_OPT_CB 6955 * 6956 * Please refer to the comment in BPF_SOCK_OPS_PARSE_HDR_OPT_CB 6957 * for the header option related helpers that will be useful 6958 * to the bpf programs. 6959 * 6960 * It could be used at the client/active side (i.e. connect() side) 6961 * when the server told it that the server was in syncookie 6962 * mode and required the active side to resend the bpf-written 6963 * options. The active side can keep writing the bpf-options until 6964 * it received a valid packet from the server side to confirm 6965 * the earlier packet (and options) has been received. The later 6966 * example patch is using it like this at the active side when the 6967 * server is in syncookie mode. 6968 * 6969 * The bpf prog will usually turn this off in the common cases. 6970 */ 6971 BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG = (1<<4), 6972 /* Call bpf when kernel has received a header option that 6973 * the kernel cannot handle. The bpf prog will be called under 6974 * sock_ops->op == BPF_SOCK_OPS_PARSE_HDR_OPT_CB. 6975 * 6976 * Please refer to the comment in BPF_SOCK_OPS_PARSE_HDR_OPT_CB 6977 * for the header option related helpers that will be useful 6978 * to the bpf programs. 6979 */ 6980 BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG = (1<<5), 6981 /* Call bpf when the kernel is writing header options for the 6982 * outgoing packet. The bpf prog will first be called 6983 * to reserve space in a skb under 6984 * sock_ops->op == BPF_SOCK_OPS_HDR_OPT_LEN_CB. Then 6985 * the bpf prog will be called to write the header option(s) 6986 * under sock_ops->op == BPF_SOCK_OPS_WRITE_HDR_OPT_CB. 6987 * 6988 * Please refer to the comment in BPF_SOCK_OPS_HDR_OPT_LEN_CB 6989 * and BPF_SOCK_OPS_WRITE_HDR_OPT_CB for the header option 6990 * related helpers that will be useful to the bpf programs. 6991 * 6992 * The kernel gets its chance to reserve space and write 6993 * options first before the BPF program does. 6994 */ 6995 BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG = (1<<6), 6996 /* Mask of all currently supported cb flags */ 6997 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7F, 6998 }; 6999 7000 enum { 7001 SK_BPF_CB_TX_TIMESTAMPING = 1<<0, 7002 SK_BPF_CB_MASK = (SK_BPF_CB_TX_TIMESTAMPING - 1) | 7003 SK_BPF_CB_TX_TIMESTAMPING 7004 }; 7005 7006 /* List of known BPF sock_ops operators. 7007 * New entries can only be added at the end 7008 */ 7009 enum { 7010 BPF_SOCK_OPS_VOID, 7011 BPF_SOCK_OPS_TIMEOUT_INIT, /* Should return SYN-RTO value to use or 7012 * -1 if default value should be used 7013 */ 7014 BPF_SOCK_OPS_RWND_INIT, /* Should return initial advertized 7015 * window (in packets) or -1 if default 7016 * value should be used 7017 */ 7018 BPF_SOCK_OPS_TCP_CONNECT_CB, /* Calls BPF program right before an 7019 * active connection is initialized 7020 */ 7021 BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB, /* Calls BPF program when an 7022 * active connection is 7023 * established 7024 */ 7025 BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, /* Calls BPF program when a 7026 * passive connection is 7027 * established 7028 */ 7029 BPF_SOCK_OPS_NEEDS_ECN, /* If connection's congestion control 7030 * needs ECN 7031 */ 7032 BPF_SOCK_OPS_BASE_RTT, /* Get base RTT. The correct value is 7033 * based on the path and may be 7034 * dependent on the congestion control 7035 * algorithm. In general it indicates 7036 * a congestion threshold. RTTs above 7037 * this indicate congestion 7038 */ 7039 BPF_SOCK_OPS_RTO_CB, /* Called when an RTO has triggered. 7040 * Arg1: value of icsk_retransmits 7041 * Arg2: value of icsk_rto 7042 * Arg3: whether RTO has expired 7043 */ 7044 BPF_SOCK_OPS_RETRANS_CB, /* Called when skb is retransmitted. 7045 * Arg1: sequence number of 1st byte 7046 * Arg2: # segments 7047 * Arg3: return value of 7048 * tcp_transmit_skb (0 => success) 7049 */ 7050 BPF_SOCK_OPS_STATE_CB, /* Called when TCP changes state. 7051 * Arg1: old_state 7052 * Arg2: new_state 7053 */ 7054 BPF_SOCK_OPS_TCP_LISTEN_CB, /* Called on listen(2), right after 7055 * socket transition to LISTEN state. 7056 */ 7057 BPF_SOCK_OPS_RTT_CB, /* Called on every RTT. 7058 * Arg1: measured RTT input (mrtt) 7059 * Arg2: updated srtt 7060 */ 7061 BPF_SOCK_OPS_PARSE_HDR_OPT_CB, /* Parse the header option. 7062 * It will be called to handle 7063 * the packets received at 7064 * an already established 7065 * connection. 7066 * 7067 * sock_ops->skb_data: 7068 * Referring to the received skb. 7069 * It covers the TCP header only. 7070 * 7071 * bpf_load_hdr_opt() can also 7072 * be used to search for a 7073 * particular option. 7074 */ 7075 BPF_SOCK_OPS_HDR_OPT_LEN_CB, /* Reserve space for writing the 7076 * header option later in 7077 * BPF_SOCK_OPS_WRITE_HDR_OPT_CB. 7078 * Arg1: bool want_cookie. (in 7079 * writing SYNACK only) 7080 * 7081 * sock_ops->skb_data: 7082 * Not available because no header has 7083 * been written yet. 7084 * 7085 * sock_ops->skb_tcp_flags: 7086 * The tcp_flags of the 7087 * outgoing skb. (e.g. SYN, ACK, FIN). 7088 * 7089 * bpf_reserve_hdr_opt() should 7090 * be used to reserve space. 7091 */ 7092 BPF_SOCK_OPS_WRITE_HDR_OPT_CB, /* Write the header options 7093 * Arg1: bool want_cookie. (in 7094 * writing SYNACK only) 7095 * 7096 * sock_ops->skb_data: 7097 * Referring to the outgoing skb. 7098 * It covers the TCP header 7099 * that has already been written 7100 * by the kernel and the 7101 * earlier bpf-progs. 7102 * 7103 * sock_ops->skb_tcp_flags: 7104 * The tcp_flags of the outgoing 7105 * skb. (e.g. SYN, ACK, FIN). 7106 * 7107 * bpf_store_hdr_opt() should 7108 * be used to write the 7109 * option. 7110 * 7111 * bpf_load_hdr_opt() can also 7112 * be used to search for a 7113 * particular option that 7114 * has already been written 7115 * by the kernel or the 7116 * earlier bpf-progs. 7117 */ 7118 BPF_SOCK_OPS_TSTAMP_SCHED_CB, /* Called when skb is passing 7119 * through dev layer when 7120 * SK_BPF_CB_TX_TIMESTAMPING 7121 * feature is on. 7122 */ 7123 BPF_SOCK_OPS_TSTAMP_SND_SW_CB, /* Called when skb is about to send 7124 * to the nic when SK_BPF_CB_TX_TIMESTAMPING 7125 * feature is on. 7126 */ 7127 BPF_SOCK_OPS_TSTAMP_SND_HW_CB, /* Called in hardware phase when 7128 * SK_BPF_CB_TX_TIMESTAMPING feature 7129 * is on. 7130 */ 7131 BPF_SOCK_OPS_TSTAMP_ACK_CB, /* Called when all the skbs in the 7132 * same sendmsg call are acked 7133 * when SK_BPF_CB_TX_TIMESTAMPING 7134 * feature is on. 7135 */ 7136 BPF_SOCK_OPS_TSTAMP_SENDMSG_CB, /* Called when every sendmsg syscall 7137 * is triggered. It's used to correlate 7138 * sendmsg timestamp with corresponding 7139 * tskey. 7140 */ 7141 }; 7142 7143 /* List of TCP states. There is a build check in net/ipv4/tcp.c to detect 7144 * changes between the TCP and BPF versions. Ideally this should never happen. 7145 * If it does, we need to add code to convert them before calling 7146 * the BPF sock_ops function. 7147 */ 7148 enum { 7149 BPF_TCP_ESTABLISHED = 1, 7150 BPF_TCP_SYN_SENT, 7151 BPF_TCP_SYN_RECV, 7152 BPF_TCP_FIN_WAIT1, 7153 BPF_TCP_FIN_WAIT2, 7154 BPF_TCP_TIME_WAIT, 7155 BPF_TCP_CLOSE, 7156 BPF_TCP_CLOSE_WAIT, 7157 BPF_TCP_LAST_ACK, 7158 BPF_TCP_LISTEN, 7159 BPF_TCP_CLOSING, /* Now a valid state */ 7160 BPF_TCP_NEW_SYN_RECV, 7161 BPF_TCP_BOUND_INACTIVE, 7162 7163 BPF_TCP_MAX_STATES /* Leave at the end! */ 7164 }; 7165 7166 enum { 7167 TCP_BPF_IW = 1001, /* Set TCP initial congestion window */ 7168 TCP_BPF_SNDCWND_CLAMP = 1002, /* Set sndcwnd_clamp */ 7169 TCP_BPF_DELACK_MAX = 1003, /* Max delay ack in usecs */ 7170 TCP_BPF_RTO_MIN = 1004, /* Min delay ack in usecs */ 7171 /* Copy the SYN pkt to optval 7172 * 7173 * BPF_PROG_TYPE_SOCK_OPS only. It is similar to the 7174 * bpf_getsockopt(TCP_SAVED_SYN) but it does not limit 7175 * to only getting from the saved_syn. It can either get the 7176 * syn packet from: 7177 * 7178 * 1. the just-received SYN packet (only available when writing the 7179 * SYNACK). It will be useful when it is not necessary to 7180 * save the SYN packet for latter use. It is also the only way 7181 * to get the SYN during syncookie mode because the syn 7182 * packet cannot be saved during syncookie. 7183 * 7184 * OR 7185 * 7186 * 2. the earlier saved syn which was done by 7187 * bpf_setsockopt(TCP_SAVE_SYN). 7188 * 7189 * The bpf_getsockopt(TCP_BPF_SYN*) option will hide where the 7190 * SYN packet is obtained. 7191 * 7192 * If the bpf-prog does not need the IP[46] header, the 7193 * bpf-prog can avoid parsing the IP header by using 7194 * TCP_BPF_SYN. Otherwise, the bpf-prog can get both 7195 * IP[46] and TCP header by using TCP_BPF_SYN_IP. 7196 * 7197 * >0: Total number of bytes copied 7198 * -ENOSPC: Not enough space in optval. Only optlen number of 7199 * bytes is copied. 7200 * -ENOENT: The SYN skb is not available now and the earlier SYN pkt 7201 * is not saved by setsockopt(TCP_SAVE_SYN). 7202 */ 7203 TCP_BPF_SYN = 1005, /* Copy the TCP header */ 7204 TCP_BPF_SYN_IP = 1006, /* Copy the IP[46] and TCP header */ 7205 TCP_BPF_SYN_MAC = 1007, /* Copy the MAC, IP[46], and TCP header */ 7206 TCP_BPF_SOCK_OPS_CB_FLAGS = 1008, /* Get or Set TCP sock ops flags */ 7207 SK_BPF_CB_FLAGS = 1009, /* Get or set sock ops flags in socket */ 7208 SK_BPF_BYPASS_PROT_MEM = 1010, /* Get or Set sk->sk_bypass_prot_mem */ 7209 }; 7210 7211 enum { 7212 BPF_LOAD_HDR_OPT_TCP_SYN = (1ULL << 0), 7213 }; 7214 7215 /* args[0] value during BPF_SOCK_OPS_HDR_OPT_LEN_CB and 7216 * BPF_SOCK_OPS_WRITE_HDR_OPT_CB. 7217 */ 7218 enum { 7219 BPF_WRITE_HDR_TCP_CURRENT_MSS = 1, /* Kernel is finding the 7220 * total option spaces 7221 * required for an established 7222 * sk in order to calculate the 7223 * MSS. No skb is actually 7224 * sent. 7225 */ 7226 BPF_WRITE_HDR_TCP_SYNACK_COOKIE = 2, /* Kernel is in syncookie mode 7227 * when sending a SYN. 7228 */ 7229 }; 7230 7231 struct bpf_perf_event_value { 7232 __u64 counter; 7233 __u64 enabled; 7234 __u64 running; 7235 }; 7236 7237 enum { 7238 BPF_DEVCG_ACC_MKNOD = (1ULL << 0), 7239 BPF_DEVCG_ACC_READ = (1ULL << 1), 7240 BPF_DEVCG_ACC_WRITE = (1ULL << 2), 7241 }; 7242 7243 enum { 7244 BPF_DEVCG_DEV_BLOCK = (1ULL << 0), 7245 BPF_DEVCG_DEV_CHAR = (1ULL << 1), 7246 }; 7247 7248 struct bpf_cgroup_dev_ctx { 7249 /* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */ 7250 __u32 access_type; 7251 __u32 major; 7252 __u32 minor; 7253 }; 7254 7255 struct bpf_raw_tracepoint_args { 7256 __u64 args[0]; 7257 }; 7258 7259 /* DIRECT: Skip the FIB rules and go to FIB table associated with device 7260 * OUTPUT: Do lookup from egress perspective; default is ingress 7261 */ 7262 enum { 7263 BPF_FIB_LOOKUP_DIRECT = (1U << 0), 7264 BPF_FIB_LOOKUP_OUTPUT = (1U << 1), 7265 BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2), 7266 BPF_FIB_LOOKUP_TBID = (1U << 3), 7267 BPF_FIB_LOOKUP_SRC = (1U << 4), 7268 BPF_FIB_LOOKUP_MARK = (1U << 5), 7269 }; 7270 7271 enum { 7272 BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */ 7273 BPF_FIB_LKUP_RET_BLACKHOLE, /* dest is blackholed; can be dropped */ 7274 BPF_FIB_LKUP_RET_UNREACHABLE, /* dest is unreachable; can be dropped */ 7275 BPF_FIB_LKUP_RET_PROHIBIT, /* dest not allowed; can be dropped */ 7276 BPF_FIB_LKUP_RET_NOT_FWDED, /* packet is not forwarded */ 7277 BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */ 7278 BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */ 7279 BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */ 7280 BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */ 7281 BPF_FIB_LKUP_RET_NO_SRC_ADDR, /* failed to derive IP src addr */ 7282 }; 7283 7284 struct bpf_fib_lookup { 7285 /* input: network family for lookup (AF_INET, AF_INET6) 7286 * output: network family of egress nexthop 7287 */ 7288 __u8 family; 7289 7290 /* set if lookup is to consider L4 data - e.g., FIB rules */ 7291 __u8 l4_protocol; 7292 __be16 sport; 7293 __be16 dport; 7294 7295 union { /* used for MTU check */ 7296 /* input to lookup */ 7297 __u16 tot_len; /* L3 length from network hdr (iph->tot_len) */ 7298 7299 /* output: MTU value */ 7300 __u16 mtu_result; 7301 } __attribute__((packed, aligned(2))); 7302 /* input: L3 device index for lookup 7303 * output: device index from FIB lookup 7304 */ 7305 __u32 ifindex; 7306 7307 union { 7308 /* inputs to lookup */ 7309 __u8 tos; /* AF_INET */ 7310 __be32 flowinfo; /* AF_INET6, flow_label + priority */ 7311 7312 /* output: metric of fib result (IPv4/IPv6 only) */ 7313 __u32 rt_metric; 7314 }; 7315 7316 /* input: source address to consider for lookup 7317 * output: source address result from lookup 7318 */ 7319 union { 7320 __be32 ipv4_src; 7321 __u32 ipv6_src[4]; /* in6_addr; network order */ 7322 }; 7323 7324 /* input to bpf_fib_lookup, ipv{4,6}_dst is destination address in 7325 * network header. output: bpf_fib_lookup sets to gateway address 7326 * if FIB lookup returns gateway route 7327 */ 7328 union { 7329 __be32 ipv4_dst; 7330 __u32 ipv6_dst[4]; /* in6_addr; network order */ 7331 }; 7332 7333 union { 7334 struct { 7335 /* output */ 7336 __be16 h_vlan_proto; 7337 __be16 h_vlan_TCI; 7338 }; 7339 /* input: when accompanied with the 7340 * 'BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID` flags, a 7341 * specific routing table to use for the fib lookup. 7342 */ 7343 __u32 tbid; 7344 }; 7345 7346 union { 7347 /* input */ 7348 struct { 7349 __u32 mark; /* policy routing */ 7350 /* 2 4-byte holes for input */ 7351 }; 7352 7353 /* output: source and dest mac */ 7354 struct { 7355 __u8 smac[6]; /* ETH_ALEN */ 7356 __u8 dmac[6]; /* ETH_ALEN */ 7357 }; 7358 }; 7359 }; 7360 7361 struct bpf_redir_neigh { 7362 /* network family for lookup (AF_INET, AF_INET6) */ 7363 __u32 nh_family; 7364 /* network address of nexthop; skips fib lookup to find gateway */ 7365 union { 7366 __be32 ipv4_nh; 7367 __u32 ipv6_nh[4]; /* in6_addr; network order */ 7368 }; 7369 }; 7370 7371 /* bpf_check_mtu flags*/ 7372 enum bpf_check_mtu_flags { 7373 BPF_MTU_CHK_SEGS = (1U << 0), 7374 }; 7375 7376 enum bpf_check_mtu_ret { 7377 BPF_MTU_CHK_RET_SUCCESS, /* check and lookup successful */ 7378 BPF_MTU_CHK_RET_FRAG_NEEDED, /* fragmentation required to fwd */ 7379 BPF_MTU_CHK_RET_SEGS_TOOBIG, /* GSO re-segmentation needed to fwd */ 7380 }; 7381 7382 enum bpf_task_fd_type { 7383 BPF_FD_TYPE_RAW_TRACEPOINT, /* tp name */ 7384 BPF_FD_TYPE_TRACEPOINT, /* tp name */ 7385 BPF_FD_TYPE_KPROBE, /* (symbol + offset) or addr */ 7386 BPF_FD_TYPE_KRETPROBE, /* (symbol + offset) or addr */ 7387 BPF_FD_TYPE_UPROBE, /* filename + offset */ 7388 BPF_FD_TYPE_URETPROBE, /* filename + offset */ 7389 }; 7390 7391 enum { 7392 BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = (1U << 0), 7393 BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = (1U << 1), 7394 BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = (1U << 2), 7395 }; 7396 7397 struct bpf_flow_keys { 7398 __u16 nhoff; 7399 __u16 thoff; 7400 __u16 addr_proto; /* ETH_P_* of valid addrs */ 7401 __u8 is_frag; 7402 __u8 is_first_frag; 7403 __u8 is_encap; 7404 __u8 ip_proto; 7405 __be16 n_proto; 7406 __be16 sport; 7407 __be16 dport; 7408 union { 7409 struct { 7410 __be32 ipv4_src; 7411 __be32 ipv4_dst; 7412 }; 7413 struct { 7414 __u32 ipv6_src[4]; /* in6_addr; network order */ 7415 __u32 ipv6_dst[4]; /* in6_addr; network order */ 7416 }; 7417 }; 7418 __u32 flags; 7419 __be32 flow_label; 7420 }; 7421 7422 struct bpf_func_info { 7423 __u32 insn_off; 7424 __u32 type_id; 7425 }; 7426 7427 #define BPF_LINE_INFO_LINE_NUM(line_col) ((line_col) >> 10) 7428 #define BPF_LINE_INFO_LINE_COL(line_col) ((line_col) & 0x3ff) 7429 7430 struct bpf_line_info { 7431 __u32 insn_off; 7432 __u32 file_name_off; 7433 __u32 line_off; 7434 __u32 line_col; 7435 }; 7436 7437 struct bpf_spin_lock { 7438 __u32 val; 7439 }; 7440 7441 struct bpf_timer { 7442 __u64 __opaque[2]; 7443 } __attribute__((aligned(8))); 7444 7445 struct bpf_task_work { 7446 __u64 __opaque; 7447 } __attribute__((aligned(8))); 7448 7449 struct bpf_wq { 7450 __u64 __opaque[2]; 7451 } __attribute__((aligned(8))); 7452 7453 struct bpf_dynptr { 7454 __u64 __opaque[2]; 7455 } __attribute__((aligned(8))); 7456 7457 struct bpf_list_head { 7458 __u64 __opaque[2]; 7459 } __attribute__((aligned(8))); 7460 7461 struct bpf_list_node { 7462 __u64 __opaque[3]; 7463 } __attribute__((aligned(8))); 7464 7465 struct bpf_rb_root { 7466 __u64 __opaque[2]; 7467 } __attribute__((aligned(8))); 7468 7469 struct bpf_rb_node { 7470 __u64 __opaque[4]; 7471 } __attribute__((aligned(8))); 7472 7473 struct bpf_refcount { 7474 __u32 __opaque[1]; 7475 } __attribute__((aligned(4))); 7476 7477 struct bpf_sysctl { 7478 __u32 write; /* Sysctl is being read (= 0) or written (= 1). 7479 * Allows 1,2,4-byte read, but no write. 7480 */ 7481 __u32 file_pos; /* Sysctl file position to read from, write to. 7482 * Allows 1,2,4-byte read an 4-byte write. 7483 */ 7484 }; 7485 7486 struct bpf_sockopt { 7487 __bpf_md_ptr(struct bpf_sock *, sk); 7488 __bpf_md_ptr(void *, optval); 7489 __bpf_md_ptr(void *, optval_end); 7490 7491 __s32 level; 7492 __s32 optname; 7493 __s32 optlen; 7494 __s32 retval; 7495 }; 7496 7497 struct bpf_pidns_info { 7498 __u32 pid; 7499 __u32 tgid; 7500 }; 7501 7502 /* User accessible data for SK_LOOKUP programs. Add new fields at the end. */ 7503 struct bpf_sk_lookup { 7504 union { 7505 __bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */ 7506 __u64 cookie; /* Non-zero if socket was selected in PROG_TEST_RUN */ 7507 }; 7508 7509 __u32 family; /* Protocol family (AF_INET, AF_INET6) */ 7510 __u32 protocol; /* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */ 7511 __u32 remote_ip4; /* Network byte order */ 7512 __u32 remote_ip6[4]; /* Network byte order */ 7513 __be16 remote_port; /* Network byte order */ 7514 __u16 :16; /* Zero padding */ 7515 __u32 local_ip4; /* Network byte order */ 7516 __u32 local_ip6[4]; /* Network byte order */ 7517 __u32 local_port; /* Host byte order */ 7518 __u32 ingress_ifindex; /* The arriving interface. Determined by inet_iif. */ 7519 }; 7520 7521 /* 7522 * struct btf_ptr is used for typed pointer representation; the 7523 * type id is used to render the pointer data as the appropriate type 7524 * via the bpf_snprintf_btf() helper described above. A flags field - 7525 * potentially to specify additional details about the BTF pointer 7526 * (rather than its mode of display) - is included for future use. 7527 * Display flags - BTF_F_* - are passed to bpf_snprintf_btf separately. 7528 */ 7529 struct btf_ptr { 7530 void *ptr; 7531 __u32 type_id; 7532 __u32 flags; /* BTF ptr flags; unused at present. */ 7533 }; 7534 7535 /* 7536 * Flags to control bpf_snprintf_btf() behaviour. 7537 * - BTF_F_COMPACT: no formatting around type information 7538 * - BTF_F_NONAME: no struct/union member names/types 7539 * - BTF_F_PTR_RAW: show raw (unobfuscated) pointer values; 7540 * equivalent to %px. 7541 * - BTF_F_ZERO: show zero-valued struct/union members; they 7542 * are not displayed by default 7543 */ 7544 enum { 7545 BTF_F_COMPACT = (1ULL << 0), 7546 BTF_F_NONAME = (1ULL << 1), 7547 BTF_F_PTR_RAW = (1ULL << 2), 7548 BTF_F_ZERO = (1ULL << 3), 7549 }; 7550 7551 /* bpf_core_relo_kind encodes which aspect of captured field/type/enum value 7552 * has to be adjusted by relocations. It is emitted by llvm and passed to 7553 * libbpf and later to the kernel. 7554 */ 7555 enum bpf_core_relo_kind { 7556 BPF_CORE_FIELD_BYTE_OFFSET = 0, /* field byte offset */ 7557 BPF_CORE_FIELD_BYTE_SIZE = 1, /* field size in bytes */ 7558 BPF_CORE_FIELD_EXISTS = 2, /* field existence in target kernel */ 7559 BPF_CORE_FIELD_SIGNED = 3, /* field signedness (0 - unsigned, 1 - signed) */ 7560 BPF_CORE_FIELD_LSHIFT_U64 = 4, /* bitfield-specific left bitshift */ 7561 BPF_CORE_FIELD_RSHIFT_U64 = 5, /* bitfield-specific right bitshift */ 7562 BPF_CORE_TYPE_ID_LOCAL = 6, /* type ID in local BPF object */ 7563 BPF_CORE_TYPE_ID_TARGET = 7, /* type ID in target kernel */ 7564 BPF_CORE_TYPE_EXISTS = 8, /* type existence in target kernel */ 7565 BPF_CORE_TYPE_SIZE = 9, /* type size in bytes */ 7566 BPF_CORE_ENUMVAL_EXISTS = 10, /* enum value existence in target kernel */ 7567 BPF_CORE_ENUMVAL_VALUE = 11, /* enum value integer value */ 7568 BPF_CORE_TYPE_MATCHES = 12, /* type match in target kernel */ 7569 }; 7570 7571 /* 7572 * "struct bpf_core_relo" is used to pass relocation data form LLVM to libbpf 7573 * and from libbpf to the kernel. 7574 * 7575 * CO-RE relocation captures the following data: 7576 * - insn_off - instruction offset (in bytes) within a BPF program that needs 7577 * its insn->imm field to be relocated with actual field info; 7578 * - type_id - BTF type ID of the "root" (containing) entity of a relocatable 7579 * type or field; 7580 * - access_str_off - offset into corresponding .BTF string section. String 7581 * interpretation depends on specific relocation kind: 7582 * - for field-based relocations, string encodes an accessed field using 7583 * a sequence of field and array indices, separated by colon (:). It's 7584 * conceptually very close to LLVM's getelementptr ([0]) instruction's 7585 * arguments for identifying offset to a field. 7586 * - for type-based relocations, strings is expected to be just "0"; 7587 * - for enum value-based relocations, string contains an index of enum 7588 * value within its enum type; 7589 * - kind - one of enum bpf_core_relo_kind; 7590 * 7591 * Example: 7592 * struct sample { 7593 * int a; 7594 * struct { 7595 * int b[10]; 7596 * }; 7597 * }; 7598 * 7599 * struct sample *s = ...; 7600 * int *x = &s->a; // encoded as "0:0" (a is field #0) 7601 * int *y = &s->b[5]; // encoded as "0:1:0:5" (anon struct is field #1, 7602 * // b is field #0 inside anon struct, accessing elem #5) 7603 * int *z = &s[10]->b; // encoded as "10:1" (ptr is used as an array) 7604 * 7605 * type_id for all relocs in this example will capture BTF type id of 7606 * `struct sample`. 7607 * 7608 * Such relocation is emitted when using __builtin_preserve_access_index() 7609 * Clang built-in, passing expression that captures field address, e.g.: 7610 * 7611 * bpf_probe_read(&dst, sizeof(dst), 7612 * __builtin_preserve_access_index(&src->a.b.c)); 7613 * 7614 * In this case Clang will emit field relocation recording necessary data to 7615 * be able to find offset of embedded `a.b.c` field within `src` struct. 7616 * 7617 * [0] https://llvm.org/docs/LangRef.html#getelementptr-instruction 7618 */ 7619 struct bpf_core_relo { 7620 __u32 insn_off; 7621 __u32 type_id; 7622 __u32 access_str_off; 7623 enum bpf_core_relo_kind kind; 7624 }; 7625 7626 /* 7627 * Flags to control bpf_timer_start() behaviour. 7628 * - BPF_F_TIMER_ABS: Timeout passed is absolute time, by default it is 7629 * relative to current time. 7630 * - BPF_F_TIMER_CPU_PIN: Timer will be pinned to the CPU of the caller. 7631 */ 7632 enum { 7633 BPF_F_TIMER_ABS = (1ULL << 0), 7634 BPF_F_TIMER_CPU_PIN = (1ULL << 1), 7635 }; 7636 7637 /* BPF numbers iterator state */ 7638 struct bpf_iter_num { 7639 /* opaque iterator state; having __u64 here allows to preserve correct 7640 * alignment requirements in vmlinux.h, generated from BTF 7641 */ 7642 __u64 __opaque[1]; 7643 } __attribute__((aligned(8))); 7644 7645 /* 7646 * Flags to control BPF kfunc behaviour. 7647 * - BPF_F_PAD_ZEROS: Pad destination buffer with zeros. (See the respective 7648 * helper documentation for details.) 7649 */ 7650 enum bpf_kfunc_flags { 7651 BPF_F_PAD_ZEROS = (1ULL << 0), 7652 }; 7653 7654 /* 7655 * Values of a BPF_MAP_TYPE_INSN_ARRAY entry must be of this type. 7656 * 7657 * Before the map is used the orig_off field should point to an 7658 * instruction inside the program being loaded. The other fields 7659 * must be set to 0. 7660 * 7661 * After the program is loaded, the xlated_off will be adjusted 7662 * by the verifier to point to the index of the original instruction 7663 * in the xlated program. If the instruction is deleted, it will 7664 * be set to (u32)-1. The jitted_off will be set to the corresponding 7665 * offset in the jitted image of the program. 7666 */ 7667 struct bpf_insn_array_value { 7668 __u32 orig_off; 7669 __u32 xlated_off; 7670 __u32 jitted_off; 7671 __u32 :32; 7672 }; 7673 7674 #endif /* _UAPI__LINUX_BPF_H__ */ 7675