xref: /linux/tools/include/uapi/linux/bpf.h (revision 3fd6c59042dbba50391e30862beac979491145fe)
1 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of version 2 of the GNU General Public
6  * License as published by the Free Software Foundation.
7  */
8 #ifndef _UAPI__LINUX_BPF_H__
9 #define _UAPI__LINUX_BPF_H__
10 
11 #include <linux/types.h>
12 #include <linux/bpf_common.h>
13 
14 /* Extended instruction set based on top of classic BPF */
15 
16 /* instruction classes */
17 #define BPF_JMP32	0x06	/* jmp mode in word width */
18 #define BPF_ALU64	0x07	/* alu mode in double word width */
19 
20 /* ld/ldx fields */
21 #define BPF_DW		0x18	/* double word (64-bit) */
22 #define BPF_MEMSX	0x80	/* load with sign extension */
23 #define BPF_ATOMIC	0xc0	/* atomic memory ops - op type in immediate */
24 #define BPF_XADD	0xc0	/* exclusive add - legacy name */
25 
26 /* alu/jmp fields */
27 #define BPF_MOV		0xb0	/* mov reg to reg */
28 #define BPF_ARSH	0xc0	/* sign extending arithmetic shift right */
29 
30 /* change endianness of a register */
31 #define BPF_END		0xd0	/* flags for endianness conversion: */
32 #define BPF_TO_LE	0x00	/* convert to little-endian */
33 #define BPF_TO_BE	0x08	/* convert to big-endian */
34 #define BPF_FROM_LE	BPF_TO_LE
35 #define BPF_FROM_BE	BPF_TO_BE
36 
37 /* jmp encodings */
38 #define BPF_JNE		0x50	/* jump != */
39 #define BPF_JLT		0xa0	/* LT is unsigned, '<' */
40 #define BPF_JLE		0xb0	/* LE is unsigned, '<=' */
41 #define BPF_JSGT	0x60	/* SGT is signed '>', GT in x86 */
42 #define BPF_JSGE	0x70	/* SGE is signed '>=', GE in x86 */
43 #define BPF_JSLT	0xc0	/* SLT is signed, '<' */
44 #define BPF_JSLE	0xd0	/* SLE is signed, '<=' */
45 #define BPF_JCOND	0xe0	/* conditional pseudo jumps: may_goto, goto_or_nop */
46 #define BPF_CALL	0x80	/* function call */
47 #define BPF_EXIT	0x90	/* function return */
48 
49 /* atomic op type fields (stored in immediate) */
50 #define BPF_FETCH	0x01	/* not an opcode on its own, used to build others */
51 #define BPF_XCHG	(0xe0 | BPF_FETCH)	/* atomic exchange */
52 #define BPF_CMPXCHG	(0xf0 | BPF_FETCH)	/* atomic compare-and-write */
53 
54 enum bpf_cond_pseudo_jmp {
55 	BPF_MAY_GOTO = 0,
56 };
57 
58 /* Register numbers */
59 enum {
60 	BPF_REG_0 = 0,
61 	BPF_REG_1,
62 	BPF_REG_2,
63 	BPF_REG_3,
64 	BPF_REG_4,
65 	BPF_REG_5,
66 	BPF_REG_6,
67 	BPF_REG_7,
68 	BPF_REG_8,
69 	BPF_REG_9,
70 	BPF_REG_10,
71 	__MAX_BPF_REG,
72 };
73 
74 /* BPF has 10 general purpose 64-bit registers and stack frame. */
75 #define MAX_BPF_REG	__MAX_BPF_REG
76 
77 struct bpf_insn {
78 	__u8	code;		/* opcode */
79 	__u8	dst_reg:4;	/* dest register */
80 	__u8	src_reg:4;	/* source register */
81 	__s16	off;		/* signed offset */
82 	__s32	imm;		/* signed immediate constant */
83 };
84 
85 /* Deprecated: use struct bpf_lpm_trie_key_u8 (when the "data" member is needed for
86  * byte access) or struct bpf_lpm_trie_key_hdr (when using an alternative type for
87  * the trailing flexible array member) instead.
88  */
89 struct bpf_lpm_trie_key {
90 	__u32	prefixlen;	/* up to 32 for AF_INET, 128 for AF_INET6 */
91 	__u8	data[0];	/* Arbitrary size */
92 };
93 
94 /* Header for bpf_lpm_trie_key structs */
95 struct bpf_lpm_trie_key_hdr {
96 	__u32	prefixlen;
97 };
98 
99 /* Key of an a BPF_MAP_TYPE_LPM_TRIE entry, with trailing byte array. */
100 struct bpf_lpm_trie_key_u8 {
101 	union {
102 		struct bpf_lpm_trie_key_hdr	hdr;
103 		__u32				prefixlen;
104 	};
105 	__u8	data[];		/* Arbitrary size */
106 };
107 
108 struct bpf_cgroup_storage_key {
109 	__u64	cgroup_inode_id;	/* cgroup inode id */
110 	__u32	attach_type;		/* program attach type (enum bpf_attach_type) */
111 };
112 
113 enum bpf_cgroup_iter_order {
114 	BPF_CGROUP_ITER_ORDER_UNSPEC = 0,
115 	BPF_CGROUP_ITER_SELF_ONLY,		/* process only a single object. */
116 	BPF_CGROUP_ITER_DESCENDANTS_PRE,	/* walk descendants in pre-order. */
117 	BPF_CGROUP_ITER_DESCENDANTS_POST,	/* walk descendants in post-order. */
118 	BPF_CGROUP_ITER_ANCESTORS_UP,		/* walk ancestors upward. */
119 };
120 
121 union bpf_iter_link_info {
122 	struct {
123 		__u32	map_fd;
124 	} map;
125 	struct {
126 		enum bpf_cgroup_iter_order order;
127 
128 		/* At most one of cgroup_fd and cgroup_id can be non-zero. If
129 		 * both are zero, the walk starts from the default cgroup v2
130 		 * root. For walking v1 hierarchy, one should always explicitly
131 		 * specify cgroup_fd.
132 		 */
133 		__u32	cgroup_fd;
134 		__u64	cgroup_id;
135 	} cgroup;
136 	/* Parameters of task iterators. */
137 	struct {
138 		__u32	tid;
139 		__u32	pid;
140 		__u32	pid_fd;
141 	} task;
142 };
143 
144 /* BPF syscall commands, see bpf(2) man-page for more details. */
145 /**
146  * DOC: eBPF Syscall Preamble
147  *
148  * The operation to be performed by the **bpf**\ () system call is determined
149  * by the *cmd* argument. Each operation takes an accompanying argument,
150  * provided via *attr*, which is a pointer to a union of type *bpf_attr* (see
151  * below). The size argument is the size of the union pointed to by *attr*.
152  */
153 /**
154  * DOC: eBPF Syscall Commands
155  *
156  * BPF_MAP_CREATE
157  *	Description
158  *		Create a map and return a file descriptor that refers to the
159  *		map. The close-on-exec file descriptor flag (see **fcntl**\ (2))
160  *		is automatically enabled for the new file descriptor.
161  *
162  *		Applying **close**\ (2) to the file descriptor returned by
163  *		**BPF_MAP_CREATE** will delete the map (but see NOTES).
164  *
165  *	Return
166  *		A new file descriptor (a nonnegative integer), or -1 if an
167  *		error occurred (in which case, *errno* is set appropriately).
168  *
169  * BPF_MAP_LOOKUP_ELEM
170  *	Description
171  *		Look up an element with a given *key* in the map referred to
172  *		by the file descriptor *map_fd*.
173  *
174  *		The *flags* argument may be specified as one of the
175  *		following:
176  *
177  *		**BPF_F_LOCK**
178  *			Look up the value of a spin-locked map without
179  *			returning the lock. This must be specified if the
180  *			elements contain a spinlock.
181  *
182  *	Return
183  *		Returns zero on success. On error, -1 is returned and *errno*
184  *		is set appropriately.
185  *
186  * BPF_MAP_UPDATE_ELEM
187  *	Description
188  *		Create or update an element (key/value pair) in a specified map.
189  *
190  *		The *flags* argument should be specified as one of the
191  *		following:
192  *
193  *		**BPF_ANY**
194  *			Create a new element or update an existing element.
195  *		**BPF_NOEXIST**
196  *			Create a new element only if it did not exist.
197  *		**BPF_EXIST**
198  *			Update an existing element.
199  *		**BPF_F_LOCK**
200  *			Update a spin_lock-ed map element.
201  *
202  *	Return
203  *		Returns zero on success. On error, -1 is returned and *errno*
204  *		is set appropriately.
205  *
206  *		May set *errno* to **EINVAL**, **EPERM**, **ENOMEM**,
207  *		**E2BIG**, **EEXIST**, or **ENOENT**.
208  *
209  *		**E2BIG**
210  *			The number of elements in the map reached the
211  *			*max_entries* limit specified at map creation time.
212  *		**EEXIST**
213  *			If *flags* specifies **BPF_NOEXIST** and the element
214  *			with *key* already exists in the map.
215  *		**ENOENT**
216  *			If *flags* specifies **BPF_EXIST** and the element with
217  *			*key* does not exist in the map.
218  *
219  * BPF_MAP_DELETE_ELEM
220  *	Description
221  *		Look up and delete an element by key in a specified map.
222  *
223  *	Return
224  *		Returns zero on success. On error, -1 is returned and *errno*
225  *		is set appropriately.
226  *
227  * BPF_MAP_GET_NEXT_KEY
228  *	Description
229  *		Look up an element by key in a specified map and return the key
230  *		of the next element. Can be used to iterate over all elements
231  *		in the map.
232  *
233  *	Return
234  *		Returns zero on success. On error, -1 is returned and *errno*
235  *		is set appropriately.
236  *
237  *		The following cases can be used to iterate over all elements of
238  *		the map:
239  *
240  *		* If *key* is not found, the operation returns zero and sets
241  *		  the *next_key* pointer to the key of the first element.
242  *		* If *key* is found, the operation returns zero and sets the
243  *		  *next_key* pointer to the key of the next element.
244  *		* If *key* is the last element, returns -1 and *errno* is set
245  *		  to **ENOENT**.
246  *
247  *		May set *errno* to **ENOMEM**, **EFAULT**, **EPERM**, or
248  *		**EINVAL** on error.
249  *
250  * BPF_PROG_LOAD
251  *	Description
252  *		Verify and load an eBPF program, returning a new file
253  *		descriptor associated with the program.
254  *
255  *		Applying **close**\ (2) to the file descriptor returned by
256  *		**BPF_PROG_LOAD** will unload the eBPF program (but see NOTES).
257  *
258  *		The close-on-exec file descriptor flag (see **fcntl**\ (2)) is
259  *		automatically enabled for the new file descriptor.
260  *
261  *	Return
262  *		A new file descriptor (a nonnegative integer), or -1 if an
263  *		error occurred (in which case, *errno* is set appropriately).
264  *
265  * BPF_OBJ_PIN
266  *	Description
267  *		Pin an eBPF program or map referred by the specified *bpf_fd*
268  *		to the provided *pathname* on the filesystem.
269  *
270  *		The *pathname* argument must not contain a dot (".").
271  *
272  *		On success, *pathname* retains a reference to the eBPF object,
273  *		preventing deallocation of the object when the original
274  *		*bpf_fd* is closed. This allow the eBPF object to live beyond
275  *		**close**\ (\ *bpf_fd*\ ), and hence the lifetime of the parent
276  *		process.
277  *
278  *		Applying **unlink**\ (2) or similar calls to the *pathname*
279  *		unpins the object from the filesystem, removing the reference.
280  *		If no other file descriptors or filesystem nodes refer to the
281  *		same object, it will be deallocated (see NOTES).
282  *
283  *		The filesystem type for the parent directory of *pathname* must
284  *		be **BPF_FS_MAGIC**.
285  *
286  *	Return
287  *		Returns zero on success. On error, -1 is returned and *errno*
288  *		is set appropriately.
289  *
290  * BPF_OBJ_GET
291  *	Description
292  *		Open a file descriptor for the eBPF object pinned to the
293  *		specified *pathname*.
294  *
295  *	Return
296  *		A new file descriptor (a nonnegative integer), or -1 if an
297  *		error occurred (in which case, *errno* is set appropriately).
298  *
299  * BPF_PROG_ATTACH
300  *	Description
301  *		Attach an eBPF program to a *target_fd* at the specified
302  *		*attach_type* hook.
303  *
304  *		The *attach_type* specifies the eBPF attachment point to
305  *		attach the program to, and must be one of *bpf_attach_type*
306  *		(see below).
307  *
308  *		The *attach_bpf_fd* must be a valid file descriptor for a
309  *		loaded eBPF program of a cgroup, flow dissector, LIRC, sockmap
310  *		or sock_ops type corresponding to the specified *attach_type*.
311  *
312  *		The *target_fd* must be a valid file descriptor for a kernel
313  *		object which depends on the attach type of *attach_bpf_fd*:
314  *
315  *		**BPF_PROG_TYPE_CGROUP_DEVICE**,
316  *		**BPF_PROG_TYPE_CGROUP_SKB**,
317  *		**BPF_PROG_TYPE_CGROUP_SOCK**,
318  *		**BPF_PROG_TYPE_CGROUP_SOCK_ADDR**,
319  *		**BPF_PROG_TYPE_CGROUP_SOCKOPT**,
320  *		**BPF_PROG_TYPE_CGROUP_SYSCTL**,
321  *		**BPF_PROG_TYPE_SOCK_OPS**
322  *
323  *			Control Group v2 hierarchy with the eBPF controller
324  *			enabled. Requires the kernel to be compiled with
325  *			**CONFIG_CGROUP_BPF**.
326  *
327  *		**BPF_PROG_TYPE_FLOW_DISSECTOR**
328  *
329  *			Network namespace (eg /proc/self/ns/net).
330  *
331  *		**BPF_PROG_TYPE_LIRC_MODE2**
332  *
333  *			LIRC device path (eg /dev/lircN). Requires the kernel
334  *			to be compiled with **CONFIG_BPF_LIRC_MODE2**.
335  *
336  *		**BPF_PROG_TYPE_SK_SKB**,
337  *		**BPF_PROG_TYPE_SK_MSG**
338  *
339  *			eBPF map of socket type (eg **BPF_MAP_TYPE_SOCKHASH**).
340  *
341  *	Return
342  *		Returns zero on success. On error, -1 is returned and *errno*
343  *		is set appropriately.
344  *
345  * BPF_PROG_DETACH
346  *	Description
347  *		Detach the eBPF program associated with the *target_fd* at the
348  *		hook specified by *attach_type*. The program must have been
349  *		previously attached using **BPF_PROG_ATTACH**.
350  *
351  *	Return
352  *		Returns zero on success. On error, -1 is returned and *errno*
353  *		is set appropriately.
354  *
355  * BPF_PROG_TEST_RUN
356  *	Description
357  *		Run the eBPF program associated with the *prog_fd* a *repeat*
358  *		number of times against a provided program context *ctx_in* and
359  *		data *data_in*, and return the modified program context
360  *		*ctx_out*, *data_out* (for example, packet data), result of the
361  *		execution *retval*, and *duration* of the test run.
362  *
363  *		The sizes of the buffers provided as input and output
364  *		parameters *ctx_in*, *ctx_out*, *data_in*, and *data_out* must
365  *		be provided in the corresponding variables *ctx_size_in*,
366  *		*ctx_size_out*, *data_size_in*, and/or *data_size_out*. If any
367  *		of these parameters are not provided (ie set to NULL), the
368  *		corresponding size field must be zero.
369  *
370  *		Some program types have particular requirements:
371  *
372  *		**BPF_PROG_TYPE_SK_LOOKUP**
373  *			*data_in* and *data_out* must be NULL.
374  *
375  *		**BPF_PROG_TYPE_RAW_TRACEPOINT**,
376  *		**BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE**
377  *
378  *			*ctx_out*, *data_in* and *data_out* must be NULL.
379  *			*repeat* must be zero.
380  *
381  *		BPF_PROG_RUN is an alias for BPF_PROG_TEST_RUN.
382  *
383  *	Return
384  *		Returns zero on success. On error, -1 is returned and *errno*
385  *		is set appropriately.
386  *
387  *		**ENOSPC**
388  *			Either *data_size_out* or *ctx_size_out* is too small.
389  *		**ENOTSUPP**
390  *			This command is not supported by the program type of
391  *			the program referred to by *prog_fd*.
392  *
393  * BPF_PROG_GET_NEXT_ID
394  *	Description
395  *		Fetch the next eBPF program currently loaded into the kernel.
396  *
397  *		Looks for the eBPF program with an id greater than *start_id*
398  *		and updates *next_id* on success. If no other eBPF programs
399  *		remain with ids higher than *start_id*, returns -1 and sets
400  *		*errno* to **ENOENT**.
401  *
402  *	Return
403  *		Returns zero on success. On error, or when no id remains, -1
404  *		is returned and *errno* is set appropriately.
405  *
406  * BPF_MAP_GET_NEXT_ID
407  *	Description
408  *		Fetch the next eBPF map currently loaded into the kernel.
409  *
410  *		Looks for the eBPF map with an id greater than *start_id*
411  *		and updates *next_id* on success. If no other eBPF maps
412  *		remain with ids higher than *start_id*, returns -1 and sets
413  *		*errno* to **ENOENT**.
414  *
415  *	Return
416  *		Returns zero on success. On error, or when no id remains, -1
417  *		is returned and *errno* is set appropriately.
418  *
419  * BPF_PROG_GET_FD_BY_ID
420  *	Description
421  *		Open a file descriptor for the eBPF program corresponding to
422  *		*prog_id*.
423  *
424  *	Return
425  *		A new file descriptor (a nonnegative integer), or -1 if an
426  *		error occurred (in which case, *errno* is set appropriately).
427  *
428  * BPF_MAP_GET_FD_BY_ID
429  *	Description
430  *		Open a file descriptor for the eBPF map corresponding to
431  *		*map_id*.
432  *
433  *	Return
434  *		A new file descriptor (a nonnegative integer), or -1 if an
435  *		error occurred (in which case, *errno* is set appropriately).
436  *
437  * BPF_OBJ_GET_INFO_BY_FD
438  *	Description
439  *		Obtain information about the eBPF object corresponding to
440  *		*bpf_fd*.
441  *
442  *		Populates up to *info_len* bytes of *info*, which will be in
443  *		one of the following formats depending on the eBPF object type
444  *		of *bpf_fd*:
445  *
446  *		* **struct bpf_prog_info**
447  *		* **struct bpf_map_info**
448  *		* **struct bpf_btf_info**
449  *		* **struct bpf_link_info**
450  *
451  *	Return
452  *		Returns zero on success. On error, -1 is returned and *errno*
453  *		is set appropriately.
454  *
455  * BPF_PROG_QUERY
456  *	Description
457  *		Obtain information about eBPF programs associated with the
458  *		specified *attach_type* hook.
459  *
460  *		The *target_fd* must be a valid file descriptor for a kernel
461  *		object which depends on the attach type of *attach_bpf_fd*:
462  *
463  *		**BPF_PROG_TYPE_CGROUP_DEVICE**,
464  *		**BPF_PROG_TYPE_CGROUP_SKB**,
465  *		**BPF_PROG_TYPE_CGROUP_SOCK**,
466  *		**BPF_PROG_TYPE_CGROUP_SOCK_ADDR**,
467  *		**BPF_PROG_TYPE_CGROUP_SOCKOPT**,
468  *		**BPF_PROG_TYPE_CGROUP_SYSCTL**,
469  *		**BPF_PROG_TYPE_SOCK_OPS**
470  *
471  *			Control Group v2 hierarchy with the eBPF controller
472  *			enabled. Requires the kernel to be compiled with
473  *			**CONFIG_CGROUP_BPF**.
474  *
475  *		**BPF_PROG_TYPE_FLOW_DISSECTOR**
476  *
477  *			Network namespace (eg /proc/self/ns/net).
478  *
479  *		**BPF_PROG_TYPE_LIRC_MODE2**
480  *
481  *			LIRC device path (eg /dev/lircN). Requires the kernel
482  *			to be compiled with **CONFIG_BPF_LIRC_MODE2**.
483  *
484  *		**BPF_PROG_QUERY** always fetches the number of programs
485  *		attached and the *attach_flags* which were used to attach those
486  *		programs. Additionally, if *prog_ids* is nonzero and the number
487  *		of attached programs is less than *prog_cnt*, populates
488  *		*prog_ids* with the eBPF program ids of the programs attached
489  *		at *target_fd*.
490  *
491  *		The following flags may alter the result:
492  *
493  *		**BPF_F_QUERY_EFFECTIVE**
494  *			Only return information regarding programs which are
495  *			currently effective at the specified *target_fd*.
496  *
497  *	Return
498  *		Returns zero on success. On error, -1 is returned and *errno*
499  *		is set appropriately.
500  *
501  * BPF_RAW_TRACEPOINT_OPEN
502  *	Description
503  *		Attach an eBPF program to a tracepoint *name* to access kernel
504  *		internal arguments of the tracepoint in their raw form.
505  *
506  *		The *prog_fd* must be a valid file descriptor associated with
507  *		a loaded eBPF program of type **BPF_PROG_TYPE_RAW_TRACEPOINT**.
508  *
509  *		No ABI guarantees are made about the content of tracepoint
510  *		arguments exposed to the corresponding eBPF program.
511  *
512  *		Applying **close**\ (2) to the file descriptor returned by
513  *		**BPF_RAW_TRACEPOINT_OPEN** will delete the map (but see NOTES).
514  *
515  *	Return
516  *		A new file descriptor (a nonnegative integer), or -1 if an
517  *		error occurred (in which case, *errno* is set appropriately).
518  *
519  * BPF_BTF_LOAD
520  *	Description
521  *		Verify and load BPF Type Format (BTF) metadata into the kernel,
522  *		returning a new file descriptor associated with the metadata.
523  *		BTF is described in more detail at
524  *		https://www.kernel.org/doc/html/latest/bpf/btf.html.
525  *
526  *		The *btf* parameter must point to valid memory providing
527  *		*btf_size* bytes of BTF binary metadata.
528  *
529  *		The returned file descriptor can be passed to other **bpf**\ ()
530  *		subcommands such as **BPF_PROG_LOAD** or **BPF_MAP_CREATE** to
531  *		associate the BTF with those objects.
532  *
533  *		Similar to **BPF_PROG_LOAD**, **BPF_BTF_LOAD** has optional
534  *		parameters to specify a *btf_log_buf*, *btf_log_size* and
535  *		*btf_log_level* which allow the kernel to return freeform log
536  *		output regarding the BTF verification process.
537  *
538  *	Return
539  *		A new file descriptor (a nonnegative integer), or -1 if an
540  *		error occurred (in which case, *errno* is set appropriately).
541  *
542  * BPF_BTF_GET_FD_BY_ID
543  *	Description
544  *		Open a file descriptor for the BPF Type Format (BTF)
545  *		corresponding to *btf_id*.
546  *
547  *	Return
548  *		A new file descriptor (a nonnegative integer), or -1 if an
549  *		error occurred (in which case, *errno* is set appropriately).
550  *
551  * BPF_TASK_FD_QUERY
552  *	Description
553  *		Obtain information about eBPF programs associated with the
554  *		target process identified by *pid* and *fd*.
555  *
556  *		If the *pid* and *fd* are associated with a tracepoint, kprobe
557  *		or uprobe perf event, then the *prog_id* and *fd_type* will
558  *		be populated with the eBPF program id and file descriptor type
559  *		of type **bpf_task_fd_type**. If associated with a kprobe or
560  *		uprobe, the  *probe_offset* and *probe_addr* will also be
561  *		populated. Optionally, if *buf* is provided, then up to
562  *		*buf_len* bytes of *buf* will be populated with the name of
563  *		the tracepoint, kprobe or uprobe.
564  *
565  *		The resulting *prog_id* may be introspected in deeper detail
566  *		using **BPF_PROG_GET_FD_BY_ID** and **BPF_OBJ_GET_INFO_BY_FD**.
567  *
568  *	Return
569  *		Returns zero on success. On error, -1 is returned and *errno*
570  *		is set appropriately.
571  *
572  * BPF_MAP_LOOKUP_AND_DELETE_ELEM
573  *	Description
574  *		Look up an element with the given *key* in the map referred to
575  *		by the file descriptor *fd*, and if found, delete the element.
576  *
577  *		For **BPF_MAP_TYPE_QUEUE** and **BPF_MAP_TYPE_STACK** map
578  *		types, the *flags* argument needs to be set to 0, but for other
579  *		map types, it may be specified as:
580  *
581  *		**BPF_F_LOCK**
582  *			Look up and delete the value of a spin-locked map
583  *			without returning the lock. This must be specified if
584  *			the elements contain a spinlock.
585  *
586  *		The **BPF_MAP_TYPE_QUEUE** and **BPF_MAP_TYPE_STACK** map types
587  *		implement this command as a "pop" operation, deleting the top
588  *		element rather than one corresponding to *key*.
589  *		The *key* and *key_len* parameters should be zeroed when
590  *		issuing this operation for these map types.
591  *
592  *		This command is only valid for the following map types:
593  *		* **BPF_MAP_TYPE_QUEUE**
594  *		* **BPF_MAP_TYPE_STACK**
595  *		* **BPF_MAP_TYPE_HASH**
596  *		* **BPF_MAP_TYPE_PERCPU_HASH**
597  *		* **BPF_MAP_TYPE_LRU_HASH**
598  *		* **BPF_MAP_TYPE_LRU_PERCPU_HASH**
599  *
600  *	Return
601  *		Returns zero on success. On error, -1 is returned and *errno*
602  *		is set appropriately.
603  *
604  * BPF_MAP_FREEZE
605  *	Description
606  *		Freeze the permissions of the specified map.
607  *
608  *		Write permissions may be frozen by passing zero *flags*.
609  *		Upon success, no future syscall invocations may alter the
610  *		map state of *map_fd*. Write operations from eBPF programs
611  *		are still possible for a frozen map.
612  *
613  *		Not supported for maps of type **BPF_MAP_TYPE_STRUCT_OPS**.
614  *
615  *	Return
616  *		Returns zero on success. On error, -1 is returned and *errno*
617  *		is set appropriately.
618  *
619  * BPF_BTF_GET_NEXT_ID
620  *	Description
621  *		Fetch the next BPF Type Format (BTF) object currently loaded
622  *		into the kernel.
623  *
624  *		Looks for the BTF object with an id greater than *start_id*
625  *		and updates *next_id* on success. If no other BTF objects
626  *		remain with ids higher than *start_id*, returns -1 and sets
627  *		*errno* to **ENOENT**.
628  *
629  *	Return
630  *		Returns zero on success. On error, or when no id remains, -1
631  *		is returned and *errno* is set appropriately.
632  *
633  * BPF_MAP_LOOKUP_BATCH
634  *	Description
635  *		Iterate and fetch multiple elements in a map.
636  *
637  *		Two opaque values are used to manage batch operations,
638  *		*in_batch* and *out_batch*. Initially, *in_batch* must be set
639  *		to NULL to begin the batched operation. After each subsequent
640  *		**BPF_MAP_LOOKUP_BATCH**, the caller should pass the resultant
641  *		*out_batch* as the *in_batch* for the next operation to
642  *		continue iteration from the current point. Both *in_batch* and
643  *		*out_batch* must point to memory large enough to hold a key,
644  *		except for maps of type **BPF_MAP_TYPE_{HASH, PERCPU_HASH,
645  *		LRU_HASH, LRU_PERCPU_HASH}**, for which batch parameters
646  *		must be at least 4 bytes wide regardless of key size.
647  *
648  *		The *keys* and *values* are output parameters which must point
649  *		to memory large enough to hold *count* items based on the key
650  *		and value size of the map *map_fd*. The *keys* buffer must be
651  *		of *key_size* * *count*. The *values* buffer must be of
652  *		*value_size* * *count*.
653  *
654  *		The *elem_flags* argument may be specified as one of the
655  *		following:
656  *
657  *		**BPF_F_LOCK**
658  *			Look up the value of a spin-locked map without
659  *			returning the lock. This must be specified if the
660  *			elements contain a spinlock.
661  *
662  *		On success, *count* elements from the map are copied into the
663  *		user buffer, with the keys copied into *keys* and the values
664  *		copied into the corresponding indices in *values*.
665  *
666  *		If an error is returned and *errno* is not **EFAULT**, *count*
667  *		is set to the number of successfully processed elements.
668  *
669  *	Return
670  *		Returns zero on success. On error, -1 is returned and *errno*
671  *		is set appropriately.
672  *
673  *		May set *errno* to **ENOSPC** to indicate that *keys* or
674  *		*values* is too small to dump an entire bucket during
675  *		iteration of a hash-based map type.
676  *
677  * BPF_MAP_LOOKUP_AND_DELETE_BATCH
678  *	Description
679  *		Iterate and delete all elements in a map.
680  *
681  *		This operation has the same behavior as
682  *		**BPF_MAP_LOOKUP_BATCH** with two exceptions:
683  *
684  *		* Every element that is successfully returned is also deleted
685  *		  from the map. This is at least *count* elements. Note that
686  *		  *count* is both an input and an output parameter.
687  *		* Upon returning with *errno* set to **EFAULT**, up to
688  *		  *count* elements may be deleted without returning the keys
689  *		  and values of the deleted elements.
690  *
691  *	Return
692  *		Returns zero on success. On error, -1 is returned and *errno*
693  *		is set appropriately.
694  *
695  * BPF_MAP_UPDATE_BATCH
696  *	Description
697  *		Update multiple elements in a map by *key*.
698  *
699  *		The *keys* and *values* are input parameters which must point
700  *		to memory large enough to hold *count* items based on the key
701  *		and value size of the map *map_fd*. The *keys* buffer must be
702  *		of *key_size* * *count*. The *values* buffer must be of
703  *		*value_size* * *count*.
704  *
705  *		Each element specified in *keys* is sequentially updated to the
706  *		value in the corresponding index in *values*. The *in_batch*
707  *		and *out_batch* parameters are ignored and should be zeroed.
708  *
709  *		The *elem_flags* argument should be specified as one of the
710  *		following:
711  *
712  *		**BPF_ANY**
713  *			Create new elements or update a existing elements.
714  *		**BPF_NOEXIST**
715  *			Create new elements only if they do not exist.
716  *		**BPF_EXIST**
717  *			Update existing elements.
718  *		**BPF_F_LOCK**
719  *			Update spin_lock-ed map elements. This must be
720  *			specified if the map value contains a spinlock.
721  *
722  *		On success, *count* elements from the map are updated.
723  *
724  *		If an error is returned and *errno* is not **EFAULT**, *count*
725  *		is set to the number of successfully processed elements.
726  *
727  *	Return
728  *		Returns zero on success. On error, -1 is returned and *errno*
729  *		is set appropriately.
730  *
731  *		May set *errno* to **EINVAL**, **EPERM**, **ENOMEM**, or
732  *		**E2BIG**. **E2BIG** indicates that the number of elements in
733  *		the map reached the *max_entries* limit specified at map
734  *		creation time.
735  *
736  *		May set *errno* to one of the following error codes under
737  *		specific circumstances:
738  *
739  *		**EEXIST**
740  *			If *flags* specifies **BPF_NOEXIST** and the element
741  *			with *key* already exists in the map.
742  *		**ENOENT**
743  *			If *flags* specifies **BPF_EXIST** and the element with
744  *			*key* does not exist in the map.
745  *
746  * BPF_MAP_DELETE_BATCH
747  *	Description
748  *		Delete multiple elements in a map by *key*.
749  *
750  *		The *keys* parameter is an input parameter which must point
751  *		to memory large enough to hold *count* items based on the key
752  *		size of the map *map_fd*, that is, *key_size* * *count*.
753  *
754  *		Each element specified in *keys* is sequentially deleted. The
755  *		*in_batch*, *out_batch*, and *values* parameters are ignored
756  *		and should be zeroed.
757  *
758  *		The *elem_flags* argument may be specified as one of the
759  *		following:
760  *
761  *		**BPF_F_LOCK**
762  *			Look up the value of a spin-locked map without
763  *			returning the lock. This must be specified if the
764  *			elements contain a spinlock.
765  *
766  *		On success, *count* elements from the map are updated.
767  *
768  *		If an error is returned and *errno* is not **EFAULT**, *count*
769  *		is set to the number of successfully processed elements. If
770  *		*errno* is **EFAULT**, up to *count* elements may be been
771  *		deleted.
772  *
773  *	Return
774  *		Returns zero on success. On error, -1 is returned and *errno*
775  *		is set appropriately.
776  *
777  * BPF_LINK_CREATE
778  *	Description
779  *		Attach an eBPF program to a *target_fd* at the specified
780  *		*attach_type* hook and return a file descriptor handle for
781  *		managing the link.
782  *
783  *	Return
784  *		A new file descriptor (a nonnegative integer), or -1 if an
785  *		error occurred (in which case, *errno* is set appropriately).
786  *
787  * BPF_LINK_UPDATE
788  *	Description
789  *		Update the eBPF program in the specified *link_fd* to
790  *		*new_prog_fd*.
791  *
792  *	Return
793  *		Returns zero on success. On error, -1 is returned and *errno*
794  *		is set appropriately.
795  *
796  * BPF_LINK_GET_FD_BY_ID
797  *	Description
798  *		Open a file descriptor for the eBPF Link corresponding to
799  *		*link_id*.
800  *
801  *	Return
802  *		A new file descriptor (a nonnegative integer), or -1 if an
803  *		error occurred (in which case, *errno* is set appropriately).
804  *
805  * BPF_LINK_GET_NEXT_ID
806  *	Description
807  *		Fetch the next eBPF link currently loaded into the kernel.
808  *
809  *		Looks for the eBPF link with an id greater than *start_id*
810  *		and updates *next_id* on success. If no other eBPF links
811  *		remain with ids higher than *start_id*, returns -1 and sets
812  *		*errno* to **ENOENT**.
813  *
814  *	Return
815  *		Returns zero on success. On error, or when no id remains, -1
816  *		is returned and *errno* is set appropriately.
817  *
818  * BPF_ENABLE_STATS
819  *	Description
820  *		Enable eBPF runtime statistics gathering.
821  *
822  *		Runtime statistics gathering for the eBPF runtime is disabled
823  *		by default to minimize the corresponding performance overhead.
824  *		This command enables statistics globally.
825  *
826  *		Multiple programs may independently enable statistics.
827  *		After gathering the desired statistics, eBPF runtime statistics
828  *		may be disabled again by calling **close**\ (2) for the file
829  *		descriptor returned by this function. Statistics will only be
830  *		disabled system-wide when all outstanding file descriptors
831  *		returned by prior calls for this subcommand are closed.
832  *
833  *	Return
834  *		A new file descriptor (a nonnegative integer), or -1 if an
835  *		error occurred (in which case, *errno* is set appropriately).
836  *
837  * BPF_ITER_CREATE
838  *	Description
839  *		Create an iterator on top of the specified *link_fd* (as
840  *		previously created using **BPF_LINK_CREATE**) and return a
841  *		file descriptor that can be used to trigger the iteration.
842  *
843  *		If the resulting file descriptor is pinned to the filesystem
844  *		using  **BPF_OBJ_PIN**, then subsequent **read**\ (2) syscalls
845  *		for that path will trigger the iterator to read kernel state
846  *		using the eBPF program attached to *link_fd*.
847  *
848  *	Return
849  *		A new file descriptor (a nonnegative integer), or -1 if an
850  *		error occurred (in which case, *errno* is set appropriately).
851  *
852  * BPF_LINK_DETACH
853  *	Description
854  *		Forcefully detach the specified *link_fd* from its
855  *		corresponding attachment point.
856  *
857  *	Return
858  *		Returns zero on success. On error, -1 is returned and *errno*
859  *		is set appropriately.
860  *
861  * BPF_PROG_BIND_MAP
862  *	Description
863  *		Bind a map to the lifetime of an eBPF program.
864  *
865  *		The map identified by *map_fd* is bound to the program
866  *		identified by *prog_fd* and only released when *prog_fd* is
867  *		released. This may be used in cases where metadata should be
868  *		associated with a program which otherwise does not contain any
869  *		references to the map (for example, embedded in the eBPF
870  *		program instructions).
871  *
872  *	Return
873  *		Returns zero on success. On error, -1 is returned and *errno*
874  *		is set appropriately.
875  *
876  * BPF_TOKEN_CREATE
877  *	Description
878  *		Create BPF token with embedded information about what
879  *		BPF-related functionality it allows:
880  *		- a set of allowed bpf() syscall commands;
881  *		- a set of allowed BPF map types to be created with
882  *		BPF_MAP_CREATE command, if BPF_MAP_CREATE itself is allowed;
883  *		- a set of allowed BPF program types and BPF program attach
884  *		types to be loaded with BPF_PROG_LOAD command, if
885  *		BPF_PROG_LOAD itself is allowed.
886  *
887  *		BPF token is created (derived) from an instance of BPF FS,
888  *		assuming it has necessary delegation mount options specified.
889  *		This BPF token can be passed as an extra parameter to various
890  *		bpf() syscall commands to grant BPF subsystem functionality to
891  *		unprivileged processes.
892  *
893  *		When created, BPF token is "associated" with the owning
894  *		user namespace of BPF FS instance (super block) that it was
895  *		derived from, and subsequent BPF operations performed with
896  *		BPF token would be performing capabilities checks (i.e.,
897  *		CAP_BPF, CAP_PERFMON, CAP_NET_ADMIN, CAP_SYS_ADMIN) within
898  *		that user namespace. Without BPF token, such capabilities
899  *		have to be granted in init user namespace, making bpf()
900  *		syscall incompatible with user namespace, for the most part.
901  *
902  *	Return
903  *		A new file descriptor (a nonnegative integer), or -1 if an
904  *		error occurred (in which case, *errno* is set appropriately).
905  *
906  * NOTES
907  *	eBPF objects (maps and programs) can be shared between processes.
908  *
909  *	* After **fork**\ (2), the child inherits file descriptors
910  *	  referring to the same eBPF objects.
911  *	* File descriptors referring to eBPF objects can be transferred over
912  *	  **unix**\ (7) domain sockets.
913  *	* File descriptors referring to eBPF objects can be duplicated in the
914  *	  usual way, using **dup**\ (2) and similar calls.
915  *	* File descriptors referring to eBPF objects can be pinned to the
916  *	  filesystem using the **BPF_OBJ_PIN** command of **bpf**\ (2).
917  *
918  *	An eBPF object is deallocated only after all file descriptors referring
919  *	to the object have been closed and no references remain pinned to the
920  *	filesystem or attached (for example, bound to a program or device).
921  */
922 enum bpf_cmd {
923 	BPF_MAP_CREATE,
924 	BPF_MAP_LOOKUP_ELEM,
925 	BPF_MAP_UPDATE_ELEM,
926 	BPF_MAP_DELETE_ELEM,
927 	BPF_MAP_GET_NEXT_KEY,
928 	BPF_PROG_LOAD,
929 	BPF_OBJ_PIN,
930 	BPF_OBJ_GET,
931 	BPF_PROG_ATTACH,
932 	BPF_PROG_DETACH,
933 	BPF_PROG_TEST_RUN,
934 	BPF_PROG_RUN = BPF_PROG_TEST_RUN,
935 	BPF_PROG_GET_NEXT_ID,
936 	BPF_MAP_GET_NEXT_ID,
937 	BPF_PROG_GET_FD_BY_ID,
938 	BPF_MAP_GET_FD_BY_ID,
939 	BPF_OBJ_GET_INFO_BY_FD,
940 	BPF_PROG_QUERY,
941 	BPF_RAW_TRACEPOINT_OPEN,
942 	BPF_BTF_LOAD,
943 	BPF_BTF_GET_FD_BY_ID,
944 	BPF_TASK_FD_QUERY,
945 	BPF_MAP_LOOKUP_AND_DELETE_ELEM,
946 	BPF_MAP_FREEZE,
947 	BPF_BTF_GET_NEXT_ID,
948 	BPF_MAP_LOOKUP_BATCH,
949 	BPF_MAP_LOOKUP_AND_DELETE_BATCH,
950 	BPF_MAP_UPDATE_BATCH,
951 	BPF_MAP_DELETE_BATCH,
952 	BPF_LINK_CREATE,
953 	BPF_LINK_UPDATE,
954 	BPF_LINK_GET_FD_BY_ID,
955 	BPF_LINK_GET_NEXT_ID,
956 	BPF_ENABLE_STATS,
957 	BPF_ITER_CREATE,
958 	BPF_LINK_DETACH,
959 	BPF_PROG_BIND_MAP,
960 	BPF_TOKEN_CREATE,
961 	__MAX_BPF_CMD,
962 };
963 
964 enum bpf_map_type {
965 	BPF_MAP_TYPE_UNSPEC,
966 	BPF_MAP_TYPE_HASH,
967 	BPF_MAP_TYPE_ARRAY,
968 	BPF_MAP_TYPE_PROG_ARRAY,
969 	BPF_MAP_TYPE_PERF_EVENT_ARRAY,
970 	BPF_MAP_TYPE_PERCPU_HASH,
971 	BPF_MAP_TYPE_PERCPU_ARRAY,
972 	BPF_MAP_TYPE_STACK_TRACE,
973 	BPF_MAP_TYPE_CGROUP_ARRAY,
974 	BPF_MAP_TYPE_LRU_HASH,
975 	BPF_MAP_TYPE_LRU_PERCPU_HASH,
976 	BPF_MAP_TYPE_LPM_TRIE,
977 	BPF_MAP_TYPE_ARRAY_OF_MAPS,
978 	BPF_MAP_TYPE_HASH_OF_MAPS,
979 	BPF_MAP_TYPE_DEVMAP,
980 	BPF_MAP_TYPE_SOCKMAP,
981 	BPF_MAP_TYPE_CPUMAP,
982 	BPF_MAP_TYPE_XSKMAP,
983 	BPF_MAP_TYPE_SOCKHASH,
984 	BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED,
985 	/* BPF_MAP_TYPE_CGROUP_STORAGE is available to bpf programs attaching
986 	 * to a cgroup. The newer BPF_MAP_TYPE_CGRP_STORAGE is available to
987 	 * both cgroup-attached and other progs and supports all functionality
988 	 * provided by BPF_MAP_TYPE_CGROUP_STORAGE. So mark
989 	 * BPF_MAP_TYPE_CGROUP_STORAGE deprecated.
990 	 */
991 	BPF_MAP_TYPE_CGROUP_STORAGE = BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED,
992 	BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
993 	BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED,
994 	/* BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE is available to bpf programs
995 	 * attaching to a cgroup. The new mechanism (BPF_MAP_TYPE_CGRP_STORAGE +
996 	 * local percpu kptr) supports all BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE
997 	 * functionality and more. So mark * BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE
998 	 * deprecated.
999 	 */
1000 	BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED,
1001 	BPF_MAP_TYPE_QUEUE,
1002 	BPF_MAP_TYPE_STACK,
1003 	BPF_MAP_TYPE_SK_STORAGE,
1004 	BPF_MAP_TYPE_DEVMAP_HASH,
1005 	BPF_MAP_TYPE_STRUCT_OPS,
1006 	BPF_MAP_TYPE_RINGBUF,
1007 	BPF_MAP_TYPE_INODE_STORAGE,
1008 	BPF_MAP_TYPE_TASK_STORAGE,
1009 	BPF_MAP_TYPE_BLOOM_FILTER,
1010 	BPF_MAP_TYPE_USER_RINGBUF,
1011 	BPF_MAP_TYPE_CGRP_STORAGE,
1012 	BPF_MAP_TYPE_ARENA,
1013 	__MAX_BPF_MAP_TYPE
1014 };
1015 
1016 /* Note that tracing related programs such as
1017  * BPF_PROG_TYPE_{KPROBE,TRACEPOINT,PERF_EVENT,RAW_TRACEPOINT}
1018  * are not subject to a stable API since kernel internal data
1019  * structures can change from release to release and may
1020  * therefore break existing tracing BPF programs. Tracing BPF
1021  * programs correspond to /a/ specific kernel which is to be
1022  * analyzed, and not /a/ specific kernel /and/ all future ones.
1023  */
1024 enum bpf_prog_type {
1025 	BPF_PROG_TYPE_UNSPEC,
1026 	BPF_PROG_TYPE_SOCKET_FILTER,
1027 	BPF_PROG_TYPE_KPROBE,
1028 	BPF_PROG_TYPE_SCHED_CLS,
1029 	BPF_PROG_TYPE_SCHED_ACT,
1030 	BPF_PROG_TYPE_TRACEPOINT,
1031 	BPF_PROG_TYPE_XDP,
1032 	BPF_PROG_TYPE_PERF_EVENT,
1033 	BPF_PROG_TYPE_CGROUP_SKB,
1034 	BPF_PROG_TYPE_CGROUP_SOCK,
1035 	BPF_PROG_TYPE_LWT_IN,
1036 	BPF_PROG_TYPE_LWT_OUT,
1037 	BPF_PROG_TYPE_LWT_XMIT,
1038 	BPF_PROG_TYPE_SOCK_OPS,
1039 	BPF_PROG_TYPE_SK_SKB,
1040 	BPF_PROG_TYPE_CGROUP_DEVICE,
1041 	BPF_PROG_TYPE_SK_MSG,
1042 	BPF_PROG_TYPE_RAW_TRACEPOINT,
1043 	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
1044 	BPF_PROG_TYPE_LWT_SEG6LOCAL,
1045 	BPF_PROG_TYPE_LIRC_MODE2,
1046 	BPF_PROG_TYPE_SK_REUSEPORT,
1047 	BPF_PROG_TYPE_FLOW_DISSECTOR,
1048 	BPF_PROG_TYPE_CGROUP_SYSCTL,
1049 	BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
1050 	BPF_PROG_TYPE_CGROUP_SOCKOPT,
1051 	BPF_PROG_TYPE_TRACING,
1052 	BPF_PROG_TYPE_STRUCT_OPS,
1053 	BPF_PROG_TYPE_EXT,
1054 	BPF_PROG_TYPE_LSM,
1055 	BPF_PROG_TYPE_SK_LOOKUP,
1056 	BPF_PROG_TYPE_SYSCALL, /* a program that can execute syscalls */
1057 	BPF_PROG_TYPE_NETFILTER,
1058 	__MAX_BPF_PROG_TYPE
1059 };
1060 
1061 enum bpf_attach_type {
1062 	BPF_CGROUP_INET_INGRESS,
1063 	BPF_CGROUP_INET_EGRESS,
1064 	BPF_CGROUP_INET_SOCK_CREATE,
1065 	BPF_CGROUP_SOCK_OPS,
1066 	BPF_SK_SKB_STREAM_PARSER,
1067 	BPF_SK_SKB_STREAM_VERDICT,
1068 	BPF_CGROUP_DEVICE,
1069 	BPF_SK_MSG_VERDICT,
1070 	BPF_CGROUP_INET4_BIND,
1071 	BPF_CGROUP_INET6_BIND,
1072 	BPF_CGROUP_INET4_CONNECT,
1073 	BPF_CGROUP_INET6_CONNECT,
1074 	BPF_CGROUP_INET4_POST_BIND,
1075 	BPF_CGROUP_INET6_POST_BIND,
1076 	BPF_CGROUP_UDP4_SENDMSG,
1077 	BPF_CGROUP_UDP6_SENDMSG,
1078 	BPF_LIRC_MODE2,
1079 	BPF_FLOW_DISSECTOR,
1080 	BPF_CGROUP_SYSCTL,
1081 	BPF_CGROUP_UDP4_RECVMSG,
1082 	BPF_CGROUP_UDP6_RECVMSG,
1083 	BPF_CGROUP_GETSOCKOPT,
1084 	BPF_CGROUP_SETSOCKOPT,
1085 	BPF_TRACE_RAW_TP,
1086 	BPF_TRACE_FENTRY,
1087 	BPF_TRACE_FEXIT,
1088 	BPF_MODIFY_RETURN,
1089 	BPF_LSM_MAC,
1090 	BPF_TRACE_ITER,
1091 	BPF_CGROUP_INET4_GETPEERNAME,
1092 	BPF_CGROUP_INET6_GETPEERNAME,
1093 	BPF_CGROUP_INET4_GETSOCKNAME,
1094 	BPF_CGROUP_INET6_GETSOCKNAME,
1095 	BPF_XDP_DEVMAP,
1096 	BPF_CGROUP_INET_SOCK_RELEASE,
1097 	BPF_XDP_CPUMAP,
1098 	BPF_SK_LOOKUP,
1099 	BPF_XDP,
1100 	BPF_SK_SKB_VERDICT,
1101 	BPF_SK_REUSEPORT_SELECT,
1102 	BPF_SK_REUSEPORT_SELECT_OR_MIGRATE,
1103 	BPF_PERF_EVENT,
1104 	BPF_TRACE_KPROBE_MULTI,
1105 	BPF_LSM_CGROUP,
1106 	BPF_STRUCT_OPS,
1107 	BPF_NETFILTER,
1108 	BPF_TCX_INGRESS,
1109 	BPF_TCX_EGRESS,
1110 	BPF_TRACE_UPROBE_MULTI,
1111 	BPF_CGROUP_UNIX_CONNECT,
1112 	BPF_CGROUP_UNIX_SENDMSG,
1113 	BPF_CGROUP_UNIX_RECVMSG,
1114 	BPF_CGROUP_UNIX_GETPEERNAME,
1115 	BPF_CGROUP_UNIX_GETSOCKNAME,
1116 	BPF_NETKIT_PRIMARY,
1117 	BPF_NETKIT_PEER,
1118 	BPF_TRACE_KPROBE_SESSION,
1119 	BPF_TRACE_UPROBE_SESSION,
1120 	__MAX_BPF_ATTACH_TYPE
1121 };
1122 
1123 #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
1124 
1125 /* Add BPF_LINK_TYPE(type, name) in bpf_types.h to keep bpf_link_type_strs[]
1126  * in sync with the definitions below.
1127  */
1128 enum bpf_link_type {
1129 	BPF_LINK_TYPE_UNSPEC = 0,
1130 	BPF_LINK_TYPE_RAW_TRACEPOINT = 1,
1131 	BPF_LINK_TYPE_TRACING = 2,
1132 	BPF_LINK_TYPE_CGROUP = 3,
1133 	BPF_LINK_TYPE_ITER = 4,
1134 	BPF_LINK_TYPE_NETNS = 5,
1135 	BPF_LINK_TYPE_XDP = 6,
1136 	BPF_LINK_TYPE_PERF_EVENT = 7,
1137 	BPF_LINK_TYPE_KPROBE_MULTI = 8,
1138 	BPF_LINK_TYPE_STRUCT_OPS = 9,
1139 	BPF_LINK_TYPE_NETFILTER = 10,
1140 	BPF_LINK_TYPE_TCX = 11,
1141 	BPF_LINK_TYPE_UPROBE_MULTI = 12,
1142 	BPF_LINK_TYPE_NETKIT = 13,
1143 	BPF_LINK_TYPE_SOCKMAP = 14,
1144 	__MAX_BPF_LINK_TYPE,
1145 };
1146 
1147 #define MAX_BPF_LINK_TYPE __MAX_BPF_LINK_TYPE
1148 
1149 enum bpf_perf_event_type {
1150 	BPF_PERF_EVENT_UNSPEC = 0,
1151 	BPF_PERF_EVENT_UPROBE = 1,
1152 	BPF_PERF_EVENT_URETPROBE = 2,
1153 	BPF_PERF_EVENT_KPROBE = 3,
1154 	BPF_PERF_EVENT_KRETPROBE = 4,
1155 	BPF_PERF_EVENT_TRACEPOINT = 5,
1156 	BPF_PERF_EVENT_EVENT = 6,
1157 };
1158 
1159 /* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
1160  *
1161  * NONE(default): No further bpf programs allowed in the subtree.
1162  *
1163  * BPF_F_ALLOW_OVERRIDE: If a sub-cgroup installs some bpf program,
1164  * the program in this cgroup yields to sub-cgroup program.
1165  *
1166  * BPF_F_ALLOW_MULTI: If a sub-cgroup installs some bpf program,
1167  * that cgroup program gets run in addition to the program in this cgroup.
1168  *
1169  * Only one program is allowed to be attached to a cgroup with
1170  * NONE or BPF_F_ALLOW_OVERRIDE flag.
1171  * Attaching another program on top of NONE or BPF_F_ALLOW_OVERRIDE will
1172  * release old program and attach the new one. Attach flags has to match.
1173  *
1174  * Multiple programs are allowed to be attached to a cgroup with
1175  * BPF_F_ALLOW_MULTI flag. They are executed in FIFO order
1176  * (those that were attached first, run first)
1177  * The programs of sub-cgroup are executed first, then programs of
1178  * this cgroup and then programs of parent cgroup.
1179  * When children program makes decision (like picking TCP CA or sock bind)
1180  * parent program has a chance to override it.
1181  *
1182  * With BPF_F_ALLOW_MULTI a new program is added to the end of the list of
1183  * programs for a cgroup. Though it's possible to replace an old program at
1184  * any position by also specifying BPF_F_REPLACE flag and position itself in
1185  * replace_bpf_fd attribute. Old program at this position will be released.
1186  *
1187  * A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups.
1188  * A cgroup with NONE doesn't allow any programs in sub-cgroups.
1189  * Ex1:
1190  * cgrp1 (MULTI progs A, B) ->
1191  *    cgrp2 (OVERRIDE prog C) ->
1192  *      cgrp3 (MULTI prog D) ->
1193  *        cgrp4 (OVERRIDE prog E) ->
1194  *          cgrp5 (NONE prog F)
1195  * the event in cgrp5 triggers execution of F,D,A,B in that order.
1196  * if prog F is detached, the execution is E,D,A,B
1197  * if prog F and D are detached, the execution is E,A,B
1198  * if prog F, E and D are detached, the execution is C,A,B
1199  *
1200  * All eligible programs are executed regardless of return code from
1201  * earlier programs.
1202  */
1203 #define BPF_F_ALLOW_OVERRIDE	(1U << 0)
1204 #define BPF_F_ALLOW_MULTI	(1U << 1)
1205 /* Generic attachment flags. */
1206 #define BPF_F_REPLACE		(1U << 2)
1207 #define BPF_F_BEFORE		(1U << 3)
1208 #define BPF_F_AFTER		(1U << 4)
1209 #define BPF_F_ID		(1U << 5)
1210 #define BPF_F_LINK		BPF_F_LINK /* 1 << 13 */
1211 
1212 /* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
1213  * verifier will perform strict alignment checking as if the kernel
1214  * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set,
1215  * and NET_IP_ALIGN defined to 2.
1216  */
1217 #define BPF_F_STRICT_ALIGNMENT	(1U << 0)
1218 
1219 /* If BPF_F_ANY_ALIGNMENT is used in BPF_PROG_LOAD command, the
1220  * verifier will allow any alignment whatsoever.  On platforms
1221  * with strict alignment requirements for loads ands stores (such
1222  * as sparc and mips) the verifier validates that all loads and
1223  * stores provably follow this requirement.  This flag turns that
1224  * checking and enforcement off.
1225  *
1226  * It is mostly used for testing when we want to validate the
1227  * context and memory access aspects of the verifier, but because
1228  * of an unaligned access the alignment check would trigger before
1229  * the one we are interested in.
1230  */
1231 #define BPF_F_ANY_ALIGNMENT	(1U << 1)
1232 
1233 /* BPF_F_TEST_RND_HI32 is used in BPF_PROG_LOAD command for testing purpose.
1234  * Verifier does sub-register def/use analysis and identifies instructions whose
1235  * def only matters for low 32-bit, high 32-bit is never referenced later
1236  * through implicit zero extension. Therefore verifier notifies JIT back-ends
1237  * that it is safe to ignore clearing high 32-bit for these instructions. This
1238  * saves some back-ends a lot of code-gen. However such optimization is not
1239  * necessary on some arches, for example x86_64, arm64 etc, whose JIT back-ends
1240  * hence hasn't used verifier's analysis result. But, we really want to have a
1241  * way to be able to verify the correctness of the described optimization on
1242  * x86_64 on which testsuites are frequently exercised.
1243  *
1244  * So, this flag is introduced. Once it is set, verifier will randomize high
1245  * 32-bit for those instructions who has been identified as safe to ignore them.
1246  * Then, if verifier is not doing correct analysis, such randomization will
1247  * regress tests to expose bugs.
1248  */
1249 #define BPF_F_TEST_RND_HI32	(1U << 2)
1250 
1251 /* The verifier internal test flag. Behavior is undefined */
1252 #define BPF_F_TEST_STATE_FREQ	(1U << 3)
1253 
1254 /* If BPF_F_SLEEPABLE is used in BPF_PROG_LOAD command, the verifier will
1255  * restrict map and helper usage for such programs. Sleepable BPF programs can
1256  * only be attached to hooks where kernel execution context allows sleeping.
1257  * Such programs are allowed to use helpers that may sleep like
1258  * bpf_copy_from_user().
1259  */
1260 #define BPF_F_SLEEPABLE		(1U << 4)
1261 
1262 /* If BPF_F_XDP_HAS_FRAGS is used in BPF_PROG_LOAD command, the loaded program
1263  * fully support xdp frags.
1264  */
1265 #define BPF_F_XDP_HAS_FRAGS	(1U << 5)
1266 
1267 /* If BPF_F_XDP_DEV_BOUND_ONLY is used in BPF_PROG_LOAD command, the loaded
1268  * program becomes device-bound but can access XDP metadata.
1269  */
1270 #define BPF_F_XDP_DEV_BOUND_ONLY	(1U << 6)
1271 
1272 /* The verifier internal test flag. Behavior is undefined */
1273 #define BPF_F_TEST_REG_INVARIANTS	(1U << 7)
1274 
1275 /* link_create.kprobe_multi.flags used in LINK_CREATE command for
1276  * BPF_TRACE_KPROBE_MULTI attach type to create return probe.
1277  */
1278 enum {
1279 	BPF_F_KPROBE_MULTI_RETURN = (1U << 0)
1280 };
1281 
1282 /* link_create.uprobe_multi.flags used in LINK_CREATE command for
1283  * BPF_TRACE_UPROBE_MULTI attach type to create return probe.
1284  */
1285 enum {
1286 	BPF_F_UPROBE_MULTI_RETURN = (1U << 0)
1287 };
1288 
1289 /* link_create.netfilter.flags used in LINK_CREATE command for
1290  * BPF_PROG_TYPE_NETFILTER to enable IP packet defragmentation.
1291  */
1292 #define BPF_F_NETFILTER_IP_DEFRAG (1U << 0)
1293 
1294 /* When BPF ldimm64's insn[0].src_reg != 0 then this can have
1295  * the following extensions:
1296  *
1297  * insn[0].src_reg:  BPF_PSEUDO_MAP_[FD|IDX]
1298  * insn[0].imm:      map fd or fd_idx
1299  * insn[1].imm:      0
1300  * insn[0].off:      0
1301  * insn[1].off:      0
1302  * ldimm64 rewrite:  address of map
1303  * verifier type:    CONST_PTR_TO_MAP
1304  */
1305 #define BPF_PSEUDO_MAP_FD	1
1306 #define BPF_PSEUDO_MAP_IDX	5
1307 
1308 /* insn[0].src_reg:  BPF_PSEUDO_MAP_[IDX_]VALUE
1309  * insn[0].imm:      map fd or fd_idx
1310  * insn[1].imm:      offset into value
1311  * insn[0].off:      0
1312  * insn[1].off:      0
1313  * ldimm64 rewrite:  address of map[0]+offset
1314  * verifier type:    PTR_TO_MAP_VALUE
1315  */
1316 #define BPF_PSEUDO_MAP_VALUE		2
1317 #define BPF_PSEUDO_MAP_IDX_VALUE	6
1318 
1319 /* insn[0].src_reg:  BPF_PSEUDO_BTF_ID
1320  * insn[0].imm:      kernel btd id of VAR
1321  * insn[1].imm:      0
1322  * insn[0].off:      0
1323  * insn[1].off:      0
1324  * ldimm64 rewrite:  address of the kernel variable
1325  * verifier type:    PTR_TO_BTF_ID or PTR_TO_MEM, depending on whether the var
1326  *                   is struct/union.
1327  */
1328 #define BPF_PSEUDO_BTF_ID	3
1329 /* insn[0].src_reg:  BPF_PSEUDO_FUNC
1330  * insn[0].imm:      insn offset to the func
1331  * insn[1].imm:      0
1332  * insn[0].off:      0
1333  * insn[1].off:      0
1334  * ldimm64 rewrite:  address of the function
1335  * verifier type:    PTR_TO_FUNC.
1336  */
1337 #define BPF_PSEUDO_FUNC		4
1338 
1339 /* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
1340  * offset to another bpf function
1341  */
1342 #define BPF_PSEUDO_CALL		1
1343 /* when bpf_call->src_reg == BPF_PSEUDO_KFUNC_CALL,
1344  * bpf_call->imm == btf_id of a BTF_KIND_FUNC in the running kernel
1345  */
1346 #define BPF_PSEUDO_KFUNC_CALL	2
1347 
1348 enum bpf_addr_space_cast {
1349 	BPF_ADDR_SPACE_CAST = 1,
1350 };
1351 
1352 /* flags for BPF_MAP_UPDATE_ELEM command */
1353 enum {
1354 	BPF_ANY		= 0, /* create new element or update existing */
1355 	BPF_NOEXIST	= 1, /* create new element if it didn't exist */
1356 	BPF_EXIST	= 2, /* update existing element */
1357 	BPF_F_LOCK	= 4, /* spin_lock-ed map_lookup/map_update */
1358 };
1359 
1360 /* flags for BPF_MAP_CREATE command */
1361 enum {
1362 	BPF_F_NO_PREALLOC	= (1U << 0),
1363 /* Instead of having one common LRU list in the
1364  * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
1365  * which can scale and perform better.
1366  * Note, the LRU nodes (including free nodes) cannot be moved
1367  * across different LRU lists.
1368  */
1369 	BPF_F_NO_COMMON_LRU	= (1U << 1),
1370 /* Specify numa node during map creation */
1371 	BPF_F_NUMA_NODE		= (1U << 2),
1372 
1373 /* Flags for accessing BPF object from syscall side. */
1374 	BPF_F_RDONLY		= (1U << 3),
1375 	BPF_F_WRONLY		= (1U << 4),
1376 
1377 /* Flag for stack_map, store build_id+offset instead of pointer */
1378 	BPF_F_STACK_BUILD_ID	= (1U << 5),
1379 
1380 /* Zero-initialize hash function seed. This should only be used for testing. */
1381 	BPF_F_ZERO_SEED		= (1U << 6),
1382 
1383 /* Flags for accessing BPF object from program side. */
1384 	BPF_F_RDONLY_PROG	= (1U << 7),
1385 	BPF_F_WRONLY_PROG	= (1U << 8),
1386 
1387 /* Clone map from listener for newly accepted socket */
1388 	BPF_F_CLONE		= (1U << 9),
1389 
1390 /* Enable memory-mapping BPF map */
1391 	BPF_F_MMAPABLE		= (1U << 10),
1392 
1393 /* Share perf_event among processes */
1394 	BPF_F_PRESERVE_ELEMS	= (1U << 11),
1395 
1396 /* Create a map that is suitable to be an inner map with dynamic max entries */
1397 	BPF_F_INNER_MAP		= (1U << 12),
1398 
1399 /* Create a map that will be registered/unregesitered by the backed bpf_link */
1400 	BPF_F_LINK		= (1U << 13),
1401 
1402 /* Get path from provided FD in BPF_OBJ_PIN/BPF_OBJ_GET commands */
1403 	BPF_F_PATH_FD		= (1U << 14),
1404 
1405 /* Flag for value_type_btf_obj_fd, the fd is available */
1406 	BPF_F_VTYPE_BTF_OBJ_FD	= (1U << 15),
1407 
1408 /* BPF token FD is passed in a corresponding command's token_fd field */
1409 	BPF_F_TOKEN_FD          = (1U << 16),
1410 
1411 /* When user space page faults in bpf_arena send SIGSEGV instead of inserting new page */
1412 	BPF_F_SEGV_ON_FAULT	= (1U << 17),
1413 
1414 /* Do not translate kernel bpf_arena pointers to user pointers */
1415 	BPF_F_NO_USER_CONV	= (1U << 18),
1416 };
1417 
1418 /* Flags for BPF_PROG_QUERY. */
1419 
1420 /* Query effective (directly attached + inherited from ancestor cgroups)
1421  * programs that will be executed for events within a cgroup.
1422  * attach_flags with this flag are always returned 0.
1423  */
1424 #define BPF_F_QUERY_EFFECTIVE	(1U << 0)
1425 
1426 /* Flags for BPF_PROG_TEST_RUN */
1427 
1428 /* If set, run the test on the cpu specified by bpf_attr.test.cpu */
1429 #define BPF_F_TEST_RUN_ON_CPU	(1U << 0)
1430 /* If set, XDP frames will be transmitted after processing */
1431 #define BPF_F_TEST_XDP_LIVE_FRAMES	(1U << 1)
1432 /* If set, apply CHECKSUM_COMPLETE to skb and validate the checksum */
1433 #define BPF_F_TEST_SKB_CHECKSUM_COMPLETE	(1U << 2)
1434 
1435 /* type for BPF_ENABLE_STATS */
1436 enum bpf_stats_type {
1437 	/* enabled run_time_ns and run_cnt */
1438 	BPF_STATS_RUN_TIME = 0,
1439 };
1440 
1441 enum bpf_stack_build_id_status {
1442 	/* user space need an empty entry to identify end of a trace */
1443 	BPF_STACK_BUILD_ID_EMPTY = 0,
1444 	/* with valid build_id and offset */
1445 	BPF_STACK_BUILD_ID_VALID = 1,
1446 	/* couldn't get build_id, fallback to ip */
1447 	BPF_STACK_BUILD_ID_IP = 2,
1448 };
1449 
1450 #define BPF_BUILD_ID_SIZE 20
1451 struct bpf_stack_build_id {
1452 	__s32		status;
1453 	unsigned char	build_id[BPF_BUILD_ID_SIZE];
1454 	union {
1455 		__u64	offset;
1456 		__u64	ip;
1457 	};
1458 };
1459 
1460 #define BPF_OBJ_NAME_LEN 16U
1461 
1462 union bpf_attr {
1463 	struct { /* anonymous struct used by BPF_MAP_CREATE command */
1464 		__u32	map_type;	/* one of enum bpf_map_type */
1465 		__u32	key_size;	/* size of key in bytes */
1466 		__u32	value_size;	/* size of value in bytes */
1467 		__u32	max_entries;	/* max number of entries in a map */
1468 		__u32	map_flags;	/* BPF_MAP_CREATE related
1469 					 * flags defined above.
1470 					 */
1471 		__u32	inner_map_fd;	/* fd pointing to the inner map */
1472 		__u32	numa_node;	/* numa node (effective only if
1473 					 * BPF_F_NUMA_NODE is set).
1474 					 */
1475 		char	map_name[BPF_OBJ_NAME_LEN];
1476 		__u32	map_ifindex;	/* ifindex of netdev to create on */
1477 		__u32	btf_fd;		/* fd pointing to a BTF type data */
1478 		__u32	btf_key_type_id;	/* BTF type_id of the key */
1479 		__u32	btf_value_type_id;	/* BTF type_id of the value */
1480 		__u32	btf_vmlinux_value_type_id;/* BTF type_id of a kernel-
1481 						   * struct stored as the
1482 						   * map value
1483 						   */
1484 		/* Any per-map-type extra fields
1485 		 *
1486 		 * BPF_MAP_TYPE_BLOOM_FILTER - the lowest 4 bits indicate the
1487 		 * number of hash functions (if 0, the bloom filter will default
1488 		 * to using 5 hash functions).
1489 		 *
1490 		 * BPF_MAP_TYPE_ARENA - contains the address where user space
1491 		 * is going to mmap() the arena. It has to be page aligned.
1492 		 */
1493 		__u64	map_extra;
1494 
1495 		__s32   value_type_btf_obj_fd;	/* fd pointing to a BTF
1496 						 * type data for
1497 						 * btf_vmlinux_value_type_id.
1498 						 */
1499 		/* BPF token FD to use with BPF_MAP_CREATE operation.
1500 		 * If provided, map_flags should have BPF_F_TOKEN_FD flag set.
1501 		 */
1502 		__s32	map_token_fd;
1503 	};
1504 
1505 	struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
1506 		__u32		map_fd;
1507 		__aligned_u64	key;
1508 		union {
1509 			__aligned_u64 value;
1510 			__aligned_u64 next_key;
1511 		};
1512 		__u64		flags;
1513 	};
1514 
1515 	struct { /* struct used by BPF_MAP_*_BATCH commands */
1516 		__aligned_u64	in_batch;	/* start batch,
1517 						 * NULL to start from beginning
1518 						 */
1519 		__aligned_u64	out_batch;	/* output: next start batch */
1520 		__aligned_u64	keys;
1521 		__aligned_u64	values;
1522 		__u32		count;		/* input/output:
1523 						 * input: # of key/value
1524 						 * elements
1525 						 * output: # of filled elements
1526 						 */
1527 		__u32		map_fd;
1528 		__u64		elem_flags;
1529 		__u64		flags;
1530 	} batch;
1531 
1532 	struct { /* anonymous struct used by BPF_PROG_LOAD command */
1533 		__u32		prog_type;	/* one of enum bpf_prog_type */
1534 		__u32		insn_cnt;
1535 		__aligned_u64	insns;
1536 		__aligned_u64	license;
1537 		__u32		log_level;	/* verbosity level of verifier */
1538 		__u32		log_size;	/* size of user buffer */
1539 		__aligned_u64	log_buf;	/* user supplied buffer */
1540 		__u32		kern_version;	/* not used */
1541 		__u32		prog_flags;
1542 		char		prog_name[BPF_OBJ_NAME_LEN];
1543 		__u32		prog_ifindex;	/* ifindex of netdev to prep for */
1544 		/* For some prog types expected attach type must be known at
1545 		 * load time to verify attach type specific parts of prog
1546 		 * (context accesses, allowed helpers, etc).
1547 		 */
1548 		__u32		expected_attach_type;
1549 		__u32		prog_btf_fd;	/* fd pointing to BTF type data */
1550 		__u32		func_info_rec_size;	/* userspace bpf_func_info size */
1551 		__aligned_u64	func_info;	/* func info */
1552 		__u32		func_info_cnt;	/* number of bpf_func_info records */
1553 		__u32		line_info_rec_size;	/* userspace bpf_line_info size */
1554 		__aligned_u64	line_info;	/* line info */
1555 		__u32		line_info_cnt;	/* number of bpf_line_info records */
1556 		__u32		attach_btf_id;	/* in-kernel BTF type id to attach to */
1557 		union {
1558 			/* valid prog_fd to attach to bpf prog */
1559 			__u32		attach_prog_fd;
1560 			/* or valid module BTF object fd or 0 to attach to vmlinux */
1561 			__u32		attach_btf_obj_fd;
1562 		};
1563 		__u32		core_relo_cnt;	/* number of bpf_core_relo */
1564 		__aligned_u64	fd_array;	/* array of FDs */
1565 		__aligned_u64	core_relos;
1566 		__u32		core_relo_rec_size; /* sizeof(struct bpf_core_relo) */
1567 		/* output: actual total log contents size (including termintaing zero).
1568 		 * It could be both larger than original log_size (if log was
1569 		 * truncated), or smaller (if log buffer wasn't filled completely).
1570 		 */
1571 		__u32		log_true_size;
1572 		/* BPF token FD to use with BPF_PROG_LOAD operation.
1573 		 * If provided, prog_flags should have BPF_F_TOKEN_FD flag set.
1574 		 */
1575 		__s32		prog_token_fd;
1576 	};
1577 
1578 	struct { /* anonymous struct used by BPF_OBJ_* commands */
1579 		__aligned_u64	pathname;
1580 		__u32		bpf_fd;
1581 		__u32		file_flags;
1582 		/* Same as dirfd in openat() syscall; see openat(2)
1583 		 * manpage for details of path FD and pathname semantics;
1584 		 * path_fd should accompanied by BPF_F_PATH_FD flag set in
1585 		 * file_flags field, otherwise it should be set to zero;
1586 		 * if BPF_F_PATH_FD flag is not set, AT_FDCWD is assumed.
1587 		 */
1588 		__s32		path_fd;
1589 	};
1590 
1591 	struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
1592 		union {
1593 			__u32	target_fd;	/* target object to attach to or ... */
1594 			__u32	target_ifindex;	/* target ifindex */
1595 		};
1596 		__u32		attach_bpf_fd;
1597 		__u32		attach_type;
1598 		__u32		attach_flags;
1599 		__u32		replace_bpf_fd;
1600 		union {
1601 			__u32	relative_fd;
1602 			__u32	relative_id;
1603 		};
1604 		__u64		expected_revision;
1605 	};
1606 
1607 	struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
1608 		__u32		prog_fd;
1609 		__u32		retval;
1610 		__u32		data_size_in;	/* input: len of data_in */
1611 		__u32		data_size_out;	/* input/output: len of data_out
1612 						 *   returns ENOSPC if data_out
1613 						 *   is too small.
1614 						 */
1615 		__aligned_u64	data_in;
1616 		__aligned_u64	data_out;
1617 		__u32		repeat;
1618 		__u32		duration;
1619 		__u32		ctx_size_in;	/* input: len of ctx_in */
1620 		__u32		ctx_size_out;	/* input/output: len of ctx_out
1621 						 *   returns ENOSPC if ctx_out
1622 						 *   is too small.
1623 						 */
1624 		__aligned_u64	ctx_in;
1625 		__aligned_u64	ctx_out;
1626 		__u32		flags;
1627 		__u32		cpu;
1628 		__u32		batch_size;
1629 	} test;
1630 
1631 	struct { /* anonymous struct used by BPF_*_GET_*_ID */
1632 		union {
1633 			__u32		start_id;
1634 			__u32		prog_id;
1635 			__u32		map_id;
1636 			__u32		btf_id;
1637 			__u32		link_id;
1638 		};
1639 		__u32		next_id;
1640 		__u32		open_flags;
1641 	};
1642 
1643 	struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
1644 		__u32		bpf_fd;
1645 		__u32		info_len;
1646 		__aligned_u64	info;
1647 	} info;
1648 
1649 	struct { /* anonymous struct used by BPF_PROG_QUERY command */
1650 		union {
1651 			__u32	target_fd;	/* target object to query or ... */
1652 			__u32	target_ifindex;	/* target ifindex */
1653 		};
1654 		__u32		attach_type;
1655 		__u32		query_flags;
1656 		__u32		attach_flags;
1657 		__aligned_u64	prog_ids;
1658 		union {
1659 			__u32	prog_cnt;
1660 			__u32	count;
1661 		};
1662 		__u32		:32;
1663 		/* output: per-program attach_flags.
1664 		 * not allowed to be set during effective query.
1665 		 */
1666 		__aligned_u64	prog_attach_flags;
1667 		__aligned_u64	link_ids;
1668 		__aligned_u64	link_attach_flags;
1669 		__u64		revision;
1670 	} query;
1671 
1672 	struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */
1673 		__u64		name;
1674 		__u32		prog_fd;
1675 		__u32		:32;
1676 		__aligned_u64	cookie;
1677 	} raw_tracepoint;
1678 
1679 	struct { /* anonymous struct for BPF_BTF_LOAD */
1680 		__aligned_u64	btf;
1681 		__aligned_u64	btf_log_buf;
1682 		__u32		btf_size;
1683 		__u32		btf_log_size;
1684 		__u32		btf_log_level;
1685 		/* output: actual total log contents size (including termintaing zero).
1686 		 * It could be both larger than original log_size (if log was
1687 		 * truncated), or smaller (if log buffer wasn't filled completely).
1688 		 */
1689 		__u32		btf_log_true_size;
1690 		__u32		btf_flags;
1691 		/* BPF token FD to use with BPF_BTF_LOAD operation.
1692 		 * If provided, btf_flags should have BPF_F_TOKEN_FD flag set.
1693 		 */
1694 		__s32		btf_token_fd;
1695 	};
1696 
1697 	struct {
1698 		__u32		pid;		/* input: pid */
1699 		__u32		fd;		/* input: fd */
1700 		__u32		flags;		/* input: flags */
1701 		__u32		buf_len;	/* input/output: buf len */
1702 		__aligned_u64	buf;		/* input/output:
1703 						 *   tp_name for tracepoint
1704 						 *   symbol for kprobe
1705 						 *   filename for uprobe
1706 						 */
1707 		__u32		prog_id;	/* output: prod_id */
1708 		__u32		fd_type;	/* output: BPF_FD_TYPE_* */
1709 		__u64		probe_offset;	/* output: probe_offset */
1710 		__u64		probe_addr;	/* output: probe_addr */
1711 	} task_fd_query;
1712 
1713 	struct { /* struct used by BPF_LINK_CREATE command */
1714 		union {
1715 			__u32		prog_fd;	/* eBPF program to attach */
1716 			__u32		map_fd;		/* struct_ops to attach */
1717 		};
1718 		union {
1719 			__u32	target_fd;	/* target object to attach to or ... */
1720 			__u32	target_ifindex; /* target ifindex */
1721 		};
1722 		__u32		attach_type;	/* attach type */
1723 		__u32		flags;		/* extra flags */
1724 		union {
1725 			__u32	target_btf_id;	/* btf_id of target to attach to */
1726 			struct {
1727 				__aligned_u64	iter_info;	/* extra bpf_iter_link_info */
1728 				__u32		iter_info_len;	/* iter_info length */
1729 			};
1730 			struct {
1731 				/* black box user-provided value passed through
1732 				 * to BPF program at the execution time and
1733 				 * accessible through bpf_get_attach_cookie() BPF helper
1734 				 */
1735 				__u64		bpf_cookie;
1736 			} perf_event;
1737 			struct {
1738 				__u32		flags;
1739 				__u32		cnt;
1740 				__aligned_u64	syms;
1741 				__aligned_u64	addrs;
1742 				__aligned_u64	cookies;
1743 			} kprobe_multi;
1744 			struct {
1745 				/* this is overlaid with the target_btf_id above. */
1746 				__u32		target_btf_id;
1747 				/* black box user-provided value passed through
1748 				 * to BPF program at the execution time and
1749 				 * accessible through bpf_get_attach_cookie() BPF helper
1750 				 */
1751 				__u64		cookie;
1752 			} tracing;
1753 			struct {
1754 				__u32		pf;
1755 				__u32		hooknum;
1756 				__s32		priority;
1757 				__u32		flags;
1758 			} netfilter;
1759 			struct {
1760 				union {
1761 					__u32	relative_fd;
1762 					__u32	relative_id;
1763 				};
1764 				__u64		expected_revision;
1765 			} tcx;
1766 			struct {
1767 				__aligned_u64	path;
1768 				__aligned_u64	offsets;
1769 				__aligned_u64	ref_ctr_offsets;
1770 				__aligned_u64	cookies;
1771 				__u32		cnt;
1772 				__u32		flags;
1773 				__u32		pid;
1774 			} uprobe_multi;
1775 			struct {
1776 				union {
1777 					__u32	relative_fd;
1778 					__u32	relative_id;
1779 				};
1780 				__u64		expected_revision;
1781 			} netkit;
1782 		};
1783 	} link_create;
1784 
1785 	struct { /* struct used by BPF_LINK_UPDATE command */
1786 		__u32		link_fd;	/* link fd */
1787 		union {
1788 			/* new program fd to update link with */
1789 			__u32		new_prog_fd;
1790 			/* new struct_ops map fd to update link with */
1791 			__u32           new_map_fd;
1792 		};
1793 		__u32		flags;		/* extra flags */
1794 		union {
1795 			/* expected link's program fd; is specified only if
1796 			 * BPF_F_REPLACE flag is set in flags.
1797 			 */
1798 			__u32		old_prog_fd;
1799 			/* expected link's map fd; is specified only
1800 			 * if BPF_F_REPLACE flag is set.
1801 			 */
1802 			__u32           old_map_fd;
1803 		};
1804 	} link_update;
1805 
1806 	struct {
1807 		__u32		link_fd;
1808 	} link_detach;
1809 
1810 	struct { /* struct used by BPF_ENABLE_STATS command */
1811 		__u32		type;
1812 	} enable_stats;
1813 
1814 	struct { /* struct used by BPF_ITER_CREATE command */
1815 		__u32		link_fd;
1816 		__u32		flags;
1817 	} iter_create;
1818 
1819 	struct { /* struct used by BPF_PROG_BIND_MAP command */
1820 		__u32		prog_fd;
1821 		__u32		map_fd;
1822 		__u32		flags;		/* extra flags */
1823 	} prog_bind_map;
1824 
1825 	struct { /* struct used by BPF_TOKEN_CREATE command */
1826 		__u32		flags;
1827 		__u32		bpffs_fd;
1828 	} token_create;
1829 
1830 } __attribute__((aligned(8)));
1831 
1832 /* The description below is an attempt at providing documentation to eBPF
1833  * developers about the multiple available eBPF helper functions. It can be
1834  * parsed and used to produce a manual page. The workflow is the following,
1835  * and requires the rst2man utility:
1836  *
1837  *     $ ./scripts/bpf_doc.py \
1838  *             --filename include/uapi/linux/bpf.h > /tmp/bpf-helpers.rst
1839  *     $ rst2man /tmp/bpf-helpers.rst > /tmp/bpf-helpers.7
1840  *     $ man /tmp/bpf-helpers.7
1841  *
1842  * Note that in order to produce this external documentation, some RST
1843  * formatting is used in the descriptions to get "bold" and "italics" in
1844  * manual pages. Also note that the few trailing white spaces are
1845  * intentional, removing them would break paragraphs for rst2man.
1846  *
1847  * Start of BPF helper function descriptions:
1848  *
1849  * void *bpf_map_lookup_elem(struct bpf_map *map, const void *key)
1850  * 	Description
1851  * 		Perform a lookup in *map* for an entry associated to *key*.
1852  * 	Return
1853  * 		Map value associated to *key*, or **NULL** if no entry was
1854  * 		found.
1855  *
1856  * long bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags)
1857  * 	Description
1858  * 		Add or update the value of the entry associated to *key* in
1859  * 		*map* with *value*. *flags* is one of:
1860  *
1861  * 		**BPF_NOEXIST**
1862  * 			The entry for *key* must not exist in the map.
1863  * 		**BPF_EXIST**
1864  * 			The entry for *key* must already exist in the map.
1865  * 		**BPF_ANY**
1866  * 			No condition on the existence of the entry for *key*.
1867  *
1868  * 		Flag value **BPF_NOEXIST** cannot be used for maps of types
1869  * 		**BPF_MAP_TYPE_ARRAY** or **BPF_MAP_TYPE_PERCPU_ARRAY**  (all
1870  * 		elements always exist), the helper would return an error.
1871  * 	Return
1872  * 		0 on success, or a negative error in case of failure.
1873  *
1874  * long bpf_map_delete_elem(struct bpf_map *map, const void *key)
1875  * 	Description
1876  * 		Delete entry with *key* from *map*.
1877  * 	Return
1878  * 		0 on success, or a negative error in case of failure.
1879  *
1880  * long bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr)
1881  * 	Description
1882  * 		For tracing programs, safely attempt to read *size* bytes from
1883  * 		kernel space address *unsafe_ptr* and store the data in *dst*.
1884  *
1885  * 		Generally, use **bpf_probe_read_user**\ () or
1886  * 		**bpf_probe_read_kernel**\ () instead.
1887  * 	Return
1888  * 		0 on success, or a negative error in case of failure.
1889  *
1890  * u64 bpf_ktime_get_ns(void)
1891  * 	Description
1892  * 		Return the time elapsed since system boot, in nanoseconds.
1893  * 		Does not include time the system was suspended.
1894  * 		See: **clock_gettime**\ (**CLOCK_MONOTONIC**)
1895  * 	Return
1896  * 		Current *ktime*.
1897  *
1898  * long bpf_trace_printk(const char *fmt, u32 fmt_size, ...)
1899  * 	Description
1900  * 		This helper is a "printk()-like" facility for debugging. It
1901  * 		prints a message defined by format *fmt* (of size *fmt_size*)
1902  * 		to file *\/sys/kernel/tracing/trace* from TraceFS, if
1903  * 		available. It can take up to three additional **u64**
1904  * 		arguments (as an eBPF helpers, the total number of arguments is
1905  * 		limited to five).
1906  *
1907  * 		Each time the helper is called, it appends a line to the trace.
1908  * 		Lines are discarded while *\/sys/kernel/tracing/trace* is
1909  * 		open, use *\/sys/kernel/tracing/trace_pipe* to avoid this.
1910  * 		The format of the trace is customizable, and the exact output
1911  * 		one will get depends on the options set in
1912  * 		*\/sys/kernel/tracing/trace_options* (see also the
1913  * 		*README* file under the same directory). However, it usually
1914  * 		defaults to something like:
1915  *
1916  * 		::
1917  *
1918  * 			telnet-470   [001] .N.. 419421.045894: 0x00000001: <formatted msg>
1919  *
1920  * 		In the above:
1921  *
1922  * 			* ``telnet`` is the name of the current task.
1923  * 			* ``470`` is the PID of the current task.
1924  * 			* ``001`` is the CPU number on which the task is
1925  * 			  running.
1926  * 			* In ``.N..``, each character refers to a set of
1927  * 			  options (whether irqs are enabled, scheduling
1928  * 			  options, whether hard/softirqs are running, level of
1929  * 			  preempt_disabled respectively). **N** means that
1930  * 			  **TIF_NEED_RESCHED** and **PREEMPT_NEED_RESCHED**
1931  * 			  are set.
1932  * 			* ``419421.045894`` is a timestamp.
1933  * 			* ``0x00000001`` is a fake value used by BPF for the
1934  * 			  instruction pointer register.
1935  * 			* ``<formatted msg>`` is the message formatted with
1936  * 			  *fmt*.
1937  *
1938  * 		The conversion specifiers supported by *fmt* are similar, but
1939  * 		more limited than for printk(). They are **%d**, **%i**,
1940  * 		**%u**, **%x**, **%ld**, **%li**, **%lu**, **%lx**, **%lld**,
1941  * 		**%lli**, **%llu**, **%llx**, **%p**, **%s**. No modifier (size
1942  * 		of field, padding with zeroes, etc.) is available, and the
1943  * 		helper will return **-EINVAL** (but print nothing) if it
1944  * 		encounters an unknown specifier.
1945  *
1946  * 		Also, note that **bpf_trace_printk**\ () is slow, and should
1947  * 		only be used for debugging purposes. For this reason, a notice
1948  * 		block (spanning several lines) is printed to kernel logs and
1949  * 		states that the helper should not be used "for production use"
1950  * 		the first time this helper is used (or more precisely, when
1951  * 		**trace_printk**\ () buffers are allocated). For passing values
1952  * 		to user space, perf events should be preferred.
1953  * 	Return
1954  * 		The number of bytes written to the buffer, or a negative error
1955  * 		in case of failure.
1956  *
1957  * u32 bpf_get_prandom_u32(void)
1958  * 	Description
1959  * 		Get a pseudo-random number.
1960  *
1961  * 		From a security point of view, this helper uses its own
1962  * 		pseudo-random internal state, and cannot be used to infer the
1963  * 		seed of other random functions in the kernel. However, it is
1964  * 		essential to note that the generator used by the helper is not
1965  * 		cryptographically secure.
1966  * 	Return
1967  * 		A random 32-bit unsigned value.
1968  *
1969  * u32 bpf_get_smp_processor_id(void)
1970  * 	Description
1971  * 		Get the SMP (symmetric multiprocessing) processor id. Note that
1972  * 		all programs run with migration disabled, which means that the
1973  * 		SMP processor id is stable during all the execution of the
1974  * 		program.
1975  * 	Return
1976  * 		The SMP id of the processor running the program.
1977  * 	Attributes
1978  * 		__bpf_fastcall
1979  *
1980  * long bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags)
1981  * 	Description
1982  * 		Store *len* bytes from address *from* into the packet
1983  * 		associated to *skb*, at *offset*. *flags* are a combination of
1984  * 		**BPF_F_RECOMPUTE_CSUM** (automatically recompute the
1985  * 		checksum for the packet after storing the bytes) and
1986  * 		**BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\
1987  * 		**->swhash** and *skb*\ **->l4hash** to 0).
1988  *
1989  * 		A call to this helper is susceptible to change the underlying
1990  * 		packet buffer. Therefore, at load time, all checks on pointers
1991  * 		previously done by the verifier are invalidated and must be
1992  * 		performed again, if the helper is used in combination with
1993  * 		direct packet access.
1994  * 	Return
1995  * 		0 on success, or a negative error in case of failure.
1996  *
1997  * long bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size)
1998  * 	Description
1999  * 		Recompute the layer 3 (e.g. IP) checksum for the packet
2000  * 		associated to *skb*. Computation is incremental, so the helper
2001  * 		must know the former value of the header field that was
2002  * 		modified (*from*), the new value of this field (*to*), and the
2003  * 		number of bytes (2 or 4) for this field, stored in *size*.
2004  * 		Alternatively, it is possible to store the difference between
2005  * 		the previous and the new values of the header field in *to*, by
2006  * 		setting *from* and *size* to 0. For both methods, *offset*
2007  * 		indicates the location of the IP checksum within the packet.
2008  *
2009  * 		This helper works in combination with **bpf_csum_diff**\ (),
2010  * 		which does not update the checksum in-place, but offers more
2011  * 		flexibility and can handle sizes larger than 2 or 4 for the
2012  * 		checksum to update.
2013  *
2014  * 		A call to this helper is susceptible to change the underlying
2015  * 		packet buffer. Therefore, at load time, all checks on pointers
2016  * 		previously done by the verifier are invalidated and must be
2017  * 		performed again, if the helper is used in combination with
2018  * 		direct packet access.
2019  * 	Return
2020  * 		0 on success, or a negative error in case of failure.
2021  *
2022  * long bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags)
2023  * 	Description
2024  * 		Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the
2025  * 		packet associated to *skb*. Computation is incremental, so the
2026  * 		helper must know the former value of the header field that was
2027  * 		modified (*from*), the new value of this field (*to*), and the
2028  * 		number of bytes (2 or 4) for this field, stored on the lowest
2029  * 		four bits of *flags*. Alternatively, it is possible to store
2030  * 		the difference between the previous and the new values of the
2031  * 		header field in *to*, by setting *from* and the four lowest
2032  * 		bits of *flags* to 0. For both methods, *offset* indicates the
2033  * 		location of the IP checksum within the packet. In addition to
2034  * 		the size of the field, *flags* can be added (bitwise OR) actual
2035  * 		flags. With **BPF_F_MARK_MANGLED_0**, a null checksum is left
2036  * 		untouched (unless **BPF_F_MARK_ENFORCE** is added as well), and
2037  * 		for updates resulting in a null checksum the value is set to
2038  * 		**CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates
2039  * 		the checksum is to be computed against a pseudo-header.
2040  *
2041  * 		This helper works in combination with **bpf_csum_diff**\ (),
2042  * 		which does not update the checksum in-place, but offers more
2043  * 		flexibility and can handle sizes larger than 2 or 4 for the
2044  * 		checksum to update.
2045  *
2046  * 		A call to this helper is susceptible to change the underlying
2047  * 		packet buffer. Therefore, at load time, all checks on pointers
2048  * 		previously done by the verifier are invalidated and must be
2049  * 		performed again, if the helper is used in combination with
2050  * 		direct packet access.
2051  * 	Return
2052  * 		0 on success, or a negative error in case of failure.
2053  *
2054  * long bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index)
2055  * 	Description
2056  * 		This special helper is used to trigger a "tail call", or in
2057  * 		other words, to jump into another eBPF program. The same stack
2058  * 		frame is used (but values on stack and in registers for the
2059  * 		caller are not accessible to the callee). This mechanism allows
2060  * 		for program chaining, either for raising the maximum number of
2061  * 		available eBPF instructions, or to execute given programs in
2062  * 		conditional blocks. For security reasons, there is an upper
2063  * 		limit to the number of successive tail calls that can be
2064  * 		performed.
2065  *
2066  * 		Upon call of this helper, the program attempts to jump into a
2067  * 		program referenced at index *index* in *prog_array_map*, a
2068  * 		special map of type **BPF_MAP_TYPE_PROG_ARRAY**, and passes
2069  * 		*ctx*, a pointer to the context.
2070  *
2071  * 		If the call succeeds, the kernel immediately runs the first
2072  * 		instruction of the new program. This is not a function call,
2073  * 		and it never returns to the previous program. If the call
2074  * 		fails, then the helper has no effect, and the caller continues
2075  * 		to run its subsequent instructions. A call can fail if the
2076  * 		destination program for the jump does not exist (i.e. *index*
2077  * 		is superior to the number of entries in *prog_array_map*), or
2078  * 		if the maximum number of tail calls has been reached for this
2079  * 		chain of programs. This limit is defined in the kernel by the
2080  * 		macro **MAX_TAIL_CALL_CNT** (not accessible to user space),
2081  *		which is currently set to 33.
2082  * 	Return
2083  * 		0 on success, or a negative error in case of failure.
2084  *
2085  * long bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags)
2086  * 	Description
2087  * 		Clone and redirect the packet associated to *skb* to another
2088  * 		net device of index *ifindex*. Both ingress and egress
2089  * 		interfaces can be used for redirection. The **BPF_F_INGRESS**
2090  * 		value in *flags* is used to make the distinction (ingress path
2091  * 		is selected if the flag is present, egress path otherwise).
2092  * 		This is the only flag supported for now.
2093  *
2094  * 		In comparison with **bpf_redirect**\ () helper,
2095  * 		**bpf_clone_redirect**\ () has the associated cost of
2096  * 		duplicating the packet buffer, but this can be executed out of
2097  * 		the eBPF program. Conversely, **bpf_redirect**\ () is more
2098  * 		efficient, but it is handled through an action code where the
2099  * 		redirection happens only after the eBPF program has returned.
2100  *
2101  * 		A call to this helper is susceptible to change the underlying
2102  * 		packet buffer. Therefore, at load time, all checks on pointers
2103  * 		previously done by the verifier are invalidated and must be
2104  * 		performed again, if the helper is used in combination with
2105  * 		direct packet access.
2106  * 	Return
2107  * 		0 on success, or a negative error in case of failure. Positive
2108  * 		error indicates a potential drop or congestion in the target
2109  * 		device. The particular positive error codes are not defined.
2110  *
2111  * u64 bpf_get_current_pid_tgid(void)
2112  * 	Description
2113  * 		Get the current pid and tgid.
2114  * 	Return
2115  * 		A 64-bit integer containing the current tgid and pid, and
2116  * 		created as such:
2117  * 		*current_task*\ **->tgid << 32 \|**
2118  * 		*current_task*\ **->pid**.
2119  *
2120  * u64 bpf_get_current_uid_gid(void)
2121  * 	Description
2122  * 		Get the current uid and gid.
2123  * 	Return
2124  * 		A 64-bit integer containing the current GID and UID, and
2125  * 		created as such: *current_gid* **<< 32 \|** *current_uid*.
2126  *
2127  * long bpf_get_current_comm(void *buf, u32 size_of_buf)
2128  * 	Description
2129  * 		Copy the **comm** attribute of the current task into *buf* of
2130  * 		*size_of_buf*. The **comm** attribute contains the name of
2131  * 		the executable (excluding the path) for the current task. The
2132  * 		*size_of_buf* must be strictly positive. On success, the
2133  * 		helper makes sure that the *buf* is NUL-terminated. On failure,
2134  * 		it is filled with zeroes.
2135  * 	Return
2136  * 		0 on success, or a negative error in case of failure.
2137  *
2138  * u32 bpf_get_cgroup_classid(struct sk_buff *skb)
2139  * 	Description
2140  * 		Retrieve the classid for the current task, i.e. for the net_cls
2141  * 		cgroup to which *skb* belongs.
2142  *
2143  * 		This helper can be used on TC egress path, but not on ingress.
2144  *
2145  * 		The net_cls cgroup provides an interface to tag network packets
2146  * 		based on a user-provided identifier for all traffic coming from
2147  * 		the tasks belonging to the related cgroup. See also the related
2148  * 		kernel documentation, available from the Linux sources in file
2149  * 		*Documentation/admin-guide/cgroup-v1/net_cls.rst*.
2150  *
2151  * 		The Linux kernel has two versions for cgroups: there are
2152  * 		cgroups v1 and cgroups v2. Both are available to users, who can
2153  * 		use a mixture of them, but note that the net_cls cgroup is for
2154  * 		cgroup v1 only. This makes it incompatible with BPF programs
2155  * 		run on cgroups, which is a cgroup-v2-only feature (a socket can
2156  * 		only hold data for one version of cgroups at a time).
2157  *
2158  * 		This helper is only available is the kernel was compiled with
2159  * 		the **CONFIG_CGROUP_NET_CLASSID** configuration option set to
2160  * 		"**y**" or to "**m**".
2161  * 	Return
2162  * 		The classid, or 0 for the default unconfigured classid.
2163  *
2164  * long bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
2165  * 	Description
2166  * 		Push a *vlan_tci* (VLAN tag control information) of protocol
2167  * 		*vlan_proto* to the packet associated to *skb*, then update
2168  * 		the checksum. Note that if *vlan_proto* is different from
2169  * 		**ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to
2170  * 		be **ETH_P_8021Q**.
2171  *
2172  * 		A call to this helper is susceptible to change the underlying
2173  * 		packet buffer. Therefore, at load time, all checks on pointers
2174  * 		previously done by the verifier are invalidated and must be
2175  * 		performed again, if the helper is used in combination with
2176  * 		direct packet access.
2177  * 	Return
2178  * 		0 on success, or a negative error in case of failure.
2179  *
2180  * long bpf_skb_vlan_pop(struct sk_buff *skb)
2181  * 	Description
2182  * 		Pop a VLAN header from the packet associated to *skb*.
2183  *
2184  * 		A call to this helper is susceptible to change the underlying
2185  * 		packet buffer. Therefore, at load time, all checks on pointers
2186  * 		previously done by the verifier are invalidated and must be
2187  * 		performed again, if the helper is used in combination with
2188  * 		direct packet access.
2189  * 	Return
2190  * 		0 on success, or a negative error in case of failure.
2191  *
2192  * long bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
2193  * 	Description
2194  * 		Get tunnel metadata. This helper takes a pointer *key* to an
2195  * 		empty **struct bpf_tunnel_key** of **size**, that will be
2196  * 		filled with tunnel metadata for the packet associated to *skb*.
2197  * 		The *flags* can be set to **BPF_F_TUNINFO_IPV6**, which
2198  * 		indicates that the tunnel is based on IPv6 protocol instead of
2199  * 		IPv4.
2200  *
2201  * 		The **struct bpf_tunnel_key** is an object that generalizes the
2202  * 		principal parameters used by various tunneling protocols into a
2203  * 		single struct. This way, it can be used to easily make a
2204  * 		decision based on the contents of the encapsulation header,
2205  * 		"summarized" in this struct. In particular, it holds the IP
2206  * 		address of the remote end (IPv4 or IPv6, depending on the case)
2207  * 		in *key*\ **->remote_ipv4** or *key*\ **->remote_ipv6**. Also,
2208  * 		this struct exposes the *key*\ **->tunnel_id**, which is
2209  * 		generally mapped to a VNI (Virtual Network Identifier), making
2210  * 		it programmable together with the **bpf_skb_set_tunnel_key**\
2211  * 		() helper.
2212  *
2213  * 		Let's imagine that the following code is part of a program
2214  * 		attached to the TC ingress interface, on one end of a GRE
2215  * 		tunnel, and is supposed to filter out all messages coming from
2216  * 		remote ends with IPv4 address other than 10.0.0.1:
2217  *
2218  * 		::
2219  *
2220  * 			int ret;
2221  * 			struct bpf_tunnel_key key = {};
2222  *
2223  * 			ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
2224  * 			if (ret < 0)
2225  * 				return TC_ACT_SHOT;	// drop packet
2226  *
2227  * 			if (key.remote_ipv4 != 0x0a000001)
2228  * 				return TC_ACT_SHOT;	// drop packet
2229  *
2230  * 			return TC_ACT_OK;		// accept packet
2231  *
2232  * 		This interface can also be used with all encapsulation devices
2233  * 		that can operate in "collect metadata" mode: instead of having
2234  * 		one network device per specific configuration, the "collect
2235  * 		metadata" mode only requires a single device where the
2236  * 		configuration can be extracted from this helper.
2237  *
2238  * 		This can be used together with various tunnels such as VXLan,
2239  * 		Geneve, GRE or IP in IP (IPIP).
2240  * 	Return
2241  * 		0 on success, or a negative error in case of failure.
2242  *
2243  * long bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
2244  * 	Description
2245  * 		Populate tunnel metadata for packet associated to *skb.* The
2246  * 		tunnel metadata is set to the contents of *key*, of *size*. The
2247  * 		*flags* can be set to a combination of the following values:
2248  *
2249  * 		**BPF_F_TUNINFO_IPV6**
2250  * 			Indicate that the tunnel is based on IPv6 protocol
2251  * 			instead of IPv4.
2252  * 		**BPF_F_ZERO_CSUM_TX**
2253  * 			For IPv4 packets, add a flag to tunnel metadata
2254  * 			indicating that checksum computation should be skipped
2255  * 			and checksum set to zeroes.
2256  * 		**BPF_F_DONT_FRAGMENT**
2257  * 			Add a flag to tunnel metadata indicating that the
2258  * 			packet should not be fragmented.
2259  * 		**BPF_F_SEQ_NUMBER**
2260  * 			Add a flag to tunnel metadata indicating that a
2261  * 			sequence number should be added to tunnel header before
2262  * 			sending the packet. This flag was added for GRE
2263  * 			encapsulation, but might be used with other protocols
2264  * 			as well in the future.
2265  * 		**BPF_F_NO_TUNNEL_KEY**
2266  * 			Add a flag to tunnel metadata indicating that no tunnel
2267  * 			key should be set in the resulting tunnel header.
2268  *
2269  * 		Here is a typical usage on the transmit path:
2270  *
2271  * 		::
2272  *
2273  * 			struct bpf_tunnel_key key;
2274  * 			     populate key ...
2275  * 			bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0);
2276  * 			bpf_clone_redirect(skb, vxlan_dev_ifindex, 0);
2277  *
2278  * 		See also the description of the **bpf_skb_get_tunnel_key**\ ()
2279  * 		helper for additional information.
2280  * 	Return
2281  * 		0 on success, or a negative error in case of failure.
2282  *
2283  * u64 bpf_perf_event_read(struct bpf_map *map, u64 flags)
2284  * 	Description
2285  * 		Read the value of a perf event counter. This helper relies on a
2286  * 		*map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of
2287  * 		the perf event counter is selected when *map* is updated with
2288  * 		perf event file descriptors. The *map* is an array whose size
2289  * 		is the number of available CPUs, and each cell contains a value
2290  * 		relative to one CPU. The value to retrieve is indicated by
2291  * 		*flags*, that contains the index of the CPU to look up, masked
2292  * 		with **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to
2293  * 		**BPF_F_CURRENT_CPU** to indicate that the value for the
2294  * 		current CPU should be retrieved.
2295  *
2296  * 		Note that before Linux 4.13, only hardware perf event can be
2297  * 		retrieved.
2298  *
2299  * 		Also, be aware that the newer helper
2300  * 		**bpf_perf_event_read_value**\ () is recommended over
2301  * 		**bpf_perf_event_read**\ () in general. The latter has some ABI
2302  * 		quirks where error and counter value are used as a return code
2303  * 		(which is wrong to do since ranges may overlap). This issue is
2304  * 		fixed with **bpf_perf_event_read_value**\ (), which at the same
2305  * 		time provides more features over the **bpf_perf_event_read**\
2306  * 		() interface. Please refer to the description of
2307  * 		**bpf_perf_event_read_value**\ () for details.
2308  * 	Return
2309  * 		The value of the perf event counter read from the map, or a
2310  * 		negative error code in case of failure.
2311  *
2312  * long bpf_redirect(u32 ifindex, u64 flags)
2313  * 	Description
2314  * 		Redirect the packet to another net device of index *ifindex*.
2315  * 		This helper is somewhat similar to **bpf_clone_redirect**\
2316  * 		(), except that the packet is not cloned, which provides
2317  * 		increased performance.
2318  *
2319  * 		Except for XDP, both ingress and egress interfaces can be used
2320  * 		for redirection. The **BPF_F_INGRESS** value in *flags* is used
2321  * 		to make the distinction (ingress path is selected if the flag
2322  * 		is present, egress path otherwise). Currently, XDP only
2323  * 		supports redirection to the egress interface, and accepts no
2324  * 		flag at all.
2325  *
2326  * 		The same effect can also be attained with the more generic
2327  * 		**bpf_redirect_map**\ (), which uses a BPF map to store the
2328  * 		redirect target instead of providing it directly to the helper.
2329  * 	Return
2330  * 		For XDP, the helper returns **XDP_REDIRECT** on success or
2331  * 		**XDP_ABORTED** on error. For other program types, the values
2332  * 		are **TC_ACT_REDIRECT** on success or **TC_ACT_SHOT** on
2333  * 		error.
2334  *
2335  * u32 bpf_get_route_realm(struct sk_buff *skb)
2336  * 	Description
2337  * 		Retrieve the realm or the route, that is to say the
2338  * 		**tclassid** field of the destination for the *skb*. The
2339  * 		identifier retrieved is a user-provided tag, similar to the
2340  * 		one used with the net_cls cgroup (see description for
2341  * 		**bpf_get_cgroup_classid**\ () helper), but here this tag is
2342  * 		held by a route (a destination entry), not by a task.
2343  *
2344  * 		Retrieving this identifier works with the clsact TC egress hook
2345  * 		(see also **tc-bpf(8)**), or alternatively on conventional
2346  * 		classful egress qdiscs, but not on TC ingress path. In case of
2347  * 		clsact TC egress hook, this has the advantage that, internally,
2348  * 		the destination entry has not been dropped yet in the transmit
2349  * 		path. Therefore, the destination entry does not need to be
2350  * 		artificially held via **netif_keep_dst**\ () for a classful
2351  * 		qdisc until the *skb* is freed.
2352  *
2353  * 		This helper is available only if the kernel was compiled with
2354  * 		**CONFIG_IP_ROUTE_CLASSID** configuration option.
2355  * 	Return
2356  * 		The realm of the route for the packet associated to *skb*, or 0
2357  * 		if none was found.
2358  *
2359  * long bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
2360  * 	Description
2361  * 		Write raw *data* blob into a special BPF perf event held by
2362  * 		*map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
2363  * 		event must have the following attributes: **PERF_SAMPLE_RAW**
2364  * 		as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
2365  * 		**PERF_COUNT_SW_BPF_OUTPUT** as **config**.
2366  *
2367  * 		The *flags* are used to indicate the index in *map* for which
2368  * 		the value must be put, masked with **BPF_F_INDEX_MASK**.
2369  * 		Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
2370  * 		to indicate that the index of the current CPU core should be
2371  * 		used.
2372  *
2373  * 		The value to write, of *size*, is passed through eBPF stack and
2374  * 		pointed by *data*.
2375  *
2376  * 		The context of the program *ctx* needs also be passed to the
2377  * 		helper.
2378  *
2379  * 		On user space, a program willing to read the values needs to
2380  * 		call **perf_event_open**\ () on the perf event (either for
2381  * 		one or for all CPUs) and to store the file descriptor into the
2382  * 		*map*. This must be done before the eBPF program can send data
2383  * 		into it. An example is available in file
2384  * 		*samples/bpf/trace_output_user.c* in the Linux kernel source
2385  * 		tree (the eBPF program counterpart is in
2386  * 		*samples/bpf/trace_output_kern.c*).
2387  *
2388  * 		**bpf_perf_event_output**\ () achieves better performance
2389  * 		than **bpf_trace_printk**\ () for sharing data with user
2390  * 		space, and is much better suitable for streaming data from eBPF
2391  * 		programs.
2392  *
2393  * 		Note that this helper is not restricted to tracing use cases
2394  * 		and can be used with programs attached to TC or XDP as well,
2395  * 		where it allows for passing data to user space listeners. Data
2396  * 		can be:
2397  *
2398  * 		* Only custom structs,
2399  * 		* Only the packet payload, or
2400  * 		* A combination of both.
2401  * 	Return
2402  * 		0 on success, or a negative error in case of failure.
2403  *
2404  * long bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len)
2405  * 	Description
2406  * 		This helper was provided as an easy way to load data from a
2407  * 		packet. It can be used to load *len* bytes from *offset* from
2408  * 		the packet associated to *skb*, into the buffer pointed by
2409  * 		*to*.
2410  *
2411  * 		Since Linux 4.7, usage of this helper has mostly been replaced
2412  * 		by "direct packet access", enabling packet data to be
2413  * 		manipulated with *skb*\ **->data** and *skb*\ **->data_end**
2414  * 		pointing respectively to the first byte of packet data and to
2415  * 		the byte after the last byte of packet data. However, it
2416  * 		remains useful if one wishes to read large quantities of data
2417  * 		at once from a packet into the eBPF stack.
2418  * 	Return
2419  * 		0 on success, or a negative error in case of failure.
2420  *
2421  * long bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags)
2422  * 	Description
2423  * 		Walk a user or a kernel stack and return its id. To achieve
2424  * 		this, the helper needs *ctx*, which is a pointer to the context
2425  * 		on which the tracing program is executed, and a pointer to a
2426  * 		*map* of type **BPF_MAP_TYPE_STACK_TRACE**.
2427  *
2428  * 		The last argument, *flags*, holds the number of stack frames to
2429  * 		skip (from 0 to 255), masked with
2430  * 		**BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
2431  * 		a combination of the following flags:
2432  *
2433  * 		**BPF_F_USER_STACK**
2434  * 			Collect a user space stack instead of a kernel stack.
2435  * 		**BPF_F_FAST_STACK_CMP**
2436  * 			Compare stacks by hash only.
2437  * 		**BPF_F_REUSE_STACKID**
2438  * 			If two different stacks hash into the same *stackid*,
2439  * 			discard the old one.
2440  *
2441  * 		The stack id retrieved is a 32 bit long integer handle which
2442  * 		can be further combined with other data (including other stack
2443  * 		ids) and used as a key into maps. This can be useful for
2444  * 		generating a variety of graphs (such as flame graphs or off-cpu
2445  * 		graphs).
2446  *
2447  * 		For walking a stack, this helper is an improvement over
2448  * 		**bpf_probe_read**\ (), which can be used with unrolled loops
2449  * 		but is not efficient and consumes a lot of eBPF instructions.
2450  * 		Instead, **bpf_get_stackid**\ () can collect up to
2451  * 		**PERF_MAX_STACK_DEPTH** both kernel and user frames. Note that
2452  * 		this limit can be controlled with the **sysctl** program, and
2453  * 		that it should be manually increased in order to profile long
2454  * 		user stacks (such as stacks for Java programs). To do so, use:
2455  *
2456  * 		::
2457  *
2458  * 			# sysctl kernel.perf_event_max_stack=<new value>
2459  * 	Return
2460  * 		The positive or null stack id on success, or a negative error
2461  * 		in case of failure.
2462  *
2463  * s64 bpf_csum_diff(__be32 *from, u32 from_size, __be32 *to, u32 to_size, __wsum seed)
2464  * 	Description
2465  * 		Compute a checksum difference, from the raw buffer pointed by
2466  * 		*from*, of length *from_size* (that must be a multiple of 4),
2467  * 		towards the raw buffer pointed by *to*, of size *to_size*
2468  * 		(same remark). An optional *seed* can be added to the value
2469  * 		(this can be cascaded, the seed may come from a previous call
2470  * 		to the helper).
2471  *
2472  * 		This is flexible enough to be used in several ways:
2473  *
2474  * 		* With *from_size* == 0, *to_size* > 0 and *seed* set to
2475  * 		  checksum, it can be used when pushing new data.
2476  * 		* With *from_size* > 0, *to_size* == 0 and *seed* set to
2477  * 		  checksum, it can be used when removing data from a packet.
2478  * 		* With *from_size* > 0, *to_size* > 0 and *seed* set to 0, it
2479  * 		  can be used to compute a diff. Note that *from_size* and
2480  * 		  *to_size* do not need to be equal.
2481  *
2482  * 		This helper can be used in combination with
2483  * 		**bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ (), to
2484  * 		which one can feed in the difference computed with
2485  * 		**bpf_csum_diff**\ ().
2486  * 	Return
2487  * 		The checksum result, or a negative error code in case of
2488  * 		failure.
2489  *
2490  * long bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
2491  * 	Description
2492  * 		Retrieve tunnel options metadata for the packet associated to
2493  * 		*skb*, and store the raw tunnel option data to the buffer *opt*
2494  * 		of *size*.
2495  *
2496  * 		This helper can be used with encapsulation devices that can
2497  * 		operate in "collect metadata" mode (please refer to the related
2498  * 		note in the description of **bpf_skb_get_tunnel_key**\ () for
2499  * 		more details). A particular example where this can be used is
2500  * 		in combination with the Geneve encapsulation protocol, where it
2501  * 		allows for pushing (with **bpf_skb_get_tunnel_opt**\ () helper)
2502  * 		and retrieving arbitrary TLVs (Type-Length-Value headers) from
2503  * 		the eBPF program. This allows for full customization of these
2504  * 		headers.
2505  * 	Return
2506  * 		The size of the option data retrieved.
2507  *
2508  * long bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
2509  * 	Description
2510  * 		Set tunnel options metadata for the packet associated to *skb*
2511  * 		to the option data contained in the raw buffer *opt* of *size*.
2512  *
2513  * 		See also the description of the **bpf_skb_get_tunnel_opt**\ ()
2514  * 		helper for additional information.
2515  * 	Return
2516  * 		0 on success, or a negative error in case of failure.
2517  *
2518  * long bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags)
2519  * 	Description
2520  * 		Change the protocol of the *skb* to *proto*. Currently
2521  * 		supported are transition from IPv4 to IPv6, and from IPv6 to
2522  * 		IPv4. The helper takes care of the groundwork for the
2523  * 		transition, including resizing the socket buffer. The eBPF
2524  * 		program is expected to fill the new headers, if any, via
2525  * 		**skb_store_bytes**\ () and to recompute the checksums with
2526  * 		**bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\
2527  * 		(). The main case for this helper is to perform NAT64
2528  * 		operations out of an eBPF program.
2529  *
2530  * 		Internally, the GSO type is marked as dodgy so that headers are
2531  * 		checked and segments are recalculated by the GSO/GRO engine.
2532  * 		The size for GSO target is adapted as well.
2533  *
2534  * 		All values for *flags* are reserved for future usage, and must
2535  * 		be left at zero.
2536  *
2537  * 		A call to this helper is susceptible to change the underlying
2538  * 		packet buffer. Therefore, at load time, all checks on pointers
2539  * 		previously done by the verifier are invalidated and must be
2540  * 		performed again, if the helper is used in combination with
2541  * 		direct packet access.
2542  * 	Return
2543  * 		0 on success, or a negative error in case of failure.
2544  *
2545  * long bpf_skb_change_type(struct sk_buff *skb, u32 type)
2546  * 	Description
2547  * 		Change the packet type for the packet associated to *skb*. This
2548  * 		comes down to setting *skb*\ **->pkt_type** to *type*, except
2549  * 		the eBPF program does not have a write access to *skb*\
2550  * 		**->pkt_type** beside this helper. Using a helper here allows
2551  * 		for graceful handling of errors.
2552  *
2553  * 		The major use case is to change incoming *skb*s to
2554  * 		**PACKET_HOST** in a programmatic way instead of having to
2555  * 		recirculate via **redirect**\ (..., **BPF_F_INGRESS**), for
2556  * 		example.
2557  *
2558  * 		Note that *type* only allows certain values. At this time, they
2559  * 		are:
2560  *
2561  * 		**PACKET_HOST**
2562  * 			Packet is for us.
2563  * 		**PACKET_BROADCAST**
2564  * 			Send packet to all.
2565  * 		**PACKET_MULTICAST**
2566  * 			Send packet to group.
2567  * 		**PACKET_OTHERHOST**
2568  * 			Send packet to someone else.
2569  * 	Return
2570  * 		0 on success, or a negative error in case of failure.
2571  *
2572  * long bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index)
2573  * 	Description
2574  * 		Check whether *skb* is a descendant of the cgroup2 held by
2575  * 		*map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
2576  * 	Return
2577  * 		The return value depends on the result of the test, and can be:
2578  *
2579  * 		* 0, if the *skb* failed the cgroup2 descendant test.
2580  * 		* 1, if the *skb* succeeded the cgroup2 descendant test.
2581  * 		* A negative error code, if an error occurred.
2582  *
2583  * u32 bpf_get_hash_recalc(struct sk_buff *skb)
2584  * 	Description
2585  * 		Retrieve the hash of the packet, *skb*\ **->hash**. If it is
2586  * 		not set, in particular if the hash was cleared due to mangling,
2587  * 		recompute this hash. Later accesses to the hash can be done
2588  * 		directly with *skb*\ **->hash**.
2589  *
2590  * 		Calling **bpf_set_hash_invalid**\ (), changing a packet
2591  * 		prototype with **bpf_skb_change_proto**\ (), or calling
2592  * 		**bpf_skb_store_bytes**\ () with the
2593  * 		**BPF_F_INVALIDATE_HASH** are actions susceptible to clear
2594  * 		the hash and to trigger a new computation for the next call to
2595  * 		**bpf_get_hash_recalc**\ ().
2596  * 	Return
2597  * 		The 32-bit hash.
2598  *
2599  * u64 bpf_get_current_task(void)
2600  * 	Description
2601  * 		Get the current task.
2602  * 	Return
2603  * 		A pointer to the current task struct.
2604  *
2605  * long bpf_probe_write_user(void *dst, const void *src, u32 len)
2606  * 	Description
2607  * 		Attempt in a safe way to write *len* bytes from the buffer
2608  * 		*src* to *dst* in memory. It only works for threads that are in
2609  * 		user context, and *dst* must be a valid user space address.
2610  *
2611  * 		This helper should not be used to implement any kind of
2612  * 		security mechanism because of TOC-TOU attacks, but rather to
2613  * 		debug, divert, and manipulate execution of semi-cooperative
2614  * 		processes.
2615  *
2616  * 		Keep in mind that this feature is meant for experiments, and it
2617  * 		has a risk of crashing the system and running programs.
2618  * 		Therefore, when an eBPF program using this helper is attached,
2619  * 		a warning including PID and process name is printed to kernel
2620  * 		logs.
2621  * 	Return
2622  * 		0 on success, or a negative error in case of failure.
2623  *
2624  * long bpf_current_task_under_cgroup(struct bpf_map *map, u32 index)
2625  * 	Description
2626  * 		Check whether the probe is being run is the context of a given
2627  * 		subset of the cgroup2 hierarchy. The cgroup2 to test is held by
2628  * 		*map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
2629  * 	Return
2630  * 		The return value depends on the result of the test, and can be:
2631  *
2632  *		* 1, if current task belongs to the cgroup2.
2633  *		* 0, if current task does not belong to the cgroup2.
2634  * 		* A negative error code, if an error occurred.
2635  *
2636  * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
2637  * 	Description
2638  * 		Resize (trim or grow) the packet associated to *skb* to the
2639  * 		new *len*. The *flags* are reserved for future usage, and must
2640  * 		be left at zero.
2641  *
2642  * 		The basic idea is that the helper performs the needed work to
2643  * 		change the size of the packet, then the eBPF program rewrites
2644  * 		the rest via helpers like **bpf_skb_store_bytes**\ (),
2645  * 		**bpf_l3_csum_replace**\ (), **bpf_l3_csum_replace**\ ()
2646  * 		and others. This helper is a slow path utility intended for
2647  * 		replies with control messages. And because it is targeted for
2648  * 		slow path, the helper itself can afford to be slow: it
2649  * 		implicitly linearizes, unclones and drops offloads from the
2650  * 		*skb*.
2651  *
2652  * 		A call to this helper is susceptible to change the underlying
2653  * 		packet buffer. Therefore, at load time, all checks on pointers
2654  * 		previously done by the verifier are invalidated and must be
2655  * 		performed again, if the helper is used in combination with
2656  * 		direct packet access.
2657  * 	Return
2658  * 		0 on success, or a negative error in case of failure.
2659  *
2660  * long bpf_skb_pull_data(struct sk_buff *skb, u32 len)
2661  * 	Description
2662  * 		Pull in non-linear data in case the *skb* is non-linear and not
2663  * 		all of *len* are part of the linear section. Make *len* bytes
2664  * 		from *skb* readable and writable. If a zero value is passed for
2665  *		*len*, then all bytes in the linear part of *skb* will be made
2666  *		readable and writable.
2667  *
2668  * 		This helper is only needed for reading and writing with direct
2669  * 		packet access.
2670  *
2671  * 		For direct packet access, testing that offsets to access
2672  * 		are within packet boundaries (test on *skb*\ **->data_end**) is
2673  * 		susceptible to fail if offsets are invalid, or if the requested
2674  * 		data is in non-linear parts of the *skb*. On failure the
2675  * 		program can just bail out, or in the case of a non-linear
2676  * 		buffer, use a helper to make the data available. The
2677  * 		**bpf_skb_load_bytes**\ () helper is a first solution to access
2678  * 		the data. Another one consists in using **bpf_skb_pull_data**
2679  * 		to pull in once the non-linear parts, then retesting and
2680  * 		eventually access the data.
2681  *
2682  * 		At the same time, this also makes sure the *skb* is uncloned,
2683  * 		which is a necessary condition for direct write. As this needs
2684  * 		to be an invariant for the write part only, the verifier
2685  * 		detects writes and adds a prologue that is calling
2686  * 		**bpf_skb_pull_data()** to effectively unclone the *skb* from
2687  * 		the very beginning in case it is indeed cloned.
2688  *
2689  * 		A call to this helper is susceptible to change the underlying
2690  * 		packet buffer. Therefore, at load time, all checks on pointers
2691  * 		previously done by the verifier are invalidated and must be
2692  * 		performed again, if the helper is used in combination with
2693  * 		direct packet access.
2694  * 	Return
2695  * 		0 on success, or a negative error in case of failure.
2696  *
2697  * s64 bpf_csum_update(struct sk_buff *skb, __wsum csum)
2698  * 	Description
2699  * 		Add the checksum *csum* into *skb*\ **->csum** in case the
2700  * 		driver has supplied a checksum for the entire packet into that
2701  * 		field. Return an error otherwise. This helper is intended to be
2702  * 		used in combination with **bpf_csum_diff**\ (), in particular
2703  * 		when the checksum needs to be updated after data has been
2704  * 		written into the packet through direct packet access.
2705  * 	Return
2706  * 		The checksum on success, or a negative error code in case of
2707  * 		failure.
2708  *
2709  * void bpf_set_hash_invalid(struct sk_buff *skb)
2710  * 	Description
2711  * 		Invalidate the current *skb*\ **->hash**. It can be used after
2712  * 		mangling on headers through direct packet access, in order to
2713  * 		indicate that the hash is outdated and to trigger a
2714  * 		recalculation the next time the kernel tries to access this
2715  * 		hash or when the **bpf_get_hash_recalc**\ () helper is called.
2716  * 	Return
2717  * 		void.
2718  *
2719  * long bpf_get_numa_node_id(void)
2720  * 	Description
2721  * 		Return the id of the current NUMA node. The primary use case
2722  * 		for this helper is the selection of sockets for the local NUMA
2723  * 		node, when the program is attached to sockets using the
2724  * 		**SO_ATTACH_REUSEPORT_EBPF** option (see also **socket(7)**),
2725  * 		but the helper is also available to other eBPF program types,
2726  * 		similarly to **bpf_get_smp_processor_id**\ ().
2727  * 	Return
2728  * 		The id of current NUMA node.
2729  *
2730  * long bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags)
2731  * 	Description
2732  * 		Grows headroom of packet associated to *skb* and adjusts the
2733  * 		offset of the MAC header accordingly, adding *len* bytes of
2734  * 		space. It automatically extends and reallocates memory as
2735  * 		required.
2736  *
2737  * 		This helper can be used on a layer 3 *skb* to push a MAC header
2738  * 		for redirection into a layer 2 device.
2739  *
2740  * 		All values for *flags* are reserved for future usage, and must
2741  * 		be left at zero.
2742  *
2743  * 		A call to this helper is susceptible to change the underlying
2744  * 		packet buffer. Therefore, at load time, all checks on pointers
2745  * 		previously done by the verifier are invalidated and must be
2746  * 		performed again, if the helper is used in combination with
2747  * 		direct packet access.
2748  * 	Return
2749  * 		0 on success, or a negative error in case of failure.
2750  *
2751  * long bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta)
2752  * 	Description
2753  * 		Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that
2754  * 		it is possible to use a negative value for *delta*. This helper
2755  * 		can be used to prepare the packet for pushing or popping
2756  * 		headers.
2757  *
2758  * 		A call to this helper is susceptible to change the underlying
2759  * 		packet buffer. Therefore, at load time, all checks on pointers
2760  * 		previously done by the verifier are invalidated and must be
2761  * 		performed again, if the helper is used in combination with
2762  * 		direct packet access.
2763  * 	Return
2764  * 		0 on success, or a negative error in case of failure.
2765  *
2766  * long bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr)
2767  * 	Description
2768  * 		Copy a NUL terminated string from an unsafe kernel address
2769  * 		*unsafe_ptr* to *dst*. See **bpf_probe_read_kernel_str**\ () for
2770  * 		more details.
2771  *
2772  * 		Generally, use **bpf_probe_read_user_str**\ () or
2773  * 		**bpf_probe_read_kernel_str**\ () instead.
2774  * 	Return
2775  * 		On success, the strictly positive length of the string,
2776  * 		including the trailing NUL character. On error, a negative
2777  * 		value.
2778  *
2779  * u64 bpf_get_socket_cookie(struct sk_buff *skb)
2780  * 	Description
2781  * 		If the **struct sk_buff** pointed by *skb* has a known socket,
2782  * 		retrieve the cookie (generated by the kernel) of this socket.
2783  * 		If no cookie has been set yet, generate a new cookie. Once
2784  * 		generated, the socket cookie remains stable for the life of the
2785  * 		socket. This helper can be useful for monitoring per socket
2786  * 		networking traffic statistics as it provides a global socket
2787  * 		identifier that can be assumed unique.
2788  * 	Return
2789  * 		A 8-byte long unique number on success, or 0 if the socket
2790  * 		field is missing inside *skb*.
2791  *
2792  * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx)
2793  * 	Description
2794  * 		Equivalent to bpf_get_socket_cookie() helper that accepts
2795  * 		*skb*, but gets socket from **struct bpf_sock_addr** context.
2796  * 	Return
2797  * 		A 8-byte long unique number.
2798  *
2799  * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx)
2800  * 	Description
2801  * 		Equivalent to **bpf_get_socket_cookie**\ () helper that accepts
2802  * 		*skb*, but gets socket from **struct bpf_sock_ops** context.
2803  * 	Return
2804  * 		A 8-byte long unique number.
2805  *
2806  * u64 bpf_get_socket_cookie(struct sock *sk)
2807  * 	Description
2808  * 		Equivalent to **bpf_get_socket_cookie**\ () helper that accepts
2809  * 		*sk*, but gets socket from a BTF **struct sock**. This helper
2810  * 		also works for sleepable programs.
2811  * 	Return
2812  * 		A 8-byte long unique number or 0 if *sk* is NULL.
2813  *
2814  * u32 bpf_get_socket_uid(struct sk_buff *skb)
2815  * 	Description
2816  * 		Get the owner UID of the socked associated to *skb*.
2817  * 	Return
2818  * 		The owner UID of the socket associated to *skb*. If the socket
2819  * 		is **NULL**, or if it is not a full socket (i.e. if it is a
2820  * 		time-wait or a request socket instead), **overflowuid** value
2821  * 		is returned (note that **overflowuid** might also be the actual
2822  * 		UID value for the socket).
2823  *
2824  * long bpf_set_hash(struct sk_buff *skb, u32 hash)
2825  * 	Description
2826  * 		Set the full hash for *skb* (set the field *skb*\ **->hash**)
2827  * 		to value *hash*.
2828  * 	Return
2829  * 		0
2830  *
2831  * long bpf_setsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen)
2832  * 	Description
2833  * 		Emulate a call to **setsockopt()** on the socket associated to
2834  * 		*bpf_socket*, which must be a full socket. The *level* at
2835  * 		which the option resides and the name *optname* of the option
2836  * 		must be specified, see **setsockopt(2)** for more information.
2837  * 		The option value of length *optlen* is pointed by *optval*.
2838  *
2839  * 		*bpf_socket* should be one of the following:
2840  *
2841  * 		* **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
2842  *		* **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**,
2843  *		  **BPF_CGROUP_INET6_CONNECT** and **BPF_CGROUP_UNIX_CONNECT**.
2844  *
2845  * 		This helper actually implements a subset of **setsockopt()**.
2846  * 		It supports the following *level*\ s:
2847  *
2848  * 		* **SOL_SOCKET**, which supports the following *optname*\ s:
2849  * 		  **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**,
2850  * 		  **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**,
2851  * 		  **SO_BINDTODEVICE**, **SO_KEEPALIVE**, **SO_REUSEADDR**,
2852  * 		  **SO_REUSEPORT**, **SO_BINDTOIFINDEX**, **SO_TXREHASH**.
2853  * 		* **IPPROTO_TCP**, which supports the following *optname*\ s:
2854  * 		  **TCP_CONGESTION**, **TCP_BPF_IW**,
2855  * 		  **TCP_BPF_SNDCWND_CLAMP**, **TCP_SAVE_SYN**,
2856  * 		  **TCP_KEEPIDLE**, **TCP_KEEPINTVL**, **TCP_KEEPCNT**,
2857  * 		  **TCP_SYNCNT**, **TCP_USER_TIMEOUT**, **TCP_NOTSENT_LOWAT**,
2858  * 		  **TCP_NODELAY**, **TCP_MAXSEG**, **TCP_WINDOW_CLAMP**,
2859  * 		  **TCP_THIN_LINEAR_TIMEOUTS**, **TCP_BPF_DELACK_MAX**,
2860  *		  **TCP_BPF_RTO_MIN**, **TCP_BPF_SOCK_OPS_CB_FLAGS**.
2861  * 		* **IPPROTO_IP**, which supports *optname* **IP_TOS**.
2862  * 		* **IPPROTO_IPV6**, which supports the following *optname*\ s:
2863  * 		  **IPV6_TCLASS**, **IPV6_AUTOFLOWLABEL**.
2864  * 	Return
2865  * 		0 on success, or a negative error in case of failure.
2866  *
2867  * long bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags)
2868  * 	Description
2869  * 		Grow or shrink the room for data in the packet associated to
2870  * 		*skb* by *len_diff*, and according to the selected *mode*.
2871  *
2872  * 		By default, the helper will reset any offloaded checksum
2873  * 		indicator of the skb to CHECKSUM_NONE. This can be avoided
2874  * 		by the following flag:
2875  *
2876  * 		* **BPF_F_ADJ_ROOM_NO_CSUM_RESET**: Do not reset offloaded
2877  * 		  checksum data of the skb to CHECKSUM_NONE.
2878  *
2879  *		There are two supported modes at this time:
2880  *
2881  *		* **BPF_ADJ_ROOM_MAC**: Adjust room at the mac layer
2882  * 		  (room space is added or removed between the layer 2 and
2883  * 		  layer 3 headers).
2884  *
2885  * 		* **BPF_ADJ_ROOM_NET**: Adjust room at the network layer
2886  * 		  (room space is added or removed between the layer 3 and
2887  * 		  layer 4 headers).
2888  *
2889  *		The following flags are supported at this time:
2890  *
2891  *		* **BPF_F_ADJ_ROOM_FIXED_GSO**: Do not adjust gso_size.
2892  *		  Adjusting mss in this way is not allowed for datagrams.
2893  *
2894  *		* **BPF_F_ADJ_ROOM_ENCAP_L3_IPV4**,
2895  *		  **BPF_F_ADJ_ROOM_ENCAP_L3_IPV6**:
2896  *		  Any new space is reserved to hold a tunnel header.
2897  *		  Configure skb offsets and other fields accordingly.
2898  *
2899  *		* **BPF_F_ADJ_ROOM_ENCAP_L4_GRE**,
2900  *		  **BPF_F_ADJ_ROOM_ENCAP_L4_UDP**:
2901  *		  Use with ENCAP_L3 flags to further specify the tunnel type.
2902  *
2903  *		* **BPF_F_ADJ_ROOM_ENCAP_L2**\ (*len*):
2904  *		  Use with ENCAP_L3/L4 flags to further specify the tunnel
2905  *		  type; *len* is the length of the inner MAC header.
2906  *
2907  *		* **BPF_F_ADJ_ROOM_ENCAP_L2_ETH**:
2908  *		  Use with BPF_F_ADJ_ROOM_ENCAP_L2 flag to further specify the
2909  *		  L2 type as Ethernet.
2910  *
2911  *		* **BPF_F_ADJ_ROOM_DECAP_L3_IPV4**,
2912  *		  **BPF_F_ADJ_ROOM_DECAP_L3_IPV6**:
2913  *		  Indicate the new IP header version after decapsulating the outer
2914  *		  IP header. Used when the inner and outer IP versions are different.
2915  *
2916  * 		A call to this helper is susceptible to change the underlying
2917  * 		packet buffer. Therefore, at load time, all checks on pointers
2918  * 		previously done by the verifier are invalidated and must be
2919  * 		performed again, if the helper is used in combination with
2920  * 		direct packet access.
2921  * 	Return
2922  * 		0 on success, or a negative error in case of failure.
2923  *
2924  * long bpf_redirect_map(struct bpf_map *map, u64 key, u64 flags)
2925  * 	Description
2926  * 		Redirect the packet to the endpoint referenced by *map* at
2927  * 		index *key*. Depending on its type, this *map* can contain
2928  * 		references to net devices (for forwarding packets through other
2929  * 		ports), or to CPUs (for redirecting XDP frames to another CPU;
2930  * 		but this is only implemented for native XDP (with driver
2931  * 		support) as of this writing).
2932  *
2933  * 		The lower two bits of *flags* are used as the return code if
2934  * 		the map lookup fails. This is so that the return value can be
2935  * 		one of the XDP program return codes up to **XDP_TX**, as chosen
2936  * 		by the caller. The higher bits of *flags* can be set to
2937  * 		BPF_F_BROADCAST or BPF_F_EXCLUDE_INGRESS as defined below.
2938  *
2939  * 		With BPF_F_BROADCAST the packet will be broadcasted to all the
2940  * 		interfaces in the map, with BPF_F_EXCLUDE_INGRESS the ingress
2941  * 		interface will be excluded when do broadcasting.
2942  *
2943  * 		See also **bpf_redirect**\ (), which only supports redirecting
2944  * 		to an ifindex, but doesn't require a map to do so.
2945  * 	Return
2946  * 		**XDP_REDIRECT** on success, or the value of the two lower bits
2947  * 		of the *flags* argument on error.
2948  *
2949  * long bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags)
2950  * 	Description
2951  * 		Redirect the packet to the socket referenced by *map* (of type
2952  * 		**BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
2953  * 		egress interfaces can be used for redirection. The
2954  * 		**BPF_F_INGRESS** value in *flags* is used to make the
2955  * 		distinction (ingress path is selected if the flag is present,
2956  * 		egress path otherwise). This is the only flag supported for now.
2957  * 	Return
2958  * 		**SK_PASS** on success, or **SK_DROP** on error.
2959  *
2960  * long bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
2961  * 	Description
2962  * 		Add an entry to, or update a *map* referencing sockets. The
2963  * 		*skops* is used as a new value for the entry associated to
2964  * 		*key*. *flags* is one of:
2965  *
2966  * 		**BPF_NOEXIST**
2967  * 			The entry for *key* must not exist in the map.
2968  * 		**BPF_EXIST**
2969  * 			The entry for *key* must already exist in the map.
2970  * 		**BPF_ANY**
2971  * 			No condition on the existence of the entry for *key*.
2972  *
2973  * 		If the *map* has eBPF programs (parser and verdict), those will
2974  * 		be inherited by the socket being added. If the socket is
2975  * 		already attached to eBPF programs, this results in an error.
2976  * 	Return
2977  * 		0 on success, or a negative error in case of failure.
2978  *
2979  * long bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta)
2980  * 	Description
2981  * 		Adjust the address pointed by *xdp_md*\ **->data_meta** by
2982  * 		*delta* (which can be positive or negative). Note that this
2983  * 		operation modifies the address stored in *xdp_md*\ **->data**,
2984  * 		so the latter must be loaded only after the helper has been
2985  * 		called.
2986  *
2987  * 		The use of *xdp_md*\ **->data_meta** is optional and programs
2988  * 		are not required to use it. The rationale is that when the
2989  * 		packet is processed with XDP (e.g. as DoS filter), it is
2990  * 		possible to push further meta data along with it before passing
2991  * 		to the stack, and to give the guarantee that an ingress eBPF
2992  * 		program attached as a TC classifier on the same device can pick
2993  * 		this up for further post-processing. Since TC works with socket
2994  * 		buffers, it remains possible to set from XDP the **mark** or
2995  * 		**priority** pointers, or other pointers for the socket buffer.
2996  * 		Having this scratch space generic and programmable allows for
2997  * 		more flexibility as the user is free to store whatever meta
2998  * 		data they need.
2999  *
3000  * 		A call to this helper is susceptible to change the underlying
3001  * 		packet buffer. Therefore, at load time, all checks on pointers
3002  * 		previously done by the verifier are invalidated and must be
3003  * 		performed again, if the helper is used in combination with
3004  * 		direct packet access.
3005  * 	Return
3006  * 		0 on success, or a negative error in case of failure.
3007  *
3008  * long bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size)
3009  * 	Description
3010  * 		Read the value of a perf event counter, and store it into *buf*
3011  * 		of size *buf_size*. This helper relies on a *map* of type
3012  * 		**BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of the perf event
3013  * 		counter is selected when *map* is updated with perf event file
3014  * 		descriptors. The *map* is an array whose size is the number of
3015  * 		available CPUs, and each cell contains a value relative to one
3016  * 		CPU. The value to retrieve is indicated by *flags*, that
3017  * 		contains the index of the CPU to look up, masked with
3018  * 		**BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to
3019  * 		**BPF_F_CURRENT_CPU** to indicate that the value for the
3020  * 		current CPU should be retrieved.
3021  *
3022  * 		This helper behaves in a way close to
3023  * 		**bpf_perf_event_read**\ () helper, save that instead of
3024  * 		just returning the value observed, it fills the *buf*
3025  * 		structure. This allows for additional data to be retrieved: in
3026  * 		particular, the enabled and running times (in *buf*\
3027  * 		**->enabled** and *buf*\ **->running**, respectively) are
3028  * 		copied. In general, **bpf_perf_event_read_value**\ () is
3029  * 		recommended over **bpf_perf_event_read**\ (), which has some
3030  * 		ABI issues and provides fewer functionalities.
3031  *
3032  * 		These values are interesting, because hardware PMU (Performance
3033  * 		Monitoring Unit) counters are limited resources. When there are
3034  * 		more PMU based perf events opened than available counters,
3035  * 		kernel will multiplex these events so each event gets certain
3036  * 		percentage (but not all) of the PMU time. In case that
3037  * 		multiplexing happens, the number of samples or counter value
3038  * 		will not reflect the case compared to when no multiplexing
3039  * 		occurs. This makes comparison between different runs difficult.
3040  * 		Typically, the counter value should be normalized before
3041  * 		comparing to other experiments. The usual normalization is done
3042  * 		as follows.
3043  *
3044  * 		::
3045  *
3046  * 			normalized_counter = counter * t_enabled / t_running
3047  *
3048  * 		Where t_enabled is the time enabled for event and t_running is
3049  * 		the time running for event since last normalization. The
3050  * 		enabled and running times are accumulated since the perf event
3051  * 		open. To achieve scaling factor between two invocations of an
3052  * 		eBPF program, users can use CPU id as the key (which is
3053  * 		typical for perf array usage model) to remember the previous
3054  * 		value and do the calculation inside the eBPF program.
3055  * 	Return
3056  * 		0 on success, or a negative error in case of failure.
3057  *
3058  * long bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size)
3059  * 	Description
3060  * 		For an eBPF program attached to a perf event, retrieve the
3061  * 		value of the event counter associated to *ctx* and store it in
3062  * 		the structure pointed by *buf* and of size *buf_size*. Enabled
3063  * 		and running times are also stored in the structure (see
3064  * 		description of helper **bpf_perf_event_read_value**\ () for
3065  * 		more details).
3066  * 	Return
3067  * 		0 on success, or a negative error in case of failure.
3068  *
3069  * long bpf_getsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen)
3070  * 	Description
3071  * 		Emulate a call to **getsockopt()** on the socket associated to
3072  * 		*bpf_socket*, which must be a full socket. The *level* at
3073  * 		which the option resides and the name *optname* of the option
3074  * 		must be specified, see **getsockopt(2)** for more information.
3075  * 		The retrieved value is stored in the structure pointed by
3076  * 		*opval* and of length *optlen*.
3077  *
3078  * 		*bpf_socket* should be one of the following:
3079  *
3080  * 		* **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
3081  *		* **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**,
3082  *		  **BPF_CGROUP_INET6_CONNECT** and **BPF_CGROUP_UNIX_CONNECT**.
3083  *
3084  * 		This helper actually implements a subset of **getsockopt()**.
3085  * 		It supports the same set of *optname*\ s that is supported by
3086  * 		the **bpf_setsockopt**\ () helper.  The exceptions are
3087  * 		**TCP_BPF_*** is **bpf_setsockopt**\ () only and
3088  * 		**TCP_SAVED_SYN** is **bpf_getsockopt**\ () only.
3089  * 	Return
3090  * 		0 on success, or a negative error in case of failure.
3091  *
3092  * long bpf_override_return(struct pt_regs *regs, u64 rc)
3093  * 	Description
3094  * 		Used for error injection, this helper uses kprobes to override
3095  * 		the return value of the probed function, and to set it to *rc*.
3096  * 		The first argument is the context *regs* on which the kprobe
3097  * 		works.
3098  *
3099  * 		This helper works by setting the PC (program counter)
3100  * 		to an override function which is run in place of the original
3101  * 		probed function. This means the probed function is not run at
3102  * 		all. The replacement function just returns with the required
3103  * 		value.
3104  *
3105  * 		This helper has security implications, and thus is subject to
3106  * 		restrictions. It is only available if the kernel was compiled
3107  * 		with the **CONFIG_BPF_KPROBE_OVERRIDE** configuration
3108  * 		option, and in this case it only works on functions tagged with
3109  * 		**ALLOW_ERROR_INJECTION** in the kernel code.
3110  * 	Return
3111  * 		0
3112  *
3113  * long bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval)
3114  * 	Description
3115  * 		Attempt to set the value of the **bpf_sock_ops_cb_flags** field
3116  * 		for the full TCP socket associated to *bpf_sock_ops* to
3117  * 		*argval*.
3118  *
3119  * 		The primary use of this field is to determine if there should
3120  * 		be calls to eBPF programs of type
3121  * 		**BPF_PROG_TYPE_SOCK_OPS** at various points in the TCP
3122  * 		code. A program of the same type can change its value, per
3123  * 		connection and as necessary, when the connection is
3124  * 		established. This field is directly accessible for reading, but
3125  * 		this helper must be used for updates in order to return an
3126  * 		error if an eBPF program tries to set a callback that is not
3127  * 		supported in the current kernel.
3128  *
3129  * 		*argval* is a flag array which can combine these flags:
3130  *
3131  * 		* **BPF_SOCK_OPS_RTO_CB_FLAG** (retransmission time out)
3132  * 		* **BPF_SOCK_OPS_RETRANS_CB_FLAG** (retransmission)
3133  * 		* **BPF_SOCK_OPS_STATE_CB_FLAG** (TCP state change)
3134  * 		* **BPF_SOCK_OPS_RTT_CB_FLAG** (every RTT)
3135  *
3136  * 		Therefore, this function can be used to clear a callback flag by
3137  * 		setting the appropriate bit to zero. e.g. to disable the RTO
3138  * 		callback:
3139  *
3140  * 		**bpf_sock_ops_cb_flags_set(bpf_sock,**
3141  * 			**bpf_sock->bpf_sock_ops_cb_flags & ~BPF_SOCK_OPS_RTO_CB_FLAG)**
3142  *
3143  * 		Here are some examples of where one could call such eBPF
3144  * 		program:
3145  *
3146  * 		* When RTO fires.
3147  * 		* When a packet is retransmitted.
3148  * 		* When the connection terminates.
3149  * 		* When a packet is sent.
3150  * 		* When a packet is received.
3151  * 	Return
3152  * 		Code **-EINVAL** if the socket is not a full TCP socket;
3153  * 		otherwise, a positive number containing the bits that could not
3154  * 		be set is returned (which comes down to 0 if all bits were set
3155  * 		as required).
3156  *
3157  * long bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags)
3158  * 	Description
3159  * 		This helper is used in programs implementing policies at the
3160  * 		socket level. If the message *msg* is allowed to pass (i.e. if
3161  * 		the verdict eBPF program returns **SK_PASS**), redirect it to
3162  * 		the socket referenced by *map* (of type
3163  * 		**BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
3164  * 		egress interfaces can be used for redirection. The
3165  * 		**BPF_F_INGRESS** value in *flags* is used to make the
3166  * 		distinction (ingress path is selected if the flag is present,
3167  * 		egress path otherwise). This is the only flag supported for now.
3168  * 	Return
3169  * 		**SK_PASS** on success, or **SK_DROP** on error.
3170  *
3171  * long bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes)
3172  * 	Description
3173  * 		For socket policies, apply the verdict of the eBPF program to
3174  * 		the next *bytes* (number of bytes) of message *msg*.
3175  *
3176  * 		For example, this helper can be used in the following cases:
3177  *
3178  * 		* A single **sendmsg**\ () or **sendfile**\ () system call
3179  * 		  contains multiple logical messages that the eBPF program is
3180  * 		  supposed to read and for which it should apply a verdict.
3181  * 		* An eBPF program only cares to read the first *bytes* of a
3182  * 		  *msg*. If the message has a large payload, then setting up
3183  * 		  and calling the eBPF program repeatedly for all bytes, even
3184  * 		  though the verdict is already known, would create unnecessary
3185  * 		  overhead.
3186  *
3187  * 		When called from within an eBPF program, the helper sets a
3188  * 		counter internal to the BPF infrastructure, that is used to
3189  * 		apply the last verdict to the next *bytes*. If *bytes* is
3190  * 		smaller than the current data being processed from a
3191  * 		**sendmsg**\ () or **sendfile**\ () system call, the first
3192  * 		*bytes* will be sent and the eBPF program will be re-run with
3193  * 		the pointer for start of data pointing to byte number *bytes*
3194  * 		**+ 1**. If *bytes* is larger than the current data being
3195  * 		processed, then the eBPF verdict will be applied to multiple
3196  * 		**sendmsg**\ () or **sendfile**\ () calls until *bytes* are
3197  * 		consumed.
3198  *
3199  * 		Note that if a socket closes with the internal counter holding
3200  * 		a non-zero value, this is not a problem because data is not
3201  * 		being buffered for *bytes* and is sent as it is received.
3202  * 	Return
3203  * 		0
3204  *
3205  * long bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes)
3206  * 	Description
3207  * 		For socket policies, prevent the execution of the verdict eBPF
3208  * 		program for message *msg* until *bytes* (byte number) have been
3209  * 		accumulated.
3210  *
3211  * 		This can be used when one needs a specific number of bytes
3212  * 		before a verdict can be assigned, even if the data spans
3213  * 		multiple **sendmsg**\ () or **sendfile**\ () calls. The extreme
3214  * 		case would be a user calling **sendmsg**\ () repeatedly with
3215  * 		1-byte long message segments. Obviously, this is bad for
3216  * 		performance, but it is still valid. If the eBPF program needs
3217  * 		*bytes* bytes to validate a header, this helper can be used to
3218  * 		prevent the eBPF program to be called again until *bytes* have
3219  * 		been accumulated.
3220  * 	Return
3221  * 		0
3222  *
3223  * long bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags)
3224  * 	Description
3225  * 		For socket policies, pull in non-linear data from user space
3226  * 		for *msg* and set pointers *msg*\ **->data** and *msg*\
3227  * 		**->data_end** to *start* and *end* bytes offsets into *msg*,
3228  * 		respectively.
3229  *
3230  * 		If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a
3231  * 		*msg* it can only parse data that the (**data**, **data_end**)
3232  * 		pointers have already consumed. For **sendmsg**\ () hooks this
3233  * 		is likely the first scatterlist element. But for calls relying
3234  * 		on the **sendpage** handler (e.g. **sendfile**\ ()) this will
3235  * 		be the range (**0**, **0**) because the data is shared with
3236  * 		user space and by default the objective is to avoid allowing
3237  * 		user space to modify data while (or after) eBPF verdict is
3238  * 		being decided. This helper can be used to pull in data and to
3239  * 		set the start and end pointer to given values. Data will be
3240  * 		copied if necessary (i.e. if data was not linear and if start
3241  * 		and end pointers do not point to the same chunk).
3242  *
3243  * 		A call to this helper is susceptible to change the underlying
3244  * 		packet buffer. Therefore, at load time, all checks on pointers
3245  * 		previously done by the verifier are invalidated and must be
3246  * 		performed again, if the helper is used in combination with
3247  * 		direct packet access.
3248  *
3249  * 		All values for *flags* are reserved for future usage, and must
3250  * 		be left at zero.
3251  * 	Return
3252  * 		0 on success, or a negative error in case of failure.
3253  *
3254  * long bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len)
3255  * 	Description
3256  * 		Bind the socket associated to *ctx* to the address pointed by
3257  * 		*addr*, of length *addr_len*. This allows for making outgoing
3258  * 		connection from the desired IP address, which can be useful for
3259  * 		example when all processes inside a cgroup should use one
3260  * 		single IP address on a host that has multiple IP configured.
3261  *
3262  * 		This helper works for IPv4 and IPv6, TCP and UDP sockets. The
3263  * 		domain (*addr*\ **->sa_family**) must be **AF_INET** (or
3264  * 		**AF_INET6**). It's advised to pass zero port (**sin_port**
3265  * 		or **sin6_port**) which triggers IP_BIND_ADDRESS_NO_PORT-like
3266  * 		behavior and lets the kernel efficiently pick up an unused
3267  * 		port as long as 4-tuple is unique. Passing non-zero port might
3268  * 		lead to degraded performance.
3269  * 	Return
3270  * 		0 on success, or a negative error in case of failure.
3271  *
3272  * long bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta)
3273  * 	Description
3274  * 		Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is
3275  * 		possible to both shrink and grow the packet tail.
3276  * 		Shrink done via *delta* being a negative integer.
3277  *
3278  * 		A call to this helper is susceptible to change the underlying
3279  * 		packet buffer. Therefore, at load time, all checks on pointers
3280  * 		previously done by the verifier are invalidated and must be
3281  * 		performed again, if the helper is used in combination with
3282  * 		direct packet access.
3283  * 	Return
3284  * 		0 on success, or a negative error in case of failure.
3285  *
3286  * long bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags)
3287  * 	Description
3288  * 		Retrieve the XFRM state (IP transform framework, see also
3289  * 		**ip-xfrm(8)**) at *index* in XFRM "security path" for *skb*.
3290  *
3291  * 		The retrieved value is stored in the **struct bpf_xfrm_state**
3292  * 		pointed by *xfrm_state* and of length *size*.
3293  *
3294  * 		All values for *flags* are reserved for future usage, and must
3295  * 		be left at zero.
3296  *
3297  * 		This helper is available only if the kernel was compiled with
3298  * 		**CONFIG_XFRM** configuration option.
3299  * 	Return
3300  * 		0 on success, or a negative error in case of failure.
3301  *
3302  * long bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags)
3303  * 	Description
3304  * 		Return a user or a kernel stack in bpf program provided buffer.
3305  * 		To achieve this, the helper needs *ctx*, which is a pointer
3306  * 		to the context on which the tracing program is executed.
3307  * 		To store the stacktrace, the bpf program provides *buf* with
3308  * 		a nonnegative *size*.
3309  *
3310  * 		The last argument, *flags*, holds the number of stack frames to
3311  * 		skip (from 0 to 255), masked with
3312  * 		**BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
3313  * 		the following flags:
3314  *
3315  * 		**BPF_F_USER_STACK**
3316  * 			Collect a user space stack instead of a kernel stack.
3317  * 		**BPF_F_USER_BUILD_ID**
3318  * 			Collect (build_id, file_offset) instead of ips for user
3319  * 			stack, only valid if **BPF_F_USER_STACK** is also
3320  * 			specified.
3321  *
3322  * 			*file_offset* is an offset relative to the beginning
3323  * 			of the executable or shared object file backing the vma
3324  * 			which the *ip* falls in. It is *not* an offset relative
3325  * 			to that object's base address. Accordingly, it must be
3326  * 			adjusted by adding (sh_addr - sh_offset), where
3327  * 			sh_{addr,offset} correspond to the executable section
3328  * 			containing *file_offset* in the object, for comparisons
3329  * 			to symbols' st_value to be valid.
3330  *
3331  * 		**bpf_get_stack**\ () can collect up to
3332  * 		**PERF_MAX_STACK_DEPTH** both kernel and user frames, subject
3333  * 		to sufficient large buffer size. Note that
3334  * 		this limit can be controlled with the **sysctl** program, and
3335  * 		that it should be manually increased in order to profile long
3336  * 		user stacks (such as stacks for Java programs). To do so, use:
3337  *
3338  * 		::
3339  *
3340  * 			# sysctl kernel.perf_event_max_stack=<new value>
3341  * 	Return
3342  * 		The non-negative copied *buf* length equal to or less than
3343  * 		*size* on success, or a negative error in case of failure.
3344  *
3345  * long bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header)
3346  * 	Description
3347  * 		This helper is similar to **bpf_skb_load_bytes**\ () in that
3348  * 		it provides an easy way to load *len* bytes from *offset*
3349  * 		from the packet associated to *skb*, into the buffer pointed
3350  * 		by *to*. The difference to **bpf_skb_load_bytes**\ () is that
3351  * 		a fifth argument *start_header* exists in order to select a
3352  * 		base offset to start from. *start_header* can be one of:
3353  *
3354  * 		**BPF_HDR_START_MAC**
3355  * 			Base offset to load data from is *skb*'s mac header.
3356  * 		**BPF_HDR_START_NET**
3357  * 			Base offset to load data from is *skb*'s network header.
3358  *
3359  * 		In general, "direct packet access" is the preferred method to
3360  * 		access packet data, however, this helper is in particular useful
3361  * 		in socket filters where *skb*\ **->data** does not always point
3362  * 		to the start of the mac header and where "direct packet access"
3363  * 		is not available.
3364  * 	Return
3365  * 		0 on success, or a negative error in case of failure.
3366  *
3367  * long bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags)
3368  *	Description
3369  *		Do FIB lookup in kernel tables using parameters in *params*.
3370  *		If lookup is successful and result shows packet is to be
3371  *		forwarded, the neighbor tables are searched for the nexthop.
3372  *		If successful (ie., FIB lookup shows forwarding and nexthop
3373  *		is resolved), the nexthop address is returned in ipv4_dst
3374  *		or ipv6_dst based on family, smac is set to mac address of
3375  *		egress device, dmac is set to nexthop mac address, rt_metric
3376  *		is set to metric from route (IPv4/IPv6 only), and ifindex
3377  *		is set to the device index of the nexthop from the FIB lookup.
3378  *
3379  *		*plen* argument is the size of the passed in struct.
3380  *		*flags* argument can be a combination of one or more of the
3381  *		following values:
3382  *
3383  *		**BPF_FIB_LOOKUP_DIRECT**
3384  *			Do a direct table lookup vs full lookup using FIB
3385  *			rules.
3386  *		**BPF_FIB_LOOKUP_TBID**
3387  *			Used with BPF_FIB_LOOKUP_DIRECT.
3388  *			Use the routing table ID present in *params*->tbid
3389  *			for the fib lookup.
3390  *		**BPF_FIB_LOOKUP_OUTPUT**
3391  *			Perform lookup from an egress perspective (default is
3392  *			ingress).
3393  *		**BPF_FIB_LOOKUP_SKIP_NEIGH**
3394  *			Skip the neighbour table lookup. *params*->dmac
3395  *			and *params*->smac will not be set as output. A common
3396  *			use case is to call **bpf_redirect_neigh**\ () after
3397  *			doing **bpf_fib_lookup**\ ().
3398  *		**BPF_FIB_LOOKUP_SRC**
3399  *			Derive and set source IP addr in *params*->ipv{4,6}_src
3400  *			for the nexthop. If the src addr cannot be derived,
3401  *			**BPF_FIB_LKUP_RET_NO_SRC_ADDR** is returned. In this
3402  *			case, *params*->dmac and *params*->smac are not set either.
3403  *		**BPF_FIB_LOOKUP_MARK**
3404  *			Use the mark present in *params*->mark for the fib lookup.
3405  *			This option should not be used with BPF_FIB_LOOKUP_DIRECT,
3406  *			as it only has meaning for full lookups.
3407  *
3408  *		*ctx* is either **struct xdp_md** for XDP programs or
3409  *		**struct sk_buff** tc cls_act programs.
3410  *	Return
3411  *		* < 0 if any input argument is invalid
3412  *		*   0 on success (packet is forwarded, nexthop neighbor exists)
3413  *		* > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
3414  *		  packet is not forwarded or needs assist from full stack
3415  *
3416  *		If lookup fails with BPF_FIB_LKUP_RET_FRAG_NEEDED, then the MTU
3417  *		was exceeded and output params->mtu_result contains the MTU.
3418  *
3419  * long bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
3420  *	Description
3421  *		Add an entry to, or update a sockhash *map* referencing sockets.
3422  *		The *skops* is used as a new value for the entry associated to
3423  *		*key*. *flags* is one of:
3424  *
3425  *		**BPF_NOEXIST**
3426  *			The entry for *key* must not exist in the map.
3427  *		**BPF_EXIST**
3428  *			The entry for *key* must already exist in the map.
3429  *		**BPF_ANY**
3430  *			No condition on the existence of the entry for *key*.
3431  *
3432  *		If the *map* has eBPF programs (parser and verdict), those will
3433  *		be inherited by the socket being added. If the socket is
3434  *		already attached to eBPF programs, this results in an error.
3435  *	Return
3436  *		0 on success, or a negative error in case of failure.
3437  *
3438  * long bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags)
3439  *	Description
3440  *		This helper is used in programs implementing policies at the
3441  *		socket level. If the message *msg* is allowed to pass (i.e. if
3442  *		the verdict eBPF program returns **SK_PASS**), redirect it to
3443  *		the socket referenced by *map* (of type
3444  *		**BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and
3445  *		egress interfaces can be used for redirection. The
3446  *		**BPF_F_INGRESS** value in *flags* is used to make the
3447  *		distinction (ingress path is selected if the flag is present,
3448  *		egress path otherwise). This is the only flag supported for now.
3449  *	Return
3450  *		**SK_PASS** on success, or **SK_DROP** on error.
3451  *
3452  * long bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags)
3453  *	Description
3454  *		This helper is used in programs implementing policies at the
3455  *		skb socket level. If the sk_buff *skb* is allowed to pass (i.e.
3456  *		if the verdict eBPF program returns **SK_PASS**), redirect it
3457  *		to the socket referenced by *map* (of type
3458  *		**BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and
3459  *		egress interfaces can be used for redirection. The
3460  *		**BPF_F_INGRESS** value in *flags* is used to make the
3461  *		distinction (ingress path is selected if the flag is present,
3462  *		egress otherwise). This is the only flag supported for now.
3463  *	Return
3464  *		**SK_PASS** on success, or **SK_DROP** on error.
3465  *
3466  * long bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len)
3467  *	Description
3468  *		Encapsulate the packet associated to *skb* within a Layer 3
3469  *		protocol header. This header is provided in the buffer at
3470  *		address *hdr*, with *len* its size in bytes. *type* indicates
3471  *		the protocol of the header and can be one of:
3472  *
3473  *		**BPF_LWT_ENCAP_SEG6**
3474  *			IPv6 encapsulation with Segment Routing Header
3475  *			(**struct ipv6_sr_hdr**). *hdr* only contains the SRH,
3476  *			the IPv6 header is computed by the kernel.
3477  *		**BPF_LWT_ENCAP_SEG6_INLINE**
3478  *			Only works if *skb* contains an IPv6 packet. Insert a
3479  *			Segment Routing Header (**struct ipv6_sr_hdr**) inside
3480  *			the IPv6 header.
3481  *		**BPF_LWT_ENCAP_IP**
3482  *			IP encapsulation (GRE/GUE/IPIP/etc). The outer header
3483  *			must be IPv4 or IPv6, followed by zero or more
3484  *			additional headers, up to **LWT_BPF_MAX_HEADROOM**
3485  *			total bytes in all prepended headers. Please note that
3486  *			if **skb_is_gso**\ (*skb*) is true, no more than two
3487  *			headers can be prepended, and the inner header, if
3488  *			present, should be either GRE or UDP/GUE.
3489  *
3490  *		**BPF_LWT_ENCAP_SEG6**\ \* types can be called by BPF programs
3491  *		of type **BPF_PROG_TYPE_LWT_IN**; **BPF_LWT_ENCAP_IP** type can
3492  *		be called by bpf programs of types **BPF_PROG_TYPE_LWT_IN** and
3493  *		**BPF_PROG_TYPE_LWT_XMIT**.
3494  *
3495  * 		A call to this helper is susceptible to change the underlying
3496  * 		packet buffer. Therefore, at load time, all checks on pointers
3497  * 		previously done by the verifier are invalidated and must be
3498  * 		performed again, if the helper is used in combination with
3499  * 		direct packet access.
3500  *	Return
3501  * 		0 on success, or a negative error in case of failure.
3502  *
3503  * long bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len)
3504  *	Description
3505  *		Store *len* bytes from address *from* into the packet
3506  *		associated to *skb*, at *offset*. Only the flags, tag and TLVs
3507  *		inside the outermost IPv6 Segment Routing Header can be
3508  *		modified through this helper.
3509  *
3510  * 		A call to this helper is susceptible to change the underlying
3511  * 		packet buffer. Therefore, at load time, all checks on pointers
3512  * 		previously done by the verifier are invalidated and must be
3513  * 		performed again, if the helper is used in combination with
3514  * 		direct packet access.
3515  *	Return
3516  * 		0 on success, or a negative error in case of failure.
3517  *
3518  * long bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta)
3519  *	Description
3520  *		Adjust the size allocated to TLVs in the outermost IPv6
3521  *		Segment Routing Header contained in the packet associated to
3522  *		*skb*, at position *offset* by *delta* bytes. Only offsets
3523  *		after the segments are accepted. *delta* can be as well
3524  *		positive (growing) as negative (shrinking).
3525  *
3526  * 		A call to this helper is susceptible to change the underlying
3527  * 		packet buffer. Therefore, at load time, all checks on pointers
3528  * 		previously done by the verifier are invalidated and must be
3529  * 		performed again, if the helper is used in combination with
3530  * 		direct packet access.
3531  *	Return
3532  * 		0 on success, or a negative error in case of failure.
3533  *
3534  * long bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len)
3535  *	Description
3536  *		Apply an IPv6 Segment Routing action of type *action* to the
3537  *		packet associated to *skb*. Each action takes a parameter
3538  *		contained at address *param*, and of length *param_len* bytes.
3539  *		*action* can be one of:
3540  *
3541  *		**SEG6_LOCAL_ACTION_END_X**
3542  *			End.X action: Endpoint with Layer-3 cross-connect.
3543  *			Type of *param*: **struct in6_addr**.
3544  *		**SEG6_LOCAL_ACTION_END_T**
3545  *			End.T action: Endpoint with specific IPv6 table lookup.
3546  *			Type of *param*: **int**.
3547  *		**SEG6_LOCAL_ACTION_END_B6**
3548  *			End.B6 action: Endpoint bound to an SRv6 policy.
3549  *			Type of *param*: **struct ipv6_sr_hdr**.
3550  *		**SEG6_LOCAL_ACTION_END_B6_ENCAP**
3551  *			End.B6.Encap action: Endpoint bound to an SRv6
3552  *			encapsulation policy.
3553  *			Type of *param*: **struct ipv6_sr_hdr**.
3554  *
3555  * 		A call to this helper is susceptible to change the underlying
3556  * 		packet buffer. Therefore, at load time, all checks on pointers
3557  * 		previously done by the verifier are invalidated and must be
3558  * 		performed again, if the helper is used in combination with
3559  * 		direct packet access.
3560  *	Return
3561  * 		0 on success, or a negative error in case of failure.
3562  *
3563  * long bpf_rc_repeat(void *ctx)
3564  *	Description
3565  *		This helper is used in programs implementing IR decoding, to
3566  *		report a successfully decoded repeat key message. This delays
3567  *		the generation of a key up event for previously generated
3568  *		key down event.
3569  *
3570  *		Some IR protocols like NEC have a special IR message for
3571  *		repeating last button, for when a button is held down.
3572  *
3573  *		The *ctx* should point to the lirc sample as passed into
3574  *		the program.
3575  *
3576  *		This helper is only available is the kernel was compiled with
3577  *		the **CONFIG_BPF_LIRC_MODE2** configuration option set to
3578  *		"**y**".
3579  *	Return
3580  *		0
3581  *
3582  * long bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
3583  *	Description
3584  *		This helper is used in programs implementing IR decoding, to
3585  *		report a successfully decoded key press with *scancode*,
3586  *		*toggle* value in the given *protocol*. The scancode will be
3587  *		translated to a keycode using the rc keymap, and reported as
3588  *		an input key down event. After a period a key up event is
3589  *		generated. This period can be extended by calling either
3590  *		**bpf_rc_keydown**\ () again with the same values, or calling
3591  *		**bpf_rc_repeat**\ ().
3592  *
3593  *		Some protocols include a toggle bit, in case the button was
3594  *		released and pressed again between consecutive scancodes.
3595  *
3596  *		The *ctx* should point to the lirc sample as passed into
3597  *		the program.
3598  *
3599  *		The *protocol* is the decoded protocol number (see
3600  *		**enum rc_proto** for some predefined values).
3601  *
3602  *		This helper is only available is the kernel was compiled with
3603  *		the **CONFIG_BPF_LIRC_MODE2** configuration option set to
3604  *		"**y**".
3605  *	Return
3606  *		0
3607  *
3608  * u64 bpf_skb_cgroup_id(struct sk_buff *skb)
3609  * 	Description
3610  * 		Return the cgroup v2 id of the socket associated with the *skb*.
3611  * 		This is roughly similar to the **bpf_get_cgroup_classid**\ ()
3612  * 		helper for cgroup v1 by providing a tag resp. identifier that
3613  * 		can be matched on or used for map lookups e.g. to implement
3614  * 		policy. The cgroup v2 id of a given path in the hierarchy is
3615  * 		exposed in user space through the f_handle API in order to get
3616  * 		to the same 64-bit id.
3617  *
3618  * 		This helper can be used on TC egress path, but not on ingress,
3619  * 		and is available only if the kernel was compiled with the
3620  * 		**CONFIG_SOCK_CGROUP_DATA** configuration option.
3621  * 	Return
3622  * 		The id is returned or 0 in case the id could not be retrieved.
3623  *
3624  * u64 bpf_get_current_cgroup_id(void)
3625  * 	Description
3626  * 		Get the current cgroup id based on the cgroup within which
3627  * 		the current task is running.
3628  * 	Return
3629  * 		A 64-bit integer containing the current cgroup id based
3630  * 		on the cgroup within which the current task is running.
3631  *
3632  * void *bpf_get_local_storage(void *map, u64 flags)
3633  *	Description
3634  *		Get the pointer to the local storage area.
3635  *		The type and the size of the local storage is defined
3636  *		by the *map* argument.
3637  *		The *flags* meaning is specific for each map type,
3638  *		and has to be 0 for cgroup local storage.
3639  *
3640  *		Depending on the BPF program type, a local storage area
3641  *		can be shared between multiple instances of the BPF program,
3642  *		running simultaneously.
3643  *
3644  *		A user should care about the synchronization by himself.
3645  *		For example, by using the **BPF_ATOMIC** instructions to alter
3646  *		the shared data.
3647  *	Return
3648  *		A pointer to the local storage area.
3649  *
3650  * long bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags)
3651  *	Description
3652  *		Select a **SO_REUSEPORT** socket from a
3653  *		**BPF_MAP_TYPE_REUSEPORT_SOCKARRAY** *map*.
3654  *		It checks the selected socket is matching the incoming
3655  *		request in the socket buffer.
3656  *	Return
3657  *		0 on success, or a negative error in case of failure.
3658  *
3659  * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
3660  *	Description
3661  *		Return id of cgroup v2 that is ancestor of cgroup associated
3662  *		with the *skb* at the *ancestor_level*.  The root cgroup is at
3663  *		*ancestor_level* zero and each step down the hierarchy
3664  *		increments the level. If *ancestor_level* == level of cgroup
3665  *		associated with *skb*, then return value will be same as that
3666  *		of **bpf_skb_cgroup_id**\ ().
3667  *
3668  *		The helper is useful to implement policies based on cgroups
3669  *		that are upper in hierarchy than immediate cgroup associated
3670  *		with *skb*.
3671  *
3672  *		The format of returned id and helper limitations are same as in
3673  *		**bpf_skb_cgroup_id**\ ().
3674  *	Return
3675  *		The id is returned or 0 in case the id could not be retrieved.
3676  *
3677  * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
3678  *	Description
3679  *		Look for TCP socket matching *tuple*, optionally in a child
3680  *		network namespace *netns*. The return value must be checked,
3681  *		and if non-**NULL**, released via **bpf_sk_release**\ ().
3682  *
3683  *		The *ctx* should point to the context of the program, such as
3684  *		the skb or socket (depending on the hook in use). This is used
3685  *		to determine the base network namespace for the lookup.
3686  *
3687  *		*tuple_size* must be one of:
3688  *
3689  *		**sizeof**\ (*tuple*\ **->ipv4**)
3690  *			Look for an IPv4 socket.
3691  *		**sizeof**\ (*tuple*\ **->ipv6**)
3692  *			Look for an IPv6 socket.
3693  *
3694  *		If the *netns* is a negative signed 32-bit integer, then the
3695  *		socket lookup table in the netns associated with the *ctx*
3696  *		will be used. For the TC hooks, this is the netns of the device
3697  *		in the skb. For socket hooks, this is the netns of the socket.
3698  *		If *netns* is any other signed 32-bit value greater than or
3699  *		equal to zero then it specifies the ID of the netns relative to
3700  *		the netns associated with the *ctx*. *netns* values beyond the
3701  *		range of 32-bit integers are reserved for future use.
3702  *
3703  *		All values for *flags* are reserved for future usage, and must
3704  *		be left at zero.
3705  *
3706  *		This helper is available only if the kernel was compiled with
3707  *		**CONFIG_NET** configuration option.
3708  *	Return
3709  *		Pointer to **struct bpf_sock**, or **NULL** in case of failure.
3710  *		For sockets with reuseport option, the **struct bpf_sock**
3711  *		result is from *reuse*\ **->socks**\ [] using the hash of the
3712  *		tuple.
3713  *
3714  * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
3715  *	Description
3716  *		Look for UDP socket matching *tuple*, optionally in a child
3717  *		network namespace *netns*. The return value must be checked,
3718  *		and if non-**NULL**, released via **bpf_sk_release**\ ().
3719  *
3720  *		The *ctx* should point to the context of the program, such as
3721  *		the skb or socket (depending on the hook in use). This is used
3722  *		to determine the base network namespace for the lookup.
3723  *
3724  *		*tuple_size* must be one of:
3725  *
3726  *		**sizeof**\ (*tuple*\ **->ipv4**)
3727  *			Look for an IPv4 socket.
3728  *		**sizeof**\ (*tuple*\ **->ipv6**)
3729  *			Look for an IPv6 socket.
3730  *
3731  *		If the *netns* is a negative signed 32-bit integer, then the
3732  *		socket lookup table in the netns associated with the *ctx*
3733  *		will be used. For the TC hooks, this is the netns of the device
3734  *		in the skb. For socket hooks, this is the netns of the socket.
3735  *		If *netns* is any other signed 32-bit value greater than or
3736  *		equal to zero then it specifies the ID of the netns relative to
3737  *		the netns associated with the *ctx*. *netns* values beyond the
3738  *		range of 32-bit integers are reserved for future use.
3739  *
3740  *		All values for *flags* are reserved for future usage, and must
3741  *		be left at zero.
3742  *
3743  *		This helper is available only if the kernel was compiled with
3744  *		**CONFIG_NET** configuration option.
3745  *	Return
3746  *		Pointer to **struct bpf_sock**, or **NULL** in case of failure.
3747  *		For sockets with reuseport option, the **struct bpf_sock**
3748  *		result is from *reuse*\ **->socks**\ [] using the hash of the
3749  *		tuple.
3750  *
3751  * long bpf_sk_release(void *sock)
3752  *	Description
3753  *		Release the reference held by *sock*. *sock* must be a
3754  *		non-**NULL** pointer that was returned from
3755  *		**bpf_sk_lookup_xxx**\ ().
3756  *	Return
3757  *		0 on success, or a negative error in case of failure.
3758  *
3759  * long bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
3760  * 	Description
3761  * 		Push an element *value* in *map*. *flags* is one of:
3762  *
3763  * 		**BPF_EXIST**
3764  * 			If the queue/stack is full, the oldest element is
3765  * 			removed to make room for this.
3766  * 	Return
3767  * 		0 on success, or a negative error in case of failure.
3768  *
3769  * long bpf_map_pop_elem(struct bpf_map *map, void *value)
3770  * 	Description
3771  * 		Pop an element from *map*.
3772  * 	Return
3773  * 		0 on success, or a negative error in case of failure.
3774  *
3775  * long bpf_map_peek_elem(struct bpf_map *map, void *value)
3776  * 	Description
3777  * 		Get an element from *map* without removing it.
3778  * 	Return
3779  * 		0 on success, or a negative error in case of failure.
3780  *
3781  * long bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
3782  *	Description
3783  *		For socket policies, insert *len* bytes into *msg* at offset
3784  *		*start*.
3785  *
3786  *		If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a
3787  *		*msg* it may want to insert metadata or options into the *msg*.
3788  *		This can later be read and used by any of the lower layer BPF
3789  *		hooks.
3790  *
3791  *		This helper may fail if under memory pressure (a malloc
3792  *		fails) in these cases BPF programs will get an appropriate
3793  *		error and BPF programs will need to handle them.
3794  *	Return
3795  *		0 on success, or a negative error in case of failure.
3796  *
3797  * long bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
3798  *	Description
3799  *		Will remove *len* bytes from a *msg* starting at byte *start*.
3800  *		This may result in **ENOMEM** errors under certain situations if
3801  *		an allocation and copy are required due to a full ring buffer.
3802  *		However, the helper will try to avoid doing the allocation
3803  *		if possible. Other errors can occur if input parameters are
3804  *		invalid either due to *start* byte not being valid part of *msg*
3805  *		payload and/or *pop* value being to large.
3806  *	Return
3807  *		0 on success, or a negative error in case of failure.
3808  *
3809  * long bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y)
3810  *	Description
3811  *		This helper is used in programs implementing IR decoding, to
3812  *		report a successfully decoded pointer movement.
3813  *
3814  *		The *ctx* should point to the lirc sample as passed into
3815  *		the program.
3816  *
3817  *		This helper is only available is the kernel was compiled with
3818  *		the **CONFIG_BPF_LIRC_MODE2** configuration option set to
3819  *		"**y**".
3820  *	Return
3821  *		0
3822  *
3823  * long bpf_spin_lock(struct bpf_spin_lock *lock)
3824  *	Description
3825  *		Acquire a spinlock represented by the pointer *lock*, which is
3826  *		stored as part of a value of a map. Taking the lock allows to
3827  *		safely update the rest of the fields in that value. The
3828  *		spinlock can (and must) later be released with a call to
3829  *		**bpf_spin_unlock**\ (\ *lock*\ ).
3830  *
3831  *		Spinlocks in BPF programs come with a number of restrictions
3832  *		and constraints:
3833  *
3834  *		* **bpf_spin_lock** objects are only allowed inside maps of
3835  *		  types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this
3836  *		  list could be extended in the future).
3837  *		* BTF description of the map is mandatory.
3838  *		* The BPF program can take ONE lock at a time, since taking two
3839  *		  or more could cause dead locks.
3840  *		* Only one **struct bpf_spin_lock** is allowed per map element.
3841  *		* When the lock is taken, calls (either BPF to BPF or helpers)
3842  *		  are not allowed.
3843  *		* The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not
3844  *		  allowed inside a spinlock-ed region.
3845  *		* The BPF program MUST call **bpf_spin_unlock**\ () to release
3846  *		  the lock, on all execution paths, before it returns.
3847  *		* The BPF program can access **struct bpf_spin_lock** only via
3848  *		  the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ ()
3849  *		  helpers. Loading or storing data into the **struct
3850  *		  bpf_spin_lock** *lock*\ **;** field of a map is not allowed.
3851  *		* To use the **bpf_spin_lock**\ () helper, the BTF description
3852  *		  of the map value must be a struct and have **struct
3853  *		  bpf_spin_lock** *anyname*\ **;** field at the top level.
3854  *		  Nested lock inside another struct is not allowed.
3855  *		* The **struct bpf_spin_lock** *lock* field in a map value must
3856  *		  be aligned on a multiple of 4 bytes in that value.
3857  *		* Syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy
3858  *		  the **bpf_spin_lock** field to user space.
3859  *		* Syscall with command **BPF_MAP_UPDATE_ELEM**, or update from
3860  *		  a BPF program, do not update the **bpf_spin_lock** field.
3861  *		* **bpf_spin_lock** cannot be on the stack or inside a
3862  *		  networking packet (it can only be inside of a map values).
3863  *		* **bpf_spin_lock** is available to root only.
3864  *		* Tracing programs and socket filter programs cannot use
3865  *		  **bpf_spin_lock**\ () due to insufficient preemption checks
3866  *		  (but this may change in the future).
3867  *		* **bpf_spin_lock** is not allowed in inner maps of map-in-map.
3868  *	Return
3869  *		0
3870  *
3871  * long bpf_spin_unlock(struct bpf_spin_lock *lock)
3872  *	Description
3873  *		Release the *lock* previously locked by a call to
3874  *		**bpf_spin_lock**\ (\ *lock*\ ).
3875  *	Return
3876  *		0
3877  *
3878  * struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk)
3879  *	Description
3880  *		This helper gets a **struct bpf_sock** pointer such
3881  *		that all the fields in this **bpf_sock** can be accessed.
3882  *	Return
3883  *		A **struct bpf_sock** pointer on success, or **NULL** in
3884  *		case of failure.
3885  *
3886  * struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk)
3887  *	Description
3888  *		This helper gets a **struct bpf_tcp_sock** pointer from a
3889  *		**struct bpf_sock** pointer.
3890  *	Return
3891  *		A **struct bpf_tcp_sock** pointer on success, or **NULL** in
3892  *		case of failure.
3893  *
3894  * long bpf_skb_ecn_set_ce(struct sk_buff *skb)
3895  *	Description
3896  *		Set ECN (Explicit Congestion Notification) field of IP header
3897  *		to **CE** (Congestion Encountered) if current value is **ECT**
3898  *		(ECN Capable Transport). Otherwise, do nothing. Works with IPv6
3899  *		and IPv4.
3900  *	Return
3901  *		1 if the **CE** flag is set (either by the current helper call
3902  *		or because it was already present), 0 if it is not set.
3903  *
3904  * struct bpf_sock *bpf_get_listener_sock(struct bpf_sock *sk)
3905  *	Description
3906  *		Return a **struct bpf_sock** pointer in **TCP_LISTEN** state.
3907  *		**bpf_sk_release**\ () is unnecessary and not allowed.
3908  *	Return
3909  *		A **struct bpf_sock** pointer on success, or **NULL** in
3910  *		case of failure.
3911  *
3912  * struct bpf_sock *bpf_skc_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
3913  *	Description
3914  *		Look for TCP socket matching *tuple*, optionally in a child
3915  *		network namespace *netns*. The return value must be checked,
3916  *		and if non-**NULL**, released via **bpf_sk_release**\ ().
3917  *
3918  *		This function is identical to **bpf_sk_lookup_tcp**\ (), except
3919  *		that it also returns timewait or request sockets. Use
3920  *		**bpf_sk_fullsock**\ () or **bpf_tcp_sock**\ () to access the
3921  *		full structure.
3922  *
3923  *		This helper is available only if the kernel was compiled with
3924  *		**CONFIG_NET** configuration option.
3925  *	Return
3926  *		Pointer to **struct bpf_sock**, or **NULL** in case of failure.
3927  *		For sockets with reuseport option, the **struct bpf_sock**
3928  *		result is from *reuse*\ **->socks**\ [] using the hash of the
3929  *		tuple.
3930  *
3931  * long bpf_tcp_check_syncookie(void *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
3932  * 	Description
3933  * 		Check whether *iph* and *th* contain a valid SYN cookie ACK for
3934  * 		the listening socket in *sk*.
3935  *
3936  * 		*iph* points to the start of the IPv4 or IPv6 header, while
3937  * 		*iph_len* contains **sizeof**\ (**struct iphdr**) or
3938  * 		**sizeof**\ (**struct ipv6hdr**).
3939  *
3940  * 		*th* points to the start of the TCP header, while *th_len*
3941  *		contains the length of the TCP header (at least
3942  *		**sizeof**\ (**struct tcphdr**)).
3943  * 	Return
3944  * 		0 if *iph* and *th* are a valid SYN cookie ACK, or a negative
3945  * 		error otherwise.
3946  *
3947  * long bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags)
3948  *	Description
3949  *		Get name of sysctl in /proc/sys/ and copy it into provided by
3950  *		program buffer *buf* of size *buf_len*.
3951  *
3952  *		The buffer is always NUL terminated, unless it's zero-sized.
3953  *
3954  *		If *flags* is zero, full name (e.g. "net/ipv4/tcp_mem") is
3955  *		copied. Use **BPF_F_SYSCTL_BASE_NAME** flag to copy base name
3956  *		only (e.g. "tcp_mem").
3957  *	Return
3958  *		Number of character copied (not including the trailing NUL).
3959  *
3960  *		**-E2BIG** if the buffer wasn't big enough (*buf* will contain
3961  *		truncated name in this case).
3962  *
3963  * long bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
3964  *	Description
3965  *		Get current value of sysctl as it is presented in /proc/sys
3966  *		(incl. newline, etc), and copy it as a string into provided
3967  *		by program buffer *buf* of size *buf_len*.
3968  *
3969  *		The whole value is copied, no matter what file position user
3970  *		space issued e.g. sys_read at.
3971  *
3972  *		The buffer is always NUL terminated, unless it's zero-sized.
3973  *	Return
3974  *		Number of character copied (not including the trailing NUL).
3975  *
3976  *		**-E2BIG** if the buffer wasn't big enough (*buf* will contain
3977  *		truncated name in this case).
3978  *
3979  *		**-EINVAL** if current value was unavailable, e.g. because
3980  *		sysctl is uninitialized and read returns -EIO for it.
3981  *
3982  * long bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
3983  *	Description
3984  *		Get new value being written by user space to sysctl (before
3985  *		the actual write happens) and copy it as a string into
3986  *		provided by program buffer *buf* of size *buf_len*.
3987  *
3988  *		User space may write new value at file position > 0.
3989  *
3990  *		The buffer is always NUL terminated, unless it's zero-sized.
3991  *	Return
3992  *		Number of character copied (not including the trailing NUL).
3993  *
3994  *		**-E2BIG** if the buffer wasn't big enough (*buf* will contain
3995  *		truncated name in this case).
3996  *
3997  *		**-EINVAL** if sysctl is being read.
3998  *
3999  * long bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len)
4000  *	Description
4001  *		Override new value being written by user space to sysctl with
4002  *		value provided by program in buffer *buf* of size *buf_len*.
4003  *
4004  *		*buf* should contain a string in same form as provided by user
4005  *		space on sysctl write.
4006  *
4007  *		User space may write new value at file position > 0. To override
4008  *		the whole sysctl value file position should be set to zero.
4009  *	Return
4010  *		0 on success.
4011  *
4012  *		**-E2BIG** if the *buf_len* is too big.
4013  *
4014  *		**-EINVAL** if sysctl is being read.
4015  *
4016  * long bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res)
4017  *	Description
4018  *		Convert the initial part of the string from buffer *buf* of
4019  *		size *buf_len* to a long integer according to the given base
4020  *		and save the result in *res*.
4021  *
4022  *		The string may begin with an arbitrary amount of white space
4023  *		(as determined by **isspace**\ (3)) followed by a single
4024  *		optional '**-**' sign.
4025  *
4026  *		Five least significant bits of *flags* encode base, other bits
4027  *		are currently unused.
4028  *
4029  *		Base must be either 8, 10, 16 or 0 to detect it automatically
4030  *		similar to user space **strtol**\ (3).
4031  *	Return
4032  *		Number of characters consumed on success. Must be positive but
4033  *		no more than *buf_len*.
4034  *
4035  *		**-EINVAL** if no valid digits were found or unsupported base
4036  *		was provided.
4037  *
4038  *		**-ERANGE** if resulting value was out of range.
4039  *
4040  * long bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res)
4041  *	Description
4042  *		Convert the initial part of the string from buffer *buf* of
4043  *		size *buf_len* to an unsigned long integer according to the
4044  *		given base and save the result in *res*.
4045  *
4046  *		The string may begin with an arbitrary amount of white space
4047  *		(as determined by **isspace**\ (3)).
4048  *
4049  *		Five least significant bits of *flags* encode base, other bits
4050  *		are currently unused.
4051  *
4052  *		Base must be either 8, 10, 16 or 0 to detect it automatically
4053  *		similar to user space **strtoul**\ (3).
4054  *	Return
4055  *		Number of characters consumed on success. Must be positive but
4056  *		no more than *buf_len*.
4057  *
4058  *		**-EINVAL** if no valid digits were found or unsupported base
4059  *		was provided.
4060  *
4061  *		**-ERANGE** if resulting value was out of range.
4062  *
4063  * void *bpf_sk_storage_get(struct bpf_map *map, void *sk, void *value, u64 flags)
4064  *	Description
4065  *		Get a bpf-local-storage from a *sk*.
4066  *
4067  *		Logically, it could be thought of getting the value from
4068  *		a *map* with *sk* as the **key**.  From this
4069  *		perspective,  the usage is not much different from
4070  *		**bpf_map_lookup_elem**\ (*map*, **&**\ *sk*) except this
4071  *		helper enforces the key must be a full socket and the map must
4072  *		be a **BPF_MAP_TYPE_SK_STORAGE** also.
4073  *
4074  *		Underneath, the value is stored locally at *sk* instead of
4075  *		the *map*.  The *map* is used as the bpf-local-storage
4076  *		"type". The bpf-local-storage "type" (i.e. the *map*) is
4077  *		searched against all bpf-local-storages residing at *sk*.
4078  *
4079  *		*sk* is a kernel **struct sock** pointer for LSM program.
4080  *		*sk* is a **struct bpf_sock** pointer for other program types.
4081  *
4082  *		An optional *flags* (**BPF_SK_STORAGE_GET_F_CREATE**) can be
4083  *		used such that a new bpf-local-storage will be
4084  *		created if one does not exist.  *value* can be used
4085  *		together with **BPF_SK_STORAGE_GET_F_CREATE** to specify
4086  *		the initial value of a bpf-local-storage.  If *value* is
4087  *		**NULL**, the new bpf-local-storage will be zero initialized.
4088  *	Return
4089  *		A bpf-local-storage pointer is returned on success.
4090  *
4091  *		**NULL** if not found or there was an error in adding
4092  *		a new bpf-local-storage.
4093  *
4094  * long bpf_sk_storage_delete(struct bpf_map *map, void *sk)
4095  *	Description
4096  *		Delete a bpf-local-storage from a *sk*.
4097  *	Return
4098  *		0 on success.
4099  *
4100  *		**-ENOENT** if the bpf-local-storage cannot be found.
4101  *		**-EINVAL** if sk is not a fullsock (e.g. a request_sock).
4102  *
4103  * long bpf_send_signal(u32 sig)
4104  *	Description
4105  *		Send signal *sig* to the process of the current task.
4106  *		The signal may be delivered to any of this process's threads.
4107  *	Return
4108  *		0 on success or successfully queued.
4109  *
4110  *		**-EBUSY** if work queue under nmi is full.
4111  *
4112  *		**-EINVAL** if *sig* is invalid.
4113  *
4114  *		**-EPERM** if no permission to send the *sig*.
4115  *
4116  *		**-EAGAIN** if bpf program can try again.
4117  *
4118  * s64 bpf_tcp_gen_syncookie(void *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
4119  *	Description
4120  *		Try to issue a SYN cookie for the packet with corresponding
4121  *		IP/TCP headers, *iph* and *th*, on the listening socket in *sk*.
4122  *
4123  *		*iph* points to the start of the IPv4 or IPv6 header, while
4124  *		*iph_len* contains **sizeof**\ (**struct iphdr**) or
4125  *		**sizeof**\ (**struct ipv6hdr**).
4126  *
4127  *		*th* points to the start of the TCP header, while *th_len*
4128  *		contains the length of the TCP header with options (at least
4129  *		**sizeof**\ (**struct tcphdr**)).
4130  *	Return
4131  *		On success, lower 32 bits hold the generated SYN cookie in
4132  *		followed by 16 bits which hold the MSS value for that cookie,
4133  *		and the top 16 bits are unused.
4134  *
4135  *		On failure, the returned value is one of the following:
4136  *
4137  *		**-EINVAL** SYN cookie cannot be issued due to error
4138  *
4139  *		**-ENOENT** SYN cookie should not be issued (no SYN flood)
4140  *
4141  *		**-EOPNOTSUPP** kernel configuration does not enable SYN cookies
4142  *
4143  *		**-EPROTONOSUPPORT** IP packet version is not 4 or 6
4144  *
4145  * long bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
4146  * 	Description
4147  * 		Write raw *data* blob into a special BPF perf event held by
4148  * 		*map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
4149  * 		event must have the following attributes: **PERF_SAMPLE_RAW**
4150  * 		as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
4151  * 		**PERF_COUNT_SW_BPF_OUTPUT** as **config**.
4152  *
4153  * 		The *flags* are used to indicate the index in *map* for which
4154  * 		the value must be put, masked with **BPF_F_INDEX_MASK**.
4155  * 		Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
4156  * 		to indicate that the index of the current CPU core should be
4157  * 		used.
4158  *
4159  * 		The value to write, of *size*, is passed through eBPF stack and
4160  * 		pointed by *data*.
4161  *
4162  * 		*ctx* is a pointer to in-kernel struct sk_buff.
4163  *
4164  * 		This helper is similar to **bpf_perf_event_output**\ () but
4165  * 		restricted to raw_tracepoint bpf programs.
4166  * 	Return
4167  * 		0 on success, or a negative error in case of failure.
4168  *
4169  * long bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr)
4170  * 	Description
4171  * 		Safely attempt to read *size* bytes from user space address
4172  * 		*unsafe_ptr* and store the data in *dst*.
4173  * 	Return
4174  * 		0 on success, or a negative error in case of failure.
4175  *
4176  * long bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
4177  * 	Description
4178  * 		Safely attempt to read *size* bytes from kernel space address
4179  * 		*unsafe_ptr* and store the data in *dst*.
4180  * 	Return
4181  * 		0 on success, or a negative error in case of failure.
4182  *
4183  * long bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr)
4184  * 	Description
4185  * 		Copy a NUL terminated string from an unsafe user address
4186  * 		*unsafe_ptr* to *dst*. The *size* should include the
4187  * 		terminating NUL byte. In case the string length is smaller than
4188  * 		*size*, the target is not padded with further NUL bytes. If the
4189  * 		string length is larger than *size*, just *size*-1 bytes are
4190  * 		copied and the last byte is set to NUL.
4191  *
4192  * 		On success, returns the number of bytes that were written,
4193  * 		including the terminal NUL. This makes this helper useful in
4194  * 		tracing programs for reading strings, and more importantly to
4195  * 		get its length at runtime. See the following snippet:
4196  *
4197  * 		::
4198  *
4199  * 			SEC("kprobe/sys_open")
4200  * 			void bpf_sys_open(struct pt_regs *ctx)
4201  * 			{
4202  * 			        char buf[PATHLEN]; // PATHLEN is defined to 256
4203  * 			        int res = bpf_probe_read_user_str(buf, sizeof(buf),
4204  * 				                                  ctx->di);
4205  *
4206  * 				// Consume buf, for example push it to
4207  * 				// userspace via bpf_perf_event_output(); we
4208  * 				// can use res (the string length) as event
4209  * 				// size, after checking its boundaries.
4210  * 			}
4211  *
4212  * 		In comparison, using **bpf_probe_read_user**\ () helper here
4213  * 		instead to read the string would require to estimate the length
4214  * 		at compile time, and would often result in copying more memory
4215  * 		than necessary.
4216  *
4217  * 		Another useful use case is when parsing individual process
4218  * 		arguments or individual environment variables navigating
4219  * 		*current*\ **->mm->arg_start** and *current*\
4220  * 		**->mm->env_start**: using this helper and the return value,
4221  * 		one can quickly iterate at the right offset of the memory area.
4222  * 	Return
4223  * 		On success, the strictly positive length of the output string,
4224  * 		including the trailing NUL character. On error, a negative
4225  * 		value.
4226  *
4227  * long bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr)
4228  * 	Description
4229  * 		Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr*
4230  * 		to *dst*. Same semantics as with **bpf_probe_read_user_str**\ () apply.
4231  * 	Return
4232  * 		On success, the strictly positive length of the string, including
4233  * 		the trailing NUL character. On error, a negative value.
4234  *
4235  * long bpf_tcp_send_ack(void *tp, u32 rcv_nxt)
4236  *	Description
4237  *		Send out a tcp-ack. *tp* is the in-kernel struct **tcp_sock**.
4238  *		*rcv_nxt* is the ack_seq to be sent out.
4239  *	Return
4240  *		0 on success, or a negative error in case of failure.
4241  *
4242  * long bpf_send_signal_thread(u32 sig)
4243  *	Description
4244  *		Send signal *sig* to the thread corresponding to the current task.
4245  *	Return
4246  *		0 on success or successfully queued.
4247  *
4248  *		**-EBUSY** if work queue under nmi is full.
4249  *
4250  *		**-EINVAL** if *sig* is invalid.
4251  *
4252  *		**-EPERM** if no permission to send the *sig*.
4253  *
4254  *		**-EAGAIN** if bpf program can try again.
4255  *
4256  * u64 bpf_jiffies64(void)
4257  *	Description
4258  *		Obtain the 64bit jiffies
4259  *	Return
4260  *		The 64 bit jiffies
4261  *
4262  * long bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags)
4263  *	Description
4264  *		For an eBPF program attached to a perf event, retrieve the
4265  *		branch records (**struct perf_branch_entry**) associated to *ctx*
4266  *		and store it in the buffer pointed by *buf* up to size
4267  *		*size* bytes.
4268  *	Return
4269  *		On success, number of bytes written to *buf*. On error, a
4270  *		negative value.
4271  *
4272  *		The *flags* can be set to **BPF_F_GET_BRANCH_RECORDS_SIZE** to
4273  *		instead return the number of bytes required to store all the
4274  *		branch entries. If this flag is set, *buf* may be NULL.
4275  *
4276  *		**-EINVAL** if arguments invalid or **size** not a multiple
4277  *		of **sizeof**\ (**struct perf_branch_entry**\ ).
4278  *
4279  *		**-ENOENT** if architecture does not support branch records.
4280  *
4281  * long bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size)
4282  *	Description
4283  *		Returns 0 on success, values for *pid* and *tgid* as seen from the current
4284  *		*namespace* will be returned in *nsdata*.
4285  *	Return
4286  *		0 on success, or one of the following in case of failure:
4287  *
4288  *		**-EINVAL** if dev and inum supplied don't match dev_t and inode number
4289  *              with nsfs of current task, or if dev conversion to dev_t lost high bits.
4290  *
4291  *		**-ENOENT** if pidns does not exists for the current task.
4292  *
4293  * long bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
4294  *	Description
4295  *		Write raw *data* blob into a special BPF perf event held by
4296  *		*map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
4297  *		event must have the following attributes: **PERF_SAMPLE_RAW**
4298  *		as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
4299  *		**PERF_COUNT_SW_BPF_OUTPUT** as **config**.
4300  *
4301  *		The *flags* are used to indicate the index in *map* for which
4302  *		the value must be put, masked with **BPF_F_INDEX_MASK**.
4303  *		Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
4304  *		to indicate that the index of the current CPU core should be
4305  *		used.
4306  *
4307  *		The value to write, of *size*, is passed through eBPF stack and
4308  *		pointed by *data*.
4309  *
4310  *		*ctx* is a pointer to in-kernel struct xdp_buff.
4311  *
4312  *		This helper is similar to **bpf_perf_eventoutput**\ () but
4313  *		restricted to raw_tracepoint bpf programs.
4314  *	Return
4315  *		0 on success, or a negative error in case of failure.
4316  *
4317  * u64 bpf_get_netns_cookie(void *ctx)
4318  * 	Description
4319  * 		Retrieve the cookie (generated by the kernel) of the network
4320  * 		namespace the input *ctx* is associated with. The network
4321  * 		namespace cookie remains stable for its lifetime and provides
4322  * 		a global identifier that can be assumed unique. If *ctx* is
4323  * 		NULL, then the helper returns the cookie for the initial
4324  * 		network namespace. The cookie itself is very similar to that
4325  * 		of **bpf_get_socket_cookie**\ () helper, but for network
4326  * 		namespaces instead of sockets.
4327  * 	Return
4328  * 		A 8-byte long opaque number.
4329  *
4330  * u64 bpf_get_current_ancestor_cgroup_id(int ancestor_level)
4331  * 	Description
4332  * 		Return id of cgroup v2 that is ancestor of the cgroup associated
4333  * 		with the current task at the *ancestor_level*. The root cgroup
4334  * 		is at *ancestor_level* zero and each step down the hierarchy
4335  * 		increments the level. If *ancestor_level* == level of cgroup
4336  * 		associated with the current task, then return value will be the
4337  * 		same as that of **bpf_get_current_cgroup_id**\ ().
4338  *
4339  * 		The helper is useful to implement policies based on cgroups
4340  * 		that are upper in hierarchy than immediate cgroup associated
4341  * 		with the current task.
4342  *
4343  * 		The format of returned id and helper limitations are same as in
4344  * 		**bpf_get_current_cgroup_id**\ ().
4345  * 	Return
4346  * 		The id is returned or 0 in case the id could not be retrieved.
4347  *
4348  * long bpf_sk_assign(struct sk_buff *skb, void *sk, u64 flags)
4349  *	Description
4350  *		Helper is overloaded depending on BPF program type. This
4351  *		description applies to **BPF_PROG_TYPE_SCHED_CLS** and
4352  *		**BPF_PROG_TYPE_SCHED_ACT** programs.
4353  *
4354  *		Assign the *sk* to the *skb*. When combined with appropriate
4355  *		routing configuration to receive the packet towards the socket,
4356  *		will cause *skb* to be delivered to the specified socket.
4357  *		Subsequent redirection of *skb* via  **bpf_redirect**\ (),
4358  *		**bpf_clone_redirect**\ () or other methods outside of BPF may
4359  *		interfere with successful delivery to the socket.
4360  *
4361  *		This operation is only valid from TC ingress path.
4362  *
4363  *		The *flags* argument must be zero.
4364  *	Return
4365  *		0 on success, or a negative error in case of failure:
4366  *
4367  *		**-EINVAL** if specified *flags* are not supported.
4368  *
4369  *		**-ENOENT** if the socket is unavailable for assignment.
4370  *
4371  *		**-ENETUNREACH** if the socket is unreachable (wrong netns).
4372  *
4373  *		**-EOPNOTSUPP** if the operation is not supported, for example
4374  *		a call from outside of TC ingress.
4375  *
4376  * long bpf_sk_assign(struct bpf_sk_lookup *ctx, struct bpf_sock *sk, u64 flags)
4377  *	Description
4378  *		Helper is overloaded depending on BPF program type. This
4379  *		description applies to **BPF_PROG_TYPE_SK_LOOKUP** programs.
4380  *
4381  *		Select the *sk* as a result of a socket lookup.
4382  *
4383  *		For the operation to succeed passed socket must be compatible
4384  *		with the packet description provided by the *ctx* object.
4385  *
4386  *		L4 protocol (**IPPROTO_TCP** or **IPPROTO_UDP**) must
4387  *		be an exact match. While IP family (**AF_INET** or
4388  *		**AF_INET6**) must be compatible, that is IPv6 sockets
4389  *		that are not v6-only can be selected for IPv4 packets.
4390  *
4391  *		Only TCP listeners and UDP unconnected sockets can be
4392  *		selected. *sk* can also be NULL to reset any previous
4393  *		selection.
4394  *
4395  *		*flags* argument can combination of following values:
4396  *
4397  *		* **BPF_SK_LOOKUP_F_REPLACE** to override the previous
4398  *		  socket selection, potentially done by a BPF program
4399  *		  that ran before us.
4400  *
4401  *		* **BPF_SK_LOOKUP_F_NO_REUSEPORT** to skip
4402  *		  load-balancing within reuseport group for the socket
4403  *		  being selected.
4404  *
4405  *		On success *ctx->sk* will point to the selected socket.
4406  *
4407  *	Return
4408  *		0 on success, or a negative errno in case of failure.
4409  *
4410  *		* **-EAFNOSUPPORT** if socket family (*sk->family*) is
4411  *		  not compatible with packet family (*ctx->family*).
4412  *
4413  *		* **-EEXIST** if socket has been already selected,
4414  *		  potentially by another program, and
4415  *		  **BPF_SK_LOOKUP_F_REPLACE** flag was not specified.
4416  *
4417  *		* **-EINVAL** if unsupported flags were specified.
4418  *
4419  *		* **-EPROTOTYPE** if socket L4 protocol
4420  *		  (*sk->protocol*) doesn't match packet protocol
4421  *		  (*ctx->protocol*).
4422  *
4423  *		* **-ESOCKTNOSUPPORT** if socket is not in allowed
4424  *		  state (TCP listening or UDP unconnected).
4425  *
4426  * u64 bpf_ktime_get_boot_ns(void)
4427  * 	Description
4428  * 		Return the time elapsed since system boot, in nanoseconds.
4429  * 		Does include the time the system was suspended.
4430  * 		See: **clock_gettime**\ (**CLOCK_BOOTTIME**)
4431  * 	Return
4432  * 		Current *ktime*.
4433  *
4434  * long bpf_seq_printf(struct seq_file *m, const char *fmt, u32 fmt_size, const void *data, u32 data_len)
4435  * 	Description
4436  * 		**bpf_seq_printf**\ () uses seq_file **seq_printf**\ () to print
4437  * 		out the format string.
4438  * 		The *m* represents the seq_file. The *fmt* and *fmt_size* are for
4439  * 		the format string itself. The *data* and *data_len* are format string
4440  * 		arguments. The *data* are a **u64** array and corresponding format string
4441  * 		values are stored in the array. For strings and pointers where pointees
4442  * 		are accessed, only the pointer values are stored in the *data* array.
4443  * 		The *data_len* is the size of *data* in bytes - must be a multiple of 8.
4444  *
4445  *		Formats **%s**, **%p{i,I}{4,6}** requires to read kernel memory.
4446  *		Reading kernel memory may fail due to either invalid address or
4447  *		valid address but requiring a major memory fault. If reading kernel memory
4448  *		fails, the string for **%s** will be an empty string, and the ip
4449  *		address for **%p{i,I}{4,6}** will be 0. Not returning error to
4450  *		bpf program is consistent with what **bpf_trace_printk**\ () does for now.
4451  * 	Return
4452  * 		0 on success, or a negative error in case of failure:
4453  *
4454  *		**-EBUSY** if per-CPU memory copy buffer is busy, can try again
4455  *		by returning 1 from bpf program.
4456  *
4457  *		**-EINVAL** if arguments are invalid, or if *fmt* is invalid/unsupported.
4458  *
4459  *		**-E2BIG** if *fmt* contains too many format specifiers.
4460  *
4461  *		**-EOVERFLOW** if an overflow happened: The same object will be tried again.
4462  *
4463  * long bpf_seq_write(struct seq_file *m, const void *data, u32 len)
4464  * 	Description
4465  * 		**bpf_seq_write**\ () uses seq_file **seq_write**\ () to write the data.
4466  * 		The *m* represents the seq_file. The *data* and *len* represent the
4467  * 		data to write in bytes.
4468  * 	Return
4469  * 		0 on success, or a negative error in case of failure:
4470  *
4471  *		**-EOVERFLOW** if an overflow happened: The same object will be tried again.
4472  *
4473  * u64 bpf_sk_cgroup_id(void *sk)
4474  *	Description
4475  *		Return the cgroup v2 id of the socket *sk*.
4476  *
4477  *		*sk* must be a non-**NULL** pointer to a socket, e.g. one
4478  *		returned from **bpf_sk_lookup_xxx**\ (),
4479  *		**bpf_sk_fullsock**\ (), etc. The format of returned id is
4480  *		same as in **bpf_skb_cgroup_id**\ ().
4481  *
4482  *		This helper is available only if the kernel was compiled with
4483  *		the **CONFIG_SOCK_CGROUP_DATA** configuration option.
4484  *	Return
4485  *		The id is returned or 0 in case the id could not be retrieved.
4486  *
4487  * u64 bpf_sk_ancestor_cgroup_id(void *sk, int ancestor_level)
4488  *	Description
4489  *		Return id of cgroup v2 that is ancestor of cgroup associated
4490  *		with the *sk* at the *ancestor_level*.  The root cgroup is at
4491  *		*ancestor_level* zero and each step down the hierarchy
4492  *		increments the level. If *ancestor_level* == level of cgroup
4493  *		associated with *sk*, then return value will be same as that
4494  *		of **bpf_sk_cgroup_id**\ ().
4495  *
4496  *		The helper is useful to implement policies based on cgroups
4497  *		that are upper in hierarchy than immediate cgroup associated
4498  *		with *sk*.
4499  *
4500  *		The format of returned id and helper limitations are same as in
4501  *		**bpf_sk_cgroup_id**\ ().
4502  *	Return
4503  *		The id is returned or 0 in case the id could not be retrieved.
4504  *
4505  * long bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags)
4506  * 	Description
4507  * 		Copy *size* bytes from *data* into a ring buffer *ringbuf*.
4508  * 		If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
4509  * 		of new data availability is sent.
4510  * 		If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
4511  * 		of new data availability is sent unconditionally.
4512  * 		If **0** is specified in *flags*, an adaptive notification
4513  * 		of new data availability is sent.
4514  *
4515  * 		An adaptive notification is a notification sent whenever the user-space
4516  * 		process has caught up and consumed all available payloads. In case the user-space
4517  * 		process is still processing a previous payload, then no notification is needed
4518  * 		as it will process the newly added payload automatically.
4519  * 	Return
4520  * 		0 on success, or a negative error in case of failure.
4521  *
4522  * void *bpf_ringbuf_reserve(void *ringbuf, u64 size, u64 flags)
4523  * 	Description
4524  * 		Reserve *size* bytes of payload in a ring buffer *ringbuf*.
4525  * 		*flags* must be 0.
4526  * 	Return
4527  * 		Valid pointer with *size* bytes of memory available; NULL,
4528  * 		otherwise.
4529  *
4530  * void bpf_ringbuf_submit(void *data, u64 flags)
4531  * 	Description
4532  * 		Submit reserved ring buffer sample, pointed to by *data*.
4533  * 		If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
4534  * 		of new data availability is sent.
4535  * 		If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
4536  * 		of new data availability is sent unconditionally.
4537  * 		If **0** is specified in *flags*, an adaptive notification
4538  * 		of new data availability is sent.
4539  *
4540  * 		See 'bpf_ringbuf_output()' for the definition of adaptive notification.
4541  * 	Return
4542  * 		Nothing. Always succeeds.
4543  *
4544  * void bpf_ringbuf_discard(void *data, u64 flags)
4545  * 	Description
4546  * 		Discard reserved ring buffer sample, pointed to by *data*.
4547  * 		If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
4548  * 		of new data availability is sent.
4549  * 		If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
4550  * 		of new data availability is sent unconditionally.
4551  * 		If **0** is specified in *flags*, an adaptive notification
4552  * 		of new data availability is sent.
4553  *
4554  * 		See 'bpf_ringbuf_output()' for the definition of adaptive notification.
4555  * 	Return
4556  * 		Nothing. Always succeeds.
4557  *
4558  * u64 bpf_ringbuf_query(void *ringbuf, u64 flags)
4559  *	Description
4560  *		Query various characteristics of provided ring buffer. What
4561  *		exactly is queries is determined by *flags*:
4562  *
4563  *		* **BPF_RB_AVAIL_DATA**: Amount of data not yet consumed.
4564  *		* **BPF_RB_RING_SIZE**: The size of ring buffer.
4565  *		* **BPF_RB_CONS_POS**: Consumer position (can wrap around).
4566  *		* **BPF_RB_PROD_POS**: Producer(s) position (can wrap around).
4567  *
4568  *		Data returned is just a momentary snapshot of actual values
4569  *		and could be inaccurate, so this facility should be used to
4570  *		power heuristics and for reporting, not to make 100% correct
4571  *		calculation.
4572  *	Return
4573  *		Requested value, or 0, if *flags* are not recognized.
4574  *
4575  * long bpf_csum_level(struct sk_buff *skb, u64 level)
4576  * 	Description
4577  * 		Change the skbs checksum level by one layer up or down, or
4578  * 		reset it entirely to none in order to have the stack perform
4579  * 		checksum validation. The level is applicable to the following
4580  * 		protocols: TCP, UDP, GRE, SCTP, FCOE. For example, a decap of
4581  * 		| ETH | IP | UDP | GUE | IP | TCP | into | ETH | IP | TCP |
4582  * 		through **bpf_skb_adjust_room**\ () helper with passing in
4583  * 		**BPF_F_ADJ_ROOM_NO_CSUM_RESET** flag would require one	call
4584  * 		to **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_DEC** since
4585  * 		the UDP header is removed. Similarly, an encap of the latter
4586  * 		into the former could be accompanied by a helper call to
4587  * 		**bpf_csum_level**\ () with **BPF_CSUM_LEVEL_INC** if the
4588  * 		skb is still intended to be processed in higher layers of the
4589  * 		stack instead of just egressing at tc.
4590  *
4591  * 		There are three supported level settings at this time:
4592  *
4593  * 		* **BPF_CSUM_LEVEL_INC**: Increases skb->csum_level for skbs
4594  * 		  with CHECKSUM_UNNECESSARY.
4595  * 		* **BPF_CSUM_LEVEL_DEC**: Decreases skb->csum_level for skbs
4596  * 		  with CHECKSUM_UNNECESSARY.
4597  * 		* **BPF_CSUM_LEVEL_RESET**: Resets skb->csum_level to 0 and
4598  * 		  sets CHECKSUM_NONE to force checksum validation by the stack.
4599  * 		* **BPF_CSUM_LEVEL_QUERY**: No-op, returns the current
4600  * 		  skb->csum_level.
4601  * 	Return
4602  * 		0 on success, or a negative error in case of failure. In the
4603  * 		case of **BPF_CSUM_LEVEL_QUERY**, the current skb->csum_level
4604  * 		is returned or the error code -EACCES in case the skb is not
4605  * 		subject to CHECKSUM_UNNECESSARY.
4606  *
4607  * struct tcp6_sock *bpf_skc_to_tcp6_sock(void *sk)
4608  *	Description
4609  *		Dynamically cast a *sk* pointer to a *tcp6_sock* pointer.
4610  *	Return
4611  *		*sk* if casting is valid, or **NULL** otherwise.
4612  *
4613  * struct tcp_sock *bpf_skc_to_tcp_sock(void *sk)
4614  *	Description
4615  *		Dynamically cast a *sk* pointer to a *tcp_sock* pointer.
4616  *	Return
4617  *		*sk* if casting is valid, or **NULL** otherwise.
4618  *
4619  * struct tcp_timewait_sock *bpf_skc_to_tcp_timewait_sock(void *sk)
4620  * 	Description
4621  *		Dynamically cast a *sk* pointer to a *tcp_timewait_sock* pointer.
4622  *	Return
4623  *		*sk* if casting is valid, or **NULL** otherwise.
4624  *
4625  * struct tcp_request_sock *bpf_skc_to_tcp_request_sock(void *sk)
4626  * 	Description
4627  *		Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer.
4628  *	Return
4629  *		*sk* if casting is valid, or **NULL** otherwise.
4630  *
4631  * struct udp6_sock *bpf_skc_to_udp6_sock(void *sk)
4632  * 	Description
4633  *		Dynamically cast a *sk* pointer to a *udp6_sock* pointer.
4634  *	Return
4635  *		*sk* if casting is valid, or **NULL** otherwise.
4636  *
4637  * long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags)
4638  *	Description
4639  *		Return a user or a kernel stack in bpf program provided buffer.
4640  *		Note: the user stack will only be populated if the *task* is
4641  *		the current task; all other tasks will return -EOPNOTSUPP.
4642  *		To achieve this, the helper needs *task*, which is a valid
4643  *		pointer to **struct task_struct**. To store the stacktrace, the
4644  *		bpf program provides *buf* with a nonnegative *size*.
4645  *
4646  *		The last argument, *flags*, holds the number of stack frames to
4647  *		skip (from 0 to 255), masked with
4648  *		**BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
4649  *		the following flags:
4650  *
4651  *		**BPF_F_USER_STACK**
4652  *			Collect a user space stack instead of a kernel stack.
4653  *			The *task* must be the current task.
4654  *		**BPF_F_USER_BUILD_ID**
4655  *			Collect buildid+offset instead of ips for user stack,
4656  *			only valid if **BPF_F_USER_STACK** is also specified.
4657  *
4658  *		**bpf_get_task_stack**\ () can collect up to
4659  *		**PERF_MAX_STACK_DEPTH** both kernel and user frames, subject
4660  *		to sufficient large buffer size. Note that
4661  *		this limit can be controlled with the **sysctl** program, and
4662  *		that it should be manually increased in order to profile long
4663  *		user stacks (such as stacks for Java programs). To do so, use:
4664  *
4665  *		::
4666  *
4667  *			# sysctl kernel.perf_event_max_stack=<new value>
4668  *	Return
4669  * 		The non-negative copied *buf* length equal to or less than
4670  * 		*size* on success, or a negative error in case of failure.
4671  *
4672  * long bpf_load_hdr_opt(struct bpf_sock_ops *skops, void *searchby_res, u32 len, u64 flags)
4673  *	Description
4674  *		Load header option.  Support reading a particular TCP header
4675  *		option for bpf program (**BPF_PROG_TYPE_SOCK_OPS**).
4676  *
4677  *		If *flags* is 0, it will search the option from the
4678  *		*skops*\ **->skb_data**.  The comment in **struct bpf_sock_ops**
4679  *		has details on what skb_data contains under different
4680  *		*skops*\ **->op**.
4681  *
4682  *		The first byte of the *searchby_res* specifies the
4683  *		kind that it wants to search.
4684  *
4685  *		If the searching kind is an experimental kind
4686  *		(i.e. 253 or 254 according to RFC6994).  It also
4687  *		needs to specify the "magic" which is either
4688  *		2 bytes or 4 bytes.  It then also needs to
4689  *		specify the size of the magic by using
4690  *		the 2nd byte which is "kind-length" of a TCP
4691  *		header option and the "kind-length" also
4692  *		includes the first 2 bytes "kind" and "kind-length"
4693  *		itself as a normal TCP header option also does.
4694  *
4695  *		For example, to search experimental kind 254 with
4696  *		2 byte magic 0xeB9F, the searchby_res should be
4697  *		[ 254, 4, 0xeB, 0x9F, 0, 0, .... 0 ].
4698  *
4699  *		To search for the standard window scale option (3),
4700  *		the *searchby_res* should be [ 3, 0, 0, .... 0 ].
4701  *		Note, kind-length must be 0 for regular option.
4702  *
4703  *		Searching for No-Op (0) and End-of-Option-List (1) are
4704  *		not supported.
4705  *
4706  *		*len* must be at least 2 bytes which is the minimal size
4707  *		of a header option.
4708  *
4709  *		Supported flags:
4710  *
4711  *		* **BPF_LOAD_HDR_OPT_TCP_SYN** to search from the
4712  *		  saved_syn packet or the just-received syn packet.
4713  *
4714  *	Return
4715  *		> 0 when found, the header option is copied to *searchby_res*.
4716  *		The return value is the total length copied. On failure, a
4717  *		negative error code is returned:
4718  *
4719  *		**-EINVAL** if a parameter is invalid.
4720  *
4721  *		**-ENOMSG** if the option is not found.
4722  *
4723  *		**-ENOENT** if no syn packet is available when
4724  *		**BPF_LOAD_HDR_OPT_TCP_SYN** is used.
4725  *
4726  *		**-ENOSPC** if there is not enough space.  Only *len* number of
4727  *		bytes are copied.
4728  *
4729  *		**-EFAULT** on failure to parse the header options in the
4730  *		packet.
4731  *
4732  *		**-EPERM** if the helper cannot be used under the current
4733  *		*skops*\ **->op**.
4734  *
4735  * long bpf_store_hdr_opt(struct bpf_sock_ops *skops, const void *from, u32 len, u64 flags)
4736  *	Description
4737  *		Store header option.  The data will be copied
4738  *		from buffer *from* with length *len* to the TCP header.
4739  *
4740  *		The buffer *from* should have the whole option that
4741  *		includes the kind, kind-length, and the actual
4742  *		option data.  The *len* must be at least kind-length
4743  *		long.  The kind-length does not have to be 4 byte
4744  *		aligned.  The kernel will take care of the padding
4745  *		and setting the 4 bytes aligned value to th->doff.
4746  *
4747  *		This helper will check for duplicated option
4748  *		by searching the same option in the outgoing skb.
4749  *
4750  *		This helper can only be called during
4751  *		**BPF_SOCK_OPS_WRITE_HDR_OPT_CB**.
4752  *
4753  *	Return
4754  *		0 on success, or negative error in case of failure:
4755  *
4756  *		**-EINVAL** If param is invalid.
4757  *
4758  *		**-ENOSPC** if there is not enough space in the header.
4759  *		Nothing has been written
4760  *
4761  *		**-EEXIST** if the option already exists.
4762  *
4763  *		**-EFAULT** on failure to parse the existing header options.
4764  *
4765  *		**-EPERM** if the helper cannot be used under the current
4766  *		*skops*\ **->op**.
4767  *
4768  * long bpf_reserve_hdr_opt(struct bpf_sock_ops *skops, u32 len, u64 flags)
4769  *	Description
4770  *		Reserve *len* bytes for the bpf header option.  The
4771  *		space will be used by **bpf_store_hdr_opt**\ () later in
4772  *		**BPF_SOCK_OPS_WRITE_HDR_OPT_CB**.
4773  *
4774  *		If **bpf_reserve_hdr_opt**\ () is called multiple times,
4775  *		the total number of bytes will be reserved.
4776  *
4777  *		This helper can only be called during
4778  *		**BPF_SOCK_OPS_HDR_OPT_LEN_CB**.
4779  *
4780  *	Return
4781  *		0 on success, or negative error in case of failure:
4782  *
4783  *		**-EINVAL** if a parameter is invalid.
4784  *
4785  *		**-ENOSPC** if there is not enough space in the header.
4786  *
4787  *		**-EPERM** if the helper cannot be used under the current
4788  *		*skops*\ **->op**.
4789  *
4790  * void *bpf_inode_storage_get(struct bpf_map *map, void *inode, void *value, u64 flags)
4791  *	Description
4792  *		Get a bpf_local_storage from an *inode*.
4793  *
4794  *		Logically, it could be thought of as getting the value from
4795  *		a *map* with *inode* as the **key**.  From this
4796  *		perspective,  the usage is not much different from
4797  *		**bpf_map_lookup_elem**\ (*map*, **&**\ *inode*) except this
4798  *		helper enforces the key must be an inode and the map must also
4799  *		be a **BPF_MAP_TYPE_INODE_STORAGE**.
4800  *
4801  *		Underneath, the value is stored locally at *inode* instead of
4802  *		the *map*.  The *map* is used as the bpf-local-storage
4803  *		"type". The bpf-local-storage "type" (i.e. the *map*) is
4804  *		searched against all bpf_local_storage residing at *inode*.
4805  *
4806  *		An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be
4807  *		used such that a new bpf_local_storage will be
4808  *		created if one does not exist.  *value* can be used
4809  *		together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify
4810  *		the initial value of a bpf_local_storage.  If *value* is
4811  *		**NULL**, the new bpf_local_storage will be zero initialized.
4812  *	Return
4813  *		A bpf_local_storage pointer is returned on success.
4814  *
4815  *		**NULL** if not found or there was an error in adding
4816  *		a new bpf_local_storage.
4817  *
4818  * int bpf_inode_storage_delete(struct bpf_map *map, void *inode)
4819  *	Description
4820  *		Delete a bpf_local_storage from an *inode*.
4821  *	Return
4822  *		0 on success.
4823  *
4824  *		**-ENOENT** if the bpf_local_storage cannot be found.
4825  *
4826  * long bpf_d_path(struct path *path, char *buf, u32 sz)
4827  *	Description
4828  *		Return full path for given **struct path** object, which
4829  *		needs to be the kernel BTF *path* object. The path is
4830  *		returned in the provided buffer *buf* of size *sz* and
4831  *		is zero terminated.
4832  *
4833  *	Return
4834  *		On success, the strictly positive length of the string,
4835  *		including the trailing NUL character. On error, a negative
4836  *		value.
4837  *
4838  * long bpf_copy_from_user(void *dst, u32 size, const void *user_ptr)
4839  * 	Description
4840  * 		Read *size* bytes from user space address *user_ptr* and store
4841  * 		the data in *dst*. This is a wrapper of **copy_from_user**\ ().
4842  * 	Return
4843  * 		0 on success, or a negative error in case of failure.
4844  *
4845  * long bpf_snprintf_btf(char *str, u32 str_size, struct btf_ptr *ptr, u32 btf_ptr_size, u64 flags)
4846  *	Description
4847  *		Use BTF to store a string representation of *ptr*->ptr in *str*,
4848  *		using *ptr*->type_id.  This value should specify the type
4849  *		that *ptr*->ptr points to. LLVM __builtin_btf_type_id(type, 1)
4850  *		can be used to look up vmlinux BTF type ids. Traversing the
4851  *		data structure using BTF, the type information and values are
4852  *		stored in the first *str_size* - 1 bytes of *str*.  Safe copy of
4853  *		the pointer data is carried out to avoid kernel crashes during
4854  *		operation.  Smaller types can use string space on the stack;
4855  *		larger programs can use map data to store the string
4856  *		representation.
4857  *
4858  *		The string can be subsequently shared with userspace via
4859  *		bpf_perf_event_output() or ring buffer interfaces.
4860  *		bpf_trace_printk() is to be avoided as it places too small
4861  *		a limit on string size to be useful.
4862  *
4863  *		*flags* is a combination of
4864  *
4865  *		**BTF_F_COMPACT**
4866  *			no formatting around type information
4867  *		**BTF_F_NONAME**
4868  *			no struct/union member names/types
4869  *		**BTF_F_PTR_RAW**
4870  *			show raw (unobfuscated) pointer values;
4871  *			equivalent to printk specifier %px.
4872  *		**BTF_F_ZERO**
4873  *			show zero-valued struct/union members; they
4874  *			are not displayed by default
4875  *
4876  *	Return
4877  *		The number of bytes that were written (or would have been
4878  *		written if output had to be truncated due to string size),
4879  *		or a negative error in cases of failure.
4880  *
4881  * long bpf_seq_printf_btf(struct seq_file *m, struct btf_ptr *ptr, u32 ptr_size, u64 flags)
4882  *	Description
4883  *		Use BTF to write to seq_write a string representation of
4884  *		*ptr*->ptr, using *ptr*->type_id as per bpf_snprintf_btf().
4885  *		*flags* are identical to those used for bpf_snprintf_btf.
4886  *	Return
4887  *		0 on success or a negative error in case of failure.
4888  *
4889  * u64 bpf_skb_cgroup_classid(struct sk_buff *skb)
4890  * 	Description
4891  * 		See **bpf_get_cgroup_classid**\ () for the main description.
4892  * 		This helper differs from **bpf_get_cgroup_classid**\ () in that
4893  * 		the cgroup v1 net_cls class is retrieved only from the *skb*'s
4894  * 		associated socket instead of the current process.
4895  * 	Return
4896  * 		The id is returned or 0 in case the id could not be retrieved.
4897  *
4898  * long bpf_redirect_neigh(u32 ifindex, struct bpf_redir_neigh *params, int plen, u64 flags)
4899  * 	Description
4900  * 		Redirect the packet to another net device of index *ifindex*
4901  * 		and fill in L2 addresses from neighboring subsystem. This helper
4902  * 		is somewhat similar to **bpf_redirect**\ (), except that it
4903  * 		populates L2 addresses as well, meaning, internally, the helper
4904  * 		relies on the neighbor lookup for the L2 address of the nexthop.
4905  *
4906  * 		The helper will perform a FIB lookup based on the skb's
4907  * 		networking header to get the address of the next hop, unless
4908  * 		this is supplied by the caller in the *params* argument. The
4909  * 		*plen* argument indicates the len of *params* and should be set
4910  * 		to 0 if *params* is NULL.
4911  *
4912  * 		The *flags* argument is reserved and must be 0. The helper is
4913  * 		currently only supported for tc BPF program types, and enabled
4914  * 		for IPv4 and IPv6 protocols.
4915  * 	Return
4916  * 		The helper returns **TC_ACT_REDIRECT** on success or
4917  * 		**TC_ACT_SHOT** on error.
4918  *
4919  * void *bpf_per_cpu_ptr(const void *percpu_ptr, u32 cpu)
4920  *     Description
4921  *             Take a pointer to a percpu ksym, *percpu_ptr*, and return a
4922  *             pointer to the percpu kernel variable on *cpu*. A ksym is an
4923  *             extern variable decorated with '__ksym'. For ksym, there is a
4924  *             global var (either static or global) defined of the same name
4925  *             in the kernel. The ksym is percpu if the global var is percpu.
4926  *             The returned pointer points to the global percpu var on *cpu*.
4927  *
4928  *             bpf_per_cpu_ptr() has the same semantic as per_cpu_ptr() in the
4929  *             kernel, except that bpf_per_cpu_ptr() may return NULL. This
4930  *             happens if *cpu* is larger than nr_cpu_ids. The caller of
4931  *             bpf_per_cpu_ptr() must check the returned value.
4932  *     Return
4933  *             A pointer pointing to the kernel percpu variable on *cpu*, or
4934  *             NULL, if *cpu* is invalid.
4935  *
4936  * void *bpf_this_cpu_ptr(const void *percpu_ptr)
4937  *	Description
4938  *		Take a pointer to a percpu ksym, *percpu_ptr*, and return a
4939  *		pointer to the percpu kernel variable on this cpu. See the
4940  *		description of 'ksym' in **bpf_per_cpu_ptr**\ ().
4941  *
4942  *		bpf_this_cpu_ptr() has the same semantic as this_cpu_ptr() in
4943  *		the kernel. Different from **bpf_per_cpu_ptr**\ (), it would
4944  *		never return NULL.
4945  *	Return
4946  *		A pointer pointing to the kernel percpu variable on this cpu.
4947  *
4948  * long bpf_redirect_peer(u32 ifindex, u64 flags)
4949  * 	Description
4950  * 		Redirect the packet to another net device of index *ifindex*.
4951  * 		This helper is somewhat similar to **bpf_redirect**\ (), except
4952  * 		that the redirection happens to the *ifindex*' peer device and
4953  * 		the netns switch takes place from ingress to ingress without
4954  * 		going through the CPU's backlog queue.
4955  *
4956  * 		The *flags* argument is reserved and must be 0. The helper is
4957  * 		currently only supported for tc BPF program types at the
4958  * 		ingress hook and for veth and netkit target device types. The
4959  * 		peer device must reside in a different network namespace.
4960  * 	Return
4961  * 		The helper returns **TC_ACT_REDIRECT** on success or
4962  * 		**TC_ACT_SHOT** on error.
4963  *
4964  * void *bpf_task_storage_get(struct bpf_map *map, struct task_struct *task, void *value, u64 flags)
4965  *	Description
4966  *		Get a bpf_local_storage from the *task*.
4967  *
4968  *		Logically, it could be thought of as getting the value from
4969  *		a *map* with *task* as the **key**.  From this
4970  *		perspective,  the usage is not much different from
4971  *		**bpf_map_lookup_elem**\ (*map*, **&**\ *task*) except this
4972  *		helper enforces the key must be a task_struct and the map must also
4973  *		be a **BPF_MAP_TYPE_TASK_STORAGE**.
4974  *
4975  *		Underneath, the value is stored locally at *task* instead of
4976  *		the *map*.  The *map* is used as the bpf-local-storage
4977  *		"type". The bpf-local-storage "type" (i.e. the *map*) is
4978  *		searched against all bpf_local_storage residing at *task*.
4979  *
4980  *		An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be
4981  *		used such that a new bpf_local_storage will be
4982  *		created if one does not exist.  *value* can be used
4983  *		together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify
4984  *		the initial value of a bpf_local_storage.  If *value* is
4985  *		**NULL**, the new bpf_local_storage will be zero initialized.
4986  *	Return
4987  *		A bpf_local_storage pointer is returned on success.
4988  *
4989  *		**NULL** if not found or there was an error in adding
4990  *		a new bpf_local_storage.
4991  *
4992  * long bpf_task_storage_delete(struct bpf_map *map, struct task_struct *task)
4993  *	Description
4994  *		Delete a bpf_local_storage from a *task*.
4995  *	Return
4996  *		0 on success.
4997  *
4998  *		**-ENOENT** if the bpf_local_storage cannot be found.
4999  *
5000  * struct task_struct *bpf_get_current_task_btf(void)
5001  *	Description
5002  *		Return a BTF pointer to the "current" task.
5003  *		This pointer can also be used in helpers that accept an
5004  *		*ARG_PTR_TO_BTF_ID* of type *task_struct*.
5005  *	Return
5006  *		Pointer to the current task.
5007  *
5008  * long bpf_bprm_opts_set(struct linux_binprm *bprm, u64 flags)
5009  *	Description
5010  *		Set or clear certain options on *bprm*:
5011  *
5012  *		**BPF_F_BPRM_SECUREEXEC** Set the secureexec bit
5013  *		which sets the **AT_SECURE** auxv for glibc. The bit
5014  *		is cleared if the flag is not specified.
5015  *	Return
5016  *		**-EINVAL** if invalid *flags* are passed, zero otherwise.
5017  *
5018  * u64 bpf_ktime_get_coarse_ns(void)
5019  * 	Description
5020  * 		Return a coarse-grained version of the time elapsed since
5021  * 		system boot, in nanoseconds. Does not include time the system
5022  * 		was suspended.
5023  *
5024  * 		See: **clock_gettime**\ (**CLOCK_MONOTONIC_COARSE**)
5025  * 	Return
5026  * 		Current *ktime*.
5027  *
5028  * long bpf_ima_inode_hash(struct inode *inode, void *dst, u32 size)
5029  *	Description
5030  *		Returns the stored IMA hash of the *inode* (if it's available).
5031  *		If the hash is larger than *size*, then only *size*
5032  *		bytes will be copied to *dst*
5033  *	Return
5034  *		The **hash_algo** is returned on success,
5035  *		**-EOPNOTSUPP** if IMA is disabled or **-EINVAL** if
5036  *		invalid arguments are passed.
5037  *
5038  * struct socket *bpf_sock_from_file(struct file *file)
5039  *	Description
5040  *		If the given file represents a socket, returns the associated
5041  *		socket.
5042  *	Return
5043  *		A pointer to a struct socket on success or NULL if the file is
5044  *		not a socket.
5045  *
5046  * long bpf_check_mtu(void *ctx, u32 ifindex, u32 *mtu_len, s32 len_diff, u64 flags)
5047  *	Description
5048  *		Check packet size against exceeding MTU of net device (based
5049  *		on *ifindex*).  This helper will likely be used in combination
5050  *		with helpers that adjust/change the packet size.
5051  *
5052  *		The argument *len_diff* can be used for querying with a planned
5053  *		size change. This allows to check MTU prior to changing packet
5054  *		ctx. Providing a *len_diff* adjustment that is larger than the
5055  *		actual packet size (resulting in negative packet size) will in
5056  *		principle not exceed the MTU, which is why it is not considered
5057  *		a failure.  Other BPF helpers are needed for performing the
5058  *		planned size change; therefore the responsibility for catching
5059  *		a negative packet size belongs in those helpers.
5060  *
5061  *		Specifying *ifindex* zero means the MTU check is performed
5062  *		against the current net device.  This is practical if this isn't
5063  *		used prior to redirect.
5064  *
5065  *		On input *mtu_len* must be a valid pointer, else verifier will
5066  *		reject BPF program.  If the value *mtu_len* is initialized to
5067  *		zero then the ctx packet size is use.  When value *mtu_len* is
5068  *		provided as input this specify the L3 length that the MTU check
5069  *		is done against. Remember XDP and TC length operate at L2, but
5070  *		this value is L3 as this correlate to MTU and IP-header tot_len
5071  *		values which are L3 (similar behavior as bpf_fib_lookup).
5072  *
5073  *		The Linux kernel route table can configure MTUs on a more
5074  *		specific per route level, which is not provided by this helper.
5075  *		For route level MTU checks use the **bpf_fib_lookup**\ ()
5076  *		helper.
5077  *
5078  *		*ctx* is either **struct xdp_md** for XDP programs or
5079  *		**struct sk_buff** for tc cls_act programs.
5080  *
5081  *		The *flags* argument can be a combination of one or more of the
5082  *		following values:
5083  *
5084  *		**BPF_MTU_CHK_SEGS**
5085  *			This flag will only works for *ctx* **struct sk_buff**.
5086  *			If packet context contains extra packet segment buffers
5087  *			(often knows as GSO skb), then MTU check is harder to
5088  *			check at this point, because in transmit path it is
5089  *			possible for the skb packet to get re-segmented
5090  *			(depending on net device features).  This could still be
5091  *			a MTU violation, so this flag enables performing MTU
5092  *			check against segments, with a different violation
5093  *			return code to tell it apart. Check cannot use len_diff.
5094  *
5095  *		On return *mtu_len* pointer contains the MTU value of the net
5096  *		device.  Remember the net device configured MTU is the L3 size,
5097  *		which is returned here and XDP and TC length operate at L2.
5098  *		Helper take this into account for you, but remember when using
5099  *		MTU value in your BPF-code.
5100  *
5101  *	Return
5102  *		* 0 on success, and populate MTU value in *mtu_len* pointer.
5103  *
5104  *		* < 0 if any input argument is invalid (*mtu_len* not updated)
5105  *
5106  *		MTU violations return positive values, but also populate MTU
5107  *		value in *mtu_len* pointer, as this can be needed for
5108  *		implementing PMTU handing:
5109  *
5110  *		* **BPF_MTU_CHK_RET_FRAG_NEEDED**
5111  *		* **BPF_MTU_CHK_RET_SEGS_TOOBIG**
5112  *
5113  * long bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn, void *callback_ctx, u64 flags)
5114  *	Description
5115  *		For each element in **map**, call **callback_fn** function with
5116  *		**map**, **callback_ctx** and other map-specific parameters.
5117  *		The **callback_fn** should be a static function and
5118  *		the **callback_ctx** should be a pointer to the stack.
5119  *		The **flags** is used to control certain aspects of the helper.
5120  *		Currently, the **flags** must be 0.
5121  *
5122  *		The following are a list of supported map types and their
5123  *		respective expected callback signatures:
5124  *
5125  *		BPF_MAP_TYPE_HASH, BPF_MAP_TYPE_PERCPU_HASH,
5126  *		BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH,
5127  *		BPF_MAP_TYPE_ARRAY, BPF_MAP_TYPE_PERCPU_ARRAY
5128  *
5129  *		long (\*callback_fn)(struct bpf_map \*map, const void \*key, void \*value, void \*ctx);
5130  *
5131  *		For per_cpu maps, the map_value is the value on the cpu where the
5132  *		bpf_prog is running.
5133  *
5134  *		If **callback_fn** return 0, the helper will continue to the next
5135  *		element. If return value is 1, the helper will skip the rest of
5136  *		elements and return. Other return values are not used now.
5137  *
5138  *	Return
5139  *		The number of traversed map elements for success, **-EINVAL** for
5140  *		invalid **flags**.
5141  *
5142  * long bpf_snprintf(char *str, u32 str_size, const char *fmt, u64 *data, u32 data_len)
5143  *	Description
5144  *		Outputs a string into the **str** buffer of size **str_size**
5145  *		based on a format string stored in a read-only map pointed by
5146  *		**fmt**.
5147  *
5148  *		Each format specifier in **fmt** corresponds to one u64 element
5149  *		in the **data** array. For strings and pointers where pointees
5150  *		are accessed, only the pointer values are stored in the *data*
5151  *		array. The *data_len* is the size of *data* in bytes - must be
5152  *		a multiple of 8.
5153  *
5154  *		Formats **%s** and **%p{i,I}{4,6}** require to read kernel
5155  *		memory. Reading kernel memory may fail due to either invalid
5156  *		address or valid address but requiring a major memory fault. If
5157  *		reading kernel memory fails, the string for **%s** will be an
5158  *		empty string, and the ip address for **%p{i,I}{4,6}** will be 0.
5159  *		Not returning error to bpf program is consistent with what
5160  *		**bpf_trace_printk**\ () does for now.
5161  *
5162  *	Return
5163  *		The strictly positive length of the formatted string, including
5164  *		the trailing zero character. If the return value is greater than
5165  *		**str_size**, **str** contains a truncated string, guaranteed to
5166  *		be zero-terminated except when **str_size** is 0.
5167  *
5168  *		Or **-EBUSY** if the per-CPU memory copy buffer is busy.
5169  *
5170  * long bpf_sys_bpf(u32 cmd, void *attr, u32 attr_size)
5171  * 	Description
5172  * 		Execute bpf syscall with given arguments.
5173  * 	Return
5174  * 		A syscall result.
5175  *
5176  * long bpf_btf_find_by_name_kind(char *name, int name_sz, u32 kind, int flags)
5177  * 	Description
5178  * 		Find BTF type with given name and kind in vmlinux BTF or in module's BTFs.
5179  * 	Return
5180  * 		Returns btf_id and btf_obj_fd in lower and upper 32 bits.
5181  *
5182  * long bpf_sys_close(u32 fd)
5183  * 	Description
5184  * 		Execute close syscall for given FD.
5185  * 	Return
5186  * 		A syscall result.
5187  *
5188  * long bpf_timer_init(struct bpf_timer *timer, struct bpf_map *map, u64 flags)
5189  *	Description
5190  *		Initialize the timer.
5191  *		First 4 bits of *flags* specify clockid.
5192  *		Only CLOCK_MONOTONIC, CLOCK_REALTIME, CLOCK_BOOTTIME are allowed.
5193  *		All other bits of *flags* are reserved.
5194  *		The verifier will reject the program if *timer* is not from
5195  *		the same *map*.
5196  *	Return
5197  *		0 on success.
5198  *		**-EBUSY** if *timer* is already initialized.
5199  *		**-EINVAL** if invalid *flags* are passed.
5200  *		**-EPERM** if *timer* is in a map that doesn't have any user references.
5201  *		The user space should either hold a file descriptor to a map with timers
5202  *		or pin such map in bpffs. When map is unpinned or file descriptor is
5203  *		closed all timers in the map will be cancelled and freed.
5204  *
5205  * long bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn)
5206  *	Description
5207  *		Configure the timer to call *callback_fn* static function.
5208  *	Return
5209  *		0 on success.
5210  *		**-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier.
5211  *		**-EPERM** if *timer* is in a map that doesn't have any user references.
5212  *		The user space should either hold a file descriptor to a map with timers
5213  *		or pin such map in bpffs. When map is unpinned or file descriptor is
5214  *		closed all timers in the map will be cancelled and freed.
5215  *
5216  * long bpf_timer_start(struct bpf_timer *timer, u64 nsecs, u64 flags)
5217  *	Description
5218  *		Set timer expiration N nanoseconds from the current time. The
5219  *		configured callback will be invoked in soft irq context on some cpu
5220  *		and will not repeat unless another bpf_timer_start() is made.
5221  *		In such case the next invocation can migrate to a different cpu.
5222  *		Since struct bpf_timer is a field inside map element the map
5223  *		owns the timer. The bpf_timer_set_callback() will increment refcnt
5224  *		of BPF program to make sure that callback_fn code stays valid.
5225  *		When user space reference to a map reaches zero all timers
5226  *		in a map are cancelled and corresponding program's refcnts are
5227  *		decremented. This is done to make sure that Ctrl-C of a user
5228  *		process doesn't leave any timers running. If map is pinned in
5229  *		bpffs the callback_fn can re-arm itself indefinitely.
5230  *		bpf_map_update/delete_elem() helpers and user space sys_bpf commands
5231  *		cancel and free the timer in the given map element.
5232  *		The map can contain timers that invoke callback_fn-s from different
5233  *		programs. The same callback_fn can serve different timers from
5234  *		different maps if key/value layout matches across maps.
5235  *		Every bpf_timer_set_callback() can have different callback_fn.
5236  *
5237  *		*flags* can be one of:
5238  *
5239  *		**BPF_F_TIMER_ABS**
5240  *			Start the timer in absolute expire value instead of the
5241  *			default relative one.
5242  *		**BPF_F_TIMER_CPU_PIN**
5243  *			Timer will be pinned to the CPU of the caller.
5244  *
5245  *	Return
5246  *		0 on success.
5247  *		**-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier
5248  *		or invalid *flags* are passed.
5249  *
5250  * long bpf_timer_cancel(struct bpf_timer *timer)
5251  *	Description
5252  *		Cancel the timer and wait for callback_fn to finish if it was running.
5253  *	Return
5254  *		0 if the timer was not active.
5255  *		1 if the timer was active.
5256  *		**-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier.
5257  *		**-EDEADLK** if callback_fn tried to call bpf_timer_cancel() on its
5258  *		own timer which would have led to a deadlock otherwise.
5259  *
5260  * u64 bpf_get_func_ip(void *ctx)
5261  * 	Description
5262  * 		Get address of the traced function (for tracing and kprobe programs).
5263  *
5264  * 		When called for kprobe program attached as uprobe it returns
5265  * 		probe address for both entry and return uprobe.
5266  *
5267  * 	Return
5268  * 		Address of the traced function for kprobe.
5269  * 		0 for kprobes placed within the function (not at the entry).
5270  * 		Address of the probe for uprobe and return uprobe.
5271  *
5272  * u64 bpf_get_attach_cookie(void *ctx)
5273  * 	Description
5274  * 		Get bpf_cookie value provided (optionally) during the program
5275  * 		attachment. It might be different for each individual
5276  * 		attachment, even if BPF program itself is the same.
5277  * 		Expects BPF program context *ctx* as a first argument.
5278  *
5279  * 		Supported for the following program types:
5280  *			- kprobe/uprobe;
5281  *			- tracepoint;
5282  *			- perf_event.
5283  * 	Return
5284  *		Value specified by user at BPF link creation/attachment time
5285  *		or 0, if it was not specified.
5286  *
5287  * long bpf_task_pt_regs(struct task_struct *task)
5288  *	Description
5289  *		Get the struct pt_regs associated with **task**.
5290  *	Return
5291  *		A pointer to struct pt_regs.
5292  *
5293  * long bpf_get_branch_snapshot(void *entries, u32 size, u64 flags)
5294  *	Description
5295  *		Get branch trace from hardware engines like Intel LBR. The
5296  *		hardware engine is stopped shortly after the helper is
5297  *		called. Therefore, the user need to filter branch entries
5298  *		based on the actual use case. To capture branch trace
5299  *		before the trigger point of the BPF program, the helper
5300  *		should be called at the beginning of the BPF program.
5301  *
5302  *		The data is stored as struct perf_branch_entry into output
5303  *		buffer *entries*. *size* is the size of *entries* in bytes.
5304  *		*flags* is reserved for now and must be zero.
5305  *
5306  *	Return
5307  *		On success, number of bytes written to *buf*. On error, a
5308  *		negative value.
5309  *
5310  *		**-EINVAL** if *flags* is not zero.
5311  *
5312  *		**-ENOENT** if architecture does not support branch records.
5313  *
5314  * long bpf_trace_vprintk(const char *fmt, u32 fmt_size, const void *data, u32 data_len)
5315  *	Description
5316  *		Behaves like **bpf_trace_printk**\ () helper, but takes an array of u64
5317  *		to format and can handle more format args as a result.
5318  *
5319  *		Arguments are to be used as in **bpf_seq_printf**\ () helper.
5320  *	Return
5321  *		The number of bytes written to the buffer, or a negative error
5322  *		in case of failure.
5323  *
5324  * struct unix_sock *bpf_skc_to_unix_sock(void *sk)
5325  * 	Description
5326  *		Dynamically cast a *sk* pointer to a *unix_sock* pointer.
5327  *	Return
5328  *		*sk* if casting is valid, or **NULL** otherwise.
5329  *
5330  * long bpf_kallsyms_lookup_name(const char *name, int name_sz, int flags, u64 *res)
5331  *	Description
5332  *		Get the address of a kernel symbol, returned in *res*. *res* is
5333  *		set to 0 if the symbol is not found.
5334  *	Return
5335  *		On success, zero. On error, a negative value.
5336  *
5337  *		**-EINVAL** if *flags* is not zero.
5338  *
5339  *		**-EINVAL** if string *name* is not the same size as *name_sz*.
5340  *
5341  *		**-ENOENT** if symbol is not found.
5342  *
5343  *		**-EPERM** if caller does not have permission to obtain kernel address.
5344  *
5345  * long bpf_find_vma(struct task_struct *task, u64 addr, void *callback_fn, void *callback_ctx, u64 flags)
5346  *	Description
5347  *		Find vma of *task* that contains *addr*, call *callback_fn*
5348  *		function with *task*, *vma*, and *callback_ctx*.
5349  *		The *callback_fn* should be a static function and
5350  *		the *callback_ctx* should be a pointer to the stack.
5351  *		The *flags* is used to control certain aspects of the helper.
5352  *		Currently, the *flags* must be 0.
5353  *
5354  *		The expected callback signature is
5355  *
5356  *		long (\*callback_fn)(struct task_struct \*task, struct vm_area_struct \*vma, void \*callback_ctx);
5357  *
5358  *	Return
5359  *		0 on success.
5360  *		**-ENOENT** if *task->mm* is NULL, or no vma contains *addr*.
5361  *		**-EBUSY** if failed to try lock mmap_lock.
5362  *		**-EINVAL** for invalid **flags**.
5363  *
5364  * long bpf_loop(u32 nr_loops, void *callback_fn, void *callback_ctx, u64 flags)
5365  *	Description
5366  *		For **nr_loops**, call **callback_fn** function
5367  *		with **callback_ctx** as the context parameter.
5368  *		The **callback_fn** should be a static function and
5369  *		the **callback_ctx** should be a pointer to the stack.
5370  *		The **flags** is used to control certain aspects of the helper.
5371  *		Currently, the **flags** must be 0. Currently, nr_loops is
5372  *		limited to 1 << 23 (~8 million) loops.
5373  *
5374  *		long (\*callback_fn)(u64 index, void \*ctx);
5375  *
5376  *		where **index** is the current index in the loop. The index
5377  *		is zero-indexed.
5378  *
5379  *		If **callback_fn** returns 0, the helper will continue to the next
5380  *		loop. If return value is 1, the helper will skip the rest of
5381  *		the loops and return. Other return values are not used now,
5382  *		and will be rejected by the verifier.
5383  *
5384  *	Return
5385  *		The number of loops performed, **-EINVAL** for invalid **flags**,
5386  *		**-E2BIG** if **nr_loops** exceeds the maximum number of loops.
5387  *
5388  * long bpf_strncmp(const char *s1, u32 s1_sz, const char *s2)
5389  *	Description
5390  *		Do strncmp() between **s1** and **s2**. **s1** doesn't need
5391  *		to be null-terminated and **s1_sz** is the maximum storage
5392  *		size of **s1**. **s2** must be a read-only string.
5393  *	Return
5394  *		An integer less than, equal to, or greater than zero
5395  *		if the first **s1_sz** bytes of **s1** is found to be
5396  *		less than, to match, or be greater than **s2**.
5397  *
5398  * long bpf_get_func_arg(void *ctx, u32 n, u64 *value)
5399  *	Description
5400  *		Get **n**-th argument register (zero based) of the traced function (for tracing programs)
5401  *		returned in **value**.
5402  *
5403  *	Return
5404  *		0 on success.
5405  *		**-EINVAL** if n >= argument register count of traced function.
5406  *
5407  * long bpf_get_func_ret(void *ctx, u64 *value)
5408  *	Description
5409  *		Get return value of the traced function (for tracing programs)
5410  *		in **value**.
5411  *
5412  *	Return
5413  *		0 on success.
5414  *		**-EOPNOTSUPP** for tracing programs other than BPF_TRACE_FEXIT or BPF_MODIFY_RETURN.
5415  *
5416  * long bpf_get_func_arg_cnt(void *ctx)
5417  *	Description
5418  *		Get number of registers of the traced function (for tracing programs) where
5419  *		function arguments are stored in these registers.
5420  *
5421  *	Return
5422  *		The number of argument registers of the traced function.
5423  *
5424  * int bpf_get_retval(void)
5425  *	Description
5426  *		Get the BPF program's return value that will be returned to the upper layers.
5427  *
5428  *		This helper is currently supported by cgroup programs and only by the hooks
5429  *		where BPF program's return value is returned to the userspace via errno.
5430  *	Return
5431  *		The BPF program's return value.
5432  *
5433  * int bpf_set_retval(int retval)
5434  *	Description
5435  *		Set the BPF program's return value that will be returned to the upper layers.
5436  *
5437  *		This helper is currently supported by cgroup programs and only by the hooks
5438  *		where BPF program's return value is returned to the userspace via errno.
5439  *
5440  *		Note that there is the following corner case where the program exports an error
5441  *		via bpf_set_retval but signals success via 'return 1':
5442  *
5443  *			bpf_set_retval(-EPERM);
5444  *			return 1;
5445  *
5446  *		In this case, the BPF program's return value will use helper's -EPERM. This
5447  *		still holds true for cgroup/bind{4,6} which supports extra 'return 3' success case.
5448  *
5449  *	Return
5450  *		0 on success, or a negative error in case of failure.
5451  *
5452  * u64 bpf_xdp_get_buff_len(struct xdp_buff *xdp_md)
5453  *	Description
5454  *		Get the total size of a given xdp buff (linear and paged area)
5455  *	Return
5456  *		The total size of a given xdp buffer.
5457  *
5458  * long bpf_xdp_load_bytes(struct xdp_buff *xdp_md, u32 offset, void *buf, u32 len)
5459  *	Description
5460  *		This helper is provided as an easy way to load data from a
5461  *		xdp buffer. It can be used to load *len* bytes from *offset* from
5462  *		the frame associated to *xdp_md*, into the buffer pointed by
5463  *		*buf*.
5464  *	Return
5465  *		0 on success, or a negative error in case of failure.
5466  *
5467  * long bpf_xdp_store_bytes(struct xdp_buff *xdp_md, u32 offset, void *buf, u32 len)
5468  *	Description
5469  *		Store *len* bytes from buffer *buf* into the frame
5470  *		associated to *xdp_md*, at *offset*.
5471  *	Return
5472  *		0 on success, or a negative error in case of failure.
5473  *
5474  * long bpf_copy_from_user_task(void *dst, u32 size, const void *user_ptr, struct task_struct *tsk, u64 flags)
5475  *	Description
5476  *		Read *size* bytes from user space address *user_ptr* in *tsk*'s
5477  *		address space, and stores the data in *dst*. *flags* is not
5478  *		used yet and is provided for future extensibility. This helper
5479  *		can only be used by sleepable programs.
5480  *	Return
5481  *		0 on success, or a negative error in case of failure. On error
5482  *		*dst* buffer is zeroed out.
5483  *
5484  * long bpf_skb_set_tstamp(struct sk_buff *skb, u64 tstamp, u32 tstamp_type)
5485  *	Description
5486  *		Change the __sk_buff->tstamp_type to *tstamp_type*
5487  *		and set *tstamp* to the __sk_buff->tstamp together.
5488  *
5489  *		If there is no need to change the __sk_buff->tstamp_type,
5490  *		the tstamp value can be directly written to __sk_buff->tstamp
5491  *		instead.
5492  *
5493  *		BPF_SKB_TSTAMP_DELIVERY_MONO is the only tstamp that
5494  *		will be kept during bpf_redirect_*().  A non zero
5495  *		*tstamp* must be used with the BPF_SKB_TSTAMP_DELIVERY_MONO
5496  *		*tstamp_type*.
5497  *
5498  *		A BPF_SKB_TSTAMP_UNSPEC *tstamp_type* can only be used
5499  *		with a zero *tstamp*.
5500  *
5501  *		Only IPv4 and IPv6 skb->protocol are supported.
5502  *
5503  *		This function is most useful when it needs to set a
5504  *		mono delivery time to __sk_buff->tstamp and then
5505  *		bpf_redirect_*() to the egress of an iface.  For example,
5506  *		changing the (rcv) timestamp in __sk_buff->tstamp at
5507  *		ingress to a mono delivery time and then bpf_redirect_*()
5508  *		to sch_fq@phy-dev.
5509  *	Return
5510  *		0 on success.
5511  *		**-EINVAL** for invalid input
5512  *		**-EOPNOTSUPP** for unsupported protocol
5513  *
5514  * long bpf_ima_file_hash(struct file *file, void *dst, u32 size)
5515  *	Description
5516  *		Returns a calculated IMA hash of the *file*.
5517  *		If the hash is larger than *size*, then only *size*
5518  *		bytes will be copied to *dst*
5519  *	Return
5520  *		The **hash_algo** is returned on success,
5521  *		**-EOPNOTSUPP** if the hash calculation failed or **-EINVAL** if
5522  *		invalid arguments are passed.
5523  *
5524  * void *bpf_kptr_xchg(void *dst, void *ptr)
5525  *	Description
5526  *		Exchange kptr at pointer *dst* with *ptr*, and return the old value.
5527  *		*dst* can be map value or local kptr. *ptr* can be NULL, otherwise
5528  *		it must be a referenced pointer which will be released when this helper
5529  *		is called.
5530  *	Return
5531  *		The old value of kptr (which can be NULL). The returned pointer
5532  *		if not NULL, is a reference which must be released using its
5533  *		corresponding release function, or moved into a BPF map before
5534  *		program exit.
5535  *
5536  * void *bpf_map_lookup_percpu_elem(struct bpf_map *map, const void *key, u32 cpu)
5537  * 	Description
5538  * 		Perform a lookup in *percpu map* for an entry associated to
5539  * 		*key* on *cpu*.
5540  * 	Return
5541  * 		Map value associated to *key* on *cpu*, or **NULL** if no entry
5542  * 		was found or *cpu* is invalid.
5543  *
5544  * struct mptcp_sock *bpf_skc_to_mptcp_sock(void *sk)
5545  *	Description
5546  *		Dynamically cast a *sk* pointer to a *mptcp_sock* pointer.
5547  *	Return
5548  *		*sk* if casting is valid, or **NULL** otherwise.
5549  *
5550  * long bpf_dynptr_from_mem(void *data, u32 size, u64 flags, struct bpf_dynptr *ptr)
5551  *	Description
5552  *		Get a dynptr to local memory *data*.
5553  *
5554  *		*data* must be a ptr to a map value.
5555  *		The maximum *size* supported is DYNPTR_MAX_SIZE.
5556  *		*flags* is currently unused.
5557  *	Return
5558  *		0 on success, -E2BIG if the size exceeds DYNPTR_MAX_SIZE,
5559  *		-EINVAL if flags is not 0.
5560  *
5561  * long bpf_ringbuf_reserve_dynptr(void *ringbuf, u32 size, u64 flags, struct bpf_dynptr *ptr)
5562  *	Description
5563  *		Reserve *size* bytes of payload in a ring buffer *ringbuf*
5564  *		through the dynptr interface. *flags* must be 0.
5565  *
5566  *		Please note that a corresponding bpf_ringbuf_submit_dynptr or
5567  *		bpf_ringbuf_discard_dynptr must be called on *ptr*, even if the
5568  *		reservation fails. This is enforced by the verifier.
5569  *	Return
5570  *		0 on success, or a negative error in case of failure.
5571  *
5572  * void bpf_ringbuf_submit_dynptr(struct bpf_dynptr *ptr, u64 flags)
5573  *	Description
5574  *		Submit reserved ring buffer sample, pointed to by *data*,
5575  *		through the dynptr interface. This is a no-op if the dynptr is
5576  *		invalid/null.
5577  *
5578  *		For more information on *flags*, please see
5579  *		'bpf_ringbuf_submit'.
5580  *	Return
5581  *		Nothing. Always succeeds.
5582  *
5583  * void bpf_ringbuf_discard_dynptr(struct bpf_dynptr *ptr, u64 flags)
5584  *	Description
5585  *		Discard reserved ring buffer sample through the dynptr
5586  *		interface. This is a no-op if the dynptr is invalid/null.
5587  *
5588  *		For more information on *flags*, please see
5589  *		'bpf_ringbuf_discard'.
5590  *	Return
5591  *		Nothing. Always succeeds.
5592  *
5593  * long bpf_dynptr_read(void *dst, u32 len, const struct bpf_dynptr *src, u32 offset, u64 flags)
5594  *	Description
5595  *		Read *len* bytes from *src* into *dst*, starting from *offset*
5596  *		into *src*.
5597  *		*flags* is currently unused.
5598  *	Return
5599  *		0 on success, -E2BIG if *offset* + *len* exceeds the length
5600  *		of *src*'s data, -EINVAL if *src* is an invalid dynptr or if
5601  *		*flags* is not 0.
5602  *
5603  * long bpf_dynptr_write(const struct bpf_dynptr *dst, u32 offset, void *src, u32 len, u64 flags)
5604  *	Description
5605  *		Write *len* bytes from *src* into *dst*, starting from *offset*
5606  *		into *dst*.
5607  *
5608  *		*flags* must be 0 except for skb-type dynptrs.
5609  *
5610  *		For skb-type dynptrs:
5611  *		    *  All data slices of the dynptr are automatically
5612  *		       invalidated after **bpf_dynptr_write**\ (). This is
5613  *		       because writing may pull the skb and change the
5614  *		       underlying packet buffer.
5615  *
5616  *		    *  For *flags*, please see the flags accepted by
5617  *		       **bpf_skb_store_bytes**\ ().
5618  *	Return
5619  *		0 on success, -E2BIG if *offset* + *len* exceeds the length
5620  *		of *dst*'s data, -EINVAL if *dst* is an invalid dynptr or if *dst*
5621  *		is a read-only dynptr or if *flags* is not correct. For skb-type dynptrs,
5622  *		other errors correspond to errors returned by **bpf_skb_store_bytes**\ ().
5623  *
5624  * void *bpf_dynptr_data(const struct bpf_dynptr *ptr, u32 offset, u32 len)
5625  *	Description
5626  *		Get a pointer to the underlying dynptr data.
5627  *
5628  *		*len* must be a statically known value. The returned data slice
5629  *		is invalidated whenever the dynptr is invalidated.
5630  *
5631  *		skb and xdp type dynptrs may not use bpf_dynptr_data. They should
5632  *		instead use bpf_dynptr_slice and bpf_dynptr_slice_rdwr.
5633  *	Return
5634  *		Pointer to the underlying dynptr data, NULL if the dynptr is
5635  *		read-only, if the dynptr is invalid, or if the offset and length
5636  *		is out of bounds.
5637  *
5638  * s64 bpf_tcp_raw_gen_syncookie_ipv4(struct iphdr *iph, struct tcphdr *th, u32 th_len)
5639  *	Description
5640  *		Try to issue a SYN cookie for the packet with corresponding
5641  *		IPv4/TCP headers, *iph* and *th*, without depending on a
5642  *		listening socket.
5643  *
5644  *		*iph* points to the IPv4 header.
5645  *
5646  *		*th* points to the start of the TCP header, while *th_len*
5647  *		contains the length of the TCP header (at least
5648  *		**sizeof**\ (**struct tcphdr**)).
5649  *	Return
5650  *		On success, lower 32 bits hold the generated SYN cookie in
5651  *		followed by 16 bits which hold the MSS value for that cookie,
5652  *		and the top 16 bits are unused.
5653  *
5654  *		On failure, the returned value is one of the following:
5655  *
5656  *		**-EINVAL** if *th_len* is invalid.
5657  *
5658  * s64 bpf_tcp_raw_gen_syncookie_ipv6(struct ipv6hdr *iph, struct tcphdr *th, u32 th_len)
5659  *	Description
5660  *		Try to issue a SYN cookie for the packet with corresponding
5661  *		IPv6/TCP headers, *iph* and *th*, without depending on a
5662  *		listening socket.
5663  *
5664  *		*iph* points to the IPv6 header.
5665  *
5666  *		*th* points to the start of the TCP header, while *th_len*
5667  *		contains the length of the TCP header (at least
5668  *		**sizeof**\ (**struct tcphdr**)).
5669  *	Return
5670  *		On success, lower 32 bits hold the generated SYN cookie in
5671  *		followed by 16 bits which hold the MSS value for that cookie,
5672  *		and the top 16 bits are unused.
5673  *
5674  *		On failure, the returned value is one of the following:
5675  *
5676  *		**-EINVAL** if *th_len* is invalid.
5677  *
5678  *		**-EPROTONOSUPPORT** if CONFIG_IPV6 is not builtin.
5679  *
5680  * long bpf_tcp_raw_check_syncookie_ipv4(struct iphdr *iph, struct tcphdr *th)
5681  *	Description
5682  *		Check whether *iph* and *th* contain a valid SYN cookie ACK
5683  *		without depending on a listening socket.
5684  *
5685  *		*iph* points to the IPv4 header.
5686  *
5687  *		*th* points to the TCP header.
5688  *	Return
5689  *		0 if *iph* and *th* are a valid SYN cookie ACK.
5690  *
5691  *		On failure, the returned value is one of the following:
5692  *
5693  *		**-EACCES** if the SYN cookie is not valid.
5694  *
5695  * long bpf_tcp_raw_check_syncookie_ipv6(struct ipv6hdr *iph, struct tcphdr *th)
5696  *	Description
5697  *		Check whether *iph* and *th* contain a valid SYN cookie ACK
5698  *		without depending on a listening socket.
5699  *
5700  *		*iph* points to the IPv6 header.
5701  *
5702  *		*th* points to the TCP header.
5703  *	Return
5704  *		0 if *iph* and *th* are a valid SYN cookie ACK.
5705  *
5706  *		On failure, the returned value is one of the following:
5707  *
5708  *		**-EACCES** if the SYN cookie is not valid.
5709  *
5710  *		**-EPROTONOSUPPORT** if CONFIG_IPV6 is not builtin.
5711  *
5712  * u64 bpf_ktime_get_tai_ns(void)
5713  *	Description
5714  *		A nonsettable system-wide clock derived from wall-clock time but
5715  *		ignoring leap seconds.  This clock does not experience
5716  *		discontinuities and backwards jumps caused by NTP inserting leap
5717  *		seconds as CLOCK_REALTIME does.
5718  *
5719  *		See: **clock_gettime**\ (**CLOCK_TAI**)
5720  *	Return
5721  *		Current *ktime*.
5722  *
5723  * long bpf_user_ringbuf_drain(struct bpf_map *map, void *callback_fn, void *ctx, u64 flags)
5724  *	Description
5725  *		Drain samples from the specified user ring buffer, and invoke
5726  *		the provided callback for each such sample:
5727  *
5728  *		long (\*callback_fn)(const struct bpf_dynptr \*dynptr, void \*ctx);
5729  *
5730  *		If **callback_fn** returns 0, the helper will continue to try
5731  *		and drain the next sample, up to a maximum of
5732  *		BPF_MAX_USER_RINGBUF_SAMPLES samples. If the return value is 1,
5733  *		the helper will skip the rest of the samples and return. Other
5734  *		return values are not used now, and will be rejected by the
5735  *		verifier.
5736  *	Return
5737  *		The number of drained samples if no error was encountered while
5738  *		draining samples, or 0 if no samples were present in the ring
5739  *		buffer. If a user-space producer was epoll-waiting on this map,
5740  *		and at least one sample was drained, they will receive an event
5741  *		notification notifying them of available space in the ring
5742  *		buffer. If the BPF_RB_NO_WAKEUP flag is passed to this
5743  *		function, no wakeup notification will be sent. If the
5744  *		BPF_RB_FORCE_WAKEUP flag is passed, a wakeup notification will
5745  *		be sent even if no sample was drained.
5746  *
5747  *		On failure, the returned value is one of the following:
5748  *
5749  *		**-EBUSY** if the ring buffer is contended, and another calling
5750  *		context was concurrently draining the ring buffer.
5751  *
5752  *		**-EINVAL** if user-space is not properly tracking the ring
5753  *		buffer due to the producer position not being aligned to 8
5754  *		bytes, a sample not being aligned to 8 bytes, or the producer
5755  *		position not matching the advertised length of a sample.
5756  *
5757  *		**-E2BIG** if user-space has tried to publish a sample which is
5758  *		larger than the size of the ring buffer, or which cannot fit
5759  *		within a struct bpf_dynptr.
5760  *
5761  * void *bpf_cgrp_storage_get(struct bpf_map *map, struct cgroup *cgroup, void *value, u64 flags)
5762  *	Description
5763  *		Get a bpf_local_storage from the *cgroup*.
5764  *
5765  *		Logically, it could be thought of as getting the value from
5766  *		a *map* with *cgroup* as the **key**.  From this
5767  *		perspective,  the usage is not much different from
5768  *		**bpf_map_lookup_elem**\ (*map*, **&**\ *cgroup*) except this
5769  *		helper enforces the key must be a cgroup struct and the map must also
5770  *		be a **BPF_MAP_TYPE_CGRP_STORAGE**.
5771  *
5772  *		In reality, the local-storage value is embedded directly inside of the
5773  *		*cgroup* object itself, rather than being located in the
5774  *		**BPF_MAP_TYPE_CGRP_STORAGE** map. When the local-storage value is
5775  *		queried for some *map* on a *cgroup* object, the kernel will perform an
5776  *		O(n) iteration over all of the live local-storage values for that
5777  *		*cgroup* object until the local-storage value for the *map* is found.
5778  *
5779  *		An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be
5780  *		used such that a new bpf_local_storage will be
5781  *		created if one does not exist.  *value* can be used
5782  *		together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify
5783  *		the initial value of a bpf_local_storage.  If *value* is
5784  *		**NULL**, the new bpf_local_storage will be zero initialized.
5785  *	Return
5786  *		A bpf_local_storage pointer is returned on success.
5787  *
5788  *		**NULL** if not found or there was an error in adding
5789  *		a new bpf_local_storage.
5790  *
5791  * long bpf_cgrp_storage_delete(struct bpf_map *map, struct cgroup *cgroup)
5792  *	Description
5793  *		Delete a bpf_local_storage from a *cgroup*.
5794  *	Return
5795  *		0 on success.
5796  *
5797  *		**-ENOENT** if the bpf_local_storage cannot be found.
5798  */
5799 #define ___BPF_FUNC_MAPPER(FN, ctx...)			\
5800 	FN(unspec, 0, ##ctx)				\
5801 	FN(map_lookup_elem, 1, ##ctx)			\
5802 	FN(map_update_elem, 2, ##ctx)			\
5803 	FN(map_delete_elem, 3, ##ctx)			\
5804 	FN(probe_read, 4, ##ctx)			\
5805 	FN(ktime_get_ns, 5, ##ctx)			\
5806 	FN(trace_printk, 6, ##ctx)			\
5807 	FN(get_prandom_u32, 7, ##ctx)			\
5808 	FN(get_smp_processor_id, 8, ##ctx)		\
5809 	FN(skb_store_bytes, 9, ##ctx)			\
5810 	FN(l3_csum_replace, 10, ##ctx)			\
5811 	FN(l4_csum_replace, 11, ##ctx)			\
5812 	FN(tail_call, 12, ##ctx)			\
5813 	FN(clone_redirect, 13, ##ctx)			\
5814 	FN(get_current_pid_tgid, 14, ##ctx)		\
5815 	FN(get_current_uid_gid, 15, ##ctx)		\
5816 	FN(get_current_comm, 16, ##ctx)			\
5817 	FN(get_cgroup_classid, 17, ##ctx)		\
5818 	FN(skb_vlan_push, 18, ##ctx)			\
5819 	FN(skb_vlan_pop, 19, ##ctx)			\
5820 	FN(skb_get_tunnel_key, 20, ##ctx)		\
5821 	FN(skb_set_tunnel_key, 21, ##ctx)		\
5822 	FN(perf_event_read, 22, ##ctx)			\
5823 	FN(redirect, 23, ##ctx)				\
5824 	FN(get_route_realm, 24, ##ctx)			\
5825 	FN(perf_event_output, 25, ##ctx)		\
5826 	FN(skb_load_bytes, 26, ##ctx)			\
5827 	FN(get_stackid, 27, ##ctx)			\
5828 	FN(csum_diff, 28, ##ctx)			\
5829 	FN(skb_get_tunnel_opt, 29, ##ctx)		\
5830 	FN(skb_set_tunnel_opt, 30, ##ctx)		\
5831 	FN(skb_change_proto, 31, ##ctx)			\
5832 	FN(skb_change_type, 32, ##ctx)			\
5833 	FN(skb_under_cgroup, 33, ##ctx)			\
5834 	FN(get_hash_recalc, 34, ##ctx)			\
5835 	FN(get_current_task, 35, ##ctx)			\
5836 	FN(probe_write_user, 36, ##ctx)			\
5837 	FN(current_task_under_cgroup, 37, ##ctx)	\
5838 	FN(skb_change_tail, 38, ##ctx)			\
5839 	FN(skb_pull_data, 39, ##ctx)			\
5840 	FN(csum_update, 40, ##ctx)			\
5841 	FN(set_hash_invalid, 41, ##ctx)			\
5842 	FN(get_numa_node_id, 42, ##ctx)			\
5843 	FN(skb_change_head, 43, ##ctx)			\
5844 	FN(xdp_adjust_head, 44, ##ctx)			\
5845 	FN(probe_read_str, 45, ##ctx)			\
5846 	FN(get_socket_cookie, 46, ##ctx)		\
5847 	FN(get_socket_uid, 47, ##ctx)			\
5848 	FN(set_hash, 48, ##ctx)				\
5849 	FN(setsockopt, 49, ##ctx)			\
5850 	FN(skb_adjust_room, 50, ##ctx)			\
5851 	FN(redirect_map, 51, ##ctx)			\
5852 	FN(sk_redirect_map, 52, ##ctx)			\
5853 	FN(sock_map_update, 53, ##ctx)			\
5854 	FN(xdp_adjust_meta, 54, ##ctx)			\
5855 	FN(perf_event_read_value, 55, ##ctx)		\
5856 	FN(perf_prog_read_value, 56, ##ctx)		\
5857 	FN(getsockopt, 57, ##ctx)			\
5858 	FN(override_return, 58, ##ctx)			\
5859 	FN(sock_ops_cb_flags_set, 59, ##ctx)		\
5860 	FN(msg_redirect_map, 60, ##ctx)			\
5861 	FN(msg_apply_bytes, 61, ##ctx)			\
5862 	FN(msg_cork_bytes, 62, ##ctx)			\
5863 	FN(msg_pull_data, 63, ##ctx)			\
5864 	FN(bind, 64, ##ctx)				\
5865 	FN(xdp_adjust_tail, 65, ##ctx)			\
5866 	FN(skb_get_xfrm_state, 66, ##ctx)		\
5867 	FN(get_stack, 67, ##ctx)			\
5868 	FN(skb_load_bytes_relative, 68, ##ctx)		\
5869 	FN(fib_lookup, 69, ##ctx)			\
5870 	FN(sock_hash_update, 70, ##ctx)			\
5871 	FN(msg_redirect_hash, 71, ##ctx)		\
5872 	FN(sk_redirect_hash, 72, ##ctx)			\
5873 	FN(lwt_push_encap, 73, ##ctx)			\
5874 	FN(lwt_seg6_store_bytes, 74, ##ctx)		\
5875 	FN(lwt_seg6_adjust_srh, 75, ##ctx)		\
5876 	FN(lwt_seg6_action, 76, ##ctx)			\
5877 	FN(rc_repeat, 77, ##ctx)			\
5878 	FN(rc_keydown, 78, ##ctx)			\
5879 	FN(skb_cgroup_id, 79, ##ctx)			\
5880 	FN(get_current_cgroup_id, 80, ##ctx)		\
5881 	FN(get_local_storage, 81, ##ctx)		\
5882 	FN(sk_select_reuseport, 82, ##ctx)		\
5883 	FN(skb_ancestor_cgroup_id, 83, ##ctx)		\
5884 	FN(sk_lookup_tcp, 84, ##ctx)			\
5885 	FN(sk_lookup_udp, 85, ##ctx)			\
5886 	FN(sk_release, 86, ##ctx)			\
5887 	FN(map_push_elem, 87, ##ctx)			\
5888 	FN(map_pop_elem, 88, ##ctx)			\
5889 	FN(map_peek_elem, 89, ##ctx)			\
5890 	FN(msg_push_data, 90, ##ctx)			\
5891 	FN(msg_pop_data, 91, ##ctx)			\
5892 	FN(rc_pointer_rel, 92, ##ctx)			\
5893 	FN(spin_lock, 93, ##ctx)			\
5894 	FN(spin_unlock, 94, ##ctx)			\
5895 	FN(sk_fullsock, 95, ##ctx)			\
5896 	FN(tcp_sock, 96, ##ctx)				\
5897 	FN(skb_ecn_set_ce, 97, ##ctx)			\
5898 	FN(get_listener_sock, 98, ##ctx)		\
5899 	FN(skc_lookup_tcp, 99, ##ctx)			\
5900 	FN(tcp_check_syncookie, 100, ##ctx)		\
5901 	FN(sysctl_get_name, 101, ##ctx)			\
5902 	FN(sysctl_get_current_value, 102, ##ctx)	\
5903 	FN(sysctl_get_new_value, 103, ##ctx)		\
5904 	FN(sysctl_set_new_value, 104, ##ctx)		\
5905 	FN(strtol, 105, ##ctx)				\
5906 	FN(strtoul, 106, ##ctx)				\
5907 	FN(sk_storage_get, 107, ##ctx)			\
5908 	FN(sk_storage_delete, 108, ##ctx)		\
5909 	FN(send_signal, 109, ##ctx)			\
5910 	FN(tcp_gen_syncookie, 110, ##ctx)		\
5911 	FN(skb_output, 111, ##ctx)			\
5912 	FN(probe_read_user, 112, ##ctx)			\
5913 	FN(probe_read_kernel, 113, ##ctx)		\
5914 	FN(probe_read_user_str, 114, ##ctx)		\
5915 	FN(probe_read_kernel_str, 115, ##ctx)		\
5916 	FN(tcp_send_ack, 116, ##ctx)			\
5917 	FN(send_signal_thread, 117, ##ctx)		\
5918 	FN(jiffies64, 118, ##ctx)			\
5919 	FN(read_branch_records, 119, ##ctx)		\
5920 	FN(get_ns_current_pid_tgid, 120, ##ctx)		\
5921 	FN(xdp_output, 121, ##ctx)			\
5922 	FN(get_netns_cookie, 122, ##ctx)		\
5923 	FN(get_current_ancestor_cgroup_id, 123, ##ctx)	\
5924 	FN(sk_assign, 124, ##ctx)			\
5925 	FN(ktime_get_boot_ns, 125, ##ctx)		\
5926 	FN(seq_printf, 126, ##ctx)			\
5927 	FN(seq_write, 127, ##ctx)			\
5928 	FN(sk_cgroup_id, 128, ##ctx)			\
5929 	FN(sk_ancestor_cgroup_id, 129, ##ctx)		\
5930 	FN(ringbuf_output, 130, ##ctx)			\
5931 	FN(ringbuf_reserve, 131, ##ctx)			\
5932 	FN(ringbuf_submit, 132, ##ctx)			\
5933 	FN(ringbuf_discard, 133, ##ctx)			\
5934 	FN(ringbuf_query, 134, ##ctx)			\
5935 	FN(csum_level, 135, ##ctx)			\
5936 	FN(skc_to_tcp6_sock, 136, ##ctx)		\
5937 	FN(skc_to_tcp_sock, 137, ##ctx)			\
5938 	FN(skc_to_tcp_timewait_sock, 138, ##ctx)	\
5939 	FN(skc_to_tcp_request_sock, 139, ##ctx)		\
5940 	FN(skc_to_udp6_sock, 140, ##ctx)		\
5941 	FN(get_task_stack, 141, ##ctx)			\
5942 	FN(load_hdr_opt, 142, ##ctx)			\
5943 	FN(store_hdr_opt, 143, ##ctx)			\
5944 	FN(reserve_hdr_opt, 144, ##ctx)			\
5945 	FN(inode_storage_get, 145, ##ctx)		\
5946 	FN(inode_storage_delete, 146, ##ctx)		\
5947 	FN(d_path, 147, ##ctx)				\
5948 	FN(copy_from_user, 148, ##ctx)			\
5949 	FN(snprintf_btf, 149, ##ctx)			\
5950 	FN(seq_printf_btf, 150, ##ctx)			\
5951 	FN(skb_cgroup_classid, 151, ##ctx)		\
5952 	FN(redirect_neigh, 152, ##ctx)			\
5953 	FN(per_cpu_ptr, 153, ##ctx)			\
5954 	FN(this_cpu_ptr, 154, ##ctx)			\
5955 	FN(redirect_peer, 155, ##ctx)			\
5956 	FN(task_storage_get, 156, ##ctx)		\
5957 	FN(task_storage_delete, 157, ##ctx)		\
5958 	FN(get_current_task_btf, 158, ##ctx)		\
5959 	FN(bprm_opts_set, 159, ##ctx)			\
5960 	FN(ktime_get_coarse_ns, 160, ##ctx)		\
5961 	FN(ima_inode_hash, 161, ##ctx)			\
5962 	FN(sock_from_file, 162, ##ctx)			\
5963 	FN(check_mtu, 163, ##ctx)			\
5964 	FN(for_each_map_elem, 164, ##ctx)		\
5965 	FN(snprintf, 165, ##ctx)			\
5966 	FN(sys_bpf, 166, ##ctx)				\
5967 	FN(btf_find_by_name_kind, 167, ##ctx)		\
5968 	FN(sys_close, 168, ##ctx)			\
5969 	FN(timer_init, 169, ##ctx)			\
5970 	FN(timer_set_callback, 170, ##ctx)		\
5971 	FN(timer_start, 171, ##ctx)			\
5972 	FN(timer_cancel, 172, ##ctx)			\
5973 	FN(get_func_ip, 173, ##ctx)			\
5974 	FN(get_attach_cookie, 174, ##ctx)		\
5975 	FN(task_pt_regs, 175, ##ctx)			\
5976 	FN(get_branch_snapshot, 176, ##ctx)		\
5977 	FN(trace_vprintk, 177, ##ctx)			\
5978 	FN(skc_to_unix_sock, 178, ##ctx)		\
5979 	FN(kallsyms_lookup_name, 179, ##ctx)		\
5980 	FN(find_vma, 180, ##ctx)			\
5981 	FN(loop, 181, ##ctx)				\
5982 	FN(strncmp, 182, ##ctx)				\
5983 	FN(get_func_arg, 183, ##ctx)			\
5984 	FN(get_func_ret, 184, ##ctx)			\
5985 	FN(get_func_arg_cnt, 185, ##ctx)		\
5986 	FN(get_retval, 186, ##ctx)			\
5987 	FN(set_retval, 187, ##ctx)			\
5988 	FN(xdp_get_buff_len, 188, ##ctx)		\
5989 	FN(xdp_load_bytes, 189, ##ctx)			\
5990 	FN(xdp_store_bytes, 190, ##ctx)			\
5991 	FN(copy_from_user_task, 191, ##ctx)		\
5992 	FN(skb_set_tstamp, 192, ##ctx)			\
5993 	FN(ima_file_hash, 193, ##ctx)			\
5994 	FN(kptr_xchg, 194, ##ctx)			\
5995 	FN(map_lookup_percpu_elem, 195, ##ctx)		\
5996 	FN(skc_to_mptcp_sock, 196, ##ctx)		\
5997 	FN(dynptr_from_mem, 197, ##ctx)			\
5998 	FN(ringbuf_reserve_dynptr, 198, ##ctx)		\
5999 	FN(ringbuf_submit_dynptr, 199, ##ctx)		\
6000 	FN(ringbuf_discard_dynptr, 200, ##ctx)		\
6001 	FN(dynptr_read, 201, ##ctx)			\
6002 	FN(dynptr_write, 202, ##ctx)			\
6003 	FN(dynptr_data, 203, ##ctx)			\
6004 	FN(tcp_raw_gen_syncookie_ipv4, 204, ##ctx)	\
6005 	FN(tcp_raw_gen_syncookie_ipv6, 205, ##ctx)	\
6006 	FN(tcp_raw_check_syncookie_ipv4, 206, ##ctx)	\
6007 	FN(tcp_raw_check_syncookie_ipv6, 207, ##ctx)	\
6008 	FN(ktime_get_tai_ns, 208, ##ctx)		\
6009 	FN(user_ringbuf_drain, 209, ##ctx)		\
6010 	FN(cgrp_storage_get, 210, ##ctx)		\
6011 	FN(cgrp_storage_delete, 211, ##ctx)		\
6012 	/* */
6013 
6014 /* backwards-compatibility macros for users of __BPF_FUNC_MAPPER that don't
6015  * know or care about integer value that is now passed as second argument
6016  */
6017 #define __BPF_FUNC_MAPPER_APPLY(name, value, FN) FN(name),
6018 #define __BPF_FUNC_MAPPER(FN) ___BPF_FUNC_MAPPER(__BPF_FUNC_MAPPER_APPLY, FN)
6019 
6020 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
6021  * function eBPF program intends to call
6022  */
6023 #define __BPF_ENUM_FN(x, y) BPF_FUNC_ ## x = y,
6024 enum bpf_func_id {
6025 	___BPF_FUNC_MAPPER(__BPF_ENUM_FN)
6026 	__BPF_FUNC_MAX_ID,
6027 };
6028 #undef __BPF_ENUM_FN
6029 
6030 /* All flags used by eBPF helper functions, placed here. */
6031 
6032 /* BPF_FUNC_skb_store_bytes flags. */
6033 enum {
6034 	BPF_F_RECOMPUTE_CSUM		= (1ULL << 0),
6035 	BPF_F_INVALIDATE_HASH		= (1ULL << 1),
6036 };
6037 
6038 /* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags.
6039  * First 4 bits are for passing the header field size.
6040  */
6041 enum {
6042 	BPF_F_HDR_FIELD_MASK		= 0xfULL,
6043 };
6044 
6045 /* BPF_FUNC_l4_csum_replace flags. */
6046 enum {
6047 	BPF_F_PSEUDO_HDR		= (1ULL << 4),
6048 	BPF_F_MARK_MANGLED_0		= (1ULL << 5),
6049 	BPF_F_MARK_ENFORCE		= (1ULL << 6),
6050 };
6051 
6052 /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
6053 enum {
6054 	BPF_F_TUNINFO_IPV6		= (1ULL << 0),
6055 };
6056 
6057 /* flags for both BPF_FUNC_get_stackid and BPF_FUNC_get_stack. */
6058 enum {
6059 	BPF_F_SKIP_FIELD_MASK		= 0xffULL,
6060 	BPF_F_USER_STACK		= (1ULL << 8),
6061 /* flags used by BPF_FUNC_get_stackid only. */
6062 	BPF_F_FAST_STACK_CMP		= (1ULL << 9),
6063 	BPF_F_REUSE_STACKID		= (1ULL << 10),
6064 /* flags used by BPF_FUNC_get_stack only. */
6065 	BPF_F_USER_BUILD_ID		= (1ULL << 11),
6066 };
6067 
6068 /* BPF_FUNC_skb_set_tunnel_key flags. */
6069 enum {
6070 	BPF_F_ZERO_CSUM_TX		= (1ULL << 1),
6071 	BPF_F_DONT_FRAGMENT		= (1ULL << 2),
6072 	BPF_F_SEQ_NUMBER		= (1ULL << 3),
6073 	BPF_F_NO_TUNNEL_KEY		= (1ULL << 4),
6074 };
6075 
6076 /* BPF_FUNC_skb_get_tunnel_key flags. */
6077 enum {
6078 	BPF_F_TUNINFO_FLAGS		= (1ULL << 4),
6079 };
6080 
6081 /* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and
6082  * BPF_FUNC_perf_event_read_value flags.
6083  */
6084 enum {
6085 	BPF_F_INDEX_MASK		= 0xffffffffULL,
6086 	BPF_F_CURRENT_CPU		= BPF_F_INDEX_MASK,
6087 /* BPF_FUNC_perf_event_output for sk_buff input context. */
6088 	BPF_F_CTXLEN_MASK		= (0xfffffULL << 32),
6089 };
6090 
6091 /* Current network namespace */
6092 enum {
6093 	BPF_F_CURRENT_NETNS		= (-1L),
6094 };
6095 
6096 /* BPF_FUNC_csum_level level values. */
6097 enum {
6098 	BPF_CSUM_LEVEL_QUERY,
6099 	BPF_CSUM_LEVEL_INC,
6100 	BPF_CSUM_LEVEL_DEC,
6101 	BPF_CSUM_LEVEL_RESET,
6102 };
6103 
6104 /* BPF_FUNC_skb_adjust_room flags. */
6105 enum {
6106 	BPF_F_ADJ_ROOM_FIXED_GSO	= (1ULL << 0),
6107 	BPF_F_ADJ_ROOM_ENCAP_L3_IPV4	= (1ULL << 1),
6108 	BPF_F_ADJ_ROOM_ENCAP_L3_IPV6	= (1ULL << 2),
6109 	BPF_F_ADJ_ROOM_ENCAP_L4_GRE	= (1ULL << 3),
6110 	BPF_F_ADJ_ROOM_ENCAP_L4_UDP	= (1ULL << 4),
6111 	BPF_F_ADJ_ROOM_NO_CSUM_RESET	= (1ULL << 5),
6112 	BPF_F_ADJ_ROOM_ENCAP_L2_ETH	= (1ULL << 6),
6113 	BPF_F_ADJ_ROOM_DECAP_L3_IPV4	= (1ULL << 7),
6114 	BPF_F_ADJ_ROOM_DECAP_L3_IPV6	= (1ULL << 8),
6115 };
6116 
6117 enum {
6118 	BPF_ADJ_ROOM_ENCAP_L2_MASK	= 0xff,
6119 	BPF_ADJ_ROOM_ENCAP_L2_SHIFT	= 56,
6120 };
6121 
6122 #define BPF_F_ADJ_ROOM_ENCAP_L2(len)	(((__u64)len & \
6123 					  BPF_ADJ_ROOM_ENCAP_L2_MASK) \
6124 					 << BPF_ADJ_ROOM_ENCAP_L2_SHIFT)
6125 
6126 /* BPF_FUNC_sysctl_get_name flags. */
6127 enum {
6128 	BPF_F_SYSCTL_BASE_NAME		= (1ULL << 0),
6129 };
6130 
6131 /* BPF_FUNC_<kernel_obj>_storage_get flags */
6132 enum {
6133 	BPF_LOCAL_STORAGE_GET_F_CREATE	= (1ULL << 0),
6134 	/* BPF_SK_STORAGE_GET_F_CREATE is only kept for backward compatibility
6135 	 * and BPF_LOCAL_STORAGE_GET_F_CREATE must be used instead.
6136 	 */
6137 	BPF_SK_STORAGE_GET_F_CREATE  = BPF_LOCAL_STORAGE_GET_F_CREATE,
6138 };
6139 
6140 /* BPF_FUNC_read_branch_records flags. */
6141 enum {
6142 	BPF_F_GET_BRANCH_RECORDS_SIZE	= (1ULL << 0),
6143 };
6144 
6145 /* BPF_FUNC_bpf_ringbuf_commit, BPF_FUNC_bpf_ringbuf_discard, and
6146  * BPF_FUNC_bpf_ringbuf_output flags.
6147  */
6148 enum {
6149 	BPF_RB_NO_WAKEUP		= (1ULL << 0),
6150 	BPF_RB_FORCE_WAKEUP		= (1ULL << 1),
6151 };
6152 
6153 /* BPF_FUNC_bpf_ringbuf_query flags */
6154 enum {
6155 	BPF_RB_AVAIL_DATA = 0,
6156 	BPF_RB_RING_SIZE = 1,
6157 	BPF_RB_CONS_POS = 2,
6158 	BPF_RB_PROD_POS = 3,
6159 };
6160 
6161 /* BPF ring buffer constants */
6162 enum {
6163 	BPF_RINGBUF_BUSY_BIT		= (1U << 31),
6164 	BPF_RINGBUF_DISCARD_BIT		= (1U << 30),
6165 	BPF_RINGBUF_HDR_SZ		= 8,
6166 };
6167 
6168 /* BPF_FUNC_sk_assign flags in bpf_sk_lookup context. */
6169 enum {
6170 	BPF_SK_LOOKUP_F_REPLACE		= (1ULL << 0),
6171 	BPF_SK_LOOKUP_F_NO_REUSEPORT	= (1ULL << 1),
6172 };
6173 
6174 /* Mode for BPF_FUNC_skb_adjust_room helper. */
6175 enum bpf_adj_room_mode {
6176 	BPF_ADJ_ROOM_NET,
6177 	BPF_ADJ_ROOM_MAC,
6178 };
6179 
6180 /* Mode for BPF_FUNC_skb_load_bytes_relative helper. */
6181 enum bpf_hdr_start_off {
6182 	BPF_HDR_START_MAC,
6183 	BPF_HDR_START_NET,
6184 };
6185 
6186 /* Encapsulation type for BPF_FUNC_lwt_push_encap helper. */
6187 enum bpf_lwt_encap_mode {
6188 	BPF_LWT_ENCAP_SEG6,
6189 	BPF_LWT_ENCAP_SEG6_INLINE,
6190 	BPF_LWT_ENCAP_IP,
6191 };
6192 
6193 /* Flags for bpf_bprm_opts_set helper */
6194 enum {
6195 	BPF_F_BPRM_SECUREEXEC	= (1ULL << 0),
6196 };
6197 
6198 /* Flags for bpf_redirect and bpf_redirect_map helpers */
6199 enum {
6200 	BPF_F_INGRESS		= (1ULL << 0), /* used for skb path */
6201 	BPF_F_BROADCAST		= (1ULL << 3), /* used for XDP path */
6202 	BPF_F_EXCLUDE_INGRESS	= (1ULL << 4), /* used for XDP path */
6203 #define BPF_F_REDIRECT_FLAGS (BPF_F_INGRESS | BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS)
6204 };
6205 
6206 #define __bpf_md_ptr(type, name)	\
6207 union {					\
6208 	type name;			\
6209 	__u64 :64;			\
6210 } __attribute__((aligned(8)))
6211 
6212 /* The enum used in skb->tstamp_type. It specifies the clock type
6213  * of the time stored in the skb->tstamp.
6214  */
6215 enum {
6216 	BPF_SKB_TSTAMP_UNSPEC = 0,		/* DEPRECATED */
6217 	BPF_SKB_TSTAMP_DELIVERY_MONO = 1,	/* DEPRECATED */
6218 	BPF_SKB_CLOCK_REALTIME = 0,
6219 	BPF_SKB_CLOCK_MONOTONIC = 1,
6220 	BPF_SKB_CLOCK_TAI = 2,
6221 	/* For any future BPF_SKB_CLOCK_* that the bpf prog cannot handle,
6222 	 * the bpf prog can try to deduce it by ingress/egress/skb->sk->sk_clockid.
6223 	 */
6224 };
6225 
6226 /* user accessible mirror of in-kernel sk_buff.
6227  * new fields can only be added to the end of this structure
6228  */
6229 struct __sk_buff {
6230 	__u32 len;
6231 	__u32 pkt_type;
6232 	__u32 mark;
6233 	__u32 queue_mapping;
6234 	__u32 protocol;
6235 	__u32 vlan_present;
6236 	__u32 vlan_tci;
6237 	__u32 vlan_proto;
6238 	__u32 priority;
6239 	__u32 ingress_ifindex;
6240 	__u32 ifindex;
6241 	__u32 tc_index;
6242 	__u32 cb[5];
6243 	__u32 hash;
6244 	__u32 tc_classid;
6245 	__u32 data;
6246 	__u32 data_end;
6247 	__u32 napi_id;
6248 
6249 	/* Accessed by BPF_PROG_TYPE_sk_skb types from here to ... */
6250 	__u32 family;
6251 	__u32 remote_ip4;	/* Stored in network byte order */
6252 	__u32 local_ip4;	/* Stored in network byte order */
6253 	__u32 remote_ip6[4];	/* Stored in network byte order */
6254 	__u32 local_ip6[4];	/* Stored in network byte order */
6255 	__u32 remote_port;	/* Stored in network byte order */
6256 	__u32 local_port;	/* stored in host byte order */
6257 	/* ... here. */
6258 
6259 	__u32 data_meta;
6260 	__bpf_md_ptr(struct bpf_flow_keys *, flow_keys);
6261 	__u64 tstamp;
6262 	__u32 wire_len;
6263 	__u32 gso_segs;
6264 	__bpf_md_ptr(struct bpf_sock *, sk);
6265 	__u32 gso_size;
6266 	__u8  tstamp_type;
6267 	__u32 :24;		/* Padding, future use. */
6268 	__u64 hwtstamp;
6269 };
6270 
6271 struct bpf_tunnel_key {
6272 	__u32 tunnel_id;
6273 	union {
6274 		__u32 remote_ipv4;
6275 		__u32 remote_ipv6[4];
6276 	};
6277 	__u8 tunnel_tos;
6278 	__u8 tunnel_ttl;
6279 	union {
6280 		__u16 tunnel_ext;	/* compat */
6281 		__be16 tunnel_flags;
6282 	};
6283 	__u32 tunnel_label;
6284 	union {
6285 		__u32 local_ipv4;
6286 		__u32 local_ipv6[4];
6287 	};
6288 };
6289 
6290 /* user accessible mirror of in-kernel xfrm_state.
6291  * new fields can only be added to the end of this structure
6292  */
6293 struct bpf_xfrm_state {
6294 	__u32 reqid;
6295 	__u32 spi;	/* Stored in network byte order */
6296 	__u16 family;
6297 	__u16 ext;	/* Padding, future use. */
6298 	union {
6299 		__u32 remote_ipv4;	/* Stored in network byte order */
6300 		__u32 remote_ipv6[4];	/* Stored in network byte order */
6301 	};
6302 };
6303 
6304 /* Generic BPF return codes which all BPF program types may support.
6305  * The values are binary compatible with their TC_ACT_* counter-part to
6306  * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT
6307  * programs.
6308  *
6309  * XDP is handled seprately, see XDP_*.
6310  */
6311 enum bpf_ret_code {
6312 	BPF_OK = 0,
6313 	/* 1 reserved */
6314 	BPF_DROP = 2,
6315 	/* 3-6 reserved */
6316 	BPF_REDIRECT = 7,
6317 	/* >127 are reserved for prog type specific return codes.
6318 	 *
6319 	 * BPF_LWT_REROUTE: used by BPF_PROG_TYPE_LWT_IN and
6320 	 *    BPF_PROG_TYPE_LWT_XMIT to indicate that skb had been
6321 	 *    changed and should be routed based on its new L3 header.
6322 	 *    (This is an L3 redirect, as opposed to L2 redirect
6323 	 *    represented by BPF_REDIRECT above).
6324 	 */
6325 	BPF_LWT_REROUTE = 128,
6326 	/* BPF_FLOW_DISSECTOR_CONTINUE: used by BPF_PROG_TYPE_FLOW_DISSECTOR
6327 	 *   to indicate that no custom dissection was performed, and
6328 	 *   fallback to standard dissector is requested.
6329 	 */
6330 	BPF_FLOW_DISSECTOR_CONTINUE = 129,
6331 };
6332 
6333 struct bpf_sock {
6334 	__u32 bound_dev_if;
6335 	__u32 family;
6336 	__u32 type;
6337 	__u32 protocol;
6338 	__u32 mark;
6339 	__u32 priority;
6340 	/* IP address also allows 1 and 2 bytes access */
6341 	__u32 src_ip4;
6342 	__u32 src_ip6[4];
6343 	__u32 src_port;		/* host byte order */
6344 	__be16 dst_port;	/* network byte order */
6345 	__u16 :16;		/* zero padding */
6346 	__u32 dst_ip4;
6347 	__u32 dst_ip6[4];
6348 	__u32 state;
6349 	__s32 rx_queue_mapping;
6350 };
6351 
6352 struct bpf_tcp_sock {
6353 	__u32 snd_cwnd;		/* Sending congestion window		*/
6354 	__u32 srtt_us;		/* smoothed round trip time << 3 in usecs */
6355 	__u32 rtt_min;
6356 	__u32 snd_ssthresh;	/* Slow start size threshold		*/
6357 	__u32 rcv_nxt;		/* What we want to receive next		*/
6358 	__u32 snd_nxt;		/* Next sequence we send		*/
6359 	__u32 snd_una;		/* First byte we want an ack for	*/
6360 	__u32 mss_cache;	/* Cached effective mss, not including SACKS */
6361 	__u32 ecn_flags;	/* ECN status bits.			*/
6362 	__u32 rate_delivered;	/* saved rate sample: packets delivered */
6363 	__u32 rate_interval_us;	/* saved rate sample: time elapsed */
6364 	__u32 packets_out;	/* Packets which are "in flight"	*/
6365 	__u32 retrans_out;	/* Retransmitted packets out		*/
6366 	__u32 total_retrans;	/* Total retransmits for entire connection */
6367 	__u32 segs_in;		/* RFC4898 tcpEStatsPerfSegsIn
6368 				 * total number of segments in.
6369 				 */
6370 	__u32 data_segs_in;	/* RFC4898 tcpEStatsPerfDataSegsIn
6371 				 * total number of data segments in.
6372 				 */
6373 	__u32 segs_out;		/* RFC4898 tcpEStatsPerfSegsOut
6374 				 * The total number of segments sent.
6375 				 */
6376 	__u32 data_segs_out;	/* RFC4898 tcpEStatsPerfDataSegsOut
6377 				 * total number of data segments sent.
6378 				 */
6379 	__u32 lost_out;		/* Lost packets			*/
6380 	__u32 sacked_out;	/* SACK'd packets			*/
6381 	__u64 bytes_received;	/* RFC4898 tcpEStatsAppHCThruOctetsReceived
6382 				 * sum(delta(rcv_nxt)), or how many bytes
6383 				 * were acked.
6384 				 */
6385 	__u64 bytes_acked;	/* RFC4898 tcpEStatsAppHCThruOctetsAcked
6386 				 * sum(delta(snd_una)), or how many bytes
6387 				 * were acked.
6388 				 */
6389 	__u32 dsack_dups;	/* RFC4898 tcpEStatsStackDSACKDups
6390 				 * total number of DSACK blocks received
6391 				 */
6392 	__u32 delivered;	/* Total data packets delivered incl. rexmits */
6393 	__u32 delivered_ce;	/* Like the above but only ECE marked packets */
6394 	__u32 icsk_retransmits;	/* Number of unrecovered [RTO] timeouts */
6395 };
6396 
6397 struct bpf_sock_tuple {
6398 	union {
6399 		struct {
6400 			__be32 saddr;
6401 			__be32 daddr;
6402 			__be16 sport;
6403 			__be16 dport;
6404 		} ipv4;
6405 		struct {
6406 			__be32 saddr[4];
6407 			__be32 daddr[4];
6408 			__be16 sport;
6409 			__be16 dport;
6410 		} ipv6;
6411 	};
6412 };
6413 
6414 /* (Simplified) user return codes for tcx prog type.
6415  * A valid tcx program must return one of these defined values. All other
6416  * return codes are reserved for future use. Must remain compatible with
6417  * their TC_ACT_* counter-parts. For compatibility in behavior, unknown
6418  * return codes are mapped to TCX_NEXT.
6419  */
6420 enum tcx_action_base {
6421 	TCX_NEXT	= -1,
6422 	TCX_PASS	= 0,
6423 	TCX_DROP	= 2,
6424 	TCX_REDIRECT	= 7,
6425 };
6426 
6427 struct bpf_xdp_sock {
6428 	__u32 queue_id;
6429 };
6430 
6431 #define XDP_PACKET_HEADROOM 256
6432 
6433 /* User return codes for XDP prog type.
6434  * A valid XDP program must return one of these defined values. All other
6435  * return codes are reserved for future use. Unknown return codes will
6436  * result in packet drops and a warning via bpf_warn_invalid_xdp_action().
6437  */
6438 enum xdp_action {
6439 	XDP_ABORTED = 0,
6440 	XDP_DROP,
6441 	XDP_PASS,
6442 	XDP_TX,
6443 	XDP_REDIRECT,
6444 };
6445 
6446 /* user accessible metadata for XDP packet hook
6447  * new fields must be added to the end of this structure
6448  */
6449 struct xdp_md {
6450 	__u32 data;
6451 	__u32 data_end;
6452 	__u32 data_meta;
6453 	/* Below access go through struct xdp_rxq_info */
6454 	__u32 ingress_ifindex; /* rxq->dev->ifindex */
6455 	__u32 rx_queue_index;  /* rxq->queue_index  */
6456 
6457 	__u32 egress_ifindex;  /* txq->dev->ifindex */
6458 };
6459 
6460 /* DEVMAP map-value layout
6461  *
6462  * The struct data-layout of map-value is a configuration interface.
6463  * New members can only be added to the end of this structure.
6464  */
6465 struct bpf_devmap_val {
6466 	__u32 ifindex;   /* device index */
6467 	union {
6468 		int   fd;  /* prog fd on map write */
6469 		__u32 id;  /* prog id on map read */
6470 	} bpf_prog;
6471 };
6472 
6473 /* CPUMAP map-value layout
6474  *
6475  * The struct data-layout of map-value is a configuration interface.
6476  * New members can only be added to the end of this structure.
6477  */
6478 struct bpf_cpumap_val {
6479 	__u32 qsize;	/* queue size to remote target CPU */
6480 	union {
6481 		int   fd;	/* prog fd on map write */
6482 		__u32 id;	/* prog id on map read */
6483 	} bpf_prog;
6484 };
6485 
6486 enum sk_action {
6487 	SK_DROP = 0,
6488 	SK_PASS,
6489 };
6490 
6491 /* user accessible metadata for SK_MSG packet hook, new fields must
6492  * be added to the end of this structure
6493  */
6494 struct sk_msg_md {
6495 	__bpf_md_ptr(void *, data);
6496 	__bpf_md_ptr(void *, data_end);
6497 
6498 	__u32 family;
6499 	__u32 remote_ip4;	/* Stored in network byte order */
6500 	__u32 local_ip4;	/* Stored in network byte order */
6501 	__u32 remote_ip6[4];	/* Stored in network byte order */
6502 	__u32 local_ip6[4];	/* Stored in network byte order */
6503 	__u32 remote_port;	/* Stored in network byte order */
6504 	__u32 local_port;	/* stored in host byte order */
6505 	__u32 size;		/* Total size of sk_msg */
6506 
6507 	__bpf_md_ptr(struct bpf_sock *, sk); /* current socket */
6508 };
6509 
6510 struct sk_reuseport_md {
6511 	/*
6512 	 * Start of directly accessible data. It begins from
6513 	 * the tcp/udp header.
6514 	 */
6515 	__bpf_md_ptr(void *, data);
6516 	/* End of directly accessible data */
6517 	__bpf_md_ptr(void *, data_end);
6518 	/*
6519 	 * Total length of packet (starting from the tcp/udp header).
6520 	 * Note that the directly accessible bytes (data_end - data)
6521 	 * could be less than this "len".  Those bytes could be
6522 	 * indirectly read by a helper "bpf_skb_load_bytes()".
6523 	 */
6524 	__u32 len;
6525 	/*
6526 	 * Eth protocol in the mac header (network byte order). e.g.
6527 	 * ETH_P_IP(0x0800) and ETH_P_IPV6(0x86DD)
6528 	 */
6529 	__u32 eth_protocol;
6530 	__u32 ip_protocol;	/* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */
6531 	__u32 bind_inany;	/* Is sock bound to an INANY address? */
6532 	__u32 hash;		/* A hash of the packet 4 tuples */
6533 	/* When reuse->migrating_sk is NULL, it is selecting a sk for the
6534 	 * new incoming connection request (e.g. selecting a listen sk for
6535 	 * the received SYN in the TCP case).  reuse->sk is one of the sk
6536 	 * in the reuseport group. The bpf prog can use reuse->sk to learn
6537 	 * the local listening ip/port without looking into the skb.
6538 	 *
6539 	 * When reuse->migrating_sk is not NULL, reuse->sk is closed and
6540 	 * reuse->migrating_sk is the socket that needs to be migrated
6541 	 * to another listening socket.  migrating_sk could be a fullsock
6542 	 * sk that is fully established or a reqsk that is in-the-middle
6543 	 * of 3-way handshake.
6544 	 */
6545 	__bpf_md_ptr(struct bpf_sock *, sk);
6546 	__bpf_md_ptr(struct bpf_sock *, migrating_sk);
6547 };
6548 
6549 #define BPF_TAG_SIZE	8
6550 
6551 struct bpf_prog_info {
6552 	__u32 type;
6553 	__u32 id;
6554 	__u8  tag[BPF_TAG_SIZE];
6555 	__u32 jited_prog_len;
6556 	__u32 xlated_prog_len;
6557 	__aligned_u64 jited_prog_insns;
6558 	__aligned_u64 xlated_prog_insns;
6559 	__u64 load_time;	/* ns since boottime */
6560 	__u32 created_by_uid;
6561 	__u32 nr_map_ids;
6562 	__aligned_u64 map_ids;
6563 	char name[BPF_OBJ_NAME_LEN];
6564 	__u32 ifindex;
6565 	__u32 gpl_compatible:1;
6566 	__u32 :31; /* alignment pad */
6567 	__u64 netns_dev;
6568 	__u64 netns_ino;
6569 	__u32 nr_jited_ksyms;
6570 	__u32 nr_jited_func_lens;
6571 	__aligned_u64 jited_ksyms;
6572 	__aligned_u64 jited_func_lens;
6573 	__u32 btf_id;
6574 	__u32 func_info_rec_size;
6575 	__aligned_u64 func_info;
6576 	__u32 nr_func_info;
6577 	__u32 nr_line_info;
6578 	__aligned_u64 line_info;
6579 	__aligned_u64 jited_line_info;
6580 	__u32 nr_jited_line_info;
6581 	__u32 line_info_rec_size;
6582 	__u32 jited_line_info_rec_size;
6583 	__u32 nr_prog_tags;
6584 	__aligned_u64 prog_tags;
6585 	__u64 run_time_ns;
6586 	__u64 run_cnt;
6587 	__u64 recursion_misses;
6588 	__u32 verified_insns;
6589 	__u32 attach_btf_obj_id;
6590 	__u32 attach_btf_id;
6591 } __attribute__((aligned(8)));
6592 
6593 struct bpf_map_info {
6594 	__u32 type;
6595 	__u32 id;
6596 	__u32 key_size;
6597 	__u32 value_size;
6598 	__u32 max_entries;
6599 	__u32 map_flags;
6600 	char  name[BPF_OBJ_NAME_LEN];
6601 	__u32 ifindex;
6602 	__u32 btf_vmlinux_value_type_id;
6603 	__u64 netns_dev;
6604 	__u64 netns_ino;
6605 	__u32 btf_id;
6606 	__u32 btf_key_type_id;
6607 	__u32 btf_value_type_id;
6608 	__u32 btf_vmlinux_id;
6609 	__u64 map_extra;
6610 } __attribute__((aligned(8)));
6611 
6612 struct bpf_btf_info {
6613 	__aligned_u64 btf;
6614 	__u32 btf_size;
6615 	__u32 id;
6616 	__aligned_u64 name;
6617 	__u32 name_len;
6618 	__u32 kernel_btf;
6619 } __attribute__((aligned(8)));
6620 
6621 struct bpf_link_info {
6622 	__u32 type;
6623 	__u32 id;
6624 	__u32 prog_id;
6625 	union {
6626 		struct {
6627 			__aligned_u64 tp_name; /* in/out: tp_name buffer ptr */
6628 			__u32 tp_name_len;     /* in/out: tp_name buffer len */
6629 		} raw_tracepoint;
6630 		struct {
6631 			__u32 attach_type;
6632 			__u32 target_obj_id; /* prog_id for PROG_EXT, otherwise btf object id */
6633 			__u32 target_btf_id; /* BTF type id inside the object */
6634 		} tracing;
6635 		struct {
6636 			__u64 cgroup_id;
6637 			__u32 attach_type;
6638 		} cgroup;
6639 		struct {
6640 			__aligned_u64 target_name; /* in/out: target_name buffer ptr */
6641 			__u32 target_name_len;	   /* in/out: target_name buffer len */
6642 
6643 			/* If the iter specific field is 32 bits, it can be put
6644 			 * in the first or second union. Otherwise it should be
6645 			 * put in the second union.
6646 			 */
6647 			union {
6648 				struct {
6649 					__u32 map_id;
6650 				} map;
6651 			};
6652 			union {
6653 				struct {
6654 					__u64 cgroup_id;
6655 					__u32 order;
6656 				} cgroup;
6657 				struct {
6658 					__u32 tid;
6659 					__u32 pid;
6660 				} task;
6661 			};
6662 		} iter;
6663 		struct  {
6664 			__u32 netns_ino;
6665 			__u32 attach_type;
6666 		} netns;
6667 		struct {
6668 			__u32 ifindex;
6669 		} xdp;
6670 		struct {
6671 			__u32 map_id;
6672 		} struct_ops;
6673 		struct {
6674 			__u32 pf;
6675 			__u32 hooknum;
6676 			__s32 priority;
6677 			__u32 flags;
6678 		} netfilter;
6679 		struct {
6680 			__aligned_u64 addrs;
6681 			__u32 count; /* in/out: kprobe_multi function count */
6682 			__u32 flags;
6683 			__u64 missed;
6684 			__aligned_u64 cookies;
6685 		} kprobe_multi;
6686 		struct {
6687 			__aligned_u64 path;
6688 			__aligned_u64 offsets;
6689 			__aligned_u64 ref_ctr_offsets;
6690 			__aligned_u64 cookies;
6691 			__u32 path_size; /* in/out: real path size on success, including zero byte */
6692 			__u32 count; /* in/out: uprobe_multi offsets/ref_ctr_offsets/cookies count */
6693 			__u32 flags;
6694 			__u32 pid;
6695 		} uprobe_multi;
6696 		struct {
6697 			__u32 type; /* enum bpf_perf_event_type */
6698 			__u32 :32;
6699 			union {
6700 				struct {
6701 					__aligned_u64 file_name; /* in/out */
6702 					__u32 name_len;
6703 					__u32 offset; /* offset from file_name */
6704 					__u64 cookie;
6705 				} uprobe; /* BPF_PERF_EVENT_UPROBE, BPF_PERF_EVENT_URETPROBE */
6706 				struct {
6707 					__aligned_u64 func_name; /* in/out */
6708 					__u32 name_len;
6709 					__u32 offset; /* offset from func_name */
6710 					__u64 addr;
6711 					__u64 missed;
6712 					__u64 cookie;
6713 				} kprobe; /* BPF_PERF_EVENT_KPROBE, BPF_PERF_EVENT_KRETPROBE */
6714 				struct {
6715 					__aligned_u64 tp_name;   /* in/out */
6716 					__u32 name_len;
6717 					__u32 :32;
6718 					__u64 cookie;
6719 				} tracepoint; /* BPF_PERF_EVENT_TRACEPOINT */
6720 				struct {
6721 					__u64 config;
6722 					__u32 type;
6723 					__u32 :32;
6724 					__u64 cookie;
6725 				} event; /* BPF_PERF_EVENT_EVENT */
6726 			};
6727 		} perf_event;
6728 		struct {
6729 			__u32 ifindex;
6730 			__u32 attach_type;
6731 		} tcx;
6732 		struct {
6733 			__u32 ifindex;
6734 			__u32 attach_type;
6735 		} netkit;
6736 		struct {
6737 			__u32 map_id;
6738 			__u32 attach_type;
6739 		} sockmap;
6740 	};
6741 } __attribute__((aligned(8)));
6742 
6743 /* User bpf_sock_addr struct to access socket fields and sockaddr struct passed
6744  * by user and intended to be used by socket (e.g. to bind to, depends on
6745  * attach type).
6746  */
6747 struct bpf_sock_addr {
6748 	__u32 user_family;	/* Allows 4-byte read, but no write. */
6749 	__u32 user_ip4;		/* Allows 1,2,4-byte read and 4-byte write.
6750 				 * Stored in network byte order.
6751 				 */
6752 	__u32 user_ip6[4];	/* Allows 1,2,4,8-byte read and 4,8-byte write.
6753 				 * Stored in network byte order.
6754 				 */
6755 	__u32 user_port;	/* Allows 1,2,4-byte read and 4-byte write.
6756 				 * Stored in network byte order
6757 				 */
6758 	__u32 family;		/* Allows 4-byte read, but no write */
6759 	__u32 type;		/* Allows 4-byte read, but no write */
6760 	__u32 protocol;		/* Allows 4-byte read, but no write */
6761 	__u32 msg_src_ip4;	/* Allows 1,2,4-byte read and 4-byte write.
6762 				 * Stored in network byte order.
6763 				 */
6764 	__u32 msg_src_ip6[4];	/* Allows 1,2,4,8-byte read and 4,8-byte write.
6765 				 * Stored in network byte order.
6766 				 */
6767 	__bpf_md_ptr(struct bpf_sock *, sk);
6768 };
6769 
6770 /* User bpf_sock_ops struct to access socket values and specify request ops
6771  * and their replies.
6772  * Some of this fields are in network (bigendian) byte order and may need
6773  * to be converted before use (bpf_ntohl() defined in samples/bpf/bpf_endian.h).
6774  * New fields can only be added at the end of this structure
6775  */
6776 struct bpf_sock_ops {
6777 	__u32 op;
6778 	union {
6779 		__u32 args[4];		/* Optionally passed to bpf program */
6780 		__u32 reply;		/* Returned by bpf program	    */
6781 		__u32 replylong[4];	/* Optionally returned by bpf prog  */
6782 	};
6783 	__u32 family;
6784 	__u32 remote_ip4;	/* Stored in network byte order */
6785 	__u32 local_ip4;	/* Stored in network byte order */
6786 	__u32 remote_ip6[4];	/* Stored in network byte order */
6787 	__u32 local_ip6[4];	/* Stored in network byte order */
6788 	__u32 remote_port;	/* Stored in network byte order */
6789 	__u32 local_port;	/* stored in host byte order */
6790 	__u32 is_fullsock;	/* Some TCP fields are only valid if
6791 				 * there is a full socket. If not, the
6792 				 * fields read as zero.
6793 				 */
6794 	__u32 snd_cwnd;
6795 	__u32 srtt_us;		/* Averaged RTT << 3 in usecs */
6796 	__u32 bpf_sock_ops_cb_flags; /* flags defined in uapi/linux/tcp.h */
6797 	__u32 state;
6798 	__u32 rtt_min;
6799 	__u32 snd_ssthresh;
6800 	__u32 rcv_nxt;
6801 	__u32 snd_nxt;
6802 	__u32 snd_una;
6803 	__u32 mss_cache;
6804 	__u32 ecn_flags;
6805 	__u32 rate_delivered;
6806 	__u32 rate_interval_us;
6807 	__u32 packets_out;
6808 	__u32 retrans_out;
6809 	__u32 total_retrans;
6810 	__u32 segs_in;
6811 	__u32 data_segs_in;
6812 	__u32 segs_out;
6813 	__u32 data_segs_out;
6814 	__u32 lost_out;
6815 	__u32 sacked_out;
6816 	__u32 sk_txhash;
6817 	__u64 bytes_received;
6818 	__u64 bytes_acked;
6819 	__bpf_md_ptr(struct bpf_sock *, sk);
6820 	/* [skb_data, skb_data_end) covers the whole TCP header.
6821 	 *
6822 	 * BPF_SOCK_OPS_PARSE_HDR_OPT_CB: The packet received
6823 	 * BPF_SOCK_OPS_HDR_OPT_LEN_CB:   Not useful because the
6824 	 *                                header has not been written.
6825 	 * BPF_SOCK_OPS_WRITE_HDR_OPT_CB: The header and options have
6826 	 *				  been written so far.
6827 	 * BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:  The SYNACK that concludes
6828 	 *					the 3WHS.
6829 	 * BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB: The ACK that concludes
6830 	 *					the 3WHS.
6831 	 *
6832 	 * bpf_load_hdr_opt() can also be used to read a particular option.
6833 	 */
6834 	__bpf_md_ptr(void *, skb_data);
6835 	__bpf_md_ptr(void *, skb_data_end);
6836 	__u32 skb_len;		/* The total length of a packet.
6837 				 * It includes the header, options,
6838 				 * and payload.
6839 				 */
6840 	__u32 skb_tcp_flags;	/* tcp_flags of the header.  It provides
6841 				 * an easy way to check for tcp_flags
6842 				 * without parsing skb_data.
6843 				 *
6844 				 * In particular, the skb_tcp_flags
6845 				 * will still be available in
6846 				 * BPF_SOCK_OPS_HDR_OPT_LEN even though
6847 				 * the outgoing header has not
6848 				 * been written yet.
6849 				 */
6850 	__u64 skb_hwtstamp;
6851 };
6852 
6853 /* Definitions for bpf_sock_ops_cb_flags */
6854 enum {
6855 	BPF_SOCK_OPS_RTO_CB_FLAG	= (1<<0),
6856 	BPF_SOCK_OPS_RETRANS_CB_FLAG	= (1<<1),
6857 	BPF_SOCK_OPS_STATE_CB_FLAG	= (1<<2),
6858 	BPF_SOCK_OPS_RTT_CB_FLAG	= (1<<3),
6859 	/* Call bpf for all received TCP headers.  The bpf prog will be
6860 	 * called under sock_ops->op == BPF_SOCK_OPS_PARSE_HDR_OPT_CB
6861 	 *
6862 	 * Please refer to the comment in BPF_SOCK_OPS_PARSE_HDR_OPT_CB
6863 	 * for the header option related helpers that will be useful
6864 	 * to the bpf programs.
6865 	 *
6866 	 * It could be used at the client/active side (i.e. connect() side)
6867 	 * when the server told it that the server was in syncookie
6868 	 * mode and required the active side to resend the bpf-written
6869 	 * options.  The active side can keep writing the bpf-options until
6870 	 * it received a valid packet from the server side to confirm
6871 	 * the earlier packet (and options) has been received.  The later
6872 	 * example patch is using it like this at the active side when the
6873 	 * server is in syncookie mode.
6874 	 *
6875 	 * The bpf prog will usually turn this off in the common cases.
6876 	 */
6877 	BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG	= (1<<4),
6878 	/* Call bpf when kernel has received a header option that
6879 	 * the kernel cannot handle.  The bpf prog will be called under
6880 	 * sock_ops->op == BPF_SOCK_OPS_PARSE_HDR_OPT_CB.
6881 	 *
6882 	 * Please refer to the comment in BPF_SOCK_OPS_PARSE_HDR_OPT_CB
6883 	 * for the header option related helpers that will be useful
6884 	 * to the bpf programs.
6885 	 */
6886 	BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG = (1<<5),
6887 	/* Call bpf when the kernel is writing header options for the
6888 	 * outgoing packet.  The bpf prog will first be called
6889 	 * to reserve space in a skb under
6890 	 * sock_ops->op == BPF_SOCK_OPS_HDR_OPT_LEN_CB.  Then
6891 	 * the bpf prog will be called to write the header option(s)
6892 	 * under sock_ops->op == BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
6893 	 *
6894 	 * Please refer to the comment in BPF_SOCK_OPS_HDR_OPT_LEN_CB
6895 	 * and BPF_SOCK_OPS_WRITE_HDR_OPT_CB for the header option
6896 	 * related helpers that will be useful to the bpf programs.
6897 	 *
6898 	 * The kernel gets its chance to reserve space and write
6899 	 * options first before the BPF program does.
6900 	 */
6901 	BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG = (1<<6),
6902 /* Mask of all currently supported cb flags */
6903 	BPF_SOCK_OPS_ALL_CB_FLAGS       = 0x7F,
6904 };
6905 
6906 /* List of known BPF sock_ops operators.
6907  * New entries can only be added at the end
6908  */
6909 enum {
6910 	BPF_SOCK_OPS_VOID,
6911 	BPF_SOCK_OPS_TIMEOUT_INIT,	/* Should return SYN-RTO value to use or
6912 					 * -1 if default value should be used
6913 					 */
6914 	BPF_SOCK_OPS_RWND_INIT,		/* Should return initial advertized
6915 					 * window (in packets) or -1 if default
6916 					 * value should be used
6917 					 */
6918 	BPF_SOCK_OPS_TCP_CONNECT_CB,	/* Calls BPF program right before an
6919 					 * active connection is initialized
6920 					 */
6921 	BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB,	/* Calls BPF program when an
6922 						 * active connection is
6923 						 * established
6924 						 */
6925 	BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB,	/* Calls BPF program when a
6926 						 * passive connection is
6927 						 * established
6928 						 */
6929 	BPF_SOCK_OPS_NEEDS_ECN,		/* If connection's congestion control
6930 					 * needs ECN
6931 					 */
6932 	BPF_SOCK_OPS_BASE_RTT,		/* Get base RTT. The correct value is
6933 					 * based on the path and may be
6934 					 * dependent on the congestion control
6935 					 * algorithm. In general it indicates
6936 					 * a congestion threshold. RTTs above
6937 					 * this indicate congestion
6938 					 */
6939 	BPF_SOCK_OPS_RTO_CB,		/* Called when an RTO has triggered.
6940 					 * Arg1: value of icsk_retransmits
6941 					 * Arg2: value of icsk_rto
6942 					 * Arg3: whether RTO has expired
6943 					 */
6944 	BPF_SOCK_OPS_RETRANS_CB,	/* Called when skb is retransmitted.
6945 					 * Arg1: sequence number of 1st byte
6946 					 * Arg2: # segments
6947 					 * Arg3: return value of
6948 					 *       tcp_transmit_skb (0 => success)
6949 					 */
6950 	BPF_SOCK_OPS_STATE_CB,		/* Called when TCP changes state.
6951 					 * Arg1: old_state
6952 					 * Arg2: new_state
6953 					 */
6954 	BPF_SOCK_OPS_TCP_LISTEN_CB,	/* Called on listen(2), right after
6955 					 * socket transition to LISTEN state.
6956 					 */
6957 	BPF_SOCK_OPS_RTT_CB,		/* Called on every RTT.
6958 					 * Arg1: measured RTT input (mrtt)
6959 					 * Arg2: updated srtt
6960 					 */
6961 	BPF_SOCK_OPS_PARSE_HDR_OPT_CB,	/* Parse the header option.
6962 					 * It will be called to handle
6963 					 * the packets received at
6964 					 * an already established
6965 					 * connection.
6966 					 *
6967 					 * sock_ops->skb_data:
6968 					 * Referring to the received skb.
6969 					 * It covers the TCP header only.
6970 					 *
6971 					 * bpf_load_hdr_opt() can also
6972 					 * be used to search for a
6973 					 * particular option.
6974 					 */
6975 	BPF_SOCK_OPS_HDR_OPT_LEN_CB,	/* Reserve space for writing the
6976 					 * header option later in
6977 					 * BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
6978 					 * Arg1: bool want_cookie. (in
6979 					 *       writing SYNACK only)
6980 					 *
6981 					 * sock_ops->skb_data:
6982 					 * Not available because no header has
6983 					 * been	written yet.
6984 					 *
6985 					 * sock_ops->skb_tcp_flags:
6986 					 * The tcp_flags of the
6987 					 * outgoing skb. (e.g. SYN, ACK, FIN).
6988 					 *
6989 					 * bpf_reserve_hdr_opt() should
6990 					 * be used to reserve space.
6991 					 */
6992 	BPF_SOCK_OPS_WRITE_HDR_OPT_CB,	/* Write the header options
6993 					 * Arg1: bool want_cookie. (in
6994 					 *       writing SYNACK only)
6995 					 *
6996 					 * sock_ops->skb_data:
6997 					 * Referring to the outgoing skb.
6998 					 * It covers the TCP header
6999 					 * that has already been written
7000 					 * by the kernel and the
7001 					 * earlier bpf-progs.
7002 					 *
7003 					 * sock_ops->skb_tcp_flags:
7004 					 * The tcp_flags of the outgoing
7005 					 * skb. (e.g. SYN, ACK, FIN).
7006 					 *
7007 					 * bpf_store_hdr_opt() should
7008 					 * be used to write the
7009 					 * option.
7010 					 *
7011 					 * bpf_load_hdr_opt() can also
7012 					 * be used to search for a
7013 					 * particular option that
7014 					 * has already been written
7015 					 * by the kernel or the
7016 					 * earlier bpf-progs.
7017 					 */
7018 };
7019 
7020 /* List of TCP states. There is a build check in net/ipv4/tcp.c to detect
7021  * changes between the TCP and BPF versions. Ideally this should never happen.
7022  * If it does, we need to add code to convert them before calling
7023  * the BPF sock_ops function.
7024  */
7025 enum {
7026 	BPF_TCP_ESTABLISHED = 1,
7027 	BPF_TCP_SYN_SENT,
7028 	BPF_TCP_SYN_RECV,
7029 	BPF_TCP_FIN_WAIT1,
7030 	BPF_TCP_FIN_WAIT2,
7031 	BPF_TCP_TIME_WAIT,
7032 	BPF_TCP_CLOSE,
7033 	BPF_TCP_CLOSE_WAIT,
7034 	BPF_TCP_LAST_ACK,
7035 	BPF_TCP_LISTEN,
7036 	BPF_TCP_CLOSING,	/* Now a valid state */
7037 	BPF_TCP_NEW_SYN_RECV,
7038 	BPF_TCP_BOUND_INACTIVE,
7039 
7040 	BPF_TCP_MAX_STATES	/* Leave at the end! */
7041 };
7042 
7043 enum {
7044 	TCP_BPF_IW		= 1001,	/* Set TCP initial congestion window */
7045 	TCP_BPF_SNDCWND_CLAMP	= 1002,	/* Set sndcwnd_clamp */
7046 	TCP_BPF_DELACK_MAX	= 1003, /* Max delay ack in usecs */
7047 	TCP_BPF_RTO_MIN		= 1004, /* Min delay ack in usecs */
7048 	/* Copy the SYN pkt to optval
7049 	 *
7050 	 * BPF_PROG_TYPE_SOCK_OPS only.  It is similar to the
7051 	 * bpf_getsockopt(TCP_SAVED_SYN) but it does not limit
7052 	 * to only getting from the saved_syn.  It can either get the
7053 	 * syn packet from:
7054 	 *
7055 	 * 1. the just-received SYN packet (only available when writing the
7056 	 *    SYNACK).  It will be useful when it is not necessary to
7057 	 *    save the SYN packet for latter use.  It is also the only way
7058 	 *    to get the SYN during syncookie mode because the syn
7059 	 *    packet cannot be saved during syncookie.
7060 	 *
7061 	 * OR
7062 	 *
7063 	 * 2. the earlier saved syn which was done by
7064 	 *    bpf_setsockopt(TCP_SAVE_SYN).
7065 	 *
7066 	 * The bpf_getsockopt(TCP_BPF_SYN*) option will hide where the
7067 	 * SYN packet is obtained.
7068 	 *
7069 	 * If the bpf-prog does not need the IP[46] header,  the
7070 	 * bpf-prog can avoid parsing the IP header by using
7071 	 * TCP_BPF_SYN.  Otherwise, the bpf-prog can get both
7072 	 * IP[46] and TCP header by using TCP_BPF_SYN_IP.
7073 	 *
7074 	 *      >0: Total number of bytes copied
7075 	 * -ENOSPC: Not enough space in optval. Only optlen number of
7076 	 *          bytes is copied.
7077 	 * -ENOENT: The SYN skb is not available now and the earlier SYN pkt
7078 	 *	    is not saved by setsockopt(TCP_SAVE_SYN).
7079 	 */
7080 	TCP_BPF_SYN		= 1005, /* Copy the TCP header */
7081 	TCP_BPF_SYN_IP		= 1006, /* Copy the IP[46] and TCP header */
7082 	TCP_BPF_SYN_MAC         = 1007, /* Copy the MAC, IP[46], and TCP header */
7083 	TCP_BPF_SOCK_OPS_CB_FLAGS = 1008, /* Get or Set TCP sock ops flags */
7084 };
7085 
7086 enum {
7087 	BPF_LOAD_HDR_OPT_TCP_SYN = (1ULL << 0),
7088 };
7089 
7090 /* args[0] value during BPF_SOCK_OPS_HDR_OPT_LEN_CB and
7091  * BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
7092  */
7093 enum {
7094 	BPF_WRITE_HDR_TCP_CURRENT_MSS = 1,	/* Kernel is finding the
7095 						 * total option spaces
7096 						 * required for an established
7097 						 * sk in order to calculate the
7098 						 * MSS.  No skb is actually
7099 						 * sent.
7100 						 */
7101 	BPF_WRITE_HDR_TCP_SYNACK_COOKIE = 2,	/* Kernel is in syncookie mode
7102 						 * when sending a SYN.
7103 						 */
7104 };
7105 
7106 struct bpf_perf_event_value {
7107 	__u64 counter;
7108 	__u64 enabled;
7109 	__u64 running;
7110 };
7111 
7112 enum {
7113 	BPF_DEVCG_ACC_MKNOD	= (1ULL << 0),
7114 	BPF_DEVCG_ACC_READ	= (1ULL << 1),
7115 	BPF_DEVCG_ACC_WRITE	= (1ULL << 2),
7116 };
7117 
7118 enum {
7119 	BPF_DEVCG_DEV_BLOCK	= (1ULL << 0),
7120 	BPF_DEVCG_DEV_CHAR	= (1ULL << 1),
7121 };
7122 
7123 struct bpf_cgroup_dev_ctx {
7124 	/* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */
7125 	__u32 access_type;
7126 	__u32 major;
7127 	__u32 minor;
7128 };
7129 
7130 struct bpf_raw_tracepoint_args {
7131 	__u64 args[0];
7132 };
7133 
7134 /* DIRECT:  Skip the FIB rules and go to FIB table associated with device
7135  * OUTPUT:  Do lookup from egress perspective; default is ingress
7136  */
7137 enum {
7138 	BPF_FIB_LOOKUP_DIRECT  = (1U << 0),
7139 	BPF_FIB_LOOKUP_OUTPUT  = (1U << 1),
7140 	BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2),
7141 	BPF_FIB_LOOKUP_TBID    = (1U << 3),
7142 	BPF_FIB_LOOKUP_SRC     = (1U << 4),
7143 	BPF_FIB_LOOKUP_MARK    = (1U << 5),
7144 };
7145 
7146 enum {
7147 	BPF_FIB_LKUP_RET_SUCCESS,      /* lookup successful */
7148 	BPF_FIB_LKUP_RET_BLACKHOLE,    /* dest is blackholed; can be dropped */
7149 	BPF_FIB_LKUP_RET_UNREACHABLE,  /* dest is unreachable; can be dropped */
7150 	BPF_FIB_LKUP_RET_PROHIBIT,     /* dest not allowed; can be dropped */
7151 	BPF_FIB_LKUP_RET_NOT_FWDED,    /* packet is not forwarded */
7152 	BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */
7153 	BPF_FIB_LKUP_RET_UNSUPP_LWT,   /* fwd requires encapsulation */
7154 	BPF_FIB_LKUP_RET_NO_NEIGH,     /* no neighbor entry for nh */
7155 	BPF_FIB_LKUP_RET_FRAG_NEEDED,  /* fragmentation required to fwd */
7156 	BPF_FIB_LKUP_RET_NO_SRC_ADDR,  /* failed to derive IP src addr */
7157 };
7158 
7159 struct bpf_fib_lookup {
7160 	/* input:  network family for lookup (AF_INET, AF_INET6)
7161 	 * output: network family of egress nexthop
7162 	 */
7163 	__u8	family;
7164 
7165 	/* set if lookup is to consider L4 data - e.g., FIB rules */
7166 	__u8	l4_protocol;
7167 	__be16	sport;
7168 	__be16	dport;
7169 
7170 	union {	/* used for MTU check */
7171 		/* input to lookup */
7172 		__u16	tot_len; /* L3 length from network hdr (iph->tot_len) */
7173 
7174 		/* output: MTU value */
7175 		__u16	mtu_result;
7176 	} __attribute__((packed, aligned(2)));
7177 	/* input: L3 device index for lookup
7178 	 * output: device index from FIB lookup
7179 	 */
7180 	__u32	ifindex;
7181 
7182 	union {
7183 		/* inputs to lookup */
7184 		__u8	tos;		/* AF_INET  */
7185 		__be32	flowinfo;	/* AF_INET6, flow_label + priority */
7186 
7187 		/* output: metric of fib result (IPv4/IPv6 only) */
7188 		__u32	rt_metric;
7189 	};
7190 
7191 	/* input: source address to consider for lookup
7192 	 * output: source address result from lookup
7193 	 */
7194 	union {
7195 		__be32		ipv4_src;
7196 		__u32		ipv6_src[4];  /* in6_addr; network order */
7197 	};
7198 
7199 	/* input to bpf_fib_lookup, ipv{4,6}_dst is destination address in
7200 	 * network header. output: bpf_fib_lookup sets to gateway address
7201 	 * if FIB lookup returns gateway route
7202 	 */
7203 	union {
7204 		__be32		ipv4_dst;
7205 		__u32		ipv6_dst[4];  /* in6_addr; network order */
7206 	};
7207 
7208 	union {
7209 		struct {
7210 			/* output */
7211 			__be16	h_vlan_proto;
7212 			__be16	h_vlan_TCI;
7213 		};
7214 		/* input: when accompanied with the
7215 		 * 'BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID` flags, a
7216 		 * specific routing table to use for the fib lookup.
7217 		 */
7218 		__u32	tbid;
7219 	};
7220 
7221 	union {
7222 		/* input */
7223 		struct {
7224 			__u32	mark;   /* policy routing */
7225 			/* 2 4-byte holes for input */
7226 		};
7227 
7228 		/* output: source and dest mac */
7229 		struct {
7230 			__u8	smac[6];	/* ETH_ALEN */
7231 			__u8	dmac[6];	/* ETH_ALEN */
7232 		};
7233 	};
7234 };
7235 
7236 struct bpf_redir_neigh {
7237 	/* network family for lookup (AF_INET, AF_INET6) */
7238 	__u32 nh_family;
7239 	/* network address of nexthop; skips fib lookup to find gateway */
7240 	union {
7241 		__be32		ipv4_nh;
7242 		__u32		ipv6_nh[4];  /* in6_addr; network order */
7243 	};
7244 };
7245 
7246 /* bpf_check_mtu flags*/
7247 enum  bpf_check_mtu_flags {
7248 	BPF_MTU_CHK_SEGS  = (1U << 0),
7249 };
7250 
7251 enum bpf_check_mtu_ret {
7252 	BPF_MTU_CHK_RET_SUCCESS,      /* check and lookup successful */
7253 	BPF_MTU_CHK_RET_FRAG_NEEDED,  /* fragmentation required to fwd */
7254 	BPF_MTU_CHK_RET_SEGS_TOOBIG,  /* GSO re-segmentation needed to fwd */
7255 };
7256 
7257 enum bpf_task_fd_type {
7258 	BPF_FD_TYPE_RAW_TRACEPOINT,	/* tp name */
7259 	BPF_FD_TYPE_TRACEPOINT,		/* tp name */
7260 	BPF_FD_TYPE_KPROBE,		/* (symbol + offset) or addr */
7261 	BPF_FD_TYPE_KRETPROBE,		/* (symbol + offset) or addr */
7262 	BPF_FD_TYPE_UPROBE,		/* filename + offset */
7263 	BPF_FD_TYPE_URETPROBE,		/* filename + offset */
7264 };
7265 
7266 enum {
7267 	BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG		= (1U << 0),
7268 	BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL		= (1U << 1),
7269 	BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP		= (1U << 2),
7270 };
7271 
7272 struct bpf_flow_keys {
7273 	__u16	nhoff;
7274 	__u16	thoff;
7275 	__u16	addr_proto;			/* ETH_P_* of valid addrs */
7276 	__u8	is_frag;
7277 	__u8	is_first_frag;
7278 	__u8	is_encap;
7279 	__u8	ip_proto;
7280 	__be16	n_proto;
7281 	__be16	sport;
7282 	__be16	dport;
7283 	union {
7284 		struct {
7285 			__be32	ipv4_src;
7286 			__be32	ipv4_dst;
7287 		};
7288 		struct {
7289 			__u32	ipv6_src[4];	/* in6_addr; network order */
7290 			__u32	ipv6_dst[4];	/* in6_addr; network order */
7291 		};
7292 	};
7293 	__u32	flags;
7294 	__be32	flow_label;
7295 };
7296 
7297 struct bpf_func_info {
7298 	__u32	insn_off;
7299 	__u32	type_id;
7300 };
7301 
7302 #define BPF_LINE_INFO_LINE_NUM(line_col)	((line_col) >> 10)
7303 #define BPF_LINE_INFO_LINE_COL(line_col)	((line_col) & 0x3ff)
7304 
7305 struct bpf_line_info {
7306 	__u32	insn_off;
7307 	__u32	file_name_off;
7308 	__u32	line_off;
7309 	__u32	line_col;
7310 };
7311 
7312 struct bpf_spin_lock {
7313 	__u32	val;
7314 };
7315 
7316 struct bpf_timer {
7317 	__u64 __opaque[2];
7318 } __attribute__((aligned(8)));
7319 
7320 struct bpf_wq {
7321 	__u64 __opaque[2];
7322 } __attribute__((aligned(8)));
7323 
7324 struct bpf_dynptr {
7325 	__u64 __opaque[2];
7326 } __attribute__((aligned(8)));
7327 
7328 struct bpf_list_head {
7329 	__u64 __opaque[2];
7330 } __attribute__((aligned(8)));
7331 
7332 struct bpf_list_node {
7333 	__u64 __opaque[3];
7334 } __attribute__((aligned(8)));
7335 
7336 struct bpf_rb_root {
7337 	__u64 __opaque[2];
7338 } __attribute__((aligned(8)));
7339 
7340 struct bpf_rb_node {
7341 	__u64 __opaque[4];
7342 } __attribute__((aligned(8)));
7343 
7344 struct bpf_refcount {
7345 	__u32 __opaque[1];
7346 } __attribute__((aligned(4)));
7347 
7348 struct bpf_sysctl {
7349 	__u32	write;		/* Sysctl is being read (= 0) or written (= 1).
7350 				 * Allows 1,2,4-byte read, but no write.
7351 				 */
7352 	__u32	file_pos;	/* Sysctl file position to read from, write to.
7353 				 * Allows 1,2,4-byte read an 4-byte write.
7354 				 */
7355 };
7356 
7357 struct bpf_sockopt {
7358 	__bpf_md_ptr(struct bpf_sock *, sk);
7359 	__bpf_md_ptr(void *, optval);
7360 	__bpf_md_ptr(void *, optval_end);
7361 
7362 	__s32	level;
7363 	__s32	optname;
7364 	__s32	optlen;
7365 	__s32	retval;
7366 };
7367 
7368 struct bpf_pidns_info {
7369 	__u32 pid;
7370 	__u32 tgid;
7371 };
7372 
7373 /* User accessible data for SK_LOOKUP programs. Add new fields at the end. */
7374 struct bpf_sk_lookup {
7375 	union {
7376 		__bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */
7377 		__u64 cookie; /* Non-zero if socket was selected in PROG_TEST_RUN */
7378 	};
7379 
7380 	__u32 family;		/* Protocol family (AF_INET, AF_INET6) */
7381 	__u32 protocol;		/* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */
7382 	__u32 remote_ip4;	/* Network byte order */
7383 	__u32 remote_ip6[4];	/* Network byte order */
7384 	__be16 remote_port;	/* Network byte order */
7385 	__u16 :16;		/* Zero padding */
7386 	__u32 local_ip4;	/* Network byte order */
7387 	__u32 local_ip6[4];	/* Network byte order */
7388 	__u32 local_port;	/* Host byte order */
7389 	__u32 ingress_ifindex;		/* The arriving interface. Determined by inet_iif. */
7390 };
7391 
7392 /*
7393  * struct btf_ptr is used for typed pointer representation; the
7394  * type id is used to render the pointer data as the appropriate type
7395  * via the bpf_snprintf_btf() helper described above.  A flags field -
7396  * potentially to specify additional details about the BTF pointer
7397  * (rather than its mode of display) - is included for future use.
7398  * Display flags - BTF_F_* - are passed to bpf_snprintf_btf separately.
7399  */
7400 struct btf_ptr {
7401 	void *ptr;
7402 	__u32 type_id;
7403 	__u32 flags;		/* BTF ptr flags; unused at present. */
7404 };
7405 
7406 /*
7407  * Flags to control bpf_snprintf_btf() behaviour.
7408  *     - BTF_F_COMPACT: no formatting around type information
7409  *     - BTF_F_NONAME: no struct/union member names/types
7410  *     - BTF_F_PTR_RAW: show raw (unobfuscated) pointer values;
7411  *       equivalent to %px.
7412  *     - BTF_F_ZERO: show zero-valued struct/union members; they
7413  *       are not displayed by default
7414  */
7415 enum {
7416 	BTF_F_COMPACT	=	(1ULL << 0),
7417 	BTF_F_NONAME	=	(1ULL << 1),
7418 	BTF_F_PTR_RAW	=	(1ULL << 2),
7419 	BTF_F_ZERO	=	(1ULL << 3),
7420 };
7421 
7422 /* bpf_core_relo_kind encodes which aspect of captured field/type/enum value
7423  * has to be adjusted by relocations. It is emitted by llvm and passed to
7424  * libbpf and later to the kernel.
7425  */
7426 enum bpf_core_relo_kind {
7427 	BPF_CORE_FIELD_BYTE_OFFSET = 0,      /* field byte offset */
7428 	BPF_CORE_FIELD_BYTE_SIZE = 1,        /* field size in bytes */
7429 	BPF_CORE_FIELD_EXISTS = 2,           /* field existence in target kernel */
7430 	BPF_CORE_FIELD_SIGNED = 3,           /* field signedness (0 - unsigned, 1 - signed) */
7431 	BPF_CORE_FIELD_LSHIFT_U64 = 4,       /* bitfield-specific left bitshift */
7432 	BPF_CORE_FIELD_RSHIFT_U64 = 5,       /* bitfield-specific right bitshift */
7433 	BPF_CORE_TYPE_ID_LOCAL = 6,          /* type ID in local BPF object */
7434 	BPF_CORE_TYPE_ID_TARGET = 7,         /* type ID in target kernel */
7435 	BPF_CORE_TYPE_EXISTS = 8,            /* type existence in target kernel */
7436 	BPF_CORE_TYPE_SIZE = 9,              /* type size in bytes */
7437 	BPF_CORE_ENUMVAL_EXISTS = 10,        /* enum value existence in target kernel */
7438 	BPF_CORE_ENUMVAL_VALUE = 11,         /* enum value integer value */
7439 	BPF_CORE_TYPE_MATCHES = 12,          /* type match in target kernel */
7440 };
7441 
7442 /*
7443  * "struct bpf_core_relo" is used to pass relocation data form LLVM to libbpf
7444  * and from libbpf to the kernel.
7445  *
7446  * CO-RE relocation captures the following data:
7447  * - insn_off - instruction offset (in bytes) within a BPF program that needs
7448  *   its insn->imm field to be relocated with actual field info;
7449  * - type_id - BTF type ID of the "root" (containing) entity of a relocatable
7450  *   type or field;
7451  * - access_str_off - offset into corresponding .BTF string section. String
7452  *   interpretation depends on specific relocation kind:
7453  *     - for field-based relocations, string encodes an accessed field using
7454  *       a sequence of field and array indices, separated by colon (:). It's
7455  *       conceptually very close to LLVM's getelementptr ([0]) instruction's
7456  *       arguments for identifying offset to a field.
7457  *     - for type-based relocations, strings is expected to be just "0";
7458  *     - for enum value-based relocations, string contains an index of enum
7459  *       value within its enum type;
7460  * - kind - one of enum bpf_core_relo_kind;
7461  *
7462  * Example:
7463  *   struct sample {
7464  *       int a;
7465  *       struct {
7466  *           int b[10];
7467  *       };
7468  *   };
7469  *
7470  *   struct sample *s = ...;
7471  *   int *x = &s->a;     // encoded as "0:0" (a is field #0)
7472  *   int *y = &s->b[5];  // encoded as "0:1:0:5" (anon struct is field #1,
7473  *                       // b is field #0 inside anon struct, accessing elem #5)
7474  *   int *z = &s[10]->b; // encoded as "10:1" (ptr is used as an array)
7475  *
7476  * type_id for all relocs in this example will capture BTF type id of
7477  * `struct sample`.
7478  *
7479  * Such relocation is emitted when using __builtin_preserve_access_index()
7480  * Clang built-in, passing expression that captures field address, e.g.:
7481  *
7482  * bpf_probe_read(&dst, sizeof(dst),
7483  *		  __builtin_preserve_access_index(&src->a.b.c));
7484  *
7485  * In this case Clang will emit field relocation recording necessary data to
7486  * be able to find offset of embedded `a.b.c` field within `src` struct.
7487  *
7488  * [0] https://llvm.org/docs/LangRef.html#getelementptr-instruction
7489  */
7490 struct bpf_core_relo {
7491 	__u32 insn_off;
7492 	__u32 type_id;
7493 	__u32 access_str_off;
7494 	enum bpf_core_relo_kind kind;
7495 };
7496 
7497 /*
7498  * Flags to control bpf_timer_start() behaviour.
7499  *     - BPF_F_TIMER_ABS: Timeout passed is absolute time, by default it is
7500  *       relative to current time.
7501  *     - BPF_F_TIMER_CPU_PIN: Timer will be pinned to the CPU of the caller.
7502  */
7503 enum {
7504 	BPF_F_TIMER_ABS = (1ULL << 0),
7505 	BPF_F_TIMER_CPU_PIN = (1ULL << 1),
7506 };
7507 
7508 /* BPF numbers iterator state */
7509 struct bpf_iter_num {
7510 	/* opaque iterator state; having __u64 here allows to preserve correct
7511 	 * alignment requirements in vmlinux.h, generated from BTF
7512 	 */
7513 	__u64 __opaque[1];
7514 } __attribute__((aligned(8)));
7515 
7516 /*
7517  * Flags to control BPF kfunc behaviour.
7518  *     - BPF_F_PAD_ZEROS: Pad destination buffer with zeros. (See the respective
7519  *       helper documentation for details.)
7520  */
7521 enum bpf_kfunc_flags {
7522 	BPF_F_PAD_ZEROS = (1ULL << 0),
7523 };
7524 
7525 #endif /* _UAPI__LINUX_BPF_H__ */
7526