/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
#ifndef __BPF_CORE_READ_H__
#define __BPF_CORE_READ_H__

#include <bpf/bpf_helpers.h>

/*
 * enum bpf_field_info_kind is passed as a second argument into
 * __builtin_preserve_field_info() built-in to get a specific aspect of
 * a field, captured as a first argument. __builtin_preserve_field_info(field,
 * info_kind) returns __u32 integer and produces BTF field relocation, which
 * is understood and processed by libbpf during BPF object loading. See
 * selftests/bpf for examples.
 */
enum bpf_field_info_kind {
	BPF_FIELD_BYTE_OFFSET = 0,	/* field byte offset */
	BPF_FIELD_BYTE_SIZE = 1,
	BPF_FIELD_EXISTS = 2,		/* field existence in target kernel */
	BPF_FIELD_SIGNED = 3,
	BPF_FIELD_LSHIFT_U64 = 4,
	BPF_FIELD_RSHIFT_U64 = 5,
};

/* second argument to __builtin_btf_type_id() built-in */
enum bpf_type_id_kind {
	BPF_TYPE_ID_LOCAL = 0,		/* BTF type ID in local program */
	BPF_TYPE_ID_TARGET = 1,		/* BTF type ID in target kernel */
};

/* second argument to __builtin_preserve_type_info() built-in */
enum bpf_type_info_kind {
	BPF_TYPE_EXISTS = 0,		/* type existence in target kernel */
	BPF_TYPE_SIZE = 1,		/* type size in target kernel */
	BPF_TYPE_MATCHES = 2,		/* type match in target kernel */
};

/* second argument to __builtin_preserve_enum_value() built-in */
enum bpf_enum_value_kind {
	BPF_ENUMVAL_EXISTS = 0,		/* enum value existence in kernel */
	BPF_ENUMVAL_VALUE = 1,		/* enum value value relocation */
};

#define __CORE_RELO(src, field, info)					      \
	__builtin_preserve_field_info((src)->field, BPF_FIELD_##info)

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define __CORE_BITFIELD_PROBE_READ(dst, src, fld)			      \
	bpf_probe_read_kernel(						      \
			(void *)dst,					      \
			__CORE_RELO(src, fld, BYTE_SIZE),		      \
			(const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
#else
/* semantics of LSHIFT_U64 assume loading values into low-order bytes, so
 * for big-endian we need to adjust the destination pointer accordingly,
 * based on field byte size
 */
#define __CORE_BITFIELD_PROBE_READ(dst, src, fld)			      \
	bpf_probe_read_kernel(						      \
			(void *)dst + (8 - __CORE_RELO(src, fld, BYTE_SIZE)), \
			__CORE_RELO(src, fld, BYTE_SIZE),		      \
			(const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
#endif

/*
 * Extract bitfield, identified by s->field, and return its value as u64.
 * All this is done in a relocatable manner, so bitfield changes such as
 * signedness, bit size, or offset will be handled automatically.
 * This version of the macro uses bpf_probe_read_kernel() to read the
 * underlying integer storage. The macro functions as an expression and
 * evaluates to the extracted bitfield value; errors from
 * bpf_probe_read_kernel() are not propagated.
 */
#define BPF_CORE_READ_BITFIELD_PROBED(s, field) ({			      \
	unsigned long long val = 0;					      \
									      \
	__CORE_BITFIELD_PROBE_READ(&val, s, field);			      \
	val <<= __CORE_RELO(s, field, LSHIFT_U64);			      \
	if (__CORE_RELO(s, field, SIGNED))				      \
		val = ((long long)val) >> __CORE_RELO(s, field, RSHIFT_U64);  \
	else								      \
		val = val >> __CORE_RELO(s, field, RSHIFT_U64);		      \
	val;								      \
})
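
/*
 * Usage sketch (illustrative only; 'struct sk_buff' and its 'pkt_type'
 * bitfield stand in for any kernel bitfield read from probe context):
 *
 *	struct sk_buff *skb = ...;
 *	__u64 pkt_type = BPF_CORE_READ_BITFIELD_PROBED(skb, pkt_type);
 */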

/*
 * Extract bitfield, identified by s->field, and return its value as u64.
 * This version of the macro uses direct memory reads and should be used from
 * BPF program types that support such functionality (e.g., typed raw
 * tracepoints).
 */
#define BPF_CORE_READ_BITFIELD(s, field) ({				      \
	const void *p = (const void *)s + __CORE_RELO(s, field, BYTE_OFFSET); \
	unsigned long long val;						      \
									      \
	/* This is a so-called barrier_var() operation that makes the	      \
	 * specified variable a "black box" for the optimizing compiler.      \
	 * It forces the compiler to perform BYTE_OFFSET relocation on p and  \
	 * use its calculated value in the switch below, instead of applying  \
	 * the same relocation 4 times for each individual memory load.       \
	 */								      \
	asm volatile("" : "=r"(p) : "0"(p));				      \
									      \
	switch (__CORE_RELO(s, field, BYTE_SIZE)) {			      \
	case 1: val = *(const unsigned char *)p; break;			      \
	case 2: val = *(const unsigned short *)p; break;		      \
	case 4: val = *(const unsigned int *)p; break;			      \
	case 8: val = *(const unsigned long long *)p; break;		      \
	}								      \
	val <<= __CORE_RELO(s, field, LSHIFT_U64);			      \
	if (__CORE_RELO(s, field, SIGNED))				      \
		val = ((long long)val) >> __CORE_RELO(s, field, RSHIFT_U64);  \
	else								      \
		val = val >> __CORE_RELO(s, field, RSHIFT_U64);		      \
	val;								      \
})
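
/*
 * Usage sketch (illustrative only; same bitfield as above, but read with
 * direct memory access from a program type that allows it, e.g. tp_btf or
 * fentry):
 *
 *	__u64 pkt_type = BPF_CORE_READ_BITFIELD(skb, pkt_type);
 */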

/*
 * Write to a bitfield, identified by s->field.
 * This is the inverse of BPF_CORE_READ_BITFIELD().
 */
#define BPF_CORE_WRITE_BITFIELD(s, field, new_val) ({			\
	void *p = (void *)s + __CORE_RELO(s, field, BYTE_OFFSET);	\
	unsigned int byte_size = __CORE_RELO(s, field, BYTE_SIZE);	\
	unsigned int lshift = __CORE_RELO(s, field, LSHIFT_U64);	\
	unsigned int rshift = __CORE_RELO(s, field, RSHIFT_U64);	\
	unsigned long long mask, val, nval = new_val;			\
	unsigned int rpad = rshift - lshift;				\
									\
	asm volatile("" : "+r"(p));					\
									\
	switch (byte_size) {						\
	case 1: val = *(unsigned char *)p; break;			\
	case 2: val = *(unsigned short *)p; break;			\
	case 4: val = *(unsigned int *)p; break;			\
	case 8: val = *(unsigned long long *)p; break;			\
	}								\
									\
	mask = (~0ULL << rshift) >> lshift;				\
	val = (val & ~mask) | ((nval << rpad) & mask);			\
									\
	switch (byte_size) {						\
	case 1: *(unsigned char *)p      = val; break;			\
	case 2: *(unsigned short *)p     = val; break;			\
	case 4: *(unsigned int *)p       = val; break;			\
	case 8: *(unsigned long long *)p = val; break;			\
	}								\
})
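
/*
 * Usage sketch (illustrative only; 'struct my_type' and its bitfield 'x' are
 * hypothetical, must exist in the target BTF, and 'p' must point to memory
 * the program is allowed to write, e.g. a ring buffer sample or map value):
 *
 *	struct my_type *p = ...;
 *
 *	BPF_CORE_WRITE_BITFIELD(p, x, 3);
 *	__u64 x = BPF_CORE_READ_BITFIELD(p, x);
 */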

/* Differentiator between compilers' built-in implementations. This is needed
 * due to parsing differences: GCC optimizes type-pointer constructs into the
 * built-in-specific type early during parsing, which makes it impossible to
 * collect the required type information when the built-in is expanded.
 */
#ifdef __clang__
#define ___bpf_typeof(type) ((typeof(type) *) 0)
#else
#define ___bpf_typeof1(type, NR) ({					    \
	extern typeof(type) *___concat(bpf_type_tmp_, NR);		    \
	___concat(bpf_type_tmp_, NR);					    \
})
#define ___bpf_typeof(type) ___bpf_typeof1(type, __COUNTER__)
#endif

#ifdef __clang__
#define ___bpf_field_ref1(field)	(field)
#define ___bpf_field_ref2(type, field)	(___bpf_typeof(type)->field)
#else
#define ___bpf_field_ref1(field)	(&(field))
#define ___bpf_field_ref2(type, field)	(&(___bpf_typeof(type)->field))
#endif
#define ___bpf_field_ref(args...)					    \
	___bpf_apply(___bpf_field_ref, ___bpf_narg(args))(args)

/*
 * Convenience macro to check that a field actually exists in a target kernel.
 * Returns:
 *    1, if matching field is present in target kernel;
 *    0, if no matching field found.
 *
 * Supports two forms:
 *   - field reference through variable access:
 *     bpf_core_field_exists(p->my_field);
 *   - field reference through type and field names:
 *     bpf_core_field_exists(struct my_type, my_field).
 */
#define bpf_core_field_exists(field...)					    \
	__builtin_preserve_field_info(___bpf_field_ref(field), BPF_FIELD_EXISTS)
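
/*
 * Usage sketch (illustrative only; handles the task_struct 'state' ->
 * '__state' rename, with 'struct task_struct___old' being a locally-defined
 * CO-RE flavor that still carries the old field name):
 *
 *	struct task_struct *t = ...;
 *	long state;
 *
 *	if (bpf_core_field_exists(struct task_struct, __state))
 *		state = BPF_CORE_READ(t, __state);
 *	else
 *		state = BPF_CORE_READ((struct task_struct___old *)t, state);
 */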

/*
 * Convenience macro to get the byte size of a field. Works for integers,
 * struct/unions, pointers, arrays, and enums.
 *
 * Supports two forms:
 *   - field reference through variable access:
 *     bpf_core_field_size(p->my_field);
 *   - field reference through type and field names:
 *     bpf_core_field_size(struct my_type, my_field).
 */
#define bpf_core_field_size(field...)					    \
	__builtin_preserve_field_info(___bpf_field_ref(field), BPF_FIELD_BYTE_SIZE)

/*
 * Convenience macro to get field's byte offset.
 *
 * Supports two forms:
 *   - field reference through variable access:
 *     bpf_core_field_offset(p->my_field);
 *   - field reference through type and field names:
 *     bpf_core_field_offset(struct my_type, my_field).
 */
#define bpf_core_field_offset(field...)					    \
	__builtin_preserve_field_info(___bpf_field_ref(field), BPF_FIELD_BYTE_OFFSET)
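
/*
 * Usage sketch (illustrative only; reads a field whose size may vary across
 * kernel versions into a raw buffer, using its relocated size and offset,
 * with 'task' being a 'struct task_struct *'):
 *
 *	char buf[16] = {};
 *	__u32 sz = bpf_core_field_size(struct task_struct, comm);
 *	__u32 off = bpf_core_field_offset(struct task_struct, comm);
 *
 *	if (sz <= sizeof(buf))
 *		bpf_probe_read_kernel(buf, sz, (void *)task + off);
 */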

/*
 * Convenience macro to get BTF type ID of a specified type, using local BTF
 * information. Returns a 32-bit unsigned integer with the type ID from the
 * program's own BTF. Always succeeds.
 */
#define bpf_core_type_id_local(type)					    \
	__builtin_btf_type_id(*___bpf_typeof(type), BPF_TYPE_ID_LOCAL)

/*
 * Convenience macro to get BTF type ID of a target kernel's type that matches
 * specified local type.
 * Returns:
 *    - valid 32-bit unsigned type ID in kernel BTF;
 *    - 0, if no matching type was found in a target kernel BTF.
 */
#define bpf_core_type_id_kernel(type)					    \
	__builtin_btf_type_id(*___bpf_typeof(type), BPF_TYPE_ID_TARGET)
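
/*
 * Usage sketch (illustrative only; bpf_snprintf_btf() is one consumer of a
 * kernel BTF type ID, dumping the pointed-to object as text, with 'task'
 * being a 'struct task_struct *'):
 *
 *	char out[256];
 *	struct btf_ptr ptr = {
 *		.ptr = task,
 *		.type_id = bpf_core_type_id_kernel(struct task_struct),
 *	};
 *
 *	bpf_snprintf_btf(out, sizeof(out), &ptr, sizeof(ptr), 0);
 */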

/*
 * Convenience macro to check that provided named type
 * (struct/union/enum/typedef) exists in a target kernel.
 * Returns:
 *    1, if such type is present in target kernel's BTF;
 *    0, if no matching type is found.
 */
#define bpf_core_type_exists(type)					    \
	__builtin_preserve_type_info(*___bpf_typeof(type), BPF_TYPE_EXISTS)

/*
 * Convenience macro to check that provided named type
 * (struct/union/enum/typedef) "matches" that in a target kernel.
 * Returns:
 *    1, if the type matches in the target kernel's BTF;
 *    0, if the type does not match any in the target kernel.
 */
#define bpf_core_type_matches(type)					    \
	__builtin_preserve_type_info(*___bpf_typeof(type), BPF_TYPE_MATCHES)

/*
 * Convenience macro to get the byte size of a provided named type
 * (struct/union/enum/typedef) in a target kernel.
 * Returns:
 *    >= 0 size (in bytes), if type is present in target kernel's BTF;
 *    0, if no matching type is found.
 */
#define bpf_core_type_size(type)					    \
	__builtin_preserve_type_info(*___bpf_typeof(type), BPF_TYPE_SIZE)
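
/*
 * Usage sketch (illustrative only; 'struct my_kernel_struct' is a
 * hypothetical name standing in for a type that only exists or only matches
 * on some kernel versions):
 *
 *	if (bpf_core_type_matches(struct my_kernel_struct)) {
 *		__u32 sz = bpf_core_type_size(struct my_kernel_struct);
 *		...
 *	}
 */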

/*
 * Convenience macro to check that provided enumerator value is defined in
 * a target kernel.
 * Returns:
 *    1, if specified enum type and its enumerator value are present in target
 *    kernel's BTF;
 *    0, if no matching enum and/or enum value within that enum is found.
 */
#ifdef __clang__
#define bpf_core_enum_value_exists(enum_type, enum_value)		    \
	__builtin_preserve_enum_value(*(typeof(enum_type) *)enum_value, BPF_ENUMVAL_EXISTS)
#else
#define bpf_core_enum_value_exists(enum_type, enum_value)		    \
	__builtin_preserve_enum_value(___bpf_typeof(enum_type), enum_value, BPF_ENUMVAL_EXISTS)
#endif

/*
 * Convenience macro to get the integer value of an enumerator value in
 * a target kernel.
 * Returns:
 *    64-bit value, if specified enum type and its enumerator value are
 *    present in target kernel's BTF;
 *    0, if no matching enum and/or enum value within that enum is found.
 */
#ifdef __clang__
#define bpf_core_enum_value(enum_type, enum_value)			    \
	__builtin_preserve_enum_value(*(typeof(enum_type) *)enum_value, BPF_ENUMVAL_VALUE)
#else
#define bpf_core_enum_value(enum_type, enum_value)			    \
	__builtin_preserve_enum_value(___bpf_typeof(enum_type), enum_value, BPF_ENUMVAL_VALUE)
#endif
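
/*
 * Usage sketch (illustrative only; picks up the target kernel's actual value
 * of an enumerator that may change between versions/configurations):
 *
 *	int max = -1;
 *
 *	if (bpf_core_enum_value_exists(enum pid_type, PIDTYPE_MAX))
 *		max = bpf_core_enum_value(enum pid_type, PIDTYPE_MAX);
 */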

/*
 * bpf_core_read() abstracts away the bpf_probe_read_kernel() call and
 * captures offset relocation for the source address using the
 * __builtin_preserve_access_index() built-in, provided by Clang.
 *
 * __builtin_preserve_access_index() takes as an argument an expression that
 * takes the address of a field within a struct/union. It makes the compiler
 * emit a relocation, which records the BTF type ID describing the root
 * struct/union and an accessor string describing the exact embedded field
 * that was used to take the address. See the detailed description of this
 * relocation format and semantics in comments to struct bpf_core_relo in
 * include/uapi/linux/bpf.h.
 *
 * This relocation allows libbpf to adjust the BPF instruction to use the
 * correct actual field offset, based on the target kernel BTF type that
 * matches the original (local) BTF used to record the relocation.
 */
#define bpf_core_read(dst, sz, src)					    \
	bpf_probe_read_kernel(dst, sz, (const void *)__builtin_preserve_access_index(src))

/* NOTE: see comments for BPF_CORE_READ_USER() about the proper types use. */
#define bpf_core_read_user(dst, sz, src)				    \
	bpf_probe_read_user(dst, sz, (const void *)__builtin_preserve_access_index(src))
/*
 * bpf_core_read_str() is a thin wrapper around bpf_probe_read_kernel_str()
 * additionally emitting BPF CO-RE field relocation for specified source
 * argument.
 */
#define bpf_core_read_str(dst, sz, src)					    \
	bpf_probe_read_kernel_str(dst, sz, (const void *)__builtin_preserve_access_index(src))

/* NOTE: see comments for BPF_CORE_READ_USER() about the proper types use. */
#define bpf_core_read_user_str(dst, sz, src)				    \
	bpf_probe_read_user_str(dst, sz, (const void *)__builtin_preserve_access_index(src))
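
/*
 * Usage sketch (illustrative only; single-step relocatable reads into local
 * storage, with 'task' being a 'struct task_struct *'):
 *
 *	__u32 tgid;
 *	char comm[16];
 *
 *	bpf_core_read(&tgid, sizeof(tgid), &task->tgid);
 *	bpf_core_read_str(comm, sizeof(comm), &task->comm);
 */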

extern void *bpf_rdonly_cast(const void *obj, __u32 btf_id) __ksym __weak;

/*
 * Cast provided pointer *ptr* into a pointer to a specified *type* in such
 * a way that the BPF verifier will become aware of the associated kernel-side
 * BTF type. This allows accessing members of kernel types directly, without
 * the need to use BPF_CORE_READ() macros.
 */
#define bpf_core_cast(ptr, type)					    \
	((typeof(type) *)bpf_rdonly_cast((ptr), bpf_core_type_id_kernel(type)))
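
/*
 * Usage sketch (illustrative only; 'some_ptr' is a hypothetical untyped
 * pointer known to reference a task_struct, turned into a verifier-trusted
 * read-only pointer that can be dereferenced directly):
 *
 *	struct task_struct *task = bpf_core_cast(some_ptr, struct task_struct);
 *	int pid = task->pid;
 */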

#define ___concat(a, b) a ## b
#define ___apply(fn, n) ___concat(fn, n)
#define ___nth(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, __11, N, ...) N

/*
 * return number of provided arguments; used for switch-based variadic macro
 * definitions (see ___last, ___arrow, etc below)
 */
#define ___narg(...) ___nth(_, ##__VA_ARGS__, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
/*
 * return 0 if no arguments are passed, N - otherwise; used for
 * recursively-defined macros to specify termination (0) case, and generic
 * (N) case (e.g., ___read_ptrs, ___core_read)
 */
#define ___empty(...) ___nth(_, ##__VA_ARGS__, N, N, N, N, N, N, N, N, N, N, 0)
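
/*
 * Expansion sketch (illustrative only):
 *
 *	___narg(a, b, c)  expands to  3
 *	___narg()         expands to  0
 *	___empty(a, b)    expands to  N
 *	___empty()        expands to  0
 */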

#define ___last1(x) x
#define ___last2(a, x) x
#define ___last3(a, b, x) x
#define ___last4(a, b, c, x) x
#define ___last5(a, b, c, d, x) x
#define ___last6(a, b, c, d, e, x) x
#define ___last7(a, b, c, d, e, f, x) x
#define ___last8(a, b, c, d, e, f, g, x) x
#define ___last9(a, b, c, d, e, f, g, h, x) x
#define ___last10(a, b, c, d, e, f, g, h, i, x) x
#define ___last(...) ___apply(___last, ___narg(__VA_ARGS__))(__VA_ARGS__)

#define ___nolast2(a, _) a
#define ___nolast3(a, b, _) a, b
#define ___nolast4(a, b, c, _) a, b, c
#define ___nolast5(a, b, c, d, _) a, b, c, d
#define ___nolast6(a, b, c, d, e, _) a, b, c, d, e
#define ___nolast7(a, b, c, d, e, f, _) a, b, c, d, e, f
#define ___nolast8(a, b, c, d, e, f, g, _) a, b, c, d, e, f, g
#define ___nolast9(a, b, c, d, e, f, g, h, _) a, b, c, d, e, f, g, h
#define ___nolast10(a, b, c, d, e, f, g, h, i, _) a, b, c, d, e, f, g, h, i
#define ___nolast(...) ___apply(___nolast, ___narg(__VA_ARGS__))(__VA_ARGS__)

#define ___arrow1(a) a
#define ___arrow2(a, b) a->b
#define ___arrow3(a, b, c) a->b->c
#define ___arrow4(a, b, c, d) a->b->c->d
#define ___arrow5(a, b, c, d, e) a->b->c->d->e
#define ___arrow6(a, b, c, d, e, f) a->b->c->d->e->f
#define ___arrow7(a, b, c, d, e, f, g) a->b->c->d->e->f->g
#define ___arrow8(a, b, c, d, e, f, g, h) a->b->c->d->e->f->g->h
#define ___arrow9(a, b, c, d, e, f, g, h, i) a->b->c->d->e->f->g->h->i
#define ___arrow10(a, b, c, d, e, f, g, h, i, j) a->b->c->d->e->f->g->h->i->j
#define ___arrow(...) ___apply(___arrow, ___narg(__VA_ARGS__))(__VA_ARGS__)

#define ___type(...) typeof(___arrow(__VA_ARGS__))

#define ___read(read_fn, dst, src_type, src, accessor)			    \
	read_fn((void *)(dst), sizeof(*(dst)), &((src_type)(src))->accessor)

/* "recursively" read a sequence of inner pointers using local __t var */
#define ___rd_first(fn, src, a) ___read(fn, &__t, ___type(src), src, a);
#define ___rd_last(fn, ...)						    \
	___read(fn, &__t, ___type(___nolast(__VA_ARGS__)), __t, ___last(__VA_ARGS__));
#define ___rd_p1(fn, ...) const void *__t; ___rd_first(fn, __VA_ARGS__)
#define ___rd_p2(fn, ...) ___rd_p1(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
#define ___rd_p3(fn, ...) ___rd_p2(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
#define ___rd_p4(fn, ...) ___rd_p3(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
#define ___rd_p5(fn, ...) ___rd_p4(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
#define ___rd_p6(fn, ...) ___rd_p5(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
#define ___rd_p7(fn, ...) ___rd_p6(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
#define ___rd_p8(fn, ...) ___rd_p7(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
#define ___rd_p9(fn, ...) ___rd_p8(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
#define ___read_ptrs(fn, src, ...)					    \
	___apply(___rd_p, ___narg(__VA_ARGS__))(fn, src, __VA_ARGS__)

#define ___core_read0(fn, fn_ptr, dst, src, a)				    \
	___read(fn, dst, ___type(src), src, a);
#define ___core_readN(fn, fn_ptr, dst, src, ...)			    \
	___read_ptrs(fn_ptr, src, ___nolast(__VA_ARGS__))		    \
	___read(fn, dst, ___type(src, ___nolast(__VA_ARGS__)), __t,	    \
		___last(__VA_ARGS__));
#define ___core_read(fn, fn_ptr, dst, src, a, ...)			    \
	___apply(___core_read, ___empty(__VA_ARGS__))(fn, fn_ptr, dst,	    \
						      src, a, ##__VA_ARGS__)

/*
 * BPF_CORE_READ_INTO() is a more performance-conscious variant of
 * BPF_CORE_READ(), in which the final field is read into user-provided
 * storage. See BPF_CORE_READ() below for more details on general usage.
 */
#define BPF_CORE_READ_INTO(dst, src, a, ...) ({				    \
	___core_read(bpf_core_read, bpf_core_read,			    \
		     dst, (src), a, ##__VA_ARGS__)			    \
})
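
/*
 * Usage sketch (illustrative only; reads the parent task's PID through one
 * level of pointer chasing into caller-provided storage, with 'task' being a
 * 'struct task_struct *'):
 *
 *	pid_t ppid;
 *
 *	BPF_CORE_READ_INTO(&ppid, task, real_parent, pid);
 */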

/*
 * Variant of BPF_CORE_READ_INTO() for reading from user-space memory.
 *
 * NOTE: see comments for BPF_CORE_READ_USER() about the proper types use.
 */
#define BPF_CORE_READ_USER_INTO(dst, src, a, ...) ({			    \
	___core_read(bpf_core_read_user, bpf_core_read_user,		    \
		     dst, (src), a, ##__VA_ARGS__)			    \
})

/* Non-CO-RE variant of BPF_CORE_READ_INTO() */
#define BPF_PROBE_READ_INTO(dst, src, a, ...) ({			    \
	___core_read(bpf_probe_read_kernel, bpf_probe_read_kernel,	    \
		     dst, (src), a, ##__VA_ARGS__)			    \
})

/* Non-CO-RE variant of BPF_CORE_READ_USER_INTO().
 *
 * As no CO-RE relocations are emitted, source types can be arbitrary and are
 * not restricted to kernel types only.
 */
#define BPF_PROBE_READ_USER_INTO(dst, src, a, ...) ({			    \
	___core_read(bpf_probe_read_user, bpf_probe_read_user,		    \
		     dst, (src), a, ##__VA_ARGS__)			    \
})

/*
 * BPF_CORE_READ_STR_INTO() does the same "pointer chasing" as
 * BPF_CORE_READ() for intermediate pointers, but then executes (and returns
 * the corresponding error code of) bpf_core_read_str() for the final string
 * read.
 */
#define BPF_CORE_READ_STR_INTO(dst, src, a, ...) ({			    \
	___core_read(bpf_core_read_str, bpf_core_read,			    \
		     dst, (src), a, ##__VA_ARGS__)			    \
})
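
/*
 * Usage sketch (illustrative only; copies the parent task's comm into a
 * local buffer, evaluating to bpf_core_read_str()'s return value, with
 * 'task' being a 'struct task_struct *'):
 *
 *	char comm[16];
 *	int err = BPF_CORE_READ_STR_INTO(&comm, task, real_parent, comm);
 */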

/*
 * Variant of BPF_CORE_READ_STR_INTO() for reading from user-space memory.
 *
 * NOTE: see comments for BPF_CORE_READ_USER() about the proper types use.
 */
#define BPF_CORE_READ_USER_STR_INTO(dst, src, a, ...) ({		    \
	___core_read(bpf_core_read_user_str, bpf_core_read_user,	    \
		     dst, (src), a, ##__VA_ARGS__)			    \
})

/* Non-CO-RE variant of BPF_CORE_READ_STR_INTO() */
#define BPF_PROBE_READ_STR_INTO(dst, src, a, ...) ({			    \
	___core_read(bpf_probe_read_kernel_str, bpf_probe_read_kernel,	    \
		     dst, (src), a, ##__VA_ARGS__)			    \
})

/*
 * Non-CO-RE variant of BPF_CORE_READ_USER_STR_INTO().
 *
 * As no CO-RE relocations are emitted, source types can be arbitrary and are
 * not restricted to kernel types only.
 */
#define BPF_PROBE_READ_USER_STR_INTO(dst, src, a, ...) ({		    \
	___core_read(bpf_probe_read_user_str, bpf_probe_read_user,	    \
		     dst, (src), a, ##__VA_ARGS__)			    \
})

/*
 * BPF_CORE_READ() is used to simplify BPF CO-RE relocatable reads, especially
 * when there are multiple pointer chasing steps.
 * E.g., what in non-BPF world (or in BPF w/ BCC) would be something like:
 *	int x = s->a.b.c->d.e->f->g;
 * can be succinctly achieved using BPF_CORE_READ as:
 *	int x = BPF_CORE_READ(s, a.b.c, d.e, f, g);
 *
 * BPF_CORE_READ will decompose the above statement into 4 bpf_core_read (BPF
 * CO-RE relocatable bpf_probe_read_kernel() wrapper) calls, logically
 * equivalent to:
 * 1. const void *__t = s->a.b.c;
 * 2. __t = __t->d.e;
 * 3. __t = __t->f;
 * 4. return __t->g;
 *
 * The equivalence is only logical, because there is heavy type
 * casting/preservation involved, and all the reads happen through
 * bpf_probe_read_kernel() calls using __builtin_preserve_access_index() to
 * emit CO-RE relocations.
 *
 * N.B. Only up to 9 "field accessors" are supported, which should be more
 * than enough for any practical purpose.
 */
#define BPF_CORE_READ(src, a, ...) ({					    \
	___type((src), a, ##__VA_ARGS__) __r;				    \
	BPF_CORE_READ_INTO(&__r, (src), a, ##__VA_ARGS__);		    \
	__r;								    \
})

/*
 * Variant of BPF_CORE_READ() for reading from user-space memory.
 *
 * NOTE: all the source types involved are still *kernel types* and need to
 * exist in kernel (or kernel module) BTF, otherwise CO-RE relocation will
 * fail. Custom user types are not relocatable with CO-RE.
 * The typical situation in which BPF_CORE_READ_USER() might be used is to
 * read kernel UAPI types from the user-space memory passed in as a syscall
 * input argument.
 */
#define BPF_CORE_READ_USER(src, a, ...) ({				    \
	___type((src), a, ##__VA_ARGS__) __r;				    \
	BPF_CORE_READ_USER_INTO(&__r, (src), a, ##__VA_ARGS__);		    \
	__r;								    \
})
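
/*
 * Usage sketch (illustrative only; 'uattr' stands for a user-space pointer
 * to a kernel UAPI struct, e.g. a 'union bpf_attr *' captured on syscall
 * entry):
 *
 *	__u32 map_type = BPF_CORE_READ_USER(uattr, map_type);
 */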

/* Non-CO-RE variant of BPF_CORE_READ() */
#define BPF_PROBE_READ(src, a, ...) ({					    \
	___type((src), a, ##__VA_ARGS__) __r;				    \
	BPF_PROBE_READ_INTO(&__r, (src), a, ##__VA_ARGS__);		    \
	__r;								    \
})

/*
 * Non-CO-RE variant of BPF_CORE_READ_USER().
 *
 * As no CO-RE relocations are emitted, source types can be arbitrary and are
 * not restricted to kernel types only.
 */
#define BPF_PROBE_READ_USER(src, a, ...) ({				    \
	___type((src), a, ##__VA_ARGS__) __r;				    \
	BPF_PROBE_READ_USER_INTO(&__r, (src), a, ##__VA_ARGS__);	    \
	__r;								    \
})

#endif