/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
#ifndef __BPF_CORE_READ_H__
#define __BPF_CORE_READ_H__

/*
 * enum bpf_field_info_kind is passed as a second argument into
 * __builtin_preserve_field_info() built-in to get a specific aspect of
 * a field, captured as a first argument. __builtin_preserve_field_info(field,
 * info_kind) returns __u32 integer and produces BTF field relocation, which
 * is understood and processed by libbpf during BPF object loading. See
 * selftests/bpf for examples.
 */
enum bpf_field_info_kind {
	BPF_FIELD_BYTE_OFFSET = 0,	/* field byte offset */
	BPF_FIELD_BYTE_SIZE = 1,
	BPF_FIELD_EXISTS = 2,		/* field existence in target kernel */
	BPF_FIELD_SIGNED = 3,
	BPF_FIELD_LSHIFT_U64 = 4,
	BPF_FIELD_RSHIFT_U64 = 5,
};

/* second argument to __builtin_btf_type_id() built-in */
enum bpf_type_id_kind {
	BPF_TYPE_ID_LOCAL = 0,		/* BTF type ID in local program */
	BPF_TYPE_ID_TARGET = 1,		/* BTF type ID in target kernel */
};

/* second argument to __builtin_preserve_type_info() built-in */
enum bpf_type_info_kind {
	BPF_TYPE_EXISTS = 0,		/* type existence in target kernel */
	BPF_TYPE_SIZE = 1,		/* type size in target kernel */
	BPF_TYPE_MATCHES = 2,		/* type match in target kernel */
};

/* second argument to __builtin_preserve_enum_value() built-in */
enum bpf_enum_value_kind {
	BPF_ENUMVAL_EXISTS = 0,		/* enum value existence in kernel */
	BPF_ENUMVAL_VALUE = 1,		/* enum value integer value */
};

#define __CORE_RELO(src, field, info) \
	__builtin_preserve_field_info((src)->field, BPF_FIELD_##info)

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define __CORE_BITFIELD_PROBE_READ(dst, src, fld) \
	bpf_probe_read_kernel( \
			(void *)dst, \
			__CORE_RELO(src, fld, BYTE_SIZE), \
			(const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
#else
/* semantics of LSHIFT_64 assumes loading values into low-ordered bytes, so
 * for big-endian we need to adjust destination pointer accordingly, based on
 * field byte size
 */
#define __CORE_BITFIELD_PROBE_READ(dst, src, fld) \
	bpf_probe_read_kernel( \
			(void *)dst + (8 - __CORE_RELO(src, fld, BYTE_SIZE)), \
			__CORE_RELO(src, fld, BYTE_SIZE), \
			(const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
#endif

/*
 * Extract bitfield, identified by s->field, and return its value as u64.
 * All this is done in a relocatable manner, so bitfield changes such as
 * signedness, bit size, or byte offset are handled automatically.
 * This version of the macro uses bpf_probe_read_kernel() to read the
 * underlying integer storage; if that read fails, its error code is not
 * propagated and the macro evaluates to 0.
 */
#define BPF_CORE_READ_BITFIELD_PROBED(s, field) ({ \
	unsigned long long val = 0; \
 \
	__CORE_BITFIELD_PROBE_READ(&val, s, field); \
	val <<= __CORE_RELO(s, field, LSHIFT_U64); \
	if (__CORE_RELO(s, field, SIGNED)) \
		val = ((long long)val) >> __CORE_RELO(s, field, RSHIFT_U64); \
	else \
		val = val >> __CORE_RELO(s, field, RSHIFT_U64); \
	val; \
})
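
/*
 * Usage sketch for BPF_CORE_READ_BITFIELD_PROBED() (illustrative only). It
 * assumes bpf_helpers.h and bpf_tracing.h are included and that the target
 * kernel's struct sk_buff has a pkt_type bitfield; the minimal ___local
 * "flavor" type below is hypothetical and declares just the field of
 * interest:
 *
 *	struct sk_buff___local {
 *		unsigned char pkt_type: 3;
 *	} __attribute__((preserve_access_index));
 *
 *	SEC("kprobe/netif_receive_skb")
 *	int BPF_KPROBE(on_skb, struct sk_buff___local *skb)
 *	{
 *		unsigned long long pkt_type;
 *
 *		pkt_type = BPF_CORE_READ_BITFIELD_PROBED(skb, pkt_type);
 *		return 0;
 *	}
 */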

/*
 * Extract bitfield, identified by s->field, and return its value as u64.
 * This version of the macro uses direct memory reads and should be used from
 * BPF program types that support such functionality (e.g., typed raw
 * tracepoints).
 */
#define BPF_CORE_READ_BITFIELD(s, field) ({ \
	const void *p = (const void *)s + __CORE_RELO(s, field, BYTE_OFFSET); \
	unsigned long long val; \
 \
	/* This is a so-called barrier_var() operation that makes specified \
	 * variable "a black box" for optimizing compiler. \
	 * It forces compiler to perform BYTE_OFFSET relocation on p and use \
	 * its calculated value in the switch below, instead of applying \
	 * the same relocation 4 times for each individual memory load. \
	 */ \
	asm volatile("" : "=r"(p) : "0"(p)); \
 \
	switch (__CORE_RELO(s, field, BYTE_SIZE)) { \
	case 1: val = *(const unsigned char *)p; break; \
	case 2: val = *(const unsigned short *)p; break; \
	case 4: val = *(const unsigned int *)p; break; \
	case 8: val = *(const unsigned long long *)p; break; \
	} \
	val <<= __CORE_RELO(s, field, LSHIFT_U64); \
	if (__CORE_RELO(s, field, SIGNED)) \
		val = ((long long)val) >> __CORE_RELO(s, field, RSHIFT_U64); \
	else \
		val = val >> __CORE_RELO(s, field, RSHIFT_U64); \
	val; \
})

/*
 * Write to a bitfield, identified by s->field.
 * This is the inverse of BPF_CORE_READ_BITFIELD().
 */
#define BPF_CORE_WRITE_BITFIELD(s, field, new_val) ({ \
	void *p = (void *)s + __CORE_RELO(s, field, BYTE_OFFSET); \
	unsigned int byte_size = __CORE_RELO(s, field, BYTE_SIZE); \
	unsigned int lshift = __CORE_RELO(s, field, LSHIFT_U64); \
	unsigned int rshift = __CORE_RELO(s, field, RSHIFT_U64); \
	unsigned long long mask, val, nval = new_val; \
	unsigned int rpad = rshift - lshift; \
 \
	asm volatile("" : "+r"(p)); \
 \
	switch (byte_size) { \
	case 1: val = *(unsigned char *)p; break; \
	case 2: val = *(unsigned short *)p; break; \
	case 4: val = *(unsigned int *)p; break; \
	case 8: val = *(unsigned long long *)p; break; \
	} \
 \
	mask = (~0ULL << rshift) >> lshift; \
	val = (val & ~mask) | ((nval << rpad) & mask); \
 \
	switch (byte_size) { \
	case 1: *(unsigned char *)p = val; break; \
	case 2: *(unsigned short *)p = val; break; \
	case 4: *(unsigned int *)p = val; break; \
	case 8: *(unsigned long long *)p = val; break; \
	} \
})

#define ___bpf_field_ref1(field)	(field)
#define ___bpf_field_ref2(type, field)	(((typeof(type) *)0)->field)
#define ___bpf_field_ref(args...) \
	___bpf_apply(___bpf_field_ref, ___bpf_narg(args))(args)

/*
 * Convenience macro to check that field actually exists in target kernel's
 * BTF.
 * Returns:
 *    1, if matching field is present in target kernel;
 *    0, if no matching field found.
 *
 * Supports two forms:
 *   - field reference through variable access:
 *       bpf_core_field_exists(p->my_field);
 *   - field reference through type and field names:
 *       bpf_core_field_exists(struct my_type, my_field).
 */
#define bpf_core_field_exists(field...) \
	__builtin_preserve_field_info(___bpf_field_ref(field), BPF_FIELD_EXISTS)
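
/*
 * Usage sketch for bpf_core_field_exists() (illustrative only): guard a read
 * so the program still loads and runs on kernels where the field is absent.
 * Assumes vmlinux.h and bpf_helpers.h are included; task_struct and its prio
 * member are assumptions about the target kernel's BTF:
 *
 *	struct task_struct *task = (void *)bpf_get_current_task();
 *	int prio = -1;
 *
 *	if (bpf_core_field_exists(task->prio))
 *		prio = BPF_CORE_READ(task, prio);
 */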

/*
 * Convenience macro to get the byte size of a field. Works for integers,
 * struct/unions, pointers, arrays, and enums.
 *
 * Supports two forms:
 *   - field reference through variable access:
 *       bpf_core_field_size(p->my_field);
 *   - field reference through type and field names:
 *       bpf_core_field_size(struct my_type, my_field).
 */
#define bpf_core_field_size(field...) \
	__builtin_preserve_field_info(___bpf_field_ref(field), BPF_FIELD_BYTE_SIZE)

/*
 * Convenience macro to get field's byte offset.
 *
 * Supports two forms:
 *   - field reference through variable access:
 *       bpf_core_field_offset(p->my_field);
 *   - field reference through type and field names:
 *       bpf_core_field_offset(struct my_type, my_field).
 */
#define bpf_core_field_offset(field...) \
	__builtin_preserve_field_info(___bpf_field_ref(field), BPF_FIELD_BYTE_OFFSET)

/*
 * Convenience macro to get BTF type ID of a specified type, using local BTF
 * information. Returns a 32-bit unsigned integer with the type ID from the
 * program's own BTF. Always succeeds.
 */
#define bpf_core_type_id_local(type) \
	__builtin_btf_type_id(*(typeof(type) *)0, BPF_TYPE_ID_LOCAL)

/*
 * Convenience macro to get BTF type ID of a target kernel's type that matches
 * specified local type.
 * Returns:
 *    - valid 32-bit unsigned type ID in kernel BTF;
 *    - 0, if no matching type was found in a target kernel BTF.
 */
#define bpf_core_type_id_kernel(type) \
	__builtin_btf_type_id(*(typeof(type) *)0, BPF_TYPE_ID_TARGET)

/*
 * Convenience macro to check that provided named type
 * (struct/union/enum/typedef) exists in a target kernel.
 * Returns:
 *    1, if such type is present in target kernel's BTF;
 *    0, if no matching type is found.
 */
#define bpf_core_type_exists(type) \
	__builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_EXISTS)

/*
 * Convenience macro to check that provided named type
 * (struct/union/enum/typedef) "matches" that in a target kernel.
 * Returns:
 *    1, if the type matches in the target kernel's BTF;
 *    0, if the type does not match any in the target kernel.
 */
#define bpf_core_type_matches(type) \
	__builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_MATCHES)

/*
 * Convenience macro to get the byte size of a provided named type
 * (struct/union/enum/typedef) in a target kernel.
 * Returns:
 *    >= 0 size (in bytes), if type is present in target kernel's BTF;
 *    0, if no matching type is found.
 */
#define bpf_core_type_size(type) \
	__builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_SIZE)

/*
 * Convenience macro to check that provided enumerator value is defined in
 * a target kernel.
 * Returns:
 *    1, if specified enum type and its enumerator value are present in target
 *    kernel's BTF;
 *    0, if no matching enum and/or enum value within that enum is found.
 */
#define bpf_core_enum_value_exists(enum_type, enum_value) \
	__builtin_preserve_enum_value(*(typeof(enum_type) *)enum_value, BPF_ENUMVAL_EXISTS)

/*
 * Convenience macro to get the integer value of an enumerator value in
 * a target kernel.
 * Returns:
 *    64-bit value, if specified enum type and its enumerator value are
 *    present in target kernel's BTF;
 *    0, if no matching enum and/or enum value within that enum is found.
 */
#define bpf_core_enum_value(enum_type, enum_value) \
	__builtin_preserve_enum_value(*(typeof(enum_type) *)enum_value, BPF_ENUMVAL_VALUE)
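
/*
 * Usage sketch for load-time feature detection with the type and enum helpers
 * above (illustrative only). The kernel type and enum names are assumptions
 * about the target kernel's BTF and should be replaced with whatever the
 * program actually depends on:
 *
 *	__u64 id = 0;
 *
 *	if (bpf_core_type_exists(struct bpf_iter__task)) {
 *		// task iterator support is present in this kernel
 *	}
 *
 *	if (bpf_core_enum_value_exists(enum cgroup_subsys_id, memory_cgrp_id))
 *		id = bpf_core_enum_value(enum cgroup_subsys_id, memory_cgrp_id);
 */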

/*
 * bpf_core_read() abstracts away bpf_probe_read_kernel() call and captures
 * offset relocation for source address using __builtin_preserve_access_index()
 * built-in, provided by Clang.
 *
 * __builtin_preserve_access_index() takes as an argument an expression of
 * taking an address of a field within struct/union. It makes compiler emit
 * a relocation, which records BTF type ID describing root struct/union and an
 * accessor string which describes exact embedded field that was used to take
 * an address. See detailed description of this relocation format and
 * semantics in comments to struct bpf_field_reloc in libbpf_internal.h.
 *
 * This relocation allows libbpf to adjust BPF instruction to use correct
 * actual field offset, based on target kernel BTF type that matches original
 * (local) BTF, used to record relocation.
 */
#define bpf_core_read(dst, sz, src) \
	bpf_probe_read_kernel(dst, sz, (const void *)__builtin_preserve_access_index(src))

/* NOTE: see comments for BPF_CORE_READ_USER() about the proper types use. */
#define bpf_core_read_user(dst, sz, src) \
	bpf_probe_read_user(dst, sz, (const void *)__builtin_preserve_access_index(src))
/*
 * bpf_core_read_str() is a thin wrapper around bpf_probe_read_kernel_str()
 * additionally emitting BPF CO-RE field relocation for specified source
 * argument.
 */
#define bpf_core_read_str(dst, sz, src) \
	bpf_probe_read_kernel_str(dst, sz, (const void *)__builtin_preserve_access_index(src))

/* NOTE: see comments for BPF_CORE_READ_USER() about the proper types use. */
#define bpf_core_read_user_str(dst, sz, src) \
	bpf_probe_read_user_str(dst, sz, (const void *)__builtin_preserve_access_index(src))
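
/*
 * Usage sketch for the low-level wrappers above (illustrative only): they
 * mirror bpf_probe_read_kernel()/bpf_probe_read_kernel_str() but emit CO-RE
 * relocations for the source expression. Assumes vmlinux.h and bpf_helpers.h
 * are included; real_parent and comm are standard task_struct members, but
 * treat them as assumptions about the target kernel:
 *
 *	struct task_struct *task = (void *)bpf_get_current_task();
 *	struct task_struct *parent;
 *	char comm[16];
 *	int err;
 *
 *	err = bpf_core_read(&parent, sizeof(parent), &task->real_parent);
 *	if (!err)
 *		err = bpf_core_read_str(comm, sizeof(comm), &parent->comm);
 */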

#define ___concat(a, b) a ## b
#define ___apply(fn, n) ___concat(fn, n)
#define ___nth(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, __11, N, ...) N

/*
 * return number of provided arguments; used for switch-based variadic macro
 * definitions (see ___last, ___arrow, etc below)
 */
#define ___narg(...) ___nth(_, ##__VA_ARGS__, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
/*
 * return 0 if no arguments are passed, N - otherwise; used for
 * recursively-defined macros to specify termination (0) case, and generic
 * (N) case (e.g., ___read_ptrs, ___core_read)
 */
#define ___empty(...) ___nth(_, ##__VA_ARGS__, N, N, N, N, N, N, N, N, N, N, 0)

#define ___last1(x) x
#define ___last2(a, x) x
#define ___last3(a, b, x) x
#define ___last4(a, b, c, x) x
#define ___last5(a, b, c, d, x) x
#define ___last6(a, b, c, d, e, x) x
#define ___last7(a, b, c, d, e, f, x) x
#define ___last8(a, b, c, d, e, f, g, x) x
#define ___last9(a, b, c, d, e, f, g, h, x) x
#define ___last10(a, b, c, d, e, f, g, h, i, x) x
#define ___last(...) ___apply(___last, ___narg(__VA_ARGS__))(__VA_ARGS__)

#define ___nolast2(a, _) a
#define ___nolast3(a, b, _) a, b
#define ___nolast4(a, b, c, _) a, b, c
#define ___nolast5(a, b, c, d, _) a, b, c, d
#define ___nolast6(a, b, c, d, e, _) a, b, c, d, e
#define ___nolast7(a, b, c, d, e, f, _) a, b, c, d, e, f
#define ___nolast8(a, b, c, d, e, f, g, _) a, b, c, d, e, f, g
#define ___nolast9(a, b, c, d, e, f, g, h, _) a, b, c, d, e, f, g, h
#define ___nolast10(a, b, c, d, e, f, g, h, i, _) a, b, c, d, e, f, g, h, i
#define ___nolast(...) ___apply(___nolast, ___narg(__VA_ARGS__))(__VA_ARGS__)

#define ___arrow1(a) a
#define ___arrow2(a, b) a->b
#define ___arrow3(a, b, c) a->b->c
#define ___arrow4(a, b, c, d) a->b->c->d
#define ___arrow5(a, b, c, d, e) a->b->c->d->e
#define ___arrow6(a, b, c, d, e, f) a->b->c->d->e->f
#define ___arrow7(a, b, c, d, e, f, g) a->b->c->d->e->f->g
#define ___arrow8(a, b, c, d, e, f, g, h) a->b->c->d->e->f->g->h
#define ___arrow9(a, b, c, d, e, f, g, h, i) a->b->c->d->e->f->g->h->i
#define ___arrow10(a, b, c, d, e, f, g, h, i, j) a->b->c->d->e->f->g->h->i->j
#define ___arrow(...) ___apply(___arrow, ___narg(__VA_ARGS__))(__VA_ARGS__)

#define ___type(...) typeof(___arrow(__VA_ARGS__))

#define ___read(read_fn, dst, src_type, src, accessor) \
	read_fn((void *)(dst), sizeof(*(dst)), &((src_type)(src))->accessor)

/* "recursively" read a sequence of inner pointers using local __t var */
#define ___rd_first(fn, src, a) ___read(fn, &__t, ___type(src), src, a);
#define ___rd_last(fn, ...) \
	___read(fn, &__t, ___type(___nolast(__VA_ARGS__)), __t, ___last(__VA_ARGS__));
#define ___rd_p1(fn, ...) const void *__t; ___rd_first(fn, __VA_ARGS__)
#define ___rd_p2(fn, ...) ___rd_p1(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
#define ___rd_p3(fn, ...) ___rd_p2(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
#define ___rd_p4(fn, ...) ___rd_p3(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
#define ___rd_p5(fn, ...) ___rd_p4(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
#define ___rd_p6(fn, ...) ___rd_p5(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
#define ___rd_p7(fn, ...) ___rd_p6(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
#define ___rd_p8(fn, ...) ___rd_p7(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
#define ___rd_p9(fn, ...) ___rd_p8(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
#define ___read_ptrs(fn, src, ...) \
	___apply(___rd_p, ___narg(__VA_ARGS__))(fn, src, __VA_ARGS__)

#define ___core_read0(fn, fn_ptr, dst, src, a) \
	___read(fn, dst, ___type(src), src, a);
#define ___core_readN(fn, fn_ptr, dst, src, ...) \
	___read_ptrs(fn_ptr, src, ___nolast(__VA_ARGS__)) \
	___read(fn, dst, ___type(src, ___nolast(__VA_ARGS__)), __t, \
		___last(__VA_ARGS__));
#define ___core_read(fn, fn_ptr, dst, src, a, ...) \
	___apply(___core_read, ___empty(__VA_ARGS__))(fn, fn_ptr, dst, \
						      src, a, ##__VA_ARGS__)

/*
 * BPF_CORE_READ_INTO() is a more performance-conscious variant of
 * BPF_CORE_READ(), in which final field is read into user-provided storage.
 * See BPF_CORE_READ() below for more details on general usage.
 */
#define BPF_CORE_READ_INTO(dst, src, a, ...) ({ \
	___core_read(bpf_core_read, bpf_core_read, \
		     dst, (src), a, ##__VA_ARGS__) \
})

/*
 * Variant of BPF_CORE_READ_INTO() for reading from user-space memory.
 *
 * NOTE: see comments for BPF_CORE_READ_USER() about the proper types use.
 */
#define BPF_CORE_READ_USER_INTO(dst, src, a, ...) ({ \
	___core_read(bpf_core_read_user, bpf_core_read_user, \
		     dst, (src), a, ##__VA_ARGS__) \
})

/* Non-CO-RE variant of BPF_CORE_READ_INTO() */
#define BPF_PROBE_READ_INTO(dst, src, a, ...) ({ \
	___core_read(bpf_probe_read_kernel, bpf_probe_read_kernel, \
		     dst, (src), a, ##__VA_ARGS__) \
})
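
/*
 * Usage sketch for BPF_CORE_READ_INTO() (illustrative only): it chases
 * intermediate pointers like BPF_CORE_READ(), but stores the final field into
 * caller-provided storage. Assumes vmlinux.h and bpf_helpers.h are included;
 * mm and arg_start are standard task_struct/mm_struct members, but treat them
 * as assumptions about the target kernel:
 *
 *	struct task_struct *task = (void *)bpf_get_current_task();
 *	unsigned long arg_start = 0;
 *
 *	BPF_CORE_READ_INTO(&arg_start, task, mm, arg_start);
 */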

/* Non-CO-RE variant of BPF_CORE_READ_USER_INTO().
 *
 * As no CO-RE relocations are emitted, source types can be arbitrary and are
 * not restricted to kernel types only.
 */
#define BPF_PROBE_READ_USER_INTO(dst, src, a, ...) ({ \
	___core_read(bpf_probe_read_user, bpf_probe_read_user, \
		     dst, (src), a, ##__VA_ARGS__) \
})

/*
 * BPF_CORE_READ_STR_INTO() does same "pointer chasing" as
 * BPF_CORE_READ() for intermediate pointers, but then executes (and returns
 * corresponding error code) bpf_core_read_str() for final string read.
 */
#define BPF_CORE_READ_STR_INTO(dst, src, a, ...) ({ \
	___core_read(bpf_core_read_str, bpf_core_read, \
		     dst, (src), a, ##__VA_ARGS__) \
})

/*
 * Variant of BPF_CORE_READ_STR_INTO() for reading from user-space memory.
 *
 * NOTE: see comments for BPF_CORE_READ_USER() about the proper types use.
 */
#define BPF_CORE_READ_USER_STR_INTO(dst, src, a, ...) ({ \
	___core_read(bpf_core_read_user_str, bpf_core_read_user, \
		     dst, (src), a, ##__VA_ARGS__) \
})

/* Non-CO-RE variant of BPF_CORE_READ_STR_INTO() */
#define BPF_PROBE_READ_STR_INTO(dst, src, a, ...) ({ \
	___core_read(bpf_probe_read_kernel_str, bpf_probe_read_kernel, \
		     dst, (src), a, ##__VA_ARGS__) \
})

/*
 * Non-CO-RE variant of BPF_CORE_READ_USER_STR_INTO().
 *
 * As no CO-RE relocations are emitted, source types can be arbitrary and are
 * not restricted to kernel types only.
 */
#define BPF_PROBE_READ_USER_STR_INTO(dst, src, a, ...) ({ \
	___core_read(bpf_probe_read_user_str, bpf_probe_read_user, \
		     dst, (src), a, ##__VA_ARGS__) \
})

/*
 * BPF_CORE_READ() is used to simplify BPF CO-RE relocatable read, especially
 * when there are few pointer chasing steps.
 * E.g., what in non-BPF world (or in BPF w/ BCC) would be something like:
 *	int x = s->a.b.c->d.e->f->g;
 * can be succinctly achieved using BPF_CORE_READ as:
 *	int x = BPF_CORE_READ(s, a.b.c, d.e, f, g);
 *
 * BPF_CORE_READ will decompose above statement into 4 bpf_core_read (BPF
 * CO-RE relocatable bpf_probe_read_kernel() wrapper) calls, logically
 * equivalent to:
 * 1. const void *__t = s->a.b.c;
 * 2. __t = __t->d.e;
 * 3. __t = __t->f;
 * 4. return __t->g;
 *
 * Equivalence is logical, because there is a heavy type casting/preservation
 * involved, as well as all the reads are happening through
 * bpf_probe_read_kernel() calls using __builtin_preserve_access_index() to
 * emit CO-RE relocations.
 *
 * N.B. Only up to 9 "field accessors" are supported, which should be more
 * than enough for any practical purpose.
 */
#define BPF_CORE_READ(src, a, ...) ({ \
	___type((src), a, ##__VA_ARGS__) __r; \
	BPF_CORE_READ_INTO(&__r, (src), a, ##__VA_ARGS__); \
	__r; \
})
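
/*
 * Usage sketch combining BPF_CORE_READ() pointer chasing with a final string
 * read via BPF_CORE_READ_STR_INTO() (illustrative only). Assumes vmlinux.h
 * and bpf_helpers.h are included; real_parent, tgid and comm are standard
 * task_struct members, but treat them as assumptions about the target kernel:
 *
 *	struct task_struct *task = (void *)bpf_get_current_task();
 *	char parent_comm[16];
 *	pid_t ppid;
 *
 *	ppid = BPF_CORE_READ(task, real_parent, tgid);
 *	BPF_CORE_READ_STR_INTO(&parent_comm, task, real_parent, comm);
 */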

/*
 * Variant of BPF_CORE_READ() for reading from user-space memory.
 *
 * NOTE: all the source types involved are still *kernel types* and need to
 * exist in kernel (or kernel module) BTF, otherwise CO-RE relocation will
 * fail. Custom user types are not relocatable with CO-RE.
 * The typical situation in which BPF_CORE_READ_USER() might be used is to
 * read kernel UAPI types from the user-space memory passed in as a syscall
 * input argument.
 */
#define BPF_CORE_READ_USER(src, a, ...) ({ \
	___type((src), a, ##__VA_ARGS__) __r; \
	BPF_CORE_READ_USER_INTO(&__r, (src), a, ##__VA_ARGS__); \
	__r; \
})

/* Non-CO-RE variant of BPF_CORE_READ() */
#define BPF_PROBE_READ(src, a, ...) ({ \
	___type((src), a, ##__VA_ARGS__) __r; \
	BPF_PROBE_READ_INTO(&__r, (src), a, ##__VA_ARGS__); \
	__r; \
})

/*
 * Non-CO-RE variant of BPF_CORE_READ_USER().
 *
 * As no CO-RE relocations are emitted, source types can be arbitrary and are
 * not restricted to kernel types only.
 */
#define BPF_PROBE_READ_USER(src, a, ...) ({ \
	___type((src), a, ##__VA_ARGS__) __r; \
	BPF_PROBE_READ_USER_INTO(&__r, (src), a, ##__VA_ARGS__); \
	__r; \
})

#endif