/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018 Facebook */

#include <uapi/linux/btf.h>
#include <uapi/linux/types.h>
#include <linux/seq_file.h>
#include <linux/compiler.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/sort.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>

/* BTF (BPF Type Format) is the metadata format which describes
 * the data types of BPF programs/maps. Hence, it basically focuses
 * on the C programming language, which modern BPF primarily uses.
 *
 * ELF Section:
 * ~~~~~~~~~~~
 * The BTF data is stored under the ".BTF" ELF section.
 *
 * struct btf_type:
 * ~~~~~~~~~~~~~~~
 * Each 'struct btf_type' object describes a C data type.
 * Depending on the type it is describing, a 'struct btf_type'
 * object may be followed by more data. E.g. to describe an array,
 * 'struct btf_type' is followed by 'struct btf_array'.
 *
 * 'struct btf_type' and any extra data following it are
 * 4-byte aligned.
 *
 * Type section:
 * ~~~~~~~~~~~~~
 * The BTF type section contains a list of 'struct btf_type' objects.
 * Each one describes a C type. Recall from the above section
 * that a 'struct btf_type' object could be immediately followed by
 * extra data in order to describe some particular C types.
 *
 * type_id:
 * ~~~~~~~
 * Each btf_type object is identified by a type_id. The type_id
 * is implied by the location of the btf_type object in
 * the BTF type section. The first one has type_id 1. The second
 * one has type_id 2...etc. Hence, an earlier btf_type has
 * a smaller type_id.
 *
 * A btf_type object may refer to another btf_type object by using
 * the type_id (i.e. the "type" in the "struct btf_type").
 *
 * NOTE that we cannot assume any reference-order.
 * A btf_type object can refer to an earlier btf_type object
 * but it can also refer to a later btf_type object.
 *
 * For example, to describe "const void *", a btf_type
 * object describing "const" may refer to another btf_type
 * object describing "void *". This type-reference is done
 * by specifying the type_id:
 *
 * [1] CONST (anon) type_id=2
 * [2] PTR (anon) type_id=0
 *
 * The above is the btf_verifier debug log:
 *   - Each line starting with "[?]" is a btf_type object
 *   - [?] is the type_id of the btf_type object.
 *   - CONST/PTR is the BTF_KIND_XXX
 *   - "(anon)" is the name of the type. It just
 *     happens that CONST and PTR have no name.
 *   - type_id=XXX is the 'u32 type' in btf_type
 *
 * NOTE: "void" has type_id 0
 *
 * String section:
 * ~~~~~~~~~~~~~~
 * The BTF string section contains the names used by the type section.
 * Each string is referred to by an "offset" from the beginning of the
 * string section.
 *
 * Each string is '\0' terminated.
 *
 * The first character in the string section must be '\0',
 * which is used to mean 'anonymous'. Some btf_type may not
 * have a name.
 */

/* BTF verification:
 *
 * To verify BTF data, two passes are needed.
 *
 * Pass #1
 * ~~~~~~~
 * The first pass is to collect all btf_type objects into
 * an array: "btf->types".
 *
 * Depending on the C type that a btf_type is describing,
 * a btf_type may be followed by extra data. We don't know
 * how many btf_types there are, and more importantly we don't
 * know where each btf_type is located in the type section.
 *
 * Without knowing the location of each type_id, most verifications
 * cannot be done. E.g. an earlier btf_type may refer to a later
 * btf_type (recall the "const void *" above), so we cannot
 * check this type-reference in the first pass.
 *
 * The first pass still does some verifications (e.g.
 * checking that the name is a valid offset into the string section).
 *
 * Pass #2
 * ~~~~~~~
 * The main focus is to resolve a btf_type that is referring
 * to another type.
 *
 * We have to ensure the referring type:
 * 1) does exist in the BTF (i.e. in btf->types[])
 * 2) does not cause a loop:
 *	struct A {
 *		struct B b;
 *	};
 *
 *	struct B {
 *		struct A a;
 *	};
 *
 * btf_type_needs_resolve() decides if a btf_type needs
 * to be resolved.
 *
 * The needs_resolve type implements the "resolve()" ops which
 * essentially does a DFS and detects backedges.
 *
 * During resolve (or DFS), different C types have different
 * "RESOLVED" conditions.
 *
 * When resolving a BTF_KIND_STRUCT, we need to resolve all its
 * members because a member is always referring to another
 * type. A struct's member can be treated as "RESOLVED" if
 * it is referring to a BTF_KIND_PTR. Otherwise, the
 * following valid C struct would be rejected:
 *
 *	struct A {
 *		int m;
 *		struct A *a;
 *	};
 *
 * When resolving a BTF_KIND_PTR, it needs to keep resolving if
 * it is referring to another BTF_KIND_PTR. Otherwise, we cannot
 * detect a pointer loop, e.g.:
 * BTF_KIND_CONST -> BTF_KIND_PTR -> BTF_KIND_CONST -> BTF_KIND_PTR +
 *                        ^                                         |
 *                        +-----------------------------------------+
 *
 */

#define BITS_PER_U128 (sizeof(u64) * BITS_PER_BYTE * 2)
#define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
#define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
#define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
#define BITS_ROUNDUP_BYTES(bits) \
	(BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))

#define BTF_INFO_MASK 0x8f00ffff
#define BTF_INT_MASK 0x0fffffff
#define BTF_TYPE_ID_VALID(type_id) ((type_id) <= BTF_MAX_TYPE)
#define BTF_STR_OFFSET_VALID(name_off) ((name_off) <= BTF_MAX_NAME_OFFSET)

/* 16MB for 64k structs and each has 16 members and
 * a few MB of space for the string section.
 * The hard limit is S32_MAX.
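 *
 * A rough sanity check of that figure (an estimate, assuming the
 * 12-byte 'struct btf_type' and 'struct btf_member' layouts from
 * uapi/linux/btf.h): 64k structs * (12 + 16 * 12) bytes is roughly
 * 12.75MB of type section, leaving a few MB of the 16MB budget for
 * the strings.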
175 */ 176 #define BTF_MAX_SIZE (16 * 1024 * 1024) 177 178 #define for_each_member(i, struct_type, member) \ 179 for (i = 0, member = btf_type_member(struct_type); \ 180 i < btf_type_vlen(struct_type); \ 181 i++, member++) 182 183 #define for_each_member_from(i, from, struct_type, member) \ 184 for (i = from, member = btf_type_member(struct_type) + from; \ 185 i < btf_type_vlen(struct_type); \ 186 i++, member++) 187 188 static DEFINE_IDR(btf_idr); 189 static DEFINE_SPINLOCK(btf_idr_lock); 190 191 struct btf { 192 void *data; 193 struct btf_type **types; 194 u32 *resolved_ids; 195 u32 *resolved_sizes; 196 const char *strings; 197 void *nohdr_data; 198 struct btf_header hdr; 199 u32 nr_types; 200 u32 types_size; 201 u32 data_size; 202 refcount_t refcnt; 203 u32 id; 204 struct rcu_head rcu; 205 }; 206 207 enum verifier_phase { 208 CHECK_META, 209 CHECK_TYPE, 210 }; 211 212 struct resolve_vertex { 213 const struct btf_type *t; 214 u32 type_id; 215 u16 next_member; 216 }; 217 218 enum visit_state { 219 NOT_VISITED, 220 VISITED, 221 RESOLVED, 222 }; 223 224 enum resolve_mode { 225 RESOLVE_TBD, /* To Be Determined */ 226 RESOLVE_PTR, /* Resolving for Pointer */ 227 RESOLVE_STRUCT_OR_ARRAY, /* Resolving for struct/union 228 * or array 229 */ 230 }; 231 232 #define MAX_RESOLVE_DEPTH 32 233 234 struct btf_sec_info { 235 u32 off; 236 u32 len; 237 }; 238 239 struct btf_verifier_env { 240 struct btf *btf; 241 u8 *visit_states; 242 struct resolve_vertex stack[MAX_RESOLVE_DEPTH]; 243 struct bpf_verifier_log log; 244 u32 log_type_id; 245 u32 top_stack; 246 enum verifier_phase phase; 247 enum resolve_mode resolve_mode; 248 }; 249 250 static const char * const btf_kind_str[NR_BTF_KINDS] = { 251 [BTF_KIND_UNKN] = "UNKNOWN", 252 [BTF_KIND_INT] = "INT", 253 [BTF_KIND_PTR] = "PTR", 254 [BTF_KIND_ARRAY] = "ARRAY", 255 [BTF_KIND_STRUCT] = "STRUCT", 256 [BTF_KIND_UNION] = "UNION", 257 [BTF_KIND_ENUM] = "ENUM", 258 [BTF_KIND_FWD] = "FWD", 259 [BTF_KIND_TYPEDEF] = "TYPEDEF", 260 [BTF_KIND_VOLATILE] = "VOLATILE", 261 [BTF_KIND_CONST] = "CONST", 262 [BTF_KIND_RESTRICT] = "RESTRICT", 263 [BTF_KIND_FUNC] = "FUNC", 264 [BTF_KIND_FUNC_PROTO] = "FUNC_PROTO", 265 }; 266 267 struct btf_kind_operations { 268 s32 (*check_meta)(struct btf_verifier_env *env, 269 const struct btf_type *t, 270 u32 meta_left); 271 int (*resolve)(struct btf_verifier_env *env, 272 const struct resolve_vertex *v); 273 int (*check_member)(struct btf_verifier_env *env, 274 const struct btf_type *struct_type, 275 const struct btf_member *member, 276 const struct btf_type *member_type); 277 int (*check_kflag_member)(struct btf_verifier_env *env, 278 const struct btf_type *struct_type, 279 const struct btf_member *member, 280 const struct btf_type *member_type); 281 void (*log_details)(struct btf_verifier_env *env, 282 const struct btf_type *t); 283 void (*seq_show)(const struct btf *btf, const struct btf_type *t, 284 u32 type_id, void *data, u8 bits_offsets, 285 struct seq_file *m); 286 }; 287 288 static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS]; 289 static struct btf_type btf_void; 290 291 static int btf_resolve(struct btf_verifier_env *env, 292 const struct btf_type *t, u32 type_id); 293 294 static bool btf_type_is_modifier(const struct btf_type *t) 295 { 296 /* Some of them is not strictly a C modifier 297 * but they are grouped into the same bucket 298 * for BTF concern: 299 * A type (t) that refers to another 300 * type through t->type AND its size cannot 301 * be determined without following the t->type. 
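 *
 * For example, given "typedef int pid_t;", the size of the TYPEDEF
 * entry for pid_t is only known by following its t->type down to the
 * underlying INT; the same holds for const/volatile/restrict wrappers.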
302 * 303 * ptr does not fall into this bucket 304 * because its size is always sizeof(void *). 305 */ 306 switch (BTF_INFO_KIND(t->info)) { 307 case BTF_KIND_TYPEDEF: 308 case BTF_KIND_VOLATILE: 309 case BTF_KIND_CONST: 310 case BTF_KIND_RESTRICT: 311 return true; 312 } 313 314 return false; 315 } 316 317 static bool btf_type_is_void(const struct btf_type *t) 318 { 319 return t == &btf_void; 320 } 321 322 static bool btf_type_is_fwd(const struct btf_type *t) 323 { 324 return BTF_INFO_KIND(t->info) == BTF_KIND_FWD; 325 } 326 327 static bool btf_type_is_func(const struct btf_type *t) 328 { 329 return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC; 330 } 331 332 static bool btf_type_is_func_proto(const struct btf_type *t) 333 { 334 return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC_PROTO; 335 } 336 337 static bool btf_type_nosize(const struct btf_type *t) 338 { 339 return btf_type_is_void(t) || btf_type_is_fwd(t) || 340 btf_type_is_func(t) || btf_type_is_func_proto(t); 341 } 342 343 static bool btf_type_nosize_or_null(const struct btf_type *t) 344 { 345 return !t || btf_type_nosize(t); 346 } 347 348 /* union is only a special case of struct: 349 * all its offsetof(member) == 0 350 */ 351 static bool btf_type_is_struct(const struct btf_type *t) 352 { 353 u8 kind = BTF_INFO_KIND(t->info); 354 355 return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION; 356 } 357 358 static bool btf_type_is_array(const struct btf_type *t) 359 { 360 return BTF_INFO_KIND(t->info) == BTF_KIND_ARRAY; 361 } 362 363 static bool btf_type_is_ptr(const struct btf_type *t) 364 { 365 return BTF_INFO_KIND(t->info) == BTF_KIND_PTR; 366 } 367 368 static bool btf_type_is_int(const struct btf_type *t) 369 { 370 return BTF_INFO_KIND(t->info) == BTF_KIND_INT; 371 } 372 373 /* What types need to be resolved? 374 * 375 * btf_type_is_modifier() is an obvious one. 376 * 377 * btf_type_is_struct() because its member refers to 378 * another type (through member->type). 379 380 * btf_type_is_array() because its element (array->type) 381 * refers to another type. Array can be thought of a 382 * special case of struct while array just has the same 383 * member-type repeated by array->nelems of times. 384 */ 385 static bool btf_type_needs_resolve(const struct btf_type *t) 386 { 387 return btf_type_is_modifier(t) || 388 btf_type_is_ptr(t) || 389 btf_type_is_struct(t) || 390 btf_type_is_array(t); 391 } 392 393 /* t->size can be used */ 394 static bool btf_type_has_size(const struct btf_type *t) 395 { 396 switch (BTF_INFO_KIND(t->info)) { 397 case BTF_KIND_INT: 398 case BTF_KIND_STRUCT: 399 case BTF_KIND_UNION: 400 case BTF_KIND_ENUM: 401 return true; 402 } 403 404 return false; 405 } 406 407 static const char *btf_int_encoding_str(u8 encoding) 408 { 409 if (encoding == 0) 410 return "(none)"; 411 else if (encoding == BTF_INT_SIGNED) 412 return "SIGNED"; 413 else if (encoding == BTF_INT_CHAR) 414 return "CHAR"; 415 else if (encoding == BTF_INT_BOOL) 416 return "BOOL"; 417 else 418 return "UNKN"; 419 } 420 421 static u16 btf_type_vlen(const struct btf_type *t) 422 { 423 return BTF_INFO_VLEN(t->info); 424 } 425 426 static bool btf_type_kflag(const struct btf_type *t) 427 { 428 return BTF_INFO_KFLAG(t->info); 429 } 430 431 static u32 btf_member_bit_offset(const struct btf_type *struct_type, 432 const struct btf_member *member) 433 { 434 return btf_type_kflag(struct_type) ? 
BTF_MEMBER_BIT_OFFSET(member->offset) 435 : member->offset; 436 } 437 438 static u32 btf_member_bitfield_size(const struct btf_type *struct_type, 439 const struct btf_member *member) 440 { 441 return btf_type_kflag(struct_type) ? BTF_MEMBER_BITFIELD_SIZE(member->offset) 442 : 0; 443 } 444 445 static u32 btf_type_int(const struct btf_type *t) 446 { 447 return *(u32 *)(t + 1); 448 } 449 450 static const struct btf_array *btf_type_array(const struct btf_type *t) 451 { 452 return (const struct btf_array *)(t + 1); 453 } 454 455 static const struct btf_member *btf_type_member(const struct btf_type *t) 456 { 457 return (const struct btf_member *)(t + 1); 458 } 459 460 static const struct btf_enum *btf_type_enum(const struct btf_type *t) 461 { 462 return (const struct btf_enum *)(t + 1); 463 } 464 465 static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t) 466 { 467 return kind_ops[BTF_INFO_KIND(t->info)]; 468 } 469 470 bool btf_name_offset_valid(const struct btf *btf, u32 offset) 471 { 472 return BTF_STR_OFFSET_VALID(offset) && 473 offset < btf->hdr.str_len; 474 } 475 476 /* Only C-style identifier is permitted. This can be relaxed if 477 * necessary. 478 */ 479 static bool btf_name_valid_identifier(const struct btf *btf, u32 offset) 480 { 481 /* offset must be valid */ 482 const char *src = &btf->strings[offset]; 483 const char *src_limit; 484 485 if (!isalpha(*src) && *src != '_') 486 return false; 487 488 /* set a limit on identifier length */ 489 src_limit = src + KSYM_NAME_LEN; 490 src++; 491 while (*src && src < src_limit) { 492 if (!isalnum(*src) && *src != '_') 493 return false; 494 src++; 495 } 496 497 return !*src; 498 } 499 500 static const char *__btf_name_by_offset(const struct btf *btf, u32 offset) 501 { 502 if (!offset) 503 return "(anon)"; 504 else if (offset < btf->hdr.str_len) 505 return &btf->strings[offset]; 506 else 507 return "(invalid-name-offset)"; 508 } 509 510 const char *btf_name_by_offset(const struct btf *btf, u32 offset) 511 { 512 if (offset < btf->hdr.str_len) 513 return &btf->strings[offset]; 514 515 return NULL; 516 } 517 518 const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id) 519 { 520 if (type_id > btf->nr_types) 521 return NULL; 522 523 return btf->types[type_id]; 524 } 525 526 /* 527 * Regular int is not a bit field and it must be either 528 * u8/u16/u32/u64 or __int128. 529 */ 530 static bool btf_type_int_is_regular(const struct btf_type *t) 531 { 532 u8 nr_bits, nr_bytes; 533 u32 int_data; 534 535 int_data = btf_type_int(t); 536 nr_bits = BTF_INT_BITS(int_data); 537 nr_bytes = BITS_ROUNDUP_BYTES(nr_bits); 538 if (BITS_PER_BYTE_MASKED(nr_bits) || 539 BTF_INT_OFFSET(int_data) || 540 (nr_bytes != sizeof(u8) && nr_bytes != sizeof(u16) && 541 nr_bytes != sizeof(u32) && nr_bytes != sizeof(u64) && 542 nr_bytes != (2 * sizeof(u64)))) { 543 return false; 544 } 545 546 return true; 547 } 548 549 /* 550 * Check that given struct member is a regular int with expected 551 * offset and size. 
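 *
 * For example, for "struct { u32 a; u64 b; };", member "b" satisfies
 * this check with expected_offset == 8 and expected_size == 8, whereas
 * a bitfield member such as "u64 b:3;" would not.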
552 */ 553 bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s, 554 const struct btf_member *m, 555 u32 expected_offset, u32 expected_size) 556 { 557 const struct btf_type *t; 558 u32 id, int_data; 559 u8 nr_bits; 560 561 id = m->type; 562 t = btf_type_id_size(btf, &id, NULL); 563 if (!t || !btf_type_is_int(t)) 564 return false; 565 566 int_data = btf_type_int(t); 567 nr_bits = BTF_INT_BITS(int_data); 568 if (btf_type_kflag(s)) { 569 u32 bitfield_size = BTF_MEMBER_BITFIELD_SIZE(m->offset); 570 u32 bit_offset = BTF_MEMBER_BIT_OFFSET(m->offset); 571 572 /* if kflag set, int should be a regular int and 573 * bit offset should be at byte boundary. 574 */ 575 return !bitfield_size && 576 BITS_ROUNDUP_BYTES(bit_offset) == expected_offset && 577 BITS_ROUNDUP_BYTES(nr_bits) == expected_size; 578 } 579 580 if (BTF_INT_OFFSET(int_data) || 581 BITS_PER_BYTE_MASKED(m->offset) || 582 BITS_ROUNDUP_BYTES(m->offset) != expected_offset || 583 BITS_PER_BYTE_MASKED(nr_bits) || 584 BITS_ROUNDUP_BYTES(nr_bits) != expected_size) 585 return false; 586 587 return true; 588 } 589 590 __printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log, 591 const char *fmt, ...) 592 { 593 va_list args; 594 595 va_start(args, fmt); 596 bpf_verifier_vlog(log, fmt, args); 597 va_end(args); 598 } 599 600 __printf(2, 3) static void btf_verifier_log(struct btf_verifier_env *env, 601 const char *fmt, ...) 602 { 603 struct bpf_verifier_log *log = &env->log; 604 va_list args; 605 606 if (!bpf_verifier_log_needed(log)) 607 return; 608 609 va_start(args, fmt); 610 bpf_verifier_vlog(log, fmt, args); 611 va_end(args); 612 } 613 614 __printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env, 615 const struct btf_type *t, 616 bool log_details, 617 const char *fmt, ...) 618 { 619 struct bpf_verifier_log *log = &env->log; 620 u8 kind = BTF_INFO_KIND(t->info); 621 struct btf *btf = env->btf; 622 va_list args; 623 624 if (!bpf_verifier_log_needed(log)) 625 return; 626 627 __btf_verifier_log(log, "[%u] %s %s%s", 628 env->log_type_id, 629 btf_kind_str[kind], 630 __btf_name_by_offset(btf, t->name_off), 631 log_details ? " " : ""); 632 633 if (log_details) 634 btf_type_ops(t)->log_details(env, t); 635 636 if (fmt && *fmt) { 637 __btf_verifier_log(log, " "); 638 va_start(args, fmt); 639 bpf_verifier_vlog(log, fmt, args); 640 va_end(args); 641 } 642 643 __btf_verifier_log(log, "\n"); 644 } 645 646 #define btf_verifier_log_type(env, t, ...) \ 647 __btf_verifier_log_type((env), (t), true, __VA_ARGS__) 648 #define btf_verifier_log_basic(env, t, ...) \ 649 __btf_verifier_log_type((env), (t), false, __VA_ARGS__) 650 651 __printf(4, 5) 652 static void btf_verifier_log_member(struct btf_verifier_env *env, 653 const struct btf_type *struct_type, 654 const struct btf_member *member, 655 const char *fmt, ...) 656 { 657 struct bpf_verifier_log *log = &env->log; 658 struct btf *btf = env->btf; 659 va_list args; 660 661 if (!bpf_verifier_log_needed(log)) 662 return; 663 664 /* The CHECK_META phase already did a btf dump. 665 * 666 * If member is logged again, it must hit an error in 667 * parsing this member. It is useful to print out which 668 * struct this member belongs to. 
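 *
 * The resulting log then reads roughly like (illustrative values):
 *
 *	[4] STRUCT task size=12 vlen=2
 *	comm type_id=5 bits_offset=104 Member bits_offset exceeds its struct size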
669 */ 670 if (env->phase != CHECK_META) 671 btf_verifier_log_type(env, struct_type, NULL); 672 673 if (btf_type_kflag(struct_type)) 674 __btf_verifier_log(log, 675 "\t%s type_id=%u bitfield_size=%u bits_offset=%u", 676 __btf_name_by_offset(btf, member->name_off), 677 member->type, 678 BTF_MEMBER_BITFIELD_SIZE(member->offset), 679 BTF_MEMBER_BIT_OFFSET(member->offset)); 680 else 681 __btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u", 682 __btf_name_by_offset(btf, member->name_off), 683 member->type, member->offset); 684 685 if (fmt && *fmt) { 686 __btf_verifier_log(log, " "); 687 va_start(args, fmt); 688 bpf_verifier_vlog(log, fmt, args); 689 va_end(args); 690 } 691 692 __btf_verifier_log(log, "\n"); 693 } 694 695 static void btf_verifier_log_hdr(struct btf_verifier_env *env, 696 u32 btf_data_size) 697 { 698 struct bpf_verifier_log *log = &env->log; 699 const struct btf *btf = env->btf; 700 const struct btf_header *hdr; 701 702 if (!bpf_verifier_log_needed(log)) 703 return; 704 705 hdr = &btf->hdr; 706 __btf_verifier_log(log, "magic: 0x%x\n", hdr->magic); 707 __btf_verifier_log(log, "version: %u\n", hdr->version); 708 __btf_verifier_log(log, "flags: 0x%x\n", hdr->flags); 709 __btf_verifier_log(log, "hdr_len: %u\n", hdr->hdr_len); 710 __btf_verifier_log(log, "type_off: %u\n", hdr->type_off); 711 __btf_verifier_log(log, "type_len: %u\n", hdr->type_len); 712 __btf_verifier_log(log, "str_off: %u\n", hdr->str_off); 713 __btf_verifier_log(log, "str_len: %u\n", hdr->str_len); 714 __btf_verifier_log(log, "btf_total_size: %u\n", btf_data_size); 715 } 716 717 static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t) 718 { 719 struct btf *btf = env->btf; 720 721 /* < 2 because +1 for btf_void which is always in btf->types[0]. 722 * btf_void is not accounted in btf->nr_types because btf_void 723 * does not come from the BTF file. 724 */ 725 if (btf->types_size - btf->nr_types < 2) { 726 /* Expand 'types' array */ 727 728 struct btf_type **new_types; 729 u32 expand_by, new_size; 730 731 if (btf->types_size == BTF_MAX_TYPE) { 732 btf_verifier_log(env, "Exceeded max num of types"); 733 return -E2BIG; 734 } 735 736 expand_by = max_t(u32, btf->types_size >> 2, 16); 737 new_size = min_t(u32, BTF_MAX_TYPE, 738 btf->types_size + expand_by); 739 740 new_types = kvcalloc(new_size, sizeof(*new_types), 741 GFP_KERNEL | __GFP_NOWARN); 742 if (!new_types) 743 return -ENOMEM; 744 745 if (btf->nr_types == 0) 746 new_types[0] = &btf_void; 747 else 748 memcpy(new_types, btf->types, 749 sizeof(*btf->types) * (btf->nr_types + 1)); 750 751 kvfree(btf->types); 752 btf->types = new_types; 753 btf->types_size = new_size; 754 } 755 756 btf->types[++(btf->nr_types)] = t; 757 758 return 0; 759 } 760 761 static int btf_alloc_id(struct btf *btf) 762 { 763 int id; 764 765 idr_preload(GFP_KERNEL); 766 spin_lock_bh(&btf_idr_lock); 767 id = idr_alloc_cyclic(&btf_idr, btf, 1, INT_MAX, GFP_ATOMIC); 768 if (id > 0) 769 btf->id = id; 770 spin_unlock_bh(&btf_idr_lock); 771 idr_preload_end(); 772 773 if (WARN_ON_ONCE(!id)) 774 return -ENOSPC; 775 776 return id > 0 ? 0 : id; 777 } 778 779 static void btf_free_id(struct btf *btf) 780 { 781 unsigned long flags; 782 783 /* 784 * In map-in-map, calling map_delete_elem() on outer 785 * map will call bpf_map_put on the inner map. 786 * It will then eventually call btf_free_id() 787 * on the inner map. Some of the map_delete_elem() 788 * implementation may have irq disabled, so 789 * we need to use the _irqsave() version instead 790 * of the _bh() version. 
791 */ 792 spin_lock_irqsave(&btf_idr_lock, flags); 793 idr_remove(&btf_idr, btf->id); 794 spin_unlock_irqrestore(&btf_idr_lock, flags); 795 } 796 797 static void btf_free(struct btf *btf) 798 { 799 kvfree(btf->types); 800 kvfree(btf->resolved_sizes); 801 kvfree(btf->resolved_ids); 802 kvfree(btf->data); 803 kfree(btf); 804 } 805 806 static void btf_free_rcu(struct rcu_head *rcu) 807 { 808 struct btf *btf = container_of(rcu, struct btf, rcu); 809 810 btf_free(btf); 811 } 812 813 void btf_put(struct btf *btf) 814 { 815 if (btf && refcount_dec_and_test(&btf->refcnt)) { 816 btf_free_id(btf); 817 call_rcu(&btf->rcu, btf_free_rcu); 818 } 819 } 820 821 static int env_resolve_init(struct btf_verifier_env *env) 822 { 823 struct btf *btf = env->btf; 824 u32 nr_types = btf->nr_types; 825 u32 *resolved_sizes = NULL; 826 u32 *resolved_ids = NULL; 827 u8 *visit_states = NULL; 828 829 /* +1 for btf_void */ 830 resolved_sizes = kvcalloc(nr_types + 1, sizeof(*resolved_sizes), 831 GFP_KERNEL | __GFP_NOWARN); 832 if (!resolved_sizes) 833 goto nomem; 834 835 resolved_ids = kvcalloc(nr_types + 1, sizeof(*resolved_ids), 836 GFP_KERNEL | __GFP_NOWARN); 837 if (!resolved_ids) 838 goto nomem; 839 840 visit_states = kvcalloc(nr_types + 1, sizeof(*visit_states), 841 GFP_KERNEL | __GFP_NOWARN); 842 if (!visit_states) 843 goto nomem; 844 845 btf->resolved_sizes = resolved_sizes; 846 btf->resolved_ids = resolved_ids; 847 env->visit_states = visit_states; 848 849 return 0; 850 851 nomem: 852 kvfree(resolved_sizes); 853 kvfree(resolved_ids); 854 kvfree(visit_states); 855 return -ENOMEM; 856 } 857 858 static void btf_verifier_env_free(struct btf_verifier_env *env) 859 { 860 kvfree(env->visit_states); 861 kfree(env); 862 } 863 864 static bool env_type_is_resolve_sink(const struct btf_verifier_env *env, 865 const struct btf_type *next_type) 866 { 867 switch (env->resolve_mode) { 868 case RESOLVE_TBD: 869 /* int, enum or void is a sink */ 870 return !btf_type_needs_resolve(next_type); 871 case RESOLVE_PTR: 872 /* int, enum, void, struct, array, func or func_proto is a sink 873 * for ptr 874 */ 875 return !btf_type_is_modifier(next_type) && 876 !btf_type_is_ptr(next_type); 877 case RESOLVE_STRUCT_OR_ARRAY: 878 /* int, enum, void, ptr, func or func_proto is a sink 879 * for struct and array 880 */ 881 return !btf_type_is_modifier(next_type) && 882 !btf_type_is_array(next_type) && 883 !btf_type_is_struct(next_type); 884 default: 885 BUG(); 886 } 887 } 888 889 static bool env_type_is_resolved(const struct btf_verifier_env *env, 890 u32 type_id) 891 { 892 return env->visit_states[type_id] == RESOLVED; 893 } 894 895 static int env_stack_push(struct btf_verifier_env *env, 896 const struct btf_type *t, u32 type_id) 897 { 898 struct resolve_vertex *v; 899 900 if (env->top_stack == MAX_RESOLVE_DEPTH) 901 return -E2BIG; 902 903 if (env->visit_states[type_id] != NOT_VISITED) 904 return -EEXIST; 905 906 env->visit_states[type_id] = VISITED; 907 908 v = &env->stack[env->top_stack++]; 909 v->t = t; 910 v->type_id = type_id; 911 v->next_member = 0; 912 913 if (env->resolve_mode == RESOLVE_TBD) { 914 if (btf_type_is_ptr(t)) 915 env->resolve_mode = RESOLVE_PTR; 916 else if (btf_type_is_struct(t) || btf_type_is_array(t)) 917 env->resolve_mode = RESOLVE_STRUCT_OR_ARRAY; 918 } 919 920 return 0; 921 } 922 923 static void env_stack_set_next_member(struct btf_verifier_env *env, 924 u16 next_member) 925 { 926 env->stack[env->top_stack - 1].next_member = next_member; 927 } 928 929 static void env_stack_pop_resolved(struct btf_verifier_env *env, 930 
u32 resolved_type_id, 931 u32 resolved_size) 932 { 933 u32 type_id = env->stack[--(env->top_stack)].type_id; 934 struct btf *btf = env->btf; 935 936 btf->resolved_sizes[type_id] = resolved_size; 937 btf->resolved_ids[type_id] = resolved_type_id; 938 env->visit_states[type_id] = RESOLVED; 939 } 940 941 static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env) 942 { 943 return env->top_stack ? &env->stack[env->top_stack - 1] : NULL; 944 } 945 946 /* The input param "type_id" must point to a needs_resolve type */ 947 static const struct btf_type *btf_type_id_resolve(const struct btf *btf, 948 u32 *type_id) 949 { 950 *type_id = btf->resolved_ids[*type_id]; 951 return btf_type_by_id(btf, *type_id); 952 } 953 954 const struct btf_type *btf_type_id_size(const struct btf *btf, 955 u32 *type_id, u32 *ret_size) 956 { 957 const struct btf_type *size_type; 958 u32 size_type_id = *type_id; 959 u32 size = 0; 960 961 size_type = btf_type_by_id(btf, size_type_id); 962 if (btf_type_nosize_or_null(size_type)) 963 return NULL; 964 965 if (btf_type_has_size(size_type)) { 966 size = size_type->size; 967 } else if (btf_type_is_array(size_type)) { 968 size = btf->resolved_sizes[size_type_id]; 969 } else if (btf_type_is_ptr(size_type)) { 970 size = sizeof(void *); 971 } else { 972 if (WARN_ON_ONCE(!btf_type_is_modifier(size_type))) 973 return NULL; 974 975 size = btf->resolved_sizes[size_type_id]; 976 size_type_id = btf->resolved_ids[size_type_id]; 977 size_type = btf_type_by_id(btf, size_type_id); 978 if (btf_type_nosize_or_null(size_type)) 979 return NULL; 980 } 981 982 *type_id = size_type_id; 983 if (ret_size) 984 *ret_size = size; 985 986 return size_type; 987 } 988 989 static int btf_df_check_member(struct btf_verifier_env *env, 990 const struct btf_type *struct_type, 991 const struct btf_member *member, 992 const struct btf_type *member_type) 993 { 994 btf_verifier_log_basic(env, struct_type, 995 "Unsupported check_member"); 996 return -EINVAL; 997 } 998 999 static int btf_df_check_kflag_member(struct btf_verifier_env *env, 1000 const struct btf_type *struct_type, 1001 const struct btf_member *member, 1002 const struct btf_type *member_type) 1003 { 1004 btf_verifier_log_basic(env, struct_type, 1005 "Unsupported check_kflag_member"); 1006 return -EINVAL; 1007 } 1008 1009 /* Used for ptr, array and struct/union type members. 1010 * int, enum and modifier types have their specific callback functions. 1011 */ 1012 static int btf_generic_check_kflag_member(struct btf_verifier_env *env, 1013 const struct btf_type *struct_type, 1014 const struct btf_member *member, 1015 const struct btf_type *member_type) 1016 { 1017 if (BTF_MEMBER_BITFIELD_SIZE(member->offset)) { 1018 btf_verifier_log_member(env, struct_type, member, 1019 "Invalid member bitfield_size"); 1020 return -EINVAL; 1021 } 1022 1023 /* bitfield size is 0, so member->offset represents bit offset only. 1024 * It is safe to call non kflag check_member variants. 
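 *
 * (With kind_flag set, member->offset packs the bitfield size in its
 * top 8 bits and the bit offset in its low 24 bits, cf.
 * BTF_MEMBER_BITFIELD_SIZE() and BTF_MEMBER_BIT_OFFSET(). A zero
 * bitfield size therefore leaves member->offset equal to the plain
 * bit offset.)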
1025 */ 1026 return btf_type_ops(member_type)->check_member(env, struct_type, 1027 member, 1028 member_type); 1029 } 1030 1031 static int btf_df_resolve(struct btf_verifier_env *env, 1032 const struct resolve_vertex *v) 1033 { 1034 btf_verifier_log_basic(env, v->t, "Unsupported resolve"); 1035 return -EINVAL; 1036 } 1037 1038 static void btf_df_seq_show(const struct btf *btf, const struct btf_type *t, 1039 u32 type_id, void *data, u8 bits_offsets, 1040 struct seq_file *m) 1041 { 1042 seq_printf(m, "<unsupported kind:%u>", BTF_INFO_KIND(t->info)); 1043 } 1044 1045 static int btf_int_check_member(struct btf_verifier_env *env, 1046 const struct btf_type *struct_type, 1047 const struct btf_member *member, 1048 const struct btf_type *member_type) 1049 { 1050 u32 int_data = btf_type_int(member_type); 1051 u32 struct_bits_off = member->offset; 1052 u32 struct_size = struct_type->size; 1053 u32 nr_copy_bits; 1054 u32 bytes_offset; 1055 1056 if (U32_MAX - struct_bits_off < BTF_INT_OFFSET(int_data)) { 1057 btf_verifier_log_member(env, struct_type, member, 1058 "bits_offset exceeds U32_MAX"); 1059 return -EINVAL; 1060 } 1061 1062 struct_bits_off += BTF_INT_OFFSET(int_data); 1063 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); 1064 nr_copy_bits = BTF_INT_BITS(int_data) + 1065 BITS_PER_BYTE_MASKED(struct_bits_off); 1066 1067 if (nr_copy_bits > BITS_PER_U128) { 1068 btf_verifier_log_member(env, struct_type, member, 1069 "nr_copy_bits exceeds 128"); 1070 return -EINVAL; 1071 } 1072 1073 if (struct_size < bytes_offset || 1074 struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) { 1075 btf_verifier_log_member(env, struct_type, member, 1076 "Member exceeds struct_size"); 1077 return -EINVAL; 1078 } 1079 1080 return 0; 1081 } 1082 1083 static int btf_int_check_kflag_member(struct btf_verifier_env *env, 1084 const struct btf_type *struct_type, 1085 const struct btf_member *member, 1086 const struct btf_type *member_type) 1087 { 1088 u32 struct_bits_off, nr_bits, nr_int_data_bits, bytes_offset; 1089 u32 int_data = btf_type_int(member_type); 1090 u32 struct_size = struct_type->size; 1091 u32 nr_copy_bits; 1092 1093 /* a regular int type is required for the kflag int member */ 1094 if (!btf_type_int_is_regular(member_type)) { 1095 btf_verifier_log_member(env, struct_type, member, 1096 "Invalid member base type"); 1097 return -EINVAL; 1098 } 1099 1100 /* check sanity of bitfield size */ 1101 nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset); 1102 struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset); 1103 nr_int_data_bits = BTF_INT_BITS(int_data); 1104 if (!nr_bits) { 1105 /* Not a bitfield member, member offset must be at byte 1106 * boundary. 
1107 */ 1108 if (BITS_PER_BYTE_MASKED(struct_bits_off)) { 1109 btf_verifier_log_member(env, struct_type, member, 1110 "Invalid member offset"); 1111 return -EINVAL; 1112 } 1113 1114 nr_bits = nr_int_data_bits; 1115 } else if (nr_bits > nr_int_data_bits) { 1116 btf_verifier_log_member(env, struct_type, member, 1117 "Invalid member bitfield_size"); 1118 return -EINVAL; 1119 } 1120 1121 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); 1122 nr_copy_bits = nr_bits + BITS_PER_BYTE_MASKED(struct_bits_off); 1123 if (nr_copy_bits > BITS_PER_U128) { 1124 btf_verifier_log_member(env, struct_type, member, 1125 "nr_copy_bits exceeds 128"); 1126 return -EINVAL; 1127 } 1128 1129 if (struct_size < bytes_offset || 1130 struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) { 1131 btf_verifier_log_member(env, struct_type, member, 1132 "Member exceeds struct_size"); 1133 return -EINVAL; 1134 } 1135 1136 return 0; 1137 } 1138 1139 static s32 btf_int_check_meta(struct btf_verifier_env *env, 1140 const struct btf_type *t, 1141 u32 meta_left) 1142 { 1143 u32 int_data, nr_bits, meta_needed = sizeof(int_data); 1144 u16 encoding; 1145 1146 if (meta_left < meta_needed) { 1147 btf_verifier_log_basic(env, t, 1148 "meta_left:%u meta_needed:%u", 1149 meta_left, meta_needed); 1150 return -EINVAL; 1151 } 1152 1153 if (btf_type_vlen(t)) { 1154 btf_verifier_log_type(env, t, "vlen != 0"); 1155 return -EINVAL; 1156 } 1157 1158 if (btf_type_kflag(t)) { 1159 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 1160 return -EINVAL; 1161 } 1162 1163 int_data = btf_type_int(t); 1164 if (int_data & ~BTF_INT_MASK) { 1165 btf_verifier_log_basic(env, t, "Invalid int_data:%x", 1166 int_data); 1167 return -EINVAL; 1168 } 1169 1170 nr_bits = BTF_INT_BITS(int_data) + BTF_INT_OFFSET(int_data); 1171 1172 if (nr_bits > BITS_PER_U128) { 1173 btf_verifier_log_type(env, t, "nr_bits exceeds %zu", 1174 BITS_PER_U128); 1175 return -EINVAL; 1176 } 1177 1178 if (BITS_ROUNDUP_BYTES(nr_bits) > t->size) { 1179 btf_verifier_log_type(env, t, "nr_bits exceeds type_size"); 1180 return -EINVAL; 1181 } 1182 1183 /* 1184 * Only one of the encoding bits is allowed and it 1185 * should be sufficient for the pretty print purpose (i.e. decoding). 1186 * Multiple bits can be allowed later if it is found 1187 * to be insufficient. 1188 */ 1189 encoding = BTF_INT_ENCODING(int_data); 1190 if (encoding && 1191 encoding != BTF_INT_SIGNED && 1192 encoding != BTF_INT_CHAR && 1193 encoding != BTF_INT_BOOL) { 1194 btf_verifier_log_type(env, t, "Unsupported encoding"); 1195 return -ENOTSUPP; 1196 } 1197 1198 btf_verifier_log_type(env, t, NULL); 1199 1200 return meta_needed; 1201 } 1202 1203 static void btf_int_log(struct btf_verifier_env *env, 1204 const struct btf_type *t) 1205 { 1206 int int_data = btf_type_int(t); 1207 1208 btf_verifier_log(env, 1209 "size=%u bits_offset=%u nr_bits=%u encoding=%s", 1210 t->size, BTF_INT_OFFSET(int_data), 1211 BTF_INT_BITS(int_data), 1212 btf_int_encoding_str(BTF_INT_ENCODING(int_data))); 1213 } 1214 1215 static void btf_int128_print(struct seq_file *m, void *data) 1216 { 1217 /* data points to a __int128 number. 
1218 * Suppose 1219 * int128_num = *(__int128 *)data; 1220 * The below formulas shows what upper_num and lower_num represents: 1221 * upper_num = int128_num >> 64; 1222 * lower_num = int128_num & 0xffffffffFFFFFFFFULL; 1223 */ 1224 u64 upper_num, lower_num; 1225 1226 #ifdef __BIG_ENDIAN_BITFIELD 1227 upper_num = *(u64 *)data; 1228 lower_num = *(u64 *)(data + 8); 1229 #else 1230 upper_num = *(u64 *)(data + 8); 1231 lower_num = *(u64 *)data; 1232 #endif 1233 if (upper_num == 0) 1234 seq_printf(m, "0x%llx", lower_num); 1235 else 1236 seq_printf(m, "0x%llx%016llx", upper_num, lower_num); 1237 } 1238 1239 static void btf_int128_shift(u64 *print_num, u16 left_shift_bits, 1240 u16 right_shift_bits) 1241 { 1242 u64 upper_num, lower_num; 1243 1244 #ifdef __BIG_ENDIAN_BITFIELD 1245 upper_num = print_num[0]; 1246 lower_num = print_num[1]; 1247 #else 1248 upper_num = print_num[1]; 1249 lower_num = print_num[0]; 1250 #endif 1251 1252 /* shake out un-needed bits by shift/or operations */ 1253 if (left_shift_bits >= 64) { 1254 upper_num = lower_num << (left_shift_bits - 64); 1255 lower_num = 0; 1256 } else { 1257 upper_num = (upper_num << left_shift_bits) | 1258 (lower_num >> (64 - left_shift_bits)); 1259 lower_num = lower_num << left_shift_bits; 1260 } 1261 1262 if (right_shift_bits >= 64) { 1263 lower_num = upper_num >> (right_shift_bits - 64); 1264 upper_num = 0; 1265 } else { 1266 lower_num = (lower_num >> right_shift_bits) | 1267 (upper_num << (64 - right_shift_bits)); 1268 upper_num = upper_num >> right_shift_bits; 1269 } 1270 1271 #ifdef __BIG_ENDIAN_BITFIELD 1272 print_num[0] = upper_num; 1273 print_num[1] = lower_num; 1274 #else 1275 print_num[0] = lower_num; 1276 print_num[1] = upper_num; 1277 #endif 1278 } 1279 1280 static void btf_bitfield_seq_show(void *data, u8 bits_offset, 1281 u8 nr_bits, struct seq_file *m) 1282 { 1283 u16 left_shift_bits, right_shift_bits; 1284 u8 nr_copy_bytes; 1285 u8 nr_copy_bits; 1286 u64 print_num[2] = {}; 1287 1288 nr_copy_bits = nr_bits + bits_offset; 1289 nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits); 1290 1291 memcpy(print_num, data, nr_copy_bytes); 1292 1293 #ifdef __BIG_ENDIAN_BITFIELD 1294 left_shift_bits = bits_offset; 1295 #else 1296 left_shift_bits = BITS_PER_U128 - nr_copy_bits; 1297 #endif 1298 right_shift_bits = BITS_PER_U128 - nr_bits; 1299 1300 btf_int128_shift(print_num, left_shift_bits, right_shift_bits); 1301 btf_int128_print(m, print_num); 1302 } 1303 1304 1305 static void btf_int_bits_seq_show(const struct btf *btf, 1306 const struct btf_type *t, 1307 void *data, u8 bits_offset, 1308 struct seq_file *m) 1309 { 1310 u32 int_data = btf_type_int(t); 1311 u8 nr_bits = BTF_INT_BITS(int_data); 1312 u8 total_bits_offset; 1313 1314 /* 1315 * bits_offset is at most 7. 1316 * BTF_INT_OFFSET() cannot exceed 128 bits. 
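 *
 * Worked example: bits_offset == 3 and BTF_INT_OFFSET() == 12 give
 * total_bits_offset == 15, so data advances by
 * BITS_ROUNDDOWN_BYTES(15) == 1 byte and the remaining in-byte
 * offset becomes BITS_PER_BYTE_MASKED(15) == 7.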
1317 */ 1318 total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data); 1319 data += BITS_ROUNDDOWN_BYTES(total_bits_offset); 1320 bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset); 1321 btf_bitfield_seq_show(data, bits_offset, nr_bits, m); 1322 } 1323 1324 static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t, 1325 u32 type_id, void *data, u8 bits_offset, 1326 struct seq_file *m) 1327 { 1328 u32 int_data = btf_type_int(t); 1329 u8 encoding = BTF_INT_ENCODING(int_data); 1330 bool sign = encoding & BTF_INT_SIGNED; 1331 u8 nr_bits = BTF_INT_BITS(int_data); 1332 1333 if (bits_offset || BTF_INT_OFFSET(int_data) || 1334 BITS_PER_BYTE_MASKED(nr_bits)) { 1335 btf_int_bits_seq_show(btf, t, data, bits_offset, m); 1336 return; 1337 } 1338 1339 switch (nr_bits) { 1340 case 128: 1341 btf_int128_print(m, data); 1342 break; 1343 case 64: 1344 if (sign) 1345 seq_printf(m, "%lld", *(s64 *)data); 1346 else 1347 seq_printf(m, "%llu", *(u64 *)data); 1348 break; 1349 case 32: 1350 if (sign) 1351 seq_printf(m, "%d", *(s32 *)data); 1352 else 1353 seq_printf(m, "%u", *(u32 *)data); 1354 break; 1355 case 16: 1356 if (sign) 1357 seq_printf(m, "%d", *(s16 *)data); 1358 else 1359 seq_printf(m, "%u", *(u16 *)data); 1360 break; 1361 case 8: 1362 if (sign) 1363 seq_printf(m, "%d", *(s8 *)data); 1364 else 1365 seq_printf(m, "%u", *(u8 *)data); 1366 break; 1367 default: 1368 btf_int_bits_seq_show(btf, t, data, bits_offset, m); 1369 } 1370 } 1371 1372 static const struct btf_kind_operations int_ops = { 1373 .check_meta = btf_int_check_meta, 1374 .resolve = btf_df_resolve, 1375 .check_member = btf_int_check_member, 1376 .check_kflag_member = btf_int_check_kflag_member, 1377 .log_details = btf_int_log, 1378 .seq_show = btf_int_seq_show, 1379 }; 1380 1381 static int btf_modifier_check_member(struct btf_verifier_env *env, 1382 const struct btf_type *struct_type, 1383 const struct btf_member *member, 1384 const struct btf_type *member_type) 1385 { 1386 const struct btf_type *resolved_type; 1387 u32 resolved_type_id = member->type; 1388 struct btf_member resolved_member; 1389 struct btf *btf = env->btf; 1390 1391 resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL); 1392 if (!resolved_type) { 1393 btf_verifier_log_member(env, struct_type, member, 1394 "Invalid member"); 1395 return -EINVAL; 1396 } 1397 1398 resolved_member = *member; 1399 resolved_member.type = resolved_type_id; 1400 1401 return btf_type_ops(resolved_type)->check_member(env, struct_type, 1402 &resolved_member, 1403 resolved_type); 1404 } 1405 1406 static int btf_modifier_check_kflag_member(struct btf_verifier_env *env, 1407 const struct btf_type *struct_type, 1408 const struct btf_member *member, 1409 const struct btf_type *member_type) 1410 { 1411 const struct btf_type *resolved_type; 1412 u32 resolved_type_id = member->type; 1413 struct btf_member resolved_member; 1414 struct btf *btf = env->btf; 1415 1416 resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL); 1417 if (!resolved_type) { 1418 btf_verifier_log_member(env, struct_type, member, 1419 "Invalid member"); 1420 return -EINVAL; 1421 } 1422 1423 resolved_member = *member; 1424 resolved_member.type = resolved_type_id; 1425 1426 return btf_type_ops(resolved_type)->check_kflag_member(env, struct_type, 1427 &resolved_member, 1428 resolved_type); 1429 } 1430 1431 static int btf_ptr_check_member(struct btf_verifier_env *env, 1432 const struct btf_type *struct_type, 1433 const struct btf_member *member, 1434 const struct btf_type *member_type) 1435 { 1436 u32 
struct_size, struct_bits_off, bytes_offset; 1437 1438 struct_size = struct_type->size; 1439 struct_bits_off = member->offset; 1440 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); 1441 1442 if (BITS_PER_BYTE_MASKED(struct_bits_off)) { 1443 btf_verifier_log_member(env, struct_type, member, 1444 "Member is not byte aligned"); 1445 return -EINVAL; 1446 } 1447 1448 if (struct_size - bytes_offset < sizeof(void *)) { 1449 btf_verifier_log_member(env, struct_type, member, 1450 "Member exceeds struct_size"); 1451 return -EINVAL; 1452 } 1453 1454 return 0; 1455 } 1456 1457 static int btf_ref_type_check_meta(struct btf_verifier_env *env, 1458 const struct btf_type *t, 1459 u32 meta_left) 1460 { 1461 if (btf_type_vlen(t)) { 1462 btf_verifier_log_type(env, t, "vlen != 0"); 1463 return -EINVAL; 1464 } 1465 1466 if (btf_type_kflag(t)) { 1467 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 1468 return -EINVAL; 1469 } 1470 1471 if (!BTF_TYPE_ID_VALID(t->type)) { 1472 btf_verifier_log_type(env, t, "Invalid type_id"); 1473 return -EINVAL; 1474 } 1475 1476 /* typedef type must have a valid name, and other ref types, 1477 * volatile, const, restrict, should have a null name. 1478 */ 1479 if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) { 1480 if (!t->name_off || 1481 !btf_name_valid_identifier(env->btf, t->name_off)) { 1482 btf_verifier_log_type(env, t, "Invalid name"); 1483 return -EINVAL; 1484 } 1485 } else { 1486 if (t->name_off) { 1487 btf_verifier_log_type(env, t, "Invalid name"); 1488 return -EINVAL; 1489 } 1490 } 1491 1492 btf_verifier_log_type(env, t, NULL); 1493 1494 return 0; 1495 } 1496 1497 static int btf_modifier_resolve(struct btf_verifier_env *env, 1498 const struct resolve_vertex *v) 1499 { 1500 const struct btf_type *t = v->t; 1501 const struct btf_type *next_type; 1502 u32 next_type_id = t->type; 1503 struct btf *btf = env->btf; 1504 u32 next_type_size = 0; 1505 1506 next_type = btf_type_by_id(btf, next_type_id); 1507 if (!next_type) { 1508 btf_verifier_log_type(env, v->t, "Invalid type_id"); 1509 return -EINVAL; 1510 } 1511 1512 if (!env_type_is_resolve_sink(env, next_type) && 1513 !env_type_is_resolved(env, next_type_id)) 1514 return env_stack_push(env, next_type, next_type_id); 1515 1516 /* Figure out the resolved next_type_id with size. 1517 * They will be stored in the current modifier's 1518 * resolved_ids and resolved_sizes such that it can 1519 * save us a few type-following when we use it later (e.g. in 1520 * pretty print). 
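 *
 * For example, for "typedef const int ci_t", both the TYPEDEF and the
 * CONST entries end up with resolved_ids pointing at the underlying
 * INT and resolved_sizes == 4, so btf_type_id_resolve() can jump
 * straight to the INT without walking the modifier chain again.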
1521 */ 1522 if (!btf_type_id_size(btf, &next_type_id, &next_type_size)) { 1523 if (env_type_is_resolved(env, next_type_id)) 1524 next_type = btf_type_id_resolve(btf, &next_type_id); 1525 1526 /* "typedef void new_void", "const void"...etc */ 1527 if (!btf_type_is_void(next_type) && 1528 !btf_type_is_fwd(next_type)) { 1529 btf_verifier_log_type(env, v->t, "Invalid type_id"); 1530 return -EINVAL; 1531 } 1532 } 1533 1534 env_stack_pop_resolved(env, next_type_id, next_type_size); 1535 1536 return 0; 1537 } 1538 1539 static int btf_ptr_resolve(struct btf_verifier_env *env, 1540 const struct resolve_vertex *v) 1541 { 1542 const struct btf_type *next_type; 1543 const struct btf_type *t = v->t; 1544 u32 next_type_id = t->type; 1545 struct btf *btf = env->btf; 1546 1547 next_type = btf_type_by_id(btf, next_type_id); 1548 if (!next_type) { 1549 btf_verifier_log_type(env, v->t, "Invalid type_id"); 1550 return -EINVAL; 1551 } 1552 1553 if (!env_type_is_resolve_sink(env, next_type) && 1554 !env_type_is_resolved(env, next_type_id)) 1555 return env_stack_push(env, next_type, next_type_id); 1556 1557 /* If the modifier was RESOLVED during RESOLVE_STRUCT_OR_ARRAY, 1558 * the modifier may have stopped resolving when it was resolved 1559 * to a ptr (last-resolved-ptr). 1560 * 1561 * We now need to continue from the last-resolved-ptr to 1562 * ensure the last-resolved-ptr will not referring back to 1563 * the currenct ptr (t). 1564 */ 1565 if (btf_type_is_modifier(next_type)) { 1566 const struct btf_type *resolved_type; 1567 u32 resolved_type_id; 1568 1569 resolved_type_id = next_type_id; 1570 resolved_type = btf_type_id_resolve(btf, &resolved_type_id); 1571 1572 if (btf_type_is_ptr(resolved_type) && 1573 !env_type_is_resolve_sink(env, resolved_type) && 1574 !env_type_is_resolved(env, resolved_type_id)) 1575 return env_stack_push(env, resolved_type, 1576 resolved_type_id); 1577 } 1578 1579 if (!btf_type_id_size(btf, &next_type_id, NULL)) { 1580 if (env_type_is_resolved(env, next_type_id)) 1581 next_type = btf_type_id_resolve(btf, &next_type_id); 1582 1583 if (!btf_type_is_void(next_type) && 1584 !btf_type_is_fwd(next_type) && 1585 !btf_type_is_func_proto(next_type)) { 1586 btf_verifier_log_type(env, v->t, "Invalid type_id"); 1587 return -EINVAL; 1588 } 1589 } 1590 1591 env_stack_pop_resolved(env, next_type_id, 0); 1592 1593 return 0; 1594 } 1595 1596 static void btf_modifier_seq_show(const struct btf *btf, 1597 const struct btf_type *t, 1598 u32 type_id, void *data, 1599 u8 bits_offset, struct seq_file *m) 1600 { 1601 t = btf_type_id_resolve(btf, &type_id); 1602 1603 btf_type_ops(t)->seq_show(btf, t, type_id, data, bits_offset, m); 1604 } 1605 1606 static void btf_ptr_seq_show(const struct btf *btf, const struct btf_type *t, 1607 u32 type_id, void *data, u8 bits_offset, 1608 struct seq_file *m) 1609 { 1610 /* It is a hashed value */ 1611 seq_printf(m, "%p", *(void **)data); 1612 } 1613 1614 static void btf_ref_type_log(struct btf_verifier_env *env, 1615 const struct btf_type *t) 1616 { 1617 btf_verifier_log(env, "type_id=%u", t->type); 1618 } 1619 1620 static struct btf_kind_operations modifier_ops = { 1621 .check_meta = btf_ref_type_check_meta, 1622 .resolve = btf_modifier_resolve, 1623 .check_member = btf_modifier_check_member, 1624 .check_kflag_member = btf_modifier_check_kflag_member, 1625 .log_details = btf_ref_type_log, 1626 .seq_show = btf_modifier_seq_show, 1627 }; 1628 1629 static struct btf_kind_operations ptr_ops = { 1630 .check_meta = btf_ref_type_check_meta, 1631 .resolve = btf_ptr_resolve, 1632 
.check_member = btf_ptr_check_member, 1633 .check_kflag_member = btf_generic_check_kflag_member, 1634 .log_details = btf_ref_type_log, 1635 .seq_show = btf_ptr_seq_show, 1636 }; 1637 1638 static s32 btf_fwd_check_meta(struct btf_verifier_env *env, 1639 const struct btf_type *t, 1640 u32 meta_left) 1641 { 1642 if (btf_type_vlen(t)) { 1643 btf_verifier_log_type(env, t, "vlen != 0"); 1644 return -EINVAL; 1645 } 1646 1647 if (t->type) { 1648 btf_verifier_log_type(env, t, "type != 0"); 1649 return -EINVAL; 1650 } 1651 1652 /* fwd type must have a valid name */ 1653 if (!t->name_off || 1654 !btf_name_valid_identifier(env->btf, t->name_off)) { 1655 btf_verifier_log_type(env, t, "Invalid name"); 1656 return -EINVAL; 1657 } 1658 1659 btf_verifier_log_type(env, t, NULL); 1660 1661 return 0; 1662 } 1663 1664 static void btf_fwd_type_log(struct btf_verifier_env *env, 1665 const struct btf_type *t) 1666 { 1667 btf_verifier_log(env, "%s", btf_type_kflag(t) ? "union" : "struct"); 1668 } 1669 1670 static struct btf_kind_operations fwd_ops = { 1671 .check_meta = btf_fwd_check_meta, 1672 .resolve = btf_df_resolve, 1673 .check_member = btf_df_check_member, 1674 .check_kflag_member = btf_df_check_kflag_member, 1675 .log_details = btf_fwd_type_log, 1676 .seq_show = btf_df_seq_show, 1677 }; 1678 1679 static int btf_array_check_member(struct btf_verifier_env *env, 1680 const struct btf_type *struct_type, 1681 const struct btf_member *member, 1682 const struct btf_type *member_type) 1683 { 1684 u32 struct_bits_off = member->offset; 1685 u32 struct_size, bytes_offset; 1686 u32 array_type_id, array_size; 1687 struct btf *btf = env->btf; 1688 1689 if (BITS_PER_BYTE_MASKED(struct_bits_off)) { 1690 btf_verifier_log_member(env, struct_type, member, 1691 "Member is not byte aligned"); 1692 return -EINVAL; 1693 } 1694 1695 array_type_id = member->type; 1696 btf_type_id_size(btf, &array_type_id, &array_size); 1697 struct_size = struct_type->size; 1698 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); 1699 if (struct_size - bytes_offset < array_size) { 1700 btf_verifier_log_member(env, struct_type, member, 1701 "Member exceeds struct_size"); 1702 return -EINVAL; 1703 } 1704 1705 return 0; 1706 } 1707 1708 static s32 btf_array_check_meta(struct btf_verifier_env *env, 1709 const struct btf_type *t, 1710 u32 meta_left) 1711 { 1712 const struct btf_array *array = btf_type_array(t); 1713 u32 meta_needed = sizeof(*array); 1714 1715 if (meta_left < meta_needed) { 1716 btf_verifier_log_basic(env, t, 1717 "meta_left:%u meta_needed:%u", 1718 meta_left, meta_needed); 1719 return -EINVAL; 1720 } 1721 1722 /* array type should not have a name */ 1723 if (t->name_off) { 1724 btf_verifier_log_type(env, t, "Invalid name"); 1725 return -EINVAL; 1726 } 1727 1728 if (btf_type_vlen(t)) { 1729 btf_verifier_log_type(env, t, "vlen != 0"); 1730 return -EINVAL; 1731 } 1732 1733 if (btf_type_kflag(t)) { 1734 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 1735 return -EINVAL; 1736 } 1737 1738 if (t->size) { 1739 btf_verifier_log_type(env, t, "size != 0"); 1740 return -EINVAL; 1741 } 1742 1743 /* Array elem type and index type cannot be in type void, 1744 * so !array->type and !array->index_type are not allowed. 
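 *
 * For example, "int a[10]" is encoded as an ARRAY whose array->type
 * refers to the INT element type and whose array->index_type refers
 * to an int type used for indexing (with array->nelems == 10); both
 * of those type_ids must be non-zero.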
1745 */ 1746 if (!array->type || !BTF_TYPE_ID_VALID(array->type)) { 1747 btf_verifier_log_type(env, t, "Invalid elem"); 1748 return -EINVAL; 1749 } 1750 1751 if (!array->index_type || !BTF_TYPE_ID_VALID(array->index_type)) { 1752 btf_verifier_log_type(env, t, "Invalid index"); 1753 return -EINVAL; 1754 } 1755 1756 btf_verifier_log_type(env, t, NULL); 1757 1758 return meta_needed; 1759 } 1760 1761 static int btf_array_resolve(struct btf_verifier_env *env, 1762 const struct resolve_vertex *v) 1763 { 1764 const struct btf_array *array = btf_type_array(v->t); 1765 const struct btf_type *elem_type, *index_type; 1766 u32 elem_type_id, index_type_id; 1767 struct btf *btf = env->btf; 1768 u32 elem_size; 1769 1770 /* Check array->index_type */ 1771 index_type_id = array->index_type; 1772 index_type = btf_type_by_id(btf, index_type_id); 1773 if (btf_type_nosize_or_null(index_type)) { 1774 btf_verifier_log_type(env, v->t, "Invalid index"); 1775 return -EINVAL; 1776 } 1777 1778 if (!env_type_is_resolve_sink(env, index_type) && 1779 !env_type_is_resolved(env, index_type_id)) 1780 return env_stack_push(env, index_type, index_type_id); 1781 1782 index_type = btf_type_id_size(btf, &index_type_id, NULL); 1783 if (!index_type || !btf_type_is_int(index_type) || 1784 !btf_type_int_is_regular(index_type)) { 1785 btf_verifier_log_type(env, v->t, "Invalid index"); 1786 return -EINVAL; 1787 } 1788 1789 /* Check array->type */ 1790 elem_type_id = array->type; 1791 elem_type = btf_type_by_id(btf, elem_type_id); 1792 if (btf_type_nosize_or_null(elem_type)) { 1793 btf_verifier_log_type(env, v->t, 1794 "Invalid elem"); 1795 return -EINVAL; 1796 } 1797 1798 if (!env_type_is_resolve_sink(env, elem_type) && 1799 !env_type_is_resolved(env, elem_type_id)) 1800 return env_stack_push(env, elem_type, elem_type_id); 1801 1802 elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size); 1803 if (!elem_type) { 1804 btf_verifier_log_type(env, v->t, "Invalid elem"); 1805 return -EINVAL; 1806 } 1807 1808 if (btf_type_is_int(elem_type) && !btf_type_int_is_regular(elem_type)) { 1809 btf_verifier_log_type(env, v->t, "Invalid array of int"); 1810 return -EINVAL; 1811 } 1812 1813 if (array->nelems && elem_size > U32_MAX / array->nelems) { 1814 btf_verifier_log_type(env, v->t, 1815 "Array size overflows U32_MAX"); 1816 return -EINVAL; 1817 } 1818 1819 env_stack_pop_resolved(env, elem_type_id, elem_size * array->nelems); 1820 1821 return 0; 1822 } 1823 1824 static void btf_array_log(struct btf_verifier_env *env, 1825 const struct btf_type *t) 1826 { 1827 const struct btf_array *array = btf_type_array(t); 1828 1829 btf_verifier_log(env, "type_id=%u index_type_id=%u nr_elems=%u", 1830 array->type, array->index_type, array->nelems); 1831 } 1832 1833 static void btf_array_seq_show(const struct btf *btf, const struct btf_type *t, 1834 u32 type_id, void *data, u8 bits_offset, 1835 struct seq_file *m) 1836 { 1837 const struct btf_array *array = btf_type_array(t); 1838 const struct btf_kind_operations *elem_ops; 1839 const struct btf_type *elem_type; 1840 u32 i, elem_size, elem_type_id; 1841 1842 elem_type_id = array->type; 1843 elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size); 1844 elem_ops = btf_type_ops(elem_type); 1845 seq_puts(m, "["); 1846 for (i = 0; i < array->nelems; i++) { 1847 if (i) 1848 seq_puts(m, ","); 1849 1850 elem_ops->seq_show(btf, elem_type, elem_type_id, data, 1851 bits_offset, m); 1852 data += elem_size; 1853 } 1854 seq_puts(m, "]"); 1855 } 1856 1857 static struct btf_kind_operations array_ops = { 1858 
.check_meta = btf_array_check_meta, 1859 .resolve = btf_array_resolve, 1860 .check_member = btf_array_check_member, 1861 .check_kflag_member = btf_generic_check_kflag_member, 1862 .log_details = btf_array_log, 1863 .seq_show = btf_array_seq_show, 1864 }; 1865 1866 static int btf_struct_check_member(struct btf_verifier_env *env, 1867 const struct btf_type *struct_type, 1868 const struct btf_member *member, 1869 const struct btf_type *member_type) 1870 { 1871 u32 struct_bits_off = member->offset; 1872 u32 struct_size, bytes_offset; 1873 1874 if (BITS_PER_BYTE_MASKED(struct_bits_off)) { 1875 btf_verifier_log_member(env, struct_type, member, 1876 "Member is not byte aligned"); 1877 return -EINVAL; 1878 } 1879 1880 struct_size = struct_type->size; 1881 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); 1882 if (struct_size - bytes_offset < member_type->size) { 1883 btf_verifier_log_member(env, struct_type, member, 1884 "Member exceeds struct_size"); 1885 return -EINVAL; 1886 } 1887 1888 return 0; 1889 } 1890 1891 static s32 btf_struct_check_meta(struct btf_verifier_env *env, 1892 const struct btf_type *t, 1893 u32 meta_left) 1894 { 1895 bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION; 1896 const struct btf_member *member; 1897 u32 meta_needed, last_offset; 1898 struct btf *btf = env->btf; 1899 u32 struct_size = t->size; 1900 u32 offset; 1901 u16 i; 1902 1903 meta_needed = btf_type_vlen(t) * sizeof(*member); 1904 if (meta_left < meta_needed) { 1905 btf_verifier_log_basic(env, t, 1906 "meta_left:%u meta_needed:%u", 1907 meta_left, meta_needed); 1908 return -EINVAL; 1909 } 1910 1911 /* struct type either no name or a valid one */ 1912 if (t->name_off && 1913 !btf_name_valid_identifier(env->btf, t->name_off)) { 1914 btf_verifier_log_type(env, t, "Invalid name"); 1915 return -EINVAL; 1916 } 1917 1918 btf_verifier_log_type(env, t, NULL); 1919 1920 last_offset = 0; 1921 for_each_member(i, t, member) { 1922 if (!btf_name_offset_valid(btf, member->name_off)) { 1923 btf_verifier_log_member(env, t, member, 1924 "Invalid member name_offset:%u", 1925 member->name_off); 1926 return -EINVAL; 1927 } 1928 1929 /* struct member either no name or a valid one */ 1930 if (member->name_off && 1931 !btf_name_valid_identifier(btf, member->name_off)) { 1932 btf_verifier_log_member(env, t, member, "Invalid name"); 1933 return -EINVAL; 1934 } 1935 /* A member cannot be in type void */ 1936 if (!member->type || !BTF_TYPE_ID_VALID(member->type)) { 1937 btf_verifier_log_member(env, t, member, 1938 "Invalid type_id"); 1939 return -EINVAL; 1940 } 1941 1942 offset = btf_member_bit_offset(t, member); 1943 if (is_union && offset) { 1944 btf_verifier_log_member(env, t, member, 1945 "Invalid member bits_offset"); 1946 return -EINVAL; 1947 } 1948 1949 /* 1950 * ">" instead of ">=" because the last member could be 1951 * "char a[0];" 1952 */ 1953 if (last_offset > offset) { 1954 btf_verifier_log_member(env, t, member, 1955 "Invalid member bits_offset"); 1956 return -EINVAL; 1957 } 1958 1959 if (BITS_ROUNDUP_BYTES(offset) > struct_size) { 1960 btf_verifier_log_member(env, t, member, 1961 "Member bits_offset exceeds its struct size"); 1962 return -EINVAL; 1963 } 1964 1965 btf_verifier_log_member(env, t, member, NULL); 1966 last_offset = offset; 1967 } 1968 1969 return meta_needed; 1970 } 1971 1972 static int btf_struct_resolve(struct btf_verifier_env *env, 1973 const struct resolve_vertex *v) 1974 { 1975 const struct btf_member *member; 1976 int err; 1977 u16 i; 1978 1979 /* Before continue resolving the next_member, 1980 
* ensure the last member is indeed resolved to a 1981 * type with size info. 1982 */ 1983 if (v->next_member) { 1984 const struct btf_type *last_member_type; 1985 const struct btf_member *last_member; 1986 u16 last_member_type_id; 1987 1988 last_member = btf_type_member(v->t) + v->next_member - 1; 1989 last_member_type_id = last_member->type; 1990 if (WARN_ON_ONCE(!env_type_is_resolved(env, 1991 last_member_type_id))) 1992 return -EINVAL; 1993 1994 last_member_type = btf_type_by_id(env->btf, 1995 last_member_type_id); 1996 if (btf_type_kflag(v->t)) 1997 err = btf_type_ops(last_member_type)->check_kflag_member(env, v->t, 1998 last_member, 1999 last_member_type); 2000 else 2001 err = btf_type_ops(last_member_type)->check_member(env, v->t, 2002 last_member, 2003 last_member_type); 2004 if (err) 2005 return err; 2006 } 2007 2008 for_each_member_from(i, v->next_member, v->t, member) { 2009 u32 member_type_id = member->type; 2010 const struct btf_type *member_type = btf_type_by_id(env->btf, 2011 member_type_id); 2012 2013 if (btf_type_nosize_or_null(member_type)) { 2014 btf_verifier_log_member(env, v->t, member, 2015 "Invalid member"); 2016 return -EINVAL; 2017 } 2018 2019 if (!env_type_is_resolve_sink(env, member_type) && 2020 !env_type_is_resolved(env, member_type_id)) { 2021 env_stack_set_next_member(env, i + 1); 2022 return env_stack_push(env, member_type, member_type_id); 2023 } 2024 2025 if (btf_type_kflag(v->t)) 2026 err = btf_type_ops(member_type)->check_kflag_member(env, v->t, 2027 member, 2028 member_type); 2029 else 2030 err = btf_type_ops(member_type)->check_member(env, v->t, 2031 member, 2032 member_type); 2033 if (err) 2034 return err; 2035 } 2036 2037 env_stack_pop_resolved(env, 0, 0); 2038 2039 return 0; 2040 } 2041 2042 static void btf_struct_log(struct btf_verifier_env *env, 2043 const struct btf_type *t) 2044 { 2045 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t)); 2046 } 2047 2048 static void btf_struct_seq_show(const struct btf *btf, const struct btf_type *t, 2049 u32 type_id, void *data, u8 bits_offset, 2050 struct seq_file *m) 2051 { 2052 const char *seq = BTF_INFO_KIND(t->info) == BTF_KIND_UNION ? 
"|" : ","; 2053 const struct btf_member *member; 2054 u32 i; 2055 2056 seq_puts(m, "{"); 2057 for_each_member(i, t, member) { 2058 const struct btf_type *member_type = btf_type_by_id(btf, 2059 member->type); 2060 const struct btf_kind_operations *ops; 2061 u32 member_offset, bitfield_size; 2062 u32 bytes_offset; 2063 u8 bits8_offset; 2064 2065 if (i) 2066 seq_puts(m, seq); 2067 2068 member_offset = btf_member_bit_offset(t, member); 2069 bitfield_size = btf_member_bitfield_size(t, member); 2070 bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset); 2071 bits8_offset = BITS_PER_BYTE_MASKED(member_offset); 2072 if (bitfield_size) { 2073 btf_bitfield_seq_show(data + bytes_offset, bits8_offset, 2074 bitfield_size, m); 2075 } else { 2076 ops = btf_type_ops(member_type); 2077 ops->seq_show(btf, member_type, member->type, 2078 data + bytes_offset, bits8_offset, m); 2079 } 2080 } 2081 seq_puts(m, "}"); 2082 } 2083 2084 static struct btf_kind_operations struct_ops = { 2085 .check_meta = btf_struct_check_meta, 2086 .resolve = btf_struct_resolve, 2087 .check_member = btf_struct_check_member, 2088 .check_kflag_member = btf_generic_check_kflag_member, 2089 .log_details = btf_struct_log, 2090 .seq_show = btf_struct_seq_show, 2091 }; 2092 2093 static int btf_enum_check_member(struct btf_verifier_env *env, 2094 const struct btf_type *struct_type, 2095 const struct btf_member *member, 2096 const struct btf_type *member_type) 2097 { 2098 u32 struct_bits_off = member->offset; 2099 u32 struct_size, bytes_offset; 2100 2101 if (BITS_PER_BYTE_MASKED(struct_bits_off)) { 2102 btf_verifier_log_member(env, struct_type, member, 2103 "Member is not byte aligned"); 2104 return -EINVAL; 2105 } 2106 2107 struct_size = struct_type->size; 2108 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); 2109 if (struct_size - bytes_offset < sizeof(int)) { 2110 btf_verifier_log_member(env, struct_type, member, 2111 "Member exceeds struct_size"); 2112 return -EINVAL; 2113 } 2114 2115 return 0; 2116 } 2117 2118 static int btf_enum_check_kflag_member(struct btf_verifier_env *env, 2119 const struct btf_type *struct_type, 2120 const struct btf_member *member, 2121 const struct btf_type *member_type) 2122 { 2123 u32 struct_bits_off, nr_bits, bytes_end, struct_size; 2124 u32 int_bitsize = sizeof(int) * BITS_PER_BYTE; 2125 2126 struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset); 2127 nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset); 2128 if (!nr_bits) { 2129 if (BITS_PER_BYTE_MASKED(struct_bits_off)) { 2130 btf_verifier_log_member(env, struct_type, member, 2131 "Member is not byte aligned"); 2132 return -EINVAL; 2133 } 2134 2135 nr_bits = int_bitsize; 2136 } else if (nr_bits > int_bitsize) { 2137 btf_verifier_log_member(env, struct_type, member, 2138 "Invalid member bitfield_size"); 2139 return -EINVAL; 2140 } 2141 2142 struct_size = struct_type->size; 2143 bytes_end = BITS_ROUNDUP_BYTES(struct_bits_off + nr_bits); 2144 if (struct_size < bytes_end) { 2145 btf_verifier_log_member(env, struct_type, member, 2146 "Member exceeds struct_size"); 2147 return -EINVAL; 2148 } 2149 2150 return 0; 2151 } 2152 2153 static s32 btf_enum_check_meta(struct btf_verifier_env *env, 2154 const struct btf_type *t, 2155 u32 meta_left) 2156 { 2157 const struct btf_enum *enums = btf_type_enum(t); 2158 struct btf *btf = env->btf; 2159 u16 i, nr_enums; 2160 u32 meta_needed; 2161 2162 nr_enums = btf_type_vlen(t); 2163 meta_needed = nr_enums * sizeof(*enums); 2164 2165 if (meta_left < meta_needed) { 2166 btf_verifier_log_basic(env, t, 2167 "meta_left:%u 
meta_needed:%u", 2168 meta_left, meta_needed); 2169 return -EINVAL; 2170 } 2171 2172 if (btf_type_kflag(t)) { 2173 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 2174 return -EINVAL; 2175 } 2176 2177 if (t->size != sizeof(int)) { 2178 btf_verifier_log_type(env, t, "Expected size:%zu", 2179 sizeof(int)); 2180 return -EINVAL; 2181 } 2182 2183 /* enum type either no name or a valid one */ 2184 if (t->name_off && 2185 !btf_name_valid_identifier(env->btf, t->name_off)) { 2186 btf_verifier_log_type(env, t, "Invalid name"); 2187 return -EINVAL; 2188 } 2189 2190 btf_verifier_log_type(env, t, NULL); 2191 2192 for (i = 0; i < nr_enums; i++) { 2193 if (!btf_name_offset_valid(btf, enums[i].name_off)) { 2194 btf_verifier_log(env, "\tInvalid name_offset:%u", 2195 enums[i].name_off); 2196 return -EINVAL; 2197 } 2198 2199 /* enum member must have a valid name */ 2200 if (!enums[i].name_off || 2201 !btf_name_valid_identifier(btf, enums[i].name_off)) { 2202 btf_verifier_log_type(env, t, "Invalid name"); 2203 return -EINVAL; 2204 } 2205 2206 2207 btf_verifier_log(env, "\t%s val=%d\n", 2208 __btf_name_by_offset(btf, enums[i].name_off), 2209 enums[i].val); 2210 } 2211 2212 return meta_needed; 2213 } 2214 2215 static void btf_enum_log(struct btf_verifier_env *env, 2216 const struct btf_type *t) 2217 { 2218 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t)); 2219 } 2220 2221 static void btf_enum_seq_show(const struct btf *btf, const struct btf_type *t, 2222 u32 type_id, void *data, u8 bits_offset, 2223 struct seq_file *m) 2224 { 2225 const struct btf_enum *enums = btf_type_enum(t); 2226 u32 i, nr_enums = btf_type_vlen(t); 2227 int v = *(int *)data; 2228 2229 for (i = 0; i < nr_enums; i++) { 2230 if (v == enums[i].val) { 2231 seq_printf(m, "%s", 2232 __btf_name_by_offset(btf, 2233 enums[i].name_off)); 2234 return; 2235 } 2236 } 2237 2238 seq_printf(m, "%d", v); 2239 } 2240 2241 static struct btf_kind_operations enum_ops = { 2242 .check_meta = btf_enum_check_meta, 2243 .resolve = btf_df_resolve, 2244 .check_member = btf_enum_check_member, 2245 .check_kflag_member = btf_enum_check_kflag_member, 2246 .log_details = btf_enum_log, 2247 .seq_show = btf_enum_seq_show, 2248 }; 2249 2250 static s32 btf_func_proto_check_meta(struct btf_verifier_env *env, 2251 const struct btf_type *t, 2252 u32 meta_left) 2253 { 2254 u32 meta_needed = btf_type_vlen(t) * sizeof(struct btf_param); 2255 2256 if (meta_left < meta_needed) { 2257 btf_verifier_log_basic(env, t, 2258 "meta_left:%u meta_needed:%u", 2259 meta_left, meta_needed); 2260 return -EINVAL; 2261 } 2262 2263 if (t->name_off) { 2264 btf_verifier_log_type(env, t, "Invalid name"); 2265 return -EINVAL; 2266 } 2267 2268 if (btf_type_kflag(t)) { 2269 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 2270 return -EINVAL; 2271 } 2272 2273 btf_verifier_log_type(env, t, NULL); 2274 2275 return meta_needed; 2276 } 2277 2278 static void btf_func_proto_log(struct btf_verifier_env *env, 2279 const struct btf_type *t) 2280 { 2281 const struct btf_param *args = (const struct btf_param *)(t + 1); 2282 u16 nr_args = btf_type_vlen(t), i; 2283 2284 btf_verifier_log(env, "return=%u args=(", t->type); 2285 if (!nr_args) { 2286 btf_verifier_log(env, "void"); 2287 goto done; 2288 } 2289 2290 if (nr_args == 1 && !args[0].type) { 2291 /* Only one vararg */ 2292 btf_verifier_log(env, "vararg"); 2293 goto done; 2294 } 2295 2296 btf_verifier_log(env, "%u %s", args[0].type, 2297 __btf_name_by_offset(env->btf, 2298 args[0].name_off)); 2299 for (i = 1; i < nr_args - 
1; i++) 2300 btf_verifier_log(env, ", %u %s", args[i].type, 2301 __btf_name_by_offset(env->btf, 2302 args[i].name_off)); 2303 2304 if (nr_args > 1) { 2305 const struct btf_param *last_arg = &args[nr_args - 1]; 2306 2307 if (last_arg->type) 2308 btf_verifier_log(env, ", %u %s", last_arg->type, 2309 __btf_name_by_offset(env->btf, 2310 last_arg->name_off)); 2311 else 2312 btf_verifier_log(env, ", vararg"); 2313 } 2314 2315 done: 2316 btf_verifier_log(env, ")"); 2317 } 2318 2319 static struct btf_kind_operations func_proto_ops = { 2320 .check_meta = btf_func_proto_check_meta, 2321 .resolve = btf_df_resolve, 2322 /* 2323 * BTF_KIND_FUNC_PROTO cannot be directly referred by 2324 * a struct's member. 2325 * 2326 * It should be a funciton pointer instead. 2327 * (i.e. struct's member -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO) 2328 * 2329 * Hence, there is no btf_func_check_member(). 2330 */ 2331 .check_member = btf_df_check_member, 2332 .check_kflag_member = btf_df_check_kflag_member, 2333 .log_details = btf_func_proto_log, 2334 .seq_show = btf_df_seq_show, 2335 }; 2336 2337 static s32 btf_func_check_meta(struct btf_verifier_env *env, 2338 const struct btf_type *t, 2339 u32 meta_left) 2340 { 2341 if (!t->name_off || 2342 !btf_name_valid_identifier(env->btf, t->name_off)) { 2343 btf_verifier_log_type(env, t, "Invalid name"); 2344 return -EINVAL; 2345 } 2346 2347 if (btf_type_vlen(t)) { 2348 btf_verifier_log_type(env, t, "vlen != 0"); 2349 return -EINVAL; 2350 } 2351 2352 if (btf_type_kflag(t)) { 2353 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 2354 return -EINVAL; 2355 } 2356 2357 btf_verifier_log_type(env, t, NULL); 2358 2359 return 0; 2360 } 2361 2362 static struct btf_kind_operations func_ops = { 2363 .check_meta = btf_func_check_meta, 2364 .resolve = btf_df_resolve, 2365 .check_member = btf_df_check_member, 2366 .check_kflag_member = btf_df_check_kflag_member, 2367 .log_details = btf_ref_type_log, 2368 .seq_show = btf_df_seq_show, 2369 }; 2370 2371 static int btf_func_proto_check(struct btf_verifier_env *env, 2372 const struct btf_type *t) 2373 { 2374 const struct btf_type *ret_type; 2375 const struct btf_param *args; 2376 const struct btf *btf; 2377 u16 nr_args, i; 2378 int err; 2379 2380 btf = env->btf; 2381 args = (const struct btf_param *)(t + 1); 2382 nr_args = btf_type_vlen(t); 2383 2384 /* Check func return type which could be "void" (t->type == 0) */ 2385 if (t->type) { 2386 u32 ret_type_id = t->type; 2387 2388 ret_type = btf_type_by_id(btf, ret_type_id); 2389 if (!ret_type) { 2390 btf_verifier_log_type(env, t, "Invalid return type"); 2391 return -EINVAL; 2392 } 2393 2394 if (btf_type_needs_resolve(ret_type) && 2395 !env_type_is_resolved(env, ret_type_id)) { 2396 err = btf_resolve(env, ret_type, ret_type_id); 2397 if (err) 2398 return err; 2399 } 2400 2401 /* Ensure the return type is a type that has a size */ 2402 if (!btf_type_id_size(btf, &ret_type_id, NULL)) { 2403 btf_verifier_log_type(env, t, "Invalid return type"); 2404 return -EINVAL; 2405 } 2406 } 2407 2408 if (!nr_args) 2409 return 0; 2410 2411 /* Last func arg type_id could be 0 if it is a vararg */ 2412 if (!args[nr_args - 1].type) { 2413 if (args[nr_args - 1].name_off) { 2414 btf_verifier_log_type(env, t, "Invalid arg#%u", 2415 nr_args); 2416 return -EINVAL; 2417 } 2418 nr_args--; 2419 } 2420 2421 err = 0; 2422 for (i = 0; i < nr_args; i++) { 2423 const struct btf_type *arg_type; 2424 u32 arg_type_id; 2425 2426 arg_type_id = args[i].type; 2427 arg_type = btf_type_by_id(btf, arg_type_id); 2428 if (!arg_type) { 
2429 btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1); 2430 err = -EINVAL; 2431 break; 2432 } 2433 2434 if (args[i].name_off && 2435 (!btf_name_offset_valid(btf, args[i].name_off) || 2436 !btf_name_valid_identifier(btf, args[i].name_off))) { 2437 btf_verifier_log_type(env, t, 2438 "Invalid arg#%u", i + 1); 2439 err = -EINVAL; 2440 break; 2441 } 2442 2443 if (btf_type_needs_resolve(arg_type) && 2444 !env_type_is_resolved(env, arg_type_id)) { 2445 err = btf_resolve(env, arg_type, arg_type_id); 2446 if (err) 2447 break; 2448 } 2449 2450 if (!btf_type_id_size(btf, &arg_type_id, NULL)) { 2451 btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1); 2452 err = -EINVAL; 2453 break; 2454 } 2455 } 2456 2457 return err; 2458 } 2459 2460 static int btf_func_check(struct btf_verifier_env *env, 2461 const struct btf_type *t) 2462 { 2463 const struct btf_type *proto_type; 2464 const struct btf_param *args; 2465 const struct btf *btf; 2466 u16 nr_args, i; 2467 2468 btf = env->btf; 2469 proto_type = btf_type_by_id(btf, t->type); 2470 2471 if (!proto_type || !btf_type_is_func_proto(proto_type)) { 2472 btf_verifier_log_type(env, t, "Invalid type_id"); 2473 return -EINVAL; 2474 } 2475 2476 args = (const struct btf_param *)(proto_type + 1); 2477 nr_args = btf_type_vlen(proto_type); 2478 for (i = 0; i < nr_args; i++) { 2479 if (!args[i].name_off && args[i].type) { 2480 btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1); 2481 return -EINVAL; 2482 } 2483 } 2484 2485 return 0; 2486 } 2487 2488 static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = { 2489 [BTF_KIND_INT] = &int_ops, 2490 [BTF_KIND_PTR] = &ptr_ops, 2491 [BTF_KIND_ARRAY] = &array_ops, 2492 [BTF_KIND_STRUCT] = &struct_ops, 2493 [BTF_KIND_UNION] = &struct_ops, 2494 [BTF_KIND_ENUM] = &enum_ops, 2495 [BTF_KIND_FWD] = &fwd_ops, 2496 [BTF_KIND_TYPEDEF] = &modifier_ops, 2497 [BTF_KIND_VOLATILE] = &modifier_ops, 2498 [BTF_KIND_CONST] = &modifier_ops, 2499 [BTF_KIND_RESTRICT] = &modifier_ops, 2500 [BTF_KIND_FUNC] = &func_ops, 2501 [BTF_KIND_FUNC_PROTO] = &func_proto_ops, 2502 }; 2503 2504 static s32 btf_check_meta(struct btf_verifier_env *env, 2505 const struct btf_type *t, 2506 u32 meta_left) 2507 { 2508 u32 saved_meta_left = meta_left; 2509 s32 var_meta_size; 2510 2511 if (meta_left < sizeof(*t)) { 2512 btf_verifier_log(env, "[%u] meta_left:%u meta_needed:%zu", 2513 env->log_type_id, meta_left, sizeof(*t)); 2514 return -EINVAL; 2515 } 2516 meta_left -= sizeof(*t); 2517 2518 if (t->info & ~BTF_INFO_MASK) { 2519 btf_verifier_log(env, "[%u] Invalid btf_info:%x", 2520 env->log_type_id, t->info); 2521 return -EINVAL; 2522 } 2523 2524 if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX || 2525 BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) { 2526 btf_verifier_log(env, "[%u] Invalid kind:%u", 2527 env->log_type_id, BTF_INFO_KIND(t->info)); 2528 return -EINVAL; 2529 } 2530 2531 if (!btf_name_offset_valid(env->btf, t->name_off)) { 2532 btf_verifier_log(env, "[%u] Invalid name_offset:%u", 2533 env->log_type_id, t->name_off); 2534 return -EINVAL; 2535 } 2536 2537 var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left); 2538 if (var_meta_size < 0) 2539 return var_meta_size; 2540 2541 meta_left -= var_meta_size; 2542 2543 return saved_meta_left - meta_left; 2544 } 2545 2546 static int btf_check_all_metas(struct btf_verifier_env *env) 2547 { 2548 struct btf *btf = env->btf; 2549 struct btf_header *hdr; 2550 void *cur, *end; 2551 2552 hdr = &btf->hdr; 2553 cur = btf->nohdr_data + hdr->type_off; 2554 end = cur + hdr->type_len; 2555 2556 env->log_type_id = 
1; 2557 while (cur < end) { 2558 struct btf_type *t = cur; 2559 s32 meta_size; 2560 2561 meta_size = btf_check_meta(env, t, end - cur); 2562 if (meta_size < 0) 2563 return meta_size; 2564 2565 btf_add_type(env, t); 2566 cur += meta_size; 2567 env->log_type_id++; 2568 } 2569 2570 return 0; 2571 } 2572 2573 static bool btf_resolve_valid(struct btf_verifier_env *env, 2574 const struct btf_type *t, 2575 u32 type_id) 2576 { 2577 struct btf *btf = env->btf; 2578 2579 if (!env_type_is_resolved(env, type_id)) 2580 return false; 2581 2582 if (btf_type_is_struct(t)) 2583 return !btf->resolved_ids[type_id] && 2584 !btf->resolved_sizes[type_id]; 2585 2586 if (btf_type_is_modifier(t) || btf_type_is_ptr(t)) { 2587 t = btf_type_id_resolve(btf, &type_id); 2588 return t && !btf_type_is_modifier(t); 2589 } 2590 2591 if (btf_type_is_array(t)) { 2592 const struct btf_array *array = btf_type_array(t); 2593 const struct btf_type *elem_type; 2594 u32 elem_type_id = array->type; 2595 u32 elem_size; 2596 2597 elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size); 2598 return elem_type && !btf_type_is_modifier(elem_type) && 2599 (array->nelems * elem_size == 2600 btf->resolved_sizes[type_id]); 2601 } 2602 2603 return false; 2604 } 2605 2606 static int btf_resolve(struct btf_verifier_env *env, 2607 const struct btf_type *t, u32 type_id) 2608 { 2609 u32 save_log_type_id = env->log_type_id; 2610 const struct resolve_vertex *v; 2611 int err = 0; 2612 2613 env->resolve_mode = RESOLVE_TBD; 2614 env_stack_push(env, t, type_id); 2615 while (!err && (v = env_stack_peak(env))) { 2616 env->log_type_id = v->type_id; 2617 err = btf_type_ops(v->t)->resolve(env, v); 2618 } 2619 2620 env->log_type_id = type_id; 2621 if (err == -E2BIG) { 2622 btf_verifier_log_type(env, t, 2623 "Exceeded max resolving depth:%u", 2624 MAX_RESOLVE_DEPTH); 2625 } else if (err == -EEXIST) { 2626 btf_verifier_log_type(env, t, "Loop detected"); 2627 } 2628 2629 /* Final sanity check */ 2630 if (!err && !btf_resolve_valid(env, t, type_id)) { 2631 btf_verifier_log_type(env, t, "Invalid resolve state"); 2632 err = -EINVAL; 2633 } 2634 2635 env->log_type_id = save_log_type_id; 2636 return err; 2637 } 2638 2639 static int btf_check_all_types(struct btf_verifier_env *env) 2640 { 2641 struct btf *btf = env->btf; 2642 u32 type_id; 2643 int err; 2644 2645 err = env_resolve_init(env); 2646 if (err) 2647 return err; 2648 2649 env->phase++; 2650 for (type_id = 1; type_id <= btf->nr_types; type_id++) { 2651 const struct btf_type *t = btf_type_by_id(btf, type_id); 2652 2653 env->log_type_id = type_id; 2654 if (btf_type_needs_resolve(t) && 2655 !env_type_is_resolved(env, type_id)) { 2656 err = btf_resolve(env, t, type_id); 2657 if (err) 2658 return err; 2659 } 2660 2661 if (btf_type_is_func_proto(t)) { 2662 err = btf_func_proto_check(env, t); 2663 if (err) 2664 return err; 2665 } 2666 2667 if (btf_type_is_func(t)) { 2668 err = btf_func_check(env, t); 2669 if (err) 2670 return err; 2671 } 2672 } 2673 2674 return 0; 2675 } 2676 2677 static int btf_parse_type_sec(struct btf_verifier_env *env) 2678 { 2679 const struct btf_header *hdr = &env->btf->hdr; 2680 int err; 2681 2682 /* Type section must align to 4 bytes */ 2683 if (hdr->type_off & (sizeof(u32) - 1)) { 2684 btf_verifier_log(env, "Unaligned type_off"); 2685 return -EINVAL; 2686 } 2687 2688 if (!hdr->type_len) { 2689 btf_verifier_log(env, "No type found"); 2690 return -EINVAL; 2691 } 2692 2693 err = btf_check_all_metas(env); 2694 if (err) 2695 return err; 2696 2697 return btf_check_all_types(env); 2698 } 2699 
2700 static int btf_parse_str_sec(struct btf_verifier_env *env) 2701 { 2702 const struct btf_header *hdr; 2703 struct btf *btf = env->btf; 2704 const char *start, *end; 2705 2706 hdr = &btf->hdr; 2707 start = btf->nohdr_data + hdr->str_off; 2708 end = start + hdr->str_len; 2709 2710 if (end != btf->data + btf->data_size) { 2711 btf_verifier_log(env, "String section is not at the end"); 2712 return -EINVAL; 2713 } 2714 2715 if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET || 2716 start[0] || end[-1]) { 2717 btf_verifier_log(env, "Invalid string section"); 2718 return -EINVAL; 2719 } 2720 2721 btf->strings = start; 2722 2723 return 0; 2724 } 2725 2726 static const size_t btf_sec_info_offset[] = { 2727 offsetof(struct btf_header, type_off), 2728 offsetof(struct btf_header, str_off), 2729 }; 2730 2731 static int btf_sec_info_cmp(const void *a, const void *b) 2732 { 2733 const struct btf_sec_info *x = a; 2734 const struct btf_sec_info *y = b; 2735 2736 return (int)(x->off - y->off) ? : (int)(x->len - y->len); 2737 } 2738 2739 static int btf_check_sec_info(struct btf_verifier_env *env, 2740 u32 btf_data_size) 2741 { 2742 struct btf_sec_info secs[ARRAY_SIZE(btf_sec_info_offset)]; 2743 u32 total, expected_total, i; 2744 const struct btf_header *hdr; 2745 const struct btf *btf; 2746 2747 btf = env->btf; 2748 hdr = &btf->hdr; 2749 2750 /* Populate the secs from hdr */ 2751 for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) 2752 secs[i] = *(struct btf_sec_info *)((void *)hdr + 2753 btf_sec_info_offset[i]); 2754 2755 sort(secs, ARRAY_SIZE(btf_sec_info_offset), 2756 sizeof(struct btf_sec_info), btf_sec_info_cmp, NULL); 2757 2758 /* Check for gaps and overlap among sections */ 2759 total = 0; 2760 expected_total = btf_data_size - hdr->hdr_len; 2761 for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) { 2762 if (expected_total < secs[i].off) { 2763 btf_verifier_log(env, "Invalid section offset"); 2764 return -EINVAL; 2765 } 2766 if (total < secs[i].off) { 2767 /* gap */ 2768 btf_verifier_log(env, "Unsupported section found"); 2769 return -EINVAL; 2770 } 2771 if (total > secs[i].off) { 2772 btf_verifier_log(env, "Section overlap found"); 2773 return -EINVAL; 2774 } 2775 if (expected_total - total < secs[i].len) { 2776 btf_verifier_log(env, 2777 "Total section length too long"); 2778 return -EINVAL; 2779 } 2780 total += secs[i].len; 2781 } 2782 2783 /* There is data other than hdr and known sections */ 2784 if (expected_total != total) { 2785 btf_verifier_log(env, "Unsupported section found"); 2786 return -EINVAL; 2787 } 2788 2789 return 0; 2790 } 2791 2792 static int btf_parse_hdr(struct btf_verifier_env *env) 2793 { 2794 u32 hdr_len, hdr_copy, btf_data_size; 2795 const struct btf_header *hdr; 2796 struct btf *btf; 2797 int err; 2798 2799 btf = env->btf; 2800 btf_data_size = btf->data_size; 2801 2802 if (btf_data_size < 2803 offsetof(struct btf_header, hdr_len) + sizeof(hdr->hdr_len)) { 2804 btf_verifier_log(env, "hdr_len not found"); 2805 return -EINVAL; 2806 } 2807 2808 hdr = btf->data; 2809 hdr_len = hdr->hdr_len; 2810 if (btf_data_size < hdr_len) { 2811 btf_verifier_log(env, "btf_header not found"); 2812 return -EINVAL; 2813 } 2814 2815 /* Ensure the unsupported header fields are zero */ 2816 if (hdr_len > sizeof(btf->hdr)) { 2817 u8 *expected_zero = btf->data + sizeof(btf->hdr); 2818 u8 *end = btf->data + hdr_len; 2819 2820 for (; expected_zero < end; expected_zero++) { 2821 if (*expected_zero) { 2822 btf_verifier_log(env, "Unsupported btf_header"); 2823 return -E2BIG; 2824 } 2825 } 
2826 } 2827 2828 hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr)); 2829 memcpy(&btf->hdr, btf->data, hdr_copy); 2830 2831 hdr = &btf->hdr; 2832 2833 btf_verifier_log_hdr(env, btf_data_size); 2834 2835 if (hdr->magic != BTF_MAGIC) { 2836 btf_verifier_log(env, "Invalid magic"); 2837 return -EINVAL; 2838 } 2839 2840 if (hdr->version != BTF_VERSION) { 2841 btf_verifier_log(env, "Unsupported version"); 2842 return -ENOTSUPP; 2843 } 2844 2845 if (hdr->flags) { 2846 btf_verifier_log(env, "Unsupported flags"); 2847 return -ENOTSUPP; 2848 } 2849 2850 if (btf_data_size == hdr->hdr_len) { 2851 btf_verifier_log(env, "No data"); 2852 return -EINVAL; 2853 } 2854 2855 err = btf_check_sec_info(env, btf_data_size); 2856 if (err) 2857 return err; 2858 2859 return 0; 2860 } 2861 2862 static struct btf *btf_parse(void __user *btf_data, u32 btf_data_size, 2863 u32 log_level, char __user *log_ubuf, u32 log_size) 2864 { 2865 struct btf_verifier_env *env = NULL; 2866 struct bpf_verifier_log *log; 2867 struct btf *btf = NULL; 2868 u8 *data; 2869 int err; 2870 2871 if (btf_data_size > BTF_MAX_SIZE) 2872 return ERR_PTR(-E2BIG); 2873 2874 env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN); 2875 if (!env) 2876 return ERR_PTR(-ENOMEM); 2877 2878 log = &env->log; 2879 if (log_level || log_ubuf || log_size) { 2880 /* user requested verbose verifier output 2881 * and supplied buffer to store the verification trace 2882 */ 2883 log->level = log_level; 2884 log->ubuf = log_ubuf; 2885 log->len_total = log_size; 2886 2887 /* log attributes have to be sane */ 2888 if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 || 2889 !log->level || !log->ubuf) { 2890 err = -EINVAL; 2891 goto errout; 2892 } 2893 } 2894 2895 btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN); 2896 if (!btf) { 2897 err = -ENOMEM; 2898 goto errout; 2899 } 2900 env->btf = btf; 2901 2902 data = kvmalloc(btf_data_size, GFP_KERNEL | __GFP_NOWARN); 2903 if (!data) { 2904 err = -ENOMEM; 2905 goto errout; 2906 } 2907 2908 btf->data = data; 2909 btf->data_size = btf_data_size; 2910 2911 if (copy_from_user(data, btf_data, btf_data_size)) { 2912 err = -EFAULT; 2913 goto errout; 2914 } 2915 2916 err = btf_parse_hdr(env); 2917 if (err) 2918 goto errout; 2919 2920 btf->nohdr_data = btf->data + btf->hdr.hdr_len; 2921 2922 err = btf_parse_str_sec(env); 2923 if (err) 2924 goto errout; 2925 2926 err = btf_parse_type_sec(env); 2927 if (err) 2928 goto errout; 2929 2930 if (log->level && bpf_verifier_log_full(log)) { 2931 err = -ENOSPC; 2932 goto errout; 2933 } 2934 2935 btf_verifier_env_free(env); 2936 refcount_set(&btf->refcnt, 1); 2937 return btf; 2938 2939 errout: 2940 btf_verifier_env_free(env); 2941 if (btf) 2942 btf_free(btf); 2943 return ERR_PTR(err); 2944 } 2945 2946 void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj, 2947 struct seq_file *m) 2948 { 2949 const struct btf_type *t = btf_type_by_id(btf, type_id); 2950 2951 btf_type_ops(t)->seq_show(btf, t, type_id, obj, 0, m); 2952 } 2953 2954 static int btf_release(struct inode *inode, struct file *filp) 2955 { 2956 btf_put(filp->private_data); 2957 return 0; 2958 } 2959 2960 const struct file_operations btf_fops = { 2961 .release = btf_release, 2962 }; 2963 2964 static int __btf_new_fd(struct btf *btf) 2965 { 2966 return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC); 2967 } 2968 2969 int btf_new_fd(const union bpf_attr *attr) 2970 { 2971 struct btf *btf; 2972 int ret; 2973 2974 btf = btf_parse(u64_to_user_ptr(attr->btf), 2975 attr->btf_size, attr->btf_log_level, 2976 
u64_to_user_ptr(attr->btf_log_buf), 2977 attr->btf_log_size); 2978 if (IS_ERR(btf)) 2979 return PTR_ERR(btf); 2980 2981 ret = btf_alloc_id(btf); 2982 if (ret) { 2983 btf_free(btf); 2984 return ret; 2985 } 2986 2987 /* 2988 * The BTF ID is published to the userspace. 2989 * All BTF free must go through call_rcu() from 2990 * now on (i.e. free by calling btf_put()). 2991 */ 2992 2993 ret = __btf_new_fd(btf); 2994 if (ret < 0) 2995 btf_put(btf); 2996 2997 return ret; 2998 } 2999 3000 struct btf *btf_get_by_fd(int fd) 3001 { 3002 struct btf *btf; 3003 struct fd f; 3004 3005 f = fdget(fd); 3006 3007 if (!f.file) 3008 return ERR_PTR(-EBADF); 3009 3010 if (f.file->f_op != &btf_fops) { 3011 fdput(f); 3012 return ERR_PTR(-EINVAL); 3013 } 3014 3015 btf = f.file->private_data; 3016 refcount_inc(&btf->refcnt); 3017 fdput(f); 3018 3019 return btf; 3020 } 3021 3022 int btf_get_info_by_fd(const struct btf *btf, 3023 const union bpf_attr *attr, 3024 union bpf_attr __user *uattr) 3025 { 3026 struct bpf_btf_info __user *uinfo; 3027 struct bpf_btf_info info = {}; 3028 u32 info_copy, btf_copy; 3029 void __user *ubtf; 3030 u32 uinfo_len; 3031 3032 uinfo = u64_to_user_ptr(attr->info.info); 3033 uinfo_len = attr->info.info_len; 3034 3035 info_copy = min_t(u32, uinfo_len, sizeof(info)); 3036 if (copy_from_user(&info, uinfo, info_copy)) 3037 return -EFAULT; 3038 3039 info.id = btf->id; 3040 ubtf = u64_to_user_ptr(info.btf); 3041 btf_copy = min_t(u32, btf->data_size, info.btf_size); 3042 if (copy_to_user(ubtf, btf->data, btf_copy)) 3043 return -EFAULT; 3044 info.btf_size = btf->data_size; 3045 3046 if (copy_to_user(uinfo, &info, info_copy) || 3047 put_user(info_copy, &uattr->info.info_len)) 3048 return -EFAULT; 3049 3050 return 0; 3051 } 3052 3053 int btf_get_fd_by_id(u32 id) 3054 { 3055 struct btf *btf; 3056 int fd; 3057 3058 rcu_read_lock(); 3059 btf = idr_find(&btf_idr, id); 3060 if (!btf || !refcount_inc_not_zero(&btf->refcnt)) 3061 btf = ERR_PTR(-ENOENT); 3062 rcu_read_unlock(); 3063 3064 if (IS_ERR(btf)) 3065 return PTR_ERR(btf); 3066 3067 fd = __btf_new_fd(btf); 3068 if (fd < 0) 3069 btf_put(btf); 3070 3071 return fd; 3072 } 3073 3074 u32 btf_id(const struct btf *btf) 3075 { 3076 return btf->id; 3077 } 3078