// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Facebook */

#include <uapi/linux/btf.h>
#include <uapi/linux/bpf.h>
#include <uapi/linux/bpf_perf_event.h>
#include <uapi/linux/types.h>
#include <linux/seq_file.h>
#include <linux/compiler.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/sort.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/skmsg.h>
#include <linux/perf_event.h>
#include <linux/bsearch.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <net/sock.h>
#include "../tools/lib/bpf/relo_core.h"

/* BTF (BPF Type Format) is the metadata format which describes
 * the data types of BPF programs/maps. Hence, it basically focuses
 * on the C programming language which modern BPF is primarily
 * using.
 *
 * ELF Section:
 * ~~~~~~~~~~~
 * The BTF data is stored under the ".BTF" ELF section.
 *
 * struct btf_type:
 * ~~~~~~~~~~~~~~~
 * Each 'struct btf_type' object describes a C data type.
 * Depending on the type it is describing, a 'struct btf_type'
 * object may be followed by more data. E.g., to describe an
 * array, 'struct btf_type' is followed by 'struct btf_array'.
 *
 * 'struct btf_type' and any extra data following it are
 * 4-byte aligned.
 *
 * Type section:
 * ~~~~~~~~~~~~~
 * The BTF type section contains a list of 'struct btf_type' objects.
 * Each one describes a C type. Recall from the above section
 * that a 'struct btf_type' object could be immediately followed by extra
 * data in order to describe some particular C types.
 *
 * type_id:
 * ~~~~~~~
 * Each btf_type object is identified by a type_id. The type_id
 * is implicitly implied by the location of the btf_type object in
 * the BTF type section. The first one has type_id 1, the second
 * one has type_id 2, etc. Hence, an earlier btf_type has
 * a smaller type_id.
 *
 * A btf_type object may refer to another btf_type object by using
 * a type_id (i.e. the "type" in "struct btf_type").
 *
 * NOTE that we cannot assume any reference-order.
 * A btf_type object can refer to an earlier btf_type object
 * but it can also refer to a later btf_type object.
 *
 * For example, to describe "const void *", a btf_type
 * object describing "const" may refer to another btf_type
 * object describing "void *". This type-reference is done
 * by specifying a type_id:
 *
 * [1] CONST (anon) type_id=2
 * [2] PTR (anon) type_id=0
 *
 * The above is the btf_verifier debug log:
 *   - Each line starting with "[?]" is a btf_type object
 *   - [?] is the type_id of the btf_type object.
 *   - CONST/PTR is the BTF_KIND_XXX
 *   - "(anon)" is the name of the type. It just
 *     happens that CONST and PTR have no name.
 *   - type_id=XXX is the 'u32 type' in btf_type
 *
 * NOTE: "void" has type_id 0
 *
 * String section:
 * ~~~~~~~~~~~~~~
 * The BTF string section contains the names used by the type section.
 * Each string is referred to by an "offset" from the beginning of the
 * string section.
 *
 * Each string is '\0' terminated.
 *
 * The first character in the string section must be '\0',
 * which is used to mean 'anonymous'. Some btf_types may not
 * have a name.
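 *
 * For example (an illustrative layout, not taken from real BTF data),
 * a string section holding the names "A" and "b" looks like:
 *
 *   offset 0: '\0'        <- anonymous / no name
 *   offset 1: 'A' '\0'    <- name_off 1 refers to "A"
 *   offset 3: 'b' '\0'    <- name_off 3 refers to "b"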
 */

/* BTF verification:
 *
 * To verify BTF data, two passes are needed.
 *
 * Pass #1
 * ~~~~~~~
 * The first pass is to collect all btf_type objects into
 * an array: "btf->types".
 *
 * Depending on the C type that a btf_type is describing,
 * a btf_type may be followed by extra data. We don't know
 * how many btf_types there are, and more importantly we don't
 * know where each btf_type is located in the type section.
 *
 * Without knowing the location of each type_id, most verifications
 * cannot be done. E.g. an earlier btf_type may refer to a later
 * btf_type (recall the "const void *" above), so we cannot
 * check this type-reference in the first pass.
 *
 * The first pass still does some verifications (e.g.
 * checking that the name is a valid offset into the string section).
 *
 * Pass #2
 * ~~~~~~~
 * The main focus is to resolve a btf_type that is referring
 * to another type.
 *
 * We have to ensure the referred-to type:
 * 1) does exist in the BTF (i.e. in btf->types[])
 * 2) does not cause a loop:
 *	struct A {
 *		struct B b;
 *	};
 *
 *	struct B {
 *		struct A a;
 *	};
 *
 * btf_type_needs_resolve() decides if a btf_type needs
 * to be resolved.
 *
 * The needs_resolve type implements the "resolve()" ops which
 * essentially does a DFS and detects backedges.
 *
 * During resolve (or DFS), different C types have different
 * "RESOLVED" conditions.
 *
 * When resolving a BTF_KIND_STRUCT, we need to resolve all its
 * members because a member is always referring to another
 * type. A struct's member can be treated as "RESOLVED" if
 * it is referring to a BTF_KIND_PTR. Otherwise, the
 * following valid C struct would be rejected:
 *
 *	struct A {
 *		int m;
 *		struct A *a;
 *	};
 *
 * When resolving a BTF_KIND_PTR, it needs to keep resolving if
 * it is referring to another BTF_KIND_PTR. Otherwise, we cannot
 * detect a pointer loop, e.g.:
 * BTF_KIND_CONST -> BTF_KIND_PTR -> BTF_KIND_CONST -> BTF_KIND_PTR +
 *                        ^                                         |
 *                        +-----------------------------------------+
 *
 */

#define BITS_PER_U128 (sizeof(u64) * BITS_PER_BYTE * 2)
#define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
#define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
#define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
#define BITS_ROUNDUP_BYTES(bits) \
	(BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))

#define BTF_INFO_MASK 0x9f00ffff
#define BTF_INT_MASK 0x0fffffff
#define BTF_TYPE_ID_VALID(type_id) ((type_id) <= BTF_MAX_TYPE)
#define BTF_STR_OFFSET_VALID(name_off) ((name_off) <= BTF_MAX_NAME_OFFSET)

/* 16MB for 64k structs, each with 16 members, and
 * a few MB of space for the string section.
 * The hard limit is S32_MAX.
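 *
 * Rough arithmetic behind the 16MB figure (illustrative): 64k structs,
 * each costing sizeof(struct btf_type) == 12 bytes plus 16 members at
 * sizeof(struct btf_member) == 12 bytes each, is 64k * (12 + 16 * 12)
 * ~= 12.75MB of type data, leaving ~3MB for the string section.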
 */
#define BTF_MAX_SIZE (16 * 1024 * 1024)

#define for_each_member_from(i, from, struct_type, member)		\
	for (i = from, member = btf_type_member(struct_type) + from;	\
	     i < btf_type_vlen(struct_type);				\
	     i++, member++)

#define for_each_vsi_from(i, from, struct_type, member)				\
	for (i = from, member = btf_type_var_secinfo(struct_type) + from;	\
	     i < btf_type_vlen(struct_type);					\
	     i++, member++)

DEFINE_IDR(btf_idr);
DEFINE_SPINLOCK(btf_idr_lock);

enum btf_kfunc_hook {
	BTF_KFUNC_HOOK_XDP,
	BTF_KFUNC_HOOK_TC,
	BTF_KFUNC_HOOK_STRUCT_OPS,
	BTF_KFUNC_HOOK_TRACING,
	BTF_KFUNC_HOOK_SYSCALL,
	BTF_KFUNC_HOOK_MAX,
};

enum {
	BTF_KFUNC_SET_MAX_CNT = 32,
	BTF_DTOR_KFUNC_MAX_CNT = 256,
};

struct btf_kfunc_set_tab {
	struct btf_id_set *sets[BTF_KFUNC_HOOK_MAX][BTF_KFUNC_TYPE_MAX];
};

struct btf_id_dtor_kfunc_tab {
	u32 cnt;
	struct btf_id_dtor_kfunc dtors[];
};

struct btf {
	void *data;
	struct btf_type **types;
	u32 *resolved_ids;
	u32 *resolved_sizes;
	const char *strings;
	void *nohdr_data;
	struct btf_header hdr;
	u32 nr_types; /* includes VOID for base BTF */
	u32 types_size;
	u32 data_size;
	refcount_t refcnt;
	u32 id;
	struct rcu_head rcu;
	struct btf_kfunc_set_tab *kfunc_set_tab;
	struct btf_id_dtor_kfunc_tab *dtor_kfunc_tab;

	/* split BTF support */
	struct btf *base_btf;
	u32 start_id; /* first type ID in this BTF (0 for base BTF) */
	u32 start_str_off; /* first string offset (0 for base BTF) */
	char name[MODULE_NAME_LEN];
	bool kernel_btf;
};

enum verifier_phase {
	CHECK_META,
	CHECK_TYPE,
};

struct resolve_vertex {
	const struct btf_type *t;
	u32 type_id;
	u16 next_member;
};

enum visit_state {
	NOT_VISITED,
	VISITED,
	RESOLVED,
};

enum resolve_mode {
	RESOLVE_TBD,	/* To Be Determined */
	RESOLVE_PTR,	/* Resolving for Pointer */
	RESOLVE_STRUCT_OR_ARRAY,	/* Resolving for struct/union
					 * or array
					 */
};

#define MAX_RESOLVE_DEPTH 32

struct btf_sec_info {
	u32 off;
	u32 len;
};

struct btf_verifier_env {
	struct btf *btf;
	u8 *visit_states;
	struct resolve_vertex stack[MAX_RESOLVE_DEPTH];
	struct bpf_verifier_log log;
	u32 log_type_id;
	u32 top_stack;
	enum verifier_phase phase;
	enum resolve_mode resolve_mode;
};

static const char * const btf_kind_str[NR_BTF_KINDS] = {
	[BTF_KIND_UNKN]		= "UNKNOWN",
	[BTF_KIND_INT]		= "INT",
	[BTF_KIND_PTR]		= "PTR",
	[BTF_KIND_ARRAY]	= "ARRAY",
	[BTF_KIND_STRUCT]	= "STRUCT",
	[BTF_KIND_UNION]	= "UNION",
	[BTF_KIND_ENUM]		= "ENUM",
	[BTF_KIND_FWD]		= "FWD",
	[BTF_KIND_TYPEDEF]	= "TYPEDEF",
	[BTF_KIND_VOLATILE]	= "VOLATILE",
	[BTF_KIND_CONST]	= "CONST",
	[BTF_KIND_RESTRICT]	= "RESTRICT",
	[BTF_KIND_FUNC]		= "FUNC",
	[BTF_KIND_FUNC_PROTO]	= "FUNC_PROTO",
	[BTF_KIND_VAR]		= "VAR",
	[BTF_KIND_DATASEC]	= "DATASEC",
	[BTF_KIND_FLOAT]	= "FLOAT",
	[BTF_KIND_DECL_TAG]	= "DECL_TAG",
	[BTF_KIND_TYPE_TAG]	= "TYPE_TAG",
	[BTF_KIND_ENUM64]	= "ENUM64",
};

const char *btf_type_str(const struct btf_type *t)
{
	return btf_kind_str[BTF_INFO_KIND(t->info)];
}

/* Chunk size we use in safe copy of data to be shown.
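 * With 32-byte chunks, showing e.g. a 200-byte struct needs roughly
 * ceil(200 / 32) = 7 safe copies in the worst case (illustrative).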
 */
#define BTF_SHOW_OBJ_SAFE_SIZE 32

/*
 * This is the maximum size of a base type value (equivalent to a
 * 128-bit int); if we are at the end of our safe buffer and have
 * less than 16 bytes space we can't be assured of being able
 * to copy the next type safely, so in such cases we will initiate
 * a new copy.
 */
#define BTF_SHOW_OBJ_BASE_TYPE_SIZE 16

/* Type name size */
#define BTF_SHOW_NAME_SIZE 80

/*
 * Common data to all BTF show operations. Private show functions can add
 * their own data to a structure containing a struct btf_show and consult it
 * in the show callback. See btf_type_show() below.
 *
 * One challenge with showing nested data is we want to skip 0-valued
 * data, but in order to figure out whether a nested object is all zeros
 * we need to walk through it. As a result, we need to make two passes
 * when handling structs, unions and arrays; the first pass simply looks
 * for nonzero data, while the second actually does the display. The first
 * pass is signalled by show->state.depth_check being set, and if we
 * encounter a non-zero value we set show->state.depth_to_show to
 * the depth at which we encountered it. When we have completed the
 * first pass, we will know if anything needs to be displayed if
 * depth_to_show > depth. See btf_[struct,array]_show() for the
 * implementation of this.
 *
 * Another problem is we want to ensure the data for display is safe to
 * access. To support this, the anonymous "struct {} obj" tracks the data
 * object and our safe copy of it. We copy portions of the data needed
 * to the object "copy" buffer, but because its size is limited to
 * BTF_SHOW_OBJ_SAFE_SIZE bytes, multiple copies may be required as we
 * traverse larger objects for display.
 *
 * The various data type show functions all start with a call to
 * btf_show_start_type() which returns a pointer to the safe copy
 * of the data needed (or, if BTF_SHOW_UNSAFE is specified, to the
 * raw data itself). btf_show_obj_safe() is responsible for
 * using copy_from_kernel_nofault() to update the safe data if necessary
 * as we traverse the object's data. skbuff-like semantics are
 * used:
 *
 * - obj.head points to the start of the toplevel object for display
 * - obj.size is the size of the toplevel object
 * - obj.data points to the current point in the original data at
 *   which our safe data starts. obj.data will advance as we copy
 *   portions of the data.
 *
 * In most cases a single copy will suffice, but larger data structures
 * such as "struct task_struct" will require many copies. The logic in
 * btf_show_obj_safe() determines whether a new
 * copy_from_kernel_nofault() is needed.
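 *
 * As an illustrative walk-through: showing a 64-byte struct with the
 * 32-byte safe buffer starts with obj.head == obj.data == the object's
 * address, obj.size == 64, and bytes 0..31 copied into obj.safe. Once
 * a member beyond byte 31 is reached, obj.data advances by 32 and a
 * second copy_from_kernel_nofault() refills obj.safe with bytes 32..63.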
 */
struct btf_show {
	u64 flags;
	void *target;	/* target of show operation (seq file, buffer) */
	void (*showfn)(struct btf_show *show, const char *fmt, va_list args);
	const struct btf *btf;
	/* below are used during iteration */
	struct {
		u8 depth;
		u8 depth_to_show;
		u8 depth_check;
		u8 array_member:1,
		   array_terminated:1;
		u16 array_encoding;
		u32 type_id;
		int status;	/* non-zero for error */
		const struct btf_type *type;
		const struct btf_member *member;
		char name[BTF_SHOW_NAME_SIZE];	/* space for member name/type */
	} state;
	struct {
		u32 size;
		void *head;
		void *data;
		u8 safe[BTF_SHOW_OBJ_SAFE_SIZE];
	} obj;
};

struct btf_kind_operations {
	s32 (*check_meta)(struct btf_verifier_env *env,
			  const struct btf_type *t,
			  u32 meta_left);
	int (*resolve)(struct btf_verifier_env *env,
		       const struct resolve_vertex *v);
	int (*check_member)(struct btf_verifier_env *env,
			    const struct btf_type *struct_type,
			    const struct btf_member *member,
			    const struct btf_type *member_type);
	int (*check_kflag_member)(struct btf_verifier_env *env,
				  const struct btf_type *struct_type,
				  const struct btf_member *member,
				  const struct btf_type *member_type);
	void (*log_details)(struct btf_verifier_env *env,
			    const struct btf_type *t);
	void (*show)(const struct btf *btf, const struct btf_type *t,
		     u32 type_id, void *data, u8 bits_offsets,
		     struct btf_show *show);
};

static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS];
static struct btf_type btf_void;

static int btf_resolve(struct btf_verifier_env *env,
		       const struct btf_type *t, u32 type_id);

static int btf_func_check(struct btf_verifier_env *env,
			  const struct btf_type *t);

static bool btf_type_is_modifier(const struct btf_type *t)
{
	/* Some of them are not strictly C modifiers,
	 * but they are grouped into the same bucket
	 * as far as BTF is concerned:
	 * a type (t) that refers to another
	 * type through t->type AND whose size cannot
	 * be determined without following t->type.
	 *
	 * ptr does not fall into this bucket
	 * because its size is always sizeof(void *).
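	 *
	 * For example (illustrative), given "typedef const int cint_t",
	 * sizing the TYPEDEF or CONST node requires chasing t->type
	 * down to the underlying INT, whereas a PTR to either one is
	 * sizeof(void *) with no chasing needed.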
	 */
	switch (BTF_INFO_KIND(t->info)) {
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_TYPE_TAG:
		return true;
	}

	return false;
}

bool btf_type_is_void(const struct btf_type *t)
{
	return t == &btf_void;
}

static bool btf_type_is_fwd(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
}

static bool btf_type_nosize(const struct btf_type *t)
{
	return btf_type_is_void(t) || btf_type_is_fwd(t) ||
	       btf_type_is_func(t) || btf_type_is_func_proto(t);
}

static bool btf_type_nosize_or_null(const struct btf_type *t)
{
	return !t || btf_type_nosize(t);
}

static bool __btf_type_is_struct(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT;
}

static bool btf_type_is_array(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_ARRAY;
}

static bool btf_type_is_datasec(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC;
}

static bool btf_type_is_decl_tag(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_DECL_TAG;
}

static bool btf_type_is_decl_tag_target(const struct btf_type *t)
{
	return btf_type_is_func(t) || btf_type_is_struct(t) ||
	       btf_type_is_var(t) || btf_type_is_typedef(t);
}

u32 btf_nr_types(const struct btf *btf)
{
	u32 total = 0;

	while (btf) {
		total += btf->nr_types;
		btf = btf->base_btf;
	}

	return total;
}

s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind)
{
	const struct btf_type *t;
	const char *tname;
	u32 i, total;

	total = btf_nr_types(btf);
	for (i = 1; i < total; i++) {
		t = btf_type_by_id(btf, i);
		if (BTF_INFO_KIND(t->info) != kind)
			continue;

		tname = btf_name_by_offset(btf, t->name_off);
		if (!strcmp(tname, name))
			return i;
	}

	return -ENOENT;
}

static s32 bpf_find_btf_id(const char *name, u32 kind, struct btf **btf_p)
{
	struct btf *btf;
	s32 ret;
	int id;

	btf = bpf_get_btf_vmlinux();
	if (IS_ERR(btf))
		return PTR_ERR(btf);
	if (!btf)
		return -EINVAL;

	ret = btf_find_by_name_kind(btf, name, kind);
	/* ret is never zero, since btf_find_by_name_kind returns
	 * a positive btf_id or a negative error.
	 */
	if (ret > 0) {
		btf_get(btf);
		*btf_p = btf;
		return ret;
	}

	/* If name is not found in vmlinux's BTF then search in module's BTFs */
	spin_lock_bh(&btf_idr_lock);
	idr_for_each_entry(&btf_idr, btf, id) {
		if (!btf_is_module(btf))
			continue;
		/* linear search could be slow hence unlock/lock
		 * the IDR to avoid holding it for too long
		 */
		btf_get(btf);
		spin_unlock_bh(&btf_idr_lock);
		ret = btf_find_by_name_kind(btf, name, kind);
		if (ret > 0) {
			*btf_p = btf;
			return ret;
		}
		spin_lock_bh(&btf_idr_lock);
		btf_put(btf);
	}
	spin_unlock_bh(&btf_idr_lock);
	return ret;
}

const struct btf_type *btf_type_skip_modifiers(const struct btf *btf,
					       u32 id, u32 *res_id)
{
	const struct btf_type *t = btf_type_by_id(btf, id);

	while (btf_type_is_modifier(t)) {
		id = t->type;
		t = btf_type_by_id(btf, t->type);
	}

	if (res_id)
		*res_id = id;

	return t;
}

const struct btf_type *btf_type_resolve_ptr(const struct btf *btf,
					    u32 id, u32 *res_id)
{
	const struct btf_type *t;

	t = btf_type_skip_modifiers(btf, id, NULL);
	if (!btf_type_is_ptr(t))
		return NULL;

	return btf_type_skip_modifiers(btf, t->type, res_id);
}

const struct btf_type *btf_type_resolve_func_ptr(const struct btf *btf,
						 u32 id, u32 *res_id)
{
	const struct btf_type *ptype;

	ptype = btf_type_resolve_ptr(btf, id, res_id);
	if (ptype && btf_type_is_func_proto(ptype))
		return ptype;

	return NULL;
}

/* Types that act only as a source, not sink or intermediate
 * type when resolving.
 */
static bool btf_type_is_resolve_source_only(const struct btf_type *t)
{
	return btf_type_is_var(t) ||
	       btf_type_is_decl_tag(t) ||
	       btf_type_is_datasec(t);
}

/* What types need to be resolved?
 *
 * btf_type_is_modifier() is an obvious one.
 *
 * btf_type_is_struct() because its member refers to
 * another type (through member->type).
 *
 * btf_type_is_var() because the variable refers to
 * another type. btf_type_is_datasec() holds multiple
 * btf_type_is_var() types that need resolving.
 *
 * btf_type_is_array() because its element (array->type)
 * refers to another type. An array can be thought of as a
 * special case of struct where the same member-type is
 * repeated array->nelems times.
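 *
 * e.g. (illustrative) "int a[4]" resolves like
 * "struct { int m0; int m1; int m2; int m3; }": a single member
 * type (int) repeated nelems == 4 times.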
 */
static bool btf_type_needs_resolve(const struct btf_type *t)
{
	return btf_type_is_modifier(t) ||
	       btf_type_is_ptr(t) ||
	       btf_type_is_struct(t) ||
	       btf_type_is_array(t) ||
	       btf_type_is_var(t) ||
	       btf_type_is_func(t) ||
	       btf_type_is_decl_tag(t) ||
	       btf_type_is_datasec(t);
}

/* t->size can be used */
static bool btf_type_has_size(const struct btf_type *t)
{
	switch (BTF_INFO_KIND(t->info)) {
	case BTF_KIND_INT:
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
	case BTF_KIND_ENUM:
	case BTF_KIND_DATASEC:
	case BTF_KIND_FLOAT:
	case BTF_KIND_ENUM64:
		return true;
	}

	return false;
}

static const char *btf_int_encoding_str(u8 encoding)
{
	if (encoding == 0)
		return "(none)";
	else if (encoding == BTF_INT_SIGNED)
		return "SIGNED";
	else if (encoding == BTF_INT_CHAR)
		return "CHAR";
	else if (encoding == BTF_INT_BOOL)
		return "BOOL";
	else
		return "UNKN";
}

static u32 btf_type_int(const struct btf_type *t)
{
	return *(u32 *)(t + 1);
}

static const struct btf_array *btf_type_array(const struct btf_type *t)
{
	return (const struct btf_array *)(t + 1);
}

static const struct btf_enum *btf_type_enum(const struct btf_type *t)
{
	return (const struct btf_enum *)(t + 1);
}

static const struct btf_var *btf_type_var(const struct btf_type *t)
{
	return (const struct btf_var *)(t + 1);
}

static const struct btf_decl_tag *btf_type_decl_tag(const struct btf_type *t)
{
	return (const struct btf_decl_tag *)(t + 1);
}

static const struct btf_enum64 *btf_type_enum64(const struct btf_type *t)
{
	return (const struct btf_enum64 *)(t + 1);
}

static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
{
	return kind_ops[BTF_INFO_KIND(t->info)];
}

static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
{
	if (!BTF_STR_OFFSET_VALID(offset))
		return false;

	while (offset < btf->start_str_off)
		btf = btf->base_btf;

	offset -= btf->start_str_off;
	return offset < btf->hdr.str_len;
}

static bool __btf_name_char_ok(char c, bool first, bool dot_ok)
{
	if ((first ? !isalpha(c) :
		     !isalnum(c)) &&
	    c != '_' &&
	    ((c == '.' && !dot_ok) ||
	      c != '.'))
		return false;
	return true;
}

static const char *btf_str_by_offset(const struct btf *btf, u32 offset)
{
	while (offset < btf->start_str_off)
		btf = btf->base_btf;

	offset -= btf->start_str_off;
	if (offset < btf->hdr.str_len)
		return &btf->strings[offset];

	return NULL;
}

static bool __btf_name_valid(const struct btf *btf, u32 offset, bool dot_ok)
{
	/* offset must be valid */
	const char *src = btf_str_by_offset(btf, offset);
	const char *src_limit;

	if (!__btf_name_char_ok(*src, true, dot_ok))
		return false;

	/* set a limit on identifier length */
	src_limit = src + KSYM_NAME_LEN;
	src++;
	while (*src && src < src_limit) {
		if (!__btf_name_char_ok(*src, false, dot_ok))
			return false;
		src++;
	}

	return !*src;
}

/* Only C-style identifiers are permitted. This can be relaxed if
 * necessary.
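 *
 * e.g. (illustrative) "foo_bar1" and "_x" are accepted, "1foo" and
 * "foo-bar" are not. Names containing '.', such as the ".rodata"
 * section name, are only accepted when dot_ok is true, i.e. via
 * btf_name_valid_section() below.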
 */
static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
{
	return __btf_name_valid(btf, offset, false);
}

static bool btf_name_valid_section(const struct btf *btf, u32 offset)
{
	return __btf_name_valid(btf, offset, true);
}

static const char *__btf_name_by_offset(const struct btf *btf, u32 offset)
{
	const char *name;

	if (!offset)
		return "(anon)";

	name = btf_str_by_offset(btf, offset);
	return name ?: "(invalid-name-offset)";
}

const char *btf_name_by_offset(const struct btf *btf, u32 offset)
{
	return btf_str_by_offset(btf, offset);
}

const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
{
	while (type_id < btf->start_id)
		btf = btf->base_btf;

	type_id -= btf->start_id;
	if (type_id >= btf->nr_types)
		return NULL;
	return btf->types[type_id];
}

/*
 * Regular int is not a bit field and it must be either
 * u8/u16/u32/u64 or __int128.
 */
static bool btf_type_int_is_regular(const struct btf_type *t)
{
	u8 nr_bits, nr_bytes;
	u32 int_data;

	int_data = btf_type_int(t);
	nr_bits = BTF_INT_BITS(int_data);
	nr_bytes = BITS_ROUNDUP_BYTES(nr_bits);
	if (BITS_PER_BYTE_MASKED(nr_bits) ||
	    BTF_INT_OFFSET(int_data) ||
	    (nr_bytes != sizeof(u8) && nr_bytes != sizeof(u16) &&
	     nr_bytes != sizeof(u32) && nr_bytes != sizeof(u64) &&
	     nr_bytes != (2 * sizeof(u64)))) {
		return false;
	}

	return true;
}

/*
 * Check that given struct member is a regular int with expected
 * offset and size.
 */
bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
			   const struct btf_member *m,
			   u32 expected_offset, u32 expected_size)
{
	const struct btf_type *t;
	u32 id, int_data;
	u8 nr_bits;

	id = m->type;
	t = btf_type_id_size(btf, &id, NULL);
	if (!t || !btf_type_is_int(t))
		return false;

	int_data = btf_type_int(t);
	nr_bits = BTF_INT_BITS(int_data);
	if (btf_type_kflag(s)) {
		u32 bitfield_size = BTF_MEMBER_BITFIELD_SIZE(m->offset);
		u32 bit_offset = BTF_MEMBER_BIT_OFFSET(m->offset);

		/* if kflag set, int should be a regular int and
		 * bit offset should be at byte boundary.
		 */
		return !bitfield_size &&
		       BITS_ROUNDUP_BYTES(bit_offset) == expected_offset &&
		       BITS_ROUNDUP_BYTES(nr_bits) == expected_size;
	}

	if (BTF_INT_OFFSET(int_data) ||
	    BITS_PER_BYTE_MASKED(m->offset) ||
	    BITS_ROUNDUP_BYTES(m->offset) != expected_offset ||
	    BITS_PER_BYTE_MASKED(nr_bits) ||
	    BITS_ROUNDUP_BYTES(nr_bits) != expected_size)
		return false;

	return true;
}

/* Similar to btf_type_skip_modifiers() but does not skip typedefs. */
static const struct btf_type *btf_type_skip_qualifiers(const struct btf *btf,
						       u32 id)
{
	const struct btf_type *t = btf_type_by_id(btf, id);

	while (btf_type_is_modifier(t) &&
	       BTF_INFO_KIND(t->info) != BTF_KIND_TYPEDEF) {
		t = btf_type_by_id(btf, t->type);
	}

	return t;
}

#define BTF_SHOW_MAX_ITER 10

#define BTF_KIND_BIT(kind) (1ULL << kind)

/*
 * Populate show->state.name with type name information.
 * Format of type name is
 *
 * [.member_name = ] (type_name)
 */
static const char *btf_show_name(struct btf_show *show)
{
	/* BTF_SHOW_MAX_ITER array suffixes "[]" */
	const char *array_suffixes = "[][][][][][][][][][]";
	const char *array_suffix = &array_suffixes[strlen(array_suffixes)];
	/* BTF_SHOW_MAX_ITER pointer suffixes "*" */
	const char *ptr_suffixes = "**********";
	const char *ptr_suffix = &ptr_suffixes[strlen(ptr_suffixes)];
	const char *name = NULL, *prefix = "", *parens = "";
	const struct btf_member *m = show->state.member;
	const struct btf_type *t;
	const struct btf_array *array;
	u32 id = show->state.type_id;
	const char *member = NULL;
	bool show_member = false;
	u64 kinds = 0;
	int i;

	show->state.name[0] = '\0';

	/*
	 * Don't show type name if we're showing an array member;
	 * in that case we show the array type so don't need to repeat
	 * ourselves for each member.
	 */
	if (show->state.array_member)
		return "";

	/* Retrieve member name, if any. */
	if (m) {
		member = btf_name_by_offset(show->btf, m->name_off);
		show_member = strlen(member) > 0;
		id = m->type;
	}

	/*
	 * Start with type_id, as we have resolved the struct btf_type *
	 * via btf_modifier_show() past the parent typedef to the child
	 * struct, int etc it is defined as. In such cases, the type_id
	 * still represents the starting type while the struct btf_type *
	 * in our show->state points at the resolved type of the typedef.
	 */
	t = btf_type_by_id(show->btf, id);
	if (!t)
		return "";

	/*
	 * The goal here is to build up the right number of pointer and
	 * array suffixes while ensuring the type name for a typedef
	 * is represented. Along the way we accumulate a list of
	 * BTF kinds we have encountered, since these will inform later
	 * display; for example, pointer types will not require an
	 * opening "{" for struct; we will just display the pointer value.
	 *
	 * We also want to accumulate the right number of pointer or array
	 * indices in the format string while iterating until we get to
	 * the typedef/pointee/array member target type.
	 *
	 * We start by pointing at the end of pointer and array suffix
	 * strings; as we accumulate pointers and arrays we move the pointer
	 * or array string backwards so it will show the expected number of
	 * '*' or '[]' for the type. Up to BTF_SHOW_MAX_ITER levels of
	 * nesting of pointers, arrays and typedefs are supported as a
	 * precaution.
	 *
	 * We also want to get the typedef name while proceeding to resolve
	 * the type it points to so that we can add parentheses if it is a
	 * "typedef struct" etc.
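	 *
	 * For example (illustrative), a member "next" of type
	 * "struct sk_buff **" ends up rendered as
	 * ".next = (struct sk_buff **)", while a two-dimensional int
	 * array member "m" accumulates one "[]" per dimension and is
	 * rendered as ".m = (int[][])".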
	 */
	for (i = 0; i < BTF_SHOW_MAX_ITER; i++) {

		switch (BTF_INFO_KIND(t->info)) {
		case BTF_KIND_TYPEDEF:
			if (!name)
				name = btf_name_by_offset(show->btf,
							  t->name_off);
			kinds |= BTF_KIND_BIT(BTF_KIND_TYPEDEF);
			id = t->type;
			break;
		case BTF_KIND_ARRAY:
			kinds |= BTF_KIND_BIT(BTF_KIND_ARRAY);
			parens = "[";
			if (!t)
				return "";
			array = btf_type_array(t);
			if (array_suffix > array_suffixes)
				array_suffix -= 2;
			id = array->type;
			break;
		case BTF_KIND_PTR:
			kinds |= BTF_KIND_BIT(BTF_KIND_PTR);
			if (ptr_suffix > ptr_suffixes)
				ptr_suffix -= 1;
			id = t->type;
			break;
		default:
			id = 0;
			break;
		}
		if (!id)
			break;
		t = btf_type_skip_qualifiers(show->btf, id);
	}
	/* We may not be able to represent this type; bail to be safe */
	if (i == BTF_SHOW_MAX_ITER)
		return "";

	if (!name)
		name = btf_name_by_offset(show->btf, t->name_off);

	switch (BTF_INFO_KIND(t->info)) {
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
		prefix = BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT ?
			 "struct" : "union";
		/* if it's an array of struct/union, parens is already set */
		if (!(kinds & (BTF_KIND_BIT(BTF_KIND_ARRAY))))
			parens = "{";
		break;
	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
		prefix = "enum";
		break;
	default:
		break;
	}

	/* pointer does not require parens */
	if (kinds & BTF_KIND_BIT(BTF_KIND_PTR))
		parens = "";
	/* typedef does not require struct/union/enum prefix */
	if (kinds & BTF_KIND_BIT(BTF_KIND_TYPEDEF))
		prefix = "";

	if (!name)
		name = "";

	/* Even if we don't want type name info, we want parentheses etc */
	if (show->flags & BTF_SHOW_NONAME)
		snprintf(show->state.name, sizeof(show->state.name), "%s",
			 parens);
	else
		snprintf(show->state.name, sizeof(show->state.name),
			 "%s%s%s(%s%s%s%s%s%s)%s",
			 /* first 3 strings comprise ".member = " */
			 show_member ? "." : "",
			 show_member ? member : "",
			 show_member ? " = " : "",
			 /* ...next is our prefix (struct, enum, etc) */
			 prefix,
			 strlen(prefix) > 0 && strlen(name) > 0 ? " " : "",
			 /* ...this is the type name itself */
			 name,
			 /* ...suffixed by the appropriate '*', '[]' suffixes */
			 strlen(ptr_suffix) > 0 ? " " : "", ptr_suffix,
			 array_suffix, parens);

	return show->state.name;
}

static const char *__btf_show_indent(struct btf_show *show)
{
	const char *indents = "                                ";
	const char *indent = &indents[strlen(indents)];

	if ((indent - show->state.depth) >= indents)
		return indent - show->state.depth;
	return indents;
}

static const char *btf_show_indent(struct btf_show *show)
{
	return show->flags & BTF_SHOW_COMPACT ? "" : __btf_show_indent(show);
}

static const char *btf_show_newline(struct btf_show *show)
{
	return show->flags & BTF_SHOW_COMPACT ? "" : "\n";
}

static const char *btf_show_delim(struct btf_show *show)
{
	if (show->state.depth == 0)
		return "";

	if ((show->flags & BTF_SHOW_COMPACT) && show->state.type &&
	    BTF_INFO_KIND(show->state.type->info) == BTF_KIND_UNION)
		return "|";

	return ",";
}

__printf(2, 3) static void btf_show(struct btf_show *show, const char *fmt, ...)
{
	va_list args;

	if (!show->state.depth_check) {
		va_start(args, fmt);
		show->showfn(show, fmt, args);
		va_end(args);
	}
}

/* Macros are used here as btf_show_type_value[s]() prepends and appends
 * format specifiers to the format specifier passed in; these do the work of
 * adding indentation, delimiters etc while the caller simply has to specify
 * the type value(s) in the format specifier + value(s).
 */
#define btf_show_type_value(show, fmt, value)				       \
	do {								       \
		if ((value) != 0 || (show->flags & BTF_SHOW_ZERO) ||	       \
		    show->state.depth == 0) {				       \
			btf_show(show, "%s%s" fmt "%s%s",		       \
				 btf_show_indent(show),			       \
				 btf_show_name(show),			       \
				 value, btf_show_delim(show),		       \
				 btf_show_newline(show));		       \
			if (show->state.depth > show->state.depth_to_show)     \
				show->state.depth_to_show = show->state.depth; \
		}							       \
	} while (0)

#define btf_show_type_values(show, fmt, ...)				       \
	do {								       \
		btf_show(show, "%s%s" fmt "%s%s", btf_show_indent(show),       \
			 btf_show_name(show),				       \
			 __VA_ARGS__, btf_show_delim(show),		       \
			 btf_show_newline(show));			       \
		if (show->state.depth > show->state.depth_to_show)	       \
			show->state.depth_to_show = show->state.depth;	       \
	} while (0)

/* How much is left to copy to the safe buffer after @data? */
static int btf_show_obj_size_left(struct btf_show *show, void *data)
{
	return show->obj.head + show->obj.size - data;
}

/* Is object pointed to by @data of @size already copied to our safe buffer? */
static bool btf_show_obj_is_safe(struct btf_show *show, void *data, int size)
{
	return data >= show->obj.data &&
	       (data + size) < (show->obj.data + BTF_SHOW_OBJ_SAFE_SIZE);
}

/*
 * If object pointed to by @data of @size falls within our safe buffer, return
 * the equivalent pointer to the same safe data. Assumes
 * copy_from_kernel_nofault() has already happened and our safe buffer is
 * populated.
 */
static void *__btf_show_obj_safe(struct btf_show *show, void *data, int size)
{
	if (btf_show_obj_is_safe(show, data, size))
		return show->obj.safe + (data - show->obj.data);
	return NULL;
}

/*
 * Return a safe-to-access version of the data pointed to by @data.
 * We do this by copying the relevant amount of information
 * to the struct btf_show obj.safe buffer using copy_from_kernel_nofault().
 *
 * If BTF_SHOW_UNSAFE is specified, just return data as-is; no
 * safe copy is needed.
 *
 * Otherwise we need to determine if we have the required amount
 * of data, determined by the @data pointer and the size of the
 * largest base type we can encounter (represented by
 * BTF_SHOW_OBJ_BASE_TYPE_SIZE). Having that much data ensures
 * that we will be able to print some of the current object,
 * and if more is needed a copy will be triggered.
 * Some objects such as structs will not fit into the buffer;
 * in such cases additional copies when we iterate over their
 * members may be needed.
 *
 * btf_show_obj_safe() is used to return a safe buffer for
 * btf_show_start_type(); this ensures that as we recurse into
 * nested types we always have safe data for the given type.
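 * For example (illustrative), when showing an 8k "struct task_struct"
 * we end up refilling the 32-byte safe buffer roughly once per 32
 * bytes of members traversed.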
 * This approach is somewhat wasteful; it's possible for example
 * that when iterating over a large union we'll end up copying the
 * same data repeatedly, but the goal is safety not performance.
 * We use stack data as opposed to per-CPU buffers because the
 * iteration over a type can take some time, and preemption handling
 * would greatly complicate use of the safe buffer.
 */
static void *btf_show_obj_safe(struct btf_show *show,
			       const struct btf_type *t,
			       void *data)
{
	const struct btf_type *rt;
	int size_left, size;
	void *safe = NULL;

	if (show->flags & BTF_SHOW_UNSAFE)
		return data;

	rt = btf_resolve_size(show->btf, t, &size);
	if (IS_ERR(rt)) {
		show->state.status = PTR_ERR(rt);
		return NULL;
	}

	/*
	 * Is this the toplevel object? If so, set total object size and
	 * initialize pointers. Otherwise check if we still fall within
	 * our safe object data.
	 */
	if (show->state.depth == 0) {
		show->obj.size = size;
		show->obj.head = data;
	} else {
		/*
		 * If the size of the current object is > our remaining
		 * safe buffer we _may_ need to do a new copy. However
		 * consider the case of a nested struct; its size pushes
		 * us over the safe buffer limit, but showing any individual
		 * struct member does not. In such cases, we don't need
		 * to initiate a fresh copy yet; however we definitely need
		 * at least BTF_SHOW_OBJ_BASE_TYPE_SIZE bytes left
		 * in our buffer, regardless of the current object size.
		 * The logic here is that as we resolve types we will
		 * hit a base type at some point, and we need to be sure
		 * the next chunk of data is safely available to display
		 * that type info safely. We cannot rely on the size of
		 * the current object here because it may be much larger
		 * than our current buffer (e.g. task_struct is 8k).
		 * All we want to do here is ensure that we can print the
		 * next basic type, which we can if either
		 * - the current type size is within the safe buffer; or
		 * - at least BTF_SHOW_OBJ_BASE_TYPE_SIZE bytes are left in
		 *   the safe buffer.
		 */
		safe = __btf_show_obj_safe(show, data,
					   min(size,
					       BTF_SHOW_OBJ_BASE_TYPE_SIZE));
	}

	/*
	 * We need a new copy to our safe object, either because we haven't
	 * yet copied and are initializing safe data, or because the data
	 * we want falls outside the boundaries of the safe object.
	 */
	if (!safe) {
		size_left = btf_show_obj_size_left(show, data);
		if (size_left > BTF_SHOW_OBJ_SAFE_SIZE)
			size_left = BTF_SHOW_OBJ_SAFE_SIZE;
		show->state.status = copy_from_kernel_nofault(show->obj.safe,
							      data, size_left);
		if (!show->state.status) {
			show->obj.data = data;
			safe = show->obj.safe;
		}
	}

	return safe;
}

/*
 * Set the type we are starting to show and return a safe data pointer
 * to be used for showing the associated data.
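 *
 * Callers pair this with btf_show_end_type(); e.g. (an illustrative
 * sketch of a kind's show() callback):
 *
 *	safe_data = btf_show_start_type(show, t, type_id, data);
 *	if (!safe_data)
 *		return;
 *	btf_show_type_value(show, "%d", *(int *)safe_data);
 *	btf_show_end_type(show);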
 */
static void *btf_show_start_type(struct btf_show *show,
				 const struct btf_type *t,
				 u32 type_id, void *data)
{
	show->state.type = t;
	show->state.type_id = type_id;
	show->state.name[0] = '\0';

	return btf_show_obj_safe(show, t, data);
}

static void btf_show_end_type(struct btf_show *show)
{
	show->state.type = NULL;
	show->state.type_id = 0;
	show->state.name[0] = '\0';
}

static void *btf_show_start_aggr_type(struct btf_show *show,
				      const struct btf_type *t,
				      u32 type_id, void *data)
{
	void *safe_data = btf_show_start_type(show, t, type_id, data);

	if (!safe_data)
		return safe_data;

	btf_show(show, "%s%s%s", btf_show_indent(show),
		 btf_show_name(show),
		 btf_show_newline(show));
	show->state.depth++;
	return safe_data;
}

static void btf_show_end_aggr_type(struct btf_show *show,
				   const char *suffix)
{
	show->state.depth--;
	btf_show(show, "%s%s%s%s", btf_show_indent(show), suffix,
		 btf_show_delim(show), btf_show_newline(show));
	btf_show_end_type(show);
}

static void btf_show_start_member(struct btf_show *show,
				  const struct btf_member *m)
{
	show->state.member = m;
}

static void btf_show_start_array_member(struct btf_show *show)
{
	show->state.array_member = 1;
	btf_show_start_member(show, NULL);
}

static void btf_show_end_member(struct btf_show *show)
{
	show->state.member = NULL;
}

static void btf_show_end_array_member(struct btf_show *show)
{
	show->state.array_member = 0;
	btf_show_end_member(show);
}

static void *btf_show_start_array_type(struct btf_show *show,
				       const struct btf_type *t,
				       u32 type_id,
				       u16 array_encoding,
				       void *data)
{
	show->state.array_encoding = array_encoding;
	show->state.array_terminated = 0;
	return btf_show_start_aggr_type(show, t, type_id, data);
}

static void btf_show_end_array_type(struct btf_show *show)
{
	show->state.array_encoding = 0;
	show->state.array_terminated = 0;
	btf_show_end_aggr_type(show, "]");
}

static void *btf_show_start_struct_type(struct btf_show *show,
					const struct btf_type *t,
					u32 type_id,
					void *data)
{
	return btf_show_start_aggr_type(show, t, type_id, data);
}

static void btf_show_end_struct_type(struct btf_show *show)
{
	btf_show_end_aggr_type(show, "}");
}

__printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log,
					      const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}

__printf(2, 3) static void btf_verifier_log(struct btf_verifier_env *env,
					    const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}

__printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env,
						   const struct btf_type *t,
						   bool log_details,
						   const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	u8 kind = BTF_INFO_KIND(t->info);
	struct btf *btf = env->btf;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	/* btf verifier prints all types it is processing via
	 * btf_verifier_log_type(..., fmt = NULL).
	 * Skip those prints for in-kernel BTF verification.
	 */
	if (log->level == BPF_LOG_KERNEL && !fmt)
		return;

	__btf_verifier_log(log, "[%u] %s %s%s",
			   env->log_type_id,
			   btf_kind_str[kind],
			   __btf_name_by_offset(btf, t->name_off),
			   log_details ? " " : "");

	if (log_details)
		btf_type_ops(t)->log_details(env, t);

	if (fmt && *fmt) {
		__btf_verifier_log(log, " ");
		va_start(args, fmt);
		bpf_verifier_vlog(log, fmt, args);
		va_end(args);
	}

	__btf_verifier_log(log, "\n");
}

#define btf_verifier_log_type(env, t, ...) \
	__btf_verifier_log_type((env), (t), true, __VA_ARGS__)
#define btf_verifier_log_basic(env, t, ...) \
	__btf_verifier_log_type((env), (t), false, __VA_ARGS__)

__printf(4, 5)
static void btf_verifier_log_member(struct btf_verifier_env *env,
				    const struct btf_type *struct_type,
				    const struct btf_member *member,
				    const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	struct btf *btf = env->btf;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	if (log->level == BPF_LOG_KERNEL && !fmt)
		return;
	/* The CHECK_META phase already did a btf dump.
	 *
	 * If member is logged again, it must hit an error in
	 * parsing this member. It is useful to print out which
	 * struct this member belongs to.
	 */
	if (env->phase != CHECK_META)
		btf_verifier_log_type(env, struct_type, NULL);

	if (btf_type_kflag(struct_type))
		__btf_verifier_log(log,
				   "\t%s type_id=%u bitfield_size=%u bits_offset=%u",
				   __btf_name_by_offset(btf, member->name_off),
				   member->type,
				   BTF_MEMBER_BITFIELD_SIZE(member->offset),
				   BTF_MEMBER_BIT_OFFSET(member->offset));
	else
		__btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u",
				   __btf_name_by_offset(btf, member->name_off),
				   member->type, member->offset);

	if (fmt && *fmt) {
		__btf_verifier_log(log, " ");
		va_start(args, fmt);
		bpf_verifier_vlog(log, fmt, args);
		va_end(args);
	}

	__btf_verifier_log(log, "\n");
}

__printf(4, 5)
static void btf_verifier_log_vsi(struct btf_verifier_env *env,
				 const struct btf_type *datasec_type,
				 const struct btf_var_secinfo *vsi,
				 const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;
	if (log->level == BPF_LOG_KERNEL && !fmt)
		return;
	if (env->phase != CHECK_META)
		btf_verifier_log_type(env, datasec_type, NULL);

	__btf_verifier_log(log, "\t type_id=%u offset=%u size=%u",
			   vsi->type, vsi->offset, vsi->size);
	if (fmt && *fmt) {
		__btf_verifier_log(log, " ");
		va_start(args, fmt);
		bpf_verifier_vlog(log, fmt, args);
		va_end(args);
	}

	__btf_verifier_log(log, "\n");
}

static void btf_verifier_log_hdr(struct btf_verifier_env *env,
				 u32 btf_data_size)
{
	struct bpf_verifier_log *log = &env->log;
	const struct btf *btf = env->btf;
	const struct btf_header *hdr;

	if (!bpf_verifier_log_needed(log))
		return;

	if (log->level == BPF_LOG_KERNEL)
		return;
	hdr = &btf->hdr;
	__btf_verifier_log(log, "magic: 0x%x\n", hdr->magic);
	__btf_verifier_log(log, "version: %u\n", hdr->version);
	__btf_verifier_log(log, "flags: 0x%x\n", hdr->flags);
	__btf_verifier_log(log, "hdr_len: %u\n", hdr->hdr_len);
	__btf_verifier_log(log, "type_off: %u\n", hdr->type_off);
	__btf_verifier_log(log, "type_len: %u\n", hdr->type_len);
	__btf_verifier_log(log, "str_off: %u\n", hdr->str_off);
	__btf_verifier_log(log, "str_len: %u\n", hdr->str_len);
	__btf_verifier_log(log, "btf_total_size: %u\n", btf_data_size);
}

static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t)
{
	struct btf *btf = env->btf;

	if (btf->types_size == btf->nr_types) {
		/* Expand 'types' array */

		struct btf_type **new_types;
		u32 expand_by, new_size;

		if (btf->start_id + btf->types_size == BTF_MAX_TYPE) {
			btf_verifier_log(env, "Exceeded max num of types");
			return -E2BIG;
		}

		expand_by = max_t(u32, btf->types_size >> 2, 16);
		new_size = min_t(u32, BTF_MAX_TYPE,
				 btf->types_size + expand_by);

		new_types = kvcalloc(new_size, sizeof(*new_types),
				     GFP_KERNEL | __GFP_NOWARN);
		if (!new_types)
			return -ENOMEM;

		if (btf->nr_types == 0) {
			if (!btf->base_btf) {
				/* lazily init VOID type */
				new_types[0] = &btf_void;
				btf->nr_types++;
			}
		} else {
			memcpy(new_types, btf->types,
			       sizeof(*btf->types) * btf->nr_types);
		}

		kvfree(btf->types);
		btf->types = new_types;
		btf->types_size = new_size;
	}

	btf->types[btf->nr_types++] = t;

	return 0;
}

static int btf_alloc_id(struct btf *btf)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&btf_idr_lock);
	id = idr_alloc_cyclic(&btf_idr, btf, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		btf->id = id;
	spin_unlock_bh(&btf_idr_lock);
	idr_preload_end();

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

static void btf_free_id(struct btf *btf)
{
	unsigned long flags;

	/*
	 * In map-in-map, calling map_delete_elem() on the outer
	 * map will call bpf_map_put on the inner map.
	 * It will then eventually call btf_free_id()
	 * on the inner map. Some of the map_delete_elem()
	 * implementations may have irqs disabled, so
	 * we need to use the _irqsave() version instead
	 * of the _bh() version.
	 */
	spin_lock_irqsave(&btf_idr_lock, flags);
	idr_remove(&btf_idr, btf->id);
	spin_unlock_irqrestore(&btf_idr_lock, flags);
}

static void btf_free_kfunc_set_tab(struct btf *btf)
{
	struct btf_kfunc_set_tab *tab = btf->kfunc_set_tab;
	int hook, type;

	if (!tab)
		return;
	/* For module BTF, we directly assign the sets being registered, so
	 * there is nothing to free except kfunc_set_tab.
	 */
	if (btf_is_module(btf))
		goto free_tab;
	for (hook = 0; hook < ARRAY_SIZE(tab->sets); hook++) {
		for (type = 0; type < ARRAY_SIZE(tab->sets[0]); type++)
			kfree(tab->sets[hook][type]);
	}
free_tab:
	kfree(tab);
	btf->kfunc_set_tab = NULL;
}

static void btf_free_dtor_kfunc_tab(struct btf *btf)
{
	struct btf_id_dtor_kfunc_tab *tab = btf->dtor_kfunc_tab;

	if (!tab)
		return;
	kfree(tab);
	btf->dtor_kfunc_tab = NULL;
}

static void btf_free(struct btf *btf)
{
	btf_free_dtor_kfunc_tab(btf);
	btf_free_kfunc_set_tab(btf);
	kvfree(btf->types);
	kvfree(btf->resolved_sizes);
	kvfree(btf->resolved_ids);
	kvfree(btf->data);
	kfree(btf);
}

static void btf_free_rcu(struct rcu_head *rcu)
{
	struct btf *btf = container_of(rcu, struct btf, rcu);

	btf_free(btf);
}

void btf_get(struct btf *btf)
{
	refcount_inc(&btf->refcnt);
}

void btf_put(struct btf *btf)
{
	if (btf && refcount_dec_and_test(&btf->refcnt)) {
		btf_free_id(btf);
		call_rcu(&btf->rcu, btf_free_rcu);
	}
}

static int env_resolve_init(struct btf_verifier_env *env)
{
	struct btf *btf = env->btf;
	u32 nr_types = btf->nr_types;
	u32 *resolved_sizes = NULL;
	u32 *resolved_ids = NULL;
	u8 *visit_states = NULL;

	resolved_sizes = kvcalloc(nr_types, sizeof(*resolved_sizes),
				  GFP_KERNEL | __GFP_NOWARN);
	if (!resolved_sizes)
		goto nomem;

	resolved_ids = kvcalloc(nr_types, sizeof(*resolved_ids),
				GFP_KERNEL | __GFP_NOWARN);
	if (!resolved_ids)
		goto nomem;

	visit_states = kvcalloc(nr_types, sizeof(*visit_states),
				GFP_KERNEL | __GFP_NOWARN);
	if (!visit_states)
		goto nomem;

	btf->resolved_sizes = resolved_sizes;
	btf->resolved_ids = resolved_ids;
	env->visit_states = visit_states;

	return 0;

nomem:
	kvfree(resolved_sizes);
	kvfree(resolved_ids);
	kvfree(visit_states);
	return -ENOMEM;
}

static void btf_verifier_env_free(struct btf_verifier_env *env)
{
	kvfree(env->visit_states);
	kfree(env);
}

static bool env_type_is_resolve_sink(const struct btf_verifier_env *env,
				     const struct btf_type *next_type)
{
	switch (env->resolve_mode) {
	case RESOLVE_TBD:
		/* int, enum or void is a sink */
		return !btf_type_needs_resolve(next_type);
	case RESOLVE_PTR:
		/* int, enum, void, struct, array, func or func_proto is a sink
		 * for ptr
		 */
		return !btf_type_is_modifier(next_type) &&
		       !btf_type_is_ptr(next_type);
	case RESOLVE_STRUCT_OR_ARRAY:
		/* int, enum, void, ptr, func or func_proto is a sink
		 * for struct and array
		 */
		return !btf_type_is_modifier(next_type) &&
		       !btf_type_is_array(next_type) &&
		       !btf_type_is_struct(next_type);
	default:
		BUG();
	}
}

static bool env_type_is_resolved(const struct btf_verifier_env *env,
				 u32 type_id)
{
	/* base BTF types should be resolved by now */
	if (type_id < env->btf->start_id)
		return true;

	return env->visit_states[type_id - env->btf->start_id] == RESOLVED;
}

static int env_stack_push(struct btf_verifier_env *env,
			  const struct btf_type *t, u32 type_id)
{
	const struct btf *btf = env->btf;
	struct resolve_vertex *v;

	if (env->top_stack == MAX_RESOLVE_DEPTH)
		return -E2BIG;

	if (type_id < btf->start_id ||
	    env->visit_states[type_id - btf->start_id] != NOT_VISITED)
		return -EEXIST;

	env->visit_states[type_id - btf->start_id] = VISITED;

	v = &env->stack[env->top_stack++];
	v->t = t;
	v->type_id = type_id;
	v->next_member = 0;

	if (env->resolve_mode == RESOLVE_TBD) {
		if (btf_type_is_ptr(t))
			env->resolve_mode = RESOLVE_PTR;
		else if (btf_type_is_struct(t) || btf_type_is_array(t))
			env->resolve_mode = RESOLVE_STRUCT_OR_ARRAY;
	}

	return 0;
}

static void env_stack_set_next_member(struct btf_verifier_env *env,
				      u16 next_member)
{
	env->stack[env->top_stack - 1].next_member = next_member;
}

static void env_stack_pop_resolved(struct btf_verifier_env *env,
				   u32 resolved_type_id,
				   u32 resolved_size)
{
	u32 type_id = env->stack[--(env->top_stack)].type_id;
	struct btf *btf = env->btf;

	type_id -= btf->start_id; /* adjust to local type id */
	btf->resolved_sizes[type_id] = resolved_size;
	btf->resolved_ids[type_id] = resolved_type_id;
	env->visit_states[type_id] = RESOLVED;
}

static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env)
{
	return env->top_stack ? &env->stack[env->top_stack - 1] : NULL;
}

/* Resolve the size of a passed-in "type"
 *
 * type: is an array (e.g. u32 array[x][y])
 * return type: type "u32[x][y]", i.e. BTF_KIND_ARRAY,
 * *type_size: (x * y * sizeof(u32)).  Hence, *type_size always
 *             corresponds to the return type.
 * *elem_type: u32
 * *elem_id: id of u32
 * *total_nelems: (x * y).  Hence, individual elem size is
 *                (*type_size / *total_nelems)
 * *type_id: id of type if it's changed within the function, 0 if not
 *
 * type: is not an array (e.g. const struct X)
 * return type: type "struct X"
 * *type_size: sizeof(struct X)
 * *elem_type: same as return type ("struct X")
 * *elem_id: 0
 * *total_nelems: 1
 * *type_id: id of type if it's changed within the function, 0 if not
 */
static const struct btf_type *
__btf_resolve_size(const struct btf *btf, const struct btf_type *type,
		   u32 *type_size, const struct btf_type **elem_type,
		   u32 *elem_id, u32 *total_nelems, u32 *type_id)
{
	const struct btf_type *array_type = NULL;
	const struct btf_array *array = NULL;
	u32 i, size, nelems = 1, id = 0;

	for (i = 0; i < MAX_RESOLVE_DEPTH; i++) {
		switch (BTF_INFO_KIND(type->info)) {
		/* type->size can be used */
		case BTF_KIND_INT:
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
		case BTF_KIND_ENUM:
		case BTF_KIND_FLOAT:
		case BTF_KIND_ENUM64:
			size = type->size;
			goto resolved;

		case BTF_KIND_PTR:
			size = sizeof(void *);
			goto resolved;

		/* Modifiers */
		case BTF_KIND_TYPEDEF:
		case BTF_KIND_VOLATILE:
		case BTF_KIND_CONST:
		case BTF_KIND_RESTRICT:
		case BTF_KIND_TYPE_TAG:
			id = type->type;
			type = btf_type_by_id(btf, type->type);
			break;

		case BTF_KIND_ARRAY:
			if (!array_type)
				array_type = type;
			array = btf_type_array(type);
			if (nelems && array->nelems > U32_MAX / nelems)
				return ERR_PTR(-EINVAL);
			nelems *= array->nelems;
			type = btf_type_by_id(btf, array->type);
			break;

		/* type without size */
		default:
			return ERR_PTR(-EINVAL);
		}
	}

	return ERR_PTR(-EINVAL);

resolved:
	if (nelems && size > U32_MAX / nelems)
		return ERR_PTR(-EINVAL);

	*type_size = nelems * size;
	if (total_nelems)
		*total_nelems = nelems;
	if (elem_type)
		*elem_type = type;
	if (elem_id)
		*elem_id = array ? array->type : 0;
	if (type_id && id)
		*type_id = id;

	return array_type ? : type;
}

const struct btf_type *
btf_resolve_size(const struct btf *btf, const struct btf_type *type,
		 u32 *type_size)
{
	return __btf_resolve_size(btf, type, type_size, NULL, NULL, NULL, NULL);
}

static u32 btf_resolved_type_id(const struct btf *btf, u32 type_id)
{
	while (type_id < btf->start_id)
		btf = btf->base_btf;

	return btf->resolved_ids[type_id - btf->start_id];
}

/* The input param "type_id" must point to a needs_resolve type */
static const struct btf_type *btf_type_id_resolve(const struct btf *btf,
						  u32 *type_id)
{
	*type_id = btf_resolved_type_id(btf, *type_id);
	return btf_type_by_id(btf, *type_id);
}

static u32 btf_resolved_type_size(const struct btf *btf, u32 type_id)
{
	while (type_id < btf->start_id)
		btf = btf->base_btf;

	return btf->resolved_sizes[type_id - btf->start_id];
}

const struct btf_type *btf_type_id_size(const struct btf *btf,
					u32 *type_id, u32 *ret_size)
{
	const struct btf_type *size_type;
	u32 size_type_id = *type_id;
	u32 size = 0;

	size_type = btf_type_by_id(btf, size_type_id);
	if (btf_type_nosize_or_null(size_type))
		return NULL;

	if (btf_type_has_size(size_type)) {
		size = size_type->size;
	} else if (btf_type_is_array(size_type)) {
		size = btf_resolved_type_size(btf, size_type_id);
	} else if (btf_type_is_ptr(size_type)) {
		size = sizeof(void *);
	} else {
		if (WARN_ON_ONCE(!btf_type_is_modifier(size_type) &&
				 !btf_type_is_var(size_type)))
			return NULL;

		size_type_id = btf_resolved_type_id(btf, size_type_id);
		size_type = btf_type_by_id(btf, size_type_id);
		if (btf_type_nosize_or_null(size_type))
			return NULL;
		else if (btf_type_has_size(size_type))
			size = size_type->size;
		else if (btf_type_is_array(size_type))
			size = btf_resolved_type_size(btf, size_type_id);
		else if (btf_type_is_ptr(size_type))
			size = sizeof(void *);
		else
			return NULL;
	}

	*type_id = size_type_id;
	if (ret_size)
		*ret_size = size;

	return size_type;
}

static int btf_df_check_member(struct btf_verifier_env *env,
			       const struct btf_type *struct_type,
			       const struct btf_member *member,
			       const struct btf_type *member_type)
{
	btf_verifier_log_basic(env, struct_type,
			       "Unsupported check_member");
	return -EINVAL;
}

static int btf_df_check_kflag_member(struct btf_verifier_env *env,
				     const struct btf_type *struct_type,
				     const struct btf_member *member,
				     const struct btf_type *member_type)
{
	btf_verifier_log_basic(env, struct_type,
			       "Unsupported check_kflag_member");
	return -EINVAL;
}

/* Used for ptr, array, struct/union and float type members.
 * int, enum and modifier types have their specific callback functions.
 */
static int btf_generic_check_kflag_member(struct btf_verifier_env *env,
					  const struct btf_type *struct_type,
					  const struct btf_member *member,
					  const struct btf_type *member_type)
{
	if (BTF_MEMBER_BITFIELD_SIZE(member->offset)) {
		btf_verifier_log_member(env, struct_type, member,
					"Invalid member bitfield_size");
		return -EINVAL;
	}

	/* bitfield size is 0, so member->offset represents bit offset only.
	 * It is safe to call non-kflag check_member variants.
2008 */ 2009 return btf_type_ops(member_type)->check_member(env, struct_type, 2010 member, 2011 member_type); 2012 } 2013 2014 static int btf_df_resolve(struct btf_verifier_env *env, 2015 const struct resolve_vertex *v) 2016 { 2017 btf_verifier_log_basic(env, v->t, "Unsupported resolve"); 2018 return -EINVAL; 2019 } 2020 2021 static void btf_df_show(const struct btf *btf, const struct btf_type *t, 2022 u32 type_id, void *data, u8 bits_offsets, 2023 struct btf_show *show) 2024 { 2025 btf_show(show, "<unsupported kind:%u>", BTF_INFO_KIND(t->info)); 2026 } 2027 2028 static int btf_int_check_member(struct btf_verifier_env *env, 2029 const struct btf_type *struct_type, 2030 const struct btf_member *member, 2031 const struct btf_type *member_type) 2032 { 2033 u32 int_data = btf_type_int(member_type); 2034 u32 struct_bits_off = member->offset; 2035 u32 struct_size = struct_type->size; 2036 u32 nr_copy_bits; 2037 u32 bytes_offset; 2038 2039 if (U32_MAX - struct_bits_off < BTF_INT_OFFSET(int_data)) { 2040 btf_verifier_log_member(env, struct_type, member, 2041 "bits_offset exceeds U32_MAX"); 2042 return -EINVAL; 2043 } 2044 2045 struct_bits_off += BTF_INT_OFFSET(int_data); 2046 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); 2047 nr_copy_bits = BTF_INT_BITS(int_data) + 2048 BITS_PER_BYTE_MASKED(struct_bits_off); 2049 2050 if (nr_copy_bits > BITS_PER_U128) { 2051 btf_verifier_log_member(env, struct_type, member, 2052 "nr_copy_bits exceeds 128"); 2053 return -EINVAL; 2054 } 2055 2056 if (struct_size < bytes_offset || 2057 struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) { 2058 btf_verifier_log_member(env, struct_type, member, 2059 "Member exceeds struct_size"); 2060 return -EINVAL; 2061 } 2062 2063 return 0; 2064 } 2065 2066 static int btf_int_check_kflag_member(struct btf_verifier_env *env, 2067 const struct btf_type *struct_type, 2068 const struct btf_member *member, 2069 const struct btf_type *member_type) 2070 { 2071 u32 struct_bits_off, nr_bits, nr_int_data_bits, bytes_offset; 2072 u32 int_data = btf_type_int(member_type); 2073 u32 struct_size = struct_type->size; 2074 u32 nr_copy_bits; 2075 2076 /* a regular int type is required for the kflag int member */ 2077 if (!btf_type_int_is_regular(member_type)) { 2078 btf_verifier_log_member(env, struct_type, member, 2079 "Invalid member base type"); 2080 return -EINVAL; 2081 } 2082 2083 /* check sanity of bitfield size */ 2084 nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset); 2085 struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset); 2086 nr_int_data_bits = BTF_INT_BITS(int_data); 2087 if (!nr_bits) { 2088 /* Not a bitfield member, member offset must be at byte 2089 * boundary. 
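		 * E.g. a plain "int x;" member at byte 4 arrives here with
		 * bit_offset 32 and bitfield_size 0, so nr_bits falls back
		 * to BTF_INT_BITS() of the int type (32) below.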
2090 */ 2091 if (BITS_PER_BYTE_MASKED(struct_bits_off)) { 2092 btf_verifier_log_member(env, struct_type, member, 2093 "Invalid member offset"); 2094 return -EINVAL; 2095 } 2096 2097 nr_bits = nr_int_data_bits; 2098 } else if (nr_bits > nr_int_data_bits) { 2099 btf_verifier_log_member(env, struct_type, member, 2100 "Invalid member bitfield_size"); 2101 return -EINVAL; 2102 } 2103 2104 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); 2105 nr_copy_bits = nr_bits + BITS_PER_BYTE_MASKED(struct_bits_off); 2106 if (nr_copy_bits > BITS_PER_U128) { 2107 btf_verifier_log_member(env, struct_type, member, 2108 "nr_copy_bits exceeds 128"); 2109 return -EINVAL; 2110 } 2111 2112 if (struct_size < bytes_offset || 2113 struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) { 2114 btf_verifier_log_member(env, struct_type, member, 2115 "Member exceeds struct_size"); 2116 return -EINVAL; 2117 } 2118 2119 return 0; 2120 } 2121 2122 static s32 btf_int_check_meta(struct btf_verifier_env *env, 2123 const struct btf_type *t, 2124 u32 meta_left) 2125 { 2126 u32 int_data, nr_bits, meta_needed = sizeof(int_data); 2127 u16 encoding; 2128 2129 if (meta_left < meta_needed) { 2130 btf_verifier_log_basic(env, t, 2131 "meta_left:%u meta_needed:%u", 2132 meta_left, meta_needed); 2133 return -EINVAL; 2134 } 2135 2136 if (btf_type_vlen(t)) { 2137 btf_verifier_log_type(env, t, "vlen != 0"); 2138 return -EINVAL; 2139 } 2140 2141 if (btf_type_kflag(t)) { 2142 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 2143 return -EINVAL; 2144 } 2145 2146 int_data = btf_type_int(t); 2147 if (int_data & ~BTF_INT_MASK) { 2148 btf_verifier_log_basic(env, t, "Invalid int_data:%x", 2149 int_data); 2150 return -EINVAL; 2151 } 2152 2153 nr_bits = BTF_INT_BITS(int_data) + BTF_INT_OFFSET(int_data); 2154 2155 if (nr_bits > BITS_PER_U128) { 2156 btf_verifier_log_type(env, t, "nr_bits exceeds %zu", 2157 BITS_PER_U128); 2158 return -EINVAL; 2159 } 2160 2161 if (BITS_ROUNDUP_BYTES(nr_bits) > t->size) { 2162 btf_verifier_log_type(env, t, "nr_bits exceeds type_size"); 2163 return -EINVAL; 2164 } 2165 2166 /* 2167 * Only one of the encoding bits is allowed and it 2168 * should be sufficient for the pretty print purpose (i.e. decoding). 2169 * Multiple bits can be allowed later if it is found 2170 * to be insufficient. 2171 */ 2172 encoding = BTF_INT_ENCODING(int_data); 2173 if (encoding && 2174 encoding != BTF_INT_SIGNED && 2175 encoding != BTF_INT_CHAR && 2176 encoding != BTF_INT_BOOL) { 2177 btf_verifier_log_type(env, t, "Unsupported encoding"); 2178 return -ENOTSUPP; 2179 } 2180 2181 btf_verifier_log_type(env, t, NULL); 2182 2183 return meta_needed; 2184 } 2185 2186 static void btf_int_log(struct btf_verifier_env *env, 2187 const struct btf_type *t) 2188 { 2189 int int_data = btf_type_int(t); 2190 2191 btf_verifier_log(env, 2192 "size=%u bits_offset=%u nr_bits=%u encoding=%s", 2193 t->size, BTF_INT_OFFSET(int_data), 2194 BTF_INT_BITS(int_data), 2195 btf_int_encoding_str(BTF_INT_ENCODING(int_data))); 2196 } 2197 2198 static void btf_int128_print(struct btf_show *show, void *data) 2199 { 2200 /* data points to a __int128 number. 
	 * Suppose
	 *     int128_num = *(__int128 *)data;
	 * The formulas below show what upper_num and lower_num represent:
	 *     upper_num = int128_num >> 64;
	 *     lower_num = int128_num & 0xffffffffFFFFFFFFULL;
	 */
	u64 upper_num, lower_num;

#ifdef __BIG_ENDIAN_BITFIELD
	upper_num = *(u64 *)data;
	lower_num = *(u64 *)(data + 8);
#else
	upper_num = *(u64 *)(data + 8);
	lower_num = *(u64 *)data;
#endif
	if (upper_num == 0)
		btf_show_type_value(show, "0x%llx", lower_num);
	else
		btf_show_type_values(show, "0x%llx%016llx", upper_num,
				     lower_num);
}

static void btf_int128_shift(u64 *print_num, u16 left_shift_bits,
			     u16 right_shift_bits)
{
	u64 upper_num, lower_num;

#ifdef __BIG_ENDIAN_BITFIELD
	upper_num = print_num[0];
	lower_num = print_num[1];
#else
	upper_num = print_num[1];
	lower_num = print_num[0];
#endif

	/* shake out the unneeded bits with shift/or operations */
	if (left_shift_bits >= 64) {
		upper_num = lower_num << (left_shift_bits - 64);
		lower_num = 0;
	} else {
		upper_num = (upper_num << left_shift_bits) |
			    (lower_num >> (64 - left_shift_bits));
		lower_num = lower_num << left_shift_bits;
	}

	if (right_shift_bits >= 64) {
		lower_num = upper_num >> (right_shift_bits - 64);
		upper_num = 0;
	} else {
		lower_num = (lower_num >> right_shift_bits) |
			    (upper_num << (64 - right_shift_bits));
		upper_num = upper_num >> right_shift_bits;
	}

#ifdef __BIG_ENDIAN_BITFIELD
	print_num[0] = upper_num;
	print_num[1] = lower_num;
#else
	print_num[0] = lower_num;
	print_num[1] = upper_num;
#endif
}

static void btf_bitfield_show(void *data, u8 bits_offset,
			      u8 nr_bits, struct btf_show *show)
{
	u16 left_shift_bits, right_shift_bits;
	u8 nr_copy_bytes;
	u8 nr_copy_bits;
	u64 print_num[2] = {};

	nr_copy_bits = nr_bits + bits_offset;
	nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);

	memcpy(print_num, data, nr_copy_bytes);

#ifdef __BIG_ENDIAN_BITFIELD
	left_shift_bits = bits_offset;
#else
	left_shift_bits = BITS_PER_U128 - nr_copy_bits;
#endif
	right_shift_bits = BITS_PER_U128 - nr_bits;

	btf_int128_shift(print_num, left_shift_bits, right_shift_bits);
	btf_int128_print(show, print_num);
}


static void btf_int_bits_show(const struct btf *btf,
			      const struct btf_type *t,
			      void *data, u8 bits_offset,
			      struct btf_show *show)
{
	u32 int_data = btf_type_int(t);
	u8 nr_bits = BTF_INT_BITS(int_data);
	u8 total_bits_offset;

	/*
	 * bits_offset is at most 7.
	 * BTF_INT_OFFSET() cannot exceed 128 bits.
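	 * A worked example of the split below: with bits_offset 3 and
	 * BTF_INT_OFFSET() 13, total_bits_offset is 16, so data advances
	 * BITS_ROUNDDOWN_BYTES(16) == 2 bytes and the residual bits_offset
	 * becomes BITS_PER_BYTE_MASKED(16) == 0.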
2301 */ 2302 total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data); 2303 data += BITS_ROUNDDOWN_BYTES(total_bits_offset); 2304 bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset); 2305 btf_bitfield_show(data, bits_offset, nr_bits, show); 2306 } 2307 2308 static void btf_int_show(const struct btf *btf, const struct btf_type *t, 2309 u32 type_id, void *data, u8 bits_offset, 2310 struct btf_show *show) 2311 { 2312 u32 int_data = btf_type_int(t); 2313 u8 encoding = BTF_INT_ENCODING(int_data); 2314 bool sign = encoding & BTF_INT_SIGNED; 2315 u8 nr_bits = BTF_INT_BITS(int_data); 2316 void *safe_data; 2317 2318 safe_data = btf_show_start_type(show, t, type_id, data); 2319 if (!safe_data) 2320 return; 2321 2322 if (bits_offset || BTF_INT_OFFSET(int_data) || 2323 BITS_PER_BYTE_MASKED(nr_bits)) { 2324 btf_int_bits_show(btf, t, safe_data, bits_offset, show); 2325 goto out; 2326 } 2327 2328 switch (nr_bits) { 2329 case 128: 2330 btf_int128_print(show, safe_data); 2331 break; 2332 case 64: 2333 if (sign) 2334 btf_show_type_value(show, "%lld", *(s64 *)safe_data); 2335 else 2336 btf_show_type_value(show, "%llu", *(u64 *)safe_data); 2337 break; 2338 case 32: 2339 if (sign) 2340 btf_show_type_value(show, "%d", *(s32 *)safe_data); 2341 else 2342 btf_show_type_value(show, "%u", *(u32 *)safe_data); 2343 break; 2344 case 16: 2345 if (sign) 2346 btf_show_type_value(show, "%d", *(s16 *)safe_data); 2347 else 2348 btf_show_type_value(show, "%u", *(u16 *)safe_data); 2349 break; 2350 case 8: 2351 if (show->state.array_encoding == BTF_INT_CHAR) { 2352 /* check for null terminator */ 2353 if (show->state.array_terminated) 2354 break; 2355 if (*(char *)data == '\0') { 2356 show->state.array_terminated = 1; 2357 break; 2358 } 2359 if (isprint(*(char *)data)) { 2360 btf_show_type_value(show, "'%c'", 2361 *(char *)safe_data); 2362 break; 2363 } 2364 } 2365 if (sign) 2366 btf_show_type_value(show, "%d", *(s8 *)safe_data); 2367 else 2368 btf_show_type_value(show, "%u", *(u8 *)safe_data); 2369 break; 2370 default: 2371 btf_int_bits_show(btf, t, safe_data, bits_offset, show); 2372 break; 2373 } 2374 out: 2375 btf_show_end_type(show); 2376 } 2377 2378 static const struct btf_kind_operations int_ops = { 2379 .check_meta = btf_int_check_meta, 2380 .resolve = btf_df_resolve, 2381 .check_member = btf_int_check_member, 2382 .check_kflag_member = btf_int_check_kflag_member, 2383 .log_details = btf_int_log, 2384 .show = btf_int_show, 2385 }; 2386 2387 static int btf_modifier_check_member(struct btf_verifier_env *env, 2388 const struct btf_type *struct_type, 2389 const struct btf_member *member, 2390 const struct btf_type *member_type) 2391 { 2392 const struct btf_type *resolved_type; 2393 u32 resolved_type_id = member->type; 2394 struct btf_member resolved_member; 2395 struct btf *btf = env->btf; 2396 2397 resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL); 2398 if (!resolved_type) { 2399 btf_verifier_log_member(env, struct_type, member, 2400 "Invalid member"); 2401 return -EINVAL; 2402 } 2403 2404 resolved_member = *member; 2405 resolved_member.type = resolved_type_id; 2406 2407 return btf_type_ops(resolved_type)->check_member(env, struct_type, 2408 &resolved_member, 2409 resolved_type); 2410 } 2411 2412 static int btf_modifier_check_kflag_member(struct btf_verifier_env *env, 2413 const struct btf_type *struct_type, 2414 const struct btf_member *member, 2415 const struct btf_type *member_type) 2416 { 2417 const struct btf_type *resolved_type; 2418 u32 resolved_type_id = member->type; 2419 struct btf_member 
resolved_member;
	struct btf *btf = env->btf;

	resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
	if (!resolved_type) {
		btf_verifier_log_member(env, struct_type, member,
					"Invalid member");
		return -EINVAL;
	}

	resolved_member = *member;
	resolved_member.type = resolved_type_id;

	return btf_type_ops(resolved_type)->check_kflag_member(env, struct_type,
							       &resolved_member,
							       resolved_type);
}

static int btf_ptr_check_member(struct btf_verifier_env *env,
				const struct btf_type *struct_type,
				const struct btf_member *member,
				const struct btf_type *member_type)
{
	u32 struct_size, struct_bits_off, bytes_offset;

	struct_size = struct_type->size;
	struct_bits_off = member->offset;
	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);

	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
		btf_verifier_log_member(env, struct_type, member,
					"Member is not byte aligned");
		return -EINVAL;
	}

	if (struct_size - bytes_offset < sizeof(void *)) {
		btf_verifier_log_member(env, struct_type, member,
					"Member exceeds struct_size");
		return -EINVAL;
	}

	return 0;
}

static int btf_ref_type_check_meta(struct btf_verifier_env *env,
				   const struct btf_type *t,
				   u32 meta_left)
{
	const char *value;

	if (btf_type_vlen(t)) {
		btf_verifier_log_type(env, t, "vlen != 0");
		return -EINVAL;
	}

	if (btf_type_kflag(t)) {
		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
		return -EINVAL;
	}

	if (!BTF_TYPE_ID_VALID(t->type)) {
		btf_verifier_log_type(env, t, "Invalid type_id");
		return -EINVAL;
	}

	/* typedef/type_tag types must have a valid name, while the other
	 * ref types (volatile, const, restrict) must have a null name.
	 */
	if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) {
		if (!t->name_off ||
		    !btf_name_valid_identifier(env->btf, t->name_off)) {
			btf_verifier_log_type(env, t, "Invalid name");
			return -EINVAL;
		}
	} else if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPE_TAG) {
		value = btf_name_by_offset(env->btf, t->name_off);
		if (!value || !value[0]) {
			btf_verifier_log_type(env, t, "Invalid name");
			return -EINVAL;
		}
	} else {
		if (t->name_off) {
			btf_verifier_log_type(env, t, "Invalid name");
			return -EINVAL;
		}
	}

	btf_verifier_log_type(env, t, NULL);

	return 0;
}

static int btf_modifier_resolve(struct btf_verifier_env *env,
				const struct resolve_vertex *v)
{
	const struct btf_type *t = v->t;
	const struct btf_type *next_type;
	u32 next_type_id = t->type;
	struct btf *btf = env->btf;

	next_type = btf_type_by_id(btf, next_type_id);
	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
		return -EINVAL;
	}

	if (!env_type_is_resolve_sink(env, next_type) &&
	    !env_type_is_resolved(env, next_type_id))
		return env_stack_push(env, next_type, next_type_id);

	/* Figure out the resolved next_type_id with size.
	 * They will be stored in the current modifier's
	 * resolved_ids and resolved_sizes so that they can
	 * save us some type following later (e.g. in
	 * pretty print).
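	 * A sketch of the effect, with made-up type ids: given
	 * [1] INT 'int', [2] TYPEDEF 'u32_t' -> [1], [3] CONST -> [2],
	 * resolving [3] (and [2]) records [1] in resolved_ids, so a later
	 * btf_type_id_size() jumps straight to the 4-byte int instead of
	 * re-walking the modifier chain.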
	 */
	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
		if (env_type_is_resolved(env, next_type_id))
			next_type = btf_type_id_resolve(btf, &next_type_id);

		/* "typedef void new_void", "const void"...etc */
		if (!btf_type_is_void(next_type) &&
		    !btf_type_is_fwd(next_type) &&
		    !btf_type_is_func_proto(next_type)) {
			btf_verifier_log_type(env, v->t, "Invalid type_id");
			return -EINVAL;
		}
	}

	env_stack_pop_resolved(env, next_type_id, 0);

	return 0;
}

static int btf_var_resolve(struct btf_verifier_env *env,
			   const struct resolve_vertex *v)
{
	const struct btf_type *next_type;
	const struct btf_type *t = v->t;
	u32 next_type_id = t->type;
	struct btf *btf = env->btf;

	next_type = btf_type_by_id(btf, next_type_id);
	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
		return -EINVAL;
	}

	if (!env_type_is_resolve_sink(env, next_type) &&
	    !env_type_is_resolved(env, next_type_id))
		return env_stack_push(env, next_type, next_type_id);

	if (btf_type_is_modifier(next_type)) {
		const struct btf_type *resolved_type;
		u32 resolved_type_id;

		resolved_type_id = next_type_id;
		resolved_type = btf_type_id_resolve(btf, &resolved_type_id);

		if (btf_type_is_ptr(resolved_type) &&
		    !env_type_is_resolve_sink(env, resolved_type) &&
		    !env_type_is_resolved(env, resolved_type_id))
			return env_stack_push(env, resolved_type,
					      resolved_type_id);
	}

	/* We must resolve to something concrete at this point; no
	 * forward types or similar that would resolve to a size of
	 * zero are allowed.
	 */
	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
		return -EINVAL;
	}

	env_stack_pop_resolved(env, next_type_id, 0);

	return 0;
}

static int btf_ptr_resolve(struct btf_verifier_env *env,
			   const struct resolve_vertex *v)
{
	const struct btf_type *next_type;
	const struct btf_type *t = v->t;
	u32 next_type_id = t->type;
	struct btf *btf = env->btf;

	next_type = btf_type_by_id(btf, next_type_id);
	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
		return -EINVAL;
	}

	if (!env_type_is_resolve_sink(env, next_type) &&
	    !env_type_is_resolved(env, next_type_id))
		return env_stack_push(env, next_type, next_type_id);

	/* If the modifier was RESOLVED during RESOLVE_STRUCT_OR_ARRAY,
	 * the modifier may have stopped resolving when it was resolved
	 * to a ptr (last-resolved-ptr).
	 *
	 * We now need to continue from the last-resolved-ptr to
	 * ensure the last-resolved-ptr will not refer back to
	 * the current ptr (t).
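	 * Roughly, for a malformed chain [1] PTR -> [2] CONST -> [1]:
	 * the CONST may already be RESOLVED with the PTR as its
	 * last-resolved-ptr, so we follow it below and push the
	 * underlying ptr again, letting the DFS report the back-edge
	 * to (t) as a loop.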
2624 */ 2625 if (btf_type_is_modifier(next_type)) { 2626 const struct btf_type *resolved_type; 2627 u32 resolved_type_id; 2628 2629 resolved_type_id = next_type_id; 2630 resolved_type = btf_type_id_resolve(btf, &resolved_type_id); 2631 2632 if (btf_type_is_ptr(resolved_type) && 2633 !env_type_is_resolve_sink(env, resolved_type) && 2634 !env_type_is_resolved(env, resolved_type_id)) 2635 return env_stack_push(env, resolved_type, 2636 resolved_type_id); 2637 } 2638 2639 if (!btf_type_id_size(btf, &next_type_id, NULL)) { 2640 if (env_type_is_resolved(env, next_type_id)) 2641 next_type = btf_type_id_resolve(btf, &next_type_id); 2642 2643 if (!btf_type_is_void(next_type) && 2644 !btf_type_is_fwd(next_type) && 2645 !btf_type_is_func_proto(next_type)) { 2646 btf_verifier_log_type(env, v->t, "Invalid type_id"); 2647 return -EINVAL; 2648 } 2649 } 2650 2651 env_stack_pop_resolved(env, next_type_id, 0); 2652 2653 return 0; 2654 } 2655 2656 static void btf_modifier_show(const struct btf *btf, 2657 const struct btf_type *t, 2658 u32 type_id, void *data, 2659 u8 bits_offset, struct btf_show *show) 2660 { 2661 if (btf->resolved_ids) 2662 t = btf_type_id_resolve(btf, &type_id); 2663 else 2664 t = btf_type_skip_modifiers(btf, type_id, NULL); 2665 2666 btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show); 2667 } 2668 2669 static void btf_var_show(const struct btf *btf, const struct btf_type *t, 2670 u32 type_id, void *data, u8 bits_offset, 2671 struct btf_show *show) 2672 { 2673 t = btf_type_id_resolve(btf, &type_id); 2674 2675 btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show); 2676 } 2677 2678 static void btf_ptr_show(const struct btf *btf, const struct btf_type *t, 2679 u32 type_id, void *data, u8 bits_offset, 2680 struct btf_show *show) 2681 { 2682 void *safe_data; 2683 2684 safe_data = btf_show_start_type(show, t, type_id, data); 2685 if (!safe_data) 2686 return; 2687 2688 /* It is a hashed value unless BTF_SHOW_PTR_RAW is specified */ 2689 if (show->flags & BTF_SHOW_PTR_RAW) 2690 btf_show_type_value(show, "0x%px", *(void **)safe_data); 2691 else 2692 btf_show_type_value(show, "0x%p", *(void **)safe_data); 2693 btf_show_end_type(show); 2694 } 2695 2696 static void btf_ref_type_log(struct btf_verifier_env *env, 2697 const struct btf_type *t) 2698 { 2699 btf_verifier_log(env, "type_id=%u", t->type); 2700 } 2701 2702 static struct btf_kind_operations modifier_ops = { 2703 .check_meta = btf_ref_type_check_meta, 2704 .resolve = btf_modifier_resolve, 2705 .check_member = btf_modifier_check_member, 2706 .check_kflag_member = btf_modifier_check_kflag_member, 2707 .log_details = btf_ref_type_log, 2708 .show = btf_modifier_show, 2709 }; 2710 2711 static struct btf_kind_operations ptr_ops = { 2712 .check_meta = btf_ref_type_check_meta, 2713 .resolve = btf_ptr_resolve, 2714 .check_member = btf_ptr_check_member, 2715 .check_kflag_member = btf_generic_check_kflag_member, 2716 .log_details = btf_ref_type_log, 2717 .show = btf_ptr_show, 2718 }; 2719 2720 static s32 btf_fwd_check_meta(struct btf_verifier_env *env, 2721 const struct btf_type *t, 2722 u32 meta_left) 2723 { 2724 if (btf_type_vlen(t)) { 2725 btf_verifier_log_type(env, t, "vlen != 0"); 2726 return -EINVAL; 2727 } 2728 2729 if (t->type) { 2730 btf_verifier_log_type(env, t, "type != 0"); 2731 return -EINVAL; 2732 } 2733 2734 /* fwd type must have a valid name */ 2735 if (!t->name_off || 2736 !btf_name_valid_identifier(env->btf, t->name_off)) { 2737 btf_verifier_log_type(env, t, "Invalid name"); 2738 return -EINVAL; 2739 } 2740 2741 
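	/* e.g. a lone "struct foo;" declaration becomes a BTF_KIND_FWD
	 * named "foo"; kflag records whether the fwd is for a struct or
	 * a union (see btf_fwd_type_log() below).
	 */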
btf_verifier_log_type(env, t, NULL); 2742 2743 return 0; 2744 } 2745 2746 static void btf_fwd_type_log(struct btf_verifier_env *env, 2747 const struct btf_type *t) 2748 { 2749 btf_verifier_log(env, "%s", btf_type_kflag(t) ? "union" : "struct"); 2750 } 2751 2752 static struct btf_kind_operations fwd_ops = { 2753 .check_meta = btf_fwd_check_meta, 2754 .resolve = btf_df_resolve, 2755 .check_member = btf_df_check_member, 2756 .check_kflag_member = btf_df_check_kflag_member, 2757 .log_details = btf_fwd_type_log, 2758 .show = btf_df_show, 2759 }; 2760 2761 static int btf_array_check_member(struct btf_verifier_env *env, 2762 const struct btf_type *struct_type, 2763 const struct btf_member *member, 2764 const struct btf_type *member_type) 2765 { 2766 u32 struct_bits_off = member->offset; 2767 u32 struct_size, bytes_offset; 2768 u32 array_type_id, array_size; 2769 struct btf *btf = env->btf; 2770 2771 if (BITS_PER_BYTE_MASKED(struct_bits_off)) { 2772 btf_verifier_log_member(env, struct_type, member, 2773 "Member is not byte aligned"); 2774 return -EINVAL; 2775 } 2776 2777 array_type_id = member->type; 2778 btf_type_id_size(btf, &array_type_id, &array_size); 2779 struct_size = struct_type->size; 2780 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); 2781 if (struct_size - bytes_offset < array_size) { 2782 btf_verifier_log_member(env, struct_type, member, 2783 "Member exceeds struct_size"); 2784 return -EINVAL; 2785 } 2786 2787 return 0; 2788 } 2789 2790 static s32 btf_array_check_meta(struct btf_verifier_env *env, 2791 const struct btf_type *t, 2792 u32 meta_left) 2793 { 2794 const struct btf_array *array = btf_type_array(t); 2795 u32 meta_needed = sizeof(*array); 2796 2797 if (meta_left < meta_needed) { 2798 btf_verifier_log_basic(env, t, 2799 "meta_left:%u meta_needed:%u", 2800 meta_left, meta_needed); 2801 return -EINVAL; 2802 } 2803 2804 /* array type should not have a name */ 2805 if (t->name_off) { 2806 btf_verifier_log_type(env, t, "Invalid name"); 2807 return -EINVAL; 2808 } 2809 2810 if (btf_type_vlen(t)) { 2811 btf_verifier_log_type(env, t, "vlen != 0"); 2812 return -EINVAL; 2813 } 2814 2815 if (btf_type_kflag(t)) { 2816 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 2817 return -EINVAL; 2818 } 2819 2820 if (t->size) { 2821 btf_verifier_log_type(env, t, "size != 0"); 2822 return -EINVAL; 2823 } 2824 2825 /* Array elem type and index type cannot be in type void, 2826 * so !array->type and !array->index_type are not allowed. 
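	 * For illustration, "int a[2]" is encoded roughly as:
	 *   [1] INT 'int' size=4
	 *   [2] ARRAY type_id=1 index_type_id=1 nr_elems=2
	 * matching the fields printed by btf_array_log().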
2827 */ 2828 if (!array->type || !BTF_TYPE_ID_VALID(array->type)) { 2829 btf_verifier_log_type(env, t, "Invalid elem"); 2830 return -EINVAL; 2831 } 2832 2833 if (!array->index_type || !BTF_TYPE_ID_VALID(array->index_type)) { 2834 btf_verifier_log_type(env, t, "Invalid index"); 2835 return -EINVAL; 2836 } 2837 2838 btf_verifier_log_type(env, t, NULL); 2839 2840 return meta_needed; 2841 } 2842 2843 static int btf_array_resolve(struct btf_verifier_env *env, 2844 const struct resolve_vertex *v) 2845 { 2846 const struct btf_array *array = btf_type_array(v->t); 2847 const struct btf_type *elem_type, *index_type; 2848 u32 elem_type_id, index_type_id; 2849 struct btf *btf = env->btf; 2850 u32 elem_size; 2851 2852 /* Check array->index_type */ 2853 index_type_id = array->index_type; 2854 index_type = btf_type_by_id(btf, index_type_id); 2855 if (btf_type_nosize_or_null(index_type) || 2856 btf_type_is_resolve_source_only(index_type)) { 2857 btf_verifier_log_type(env, v->t, "Invalid index"); 2858 return -EINVAL; 2859 } 2860 2861 if (!env_type_is_resolve_sink(env, index_type) && 2862 !env_type_is_resolved(env, index_type_id)) 2863 return env_stack_push(env, index_type, index_type_id); 2864 2865 index_type = btf_type_id_size(btf, &index_type_id, NULL); 2866 if (!index_type || !btf_type_is_int(index_type) || 2867 !btf_type_int_is_regular(index_type)) { 2868 btf_verifier_log_type(env, v->t, "Invalid index"); 2869 return -EINVAL; 2870 } 2871 2872 /* Check array->type */ 2873 elem_type_id = array->type; 2874 elem_type = btf_type_by_id(btf, elem_type_id); 2875 if (btf_type_nosize_or_null(elem_type) || 2876 btf_type_is_resolve_source_only(elem_type)) { 2877 btf_verifier_log_type(env, v->t, 2878 "Invalid elem"); 2879 return -EINVAL; 2880 } 2881 2882 if (!env_type_is_resolve_sink(env, elem_type) && 2883 !env_type_is_resolved(env, elem_type_id)) 2884 return env_stack_push(env, elem_type, elem_type_id); 2885 2886 elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size); 2887 if (!elem_type) { 2888 btf_verifier_log_type(env, v->t, "Invalid elem"); 2889 return -EINVAL; 2890 } 2891 2892 if (btf_type_is_int(elem_type) && !btf_type_int_is_regular(elem_type)) { 2893 btf_verifier_log_type(env, v->t, "Invalid array of int"); 2894 return -EINVAL; 2895 } 2896 2897 if (array->nelems && elem_size > U32_MAX / array->nelems) { 2898 btf_verifier_log_type(env, v->t, 2899 "Array size overflows U32_MAX"); 2900 return -EINVAL; 2901 } 2902 2903 env_stack_pop_resolved(env, elem_type_id, elem_size * array->nelems); 2904 2905 return 0; 2906 } 2907 2908 static void btf_array_log(struct btf_verifier_env *env, 2909 const struct btf_type *t) 2910 { 2911 const struct btf_array *array = btf_type_array(t); 2912 2913 btf_verifier_log(env, "type_id=%u index_type_id=%u nr_elems=%u", 2914 array->type, array->index_type, array->nelems); 2915 } 2916 2917 static void __btf_array_show(const struct btf *btf, const struct btf_type *t, 2918 u32 type_id, void *data, u8 bits_offset, 2919 struct btf_show *show) 2920 { 2921 const struct btf_array *array = btf_type_array(t); 2922 const struct btf_kind_operations *elem_ops; 2923 const struct btf_type *elem_type; 2924 u32 i, elem_size = 0, elem_type_id; 2925 u16 encoding = 0; 2926 2927 elem_type_id = array->type; 2928 elem_type = btf_type_skip_modifiers(btf, elem_type_id, NULL); 2929 if (elem_type && btf_type_has_size(elem_type)) 2930 elem_size = elem_type->size; 2931 2932 if (elem_type && btf_type_is_int(elem_type)) { 2933 u32 int_type = btf_type_int(elem_type); 2934 2935 encoding = 
BTF_INT_ENCODING(int_type); 2936 2937 /* 2938 * BTF_INT_CHAR encoding never seems to be set for 2939 * char arrays, so if size is 1 and element is 2940 * printable as a char, we'll do that. 2941 */ 2942 if (elem_size == 1) 2943 encoding = BTF_INT_CHAR; 2944 } 2945 2946 if (!btf_show_start_array_type(show, t, type_id, encoding, data)) 2947 return; 2948 2949 if (!elem_type) 2950 goto out; 2951 elem_ops = btf_type_ops(elem_type); 2952 2953 for (i = 0; i < array->nelems; i++) { 2954 2955 btf_show_start_array_member(show); 2956 2957 elem_ops->show(btf, elem_type, elem_type_id, data, 2958 bits_offset, show); 2959 data += elem_size; 2960 2961 btf_show_end_array_member(show); 2962 2963 if (show->state.array_terminated) 2964 break; 2965 } 2966 out: 2967 btf_show_end_array_type(show); 2968 } 2969 2970 static void btf_array_show(const struct btf *btf, const struct btf_type *t, 2971 u32 type_id, void *data, u8 bits_offset, 2972 struct btf_show *show) 2973 { 2974 const struct btf_member *m = show->state.member; 2975 2976 /* 2977 * First check if any members would be shown (are non-zero). 2978 * See comments above "struct btf_show" definition for more 2979 * details on how this works at a high-level. 2980 */ 2981 if (show->state.depth > 0 && !(show->flags & BTF_SHOW_ZERO)) { 2982 if (!show->state.depth_check) { 2983 show->state.depth_check = show->state.depth + 1; 2984 show->state.depth_to_show = 0; 2985 } 2986 __btf_array_show(btf, t, type_id, data, bits_offset, show); 2987 show->state.member = m; 2988 2989 if (show->state.depth_check != show->state.depth + 1) 2990 return; 2991 show->state.depth_check = 0; 2992 2993 if (show->state.depth_to_show <= show->state.depth) 2994 return; 2995 /* 2996 * Reaching here indicates we have recursed and found 2997 * non-zero array member(s). 
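		 * (The __btf_array_show() call above was a dry run with
		 * depth_check set, used only to tally depth_to_show; the
		 * call below prints for real.)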
2998 */ 2999 } 3000 __btf_array_show(btf, t, type_id, data, bits_offset, show); 3001 } 3002 3003 static struct btf_kind_operations array_ops = { 3004 .check_meta = btf_array_check_meta, 3005 .resolve = btf_array_resolve, 3006 .check_member = btf_array_check_member, 3007 .check_kflag_member = btf_generic_check_kflag_member, 3008 .log_details = btf_array_log, 3009 .show = btf_array_show, 3010 }; 3011 3012 static int btf_struct_check_member(struct btf_verifier_env *env, 3013 const struct btf_type *struct_type, 3014 const struct btf_member *member, 3015 const struct btf_type *member_type) 3016 { 3017 u32 struct_bits_off = member->offset; 3018 u32 struct_size, bytes_offset; 3019 3020 if (BITS_PER_BYTE_MASKED(struct_bits_off)) { 3021 btf_verifier_log_member(env, struct_type, member, 3022 "Member is not byte aligned"); 3023 return -EINVAL; 3024 } 3025 3026 struct_size = struct_type->size; 3027 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); 3028 if (struct_size - bytes_offset < member_type->size) { 3029 btf_verifier_log_member(env, struct_type, member, 3030 "Member exceeds struct_size"); 3031 return -EINVAL; 3032 } 3033 3034 return 0; 3035 } 3036 3037 static s32 btf_struct_check_meta(struct btf_verifier_env *env, 3038 const struct btf_type *t, 3039 u32 meta_left) 3040 { 3041 bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION; 3042 const struct btf_member *member; 3043 u32 meta_needed, last_offset; 3044 struct btf *btf = env->btf; 3045 u32 struct_size = t->size; 3046 u32 offset; 3047 u16 i; 3048 3049 meta_needed = btf_type_vlen(t) * sizeof(*member); 3050 if (meta_left < meta_needed) { 3051 btf_verifier_log_basic(env, t, 3052 "meta_left:%u meta_needed:%u", 3053 meta_left, meta_needed); 3054 return -EINVAL; 3055 } 3056 3057 /* struct type either no name or a valid one */ 3058 if (t->name_off && 3059 !btf_name_valid_identifier(env->btf, t->name_off)) { 3060 btf_verifier_log_type(env, t, "Invalid name"); 3061 return -EINVAL; 3062 } 3063 3064 btf_verifier_log_type(env, t, NULL); 3065 3066 last_offset = 0; 3067 for_each_member(i, t, member) { 3068 if (!btf_name_offset_valid(btf, member->name_off)) { 3069 btf_verifier_log_member(env, t, member, 3070 "Invalid member name_offset:%u", 3071 member->name_off); 3072 return -EINVAL; 3073 } 3074 3075 /* struct member either no name or a valid one */ 3076 if (member->name_off && 3077 !btf_name_valid_identifier(btf, member->name_off)) { 3078 btf_verifier_log_member(env, t, member, "Invalid name"); 3079 return -EINVAL; 3080 } 3081 /* A member cannot be in type void */ 3082 if (!member->type || !BTF_TYPE_ID_VALID(member->type)) { 3083 btf_verifier_log_member(env, t, member, 3084 "Invalid type_id"); 3085 return -EINVAL; 3086 } 3087 3088 offset = __btf_member_bit_offset(t, member); 3089 if (is_union && offset) { 3090 btf_verifier_log_member(env, t, member, 3091 "Invalid member bits_offset"); 3092 return -EINVAL; 3093 } 3094 3095 /* 3096 * ">" instead of ">=" because the last member could be 3097 * "char a[0];" 3098 */ 3099 if (last_offset > offset) { 3100 btf_verifier_log_member(env, t, member, 3101 "Invalid member bits_offset"); 3102 return -EINVAL; 3103 } 3104 3105 if (BITS_ROUNDUP_BYTES(offset) > struct_size) { 3106 btf_verifier_log_member(env, t, member, 3107 "Member bits_offset exceeds its struct size"); 3108 return -EINVAL; 3109 } 3110 3111 btf_verifier_log_member(env, t, member, NULL); 3112 last_offset = offset; 3113 } 3114 3115 return meta_needed; 3116 } 3117 3118 static int btf_struct_resolve(struct btf_verifier_env *env, 3119 const struct 
resolve_vertex *v)
{
	const struct btf_member *member;
	int err;
	u16 i;

	/* Before continuing to resolve the next_member,
	 * ensure the last member is indeed resolved to a
	 * type with size info.
	 */
	if (v->next_member) {
		const struct btf_type *last_member_type;
		const struct btf_member *last_member;
		u16 last_member_type_id;

		last_member = btf_type_member(v->t) + v->next_member - 1;
		last_member_type_id = last_member->type;
		if (WARN_ON_ONCE(!env_type_is_resolved(env,
						       last_member_type_id)))
			return -EINVAL;

		last_member_type = btf_type_by_id(env->btf,
						  last_member_type_id);
		if (btf_type_kflag(v->t))
			err = btf_type_ops(last_member_type)->check_kflag_member(env, v->t,
								last_member,
								last_member_type);
		else
			err = btf_type_ops(last_member_type)->check_member(env, v->t,
								last_member,
								last_member_type);
		if (err)
			return err;
	}

	for_each_member_from(i, v->next_member, v->t, member) {
		u32 member_type_id = member->type;
		const struct btf_type *member_type = btf_type_by_id(env->btf,
								member_type_id);

		if (btf_type_nosize_or_null(member_type) ||
		    btf_type_is_resolve_source_only(member_type)) {
			btf_verifier_log_member(env, v->t, member,
						"Invalid member");
			return -EINVAL;
		}

		if (!env_type_is_resolve_sink(env, member_type) &&
		    !env_type_is_resolved(env, member_type_id)) {
			env_stack_set_next_member(env, i + 1);
			return env_stack_push(env, member_type, member_type_id);
		}

		if (btf_type_kflag(v->t))
			err = btf_type_ops(member_type)->check_kflag_member(env, v->t,
									    member,
									    member_type);
		else
			err = btf_type_ops(member_type)->check_member(env, v->t,
								      member,
								      member_type);
		if (err)
			return err;
	}

	env_stack_pop_resolved(env, 0, 0);

	return 0;
}

static void btf_struct_log(struct btf_verifier_env *env,
			   const struct btf_type *t)
{
	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
}

enum btf_field_type {
	BTF_FIELD_SPIN_LOCK,
	BTF_FIELD_TIMER,
	BTF_FIELD_KPTR,
};

enum {
	BTF_FIELD_IGNORE = 0,
	BTF_FIELD_FOUND = 1,
};

struct btf_field_info {
	u32 type_id;
	u32 off;
	enum bpf_kptr_type type;
};

static int btf_find_struct(const struct btf *btf, const struct btf_type *t,
			   u32 off, int sz, struct btf_field_info *info)
{
	if (!__btf_type_is_struct(t))
		return BTF_FIELD_IGNORE;
	if (t->size != sz)
		return BTF_FIELD_IGNORE;
	info->off = off;
	return BTF_FIELD_FOUND;
}

static int btf_find_kptr(const struct btf *btf, const struct btf_type *t,
			 u32 off, int sz, struct btf_field_info *info)
{
	enum bpf_kptr_type type;
	u32 res_id;

	/* For PTR, sz is always == 8 */
	if (!btf_type_is_ptr(t))
		return BTF_FIELD_IGNORE;
	t = btf_type_by_id(btf, t->type);

	if (!btf_type_is_type_tag(t))
		return BTF_FIELD_IGNORE;
	/* Reject extra tags */
	if (btf_type_is_type_tag(btf_type_by_id(btf, t->type)))
		return -EINVAL;
	if (!strcmp("kptr", __btf_name_by_offset(btf, t->name_off)))
		type = BPF_KPTR_UNREF;
	else if (!strcmp("kptr_ref", __btf_name_by_offset(btf, t->name_off)))
		type = BPF_KPTR_REF;
	else
		return -EINVAL;

	/* Get the base type */
	t =
btf_type_skip_modifiers(btf, t->type, &res_id); 3248 /* Only pointer to struct is allowed */ 3249 if (!__btf_type_is_struct(t)) 3250 return -EINVAL; 3251 3252 info->type_id = res_id; 3253 info->off = off; 3254 info->type = type; 3255 return BTF_FIELD_FOUND; 3256 } 3257 3258 static int btf_find_struct_field(const struct btf *btf, const struct btf_type *t, 3259 const char *name, int sz, int align, 3260 enum btf_field_type field_type, 3261 struct btf_field_info *info, int info_cnt) 3262 { 3263 const struct btf_member *member; 3264 struct btf_field_info tmp; 3265 int ret, idx = 0; 3266 u32 i, off; 3267 3268 for_each_member(i, t, member) { 3269 const struct btf_type *member_type = btf_type_by_id(btf, 3270 member->type); 3271 3272 if (name && strcmp(__btf_name_by_offset(btf, member_type->name_off), name)) 3273 continue; 3274 3275 off = __btf_member_bit_offset(t, member); 3276 if (off % 8) 3277 /* valid C code cannot generate such BTF */ 3278 return -EINVAL; 3279 off /= 8; 3280 if (off % align) 3281 return -EINVAL; 3282 3283 switch (field_type) { 3284 case BTF_FIELD_SPIN_LOCK: 3285 case BTF_FIELD_TIMER: 3286 ret = btf_find_struct(btf, member_type, off, sz, 3287 idx < info_cnt ? &info[idx] : &tmp); 3288 if (ret < 0) 3289 return ret; 3290 break; 3291 case BTF_FIELD_KPTR: 3292 ret = btf_find_kptr(btf, member_type, off, sz, 3293 idx < info_cnt ? &info[idx] : &tmp); 3294 if (ret < 0) 3295 return ret; 3296 break; 3297 default: 3298 return -EFAULT; 3299 } 3300 3301 if (ret == BTF_FIELD_IGNORE) 3302 continue; 3303 if (idx >= info_cnt) 3304 return -E2BIG; 3305 ++idx; 3306 } 3307 return idx; 3308 } 3309 3310 static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t, 3311 const char *name, int sz, int align, 3312 enum btf_field_type field_type, 3313 struct btf_field_info *info, int info_cnt) 3314 { 3315 const struct btf_var_secinfo *vsi; 3316 struct btf_field_info tmp; 3317 int ret, idx = 0; 3318 u32 i, off; 3319 3320 for_each_vsi(i, t, vsi) { 3321 const struct btf_type *var = btf_type_by_id(btf, vsi->type); 3322 const struct btf_type *var_type = btf_type_by_id(btf, var->type); 3323 3324 off = vsi->offset; 3325 3326 if (name && strcmp(__btf_name_by_offset(btf, var_type->name_off), name)) 3327 continue; 3328 if (vsi->size != sz) 3329 continue; 3330 if (off % align) 3331 return -EINVAL; 3332 3333 switch (field_type) { 3334 case BTF_FIELD_SPIN_LOCK: 3335 case BTF_FIELD_TIMER: 3336 ret = btf_find_struct(btf, var_type, off, sz, 3337 idx < info_cnt ? &info[idx] : &tmp); 3338 if (ret < 0) 3339 return ret; 3340 break; 3341 case BTF_FIELD_KPTR: 3342 ret = btf_find_kptr(btf, var_type, off, sz, 3343 idx < info_cnt ? 
&info[idx] : &tmp); 3344 if (ret < 0) 3345 return ret; 3346 break; 3347 default: 3348 return -EFAULT; 3349 } 3350 3351 if (ret == BTF_FIELD_IGNORE) 3352 continue; 3353 if (idx >= info_cnt) 3354 return -E2BIG; 3355 ++idx; 3356 } 3357 return idx; 3358 } 3359 3360 static int btf_find_field(const struct btf *btf, const struct btf_type *t, 3361 enum btf_field_type field_type, 3362 struct btf_field_info *info, int info_cnt) 3363 { 3364 const char *name; 3365 int sz, align; 3366 3367 switch (field_type) { 3368 case BTF_FIELD_SPIN_LOCK: 3369 name = "bpf_spin_lock"; 3370 sz = sizeof(struct bpf_spin_lock); 3371 align = __alignof__(struct bpf_spin_lock); 3372 break; 3373 case BTF_FIELD_TIMER: 3374 name = "bpf_timer"; 3375 sz = sizeof(struct bpf_timer); 3376 align = __alignof__(struct bpf_timer); 3377 break; 3378 case BTF_FIELD_KPTR: 3379 name = NULL; 3380 sz = sizeof(u64); 3381 align = 8; 3382 break; 3383 default: 3384 return -EFAULT; 3385 } 3386 3387 if (__btf_type_is_struct(t)) 3388 return btf_find_struct_field(btf, t, name, sz, align, field_type, info, info_cnt); 3389 else if (btf_type_is_datasec(t)) 3390 return btf_find_datasec_var(btf, t, name, sz, align, field_type, info, info_cnt); 3391 return -EINVAL; 3392 } 3393 3394 /* find 'struct bpf_spin_lock' in map value. 3395 * return >= 0 offset if found 3396 * and < 0 in case of error 3397 */ 3398 int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t) 3399 { 3400 struct btf_field_info info; 3401 int ret; 3402 3403 ret = btf_find_field(btf, t, BTF_FIELD_SPIN_LOCK, &info, 1); 3404 if (ret < 0) 3405 return ret; 3406 if (!ret) 3407 return -ENOENT; 3408 return info.off; 3409 } 3410 3411 int btf_find_timer(const struct btf *btf, const struct btf_type *t) 3412 { 3413 struct btf_field_info info; 3414 int ret; 3415 3416 ret = btf_find_field(btf, t, BTF_FIELD_TIMER, &info, 1); 3417 if (ret < 0) 3418 return ret; 3419 if (!ret) 3420 return -ENOENT; 3421 return info.off; 3422 } 3423 3424 struct bpf_map_value_off *btf_parse_kptrs(const struct btf *btf, 3425 const struct btf_type *t) 3426 { 3427 struct btf_field_info info_arr[BPF_MAP_VALUE_OFF_MAX]; 3428 struct bpf_map_value_off *tab; 3429 struct btf *kernel_btf = NULL; 3430 struct module *mod = NULL; 3431 int ret, i, nr_off; 3432 3433 ret = btf_find_field(btf, t, BTF_FIELD_KPTR, info_arr, ARRAY_SIZE(info_arr)); 3434 if (ret < 0) 3435 return ERR_PTR(ret); 3436 if (!ret) 3437 return NULL; 3438 3439 nr_off = ret; 3440 tab = kzalloc(offsetof(struct bpf_map_value_off, off[nr_off]), GFP_KERNEL | __GFP_NOWARN); 3441 if (!tab) 3442 return ERR_PTR(-ENOMEM); 3443 3444 for (i = 0; i < nr_off; i++) { 3445 const struct btf_type *t; 3446 s32 id; 3447 3448 /* Find type in map BTF, and use it to look up the matching type 3449 * in vmlinux or module BTFs, by name and kind. 3450 */ 3451 t = btf_type_by_id(btf, info_arr[i].type_id); 3452 id = bpf_find_btf_id(__btf_name_by_offset(btf, t->name_off), BTF_INFO_KIND(t->info), 3453 &kernel_btf); 3454 if (id < 0) { 3455 ret = id; 3456 goto end; 3457 } 3458 3459 /* Find and stash the function pointer for the destruction function that 3460 * needs to be eventually invoked from the map free path. 3461 */ 3462 if (info_arr[i].type == BPF_KPTR_REF) { 3463 const struct btf_type *dtor_func; 3464 const char *dtor_func_name; 3465 unsigned long addr; 3466 s32 dtor_btf_id; 3467 3468 /* This call also serves as a whitelist of allowed objects that 3469 * can be used as a referenced pointer and be stored in a map at 3470 * the same time. 
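			 * In other words, only types that had a destructor
			 * kfunc registered via register_btf_id_dtor_kfuncs()
			 * may back a BPF_KPTR_REF field; the lookup below
			 * simply fails for anything else.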
3471 */ 3472 dtor_btf_id = btf_find_dtor_kfunc(kernel_btf, id); 3473 if (dtor_btf_id < 0) { 3474 ret = dtor_btf_id; 3475 goto end_btf; 3476 } 3477 3478 dtor_func = btf_type_by_id(kernel_btf, dtor_btf_id); 3479 if (!dtor_func) { 3480 ret = -ENOENT; 3481 goto end_btf; 3482 } 3483 3484 if (btf_is_module(kernel_btf)) { 3485 mod = btf_try_get_module(kernel_btf); 3486 if (!mod) { 3487 ret = -ENXIO; 3488 goto end_btf; 3489 } 3490 } 3491 3492 /* We already verified dtor_func to be btf_type_is_func 3493 * in register_btf_id_dtor_kfuncs. 3494 */ 3495 dtor_func_name = __btf_name_by_offset(kernel_btf, dtor_func->name_off); 3496 addr = kallsyms_lookup_name(dtor_func_name); 3497 if (!addr) { 3498 ret = -EINVAL; 3499 goto end_mod; 3500 } 3501 tab->off[i].kptr.dtor = (void *)addr; 3502 } 3503 3504 tab->off[i].offset = info_arr[i].off; 3505 tab->off[i].type = info_arr[i].type; 3506 tab->off[i].kptr.btf_id = id; 3507 tab->off[i].kptr.btf = kernel_btf; 3508 tab->off[i].kptr.module = mod; 3509 } 3510 tab->nr_off = nr_off; 3511 return tab; 3512 end_mod: 3513 module_put(mod); 3514 end_btf: 3515 btf_put(kernel_btf); 3516 end: 3517 while (i--) { 3518 btf_put(tab->off[i].kptr.btf); 3519 if (tab->off[i].kptr.module) 3520 module_put(tab->off[i].kptr.module); 3521 } 3522 kfree(tab); 3523 return ERR_PTR(ret); 3524 } 3525 3526 static void __btf_struct_show(const struct btf *btf, const struct btf_type *t, 3527 u32 type_id, void *data, u8 bits_offset, 3528 struct btf_show *show) 3529 { 3530 const struct btf_member *member; 3531 void *safe_data; 3532 u32 i; 3533 3534 safe_data = btf_show_start_struct_type(show, t, type_id, data); 3535 if (!safe_data) 3536 return; 3537 3538 for_each_member(i, t, member) { 3539 const struct btf_type *member_type = btf_type_by_id(btf, 3540 member->type); 3541 const struct btf_kind_operations *ops; 3542 u32 member_offset, bitfield_size; 3543 u32 bytes_offset; 3544 u8 bits8_offset; 3545 3546 btf_show_start_member(show, member); 3547 3548 member_offset = __btf_member_bit_offset(t, member); 3549 bitfield_size = __btf_member_bitfield_size(t, member); 3550 bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset); 3551 bits8_offset = BITS_PER_BYTE_MASKED(member_offset); 3552 if (bitfield_size) { 3553 safe_data = btf_show_start_type(show, member_type, 3554 member->type, 3555 data + bytes_offset); 3556 if (safe_data) 3557 btf_bitfield_show(safe_data, 3558 bits8_offset, 3559 bitfield_size, show); 3560 btf_show_end_type(show); 3561 } else { 3562 ops = btf_type_ops(member_type); 3563 ops->show(btf, member_type, member->type, 3564 data + bytes_offset, bits8_offset, show); 3565 } 3566 3567 btf_show_end_member(show); 3568 } 3569 3570 btf_show_end_struct_type(show); 3571 } 3572 3573 static void btf_struct_show(const struct btf *btf, const struct btf_type *t, 3574 u32 type_id, void *data, u8 bits_offset, 3575 struct btf_show *show) 3576 { 3577 const struct btf_member *m = show->state.member; 3578 3579 /* 3580 * First check if any members would be shown (are non-zero). 3581 * See comments above "struct btf_show" definition for more 3582 * details on how this works at a high-level. 
3583 */ 3584 if (show->state.depth > 0 && !(show->flags & BTF_SHOW_ZERO)) { 3585 if (!show->state.depth_check) { 3586 show->state.depth_check = show->state.depth + 1; 3587 show->state.depth_to_show = 0; 3588 } 3589 __btf_struct_show(btf, t, type_id, data, bits_offset, show); 3590 /* Restore saved member data here */ 3591 show->state.member = m; 3592 if (show->state.depth_check != show->state.depth + 1) 3593 return; 3594 show->state.depth_check = 0; 3595 3596 if (show->state.depth_to_show <= show->state.depth) 3597 return; 3598 /* 3599 * Reaching here indicates we have recursed and found 3600 * non-zero child values. 3601 */ 3602 } 3603 3604 __btf_struct_show(btf, t, type_id, data, bits_offset, show); 3605 } 3606 3607 static struct btf_kind_operations struct_ops = { 3608 .check_meta = btf_struct_check_meta, 3609 .resolve = btf_struct_resolve, 3610 .check_member = btf_struct_check_member, 3611 .check_kflag_member = btf_generic_check_kflag_member, 3612 .log_details = btf_struct_log, 3613 .show = btf_struct_show, 3614 }; 3615 3616 static int btf_enum_check_member(struct btf_verifier_env *env, 3617 const struct btf_type *struct_type, 3618 const struct btf_member *member, 3619 const struct btf_type *member_type) 3620 { 3621 u32 struct_bits_off = member->offset; 3622 u32 struct_size, bytes_offset; 3623 3624 if (BITS_PER_BYTE_MASKED(struct_bits_off)) { 3625 btf_verifier_log_member(env, struct_type, member, 3626 "Member is not byte aligned"); 3627 return -EINVAL; 3628 } 3629 3630 struct_size = struct_type->size; 3631 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); 3632 if (struct_size - bytes_offset < member_type->size) { 3633 btf_verifier_log_member(env, struct_type, member, 3634 "Member exceeds struct_size"); 3635 return -EINVAL; 3636 } 3637 3638 return 0; 3639 } 3640 3641 static int btf_enum_check_kflag_member(struct btf_verifier_env *env, 3642 const struct btf_type *struct_type, 3643 const struct btf_member *member, 3644 const struct btf_type *member_type) 3645 { 3646 u32 struct_bits_off, nr_bits, bytes_end, struct_size; 3647 u32 int_bitsize = sizeof(int) * BITS_PER_BYTE; 3648 3649 struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset); 3650 nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset); 3651 if (!nr_bits) { 3652 if (BITS_PER_BYTE_MASKED(struct_bits_off)) { 3653 btf_verifier_log_member(env, struct_type, member, 3654 "Member is not byte aligned"); 3655 return -EINVAL; 3656 } 3657 3658 nr_bits = int_bitsize; 3659 } else if (nr_bits > int_bitsize) { 3660 btf_verifier_log_member(env, struct_type, member, 3661 "Invalid member bitfield_size"); 3662 return -EINVAL; 3663 } 3664 3665 struct_size = struct_type->size; 3666 bytes_end = BITS_ROUNDUP_BYTES(struct_bits_off + nr_bits); 3667 if (struct_size < bytes_end) { 3668 btf_verifier_log_member(env, struct_type, member, 3669 "Member exceeds struct_size"); 3670 return -EINVAL; 3671 } 3672 3673 return 0; 3674 } 3675 3676 static s32 btf_enum_check_meta(struct btf_verifier_env *env, 3677 const struct btf_type *t, 3678 u32 meta_left) 3679 { 3680 const struct btf_enum *enums = btf_type_enum(t); 3681 struct btf *btf = env->btf; 3682 const char *fmt_str; 3683 u16 i, nr_enums; 3684 u32 meta_needed; 3685 3686 nr_enums = btf_type_vlen(t); 3687 meta_needed = nr_enums * sizeof(*enums); 3688 3689 if (meta_left < meta_needed) { 3690 btf_verifier_log_basic(env, t, 3691 "meta_left:%u meta_needed:%u", 3692 meta_left, meta_needed); 3693 return -EINVAL; 3694 } 3695 3696 if (t->size > 8 || !is_power_of_2(t->size)) { 3697 btf_verifier_log_type(env, t, "Unexpected 
size"); 3698 return -EINVAL; 3699 } 3700 3701 /* enum type either no name or a valid one */ 3702 if (t->name_off && 3703 !btf_name_valid_identifier(env->btf, t->name_off)) { 3704 btf_verifier_log_type(env, t, "Invalid name"); 3705 return -EINVAL; 3706 } 3707 3708 btf_verifier_log_type(env, t, NULL); 3709 3710 for (i = 0; i < nr_enums; i++) { 3711 if (!btf_name_offset_valid(btf, enums[i].name_off)) { 3712 btf_verifier_log(env, "\tInvalid name_offset:%u", 3713 enums[i].name_off); 3714 return -EINVAL; 3715 } 3716 3717 /* enum member must have a valid name */ 3718 if (!enums[i].name_off || 3719 !btf_name_valid_identifier(btf, enums[i].name_off)) { 3720 btf_verifier_log_type(env, t, "Invalid name"); 3721 return -EINVAL; 3722 } 3723 3724 if (env->log.level == BPF_LOG_KERNEL) 3725 continue; 3726 fmt_str = btf_type_kflag(t) ? "\t%s val=%d\n" : "\t%s val=%u\n"; 3727 btf_verifier_log(env, fmt_str, 3728 __btf_name_by_offset(btf, enums[i].name_off), 3729 enums[i].val); 3730 } 3731 3732 return meta_needed; 3733 } 3734 3735 static void btf_enum_log(struct btf_verifier_env *env, 3736 const struct btf_type *t) 3737 { 3738 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t)); 3739 } 3740 3741 static void btf_enum_show(const struct btf *btf, const struct btf_type *t, 3742 u32 type_id, void *data, u8 bits_offset, 3743 struct btf_show *show) 3744 { 3745 const struct btf_enum *enums = btf_type_enum(t); 3746 u32 i, nr_enums = btf_type_vlen(t); 3747 void *safe_data; 3748 int v; 3749 3750 safe_data = btf_show_start_type(show, t, type_id, data); 3751 if (!safe_data) 3752 return; 3753 3754 v = *(int *)safe_data; 3755 3756 for (i = 0; i < nr_enums; i++) { 3757 if (v != enums[i].val) 3758 continue; 3759 3760 btf_show_type_value(show, "%s", 3761 __btf_name_by_offset(btf, 3762 enums[i].name_off)); 3763 3764 btf_show_end_type(show); 3765 return; 3766 } 3767 3768 if (btf_type_kflag(t)) 3769 btf_show_type_value(show, "%d", v); 3770 else 3771 btf_show_type_value(show, "%u", v); 3772 btf_show_end_type(show); 3773 } 3774 3775 static struct btf_kind_operations enum_ops = { 3776 .check_meta = btf_enum_check_meta, 3777 .resolve = btf_df_resolve, 3778 .check_member = btf_enum_check_member, 3779 .check_kflag_member = btf_enum_check_kflag_member, 3780 .log_details = btf_enum_log, 3781 .show = btf_enum_show, 3782 }; 3783 3784 static s32 btf_enum64_check_meta(struct btf_verifier_env *env, 3785 const struct btf_type *t, 3786 u32 meta_left) 3787 { 3788 const struct btf_enum64 *enums = btf_type_enum64(t); 3789 struct btf *btf = env->btf; 3790 const char *fmt_str; 3791 u16 i, nr_enums; 3792 u32 meta_needed; 3793 3794 nr_enums = btf_type_vlen(t); 3795 meta_needed = nr_enums * sizeof(*enums); 3796 3797 if (meta_left < meta_needed) { 3798 btf_verifier_log_basic(env, t, 3799 "meta_left:%u meta_needed:%u", 3800 meta_left, meta_needed); 3801 return -EINVAL; 3802 } 3803 3804 if (t->size > 8 || !is_power_of_2(t->size)) { 3805 btf_verifier_log_type(env, t, "Unexpected size"); 3806 return -EINVAL; 3807 } 3808 3809 /* enum type either no name or a valid one */ 3810 if (t->name_off && 3811 !btf_name_valid_identifier(env->btf, t->name_off)) { 3812 btf_verifier_log_type(env, t, "Invalid name"); 3813 return -EINVAL; 3814 } 3815 3816 btf_verifier_log_type(env, t, NULL); 3817 3818 for (i = 0; i < nr_enums; i++) { 3819 if (!btf_name_offset_valid(btf, enums[i].name_off)) { 3820 btf_verifier_log(env, "\tInvalid name_offset:%u", 3821 enums[i].name_off); 3822 return -EINVAL; 3823 } 3824 3825 /* enum member must have a valid name */ 3826 if 
(!enums[i].name_off || 3827 !btf_name_valid_identifier(btf, enums[i].name_off)) { 3828 btf_verifier_log_type(env, t, "Invalid name"); 3829 return -EINVAL; 3830 } 3831 3832 if (env->log.level == BPF_LOG_KERNEL) 3833 continue; 3834 3835 fmt_str = btf_type_kflag(t) ? "\t%s val=%lld\n" : "\t%s val=%llu\n"; 3836 btf_verifier_log(env, fmt_str, 3837 __btf_name_by_offset(btf, enums[i].name_off), 3838 btf_enum64_value(enums + i)); 3839 } 3840 3841 return meta_needed; 3842 } 3843 3844 static void btf_enum64_show(const struct btf *btf, const struct btf_type *t, 3845 u32 type_id, void *data, u8 bits_offset, 3846 struct btf_show *show) 3847 { 3848 const struct btf_enum64 *enums = btf_type_enum64(t); 3849 u32 i, nr_enums = btf_type_vlen(t); 3850 void *safe_data; 3851 s64 v; 3852 3853 safe_data = btf_show_start_type(show, t, type_id, data); 3854 if (!safe_data) 3855 return; 3856 3857 v = *(u64 *)safe_data; 3858 3859 for (i = 0; i < nr_enums; i++) { 3860 if (v != btf_enum64_value(enums + i)) 3861 continue; 3862 3863 btf_show_type_value(show, "%s", 3864 __btf_name_by_offset(btf, 3865 enums[i].name_off)); 3866 3867 btf_show_end_type(show); 3868 return; 3869 } 3870 3871 if (btf_type_kflag(t)) 3872 btf_show_type_value(show, "%lld", v); 3873 else 3874 btf_show_type_value(show, "%llu", v); 3875 btf_show_end_type(show); 3876 } 3877 3878 static struct btf_kind_operations enum64_ops = { 3879 .check_meta = btf_enum64_check_meta, 3880 .resolve = btf_df_resolve, 3881 .check_member = btf_enum_check_member, 3882 .check_kflag_member = btf_enum_check_kflag_member, 3883 .log_details = btf_enum_log, 3884 .show = btf_enum64_show, 3885 }; 3886 3887 static s32 btf_func_proto_check_meta(struct btf_verifier_env *env, 3888 const struct btf_type *t, 3889 u32 meta_left) 3890 { 3891 u32 meta_needed = btf_type_vlen(t) * sizeof(struct btf_param); 3892 3893 if (meta_left < meta_needed) { 3894 btf_verifier_log_basic(env, t, 3895 "meta_left:%u meta_needed:%u", 3896 meta_left, meta_needed); 3897 return -EINVAL; 3898 } 3899 3900 if (t->name_off) { 3901 btf_verifier_log_type(env, t, "Invalid name"); 3902 return -EINVAL; 3903 } 3904 3905 if (btf_type_kflag(t)) { 3906 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 3907 return -EINVAL; 3908 } 3909 3910 btf_verifier_log_type(env, t, NULL); 3911 3912 return meta_needed; 3913 } 3914 3915 static void btf_func_proto_log(struct btf_verifier_env *env, 3916 const struct btf_type *t) 3917 { 3918 const struct btf_param *args = (const struct btf_param *)(t + 1); 3919 u16 nr_args = btf_type_vlen(t), i; 3920 3921 btf_verifier_log(env, "return=%u args=(", t->type); 3922 if (!nr_args) { 3923 btf_verifier_log(env, "void"); 3924 goto done; 3925 } 3926 3927 if (nr_args == 1 && !args[0].type) { 3928 /* Only one vararg */ 3929 btf_verifier_log(env, "vararg"); 3930 goto done; 3931 } 3932 3933 btf_verifier_log(env, "%u %s", args[0].type, 3934 __btf_name_by_offset(env->btf, 3935 args[0].name_off)); 3936 for (i = 1; i < nr_args - 1; i++) 3937 btf_verifier_log(env, ", %u %s", args[i].type, 3938 __btf_name_by_offset(env->btf, 3939 args[i].name_off)); 3940 3941 if (nr_args > 1) { 3942 const struct btf_param *last_arg = &args[nr_args - 1]; 3943 3944 if (last_arg->type) 3945 btf_verifier_log(env, ", %u %s", last_arg->type, 3946 __btf_name_by_offset(env->btf, 3947 last_arg->name_off)); 3948 else 3949 btf_verifier_log(env, ", vararg"); 3950 } 3951 3952 done: 3953 btf_verifier_log(env, ")"); 3954 } 3955 3956 static struct btf_kind_operations func_proto_ops = { 3957 .check_meta = btf_func_proto_check_meta, 3958 
.resolve = btf_df_resolve, 3959 /* 3960 * BTF_KIND_FUNC_PROTO cannot be directly referred by 3961 * a struct's member. 3962 * 3963 * It should be a function pointer instead. 3964 * (i.e. struct's member -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO) 3965 * 3966 * Hence, there is no btf_func_check_member(). 3967 */ 3968 .check_member = btf_df_check_member, 3969 .check_kflag_member = btf_df_check_kflag_member, 3970 .log_details = btf_func_proto_log, 3971 .show = btf_df_show, 3972 }; 3973 3974 static s32 btf_func_check_meta(struct btf_verifier_env *env, 3975 const struct btf_type *t, 3976 u32 meta_left) 3977 { 3978 if (!t->name_off || 3979 !btf_name_valid_identifier(env->btf, t->name_off)) { 3980 btf_verifier_log_type(env, t, "Invalid name"); 3981 return -EINVAL; 3982 } 3983 3984 if (btf_type_vlen(t) > BTF_FUNC_GLOBAL) { 3985 btf_verifier_log_type(env, t, "Invalid func linkage"); 3986 return -EINVAL; 3987 } 3988 3989 if (btf_type_kflag(t)) { 3990 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 3991 return -EINVAL; 3992 } 3993 3994 btf_verifier_log_type(env, t, NULL); 3995 3996 return 0; 3997 } 3998 3999 static int btf_func_resolve(struct btf_verifier_env *env, 4000 const struct resolve_vertex *v) 4001 { 4002 const struct btf_type *t = v->t; 4003 u32 next_type_id = t->type; 4004 int err; 4005 4006 err = btf_func_check(env, t); 4007 if (err) 4008 return err; 4009 4010 env_stack_pop_resolved(env, next_type_id, 0); 4011 return 0; 4012 } 4013 4014 static struct btf_kind_operations func_ops = { 4015 .check_meta = btf_func_check_meta, 4016 .resolve = btf_func_resolve, 4017 .check_member = btf_df_check_member, 4018 .check_kflag_member = btf_df_check_kflag_member, 4019 .log_details = btf_ref_type_log, 4020 .show = btf_df_show, 4021 }; 4022 4023 static s32 btf_var_check_meta(struct btf_verifier_env *env, 4024 const struct btf_type *t, 4025 u32 meta_left) 4026 { 4027 const struct btf_var *var; 4028 u32 meta_needed = sizeof(*var); 4029 4030 if (meta_left < meta_needed) { 4031 btf_verifier_log_basic(env, t, 4032 "meta_left:%u meta_needed:%u", 4033 meta_left, meta_needed); 4034 return -EINVAL; 4035 } 4036 4037 if (btf_type_vlen(t)) { 4038 btf_verifier_log_type(env, t, "vlen != 0"); 4039 return -EINVAL; 4040 } 4041 4042 if (btf_type_kflag(t)) { 4043 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 4044 return -EINVAL; 4045 } 4046 4047 if (!t->name_off || 4048 !__btf_name_valid(env->btf, t->name_off, true)) { 4049 btf_verifier_log_type(env, t, "Invalid name"); 4050 return -EINVAL; 4051 } 4052 4053 /* A var cannot be in type void */ 4054 if (!t->type || !BTF_TYPE_ID_VALID(t->type)) { 4055 btf_verifier_log_type(env, t, "Invalid type_id"); 4056 return -EINVAL; 4057 } 4058 4059 var = btf_type_var(t); 4060 if (var->linkage != BTF_VAR_STATIC && 4061 var->linkage != BTF_VAR_GLOBAL_ALLOCATED) { 4062 btf_verifier_log_type(env, t, "Linkage not supported"); 4063 return -EINVAL; 4064 } 4065 4066 btf_verifier_log_type(env, t, NULL); 4067 4068 return meta_needed; 4069 } 4070 4071 static void btf_var_log(struct btf_verifier_env *env, const struct btf_type *t) 4072 { 4073 const struct btf_var *var = btf_type_var(t); 4074 4075 btf_verifier_log(env, "type_id=%u linkage=%u", t->type, var->linkage); 4076 } 4077 4078 static const struct btf_kind_operations var_ops = { 4079 .check_meta = btf_var_check_meta, 4080 .resolve = btf_var_resolve, 4081 .check_member = btf_df_check_member, 4082 .check_kflag_member = btf_df_check_kflag_member, 4083 .log_details = btf_var_log, 4084 .show = btf_var_show, 4085 }; 4086 4087 
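/* For orientation before the checks below, a sketch of the VAR/DATASEC
 * shape being validated (the type IDs are hypothetical, not taken from
 * a real verifier log): for a BPF object with two globals,
 *
 *	int cnt;		// placed in .bss
 *	const int max = 16;	// placed in .rodata
 *
 * libbpf-style tooling typically emits one BTF_KIND_VAR per variable
 * and one BTF_KIND_DATASEC per ELF section:
 *
 *	[3] VAR 'cnt' type_id=1, linkage=global
 *	[4] VAR 'max' type_id=2, linkage=global
 *	[5] DATASEC '.bss' size=4 vlen=1
 *		type_id=3 offset=0 size=4
 *	[6] DATASEC '.rodata' size=4 vlen=1
 *		type_id=4 offset=0 size=4
 *
 * btf_datasec_check_meta() verifies exactly this shape: each
 * btf_var_secinfo must carry a valid type_id and must fit inside the
 * section's declared size without overlapping the previous entry.
 */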
static s32 btf_datasec_check_meta(struct btf_verifier_env *env, 4088 const struct btf_type *t, 4089 u32 meta_left) 4090 { 4091 const struct btf_var_secinfo *vsi; 4092 u64 last_vsi_end_off = 0, sum = 0; 4093 u32 i, meta_needed; 4094 4095 meta_needed = btf_type_vlen(t) * sizeof(*vsi); 4096 if (meta_left < meta_needed) { 4097 btf_verifier_log_basic(env, t, 4098 "meta_left:%u meta_needed:%u", 4099 meta_left, meta_needed); 4100 return -EINVAL; 4101 } 4102 4103 if (!t->size) { 4104 btf_verifier_log_type(env, t, "size == 0"); 4105 return -EINVAL; 4106 } 4107 4108 if (btf_type_kflag(t)) { 4109 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 4110 return -EINVAL; 4111 } 4112 4113 if (!t->name_off || 4114 !btf_name_valid_section(env->btf, t->name_off)) { 4115 btf_verifier_log_type(env, t, "Invalid name"); 4116 return -EINVAL; 4117 } 4118 4119 btf_verifier_log_type(env, t, NULL); 4120 4121 for_each_vsi(i, t, vsi) { 4122 /* A var cannot be in type void */ 4123 if (!vsi->type || !BTF_TYPE_ID_VALID(vsi->type)) { 4124 btf_verifier_log_vsi(env, t, vsi, 4125 "Invalid type_id"); 4126 return -EINVAL; 4127 } 4128 4129 if (vsi->offset < last_vsi_end_off || vsi->offset >= t->size) { 4130 btf_verifier_log_vsi(env, t, vsi, 4131 "Invalid offset"); 4132 return -EINVAL; 4133 } 4134 4135 if (!vsi->size || vsi->size > t->size) { 4136 btf_verifier_log_vsi(env, t, vsi, 4137 "Invalid size"); 4138 return -EINVAL; 4139 } 4140 4141 last_vsi_end_off = vsi->offset + vsi->size; 4142 if (last_vsi_end_off > t->size) { 4143 btf_verifier_log_vsi(env, t, vsi, 4144 "Invalid offset+size"); 4145 return -EINVAL; 4146 } 4147 4148 btf_verifier_log_vsi(env, t, vsi, NULL); 4149 sum += vsi->size; 4150 } 4151 4152 if (t->size < sum) { 4153 btf_verifier_log_type(env, t, "Invalid btf_info size"); 4154 return -EINVAL; 4155 } 4156 4157 return meta_needed; 4158 } 4159 4160 static int btf_datasec_resolve(struct btf_verifier_env *env, 4161 const struct resolve_vertex *v) 4162 { 4163 const struct btf_var_secinfo *vsi; 4164 struct btf *btf = env->btf; 4165 u16 i; 4166 4167 for_each_vsi_from(i, v->next_member, v->t, vsi) { 4168 u32 var_type_id = vsi->type, type_id, type_size = 0; 4169 const struct btf_type *var_type = btf_type_by_id(env->btf, 4170 var_type_id); 4171 if (!var_type || !btf_type_is_var(var_type)) { 4172 btf_verifier_log_vsi(env, v->t, vsi, 4173 "Not a VAR kind member"); 4174 return -EINVAL; 4175 } 4176 4177 if (!env_type_is_resolve_sink(env, var_type) && 4178 !env_type_is_resolved(env, var_type_id)) { 4179 env_stack_set_next_member(env, i + 1); 4180 return env_stack_push(env, var_type, var_type_id); 4181 } 4182 4183 type_id = var_type->type; 4184 if (!btf_type_id_size(btf, &type_id, &type_size)) { 4185 btf_verifier_log_vsi(env, v->t, vsi, "Invalid type"); 4186 return -EINVAL; 4187 } 4188 4189 if (vsi->size < type_size) { 4190 btf_verifier_log_vsi(env, v->t, vsi, "Invalid size"); 4191 return -EINVAL; 4192 } 4193 } 4194 4195 env_stack_pop_resolved(env, 0, 0); 4196 return 0; 4197 } 4198 4199 static void btf_datasec_log(struct btf_verifier_env *env, 4200 const struct btf_type *t) 4201 { 4202 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t)); 4203 } 4204 4205 static void btf_datasec_show(const struct btf *btf, 4206 const struct btf_type *t, u32 type_id, 4207 void *data, u8 bits_offset, 4208 struct btf_show *show) 4209 { 4210 const struct btf_var_secinfo *vsi; 4211 const struct btf_type *var; 4212 u32 i; 4213 4214 if (!btf_show_start_type(show, t, type_id, data)) 4215 return; 4216 4217 btf_show_type_value(show, 
"section (\"%s\") = {", 4218 __btf_name_by_offset(btf, t->name_off)); 4219 for_each_vsi(i, t, vsi) { 4220 var = btf_type_by_id(btf, vsi->type); 4221 if (i) 4222 btf_show(show, ","); 4223 btf_type_ops(var)->show(btf, var, vsi->type, 4224 data + vsi->offset, bits_offset, show); 4225 } 4226 btf_show_end_type(show); 4227 } 4228 4229 static const struct btf_kind_operations datasec_ops = { 4230 .check_meta = btf_datasec_check_meta, 4231 .resolve = btf_datasec_resolve, 4232 .check_member = btf_df_check_member, 4233 .check_kflag_member = btf_df_check_kflag_member, 4234 .log_details = btf_datasec_log, 4235 .show = btf_datasec_show, 4236 }; 4237 4238 static s32 btf_float_check_meta(struct btf_verifier_env *env, 4239 const struct btf_type *t, 4240 u32 meta_left) 4241 { 4242 if (btf_type_vlen(t)) { 4243 btf_verifier_log_type(env, t, "vlen != 0"); 4244 return -EINVAL; 4245 } 4246 4247 if (btf_type_kflag(t)) { 4248 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 4249 return -EINVAL; 4250 } 4251 4252 if (t->size != 2 && t->size != 4 && t->size != 8 && t->size != 12 && 4253 t->size != 16) { 4254 btf_verifier_log_type(env, t, "Invalid type_size"); 4255 return -EINVAL; 4256 } 4257 4258 btf_verifier_log_type(env, t, NULL); 4259 4260 return 0; 4261 } 4262 4263 static int btf_float_check_member(struct btf_verifier_env *env, 4264 const struct btf_type *struct_type, 4265 const struct btf_member *member, 4266 const struct btf_type *member_type) 4267 { 4268 u64 start_offset_bytes; 4269 u64 end_offset_bytes; 4270 u64 misalign_bits; 4271 u64 align_bytes; 4272 u64 align_bits; 4273 4274 /* Different architectures have different alignment requirements, so 4275 * here we check only for the reasonable minimum. This way we ensure 4276 * that types after CO-RE can pass the kernel BTF verifier. 
4277 */ 4278 align_bytes = min_t(u64, sizeof(void *), member_type->size); 4279 align_bits = align_bytes * BITS_PER_BYTE; 4280 div64_u64_rem(member->offset, align_bits, &misalign_bits); 4281 if (misalign_bits) { 4282 btf_verifier_log_member(env, struct_type, member, 4283 "Member is not properly aligned"); 4284 return -EINVAL; 4285 } 4286 4287 start_offset_bytes = member->offset / BITS_PER_BYTE; 4288 end_offset_bytes = start_offset_bytes + member_type->size; 4289 if (end_offset_bytes > struct_type->size) { 4290 btf_verifier_log_member(env, struct_type, member, 4291 "Member exceeds struct_size"); 4292 return -EINVAL; 4293 } 4294 4295 return 0; 4296 } 4297 4298 static void btf_float_log(struct btf_verifier_env *env, 4299 const struct btf_type *t) 4300 { 4301 btf_verifier_log(env, "size=%u", t->size); 4302 } 4303 4304 static const struct btf_kind_operations float_ops = { 4305 .check_meta = btf_float_check_meta, 4306 .resolve = btf_df_resolve, 4307 .check_member = btf_float_check_member, 4308 .check_kflag_member = btf_generic_check_kflag_member, 4309 .log_details = btf_float_log, 4310 .show = btf_df_show, 4311 }; 4312 4313 static s32 btf_decl_tag_check_meta(struct btf_verifier_env *env, 4314 const struct btf_type *t, 4315 u32 meta_left) 4316 { 4317 const struct btf_decl_tag *tag; 4318 u32 meta_needed = sizeof(*tag); 4319 s32 component_idx; 4320 const char *value; 4321 4322 if (meta_left < meta_needed) { 4323 btf_verifier_log_basic(env, t, 4324 "meta_left:%u meta_needed:%u", 4325 meta_left, meta_needed); 4326 return -EINVAL; 4327 } 4328 4329 value = btf_name_by_offset(env->btf, t->name_off); 4330 if (!value || !value[0]) { 4331 btf_verifier_log_type(env, t, "Invalid value"); 4332 return -EINVAL; 4333 } 4334 4335 if (btf_type_vlen(t)) { 4336 btf_verifier_log_type(env, t, "vlen != 0"); 4337 return -EINVAL; 4338 } 4339 4340 if (btf_type_kflag(t)) { 4341 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 4342 return -EINVAL; 4343 } 4344 4345 component_idx = btf_type_decl_tag(t)->component_idx; 4346 if (component_idx < -1) { 4347 btf_verifier_log_type(env, t, "Invalid component_idx"); 4348 return -EINVAL; 4349 } 4350 4351 btf_verifier_log_type(env, t, NULL); 4352 4353 return meta_needed; 4354 } 4355 4356 static int btf_decl_tag_resolve(struct btf_verifier_env *env, 4357 const struct resolve_vertex *v) 4358 { 4359 const struct btf_type *next_type; 4360 const struct btf_type *t = v->t; 4361 u32 next_type_id = t->type; 4362 struct btf *btf = env->btf; 4363 s32 component_idx; 4364 u32 vlen; 4365 4366 next_type = btf_type_by_id(btf, next_type_id); 4367 if (!next_type || !btf_type_is_decl_tag_target(next_type)) { 4368 btf_verifier_log_type(env, v->t, "Invalid type_id"); 4369 return -EINVAL; 4370 } 4371 4372 if (!env_type_is_resolve_sink(env, next_type) && 4373 !env_type_is_resolved(env, next_type_id)) 4374 return env_stack_push(env, next_type, next_type_id); 4375 4376 component_idx = btf_type_decl_tag(t)->component_idx; 4377 if (component_idx != -1) { 4378 if (btf_type_is_var(next_type) || btf_type_is_typedef(next_type)) { 4379 btf_verifier_log_type(env, v->t, "Invalid component_idx"); 4380 return -EINVAL; 4381 } 4382 4383 if (btf_type_is_struct(next_type)) { 4384 vlen = btf_type_vlen(next_type); 4385 } else { 4386 /* next_type should be a function */ 4387 next_type = btf_type_by_id(btf, next_type->type); 4388 vlen = btf_type_vlen(next_type); 4389 } 4390 4391 if ((u32)component_idx >= vlen) { 4392 btf_verifier_log_type(env, v->t, "Invalid component_idx"); 4393 return -EINVAL; 4394 } 4395 } 4396 
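	/* At this point next_type is a valid tag target. As a purely
	 * illustrative sketch (names and IDs made up), a tagged global
	 *
	 *	int q __attribute__((btf_decl_tag("rcu")));
	 *
	 * typically becomes
	 *
	 *	[1] INT 'int'
	 *	[2] VAR 'q' type_id=1
	 *	[3] DECL_TAG 'rcu' type=2 component_idx=-1
	 *
	 * while component_idx >= 0 only appears when the tag targets a
	 * struct/union member or a func_proto argument, which is what
	 * the vlen bound above just checked.
	 */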
4397 env_stack_pop_resolved(env, next_type_id, 0); 4398 4399 return 0; 4400 } 4401 4402 static void btf_decl_tag_log(struct btf_verifier_env *env, const struct btf_type *t) 4403 { 4404 btf_verifier_log(env, "type=%u component_idx=%d", t->type, 4405 btf_type_decl_tag(t)->component_idx); 4406 } 4407 4408 static const struct btf_kind_operations decl_tag_ops = { 4409 .check_meta = btf_decl_tag_check_meta, 4410 .resolve = btf_decl_tag_resolve, 4411 .check_member = btf_df_check_member, 4412 .check_kflag_member = btf_df_check_kflag_member, 4413 .log_details = btf_decl_tag_log, 4414 .show = btf_df_show, 4415 }; 4416 4417 static int btf_func_proto_check(struct btf_verifier_env *env, 4418 const struct btf_type *t) 4419 { 4420 const struct btf_type *ret_type; 4421 const struct btf_param *args; 4422 const struct btf *btf; 4423 u16 nr_args, i; 4424 int err; 4425 4426 btf = env->btf; 4427 args = (const struct btf_param *)(t + 1); 4428 nr_args = btf_type_vlen(t); 4429 4430 /* Check func return type which could be "void" (t->type == 0) */ 4431 if (t->type) { 4432 u32 ret_type_id = t->type; 4433 4434 ret_type = btf_type_by_id(btf, ret_type_id); 4435 if (!ret_type) { 4436 btf_verifier_log_type(env, t, "Invalid return type"); 4437 return -EINVAL; 4438 } 4439 4440 if (btf_type_needs_resolve(ret_type) && 4441 !env_type_is_resolved(env, ret_type_id)) { 4442 err = btf_resolve(env, ret_type, ret_type_id); 4443 if (err) 4444 return err; 4445 } 4446 4447 /* Ensure the return type is a type that has a size */ 4448 if (!btf_type_id_size(btf, &ret_type_id, NULL)) { 4449 btf_verifier_log_type(env, t, "Invalid return type"); 4450 return -EINVAL; 4451 } 4452 } 4453 4454 if (!nr_args) 4455 return 0; 4456 4457 /* Last func arg type_id could be 0 if it is a vararg */ 4458 if (!args[nr_args - 1].type) { 4459 if (args[nr_args - 1].name_off) { 4460 btf_verifier_log_type(env, t, "Invalid arg#%u", 4461 nr_args); 4462 return -EINVAL; 4463 } 4464 nr_args--; 4465 } 4466 4467 err = 0; 4468 for (i = 0; i < nr_args; i++) { 4469 const struct btf_type *arg_type; 4470 u32 arg_type_id; 4471 4472 arg_type_id = args[i].type; 4473 arg_type = btf_type_by_id(btf, arg_type_id); 4474 if (!arg_type) { 4475 btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1); 4476 err = -EINVAL; 4477 break; 4478 } 4479 4480 if (args[i].name_off && 4481 (!btf_name_offset_valid(btf, args[i].name_off) || 4482 !btf_name_valid_identifier(btf, args[i].name_off))) { 4483 btf_verifier_log_type(env, t, 4484 "Invalid arg#%u", i + 1); 4485 err = -EINVAL; 4486 break; 4487 } 4488 4489 if (btf_type_needs_resolve(arg_type) && 4490 !env_type_is_resolved(env, arg_type_id)) { 4491 err = btf_resolve(env, arg_type, arg_type_id); 4492 if (err) 4493 break; 4494 } 4495 4496 if (!btf_type_id_size(btf, &arg_type_id, NULL)) { 4497 btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1); 4498 err = -EINVAL; 4499 break; 4500 } 4501 } 4502 4503 return err; 4504 } 4505 4506 static int btf_func_check(struct btf_verifier_env *env, 4507 const struct btf_type *t) 4508 { 4509 const struct btf_type *proto_type; 4510 const struct btf_param *args; 4511 const struct btf *btf; 4512 u16 nr_args, i; 4513 4514 btf = env->btf; 4515 proto_type = btf_type_by_id(btf, t->type); 4516 4517 if (!proto_type || !btf_type_is_func_proto(proto_type)) { 4518 btf_verifier_log_type(env, t, "Invalid type_id"); 4519 return -EINVAL; 4520 } 4521 4522 args = (const struct btf_param *)(proto_type + 1); 4523 nr_args = btf_type_vlen(proto_type); 4524 for (i = 0; i < nr_args; i++) { 4525 if (!args[i].name_off && args[i].type) { 
4526 btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1); 4527 return -EINVAL; 4528 } 4529 } 4530 4531 return 0; 4532 } 4533 4534 static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = { 4535 [BTF_KIND_INT] = &int_ops, 4536 [BTF_KIND_PTR] = &ptr_ops, 4537 [BTF_KIND_ARRAY] = &array_ops, 4538 [BTF_KIND_STRUCT] = &struct_ops, 4539 [BTF_KIND_UNION] = &struct_ops, 4540 [BTF_KIND_ENUM] = &enum_ops, 4541 [BTF_KIND_FWD] = &fwd_ops, 4542 [BTF_KIND_TYPEDEF] = &modifier_ops, 4543 [BTF_KIND_VOLATILE] = &modifier_ops, 4544 [BTF_KIND_CONST] = &modifier_ops, 4545 [BTF_KIND_RESTRICT] = &modifier_ops, 4546 [BTF_KIND_FUNC] = &func_ops, 4547 [BTF_KIND_FUNC_PROTO] = &func_proto_ops, 4548 [BTF_KIND_VAR] = &var_ops, 4549 [BTF_KIND_DATASEC] = &datasec_ops, 4550 [BTF_KIND_FLOAT] = &float_ops, 4551 [BTF_KIND_DECL_TAG] = &decl_tag_ops, 4552 [BTF_KIND_TYPE_TAG] = &modifier_ops, 4553 [BTF_KIND_ENUM64] = &enum64_ops, 4554 }; 4555 4556 static s32 btf_check_meta(struct btf_verifier_env *env, 4557 const struct btf_type *t, 4558 u32 meta_left) 4559 { 4560 u32 saved_meta_left = meta_left; 4561 s32 var_meta_size; 4562 4563 if (meta_left < sizeof(*t)) { 4564 btf_verifier_log(env, "[%u] meta_left:%u meta_needed:%zu", 4565 env->log_type_id, meta_left, sizeof(*t)); 4566 return -EINVAL; 4567 } 4568 meta_left -= sizeof(*t); 4569 4570 if (t->info & ~BTF_INFO_MASK) { 4571 btf_verifier_log(env, "[%u] Invalid btf_info:%x", 4572 env->log_type_id, t->info); 4573 return -EINVAL; 4574 } 4575 4576 if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX || 4577 BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) { 4578 btf_verifier_log(env, "[%u] Invalid kind:%u", 4579 env->log_type_id, BTF_INFO_KIND(t->info)); 4580 return -EINVAL; 4581 } 4582 4583 if (!btf_name_offset_valid(env->btf, t->name_off)) { 4584 btf_verifier_log(env, "[%u] Invalid name_offset:%u", 4585 env->log_type_id, t->name_off); 4586 return -EINVAL; 4587 } 4588 4589 var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left); 4590 if (var_meta_size < 0) 4591 return var_meta_size; 4592 4593 meta_left -= var_meta_size; 4594 4595 return saved_meta_left - meta_left; 4596 } 4597 4598 static int btf_check_all_metas(struct btf_verifier_env *env) 4599 { 4600 struct btf *btf = env->btf; 4601 struct btf_header *hdr; 4602 void *cur, *end; 4603 4604 hdr = &btf->hdr; 4605 cur = btf->nohdr_data + hdr->type_off; 4606 end = cur + hdr->type_len; 4607 4608 env->log_type_id = btf->base_btf ? 
btf->start_id : 1;
	while (cur < end) {
		struct btf_type *t = cur;
		s32 meta_size;
		int err;

		meta_size = btf_check_meta(env, t, end - cur);
		if (meta_size < 0)
			return meta_size;

		/* btf_add_type() may fail to grow btf->types[] */
		err = btf_add_type(env, t);
		if (err)
			return err;

		cur += meta_size;
		env->log_type_id++;
	}

	return 0;
}

static bool btf_resolve_valid(struct btf_verifier_env *env,
			      const struct btf_type *t,
			      u32 type_id)
{
	struct btf *btf = env->btf;

	if (!env_type_is_resolved(env, type_id))
		return false;

	if (btf_type_is_struct(t) || btf_type_is_datasec(t))
		return !btf_resolved_type_id(btf, type_id) &&
		       !btf_resolved_type_size(btf, type_id);

	if (btf_type_is_decl_tag(t) || btf_type_is_func(t))
		return btf_resolved_type_id(btf, type_id) &&
		       !btf_resolved_type_size(btf, type_id);

	if (btf_type_is_modifier(t) || btf_type_is_ptr(t) ||
	    btf_type_is_var(t)) {
		t = btf_type_id_resolve(btf, &type_id);
		return t &&
		       !btf_type_is_modifier(t) &&
		       !btf_type_is_var(t) &&
		       !btf_type_is_datasec(t);
	}

	if (btf_type_is_array(t)) {
		const struct btf_array *array = btf_type_array(t);
		const struct btf_type *elem_type;
		u32 elem_type_id = array->type;
		u32 elem_size;

		elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
		return elem_type && !btf_type_is_modifier(elem_type) &&
			(array->nelems * elem_size ==
			 btf_resolved_type_size(btf, type_id));
	}

	return false;
}

static int btf_resolve(struct btf_verifier_env *env,
		       const struct btf_type *t, u32 type_id)
{
	u32 save_log_type_id = env->log_type_id;
	const struct resolve_vertex *v;
	int err = 0;

	env->resolve_mode = RESOLVE_TBD;
	env_stack_push(env, t, type_id);
	while (!err && (v = env_stack_peak(env))) {
		env->log_type_id = v->type_id;
		err = btf_type_ops(v->t)->resolve(env, v);
	}

	env->log_type_id = type_id;
	if (err == -E2BIG) {
		btf_verifier_log_type(env, t,
				      "Exceeded max resolving depth:%u",
				      MAX_RESOLVE_DEPTH);
	} else if (err == -EEXIST) {
		btf_verifier_log_type(env, t, "Loop detected");
	}

	/* Final sanity check */
	if (!err && !btf_resolve_valid(env, t, type_id)) {
		btf_verifier_log_type(env, t, "Invalid resolve state");
		err = -EINVAL;
	}

	env->log_type_id = save_log_type_id;
	return err;
}

static int btf_check_all_types(struct btf_verifier_env *env)
{
	struct btf *btf = env->btf;
	const struct btf_type *t;
	u32 type_id, i;
	int err;

	err = env_resolve_init(env);
	if (err)
		return err;

	env->phase++;
	for (i = btf->base_btf ?
0 : 1; i < btf->nr_types; i++) { 4712 type_id = btf->start_id + i; 4713 t = btf_type_by_id(btf, type_id); 4714 4715 env->log_type_id = type_id; 4716 if (btf_type_needs_resolve(t) && 4717 !env_type_is_resolved(env, type_id)) { 4718 err = btf_resolve(env, t, type_id); 4719 if (err) 4720 return err; 4721 } 4722 4723 if (btf_type_is_func_proto(t)) { 4724 err = btf_func_proto_check(env, t); 4725 if (err) 4726 return err; 4727 } 4728 } 4729 4730 return 0; 4731 } 4732 4733 static int btf_parse_type_sec(struct btf_verifier_env *env) 4734 { 4735 const struct btf_header *hdr = &env->btf->hdr; 4736 int err; 4737 4738 /* Type section must align to 4 bytes */ 4739 if (hdr->type_off & (sizeof(u32) - 1)) { 4740 btf_verifier_log(env, "Unaligned type_off"); 4741 return -EINVAL; 4742 } 4743 4744 if (!env->btf->base_btf && !hdr->type_len) { 4745 btf_verifier_log(env, "No type found"); 4746 return -EINVAL; 4747 } 4748 4749 err = btf_check_all_metas(env); 4750 if (err) 4751 return err; 4752 4753 return btf_check_all_types(env); 4754 } 4755 4756 static int btf_parse_str_sec(struct btf_verifier_env *env) 4757 { 4758 const struct btf_header *hdr; 4759 struct btf *btf = env->btf; 4760 const char *start, *end; 4761 4762 hdr = &btf->hdr; 4763 start = btf->nohdr_data + hdr->str_off; 4764 end = start + hdr->str_len; 4765 4766 if (end != btf->data + btf->data_size) { 4767 btf_verifier_log(env, "String section is not at the end"); 4768 return -EINVAL; 4769 } 4770 4771 btf->strings = start; 4772 4773 if (btf->base_btf && !hdr->str_len) 4774 return 0; 4775 if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET || end[-1]) { 4776 btf_verifier_log(env, "Invalid string section"); 4777 return -EINVAL; 4778 } 4779 if (!btf->base_btf && start[0]) { 4780 btf_verifier_log(env, "Invalid string section"); 4781 return -EINVAL; 4782 } 4783 4784 return 0; 4785 } 4786 4787 static const size_t btf_sec_info_offset[] = { 4788 offsetof(struct btf_header, type_off), 4789 offsetof(struct btf_header, str_off), 4790 }; 4791 4792 static int btf_sec_info_cmp(const void *a, const void *b) 4793 { 4794 const struct btf_sec_info *x = a; 4795 const struct btf_sec_info *y = b; 4796 4797 return (int)(x->off - y->off) ? 
: (int)(x->len - y->len); 4798 } 4799 4800 static int btf_check_sec_info(struct btf_verifier_env *env, 4801 u32 btf_data_size) 4802 { 4803 struct btf_sec_info secs[ARRAY_SIZE(btf_sec_info_offset)]; 4804 u32 total, expected_total, i; 4805 const struct btf_header *hdr; 4806 const struct btf *btf; 4807 4808 btf = env->btf; 4809 hdr = &btf->hdr; 4810 4811 /* Populate the secs from hdr */ 4812 for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) 4813 secs[i] = *(struct btf_sec_info *)((void *)hdr + 4814 btf_sec_info_offset[i]); 4815 4816 sort(secs, ARRAY_SIZE(btf_sec_info_offset), 4817 sizeof(struct btf_sec_info), btf_sec_info_cmp, NULL); 4818 4819 /* Check for gaps and overlap among sections */ 4820 total = 0; 4821 expected_total = btf_data_size - hdr->hdr_len; 4822 for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) { 4823 if (expected_total < secs[i].off) { 4824 btf_verifier_log(env, "Invalid section offset"); 4825 return -EINVAL; 4826 } 4827 if (total < secs[i].off) { 4828 /* gap */ 4829 btf_verifier_log(env, "Unsupported section found"); 4830 return -EINVAL; 4831 } 4832 if (total > secs[i].off) { 4833 btf_verifier_log(env, "Section overlap found"); 4834 return -EINVAL; 4835 } 4836 if (expected_total - total < secs[i].len) { 4837 btf_verifier_log(env, 4838 "Total section length too long"); 4839 return -EINVAL; 4840 } 4841 total += secs[i].len; 4842 } 4843 4844 /* There is data other than hdr and known sections */ 4845 if (expected_total != total) { 4846 btf_verifier_log(env, "Unsupported section found"); 4847 return -EINVAL; 4848 } 4849 4850 return 0; 4851 } 4852 4853 static int btf_parse_hdr(struct btf_verifier_env *env) 4854 { 4855 u32 hdr_len, hdr_copy, btf_data_size; 4856 const struct btf_header *hdr; 4857 struct btf *btf; 4858 int err; 4859 4860 btf = env->btf; 4861 btf_data_size = btf->data_size; 4862 4863 if (btf_data_size < offsetofend(struct btf_header, hdr_len)) { 4864 btf_verifier_log(env, "hdr_len not found"); 4865 return -EINVAL; 4866 } 4867 4868 hdr = btf->data; 4869 hdr_len = hdr->hdr_len; 4870 if (btf_data_size < hdr_len) { 4871 btf_verifier_log(env, "btf_header not found"); 4872 return -EINVAL; 4873 } 4874 4875 /* Ensure the unsupported header fields are zero */ 4876 if (hdr_len > sizeof(btf->hdr)) { 4877 u8 *expected_zero = btf->data + sizeof(btf->hdr); 4878 u8 *end = btf->data + hdr_len; 4879 4880 for (; expected_zero < end; expected_zero++) { 4881 if (*expected_zero) { 4882 btf_verifier_log(env, "Unsupported btf_header"); 4883 return -E2BIG; 4884 } 4885 } 4886 } 4887 4888 hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr)); 4889 memcpy(&btf->hdr, btf->data, hdr_copy); 4890 4891 hdr = &btf->hdr; 4892 4893 btf_verifier_log_hdr(env, btf_data_size); 4894 4895 if (hdr->magic != BTF_MAGIC) { 4896 btf_verifier_log(env, "Invalid magic"); 4897 return -EINVAL; 4898 } 4899 4900 if (hdr->version != BTF_VERSION) { 4901 btf_verifier_log(env, "Unsupported version"); 4902 return -ENOTSUPP; 4903 } 4904 4905 if (hdr->flags) { 4906 btf_verifier_log(env, "Unsupported flags"); 4907 return -ENOTSUPP; 4908 } 4909 4910 if (!btf->base_btf && btf_data_size == hdr->hdr_len) { 4911 btf_verifier_log(env, "No data"); 4912 return -EINVAL; 4913 } 4914 4915 err = btf_check_sec_info(env, btf_data_size); 4916 if (err) 4917 return err; 4918 4919 return 0; 4920 } 4921 4922 static int btf_check_type_tags(struct btf_verifier_env *env, 4923 struct btf *btf, int start_id) 4924 { 4925 int i, n, good_id = start_id - 1; 4926 bool in_tags; 4927 4928 n = btf_nr_types(btf); 4929 for (i = start_id; i < n; i++) { 4930 
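		/* Each modifier chain is walked once. Per the ordering
		 * rule enforced below, a chain like (illustrative)
		 *
		 *	type_tag -> type_tag -> const -> int
		 *
		 * is accepted, while
		 *
		 *	const -> type_tag -> int
		 *
		 * is rejected, because a type tag shows up after an
		 * ordinary modifier.
		 */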
const struct btf_type *t; 4931 u32 cur_id = i; 4932 4933 t = btf_type_by_id(btf, i); 4934 if (!t) 4935 return -EINVAL; 4936 if (!btf_type_is_modifier(t)) 4937 continue; 4938 4939 cond_resched(); 4940 4941 in_tags = btf_type_is_type_tag(t); 4942 while (btf_type_is_modifier(t)) { 4943 if (btf_type_is_type_tag(t)) { 4944 if (!in_tags) { 4945 btf_verifier_log(env, "Type tags don't precede modifiers"); 4946 return -EINVAL; 4947 } 4948 } else if (in_tags) { 4949 in_tags = false; 4950 } 4951 if (cur_id <= good_id) 4952 break; 4953 /* Move to next type */ 4954 cur_id = t->type; 4955 t = btf_type_by_id(btf, cur_id); 4956 if (!t) 4957 return -EINVAL; 4958 } 4959 good_id = i; 4960 } 4961 return 0; 4962 } 4963 4964 static struct btf *btf_parse(bpfptr_t btf_data, u32 btf_data_size, 4965 u32 log_level, char __user *log_ubuf, u32 log_size) 4966 { 4967 struct btf_verifier_env *env = NULL; 4968 struct bpf_verifier_log *log; 4969 struct btf *btf = NULL; 4970 u8 *data; 4971 int err; 4972 4973 if (btf_data_size > BTF_MAX_SIZE) 4974 return ERR_PTR(-E2BIG); 4975 4976 env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN); 4977 if (!env) 4978 return ERR_PTR(-ENOMEM); 4979 4980 log = &env->log; 4981 if (log_level || log_ubuf || log_size) { 4982 /* user requested verbose verifier output 4983 * and supplied buffer to store the verification trace 4984 */ 4985 log->level = log_level; 4986 log->ubuf = log_ubuf; 4987 log->len_total = log_size; 4988 4989 /* log attributes have to be sane */ 4990 if (!bpf_verifier_log_attr_valid(log)) { 4991 err = -EINVAL; 4992 goto errout; 4993 } 4994 } 4995 4996 btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN); 4997 if (!btf) { 4998 err = -ENOMEM; 4999 goto errout; 5000 } 5001 env->btf = btf; 5002 5003 data = kvmalloc(btf_data_size, GFP_KERNEL | __GFP_NOWARN); 5004 if (!data) { 5005 err = -ENOMEM; 5006 goto errout; 5007 } 5008 5009 btf->data = data; 5010 btf->data_size = btf_data_size; 5011 5012 if (copy_from_bpfptr(data, btf_data, btf_data_size)) { 5013 err = -EFAULT; 5014 goto errout; 5015 } 5016 5017 err = btf_parse_hdr(env); 5018 if (err) 5019 goto errout; 5020 5021 btf->nohdr_data = btf->data + btf->hdr.hdr_len; 5022 5023 err = btf_parse_str_sec(env); 5024 if (err) 5025 goto errout; 5026 5027 err = btf_parse_type_sec(env); 5028 if (err) 5029 goto errout; 5030 5031 err = btf_check_type_tags(env, btf, 1); 5032 if (err) 5033 goto errout; 5034 5035 if (log->level && bpf_verifier_log_full(log)) { 5036 err = -ENOSPC; 5037 goto errout; 5038 } 5039 5040 btf_verifier_env_free(env); 5041 refcount_set(&btf->refcnt, 1); 5042 return btf; 5043 5044 errout: 5045 btf_verifier_env_free(env); 5046 if (btf) 5047 btf_free(btf); 5048 return ERR_PTR(err); 5049 } 5050 5051 extern char __weak __start_BTF[]; 5052 extern char __weak __stop_BTF[]; 5053 extern struct btf *btf_vmlinux; 5054 5055 #define BPF_MAP_TYPE(_id, _ops) 5056 #define BPF_LINK_TYPE(_id, _name) 5057 static union { 5058 struct bpf_ctx_convert { 5059 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ 5060 prog_ctx_type _id##_prog; \ 5061 kern_ctx_type _id##_kern; 5062 #include <linux/bpf_types.h> 5063 #undef BPF_PROG_TYPE 5064 } *__t; 5065 /* 't' is written once under lock. Read many times. 
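	 * (btf_parse_vmlinux() is the only writer, under
	 * bpf_verifier_lock; readers such as btf_get_prog_ctx_type()
	 * below just load the pointer.)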
*/ 5066 const struct btf_type *t; 5067 } bpf_ctx_convert; 5068 enum { 5069 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ 5070 __ctx_convert##_id, 5071 #include <linux/bpf_types.h> 5072 #undef BPF_PROG_TYPE 5073 __ctx_convert_unused, /* to avoid empty enum in extreme .config */ 5074 }; 5075 static u8 bpf_ctx_convert_map[] = { 5076 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ 5077 [_id] = __ctx_convert##_id, 5078 #include <linux/bpf_types.h> 5079 #undef BPF_PROG_TYPE 5080 0, /* avoid empty array */ 5081 }; 5082 #undef BPF_MAP_TYPE 5083 #undef BPF_LINK_TYPE 5084 5085 static const struct btf_member * 5086 btf_get_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf, 5087 const struct btf_type *t, enum bpf_prog_type prog_type, 5088 int arg) 5089 { 5090 const struct btf_type *conv_struct; 5091 const struct btf_type *ctx_struct; 5092 const struct btf_member *ctx_type; 5093 const char *tname, *ctx_tname; 5094 5095 conv_struct = bpf_ctx_convert.t; 5096 if (!conv_struct) { 5097 bpf_log(log, "btf_vmlinux is malformed\n"); 5098 return NULL; 5099 } 5100 t = btf_type_by_id(btf, t->type); 5101 while (btf_type_is_modifier(t)) 5102 t = btf_type_by_id(btf, t->type); 5103 if (!btf_type_is_struct(t)) { 5104 /* Only pointer to struct is supported for now. 5105 * That means that BPF_PROG_TYPE_TRACEPOINT with BTF 5106 * is not supported yet. 5107 * BPF_PROG_TYPE_RAW_TRACEPOINT is fine. 5108 */ 5109 return NULL; 5110 } 5111 tname = btf_name_by_offset(btf, t->name_off); 5112 if (!tname) { 5113 bpf_log(log, "arg#%d struct doesn't have a name\n", arg); 5114 return NULL; 5115 } 5116 /* prog_type is valid bpf program type. No need for bounds check. */ 5117 ctx_type = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2; 5118 /* ctx_struct is a pointer to prog_ctx_type in vmlinux. 5119 * Like 'struct __sk_buff' 5120 */ 5121 ctx_struct = btf_type_by_id(btf_vmlinux, ctx_type->type); 5122 if (!ctx_struct) 5123 /* should not happen */ 5124 return NULL; 5125 ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_struct->name_off); 5126 if (!ctx_tname) { 5127 /* should not happen */ 5128 bpf_log(log, "Please fix kernel include/linux/bpf_types.h\n"); 5129 return NULL; 5130 } 5131 /* only compare that prog's ctx type name is the same as 5132 * kernel expects. No need to compare field by field. 
5133 * It's ok for bpf prog to do: 5134 * struct __sk_buff {}; 5135 * int socket_filter_bpf_prog(struct __sk_buff *skb) 5136 * { // no fields of skb are ever used } 5137 */ 5138 if (strcmp(ctx_tname, tname)) 5139 return NULL; 5140 return ctx_type; 5141 } 5142 5143 static int btf_translate_to_vmlinux(struct bpf_verifier_log *log, 5144 struct btf *btf, 5145 const struct btf_type *t, 5146 enum bpf_prog_type prog_type, 5147 int arg) 5148 { 5149 const struct btf_member *prog_ctx_type, *kern_ctx_type; 5150 5151 prog_ctx_type = btf_get_prog_ctx_type(log, btf, t, prog_type, arg); 5152 if (!prog_ctx_type) 5153 return -ENOENT; 5154 kern_ctx_type = prog_ctx_type + 1; 5155 return kern_ctx_type->type; 5156 } 5157 5158 BTF_ID_LIST(bpf_ctx_convert_btf_id) 5159 BTF_ID(struct, bpf_ctx_convert) 5160 5161 struct btf *btf_parse_vmlinux(void) 5162 { 5163 struct btf_verifier_env *env = NULL; 5164 struct bpf_verifier_log *log; 5165 struct btf *btf = NULL; 5166 int err; 5167 5168 env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN); 5169 if (!env) 5170 return ERR_PTR(-ENOMEM); 5171 5172 log = &env->log; 5173 log->level = BPF_LOG_KERNEL; 5174 5175 btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN); 5176 if (!btf) { 5177 err = -ENOMEM; 5178 goto errout; 5179 } 5180 env->btf = btf; 5181 5182 btf->data = __start_BTF; 5183 btf->data_size = __stop_BTF - __start_BTF; 5184 btf->kernel_btf = true; 5185 snprintf(btf->name, sizeof(btf->name), "vmlinux"); 5186 5187 err = btf_parse_hdr(env); 5188 if (err) 5189 goto errout; 5190 5191 btf->nohdr_data = btf->data + btf->hdr.hdr_len; 5192 5193 err = btf_parse_str_sec(env); 5194 if (err) 5195 goto errout; 5196 5197 err = btf_check_all_metas(env); 5198 if (err) 5199 goto errout; 5200 5201 err = btf_check_type_tags(env, btf, 1); 5202 if (err) 5203 goto errout; 5204 5205 /* btf_parse_vmlinux() runs under bpf_verifier_lock */ 5206 bpf_ctx_convert.t = btf_type_by_id(btf, bpf_ctx_convert_btf_id[0]); 5207 5208 bpf_struct_ops_init(btf, log); 5209 5210 refcount_set(&btf->refcnt, 1); 5211 5212 err = btf_alloc_id(btf); 5213 if (err) 5214 goto errout; 5215 5216 btf_verifier_env_free(env); 5217 return btf; 5218 5219 errout: 5220 btf_verifier_env_free(env); 5221 if (btf) { 5222 kvfree(btf->types); 5223 kfree(btf); 5224 } 5225 return ERR_PTR(err); 5226 } 5227 5228 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES 5229 5230 static struct btf *btf_parse_module(const char *module_name, const void *data, unsigned int data_size) 5231 { 5232 struct btf_verifier_env *env = NULL; 5233 struct bpf_verifier_log *log; 5234 struct btf *btf = NULL, *base_btf; 5235 int err; 5236 5237 base_btf = bpf_get_btf_vmlinux(); 5238 if (IS_ERR(base_btf)) 5239 return base_btf; 5240 if (!base_btf) 5241 return ERR_PTR(-EINVAL); 5242 5243 env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN); 5244 if (!env) 5245 return ERR_PTR(-ENOMEM); 5246 5247 log = &env->log; 5248 log->level = BPF_LOG_KERNEL; 5249 5250 btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN); 5251 if (!btf) { 5252 err = -ENOMEM; 5253 goto errout; 5254 } 5255 env->btf = btf; 5256 5257 btf->base_btf = base_btf; 5258 btf->start_id = base_btf->nr_types; 5259 btf->start_str_off = base_btf->hdr.str_len; 5260 btf->kernel_btf = true; 5261 snprintf(btf->name, sizeof(btf->name), "%s", module_name); 5262 5263 btf->data = kvmalloc(data_size, GFP_KERNEL | __GFP_NOWARN); 5264 if (!btf->data) { 5265 err = -ENOMEM; 5266 goto errout; 5267 } 5268 memcpy(btf->data, data, data_size); 5269 btf->data_size = data_size; 5270 5271 err = btf_parse_hdr(env); 5272 if (err) 5273 goto 
errout; 5274 5275 btf->nohdr_data = btf->data + btf->hdr.hdr_len; 5276 5277 err = btf_parse_str_sec(env); 5278 if (err) 5279 goto errout; 5280 5281 err = btf_check_all_metas(env); 5282 if (err) 5283 goto errout; 5284 5285 err = btf_check_type_tags(env, btf, btf_nr_types(base_btf)); 5286 if (err) 5287 goto errout; 5288 5289 btf_verifier_env_free(env); 5290 refcount_set(&btf->refcnt, 1); 5291 return btf; 5292 5293 errout: 5294 btf_verifier_env_free(env); 5295 if (btf) { 5296 kvfree(btf->data); 5297 kvfree(btf->types); 5298 kfree(btf); 5299 } 5300 return ERR_PTR(err); 5301 } 5302 5303 #endif /* CONFIG_DEBUG_INFO_BTF_MODULES */ 5304 5305 struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog) 5306 { 5307 struct bpf_prog *tgt_prog = prog->aux->dst_prog; 5308 5309 if (tgt_prog) 5310 return tgt_prog->aux->btf; 5311 else 5312 return prog->aux->attach_btf; 5313 } 5314 5315 static bool is_int_ptr(struct btf *btf, const struct btf_type *t) 5316 { 5317 /* t comes in already as a pointer */ 5318 t = btf_type_by_id(btf, t->type); 5319 5320 /* allow const */ 5321 if (BTF_INFO_KIND(t->info) == BTF_KIND_CONST) 5322 t = btf_type_by_id(btf, t->type); 5323 5324 return btf_type_is_int(t); 5325 } 5326 5327 bool btf_ctx_access(int off, int size, enum bpf_access_type type, 5328 const struct bpf_prog *prog, 5329 struct bpf_insn_access_aux *info) 5330 { 5331 const struct btf_type *t = prog->aux->attach_func_proto; 5332 struct bpf_prog *tgt_prog = prog->aux->dst_prog; 5333 struct btf *btf = bpf_prog_get_target_btf(prog); 5334 const char *tname = prog->aux->attach_func_name; 5335 struct bpf_verifier_log *log = info->log; 5336 const struct btf_param *args; 5337 const char *tag_value; 5338 u32 nr_args, arg; 5339 int i, ret; 5340 5341 if (off % 8) { 5342 bpf_log(log, "func '%s' offset %d is not multiple of 8\n", 5343 tname, off); 5344 return false; 5345 } 5346 arg = off / 8; 5347 args = (const struct btf_param *)(t + 1); 5348 /* if (t == NULL) Fall back to default BPF prog with 5349 * MAX_BPF_FUNC_REG_ARGS u64 arguments. 5350 */ 5351 nr_args = t ? btf_type_vlen(t) : MAX_BPF_FUNC_REG_ARGS; 5352 if (prog->aux->attach_btf_trace) { 5353 /* skip first 'void *__data' argument in btf_trace_##name typedef */ 5354 args++; 5355 nr_args--; 5356 } 5357 5358 if (arg > nr_args) { 5359 bpf_log(log, "func '%s' doesn't have %d-th argument\n", 5360 tname, arg + 1); 5361 return false; 5362 } 5363 5364 if (arg == nr_args) { 5365 switch (prog->expected_attach_type) { 5366 case BPF_LSM_CGROUP: 5367 case BPF_LSM_MAC: 5368 case BPF_TRACE_FEXIT: 5369 /* When LSM programs are attached to void LSM hooks 5370 * they use FEXIT trampolines and when attached to 5371 * int LSM hooks, they use MODIFY_RETURN trampolines. 5372 * 5373 * While the LSM programs are BPF_MODIFY_RETURN-like 5374 * the check: 5375 * 5376 * if (ret_type != 'int') 5377 * return -EINVAL; 5378 * 5379 * is _not_ done here. This is still safe as LSM hooks 5380 * have only void and int return types. 5381 */ 5382 if (!t) 5383 return true; 5384 t = btf_type_by_id(btf, t->type); 5385 break; 5386 case BPF_MODIFY_RETURN: 5387 /* For now the BPF_MODIFY_RETURN can only be attached to 5388 * functions that return an int. 
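		 *
		 * E.g. attaching fmod_ret to a hook like
		 *
		 *	int security_file_open(struct file *file);
		 *
		 * is accepted, while a hook returning a struct or a
		 * 128-bit integer would fail the
		 * btf_type_is_small_int() check below.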
5389 */ 5390 if (!t) 5391 return false; 5392 5393 t = btf_type_skip_modifiers(btf, t->type, NULL); 5394 if (!btf_type_is_small_int(t)) { 5395 bpf_log(log, 5396 "ret type %s not allowed for fmod_ret\n", 5397 btf_kind_str[BTF_INFO_KIND(t->info)]); 5398 return false; 5399 } 5400 break; 5401 default: 5402 bpf_log(log, "func '%s' doesn't have %d-th argument\n", 5403 tname, arg + 1); 5404 return false; 5405 } 5406 } else { 5407 if (!t) 5408 /* Default prog with MAX_BPF_FUNC_REG_ARGS args */ 5409 return true; 5410 t = btf_type_by_id(btf, args[arg].type); 5411 } 5412 5413 /* skip modifiers */ 5414 while (btf_type_is_modifier(t)) 5415 t = btf_type_by_id(btf, t->type); 5416 if (btf_type_is_small_int(t) || btf_is_any_enum(t)) 5417 /* accessing a scalar */ 5418 return true; 5419 if (!btf_type_is_ptr(t)) { 5420 bpf_log(log, 5421 "func '%s' arg%d '%s' has type %s. Only pointer access is allowed\n", 5422 tname, arg, 5423 __btf_name_by_offset(btf, t->name_off), 5424 btf_kind_str[BTF_INFO_KIND(t->info)]); 5425 return false; 5426 } 5427 5428 /* check for PTR_TO_RDONLY_BUF_OR_NULL or PTR_TO_RDWR_BUF_OR_NULL */ 5429 for (i = 0; i < prog->aux->ctx_arg_info_size; i++) { 5430 const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i]; 5431 u32 type, flag; 5432 5433 type = base_type(ctx_arg_info->reg_type); 5434 flag = type_flag(ctx_arg_info->reg_type); 5435 if (ctx_arg_info->offset == off && type == PTR_TO_BUF && 5436 (flag & PTR_MAYBE_NULL)) { 5437 info->reg_type = ctx_arg_info->reg_type; 5438 return true; 5439 } 5440 } 5441 5442 if (t->type == 0) 5443 /* This is a pointer to void. 5444 * It is the same as scalar from the verifier safety pov. 5445 * No further pointer walking is allowed. 5446 */ 5447 return true; 5448 5449 if (is_int_ptr(btf, t)) 5450 return true; 5451 5452 /* this is a pointer to another type */ 5453 for (i = 0; i < prog->aux->ctx_arg_info_size; i++) { 5454 const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i]; 5455 5456 if (ctx_arg_info->offset == off) { 5457 if (!ctx_arg_info->btf_id) { 5458 bpf_log(log,"invalid btf_id for context argument offset %u\n", off); 5459 return false; 5460 } 5461 5462 info->reg_type = ctx_arg_info->reg_type; 5463 info->btf = btf_vmlinux; 5464 info->btf_id = ctx_arg_info->btf_id; 5465 return true; 5466 } 5467 } 5468 5469 info->reg_type = PTR_TO_BTF_ID; 5470 if (tgt_prog) { 5471 enum bpf_prog_type tgt_type; 5472 5473 if (tgt_prog->type == BPF_PROG_TYPE_EXT) 5474 tgt_type = tgt_prog->aux->saved_dst_prog_type; 5475 else 5476 tgt_type = tgt_prog->type; 5477 5478 ret = btf_translate_to_vmlinux(log, btf, t, tgt_type, arg); 5479 if (ret > 0) { 5480 info->btf = btf_vmlinux; 5481 info->btf_id = ret; 5482 return true; 5483 } else { 5484 return false; 5485 } 5486 } 5487 5488 info->btf = btf; 5489 info->btf_id = t->type; 5490 t = btf_type_by_id(btf, t->type); 5491 5492 if (btf_type_is_type_tag(t)) { 5493 tag_value = __btf_name_by_offset(btf, t->name_off); 5494 if (strcmp(tag_value, "user") == 0) 5495 info->reg_type |= MEM_USER; 5496 if (strcmp(tag_value, "percpu") == 0) 5497 info->reg_type |= MEM_PERCPU; 5498 } 5499 5500 /* skip modifiers */ 5501 while (btf_type_is_modifier(t)) { 5502 info->btf_id = t->type; 5503 t = btf_type_by_id(btf, t->type); 5504 } 5505 if (!btf_type_is_struct(t)) { 5506 bpf_log(log, 5507 "func '%s' arg%d type %s is not a struct\n", 5508 tname, arg, btf_kind_str[BTF_INFO_KIND(t->info)]); 5509 return false; 5510 } 5511 bpf_log(log, "func '%s' arg%d has btf_id %d type %s '%s'\n", 5512 tname, arg, info->btf_id, 
btf_kind_str[BTF_INFO_KIND(t->info)], 5513 __btf_name_by_offset(btf, t->name_off)); 5514 return true; 5515 } 5516 5517 enum bpf_struct_walk_result { 5518 /* < 0 error */ 5519 WALK_SCALAR = 0, 5520 WALK_PTR, 5521 WALK_STRUCT, 5522 }; 5523 5524 static int btf_struct_walk(struct bpf_verifier_log *log, const struct btf *btf, 5525 const struct btf_type *t, int off, int size, 5526 u32 *next_btf_id, enum bpf_type_flag *flag) 5527 { 5528 u32 i, moff, mtrue_end, msize = 0, total_nelems = 0; 5529 const struct btf_type *mtype, *elem_type = NULL; 5530 const struct btf_member *member; 5531 const char *tname, *mname, *tag_value; 5532 u32 vlen, elem_id, mid; 5533 5534 again: 5535 tname = __btf_name_by_offset(btf, t->name_off); 5536 if (!btf_type_is_struct(t)) { 5537 bpf_log(log, "Type '%s' is not a struct\n", tname); 5538 return -EINVAL; 5539 } 5540 5541 vlen = btf_type_vlen(t); 5542 if (off + size > t->size) { 5543 /* If the last element is a variable size array, we may 5544 * need to relax the rule. 5545 */ 5546 struct btf_array *array_elem; 5547 5548 if (vlen == 0) 5549 goto error; 5550 5551 member = btf_type_member(t) + vlen - 1; 5552 mtype = btf_type_skip_modifiers(btf, member->type, 5553 NULL); 5554 if (!btf_type_is_array(mtype)) 5555 goto error; 5556 5557 array_elem = (struct btf_array *)(mtype + 1); 5558 if (array_elem->nelems != 0) 5559 goto error; 5560 5561 moff = __btf_member_bit_offset(t, member) / 8; 5562 if (off < moff) 5563 goto error; 5564 5565 /* Only allow structure for now, can be relaxed for 5566 * other types later. 5567 */ 5568 t = btf_type_skip_modifiers(btf, array_elem->type, 5569 NULL); 5570 if (!btf_type_is_struct(t)) 5571 goto error; 5572 5573 off = (off - moff) % t->size; 5574 goto again; 5575 5576 error: 5577 bpf_log(log, "access beyond struct %s at off %u size %u\n", 5578 tname, off, size); 5579 return -EACCES; 5580 } 5581 5582 for_each_member(i, t, member) { 5583 /* offset of the field in bytes */ 5584 moff = __btf_member_bit_offset(t, member) / 8; 5585 if (off + size <= moff) 5586 /* won't find anything, field is already too far */ 5587 break; 5588 5589 if (__btf_member_bitfield_size(t, member)) { 5590 u32 end_bit = __btf_member_bit_offset(t, member) + 5591 __btf_member_bitfield_size(t, member); 5592 5593 /* off <= moff instead of off == moff because clang 5594 * does not generate a BTF member for anonymous 5595 * bitfield like the ":16" here: 5596 * struct { 5597 * int :16; 5598 * int x:8; 5599 * }; 5600 */ 5601 if (off <= moff && 5602 BITS_ROUNDUP_BYTES(end_bit) <= off + size) 5603 return WALK_SCALAR; 5604 5605 /* off may be accessing a following member 5606 * 5607 * or 5608 * 5609 * Doing partial access at either end of this 5610 * bitfield. Continue on this case also to 5611 * treat it as not accessing this bitfield 5612 * and eventually error out as field not 5613 * found to keep it simple. 5614 * It could be relaxed if there was a legit 5615 * partial access case later. 
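		 *
		 * Worked example (illustrative): for 'int y:16;' at
		 * bit offset 16 (moff == 2, end_bit == 32), a 1-byte
		 * read at off == 2 has BITS_ROUNDUP_BYTES(32) == 4 >
		 * off + size == 3, so the full-coverage check above
		 * fails and the loop moves on.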
5616 */ 5617 continue; 5618 } 5619 5620 /* In case of "off" is pointing to holes of a struct */ 5621 if (off < moff) 5622 break; 5623 5624 /* type of the field */ 5625 mid = member->type; 5626 mtype = btf_type_by_id(btf, member->type); 5627 mname = __btf_name_by_offset(btf, member->name_off); 5628 5629 mtype = __btf_resolve_size(btf, mtype, &msize, 5630 &elem_type, &elem_id, &total_nelems, 5631 &mid); 5632 if (IS_ERR(mtype)) { 5633 bpf_log(log, "field %s doesn't have size\n", mname); 5634 return -EFAULT; 5635 } 5636 5637 mtrue_end = moff + msize; 5638 if (off >= mtrue_end) 5639 /* no overlap with member, keep iterating */ 5640 continue; 5641 5642 if (btf_type_is_array(mtype)) { 5643 u32 elem_idx; 5644 5645 /* __btf_resolve_size() above helps to 5646 * linearize a multi-dimensional array. 5647 * 5648 * The logic here is treating an array 5649 * in a struct as the following way: 5650 * 5651 * struct outer { 5652 * struct inner array[2][2]; 5653 * }; 5654 * 5655 * looks like: 5656 * 5657 * struct outer { 5658 * struct inner array_elem0; 5659 * struct inner array_elem1; 5660 * struct inner array_elem2; 5661 * struct inner array_elem3; 5662 * }; 5663 * 5664 * When accessing outer->array[1][0], it moves 5665 * moff to "array_elem2", set mtype to 5666 * "struct inner", and msize also becomes 5667 * sizeof(struct inner). Then most of the 5668 * remaining logic will fall through without 5669 * caring the current member is an array or 5670 * not. 5671 * 5672 * Unlike mtype/msize/moff, mtrue_end does not 5673 * change. The naming difference ("_true") tells 5674 * that it is not always corresponding to 5675 * the current mtype/msize/moff. 5676 * It is the true end of the current 5677 * member (i.e. array in this case). That 5678 * will allow an int array to be accessed like 5679 * a scratch space, 5680 * i.e. allow access beyond the size of 5681 * the array's element as long as it is 5682 * within the mtrue_end boundary. 
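		 *
		 * Putting numbers on it (assuming sizeof(struct inner)
		 * == 8 and the array at moff == 0): msize starts out
		 * as the whole 32-byte array with total_nelems == 4,
		 * so msize /= total_nelems leaves 8; for
		 * outer->array[1][0], off == 16 and elem_idx ==
		 * (16 - 0) / 8 == 2, i.e. "array_elem2" above.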
5683 */ 5684 5685 /* skip empty array */ 5686 if (moff == mtrue_end) 5687 continue; 5688 5689 msize /= total_nelems; 5690 elem_idx = (off - moff) / msize; 5691 moff += elem_idx * msize; 5692 mtype = elem_type; 5693 mid = elem_id; 5694 } 5695 5696 /* the 'off' we're looking for is either equal to start 5697 * of this field or inside of this struct 5698 */ 5699 if (btf_type_is_struct(mtype)) { 5700 /* our field must be inside that union or struct */ 5701 t = mtype; 5702 5703 /* return if the offset matches the member offset */ 5704 if (off == moff) { 5705 *next_btf_id = mid; 5706 return WALK_STRUCT; 5707 } 5708 5709 /* adjust offset we're looking for */ 5710 off -= moff; 5711 goto again; 5712 } 5713 5714 if (btf_type_is_ptr(mtype)) { 5715 const struct btf_type *stype, *t; 5716 enum bpf_type_flag tmp_flag = 0; 5717 u32 id; 5718 5719 if (msize != size || off != moff) { 5720 bpf_log(log, 5721 "cannot access ptr member %s with moff %u in struct %s with off %u size %u\n", 5722 mname, moff, tname, off, size); 5723 return -EACCES; 5724 } 5725 5726 /* check type tag */ 5727 t = btf_type_by_id(btf, mtype->type); 5728 if (btf_type_is_type_tag(t)) { 5729 tag_value = __btf_name_by_offset(btf, t->name_off); 5730 /* check __user tag */ 5731 if (strcmp(tag_value, "user") == 0) 5732 tmp_flag = MEM_USER; 5733 /* check __percpu tag */ 5734 if (strcmp(tag_value, "percpu") == 0) 5735 tmp_flag = MEM_PERCPU; 5736 } 5737 5738 stype = btf_type_skip_modifiers(btf, mtype->type, &id); 5739 if (btf_type_is_struct(stype)) { 5740 *next_btf_id = id; 5741 *flag = tmp_flag; 5742 return WALK_PTR; 5743 } 5744 } 5745 5746 /* Allow more flexible access within an int as long as 5747 * it is within mtrue_end. 5748 * Since mtrue_end could be the end of an array, 5749 * that also allows using an array of int as a scratch 5750 * space. e.g. skb->cb[]. 5751 */ 5752 if (off + size > mtrue_end) { 5753 bpf_log(log, 5754 "access beyond the end of member %s (mend:%u) in struct %s with off %u size %u\n", 5755 mname, mtrue_end, tname, off, size); 5756 return -EACCES; 5757 } 5758 5759 return WALK_SCALAR; 5760 } 5761 bpf_log(log, "struct %s doesn't have field at offset %d\n", tname, off); 5762 return -EINVAL; 5763 } 5764 5765 int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf, 5766 const struct btf_type *t, int off, int size, 5767 enum bpf_access_type atype __maybe_unused, 5768 u32 *next_btf_id, enum bpf_type_flag *flag) 5769 { 5770 enum bpf_type_flag tmp_flag = 0; 5771 int err; 5772 u32 id; 5773 5774 do { 5775 err = btf_struct_walk(log, btf, t, off, size, &id, &tmp_flag); 5776 5777 switch (err) { 5778 case WALK_PTR: 5779 /* If we found the pointer or scalar on t+off, 5780 * we're done. 5781 */ 5782 *next_btf_id = id; 5783 *flag = tmp_flag; 5784 return PTR_TO_BTF_ID; 5785 case WALK_SCALAR: 5786 return SCALAR_VALUE; 5787 case WALK_STRUCT: 5788 /* We found nested struct, so continue the search 5789 * by diving in it. At this point the offset is 5790 * aligned with the new type, so set it to 0. 5791 */ 5792 t = btf_type_by_id(btf, id); 5793 off = 0; 5794 break; 5795 default: 5796 /* It's either error or unknown return value.. 5797 * scream and leave. 5798 */ 5799 if (WARN_ONCE(err > 0, "unknown btf_struct_walk return value")) 5800 return -EINVAL; 5801 return err; 5802 } 5803 } while (t); 5804 5805 return -EINVAL; 5806 } 5807 5808 /* Check that two BTF types, each specified as an BTF object + id, are exactly 5809 * the same. 
Trivial ID check is not enough due to module BTFs, because we can 5810 * end up with two different module BTFs, but IDs point to the common type in 5811 * vmlinux BTF. 5812 */ 5813 static bool btf_types_are_same(const struct btf *btf1, u32 id1, 5814 const struct btf *btf2, u32 id2) 5815 { 5816 if (id1 != id2) 5817 return false; 5818 if (btf1 == btf2) 5819 return true; 5820 return btf_type_by_id(btf1, id1) == btf_type_by_id(btf2, id2); 5821 } 5822 5823 bool btf_struct_ids_match(struct bpf_verifier_log *log, 5824 const struct btf *btf, u32 id, int off, 5825 const struct btf *need_btf, u32 need_type_id, 5826 bool strict) 5827 { 5828 const struct btf_type *type; 5829 enum bpf_type_flag flag; 5830 int err; 5831 5832 /* Are we already done? */ 5833 if (off == 0 && btf_types_are_same(btf, id, need_btf, need_type_id)) 5834 return true; 5835 /* In case of strict type match, we do not walk struct, the top level 5836 * type match must succeed. When strict is true, off should have already 5837 * been 0. 5838 */ 5839 if (strict) 5840 return false; 5841 again: 5842 type = btf_type_by_id(btf, id); 5843 if (!type) 5844 return false; 5845 err = btf_struct_walk(log, btf, type, off, 1, &id, &flag); 5846 if (err != WALK_STRUCT) 5847 return false; 5848 5849 /* We found nested struct object. If it matches 5850 * the requested ID, we're done. Otherwise let's 5851 * continue the search with offset 0 in the new 5852 * type. 5853 */ 5854 if (!btf_types_are_same(btf, id, need_btf, need_type_id)) { 5855 off = 0; 5856 goto again; 5857 } 5858 5859 return true; 5860 } 5861 5862 static int __get_type_size(struct btf *btf, u32 btf_id, 5863 const struct btf_type **bad_type) 5864 { 5865 const struct btf_type *t; 5866 5867 if (!btf_id) 5868 /* void */ 5869 return 0; 5870 t = btf_type_by_id(btf, btf_id); 5871 while (t && btf_type_is_modifier(t)) 5872 t = btf_type_by_id(btf, t->type); 5873 if (!t) { 5874 *bad_type = btf_type_by_id(btf, 0); 5875 return -EINVAL; 5876 } 5877 if (btf_type_is_ptr(t)) 5878 /* kernel size of pointer. Not BPF's size of pointer*/ 5879 return sizeof(void *); 5880 if (btf_type_is_int(t) || btf_is_any_enum(t)) 5881 return t->size; 5882 *bad_type = t; 5883 return -EINVAL; 5884 } 5885 5886 int btf_distill_func_proto(struct bpf_verifier_log *log, 5887 struct btf *btf, 5888 const struct btf_type *func, 5889 const char *tname, 5890 struct btf_func_model *m) 5891 { 5892 const struct btf_param *args; 5893 const struct btf_type *t; 5894 u32 i, nargs; 5895 int ret; 5896 5897 if (!func) { 5898 /* BTF function prototype doesn't match the verifier types. 5899 * Fall back to MAX_BPF_FUNC_REG_ARGS u64 args. 5900 */ 5901 for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) 5902 m->arg_size[i] = 8; 5903 m->ret_size = 8; 5904 m->nr_args = MAX_BPF_FUNC_REG_ARGS; 5905 return 0; 5906 } 5907 args = (const struct btf_param *)(func + 1); 5908 nargs = btf_type_vlen(func); 5909 if (nargs > MAX_BPF_FUNC_ARGS) { 5910 bpf_log(log, 5911 "The function %s has %d arguments. 
Too many.\n", 5912 tname, nargs); 5913 return -EINVAL; 5914 } 5915 ret = __get_type_size(btf, func->type, &t); 5916 if (ret < 0) { 5917 bpf_log(log, 5918 "The function %s return type %s is unsupported.\n", 5919 tname, btf_kind_str[BTF_INFO_KIND(t->info)]); 5920 return -EINVAL; 5921 } 5922 m->ret_size = ret; 5923 5924 for (i = 0; i < nargs; i++) { 5925 if (i == nargs - 1 && args[i].type == 0) { 5926 bpf_log(log, 5927 "The function %s with variable args is unsupported.\n", 5928 tname); 5929 return -EINVAL; 5930 } 5931 ret = __get_type_size(btf, args[i].type, &t); 5932 if (ret < 0) { 5933 bpf_log(log, 5934 "The function %s arg%d type %s is unsupported.\n", 5935 tname, i, btf_kind_str[BTF_INFO_KIND(t->info)]); 5936 return -EINVAL; 5937 } 5938 if (ret == 0) { 5939 bpf_log(log, 5940 "The function %s has malformed void argument.\n", 5941 tname); 5942 return -EINVAL; 5943 } 5944 m->arg_size[i] = ret; 5945 } 5946 m->nr_args = nargs; 5947 return 0; 5948 } 5949 5950 /* Compare BTFs of two functions assuming only scalars and pointers to context. 5951 * t1 points to BTF_KIND_FUNC in btf1 5952 * t2 points to BTF_KIND_FUNC in btf2 5953 * Returns: 5954 * EINVAL - function prototype mismatch 5955 * EFAULT - verifier bug 5956 * 0 - 99% match. The last 1% is validated by the verifier. 5957 */ 5958 static int btf_check_func_type_match(struct bpf_verifier_log *log, 5959 struct btf *btf1, const struct btf_type *t1, 5960 struct btf *btf2, const struct btf_type *t2) 5961 { 5962 const struct btf_param *args1, *args2; 5963 const char *fn1, *fn2, *s1, *s2; 5964 u32 nargs1, nargs2, i; 5965 5966 fn1 = btf_name_by_offset(btf1, t1->name_off); 5967 fn2 = btf_name_by_offset(btf2, t2->name_off); 5968 5969 if (btf_func_linkage(t1) != BTF_FUNC_GLOBAL) { 5970 bpf_log(log, "%s() is not a global function\n", fn1); 5971 return -EINVAL; 5972 } 5973 if (btf_func_linkage(t2) != BTF_FUNC_GLOBAL) { 5974 bpf_log(log, "%s() is not a global function\n", fn2); 5975 return -EINVAL; 5976 } 5977 5978 t1 = btf_type_by_id(btf1, t1->type); 5979 if (!t1 || !btf_type_is_func_proto(t1)) 5980 return -EFAULT; 5981 t2 = btf_type_by_id(btf2, t2->type); 5982 if (!t2 || !btf_type_is_func_proto(t2)) 5983 return -EFAULT; 5984 5985 args1 = (const struct btf_param *)(t1 + 1); 5986 nargs1 = btf_type_vlen(t1); 5987 args2 = (const struct btf_param *)(t2 + 1); 5988 nargs2 = btf_type_vlen(t2); 5989 5990 if (nargs1 != nargs2) { 5991 bpf_log(log, "%s() has %d args while %s() has %d args\n", 5992 fn1, nargs1, fn2, nargs2); 5993 return -EINVAL; 5994 } 5995 5996 t1 = btf_type_skip_modifiers(btf1, t1->type, NULL); 5997 t2 = btf_type_skip_modifiers(btf2, t2->type, NULL); 5998 if (t1->info != t2->info) { 5999 bpf_log(log, 6000 "Return type %s of %s() doesn't match type %s of %s()\n", 6001 btf_type_str(t1), fn1, 6002 btf_type_str(t2), fn2); 6003 return -EINVAL; 6004 } 6005 6006 for (i = 0; i < nargs1; i++) { 6007 t1 = btf_type_skip_modifiers(btf1, args1[i].type, NULL); 6008 t2 = btf_type_skip_modifiers(btf2, args2[i].type, NULL); 6009 6010 if (t1->info != t2->info) { 6011 bpf_log(log, "arg%d in %s() is %s while %s() has %s\n", 6012 i, fn1, btf_type_str(t1), 6013 fn2, btf_type_str(t2)); 6014 return -EINVAL; 6015 } 6016 if (btf_type_has_size(t1) && t1->size != t2->size) { 6017 bpf_log(log, 6018 "arg%d in %s() has size %d while %s() has %d\n", 6019 i, fn1, t1->size, 6020 fn2, t2->size); 6021 return -EINVAL; 6022 } 6023 6024 /* global functions are validated with scalars and pointers 6025 * to context only. And only global functions can be replaced. 
6026 * Hence type check only those types. 6027 */ 6028 if (btf_type_is_int(t1) || btf_is_any_enum(t1)) 6029 continue; 6030 if (!btf_type_is_ptr(t1)) { 6031 bpf_log(log, 6032 "arg%d in %s() has unrecognized type\n", 6033 i, fn1); 6034 return -EINVAL; 6035 } 6036 t1 = btf_type_skip_modifiers(btf1, t1->type, NULL); 6037 t2 = btf_type_skip_modifiers(btf2, t2->type, NULL); 6038 if (!btf_type_is_struct(t1)) { 6039 bpf_log(log, 6040 "arg%d in %s() is not a pointer to context\n", 6041 i, fn1); 6042 return -EINVAL; 6043 } 6044 if (!btf_type_is_struct(t2)) { 6045 bpf_log(log, 6046 "arg%d in %s() is not a pointer to context\n", 6047 i, fn2); 6048 return -EINVAL; 6049 } 6050 /* This is an optional check to make program writing easier. 6051 * Compare names of structs and report an error to the user. 6052 * btf_prepare_func_args() already checked that t2 struct 6053 * is a context type. btf_prepare_func_args() will check 6054 * later that t1 struct is a context type as well. 6055 */ 6056 s1 = btf_name_by_offset(btf1, t1->name_off); 6057 s2 = btf_name_by_offset(btf2, t2->name_off); 6058 if (strcmp(s1, s2)) { 6059 bpf_log(log, 6060 "arg%d %s(struct %s *) doesn't match %s(struct %s *)\n", 6061 i, fn1, s1, fn2, s2); 6062 return -EINVAL; 6063 } 6064 } 6065 return 0; 6066 } 6067 6068 /* Compare BTFs of given program with BTF of target program */ 6069 int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog, 6070 struct btf *btf2, const struct btf_type *t2) 6071 { 6072 struct btf *btf1 = prog->aux->btf; 6073 const struct btf_type *t1; 6074 u32 btf_id = 0; 6075 6076 if (!prog->aux->func_info) { 6077 bpf_log(log, "Program extension requires BTF\n"); 6078 return -EINVAL; 6079 } 6080 6081 btf_id = prog->aux->func_info[0].type_id; 6082 if (!btf_id) 6083 return -EFAULT; 6084 6085 t1 = btf_type_by_id(btf1, btf_id); 6086 if (!t1 || !btf_type_is_func(t1)) 6087 return -EFAULT; 6088 6089 return btf_check_func_type_match(log, btf1, t1, btf2, t2); 6090 } 6091 6092 static u32 *reg2btf_ids[__BPF_REG_TYPE_MAX] = { 6093 #ifdef CONFIG_NET 6094 [PTR_TO_SOCKET] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK], 6095 [PTR_TO_SOCK_COMMON] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON], 6096 [PTR_TO_TCP_SOCK] = &btf_sock_ids[BTF_SOCK_TYPE_TCP], 6097 #endif 6098 }; 6099 6100 /* Returns true if struct is composed of scalars, 4 levels of nesting allowed */ 6101 static bool __btf_type_is_scalar_struct(struct bpf_verifier_log *log, 6102 const struct btf *btf, 6103 const struct btf_type *t, int rec) 6104 { 6105 const struct btf_type *member_type; 6106 const struct btf_member *member; 6107 u32 i; 6108 6109 if (!btf_type_is_struct(t)) 6110 return false; 6111 6112 for_each_member(i, t, member) { 6113 const struct btf_array *array; 6114 6115 member_type = btf_type_skip_modifiers(btf, member->type, NULL); 6116 if (btf_type_is_struct(member_type)) { 6117 if (rec >= 3) { 6118 bpf_log(log, "max struct nesting depth exceeded\n"); 6119 return false; 6120 } 6121 if (!__btf_type_is_scalar_struct(log, btf, member_type, rec + 1)) 6122 return false; 6123 continue; 6124 } 6125 if (btf_type_is_array(member_type)) { 6126 array = btf_type_array(member_type); 6127 if (!array->nelems) 6128 return false; 6129 member_type = btf_type_skip_modifiers(btf, array->type, NULL); 6130 if (!btf_type_is_scalar(member_type)) 6131 return false; 6132 continue; 6133 } 6134 if (!btf_type_is_scalar(member_type)) 6135 return false; 6136 } 6137 return true; 6138 } 6139 6140 static bool is_kfunc_arg_mem_size(const struct btf *btf, 6141 const struct btf_param *arg, 6142 
const struct bpf_reg_state *reg) 6143 { 6144 int len, sfx_len = sizeof("__sz") - 1; 6145 const struct btf_type *t; 6146 const char *param_name; 6147 6148 t = btf_type_skip_modifiers(btf, arg->type, NULL); 6149 if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE) 6150 return false; 6151 6152 /* In the future, this can be ported to use BTF tagging */ 6153 param_name = btf_name_by_offset(btf, arg->name_off); 6154 if (str_is_empty(param_name)) 6155 return false; 6156 len = strlen(param_name); 6157 if (len < sfx_len) 6158 return false; 6159 param_name += len - sfx_len; 6160 if (strncmp(param_name, "__sz", sfx_len)) 6161 return false; 6162 6163 return true; 6164 } 6165 6166 static int btf_check_func_arg_match(struct bpf_verifier_env *env, 6167 const struct btf *btf, u32 func_id, 6168 struct bpf_reg_state *regs, 6169 bool ptr_to_mem_ok) 6170 { 6171 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); 6172 struct bpf_verifier_log *log = &env->log; 6173 u32 i, nargs, ref_id, ref_obj_id = 0; 6174 bool is_kfunc = btf_is_kernel(btf); 6175 bool rel = false, kptr_get = false; 6176 const char *func_name, *ref_tname; 6177 const struct btf_type *t, *ref_t; 6178 const struct btf_param *args; 6179 int ref_regno = 0, ret; 6180 6181 t = btf_type_by_id(btf, func_id); 6182 if (!t || !btf_type_is_func(t)) { 6183 /* These checks were already done by the verifier while loading 6184 * struct bpf_func_info or in add_kfunc_call(). 6185 */ 6186 bpf_log(log, "BTF of func_id %u doesn't point to KIND_FUNC\n", 6187 func_id); 6188 return -EFAULT; 6189 } 6190 func_name = btf_name_by_offset(btf, t->name_off); 6191 6192 t = btf_type_by_id(btf, t->type); 6193 if (!t || !btf_type_is_func_proto(t)) { 6194 bpf_log(log, "Invalid BTF of func %s\n", func_name); 6195 return -EFAULT; 6196 } 6197 args = (const struct btf_param *)(t + 1); 6198 nargs = btf_type_vlen(t); 6199 if (nargs > MAX_BPF_FUNC_REG_ARGS) { 6200 bpf_log(log, "Function %s has %d > %d args\n", func_name, nargs, 6201 MAX_BPF_FUNC_REG_ARGS); 6202 return -EINVAL; 6203 } 6204 6205 if (is_kfunc) { 6206 /* Only kfunc can be release func */ 6207 rel = btf_kfunc_id_set_contains(btf, resolve_prog_type(env->prog), 6208 BTF_KFUNC_TYPE_RELEASE, func_id); 6209 kptr_get = btf_kfunc_id_set_contains(btf, resolve_prog_type(env->prog), 6210 BTF_KFUNC_TYPE_KPTR_ACQUIRE, func_id); 6211 } 6212 6213 /* check that BTF function arguments match actual types that the 6214 * verifier sees. 
 */
	for (i = 0; i < nargs; i++) {
		enum bpf_arg_type arg_type = ARG_DONTCARE;
		u32 regno = i + 1;
		struct bpf_reg_state *reg = &regs[regno];

		t = btf_type_skip_modifiers(btf, args[i].type, NULL);
		if (btf_type_is_scalar(t)) {
			if (reg->type == SCALAR_VALUE)
				continue;
			bpf_log(log, "R%d is not a scalar\n", regno);
			return -EINVAL;
		}

		if (!btf_type_is_ptr(t)) {
			bpf_log(log, "Unrecognized arg#%d type %s\n",
				i, btf_type_str(t));
			return -EINVAL;
		}

		ref_t = btf_type_skip_modifiers(btf, t->type, &ref_id);
		ref_tname = btf_name_by_offset(btf, ref_t->name_off);

		if (rel && reg->ref_obj_id)
			arg_type |= OBJ_RELEASE;
		ret = check_func_arg_reg_off(env, reg, regno, arg_type);
		if (ret < 0)
			return ret;

		/* kptr_get is only true for kfunc */
		if (i == 0 && kptr_get) {
			struct bpf_map_value_off_desc *off_desc;

			if (reg->type != PTR_TO_MAP_VALUE) {
				bpf_log(log, "arg#0 expected pointer to map value\n");
				return -EINVAL;
			}

			/* check_func_arg_reg_off allows var_off for
			 * PTR_TO_MAP_VALUE, but we need fixed offset to find
			 * off_desc.
			 */
			if (!tnum_is_const(reg->var_off)) {
				bpf_log(log, "arg#0 must have constant offset\n");
				return -EINVAL;
			}

			off_desc = bpf_map_kptr_off_contains(reg->map_ptr, reg->off + reg->var_off.value);
			if (!off_desc || off_desc->type != BPF_KPTR_REF) {
				bpf_log(log, "arg#0 no referenced kptr at map value offset=%llu\n",
					reg->off + reg->var_off.value);
				return -EINVAL;
			}

			if (!btf_type_is_ptr(ref_t)) {
				bpf_log(log, "arg#0 BTF type must be a double pointer\n");
				return -EINVAL;
			}

			ref_t = btf_type_skip_modifiers(btf, ref_t->type, &ref_id);
			ref_tname = btf_name_by_offset(btf, ref_t->name_off);

			if (!btf_type_is_struct(ref_t)) {
				bpf_log(log, "kernel function %s args#%d pointer type %s %s is not supported\n",
					func_name, i, btf_type_str(ref_t), ref_tname);
				return -EINVAL;
			}
			if (!btf_struct_ids_match(log, btf, ref_id, 0, off_desc->kptr.btf,
						  off_desc->kptr.btf_id, true)) {
				bpf_log(log, "kernel function %s args#%d expected pointer to %s %s\n",
					func_name, i, btf_type_str(ref_t), ref_tname);
				return -EINVAL;
			}
			/* rest of the arguments can be anything, like normal kfunc */
		} else if (btf_get_prog_ctx_type(log, btf, t, prog_type, i)) {
			/* If function expects ctx type in BTF check that caller
			 * is passing PTR_TO_CTX.
			 */
			if (reg->type != PTR_TO_CTX) {
				bpf_log(log,
					"arg#%d expected pointer to ctx, but got %s\n",
					i, btf_type_str(t));
				return -EINVAL;
			}
		} else if (is_kfunc && (reg->type == PTR_TO_BTF_ID ||
			 (reg2btf_ids[base_type(reg->type)] && !type_flag(reg->type)))) {
			const struct btf_type *reg_ref_t;
			const struct btf *reg_btf;
			const char *reg_ref_tname;
			u32 reg_ref_id;

			if (!btf_type_is_struct(ref_t)) {
				bpf_log(log, "kernel function %s args#%d pointer type %s %s is not supported\n",
					func_name, i, btf_type_str(ref_t),
					ref_tname);
				return -EINVAL;
			}

			if (reg->type == PTR_TO_BTF_ID) {
				reg_btf = reg->btf;
				reg_ref_id = reg->btf_id;
				/* Ensure only one argument is referenced PTR_TO_BTF_ID */
				if (reg->ref_obj_id) {
					if (ref_obj_id) {
						bpf_log(log, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
							regno, reg->ref_obj_id, ref_obj_id);
						return -EFAULT;
					}
					ref_regno = regno;
					ref_obj_id = reg->ref_obj_id;
				}
			} else {
				reg_btf = btf_vmlinux;
				reg_ref_id = *reg2btf_ids[base_type(reg->type)];
			}

			reg_ref_t = btf_type_skip_modifiers(reg_btf, reg_ref_id,
							    &reg_ref_id);
			reg_ref_tname = btf_name_by_offset(reg_btf,
							   reg_ref_t->name_off);
			if (!btf_struct_ids_match(log, reg_btf, reg_ref_id,
						  reg->off, btf, ref_id, rel && reg->ref_obj_id)) {
				bpf_log(log, "kernel function %s args#%d expected pointer to %s %s but R%d has a pointer to %s %s\n",
					func_name, i,
					btf_type_str(ref_t), ref_tname,
					regno, btf_type_str(reg_ref_t),
					reg_ref_tname);
				return -EINVAL;
			}
		} else if (ptr_to_mem_ok) {
			const struct btf_type *resolve_ret;
			u32 type_size;

			if (is_kfunc) {
				bool arg_mem_size = i + 1 < nargs && is_kfunc_arg_mem_size(btf, &args[i + 1], &regs[regno + 1]);

				/* Permit pointer to mem, but only when argument
				 * type is pointer to scalar, or struct composed
				 * (recursively) of scalars.
				 * When arg_mem_size is true, the pointer can be
				 * void *.
				 */
				if (!btf_type_is_scalar(ref_t) &&
				    !__btf_type_is_scalar_struct(log, btf, ref_t, 0) &&
				    (arg_mem_size ? !btf_type_is_void(ref_t) : 1)) {
					bpf_log(log,
						"arg#%d pointer type %s %s must point to %sscalar, or struct with scalar\n",
						i, btf_type_str(ref_t), ref_tname, arg_mem_size ? "void, " : "");
					return -EINVAL;
				}

				/* Check for mem, len pair */
				if (arg_mem_size) {
					if (check_kfunc_mem_size_reg(env, &regs[regno + 1], regno + 1)) {
						bpf_log(log, "arg#%d arg#%d memory, len pair leads to invalid memory access\n",
							i, i + 1);
						return -EINVAL;
					}
					i++;
					continue;
				}
			}

			resolve_ret = btf_resolve_size(btf, ref_t, &type_size);
			if (IS_ERR(resolve_ret)) {
				bpf_log(log,
					"arg#%d reference type('%s %s') size cannot be determined: %ld\n",
					i, btf_type_str(ref_t), ref_tname,
					PTR_ERR(resolve_ret));
				return -EINVAL;
			}

			if (check_mem_reg(env, reg, regno, type_size))
				return -EINVAL;
		} else {
			bpf_log(log, "reg type unsupported for arg#%d %sfunction %s#%d\n", i,
				is_kfunc ? "kernel " : "", func_name, func_id);
			return -EINVAL;
		}
	}

	/* Either both are set, or neither */
	WARN_ON_ONCE((ref_obj_id && !ref_regno) || (!ref_obj_id && ref_regno));
	/* We already made sure ref_obj_id is set only for one argument.
We do 6399 * allow (!rel && ref_obj_id), so that passing such referenced 6400 * PTR_TO_BTF_ID to other kfuncs works. Note that rel is only true when 6401 * is_kfunc is true. 6402 */ 6403 if (rel && !ref_obj_id) { 6404 bpf_log(log, "release kernel function %s expects refcounted PTR_TO_BTF_ID\n", 6405 func_name); 6406 return -EINVAL; 6407 } 6408 /* returns argument register number > 0 in case of reference release kfunc */ 6409 return rel ? ref_regno : 0; 6410 } 6411 6412 /* Compare BTF of a function with given bpf_reg_state. 6413 * Returns: 6414 * EFAULT - there is a verifier bug. Abort verification. 6415 * EINVAL - there is a type mismatch or BTF is not available. 6416 * 0 - BTF matches with what bpf_reg_state expects. 6417 * Only PTR_TO_CTX and SCALAR_VALUE states are recognized. 6418 */ 6419 int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog, 6420 struct bpf_reg_state *regs) 6421 { 6422 struct bpf_prog *prog = env->prog; 6423 struct btf *btf = prog->aux->btf; 6424 bool is_global; 6425 u32 btf_id; 6426 int err; 6427 6428 if (!prog->aux->func_info) 6429 return -EINVAL; 6430 6431 btf_id = prog->aux->func_info[subprog].type_id; 6432 if (!btf_id) 6433 return -EFAULT; 6434 6435 if (prog->aux->func_info_aux[subprog].unreliable) 6436 return -EINVAL; 6437 6438 is_global = prog->aux->func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL; 6439 err = btf_check_func_arg_match(env, btf, btf_id, regs, is_global); 6440 6441 /* Compiler optimizations can remove arguments from static functions 6442 * or mismatched type can be passed into a global function. 6443 * In such cases mark the function as unreliable from BTF point of view. 6444 */ 6445 if (err) 6446 prog->aux->func_info_aux[subprog].unreliable = true; 6447 return err; 6448 } 6449 6450 int btf_check_kfunc_arg_match(struct bpf_verifier_env *env, 6451 const struct btf *btf, u32 func_id, 6452 struct bpf_reg_state *regs) 6453 { 6454 return btf_check_func_arg_match(env, btf, func_id, regs, true); 6455 } 6456 6457 /* Convert BTF of a function into bpf_reg_state if possible 6458 * Returns: 6459 * EFAULT - there is a verifier bug. Abort verification. 6460 * EINVAL - cannot convert BTF. 6461 * 0 - Successfully converted BTF into bpf_reg_state 6462 * (either PTR_TO_CTX or SCALAR_VALUE). 
6463 */ 6464 int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog, 6465 struct bpf_reg_state *regs) 6466 { 6467 struct bpf_verifier_log *log = &env->log; 6468 struct bpf_prog *prog = env->prog; 6469 enum bpf_prog_type prog_type = prog->type; 6470 struct btf *btf = prog->aux->btf; 6471 const struct btf_param *args; 6472 const struct btf_type *t, *ref_t; 6473 u32 i, nargs, btf_id; 6474 const char *tname; 6475 6476 if (!prog->aux->func_info || 6477 prog->aux->func_info_aux[subprog].linkage != BTF_FUNC_GLOBAL) { 6478 bpf_log(log, "Verifier bug\n"); 6479 return -EFAULT; 6480 } 6481 6482 btf_id = prog->aux->func_info[subprog].type_id; 6483 if (!btf_id) { 6484 bpf_log(log, "Global functions need valid BTF\n"); 6485 return -EFAULT; 6486 } 6487 6488 t = btf_type_by_id(btf, btf_id); 6489 if (!t || !btf_type_is_func(t)) { 6490 /* These checks were already done by the verifier while loading 6491 * struct bpf_func_info 6492 */ 6493 bpf_log(log, "BTF of func#%d doesn't point to KIND_FUNC\n", 6494 subprog); 6495 return -EFAULT; 6496 } 6497 tname = btf_name_by_offset(btf, t->name_off); 6498 6499 if (log->level & BPF_LOG_LEVEL) 6500 bpf_log(log, "Validating %s() func#%d...\n", 6501 tname, subprog); 6502 6503 if (prog->aux->func_info_aux[subprog].unreliable) { 6504 bpf_log(log, "Verifier bug in function %s()\n", tname); 6505 return -EFAULT; 6506 } 6507 if (prog_type == BPF_PROG_TYPE_EXT) 6508 prog_type = prog->aux->dst_prog->type; 6509 6510 t = btf_type_by_id(btf, t->type); 6511 if (!t || !btf_type_is_func_proto(t)) { 6512 bpf_log(log, "Invalid type of function %s()\n", tname); 6513 return -EFAULT; 6514 } 6515 args = (const struct btf_param *)(t + 1); 6516 nargs = btf_type_vlen(t); 6517 if (nargs > MAX_BPF_FUNC_REG_ARGS) { 6518 bpf_log(log, "Global function %s() with %d > %d args. Buggy compiler.\n", 6519 tname, nargs, MAX_BPF_FUNC_REG_ARGS); 6520 return -EINVAL; 6521 } 6522 /* check that function returns int */ 6523 t = btf_type_by_id(btf, t->type); 6524 while (btf_type_is_modifier(t)) 6525 t = btf_type_by_id(btf, t->type); 6526 if (!btf_type_is_int(t) && !btf_is_any_enum(t)) { 6527 bpf_log(log, 6528 "Global function %s() doesn't return scalar. Only those are supported.\n", 6529 tname); 6530 return -EINVAL; 6531 } 6532 /* Convert BTF function arguments into verifier types. 6533 * Only PTR_TO_CTX and SCALAR are supported atm. 
	 */
	for (i = 0; i < nargs; i++) {
		struct bpf_reg_state *reg = &regs[i + 1];

		t = btf_type_by_id(btf, args[i].type);
		while (btf_type_is_modifier(t))
			t = btf_type_by_id(btf, t->type);
		if (btf_type_is_int(t) || btf_is_any_enum(t)) {
			reg->type = SCALAR_VALUE;
			continue;
		}
		if (btf_type_is_ptr(t)) {
			if (btf_get_prog_ctx_type(log, btf, t, prog_type, i)) {
				reg->type = PTR_TO_CTX;
				continue;
			}

			t = btf_type_skip_modifiers(btf, t->type, NULL);

			ref_t = btf_resolve_size(btf, t, &reg->mem_size);
			if (IS_ERR(ref_t)) {
				bpf_log(log,
					"arg#%d reference type('%s %s') size cannot be determined: %ld\n",
					i, btf_type_str(t), btf_name_by_offset(btf, t->name_off),
					PTR_ERR(ref_t));
				return -EINVAL;
			}

			reg->type = PTR_TO_MEM | PTR_MAYBE_NULL;
			reg->id = ++env->id_gen;

			continue;
		}
		bpf_log(log, "Arg#%d type %s in %s() is not supported yet.\n",
			i, btf_kind_str[BTF_INFO_KIND(t->info)], tname);
		return -EINVAL;
	}
	return 0;
}

static void btf_type_show(const struct btf *btf, u32 type_id, void *obj,
			  struct btf_show *show)
{
	const struct btf_type *t = btf_type_by_id(btf, type_id);

	show->btf = btf;
	memset(&show->state, 0, sizeof(show->state));
	memset(&show->obj, 0, sizeof(show->obj));

	btf_type_ops(t)->show(btf, t, type_id, obj, 0, show);
}

static void btf_seq_show(struct btf_show *show, const char *fmt,
			 va_list args)
{
	seq_vprintf((struct seq_file *)show->target, fmt, args);
}

int btf_type_seq_show_flags(const struct btf *btf, u32 type_id,
			    void *obj, struct seq_file *m, u64 flags)
{
	struct btf_show sseq;

	sseq.target = m;
	sseq.showfn = btf_seq_show;
	sseq.flags = flags;

	btf_type_show(btf, type_id, obj, &sseq);

	return sseq.state.status;
}

void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
		       struct seq_file *m)
{
	(void) btf_type_seq_show_flags(btf, type_id, obj, m,
				       BTF_SHOW_NONAME | BTF_SHOW_COMPACT |
				       BTF_SHOW_ZERO | BTF_SHOW_UNSAFE);
}

struct btf_show_snprintf {
	struct btf_show show;
	int len_left;	/* space left in string */
	int len;	/* length we would have written */
};

static void btf_snprintf_show(struct btf_show *show, const char *fmt,
			      va_list args)
{
	struct btf_show_snprintf *ssnprintf = (struct btf_show_snprintf *)show;
	int len;

	len = vsnprintf(show->target, ssnprintf->len_left, fmt, args);

	if (len < 0) {
		ssnprintf->len_left = 0;
		ssnprintf->len = len;
	} else if (len > ssnprintf->len_left) {
		/* no space, drive on to get length we would have written */
		ssnprintf->len_left = 0;
		ssnprintf->len += len;
	} else {
		ssnprintf->len_left -= len;
		ssnprintf->len += len;
		show->target += len;
	}
}

int btf_type_snprintf_show(const struct btf *btf, u32 type_id, void *obj,
			   char *buf, int len, u64 flags)
{
	struct btf_show_snprintf ssnprintf;

	ssnprintf.show.target = buf;
	ssnprintf.show.flags = flags;
	ssnprintf.show.showfn = btf_snprintf_show;
	ssnprintf.len_left = len;
	ssnprintf.len = 0;

	btf_type_show(btf, type_id, obj, (struct btf_show *)&ssnprintf);

	/* If we encountered an error, return it.
*/ 6656 if (ssnprintf.show.state.status) 6657 return ssnprintf.show.state.status; 6658 6659 /* Otherwise return length we would have written */ 6660 return ssnprintf.len; 6661 } 6662 6663 #ifdef CONFIG_PROC_FS 6664 static void bpf_btf_show_fdinfo(struct seq_file *m, struct file *filp) 6665 { 6666 const struct btf *btf = filp->private_data; 6667 6668 seq_printf(m, "btf_id:\t%u\n", btf->id); 6669 } 6670 #endif 6671 6672 static int btf_release(struct inode *inode, struct file *filp) 6673 { 6674 btf_put(filp->private_data); 6675 return 0; 6676 } 6677 6678 const struct file_operations btf_fops = { 6679 #ifdef CONFIG_PROC_FS 6680 .show_fdinfo = bpf_btf_show_fdinfo, 6681 #endif 6682 .release = btf_release, 6683 }; 6684 6685 static int __btf_new_fd(struct btf *btf) 6686 { 6687 return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC); 6688 } 6689 6690 int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr) 6691 { 6692 struct btf *btf; 6693 int ret; 6694 6695 btf = btf_parse(make_bpfptr(attr->btf, uattr.is_kernel), 6696 attr->btf_size, attr->btf_log_level, 6697 u64_to_user_ptr(attr->btf_log_buf), 6698 attr->btf_log_size); 6699 if (IS_ERR(btf)) 6700 return PTR_ERR(btf); 6701 6702 ret = btf_alloc_id(btf); 6703 if (ret) { 6704 btf_free(btf); 6705 return ret; 6706 } 6707 6708 /* 6709 * The BTF ID is published to the userspace. 6710 * All BTF free must go through call_rcu() from 6711 * now on (i.e. free by calling btf_put()). 6712 */ 6713 6714 ret = __btf_new_fd(btf); 6715 if (ret < 0) 6716 btf_put(btf); 6717 6718 return ret; 6719 } 6720 6721 struct btf *btf_get_by_fd(int fd) 6722 { 6723 struct btf *btf; 6724 struct fd f; 6725 6726 f = fdget(fd); 6727 6728 if (!f.file) 6729 return ERR_PTR(-EBADF); 6730 6731 if (f.file->f_op != &btf_fops) { 6732 fdput(f); 6733 return ERR_PTR(-EINVAL); 6734 } 6735 6736 btf = f.file->private_data; 6737 refcount_inc(&btf->refcnt); 6738 fdput(f); 6739 6740 return btf; 6741 } 6742 6743 int btf_get_info_by_fd(const struct btf *btf, 6744 const union bpf_attr *attr, 6745 union bpf_attr __user *uattr) 6746 { 6747 struct bpf_btf_info __user *uinfo; 6748 struct bpf_btf_info info; 6749 u32 info_copy, btf_copy; 6750 void __user *ubtf; 6751 char __user *uname; 6752 u32 uinfo_len, uname_len, name_len; 6753 int ret = 0; 6754 6755 uinfo = u64_to_user_ptr(attr->info.info); 6756 uinfo_len = attr->info.info_len; 6757 6758 info_copy = min_t(u32, uinfo_len, sizeof(info)); 6759 memset(&info, 0, sizeof(info)); 6760 if (copy_from_user(&info, uinfo, info_copy)) 6761 return -EFAULT; 6762 6763 info.id = btf->id; 6764 ubtf = u64_to_user_ptr(info.btf); 6765 btf_copy = min_t(u32, btf->data_size, info.btf_size); 6766 if (copy_to_user(ubtf, btf->data, btf_copy)) 6767 return -EFAULT; 6768 info.btf_size = btf->data_size; 6769 6770 info.kernel_btf = btf->kernel_btf; 6771 6772 uname = u64_to_user_ptr(info.name); 6773 uname_len = info.name_len; 6774 if (!uname ^ !uname_len) 6775 return -EINVAL; 6776 6777 name_len = strlen(btf->name); 6778 info.name_len = name_len; 6779 6780 if (uname) { 6781 if (uname_len >= name_len + 1) { 6782 if (copy_to_user(uname, btf->name, name_len + 1)) 6783 return -EFAULT; 6784 } else { 6785 char zero = '\0'; 6786 6787 if (copy_to_user(uname, btf->name, uname_len - 1)) 6788 return -EFAULT; 6789 if (put_user(zero, uname + uname_len - 1)) 6790 return -EFAULT; 6791 /* let user-space know about too short buffer */ 6792 ret = -ENOSPC; 6793 } 6794 } 6795 6796 if (copy_to_user(uinfo, &info, info_copy) || 6797 put_user(info_copy, &uattr->info.info_len)) 6798 return -EFAULT; 
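	/* At this point ret is 0 on success, or -ENOSPC if the caller's name
	 * buffer was too small; the bpf_btf_info struct itself has been
	 * copied out above in either case.
	 */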
6799 6800 return ret; 6801 } 6802 6803 int btf_get_fd_by_id(u32 id) 6804 { 6805 struct btf *btf; 6806 int fd; 6807 6808 rcu_read_lock(); 6809 btf = idr_find(&btf_idr, id); 6810 if (!btf || !refcount_inc_not_zero(&btf->refcnt)) 6811 btf = ERR_PTR(-ENOENT); 6812 rcu_read_unlock(); 6813 6814 if (IS_ERR(btf)) 6815 return PTR_ERR(btf); 6816 6817 fd = __btf_new_fd(btf); 6818 if (fd < 0) 6819 btf_put(btf); 6820 6821 return fd; 6822 } 6823 6824 u32 btf_obj_id(const struct btf *btf) 6825 { 6826 return btf->id; 6827 } 6828 6829 bool btf_is_kernel(const struct btf *btf) 6830 { 6831 return btf->kernel_btf; 6832 } 6833 6834 bool btf_is_module(const struct btf *btf) 6835 { 6836 return btf->kernel_btf && strcmp(btf->name, "vmlinux") != 0; 6837 } 6838 6839 static int btf_id_cmp_func(const void *a, const void *b) 6840 { 6841 const int *pa = a, *pb = b; 6842 6843 return *pa - *pb; 6844 } 6845 6846 bool btf_id_set_contains(const struct btf_id_set *set, u32 id) 6847 { 6848 return bsearch(&id, set->ids, set->cnt, sizeof(u32), btf_id_cmp_func) != NULL; 6849 } 6850 6851 enum { 6852 BTF_MODULE_F_LIVE = (1 << 0), 6853 }; 6854 6855 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES 6856 struct btf_module { 6857 struct list_head list; 6858 struct module *module; 6859 struct btf *btf; 6860 struct bin_attribute *sysfs_attr; 6861 int flags; 6862 }; 6863 6864 static LIST_HEAD(btf_modules); 6865 static DEFINE_MUTEX(btf_module_mutex); 6866 6867 static ssize_t 6868 btf_module_read(struct file *file, struct kobject *kobj, 6869 struct bin_attribute *bin_attr, 6870 char *buf, loff_t off, size_t len) 6871 { 6872 const struct btf *btf = bin_attr->private; 6873 6874 memcpy(buf, btf->data + off, len); 6875 return len; 6876 } 6877 6878 static void purge_cand_cache(struct btf *btf); 6879 6880 static int btf_module_notify(struct notifier_block *nb, unsigned long op, 6881 void *module) 6882 { 6883 struct btf_module *btf_mod, *tmp; 6884 struct module *mod = module; 6885 struct btf *btf; 6886 int err = 0; 6887 6888 if (mod->btf_data_size == 0 || 6889 (op != MODULE_STATE_COMING && op != MODULE_STATE_LIVE && 6890 op != MODULE_STATE_GOING)) 6891 goto out; 6892 6893 switch (op) { 6894 case MODULE_STATE_COMING: 6895 btf_mod = kzalloc(sizeof(*btf_mod), GFP_KERNEL); 6896 if (!btf_mod) { 6897 err = -ENOMEM; 6898 goto out; 6899 } 6900 btf = btf_parse_module(mod->name, mod->btf_data, mod->btf_data_size); 6901 if (IS_ERR(btf)) { 6902 pr_warn("failed to validate module [%s] BTF: %ld\n", 6903 mod->name, PTR_ERR(btf)); 6904 kfree(btf_mod); 6905 if (!IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH)) 6906 err = PTR_ERR(btf); 6907 goto out; 6908 } 6909 err = btf_alloc_id(btf); 6910 if (err) { 6911 btf_free(btf); 6912 kfree(btf_mod); 6913 goto out; 6914 } 6915 6916 purge_cand_cache(NULL); 6917 mutex_lock(&btf_module_mutex); 6918 btf_mod->module = module; 6919 btf_mod->btf = btf; 6920 list_add(&btf_mod->list, &btf_modules); 6921 mutex_unlock(&btf_module_mutex); 6922 6923 if (IS_ENABLED(CONFIG_SYSFS)) { 6924 struct bin_attribute *attr; 6925 6926 attr = kzalloc(sizeof(*attr), GFP_KERNEL); 6927 if (!attr) 6928 goto out; 6929 6930 sysfs_bin_attr_init(attr); 6931 attr->attr.name = btf->name; 6932 attr->attr.mode = 0444; 6933 attr->size = btf->data_size; 6934 attr->private = btf; 6935 attr->read = btf_module_read; 6936 6937 err = sysfs_create_bin_file(btf_kobj, attr); 6938 if (err) { 6939 pr_warn("failed to register module [%s] BTF in sysfs: %d\n", 6940 mod->name, err); 6941 kfree(attr); 6942 err = 0; 6943 goto out; 6944 } 6945 6946 btf_mod->sysfs_attr = attr; 6947 } 6948 6949 
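		/* The sysfs attribute and the module BTF reference taken above
		 * stay live until the MODULE_STATE_GOING case below tears them
		 * down.
		 */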
break; 6950 case MODULE_STATE_LIVE: 6951 mutex_lock(&btf_module_mutex); 6952 list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) { 6953 if (btf_mod->module != module) 6954 continue; 6955 6956 btf_mod->flags |= BTF_MODULE_F_LIVE; 6957 break; 6958 } 6959 mutex_unlock(&btf_module_mutex); 6960 break; 6961 case MODULE_STATE_GOING: 6962 mutex_lock(&btf_module_mutex); 6963 list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) { 6964 if (btf_mod->module != module) 6965 continue; 6966 6967 list_del(&btf_mod->list); 6968 if (btf_mod->sysfs_attr) 6969 sysfs_remove_bin_file(btf_kobj, btf_mod->sysfs_attr); 6970 purge_cand_cache(btf_mod->btf); 6971 btf_put(btf_mod->btf); 6972 kfree(btf_mod->sysfs_attr); 6973 kfree(btf_mod); 6974 break; 6975 } 6976 mutex_unlock(&btf_module_mutex); 6977 break; 6978 } 6979 out: 6980 return notifier_from_errno(err); 6981 } 6982 6983 static struct notifier_block btf_module_nb = { 6984 .notifier_call = btf_module_notify, 6985 }; 6986 6987 static int __init btf_module_init(void) 6988 { 6989 register_module_notifier(&btf_module_nb); 6990 return 0; 6991 } 6992 6993 fs_initcall(btf_module_init); 6994 #endif /* CONFIG_DEBUG_INFO_BTF_MODULES */ 6995 6996 struct module *btf_try_get_module(const struct btf *btf) 6997 { 6998 struct module *res = NULL; 6999 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES 7000 struct btf_module *btf_mod, *tmp; 7001 7002 mutex_lock(&btf_module_mutex); 7003 list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) { 7004 if (btf_mod->btf != btf) 7005 continue; 7006 7007 /* We must only consider module whose __init routine has 7008 * finished, hence we must check for BTF_MODULE_F_LIVE flag, 7009 * which is set from the notifier callback for 7010 * MODULE_STATE_LIVE. 7011 */ 7012 if ((btf_mod->flags & BTF_MODULE_F_LIVE) && try_module_get(btf_mod->module)) 7013 res = btf_mod->module; 7014 7015 break; 7016 } 7017 mutex_unlock(&btf_module_mutex); 7018 #endif 7019 7020 return res; 7021 } 7022 7023 /* Returns struct btf corresponding to the struct module. 7024 * This function can return NULL or ERR_PTR. 
7025 */ 7026 static struct btf *btf_get_module_btf(const struct module *module) 7027 { 7028 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES 7029 struct btf_module *btf_mod, *tmp; 7030 #endif 7031 struct btf *btf = NULL; 7032 7033 if (!module) { 7034 btf = bpf_get_btf_vmlinux(); 7035 if (!IS_ERR_OR_NULL(btf)) 7036 btf_get(btf); 7037 return btf; 7038 } 7039 7040 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES 7041 mutex_lock(&btf_module_mutex); 7042 list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) { 7043 if (btf_mod->module != module) 7044 continue; 7045 7046 btf_get(btf_mod->btf); 7047 btf = btf_mod->btf; 7048 break; 7049 } 7050 mutex_unlock(&btf_module_mutex); 7051 #endif 7052 7053 return btf; 7054 } 7055 7056 BPF_CALL_4(bpf_btf_find_by_name_kind, char *, name, int, name_sz, u32, kind, int, flags) 7057 { 7058 struct btf *btf = NULL; 7059 int btf_obj_fd = 0; 7060 long ret; 7061 7062 if (flags) 7063 return -EINVAL; 7064 7065 if (name_sz <= 1 || name[name_sz - 1]) 7066 return -EINVAL; 7067 7068 ret = bpf_find_btf_id(name, kind, &btf); 7069 if (ret > 0 && btf_is_module(btf)) { 7070 btf_obj_fd = __btf_new_fd(btf); 7071 if (btf_obj_fd < 0) { 7072 btf_put(btf); 7073 return btf_obj_fd; 7074 } 7075 return ret | (((u64)btf_obj_fd) << 32); 7076 } 7077 if (ret > 0) 7078 btf_put(btf); 7079 return ret; 7080 } 7081 7082 const struct bpf_func_proto bpf_btf_find_by_name_kind_proto = { 7083 .func = bpf_btf_find_by_name_kind, 7084 .gpl_only = false, 7085 .ret_type = RET_INTEGER, 7086 .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, 7087 .arg2_type = ARG_CONST_SIZE, 7088 .arg3_type = ARG_ANYTHING, 7089 .arg4_type = ARG_ANYTHING, 7090 }; 7091 7092 BTF_ID_LIST_GLOBAL(btf_tracing_ids, MAX_BTF_TRACING_TYPE) 7093 #define BTF_TRACING_TYPE(name, type) BTF_ID(struct, type) 7094 BTF_TRACING_TYPE_xxx 7095 #undef BTF_TRACING_TYPE 7096 7097 /* Kernel Function (kfunc) BTF ID set registration API */ 7098 7099 static int __btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook, 7100 enum btf_kfunc_type type, 7101 struct btf_id_set *add_set, bool vmlinux_set) 7102 { 7103 struct btf_kfunc_set_tab *tab; 7104 struct btf_id_set *set; 7105 u32 set_cnt; 7106 int ret; 7107 7108 if (hook >= BTF_KFUNC_HOOK_MAX || type >= BTF_KFUNC_TYPE_MAX) { 7109 ret = -EINVAL; 7110 goto end; 7111 } 7112 7113 if (!add_set->cnt) 7114 return 0; 7115 7116 tab = btf->kfunc_set_tab; 7117 if (!tab) { 7118 tab = kzalloc(sizeof(*tab), GFP_KERNEL | __GFP_NOWARN); 7119 if (!tab) 7120 return -ENOMEM; 7121 btf->kfunc_set_tab = tab; 7122 } 7123 7124 set = tab->sets[hook][type]; 7125 /* Warn when register_btf_kfunc_id_set is called twice for the same hook 7126 * for module sets. 7127 */ 7128 if (WARN_ON_ONCE(set && !vmlinux_set)) { 7129 ret = -EINVAL; 7130 goto end; 7131 } 7132 7133 /* We don't need to allocate, concatenate, and sort module sets, because 7134 * only one is allowed per hook. Hence, we can directly assign the 7135 * pointer and return. 7136 */ 7137 if (!vmlinux_set) { 7138 tab->sets[hook][type] = add_set; 7139 return 0; 7140 } 7141 7142 /* In case of vmlinux sets, there may be more than one set being 7143 * registered per hook. To create a unified set, we allocate a new set 7144 * and concatenate all individual sets being registered. While each set 7145 * is individually sorted, they may become unsorted when concatenated, 7146 * hence re-sorting the final set again is required to make binary 7147 * searching the set using btf_id_set_contains function work. 7148 */ 7149 set_cnt = set ? 
set->cnt : 0; 7150 7151 if (set_cnt > U32_MAX - add_set->cnt) { 7152 ret = -EOVERFLOW; 7153 goto end; 7154 } 7155 7156 if (set_cnt + add_set->cnt > BTF_KFUNC_SET_MAX_CNT) { 7157 ret = -E2BIG; 7158 goto end; 7159 } 7160 7161 /* Grow set */ 7162 set = krealloc(tab->sets[hook][type], 7163 offsetof(struct btf_id_set, ids[set_cnt + add_set->cnt]), 7164 GFP_KERNEL | __GFP_NOWARN); 7165 if (!set) { 7166 ret = -ENOMEM; 7167 goto end; 7168 } 7169 7170 /* For newly allocated set, initialize set->cnt to 0 */ 7171 if (!tab->sets[hook][type]) 7172 set->cnt = 0; 7173 tab->sets[hook][type] = set; 7174 7175 /* Concatenate the two sets */ 7176 memcpy(set->ids + set->cnt, add_set->ids, add_set->cnt * sizeof(set->ids[0])); 7177 set->cnt += add_set->cnt; 7178 7179 sort(set->ids, set->cnt, sizeof(set->ids[0]), btf_id_cmp_func, NULL); 7180 7181 return 0; 7182 end: 7183 btf_free_kfunc_set_tab(btf); 7184 return ret; 7185 } 7186 7187 static int btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook, 7188 const struct btf_kfunc_id_set *kset) 7189 { 7190 bool vmlinux_set = !btf_is_module(btf); 7191 int type, ret = 0; 7192 7193 for (type = 0; type < ARRAY_SIZE(kset->sets); type++) { 7194 if (!kset->sets[type]) 7195 continue; 7196 7197 ret = __btf_populate_kfunc_set(btf, hook, type, kset->sets[type], vmlinux_set); 7198 if (ret) 7199 break; 7200 } 7201 return ret; 7202 } 7203 7204 static bool __btf_kfunc_id_set_contains(const struct btf *btf, 7205 enum btf_kfunc_hook hook, 7206 enum btf_kfunc_type type, 7207 u32 kfunc_btf_id) 7208 { 7209 struct btf_id_set *set; 7210 7211 if (hook >= BTF_KFUNC_HOOK_MAX || type >= BTF_KFUNC_TYPE_MAX) 7212 return false; 7213 if (!btf->kfunc_set_tab) 7214 return false; 7215 set = btf->kfunc_set_tab->sets[hook][type]; 7216 if (!set) 7217 return false; 7218 return btf_id_set_contains(set, kfunc_btf_id); 7219 } 7220 7221 static int bpf_prog_type_to_kfunc_hook(enum bpf_prog_type prog_type) 7222 { 7223 switch (prog_type) { 7224 case BPF_PROG_TYPE_XDP: 7225 return BTF_KFUNC_HOOK_XDP; 7226 case BPF_PROG_TYPE_SCHED_CLS: 7227 return BTF_KFUNC_HOOK_TC; 7228 case BPF_PROG_TYPE_STRUCT_OPS: 7229 return BTF_KFUNC_HOOK_STRUCT_OPS; 7230 case BPF_PROG_TYPE_TRACING: 7231 return BTF_KFUNC_HOOK_TRACING; 7232 case BPF_PROG_TYPE_SYSCALL: 7233 return BTF_KFUNC_HOOK_SYSCALL; 7234 default: 7235 return BTF_KFUNC_HOOK_MAX; 7236 } 7237 } 7238 7239 /* Caution: 7240 * Reference to the module (obtained using btf_try_get_module) corresponding to 7241 * the struct btf *MUST* be held when calling this function from verifier 7242 * context. This is usually true as we stash references in prog's kfunc_btf_tab; 7243 * keeping the reference for the duration of the call provides the necessary 7244 * protection for looking up a well-formed btf->kfunc_set_tab. 
7245 */ 7246 bool btf_kfunc_id_set_contains(const struct btf *btf, 7247 enum bpf_prog_type prog_type, 7248 enum btf_kfunc_type type, u32 kfunc_btf_id) 7249 { 7250 enum btf_kfunc_hook hook; 7251 7252 hook = bpf_prog_type_to_kfunc_hook(prog_type); 7253 return __btf_kfunc_id_set_contains(btf, hook, type, kfunc_btf_id); 7254 } 7255 7256 /* This function must be invoked only from initcalls/module init functions */ 7257 int register_btf_kfunc_id_set(enum bpf_prog_type prog_type, 7258 const struct btf_kfunc_id_set *kset) 7259 { 7260 enum btf_kfunc_hook hook; 7261 struct btf *btf; 7262 int ret; 7263 7264 btf = btf_get_module_btf(kset->owner); 7265 if (!btf) { 7266 if (!kset->owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) { 7267 pr_err("missing vmlinux BTF, cannot register kfuncs\n"); 7268 return -ENOENT; 7269 } 7270 if (kset->owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)) { 7271 pr_err("missing module BTF, cannot register kfuncs\n"); 7272 return -ENOENT; 7273 } 7274 return 0; 7275 } 7276 if (IS_ERR(btf)) 7277 return PTR_ERR(btf); 7278 7279 hook = bpf_prog_type_to_kfunc_hook(prog_type); 7280 ret = btf_populate_kfunc_set(btf, hook, kset); 7281 btf_put(btf); 7282 return ret; 7283 } 7284 EXPORT_SYMBOL_GPL(register_btf_kfunc_id_set); 7285 7286 s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id) 7287 { 7288 struct btf_id_dtor_kfunc_tab *tab = btf->dtor_kfunc_tab; 7289 struct btf_id_dtor_kfunc *dtor; 7290 7291 if (!tab) 7292 return -ENOENT; 7293 /* Even though the size of tab->dtors[0] is > sizeof(u32), we only need 7294 * to compare the first u32 with btf_id, so we can reuse btf_id_cmp_func. 7295 */ 7296 BUILD_BUG_ON(offsetof(struct btf_id_dtor_kfunc, btf_id) != 0); 7297 dtor = bsearch(&btf_id, tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func); 7298 if (!dtor) 7299 return -ENOENT; 7300 return dtor->kfunc_btf_id; 7301 } 7302 7303 static int btf_check_dtor_kfuncs(struct btf *btf, const struct btf_id_dtor_kfunc *dtors, u32 cnt) 7304 { 7305 const struct btf_type *dtor_func, *dtor_func_proto, *t; 7306 const struct btf_param *args; 7307 s32 dtor_btf_id; 7308 u32 nr_args, i; 7309 7310 for (i = 0; i < cnt; i++) { 7311 dtor_btf_id = dtors[i].kfunc_btf_id; 7312 7313 dtor_func = btf_type_by_id(btf, dtor_btf_id); 7314 if (!dtor_func || !btf_type_is_func(dtor_func)) 7315 return -EINVAL; 7316 7317 dtor_func_proto = btf_type_by_id(btf, dtor_func->type); 7318 if (!dtor_func_proto || !btf_type_is_func_proto(dtor_func_proto)) 7319 return -EINVAL; 7320 7321 /* Make sure the prototype of the destructor kfunc is 'void func(type *)' */ 7322 t = btf_type_by_id(btf, dtor_func_proto->type); 7323 if (!t || !btf_type_is_void(t)) 7324 return -EINVAL; 7325 7326 nr_args = btf_type_vlen(dtor_func_proto); 7327 if (nr_args != 1) 7328 return -EINVAL; 7329 args = btf_params(dtor_func_proto); 7330 t = btf_type_by_id(btf, args[0].type); 7331 /* Allow any pointer type, as width on targets Linux supports 7332 * will be same for all pointer types (i.e. 
sizeof(void *))
		 */
		if (!t || !btf_type_is_ptr(t))
			return -EINVAL;
	}
	return 0;
}

/* This function must be invoked only from initcalls/module init functions */
int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_cnt,
				struct module *owner)
{
	struct btf_id_dtor_kfunc_tab *tab;
	struct btf *btf;
	u32 tab_cnt;
	int ret;

	btf = btf_get_module_btf(owner);
	if (!btf) {
		if (!owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
			pr_err("missing vmlinux BTF, cannot register dtor kfuncs\n");
			return -ENOENT;
		}
		if (owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)) {
			pr_err("missing module BTF, cannot register dtor kfuncs\n");
			return -ENOENT;
		}
		return 0;
	}
	if (IS_ERR(btf))
		return PTR_ERR(btf);

	if (add_cnt >= BTF_DTOR_KFUNC_MAX_CNT) {
		pr_err("cannot register more than %d kfunc destructors\n", BTF_DTOR_KFUNC_MAX_CNT);
		ret = -E2BIG;
		goto end;
	}

	/* Ensure that the prototype of dtor kfuncs being registered is sane */
	ret = btf_check_dtor_kfuncs(btf, dtors, add_cnt);
	if (ret < 0)
		goto end;

	tab = btf->dtor_kfunc_tab;
	/* Only one call allowed for modules */
	if (WARN_ON_ONCE(tab && btf_is_module(btf))) {
		ret = -EINVAL;
		goto end;
	}

	tab_cnt = tab ? tab->cnt : 0;
	if (tab_cnt > U32_MAX - add_cnt) {
		ret = -EOVERFLOW;
		goto end;
	}
	if (tab_cnt + add_cnt >= BTF_DTOR_KFUNC_MAX_CNT) {
		pr_err("cannot register more than %d kfunc destructors\n", BTF_DTOR_KFUNC_MAX_CNT);
		ret = -E2BIG;
		goto end;
	}

	tab = krealloc(btf->dtor_kfunc_tab,
		       offsetof(struct btf_id_dtor_kfunc_tab, dtors[tab_cnt + add_cnt]),
		       GFP_KERNEL | __GFP_NOWARN);
	if (!tab) {
		ret = -ENOMEM;
		goto end;
	}

	if (!btf->dtor_kfunc_tab)
		tab->cnt = 0;
	btf->dtor_kfunc_tab = tab;

	memcpy(tab->dtors + tab->cnt, dtors, add_cnt * sizeof(tab->dtors[0]));
	tab->cnt += add_cnt;

	sort(tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func, NULL);

	return 0;
end:
	btf_free_dtor_kfunc_tab(btf);
	btf_put(btf);
	return ret;
}
EXPORT_SYMBOL_GPL(register_btf_id_dtor_kfuncs);

#define MAX_TYPES_ARE_COMPAT_DEPTH 2

/* Check local and target types for compatibility. This check is used for
 * type-based CO-RE relocations and follows slightly different rules than
 * field-based relocations. This function assumes that root types were already
 * checked for name match. Beyond that initial root-level name check, names
 * are completely ignored. Compatibility rules are as follows:
 * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs/ENUM64s are considered compatible, but
 *   kind should match for local and target types (i.e., STRUCT is not
 *   compatible with UNION);
 * - for ENUMs/ENUM64s, the size is ignored;
 * - for INT, size and signedness are ignored;
 * - for ARRAY, dimensionality is ignored, element types are checked for
 *   compatibility recursively;
 * - CONST/VOLATILE/RESTRICT modifiers are ignored;
 * - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
 * - FUNC_PROTOs are compatible if they have compatible signature: same
 *   number of input args and compatible return and argument types.
7436 * These rules are not set in stone and probably will be adjusted as we get 7437 * more experience with using BPF CO-RE relocations. 7438 */ 7439 int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id, 7440 const struct btf *targ_btf, __u32 targ_id) 7441 { 7442 return __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id, 7443 MAX_TYPES_ARE_COMPAT_DEPTH); 7444 } 7445 7446 static bool bpf_core_is_flavor_sep(const char *s) 7447 { 7448 /* check X___Y name pattern, where X and Y are not underscores */ 7449 return s[0] != '_' && /* X */ 7450 s[1] == '_' && s[2] == '_' && s[3] == '_' && /* ___ */ 7451 s[4] != '_'; /* Y */ 7452 } 7453 7454 size_t bpf_core_essential_name_len(const char *name) 7455 { 7456 size_t n = strlen(name); 7457 int i; 7458 7459 for (i = n - 5; i >= 0; i--) { 7460 if (bpf_core_is_flavor_sep(name + i)) 7461 return i + 1; 7462 } 7463 return n; 7464 } 7465 7466 struct bpf_cand_cache { 7467 const char *name; 7468 u32 name_len; 7469 u16 kind; 7470 u16 cnt; 7471 struct { 7472 const struct btf *btf; 7473 u32 id; 7474 } cands[]; 7475 }; 7476 7477 static void bpf_free_cands(struct bpf_cand_cache *cands) 7478 { 7479 if (!cands->cnt) 7480 /* empty candidate array was allocated on stack */ 7481 return; 7482 kfree(cands); 7483 } 7484 7485 static void bpf_free_cands_from_cache(struct bpf_cand_cache *cands) 7486 { 7487 kfree(cands->name); 7488 kfree(cands); 7489 } 7490 7491 #define VMLINUX_CAND_CACHE_SIZE 31 7492 static struct bpf_cand_cache *vmlinux_cand_cache[VMLINUX_CAND_CACHE_SIZE]; 7493 7494 #define MODULE_CAND_CACHE_SIZE 31 7495 static struct bpf_cand_cache *module_cand_cache[MODULE_CAND_CACHE_SIZE]; 7496 7497 static DEFINE_MUTEX(cand_cache_mutex); 7498 7499 static void __print_cand_cache(struct bpf_verifier_log *log, 7500 struct bpf_cand_cache **cache, 7501 int cache_size) 7502 { 7503 struct bpf_cand_cache *cc; 7504 int i, j; 7505 7506 for (i = 0; i < cache_size; i++) { 7507 cc = cache[i]; 7508 if (!cc) 7509 continue; 7510 bpf_log(log, "[%d]%s(", i, cc->name); 7511 for (j = 0; j < cc->cnt; j++) { 7512 bpf_log(log, "%d", cc->cands[j].id); 7513 if (j < cc->cnt - 1) 7514 bpf_log(log, " "); 7515 } 7516 bpf_log(log, "), "); 7517 } 7518 } 7519 7520 static void print_cand_cache(struct bpf_verifier_log *log) 7521 { 7522 mutex_lock(&cand_cache_mutex); 7523 bpf_log(log, "vmlinux_cand_cache:"); 7524 __print_cand_cache(log, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE); 7525 bpf_log(log, "\nmodule_cand_cache:"); 7526 __print_cand_cache(log, module_cand_cache, MODULE_CAND_CACHE_SIZE); 7527 bpf_log(log, "\n"); 7528 mutex_unlock(&cand_cache_mutex); 7529 } 7530 7531 static u32 hash_cands(struct bpf_cand_cache *cands) 7532 { 7533 return jhash(cands->name, cands->name_len, 0); 7534 } 7535 7536 static struct bpf_cand_cache *check_cand_cache(struct bpf_cand_cache *cands, 7537 struct bpf_cand_cache **cache, 7538 int cache_size) 7539 { 7540 struct bpf_cand_cache *cc = cache[hash_cands(cands) % cache_size]; 7541 7542 if (cc && cc->name_len == cands->name_len && 7543 !strncmp(cc->name, cands->name, cands->name_len)) 7544 return cc; 7545 return NULL; 7546 } 7547 7548 static size_t sizeof_cands(int cnt) 7549 { 7550 return offsetof(struct bpf_cand_cache, cands[cnt]); 7551 } 7552 7553 static struct bpf_cand_cache *populate_cand_cache(struct bpf_cand_cache *cands, 7554 struct bpf_cand_cache **cache, 7555 int cache_size) 7556 { 7557 struct bpf_cand_cache **cc = &cache[hash_cands(cands) % cache_size], *new_cands; 7558 7559 if (*cc) { 7560 bpf_free_cands_from_cache(*cc); 7561 *cc 
= NULL; 7562 } 7563 new_cands = kmemdup(cands, sizeof_cands(cands->cnt), GFP_KERNEL); 7564 if (!new_cands) { 7565 bpf_free_cands(cands); 7566 return ERR_PTR(-ENOMEM); 7567 } 7568 /* strdup the name, since it will stay in cache. 7569 * the cands->name points to strings in prog's BTF and the prog can be unloaded. 7570 */ 7571 new_cands->name = kmemdup_nul(cands->name, cands->name_len, GFP_KERNEL); 7572 bpf_free_cands(cands); 7573 if (!new_cands->name) { 7574 kfree(new_cands); 7575 return ERR_PTR(-ENOMEM); 7576 } 7577 *cc = new_cands; 7578 return new_cands; 7579 } 7580 7581 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES 7582 static void __purge_cand_cache(struct btf *btf, struct bpf_cand_cache **cache, 7583 int cache_size) 7584 { 7585 struct bpf_cand_cache *cc; 7586 int i, j; 7587 7588 for (i = 0; i < cache_size; i++) { 7589 cc = cache[i]; 7590 if (!cc) 7591 continue; 7592 if (!btf) { 7593 /* when new module is loaded purge all of module_cand_cache, 7594 * since new module might have candidates with the name 7595 * that matches cached cands. 7596 */ 7597 bpf_free_cands_from_cache(cc); 7598 cache[i] = NULL; 7599 continue; 7600 } 7601 /* when module is unloaded purge cache entries 7602 * that match module's btf 7603 */ 7604 for (j = 0; j < cc->cnt; j++) 7605 if (cc->cands[j].btf == btf) { 7606 bpf_free_cands_from_cache(cc); 7607 cache[i] = NULL; 7608 break; 7609 } 7610 } 7611 7612 } 7613 7614 static void purge_cand_cache(struct btf *btf) 7615 { 7616 mutex_lock(&cand_cache_mutex); 7617 __purge_cand_cache(btf, module_cand_cache, MODULE_CAND_CACHE_SIZE); 7618 mutex_unlock(&cand_cache_mutex); 7619 } 7620 #endif 7621 7622 static struct bpf_cand_cache * 7623 bpf_core_add_cands(struct bpf_cand_cache *cands, const struct btf *targ_btf, 7624 int targ_start_id) 7625 { 7626 struct bpf_cand_cache *new_cands; 7627 const struct btf_type *t; 7628 const char *targ_name; 7629 size_t targ_essent_len; 7630 int n, i; 7631 7632 n = btf_nr_types(targ_btf); 7633 for (i = targ_start_id; i < n; i++) { 7634 t = btf_type_by_id(targ_btf, i); 7635 if (btf_kind(t) != cands->kind) 7636 continue; 7637 7638 targ_name = btf_name_by_offset(targ_btf, t->name_off); 7639 if (!targ_name) 7640 continue; 7641 7642 /* the resched point is before strncmp to make sure that search 7643 * for non-existing name will have a chance to schedule(). 
		 */
		cond_resched();

		if (strncmp(cands->name, targ_name, cands->name_len) != 0)
			continue;

		targ_essent_len = bpf_core_essential_name_len(targ_name);
		if (targ_essent_len != cands->name_len)
			continue;

		/* most of the time there is only one candidate for a given kind+name pair */
		new_cands = kmalloc(sizeof_cands(cands->cnt + 1), GFP_KERNEL);
		if (!new_cands) {
			bpf_free_cands(cands);
			return ERR_PTR(-ENOMEM);
		}

		memcpy(new_cands, cands, sizeof_cands(cands->cnt));
		bpf_free_cands(cands);
		cands = new_cands;
		cands->cands[cands->cnt].btf = targ_btf;
		cands->cands[cands->cnt].id = i;
		cands->cnt++;
	}
	return cands;
}

static struct bpf_cand_cache *
bpf_core_find_cands(struct bpf_core_ctx *ctx, u32 local_type_id)
{
	struct bpf_cand_cache *cands, *cc, local_cand = {};
	const struct btf *local_btf = ctx->btf;
	const struct btf_type *local_type;
	const struct btf *main_btf;
	size_t local_essent_len;
	struct btf *mod_btf;
	const char *name;
	int id;

	main_btf = bpf_get_btf_vmlinux();
	if (IS_ERR(main_btf))
		return ERR_CAST(main_btf);
	if (!main_btf)
		return ERR_PTR(-EINVAL);

	local_type = btf_type_by_id(local_btf, local_type_id);
	if (!local_type)
		return ERR_PTR(-EINVAL);

	name = btf_name_by_offset(local_btf, local_type->name_off);
	if (str_is_empty(name))
		return ERR_PTR(-EINVAL);
	local_essent_len = bpf_core_essential_name_len(name);

	cands = &local_cand;
	cands->name = name;
	cands->kind = btf_kind(local_type);
	cands->name_len = local_essent_len;

	cc = check_cand_cache(cands, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
	/* cands is a pointer to stack here */
	if (cc) {
		if (cc->cnt)
			return cc;
		goto check_modules;
	}

	/* Attempt to find target candidates in vmlinux BTF first */
	cands = bpf_core_add_cands(cands, main_btf, 1);
	if (IS_ERR(cands))
		return ERR_CAST(cands);

	/* cands is a pointer to kmalloced memory here if cands->cnt > 0 */

	/* populate cache even when cands->cnt == 0 */
	cc = populate_cand_cache(cands, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
	if (IS_ERR(cc))
		return ERR_CAST(cc);

	/* if vmlinux BTF has any candidate, don't go for module BTFs */
	if (cc->cnt)
		return cc;

check_modules:
	/* cands is a pointer to stack here and cands->cnt == 0 */
	cc = check_cand_cache(cands, module_cand_cache, MODULE_CAND_CACHE_SIZE);
	if (cc)
		/* if cache has it return it even if cc->cnt == 0 */
		return cc;

	/* If candidate is not found in vmlinux's BTF then search in module's BTFs */
	spin_lock_bh(&btf_idr_lock);
	idr_for_each_entry(&btf_idr, mod_btf, id) {
		if (!btf_is_module(mod_btf))
			continue;
		/* linear search could be slow, hence unlock/lock
		 * the IDR to avoid holding it for too long
		 */
		btf_get(mod_btf);
		spin_unlock_bh(&btf_idr_lock);
		cands = bpf_core_add_cands(cands, mod_btf, btf_nr_types(main_btf));
		if (IS_ERR(cands)) {
			btf_put(mod_btf);
			return ERR_CAST(cands);
		}
		spin_lock_bh(&btf_idr_lock);
		btf_put(mod_btf);
	}
	spin_unlock_bh(&btf_idr_lock);
	/* cands is a pointer to kmalloced memory here if cands->cnt > 0
	 * or pointer to stack if cands->cnt == 0.
7755 * Copy it into the cache even when cands->cnt == 0 and 7756 * return the result. 7757 */ 7758 return populate_cand_cache(cands, module_cand_cache, MODULE_CAND_CACHE_SIZE); 7759 } 7760 7761 int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo, 7762 int relo_idx, void *insn) 7763 { 7764 bool need_cands = relo->kind != BPF_CORE_TYPE_ID_LOCAL; 7765 struct bpf_core_cand_list cands = {}; 7766 struct bpf_core_relo_res targ_res; 7767 struct bpf_core_spec *specs; 7768 int err; 7769 7770 /* ~4k of temp memory necessary to convert LLVM spec like "0:1:0:5" 7771 * into arrays of btf_ids of struct fields and array indices. 7772 */ 7773 specs = kcalloc(3, sizeof(*specs), GFP_KERNEL); 7774 if (!specs) 7775 return -ENOMEM; 7776 7777 if (need_cands) { 7778 struct bpf_cand_cache *cc; 7779 int i; 7780 7781 mutex_lock(&cand_cache_mutex); 7782 cc = bpf_core_find_cands(ctx, relo->type_id); 7783 if (IS_ERR(cc)) { 7784 bpf_log(ctx->log, "target candidate search failed for %d\n", 7785 relo->type_id); 7786 err = PTR_ERR(cc); 7787 goto out; 7788 } 7789 if (cc->cnt) { 7790 cands.cands = kcalloc(cc->cnt, sizeof(*cands.cands), GFP_KERNEL); 7791 if (!cands.cands) { 7792 err = -ENOMEM; 7793 goto out; 7794 } 7795 } 7796 for (i = 0; i < cc->cnt; i++) { 7797 bpf_log(ctx->log, 7798 "CO-RE relocating %s %s: found target candidate [%d]\n", 7799 btf_kind_str[cc->kind], cc->name, cc->cands[i].id); 7800 cands.cands[i].btf = cc->cands[i].btf; 7801 cands.cands[i].id = cc->cands[i].id; 7802 } 7803 cands.len = cc->cnt; 7804 /* cand_cache_mutex needs to span the cache lookup and 7805 * copy of btf pointer into bpf_core_cand_list, 7806 * since module can be unloaded while bpf_core_calc_relo_insn 7807 * is working with module's btf. 7808 */ 7809 } 7810 7811 err = bpf_core_calc_relo_insn((void *)ctx->log, relo, relo_idx, ctx->btf, &cands, specs, 7812 &targ_res); 7813 if (err) 7814 goto out; 7815 7816 err = bpf_core_patch_insn((void *)ctx->log, insn, relo->insn_off / 8, relo, relo_idx, 7817 &targ_res); 7818 7819 out: 7820 kfree(specs); 7821 if (need_cands) { 7822 kfree(cands.cands); 7823 mutex_unlock(&cand_cache_mutex); 7824 if (ctx->log->level & BPF_LOG_LEVEL2) 7825 print_cand_cache(ctx->log); 7826 } 7827 return err; 7828 } 7829
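
/* Usage sketch for the kfunc registration API above (illustrative only, not
 * compiled as part of this file): a subsystem or module builds a sorted BTF
 * ID set with the BTF_SET_START()/BTF_ID()/BTF_SET_END() macros from
 * <linux/btf_ids.h>, wraps it in a struct btf_kfunc_id_set and registers it
 * from an init function. "my_test_kfunc" and "my_kfunc_set" are made-up
 * names:
 *
 *	BTF_SET_START(my_check_kfunc_ids)
 *	BTF_ID(func, my_test_kfunc)
 *	BTF_SET_END(my_check_kfunc_ids)
 *
 *	static const struct btf_kfunc_id_set my_kfunc_set = {
 *		.owner     = THIS_MODULE,
 *		.check_set = &my_check_kfunc_ids,
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		return register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS,
 *						 &my_kfunc_set);
 *	}
 *
 * The verifier then consults the set via btf_kfunc_id_set_contains() when a
 * program of that type calls one of the listed kfuncs.
 */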
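
/* CO-RE type "flavor" naming, as handled by bpf_core_essential_name_len()
 * and bpf_core_is_flavor_sep() above (the type name below is a made-up
 * example): a program-local definition named
 *
 *	struct task_struct___old { ... };
 *
 * has the essential name "task_struct"; everything from the triple
 * underscore onwards only disambiguates multiple local definitions and is
 * ignored when candidates are matched against vmlinux and module BTFs in
 * bpf_core_add_cands().
 */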