// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Facebook */

#include <uapi/linux/btf.h>
#include <uapi/linux/bpf.h>
#include <uapi/linux/bpf_perf_event.h>
#include <uapi/linux/types.h>
#include <linux/seq_file.h>
#include <linux/compiler.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/sort.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/skmsg.h>
#include <linux/perf_event.h>
#include <linux/bsearch.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <net/sock.h>
#include "../tools/lib/bpf/relo_core.h"

/* BTF (BPF Type Format) is the metadata format which describes
 * the data types of BPF programs/maps. Hence, it basically focuses
 * on the C programming language which modern BPF is primarily
 * using.
 *
 * ELF Section:
 * ~~~~~~~~~~~
 * The BTF data is stored under the ".BTF" ELF section
 *
 * struct btf_type:
 * ~~~~~~~~~~~~~~~
 * Each 'struct btf_type' object describes a C data type.
 * Depending on the type it is describing, a 'struct btf_type'
 * object may be followed by more data. E.g., to describe an
 * array, 'struct btf_type' is followed by 'struct btf_array'.
 *
 * 'struct btf_type' and any extra data following it are
 * 4-byte aligned.
 *
 * Type section:
 * ~~~~~~~~~~~~~
 * The BTF type section contains a list of 'struct btf_type' objects.
 * Each one describes a C type. Recall from the above section
 * that a 'struct btf_type' object could be immediately followed by extra
 * data in order to describe some particular C types.
 *
 * type_id:
 * ~~~~~~~
 * Each btf_type object is identified by a type_id. The type_id
 * is implicitly implied by the location of the btf_type object in
 * the BTF type section. The first one has type_id 1. The second
 * one has type_id 2...etc. Hence, an earlier btf_type has
 * a smaller type_id.
 *
 * A btf_type object may refer to another btf_type object by using
 * type_id (i.e. the "type" in the "struct btf_type").
 *
 * NOTE that we cannot assume any reference-order.
 * A btf_type object can refer to an earlier btf_type object
 * but it can also refer to a later btf_type object.
 *
 * For example, to describe "const void *", a btf_type
 * object describing "const" may refer to another btf_type
 * object describing "void *". This type-reference is done
 * by specifying type_id:
 *
 * [1] CONST (anon) type_id=2
 * [2] PTR (anon) type_id=0
 *
 * The above is the btf_verifier debug log:
 *   - Each line starting with "[?]" is a btf_type object
 *   - [?] is the type_id of the btf_type object.
 *   - CONST/PTR is the BTF_KIND_XXX
 *   - "(anon)" is the name of the type. It just
 *     happens that CONST and PTR have no name.
 *   - type_id=XXX is the 'u32 type' in btf_type
 *
 * NOTE: "void" has type_id 0
 *
 * String section:
 * ~~~~~~~~~~~~~~
 * The BTF string section contains the names used by the type section.
 * Each string is referred to by an "offset" from the beginning of the
 * string section.
 *
 * Each string is '\0' terminated.
 *
 * The first character in the string section must be '\0'
 * which is used to mean 'anonymous'. Some btf_type may not
 * have a name.
 */
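
/* Editor's illustrative sketch (not part of the original file): the
 * "const void *" example above, hand-encoded as raw type records using
 * the uapi 'struct btf_type' layout. The kind lives in bits 24-28 of
 * 'info' and name_off 0 means anonymous; the array name and the records
 * themselves are hypothetical and referenced nowhere else.
 */
static const struct btf_type btf_example_const_void_ptr[] __maybe_unused = {
	/* [1] CONST (anon) type_id=2 */
	{ .name_off = 0, .info = BTF_KIND_CONST << 24, .type = 2 },
	/* [2] PTR (anon) type_id=0 ("void") */
	{ .name_off = 0, .info = BTF_KIND_PTR << 24, .type = 0 },
};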

/* BTF verification:
 *
 * To verify BTF data, two passes are needed.
 *
 * Pass #1
 * ~~~~~~~
 * The first pass is to collect all btf_type objects to
 * an array: "btf->types".
 *
 * Depending on the C type that a btf_type is describing,
 * a btf_type may be followed by extra data. We don't know
 * how many btf_types there are, and more importantly we don't
 * know where each btf_type is located in the type section.
 *
 * Without knowing the location of each type_id, most verifications
 * cannot be done. e.g. an earlier btf_type may refer to a later
 * btf_type (recall the "const void *" above), so we cannot
 * check this type-reference in the first pass.
 *
 * The first pass still does some verification (e.g.
 * checking that the name is a valid offset into the string section).
 *
 * Pass #2
 * ~~~~~~~
 * The main focus is to resolve a btf_type that is referring
 * to another type.
 *
 * We have to ensure the referring type:
 * 1) does exist in the BTF (i.e. in btf->types[])
 * 2) does not cause a loop:
 *	struct A {
 *		struct B b;
 *	};
 *
 *	struct B {
 *		struct A a;
 *	};
 *
 * btf_type_needs_resolve() decides if a btf_type needs
 * to be resolved.
 *
 * The needs_resolve type implements the "resolve()" ops which
 * essentially does a DFS and detects backedges.
 *
 * During resolve (or DFS), different C types have different
 * "RESOLVED" conditions.
 *
 * When resolving a BTF_KIND_STRUCT, we need to resolve all its
 * members because a member is always referring to another
 * type. A struct's member can be treated as "RESOLVED" if
 * it is referring to a BTF_KIND_PTR. Otherwise, the
 * following valid C struct would be rejected:
 *
 *	struct A {
 *		int m;
 *		struct A *a;
 *	};
 *
 * When resolving a BTF_KIND_PTR, it needs to keep resolving if
 * it is referring to another BTF_KIND_PTR. Otherwise, we cannot
 * detect a pointer loop, e.g.:
 * BTF_KIND_CONST -> BTF_KIND_PTR -> BTF_KIND_CONST -> BTF_KIND_PTR +
 *                        ^                                         |
 *                        +-----------------------------------------+
 *
 */

#define BITS_PER_U128 (sizeof(u64) * BITS_PER_BYTE * 2)
#define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
#define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
#define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
#define BITS_ROUNDUP_BYTES(bits) \
	(BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))
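
/* Editor's worked example for the helpers above: for a 12-bit bitfield,
 * BITS_ROUNDDOWN_BYTES(12) = 12 >> 3 = 1 (whole bytes) while
 * BITS_ROUNDUP_BYTES(12) = 1 + !!(12 & 7) = 2 (bytes needed to hold all
 * 12 bits); for a byte-aligned 16 bits both evaluate to 2 and
 * BITS_PER_BYTE_MASKED(16) = 0.
 */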

#define BTF_INFO_MASK 0x9f00ffff
#define BTF_INT_MASK 0x0fffffff
#define BTF_TYPE_ID_VALID(type_id) ((type_id) <= BTF_MAX_TYPE)
#define BTF_STR_OFFSET_VALID(name_off) ((name_off) <= BTF_MAX_NAME_OFFSET)

/* 16MB for 64k structs (each with 16 members) and
 * a few MB of space for the string section.
 * The hard limit is S32_MAX.
 */
#define BTF_MAX_SIZE (16 * 1024 * 1024)

#define for_each_member_from(i, from, struct_type, member)		\
	for (i = from, member = btf_type_member(struct_type) + from;	\
	     i < btf_type_vlen(struct_type);				\
	     i++, member++)

#define for_each_vsi_from(i, from, struct_type, member)				\
	for (i = from, member = btf_type_var_secinfo(struct_type) + from;	\
	     i < btf_type_vlen(struct_type);					\
	     i++, member++)
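
/* Editor's sketch (hypothetical helper, not part of the original file)
 * showing typical use of the iterator above; it assumes the
 * btf_type_member()/btf_type_vlen()/btf_type_is_ptr() helpers from
 * <linux/btf.h>. Counting from index 0 mimics how the resolve logic
 * later resumes from v->next_member.
 */
static u16 __maybe_unused
btf_example_count_ptr_members(const struct btf *btf,
			      const struct btf_type *struct_type)
{
	const struct btf_member *member;
	u16 i, nr_ptrs = 0;

	for_each_member_from(i, 0, struct_type, member) {
		const struct btf_type *mt = btf_type_by_id(btf, member->type);

		if (mt && btf_type_is_ptr(mt))
			nr_ptrs++;
	}

	return nr_ptrs;
}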

DEFINE_IDR(btf_idr);
DEFINE_SPINLOCK(btf_idr_lock);

enum btf_kfunc_hook {
	BTF_KFUNC_HOOK_XDP,
	BTF_KFUNC_HOOK_TC,
	BTF_KFUNC_HOOK_STRUCT_OPS,
	BTF_KFUNC_HOOK_MAX,
};

enum {
	BTF_KFUNC_SET_MAX_CNT = 32,
	BTF_DTOR_KFUNC_MAX_CNT = 256,
};

struct btf_kfunc_set_tab {
	struct btf_id_set *sets[BTF_KFUNC_HOOK_MAX][BTF_KFUNC_TYPE_MAX];
};

struct btf_id_dtor_kfunc_tab {
	u32 cnt;
	struct btf_id_dtor_kfunc dtors[];
};

struct btf {
	void *data;
	struct btf_type **types;
	u32 *resolved_ids;
	u32 *resolved_sizes;
	const char *strings;
	void *nohdr_data;
	struct btf_header hdr;
	u32 nr_types; /* includes VOID for base BTF */
	u32 types_size;
	u32 data_size;
	refcount_t refcnt;
	u32 id;
	struct rcu_head rcu;
	struct btf_kfunc_set_tab *kfunc_set_tab;
	struct btf_id_dtor_kfunc_tab *dtor_kfunc_tab;

	/* split BTF support */
	struct btf *base_btf;
	u32 start_id; /* first type ID in this BTF (0 for base BTF) */
	u32 start_str_off; /* first string offset (0 for base BTF) */
	char name[MODULE_NAME_LEN];
	bool kernel_btf;
};

enum verifier_phase {
	CHECK_META,
	CHECK_TYPE,
};

struct resolve_vertex {
	const struct btf_type *t;
	u32 type_id;
	u16 next_member;
};

enum visit_state {
	NOT_VISITED,
	VISITED,
	RESOLVED,
};

enum resolve_mode {
	RESOLVE_TBD,	/* To Be Determined */
	RESOLVE_PTR,	/* Resolving for Pointer */
	RESOLVE_STRUCT_OR_ARRAY, /* Resolving for struct/union
				  * or array
				  */
};

#define MAX_RESOLVE_DEPTH 32

struct btf_sec_info {
	u32 off;
	u32 len;
};

struct btf_verifier_env {
	struct btf *btf;
	u8 *visit_states;
	struct resolve_vertex stack[MAX_RESOLVE_DEPTH];
	struct bpf_verifier_log log;
	u32 log_type_id;
	u32 top_stack;
	enum verifier_phase phase;
	enum resolve_mode resolve_mode;
};

static const char * const btf_kind_str[NR_BTF_KINDS] = {
	[BTF_KIND_UNKN]		= "UNKNOWN",
	[BTF_KIND_INT]		= "INT",
	[BTF_KIND_PTR]		= "PTR",
	[BTF_KIND_ARRAY]	= "ARRAY",
	[BTF_KIND_STRUCT]	= "STRUCT",
	[BTF_KIND_UNION]	= "UNION",
	[BTF_KIND_ENUM]		= "ENUM",
	[BTF_KIND_FWD]		= "FWD",
	[BTF_KIND_TYPEDEF]	= "TYPEDEF",
	[BTF_KIND_VOLATILE]	= "VOLATILE",
	[BTF_KIND_CONST]	= "CONST",
	[BTF_KIND_RESTRICT]	= "RESTRICT",
	[BTF_KIND_FUNC]		= "FUNC",
	[BTF_KIND_FUNC_PROTO]	= "FUNC_PROTO",
	[BTF_KIND_VAR]		= "VAR",
	[BTF_KIND_DATASEC]	= "DATASEC",
	[BTF_KIND_FLOAT]	= "FLOAT",
	[BTF_KIND_DECL_TAG]	= "DECL_TAG",
	[BTF_KIND_TYPE_TAG]	= "TYPE_TAG",
};

const char *btf_type_str(const struct btf_type *t)
{
	return btf_kind_str[BTF_INFO_KIND(t->info)];
}

/* Chunk size we use in safe copy of data to be shown. */
#define BTF_SHOW_OBJ_SAFE_SIZE 32

/*
 * This is the maximum size of a base type value (equivalent to a
 * 128-bit int); if we are at the end of our safe buffer and have
 * less than 16 bytes space we can't be assured of being able
 * to copy the next type safely, so in such cases we will initiate
 * a new copy.
 */
#define BTF_SHOW_OBJ_BASE_TYPE_SIZE 16

/* Type name size */
#define BTF_SHOW_NAME_SIZE 80

/*
 * Common data to all BTF show operations. Private show functions can add
 * their own data to a structure containing a struct btf_show and consult it
 * in the show callback. See btf_type_show() below.
 *
 * One challenge with showing nested data is we want to skip 0-valued
 * data, but in order to figure out whether a nested object is all zeros
 * we need to walk through it. As a result, we need to make two passes
 * when handling structs, unions and arrays; the first pass simply looks
 * for nonzero data, while the second actually does the display. The first
 * pass is signalled by show->state.depth_check being set, and if we
 * encounter a non-zero value we set show->state.depth_to_show to
 * the depth at which we encountered it. When we have completed the
 * first pass, we will know if anything needs to be displayed if
 * depth_to_show > depth. See btf_[struct,array]_show() for the
 * implementation of this.
 *
 * Another problem is we want to ensure the data for display is safe to
 * access. To support this, the anonymous "struct {} obj" tracks the data
 * object and our safe copy of it. We copy portions of the data needed
 * to the object "copy" buffer, but because its size is limited to
 * BTF_SHOW_OBJ_SAFE_SIZE bytes, multiple copies may be required as we
 * traverse larger objects for display.
 *
 * The various data type show functions all start with a call to
 * btf_show_start_type() which returns a pointer to the safe copy
 * of the data needed (or if BTF_SHOW_UNSAFE is specified, to the
 * raw data itself). btf_show_obj_safe() is responsible for
 * using copy_from_kernel_nofault() to update the safe data if necessary
 * as we traverse the object's data. skbuff-like semantics are
 * used:
 *
 * - obj.head points to the start of the toplevel object for display
 * - obj.size is the size of the toplevel object
 * - obj.data points to the current point in the original data at
 *   which our safe data starts. obj.data will advance as we copy
 *   portions of the data.
 *
 * In most cases a single copy will suffice, but larger data structures
 * such as "struct task_struct" will require many copies. The logic in
 * btf_show_obj_safe() determines if a new
 * copy_from_kernel_nofault() is needed.
 */
struct btf_show {
	u64 flags;
	void *target;	/* target of show operation (seq file, buffer) */
	void (*showfn)(struct btf_show *show, const char *fmt, va_list args);
	const struct btf *btf;
	/* below are used during iteration */
	struct {
		u8 depth;
		u8 depth_to_show;
		u8 depth_check;
		u8 array_member:1,
		   array_terminated:1;
		u16 array_encoding;
		u32 type_id;
		int status;			/* non-zero for error */
		const struct btf_type *type;
		const struct btf_member *member;
		char name[BTF_SHOW_NAME_SIZE];	/* space for member name/type */
	} state;
	struct {
		u32 size;
		void *head;
		void *data;
		u8 safe[BTF_SHOW_OBJ_SAFE_SIZE];
	} obj;
};
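
/* Editor's sketch (hypothetical, not part of the original file): a
 * private show function wrapping struct btf_show as described above.
 * The showfn appends formatted output to a caller-supplied buffer;
 * 'struct btf_example_show_buf' and 'btf_example_buf_showfn' are
 * illustrative names only.
 */
struct btf_example_show_buf {
	struct btf_show show;	/* embedded so showfn can downcast */
	char buf[256];
	int len;
};

static void __maybe_unused btf_example_buf_showfn(struct btf_show *show,
						  const char *fmt,
						  va_list args)
{
	struct btf_example_show_buf *b;

	b = container_of(show, struct btf_example_show_buf, show);
	if (b->len >= 0 && b->len < sizeof(b->buf))
		b->len += vsnprintf(b->buf + b->len,
				    sizeof(b->buf) - b->len, fmt, args);
}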

struct btf_kind_operations {
	s32 (*check_meta)(struct btf_verifier_env *env,
			  const struct btf_type *t,
			  u32 meta_left);
	int (*resolve)(struct btf_verifier_env *env,
		       const struct resolve_vertex *v);
	int (*check_member)(struct btf_verifier_env *env,
			    const struct btf_type *struct_type,
			    const struct btf_member *member,
			    const struct btf_type *member_type);
	int (*check_kflag_member)(struct btf_verifier_env *env,
				  const struct btf_type *struct_type,
				  const struct btf_member *member,
				  const struct btf_type *member_type);
	void (*log_details)(struct btf_verifier_env *env,
			    const struct btf_type *t);
	void (*show)(const struct btf *btf, const struct btf_type *t,
		     u32 type_id, void *data, u8 bits_offsets,
		     struct btf_show *show);
};

static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS];
static struct btf_type btf_void;

static int btf_resolve(struct btf_verifier_env *env,
		       const struct btf_type *t, u32 type_id);

static int btf_func_check(struct btf_verifier_env *env,
			  const struct btf_type *t);

static bool btf_type_is_modifier(const struct btf_type *t)
{
	/* Some of them are not strictly C modifiers,
	 * but they are grouped into the same bucket
	 * for BTF concern:
	 * A type (t) that refers to another
	 * type through t->type AND its size cannot
	 * be determined without following the t->type.
	 *
	 * ptr does not fall into this bucket
	 * because its size is always sizeof(void *).
	 */
	switch (BTF_INFO_KIND(t->info)) {
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_TYPE_TAG:
		return true;
	}

	return false;
}

bool btf_type_is_void(const struct btf_type *t)
{
	return t == &btf_void;
}

static bool btf_type_is_fwd(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
}

static bool btf_type_nosize(const struct btf_type *t)
{
	return btf_type_is_void(t) || btf_type_is_fwd(t) ||
	       btf_type_is_func(t) || btf_type_is_func_proto(t);
}

static bool btf_type_nosize_or_null(const struct btf_type *t)
{
	return !t || btf_type_nosize(t);
}

static bool __btf_type_is_struct(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT;
}

static bool btf_type_is_array(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_ARRAY;
}

static bool btf_type_is_datasec(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC;
}

static bool btf_type_is_decl_tag(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_DECL_TAG;
}

static bool btf_type_is_decl_tag_target(const struct btf_type *t)
{
	return btf_type_is_func(t) || btf_type_is_struct(t) ||
	       btf_type_is_var(t) || btf_type_is_typedef(t);
}

u32 btf_nr_types(const struct btf *btf)
{
	u32 total = 0;

	while (btf) {
		total += btf->nr_types;
		btf = btf->base_btf;
	}

	return total;
}

s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind)
{
	const struct btf_type *t;
	const char *tname;
	u32 i, total;

	total = btf_nr_types(btf);
	for (i = 1; i < total; i++) {
		t = btf_type_by_id(btf, i);
		if (BTF_INFO_KIND(t->info) != kind)
			continue;

		tname = btf_name_by_offset(btf, t->name_off);
		if (!strcmp(tname, name))
			return i;
	}

	return -ENOENT;
}
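
/* Editor's sketch (hypothetical helper, not part of the original file):
 * a typical lookup pairing btf_find_by_name_kind() with
 * btf_type_by_id(). The vmlinux BTF is persistent, so no reference is
 * taken here; bpf_find_btf_id() below shows the ref-counted variant
 * that also searches module BTFs.
 */
static const struct btf_type * __maybe_unused
btf_example_find_task_struct(void)
{
	struct btf *btf = bpf_get_btf_vmlinux();
	s32 id;

	if (IS_ERR_OR_NULL(btf))
		return NULL;

	id = btf_find_by_name_kind(btf, "task_struct", BTF_KIND_STRUCT);
	return id > 0 ? btf_type_by_id(btf, id) : NULL;
}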

static s32 bpf_find_btf_id(const char *name, u32 kind, struct btf **btf_p)
{
	struct btf *btf;
	s32 ret;
	int id;

	btf = bpf_get_btf_vmlinux();
	if (IS_ERR(btf))
		return PTR_ERR(btf);
	if (!btf)
		return -EINVAL;

	ret = btf_find_by_name_kind(btf, name, kind);
	/* ret is never zero, since btf_find_by_name_kind returns
	 * positive btf_id or negative error.
	 */
	if (ret > 0) {
		btf_get(btf);
		*btf_p = btf;
		return ret;
	}

	/* If name is not found in vmlinux's BTF then search in module's BTFs */
	spin_lock_bh(&btf_idr_lock);
	idr_for_each_entry(&btf_idr, btf, id) {
		if (!btf_is_module(btf))
			continue;
		/* linear search could be slow hence unlock/lock
		 * the IDR to avoid holding it for too long
		 */
		btf_get(btf);
		spin_unlock_bh(&btf_idr_lock);
		ret = btf_find_by_name_kind(btf, name, kind);
		if (ret > 0) {
			*btf_p = btf;
			return ret;
		}
		spin_lock_bh(&btf_idr_lock);
		btf_put(btf);
	}
	spin_unlock_bh(&btf_idr_lock);
	return ret;
}

const struct btf_type *btf_type_skip_modifiers(const struct btf *btf,
					       u32 id, u32 *res_id)
{
	const struct btf_type *t = btf_type_by_id(btf, id);

	while (btf_type_is_modifier(t)) {
		id = t->type;
		t = btf_type_by_id(btf, t->type);
	}

	if (res_id)
		*res_id = id;

	return t;
}

const struct btf_type *btf_type_resolve_ptr(const struct btf *btf,
					    u32 id, u32 *res_id)
{
	const struct btf_type *t;

	t = btf_type_skip_modifiers(btf, id, NULL);
	if (!btf_type_is_ptr(t))
		return NULL;

	return btf_type_skip_modifiers(btf, t->type, res_id);
}

const struct btf_type *btf_type_resolve_func_ptr(const struct btf *btf,
						 u32 id, u32 *res_id)
{
	const struct btf_type *ptype;

	ptype = btf_type_resolve_ptr(btf, id, res_id);
	if (ptype && btf_type_is_func_proto(ptype))
		return ptype;

	return NULL;
}
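
/* Editor's worked example for the helpers above: given the chain
 *
 *	[1] CONST -> [2]
 *	[2] VOLATILE -> [3]
 *	[3] INT 'int'
 *
 * btf_type_skip_modifiers(btf, 1, &id) returns the INT type and sets
 * id = 3, while btf_type_resolve_ptr(btf, 1, &id) returns NULL because
 * the modifier chain does not end in a pointer.
 */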

/* Types that act only as a source, not sink or intermediate
 * type when resolving.
 */
static bool btf_type_is_resolve_source_only(const struct btf_type *t)
{
	return btf_type_is_var(t) ||
	       btf_type_is_decl_tag(t) ||
	       btf_type_is_datasec(t);
}

/* What types need to be resolved?
 *
 * btf_type_is_modifier() is an obvious one.
 *
 * btf_type_is_struct() because its member refers to
 * another type (through member->type).
 *
 * btf_type_is_var() because the variable refers to
 * another type. btf_type_is_datasec() holds multiple
 * btf_type_is_var() types that need resolving.
 *
 * btf_type_is_array() because its element (array->type)
 * refers to another type. An array can be thought of as a
 * special case of struct where the same member-type is
 * repeated array->nelems times.
 */
static bool btf_type_needs_resolve(const struct btf_type *t)
{
	return btf_type_is_modifier(t) ||
	       btf_type_is_ptr(t) ||
	       btf_type_is_struct(t) ||
	       btf_type_is_array(t) ||
	       btf_type_is_var(t) ||
	       btf_type_is_func(t) ||
	       btf_type_is_decl_tag(t) ||
	       btf_type_is_datasec(t);
}

/* t->size can be used */
static bool btf_type_has_size(const struct btf_type *t)
{
	switch (BTF_INFO_KIND(t->info)) {
	case BTF_KIND_INT:
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
	case BTF_KIND_ENUM:
	case BTF_KIND_DATASEC:
	case BTF_KIND_FLOAT:
		return true;
	}

	return false;
}

static const char *btf_int_encoding_str(u8 encoding)
{
	if (encoding == 0)
		return "(none)";
	else if (encoding == BTF_INT_SIGNED)
		return "SIGNED";
	else if (encoding == BTF_INT_CHAR)
		return "CHAR";
	else if (encoding == BTF_INT_BOOL)
		return "BOOL";
	else
		return "UNKN";
}

static u32 btf_type_int(const struct btf_type *t)
{
	return *(u32 *)(t + 1);
}

static const struct btf_array *btf_type_array(const struct btf_type *t)
{
	return (const struct btf_array *)(t + 1);
}

static const struct btf_enum *btf_type_enum(const struct btf_type *t)
{
	return (const struct btf_enum *)(t + 1);
}

static const struct btf_var *btf_type_var(const struct btf_type *t)
{
	return (const struct btf_var *)(t + 1);
}

static const struct btf_decl_tag *btf_type_decl_tag(const struct btf_type *t)
{
	return (const struct btf_decl_tag *)(t + 1);
}

static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
{
	return kind_ops[BTF_INFO_KIND(t->info)];
}

static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
{
	if (!BTF_STR_OFFSET_VALID(offset))
		return false;

	while (offset < btf->start_str_off)
		btf = btf->base_btf;

	offset -= btf->start_str_off;
	return offset < btf->hdr.str_len;
}

static bool __btf_name_char_ok(char c, bool first, bool dot_ok)
{
	if ((first ? !isalpha(c) :
		     !isalnum(c)) &&
	    c != '_' &&
	    ((c == '.' && !dot_ok) ||
	      c != '.'))
		return false;
	return true;
}

static const char *btf_str_by_offset(const struct btf *btf, u32 offset)
{
	while (offset < btf->start_str_off)
		btf = btf->base_btf;

	offset -= btf->start_str_off;
	if (offset < btf->hdr.str_len)
		return &btf->strings[offset];

	return NULL;
}

static bool __btf_name_valid(const struct btf *btf, u32 offset, bool dot_ok)
{
	/* offset must be valid */
	const char *src = btf_str_by_offset(btf, offset);
	const char *src_limit;

	if (!__btf_name_char_ok(*src, true, dot_ok))
		return false;

	/* set a limit on identifier length */
	src_limit = src + KSYM_NAME_LEN;
	src++;
	while (*src && src < src_limit) {
		if (!__btf_name_char_ok(*src, false, dot_ok))
			return false;
		src++;
	}

	return !*src;
}
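
/* Editor's worked example for __btf_name_valid(): "task_struct" and
 * "_x1" pass; "1foo" fails because the first character must be a letter
 * or '_'; ".rodata" passes only when dot_ok is true, i.e. through
 * btf_name_valid_section() below rather than
 * btf_name_valid_identifier().
 */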

/* Only C-style identifiers are permitted. This can be relaxed if
 * necessary.
 */
static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
{
	return __btf_name_valid(btf, offset, false);
}

static bool btf_name_valid_section(const struct btf *btf, u32 offset)
{
	return __btf_name_valid(btf, offset, true);
}

static const char *__btf_name_by_offset(const struct btf *btf, u32 offset)
{
	const char *name;

	if (!offset)
		return "(anon)";

	name = btf_str_by_offset(btf, offset);
	return name ?: "(invalid-name-offset)";
}

const char *btf_name_by_offset(const struct btf *btf, u32 offset)
{
	return btf_str_by_offset(btf, offset);
}

const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
{
	while (type_id < btf->start_id)
		btf = btf->base_btf;

	type_id -= btf->start_id;
	if (type_id >= btf->nr_types)
		return NULL;
	return btf->types[type_id];
}
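
/* Editor's worked example for btf_type_by_id() with split BTF: if the
 * base (vmlinux) BTF owns type IDs [0, N) and a module BTF has
 * start_id == N, then a lookup of ID N - 1 on the module btf walks to
 * base_btf and indexes its types[N - 1], while ID N lands in the
 * module's own types[0]. String offsets work the same way via
 * start_str_off in the helpers above.
 */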

/*
 * Regular int is not a bit field and it must be either
 * u8/u16/u32/u64 or __int128.
 */
static bool btf_type_int_is_regular(const struct btf_type *t)
{
	u8 nr_bits, nr_bytes;
	u32 int_data;

	int_data = btf_type_int(t);
	nr_bits = BTF_INT_BITS(int_data);
	nr_bytes = BITS_ROUNDUP_BYTES(nr_bits);
	if (BITS_PER_BYTE_MASKED(nr_bits) ||
	    BTF_INT_OFFSET(int_data) ||
	    (nr_bytes != sizeof(u8) && nr_bytes != sizeof(u16) &&
	     nr_bytes != sizeof(u32) && nr_bytes != sizeof(u64) &&
	     nr_bytes != (2 * sizeof(u64)))) {
		return false;
	}

	return true;
}

/*
 * Check that given struct member is a regular int with expected
 * offset and size.
 */
bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
			   const struct btf_member *m,
			   u32 expected_offset, u32 expected_size)
{
	const struct btf_type *t;
	u32 id, int_data;
	u8 nr_bits;

	id = m->type;
	t = btf_type_id_size(btf, &id, NULL);
	if (!t || !btf_type_is_int(t))
		return false;

	int_data = btf_type_int(t);
	nr_bits = BTF_INT_BITS(int_data);
	if (btf_type_kflag(s)) {
		u32 bitfield_size = BTF_MEMBER_BITFIELD_SIZE(m->offset);
		u32 bit_offset = BTF_MEMBER_BIT_OFFSET(m->offset);

		/* if kflag set, int should be a regular int and
		 * bit offset should be at byte boundary.
		 */
		return !bitfield_size &&
		       BITS_ROUNDUP_BYTES(bit_offset) == expected_offset &&
		       BITS_ROUNDUP_BYTES(nr_bits) == expected_size;
	}

	if (BTF_INT_OFFSET(int_data) ||
	    BITS_PER_BYTE_MASKED(m->offset) ||
	    BITS_ROUNDUP_BYTES(m->offset) != expected_offset ||
	    BITS_PER_BYTE_MASKED(nr_bits) ||
	    BITS_ROUNDUP_BYTES(nr_bits) != expected_size)
		return false;

	return true;
}

/* Similar to btf_type_skip_modifiers() but does not skip typedefs. */
static const struct btf_type *btf_type_skip_qualifiers(const struct btf *btf,
						       u32 id)
{
	const struct btf_type *t = btf_type_by_id(btf, id);

	while (btf_type_is_modifier(t) &&
	       BTF_INFO_KIND(t->info) != BTF_KIND_TYPEDEF) {
		t = btf_type_by_id(btf, t->type);
	}

	return t;
}

#define BTF_SHOW_MAX_ITER 10

#define BTF_KIND_BIT(kind) (1ULL << kind)

/*
 * Populate show->state.name with type name information.
 * Format of type name is
 *
 * [.member_name = ] (type_name)
 */
static const char *btf_show_name(struct btf_show *show)
{
	/* BTF_SHOW_MAX_ITER array suffixes "[]" */
	const char *array_suffixes = "[][][][][][][][][][]";
	const char *array_suffix = &array_suffixes[strlen(array_suffixes)];
	/* BTF_SHOW_MAX_ITER pointer suffixes "*" */
	const char *ptr_suffixes = "**********";
	const char *ptr_suffix = &ptr_suffixes[strlen(ptr_suffixes)];
	const char *name = NULL, *prefix = "", *parens = "";
	const struct btf_member *m = show->state.member;
	const struct btf_type *t;
	const struct btf_array *array;
	u32 id = show->state.type_id;
	const char *member = NULL;
	bool show_member = false;
	u64 kinds = 0;
	int i;

	show->state.name[0] = '\0';

	/*
	 * Don't show type name if we're showing an array member;
	 * in that case we show the array type so we don't need to repeat
	 * ourselves for each member.
	 */
	if (show->state.array_member)
		return "";

	/* Retrieve member name, if any. */
	if (m) {
		member = btf_name_by_offset(show->btf, m->name_off);
		show_member = strlen(member) > 0;
		id = m->type;
	}

	/*
	 * Start with type_id, as we have resolved the struct btf_type *
	 * via btf_modifier_show() past the parent typedef to the child
	 * struct, int etc it is defined as. In such cases, the type_id
	 * still represents the starting type while the struct btf_type *
	 * in our show->state points at the resolved type of the typedef.
	 */
	t = btf_type_by_id(show->btf, id);
	if (!t)
		return "";

	/*
	 * The goal here is to build up the right number of pointer and
	 * array suffixes while ensuring the type name for a typedef
	 * is represented. Along the way we accumulate a list of
	 * BTF kinds we have encountered, since these will inform later
	 * display; for example, pointer types will not require an
	 * opening "{" for struct, we will just display the pointer value.
	 *
	 * We also want to accumulate the right number of pointer or array
	 * indices in the format string while iterating until we get to
	 * the typedef/pointee/array member target type.
	 *
	 * We start by pointing at the end of pointer and array suffix
	 * strings; as we accumulate pointers and arrays we move the pointer
	 * or array string backwards so it will show the expected number of
	 * '*' or '[]' for the type. BTF_SHOW_MAX_ITER of nesting of pointers
	 * and/or arrays and typedefs are supported as a precaution.
	 *
	 * We also want to get the typedef name while proceeding to resolve
	 * the type it points to so that we can add parentheses if it is a
	 * "typedef struct" etc.
	 */
	for (i = 0; i < BTF_SHOW_MAX_ITER; i++) {

		switch (BTF_INFO_KIND(t->info)) {
		case BTF_KIND_TYPEDEF:
			if (!name)
				name = btf_name_by_offset(show->btf,
							  t->name_off);
			kinds |= BTF_KIND_BIT(BTF_KIND_TYPEDEF);
			id = t->type;
			break;
		case BTF_KIND_ARRAY:
			kinds |= BTF_KIND_BIT(BTF_KIND_ARRAY);
			parens = "[";
			if (!t)
				return "";
			array = btf_type_array(t);
			if (array_suffix > array_suffixes)
				array_suffix -= 2;
			id = array->type;
			break;
		case BTF_KIND_PTR:
			kinds |= BTF_KIND_BIT(BTF_KIND_PTR);
			if (ptr_suffix > ptr_suffixes)
				ptr_suffix -= 1;
			id = t->type;
			break;
		default:
			id = 0;
			break;
		}
		if (!id)
			break;
		t = btf_type_skip_qualifiers(show->btf, id);
	}
	/* We may not be able to represent this type; bail to be safe */
	if (i == BTF_SHOW_MAX_ITER)
		return "";

	if (!name)
		name = btf_name_by_offset(show->btf, t->name_off);

	switch (BTF_INFO_KIND(t->info)) {
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
		prefix = BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT ?
			 "struct" : "union";
		/* if it's an array of struct/union, parens is already set */
		if (!(kinds & (BTF_KIND_BIT(BTF_KIND_ARRAY))))
			parens = "{";
		break;
	case BTF_KIND_ENUM:
		prefix = "enum";
		break;
	default:
		break;
	}

	/* pointer does not require parens */
	if (kinds & BTF_KIND_BIT(BTF_KIND_PTR))
		parens = "";
	/* typedef does not require struct/union/enum prefix */
	if (kinds & BTF_KIND_BIT(BTF_KIND_TYPEDEF))
		prefix = "";

	if (!name)
		name = "";

	/* Even if we don't want type name info, we want parentheses etc */
	if (show->flags & BTF_SHOW_NONAME)
		snprintf(show->state.name, sizeof(show->state.name), "%s",
			 parens);
	else
		snprintf(show->state.name, sizeof(show->state.name),
			 "%s%s%s(%s%s%s%s%s%s)%s",
			 /* first 3 strings comprise ".member = " */
			 show_member ? "." : "",
			 show_member ? member : "",
			 show_member ? " = " : "",
			 /* ...next is our prefix (struct, enum, etc) */
			 prefix,
			 strlen(prefix) > 0 && strlen(name) > 0 ? " " : "",
			 /* ...this is the type name itself */
			 name,
			 /* ...suffixed by the appropriate '*', '[]' suffixes */
			 strlen(ptr_suffix) > 0 ? " " : "", ptr_suffix,
			 array_suffix, parens);

	return show->state.name;
}

static const char *__btf_show_indent(struct btf_show *show)
{
	const char *indents = "                                ";
	const char *indent = &indents[strlen(indents)];

	if ((indent - show->state.depth) >= indents)
		return indent - show->state.depth;
	return indents;
}

static const char *btf_show_indent(struct btf_show *show)
{
	return show->flags & BTF_SHOW_COMPACT ? "" : __btf_show_indent(show);
}

static const char *btf_show_newline(struct btf_show *show)
{
	return show->flags & BTF_SHOW_COMPACT ? "" : "\n";
}

static const char *btf_show_delim(struct btf_show *show)
{
	if (show->state.depth == 0)
		return "";

	if ((show->flags & BTF_SHOW_COMPACT) && show->state.type &&
		BTF_INFO_KIND(show->state.type->info) == BTF_KIND_UNION)
		return "|";

	return ",";
}
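
/* Editor's worked example for btf_show_name(): for a member "next" of
 * type 'struct list_head *', the snprintf() above produces
 * ".next = (struct list_head *)"; an anonymous toplevel int becomes
 * "(int)". With BTF_SHOW_NONAME only the parens/braces survive, e.g.
 * "{" when a struct is opened.
 */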

__printf(2, 3) static void btf_show(struct btf_show *show, const char *fmt, ...)
{
	va_list args;

	if (!show->state.depth_check) {
		va_start(args, fmt);
		show->showfn(show, fmt, args);
		va_end(args);
	}
}

/* Macros are used here as btf_show_type_value[s]() prepends and appends
 * format specifiers to the format specifier passed in; these do the work of
 * adding indentation, delimiters etc while the caller simply has to specify
 * the type value(s) in the format specifier + value(s).
 */
#define btf_show_type_value(show, fmt, value)				       \
	do {								       \
		if ((value) != 0 || (show->flags & BTF_SHOW_ZERO) ||	       \
		    show->state.depth == 0) {				       \
			btf_show(show, "%s%s" fmt "%s%s",		       \
				 btf_show_indent(show),			       \
				 btf_show_name(show),			       \
				 value, btf_show_delim(show),		       \
				 btf_show_newline(show));		       \
			if (show->state.depth > show->state.depth_to_show)     \
				show->state.depth_to_show = show->state.depth; \
		}							       \
	} while (0)

#define btf_show_type_values(show, fmt, ...)				       \
	do {								       \
		btf_show(show, "%s%s" fmt "%s%s", btf_show_indent(show),       \
			 btf_show_name(show),				       \
			 __VA_ARGS__, btf_show_delim(show),		       \
			 btf_show_newline(show));			       \
		if (show->state.depth > show->state.depth_to_show)	       \
			show->state.depth_to_show = show->state.depth;	       \
	} while (0)

/* How much is left to copy to safe buffer after @data? */
static int btf_show_obj_size_left(struct btf_show *show, void *data)
{
	return show->obj.head + show->obj.size - data;
}

/* Is object pointed to by @data of @size already copied to our safe buffer? */
static bool btf_show_obj_is_safe(struct btf_show *show, void *data, int size)
{
	return data >= show->obj.data &&
	       (data + size) < (show->obj.data + BTF_SHOW_OBJ_SAFE_SIZE);
}

/*
 * If object pointed to by @data of @size falls within our safe buffer, return
 * the equivalent pointer to the same safe data. Assumes
 * copy_from_kernel_nofault() has already happened and our safe buffer is
 * populated.
 */
static void *__btf_show_obj_safe(struct btf_show *show, void *data, int size)
{
	if (btf_show_obj_is_safe(show, data, size))
		return show->obj.safe + (data - show->obj.data);
	return NULL;
}
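
/* Editor's worked example for the safe-buffer window: if the last copy
 * filled obj.safe with BTF_SHOW_OBJ_SAFE_SIZE (32) bytes starting at
 * obj.data, then a 4-byte int at obj.data + 8 is served from
 * obj.safe + 8, while one at obj.data + 30 fails
 * btf_show_obj_is_safe() (30 + 4 >= 32) and forces a fresh
 * copy_from_kernel_nofault() window in btf_show_obj_safe() below.
 */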

/*
 * Return a safe-to-access version of data pointed to by @data.
 * We do this by copying the relevant amount of information
 * to the struct btf_show obj.safe buffer using copy_from_kernel_nofault().
 *
 * If BTF_SHOW_UNSAFE is specified, just return data as-is; no
 * safe copy is needed.
 *
 * Otherwise we need to determine if we have the required amount
 * of data (determined by the @data pointer and the size of the
 * largest base type we can encounter (represented by
 * BTF_SHOW_OBJ_BASE_TYPE_SIZE). Having that much data ensures
 * that we will be able to print some of the current object,
 * and if more is needed a copy will be triggered.
 * Some objects such as structs will not fit into the buffer;
 * in such cases additional copies when we iterate over their
 * members may be needed.
 *
 * btf_show_obj_safe() is used to return a safe buffer for
 * btf_show_start_type(); this ensures that as we recurse into
 * nested types we always have safe data for the given type.
 * This approach is somewhat wasteful; it's possible for example
 * that when iterating over a large union we'll end up copying the
 * same data repeatedly, but the goal is safety not performance.
 * We use stack data as opposed to per-CPU buffers because the
 * iteration over a type can take some time, and preemption handling
 * would greatly complicate use of the safe buffer.
 */
static void *btf_show_obj_safe(struct btf_show *show,
			       const struct btf_type *t,
			       void *data)
{
	const struct btf_type *rt;
	int size_left, size;
	void *safe = NULL;

	if (show->flags & BTF_SHOW_UNSAFE)
		return data;

	rt = btf_resolve_size(show->btf, t, &size);
	if (IS_ERR(rt)) {
		show->state.status = PTR_ERR(rt);
		return NULL;
	}

	/*
	 * Is this toplevel object? If so, set total object size and
	 * initialize pointers. Otherwise check if we still fall within
	 * our safe object data.
	 */
	if (show->state.depth == 0) {
		show->obj.size = size;
		show->obj.head = data;
	} else {
		/*
		 * If the size of the current object is > our remaining
		 * safe buffer we _may_ need to do a new copy. However
		 * consider the case of a nested struct; its size pushes
		 * us over the safe buffer limit, but showing any individual
		 * struct members does not. In such cases, we don't need
		 * to initiate a fresh copy yet; however we definitely need
		 * at least BTF_SHOW_OBJ_BASE_TYPE_SIZE bytes left
		 * in our buffer, regardless of the current object size.
		 * The logic here is that as we resolve types we will
		 * hit a base type at some point, and we need to be sure
		 * the next chunk of data is safely available to display
		 * that type info safely. We cannot rely on the size of
		 * the current object here because it may be much larger
		 * than our current buffer (e.g. task_struct is 8k).
		 * All we want to do here is ensure that we can print the
		 * next basic type, which we can if either
		 * - the current type size is within the safe buffer; or
		 * - at least BTF_SHOW_OBJ_BASE_TYPE_SIZE bytes are left in
		 *   the safe buffer.
		 */
		safe = __btf_show_obj_safe(show, data,
					   min(size,
					       BTF_SHOW_OBJ_BASE_TYPE_SIZE));
	}

	/*
	 * We need a new copy to our safe object, either because we haven't
	 * yet copied and are initializing safe data, or because the data
	 * we want falls outside the boundaries of the safe object.
	 */
	if (!safe) {
		size_left = btf_show_obj_size_left(show, data);
		if (size_left > BTF_SHOW_OBJ_SAFE_SIZE)
			size_left = BTF_SHOW_OBJ_SAFE_SIZE;
		show->state.status = copy_from_kernel_nofault(show->obj.safe,
							      data, size_left);
		if (!show->state.status) {
			show->obj.data = data;
			safe = show->obj.safe;
		}
	}

	return safe;
}

/*
 * Set the type we are starting to show and return a safe data pointer
 * to be used for showing the associated data.
 */
static void *btf_show_start_type(struct btf_show *show,
				 const struct btf_type *t,
				 u32 type_id, void *data)
{
	show->state.type = t;
	show->state.type_id = type_id;
	show->state.name[0] = '\0';

	return btf_show_obj_safe(show, t, data);
}

static void btf_show_end_type(struct btf_show *show)
{
	show->state.type = NULL;
	show->state.type_id = 0;
	show->state.name[0] = '\0';
}

static void *btf_show_start_aggr_type(struct btf_show *show,
				      const struct btf_type *t,
				      u32 type_id, void *data)
{
	void *safe_data = btf_show_start_type(show, t, type_id, data);

	if (!safe_data)
		return safe_data;

	btf_show(show, "%s%s%s", btf_show_indent(show),
		 btf_show_name(show),
		 btf_show_newline(show));
	show->state.depth++;
	return safe_data;
}

static void btf_show_end_aggr_type(struct btf_show *show,
				   const char *suffix)
{
	show->state.depth--;
	btf_show(show, "%s%s%s%s", btf_show_indent(show), suffix,
		 btf_show_delim(show), btf_show_newline(show));
	btf_show_end_type(show);
}

static void btf_show_start_member(struct btf_show *show,
				  const struct btf_member *m)
{
	show->state.member = m;
}

static void btf_show_start_array_member(struct btf_show *show)
{
	show->state.array_member = 1;
	btf_show_start_member(show, NULL);
}

static void btf_show_end_member(struct btf_show *show)
{
	show->state.member = NULL;
}

static void btf_show_end_array_member(struct btf_show *show)
{
	show->state.array_member = 0;
	btf_show_end_member(show);
}

static void *btf_show_start_array_type(struct btf_show *show,
				       const struct btf_type *t,
				       u32 type_id,
				       u16 array_encoding,
				       void *data)
{
	show->state.array_encoding = array_encoding;
	show->state.array_terminated = 0;
	return btf_show_start_aggr_type(show, t, type_id, data);
}

static void btf_show_end_array_type(struct btf_show *show)
{
	show->state.array_encoding = 0;
	show->state.array_terminated = 0;
	btf_show_end_aggr_type(show, "]");
}

static void *btf_show_start_struct_type(struct btf_show *show,
					const struct btf_type *t,
					u32 type_id,
					void *data)
{
	return btf_show_start_aggr_type(show, t, type_id, data);
}

static void btf_show_end_struct_type(struct btf_show *show)
{
	btf_show_end_aggr_type(show, "}");
}
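
/* Editor's sketch of how the start/end helpers above pair up inside a
 * kind's show() callback (the real btf_struct_show() appears later in
 * the file; this outline is illustrative only):
 *
 *	safe_data = btf_show_start_struct_type(show, t, type_id, data);
 *	if (!safe_data)
 *		return;
 *	for each member m of t {
 *		btf_show_start_member(show, m);
 *		// recurse into the member's own kind_ops->show()
 *		btf_show_end_member(show);
 *	}
 *	btf_show_end_struct_type(show);	// emits "}" plus delimiter
 */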

__printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log,
					      const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}

__printf(2, 3) static void btf_verifier_log(struct btf_verifier_env *env,
					    const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}

__printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env,
						   const struct btf_type *t,
						   bool log_details,
						   const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	u8 kind = BTF_INFO_KIND(t->info);
	struct btf *btf = env->btf;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	/* btf verifier prints all types it is processing via
	 * btf_verifier_log_type(..., fmt = NULL).
	 * Skip those prints for in-kernel BTF verification.
	 */
	if (log->level == BPF_LOG_KERNEL && !fmt)
		return;

	__btf_verifier_log(log, "[%u] %s %s%s",
			   env->log_type_id,
			   btf_kind_str[kind],
			   __btf_name_by_offset(btf, t->name_off),
			   log_details ? " " : "");

	if (log_details)
		btf_type_ops(t)->log_details(env, t);

	if (fmt && *fmt) {
		__btf_verifier_log(log, " ");
		va_start(args, fmt);
		bpf_verifier_vlog(log, fmt, args);
		va_end(args);
	}

	__btf_verifier_log(log, "\n");
}

#define btf_verifier_log_type(env, t, ...) \
	__btf_verifier_log_type((env), (t), true, __VA_ARGS__)
#define btf_verifier_log_basic(env, t, ...) \
	__btf_verifier_log_type((env), (t), false, __VA_ARGS__)

__printf(4, 5)
static void btf_verifier_log_member(struct btf_verifier_env *env,
				    const struct btf_type *struct_type,
				    const struct btf_member *member,
				    const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	struct btf *btf = env->btf;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	if (log->level == BPF_LOG_KERNEL && !fmt)
		return;
	/* The CHECK_META phase already did a btf dump.
	 *
	 * If member is logged again, it must hit an error in
	 * parsing this member. It is useful to print out which
	 * struct this member belongs to.
	 */
	if (env->phase != CHECK_META)
		btf_verifier_log_type(env, struct_type, NULL);

	if (btf_type_kflag(struct_type))
		__btf_verifier_log(log,
				   "\t%s type_id=%u bitfield_size=%u bits_offset=%u",
				   __btf_name_by_offset(btf, member->name_off),
				   member->type,
				   BTF_MEMBER_BITFIELD_SIZE(member->offset),
				   BTF_MEMBER_BIT_OFFSET(member->offset));
	else
		__btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u",
				   __btf_name_by_offset(btf, member->name_off),
				   member->type, member->offset);

	if (fmt && *fmt) {
		__btf_verifier_log(log, " ");
		va_start(args, fmt);
		bpf_verifier_vlog(log, fmt, args);
		va_end(args);
	}

	__btf_verifier_log(log, "\n");
}

__printf(4, 5)
static void btf_verifier_log_vsi(struct btf_verifier_env *env,
				 const struct btf_type *datasec_type,
				 const struct btf_var_secinfo *vsi,
				 const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;
	if (log->level == BPF_LOG_KERNEL && !fmt)
		return;
	if (env->phase != CHECK_META)
		btf_verifier_log_type(env, datasec_type, NULL);

	__btf_verifier_log(log, "\t type_id=%u offset=%u size=%u",
			   vsi->type, vsi->offset, vsi->size);
	if (fmt && *fmt) {
		__btf_verifier_log(log, " ");
		va_start(args, fmt);
		bpf_verifier_vlog(log, fmt, args);
		va_end(args);
	}

	__btf_verifier_log(log, "\n");
}

static void btf_verifier_log_hdr(struct btf_verifier_env *env,
				 u32 btf_data_size)
{
	struct bpf_verifier_log *log = &env->log;
	const struct btf *btf = env->btf;
	const struct btf_header *hdr;

	if (!bpf_verifier_log_needed(log))
		return;

	if (log->level == BPF_LOG_KERNEL)
		return;
	hdr = &btf->hdr;
	__btf_verifier_log(log, "magic: 0x%x\n", hdr->magic);
	__btf_verifier_log(log, "version: %u\n", hdr->version);
	__btf_verifier_log(log, "flags: 0x%x\n", hdr->flags);
	__btf_verifier_log(log, "hdr_len: %u\n", hdr->hdr_len);
	__btf_verifier_log(log, "type_off: %u\n", hdr->type_off);
	__btf_verifier_log(log, "type_len: %u\n", hdr->type_len);
	__btf_verifier_log(log, "str_off: %u\n", hdr->str_off);
	__btf_verifier_log(log, "str_len: %u\n", hdr->str_len);
	__btf_verifier_log(log, "btf_total_size: %u\n", btf_data_size);
}

static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t)
{
	struct btf *btf = env->btf;

	if (btf->types_size == btf->nr_types) {
		/* Expand 'types' array */

		struct btf_type **new_types;
		u32 expand_by, new_size;

		if (btf->start_id + btf->types_size == BTF_MAX_TYPE) {
			btf_verifier_log(env, "Exceeded max num of types");
			return -E2BIG;
		}

		expand_by = max_t(u32, btf->types_size >> 2, 16);
		new_size = min_t(u32, BTF_MAX_TYPE,
				 btf->types_size + expand_by);

		new_types = kvcalloc(new_size, sizeof(*new_types),
				     GFP_KERNEL | __GFP_NOWARN);
		if (!new_types)
			return -ENOMEM;

		if (btf->nr_types == 0) {
			if (!btf->base_btf) {
				/* lazily init VOID type */
				new_types[0] = &btf_void;
				btf->nr_types++;
			}
		} else {
			memcpy(new_types, btf->types,
			       sizeof(*btf->types) * btf->nr_types);
		}

		kvfree(btf->types);
		btf->types = new_types;
		btf->types_size = new_size;
	}

	btf->types[btf->nr_types++] = t;

	return 0;
}

static int btf_alloc_id(struct btf *btf)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&btf_idr_lock);
	id = idr_alloc_cyclic(&btf_idr, btf, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		btf->id = id;
	spin_unlock_bh(&btf_idr_lock);
	idr_preload_end();

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

static void btf_free_id(struct btf *btf)
{
	unsigned long flags;

	/*
	 * In map-in-map, calling map_delete_elem() on outer
	 * map will call bpf_map_put on the inner map.
	 * It will then eventually call btf_free_id()
	 * on the inner map. Some of the map_delete_elem()
	 * implementations may have irqs disabled, so
	 * we need to use the _irqsave() version instead
	 * of the _bh() version.
	 */
	spin_lock_irqsave(&btf_idr_lock, flags);
	idr_remove(&btf_idr, btf->id);
	spin_unlock_irqrestore(&btf_idr_lock, flags);
}

static void btf_free_kfunc_set_tab(struct btf *btf)
{
	struct btf_kfunc_set_tab *tab = btf->kfunc_set_tab;
	int hook, type;

	if (!tab)
		return;
	/* For module BTF, we directly assign the sets being registered, so
	 * there is nothing to free except kfunc_set_tab.
	 */
	if (btf_is_module(btf))
		goto free_tab;
	for (hook = 0; hook < ARRAY_SIZE(tab->sets); hook++) {
		for (type = 0; type < ARRAY_SIZE(tab->sets[0]); type++)
			kfree(tab->sets[hook][type]);
	}
free_tab:
	kfree(tab);
	btf->kfunc_set_tab = NULL;
}

static void btf_free_dtor_kfunc_tab(struct btf *btf)
{
	struct btf_id_dtor_kfunc_tab *tab = btf->dtor_kfunc_tab;

	if (!tab)
		return;
	kfree(tab);
	btf->dtor_kfunc_tab = NULL;
}

static void btf_free(struct btf *btf)
{
	btf_free_dtor_kfunc_tab(btf);
	btf_free_kfunc_set_tab(btf);
	kvfree(btf->types);
	kvfree(btf->resolved_sizes);
	kvfree(btf->resolved_ids);
	kvfree(btf->data);
	kfree(btf);
}

static void btf_free_rcu(struct rcu_head *rcu)
{
	struct btf *btf = container_of(rcu, struct btf, rcu);

	btf_free(btf);
}

void btf_get(struct btf *btf)
{
	refcount_inc(&btf->refcnt);
}

void btf_put(struct btf *btf)
{
	if (btf && refcount_dec_and_test(&btf->refcnt)) {
		btf_free_id(btf);
		call_rcu(&btf->rcu, btf_free_rcu);
	}
}

static int env_resolve_init(struct btf_verifier_env *env)
{
	struct btf *btf = env->btf;
	u32 nr_types = btf->nr_types;
	u32 *resolved_sizes = NULL;
	u32 *resolved_ids = NULL;
	u8 *visit_states = NULL;

	resolved_sizes = kvcalloc(nr_types, sizeof(*resolved_sizes),
				  GFP_KERNEL | __GFP_NOWARN);
	if (!resolved_sizes)
		goto nomem;

	resolved_ids = kvcalloc(nr_types, sizeof(*resolved_ids),
				GFP_KERNEL | __GFP_NOWARN);
	if (!resolved_ids)
		goto nomem;

	visit_states = kvcalloc(nr_types, sizeof(*visit_states),
				GFP_KERNEL | __GFP_NOWARN);
	if (!visit_states)
		goto nomem;

	btf->resolved_sizes = resolved_sizes;
	btf->resolved_ids = resolved_ids;
	env->visit_states = visit_states;

	return 0;

nomem:
	kvfree(resolved_sizes);
	kvfree(resolved_ids);
	kvfree(visit_states);
	return -ENOMEM;
}

static void btf_verifier_env_free(struct btf_verifier_env *env)
{
	kvfree(env->visit_states);
	kfree(env);
}

static bool env_type_is_resolve_sink(const struct btf_verifier_env *env,
				     const struct btf_type *next_type)
{
	switch (env->resolve_mode) {
	case RESOLVE_TBD:
		/* int, enum or void is a sink */
		return !btf_type_needs_resolve(next_type);
	case RESOLVE_PTR:
		/* int, enum, void, struct, array, func or func_proto is a sink
		 * for ptr
		 */
		return !btf_type_is_modifier(next_type) &&
		       !btf_type_is_ptr(next_type);
	case RESOLVE_STRUCT_OR_ARRAY:
		/* int, enum, void, ptr, func or func_proto is a sink
		 * for struct and array
		 */
		return !btf_type_is_modifier(next_type) &&
		       !btf_type_is_array(next_type) &&
		       !btf_type_is_struct(next_type);
	default:
		BUG();
	}
}
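
/* Editor's worked example for env_type_is_resolve_sink(): while
 * resolving 'int **p' (RESOLVE_PTR mode) the inner PTR is not a sink,
 * so resolution keeps following it (this is how pointer loops are
 * caught), but the final INT is a sink. In RESOLVE_STRUCT_OR_ARRAY
 * mode a PTR member *is* a sink, which is what lets
 * "struct A { struct A *a; };" from the header comment resolve.
 */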

static bool env_type_is_resolved(const struct btf_verifier_env *env,
				 u32 type_id)
{
	/* base BTF types should be resolved by now */
	if (type_id < env->btf->start_id)
		return true;

	return env->visit_states[type_id - env->btf->start_id] == RESOLVED;
}

static int env_stack_push(struct btf_verifier_env *env,
			  const struct btf_type *t, u32 type_id)
{
	const struct btf *btf = env->btf;
	struct resolve_vertex *v;

	if (env->top_stack == MAX_RESOLVE_DEPTH)
		return -E2BIG;

	if (type_id < btf->start_id
	    || env->visit_states[type_id - btf->start_id] != NOT_VISITED)
		return -EEXIST;

	env->visit_states[type_id - btf->start_id] = VISITED;

	v = &env->stack[env->top_stack++];
	v->t = t;
	v->type_id = type_id;
	v->next_member = 0;

	if (env->resolve_mode == RESOLVE_TBD) {
		if (btf_type_is_ptr(t))
			env->resolve_mode = RESOLVE_PTR;
		else if (btf_type_is_struct(t) || btf_type_is_array(t))
			env->resolve_mode = RESOLVE_STRUCT_OR_ARRAY;
	}

	return 0;
}

static void env_stack_set_next_member(struct btf_verifier_env *env,
				      u16 next_member)
{
	env->stack[env->top_stack - 1].next_member = next_member;
}

static void env_stack_pop_resolved(struct btf_verifier_env *env,
				   u32 resolved_type_id,
				   u32 resolved_size)
{
	u32 type_id = env->stack[--(env->top_stack)].type_id;
	struct btf *btf = env->btf;

	type_id -= btf->start_id; /* adjust to local type id */
	btf->resolved_sizes[type_id] = resolved_size;
	btf->resolved_ids[type_id] = resolved_type_id;
	env->visit_states[type_id] = RESOLVED;
}

static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env)
{
	return env->top_stack ? &env->stack[env->top_stack - 1] : NULL;
}

/* Resolve the size of a passed-in "type"
 *
 * type: is an array (e.g. u32 array[x][y])
 * return type: type "u32[x][y]", i.e. BTF_KIND_ARRAY,
 * *type_size: (x * y * sizeof(u32)). Hence, *type_size always
 *             corresponds to the return type.
 * *elem_type: u32
 * *elem_id: id of u32
 * *total_nelems: (x * y). Hence, individual elem size is
 *                (*type_size / *total_nelems)
 * *type_id: id of type if it's changed within the function, 0 if not
 *
 * type: is not an array (e.g. const struct X)
 * return type: type "struct X"
 * *type_size: sizeof(struct X)
 * *elem_type: same as return type ("struct X")
 * *elem_id: 0
 * *total_nelems: 1
 * *type_id: id of type if it's changed within the function, 0 if not
 */
static const struct btf_type *
__btf_resolve_size(const struct btf *btf, const struct btf_type *type,
		   u32 *type_size, const struct btf_type **elem_type,
		   u32 *elem_id, u32 *total_nelems, u32 *type_id)
{
	const struct btf_type *array_type = NULL;
	const struct btf_array *array = NULL;
	u32 i, size, nelems = 1, id = 0;

	for (i = 0; i < MAX_RESOLVE_DEPTH; i++) {
		switch (BTF_INFO_KIND(type->info)) {
		/* type->size can be used */
		case BTF_KIND_INT:
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
		case BTF_KIND_ENUM:
		case BTF_KIND_FLOAT:
			size = type->size;
			goto resolved;

		case BTF_KIND_PTR:
			size = sizeof(void *);
			goto resolved;

		/* Modifiers */
		case BTF_KIND_TYPEDEF:
		case BTF_KIND_VOLATILE:
		case BTF_KIND_CONST:
		case BTF_KIND_RESTRICT:
		case BTF_KIND_TYPE_TAG:
			id = type->type;
			type = btf_type_by_id(btf, type->type);
			break;

		case BTF_KIND_ARRAY:
			if (!array_type)
				array_type = type;
			array = btf_type_array(type);
			if (nelems && array->nelems > U32_MAX / nelems)
				return ERR_PTR(-EINVAL);
			nelems *= array->nelems;
			type = btf_type_by_id(btf, array->type);
			break;

		/* type without size */
		default:
			return ERR_PTR(-EINVAL);
		}
	}

	return ERR_PTR(-EINVAL);

resolved:
	if (nelems && size > U32_MAX / nelems)
		return ERR_PTR(-EINVAL);

	*type_size = nelems * size;
	if (total_nelems)
		*total_nelems = nelems;
	if (elem_type)
		*elem_type = type;
	if (elem_id)
		*elem_id = array ? array->type : 0;
	if (type_id && id)
		*type_id = id;

	return array_type ? : type;
}
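
/* Editor's worked example for __btf_resolve_size(): for 'u32 a[3][4]'
 * the walk multiplies nelems up to 12, resolves the element down to
 * the u32 INT and returns the outermost ARRAY type with
 * *type_size = 12 * 4 = 48, *total_nelems = 12 and *elem_type = u32.
 * For 'const struct X' the modifier is skipped and the function
 * returns the STRUCT type with *type_size = sizeof(struct X) and
 * *total_nelems = 1.
 */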

const struct btf_type *
btf_resolve_size(const struct btf *btf, const struct btf_type *type,
		 u32 *type_size)
{
	return __btf_resolve_size(btf, type, type_size, NULL, NULL, NULL, NULL);
}

static u32 btf_resolved_type_id(const struct btf *btf, u32 type_id)
{
	while (type_id < btf->start_id)
		btf = btf->base_btf;

	return btf->resolved_ids[type_id - btf->start_id];
}

/* The input param "type_id" must point to a needs_resolve type */
static const struct btf_type *btf_type_id_resolve(const struct btf *btf,
						  u32 *type_id)
{
	*type_id = btf_resolved_type_id(btf, *type_id);
	return btf_type_by_id(btf, *type_id);
}

static u32 btf_resolved_type_size(const struct btf *btf, u32 type_id)
{
	while (type_id < btf->start_id)
		btf = btf->base_btf;

	return btf->resolved_sizes[type_id - btf->start_id];
}

const struct btf_type *btf_type_id_size(const struct btf *btf,
					u32 *type_id, u32 *ret_size)
{
	const struct btf_type *size_type;
	u32 size_type_id = *type_id;
	u32 size = 0;

	size_type = btf_type_by_id(btf, size_type_id);
	if (btf_type_nosize_or_null(size_type))
		return NULL;

	if (btf_type_has_size(size_type)) {
		size = size_type->size;
	} else if (btf_type_is_array(size_type)) {
		size = btf_resolved_type_size(btf, size_type_id);
	} else if (btf_type_is_ptr(size_type)) {
		size = sizeof(void *);
	} else {
		if (WARN_ON_ONCE(!btf_type_is_modifier(size_type) &&
				 !btf_type_is_var(size_type)))
			return NULL;

		size_type_id = btf_resolved_type_id(btf, size_type_id);
		size_type = btf_type_by_id(btf, size_type_id);
		if (btf_type_nosize_or_null(size_type))
			return NULL;
		else if (btf_type_has_size(size_type))
			size = size_type->size;
		else if (btf_type_is_array(size_type))
			size = btf_resolved_type_size(btf, size_type_id);
		else if (btf_type_is_ptr(size_type))
			size = sizeof(void *);
		else
			return NULL;
	}

	*type_id = size_type_id;
	if (ret_size)
		*ret_size = size;

	return size_type;
}

static int btf_df_check_member(struct btf_verifier_env *env,
			       const struct btf_type *struct_type,
			       const struct btf_member *member,
			       const struct btf_type *member_type)
{
	btf_verifier_log_basic(env, struct_type,
			       "Unsupported check_member");
	return -EINVAL;
}

static int btf_df_check_kflag_member(struct btf_verifier_env *env,
				     const struct btf_type *struct_type,
				     const struct btf_member *member,
				     const struct btf_type *member_type)
{
	btf_verifier_log_basic(env, struct_type,
			       "Unsupported check_kflag_member");
	return -EINVAL;
}

/* Used for ptr, array, struct/union and float type members.
 * int, enum and modifier types have their specific callback functions.
 */
static int btf_generic_check_kflag_member(struct btf_verifier_env *env,
					  const struct btf_type *struct_type,
					  const struct btf_member *member,
					  const struct btf_type *member_type)
{
	if (BTF_MEMBER_BITFIELD_SIZE(member->offset)) {
		btf_verifier_log_member(env, struct_type, member,
					"Invalid member bitfield_size");
		return -EINVAL;
	}

	/* bitfield size is 0, so member->offset represents bit offset only.
	 * It is safe to call non-kflag check_member variants.
	 */
	return btf_type_ops(member_type)->check_member(env, struct_type,
						       member,
						       member_type);
}

static int btf_df_resolve(struct btf_verifier_env *env,
			  const struct resolve_vertex *v)
{
	btf_verifier_log_basic(env, v->t, "Unsupported resolve");
	return -EINVAL;
}

static void btf_df_show(const struct btf *btf, const struct btf_type *t,
			u32 type_id, void *data, u8 bits_offsets,
			struct btf_show *show)
{
	btf_show(show, "<unsupported kind:%u>", BTF_INFO_KIND(t->info));
}

static int btf_int_check_member(struct btf_verifier_env *env,
				const struct btf_type *struct_type,
				const struct btf_member *member,
				const struct btf_type *member_type)
{
	u32 int_data = btf_type_int(member_type);
	u32 struct_bits_off = member->offset;
	u32 struct_size = struct_type->size;
	u32 nr_copy_bits;
	u32 bytes_offset;

	if (U32_MAX - struct_bits_off < BTF_INT_OFFSET(int_data)) {
		btf_verifier_log_member(env, struct_type, member,
					"bits_offset exceeds U32_MAX");
		return -EINVAL;
	}

	struct_bits_off += BTF_INT_OFFSET(int_data);
	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
	nr_copy_bits = BTF_INT_BITS(int_data) +
		BITS_PER_BYTE_MASKED(struct_bits_off);

	if (nr_copy_bits > BITS_PER_U128) {
		btf_verifier_log_member(env, struct_type, member,
					"nr_copy_bits exceeds 128");
		return -EINVAL;
	}

	if (struct_size < bytes_offset ||
	    struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
		btf_verifier_log_member(env, struct_type, member,
					"Member exceeds struct_size");
		return -EINVAL;
	}

	return 0;
}
1997 */ 1998 return btf_type_ops(member_type)->check_member(env, struct_type, 1999 member, 2000 member_type); 2001 } 2002 2003 static int btf_df_resolve(struct btf_verifier_env *env, 2004 const struct resolve_vertex *v) 2005 { 2006 btf_verifier_log_basic(env, v->t, "Unsupported resolve"); 2007 return -EINVAL; 2008 } 2009 2010 static void btf_df_show(const struct btf *btf, const struct btf_type *t, 2011 u32 type_id, void *data, u8 bits_offsets, 2012 struct btf_show *show) 2013 { 2014 btf_show(show, "<unsupported kind:%u>", BTF_INFO_KIND(t->info)); 2015 } 2016 2017 static int btf_int_check_member(struct btf_verifier_env *env, 2018 const struct btf_type *struct_type, 2019 const struct btf_member *member, 2020 const struct btf_type *member_type) 2021 { 2022 u32 int_data = btf_type_int(member_type); 2023 u32 struct_bits_off = member->offset; 2024 u32 struct_size = struct_type->size; 2025 u32 nr_copy_bits; 2026 u32 bytes_offset; 2027 2028 if (U32_MAX - struct_bits_off < BTF_INT_OFFSET(int_data)) { 2029 btf_verifier_log_member(env, struct_type, member, 2030 "bits_offset exceeds U32_MAX"); 2031 return -EINVAL; 2032 } 2033 2034 struct_bits_off += BTF_INT_OFFSET(int_data); 2035 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); 2036 nr_copy_bits = BTF_INT_BITS(int_data) + 2037 BITS_PER_BYTE_MASKED(struct_bits_off); 2038 2039 if (nr_copy_bits > BITS_PER_U128) { 2040 btf_verifier_log_member(env, struct_type, member, 2041 "nr_copy_bits exceeds 128"); 2042 return -EINVAL; 2043 } 2044 2045 if (struct_size < bytes_offset || 2046 struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) { 2047 btf_verifier_log_member(env, struct_type, member, 2048 "Member exceeds struct_size"); 2049 return -EINVAL; 2050 } 2051 2052 return 0; 2053 } 2054 2055 static int btf_int_check_kflag_member(struct btf_verifier_env *env, 2056 const struct btf_type *struct_type, 2057 const struct btf_member *member, 2058 const struct btf_type *member_type) 2059 { 2060 u32 struct_bits_off, nr_bits, nr_int_data_bits, bytes_offset; 2061 u32 int_data = btf_type_int(member_type); 2062 u32 struct_size = struct_type->size; 2063 u32 nr_copy_bits; 2064 2065 /* a regular int type is required for the kflag int member */ 2066 if (!btf_type_int_is_regular(member_type)) { 2067 btf_verifier_log_member(env, struct_type, member, 2068 "Invalid member base type"); 2069 return -EINVAL; 2070 } 2071 2072 /* check sanity of bitfield size */ 2073 nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset); 2074 struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset); 2075 nr_int_data_bits = BTF_INT_BITS(int_data); 2076 if (!nr_bits) { 2077 /* Not a bitfield member, member offset must be at byte 2078 * boundary. 
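		 *
		 * e.g. with nr_bits == 0, a member at struct_bits_off == 12
		 * is rejected below (12 & 7 != 0), while one at
		 * struct_bits_off == 16 is accepted and then checked as a
		 * regular int of nr_int_data_bits bits.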
2079 */ 2080 if (BITS_PER_BYTE_MASKED(struct_bits_off)) { 2081 btf_verifier_log_member(env, struct_type, member, 2082 "Invalid member offset"); 2083 return -EINVAL; 2084 } 2085 2086 nr_bits = nr_int_data_bits; 2087 } else if (nr_bits > nr_int_data_bits) { 2088 btf_verifier_log_member(env, struct_type, member, 2089 "Invalid member bitfield_size"); 2090 return -EINVAL; 2091 } 2092 2093 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); 2094 nr_copy_bits = nr_bits + BITS_PER_BYTE_MASKED(struct_bits_off); 2095 if (nr_copy_bits > BITS_PER_U128) { 2096 btf_verifier_log_member(env, struct_type, member, 2097 "nr_copy_bits exceeds 128"); 2098 return -EINVAL; 2099 } 2100 2101 if (struct_size < bytes_offset || 2102 struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) { 2103 btf_verifier_log_member(env, struct_type, member, 2104 "Member exceeds struct_size"); 2105 return -EINVAL; 2106 } 2107 2108 return 0; 2109 } 2110 2111 static s32 btf_int_check_meta(struct btf_verifier_env *env, 2112 const struct btf_type *t, 2113 u32 meta_left) 2114 { 2115 u32 int_data, nr_bits, meta_needed = sizeof(int_data); 2116 u16 encoding; 2117 2118 if (meta_left < meta_needed) { 2119 btf_verifier_log_basic(env, t, 2120 "meta_left:%u meta_needed:%u", 2121 meta_left, meta_needed); 2122 return -EINVAL; 2123 } 2124 2125 if (btf_type_vlen(t)) { 2126 btf_verifier_log_type(env, t, "vlen != 0"); 2127 return -EINVAL; 2128 } 2129 2130 if (btf_type_kflag(t)) { 2131 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 2132 return -EINVAL; 2133 } 2134 2135 int_data = btf_type_int(t); 2136 if (int_data & ~BTF_INT_MASK) { 2137 btf_verifier_log_basic(env, t, "Invalid int_data:%x", 2138 int_data); 2139 return -EINVAL; 2140 } 2141 2142 nr_bits = BTF_INT_BITS(int_data) + BTF_INT_OFFSET(int_data); 2143 2144 if (nr_bits > BITS_PER_U128) { 2145 btf_verifier_log_type(env, t, "nr_bits exceeds %zu", 2146 BITS_PER_U128); 2147 return -EINVAL; 2148 } 2149 2150 if (BITS_ROUNDUP_BYTES(nr_bits) > t->size) { 2151 btf_verifier_log_type(env, t, "nr_bits exceeds type_size"); 2152 return -EINVAL; 2153 } 2154 2155 /* 2156 * Only one of the encoding bits is allowed and it 2157 * should be sufficient for the pretty print purpose (i.e. decoding). 2158 * Multiple bits can be allowed later if it is found 2159 * to be insufficient. 2160 */ 2161 encoding = BTF_INT_ENCODING(int_data); 2162 if (encoding && 2163 encoding != BTF_INT_SIGNED && 2164 encoding != BTF_INT_CHAR && 2165 encoding != BTF_INT_BOOL) { 2166 btf_verifier_log_type(env, t, "Unsupported encoding"); 2167 return -ENOTSUPP; 2168 } 2169 2170 btf_verifier_log_type(env, t, NULL); 2171 2172 return meta_needed; 2173 } 2174 2175 static void btf_int_log(struct btf_verifier_env *env, 2176 const struct btf_type *t) 2177 { 2178 int int_data = btf_type_int(t); 2179 2180 btf_verifier_log(env, 2181 "size=%u bits_offset=%u nr_bits=%u encoding=%s", 2182 t->size, BTF_INT_OFFSET(int_data), 2183 BTF_INT_BITS(int_data), 2184 btf_int_encoding_str(BTF_INT_ENCODING(int_data))); 2185 } 2186 2187 static void btf_int128_print(struct btf_show *show, void *data) 2188 { 2189 /* data points to a __int128 number. 
2190 	 * Suppose
2191 	 *     int128_num = *(__int128 *)data;
2192 	 * The formulas below show what upper_num and lower_num represent:
2193 	 *     upper_num = int128_num >> 64;
2194 	 *     lower_num = int128_num & 0xffffffffFFFFFFFFULL;
2195 	 */
2196 	u64 upper_num, lower_num;
2197
2198 #ifdef __BIG_ENDIAN_BITFIELD
2199 	upper_num = *(u64 *)data;
2200 	lower_num = *(u64 *)(data + 8);
2201 #else
2202 	upper_num = *(u64 *)(data + 8);
2203 	lower_num = *(u64 *)data;
2204 #endif
2205 	if (upper_num == 0)
2206 		btf_show_type_value(show, "0x%llx", lower_num);
2207 	else
2208 		btf_show_type_values(show, "0x%llx%016llx", upper_num,
2209 				     lower_num);
2210 }
2211
2212 static void btf_int128_shift(u64 *print_num, u16 left_shift_bits,
2213 			     u16 right_shift_bits)
2214 {
2215 	u64 upper_num, lower_num;
2216
2217 #ifdef __BIG_ENDIAN_BITFIELD
2218 	upper_num = print_num[0];
2219 	lower_num = print_num[1];
2220 #else
2221 	upper_num = print_num[1];
2222 	lower_num = print_num[0];
2223 #endif
2224
2225 	/* shake out unneeded bits by shift/or operations */
2226 	if (left_shift_bits >= 64) {
2227 		upper_num = lower_num << (left_shift_bits - 64);
2228 		lower_num = 0;
2229 	} else {
2230 		upper_num = (upper_num << left_shift_bits) |
2231 			    (lower_num >> (64 - left_shift_bits));
2232 		lower_num = lower_num << left_shift_bits;
2233 	}
2234
2235 	if (right_shift_bits >= 64) {
2236 		lower_num = upper_num >> (right_shift_bits - 64);
2237 		upper_num = 0;
2238 	} else {
2239 		lower_num = (lower_num >> right_shift_bits) |
2240 			    (upper_num << (64 - right_shift_bits));
2241 		upper_num = upper_num >> right_shift_bits;
2242 	}
2243
2244 #ifdef __BIG_ENDIAN_BITFIELD
2245 	print_num[0] = upper_num;
2246 	print_num[1] = lower_num;
2247 #else
2248 	print_num[0] = lower_num;
2249 	print_num[1] = upper_num;
2250 #endif
2251 }
2252
2253 static void btf_bitfield_show(void *data, u8 bits_offset,
2254 			      u8 nr_bits, struct btf_show *show)
2255 {
2256 	u16 left_shift_bits, right_shift_bits;
2257 	u8 nr_copy_bytes;
2258 	u8 nr_copy_bits;
2259 	u64 print_num[2] = {};
2260
2261 	nr_copy_bits = nr_bits + bits_offset;
2262 	nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);
2263
2264 	memcpy(print_num, data, nr_copy_bytes);
2265
2266 #ifdef __BIG_ENDIAN_BITFIELD
2267 	left_shift_bits = bits_offset;
2268 #else
2269 	left_shift_bits = BITS_PER_U128 - nr_copy_bits;
2270 #endif
2271 	right_shift_bits = BITS_PER_U128 - nr_bits;
2272
2273 	btf_int128_shift(print_num, left_shift_bits, right_shift_bits);
2274 	btf_int128_print(show, print_num);
2275 }
2276
2277
2278 static void btf_int_bits_show(const struct btf *btf,
2279 			      const struct btf_type *t,
2280 			      void *data, u8 bits_offset,
2281 			      struct btf_show *show)
2282 {
2283 	u32 int_data = btf_type_int(t);
2284 	u8 nr_bits = BTF_INT_BITS(int_data);
2285 	u8 total_bits_offset;
2286
2287 	/*
2288 	 * bits_offset is at most 7.
2289 	 * BTF_INT_OFFSET() cannot exceed 128 bits.
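	 *
	 * A worked example: bits_offset == 5 and BTF_INT_OFFSET() == 11
	 * give total_bits_offset == 16, so data advances by two bytes
	 * (BITS_ROUNDDOWN_BYTES(16) == 2) and the residual bits_offset
	 * passed to btf_bitfield_show() becomes 0 (BITS_PER_BYTE_MASKED(16)).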
2290 */ 2291 total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data); 2292 data += BITS_ROUNDDOWN_BYTES(total_bits_offset); 2293 bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset); 2294 btf_bitfield_show(data, bits_offset, nr_bits, show); 2295 } 2296 2297 static void btf_int_show(const struct btf *btf, const struct btf_type *t, 2298 u32 type_id, void *data, u8 bits_offset, 2299 struct btf_show *show) 2300 { 2301 u32 int_data = btf_type_int(t); 2302 u8 encoding = BTF_INT_ENCODING(int_data); 2303 bool sign = encoding & BTF_INT_SIGNED; 2304 u8 nr_bits = BTF_INT_BITS(int_data); 2305 void *safe_data; 2306 2307 safe_data = btf_show_start_type(show, t, type_id, data); 2308 if (!safe_data) 2309 return; 2310 2311 if (bits_offset || BTF_INT_OFFSET(int_data) || 2312 BITS_PER_BYTE_MASKED(nr_bits)) { 2313 btf_int_bits_show(btf, t, safe_data, bits_offset, show); 2314 goto out; 2315 } 2316 2317 switch (nr_bits) { 2318 case 128: 2319 btf_int128_print(show, safe_data); 2320 break; 2321 case 64: 2322 if (sign) 2323 btf_show_type_value(show, "%lld", *(s64 *)safe_data); 2324 else 2325 btf_show_type_value(show, "%llu", *(u64 *)safe_data); 2326 break; 2327 case 32: 2328 if (sign) 2329 btf_show_type_value(show, "%d", *(s32 *)safe_data); 2330 else 2331 btf_show_type_value(show, "%u", *(u32 *)safe_data); 2332 break; 2333 case 16: 2334 if (sign) 2335 btf_show_type_value(show, "%d", *(s16 *)safe_data); 2336 else 2337 btf_show_type_value(show, "%u", *(u16 *)safe_data); 2338 break; 2339 case 8: 2340 if (show->state.array_encoding == BTF_INT_CHAR) { 2341 /* check for null terminator */ 2342 if (show->state.array_terminated) 2343 break; 2344 if (*(char *)data == '\0') { 2345 show->state.array_terminated = 1; 2346 break; 2347 } 2348 if (isprint(*(char *)data)) { 2349 btf_show_type_value(show, "'%c'", 2350 *(char *)safe_data); 2351 break; 2352 } 2353 } 2354 if (sign) 2355 btf_show_type_value(show, "%d", *(s8 *)safe_data); 2356 else 2357 btf_show_type_value(show, "%u", *(u8 *)safe_data); 2358 break; 2359 default: 2360 btf_int_bits_show(btf, t, safe_data, bits_offset, show); 2361 break; 2362 } 2363 out: 2364 btf_show_end_type(show); 2365 } 2366 2367 static const struct btf_kind_operations int_ops = { 2368 .check_meta = btf_int_check_meta, 2369 .resolve = btf_df_resolve, 2370 .check_member = btf_int_check_member, 2371 .check_kflag_member = btf_int_check_kflag_member, 2372 .log_details = btf_int_log, 2373 .show = btf_int_show, 2374 }; 2375 2376 static int btf_modifier_check_member(struct btf_verifier_env *env, 2377 const struct btf_type *struct_type, 2378 const struct btf_member *member, 2379 const struct btf_type *member_type) 2380 { 2381 const struct btf_type *resolved_type; 2382 u32 resolved_type_id = member->type; 2383 struct btf_member resolved_member; 2384 struct btf *btf = env->btf; 2385 2386 resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL); 2387 if (!resolved_type) { 2388 btf_verifier_log_member(env, struct_type, member, 2389 "Invalid member"); 2390 return -EINVAL; 2391 } 2392 2393 resolved_member = *member; 2394 resolved_member.type = resolved_type_id; 2395 2396 return btf_type_ops(resolved_type)->check_member(env, struct_type, 2397 &resolved_member, 2398 resolved_type); 2399 } 2400 2401 static int btf_modifier_check_kflag_member(struct btf_verifier_env *env, 2402 const struct btf_type *struct_type, 2403 const struct btf_member *member, 2404 const struct btf_type *member_type) 2405 { 2406 const struct btf_type *resolved_type; 2407 u32 resolved_type_id = member->type; 2408 struct btf_member 
resolved_member; 2409 struct btf *btf = env->btf; 2410 2411 resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL); 2412 if (!resolved_type) { 2413 btf_verifier_log_member(env, struct_type, member, 2414 "Invalid member"); 2415 return -EINVAL; 2416 } 2417 2418 resolved_member = *member; 2419 resolved_member.type = resolved_type_id; 2420 2421 return btf_type_ops(resolved_type)->check_kflag_member(env, struct_type, 2422 &resolved_member, 2423 resolved_type); 2424 } 2425 2426 static int btf_ptr_check_member(struct btf_verifier_env *env, 2427 const struct btf_type *struct_type, 2428 const struct btf_member *member, 2429 const struct btf_type *member_type) 2430 { 2431 u32 struct_size, struct_bits_off, bytes_offset; 2432 2433 struct_size = struct_type->size; 2434 struct_bits_off = member->offset; 2435 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); 2436 2437 if (BITS_PER_BYTE_MASKED(struct_bits_off)) { 2438 btf_verifier_log_member(env, struct_type, member, 2439 "Member is not byte aligned"); 2440 return -EINVAL; 2441 } 2442 2443 if (struct_size - bytes_offset < sizeof(void *)) { 2444 btf_verifier_log_member(env, struct_type, member, 2445 "Member exceeds struct_size"); 2446 return -EINVAL; 2447 } 2448 2449 return 0; 2450 } 2451 2452 static int btf_ref_type_check_meta(struct btf_verifier_env *env, 2453 const struct btf_type *t, 2454 u32 meta_left) 2455 { 2456 const char *value; 2457 2458 if (btf_type_vlen(t)) { 2459 btf_verifier_log_type(env, t, "vlen != 0"); 2460 return -EINVAL; 2461 } 2462 2463 if (btf_type_kflag(t)) { 2464 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 2465 return -EINVAL; 2466 } 2467 2468 if (!BTF_TYPE_ID_VALID(t->type)) { 2469 btf_verifier_log_type(env, t, "Invalid type_id"); 2470 return -EINVAL; 2471 } 2472 2473 /* typedef/type_tag type must have a valid name, and other ref types, 2474 * volatile, const, restrict, should have a null name. 2475 */ 2476 if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) { 2477 if (!t->name_off || 2478 !btf_name_valid_identifier(env->btf, t->name_off)) { 2479 btf_verifier_log_type(env, t, "Invalid name"); 2480 return -EINVAL; 2481 } 2482 } else if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPE_TAG) { 2483 value = btf_name_by_offset(env->btf, t->name_off); 2484 if (!value || !value[0]) { 2485 btf_verifier_log_type(env, t, "Invalid name"); 2486 return -EINVAL; 2487 } 2488 } else { 2489 if (t->name_off) { 2490 btf_verifier_log_type(env, t, "Invalid name"); 2491 return -EINVAL; 2492 } 2493 } 2494 2495 btf_verifier_log_type(env, t, NULL); 2496 2497 return 0; 2498 } 2499 2500 static int btf_modifier_resolve(struct btf_verifier_env *env, 2501 const struct resolve_vertex *v) 2502 { 2503 const struct btf_type *t = v->t; 2504 const struct btf_type *next_type; 2505 u32 next_type_id = t->type; 2506 struct btf *btf = env->btf; 2507 2508 next_type = btf_type_by_id(btf, next_type_id); 2509 if (!next_type || btf_type_is_resolve_source_only(next_type)) { 2510 btf_verifier_log_type(env, v->t, "Invalid type_id"); 2511 return -EINVAL; 2512 } 2513 2514 if (!env_type_is_resolve_sink(env, next_type) && 2515 !env_type_is_resolved(env, next_type_id)) 2516 return env_stack_push(env, next_type, next_type_id); 2517 2518 /* Figure out the resolved next_type_id with size. 2519 * They will be stored in the current modifier's 2520 * resolved_ids and resolved_sizes such that it can 2521 * save us a few type-following when we use it later (e.g. in 2522 * pretty print). 
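	 *
	 * e.g. for a chain TYPEDEF -> CONST -> INT, the TYPEDEF's
	 * resolved_ids entry ends up pointing straight at the INT, so a
	 * later btf_type_id_resolve() does not have to walk the CONST
	 * again.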
2523 	 */
2524 	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
2525 		if (env_type_is_resolved(env, next_type_id))
2526 			next_type = btf_type_id_resolve(btf, &next_type_id);
2527
2528 		/* "typedef void new_void", "const void"...etc */
2529 		if (!btf_type_is_void(next_type) &&
2530 		    !btf_type_is_fwd(next_type) &&
2531 		    !btf_type_is_func_proto(next_type)) {
2532 			btf_verifier_log_type(env, v->t, "Invalid type_id");
2533 			return -EINVAL;
2534 		}
2535 	}
2536
2537 	env_stack_pop_resolved(env, next_type_id, 0);
2538
2539 	return 0;
2540 }
2541
2542 static int btf_var_resolve(struct btf_verifier_env *env,
2543 			   const struct resolve_vertex *v)
2544 {
2545 	const struct btf_type *next_type;
2546 	const struct btf_type *t = v->t;
2547 	u32 next_type_id = t->type;
2548 	struct btf *btf = env->btf;
2549
2550 	next_type = btf_type_by_id(btf, next_type_id);
2551 	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
2552 		btf_verifier_log_type(env, v->t, "Invalid type_id");
2553 		return -EINVAL;
2554 	}
2555
2556 	if (!env_type_is_resolve_sink(env, next_type) &&
2557 	    !env_type_is_resolved(env, next_type_id))
2558 		return env_stack_push(env, next_type, next_type_id);
2559
2560 	if (btf_type_is_modifier(next_type)) {
2561 		const struct btf_type *resolved_type;
2562 		u32 resolved_type_id;
2563
2564 		resolved_type_id = next_type_id;
2565 		resolved_type = btf_type_id_resolve(btf, &resolved_type_id);
2566
2567 		if (btf_type_is_ptr(resolved_type) &&
2568 		    !env_type_is_resolve_sink(env, resolved_type) &&
2569 		    !env_type_is_resolved(env, resolved_type_id))
2570 			return env_stack_push(env, resolved_type,
2571 					      resolved_type_id);
2572 	}
2573
2574 	/* We must resolve to something concrete at this point; no
2575 	 * forward types or similar that would resolve to a size of
2576 	 * zero are allowed.
2577 	 */
2578 	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
2579 		btf_verifier_log_type(env, v->t, "Invalid type_id");
2580 		return -EINVAL;
2581 	}
2582
2583 	env_stack_pop_resolved(env, next_type_id, 0);
2584
2585 	return 0;
2586 }
2587
2588 static int btf_ptr_resolve(struct btf_verifier_env *env,
2589 			   const struct resolve_vertex *v)
2590 {
2591 	const struct btf_type *next_type;
2592 	const struct btf_type *t = v->t;
2593 	u32 next_type_id = t->type;
2594 	struct btf *btf = env->btf;
2595
2596 	next_type = btf_type_by_id(btf, next_type_id);
2597 	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
2598 		btf_verifier_log_type(env, v->t, "Invalid type_id");
2599 		return -EINVAL;
2600 	}
2601
2602 	if (!env_type_is_resolve_sink(env, next_type) &&
2603 	    !env_type_is_resolved(env, next_type_id))
2604 		return env_stack_push(env, next_type, next_type_id);
2605
2606 	/* If the modifier was RESOLVED during RESOLVE_STRUCT_OR_ARRAY,
2607 	 * the modifier may have stopped resolving when it was resolved
2608 	 * to a ptr (last-resolved-ptr).
2609 	 *
2610 	 * We now need to continue from the last-resolved-ptr to
2611 	 * ensure the last-resolved-ptr is not referring back to
2612 	 * the current ptr (t).
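	 *
	 * A sketch of the kind of cycle this catches:
	 *
	 *   [1] PTR   -> [2]
	 *   [2] CONST -> [3]
	 *   [3] PTR   -> [1]
	 *
	 * If CONST [2] stopped resolving at its last-resolved-ptr [3]
	 * earlier, continuing the DFS from [3] below is what exposes the
	 * backedge [3] -> [1].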
2613 */ 2614 if (btf_type_is_modifier(next_type)) { 2615 const struct btf_type *resolved_type; 2616 u32 resolved_type_id; 2617 2618 resolved_type_id = next_type_id; 2619 resolved_type = btf_type_id_resolve(btf, &resolved_type_id); 2620 2621 if (btf_type_is_ptr(resolved_type) && 2622 !env_type_is_resolve_sink(env, resolved_type) && 2623 !env_type_is_resolved(env, resolved_type_id)) 2624 return env_stack_push(env, resolved_type, 2625 resolved_type_id); 2626 } 2627 2628 if (!btf_type_id_size(btf, &next_type_id, NULL)) { 2629 if (env_type_is_resolved(env, next_type_id)) 2630 next_type = btf_type_id_resolve(btf, &next_type_id); 2631 2632 if (!btf_type_is_void(next_type) && 2633 !btf_type_is_fwd(next_type) && 2634 !btf_type_is_func_proto(next_type)) { 2635 btf_verifier_log_type(env, v->t, "Invalid type_id"); 2636 return -EINVAL; 2637 } 2638 } 2639 2640 env_stack_pop_resolved(env, next_type_id, 0); 2641 2642 return 0; 2643 } 2644 2645 static void btf_modifier_show(const struct btf *btf, 2646 const struct btf_type *t, 2647 u32 type_id, void *data, 2648 u8 bits_offset, struct btf_show *show) 2649 { 2650 if (btf->resolved_ids) 2651 t = btf_type_id_resolve(btf, &type_id); 2652 else 2653 t = btf_type_skip_modifiers(btf, type_id, NULL); 2654 2655 btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show); 2656 } 2657 2658 static void btf_var_show(const struct btf *btf, const struct btf_type *t, 2659 u32 type_id, void *data, u8 bits_offset, 2660 struct btf_show *show) 2661 { 2662 t = btf_type_id_resolve(btf, &type_id); 2663 2664 btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show); 2665 } 2666 2667 static void btf_ptr_show(const struct btf *btf, const struct btf_type *t, 2668 u32 type_id, void *data, u8 bits_offset, 2669 struct btf_show *show) 2670 { 2671 void *safe_data; 2672 2673 safe_data = btf_show_start_type(show, t, type_id, data); 2674 if (!safe_data) 2675 return; 2676 2677 /* It is a hashed value unless BTF_SHOW_PTR_RAW is specified */ 2678 if (show->flags & BTF_SHOW_PTR_RAW) 2679 btf_show_type_value(show, "0x%px", *(void **)safe_data); 2680 else 2681 btf_show_type_value(show, "0x%p", *(void **)safe_data); 2682 btf_show_end_type(show); 2683 } 2684 2685 static void btf_ref_type_log(struct btf_verifier_env *env, 2686 const struct btf_type *t) 2687 { 2688 btf_verifier_log(env, "type_id=%u", t->type); 2689 } 2690 2691 static struct btf_kind_operations modifier_ops = { 2692 .check_meta = btf_ref_type_check_meta, 2693 .resolve = btf_modifier_resolve, 2694 .check_member = btf_modifier_check_member, 2695 .check_kflag_member = btf_modifier_check_kflag_member, 2696 .log_details = btf_ref_type_log, 2697 .show = btf_modifier_show, 2698 }; 2699 2700 static struct btf_kind_operations ptr_ops = { 2701 .check_meta = btf_ref_type_check_meta, 2702 .resolve = btf_ptr_resolve, 2703 .check_member = btf_ptr_check_member, 2704 .check_kflag_member = btf_generic_check_kflag_member, 2705 .log_details = btf_ref_type_log, 2706 .show = btf_ptr_show, 2707 }; 2708 2709 static s32 btf_fwd_check_meta(struct btf_verifier_env *env, 2710 const struct btf_type *t, 2711 u32 meta_left) 2712 { 2713 if (btf_type_vlen(t)) { 2714 btf_verifier_log_type(env, t, "vlen != 0"); 2715 return -EINVAL; 2716 } 2717 2718 if (t->type) { 2719 btf_verifier_log_type(env, t, "type != 0"); 2720 return -EINVAL; 2721 } 2722 2723 /* fwd type must have a valid name */ 2724 if (!t->name_off || 2725 !btf_name_valid_identifier(env->btf, t->name_off)) { 2726 btf_verifier_log_type(env, t, "Invalid name"); 2727 return -EINVAL; 2728 } 2729 2730 
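	/* e.g. a bare "struct sk_buff;" declaration with no definition in
	 * scope becomes a BTF_KIND_FWD named "sk_buff"; its kind_flag
	 * distinguishes a union fwd (kflag = 1) from a struct fwd
	 * (kflag = 0), which is what btf_fwd_type_log() below reports.
	 */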
btf_verifier_log_type(env, t, NULL); 2731 2732 return 0; 2733 } 2734 2735 static void btf_fwd_type_log(struct btf_verifier_env *env, 2736 const struct btf_type *t) 2737 { 2738 btf_verifier_log(env, "%s", btf_type_kflag(t) ? "union" : "struct"); 2739 } 2740 2741 static struct btf_kind_operations fwd_ops = { 2742 .check_meta = btf_fwd_check_meta, 2743 .resolve = btf_df_resolve, 2744 .check_member = btf_df_check_member, 2745 .check_kflag_member = btf_df_check_kflag_member, 2746 .log_details = btf_fwd_type_log, 2747 .show = btf_df_show, 2748 }; 2749 2750 static int btf_array_check_member(struct btf_verifier_env *env, 2751 const struct btf_type *struct_type, 2752 const struct btf_member *member, 2753 const struct btf_type *member_type) 2754 { 2755 u32 struct_bits_off = member->offset; 2756 u32 struct_size, bytes_offset; 2757 u32 array_type_id, array_size; 2758 struct btf *btf = env->btf; 2759 2760 if (BITS_PER_BYTE_MASKED(struct_bits_off)) { 2761 btf_verifier_log_member(env, struct_type, member, 2762 "Member is not byte aligned"); 2763 return -EINVAL; 2764 } 2765 2766 array_type_id = member->type; 2767 btf_type_id_size(btf, &array_type_id, &array_size); 2768 struct_size = struct_type->size; 2769 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); 2770 if (struct_size - bytes_offset < array_size) { 2771 btf_verifier_log_member(env, struct_type, member, 2772 "Member exceeds struct_size"); 2773 return -EINVAL; 2774 } 2775 2776 return 0; 2777 } 2778 2779 static s32 btf_array_check_meta(struct btf_verifier_env *env, 2780 const struct btf_type *t, 2781 u32 meta_left) 2782 { 2783 const struct btf_array *array = btf_type_array(t); 2784 u32 meta_needed = sizeof(*array); 2785 2786 if (meta_left < meta_needed) { 2787 btf_verifier_log_basic(env, t, 2788 "meta_left:%u meta_needed:%u", 2789 meta_left, meta_needed); 2790 return -EINVAL; 2791 } 2792 2793 /* array type should not have a name */ 2794 if (t->name_off) { 2795 btf_verifier_log_type(env, t, "Invalid name"); 2796 return -EINVAL; 2797 } 2798 2799 if (btf_type_vlen(t)) { 2800 btf_verifier_log_type(env, t, "vlen != 0"); 2801 return -EINVAL; 2802 } 2803 2804 if (btf_type_kflag(t)) { 2805 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 2806 return -EINVAL; 2807 } 2808 2809 if (t->size) { 2810 btf_verifier_log_type(env, t, "size != 0"); 2811 return -EINVAL; 2812 } 2813 2814 /* Array elem type and index type cannot be in type void, 2815 * so !array->type and !array->index_type are not allowed. 
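	 *
	 * e.g. "int a[5]" is encoded roughly as
	 *
	 *   ARRAY { .type = <id of int>, .index_type = <id of an int>,
	 *           .nelems = 5 }
	 *
	 * and both ids must be non-zero, since type_id 0 means void.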
2816 */ 2817 if (!array->type || !BTF_TYPE_ID_VALID(array->type)) { 2818 btf_verifier_log_type(env, t, "Invalid elem"); 2819 return -EINVAL; 2820 } 2821 2822 if (!array->index_type || !BTF_TYPE_ID_VALID(array->index_type)) { 2823 btf_verifier_log_type(env, t, "Invalid index"); 2824 return -EINVAL; 2825 } 2826 2827 btf_verifier_log_type(env, t, NULL); 2828 2829 return meta_needed; 2830 } 2831 2832 static int btf_array_resolve(struct btf_verifier_env *env, 2833 const struct resolve_vertex *v) 2834 { 2835 const struct btf_array *array = btf_type_array(v->t); 2836 const struct btf_type *elem_type, *index_type; 2837 u32 elem_type_id, index_type_id; 2838 struct btf *btf = env->btf; 2839 u32 elem_size; 2840 2841 /* Check array->index_type */ 2842 index_type_id = array->index_type; 2843 index_type = btf_type_by_id(btf, index_type_id); 2844 if (btf_type_nosize_or_null(index_type) || 2845 btf_type_is_resolve_source_only(index_type)) { 2846 btf_verifier_log_type(env, v->t, "Invalid index"); 2847 return -EINVAL; 2848 } 2849 2850 if (!env_type_is_resolve_sink(env, index_type) && 2851 !env_type_is_resolved(env, index_type_id)) 2852 return env_stack_push(env, index_type, index_type_id); 2853 2854 index_type = btf_type_id_size(btf, &index_type_id, NULL); 2855 if (!index_type || !btf_type_is_int(index_type) || 2856 !btf_type_int_is_regular(index_type)) { 2857 btf_verifier_log_type(env, v->t, "Invalid index"); 2858 return -EINVAL; 2859 } 2860 2861 /* Check array->type */ 2862 elem_type_id = array->type; 2863 elem_type = btf_type_by_id(btf, elem_type_id); 2864 if (btf_type_nosize_or_null(elem_type) || 2865 btf_type_is_resolve_source_only(elem_type)) { 2866 btf_verifier_log_type(env, v->t, 2867 "Invalid elem"); 2868 return -EINVAL; 2869 } 2870 2871 if (!env_type_is_resolve_sink(env, elem_type) && 2872 !env_type_is_resolved(env, elem_type_id)) 2873 return env_stack_push(env, elem_type, elem_type_id); 2874 2875 elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size); 2876 if (!elem_type) { 2877 btf_verifier_log_type(env, v->t, "Invalid elem"); 2878 return -EINVAL; 2879 } 2880 2881 if (btf_type_is_int(elem_type) && !btf_type_int_is_regular(elem_type)) { 2882 btf_verifier_log_type(env, v->t, "Invalid array of int"); 2883 return -EINVAL; 2884 } 2885 2886 if (array->nelems && elem_size > U32_MAX / array->nelems) { 2887 btf_verifier_log_type(env, v->t, 2888 "Array size overflows U32_MAX"); 2889 return -EINVAL; 2890 } 2891 2892 env_stack_pop_resolved(env, elem_type_id, elem_size * array->nelems); 2893 2894 return 0; 2895 } 2896 2897 static void btf_array_log(struct btf_verifier_env *env, 2898 const struct btf_type *t) 2899 { 2900 const struct btf_array *array = btf_type_array(t); 2901 2902 btf_verifier_log(env, "type_id=%u index_type_id=%u nr_elems=%u", 2903 array->type, array->index_type, array->nelems); 2904 } 2905 2906 static void __btf_array_show(const struct btf *btf, const struct btf_type *t, 2907 u32 type_id, void *data, u8 bits_offset, 2908 struct btf_show *show) 2909 { 2910 const struct btf_array *array = btf_type_array(t); 2911 const struct btf_kind_operations *elem_ops; 2912 const struct btf_type *elem_type; 2913 u32 i, elem_size = 0, elem_type_id; 2914 u16 encoding = 0; 2915 2916 elem_type_id = array->type; 2917 elem_type = btf_type_skip_modifiers(btf, elem_type_id, NULL); 2918 if (elem_type && btf_type_has_size(elem_type)) 2919 elem_size = elem_type->size; 2920 2921 if (elem_type && btf_type_is_int(elem_type)) { 2922 u32 int_type = btf_type_int(elem_type); 2923 2924 encoding = 
BTF_INT_ENCODING(int_type); 2925 2926 /* 2927 * BTF_INT_CHAR encoding never seems to be set for 2928 * char arrays, so if size is 1 and element is 2929 * printable as a char, we'll do that. 2930 */ 2931 if (elem_size == 1) 2932 encoding = BTF_INT_CHAR; 2933 } 2934 2935 if (!btf_show_start_array_type(show, t, type_id, encoding, data)) 2936 return; 2937 2938 if (!elem_type) 2939 goto out; 2940 elem_ops = btf_type_ops(elem_type); 2941 2942 for (i = 0; i < array->nelems; i++) { 2943 2944 btf_show_start_array_member(show); 2945 2946 elem_ops->show(btf, elem_type, elem_type_id, data, 2947 bits_offset, show); 2948 data += elem_size; 2949 2950 btf_show_end_array_member(show); 2951 2952 if (show->state.array_terminated) 2953 break; 2954 } 2955 out: 2956 btf_show_end_array_type(show); 2957 } 2958 2959 static void btf_array_show(const struct btf *btf, const struct btf_type *t, 2960 u32 type_id, void *data, u8 bits_offset, 2961 struct btf_show *show) 2962 { 2963 const struct btf_member *m = show->state.member; 2964 2965 /* 2966 * First check if any members would be shown (are non-zero). 2967 * See comments above "struct btf_show" definition for more 2968 * details on how this works at a high-level. 2969 */ 2970 if (show->state.depth > 0 && !(show->flags & BTF_SHOW_ZERO)) { 2971 if (!show->state.depth_check) { 2972 show->state.depth_check = show->state.depth + 1; 2973 show->state.depth_to_show = 0; 2974 } 2975 __btf_array_show(btf, t, type_id, data, bits_offset, show); 2976 show->state.member = m; 2977 2978 if (show->state.depth_check != show->state.depth + 1) 2979 return; 2980 show->state.depth_check = 0; 2981 2982 if (show->state.depth_to_show <= show->state.depth) 2983 return; 2984 /* 2985 * Reaching here indicates we have recursed and found 2986 * non-zero array member(s). 
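	 *
	 * In other words, the __btf_array_show() call above was only a
	 * probe that updated depth_to_show; the call below is the one
	 * that actually emits the array contents.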
2987 */ 2988 } 2989 __btf_array_show(btf, t, type_id, data, bits_offset, show); 2990 } 2991 2992 static struct btf_kind_operations array_ops = { 2993 .check_meta = btf_array_check_meta, 2994 .resolve = btf_array_resolve, 2995 .check_member = btf_array_check_member, 2996 .check_kflag_member = btf_generic_check_kflag_member, 2997 .log_details = btf_array_log, 2998 .show = btf_array_show, 2999 }; 3000 3001 static int btf_struct_check_member(struct btf_verifier_env *env, 3002 const struct btf_type *struct_type, 3003 const struct btf_member *member, 3004 const struct btf_type *member_type) 3005 { 3006 u32 struct_bits_off = member->offset; 3007 u32 struct_size, bytes_offset; 3008 3009 if (BITS_PER_BYTE_MASKED(struct_bits_off)) { 3010 btf_verifier_log_member(env, struct_type, member, 3011 "Member is not byte aligned"); 3012 return -EINVAL; 3013 } 3014 3015 struct_size = struct_type->size; 3016 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); 3017 if (struct_size - bytes_offset < member_type->size) { 3018 btf_verifier_log_member(env, struct_type, member, 3019 "Member exceeds struct_size"); 3020 return -EINVAL; 3021 } 3022 3023 return 0; 3024 } 3025 3026 static s32 btf_struct_check_meta(struct btf_verifier_env *env, 3027 const struct btf_type *t, 3028 u32 meta_left) 3029 { 3030 bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION; 3031 const struct btf_member *member; 3032 u32 meta_needed, last_offset; 3033 struct btf *btf = env->btf; 3034 u32 struct_size = t->size; 3035 u32 offset; 3036 u16 i; 3037 3038 meta_needed = btf_type_vlen(t) * sizeof(*member); 3039 if (meta_left < meta_needed) { 3040 btf_verifier_log_basic(env, t, 3041 "meta_left:%u meta_needed:%u", 3042 meta_left, meta_needed); 3043 return -EINVAL; 3044 } 3045 3046 /* struct type either no name or a valid one */ 3047 if (t->name_off && 3048 !btf_name_valid_identifier(env->btf, t->name_off)) { 3049 btf_verifier_log_type(env, t, "Invalid name"); 3050 return -EINVAL; 3051 } 3052 3053 btf_verifier_log_type(env, t, NULL); 3054 3055 last_offset = 0; 3056 for_each_member(i, t, member) { 3057 if (!btf_name_offset_valid(btf, member->name_off)) { 3058 btf_verifier_log_member(env, t, member, 3059 "Invalid member name_offset:%u", 3060 member->name_off); 3061 return -EINVAL; 3062 } 3063 3064 /* struct member either no name or a valid one */ 3065 if (member->name_off && 3066 !btf_name_valid_identifier(btf, member->name_off)) { 3067 btf_verifier_log_member(env, t, member, "Invalid name"); 3068 return -EINVAL; 3069 } 3070 /* A member cannot be in type void */ 3071 if (!member->type || !BTF_TYPE_ID_VALID(member->type)) { 3072 btf_verifier_log_member(env, t, member, 3073 "Invalid type_id"); 3074 return -EINVAL; 3075 } 3076 3077 offset = __btf_member_bit_offset(t, member); 3078 if (is_union && offset) { 3079 btf_verifier_log_member(env, t, member, 3080 "Invalid member bits_offset"); 3081 return -EINVAL; 3082 } 3083 3084 /* 3085 * ">" instead of ">=" because the last member could be 3086 * "char a[0];" 3087 */ 3088 if (last_offset > offset) { 3089 btf_verifier_log_member(env, t, member, 3090 "Invalid member bits_offset"); 3091 return -EINVAL; 3092 } 3093 3094 if (BITS_ROUNDUP_BYTES(offset) > struct_size) { 3095 btf_verifier_log_member(env, t, member, 3096 "Member bits_offset exceeds its struct size"); 3097 return -EINVAL; 3098 } 3099 3100 btf_verifier_log_member(env, t, member, NULL); 3101 last_offset = offset; 3102 } 3103 3104 return meta_needed; 3105 } 3106 3107 static int btf_struct_resolve(struct btf_verifier_env *env, 3108 const struct 
resolve_vertex *v)
3109 {
3110 	const struct btf_member *member;
3111 	int err;
3112 	u16 i;
3113
3114 	/* Before continuing to resolve the next_member,
3115 	 * ensure the last member is indeed resolved to a
3116 	 * type with size info.
3117 	 */
3118 	if (v->next_member) {
3119 		const struct btf_type *last_member_type;
3120 		const struct btf_member *last_member;
3121 		u16 last_member_type_id;
3122
3123 		last_member = btf_type_member(v->t) + v->next_member - 1;
3124 		last_member_type_id = last_member->type;
3125 		if (WARN_ON_ONCE(!env_type_is_resolved(env,
3126 						       last_member_type_id)))
3127 			return -EINVAL;
3128
3129 		last_member_type = btf_type_by_id(env->btf,
3130 						  last_member_type_id);
3131 		if (btf_type_kflag(v->t))
3132 			err = btf_type_ops(last_member_type)->check_kflag_member(env, v->t,
3133 								last_member,
3134 								last_member_type);
3135 		else
3136 			err = btf_type_ops(last_member_type)->check_member(env, v->t,
3137 								last_member,
3138 								last_member_type);
3139 		if (err)
3140 			return err;
3141 	}
3142
3143 	for_each_member_from(i, v->next_member, v->t, member) {
3144 		u32 member_type_id = member->type;
3145 		const struct btf_type *member_type = btf_type_by_id(env->btf,
3146 								member_type_id);
3147
3148 		if (btf_type_nosize_or_null(member_type) ||
3149 		    btf_type_is_resolve_source_only(member_type)) {
3150 			btf_verifier_log_member(env, v->t, member,
3151 						"Invalid member");
3152 			return -EINVAL;
3153 		}
3154
3155 		if (!env_type_is_resolve_sink(env, member_type) &&
3156 		    !env_type_is_resolved(env, member_type_id)) {
3157 			env_stack_set_next_member(env, i + 1);
3158 			return env_stack_push(env, member_type, member_type_id);
3159 		}
3160
3161 		if (btf_type_kflag(v->t))
3162 			err = btf_type_ops(member_type)->check_kflag_member(env, v->t,
3163 									    member,
3164 									    member_type);
3165 		else
3166 			err = btf_type_ops(member_type)->check_member(env, v->t,
3167 								      member,
3168 								      member_type);
3169 		if (err)
3170 			return err;
3171 	}
3172
3173 	env_stack_pop_resolved(env, 0, 0);
3174
3175 	return 0;
3176 }
3177
3178 static void btf_struct_log(struct btf_verifier_env *env,
3179 			   const struct btf_type *t)
3180 {
3181 	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
3182 }
3183
3184 enum btf_field_type {
3185 	BTF_FIELD_SPIN_LOCK,
3186 	BTF_FIELD_TIMER,
3187 	BTF_FIELD_KPTR,
3188 };
3189
3190 enum {
3191 	BTF_FIELD_IGNORE = 0,
3192 	BTF_FIELD_FOUND = 1,
3193 };
3194
3195 struct btf_field_info {
3196 	u32 type_id;
3197 	u32 off;
3198 	enum bpf_kptr_type type;
3199 };
3200
3201 static int btf_find_struct(const struct btf *btf, const struct btf_type *t,
3202 			   u32 off, int sz, struct btf_field_info *info)
3203 {
3204 	if (!__btf_type_is_struct(t))
3205 		return BTF_FIELD_IGNORE;
3206 	if (t->size != sz)
3207 		return BTF_FIELD_IGNORE;
3208 	info->off = off;
3209 	return BTF_FIELD_FOUND;
3210 }
3211
3212 static int btf_find_kptr(const struct btf *btf, const struct btf_type *t,
3213 			 u32 off, int sz, struct btf_field_info *info)
3214 {
3215 	enum bpf_kptr_type type;
3216 	u32 res_id;
3217
3218 	/* For PTR, sz is always == 8 */
3219 	if (!btf_type_is_ptr(t))
3220 		return BTF_FIELD_IGNORE;
3221 	t = btf_type_by_id(btf, t->type);
3222
3223 	if (!btf_type_is_type_tag(t))
3224 		return BTF_FIELD_IGNORE;
3225 	/* Reject extra tags */
3226 	if (btf_type_is_type_tag(btf_type_by_id(btf, t->type)))
3227 		return -EINVAL;
3228 	if (!strcmp("kptr", __btf_name_by_offset(btf, t->name_off)))
3229 		type = BPF_KPTR_UNREF;
3230 	else if (!strcmp("kptr_ref", __btf_name_by_offset(btf, t->name_off)))
3231 		type = BPF_KPTR_REF;
3232 	else
3233 		return -EINVAL;
3234
3235 	/* Get the base type */
3236 	t =
btf_type_skip_modifiers(btf, t->type, &res_id); 3237 /* Only pointer to struct is allowed */ 3238 if (!__btf_type_is_struct(t)) 3239 return -EINVAL; 3240 3241 info->type_id = res_id; 3242 info->off = off; 3243 info->type = type; 3244 return BTF_FIELD_FOUND; 3245 } 3246 3247 static int btf_find_struct_field(const struct btf *btf, const struct btf_type *t, 3248 const char *name, int sz, int align, 3249 enum btf_field_type field_type, 3250 struct btf_field_info *info, int info_cnt) 3251 { 3252 const struct btf_member *member; 3253 struct btf_field_info tmp; 3254 int ret, idx = 0; 3255 u32 i, off; 3256 3257 for_each_member(i, t, member) { 3258 const struct btf_type *member_type = btf_type_by_id(btf, 3259 member->type); 3260 3261 if (name && strcmp(__btf_name_by_offset(btf, member_type->name_off), name)) 3262 continue; 3263 3264 off = __btf_member_bit_offset(t, member); 3265 if (off % 8) 3266 /* valid C code cannot generate such BTF */ 3267 return -EINVAL; 3268 off /= 8; 3269 if (off % align) 3270 return -EINVAL; 3271 3272 switch (field_type) { 3273 case BTF_FIELD_SPIN_LOCK: 3274 case BTF_FIELD_TIMER: 3275 ret = btf_find_struct(btf, member_type, off, sz, 3276 idx < info_cnt ? &info[idx] : &tmp); 3277 if (ret < 0) 3278 return ret; 3279 break; 3280 case BTF_FIELD_KPTR: 3281 ret = btf_find_kptr(btf, member_type, off, sz, 3282 idx < info_cnt ? &info[idx] : &tmp); 3283 if (ret < 0) 3284 return ret; 3285 break; 3286 default: 3287 return -EFAULT; 3288 } 3289 3290 if (ret == BTF_FIELD_IGNORE) 3291 continue; 3292 if (idx >= info_cnt) 3293 return -E2BIG; 3294 ++idx; 3295 } 3296 return idx; 3297 } 3298 3299 static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t, 3300 const char *name, int sz, int align, 3301 enum btf_field_type field_type, 3302 struct btf_field_info *info, int info_cnt) 3303 { 3304 const struct btf_var_secinfo *vsi; 3305 struct btf_field_info tmp; 3306 int ret, idx = 0; 3307 u32 i, off; 3308 3309 for_each_vsi(i, t, vsi) { 3310 const struct btf_type *var = btf_type_by_id(btf, vsi->type); 3311 const struct btf_type *var_type = btf_type_by_id(btf, var->type); 3312 3313 off = vsi->offset; 3314 3315 if (name && strcmp(__btf_name_by_offset(btf, var_type->name_off), name)) 3316 continue; 3317 if (vsi->size != sz) 3318 continue; 3319 if (off % align) 3320 return -EINVAL; 3321 3322 switch (field_type) { 3323 case BTF_FIELD_SPIN_LOCK: 3324 case BTF_FIELD_TIMER: 3325 ret = btf_find_struct(btf, var_type, off, sz, 3326 idx < info_cnt ? &info[idx] : &tmp); 3327 if (ret < 0) 3328 return ret; 3329 break; 3330 case BTF_FIELD_KPTR: 3331 ret = btf_find_kptr(btf, var_type, off, sz, 3332 idx < info_cnt ? 
&info[idx] : &tmp); 3333 if (ret < 0) 3334 return ret; 3335 break; 3336 default: 3337 return -EFAULT; 3338 } 3339 3340 if (ret == BTF_FIELD_IGNORE) 3341 continue; 3342 if (idx >= info_cnt) 3343 return -E2BIG; 3344 ++idx; 3345 } 3346 return idx; 3347 } 3348 3349 static int btf_find_field(const struct btf *btf, const struct btf_type *t, 3350 enum btf_field_type field_type, 3351 struct btf_field_info *info, int info_cnt) 3352 { 3353 const char *name; 3354 int sz, align; 3355 3356 switch (field_type) { 3357 case BTF_FIELD_SPIN_LOCK: 3358 name = "bpf_spin_lock"; 3359 sz = sizeof(struct bpf_spin_lock); 3360 align = __alignof__(struct bpf_spin_lock); 3361 break; 3362 case BTF_FIELD_TIMER: 3363 name = "bpf_timer"; 3364 sz = sizeof(struct bpf_timer); 3365 align = __alignof__(struct bpf_timer); 3366 break; 3367 case BTF_FIELD_KPTR: 3368 name = NULL; 3369 sz = sizeof(u64); 3370 align = 8; 3371 break; 3372 default: 3373 return -EFAULT; 3374 } 3375 3376 if (__btf_type_is_struct(t)) 3377 return btf_find_struct_field(btf, t, name, sz, align, field_type, info, info_cnt); 3378 else if (btf_type_is_datasec(t)) 3379 return btf_find_datasec_var(btf, t, name, sz, align, field_type, info, info_cnt); 3380 return -EINVAL; 3381 } 3382 3383 /* find 'struct bpf_spin_lock' in map value. 3384 * return >= 0 offset if found 3385 * and < 0 in case of error 3386 */ 3387 int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t) 3388 { 3389 struct btf_field_info info; 3390 int ret; 3391 3392 ret = btf_find_field(btf, t, BTF_FIELD_SPIN_LOCK, &info, 1); 3393 if (ret < 0) 3394 return ret; 3395 if (!ret) 3396 return -ENOENT; 3397 return info.off; 3398 } 3399 3400 int btf_find_timer(const struct btf *btf, const struct btf_type *t) 3401 { 3402 struct btf_field_info info; 3403 int ret; 3404 3405 ret = btf_find_field(btf, t, BTF_FIELD_TIMER, &info, 1); 3406 if (ret < 0) 3407 return ret; 3408 if (!ret) 3409 return -ENOENT; 3410 return info.off; 3411 } 3412 3413 struct bpf_map_value_off *btf_parse_kptrs(const struct btf *btf, 3414 const struct btf_type *t) 3415 { 3416 struct btf_field_info info_arr[BPF_MAP_VALUE_OFF_MAX]; 3417 struct bpf_map_value_off *tab; 3418 struct btf *kernel_btf = NULL; 3419 struct module *mod = NULL; 3420 int ret, i, nr_off; 3421 3422 ret = btf_find_field(btf, t, BTF_FIELD_KPTR, info_arr, ARRAY_SIZE(info_arr)); 3423 if (ret < 0) 3424 return ERR_PTR(ret); 3425 if (!ret) 3426 return NULL; 3427 3428 nr_off = ret; 3429 tab = kzalloc(offsetof(struct bpf_map_value_off, off[nr_off]), GFP_KERNEL | __GFP_NOWARN); 3430 if (!tab) 3431 return ERR_PTR(-ENOMEM); 3432 3433 for (i = 0; i < nr_off; i++) { 3434 const struct btf_type *t; 3435 s32 id; 3436 3437 /* Find type in map BTF, and use it to look up the matching type 3438 * in vmlinux or module BTFs, by name and kind. 3439 */ 3440 t = btf_type_by_id(btf, info_arr[i].type_id); 3441 id = bpf_find_btf_id(__btf_name_by_offset(btf, t->name_off), BTF_INFO_KIND(t->info), 3442 &kernel_btf); 3443 if (id < 0) { 3444 ret = id; 3445 goto end; 3446 } 3447 3448 /* Find and stash the function pointer for the destruction function that 3449 * needs to be eventually invoked from the map free path. 3450 */ 3451 if (info_arr[i].type == BPF_KPTR_REF) { 3452 const struct btf_type *dtor_func; 3453 const char *dtor_func_name; 3454 unsigned long addr; 3455 s32 dtor_btf_id; 3456 3457 /* This call also serves as a whitelist of allowed objects that 3458 * can be used as a referenced pointer and be stored in a map at 3459 * the same time. 
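			 *
			 * e.g. a (hypothetical) map value member
			 *
			 *   struct foo __kptr_ref *f;
			 *
			 * is only accepted here if a destructor kfunc for
			 * "struct foo" was registered beforehand via
			 * register_btf_id_dtor_kfuncs(); otherwise
			 * btf_find_dtor_kfunc() fails and the kptr is
			 * rejected.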
3460 */ 3461 dtor_btf_id = btf_find_dtor_kfunc(kernel_btf, id); 3462 if (dtor_btf_id < 0) { 3463 ret = dtor_btf_id; 3464 goto end_btf; 3465 } 3466 3467 dtor_func = btf_type_by_id(kernel_btf, dtor_btf_id); 3468 if (!dtor_func) { 3469 ret = -ENOENT; 3470 goto end_btf; 3471 } 3472 3473 if (btf_is_module(kernel_btf)) { 3474 mod = btf_try_get_module(kernel_btf); 3475 if (!mod) { 3476 ret = -ENXIO; 3477 goto end_btf; 3478 } 3479 } 3480 3481 /* We already verified dtor_func to be btf_type_is_func 3482 * in register_btf_id_dtor_kfuncs. 3483 */ 3484 dtor_func_name = __btf_name_by_offset(kernel_btf, dtor_func->name_off); 3485 addr = kallsyms_lookup_name(dtor_func_name); 3486 if (!addr) { 3487 ret = -EINVAL; 3488 goto end_mod; 3489 } 3490 tab->off[i].kptr.dtor = (void *)addr; 3491 } 3492 3493 tab->off[i].offset = info_arr[i].off; 3494 tab->off[i].type = info_arr[i].type; 3495 tab->off[i].kptr.btf_id = id; 3496 tab->off[i].kptr.btf = kernel_btf; 3497 tab->off[i].kptr.module = mod; 3498 } 3499 tab->nr_off = nr_off; 3500 return tab; 3501 end_mod: 3502 module_put(mod); 3503 end_btf: 3504 btf_put(kernel_btf); 3505 end: 3506 while (i--) { 3507 btf_put(tab->off[i].kptr.btf); 3508 if (tab->off[i].kptr.module) 3509 module_put(tab->off[i].kptr.module); 3510 } 3511 kfree(tab); 3512 return ERR_PTR(ret); 3513 } 3514 3515 static void __btf_struct_show(const struct btf *btf, const struct btf_type *t, 3516 u32 type_id, void *data, u8 bits_offset, 3517 struct btf_show *show) 3518 { 3519 const struct btf_member *member; 3520 void *safe_data; 3521 u32 i; 3522 3523 safe_data = btf_show_start_struct_type(show, t, type_id, data); 3524 if (!safe_data) 3525 return; 3526 3527 for_each_member(i, t, member) { 3528 const struct btf_type *member_type = btf_type_by_id(btf, 3529 member->type); 3530 const struct btf_kind_operations *ops; 3531 u32 member_offset, bitfield_size; 3532 u32 bytes_offset; 3533 u8 bits8_offset; 3534 3535 btf_show_start_member(show, member); 3536 3537 member_offset = __btf_member_bit_offset(t, member); 3538 bitfield_size = __btf_member_bitfield_size(t, member); 3539 bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset); 3540 bits8_offset = BITS_PER_BYTE_MASKED(member_offset); 3541 if (bitfield_size) { 3542 safe_data = btf_show_start_type(show, member_type, 3543 member->type, 3544 data + bytes_offset); 3545 if (safe_data) 3546 btf_bitfield_show(safe_data, 3547 bits8_offset, 3548 bitfield_size, show); 3549 btf_show_end_type(show); 3550 } else { 3551 ops = btf_type_ops(member_type); 3552 ops->show(btf, member_type, member->type, 3553 data + bytes_offset, bits8_offset, show); 3554 } 3555 3556 btf_show_end_member(show); 3557 } 3558 3559 btf_show_end_struct_type(show); 3560 } 3561 3562 static void btf_struct_show(const struct btf *btf, const struct btf_type *t, 3563 u32 type_id, void *data, u8 bits_offset, 3564 struct btf_show *show) 3565 { 3566 const struct btf_member *m = show->state.member; 3567 3568 /* 3569 * First check if any members would be shown (are non-zero). 3570 * See comments above "struct btf_show" definition for more 3571 * details on how this works at a high-level. 
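	 *
	 * Note that BTF_SHOW_ZERO sidesteps this probe entirely: when it
	 * is set, zero-valued members are printed anyway, so there is
	 * nothing to pre-compute before showing the struct.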
3572 */ 3573 if (show->state.depth > 0 && !(show->flags & BTF_SHOW_ZERO)) { 3574 if (!show->state.depth_check) { 3575 show->state.depth_check = show->state.depth + 1; 3576 show->state.depth_to_show = 0; 3577 } 3578 __btf_struct_show(btf, t, type_id, data, bits_offset, show); 3579 /* Restore saved member data here */ 3580 show->state.member = m; 3581 if (show->state.depth_check != show->state.depth + 1) 3582 return; 3583 show->state.depth_check = 0; 3584 3585 if (show->state.depth_to_show <= show->state.depth) 3586 return; 3587 /* 3588 * Reaching here indicates we have recursed and found 3589 * non-zero child values. 3590 */ 3591 } 3592 3593 __btf_struct_show(btf, t, type_id, data, bits_offset, show); 3594 } 3595 3596 static struct btf_kind_operations struct_ops = { 3597 .check_meta = btf_struct_check_meta, 3598 .resolve = btf_struct_resolve, 3599 .check_member = btf_struct_check_member, 3600 .check_kflag_member = btf_generic_check_kflag_member, 3601 .log_details = btf_struct_log, 3602 .show = btf_struct_show, 3603 }; 3604 3605 static int btf_enum_check_member(struct btf_verifier_env *env, 3606 const struct btf_type *struct_type, 3607 const struct btf_member *member, 3608 const struct btf_type *member_type) 3609 { 3610 u32 struct_bits_off = member->offset; 3611 u32 struct_size, bytes_offset; 3612 3613 if (BITS_PER_BYTE_MASKED(struct_bits_off)) { 3614 btf_verifier_log_member(env, struct_type, member, 3615 "Member is not byte aligned"); 3616 return -EINVAL; 3617 } 3618 3619 struct_size = struct_type->size; 3620 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); 3621 if (struct_size - bytes_offset < member_type->size) { 3622 btf_verifier_log_member(env, struct_type, member, 3623 "Member exceeds struct_size"); 3624 return -EINVAL; 3625 } 3626 3627 return 0; 3628 } 3629 3630 static int btf_enum_check_kflag_member(struct btf_verifier_env *env, 3631 const struct btf_type *struct_type, 3632 const struct btf_member *member, 3633 const struct btf_type *member_type) 3634 { 3635 u32 struct_bits_off, nr_bits, bytes_end, struct_size; 3636 u32 int_bitsize = sizeof(int) * BITS_PER_BYTE; 3637 3638 struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset); 3639 nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset); 3640 if (!nr_bits) { 3641 if (BITS_PER_BYTE_MASKED(struct_bits_off)) { 3642 btf_verifier_log_member(env, struct_type, member, 3643 "Member is not byte aligned"); 3644 return -EINVAL; 3645 } 3646 3647 nr_bits = int_bitsize; 3648 } else if (nr_bits > int_bitsize) { 3649 btf_verifier_log_member(env, struct_type, member, 3650 "Invalid member bitfield_size"); 3651 return -EINVAL; 3652 } 3653 3654 struct_size = struct_type->size; 3655 bytes_end = BITS_ROUNDUP_BYTES(struct_bits_off + nr_bits); 3656 if (struct_size < bytes_end) { 3657 btf_verifier_log_member(env, struct_type, member, 3658 "Member exceeds struct_size"); 3659 return -EINVAL; 3660 } 3661 3662 return 0; 3663 } 3664 3665 static s32 btf_enum_check_meta(struct btf_verifier_env *env, 3666 const struct btf_type *t, 3667 u32 meta_left) 3668 { 3669 const struct btf_enum *enums = btf_type_enum(t); 3670 struct btf *btf = env->btf; 3671 u16 i, nr_enums; 3672 u32 meta_needed; 3673 3674 nr_enums = btf_type_vlen(t); 3675 meta_needed = nr_enums * sizeof(*enums); 3676 3677 if (meta_left < meta_needed) { 3678 btf_verifier_log_basic(env, t, 3679 "meta_left:%u meta_needed:%u", 3680 meta_left, meta_needed); 3681 return -EINVAL; 3682 } 3683 3684 if (btf_type_kflag(t)) { 3685 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 3686 return -EINVAL; 3687 } 
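	/* Only enum sizes of 1, 2, 4 or 8 bytes pass the check below;
	 * e.g. a 3-byte packed enum fails the is_power_of_2() test.
	 */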
3688 3689 if (t->size > 8 || !is_power_of_2(t->size)) { 3690 btf_verifier_log_type(env, t, "Unexpected size"); 3691 return -EINVAL; 3692 } 3693 3694 /* enum type either no name or a valid one */ 3695 if (t->name_off && 3696 !btf_name_valid_identifier(env->btf, t->name_off)) { 3697 btf_verifier_log_type(env, t, "Invalid name"); 3698 return -EINVAL; 3699 } 3700 3701 btf_verifier_log_type(env, t, NULL); 3702 3703 for (i = 0; i < nr_enums; i++) { 3704 if (!btf_name_offset_valid(btf, enums[i].name_off)) { 3705 btf_verifier_log(env, "\tInvalid name_offset:%u", 3706 enums[i].name_off); 3707 return -EINVAL; 3708 } 3709 3710 /* enum member must have a valid name */ 3711 if (!enums[i].name_off || 3712 !btf_name_valid_identifier(btf, enums[i].name_off)) { 3713 btf_verifier_log_type(env, t, "Invalid name"); 3714 return -EINVAL; 3715 } 3716 3717 if (env->log.level == BPF_LOG_KERNEL) 3718 continue; 3719 btf_verifier_log(env, "\t%s val=%d\n", 3720 __btf_name_by_offset(btf, enums[i].name_off), 3721 enums[i].val); 3722 } 3723 3724 return meta_needed; 3725 } 3726 3727 static void btf_enum_log(struct btf_verifier_env *env, 3728 const struct btf_type *t) 3729 { 3730 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t)); 3731 } 3732 3733 static void btf_enum_show(const struct btf *btf, const struct btf_type *t, 3734 u32 type_id, void *data, u8 bits_offset, 3735 struct btf_show *show) 3736 { 3737 const struct btf_enum *enums = btf_type_enum(t); 3738 u32 i, nr_enums = btf_type_vlen(t); 3739 void *safe_data; 3740 int v; 3741 3742 safe_data = btf_show_start_type(show, t, type_id, data); 3743 if (!safe_data) 3744 return; 3745 3746 v = *(int *)safe_data; 3747 3748 for (i = 0; i < nr_enums; i++) { 3749 if (v != enums[i].val) 3750 continue; 3751 3752 btf_show_type_value(show, "%s", 3753 __btf_name_by_offset(btf, 3754 enums[i].name_off)); 3755 3756 btf_show_end_type(show); 3757 return; 3758 } 3759 3760 btf_show_type_value(show, "%d", v); 3761 btf_show_end_type(show); 3762 } 3763 3764 static struct btf_kind_operations enum_ops = { 3765 .check_meta = btf_enum_check_meta, 3766 .resolve = btf_df_resolve, 3767 .check_member = btf_enum_check_member, 3768 .check_kflag_member = btf_enum_check_kflag_member, 3769 .log_details = btf_enum_log, 3770 .show = btf_enum_show, 3771 }; 3772 3773 static s32 btf_func_proto_check_meta(struct btf_verifier_env *env, 3774 const struct btf_type *t, 3775 u32 meta_left) 3776 { 3777 u32 meta_needed = btf_type_vlen(t) * sizeof(struct btf_param); 3778 3779 if (meta_left < meta_needed) { 3780 btf_verifier_log_basic(env, t, 3781 "meta_left:%u meta_needed:%u", 3782 meta_left, meta_needed); 3783 return -EINVAL; 3784 } 3785 3786 if (t->name_off) { 3787 btf_verifier_log_type(env, t, "Invalid name"); 3788 return -EINVAL; 3789 } 3790 3791 if (btf_type_kflag(t)) { 3792 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 3793 return -EINVAL; 3794 } 3795 3796 btf_verifier_log_type(env, t, NULL); 3797 3798 return meta_needed; 3799 } 3800 3801 static void btf_func_proto_log(struct btf_verifier_env *env, 3802 const struct btf_type *t) 3803 { 3804 const struct btf_param *args = (const struct btf_param *)(t + 1); 3805 u16 nr_args = btf_type_vlen(t), i; 3806 3807 btf_verifier_log(env, "return=%u args=(", t->type); 3808 if (!nr_args) { 3809 btf_verifier_log(env, "void"); 3810 goto done; 3811 } 3812 3813 if (nr_args == 1 && !args[0].type) { 3814 /* Only one vararg */ 3815 btf_verifier_log(env, "vararg"); 3816 goto done; 3817 } 3818 3819 btf_verifier_log(env, "%u %s", args[0].type, 3820 
__btf_name_by_offset(env->btf, 3821 args[0].name_off)); 3822 for (i = 1; i < nr_args - 1; i++) 3823 btf_verifier_log(env, ", %u %s", args[i].type, 3824 __btf_name_by_offset(env->btf, 3825 args[i].name_off)); 3826 3827 if (nr_args > 1) { 3828 const struct btf_param *last_arg = &args[nr_args - 1]; 3829 3830 if (last_arg->type) 3831 btf_verifier_log(env, ", %u %s", last_arg->type, 3832 __btf_name_by_offset(env->btf, 3833 last_arg->name_off)); 3834 else 3835 btf_verifier_log(env, ", vararg"); 3836 } 3837 3838 done: 3839 btf_verifier_log(env, ")"); 3840 } 3841 3842 static struct btf_kind_operations func_proto_ops = { 3843 .check_meta = btf_func_proto_check_meta, 3844 .resolve = btf_df_resolve, 3845 /* 3846 * BTF_KIND_FUNC_PROTO cannot be directly referred by 3847 * a struct's member. 3848 * 3849 * It should be a function pointer instead. 3850 * (i.e. struct's member -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO) 3851 * 3852 * Hence, there is no btf_func_check_member(). 3853 */ 3854 .check_member = btf_df_check_member, 3855 .check_kflag_member = btf_df_check_kflag_member, 3856 .log_details = btf_func_proto_log, 3857 .show = btf_df_show, 3858 }; 3859 3860 static s32 btf_func_check_meta(struct btf_verifier_env *env, 3861 const struct btf_type *t, 3862 u32 meta_left) 3863 { 3864 if (!t->name_off || 3865 !btf_name_valid_identifier(env->btf, t->name_off)) { 3866 btf_verifier_log_type(env, t, "Invalid name"); 3867 return -EINVAL; 3868 } 3869 3870 if (btf_type_vlen(t) > BTF_FUNC_GLOBAL) { 3871 btf_verifier_log_type(env, t, "Invalid func linkage"); 3872 return -EINVAL; 3873 } 3874 3875 if (btf_type_kflag(t)) { 3876 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 3877 return -EINVAL; 3878 } 3879 3880 btf_verifier_log_type(env, t, NULL); 3881 3882 return 0; 3883 } 3884 3885 static int btf_func_resolve(struct btf_verifier_env *env, 3886 const struct resolve_vertex *v) 3887 { 3888 const struct btf_type *t = v->t; 3889 u32 next_type_id = t->type; 3890 int err; 3891 3892 err = btf_func_check(env, t); 3893 if (err) 3894 return err; 3895 3896 env_stack_pop_resolved(env, next_type_id, 0); 3897 return 0; 3898 } 3899 3900 static struct btf_kind_operations func_ops = { 3901 .check_meta = btf_func_check_meta, 3902 .resolve = btf_func_resolve, 3903 .check_member = btf_df_check_member, 3904 .check_kflag_member = btf_df_check_kflag_member, 3905 .log_details = btf_ref_type_log, 3906 .show = btf_df_show, 3907 }; 3908 3909 static s32 btf_var_check_meta(struct btf_verifier_env *env, 3910 const struct btf_type *t, 3911 u32 meta_left) 3912 { 3913 const struct btf_var *var; 3914 u32 meta_needed = sizeof(*var); 3915 3916 if (meta_left < meta_needed) { 3917 btf_verifier_log_basic(env, t, 3918 "meta_left:%u meta_needed:%u", 3919 meta_left, meta_needed); 3920 return -EINVAL; 3921 } 3922 3923 if (btf_type_vlen(t)) { 3924 btf_verifier_log_type(env, t, "vlen != 0"); 3925 return -EINVAL; 3926 } 3927 3928 if (btf_type_kflag(t)) { 3929 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 3930 return -EINVAL; 3931 } 3932 3933 if (!t->name_off || 3934 !__btf_name_valid(env->btf, t->name_off, true)) { 3935 btf_verifier_log_type(env, t, "Invalid name"); 3936 return -EINVAL; 3937 } 3938 3939 /* A var cannot be in type void */ 3940 if (!t->type || !BTF_TYPE_ID_VALID(t->type)) { 3941 btf_verifier_log_type(env, t, "Invalid type_id"); 3942 return -EINVAL; 3943 } 3944 3945 var = btf_type_var(t); 3946 if (var->linkage != BTF_VAR_STATIC && 3947 var->linkage != BTF_VAR_GLOBAL_ALLOCATED) { 3948 btf_verifier_log_type(env, t, "Linkage 
not supported"); 3949 return -EINVAL; 3950 } 3951 3952 btf_verifier_log_type(env, t, NULL); 3953 3954 return meta_needed; 3955 } 3956 3957 static void btf_var_log(struct btf_verifier_env *env, const struct btf_type *t) 3958 { 3959 const struct btf_var *var = btf_type_var(t); 3960 3961 btf_verifier_log(env, "type_id=%u linkage=%u", t->type, var->linkage); 3962 } 3963 3964 static const struct btf_kind_operations var_ops = { 3965 .check_meta = btf_var_check_meta, 3966 .resolve = btf_var_resolve, 3967 .check_member = btf_df_check_member, 3968 .check_kflag_member = btf_df_check_kflag_member, 3969 .log_details = btf_var_log, 3970 .show = btf_var_show, 3971 }; 3972 3973 static s32 btf_datasec_check_meta(struct btf_verifier_env *env, 3974 const struct btf_type *t, 3975 u32 meta_left) 3976 { 3977 const struct btf_var_secinfo *vsi; 3978 u64 last_vsi_end_off = 0, sum = 0; 3979 u32 i, meta_needed; 3980 3981 meta_needed = btf_type_vlen(t) * sizeof(*vsi); 3982 if (meta_left < meta_needed) { 3983 btf_verifier_log_basic(env, t, 3984 "meta_left:%u meta_needed:%u", 3985 meta_left, meta_needed); 3986 return -EINVAL; 3987 } 3988 3989 if (!t->size) { 3990 btf_verifier_log_type(env, t, "size == 0"); 3991 return -EINVAL; 3992 } 3993 3994 if (btf_type_kflag(t)) { 3995 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 3996 return -EINVAL; 3997 } 3998 3999 if (!t->name_off || 4000 !btf_name_valid_section(env->btf, t->name_off)) { 4001 btf_verifier_log_type(env, t, "Invalid name"); 4002 return -EINVAL; 4003 } 4004 4005 btf_verifier_log_type(env, t, NULL); 4006 4007 for_each_vsi(i, t, vsi) { 4008 /* A var cannot be in type void */ 4009 if (!vsi->type || !BTF_TYPE_ID_VALID(vsi->type)) { 4010 btf_verifier_log_vsi(env, t, vsi, 4011 "Invalid type_id"); 4012 return -EINVAL; 4013 } 4014 4015 if (vsi->offset < last_vsi_end_off || vsi->offset >= t->size) { 4016 btf_verifier_log_vsi(env, t, vsi, 4017 "Invalid offset"); 4018 return -EINVAL; 4019 } 4020 4021 if (!vsi->size || vsi->size > t->size) { 4022 btf_verifier_log_vsi(env, t, vsi, 4023 "Invalid size"); 4024 return -EINVAL; 4025 } 4026 4027 last_vsi_end_off = vsi->offset + vsi->size; 4028 if (last_vsi_end_off > t->size) { 4029 btf_verifier_log_vsi(env, t, vsi, 4030 "Invalid offset+size"); 4031 return -EINVAL; 4032 } 4033 4034 btf_verifier_log_vsi(env, t, vsi, NULL); 4035 sum += vsi->size; 4036 } 4037 4038 if (t->size < sum) { 4039 btf_verifier_log_type(env, t, "Invalid btf_info size"); 4040 return -EINVAL; 4041 } 4042 4043 return meta_needed; 4044 } 4045 4046 static int btf_datasec_resolve(struct btf_verifier_env *env, 4047 const struct resolve_vertex *v) 4048 { 4049 const struct btf_var_secinfo *vsi; 4050 struct btf *btf = env->btf; 4051 u16 i; 4052 4053 for_each_vsi_from(i, v->next_member, v->t, vsi) { 4054 u32 var_type_id = vsi->type, type_id, type_size = 0; 4055 const struct btf_type *var_type = btf_type_by_id(env->btf, 4056 var_type_id); 4057 if (!var_type || !btf_type_is_var(var_type)) { 4058 btf_verifier_log_vsi(env, v->t, vsi, 4059 "Not a VAR kind member"); 4060 return -EINVAL; 4061 } 4062 4063 if (!env_type_is_resolve_sink(env, var_type) && 4064 !env_type_is_resolved(env, var_type_id)) { 4065 env_stack_set_next_member(env, i + 1); 4066 return env_stack_push(env, var_type, var_type_id); 4067 } 4068 4069 type_id = var_type->type; 4070 if (!btf_type_id_size(btf, &type_id, &type_size)) { 4071 btf_verifier_log_vsi(env, v->t, vsi, "Invalid type"); 4072 return -EINVAL; 4073 } 4074 4075 if (vsi->size < type_size) { 4076 btf_verifier_log_vsi(env, v->t, vsi, 
"Invalid size"); 4077 return -EINVAL; 4078 } 4079 } 4080 4081 env_stack_pop_resolved(env, 0, 0); 4082 return 0; 4083 } 4084 4085 static void btf_datasec_log(struct btf_verifier_env *env, 4086 const struct btf_type *t) 4087 { 4088 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t)); 4089 } 4090 4091 static void btf_datasec_show(const struct btf *btf, 4092 const struct btf_type *t, u32 type_id, 4093 void *data, u8 bits_offset, 4094 struct btf_show *show) 4095 { 4096 const struct btf_var_secinfo *vsi; 4097 const struct btf_type *var; 4098 u32 i; 4099 4100 if (!btf_show_start_type(show, t, type_id, data)) 4101 return; 4102 4103 btf_show_type_value(show, "section (\"%s\") = {", 4104 __btf_name_by_offset(btf, t->name_off)); 4105 for_each_vsi(i, t, vsi) { 4106 var = btf_type_by_id(btf, vsi->type); 4107 if (i) 4108 btf_show(show, ","); 4109 btf_type_ops(var)->show(btf, var, vsi->type, 4110 data + vsi->offset, bits_offset, show); 4111 } 4112 btf_show_end_type(show); 4113 } 4114 4115 static const struct btf_kind_operations datasec_ops = { 4116 .check_meta = btf_datasec_check_meta, 4117 .resolve = btf_datasec_resolve, 4118 .check_member = btf_df_check_member, 4119 .check_kflag_member = btf_df_check_kflag_member, 4120 .log_details = btf_datasec_log, 4121 .show = btf_datasec_show, 4122 }; 4123 4124 static s32 btf_float_check_meta(struct btf_verifier_env *env, 4125 const struct btf_type *t, 4126 u32 meta_left) 4127 { 4128 if (btf_type_vlen(t)) { 4129 btf_verifier_log_type(env, t, "vlen != 0"); 4130 return -EINVAL; 4131 } 4132 4133 if (btf_type_kflag(t)) { 4134 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 4135 return -EINVAL; 4136 } 4137 4138 if (t->size != 2 && t->size != 4 && t->size != 8 && t->size != 12 && 4139 t->size != 16) { 4140 btf_verifier_log_type(env, t, "Invalid type_size"); 4141 return -EINVAL; 4142 } 4143 4144 btf_verifier_log_type(env, t, NULL); 4145 4146 return 0; 4147 } 4148 4149 static int btf_float_check_member(struct btf_verifier_env *env, 4150 const struct btf_type *struct_type, 4151 const struct btf_member *member, 4152 const struct btf_type *member_type) 4153 { 4154 u64 start_offset_bytes; 4155 u64 end_offset_bytes; 4156 u64 misalign_bits; 4157 u64 align_bytes; 4158 u64 align_bits; 4159 4160 /* Different architectures have different alignment requirements, so 4161 * here we check only for the reasonable minimum. This way we ensure 4162 * that types after CO-RE can pass the kernel BTF verifier. 
4163 */ 4164 align_bytes = min_t(u64, sizeof(void *), member_type->size); 4165 align_bits = align_bytes * BITS_PER_BYTE; 4166 div64_u64_rem(member->offset, align_bits, &misalign_bits); 4167 if (misalign_bits) { 4168 btf_verifier_log_member(env, struct_type, member, 4169 "Member is not properly aligned"); 4170 return -EINVAL; 4171 } 4172 4173 start_offset_bytes = member->offset / BITS_PER_BYTE; 4174 end_offset_bytes = start_offset_bytes + member_type->size; 4175 if (end_offset_bytes > struct_type->size) { 4176 btf_verifier_log_member(env, struct_type, member, 4177 "Member exceeds struct_size"); 4178 return -EINVAL; 4179 } 4180 4181 return 0; 4182 } 4183 4184 static void btf_float_log(struct btf_verifier_env *env, 4185 const struct btf_type *t) 4186 { 4187 btf_verifier_log(env, "size=%u", t->size); 4188 } 4189 4190 static const struct btf_kind_operations float_ops = { 4191 .check_meta = btf_float_check_meta, 4192 .resolve = btf_df_resolve, 4193 .check_member = btf_float_check_member, 4194 .check_kflag_member = btf_generic_check_kflag_member, 4195 .log_details = btf_float_log, 4196 .show = btf_df_show, 4197 }; 4198 4199 static s32 btf_decl_tag_check_meta(struct btf_verifier_env *env, 4200 const struct btf_type *t, 4201 u32 meta_left) 4202 { 4203 const struct btf_decl_tag *tag; 4204 u32 meta_needed = sizeof(*tag); 4205 s32 component_idx; 4206 const char *value; 4207 4208 if (meta_left < meta_needed) { 4209 btf_verifier_log_basic(env, t, 4210 "meta_left:%u meta_needed:%u", 4211 meta_left, meta_needed); 4212 return -EINVAL; 4213 } 4214 4215 value = btf_name_by_offset(env->btf, t->name_off); 4216 if (!value || !value[0]) { 4217 btf_verifier_log_type(env, t, "Invalid value"); 4218 return -EINVAL; 4219 } 4220 4221 if (btf_type_vlen(t)) { 4222 btf_verifier_log_type(env, t, "vlen != 0"); 4223 return -EINVAL; 4224 } 4225 4226 if (btf_type_kflag(t)) { 4227 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 4228 return -EINVAL; 4229 } 4230 4231 component_idx = btf_type_decl_tag(t)->component_idx; 4232 if (component_idx < -1) { 4233 btf_verifier_log_type(env, t, "Invalid component_idx"); 4234 return -EINVAL; 4235 } 4236 4237 btf_verifier_log_type(env, t, NULL); 4238 4239 return meta_needed; 4240 } 4241 4242 static int btf_decl_tag_resolve(struct btf_verifier_env *env, 4243 const struct resolve_vertex *v) 4244 { 4245 const struct btf_type *next_type; 4246 const struct btf_type *t = v->t; 4247 u32 next_type_id = t->type; 4248 struct btf *btf = env->btf; 4249 s32 component_idx; 4250 u32 vlen; 4251 4252 next_type = btf_type_by_id(btf, next_type_id); 4253 if (!next_type || !btf_type_is_decl_tag_target(next_type)) { 4254 btf_verifier_log_type(env, v->t, "Invalid type_id"); 4255 return -EINVAL; 4256 } 4257 4258 if (!env_type_is_resolve_sink(env, next_type) && 4259 !env_type_is_resolved(env, next_type_id)) 4260 return env_stack_push(env, next_type, next_type_id); 4261 4262 component_idx = btf_type_decl_tag(t)->component_idx; 4263 if (component_idx != -1) { 4264 if (btf_type_is_var(next_type) || btf_type_is_typedef(next_type)) { 4265 btf_verifier_log_type(env, v->t, "Invalid component_idx"); 4266 return -EINVAL; 4267 } 4268 4269 if (btf_type_is_struct(next_type)) { 4270 vlen = btf_type_vlen(next_type); 4271 } else { 4272 /* next_type should be a function */ 4273 next_type = btf_type_by_id(btf, next_type->type); 4274 vlen = btf_type_vlen(next_type); 4275 } 4276 4277 if ((u32)component_idx >= vlen) { 4278 btf_verifier_log_type(env, v->t, "Invalid component_idx"); 4279 return -EINVAL; 4280 } 4281 } 4282 
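	/* Illustrative sketch (type ids are arbitrary): for
	 *
	 *	struct foo {
	 *		int a;
	 *		int b __attribute__((btf_decl_tag("b_tag")));
	 *	};
	 *
	 * the tag is encoded roughly as
	 *
	 *	[4] STRUCT 'foo' size=8 vlen=2
	 *	[5] DECL_TAG 'b_tag' type_id=4 component_idx=1
	 *
	 * i.e. component_idx selects the struct member (or func_proto
	 * parameter) the tag applies to, while component_idx == -1 means
	 * the tag applies to the type, var or func itself. That is why
	 * only values in [-1, vlen) survive the checks above.
	 */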
4283 env_stack_pop_resolved(env, next_type_id, 0); 4284 4285 return 0; 4286 } 4287 4288 static void btf_decl_tag_log(struct btf_verifier_env *env, const struct btf_type *t) 4289 { 4290 btf_verifier_log(env, "type=%u component_idx=%d", t->type, 4291 btf_type_decl_tag(t)->component_idx); 4292 } 4293 4294 static const struct btf_kind_operations decl_tag_ops = { 4295 .check_meta = btf_decl_tag_check_meta, 4296 .resolve = btf_decl_tag_resolve, 4297 .check_member = btf_df_check_member, 4298 .check_kflag_member = btf_df_check_kflag_member, 4299 .log_details = btf_decl_tag_log, 4300 .show = btf_df_show, 4301 }; 4302 4303 static int btf_func_proto_check(struct btf_verifier_env *env, 4304 const struct btf_type *t) 4305 { 4306 const struct btf_type *ret_type; 4307 const struct btf_param *args; 4308 const struct btf *btf; 4309 u16 nr_args, i; 4310 int err; 4311 4312 btf = env->btf; 4313 args = (const struct btf_param *)(t + 1); 4314 nr_args = btf_type_vlen(t); 4315 4316 /* Check func return type which could be "void" (t->type == 0) */ 4317 if (t->type) { 4318 u32 ret_type_id = t->type; 4319 4320 ret_type = btf_type_by_id(btf, ret_type_id); 4321 if (!ret_type) { 4322 btf_verifier_log_type(env, t, "Invalid return type"); 4323 return -EINVAL; 4324 } 4325 4326 if (btf_type_needs_resolve(ret_type) && 4327 !env_type_is_resolved(env, ret_type_id)) { 4328 err = btf_resolve(env, ret_type, ret_type_id); 4329 if (err) 4330 return err; 4331 } 4332 4333 /* Ensure the return type is a type that has a size */ 4334 if (!btf_type_id_size(btf, &ret_type_id, NULL)) { 4335 btf_verifier_log_type(env, t, "Invalid return type"); 4336 return -EINVAL; 4337 } 4338 } 4339 4340 if (!nr_args) 4341 return 0; 4342 4343 /* Last func arg type_id could be 0 if it is a vararg */ 4344 if (!args[nr_args - 1].type) { 4345 if (args[nr_args - 1].name_off) { 4346 btf_verifier_log_type(env, t, "Invalid arg#%u", 4347 nr_args); 4348 return -EINVAL; 4349 } 4350 nr_args--; 4351 } 4352 4353 err = 0; 4354 for (i = 0; i < nr_args; i++) { 4355 const struct btf_type *arg_type; 4356 u32 arg_type_id; 4357 4358 arg_type_id = args[i].type; 4359 arg_type = btf_type_by_id(btf, arg_type_id); 4360 if (!arg_type) { 4361 btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1); 4362 err = -EINVAL; 4363 break; 4364 } 4365 4366 if (args[i].name_off && 4367 (!btf_name_offset_valid(btf, args[i].name_off) || 4368 !btf_name_valid_identifier(btf, args[i].name_off))) { 4369 btf_verifier_log_type(env, t, 4370 "Invalid arg#%u", i + 1); 4371 err = -EINVAL; 4372 break; 4373 } 4374 4375 if (btf_type_needs_resolve(arg_type) && 4376 !env_type_is_resolved(env, arg_type_id)) { 4377 err = btf_resolve(env, arg_type, arg_type_id); 4378 if (err) 4379 break; 4380 } 4381 4382 if (!btf_type_id_size(btf, &arg_type_id, NULL)) { 4383 btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1); 4384 err = -EINVAL; 4385 break; 4386 } 4387 } 4388 4389 return err; 4390 } 4391 4392 static int btf_func_check(struct btf_verifier_env *env, 4393 const struct btf_type *t) 4394 { 4395 const struct btf_type *proto_type; 4396 const struct btf_param *args; 4397 const struct btf *btf; 4398 u16 nr_args, i; 4399 4400 btf = env->btf; 4401 proto_type = btf_type_by_id(btf, t->type); 4402 4403 if (!proto_type || !btf_type_is_func_proto(proto_type)) { 4404 btf_verifier_log_type(env, t, "Invalid type_id"); 4405 return -EINVAL; 4406 } 4407 4408 args = (const struct btf_param *)(proto_type + 1); 4409 nr_args = btf_type_vlen(proto_type); 4410 for (i = 0; i < nr_args; i++) { 4411 if (!args[i].name_off && args[i].type) { 
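			/* A FUNC must point to a FUNC_PROTO whose typed
			 * arguments are all named; an anonymous btf_param
			 * is only tolerated as the vararg sentinel
			 * (type == 0 as well). E.g. for
			 * "int foo(int a, int b)" the encoding is roughly
			 * (sketch, ids arbitrary):
			 *
			 *	[2] FUNC_PROTO '(anon)' ret_type_id=1 vlen=2
			 *		'a' type_id=1
			 *		'b' type_id=1
			 *	[3] FUNC 'foo' type_id=2
			 *
			 * The same proto with a nameless 'type_id=1' param
			 * takes this error branch.
			 */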
4412 btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1); 4413 return -EINVAL; 4414 } 4415 } 4416 4417 return 0; 4418 } 4419 4420 static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = { 4421 [BTF_KIND_INT] = &int_ops, 4422 [BTF_KIND_PTR] = &ptr_ops, 4423 [BTF_KIND_ARRAY] = &array_ops, 4424 [BTF_KIND_STRUCT] = &struct_ops, 4425 [BTF_KIND_UNION] = &struct_ops, 4426 [BTF_KIND_ENUM] = &enum_ops, 4427 [BTF_KIND_FWD] = &fwd_ops, 4428 [BTF_KIND_TYPEDEF] = &modifier_ops, 4429 [BTF_KIND_VOLATILE] = &modifier_ops, 4430 [BTF_KIND_CONST] = &modifier_ops, 4431 [BTF_KIND_RESTRICT] = &modifier_ops, 4432 [BTF_KIND_FUNC] = &func_ops, 4433 [BTF_KIND_FUNC_PROTO] = &func_proto_ops, 4434 [BTF_KIND_VAR] = &var_ops, 4435 [BTF_KIND_DATASEC] = &datasec_ops, 4436 [BTF_KIND_FLOAT] = &float_ops, 4437 [BTF_KIND_DECL_TAG] = &decl_tag_ops, 4438 [BTF_KIND_TYPE_TAG] = &modifier_ops, 4439 }; 4440 4441 static s32 btf_check_meta(struct btf_verifier_env *env, 4442 const struct btf_type *t, 4443 u32 meta_left) 4444 { 4445 u32 saved_meta_left = meta_left; 4446 s32 var_meta_size; 4447 4448 if (meta_left < sizeof(*t)) { 4449 btf_verifier_log(env, "[%u] meta_left:%u meta_needed:%zu", 4450 env->log_type_id, meta_left, sizeof(*t)); 4451 return -EINVAL; 4452 } 4453 meta_left -= sizeof(*t); 4454 4455 if (t->info & ~BTF_INFO_MASK) { 4456 btf_verifier_log(env, "[%u] Invalid btf_info:%x", 4457 env->log_type_id, t->info); 4458 return -EINVAL; 4459 } 4460 4461 if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX || 4462 BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) { 4463 btf_verifier_log(env, "[%u] Invalid kind:%u", 4464 env->log_type_id, BTF_INFO_KIND(t->info)); 4465 return -EINVAL; 4466 } 4467 4468 if (!btf_name_offset_valid(env->btf, t->name_off)) { 4469 btf_verifier_log(env, "[%u] Invalid name_offset:%u", 4470 env->log_type_id, t->name_off); 4471 return -EINVAL; 4472 } 4473 4474 var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left); 4475 if (var_meta_size < 0) 4476 return var_meta_size; 4477 4478 meta_left -= var_meta_size; 4479 4480 return saved_meta_left - meta_left; 4481 } 4482 4483 static int btf_check_all_metas(struct btf_verifier_env *env) 4484 { 4485 struct btf *btf = env->btf; 4486 struct btf_header *hdr; 4487 void *cur, *end; 4488 4489 hdr = &btf->hdr; 4490 cur = btf->nohdr_data + hdr->type_off; 4491 end = cur + hdr->type_len; 4492 4493 env->log_type_id = btf->base_btf ? 
btf->start_id : 1; 4494 while (cur < end) { 4495 struct btf_type *t = cur; 4496 s32 meta_size; 4497 4498 meta_size = btf_check_meta(env, t, end - cur); 4499 if (meta_size < 0) 4500 return meta_size; 4501 4502 btf_add_type(env, t); 4503 cur += meta_size; 4504 env->log_type_id++; 4505 } 4506 4507 return 0; 4508 } 4509 4510 static bool btf_resolve_valid(struct btf_verifier_env *env, 4511 const struct btf_type *t, 4512 u32 type_id) 4513 { 4514 struct btf *btf = env->btf; 4515 4516 if (!env_type_is_resolved(env, type_id)) 4517 return false; 4518 4519 if (btf_type_is_struct(t) || btf_type_is_datasec(t)) 4520 return !btf_resolved_type_id(btf, type_id) && 4521 !btf_resolved_type_size(btf, type_id); 4522 4523 if (btf_type_is_decl_tag(t) || btf_type_is_func(t)) 4524 return btf_resolved_type_id(btf, type_id) && 4525 !btf_resolved_type_size(btf, type_id); 4526 4527 if (btf_type_is_modifier(t) || btf_type_is_ptr(t) || 4528 btf_type_is_var(t)) { 4529 t = btf_type_id_resolve(btf, &type_id); 4530 return t && 4531 !btf_type_is_modifier(t) && 4532 !btf_type_is_var(t) && 4533 !btf_type_is_datasec(t); 4534 } 4535 4536 if (btf_type_is_array(t)) { 4537 const struct btf_array *array = btf_type_array(t); 4538 const struct btf_type *elem_type; 4539 u32 elem_type_id = array->type; 4540 u32 elem_size; 4541 4542 elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size); 4543 return elem_type && !btf_type_is_modifier(elem_type) && 4544 (array->nelems * elem_size == 4545 btf_resolved_type_size(btf, type_id)); 4546 } 4547 4548 return false; 4549 } 4550 4551 static int btf_resolve(struct btf_verifier_env *env, 4552 const struct btf_type *t, u32 type_id) 4553 { 4554 u32 save_log_type_id = env->log_type_id; 4555 const struct resolve_vertex *v; 4556 int err = 0; 4557 4558 env->resolve_mode = RESOLVE_TBD; 4559 env_stack_push(env, t, type_id); 4560 while (!err && (v = env_stack_peak(env))) { 4561 env->log_type_id = v->type_id; 4562 err = btf_type_ops(v->t)->resolve(env, v); 4563 } 4564 4565 env->log_type_id = type_id; 4566 if (err == -E2BIG) { 4567 btf_verifier_log_type(env, t, 4568 "Exceeded max resolving depth:%u", 4569 MAX_RESOLVE_DEPTH); 4570 } else if (err == -EEXIST) { 4571 btf_verifier_log_type(env, t, "Loop detected"); 4572 } 4573 4574 /* Final sanity check */ 4575 if (!err && !btf_resolve_valid(env, t, type_id)) { 4576 btf_verifier_log_type(env, t, "Invalid resolve state"); 4577 err = -EINVAL; 4578 } 4579 4580 env->log_type_id = save_log_type_id; 4581 return err; 4582 } 4583 4584 static int btf_check_all_types(struct btf_verifier_env *env) 4585 { 4586 struct btf *btf = env->btf; 4587 const struct btf_type *t; 4588 u32 type_id, i; 4589 int err; 4590 4591 err = env_resolve_init(env); 4592 if (err) 4593 return err; 4594 4595 env->phase++; 4596 for (i = btf->base_btf ? 
0 : 1; i < btf->nr_types; i++) { 4597 type_id = btf->start_id + i; 4598 t = btf_type_by_id(btf, type_id); 4599 4600 env->log_type_id = type_id; 4601 if (btf_type_needs_resolve(t) && 4602 !env_type_is_resolved(env, type_id)) { 4603 err = btf_resolve(env, t, type_id); 4604 if (err) 4605 return err; 4606 } 4607 4608 if (btf_type_is_func_proto(t)) { 4609 err = btf_func_proto_check(env, t); 4610 if (err) 4611 return err; 4612 } 4613 } 4614 4615 return 0; 4616 } 4617 4618 static int btf_parse_type_sec(struct btf_verifier_env *env) 4619 { 4620 const struct btf_header *hdr = &env->btf->hdr; 4621 int err; 4622 4623 /* Type section must align to 4 bytes */ 4624 if (hdr->type_off & (sizeof(u32) - 1)) { 4625 btf_verifier_log(env, "Unaligned type_off"); 4626 return -EINVAL; 4627 } 4628 4629 if (!env->btf->base_btf && !hdr->type_len) { 4630 btf_verifier_log(env, "No type found"); 4631 return -EINVAL; 4632 } 4633 4634 err = btf_check_all_metas(env); 4635 if (err) 4636 return err; 4637 4638 return btf_check_all_types(env); 4639 } 4640 4641 static int btf_parse_str_sec(struct btf_verifier_env *env) 4642 { 4643 const struct btf_header *hdr; 4644 struct btf *btf = env->btf; 4645 const char *start, *end; 4646 4647 hdr = &btf->hdr; 4648 start = btf->nohdr_data + hdr->str_off; 4649 end = start + hdr->str_len; 4650 4651 if (end != btf->data + btf->data_size) { 4652 btf_verifier_log(env, "String section is not at the end"); 4653 return -EINVAL; 4654 } 4655 4656 btf->strings = start; 4657 4658 if (btf->base_btf && !hdr->str_len) 4659 return 0; 4660 if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET || end[-1]) { 4661 btf_verifier_log(env, "Invalid string section"); 4662 return -EINVAL; 4663 } 4664 if (!btf->base_btf && start[0]) { 4665 btf_verifier_log(env, "Invalid string section"); 4666 return -EINVAL; 4667 } 4668 4669 return 0; 4670 } 4671 4672 static const size_t btf_sec_info_offset[] = { 4673 offsetof(struct btf_header, type_off), 4674 offsetof(struct btf_header, str_off), 4675 }; 4676 4677 static int btf_sec_info_cmp(const void *a, const void *b) 4678 { 4679 const struct btf_sec_info *x = a; 4680 const struct btf_sec_info *y = b; 4681 4682 return (int)(x->off - y->off) ? 
: (int)(x->len - y->len); 4683 } 4684 4685 static int btf_check_sec_info(struct btf_verifier_env *env, 4686 u32 btf_data_size) 4687 { 4688 struct btf_sec_info secs[ARRAY_SIZE(btf_sec_info_offset)]; 4689 u32 total, expected_total, i; 4690 const struct btf_header *hdr; 4691 const struct btf *btf; 4692 4693 btf = env->btf; 4694 hdr = &btf->hdr; 4695 4696 /* Populate the secs from hdr */ 4697 for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) 4698 secs[i] = *(struct btf_sec_info *)((void *)hdr + 4699 btf_sec_info_offset[i]); 4700 4701 sort(secs, ARRAY_SIZE(btf_sec_info_offset), 4702 sizeof(struct btf_sec_info), btf_sec_info_cmp, NULL); 4703 4704 /* Check for gaps and overlap among sections */ 4705 total = 0; 4706 expected_total = btf_data_size - hdr->hdr_len; 4707 for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) { 4708 if (expected_total < secs[i].off) { 4709 btf_verifier_log(env, "Invalid section offset"); 4710 return -EINVAL; 4711 } 4712 if (total < secs[i].off) { 4713 /* gap */ 4714 btf_verifier_log(env, "Unsupported section found"); 4715 return -EINVAL; 4716 } 4717 if (total > secs[i].off) { 4718 btf_verifier_log(env, "Section overlap found"); 4719 return -EINVAL; 4720 } 4721 if (expected_total - total < secs[i].len) { 4722 btf_verifier_log(env, 4723 "Total section length too long"); 4724 return -EINVAL; 4725 } 4726 total += secs[i].len; 4727 } 4728 4729 /* There is data other than hdr and known sections */ 4730 if (expected_total != total) { 4731 btf_verifier_log(env, "Unsupported section found"); 4732 return -EINVAL; 4733 } 4734 4735 return 0; 4736 } 4737 4738 static int btf_parse_hdr(struct btf_verifier_env *env) 4739 { 4740 u32 hdr_len, hdr_copy, btf_data_size; 4741 const struct btf_header *hdr; 4742 struct btf *btf; 4743 int err; 4744 4745 btf = env->btf; 4746 btf_data_size = btf->data_size; 4747 4748 if (btf_data_size < offsetofend(struct btf_header, hdr_len)) { 4749 btf_verifier_log(env, "hdr_len not found"); 4750 return -EINVAL; 4751 } 4752 4753 hdr = btf->data; 4754 hdr_len = hdr->hdr_len; 4755 if (btf_data_size < hdr_len) { 4756 btf_verifier_log(env, "btf_header not found"); 4757 return -EINVAL; 4758 } 4759 4760 /* Ensure the unsupported header fields are zero */ 4761 if (hdr_len > sizeof(btf->hdr)) { 4762 u8 *expected_zero = btf->data + sizeof(btf->hdr); 4763 u8 *end = btf->data + hdr_len; 4764 4765 for (; expected_zero < end; expected_zero++) { 4766 if (*expected_zero) { 4767 btf_verifier_log(env, "Unsupported btf_header"); 4768 return -E2BIG; 4769 } 4770 } 4771 } 4772 4773 hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr)); 4774 memcpy(&btf->hdr, btf->data, hdr_copy); 4775 4776 hdr = &btf->hdr; 4777 4778 btf_verifier_log_hdr(env, btf_data_size); 4779 4780 if (hdr->magic != BTF_MAGIC) { 4781 btf_verifier_log(env, "Invalid magic"); 4782 return -EINVAL; 4783 } 4784 4785 if (hdr->version != BTF_VERSION) { 4786 btf_verifier_log(env, "Unsupported version"); 4787 return -ENOTSUPP; 4788 } 4789 4790 if (hdr->flags) { 4791 btf_verifier_log(env, "Unsupported flags"); 4792 return -ENOTSUPP; 4793 } 4794 4795 if (!btf->base_btf && btf_data_size == hdr->hdr_len) { 4796 btf_verifier_log(env, "No data"); 4797 return -EINVAL; 4798 } 4799 4800 err = btf_check_sec_info(env, btf_data_size); 4801 if (err) 4802 return err; 4803 4804 return 0; 4805 } 4806 4807 static int btf_check_type_tags(struct btf_verifier_env *env, 4808 struct btf *btf, int start_id) 4809 { 4810 int i, n, good_id = start_id - 1; 4811 bool in_tags; 4812 4813 n = btf_nr_types(btf); 4814 for (i = start_id; i < n; i++) { 4815 
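		/* Walk each modifier chain and require all TYPE_TAGs to
		 * come first, e.g. (sketch):
		 *
		 *	TYPE_TAG "user" -> CONST -> INT		(accepted)
		 *	CONST -> TYPE_TAG "user" -> INT		(rejected)
		 *
		 * good_id caps the walk so chain suffixes that were
		 * already vetted on an earlier iteration are not
		 * re-checked.
		 */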
const struct btf_type *t; 4816 u32 cur_id = i; 4817 4818 t = btf_type_by_id(btf, i); 4819 if (!t) 4820 return -EINVAL; 4821 if (!btf_type_is_modifier(t)) 4822 continue; 4823 4824 cond_resched(); 4825 4826 in_tags = btf_type_is_type_tag(t); 4827 while (btf_type_is_modifier(t)) { 4828 if (btf_type_is_type_tag(t)) { 4829 if (!in_tags) { 4830 btf_verifier_log(env, "Type tags don't precede modifiers"); 4831 return -EINVAL; 4832 } 4833 } else if (in_tags) { 4834 in_tags = false; 4835 } 4836 if (cur_id <= good_id) 4837 break; 4838 /* Move to next type */ 4839 cur_id = t->type; 4840 t = btf_type_by_id(btf, cur_id); 4841 if (!t) 4842 return -EINVAL; 4843 } 4844 good_id = i; 4845 } 4846 return 0; 4847 } 4848 4849 static struct btf *btf_parse(bpfptr_t btf_data, u32 btf_data_size, 4850 u32 log_level, char __user *log_ubuf, u32 log_size) 4851 { 4852 struct btf_verifier_env *env = NULL; 4853 struct bpf_verifier_log *log; 4854 struct btf *btf = NULL; 4855 u8 *data; 4856 int err; 4857 4858 if (btf_data_size > BTF_MAX_SIZE) 4859 return ERR_PTR(-E2BIG); 4860 4861 env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN); 4862 if (!env) 4863 return ERR_PTR(-ENOMEM); 4864 4865 log = &env->log; 4866 if (log_level || log_ubuf || log_size) { 4867 /* user requested verbose verifier output 4868 * and supplied buffer to store the verification trace 4869 */ 4870 log->level = log_level; 4871 log->ubuf = log_ubuf; 4872 log->len_total = log_size; 4873 4874 /* log attributes have to be sane */ 4875 if (!bpf_verifier_log_attr_valid(log)) { 4876 err = -EINVAL; 4877 goto errout; 4878 } 4879 } 4880 4881 btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN); 4882 if (!btf) { 4883 err = -ENOMEM; 4884 goto errout; 4885 } 4886 env->btf = btf; 4887 4888 data = kvmalloc(btf_data_size, GFP_KERNEL | __GFP_NOWARN); 4889 if (!data) { 4890 err = -ENOMEM; 4891 goto errout; 4892 } 4893 4894 btf->data = data; 4895 btf->data_size = btf_data_size; 4896 4897 if (copy_from_bpfptr(data, btf_data, btf_data_size)) { 4898 err = -EFAULT; 4899 goto errout; 4900 } 4901 4902 err = btf_parse_hdr(env); 4903 if (err) 4904 goto errout; 4905 4906 btf->nohdr_data = btf->data + btf->hdr.hdr_len; 4907 4908 err = btf_parse_str_sec(env); 4909 if (err) 4910 goto errout; 4911 4912 err = btf_parse_type_sec(env); 4913 if (err) 4914 goto errout; 4915 4916 err = btf_check_type_tags(env, btf, 1); 4917 if (err) 4918 goto errout; 4919 4920 if (log->level && bpf_verifier_log_full(log)) { 4921 err = -ENOSPC; 4922 goto errout; 4923 } 4924 4925 btf_verifier_env_free(env); 4926 refcount_set(&btf->refcnt, 1); 4927 return btf; 4928 4929 errout: 4930 btf_verifier_env_free(env); 4931 if (btf) 4932 btf_free(btf); 4933 return ERR_PTR(err); 4934 } 4935 4936 extern char __weak __start_BTF[]; 4937 extern char __weak __stop_BTF[]; 4938 extern struct btf *btf_vmlinux; 4939 4940 #define BPF_MAP_TYPE(_id, _ops) 4941 #define BPF_LINK_TYPE(_id, _name) 4942 static union { 4943 struct bpf_ctx_convert { 4944 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ 4945 prog_ctx_type _id##_prog; \ 4946 kern_ctx_type _id##_kern; 4947 #include <linux/bpf_types.h> 4948 #undef BPF_PROG_TYPE 4949 } *__t; 4950 /* 't' is written once under lock. Read many times. 
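	 * Conceptually the generated struct looks like (sketch; the real
	 * member list comes from <linux/bpf_types.h>):
	 *
	 *	struct bpf_ctx_convert {
	 *		struct __sk_buff BPF_PROG_TYPE_SOCKET_FILTER_prog;
	 *		struct sk_buff   BPF_PROG_TYPE_SOCKET_FILTER_kern;
	 *		...
	 *	};
	 *
	 * so member 2 * bpf_ctx_convert_map[prog_type] is the prog-visible
	 * ctx type and the member right after it is the kernel-side type;
	 * see btf_get_prog_ctx_type() below.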
*/ 4951 const struct btf_type *t; 4952 } bpf_ctx_convert; 4953 enum { 4954 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ 4955 __ctx_convert##_id, 4956 #include <linux/bpf_types.h> 4957 #undef BPF_PROG_TYPE 4958 __ctx_convert_unused, /* to avoid empty enum in extreme .config */ 4959 }; 4960 static u8 bpf_ctx_convert_map[] = { 4961 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ 4962 [_id] = __ctx_convert##_id, 4963 #include <linux/bpf_types.h> 4964 #undef BPF_PROG_TYPE 4965 0, /* avoid empty array */ 4966 }; 4967 #undef BPF_MAP_TYPE 4968 #undef BPF_LINK_TYPE 4969 4970 static const struct btf_member * 4971 btf_get_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf, 4972 const struct btf_type *t, enum bpf_prog_type prog_type, 4973 int arg) 4974 { 4975 const struct btf_type *conv_struct; 4976 const struct btf_type *ctx_struct; 4977 const struct btf_member *ctx_type; 4978 const char *tname, *ctx_tname; 4979 4980 conv_struct = bpf_ctx_convert.t; 4981 if (!conv_struct) { 4982 bpf_log(log, "btf_vmlinux is malformed\n"); 4983 return NULL; 4984 } 4985 t = btf_type_by_id(btf, t->type); 4986 while (btf_type_is_modifier(t)) 4987 t = btf_type_by_id(btf, t->type); 4988 if (!btf_type_is_struct(t)) { 4989 /* Only pointer to struct is supported for now. 4990 * That means that BPF_PROG_TYPE_TRACEPOINT with BTF 4991 * is not supported yet. 4992 * BPF_PROG_TYPE_RAW_TRACEPOINT is fine. 4993 */ 4994 return NULL; 4995 } 4996 tname = btf_name_by_offset(btf, t->name_off); 4997 if (!tname) { 4998 bpf_log(log, "arg#%d struct doesn't have a name\n", arg); 4999 return NULL; 5000 } 5001 /* prog_type is valid bpf program type. No need for bounds check. */ 5002 ctx_type = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2; 5003 /* ctx_struct is a pointer to prog_ctx_type in vmlinux. 5004 * Like 'struct __sk_buff' 5005 */ 5006 ctx_struct = btf_type_by_id(btf_vmlinux, ctx_type->type); 5007 if (!ctx_struct) 5008 /* should not happen */ 5009 return NULL; 5010 ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_struct->name_off); 5011 if (!ctx_tname) { 5012 /* should not happen */ 5013 bpf_log(log, "Please fix kernel include/linux/bpf_types.h\n"); 5014 return NULL; 5015 } 5016 /* only compare that prog's ctx type name is the same as 5017 * kernel expects. No need to compare field by field. 
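	 * E.g. a BPF_PROG_TYPE_SOCKET_FILTER prog declaring its ctx arg
	 * as "struct __sk_buff *" matches the "__sk_buff" row here and is
	 * later translated to the kernel-side "struct sk_buff".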
5018 * It's ok for bpf prog to do: 5019 * struct __sk_buff {}; 5020 * int socket_filter_bpf_prog(struct __sk_buff *skb) 5021 * { // no fields of skb are ever used } 5022 */ 5023 if (strcmp(ctx_tname, tname)) 5024 return NULL; 5025 return ctx_type; 5026 } 5027 5028 static int btf_translate_to_vmlinux(struct bpf_verifier_log *log, 5029 struct btf *btf, 5030 const struct btf_type *t, 5031 enum bpf_prog_type prog_type, 5032 int arg) 5033 { 5034 const struct btf_member *prog_ctx_type, *kern_ctx_type; 5035 5036 prog_ctx_type = btf_get_prog_ctx_type(log, btf, t, prog_type, arg); 5037 if (!prog_ctx_type) 5038 return -ENOENT; 5039 kern_ctx_type = prog_ctx_type + 1; 5040 return kern_ctx_type->type; 5041 } 5042 5043 BTF_ID_LIST(bpf_ctx_convert_btf_id) 5044 BTF_ID(struct, bpf_ctx_convert) 5045 5046 struct btf *btf_parse_vmlinux(void) 5047 { 5048 struct btf_verifier_env *env = NULL; 5049 struct bpf_verifier_log *log; 5050 struct btf *btf = NULL; 5051 int err; 5052 5053 env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN); 5054 if (!env) 5055 return ERR_PTR(-ENOMEM); 5056 5057 log = &env->log; 5058 log->level = BPF_LOG_KERNEL; 5059 5060 btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN); 5061 if (!btf) { 5062 err = -ENOMEM; 5063 goto errout; 5064 } 5065 env->btf = btf; 5066 5067 btf->data = __start_BTF; 5068 btf->data_size = __stop_BTF - __start_BTF; 5069 btf->kernel_btf = true; 5070 snprintf(btf->name, sizeof(btf->name), "vmlinux"); 5071 5072 err = btf_parse_hdr(env); 5073 if (err) 5074 goto errout; 5075 5076 btf->nohdr_data = btf->data + btf->hdr.hdr_len; 5077 5078 err = btf_parse_str_sec(env); 5079 if (err) 5080 goto errout; 5081 5082 err = btf_check_all_metas(env); 5083 if (err) 5084 goto errout; 5085 5086 err = btf_check_type_tags(env, btf, 1); 5087 if (err) 5088 goto errout; 5089 5090 /* btf_parse_vmlinux() runs under bpf_verifier_lock */ 5091 bpf_ctx_convert.t = btf_type_by_id(btf, bpf_ctx_convert_btf_id[0]); 5092 5093 bpf_struct_ops_init(btf, log); 5094 5095 refcount_set(&btf->refcnt, 1); 5096 5097 err = btf_alloc_id(btf); 5098 if (err) 5099 goto errout; 5100 5101 btf_verifier_env_free(env); 5102 return btf; 5103 5104 errout: 5105 btf_verifier_env_free(env); 5106 if (btf) { 5107 kvfree(btf->types); 5108 kfree(btf); 5109 } 5110 return ERR_PTR(err); 5111 } 5112 5113 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES 5114 5115 static struct btf *btf_parse_module(const char *module_name, const void *data, unsigned int data_size) 5116 { 5117 struct btf_verifier_env *env = NULL; 5118 struct bpf_verifier_log *log; 5119 struct btf *btf = NULL, *base_btf; 5120 int err; 5121 5122 base_btf = bpf_get_btf_vmlinux(); 5123 if (IS_ERR(base_btf)) 5124 return base_btf; 5125 if (!base_btf) 5126 return ERR_PTR(-EINVAL); 5127 5128 env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN); 5129 if (!env) 5130 return ERR_PTR(-ENOMEM); 5131 5132 log = &env->log; 5133 log->level = BPF_LOG_KERNEL; 5134 5135 btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN); 5136 if (!btf) { 5137 err = -ENOMEM; 5138 goto errout; 5139 } 5140 env->btf = btf; 5141 5142 btf->base_btf = base_btf; 5143 btf->start_id = base_btf->nr_types; 5144 btf->start_str_off = base_btf->hdr.str_len; 5145 btf->kernel_btf = true; 5146 snprintf(btf->name, sizeof(btf->name), "%s", module_name); 5147 5148 btf->data = kvmalloc(data_size, GFP_KERNEL | __GFP_NOWARN); 5149 if (!btf->data) { 5150 err = -ENOMEM; 5151 goto errout; 5152 } 5153 memcpy(btf->data, data, data_size); 5154 btf->data_size = data_size; 5155 5156 err = btf_parse_hdr(env); 5157 if (err) 5158 goto 
errout; 5159 5160 btf->nohdr_data = btf->data + btf->hdr.hdr_len; 5161 5162 err = btf_parse_str_sec(env); 5163 if (err) 5164 goto errout; 5165 5166 err = btf_check_all_metas(env); 5167 if (err) 5168 goto errout; 5169 5170 err = btf_check_type_tags(env, btf, btf_nr_types(base_btf)); 5171 if (err) 5172 goto errout; 5173 5174 btf_verifier_env_free(env); 5175 refcount_set(&btf->refcnt, 1); 5176 return btf; 5177 5178 errout: 5179 btf_verifier_env_free(env); 5180 if (btf) { 5181 kvfree(btf->data); 5182 kvfree(btf->types); 5183 kfree(btf); 5184 } 5185 return ERR_PTR(err); 5186 } 5187 5188 #endif /* CONFIG_DEBUG_INFO_BTF_MODULES */ 5189 5190 struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog) 5191 { 5192 struct bpf_prog *tgt_prog = prog->aux->dst_prog; 5193 5194 if (tgt_prog) 5195 return tgt_prog->aux->btf; 5196 else 5197 return prog->aux->attach_btf; 5198 } 5199 5200 static bool is_int_ptr(struct btf *btf, const struct btf_type *t) 5201 { 5202 /* t comes in already as a pointer */ 5203 t = btf_type_by_id(btf, t->type); 5204 5205 /* allow const */ 5206 if (BTF_INFO_KIND(t->info) == BTF_KIND_CONST) 5207 t = btf_type_by_id(btf, t->type); 5208 5209 return btf_type_is_int(t); 5210 } 5211 5212 bool btf_ctx_access(int off, int size, enum bpf_access_type type, 5213 const struct bpf_prog *prog, 5214 struct bpf_insn_access_aux *info) 5215 { 5216 const struct btf_type *t = prog->aux->attach_func_proto; 5217 struct bpf_prog *tgt_prog = prog->aux->dst_prog; 5218 struct btf *btf = bpf_prog_get_target_btf(prog); 5219 const char *tname = prog->aux->attach_func_name; 5220 struct bpf_verifier_log *log = info->log; 5221 const struct btf_param *args; 5222 const char *tag_value; 5223 u32 nr_args, arg; 5224 int i, ret; 5225 5226 if (off % 8) { 5227 bpf_log(log, "func '%s' offset %d is not multiple of 8\n", 5228 tname, off); 5229 return false; 5230 } 5231 arg = off / 8; 5232 args = (const struct btf_param *)(t + 1); 5233 /* if (t == NULL) Fall back to default BPF prog with 5234 * MAX_BPF_FUNC_REG_ARGS u64 arguments. 5235 */ 5236 nr_args = t ? btf_type_vlen(t) : MAX_BPF_FUNC_REG_ARGS; 5237 if (prog->aux->attach_btf_trace) { 5238 /* skip first 'void *__data' argument in btf_trace_##name typedef */ 5239 args++; 5240 nr_args--; 5241 } 5242 5243 if (arg > nr_args) { 5244 bpf_log(log, "func '%s' doesn't have %d-th argument\n", 5245 tname, arg + 1); 5246 return false; 5247 } 5248 5249 if (arg == nr_args) { 5250 switch (prog->expected_attach_type) { 5251 case BPF_LSM_MAC: 5252 case BPF_TRACE_FEXIT: 5253 /* When LSM programs are attached to void LSM hooks 5254 * they use FEXIT trampolines and when attached to 5255 * int LSM hooks, they use MODIFY_RETURN trampolines. 5256 * 5257 * While the LSM programs are BPF_MODIFY_RETURN-like 5258 * the check: 5259 * 5260 * if (ret_type != 'int') 5261 * return -EINVAL; 5262 * 5263 * is _not_ done here. This is still safe as LSM hooks 5264 * have only void and int return types. 5265 */ 5266 if (!t) 5267 return true; 5268 t = btf_type_by_id(btf, t->type); 5269 break; 5270 case BPF_MODIFY_RETURN: 5271 /* For now the BPF_MODIFY_RETURN can only be attached to 5272 * functions that return an int. 
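			 * E.g. attaching fmod_ret to an int-returning hook
			 * such as security_socket_connect() is accepted,
			 * while a void or pointer-returning target is
			 * rejected right below. (Illustrative target; any
			 * small-int return type works.)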
5273 */ 5274 if (!t) 5275 return false; 5276 5277 t = btf_type_skip_modifiers(btf, t->type, NULL); 5278 if (!btf_type_is_small_int(t)) { 5279 bpf_log(log, 5280 "ret type %s not allowed for fmod_ret\n", 5281 btf_kind_str[BTF_INFO_KIND(t->info)]); 5282 return false; 5283 } 5284 break; 5285 default: 5286 bpf_log(log, "func '%s' doesn't have %d-th argument\n", 5287 tname, arg + 1); 5288 return false; 5289 } 5290 } else { 5291 if (!t) 5292 /* Default prog with MAX_BPF_FUNC_REG_ARGS args */ 5293 return true; 5294 t = btf_type_by_id(btf, args[arg].type); 5295 } 5296 5297 /* skip modifiers */ 5298 while (btf_type_is_modifier(t)) 5299 t = btf_type_by_id(btf, t->type); 5300 if (btf_type_is_small_int(t) || btf_type_is_enum(t)) 5301 /* accessing a scalar */ 5302 return true; 5303 if (!btf_type_is_ptr(t)) { 5304 bpf_log(log, 5305 "func '%s' arg%d '%s' has type %s. Only pointer access is allowed\n", 5306 tname, arg, 5307 __btf_name_by_offset(btf, t->name_off), 5308 btf_kind_str[BTF_INFO_KIND(t->info)]); 5309 return false; 5310 } 5311 5312 /* check for PTR_TO_RDONLY_BUF_OR_NULL or PTR_TO_RDWR_BUF_OR_NULL */ 5313 for (i = 0; i < prog->aux->ctx_arg_info_size; i++) { 5314 const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i]; 5315 u32 type, flag; 5316 5317 type = base_type(ctx_arg_info->reg_type); 5318 flag = type_flag(ctx_arg_info->reg_type); 5319 if (ctx_arg_info->offset == off && type == PTR_TO_BUF && 5320 (flag & PTR_MAYBE_NULL)) { 5321 info->reg_type = ctx_arg_info->reg_type; 5322 return true; 5323 } 5324 } 5325 5326 if (t->type == 0) 5327 /* This is a pointer to void. 5328 * It is the same as scalar from the verifier safety pov. 5329 * No further pointer walking is allowed. 5330 */ 5331 return true; 5332 5333 if (is_int_ptr(btf, t)) 5334 return true; 5335 5336 /* this is a pointer to another type */ 5337 for (i = 0; i < prog->aux->ctx_arg_info_size; i++) { 5338 const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i]; 5339 5340 if (ctx_arg_info->offset == off) { 5341 if (!ctx_arg_info->btf_id) { 5342 bpf_log(log,"invalid btf_id for context argument offset %u\n", off); 5343 return false; 5344 } 5345 5346 info->reg_type = ctx_arg_info->reg_type; 5347 info->btf = btf_vmlinux; 5348 info->btf_id = ctx_arg_info->btf_id; 5349 return true; 5350 } 5351 } 5352 5353 info->reg_type = PTR_TO_BTF_ID; 5354 if (tgt_prog) { 5355 enum bpf_prog_type tgt_type; 5356 5357 if (tgt_prog->type == BPF_PROG_TYPE_EXT) 5358 tgt_type = tgt_prog->aux->saved_dst_prog_type; 5359 else 5360 tgt_type = tgt_prog->type; 5361 5362 ret = btf_translate_to_vmlinux(log, btf, t, tgt_type, arg); 5363 if (ret > 0) { 5364 info->btf = btf_vmlinux; 5365 info->btf_id = ret; 5366 return true; 5367 } else { 5368 return false; 5369 } 5370 } 5371 5372 info->btf = btf; 5373 info->btf_id = t->type; 5374 t = btf_type_by_id(btf, t->type); 5375 5376 if (btf_type_is_type_tag(t)) { 5377 tag_value = __btf_name_by_offset(btf, t->name_off); 5378 if (strcmp(tag_value, "user") == 0) 5379 info->reg_type |= MEM_USER; 5380 if (strcmp(tag_value, "percpu") == 0) 5381 info->reg_type |= MEM_PERCPU; 5382 } 5383 5384 /* skip modifiers */ 5385 while (btf_type_is_modifier(t)) { 5386 info->btf_id = t->type; 5387 t = btf_type_by_id(btf, t->type); 5388 } 5389 if (!btf_type_is_struct(t)) { 5390 bpf_log(log, 5391 "func '%s' arg%d type %s is not a struct\n", 5392 tname, arg, btf_kind_str[BTF_INFO_KIND(t->info)]); 5393 return false; 5394 } 5395 bpf_log(log, "func '%s' arg%d has btf_id %d type %s '%s'\n", 5396 tname, arg, info->btf_id, 
btf_kind_str[BTF_INFO_KIND(t->info)], 5397 __btf_name_by_offset(btf, t->name_off)); 5398 return true; 5399 } 5400 5401 enum bpf_struct_walk_result { 5402 /* < 0 error */ 5403 WALK_SCALAR = 0, 5404 WALK_PTR, 5405 WALK_STRUCT, 5406 }; 5407 5408 static int btf_struct_walk(struct bpf_verifier_log *log, const struct btf *btf, 5409 const struct btf_type *t, int off, int size, 5410 u32 *next_btf_id, enum bpf_type_flag *flag) 5411 { 5412 u32 i, moff, mtrue_end, msize = 0, total_nelems = 0; 5413 const struct btf_type *mtype, *elem_type = NULL; 5414 const struct btf_member *member; 5415 const char *tname, *mname, *tag_value; 5416 u32 vlen, elem_id, mid; 5417 5418 again: 5419 tname = __btf_name_by_offset(btf, t->name_off); 5420 if (!btf_type_is_struct(t)) { 5421 bpf_log(log, "Type '%s' is not a struct\n", tname); 5422 return -EINVAL; 5423 } 5424 5425 vlen = btf_type_vlen(t); 5426 if (off + size > t->size) { 5427 /* If the last element is a variable size array, we may 5428 * need to relax the rule. 5429 */ 5430 struct btf_array *array_elem; 5431 5432 if (vlen == 0) 5433 goto error; 5434 5435 member = btf_type_member(t) + vlen - 1; 5436 mtype = btf_type_skip_modifiers(btf, member->type, 5437 NULL); 5438 if (!btf_type_is_array(mtype)) 5439 goto error; 5440 5441 array_elem = (struct btf_array *)(mtype + 1); 5442 if (array_elem->nelems != 0) 5443 goto error; 5444 5445 moff = __btf_member_bit_offset(t, member) / 8; 5446 if (off < moff) 5447 goto error; 5448 5449 /* Only allow structure for now, can be relaxed for 5450 * other types later. 5451 */ 5452 t = btf_type_skip_modifiers(btf, array_elem->type, 5453 NULL); 5454 if (!btf_type_is_struct(t)) 5455 goto error; 5456 5457 off = (off - moff) % t->size; 5458 goto again; 5459 5460 error: 5461 bpf_log(log, "access beyond struct %s at off %u size %u\n", 5462 tname, off, size); 5463 return -EACCES; 5464 } 5465 5466 for_each_member(i, t, member) { 5467 /* offset of the field in bytes */ 5468 moff = __btf_member_bit_offset(t, member) / 8; 5469 if (off + size <= moff) 5470 /* won't find anything, field is already too far */ 5471 break; 5472 5473 if (__btf_member_bitfield_size(t, member)) { 5474 u32 end_bit = __btf_member_bit_offset(t, member) + 5475 __btf_member_bitfield_size(t, member); 5476 5477 /* off <= moff instead of off == moff because clang 5478 * does not generate a BTF member for anonymous 5479 * bitfield like the ":16" here: 5480 * struct { 5481 * int :16; 5482 * int x:8; 5483 * }; 5484 */ 5485 if (off <= moff && 5486 BITS_ROUNDUP_BYTES(end_bit) <= off + size) 5487 return WALK_SCALAR; 5488 5489 /* off may be accessing a following member, 5490 * 5491 * or 5492 * 5493 * doing a partial access at either end of this 5494 * bitfield. In both cases, continue and treat 5495 * the access as not touching this bitfield; we 5496 * eventually error out as "field not found" 5497 * to keep it simple. 5498 * This could be relaxed if a legitimate 5499 * partial-access case shows up later.
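			 * Worked example (sketch): given
			 *
			 *	struct { int :16; int x:16; };
			 *
			 * x has bit offset 16 and bitfield size 16, so
			 * moff is 2 and end_bit is 32. A 2-byte access at
			 * off 2 satisfies the containment check above and
			 * returns WALK_SCALAR, while a 1-byte access at
			 * off 2 covers only half of x and falls through
			 * here.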
5500 */ 5501 continue; 5502 } 5503 5504 /* "off" may be pointing into a hole (padding) of the struct */ 5505 if (off < moff) 5506 break; 5507 5508 /* type of the field */ 5509 mid = member->type; 5510 mtype = btf_type_by_id(btf, member->type); 5511 mname = __btf_name_by_offset(btf, member->name_off); 5512 5513 mtype = __btf_resolve_size(btf, mtype, &msize, 5514 &elem_type, &elem_id, &total_nelems, 5515 &mid); 5516 if (IS_ERR(mtype)) { 5517 bpf_log(log, "field %s doesn't have size\n", mname); 5518 return -EFAULT; 5519 } 5520 5521 mtrue_end = moff + msize; 5522 if (off >= mtrue_end) 5523 /* no overlap with member, keep iterating */ 5524 continue; 5525 5526 if (btf_type_is_array(mtype)) { 5527 u32 elem_idx; 5528 5529 /* __btf_resolve_size() above helps to 5530 * linearize a multi-dimensional array. 5531 * 5532 * The logic here treats an array 5533 * in a struct in the following way: 5534 * 5535 * struct outer { 5536 * struct inner array[2][2]; 5537 * }; 5538 * 5539 * looks like: 5540 * 5541 * struct outer { 5542 * struct inner array_elem0; 5543 * struct inner array_elem1; 5544 * struct inner array_elem2; 5545 * struct inner array_elem3; 5546 * }; 5547 * 5548 * When accessing outer->array[1][0], it moves 5549 * moff to "array_elem2", sets mtype to 5550 * "struct inner", and msize becomes 5551 * sizeof(struct inner). Most of the 5552 * remaining logic then falls through without 5553 * caring whether the current member is an 5554 * array or not. 5555 * 5556 * Unlike mtype/msize/moff, mtrue_end does not 5557 * change. The "_true" suffix signals that it 5558 * does not always correspond to 5559 * the current mtype/msize/moff. 5560 * It is the true end of the current 5561 * member (i.e. the array in this case). This 5562 * allows an int array to be used like 5563 * a scratch space, 5564 * i.e. it allows access beyond the size of 5565 * the array's element as long as the access 5566 * stays within the mtrue_end boundary.
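			 * Worked example (sketch): if sizeof(struct inner)
			 * is 8, an access at off 12 into outer sees
			 * total_nelems 4 and msize 32 from
			 * __btf_resolve_size(), so below msize becomes
			 * 32 / 4 = 8, elem_idx = (12 - 0) / 8 = 1, moff
			 * moves to 8 and the walk continues inside
			 * array_elem1, while mtrue_end stays at 32.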
5567 */ 5568 5569 /* skip empty array */ 5570 if (moff == mtrue_end) 5571 continue; 5572 5573 msize /= total_nelems; 5574 elem_idx = (off - moff) / msize; 5575 moff += elem_idx * msize; 5576 mtype = elem_type; 5577 mid = elem_id; 5578 } 5579 5580 /* the 'off' we're looking for is either equal to start 5581 * of this field or inside of this struct 5582 */ 5583 if (btf_type_is_struct(mtype)) { 5584 /* our field must be inside that union or struct */ 5585 t = mtype; 5586 5587 /* return if the offset matches the member offset */ 5588 if (off == moff) { 5589 *next_btf_id = mid; 5590 return WALK_STRUCT; 5591 } 5592 5593 /* adjust offset we're looking for */ 5594 off -= moff; 5595 goto again; 5596 } 5597 5598 if (btf_type_is_ptr(mtype)) { 5599 const struct btf_type *stype, *t; 5600 enum bpf_type_flag tmp_flag = 0; 5601 u32 id; 5602 5603 if (msize != size || off != moff) { 5604 bpf_log(log, 5605 "cannot access ptr member %s with moff %u in struct %s with off %u size %u\n", 5606 mname, moff, tname, off, size); 5607 return -EACCES; 5608 } 5609 5610 /* check type tag */ 5611 t = btf_type_by_id(btf, mtype->type); 5612 if (btf_type_is_type_tag(t)) { 5613 tag_value = __btf_name_by_offset(btf, t->name_off); 5614 /* check __user tag */ 5615 if (strcmp(tag_value, "user") == 0) 5616 tmp_flag = MEM_USER; 5617 /* check __percpu tag */ 5618 if (strcmp(tag_value, "percpu") == 0) 5619 tmp_flag = MEM_PERCPU; 5620 } 5621 5622 stype = btf_type_skip_modifiers(btf, mtype->type, &id); 5623 if (btf_type_is_struct(stype)) { 5624 *next_btf_id = id; 5625 *flag = tmp_flag; 5626 return WALK_PTR; 5627 } 5628 } 5629 5630 /* Allow more flexible access within an int as long as 5631 * it is within mtrue_end. 5632 * Since mtrue_end could be the end of an array, 5633 * that also allows using an array of int as a scratch 5634 * space. e.g. skb->cb[]. 5635 */ 5636 if (off + size > mtrue_end) { 5637 bpf_log(log, 5638 "access beyond the end of member %s (mend:%u) in struct %s with off %u size %u\n", 5639 mname, mtrue_end, tname, off, size); 5640 return -EACCES; 5641 } 5642 5643 return WALK_SCALAR; 5644 } 5645 bpf_log(log, "struct %s doesn't have field at offset %d\n", tname, off); 5646 return -EINVAL; 5647 } 5648 5649 int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf, 5650 const struct btf_type *t, int off, int size, 5651 enum bpf_access_type atype __maybe_unused, 5652 u32 *next_btf_id, enum bpf_type_flag *flag) 5653 { 5654 enum bpf_type_flag tmp_flag = 0; 5655 int err; 5656 u32 id; 5657 5658 do { 5659 err = btf_struct_walk(log, btf, t, off, size, &id, &tmp_flag); 5660 5661 switch (err) { 5662 case WALK_PTR: 5663 /* If we found the pointer or scalar on t+off, 5664 * we're done. 5665 */ 5666 *next_btf_id = id; 5667 *flag = tmp_flag; 5668 return PTR_TO_BTF_ID; 5669 case WALK_SCALAR: 5670 return SCALAR_VALUE; 5671 case WALK_STRUCT: 5672 /* We found nested struct, so continue the search 5673 * by diving in it. At this point the offset is 5674 * aligned with the new type, so set it to 0. 5675 */ 5676 t = btf_type_by_id(btf, id); 5677 off = 0; 5678 break; 5679 default: 5680 /* It's either error or unknown return value.. 5681 * scream and leave. 5682 */ 5683 if (WARN_ONCE(err > 0, "unknown btf_struct_walk return value")) 5684 return -EINVAL; 5685 return err; 5686 } 5687 } while (t); 5688 5689 return -EINVAL; 5690 } 5691 5692 /* Check that two BTF types, each specified as an BTF object + id, are exactly 5693 * the same. 
Trivial ID check is not enough due to module BTFs, because we can 5694 * end up with two different module BTFs, but IDs point to the common type in 5695 * vmlinux BTF. 5696 */ 5697 static bool btf_types_are_same(const struct btf *btf1, u32 id1, 5698 const struct btf *btf2, u32 id2) 5699 { 5700 if (id1 != id2) 5701 return false; 5702 if (btf1 == btf2) 5703 return true; 5704 return btf_type_by_id(btf1, id1) == btf_type_by_id(btf2, id2); 5705 } 5706 5707 bool btf_struct_ids_match(struct bpf_verifier_log *log, 5708 const struct btf *btf, u32 id, int off, 5709 const struct btf *need_btf, u32 need_type_id, 5710 bool strict) 5711 { 5712 const struct btf_type *type; 5713 enum bpf_type_flag flag; 5714 int err; 5715 5716 /* Are we already done? */ 5717 if (off == 0 && btf_types_are_same(btf, id, need_btf, need_type_id)) 5718 return true; 5719 /* In case of strict type match, we do not walk struct, the top level 5720 * type match must succeed. When strict is true, off should have already 5721 * been 0. 5722 */ 5723 if (strict) 5724 return false; 5725 again: 5726 type = btf_type_by_id(btf, id); 5727 if (!type) 5728 return false; 5729 err = btf_struct_walk(log, btf, type, off, 1, &id, &flag); 5730 if (err != WALK_STRUCT) 5731 return false; 5732 5733 /* We found nested struct object. If it matches 5734 * the requested ID, we're done. Otherwise let's 5735 * continue the search with offset 0 in the new 5736 * type. 5737 */ 5738 if (!btf_types_are_same(btf, id, need_btf, need_type_id)) { 5739 off = 0; 5740 goto again; 5741 } 5742 5743 return true; 5744 } 5745 5746 static int __get_type_size(struct btf *btf, u32 btf_id, 5747 const struct btf_type **bad_type) 5748 { 5749 const struct btf_type *t; 5750 5751 if (!btf_id) 5752 /* void */ 5753 return 0; 5754 t = btf_type_by_id(btf, btf_id); 5755 while (t && btf_type_is_modifier(t)) 5756 t = btf_type_by_id(btf, t->type); 5757 if (!t) { 5758 *bad_type = btf_type_by_id(btf, 0); 5759 return -EINVAL; 5760 } 5761 if (btf_type_is_ptr(t)) 5762 /* kernel size of pointer. Not BPF's size of pointer*/ 5763 return sizeof(void *); 5764 if (btf_type_is_int(t) || btf_type_is_enum(t)) 5765 return t->size; 5766 *bad_type = t; 5767 return -EINVAL; 5768 } 5769 5770 int btf_distill_func_proto(struct bpf_verifier_log *log, 5771 struct btf *btf, 5772 const struct btf_type *func, 5773 const char *tname, 5774 struct btf_func_model *m) 5775 { 5776 const struct btf_param *args; 5777 const struct btf_type *t; 5778 u32 i, nargs; 5779 int ret; 5780 5781 if (!func) { 5782 /* BTF function prototype doesn't match the verifier types. 5783 * Fall back to MAX_BPF_FUNC_REG_ARGS u64 args. 5784 */ 5785 for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) 5786 m->arg_size[i] = 8; 5787 m->ret_size = 8; 5788 m->nr_args = MAX_BPF_FUNC_REG_ARGS; 5789 return 0; 5790 } 5791 args = (const struct btf_param *)(func + 1); 5792 nargs = btf_type_vlen(func); 5793 if (nargs > MAX_BPF_FUNC_ARGS) { 5794 bpf_log(log, 5795 "The function %s has %d arguments. 
Too many.\n", 5796 tname, nargs); 5797 return -EINVAL; 5798 } 5799 ret = __get_type_size(btf, func->type, &t); 5800 if (ret < 0) { 5801 bpf_log(log, 5802 "The function %s return type %s is unsupported.\n", 5803 tname, btf_kind_str[BTF_INFO_KIND(t->info)]); 5804 return -EINVAL; 5805 } 5806 m->ret_size = ret; 5807 5808 for (i = 0; i < nargs; i++) { 5809 if (i == nargs - 1 && args[i].type == 0) { 5810 bpf_log(log, 5811 "The function %s with variable args is unsupported.\n", 5812 tname); 5813 return -EINVAL; 5814 } 5815 ret = __get_type_size(btf, args[i].type, &t); 5816 if (ret < 0) { 5817 bpf_log(log, 5818 "The function %s arg%d type %s is unsupported.\n", 5819 tname, i, btf_kind_str[BTF_INFO_KIND(t->info)]); 5820 return -EINVAL; 5821 } 5822 if (ret == 0) { 5823 bpf_log(log, 5824 "The function %s has malformed void argument.\n", 5825 tname); 5826 return -EINVAL; 5827 } 5828 m->arg_size[i] = ret; 5829 } 5830 m->nr_args = nargs; 5831 return 0; 5832 } 5833 5834 /* Compare BTFs of two functions assuming only scalars and pointers to context. 5835 * t1 points to BTF_KIND_FUNC in btf1 5836 * t2 points to BTF_KIND_FUNC in btf2 5837 * Returns: 5838 * EINVAL - function prototype mismatch 5839 * EFAULT - verifier bug 5840 * 0 - 99% match. The last 1% is validated by the verifier. 5841 */ 5842 static int btf_check_func_type_match(struct bpf_verifier_log *log, 5843 struct btf *btf1, const struct btf_type *t1, 5844 struct btf *btf2, const struct btf_type *t2) 5845 { 5846 const struct btf_param *args1, *args2; 5847 const char *fn1, *fn2, *s1, *s2; 5848 u32 nargs1, nargs2, i; 5849 5850 fn1 = btf_name_by_offset(btf1, t1->name_off); 5851 fn2 = btf_name_by_offset(btf2, t2->name_off); 5852 5853 if (btf_func_linkage(t1) != BTF_FUNC_GLOBAL) { 5854 bpf_log(log, "%s() is not a global function\n", fn1); 5855 return -EINVAL; 5856 } 5857 if (btf_func_linkage(t2) != BTF_FUNC_GLOBAL) { 5858 bpf_log(log, "%s() is not a global function\n", fn2); 5859 return -EINVAL; 5860 } 5861 5862 t1 = btf_type_by_id(btf1, t1->type); 5863 if (!t1 || !btf_type_is_func_proto(t1)) 5864 return -EFAULT; 5865 t2 = btf_type_by_id(btf2, t2->type); 5866 if (!t2 || !btf_type_is_func_proto(t2)) 5867 return -EFAULT; 5868 5869 args1 = (const struct btf_param *)(t1 + 1); 5870 nargs1 = btf_type_vlen(t1); 5871 args2 = (const struct btf_param *)(t2 + 1); 5872 nargs2 = btf_type_vlen(t2); 5873 5874 if (nargs1 != nargs2) { 5875 bpf_log(log, "%s() has %d args while %s() has %d args\n", 5876 fn1, nargs1, fn2, nargs2); 5877 return -EINVAL; 5878 } 5879 5880 t1 = btf_type_skip_modifiers(btf1, t1->type, NULL); 5881 t2 = btf_type_skip_modifiers(btf2, t2->type, NULL); 5882 if (t1->info != t2->info) { 5883 bpf_log(log, 5884 "Return type %s of %s() doesn't match type %s of %s()\n", 5885 btf_type_str(t1), fn1, 5886 btf_type_str(t2), fn2); 5887 return -EINVAL; 5888 } 5889 5890 for (i = 0; i < nargs1; i++) { 5891 t1 = btf_type_skip_modifiers(btf1, args1[i].type, NULL); 5892 t2 = btf_type_skip_modifiers(btf2, args2[i].type, NULL); 5893 5894 if (t1->info != t2->info) { 5895 bpf_log(log, "arg%d in %s() is %s while %s() has %s\n", 5896 i, fn1, btf_type_str(t1), 5897 fn2, btf_type_str(t2)); 5898 return -EINVAL; 5899 } 5900 if (btf_type_has_size(t1) && t1->size != t2->size) { 5901 bpf_log(log, 5902 "arg%d in %s() has size %d while %s() has %d\n", 5903 i, fn1, t1->size, 5904 fn2, t2->size); 5905 return -EINVAL; 5906 } 5907 5908 /* global functions are validated with scalars and pointers 5909 * to context only. And only global functions can be replaced. 
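		 * E.g. for a global func
		 *
		 *	int handle(struct xdp_md *ctx, int budget);
		 *
		 * only the kind/size of "budget" and the struct name
		 * behind "ctx" are compared below (illustrative
		 * prototype; the loop is generic).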
5910 * Hence type check only those types. 5911 */ 5912 if (btf_type_is_int(t1) || btf_type_is_enum(t1)) 5913 continue; 5914 if (!btf_type_is_ptr(t1)) { 5915 bpf_log(log, 5916 "arg%d in %s() has unrecognized type\n", 5917 i, fn1); 5918 return -EINVAL; 5919 } 5920 t1 = btf_type_skip_modifiers(btf1, t1->type, NULL); 5921 t2 = btf_type_skip_modifiers(btf2, t2->type, NULL); 5922 if (!btf_type_is_struct(t1)) { 5923 bpf_log(log, 5924 "arg%d in %s() is not a pointer to context\n", 5925 i, fn1); 5926 return -EINVAL; 5927 } 5928 if (!btf_type_is_struct(t2)) { 5929 bpf_log(log, 5930 "arg%d in %s() is not a pointer to context\n", 5931 i, fn2); 5932 return -EINVAL; 5933 } 5934 /* This is an optional check to make program writing easier. 5935 * Compare names of structs and report an error to the user. 5936 * btf_prepare_func_args() already checked that t2 struct 5937 * is a context type. btf_prepare_func_args() will check 5938 * later that t1 struct is a context type as well. 5939 */ 5940 s1 = btf_name_by_offset(btf1, t1->name_off); 5941 s2 = btf_name_by_offset(btf2, t2->name_off); 5942 if (strcmp(s1, s2)) { 5943 bpf_log(log, 5944 "arg%d %s(struct %s *) doesn't match %s(struct %s *)\n", 5945 i, fn1, s1, fn2, s2); 5946 return -EINVAL; 5947 } 5948 } 5949 return 0; 5950 } 5951 5952 /* Compare BTFs of given program with BTF of target program */ 5953 int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog, 5954 struct btf *btf2, const struct btf_type *t2) 5955 { 5956 struct btf *btf1 = prog->aux->btf; 5957 const struct btf_type *t1; 5958 u32 btf_id = 0; 5959 5960 if (!prog->aux->func_info) { 5961 bpf_log(log, "Program extension requires BTF\n"); 5962 return -EINVAL; 5963 } 5964 5965 btf_id = prog->aux->func_info[0].type_id; 5966 if (!btf_id) 5967 return -EFAULT; 5968 5969 t1 = btf_type_by_id(btf1, btf_id); 5970 if (!t1 || !btf_type_is_func(t1)) 5971 return -EFAULT; 5972 5973 return btf_check_func_type_match(log, btf1, t1, btf2, t2); 5974 } 5975 5976 static u32 *reg2btf_ids[__BPF_REG_TYPE_MAX] = { 5977 #ifdef CONFIG_NET 5978 [PTR_TO_SOCKET] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK], 5979 [PTR_TO_SOCK_COMMON] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON], 5980 [PTR_TO_TCP_SOCK] = &btf_sock_ids[BTF_SOCK_TYPE_TCP], 5981 #endif 5982 }; 5983 5984 /* Returns true if struct is composed of scalars, 4 levels of nesting allowed */ 5985 static bool __btf_type_is_scalar_struct(struct bpf_verifier_log *log, 5986 const struct btf *btf, 5987 const struct btf_type *t, int rec) 5988 { 5989 const struct btf_type *member_type; 5990 const struct btf_member *member; 5991 u32 i; 5992 5993 if (!btf_type_is_struct(t)) 5994 return false; 5995 5996 for_each_member(i, t, member) { 5997 const struct btf_array *array; 5998 5999 member_type = btf_type_skip_modifiers(btf, member->type, NULL); 6000 if (btf_type_is_struct(member_type)) { 6001 if (rec >= 3) { 6002 bpf_log(log, "max struct nesting depth exceeded\n"); 6003 return false; 6004 } 6005 if (!__btf_type_is_scalar_struct(log, btf, member_type, rec + 1)) 6006 return false; 6007 continue; 6008 } 6009 if (btf_type_is_array(member_type)) { 6010 array = btf_type_array(member_type); 6011 if (!array->nelems) 6012 return false; 6013 member_type = btf_type_skip_modifiers(btf, array->type, NULL); 6014 if (!btf_type_is_scalar(member_type)) 6015 return false; 6016 continue; 6017 } 6018 if (!btf_type_is_scalar(member_type)) 6019 return false; 6020 } 6021 return true; 6022 } 6023 6024 static bool is_kfunc_arg_mem_size(const struct btf *btf, 6025 const struct btf_param *arg, 6026 
const struct bpf_reg_state *reg) 6027 { 6028 int len, sfx_len = sizeof("__sz") - 1; 6029 const struct btf_type *t; 6030 const char *param_name; 6031 6032 t = btf_type_skip_modifiers(btf, arg->type, NULL); 6033 if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE) 6034 return false; 6035 6036 /* In the future, this can be ported to use BTF tagging */ 6037 param_name = btf_name_by_offset(btf, arg->name_off); 6038 if (str_is_empty(param_name)) 6039 return false; 6040 len = strlen(param_name); 6041 if (len < sfx_len) 6042 return false; 6043 param_name += len - sfx_len; 6044 if (strncmp(param_name, "__sz", sfx_len)) 6045 return false; 6046 6047 return true; 6048 } 6049 6050 static int btf_check_func_arg_match(struct bpf_verifier_env *env, 6051 const struct btf *btf, u32 func_id, 6052 struct bpf_reg_state *regs, 6053 bool ptr_to_mem_ok) 6054 { 6055 struct bpf_verifier_log *log = &env->log; 6056 u32 i, nargs, ref_id, ref_obj_id = 0; 6057 bool is_kfunc = btf_is_kernel(btf); 6058 bool rel = false, kptr_get = false; 6059 const char *func_name, *ref_tname; 6060 const struct btf_type *t, *ref_t; 6061 const struct btf_param *args; 6062 int ref_regno = 0, ret; 6063 6064 t = btf_type_by_id(btf, func_id); 6065 if (!t || !btf_type_is_func(t)) { 6066 /* These checks were already done by the verifier while loading 6067 * struct bpf_func_info or in add_kfunc_call(). 6068 */ 6069 bpf_log(log, "BTF of func_id %u doesn't point to KIND_FUNC\n", 6070 func_id); 6071 return -EFAULT; 6072 } 6073 func_name = btf_name_by_offset(btf, t->name_off); 6074 6075 t = btf_type_by_id(btf, t->type); 6076 if (!t || !btf_type_is_func_proto(t)) { 6077 bpf_log(log, "Invalid BTF of func %s\n", func_name); 6078 return -EFAULT; 6079 } 6080 args = (const struct btf_param *)(t + 1); 6081 nargs = btf_type_vlen(t); 6082 if (nargs > MAX_BPF_FUNC_REG_ARGS) { 6083 bpf_log(log, "Function %s has %d > %d args\n", func_name, nargs, 6084 MAX_BPF_FUNC_REG_ARGS); 6085 return -EINVAL; 6086 } 6087 6088 if (is_kfunc) { 6089 /* Only kfunc can be release func */ 6090 rel = btf_kfunc_id_set_contains(btf, resolve_prog_type(env->prog), 6091 BTF_KFUNC_TYPE_RELEASE, func_id); 6092 kptr_get = btf_kfunc_id_set_contains(btf, resolve_prog_type(env->prog), 6093 BTF_KFUNC_TYPE_KPTR_ACQUIRE, func_id); 6094 } 6095 6096 /* check that BTF function arguments match actual types that the 6097 * verifier sees. 
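 * For kfuncs, a scalar argument that follows a pointer and whose name
 * ends in the "__sz" suffix (e.g. a hypothetical "void *mem, u32 mem__sz"
 * pair) is treated as the byte size of the preceding memory argument -
 * see is_kfunc_arg_mem_size() above and the arg_mem_size handling below.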
6098 */
6099 for (i = 0; i < nargs; i++) {
6100 enum bpf_arg_type arg_type = ARG_DONTCARE;
6101 u32 regno = i + 1;
6102 struct bpf_reg_state *reg = &regs[regno];
6103
6104 t = btf_type_skip_modifiers(btf, args[i].type, NULL);
6105 if (btf_type_is_scalar(t)) {
6106 if (reg->type == SCALAR_VALUE)
6107 continue;
6108 bpf_log(log, "R%d is not a scalar\n", regno);
6109 return -EINVAL;
6110 }
6111
6112 if (!btf_type_is_ptr(t)) {
6113 bpf_log(log, "Unrecognized arg#%d type %s\n",
6114 i, btf_type_str(t));
6115 return -EINVAL;
6116 }
6117
6118 ref_t = btf_type_skip_modifiers(btf, t->type, &ref_id);
6119 ref_tname = btf_name_by_offset(btf, ref_t->name_off);
6120
6121 if (rel && reg->ref_obj_id)
6122 arg_type |= OBJ_RELEASE;
6123 ret = check_func_arg_reg_off(env, reg, regno, arg_type);
6124 if (ret < 0)
6125 return ret;
6126
6127 /* kptr_get is only true for kfunc */
6128 if (i == 0 && kptr_get) {
6129 struct bpf_map_value_off_desc *off_desc;
6130
6131 if (reg->type != PTR_TO_MAP_VALUE) {
6132 bpf_log(log, "arg#0 expected pointer to map value\n");
6133 return -EINVAL;
6134 }
6135
6136 /* check_func_arg_reg_off allows var_off for
6137 * PTR_TO_MAP_VALUE, but we need fixed offset to find
6138 * off_desc.
6139 */
6140 if (!tnum_is_const(reg->var_off)) {
6141 bpf_log(log, "arg#0 must have constant offset\n");
6142 return -EINVAL;
6143 }
6144
6145 off_desc = bpf_map_kptr_off_contains(reg->map_ptr, reg->off + reg->var_off.value);
6146 if (!off_desc || off_desc->type != BPF_KPTR_REF) {
6147 bpf_log(log, "arg#0 no referenced kptr at map value offset=%llu\n",
6148 reg->off + reg->var_off.value);
6149 return -EINVAL;
6150 }
6151
6152 if (!btf_type_is_ptr(ref_t)) {
6153 bpf_log(log, "arg#0 BTF type must be a double pointer\n");
6154 return -EINVAL;
6155 }
6156
6157 ref_t = btf_type_skip_modifiers(btf, ref_t->type, &ref_id);
6158 ref_tname = btf_name_by_offset(btf, ref_t->name_off);
6159
6160 if (!btf_type_is_struct(ref_t)) {
6161 bpf_log(log, "kernel function %s args#%d pointer type %s %s is not supported\n",
6162 func_name, i, btf_type_str(ref_t), ref_tname);
6163 return -EINVAL;
6164 }
6165 if (!btf_struct_ids_match(log, btf, ref_id, 0, off_desc->kptr.btf,
6166 off_desc->kptr.btf_id, true)) {
6167 bpf_log(log, "kernel function %s args#%d expected pointer to %s %s\n",
6168 func_name, i, btf_type_str(ref_t), ref_tname);
6169 return -EINVAL;
6170 }
6171 /* rest of the arguments can be anything, like normal kfunc */
6172 } else if (btf_get_prog_ctx_type(log, btf, t, env->prog->type, i)) {
6173 /* If function expects ctx type in BTF check that caller
6174 * is passing PTR_TO_CTX.
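 * (Sketch: for an XDP program this means a kfunc parameter declared as
 * "struct xdp_md *" - assuming that is the program's ctx type - may only
 * receive the program's own context pointer.)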
6175 */
6176 if (reg->type != PTR_TO_CTX) {
6177 bpf_log(log,
6178 "arg#%d expected pointer to ctx, but got %s\n",
6179 i, btf_type_str(t));
6180 return -EINVAL;
6181 }
6182 } else if (is_kfunc && (reg->type == PTR_TO_BTF_ID ||
6183 (reg2btf_ids[base_type(reg->type)] && !type_flag(reg->type)))) {
6184 const struct btf_type *reg_ref_t;
6185 const struct btf *reg_btf;
6186 const char *reg_ref_tname;
6187 u32 reg_ref_id;
6188
6189 if (!btf_type_is_struct(ref_t)) {
6190 bpf_log(log, "kernel function %s args#%d pointer type %s %s is not supported\n",
6191 func_name, i, btf_type_str(ref_t),
6192 ref_tname);
6193 return -EINVAL;
6194 }
6195
6196 if (reg->type == PTR_TO_BTF_ID) {
6197 reg_btf = reg->btf;
6198 reg_ref_id = reg->btf_id;
6199 /* Ensure only one argument is referenced PTR_TO_BTF_ID */
6200 if (reg->ref_obj_id) {
6201 if (ref_obj_id) {
6202 bpf_log(log, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
6203 regno, reg->ref_obj_id, ref_obj_id);
6204 return -EFAULT;
6205 }
6206 ref_regno = regno;
6207 ref_obj_id = reg->ref_obj_id;
6208 }
6209 } else {
6210 reg_btf = btf_vmlinux;
6211 reg_ref_id = *reg2btf_ids[base_type(reg->type)];
6212 }
6213
6214 reg_ref_t = btf_type_skip_modifiers(reg_btf, reg_ref_id,
6215 &reg_ref_id);
6216 reg_ref_tname = btf_name_by_offset(reg_btf,
6217 reg_ref_t->name_off);
6218 if (!btf_struct_ids_match(log, reg_btf, reg_ref_id,
6219 reg->off, btf, ref_id, rel && reg->ref_obj_id)) {
6220 bpf_log(log, "kernel function %s args#%d expected pointer to %s %s but R%d has a pointer to %s %s\n",
6221 func_name, i,
6222 btf_type_str(ref_t), ref_tname,
6223 regno, btf_type_str(reg_ref_t),
6224 reg_ref_tname);
6225 return -EINVAL;
6226 }
6227 } else if (ptr_to_mem_ok) {
6228 const struct btf_type *resolve_ret;
6229 u32 type_size;
6230
6231 if (is_kfunc) {
6232 bool arg_mem_size = i + 1 < nargs && is_kfunc_arg_mem_size(btf, &args[i + 1], &regs[regno + 1]);
6233
6234 /* Permit pointer to mem, but only when argument
6235 * type is pointer to scalar, or struct composed
6236 * (recursively) of scalars.
6237 * When arg_mem_size is true, the pointer can be
6238 * void *.
6239 */
6240 if (!btf_type_is_scalar(ref_t) &&
6241 !__btf_type_is_scalar_struct(log, btf, ref_t, 0) &&
6242 (arg_mem_size ? !btf_type_is_void(ref_t) : 1)) {
6243 bpf_log(log,
6244 "arg#%d pointer type %s %s must point to %sscalar, or struct with scalar\n",
6245 i, btf_type_str(ref_t), ref_tname, arg_mem_size ? "void, " : "");
6246 return -EINVAL;
6247 }
6248
6249 /* Check for mem, len pair */
6250 if (arg_mem_size) {
6251 if (check_kfunc_mem_size_reg(env, &regs[regno + 1], regno + 1)) {
6252 bpf_log(log, "arg#%d arg#%d memory, len pair leads to invalid memory access\n",
6253 i, i + 1);
6254 return -EINVAL;
6255 }
6256 i++;
6257 continue;
6258 }
6259 }
6260
6261 resolve_ret = btf_resolve_size(btf, ref_t, &type_size);
6262 if (IS_ERR(resolve_ret)) {
6263 bpf_log(log,
6264 "arg#%d reference type('%s %s') size cannot be determined: %ld\n",
6265 i, btf_type_str(ref_t), ref_tname,
6266 PTR_ERR(resolve_ret));
6267 return -EINVAL;
6268 }
6269
6270 if (check_mem_reg(env, reg, regno, type_size))
6271 return -EINVAL;
6272 } else {
6273 bpf_log(log, "reg type unsupported for arg#%d %sfunction %s#%d\n", i,
6274 is_kfunc ? "kernel " : "", func_name, func_id);
6275 return -EINVAL;
6276 }
6277 }
6278
6279 /* Either both are set, or neither */
6280 WARN_ON_ONCE((ref_obj_id && !ref_regno) || (!ref_obj_id && ref_regno));
6281 /* We already made sure ref_obj_id is set only for one argument.
We do
6282 * allow (!rel && ref_obj_id), so that passing such referenced
6283 * PTR_TO_BTF_ID to other kfuncs works. Note that rel is only true when
6284 * is_kfunc is true.
6285 */
6286 if (rel && !ref_obj_id) {
6287 bpf_log(log, "release kernel function %s expects refcounted PTR_TO_BTF_ID\n",
6288 func_name);
6289 return -EINVAL;
6290 }
6291 /* returns argument register number > 0 in case of reference release kfunc */
6292 return rel ? ref_regno : 0;
6293 }
6294
6295 /* Compare BTF of a function with given bpf_reg_state.
6296 * Returns:
6297 * EFAULT - there is a verifier bug. Abort verification.
6298 * EINVAL - there is a type mismatch or BTF is not available.
6299 * 0 - BTF matches with what bpf_reg_state expects.
6300 * Only PTR_TO_CTX and SCALAR_VALUE states are recognized.
6301 */
6302 int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog,
6303 struct bpf_reg_state *regs)
6304 {
6305 struct bpf_prog *prog = env->prog;
6306 struct btf *btf = prog->aux->btf;
6307 bool is_global;
6308 u32 btf_id;
6309 int err;
6310
6311 if (!prog->aux->func_info)
6312 return -EINVAL;
6313
6314 btf_id = prog->aux->func_info[subprog].type_id;
6315 if (!btf_id)
6316 return -EFAULT;
6317
6318 if (prog->aux->func_info_aux[subprog].unreliable)
6319 return -EINVAL;
6320
6321 is_global = prog->aux->func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL;
6322 err = btf_check_func_arg_match(env, btf, btf_id, regs, is_global);
6323
6324 /* Compiler optimizations can remove arguments from static functions,
6325 * or a mismatched type can be passed into a global function.
6326 * In such cases mark the function as unreliable from the BTF point of view.
6327 */
6328 if (err)
6329 prog->aux->func_info_aux[subprog].unreliable = true;
6330 return err;
6331 }
6332
6333 int btf_check_kfunc_arg_match(struct bpf_verifier_env *env,
6334 const struct btf *btf, u32 func_id,
6335 struct bpf_reg_state *regs)
6336 {
6337 return btf_check_func_arg_match(env, btf, func_id, regs, true);
6338 }
6339
6340 /* Convert BTF of a function into bpf_reg_state if possible
6341 * Returns:
6342 * EFAULT - there is a verifier bug. Abort verification.
6343 * EINVAL - cannot convert BTF.
6344 * 0 - Successfully converted BTF into bpf_reg_state
6345 * (either PTR_TO_CTX or SCALAR_VALUE).
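 * E.g. (illustrative only): a global subprog declared as
 * "int g(int x, struct xdp_md *ctx)" in an XDP program would get
 * R1 = SCALAR_VALUE and R2 = PTR_TO_CTX.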
6346 */
6347 int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
6348 struct bpf_reg_state *regs)
6349 {
6350 struct bpf_verifier_log *log = &env->log;
6351 struct bpf_prog *prog = env->prog;
6352 enum bpf_prog_type prog_type = prog->type;
6353 struct btf *btf = prog->aux->btf;
6354 const struct btf_param *args;
6355 const struct btf_type *t, *ref_t;
6356 u32 i, nargs, btf_id;
6357 const char *tname;
6358
6359 if (!prog->aux->func_info ||
6360 prog->aux->func_info_aux[subprog].linkage != BTF_FUNC_GLOBAL) {
6361 bpf_log(log, "Verifier bug\n");
6362 return -EFAULT;
6363 }
6364
6365 btf_id = prog->aux->func_info[subprog].type_id;
6366 if (!btf_id) {
6367 bpf_log(log, "Global functions need valid BTF\n");
6368 return -EFAULT;
6369 }
6370
6371 t = btf_type_by_id(btf, btf_id);
6372 if (!t || !btf_type_is_func(t)) {
6373 /* These checks were already done by the verifier while loading
6374 * struct bpf_func_info
6375 */
6376 bpf_log(log, "BTF of func#%d doesn't point to KIND_FUNC\n",
6377 subprog);
6378 return -EFAULT;
6379 }
6380 tname = btf_name_by_offset(btf, t->name_off);
6381
6382 if (log->level & BPF_LOG_LEVEL)
6383 bpf_log(log, "Validating %s() func#%d...\n",
6384 tname, subprog);
6385
6386 if (prog->aux->func_info_aux[subprog].unreliable) {
6387 bpf_log(log, "Verifier bug in function %s()\n", tname);
6388 return -EFAULT;
6389 }
6390 if (prog_type == BPF_PROG_TYPE_EXT)
6391 prog_type = prog->aux->dst_prog->type;
6392
6393 t = btf_type_by_id(btf, t->type);
6394 if (!t || !btf_type_is_func_proto(t)) {
6395 bpf_log(log, "Invalid type of function %s()\n", tname);
6396 return -EFAULT;
6397 }
6398 args = (const struct btf_param *)(t + 1);
6399 nargs = btf_type_vlen(t);
6400 if (nargs > MAX_BPF_FUNC_REG_ARGS) {
6401 bpf_log(log, "Global function %s() with %d > %d args. Buggy compiler.\n",
6402 tname, nargs, MAX_BPF_FUNC_REG_ARGS);
6403 return -EINVAL;
6404 }
6405 /* check that the function returns int or enum */
6406 t = btf_type_by_id(btf, t->type);
6407 while (btf_type_is_modifier(t))
6408 t = btf_type_by_id(btf, t->type);
6409 if (!btf_type_is_int(t) && !btf_type_is_enum(t)) {
6410 bpf_log(log,
6411 "Global function %s() doesn't return scalar. Only those are supported.\n",
6412 tname);
6413 return -EINVAL;
6414 }
6415 /* Convert BTF function arguments into verifier types.
6416 * Only PTR_TO_CTX and SCALAR are supported atm.
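 * (Other pointer arguments whose pointee size can be resolved are
 * additionally converted to PTR_TO_MEM | PTR_MAYBE_NULL in the loop
 * below.)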
6417 */
6418 for (i = 0; i < nargs; i++) {
6419 struct bpf_reg_state *reg = &regs[i + 1];
6420
6421 t = btf_type_by_id(btf, args[i].type);
6422 while (btf_type_is_modifier(t))
6423 t = btf_type_by_id(btf, t->type);
6424 if (btf_type_is_int(t) || btf_type_is_enum(t)) {
6425 reg->type = SCALAR_VALUE;
6426 continue;
6427 }
6428 if (btf_type_is_ptr(t)) {
6429 if (btf_get_prog_ctx_type(log, btf, t, prog_type, i)) {
6430 reg->type = PTR_TO_CTX;
6431 continue;
6432 }
6433
6434 t = btf_type_skip_modifiers(btf, t->type, NULL);
6435
6436 ref_t = btf_resolve_size(btf, t, &reg->mem_size);
6437 if (IS_ERR(ref_t)) {
6438 bpf_log(log,
6439 "arg#%d reference type('%s %s') size cannot be determined: %ld\n",
6440 i, btf_type_str(t), btf_name_by_offset(btf, t->name_off),
6441 PTR_ERR(ref_t));
6442 return -EINVAL;
6443 }
6444
6445 reg->type = PTR_TO_MEM | PTR_MAYBE_NULL;
6446 reg->id = ++env->id_gen;
6447
6448 continue;
6449 }
6450 bpf_log(log, "Arg#%d type %s in %s() is not supported yet.\n",
6451 i, btf_kind_str[BTF_INFO_KIND(t->info)], tname);
6452 return -EINVAL;
6453 }
6454 return 0;
6455 }
6456
6457 static void btf_type_show(const struct btf *btf, u32 type_id, void *obj,
6458 struct btf_show *show)
6459 {
6460 const struct btf_type *t = btf_type_by_id(btf, type_id);
6461
6462 show->btf = btf;
6463 memset(&show->state, 0, sizeof(show->state));
6464 memset(&show->obj, 0, sizeof(show->obj));
6465
6466 btf_type_ops(t)->show(btf, t, type_id, obj, 0, show);
6467 }
6468
6469 static void btf_seq_show(struct btf_show *show, const char *fmt,
6470 va_list args)
6471 {
6472 seq_vprintf((struct seq_file *)show->target, fmt, args);
6473 }
6474
6475 int btf_type_seq_show_flags(const struct btf *btf, u32 type_id,
6476 void *obj, struct seq_file *m, u64 flags)
6477 {
6478 struct btf_show sseq;
6479
6480 sseq.target = m;
6481 sseq.showfn = btf_seq_show;
6482 sseq.flags = flags;
6483
6484 btf_type_show(btf, type_id, obj, &sseq);
6485
6486 return sseq.state.status;
6487 }
6488
6489 void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
6490 struct seq_file *m)
6491 {
6492 (void) btf_type_seq_show_flags(btf, type_id, obj, m,
6493 BTF_SHOW_NONAME | BTF_SHOW_COMPACT |
6494 BTF_SHOW_ZERO | BTF_SHOW_UNSAFE);
6495 }
6496
6497 struct btf_show_snprintf {
6498 struct btf_show show;
6499 int len_left; /* space left in string */
6500 int len; /* length we would have written */
6501 };
6502
6503 static void btf_snprintf_show(struct btf_show *show, const char *fmt,
6504 va_list args)
6505 {
6506 struct btf_show_snprintf *ssnprintf = (struct btf_show_snprintf *)show;
6507 int len;
6508
6509 len = vsnprintf(show->target, ssnprintf->len_left, fmt, args);
6510
6511 if (len < 0) {
6512 ssnprintf->len_left = 0;
6513 ssnprintf->len = len;
6514 } else if (len > ssnprintf->len_left) {
6515 /* no space, drive on to get length we would have written */
6516 ssnprintf->len_left = 0;
6517 ssnprintf->len += len;
6518 } else {
6519 ssnprintf->len_left -= len;
6520 ssnprintf->len += len;
6521 show->target += len;
6522 }
6523 }
6524
6525 int btf_type_snprintf_show(const struct btf *btf, u32 type_id, void *obj,
6526 char *buf, int len, u64 flags)
6527 {
6528 struct btf_show_snprintf ssnprintf;
6529
6530 ssnprintf.show.target = buf;
6531 ssnprintf.show.flags = flags;
6532 ssnprintf.show.showfn = btf_snprintf_show;
6533 ssnprintf.len_left = len;
6534 ssnprintf.len = 0;
6535
6536 btf_type_show(btf, type_id, obj, (struct btf_show *)&ssnprintf);
6537
6538 /* If we encountered an error, return it.
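 * Like snprintf(), the length returned below may exceed the "len"
 * passed in when the output had to be truncated.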
*/ 6539 if (ssnprintf.show.state.status) 6540 return ssnprintf.show.state.status; 6541 6542 /* Otherwise return length we would have written */ 6543 return ssnprintf.len; 6544 } 6545 6546 #ifdef CONFIG_PROC_FS 6547 static void bpf_btf_show_fdinfo(struct seq_file *m, struct file *filp) 6548 { 6549 const struct btf *btf = filp->private_data; 6550 6551 seq_printf(m, "btf_id:\t%u\n", btf->id); 6552 } 6553 #endif 6554 6555 static int btf_release(struct inode *inode, struct file *filp) 6556 { 6557 btf_put(filp->private_data); 6558 return 0; 6559 } 6560 6561 const struct file_operations btf_fops = { 6562 #ifdef CONFIG_PROC_FS 6563 .show_fdinfo = bpf_btf_show_fdinfo, 6564 #endif 6565 .release = btf_release, 6566 }; 6567 6568 static int __btf_new_fd(struct btf *btf) 6569 { 6570 return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC); 6571 } 6572 6573 int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr) 6574 { 6575 struct btf *btf; 6576 int ret; 6577 6578 btf = btf_parse(make_bpfptr(attr->btf, uattr.is_kernel), 6579 attr->btf_size, attr->btf_log_level, 6580 u64_to_user_ptr(attr->btf_log_buf), 6581 attr->btf_log_size); 6582 if (IS_ERR(btf)) 6583 return PTR_ERR(btf); 6584 6585 ret = btf_alloc_id(btf); 6586 if (ret) { 6587 btf_free(btf); 6588 return ret; 6589 } 6590 6591 /* 6592 * The BTF ID is published to the userspace. 6593 * All BTF free must go through call_rcu() from 6594 * now on (i.e. free by calling btf_put()). 6595 */ 6596 6597 ret = __btf_new_fd(btf); 6598 if (ret < 0) 6599 btf_put(btf); 6600 6601 return ret; 6602 } 6603 6604 struct btf *btf_get_by_fd(int fd) 6605 { 6606 struct btf *btf; 6607 struct fd f; 6608 6609 f = fdget(fd); 6610 6611 if (!f.file) 6612 return ERR_PTR(-EBADF); 6613 6614 if (f.file->f_op != &btf_fops) { 6615 fdput(f); 6616 return ERR_PTR(-EINVAL); 6617 } 6618 6619 btf = f.file->private_data; 6620 refcount_inc(&btf->refcnt); 6621 fdput(f); 6622 6623 return btf; 6624 } 6625 6626 int btf_get_info_by_fd(const struct btf *btf, 6627 const union bpf_attr *attr, 6628 union bpf_attr __user *uattr) 6629 { 6630 struct bpf_btf_info __user *uinfo; 6631 struct bpf_btf_info info; 6632 u32 info_copy, btf_copy; 6633 void __user *ubtf; 6634 char __user *uname; 6635 u32 uinfo_len, uname_len, name_len; 6636 int ret = 0; 6637 6638 uinfo = u64_to_user_ptr(attr->info.info); 6639 uinfo_len = attr->info.info_len; 6640 6641 info_copy = min_t(u32, uinfo_len, sizeof(info)); 6642 memset(&info, 0, sizeof(info)); 6643 if (copy_from_user(&info, uinfo, info_copy)) 6644 return -EFAULT; 6645 6646 info.id = btf->id; 6647 ubtf = u64_to_user_ptr(info.btf); 6648 btf_copy = min_t(u32, btf->data_size, info.btf_size); 6649 if (copy_to_user(ubtf, btf->data, btf_copy)) 6650 return -EFAULT; 6651 info.btf_size = btf->data_size; 6652 6653 info.kernel_btf = btf->kernel_btf; 6654 6655 uname = u64_to_user_ptr(info.name); 6656 uname_len = info.name_len; 6657 if (!uname ^ !uname_len) 6658 return -EINVAL; 6659 6660 name_len = strlen(btf->name); 6661 info.name_len = name_len; 6662 6663 if (uname) { 6664 if (uname_len >= name_len + 1) { 6665 if (copy_to_user(uname, btf->name, name_len + 1)) 6666 return -EFAULT; 6667 } else { 6668 char zero = '\0'; 6669 6670 if (copy_to_user(uname, btf->name, uname_len - 1)) 6671 return -EFAULT; 6672 if (put_user(zero, uname + uname_len - 1)) 6673 return -EFAULT; 6674 /* let user-space know about too short buffer */ 6675 ret = -ENOSPC; 6676 } 6677 } 6678 6679 if (copy_to_user(uinfo, &info, info_copy) || 6680 put_user(info_copy, &uattr->info.info_len)) 6681 return -EFAULT; 
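	/* ret is 0 on success, or -ENOSPC if the user's name buffer was
	 * too short; the info copied above is valid either way.
	 */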
6682 6683 return ret; 6684 } 6685 6686 int btf_get_fd_by_id(u32 id) 6687 { 6688 struct btf *btf; 6689 int fd; 6690 6691 rcu_read_lock(); 6692 btf = idr_find(&btf_idr, id); 6693 if (!btf || !refcount_inc_not_zero(&btf->refcnt)) 6694 btf = ERR_PTR(-ENOENT); 6695 rcu_read_unlock(); 6696 6697 if (IS_ERR(btf)) 6698 return PTR_ERR(btf); 6699 6700 fd = __btf_new_fd(btf); 6701 if (fd < 0) 6702 btf_put(btf); 6703 6704 return fd; 6705 } 6706 6707 u32 btf_obj_id(const struct btf *btf) 6708 { 6709 return btf->id; 6710 } 6711 6712 bool btf_is_kernel(const struct btf *btf) 6713 { 6714 return btf->kernel_btf; 6715 } 6716 6717 bool btf_is_module(const struct btf *btf) 6718 { 6719 return btf->kernel_btf && strcmp(btf->name, "vmlinux") != 0; 6720 } 6721 6722 static int btf_id_cmp_func(const void *a, const void *b) 6723 { 6724 const int *pa = a, *pb = b; 6725 6726 return *pa - *pb; 6727 } 6728 6729 bool btf_id_set_contains(const struct btf_id_set *set, u32 id) 6730 { 6731 return bsearch(&id, set->ids, set->cnt, sizeof(u32), btf_id_cmp_func) != NULL; 6732 } 6733 6734 enum { 6735 BTF_MODULE_F_LIVE = (1 << 0), 6736 }; 6737 6738 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES 6739 struct btf_module { 6740 struct list_head list; 6741 struct module *module; 6742 struct btf *btf; 6743 struct bin_attribute *sysfs_attr; 6744 int flags; 6745 }; 6746 6747 static LIST_HEAD(btf_modules); 6748 static DEFINE_MUTEX(btf_module_mutex); 6749 6750 static ssize_t 6751 btf_module_read(struct file *file, struct kobject *kobj, 6752 struct bin_attribute *bin_attr, 6753 char *buf, loff_t off, size_t len) 6754 { 6755 const struct btf *btf = bin_attr->private; 6756 6757 memcpy(buf, btf->data + off, len); 6758 return len; 6759 } 6760 6761 static void purge_cand_cache(struct btf *btf); 6762 6763 static int btf_module_notify(struct notifier_block *nb, unsigned long op, 6764 void *module) 6765 { 6766 struct btf_module *btf_mod, *tmp; 6767 struct module *mod = module; 6768 struct btf *btf; 6769 int err = 0; 6770 6771 if (mod->btf_data_size == 0 || 6772 (op != MODULE_STATE_COMING && op != MODULE_STATE_LIVE && 6773 op != MODULE_STATE_GOING)) 6774 goto out; 6775 6776 switch (op) { 6777 case MODULE_STATE_COMING: 6778 btf_mod = kzalloc(sizeof(*btf_mod), GFP_KERNEL); 6779 if (!btf_mod) { 6780 err = -ENOMEM; 6781 goto out; 6782 } 6783 btf = btf_parse_module(mod->name, mod->btf_data, mod->btf_data_size); 6784 if (IS_ERR(btf)) { 6785 pr_warn("failed to validate module [%s] BTF: %ld\n", 6786 mod->name, PTR_ERR(btf)); 6787 kfree(btf_mod); 6788 if (!IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH)) 6789 err = PTR_ERR(btf); 6790 goto out; 6791 } 6792 err = btf_alloc_id(btf); 6793 if (err) { 6794 btf_free(btf); 6795 kfree(btf_mod); 6796 goto out; 6797 } 6798 6799 purge_cand_cache(NULL); 6800 mutex_lock(&btf_module_mutex); 6801 btf_mod->module = module; 6802 btf_mod->btf = btf; 6803 list_add(&btf_mod->list, &btf_modules); 6804 mutex_unlock(&btf_module_mutex); 6805 6806 if (IS_ENABLED(CONFIG_SYSFS)) { 6807 struct bin_attribute *attr; 6808 6809 attr = kzalloc(sizeof(*attr), GFP_KERNEL); 6810 if (!attr) 6811 goto out; 6812 6813 sysfs_bin_attr_init(attr); 6814 attr->attr.name = btf->name; 6815 attr->attr.mode = 0444; 6816 attr->size = btf->data_size; 6817 attr->private = btf; 6818 attr->read = btf_module_read; 6819 6820 err = sysfs_create_bin_file(btf_kobj, attr); 6821 if (err) { 6822 pr_warn("failed to register module [%s] BTF in sysfs: %d\n", 6823 mod->name, err); 6824 kfree(attr); 6825 err = 0; 6826 goto out; 6827 } 6828 6829 btf_mod->sysfs_attr = attr; 6830 } 6831 6832 
break;
6833 case MODULE_STATE_LIVE:
6834 mutex_lock(&btf_module_mutex);
6835 list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
6836 if (btf_mod->module != module)
6837 continue;
6838
6839 btf_mod->flags |= BTF_MODULE_F_LIVE;
6840 break;
6841 }
6842 mutex_unlock(&btf_module_mutex);
6843 break;
6844 case MODULE_STATE_GOING:
6845 mutex_lock(&btf_module_mutex);
6846 list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
6847 if (btf_mod->module != module)
6848 continue;
6849
6850 list_del(&btf_mod->list);
6851 if (btf_mod->sysfs_attr)
6852 sysfs_remove_bin_file(btf_kobj, btf_mod->sysfs_attr);
6853 purge_cand_cache(btf_mod->btf);
6854 btf_put(btf_mod->btf);
6855 kfree(btf_mod->sysfs_attr);
6856 kfree(btf_mod);
6857 break;
6858 }
6859 mutex_unlock(&btf_module_mutex);
6860 break;
6861 }
6862 out:
6863 return notifier_from_errno(err);
6864 }
6865
6866 static struct notifier_block btf_module_nb = {
6867 .notifier_call = btf_module_notify,
6868 };
6869
6870 static int __init btf_module_init(void)
6871 {
6872 register_module_notifier(&btf_module_nb);
6873 return 0;
6874 }
6875
6876 fs_initcall(btf_module_init);
6877 #endif /* CONFIG_DEBUG_INFO_BTF_MODULES */
6878
6879 struct module *btf_try_get_module(const struct btf *btf)
6880 {
6881 struct module *res = NULL;
6882 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
6883 struct btf_module *btf_mod, *tmp;
6884
6885 mutex_lock(&btf_module_mutex);
6886 list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
6887 if (btf_mod->btf != btf)
6888 continue;
6889
6890 /* We must only consider a module whose __init routine has
6891 * finished, hence we must check for the BTF_MODULE_F_LIVE flag,
6892 * which is set from the notifier callback for
6893 * MODULE_STATE_LIVE.
6894 */
6895 if ((btf_mod->flags & BTF_MODULE_F_LIVE) && try_module_get(btf_mod->module))
6896 res = btf_mod->module;
6897
6898 break;
6899 }
6900 mutex_unlock(&btf_module_mutex);
6901 #endif
6902
6903 return res;
6904 }
6905
6906 /* Returns struct btf corresponding to the struct module.
6907 * This function can return NULL or ERR_PTR.
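 * A NULL return means no matching BTF was found (e.g. module BTF
 * support is compiled out); callers must btf_put() any btf returned
 * here once they are done with it.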
6908 */
6909 static struct btf *btf_get_module_btf(const struct module *module)
6910 {
6911 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
6912 struct btf_module *btf_mod, *tmp;
6913 #endif
6914 struct btf *btf = NULL;
6915
6916 if (!module) {
6917 btf = bpf_get_btf_vmlinux();
6918 if (!IS_ERR_OR_NULL(btf))
6919 btf_get(btf);
6920 return btf;
6921 }
6922
6923 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
6924 mutex_lock(&btf_module_mutex);
6925 list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
6926 if (btf_mod->module != module)
6927 continue;
6928
6929 btf_get(btf_mod->btf);
6930 btf = btf_mod->btf;
6931 break;
6932 }
6933 mutex_unlock(&btf_module_mutex);
6934 #endif
6935
6936 return btf;
6937 }
6938
6939 BPF_CALL_4(bpf_btf_find_by_name_kind, char *, name, int, name_sz, u32, kind, int, flags)
6940 {
6941 struct btf *btf = NULL;
6942 int btf_obj_fd = 0;
6943 long ret;
6944
6945 if (flags)
6946 return -EINVAL;
6947
6948 if (name_sz <= 1 || name[name_sz - 1])
6949 return -EINVAL;
6950
6951 ret = bpf_find_btf_id(name, kind, &btf);
6952 if (ret > 0 && btf_is_module(btf)) {
6953 btf_obj_fd = __btf_new_fd(btf);
6954 if (btf_obj_fd < 0) {
6955 btf_put(btf);
6956 return btf_obj_fd;
6957 }
6958 return ret | (((u64)btf_obj_fd) << 32);
6959 }
6960 if (ret > 0)
6961 btf_put(btf);
6962 return ret;
6963 }
6964
6965 const struct bpf_func_proto bpf_btf_find_by_name_kind_proto = {
6966 .func = bpf_btf_find_by_name_kind,
6967 .gpl_only = false,
6968 .ret_type = RET_INTEGER,
6969 .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
6970 .arg2_type = ARG_CONST_SIZE,
6971 .arg3_type = ARG_ANYTHING,
6972 .arg4_type = ARG_ANYTHING,
6973 };
6974
6975 BTF_ID_LIST_GLOBAL(btf_tracing_ids, MAX_BTF_TRACING_TYPE)
6976 #define BTF_TRACING_TYPE(name, type) BTF_ID(struct, type)
6977 BTF_TRACING_TYPE_xxx
6978 #undef BTF_TRACING_TYPE
6979
6980 /* Kernel Function (kfunc) BTF ID set registration API */
6981
6982 static int __btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook,
6983 enum btf_kfunc_type type,
6984 struct btf_id_set *add_set, bool vmlinux_set)
6985 {
6986 struct btf_kfunc_set_tab *tab;
6987 struct btf_id_set *set;
6988 u32 set_cnt;
6989 int ret;
6990
6991 if (hook >= BTF_KFUNC_HOOK_MAX || type >= BTF_KFUNC_TYPE_MAX) {
6992 ret = -EINVAL;
6993 goto end;
6994 }
6995
6996 if (!add_set->cnt)
6997 return 0;
6998
6999 tab = btf->kfunc_set_tab;
7000 if (!tab) {
7001 tab = kzalloc(sizeof(*tab), GFP_KERNEL | __GFP_NOWARN);
7002 if (!tab)
7003 return -ENOMEM;
7004 btf->kfunc_set_tab = tab;
7005 }
7006
7007 set = tab->sets[hook][type];
7008 /* Warn when register_btf_kfunc_id_set is called twice for the same hook
7009 * for module sets.
7010 */
7011 if (WARN_ON_ONCE(set && !vmlinux_set)) {
7012 ret = -EINVAL;
7013 goto end;
7014 }
7015
7016 /* We don't need to allocate, concatenate, and sort module sets, because
7017 * only one is allowed per hook. Hence, we can directly assign the
7018 * pointer and return.
7019 */
7020 if (!vmlinux_set) {
7021 tab->sets[hook][type] = add_set;
7022 return 0;
7023 }
7024
7025 /* In case of vmlinux sets, there may be more than one set being
7026 * registered per hook. To create a unified set, we allocate a new set
7027 * and concatenate all individual sets being registered. While each set
7028 * is individually sorted, they may become unsorted when concatenated,
7029 * hence the final set must be sorted again so that binary
7030 * searching it with btf_id_set_contains() works.
7031 */
7032 set_cnt = set ?
set->cnt : 0; 7033 7034 if (set_cnt > U32_MAX - add_set->cnt) { 7035 ret = -EOVERFLOW; 7036 goto end; 7037 } 7038 7039 if (set_cnt + add_set->cnt > BTF_KFUNC_SET_MAX_CNT) { 7040 ret = -E2BIG; 7041 goto end; 7042 } 7043 7044 /* Grow set */ 7045 set = krealloc(tab->sets[hook][type], 7046 offsetof(struct btf_id_set, ids[set_cnt + add_set->cnt]), 7047 GFP_KERNEL | __GFP_NOWARN); 7048 if (!set) { 7049 ret = -ENOMEM; 7050 goto end; 7051 } 7052 7053 /* For newly allocated set, initialize set->cnt to 0 */ 7054 if (!tab->sets[hook][type]) 7055 set->cnt = 0; 7056 tab->sets[hook][type] = set; 7057 7058 /* Concatenate the two sets */ 7059 memcpy(set->ids + set->cnt, add_set->ids, add_set->cnt * sizeof(set->ids[0])); 7060 set->cnt += add_set->cnt; 7061 7062 sort(set->ids, set->cnt, sizeof(set->ids[0]), btf_id_cmp_func, NULL); 7063 7064 return 0; 7065 end: 7066 btf_free_kfunc_set_tab(btf); 7067 return ret; 7068 } 7069 7070 static int btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook, 7071 const struct btf_kfunc_id_set *kset) 7072 { 7073 bool vmlinux_set = !btf_is_module(btf); 7074 int type, ret = 0; 7075 7076 for (type = 0; type < ARRAY_SIZE(kset->sets); type++) { 7077 if (!kset->sets[type]) 7078 continue; 7079 7080 ret = __btf_populate_kfunc_set(btf, hook, type, kset->sets[type], vmlinux_set); 7081 if (ret) 7082 break; 7083 } 7084 return ret; 7085 } 7086 7087 static bool __btf_kfunc_id_set_contains(const struct btf *btf, 7088 enum btf_kfunc_hook hook, 7089 enum btf_kfunc_type type, 7090 u32 kfunc_btf_id) 7091 { 7092 struct btf_id_set *set; 7093 7094 if (hook >= BTF_KFUNC_HOOK_MAX || type >= BTF_KFUNC_TYPE_MAX) 7095 return false; 7096 if (!btf->kfunc_set_tab) 7097 return false; 7098 set = btf->kfunc_set_tab->sets[hook][type]; 7099 if (!set) 7100 return false; 7101 return btf_id_set_contains(set, kfunc_btf_id); 7102 } 7103 7104 static int bpf_prog_type_to_kfunc_hook(enum bpf_prog_type prog_type) 7105 { 7106 switch (prog_type) { 7107 case BPF_PROG_TYPE_XDP: 7108 return BTF_KFUNC_HOOK_XDP; 7109 case BPF_PROG_TYPE_SCHED_CLS: 7110 return BTF_KFUNC_HOOK_TC; 7111 case BPF_PROG_TYPE_STRUCT_OPS: 7112 return BTF_KFUNC_HOOK_STRUCT_OPS; 7113 default: 7114 return BTF_KFUNC_HOOK_MAX; 7115 } 7116 } 7117 7118 /* Caution: 7119 * Reference to the module (obtained using btf_try_get_module) corresponding to 7120 * the struct btf *MUST* be held when calling this function from verifier 7121 * context. This is usually true as we stash references in prog's kfunc_btf_tab; 7122 * keeping the reference for the duration of the call provides the necessary 7123 * protection for looking up a well-formed btf->kfunc_set_tab. 
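 *
 * A minimal registration sketch, with hypothetical names (the module
 * side is not part of this file):
 *
 *	BTF_SET_START(my_check_ids)
 *	BTF_ID(func, my_kfunc)
 *	BTF_SET_END(my_check_ids)
 *
 *	static const struct btf_kfunc_id_set my_kfunc_set = {
 *		.owner	   = THIS_MODULE,
 *		.check_set = &my_check_ids,
 *	};
 *
 *	register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &my_kfunc_set);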
7124 */
7125 bool btf_kfunc_id_set_contains(const struct btf *btf,
7126 enum bpf_prog_type prog_type,
7127 enum btf_kfunc_type type, u32 kfunc_btf_id)
7128 {
7129 enum btf_kfunc_hook hook;
7130
7131 hook = bpf_prog_type_to_kfunc_hook(prog_type);
7132 return __btf_kfunc_id_set_contains(btf, hook, type, kfunc_btf_id);
7133 }
7134
7135 /* This function must be invoked only from initcalls/module init functions */
7136 int register_btf_kfunc_id_set(enum bpf_prog_type prog_type,
7137 const struct btf_kfunc_id_set *kset)
7138 {
7139 enum btf_kfunc_hook hook;
7140 struct btf *btf;
7141 int ret;
7142
7143 btf = btf_get_module_btf(kset->owner);
7144 if (!btf) {
7145 if (!kset->owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
7146 pr_err("missing vmlinux BTF, cannot register kfuncs\n");
7147 return -ENOENT;
7148 }
7149 if (kset->owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)) {
7150 pr_err("missing module BTF, cannot register kfuncs\n");
7151 return -ENOENT;
7152 }
7153 return 0;
7154 }
7155 if (IS_ERR(btf))
7156 return PTR_ERR(btf);
7157
7158 hook = bpf_prog_type_to_kfunc_hook(prog_type);
7159 ret = btf_populate_kfunc_set(btf, hook, kset);
7160 btf_put(btf);
7161 return ret;
7162 }
7163 EXPORT_SYMBOL_GPL(register_btf_kfunc_id_set);
7164
7165 s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id)
7166 {
7167 struct btf_id_dtor_kfunc_tab *tab = btf->dtor_kfunc_tab;
7168 struct btf_id_dtor_kfunc *dtor;
7169
7170 if (!tab)
7171 return -ENOENT;
7172 /* Even though the size of tab->dtors[0] is > sizeof(u32), we only need
7173 * to compare the first u32 with btf_id, so we can reuse btf_id_cmp_func.
7174 */
7175 BUILD_BUG_ON(offsetof(struct btf_id_dtor_kfunc, btf_id) != 0);
7176 dtor = bsearch(&btf_id, tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func);
7177 if (!dtor)
7178 return -ENOENT;
7179 return dtor->kfunc_btf_id;
7180 }
7181
7182 static int btf_check_dtor_kfuncs(struct btf *btf, const struct btf_id_dtor_kfunc *dtors, u32 cnt)
7183 {
7184 const struct btf_type *dtor_func, *dtor_func_proto, *t;
7185 const struct btf_param *args;
7186 s32 dtor_btf_id;
7187 u32 nr_args, i;
7188
7189 for (i = 0; i < cnt; i++) {
7190 dtor_btf_id = dtors[i].kfunc_btf_id;
7191
7192 dtor_func = btf_type_by_id(btf, dtor_btf_id);
7193 if (!dtor_func || !btf_type_is_func(dtor_func))
7194 return -EINVAL;
7195
7196 dtor_func_proto = btf_type_by_id(btf, dtor_func->type);
7197 if (!dtor_func_proto || !btf_type_is_func_proto(dtor_func_proto))
7198 return -EINVAL;
7199
7200 /* Make sure the prototype of the destructor kfunc is 'void func(type *)' */
7201 t = btf_type_by_id(btf, dtor_func_proto->type);
7202 if (!t || !btf_type_is_void(t))
7203 return -EINVAL;
7204
7205 nr_args = btf_type_vlen(dtor_func_proto);
7206 if (nr_args != 1)
7207 return -EINVAL;
7208 args = btf_params(dtor_func_proto);
7209 t = btf_type_by_id(btf, args[0].type);
7210 /* Allow any pointer type, as width on targets Linux supports
7211 * will be the same for all pointer types (i.e.
sizeof(void *)) 7212 */ 7213 if (!t || !btf_type_is_ptr(t)) 7214 return -EINVAL; 7215 } 7216 return 0; 7217 } 7218 7219 /* This function must be invoked only from initcalls/module init functions */ 7220 int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_cnt, 7221 struct module *owner) 7222 { 7223 struct btf_id_dtor_kfunc_tab *tab; 7224 struct btf *btf; 7225 u32 tab_cnt; 7226 int ret; 7227 7228 btf = btf_get_module_btf(owner); 7229 if (!btf) { 7230 if (!owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) { 7231 pr_err("missing vmlinux BTF, cannot register dtor kfuncs\n"); 7232 return -ENOENT; 7233 } 7234 if (owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)) { 7235 pr_err("missing module BTF, cannot register dtor kfuncs\n"); 7236 return -ENOENT; 7237 } 7238 return 0; 7239 } 7240 if (IS_ERR(btf)) 7241 return PTR_ERR(btf); 7242 7243 if (add_cnt >= BTF_DTOR_KFUNC_MAX_CNT) { 7244 pr_err("cannot register more than %d kfunc destructors\n", BTF_DTOR_KFUNC_MAX_CNT); 7245 ret = -E2BIG; 7246 goto end; 7247 } 7248 7249 /* Ensure that the prototype of dtor kfuncs being registered is sane */ 7250 ret = btf_check_dtor_kfuncs(btf, dtors, add_cnt); 7251 if (ret < 0) 7252 goto end; 7253 7254 tab = btf->dtor_kfunc_tab; 7255 /* Only one call allowed for modules */ 7256 if (WARN_ON_ONCE(tab && btf_is_module(btf))) { 7257 ret = -EINVAL; 7258 goto end; 7259 } 7260 7261 tab_cnt = tab ? tab->cnt : 0; 7262 if (tab_cnt > U32_MAX - add_cnt) { 7263 ret = -EOVERFLOW; 7264 goto end; 7265 } 7266 if (tab_cnt + add_cnt >= BTF_DTOR_KFUNC_MAX_CNT) { 7267 pr_err("cannot register more than %d kfunc destructors\n", BTF_DTOR_KFUNC_MAX_CNT); 7268 ret = -E2BIG; 7269 goto end; 7270 } 7271 7272 tab = krealloc(btf->dtor_kfunc_tab, 7273 offsetof(struct btf_id_dtor_kfunc_tab, dtors[tab_cnt + add_cnt]), 7274 GFP_KERNEL | __GFP_NOWARN); 7275 if (!tab) { 7276 ret = -ENOMEM; 7277 goto end; 7278 } 7279 7280 if (!btf->dtor_kfunc_tab) 7281 tab->cnt = 0; 7282 btf->dtor_kfunc_tab = tab; 7283 7284 memcpy(tab->dtors + tab->cnt, dtors, add_cnt * sizeof(tab->dtors[0])); 7285 tab->cnt += add_cnt; 7286 7287 sort(tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func, NULL); 7288 7289 return 0; 7290 end: 7291 btf_free_dtor_kfunc_tab(btf); 7292 btf_put(btf); 7293 return ret; 7294 } 7295 EXPORT_SYMBOL_GPL(register_btf_id_dtor_kfuncs); 7296 7297 #define MAX_TYPES_ARE_COMPAT_DEPTH 2 7298 7299 static 7300 int __bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id, 7301 const struct btf *targ_btf, __u32 targ_id, 7302 int level) 7303 { 7304 const struct btf_type *local_type, *targ_type; 7305 int depth = 32; /* max recursion depth */ 7306 7307 /* caller made sure that names match (ignoring flavor suffix) */ 7308 local_type = btf_type_by_id(local_btf, local_id); 7309 targ_type = btf_type_by_id(targ_btf, targ_id); 7310 if (btf_kind(local_type) != btf_kind(targ_type)) 7311 return 0; 7312 7313 recur: 7314 depth--; 7315 if (depth < 0) 7316 return -EINVAL; 7317 7318 local_type = btf_type_skip_modifiers(local_btf, local_id, &local_id); 7319 targ_type = btf_type_skip_modifiers(targ_btf, targ_id, &targ_id); 7320 if (!local_type || !targ_type) 7321 return -EINVAL; 7322 7323 if (btf_kind(local_type) != btf_kind(targ_type)) 7324 return 0; 7325 7326 switch (btf_kind(local_type)) { 7327 case BTF_KIND_UNKN: 7328 case BTF_KIND_STRUCT: 7329 case BTF_KIND_UNION: 7330 case BTF_KIND_ENUM: 7331 case BTF_KIND_FWD: 7332 return 1; 7333 case BTF_KIND_INT: 7334 /* just reject deprecated bitfield-like integers; all other 7335 * integers are 
by default compatible with each other
7336 */
7337 return btf_int_offset(local_type) == 0 && btf_int_offset(targ_type) == 0;
7338 case BTF_KIND_PTR:
7339 local_id = local_type->type;
7340 targ_id = targ_type->type;
7341 goto recur;
7342 case BTF_KIND_ARRAY:
7343 local_id = btf_array(local_type)->type;
7344 targ_id = btf_array(targ_type)->type;
7345 goto recur;
7346 case BTF_KIND_FUNC_PROTO: {
7347 struct btf_param *local_p = btf_params(local_type);
7348 struct btf_param *targ_p = btf_params(targ_type);
7349 __u16 local_vlen = btf_vlen(local_type);
7350 __u16 targ_vlen = btf_vlen(targ_type);
7351 int i, err;
7352
7353 if (local_vlen != targ_vlen)
7354 return 0;
7355
7356 for (i = 0; i < local_vlen; i++, local_p++, targ_p++) {
7357 if (level <= 0)
7358 return -EINVAL;
7359
7360 btf_type_skip_modifiers(local_btf, local_p->type, &local_id);
7361 btf_type_skip_modifiers(targ_btf, targ_p->type, &targ_id);
7362 err = __bpf_core_types_are_compat(local_btf, local_id,
7363 targ_btf, targ_id,
7364 level - 1);
7365 if (err <= 0)
7366 return err;
7367 }
7368
7369 /* tail recurse for return type check */
7370 btf_type_skip_modifiers(local_btf, local_type->type, &local_id);
7371 btf_type_skip_modifiers(targ_btf, targ_type->type, &targ_id);
7372 goto recur;
7373 }
7374 default:
7375 return 0;
7376 }
7377 }
7378
7379 /* Check local and target types for compatibility. This check is used for
7380 * type-based CO-RE relocations and follows slightly different rules than
7381 * field-based relocations. This function assumes that root types were already
7382 * checked for name match. Beyond that initial root-level name check, names
7383 * are completely ignored. Compatibility rules are as follows:
7384 * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
7385 * kind should match for local and target types (i.e., STRUCT is not
7386 * compatible with UNION);
7387 * - for ENUMs, the size is ignored;
7388 * - for INT, size and signedness are ignored;
7389 * - for ARRAY, dimensionality is ignored, element types are checked for
7390 * compatibility recursively;
7391 * - CONST/VOLATILE/RESTRICT modifiers are ignored;
7392 * - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
7393 * - FUNC_PROTOs are compatible if they have a compatible signature: same
7394 * number of input args and compatible return and argument types.
7395 * These rules are not set in stone and probably will be adjusted as we get
7396 * more experience with using BPF CO-RE relocations.
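 *
 * For example (illustrative): a local "struct task_struct" with a single
 * member is compatible with the full target "struct task_struct", since
 * members are not compared here, while a local "int" matches a target
 * "long" because INT size and signedness are ignored.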
7397 */ 7398 int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id, 7399 const struct btf *targ_btf, __u32 targ_id) 7400 { 7401 return __bpf_core_types_are_compat(local_btf, local_id, 7402 targ_btf, targ_id, 7403 MAX_TYPES_ARE_COMPAT_DEPTH); 7404 } 7405 7406 static bool bpf_core_is_flavor_sep(const char *s) 7407 { 7408 /* check X___Y name pattern, where X and Y are not underscores */ 7409 return s[0] != '_' && /* X */ 7410 s[1] == '_' && s[2] == '_' && s[3] == '_' && /* ___ */ 7411 s[4] != '_'; /* Y */ 7412 } 7413 7414 size_t bpf_core_essential_name_len(const char *name) 7415 { 7416 size_t n = strlen(name); 7417 int i; 7418 7419 for (i = n - 5; i >= 0; i--) { 7420 if (bpf_core_is_flavor_sep(name + i)) 7421 return i + 1; 7422 } 7423 return n; 7424 } 7425 7426 struct bpf_cand_cache { 7427 const char *name; 7428 u32 name_len; 7429 u16 kind; 7430 u16 cnt; 7431 struct { 7432 const struct btf *btf; 7433 u32 id; 7434 } cands[]; 7435 }; 7436 7437 static void bpf_free_cands(struct bpf_cand_cache *cands) 7438 { 7439 if (!cands->cnt) 7440 /* empty candidate array was allocated on stack */ 7441 return; 7442 kfree(cands); 7443 } 7444 7445 static void bpf_free_cands_from_cache(struct bpf_cand_cache *cands) 7446 { 7447 kfree(cands->name); 7448 kfree(cands); 7449 } 7450 7451 #define VMLINUX_CAND_CACHE_SIZE 31 7452 static struct bpf_cand_cache *vmlinux_cand_cache[VMLINUX_CAND_CACHE_SIZE]; 7453 7454 #define MODULE_CAND_CACHE_SIZE 31 7455 static struct bpf_cand_cache *module_cand_cache[MODULE_CAND_CACHE_SIZE]; 7456 7457 static DEFINE_MUTEX(cand_cache_mutex); 7458 7459 static void __print_cand_cache(struct bpf_verifier_log *log, 7460 struct bpf_cand_cache **cache, 7461 int cache_size) 7462 { 7463 struct bpf_cand_cache *cc; 7464 int i, j; 7465 7466 for (i = 0; i < cache_size; i++) { 7467 cc = cache[i]; 7468 if (!cc) 7469 continue; 7470 bpf_log(log, "[%d]%s(", i, cc->name); 7471 for (j = 0; j < cc->cnt; j++) { 7472 bpf_log(log, "%d", cc->cands[j].id); 7473 if (j < cc->cnt - 1) 7474 bpf_log(log, " "); 7475 } 7476 bpf_log(log, "), "); 7477 } 7478 } 7479 7480 static void print_cand_cache(struct bpf_verifier_log *log) 7481 { 7482 mutex_lock(&cand_cache_mutex); 7483 bpf_log(log, "vmlinux_cand_cache:"); 7484 __print_cand_cache(log, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE); 7485 bpf_log(log, "\nmodule_cand_cache:"); 7486 __print_cand_cache(log, module_cand_cache, MODULE_CAND_CACHE_SIZE); 7487 bpf_log(log, "\n"); 7488 mutex_unlock(&cand_cache_mutex); 7489 } 7490 7491 static u32 hash_cands(struct bpf_cand_cache *cands) 7492 { 7493 return jhash(cands->name, cands->name_len, 0); 7494 } 7495 7496 static struct bpf_cand_cache *check_cand_cache(struct bpf_cand_cache *cands, 7497 struct bpf_cand_cache **cache, 7498 int cache_size) 7499 { 7500 struct bpf_cand_cache *cc = cache[hash_cands(cands) % cache_size]; 7501 7502 if (cc && cc->name_len == cands->name_len && 7503 !strncmp(cc->name, cands->name, cands->name_len)) 7504 return cc; 7505 return NULL; 7506 } 7507 7508 static size_t sizeof_cands(int cnt) 7509 { 7510 return offsetof(struct bpf_cand_cache, cands[cnt]); 7511 } 7512 7513 static struct bpf_cand_cache *populate_cand_cache(struct bpf_cand_cache *cands, 7514 struct bpf_cand_cache **cache, 7515 int cache_size) 7516 { 7517 struct bpf_cand_cache **cc = &cache[hash_cands(cands) % cache_size], *new_cands; 7518 7519 if (*cc) { 7520 bpf_free_cands_from_cache(*cc); 7521 *cc = NULL; 7522 } 7523 new_cands = kmemdup(cands, sizeof_cands(cands->cnt), GFP_KERNEL); 7524 if (!new_cands) { 7525 
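		/* on allocation failure drop the candidates we were asked
		 * to cache; the caller only sees ERR_PTR(-ENOMEM)
		 */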
bpf_free_cands(cands);
7526 return ERR_PTR(-ENOMEM);
7527 }
7528 /* strdup the name, since it will stay in the cache.
7529 * The cands->name points to strings in the prog's BTF and the prog can be unloaded.
7530 */
7531 new_cands->name = kmemdup_nul(cands->name, cands->name_len, GFP_KERNEL);
7532 bpf_free_cands(cands);
7533 if (!new_cands->name) {
7534 kfree(new_cands);
7535 return ERR_PTR(-ENOMEM);
7536 }
7537 *cc = new_cands;
7538 return new_cands;
7539 }
7540
7541 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
7542 static void __purge_cand_cache(struct btf *btf, struct bpf_cand_cache **cache,
7543 int cache_size)
7544 {
7545 struct bpf_cand_cache *cc;
7546 int i, j;
7547
7548 for (i = 0; i < cache_size; i++) {
7549 cc = cache[i];
7550 if (!cc)
7551 continue;
7552 if (!btf) {
7553 /* when a new module is loaded, purge all of module_cand_cache,
7554 * since the new module might have candidates with a name
7555 * that matches cached cands.
7556 */
7557 bpf_free_cands_from_cache(cc);
7558 cache[i] = NULL;
7559 continue;
7560 }
7561 /* when a module is unloaded purge cache entries
7562 * that match the module's btf
7563 */
7564 for (j = 0; j < cc->cnt; j++)
7565 if (cc->cands[j].btf == btf) {
7566 bpf_free_cands_from_cache(cc);
7567 cache[i] = NULL;
7568 break;
7569 }
7570 }
7571
7572 }
7573
7574 static void purge_cand_cache(struct btf *btf)
7575 {
7576 mutex_lock(&cand_cache_mutex);
7577 __purge_cand_cache(btf, module_cand_cache, MODULE_CAND_CACHE_SIZE);
7578 mutex_unlock(&cand_cache_mutex);
7579 }
7580 #endif
7581
7582 static struct bpf_cand_cache *
7583 bpf_core_add_cands(struct bpf_cand_cache *cands, const struct btf *targ_btf,
7584 int targ_start_id)
7585 {
7586 struct bpf_cand_cache *new_cands;
7587 const struct btf_type *t;
7588 const char *targ_name;
7589 size_t targ_essent_len;
7590 int n, i;
7591
7592 n = btf_nr_types(targ_btf);
7593 for (i = targ_start_id; i < n; i++) {
7594 t = btf_type_by_id(targ_btf, i);
7595 if (btf_kind(t) != cands->kind)
7596 continue;
7597
7598 targ_name = btf_name_by_offset(targ_btf, t->name_off);
7599 if (!targ_name)
7600 continue;
7601
7602 /* the resched point is before strncmp to make sure that a search
7603 * for a non-existing name will have a chance to schedule().
7604 */
7605 cond_resched();
7606
7607 if (strncmp(cands->name, targ_name, cands->name_len) != 0)
7608 continue;
7609
7610 targ_essent_len = bpf_core_essential_name_len(targ_name);
7611 if (targ_essent_len != cands->name_len)
7612 continue;
7613
7614 /* most of the time there is only one candidate for a given kind+name pair */
7615 new_cands = kmalloc(sizeof_cands(cands->cnt + 1), GFP_KERNEL);
7616 if (!new_cands) {
7617 bpf_free_cands(cands);
7618 return ERR_PTR(-ENOMEM);
7619 }
7620
7621 memcpy(new_cands, cands, sizeof_cands(cands->cnt));
7622 bpf_free_cands(cands);
7623 cands = new_cands;
7624 cands->cands[cands->cnt].btf = targ_btf;
7625 cands->cands[cands->cnt].id = i;
7626 cands->cnt++;
7627 }
7628 return cands;
7629 }
7630
7631 static struct bpf_cand_cache *
7632 bpf_core_find_cands(struct bpf_core_ctx *ctx, u32 local_type_id)
7633 {
7634 struct bpf_cand_cache *cands, *cc, local_cand = {};
7635 const struct btf *local_btf = ctx->btf;
7636 const struct btf_type *local_type;
7637 const struct btf *main_btf;
7638 size_t local_essent_len;
7639 struct btf *mod_btf;
7640 const char *name;
7641 int id;
7642
7643 main_btf = bpf_get_btf_vmlinux();
7644 if (IS_ERR(main_btf))
7645 return ERR_CAST(main_btf);
7646 if (!main_btf)
7647 return ERR_PTR(-EINVAL);
7648
7649 local_type = btf_type_by_id(local_btf, local_type_id);
7650 if (!local_type)
7651 return ERR_PTR(-EINVAL);
7652
7653 name = btf_name_by_offset(local_btf, local_type->name_off);
7654 if (str_is_empty(name))
7655 return ERR_PTR(-EINVAL);
7656 local_essent_len = bpf_core_essential_name_len(name);
7657
7658 cands = &local_cand;
7659 cands->name = name;
7660 cands->kind = btf_kind(local_type);
7661 cands->name_len = local_essent_len;
7662
7663 cc = check_cand_cache(cands, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
7664 /* cands is a pointer to stack here */
7665 if (cc) {
7666 if (cc->cnt)
7667 return cc;
7668 goto check_modules;
7669 }
7670
7671 /* Attempt to find target candidates in vmlinux BTF first */
7672 cands = bpf_core_add_cands(cands, main_btf, 1);
7673 if (IS_ERR(cands))
7674 return ERR_CAST(cands);
7675
7676 /* cands is a pointer to kmalloced memory here if cands->cnt > 0 */
7677
7678 /* populate cache even when cands->cnt == 0 */
7679 cc = populate_cand_cache(cands, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
7680 if (IS_ERR(cc))
7681 return ERR_CAST(cc);
7682
7683 /* if vmlinux BTF has any candidate, don't go for module BTFs */
7684 if (cc->cnt)
7685 return cc;
7686
7687 check_modules:
7688 /* cands is a pointer to stack here and cands->cnt == 0 */
7689 cc = check_cand_cache(cands, module_cand_cache, MODULE_CAND_CACHE_SIZE);
7690 if (cc)
7691 /* if cache has it return it even if cc->cnt == 0 */
7692 return cc;
7693
7694 /* If candidate is not found in vmlinux's BTF then search in module's BTFs */
7695 spin_lock_bh(&btf_idr_lock);
7696 idr_for_each_entry(&btf_idr, mod_btf, id) {
7697 if (!btf_is_module(mod_btf))
7698 continue;
7699 /* linear search could be slow hence unlock/lock
7700 * the IDR to avoid holding it for too long
7701 */
7702 btf_get(mod_btf);
7703 spin_unlock_bh(&btf_idr_lock);
7704 cands = bpf_core_add_cands(cands, mod_btf, btf_nr_types(main_btf));
7705 if (IS_ERR(cands)) {
7706 btf_put(mod_btf);
7707 return ERR_CAST(cands);
7708 }
7709 spin_lock_bh(&btf_idr_lock);
7710 btf_put(mod_btf);
7711 }
7712 spin_unlock_bh(&btf_idr_lock);
7713 /* cands is a pointer to kmalloced memory here if cands->cnt > 0
7714 * or a pointer to the stack if cands->cnt == 0.
7715 * Copy it into the cache even when cands->cnt == 0 and 7716 * return the result. 7717 */ 7718 return populate_cand_cache(cands, module_cand_cache, MODULE_CAND_CACHE_SIZE); 7719 } 7720 7721 int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo, 7722 int relo_idx, void *insn) 7723 { 7724 bool need_cands = relo->kind != BPF_CORE_TYPE_ID_LOCAL; 7725 struct bpf_core_cand_list cands = {}; 7726 struct bpf_core_relo_res targ_res; 7727 struct bpf_core_spec *specs; 7728 int err; 7729 7730 /* ~4k of temp memory necessary to convert LLVM spec like "0:1:0:5" 7731 * into arrays of btf_ids of struct fields and array indices. 7732 */ 7733 specs = kcalloc(3, sizeof(*specs), GFP_KERNEL); 7734 if (!specs) 7735 return -ENOMEM; 7736 7737 if (need_cands) { 7738 struct bpf_cand_cache *cc; 7739 int i; 7740 7741 mutex_lock(&cand_cache_mutex); 7742 cc = bpf_core_find_cands(ctx, relo->type_id); 7743 if (IS_ERR(cc)) { 7744 bpf_log(ctx->log, "target candidate search failed for %d\n", 7745 relo->type_id); 7746 err = PTR_ERR(cc); 7747 goto out; 7748 } 7749 if (cc->cnt) { 7750 cands.cands = kcalloc(cc->cnt, sizeof(*cands.cands), GFP_KERNEL); 7751 if (!cands.cands) { 7752 err = -ENOMEM; 7753 goto out; 7754 } 7755 } 7756 for (i = 0; i < cc->cnt; i++) { 7757 bpf_log(ctx->log, 7758 "CO-RE relocating %s %s: found target candidate [%d]\n", 7759 btf_kind_str[cc->kind], cc->name, cc->cands[i].id); 7760 cands.cands[i].btf = cc->cands[i].btf; 7761 cands.cands[i].id = cc->cands[i].id; 7762 } 7763 cands.len = cc->cnt; 7764 /* cand_cache_mutex needs to span the cache lookup and 7765 * copy of btf pointer into bpf_core_cand_list, 7766 * since module can be unloaded while bpf_core_calc_relo_insn 7767 * is working with module's btf. 7768 */ 7769 } 7770 7771 err = bpf_core_calc_relo_insn((void *)ctx->log, relo, relo_idx, ctx->btf, &cands, specs, 7772 &targ_res); 7773 if (err) 7774 goto out; 7775 7776 err = bpf_core_patch_insn((void *)ctx->log, insn, relo->insn_off / 8, relo, relo_idx, 7777 &targ_res); 7778 7779 out: 7780 kfree(specs); 7781 if (need_cands) { 7782 kfree(cands.cands); 7783 mutex_unlock(&cand_cache_mutex); 7784 if (ctx->log->level & BPF_LOG_LEVEL2) 7785 print_cand_cache(ctx->log); 7786 } 7787 return err; 7788 } 7789