1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com> 4 */ 5 6 #include <string.h> 7 #include <stdlib.h> 8 #include <inttypes.h> 9 #include <sys/mman.h> 10 11 #include <arch/elf.h> 12 #include <objtool/builtin.h> 13 #include <objtool/cfi.h> 14 #include <objtool/arch.h> 15 #include <objtool/check.h> 16 #include <objtool/special.h> 17 #include <objtool/warn.h> 18 #include <objtool/endianness.h> 19 20 #include <linux/objtool.h> 21 #include <linux/hashtable.h> 22 #include <linux/kernel.h> 23 #include <linux/static_call_types.h> 24 25 struct alternative { 26 struct list_head list; 27 struct instruction *insn; 28 bool skip_orig; 29 }; 30 31 static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache; 32 33 static struct cfi_init_state initial_func_cfi; 34 static struct cfi_state init_cfi; 35 static struct cfi_state func_cfi; 36 37 struct instruction *find_insn(struct objtool_file *file, 38 struct section *sec, unsigned long offset) 39 { 40 struct instruction *insn; 41 42 hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) { 43 if (insn->sec == sec && insn->offset == offset) 44 return insn; 45 } 46 47 return NULL; 48 } 49 50 static struct instruction *next_insn_same_sec(struct objtool_file *file, 51 struct instruction *insn) 52 { 53 struct instruction *next = list_next_entry(insn, list); 54 55 if (!next || &next->list == &file->insn_list || next->sec != insn->sec) 56 return NULL; 57 58 return next; 59 } 60 61 static struct instruction *next_insn_same_func(struct objtool_file *file, 62 struct instruction *insn) 63 { 64 struct instruction *next = list_next_entry(insn, list); 65 struct symbol *func = insn_func(insn); 66 67 if (!func) 68 return NULL; 69 70 if (&next->list != &file->insn_list && insn_func(next) == func) 71 return next; 72 73 /* Check if we're already in the subfunction: */ 74 if (func == func->cfunc) 75 return NULL; 76 77 /* Move to the subfunction: */ 78 return find_insn(file, func->cfunc->sec, func->cfunc->offset); 79 } 80 81 static struct instruction *prev_insn_same_sym(struct objtool_file *file, 82 struct instruction *insn) 83 { 84 struct instruction *prev = list_prev_entry(insn, list); 85 86 if (&prev->list != &file->insn_list && insn_func(prev) == insn_func(insn)) 87 return prev; 88 89 return NULL; 90 } 91 92 #define func_for_each_insn(file, func, insn) \ 93 for (insn = find_insn(file, func->sec, func->offset); \ 94 insn; \ 95 insn = next_insn_same_func(file, insn)) 96 97 #define sym_for_each_insn(file, sym, insn) \ 98 for (insn = find_insn(file, sym->sec, sym->offset); \ 99 insn && &insn->list != &file->insn_list && \ 100 insn->sec == sym->sec && \ 101 insn->offset < sym->offset + sym->len; \ 102 insn = list_next_entry(insn, list)) 103 104 #define sym_for_each_insn_continue_reverse(file, sym, insn) \ 105 for (insn = list_prev_entry(insn, list); \ 106 &insn->list != &file->insn_list && \ 107 insn->sec == sym->sec && insn->offset >= sym->offset; \ 108 insn = list_prev_entry(insn, list)) 109 110 #define sec_for_each_insn_from(file, insn) \ 111 for (; insn; insn = next_insn_same_sec(file, insn)) 112 113 #define sec_for_each_insn_continue(file, insn) \ 114 for (insn = next_insn_same_sec(file, insn); insn; \ 115 insn = next_insn_same_sec(file, insn)) 116 117 static bool is_jump_table_jump(struct instruction *insn) 118 { 119 struct alt_group *alt_group = insn->alt_group; 120 121 if (insn->jump_table) 122 return true; 123 124 /* Retpoline alternative for a jump table? */ 125 return alt_group && alt_group->orig_group && 126 alt_group->orig_group->first_insn->jump_table; 127 } 128 129 static bool is_sibling_call(struct instruction *insn) 130 { 131 /* 132 * Assume only STT_FUNC calls have jump-tables. 133 */ 134 if (insn_func(insn)) { 135 /* An indirect jump is either a sibling call or a jump to a table. */ 136 if (insn->type == INSN_JUMP_DYNAMIC) 137 return !is_jump_table_jump(insn); 138 } 139 140 /* add_jump_destinations() sets insn->call_dest for sibling calls. */ 141 return (is_static_jump(insn) && insn->call_dest); 142 } 143 144 /* 145 * This checks to see if the given function is a "noreturn" function. 146 * 147 * For global functions which are outside the scope of this object file, we 148 * have to keep a manual list of them. 149 * 150 * For local functions, we have to detect them manually by simply looking for 151 * the lack of a return instruction. 152 */ 153 static bool __dead_end_function(struct objtool_file *file, struct symbol *func, 154 int recursion) 155 { 156 int i; 157 struct instruction *insn; 158 bool empty = true; 159 160 /* 161 * Unfortunately these have to be hard coded because the noreturn 162 * attribute isn't provided in ELF data. Keep 'em sorted. 163 */ 164 static const char * const global_noreturns[] = { 165 "__invalid_creds", 166 "__module_put_and_kthread_exit", 167 "__reiserfs_panic", 168 "__stack_chk_fail", 169 "__ubsan_handle_builtin_unreachable", 170 "cpu_bringup_and_idle", 171 "cpu_startup_entry", 172 "do_exit", 173 "do_group_exit", 174 "do_task_dead", 175 "ex_handler_msr_mce", 176 "fortify_panic", 177 "kthread_complete_and_exit", 178 "kthread_exit", 179 "kunit_try_catch_throw", 180 "lbug_with_loc", 181 "machine_real_restart", 182 "make_task_dead", 183 "panic", 184 "rewind_stack_and_make_dead", 185 "sev_es_terminate", 186 "snp_abort", 187 "stop_this_cpu", 188 "usercopy_abort", 189 "xen_start_kernel", 190 }; 191 192 if (!func) 193 return false; 194 195 if (func->bind == STB_WEAK) 196 return false; 197 198 if (func->bind == STB_GLOBAL) 199 for (i = 0; i < ARRAY_SIZE(global_noreturns); i++) 200 if (!strcmp(func->name, global_noreturns[i])) 201 return true; 202 203 if (!func->len) 204 return false; 205 206 insn = find_insn(file, func->sec, func->offset); 207 if (!insn_func(insn)) 208 return false; 209 210 func_for_each_insn(file, func, insn) { 211 empty = false; 212 213 if (insn->type == INSN_RETURN) 214 return false; 215 } 216 217 if (empty) 218 return false; 219 220 /* 221 * A function can have a sibling call instead of a return. In that 222 * case, the function's dead-end status depends on whether the target 223 * of the sibling call returns. 224 */ 225 func_for_each_insn(file, func, insn) { 226 if (is_sibling_call(insn)) { 227 struct instruction *dest = insn->jump_dest; 228 229 if (!dest) 230 /* sibling call to another file */ 231 return false; 232 233 /* local sibling call */ 234 if (recursion == 5) { 235 /* 236 * Infinite recursion: two functions have 237 * sibling calls to each other. This is a very 238 * rare case. It means they aren't dead ends. 239 */ 240 return false; 241 } 242 243 return __dead_end_function(file, insn_func(dest), recursion+1); 244 } 245 } 246 247 return true; 248 } 249 250 static bool dead_end_function(struct objtool_file *file, struct symbol *func) 251 { 252 return __dead_end_function(file, func, 0); 253 } 254 255 static void init_cfi_state(struct cfi_state *cfi) 256 { 257 int i; 258 259 for (i = 0; i < CFI_NUM_REGS; i++) { 260 cfi->regs[i].base = CFI_UNDEFINED; 261 cfi->vals[i].base = CFI_UNDEFINED; 262 } 263 cfi->cfa.base = CFI_UNDEFINED; 264 cfi->drap_reg = CFI_UNDEFINED; 265 cfi->drap_offset = -1; 266 } 267 268 static void init_insn_state(struct objtool_file *file, struct insn_state *state, 269 struct section *sec) 270 { 271 memset(state, 0, sizeof(*state)); 272 init_cfi_state(&state->cfi); 273 274 /* 275 * We need the full vmlinux for noinstr validation, otherwise we can 276 * not correctly determine insn->call_dest->sec (external symbols do 277 * not have a section). 278 */ 279 if (opts.link && opts.noinstr && sec) 280 state->noinstr = sec->noinstr; 281 } 282 283 static struct cfi_state *cfi_alloc(void) 284 { 285 struct cfi_state *cfi = calloc(sizeof(struct cfi_state), 1); 286 if (!cfi) { 287 WARN("calloc failed"); 288 exit(1); 289 } 290 nr_cfi++; 291 return cfi; 292 } 293 294 static int cfi_bits; 295 static struct hlist_head *cfi_hash; 296 297 static inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2) 298 { 299 return memcmp((void *)cfi1 + sizeof(cfi1->hash), 300 (void *)cfi2 + sizeof(cfi2->hash), 301 sizeof(struct cfi_state) - sizeof(struct hlist_node)); 302 } 303 304 static inline u32 cfi_key(struct cfi_state *cfi) 305 { 306 return jhash((void *)cfi + sizeof(cfi->hash), 307 sizeof(*cfi) - sizeof(cfi->hash), 0); 308 } 309 310 static struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi) 311 { 312 struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)]; 313 struct cfi_state *obj; 314 315 hlist_for_each_entry(obj, head, hash) { 316 if (!cficmp(cfi, obj)) { 317 nr_cfi_cache++; 318 return obj; 319 } 320 } 321 322 obj = cfi_alloc(); 323 *obj = *cfi; 324 hlist_add_head(&obj->hash, head); 325 326 return obj; 327 } 328 329 static void cfi_hash_add(struct cfi_state *cfi) 330 { 331 struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)]; 332 333 hlist_add_head(&cfi->hash, head); 334 } 335 336 static void *cfi_hash_alloc(unsigned long size) 337 { 338 cfi_bits = max(10, ilog2(size)); 339 cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits, 340 PROT_READ|PROT_WRITE, 341 MAP_PRIVATE|MAP_ANON, -1, 0); 342 if (cfi_hash == (void *)-1L) { 343 WARN("mmap fail cfi_hash"); 344 cfi_hash = NULL; 345 } else if (opts.stats) { 346 printf("cfi_bits: %d\n", cfi_bits); 347 } 348 349 return cfi_hash; 350 } 351 352 static unsigned long nr_insns; 353 static unsigned long nr_insns_visited; 354 355 /* 356 * Call the arch-specific instruction decoder for all the instructions and add 357 * them to the global instruction list. 358 */ 359 static int decode_instructions(struct objtool_file *file) 360 { 361 struct section *sec; 362 struct symbol *func; 363 unsigned long offset; 364 struct instruction *insn; 365 int ret; 366 367 for_each_sec(file, sec) { 368 369 if (!(sec->sh.sh_flags & SHF_EXECINSTR)) 370 continue; 371 372 if (strcmp(sec->name, ".altinstr_replacement") && 373 strcmp(sec->name, ".altinstr_aux") && 374 strncmp(sec->name, ".discard.", 9)) 375 sec->text = true; 376 377 if (!strcmp(sec->name, ".noinstr.text") || 378 !strcmp(sec->name, ".entry.text") || 379 !strncmp(sec->name, ".text.__x86.", 12)) 380 sec->noinstr = true; 381 382 /* 383 * .init.text code is ran before userspace and thus doesn't 384 * strictly need retpolines, except for modules which are 385 * loaded late, they very much do need retpoline in their 386 * .init.text 387 */ 388 if (!strcmp(sec->name, ".init.text") && !opts.module) 389 sec->init = true; 390 391 for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) { 392 insn = malloc(sizeof(*insn)); 393 if (!insn) { 394 WARN("malloc failed"); 395 return -1; 396 } 397 memset(insn, 0, sizeof(*insn)); 398 INIT_LIST_HEAD(&insn->alts); 399 INIT_LIST_HEAD(&insn->stack_ops); 400 INIT_LIST_HEAD(&insn->call_node); 401 402 insn->sec = sec; 403 insn->offset = offset; 404 405 ret = arch_decode_instruction(file, sec, offset, 406 sec->sh.sh_size - offset, 407 &insn->len, &insn->type, 408 &insn->immediate, 409 &insn->stack_ops); 410 if (ret) 411 goto err; 412 413 /* 414 * By default, "ud2" is a dead end unless otherwise 415 * annotated, because GCC 7 inserts it for certain 416 * divide-by-zero cases. 417 */ 418 if (insn->type == INSN_BUG) 419 insn->dead_end = true; 420 421 hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset)); 422 list_add_tail(&insn->list, &file->insn_list); 423 nr_insns++; 424 } 425 426 list_for_each_entry(func, &sec->symbol_list, list) { 427 if (func->type != STT_NOTYPE && func->type != STT_FUNC) 428 continue; 429 430 if (func->return_thunk || func->alias != func) 431 continue; 432 433 if (!find_insn(file, sec, func->offset)) { 434 WARN("%s(): can't find starting instruction", 435 func->name); 436 return -1; 437 } 438 439 sym_for_each_insn(file, func, insn) { 440 insn->sym = func; 441 if (func->type == STT_FUNC && 442 insn->type == INSN_ENDBR && 443 list_empty(&insn->call_node)) { 444 if (insn->offset == func->offset) { 445 list_add_tail(&insn->call_node, &file->endbr_list); 446 file->nr_endbr++; 447 } else { 448 file->nr_endbr_int++; 449 } 450 } 451 } 452 } 453 } 454 455 if (opts.stats) 456 printf("nr_insns: %lu\n", nr_insns); 457 458 return 0; 459 460 err: 461 free(insn); 462 return ret; 463 } 464 465 /* 466 * Read the pv_ops[] .data table to find the static initialized values. 467 */ 468 static int add_pv_ops(struct objtool_file *file, const char *symname) 469 { 470 struct symbol *sym, *func; 471 unsigned long off, end; 472 struct reloc *rel; 473 int idx; 474 475 sym = find_symbol_by_name(file->elf, symname); 476 if (!sym) 477 return 0; 478 479 off = sym->offset; 480 end = off + sym->len; 481 for (;;) { 482 rel = find_reloc_by_dest_range(file->elf, sym->sec, off, end - off); 483 if (!rel) 484 break; 485 486 func = rel->sym; 487 if (func->type == STT_SECTION) 488 func = find_symbol_by_offset(rel->sym->sec, rel->addend); 489 490 idx = (rel->offset - sym->offset) / sizeof(unsigned long); 491 492 objtool_pv_add(file, idx, func); 493 494 off = rel->offset + 1; 495 if (off > end) 496 break; 497 } 498 499 return 0; 500 } 501 502 /* 503 * Allocate and initialize file->pv_ops[]. 504 */ 505 static int init_pv_ops(struct objtool_file *file) 506 { 507 static const char *pv_ops_tables[] = { 508 "pv_ops", 509 "xen_cpu_ops", 510 "xen_irq_ops", 511 "xen_mmu_ops", 512 NULL, 513 }; 514 const char *pv_ops; 515 struct symbol *sym; 516 int idx, nr; 517 518 if (!opts.noinstr) 519 return 0; 520 521 file->pv_ops = NULL; 522 523 sym = find_symbol_by_name(file->elf, "pv_ops"); 524 if (!sym) 525 return 0; 526 527 nr = sym->len / sizeof(unsigned long); 528 file->pv_ops = calloc(sizeof(struct pv_state), nr); 529 if (!file->pv_ops) 530 return -1; 531 532 for (idx = 0; idx < nr; idx++) 533 INIT_LIST_HEAD(&file->pv_ops[idx].targets); 534 535 for (idx = 0; (pv_ops = pv_ops_tables[idx]); idx++) 536 add_pv_ops(file, pv_ops); 537 538 return 0; 539 } 540 541 static struct instruction *find_last_insn(struct objtool_file *file, 542 struct section *sec) 543 { 544 struct instruction *insn = NULL; 545 unsigned int offset; 546 unsigned int end = (sec->sh.sh_size > 10) ? sec->sh.sh_size - 10 : 0; 547 548 for (offset = sec->sh.sh_size - 1; offset >= end && !insn; offset--) 549 insn = find_insn(file, sec, offset); 550 551 return insn; 552 } 553 554 /* 555 * Mark "ud2" instructions and manually annotated dead ends. 556 */ 557 static int add_dead_ends(struct objtool_file *file) 558 { 559 struct section *sec; 560 struct reloc *reloc; 561 struct instruction *insn; 562 563 /* 564 * Check for manually annotated dead ends. 565 */ 566 sec = find_section_by_name(file->elf, ".rela.discard.unreachable"); 567 if (!sec) 568 goto reachable; 569 570 list_for_each_entry(reloc, &sec->reloc_list, list) { 571 if (reloc->sym->type != STT_SECTION) { 572 WARN("unexpected relocation symbol type in %s", sec->name); 573 return -1; 574 } 575 insn = find_insn(file, reloc->sym->sec, reloc->addend); 576 if (insn) 577 insn = list_prev_entry(insn, list); 578 else if (reloc->addend == reloc->sym->sec->sh.sh_size) { 579 insn = find_last_insn(file, reloc->sym->sec); 580 if (!insn) { 581 WARN("can't find unreachable insn at %s+0x%" PRIx64, 582 reloc->sym->sec->name, reloc->addend); 583 return -1; 584 } 585 } else { 586 WARN("can't find unreachable insn at %s+0x%" PRIx64, 587 reloc->sym->sec->name, reloc->addend); 588 return -1; 589 } 590 591 insn->dead_end = true; 592 } 593 594 reachable: 595 /* 596 * These manually annotated reachable checks are needed for GCC 4.4, 597 * where the Linux unreachable() macro isn't supported. In that case 598 * GCC doesn't know the "ud2" is fatal, so it generates code as if it's 599 * not a dead end. 600 */ 601 sec = find_section_by_name(file->elf, ".rela.discard.reachable"); 602 if (!sec) 603 return 0; 604 605 list_for_each_entry(reloc, &sec->reloc_list, list) { 606 if (reloc->sym->type != STT_SECTION) { 607 WARN("unexpected relocation symbol type in %s", sec->name); 608 return -1; 609 } 610 insn = find_insn(file, reloc->sym->sec, reloc->addend); 611 if (insn) 612 insn = list_prev_entry(insn, list); 613 else if (reloc->addend == reloc->sym->sec->sh.sh_size) { 614 insn = find_last_insn(file, reloc->sym->sec); 615 if (!insn) { 616 WARN("can't find reachable insn at %s+0x%" PRIx64, 617 reloc->sym->sec->name, reloc->addend); 618 return -1; 619 } 620 } else { 621 WARN("can't find reachable insn at %s+0x%" PRIx64, 622 reloc->sym->sec->name, reloc->addend); 623 return -1; 624 } 625 626 insn->dead_end = false; 627 } 628 629 return 0; 630 } 631 632 static int create_static_call_sections(struct objtool_file *file) 633 { 634 struct section *sec; 635 struct static_call_site *site; 636 struct instruction *insn; 637 struct symbol *key_sym; 638 char *key_name, *tmp; 639 int idx; 640 641 sec = find_section_by_name(file->elf, ".static_call_sites"); 642 if (sec) { 643 INIT_LIST_HEAD(&file->static_call_list); 644 WARN("file already has .static_call_sites section, skipping"); 645 return 0; 646 } 647 648 if (list_empty(&file->static_call_list)) 649 return 0; 650 651 idx = 0; 652 list_for_each_entry(insn, &file->static_call_list, call_node) 653 idx++; 654 655 sec = elf_create_section(file->elf, ".static_call_sites", SHF_WRITE, 656 sizeof(struct static_call_site), idx); 657 if (!sec) 658 return -1; 659 660 idx = 0; 661 list_for_each_entry(insn, &file->static_call_list, call_node) { 662 663 site = (struct static_call_site *)sec->data->d_buf + idx; 664 memset(site, 0, sizeof(struct static_call_site)); 665 666 /* populate reloc for 'addr' */ 667 if (elf_add_reloc_to_insn(file->elf, sec, 668 idx * sizeof(struct static_call_site), 669 R_X86_64_PC32, 670 insn->sec, insn->offset)) 671 return -1; 672 673 /* find key symbol */ 674 key_name = strdup(insn->call_dest->name); 675 if (!key_name) { 676 perror("strdup"); 677 return -1; 678 } 679 if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR, 680 STATIC_CALL_TRAMP_PREFIX_LEN)) { 681 WARN("static_call: trampoline name malformed: %s", key_name); 682 return -1; 683 } 684 tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN; 685 memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN); 686 687 key_sym = find_symbol_by_name(file->elf, tmp); 688 if (!key_sym) { 689 if (!opts.module) { 690 WARN("static_call: can't find static_call_key symbol: %s", tmp); 691 return -1; 692 } 693 694 /* 695 * For modules(), the key might not be exported, which 696 * means the module can make static calls but isn't 697 * allowed to change them. 698 * 699 * In that case we temporarily set the key to be the 700 * trampoline address. This is fixed up in 701 * static_call_add_module(). 702 */ 703 key_sym = insn->call_dest; 704 } 705 free(key_name); 706 707 /* populate reloc for 'key' */ 708 if (elf_add_reloc(file->elf, sec, 709 idx * sizeof(struct static_call_site) + 4, 710 R_X86_64_PC32, key_sym, 711 is_sibling_call(insn) * STATIC_CALL_SITE_TAIL)) 712 return -1; 713 714 idx++; 715 } 716 717 return 0; 718 } 719 720 static int create_retpoline_sites_sections(struct objtool_file *file) 721 { 722 struct instruction *insn; 723 struct section *sec; 724 int idx; 725 726 sec = find_section_by_name(file->elf, ".retpoline_sites"); 727 if (sec) { 728 WARN("file already has .retpoline_sites, skipping"); 729 return 0; 730 } 731 732 idx = 0; 733 list_for_each_entry(insn, &file->retpoline_call_list, call_node) 734 idx++; 735 736 if (!idx) 737 return 0; 738 739 sec = elf_create_section(file->elf, ".retpoline_sites", 0, 740 sizeof(int), idx); 741 if (!sec) { 742 WARN("elf_create_section: .retpoline_sites"); 743 return -1; 744 } 745 746 idx = 0; 747 list_for_each_entry(insn, &file->retpoline_call_list, call_node) { 748 749 int *site = (int *)sec->data->d_buf + idx; 750 *site = 0; 751 752 if (elf_add_reloc_to_insn(file->elf, sec, 753 idx * sizeof(int), 754 R_X86_64_PC32, 755 insn->sec, insn->offset)) { 756 WARN("elf_add_reloc_to_insn: .retpoline_sites"); 757 return -1; 758 } 759 760 idx++; 761 } 762 763 return 0; 764 } 765 766 static int create_return_sites_sections(struct objtool_file *file) 767 { 768 struct instruction *insn; 769 struct section *sec; 770 int idx; 771 772 sec = find_section_by_name(file->elf, ".return_sites"); 773 if (sec) { 774 WARN("file already has .return_sites, skipping"); 775 return 0; 776 } 777 778 idx = 0; 779 list_for_each_entry(insn, &file->return_thunk_list, call_node) 780 idx++; 781 782 if (!idx) 783 return 0; 784 785 sec = elf_create_section(file->elf, ".return_sites", 0, 786 sizeof(int), idx); 787 if (!sec) { 788 WARN("elf_create_section: .return_sites"); 789 return -1; 790 } 791 792 idx = 0; 793 list_for_each_entry(insn, &file->return_thunk_list, call_node) { 794 795 int *site = (int *)sec->data->d_buf + idx; 796 *site = 0; 797 798 if (elf_add_reloc_to_insn(file->elf, sec, 799 idx * sizeof(int), 800 R_X86_64_PC32, 801 insn->sec, insn->offset)) { 802 WARN("elf_add_reloc_to_insn: .return_sites"); 803 return -1; 804 } 805 806 idx++; 807 } 808 809 return 0; 810 } 811 812 static int create_ibt_endbr_seal_sections(struct objtool_file *file) 813 { 814 struct instruction *insn; 815 struct section *sec; 816 int idx; 817 818 sec = find_section_by_name(file->elf, ".ibt_endbr_seal"); 819 if (sec) { 820 WARN("file already has .ibt_endbr_seal, skipping"); 821 return 0; 822 } 823 824 idx = 0; 825 list_for_each_entry(insn, &file->endbr_list, call_node) 826 idx++; 827 828 if (opts.stats) { 829 printf("ibt: ENDBR at function start: %d\n", file->nr_endbr); 830 printf("ibt: ENDBR inside functions: %d\n", file->nr_endbr_int); 831 printf("ibt: superfluous ENDBR: %d\n", idx); 832 } 833 834 if (!idx) 835 return 0; 836 837 sec = elf_create_section(file->elf, ".ibt_endbr_seal", 0, 838 sizeof(int), idx); 839 if (!sec) { 840 WARN("elf_create_section: .ibt_endbr_seal"); 841 return -1; 842 } 843 844 idx = 0; 845 list_for_each_entry(insn, &file->endbr_list, call_node) { 846 847 int *site = (int *)sec->data->d_buf + idx; 848 *site = 0; 849 850 if (elf_add_reloc_to_insn(file->elf, sec, 851 idx * sizeof(int), 852 R_X86_64_PC32, 853 insn->sec, insn->offset)) { 854 WARN("elf_add_reloc_to_insn: .ibt_endbr_seal"); 855 return -1; 856 } 857 858 idx++; 859 } 860 861 return 0; 862 } 863 864 static int create_cfi_sections(struct objtool_file *file) 865 { 866 struct section *sec, *s; 867 struct symbol *sym; 868 unsigned int *loc; 869 int idx; 870 871 sec = find_section_by_name(file->elf, ".cfi_sites"); 872 if (sec) { 873 INIT_LIST_HEAD(&file->call_list); 874 WARN("file already has .cfi_sites section, skipping"); 875 return 0; 876 } 877 878 idx = 0; 879 for_each_sec(file, s) { 880 if (!s->text) 881 continue; 882 883 list_for_each_entry(sym, &s->symbol_list, list) { 884 if (sym->type != STT_FUNC) 885 continue; 886 887 if (strncmp(sym->name, "__cfi_", 6)) 888 continue; 889 890 idx++; 891 } 892 } 893 894 sec = elf_create_section(file->elf, ".cfi_sites", 0, sizeof(unsigned int), idx); 895 if (!sec) 896 return -1; 897 898 idx = 0; 899 for_each_sec(file, s) { 900 if (!s->text) 901 continue; 902 903 list_for_each_entry(sym, &s->symbol_list, list) { 904 if (sym->type != STT_FUNC) 905 continue; 906 907 if (strncmp(sym->name, "__cfi_", 6)) 908 continue; 909 910 loc = (unsigned int *)sec->data->d_buf + idx; 911 memset(loc, 0, sizeof(unsigned int)); 912 913 if (elf_add_reloc_to_insn(file->elf, sec, 914 idx * sizeof(unsigned int), 915 R_X86_64_PC32, 916 s, sym->offset)) 917 return -1; 918 919 idx++; 920 } 921 } 922 923 return 0; 924 } 925 926 static int create_mcount_loc_sections(struct objtool_file *file) 927 { 928 struct section *sec; 929 unsigned long *loc; 930 struct instruction *insn; 931 int idx; 932 933 sec = find_section_by_name(file->elf, "__mcount_loc"); 934 if (sec) { 935 INIT_LIST_HEAD(&file->mcount_loc_list); 936 WARN("file already has __mcount_loc section, skipping"); 937 return 0; 938 } 939 940 if (list_empty(&file->mcount_loc_list)) 941 return 0; 942 943 idx = 0; 944 list_for_each_entry(insn, &file->mcount_loc_list, call_node) 945 idx++; 946 947 sec = elf_create_section(file->elf, "__mcount_loc", 0, sizeof(unsigned long), idx); 948 if (!sec) 949 return -1; 950 951 idx = 0; 952 list_for_each_entry(insn, &file->mcount_loc_list, call_node) { 953 954 loc = (unsigned long *)sec->data->d_buf + idx; 955 memset(loc, 0, sizeof(unsigned long)); 956 957 if (elf_add_reloc_to_insn(file->elf, sec, 958 idx * sizeof(unsigned long), 959 R_X86_64_64, 960 insn->sec, insn->offset)) 961 return -1; 962 963 idx++; 964 } 965 966 return 0; 967 } 968 969 static int create_direct_call_sections(struct objtool_file *file) 970 { 971 struct instruction *insn; 972 struct section *sec; 973 unsigned int *loc; 974 int idx; 975 976 sec = find_section_by_name(file->elf, ".call_sites"); 977 if (sec) { 978 INIT_LIST_HEAD(&file->call_list); 979 WARN("file already has .call_sites section, skipping"); 980 return 0; 981 } 982 983 if (list_empty(&file->call_list)) 984 return 0; 985 986 idx = 0; 987 list_for_each_entry(insn, &file->call_list, call_node) 988 idx++; 989 990 sec = elf_create_section(file->elf, ".call_sites", 0, sizeof(unsigned int), idx); 991 if (!sec) 992 return -1; 993 994 idx = 0; 995 list_for_each_entry(insn, &file->call_list, call_node) { 996 997 loc = (unsigned int *)sec->data->d_buf + idx; 998 memset(loc, 0, sizeof(unsigned int)); 999 1000 if (elf_add_reloc_to_insn(file->elf, sec, 1001 idx * sizeof(unsigned int), 1002 R_X86_64_PC32, 1003 insn->sec, insn->offset)) 1004 return -1; 1005 1006 idx++; 1007 } 1008 1009 return 0; 1010 } 1011 1012 /* 1013 * Warnings shouldn't be reported for ignored functions. 1014 */ 1015 static void add_ignores(struct objtool_file *file) 1016 { 1017 struct instruction *insn; 1018 struct section *sec; 1019 struct symbol *func; 1020 struct reloc *reloc; 1021 1022 sec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard"); 1023 if (!sec) 1024 return; 1025 1026 list_for_each_entry(reloc, &sec->reloc_list, list) { 1027 switch (reloc->sym->type) { 1028 case STT_FUNC: 1029 func = reloc->sym; 1030 break; 1031 1032 case STT_SECTION: 1033 func = find_func_by_offset(reloc->sym->sec, reloc->addend); 1034 if (!func) 1035 continue; 1036 break; 1037 1038 default: 1039 WARN("unexpected relocation symbol type in %s: %d", sec->name, reloc->sym->type); 1040 continue; 1041 } 1042 1043 func_for_each_insn(file, func, insn) 1044 insn->ignore = true; 1045 } 1046 } 1047 1048 /* 1049 * This is a whitelist of functions that is allowed to be called with AC set. 1050 * The list is meant to be minimal and only contains compiler instrumentation 1051 * ABI and a few functions used to implement *_{to,from}_user() functions. 1052 * 1053 * These functions must not directly change AC, but may PUSHF/POPF. 1054 */ 1055 static const char *uaccess_safe_builtin[] = { 1056 /* KASAN */ 1057 "kasan_report", 1058 "kasan_check_range", 1059 /* KASAN out-of-line */ 1060 "__asan_loadN_noabort", 1061 "__asan_load1_noabort", 1062 "__asan_load2_noabort", 1063 "__asan_load4_noabort", 1064 "__asan_load8_noabort", 1065 "__asan_load16_noabort", 1066 "__asan_storeN_noabort", 1067 "__asan_store1_noabort", 1068 "__asan_store2_noabort", 1069 "__asan_store4_noabort", 1070 "__asan_store8_noabort", 1071 "__asan_store16_noabort", 1072 "__kasan_check_read", 1073 "__kasan_check_write", 1074 /* KASAN in-line */ 1075 "__asan_report_load_n_noabort", 1076 "__asan_report_load1_noabort", 1077 "__asan_report_load2_noabort", 1078 "__asan_report_load4_noabort", 1079 "__asan_report_load8_noabort", 1080 "__asan_report_load16_noabort", 1081 "__asan_report_store_n_noabort", 1082 "__asan_report_store1_noabort", 1083 "__asan_report_store2_noabort", 1084 "__asan_report_store4_noabort", 1085 "__asan_report_store8_noabort", 1086 "__asan_report_store16_noabort", 1087 /* KCSAN */ 1088 "__kcsan_check_access", 1089 "__kcsan_mb", 1090 "__kcsan_wmb", 1091 "__kcsan_rmb", 1092 "__kcsan_release", 1093 "kcsan_found_watchpoint", 1094 "kcsan_setup_watchpoint", 1095 "kcsan_check_scoped_accesses", 1096 "kcsan_disable_current", 1097 "kcsan_enable_current_nowarn", 1098 /* KCSAN/TSAN */ 1099 "__tsan_func_entry", 1100 "__tsan_func_exit", 1101 "__tsan_read_range", 1102 "__tsan_write_range", 1103 "__tsan_read1", 1104 "__tsan_read2", 1105 "__tsan_read4", 1106 "__tsan_read8", 1107 "__tsan_read16", 1108 "__tsan_write1", 1109 "__tsan_write2", 1110 "__tsan_write4", 1111 "__tsan_write8", 1112 "__tsan_write16", 1113 "__tsan_read_write1", 1114 "__tsan_read_write2", 1115 "__tsan_read_write4", 1116 "__tsan_read_write8", 1117 "__tsan_read_write16", 1118 "__tsan_volatile_read1", 1119 "__tsan_volatile_read2", 1120 "__tsan_volatile_read4", 1121 "__tsan_volatile_read8", 1122 "__tsan_volatile_read16", 1123 "__tsan_volatile_write1", 1124 "__tsan_volatile_write2", 1125 "__tsan_volatile_write4", 1126 "__tsan_volatile_write8", 1127 "__tsan_volatile_write16", 1128 "__tsan_atomic8_load", 1129 "__tsan_atomic16_load", 1130 "__tsan_atomic32_load", 1131 "__tsan_atomic64_load", 1132 "__tsan_atomic8_store", 1133 "__tsan_atomic16_store", 1134 "__tsan_atomic32_store", 1135 "__tsan_atomic64_store", 1136 "__tsan_atomic8_exchange", 1137 "__tsan_atomic16_exchange", 1138 "__tsan_atomic32_exchange", 1139 "__tsan_atomic64_exchange", 1140 "__tsan_atomic8_fetch_add", 1141 "__tsan_atomic16_fetch_add", 1142 "__tsan_atomic32_fetch_add", 1143 "__tsan_atomic64_fetch_add", 1144 "__tsan_atomic8_fetch_sub", 1145 "__tsan_atomic16_fetch_sub", 1146 "__tsan_atomic32_fetch_sub", 1147 "__tsan_atomic64_fetch_sub", 1148 "__tsan_atomic8_fetch_and", 1149 "__tsan_atomic16_fetch_and", 1150 "__tsan_atomic32_fetch_and", 1151 "__tsan_atomic64_fetch_and", 1152 "__tsan_atomic8_fetch_or", 1153 "__tsan_atomic16_fetch_or", 1154 "__tsan_atomic32_fetch_or", 1155 "__tsan_atomic64_fetch_or", 1156 "__tsan_atomic8_fetch_xor", 1157 "__tsan_atomic16_fetch_xor", 1158 "__tsan_atomic32_fetch_xor", 1159 "__tsan_atomic64_fetch_xor", 1160 "__tsan_atomic8_fetch_nand", 1161 "__tsan_atomic16_fetch_nand", 1162 "__tsan_atomic32_fetch_nand", 1163 "__tsan_atomic64_fetch_nand", 1164 "__tsan_atomic8_compare_exchange_strong", 1165 "__tsan_atomic16_compare_exchange_strong", 1166 "__tsan_atomic32_compare_exchange_strong", 1167 "__tsan_atomic64_compare_exchange_strong", 1168 "__tsan_atomic8_compare_exchange_weak", 1169 "__tsan_atomic16_compare_exchange_weak", 1170 "__tsan_atomic32_compare_exchange_weak", 1171 "__tsan_atomic64_compare_exchange_weak", 1172 "__tsan_atomic8_compare_exchange_val", 1173 "__tsan_atomic16_compare_exchange_val", 1174 "__tsan_atomic32_compare_exchange_val", 1175 "__tsan_atomic64_compare_exchange_val", 1176 "__tsan_atomic_thread_fence", 1177 "__tsan_atomic_signal_fence", 1178 /* KCOV */ 1179 "write_comp_data", 1180 "check_kcov_mode", 1181 "__sanitizer_cov_trace_pc", 1182 "__sanitizer_cov_trace_const_cmp1", 1183 "__sanitizer_cov_trace_const_cmp2", 1184 "__sanitizer_cov_trace_const_cmp4", 1185 "__sanitizer_cov_trace_const_cmp8", 1186 "__sanitizer_cov_trace_cmp1", 1187 "__sanitizer_cov_trace_cmp2", 1188 "__sanitizer_cov_trace_cmp4", 1189 "__sanitizer_cov_trace_cmp8", 1190 "__sanitizer_cov_trace_switch", 1191 /* KMSAN */ 1192 "kmsan_copy_to_user", 1193 "kmsan_report", 1194 "kmsan_unpoison_entry_regs", 1195 "kmsan_unpoison_memory", 1196 "__msan_chain_origin", 1197 "__msan_get_context_state", 1198 "__msan_instrument_asm_store", 1199 "__msan_metadata_ptr_for_load_1", 1200 "__msan_metadata_ptr_for_load_2", 1201 "__msan_metadata_ptr_for_load_4", 1202 "__msan_metadata_ptr_for_load_8", 1203 "__msan_metadata_ptr_for_load_n", 1204 "__msan_metadata_ptr_for_store_1", 1205 "__msan_metadata_ptr_for_store_2", 1206 "__msan_metadata_ptr_for_store_4", 1207 "__msan_metadata_ptr_for_store_8", 1208 "__msan_metadata_ptr_for_store_n", 1209 "__msan_poison_alloca", 1210 "__msan_warning", 1211 /* UBSAN */ 1212 "ubsan_type_mismatch_common", 1213 "__ubsan_handle_type_mismatch", 1214 "__ubsan_handle_type_mismatch_v1", 1215 "__ubsan_handle_shift_out_of_bounds", 1216 /* misc */ 1217 "csum_partial_copy_generic", 1218 "copy_mc_fragile", 1219 "copy_mc_fragile_handle_tail", 1220 "copy_mc_enhanced_fast_string", 1221 "ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */ 1222 "clear_user_erms", 1223 "clear_user_rep_good", 1224 "clear_user_original", 1225 NULL 1226 }; 1227 1228 static void add_uaccess_safe(struct objtool_file *file) 1229 { 1230 struct symbol *func; 1231 const char **name; 1232 1233 if (!opts.uaccess) 1234 return; 1235 1236 for (name = uaccess_safe_builtin; *name; name++) { 1237 func = find_symbol_by_name(file->elf, *name); 1238 if (!func) 1239 continue; 1240 1241 func->uaccess_safe = true; 1242 } 1243 } 1244 1245 /* 1246 * FIXME: For now, just ignore any alternatives which add retpolines. This is 1247 * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline. 1248 * But it at least allows objtool to understand the control flow *around* the 1249 * retpoline. 1250 */ 1251 static int add_ignore_alternatives(struct objtool_file *file) 1252 { 1253 struct section *sec; 1254 struct reloc *reloc; 1255 struct instruction *insn; 1256 1257 sec = find_section_by_name(file->elf, ".rela.discard.ignore_alts"); 1258 if (!sec) 1259 return 0; 1260 1261 list_for_each_entry(reloc, &sec->reloc_list, list) { 1262 if (reloc->sym->type != STT_SECTION) { 1263 WARN("unexpected relocation symbol type in %s", sec->name); 1264 return -1; 1265 } 1266 1267 insn = find_insn(file, reloc->sym->sec, reloc->addend); 1268 if (!insn) { 1269 WARN("bad .discard.ignore_alts entry"); 1270 return -1; 1271 } 1272 1273 insn->ignore_alts = true; 1274 } 1275 1276 return 0; 1277 } 1278 1279 __weak bool arch_is_retpoline(struct symbol *sym) 1280 { 1281 return false; 1282 } 1283 1284 __weak bool arch_is_rethunk(struct symbol *sym) 1285 { 1286 return false; 1287 } 1288 1289 #define NEGATIVE_RELOC ((void *)-1L) 1290 1291 static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn) 1292 { 1293 if (insn->reloc == NEGATIVE_RELOC) 1294 return NULL; 1295 1296 if (!insn->reloc) { 1297 if (!file) 1298 return NULL; 1299 1300 insn->reloc = find_reloc_by_dest_range(file->elf, insn->sec, 1301 insn->offset, insn->len); 1302 if (!insn->reloc) { 1303 insn->reloc = NEGATIVE_RELOC; 1304 return NULL; 1305 } 1306 } 1307 1308 return insn->reloc; 1309 } 1310 1311 static void remove_insn_ops(struct instruction *insn) 1312 { 1313 struct stack_op *op, *tmp; 1314 1315 list_for_each_entry_safe(op, tmp, &insn->stack_ops, list) { 1316 list_del(&op->list); 1317 free(op); 1318 } 1319 } 1320 1321 static void annotate_call_site(struct objtool_file *file, 1322 struct instruction *insn, bool sibling) 1323 { 1324 struct reloc *reloc = insn_reloc(file, insn); 1325 struct symbol *sym = insn->call_dest; 1326 1327 if (!sym) 1328 sym = reloc->sym; 1329 1330 /* 1331 * Alternative replacement code is just template code which is 1332 * sometimes copied to the original instruction. For now, don't 1333 * annotate it. (In the future we might consider annotating the 1334 * original instruction if/when it ever makes sense to do so.) 1335 */ 1336 if (!strcmp(insn->sec->name, ".altinstr_replacement")) 1337 return; 1338 1339 if (sym->static_call_tramp) { 1340 list_add_tail(&insn->call_node, &file->static_call_list); 1341 return; 1342 } 1343 1344 if (sym->retpoline_thunk) { 1345 list_add_tail(&insn->call_node, &file->retpoline_call_list); 1346 return; 1347 } 1348 1349 /* 1350 * Many compilers cannot disable KCOV or sanitizer calls with a function 1351 * attribute so they need a little help, NOP out any such calls from 1352 * noinstr text. 1353 */ 1354 if (opts.hack_noinstr && insn->sec->noinstr && sym->profiling_func) { 1355 if (reloc) { 1356 reloc->type = R_NONE; 1357 elf_write_reloc(file->elf, reloc); 1358 } 1359 1360 elf_write_insn(file->elf, insn->sec, 1361 insn->offset, insn->len, 1362 sibling ? arch_ret_insn(insn->len) 1363 : arch_nop_insn(insn->len)); 1364 1365 insn->type = sibling ? INSN_RETURN : INSN_NOP; 1366 1367 if (sibling) { 1368 /* 1369 * We've replaced the tail-call JMP insn by two new 1370 * insn: RET; INT3, except we only have a single struct 1371 * insn here. Mark it retpoline_safe to avoid the SLS 1372 * warning, instead of adding another insn. 1373 */ 1374 insn->retpoline_safe = true; 1375 } 1376 1377 return; 1378 } 1379 1380 if (opts.mcount && sym->fentry) { 1381 if (sibling) 1382 WARN_FUNC("Tail call to __fentry__ !?!?", insn->sec, insn->offset); 1383 1384 if (reloc) { 1385 reloc->type = R_NONE; 1386 elf_write_reloc(file->elf, reloc); 1387 } 1388 1389 elf_write_insn(file->elf, insn->sec, 1390 insn->offset, insn->len, 1391 arch_nop_insn(insn->len)); 1392 1393 insn->type = INSN_NOP; 1394 1395 list_add_tail(&insn->call_node, &file->mcount_loc_list); 1396 return; 1397 } 1398 1399 if (insn->type == INSN_CALL && !insn->sec->init) 1400 list_add_tail(&insn->call_node, &file->call_list); 1401 1402 if (!sibling && dead_end_function(file, sym)) 1403 insn->dead_end = true; 1404 } 1405 1406 static void add_call_dest(struct objtool_file *file, struct instruction *insn, 1407 struct symbol *dest, bool sibling) 1408 { 1409 insn->call_dest = dest; 1410 if (!dest) 1411 return; 1412 1413 /* 1414 * Whatever stack impact regular CALLs have, should be undone 1415 * by the RETURN of the called function. 1416 * 1417 * Annotated intra-function calls retain the stack_ops but 1418 * are converted to JUMP, see read_intra_function_calls(). 1419 */ 1420 remove_insn_ops(insn); 1421 1422 annotate_call_site(file, insn, sibling); 1423 } 1424 1425 static void add_retpoline_call(struct objtool_file *file, struct instruction *insn) 1426 { 1427 /* 1428 * Retpoline calls/jumps are really dynamic calls/jumps in disguise, 1429 * so convert them accordingly. 1430 */ 1431 switch (insn->type) { 1432 case INSN_CALL: 1433 insn->type = INSN_CALL_DYNAMIC; 1434 break; 1435 case INSN_JUMP_UNCONDITIONAL: 1436 insn->type = INSN_JUMP_DYNAMIC; 1437 break; 1438 case INSN_JUMP_CONDITIONAL: 1439 insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL; 1440 break; 1441 default: 1442 return; 1443 } 1444 1445 insn->retpoline_safe = true; 1446 1447 /* 1448 * Whatever stack impact regular CALLs have, should be undone 1449 * by the RETURN of the called function. 1450 * 1451 * Annotated intra-function calls retain the stack_ops but 1452 * are converted to JUMP, see read_intra_function_calls(). 1453 */ 1454 remove_insn_ops(insn); 1455 1456 annotate_call_site(file, insn, false); 1457 } 1458 1459 static void add_return_call(struct objtool_file *file, struct instruction *insn, bool add) 1460 { 1461 /* 1462 * Return thunk tail calls are really just returns in disguise, 1463 * so convert them accordingly. 1464 */ 1465 insn->type = INSN_RETURN; 1466 insn->retpoline_safe = true; 1467 1468 if (add) 1469 list_add_tail(&insn->call_node, &file->return_thunk_list); 1470 } 1471 1472 static bool is_first_func_insn(struct objtool_file *file, 1473 struct instruction *insn, struct symbol *sym) 1474 { 1475 if (insn->offset == sym->offset) 1476 return true; 1477 1478 /* Allow direct CALL/JMP past ENDBR */ 1479 if (opts.ibt) { 1480 struct instruction *prev = prev_insn_same_sym(file, insn); 1481 1482 if (prev && prev->type == INSN_ENDBR && 1483 insn->offset == sym->offset + prev->len) 1484 return true; 1485 } 1486 1487 return false; 1488 } 1489 1490 /* 1491 * A sibling call is a tail-call to another symbol -- to differentiate from a 1492 * recursive tail-call which is to the same symbol. 1493 */ 1494 static bool jump_is_sibling_call(struct objtool_file *file, 1495 struct instruction *from, struct instruction *to) 1496 { 1497 struct symbol *fs = from->sym; 1498 struct symbol *ts = to->sym; 1499 1500 /* Not a sibling call if from/to a symbol hole */ 1501 if (!fs || !ts) 1502 return false; 1503 1504 /* Not a sibling call if not targeting the start of a symbol. */ 1505 if (!is_first_func_insn(file, to, ts)) 1506 return false; 1507 1508 /* Disallow sibling calls into STT_NOTYPE */ 1509 if (ts->type == STT_NOTYPE) 1510 return false; 1511 1512 /* Must not be self to be a sibling */ 1513 return fs->pfunc != ts->pfunc; 1514 } 1515 1516 /* 1517 * Find the destination instructions for all jumps. 1518 */ 1519 static int add_jump_destinations(struct objtool_file *file) 1520 { 1521 struct instruction *insn, *jump_dest; 1522 struct reloc *reloc; 1523 struct section *dest_sec; 1524 unsigned long dest_off; 1525 1526 for_each_insn(file, insn) { 1527 if (insn->jump_dest) { 1528 /* 1529 * handle_group_alt() may have previously set 1530 * 'jump_dest' for some alternatives. 1531 */ 1532 continue; 1533 } 1534 if (!is_static_jump(insn)) 1535 continue; 1536 1537 reloc = insn_reloc(file, insn); 1538 if (!reloc) { 1539 dest_sec = insn->sec; 1540 dest_off = arch_jump_destination(insn); 1541 } else if (reloc->sym->type == STT_SECTION) { 1542 dest_sec = reloc->sym->sec; 1543 dest_off = arch_dest_reloc_offset(reloc->addend); 1544 } else if (reloc->sym->retpoline_thunk) { 1545 add_retpoline_call(file, insn); 1546 continue; 1547 } else if (reloc->sym->return_thunk) { 1548 add_return_call(file, insn, true); 1549 continue; 1550 } else if (insn_func(insn)) { 1551 /* 1552 * External sibling call or internal sibling call with 1553 * STT_FUNC reloc. 1554 */ 1555 add_call_dest(file, insn, reloc->sym, true); 1556 continue; 1557 } else if (reloc->sym->sec->idx) { 1558 dest_sec = reloc->sym->sec; 1559 dest_off = reloc->sym->sym.st_value + 1560 arch_dest_reloc_offset(reloc->addend); 1561 } else { 1562 /* non-func asm code jumping to another file */ 1563 continue; 1564 } 1565 1566 jump_dest = find_insn(file, dest_sec, dest_off); 1567 if (!jump_dest) { 1568 struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off); 1569 1570 /* 1571 * This is a special case for zen_untrain_ret(). 1572 * It jumps to __x86_return_thunk(), but objtool 1573 * can't find the thunk's starting RET 1574 * instruction, because the RET is also in the 1575 * middle of another instruction. Objtool only 1576 * knows about the outer instruction. 1577 */ 1578 if (sym && sym->return_thunk) { 1579 add_return_call(file, insn, false); 1580 continue; 1581 } 1582 1583 WARN_FUNC("can't find jump dest instruction at %s+0x%lx", 1584 insn->sec, insn->offset, dest_sec->name, 1585 dest_off); 1586 return -1; 1587 } 1588 1589 /* 1590 * Cross-function jump. 1591 */ 1592 if (insn_func(insn) && insn_func(jump_dest) && 1593 insn_func(insn) != insn_func(jump_dest)) { 1594 1595 /* 1596 * For GCC 8+, create parent/child links for any cold 1597 * subfunctions. This is _mostly_ redundant with a 1598 * similar initialization in read_symbols(). 1599 * 1600 * If a function has aliases, we want the *first* such 1601 * function in the symbol table to be the subfunction's 1602 * parent. In that case we overwrite the 1603 * initialization done in read_symbols(). 1604 * 1605 * However this code can't completely replace the 1606 * read_symbols() code because this doesn't detect the 1607 * case where the parent function's only reference to a 1608 * subfunction is through a jump table. 1609 */ 1610 if (!strstr(insn_func(insn)->name, ".cold") && 1611 strstr(insn_func(jump_dest)->name, ".cold")) { 1612 insn_func(insn)->cfunc = insn_func(jump_dest); 1613 insn_func(jump_dest)->pfunc = insn_func(insn); 1614 } 1615 } 1616 1617 if (jump_is_sibling_call(file, insn, jump_dest)) { 1618 /* 1619 * Internal sibling call without reloc or with 1620 * STT_SECTION reloc. 1621 */ 1622 add_call_dest(file, insn, insn_func(jump_dest), true); 1623 continue; 1624 } 1625 1626 insn->jump_dest = jump_dest; 1627 } 1628 1629 return 0; 1630 } 1631 1632 static struct symbol *find_call_destination(struct section *sec, unsigned long offset) 1633 { 1634 struct symbol *call_dest; 1635 1636 call_dest = find_func_by_offset(sec, offset); 1637 if (!call_dest) 1638 call_dest = find_symbol_by_offset(sec, offset); 1639 1640 return call_dest; 1641 } 1642 1643 /* 1644 * Find the destination instructions for all calls. 1645 */ 1646 static int add_call_destinations(struct objtool_file *file) 1647 { 1648 struct instruction *insn; 1649 unsigned long dest_off; 1650 struct symbol *dest; 1651 struct reloc *reloc; 1652 1653 for_each_insn(file, insn) { 1654 if (insn->type != INSN_CALL) 1655 continue; 1656 1657 reloc = insn_reloc(file, insn); 1658 if (!reloc) { 1659 dest_off = arch_jump_destination(insn); 1660 dest = find_call_destination(insn->sec, dest_off); 1661 1662 add_call_dest(file, insn, dest, false); 1663 1664 if (insn->ignore) 1665 continue; 1666 1667 if (!insn->call_dest) { 1668 WARN_FUNC("unannotated intra-function call", insn->sec, insn->offset); 1669 return -1; 1670 } 1671 1672 if (insn_func(insn) && insn->call_dest->type != STT_FUNC) { 1673 WARN_FUNC("unsupported call to non-function", 1674 insn->sec, insn->offset); 1675 return -1; 1676 } 1677 1678 } else if (reloc->sym->type == STT_SECTION) { 1679 dest_off = arch_dest_reloc_offset(reloc->addend); 1680 dest = find_call_destination(reloc->sym->sec, dest_off); 1681 if (!dest) { 1682 WARN_FUNC("can't find call dest symbol at %s+0x%lx", 1683 insn->sec, insn->offset, 1684 reloc->sym->sec->name, 1685 dest_off); 1686 return -1; 1687 } 1688 1689 add_call_dest(file, insn, dest, false); 1690 1691 } else if (reloc->sym->retpoline_thunk) { 1692 add_retpoline_call(file, insn); 1693 1694 } else 1695 add_call_dest(file, insn, reloc->sym, false); 1696 } 1697 1698 return 0; 1699 } 1700 1701 /* 1702 * The .alternatives section requires some extra special care over and above 1703 * other special sections because alternatives are patched in place. 1704 */ 1705 static int handle_group_alt(struct objtool_file *file, 1706 struct special_alt *special_alt, 1707 struct instruction *orig_insn, 1708 struct instruction **new_insn) 1709 { 1710 struct instruction *last_orig_insn, *last_new_insn = NULL, *insn, *nop = NULL; 1711 struct alt_group *orig_alt_group, *new_alt_group; 1712 unsigned long dest_off; 1713 1714 1715 orig_alt_group = malloc(sizeof(*orig_alt_group)); 1716 if (!orig_alt_group) { 1717 WARN("malloc failed"); 1718 return -1; 1719 } 1720 orig_alt_group->cfi = calloc(special_alt->orig_len, 1721 sizeof(struct cfi_state *)); 1722 if (!orig_alt_group->cfi) { 1723 WARN("calloc failed"); 1724 return -1; 1725 } 1726 1727 last_orig_insn = NULL; 1728 insn = orig_insn; 1729 sec_for_each_insn_from(file, insn) { 1730 if (insn->offset >= special_alt->orig_off + special_alt->orig_len) 1731 break; 1732 1733 insn->alt_group = orig_alt_group; 1734 last_orig_insn = insn; 1735 } 1736 orig_alt_group->orig_group = NULL; 1737 orig_alt_group->first_insn = orig_insn; 1738 orig_alt_group->last_insn = last_orig_insn; 1739 1740 1741 new_alt_group = malloc(sizeof(*new_alt_group)); 1742 if (!new_alt_group) { 1743 WARN("malloc failed"); 1744 return -1; 1745 } 1746 1747 if (special_alt->new_len < special_alt->orig_len) { 1748 /* 1749 * Insert a fake nop at the end to make the replacement 1750 * alt_group the same size as the original. This is needed to 1751 * allow propagate_alt_cfi() to do its magic. When the last 1752 * instruction affects the stack, the instruction after it (the 1753 * nop) will propagate the new state to the shared CFI array. 1754 */ 1755 nop = malloc(sizeof(*nop)); 1756 if (!nop) { 1757 WARN("malloc failed"); 1758 return -1; 1759 } 1760 memset(nop, 0, sizeof(*nop)); 1761 INIT_LIST_HEAD(&nop->alts); 1762 INIT_LIST_HEAD(&nop->stack_ops); 1763 1764 nop->sec = special_alt->new_sec; 1765 nop->offset = special_alt->new_off + special_alt->new_len; 1766 nop->len = special_alt->orig_len - special_alt->new_len; 1767 nop->type = INSN_NOP; 1768 nop->sym = orig_insn->sym; 1769 nop->alt_group = new_alt_group; 1770 nop->ignore = orig_insn->ignore_alts; 1771 } 1772 1773 if (!special_alt->new_len) { 1774 *new_insn = nop; 1775 goto end; 1776 } 1777 1778 insn = *new_insn; 1779 sec_for_each_insn_from(file, insn) { 1780 struct reloc *alt_reloc; 1781 1782 if (insn->offset >= special_alt->new_off + special_alt->new_len) 1783 break; 1784 1785 last_new_insn = insn; 1786 1787 insn->ignore = orig_insn->ignore_alts; 1788 insn->sym = orig_insn->sym; 1789 insn->alt_group = new_alt_group; 1790 1791 /* 1792 * Since alternative replacement code is copy/pasted by the 1793 * kernel after applying relocations, generally such code can't 1794 * have relative-address relocation references to outside the 1795 * .altinstr_replacement section, unless the arch's 1796 * alternatives code can adjust the relative offsets 1797 * accordingly. 1798 */ 1799 alt_reloc = insn_reloc(file, insn); 1800 if (alt_reloc && arch_pc_relative_reloc(alt_reloc) && 1801 !arch_support_alt_relocation(special_alt, insn, alt_reloc)) { 1802 1803 WARN_FUNC("unsupported relocation in alternatives section", 1804 insn->sec, insn->offset); 1805 return -1; 1806 } 1807 1808 if (!is_static_jump(insn)) 1809 continue; 1810 1811 if (!insn->immediate) 1812 continue; 1813 1814 dest_off = arch_jump_destination(insn); 1815 if (dest_off == special_alt->new_off + special_alt->new_len) { 1816 insn->jump_dest = next_insn_same_sec(file, last_orig_insn); 1817 if (!insn->jump_dest) { 1818 WARN_FUNC("can't find alternative jump destination", 1819 insn->sec, insn->offset); 1820 return -1; 1821 } 1822 } 1823 } 1824 1825 if (!last_new_insn) { 1826 WARN_FUNC("can't find last new alternative instruction", 1827 special_alt->new_sec, special_alt->new_off); 1828 return -1; 1829 } 1830 1831 if (nop) 1832 list_add(&nop->list, &last_new_insn->list); 1833 end: 1834 new_alt_group->orig_group = orig_alt_group; 1835 new_alt_group->first_insn = *new_insn; 1836 new_alt_group->last_insn = nop ? : last_new_insn; 1837 new_alt_group->cfi = orig_alt_group->cfi; 1838 return 0; 1839 } 1840 1841 /* 1842 * A jump table entry can either convert a nop to a jump or a jump to a nop. 1843 * If the original instruction is a jump, make the alt entry an effective nop 1844 * by just skipping the original instruction. 1845 */ 1846 static int handle_jump_alt(struct objtool_file *file, 1847 struct special_alt *special_alt, 1848 struct instruction *orig_insn, 1849 struct instruction **new_insn) 1850 { 1851 if (orig_insn->type != INSN_JUMP_UNCONDITIONAL && 1852 orig_insn->type != INSN_NOP) { 1853 1854 WARN_FUNC("unsupported instruction at jump label", 1855 orig_insn->sec, orig_insn->offset); 1856 return -1; 1857 } 1858 1859 if (opts.hack_jump_label && special_alt->key_addend & 2) { 1860 struct reloc *reloc = insn_reloc(file, orig_insn); 1861 1862 if (reloc) { 1863 reloc->type = R_NONE; 1864 elf_write_reloc(file->elf, reloc); 1865 } 1866 elf_write_insn(file->elf, orig_insn->sec, 1867 orig_insn->offset, orig_insn->len, 1868 arch_nop_insn(orig_insn->len)); 1869 orig_insn->type = INSN_NOP; 1870 } 1871 1872 if (orig_insn->type == INSN_NOP) { 1873 if (orig_insn->len == 2) 1874 file->jl_nop_short++; 1875 else 1876 file->jl_nop_long++; 1877 1878 return 0; 1879 } 1880 1881 if (orig_insn->len == 2) 1882 file->jl_short++; 1883 else 1884 file->jl_long++; 1885 1886 *new_insn = list_next_entry(orig_insn, list); 1887 return 0; 1888 } 1889 1890 /* 1891 * Read all the special sections which have alternate instructions which can be 1892 * patched in or redirected to at runtime. Each instruction having alternate 1893 * instruction(s) has them added to its insn->alts list, which will be 1894 * traversed in validate_branch(). 1895 */ 1896 static int add_special_section_alts(struct objtool_file *file) 1897 { 1898 struct list_head special_alts; 1899 struct instruction *orig_insn, *new_insn; 1900 struct special_alt *special_alt, *tmp; 1901 struct alternative *alt; 1902 int ret; 1903 1904 ret = special_get_alts(file->elf, &special_alts); 1905 if (ret) 1906 return ret; 1907 1908 list_for_each_entry_safe(special_alt, tmp, &special_alts, list) { 1909 1910 orig_insn = find_insn(file, special_alt->orig_sec, 1911 special_alt->orig_off); 1912 if (!orig_insn) { 1913 WARN_FUNC("special: can't find orig instruction", 1914 special_alt->orig_sec, special_alt->orig_off); 1915 ret = -1; 1916 goto out; 1917 } 1918 1919 new_insn = NULL; 1920 if (!special_alt->group || special_alt->new_len) { 1921 new_insn = find_insn(file, special_alt->new_sec, 1922 special_alt->new_off); 1923 if (!new_insn) { 1924 WARN_FUNC("special: can't find new instruction", 1925 special_alt->new_sec, 1926 special_alt->new_off); 1927 ret = -1; 1928 goto out; 1929 } 1930 } 1931 1932 if (special_alt->group) { 1933 if (!special_alt->orig_len) { 1934 WARN_FUNC("empty alternative entry", 1935 orig_insn->sec, orig_insn->offset); 1936 continue; 1937 } 1938 1939 ret = handle_group_alt(file, special_alt, orig_insn, 1940 &new_insn); 1941 if (ret) 1942 goto out; 1943 } else if (special_alt->jump_or_nop) { 1944 ret = handle_jump_alt(file, special_alt, orig_insn, 1945 &new_insn); 1946 if (ret) 1947 goto out; 1948 } 1949 1950 alt = malloc(sizeof(*alt)); 1951 if (!alt) { 1952 WARN("malloc failed"); 1953 ret = -1; 1954 goto out; 1955 } 1956 1957 alt->insn = new_insn; 1958 alt->skip_orig = special_alt->skip_orig; 1959 orig_insn->ignore_alts |= special_alt->skip_alt; 1960 list_add_tail(&alt->list, &orig_insn->alts); 1961 1962 list_del(&special_alt->list); 1963 free(special_alt); 1964 } 1965 1966 if (opts.stats) { 1967 printf("jl\\\tNOP\tJMP\n"); 1968 printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short); 1969 printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long); 1970 } 1971 1972 out: 1973 return ret; 1974 } 1975 1976 static int add_jump_table(struct objtool_file *file, struct instruction *insn, 1977 struct reloc *table) 1978 { 1979 struct reloc *reloc = table; 1980 struct instruction *dest_insn; 1981 struct alternative *alt; 1982 struct symbol *pfunc = insn_func(insn)->pfunc; 1983 unsigned int prev_offset = 0; 1984 1985 /* 1986 * Each @reloc is a switch table relocation which points to the target 1987 * instruction. 1988 */ 1989 list_for_each_entry_from(reloc, &table->sec->reloc_list, list) { 1990 1991 /* Check for the end of the table: */ 1992 if (reloc != table && reloc->jump_table_start) 1993 break; 1994 1995 /* Make sure the table entries are consecutive: */ 1996 if (prev_offset && reloc->offset != prev_offset + 8) 1997 break; 1998 1999 /* Detect function pointers from contiguous objects: */ 2000 if (reloc->sym->sec == pfunc->sec && 2001 reloc->addend == pfunc->offset) 2002 break; 2003 2004 dest_insn = find_insn(file, reloc->sym->sec, reloc->addend); 2005 if (!dest_insn) 2006 break; 2007 2008 /* Make sure the destination is in the same function: */ 2009 if (!insn_func(dest_insn) || insn_func(dest_insn)->pfunc != pfunc) 2010 break; 2011 2012 alt = malloc(sizeof(*alt)); 2013 if (!alt) { 2014 WARN("malloc failed"); 2015 return -1; 2016 } 2017 2018 alt->insn = dest_insn; 2019 list_add_tail(&alt->list, &insn->alts); 2020 prev_offset = reloc->offset; 2021 } 2022 2023 if (!prev_offset) { 2024 WARN_FUNC("can't find switch jump table", 2025 insn->sec, insn->offset); 2026 return -1; 2027 } 2028 2029 return 0; 2030 } 2031 2032 /* 2033 * find_jump_table() - Given a dynamic jump, find the switch jump table 2034 * associated with it. 2035 */ 2036 static struct reloc *find_jump_table(struct objtool_file *file, 2037 struct symbol *func, 2038 struct instruction *insn) 2039 { 2040 struct reloc *table_reloc; 2041 struct instruction *dest_insn, *orig_insn = insn; 2042 2043 /* 2044 * Backward search using the @first_jump_src links, these help avoid 2045 * much of the 'in between' code. Which avoids us getting confused by 2046 * it. 2047 */ 2048 for (; 2049 insn && insn_func(insn) && insn_func(insn)->pfunc == func; 2050 insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) { 2051 2052 if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC) 2053 break; 2054 2055 /* allow small jumps within the range */ 2056 if (insn->type == INSN_JUMP_UNCONDITIONAL && 2057 insn->jump_dest && 2058 (insn->jump_dest->offset <= insn->offset || 2059 insn->jump_dest->offset > orig_insn->offset)) 2060 break; 2061 2062 table_reloc = arch_find_switch_table(file, insn); 2063 if (!table_reloc) 2064 continue; 2065 dest_insn = find_insn(file, table_reloc->sym->sec, table_reloc->addend); 2066 if (!dest_insn || !insn_func(dest_insn) || insn_func(dest_insn)->pfunc != func) 2067 continue; 2068 2069 return table_reloc; 2070 } 2071 2072 return NULL; 2073 } 2074 2075 /* 2076 * First pass: Mark the head of each jump table so that in the next pass, 2077 * we know when a given jump table ends and the next one starts. 2078 */ 2079 static void mark_func_jump_tables(struct objtool_file *file, 2080 struct symbol *func) 2081 { 2082 struct instruction *insn, *last = NULL; 2083 struct reloc *reloc; 2084 2085 func_for_each_insn(file, func, insn) { 2086 if (!last) 2087 last = insn; 2088 2089 /* 2090 * Store back-pointers for unconditional forward jumps such 2091 * that find_jump_table() can back-track using those and 2092 * avoid some potentially confusing code. 2093 */ 2094 if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest && 2095 insn->offset > last->offset && 2096 insn->jump_dest->offset > insn->offset && 2097 !insn->jump_dest->first_jump_src) { 2098 2099 insn->jump_dest->first_jump_src = insn; 2100 last = insn->jump_dest; 2101 } 2102 2103 if (insn->type != INSN_JUMP_DYNAMIC) 2104 continue; 2105 2106 reloc = find_jump_table(file, func, insn); 2107 if (reloc) { 2108 reloc->jump_table_start = true; 2109 insn->jump_table = reloc; 2110 } 2111 } 2112 } 2113 2114 static int add_func_jump_tables(struct objtool_file *file, 2115 struct symbol *func) 2116 { 2117 struct instruction *insn; 2118 int ret; 2119 2120 func_for_each_insn(file, func, insn) { 2121 if (!insn->jump_table) 2122 continue; 2123 2124 ret = add_jump_table(file, insn, insn->jump_table); 2125 if (ret) 2126 return ret; 2127 } 2128 2129 return 0; 2130 } 2131 2132 /* 2133 * For some switch statements, gcc generates a jump table in the .rodata 2134 * section which contains a list of addresses within the function to jump to. 2135 * This finds these jump tables and adds them to the insn->alts lists. 2136 */ 2137 static int add_jump_table_alts(struct objtool_file *file) 2138 { 2139 struct section *sec; 2140 struct symbol *func; 2141 int ret; 2142 2143 if (!file->rodata) 2144 return 0; 2145 2146 for_each_sec(file, sec) { 2147 list_for_each_entry(func, &sec->symbol_list, list) { 2148 if (func->type != STT_FUNC) 2149 continue; 2150 2151 mark_func_jump_tables(file, func); 2152 ret = add_func_jump_tables(file, func); 2153 if (ret) 2154 return ret; 2155 } 2156 } 2157 2158 return 0; 2159 } 2160 2161 static void set_func_state(struct cfi_state *state) 2162 { 2163 state->cfa = initial_func_cfi.cfa; 2164 memcpy(&state->regs, &initial_func_cfi.regs, 2165 CFI_NUM_REGS * sizeof(struct cfi_reg)); 2166 state->stack_size = initial_func_cfi.cfa.offset; 2167 } 2168 2169 static int read_unwind_hints(struct objtool_file *file) 2170 { 2171 struct cfi_state cfi = init_cfi; 2172 struct section *sec, *relocsec; 2173 struct unwind_hint *hint; 2174 struct instruction *insn; 2175 struct reloc *reloc; 2176 int i; 2177 2178 sec = find_section_by_name(file->elf, ".discard.unwind_hints"); 2179 if (!sec) 2180 return 0; 2181 2182 relocsec = sec->reloc; 2183 if (!relocsec) { 2184 WARN("missing .rela.discard.unwind_hints section"); 2185 return -1; 2186 } 2187 2188 if (sec->sh.sh_size % sizeof(struct unwind_hint)) { 2189 WARN("struct unwind_hint size mismatch"); 2190 return -1; 2191 } 2192 2193 file->hints = true; 2194 2195 for (i = 0; i < sec->sh.sh_size / sizeof(struct unwind_hint); i++) { 2196 hint = (struct unwind_hint *)sec->data->d_buf + i; 2197 2198 reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint)); 2199 if (!reloc) { 2200 WARN("can't find reloc for unwind_hints[%d]", i); 2201 return -1; 2202 } 2203 2204 insn = find_insn(file, reloc->sym->sec, reloc->addend); 2205 if (!insn) { 2206 WARN("can't find insn for unwind_hints[%d]", i); 2207 return -1; 2208 } 2209 2210 insn->hint = true; 2211 2212 if (hint->type == UNWIND_HINT_TYPE_SAVE) { 2213 insn->hint = false; 2214 insn->save = true; 2215 continue; 2216 } 2217 2218 if (hint->type == UNWIND_HINT_TYPE_RESTORE) { 2219 insn->restore = true; 2220 continue; 2221 } 2222 2223 if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) { 2224 struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset); 2225 2226 if (sym && sym->bind == STB_GLOBAL) { 2227 if (opts.ibt && insn->type != INSN_ENDBR && !insn->noendbr) { 2228 WARN_FUNC("UNWIND_HINT_IRET_REGS without ENDBR", 2229 insn->sec, insn->offset); 2230 } 2231 2232 insn->entry = 1; 2233 } 2234 } 2235 2236 if (hint->type == UNWIND_HINT_TYPE_ENTRY) { 2237 hint->type = UNWIND_HINT_TYPE_CALL; 2238 insn->entry = 1; 2239 } 2240 2241 if (hint->type == UNWIND_HINT_TYPE_FUNC) { 2242 insn->cfi = &func_cfi; 2243 continue; 2244 } 2245 2246 if (insn->cfi) 2247 cfi = *(insn->cfi); 2248 2249 if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) { 2250 WARN_FUNC("unsupported unwind_hint sp base reg %d", 2251 insn->sec, insn->offset, hint->sp_reg); 2252 return -1; 2253 } 2254 2255 cfi.cfa.offset = bswap_if_needed(hint->sp_offset); 2256 cfi.type = hint->type; 2257 cfi.end = hint->end; 2258 2259 insn->cfi = cfi_hash_find_or_add(&cfi); 2260 } 2261 2262 return 0; 2263 } 2264 2265 static int read_noendbr_hints(struct objtool_file *file) 2266 { 2267 struct section *sec; 2268 struct instruction *insn; 2269 struct reloc *reloc; 2270 2271 sec = find_section_by_name(file->elf, ".rela.discard.noendbr"); 2272 if (!sec) 2273 return 0; 2274 2275 list_for_each_entry(reloc, &sec->reloc_list, list) { 2276 insn = find_insn(file, reloc->sym->sec, reloc->sym->offset + reloc->addend); 2277 if (!insn) { 2278 WARN("bad .discard.noendbr entry"); 2279 return -1; 2280 } 2281 2282 insn->noendbr = 1; 2283 } 2284 2285 return 0; 2286 } 2287 2288 static int read_retpoline_hints(struct objtool_file *file) 2289 { 2290 struct section *sec; 2291 struct instruction *insn; 2292 struct reloc *reloc; 2293 2294 sec = find_section_by_name(file->elf, ".rela.discard.retpoline_safe"); 2295 if (!sec) 2296 return 0; 2297 2298 list_for_each_entry(reloc, &sec->reloc_list, list) { 2299 if (reloc->sym->type != STT_SECTION) { 2300 WARN("unexpected relocation symbol type in %s", sec->name); 2301 return -1; 2302 } 2303 2304 insn = find_insn(file, reloc->sym->sec, reloc->addend); 2305 if (!insn) { 2306 WARN("bad .discard.retpoline_safe entry"); 2307 return -1; 2308 } 2309 2310 if (insn->type != INSN_JUMP_DYNAMIC && 2311 insn->type != INSN_CALL_DYNAMIC && 2312 insn->type != INSN_RETURN && 2313 insn->type != INSN_NOP) { 2314 WARN_FUNC("retpoline_safe hint not an indirect jump/call/ret/nop", 2315 insn->sec, insn->offset); 2316 return -1; 2317 } 2318 2319 insn->retpoline_safe = true; 2320 } 2321 2322 return 0; 2323 } 2324 2325 static int read_instr_hints(struct objtool_file *file) 2326 { 2327 struct section *sec; 2328 struct instruction *insn; 2329 struct reloc *reloc; 2330 2331 sec = find_section_by_name(file->elf, ".rela.discard.instr_end"); 2332 if (!sec) 2333 return 0; 2334 2335 list_for_each_entry(reloc, &sec->reloc_list, list) { 2336 if (reloc->sym->type != STT_SECTION) { 2337 WARN("unexpected relocation symbol type in %s", sec->name); 2338 return -1; 2339 } 2340 2341 insn = find_insn(file, reloc->sym->sec, reloc->addend); 2342 if (!insn) { 2343 WARN("bad .discard.instr_end entry"); 2344 return -1; 2345 } 2346 2347 insn->instr--; 2348 } 2349 2350 sec = find_section_by_name(file->elf, ".rela.discard.instr_begin"); 2351 if (!sec) 2352 return 0; 2353 2354 list_for_each_entry(reloc, &sec->reloc_list, list) { 2355 if (reloc->sym->type != STT_SECTION) { 2356 WARN("unexpected relocation symbol type in %s", sec->name); 2357 return -1; 2358 } 2359 2360 insn = find_insn(file, reloc->sym->sec, reloc->addend); 2361 if (!insn) { 2362 WARN("bad .discard.instr_begin entry"); 2363 return -1; 2364 } 2365 2366 insn->instr++; 2367 } 2368 2369 return 0; 2370 } 2371 2372 static int read_intra_function_calls(struct objtool_file *file) 2373 { 2374 struct instruction *insn; 2375 struct section *sec; 2376 struct reloc *reloc; 2377 2378 sec = find_section_by_name(file->elf, ".rela.discard.intra_function_calls"); 2379 if (!sec) 2380 return 0; 2381 2382 list_for_each_entry(reloc, &sec->reloc_list, list) { 2383 unsigned long dest_off; 2384 2385 if (reloc->sym->type != STT_SECTION) { 2386 WARN("unexpected relocation symbol type in %s", 2387 sec->name); 2388 return -1; 2389 } 2390 2391 insn = find_insn(file, reloc->sym->sec, reloc->addend); 2392 if (!insn) { 2393 WARN("bad .discard.intra_function_call entry"); 2394 return -1; 2395 } 2396 2397 if (insn->type != INSN_CALL) { 2398 WARN_FUNC("intra_function_call not a direct call", 2399 insn->sec, insn->offset); 2400 return -1; 2401 } 2402 2403 /* 2404 * Treat intra-function CALLs as JMPs, but with a stack_op. 2405 * See add_call_destinations(), which strips stack_ops from 2406 * normal CALLs. 2407 */ 2408 insn->type = INSN_JUMP_UNCONDITIONAL; 2409 2410 dest_off = arch_jump_destination(insn); 2411 insn->jump_dest = find_insn(file, insn->sec, dest_off); 2412 if (!insn->jump_dest) { 2413 WARN_FUNC("can't find call dest at %s+0x%lx", 2414 insn->sec, insn->offset, 2415 insn->sec->name, dest_off); 2416 return -1; 2417 } 2418 } 2419 2420 return 0; 2421 } 2422 2423 /* 2424 * Return true if name matches an instrumentation function, where calls to that 2425 * function from noinstr code can safely be removed, but compilers won't do so. 2426 */ 2427 static bool is_profiling_func(const char *name) 2428 { 2429 /* 2430 * Many compilers cannot disable KCOV with a function attribute. 2431 */ 2432 if (!strncmp(name, "__sanitizer_cov_", 16)) 2433 return true; 2434 2435 /* 2436 * Some compilers currently do not remove __tsan_func_entry/exit nor 2437 * __tsan_atomic_signal_fence (used for barrier instrumentation) with 2438 * the __no_sanitize_thread attribute, remove them. Once the kernel's 2439 * minimum Clang version is 14.0, this can be removed. 2440 */ 2441 if (!strncmp(name, "__tsan_func_", 12) || 2442 !strcmp(name, "__tsan_atomic_signal_fence")) 2443 return true; 2444 2445 return false; 2446 } 2447 2448 static int classify_symbols(struct objtool_file *file) 2449 { 2450 struct section *sec; 2451 struct symbol *func; 2452 2453 for_each_sec(file, sec) { 2454 list_for_each_entry(func, &sec->symbol_list, list) { 2455 if (func->bind != STB_GLOBAL) 2456 continue; 2457 2458 if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR, 2459 strlen(STATIC_CALL_TRAMP_PREFIX_STR))) 2460 func->static_call_tramp = true; 2461 2462 if (arch_is_retpoline(func)) 2463 func->retpoline_thunk = true; 2464 2465 if (arch_is_rethunk(func)) 2466 func->return_thunk = true; 2467 2468 if (!strcmp(func->name, "__fentry__")) 2469 func->fentry = true; 2470 2471 if (is_profiling_func(func->name)) 2472 func->profiling_func = true; 2473 } 2474 } 2475 2476 return 0; 2477 } 2478 2479 static void mark_rodata(struct objtool_file *file) 2480 { 2481 struct section *sec; 2482 bool found = false; 2483 2484 /* 2485 * Search for the following rodata sections, each of which can 2486 * potentially contain jump tables: 2487 * 2488 * - .rodata: can contain GCC switch tables 2489 * - .rodata.<func>: same, if -fdata-sections is being used 2490 * - .rodata..c_jump_table: contains C annotated jump tables 2491 * 2492 * .rodata.str1.* sections are ignored; they don't contain jump tables. 2493 */ 2494 for_each_sec(file, sec) { 2495 if (!strncmp(sec->name, ".rodata", 7) && 2496 !strstr(sec->name, ".str1.")) { 2497 sec->rodata = true; 2498 found = true; 2499 } 2500 } 2501 2502 file->rodata = found; 2503 } 2504 2505 static int decode_sections(struct objtool_file *file) 2506 { 2507 int ret; 2508 2509 mark_rodata(file); 2510 2511 ret = init_pv_ops(file); 2512 if (ret) 2513 return ret; 2514 2515 /* 2516 * Must be before add_{jump_call}_destination. 2517 */ 2518 ret = classify_symbols(file); 2519 if (ret) 2520 return ret; 2521 2522 ret = decode_instructions(file); 2523 if (ret) 2524 return ret; 2525 2526 add_ignores(file); 2527 add_uaccess_safe(file); 2528 2529 ret = add_ignore_alternatives(file); 2530 if (ret) 2531 return ret; 2532 2533 /* 2534 * Must be before read_unwind_hints() since that needs insn->noendbr. 2535 */ 2536 ret = read_noendbr_hints(file); 2537 if (ret) 2538 return ret; 2539 2540 /* 2541 * Must be before add_jump_destinations(), which depends on 'func' 2542 * being set for alternatives, to enable proper sibling call detection. 2543 */ 2544 ret = add_special_section_alts(file); 2545 if (ret) 2546 return ret; 2547 2548 ret = add_jump_destinations(file); 2549 if (ret) 2550 return ret; 2551 2552 /* 2553 * Must be before add_call_destination(); it changes INSN_CALL to 2554 * INSN_JUMP. 2555 */ 2556 ret = read_intra_function_calls(file); 2557 if (ret) 2558 return ret; 2559 2560 ret = add_call_destinations(file); 2561 if (ret) 2562 return ret; 2563 2564 /* 2565 * Must be after add_call_destinations() such that it can override 2566 * dead_end_function() marks. 2567 */ 2568 ret = add_dead_ends(file); 2569 if (ret) 2570 return ret; 2571 2572 ret = add_jump_table_alts(file); 2573 if (ret) 2574 return ret; 2575 2576 ret = read_unwind_hints(file); 2577 if (ret) 2578 return ret; 2579 2580 ret = read_retpoline_hints(file); 2581 if (ret) 2582 return ret; 2583 2584 ret = read_instr_hints(file); 2585 if (ret) 2586 return ret; 2587 2588 return 0; 2589 } 2590 2591 static bool is_fentry_call(struct instruction *insn) 2592 { 2593 if (insn->type == INSN_CALL && 2594 insn->call_dest && 2595 insn->call_dest->fentry) 2596 return true; 2597 2598 return false; 2599 } 2600 2601 static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state) 2602 { 2603 struct cfi_state *cfi = &state->cfi; 2604 int i; 2605 2606 if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap) 2607 return true; 2608 2609 if (cfi->cfa.offset != initial_func_cfi.cfa.offset) 2610 return true; 2611 2612 if (cfi->stack_size != initial_func_cfi.cfa.offset) 2613 return true; 2614 2615 for (i = 0; i < CFI_NUM_REGS; i++) { 2616 if (cfi->regs[i].base != initial_func_cfi.regs[i].base || 2617 cfi->regs[i].offset != initial_func_cfi.regs[i].offset) 2618 return true; 2619 } 2620 2621 return false; 2622 } 2623 2624 static bool check_reg_frame_pos(const struct cfi_reg *reg, 2625 int expected_offset) 2626 { 2627 return reg->base == CFI_CFA && 2628 reg->offset == expected_offset; 2629 } 2630 2631 static bool has_valid_stack_frame(struct insn_state *state) 2632 { 2633 struct cfi_state *cfi = &state->cfi; 2634 2635 if (cfi->cfa.base == CFI_BP && 2636 check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) && 2637 check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8)) 2638 return true; 2639 2640 if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP) 2641 return true; 2642 2643 return false; 2644 } 2645 2646 static int update_cfi_state_regs(struct instruction *insn, 2647 struct cfi_state *cfi, 2648 struct stack_op *op) 2649 { 2650 struct cfi_reg *cfa = &cfi->cfa; 2651 2652 if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT) 2653 return 0; 2654 2655 /* push */ 2656 if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF) 2657 cfa->offset += 8; 2658 2659 /* pop */ 2660 if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF) 2661 cfa->offset -= 8; 2662 2663 /* add immediate to sp */ 2664 if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD && 2665 op->dest.reg == CFI_SP && op->src.reg == CFI_SP) 2666 cfa->offset -= op->src.offset; 2667 2668 return 0; 2669 } 2670 2671 static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset) 2672 { 2673 if (arch_callee_saved_reg(reg) && 2674 cfi->regs[reg].base == CFI_UNDEFINED) { 2675 cfi->regs[reg].base = base; 2676 cfi->regs[reg].offset = offset; 2677 } 2678 } 2679 2680 static void restore_reg(struct cfi_state *cfi, unsigned char reg) 2681 { 2682 cfi->regs[reg].base = initial_func_cfi.regs[reg].base; 2683 cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset; 2684 } 2685 2686 /* 2687 * A note about DRAP stack alignment: 2688 * 2689 * GCC has the concept of a DRAP register, which is used to help keep track of 2690 * the stack pointer when aligning the stack. r10 or r13 is used as the DRAP 2691 * register. The typical DRAP pattern is: 2692 * 2693 * 4c 8d 54 24 08 lea 0x8(%rsp),%r10 2694 * 48 83 e4 c0 and $0xffffffffffffffc0,%rsp 2695 * 41 ff 72 f8 pushq -0x8(%r10) 2696 * 55 push %rbp 2697 * 48 89 e5 mov %rsp,%rbp 2698 * (more pushes) 2699 * 41 52 push %r10 2700 * ... 2701 * 41 5a pop %r10 2702 * (more pops) 2703 * 5d pop %rbp 2704 * 49 8d 62 f8 lea -0x8(%r10),%rsp 2705 * c3 retq 2706 * 2707 * There are some variations in the epilogues, like: 2708 * 2709 * 5b pop %rbx 2710 * 41 5a pop %r10 2711 * 41 5c pop %r12 2712 * 41 5d pop %r13 2713 * 41 5e pop %r14 2714 * c9 leaveq 2715 * 49 8d 62 f8 lea -0x8(%r10),%rsp 2716 * c3 retq 2717 * 2718 * and: 2719 * 2720 * 4c 8b 55 e8 mov -0x18(%rbp),%r10 2721 * 48 8b 5d e0 mov -0x20(%rbp),%rbx 2722 * 4c 8b 65 f0 mov -0x10(%rbp),%r12 2723 * 4c 8b 6d f8 mov -0x8(%rbp),%r13 2724 * c9 leaveq 2725 * 49 8d 62 f8 lea -0x8(%r10),%rsp 2726 * c3 retq 2727 * 2728 * Sometimes r13 is used as the DRAP register, in which case it's saved and 2729 * restored beforehand: 2730 * 2731 * 41 55 push %r13 2732 * 4c 8d 6c 24 10 lea 0x10(%rsp),%r13 2733 * 48 83 e4 f0 and $0xfffffffffffffff0,%rsp 2734 * ... 2735 * 49 8d 65 f0 lea -0x10(%r13),%rsp 2736 * 41 5d pop %r13 2737 * c3 retq 2738 */ 2739 static int update_cfi_state(struct instruction *insn, 2740 struct instruction *next_insn, 2741 struct cfi_state *cfi, struct stack_op *op) 2742 { 2743 struct cfi_reg *cfa = &cfi->cfa; 2744 struct cfi_reg *regs = cfi->regs; 2745 2746 /* stack operations don't make sense with an undefined CFA */ 2747 if (cfa->base == CFI_UNDEFINED) { 2748 if (insn_func(insn)) { 2749 WARN_FUNC("undefined stack state", insn->sec, insn->offset); 2750 return -1; 2751 } 2752 return 0; 2753 } 2754 2755 if (cfi->type == UNWIND_HINT_TYPE_REGS || 2756 cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL) 2757 return update_cfi_state_regs(insn, cfi, op); 2758 2759 switch (op->dest.type) { 2760 2761 case OP_DEST_REG: 2762 switch (op->src.type) { 2763 2764 case OP_SRC_REG: 2765 if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP && 2766 cfa->base == CFI_SP && 2767 check_reg_frame_pos(®s[CFI_BP], -cfa->offset)) { 2768 2769 /* mov %rsp, %rbp */ 2770 cfa->base = op->dest.reg; 2771 cfi->bp_scratch = false; 2772 } 2773 2774 else if (op->src.reg == CFI_SP && 2775 op->dest.reg == CFI_BP && cfi->drap) { 2776 2777 /* drap: mov %rsp, %rbp */ 2778 regs[CFI_BP].base = CFI_BP; 2779 regs[CFI_BP].offset = -cfi->stack_size; 2780 cfi->bp_scratch = false; 2781 } 2782 2783 else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) { 2784 2785 /* 2786 * mov %rsp, %reg 2787 * 2788 * This is needed for the rare case where GCC 2789 * does: 2790 * 2791 * mov %rsp, %rax 2792 * ... 2793 * mov %rax, %rsp 2794 */ 2795 cfi->vals[op->dest.reg].base = CFI_CFA; 2796 cfi->vals[op->dest.reg].offset = -cfi->stack_size; 2797 } 2798 2799 else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP && 2800 (cfa->base == CFI_BP || cfa->base == cfi->drap_reg)) { 2801 2802 /* 2803 * mov %rbp, %rsp 2804 * 2805 * Restore the original stack pointer (Clang). 2806 */ 2807 cfi->stack_size = -cfi->regs[CFI_BP].offset; 2808 } 2809 2810 else if (op->dest.reg == cfa->base) { 2811 2812 /* mov %reg, %rsp */ 2813 if (cfa->base == CFI_SP && 2814 cfi->vals[op->src.reg].base == CFI_CFA) { 2815 2816 /* 2817 * This is needed for the rare case 2818 * where GCC does something dumb like: 2819 * 2820 * lea 0x8(%rsp), %rcx 2821 * ... 2822 * mov %rcx, %rsp 2823 */ 2824 cfa->offset = -cfi->vals[op->src.reg].offset; 2825 cfi->stack_size = cfa->offset; 2826 2827 } else if (cfa->base == CFI_SP && 2828 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT && 2829 cfi->vals[op->src.reg].offset == cfa->offset) { 2830 2831 /* 2832 * Stack swizzle: 2833 * 2834 * 1: mov %rsp, (%[tos]) 2835 * 2: mov %[tos], %rsp 2836 * ... 2837 * 3: pop %rsp 2838 * 2839 * Where: 2840 * 2841 * 1 - places a pointer to the previous 2842 * stack at the Top-of-Stack of the 2843 * new stack. 2844 * 2845 * 2 - switches to the new stack. 2846 * 2847 * 3 - pops the Top-of-Stack to restore 2848 * the original stack. 2849 * 2850 * Note: we set base to SP_INDIRECT 2851 * here and preserve offset. Therefore 2852 * when the unwinder reaches ToS it 2853 * will dereference SP and then add the 2854 * offset to find the next frame, IOW: 2855 * (%rsp) + offset. 2856 */ 2857 cfa->base = CFI_SP_INDIRECT; 2858 2859 } else { 2860 cfa->base = CFI_UNDEFINED; 2861 cfa->offset = 0; 2862 } 2863 } 2864 2865 else if (op->dest.reg == CFI_SP && 2866 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT && 2867 cfi->vals[op->src.reg].offset == cfa->offset) { 2868 2869 /* 2870 * The same stack swizzle case 2) as above. But 2871 * because we can't change cfa->base, case 3) 2872 * will become a regular POP. Pretend we're a 2873 * PUSH so things don't go unbalanced. 2874 */ 2875 cfi->stack_size += 8; 2876 } 2877 2878 2879 break; 2880 2881 case OP_SRC_ADD: 2882 if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) { 2883 2884 /* add imm, %rsp */ 2885 cfi->stack_size -= op->src.offset; 2886 if (cfa->base == CFI_SP) 2887 cfa->offset -= op->src.offset; 2888 break; 2889 } 2890 2891 if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) { 2892 2893 /* lea disp(%rbp), %rsp */ 2894 cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset); 2895 break; 2896 } 2897 2898 if (!cfi->drap && op->src.reg == CFI_SP && 2899 op->dest.reg == CFI_BP && cfa->base == CFI_SP && 2900 check_reg_frame_pos(®s[CFI_BP], -cfa->offset + op->src.offset)) { 2901 2902 /* lea disp(%rsp), %rbp */ 2903 cfa->base = CFI_BP; 2904 cfa->offset -= op->src.offset; 2905 cfi->bp_scratch = false; 2906 break; 2907 } 2908 2909 if (op->src.reg == CFI_SP && cfa->base == CFI_SP) { 2910 2911 /* drap: lea disp(%rsp), %drap */ 2912 cfi->drap_reg = op->dest.reg; 2913 2914 /* 2915 * lea disp(%rsp), %reg 2916 * 2917 * This is needed for the rare case where GCC 2918 * does something dumb like: 2919 * 2920 * lea 0x8(%rsp), %rcx 2921 * ... 2922 * mov %rcx, %rsp 2923 */ 2924 cfi->vals[op->dest.reg].base = CFI_CFA; 2925 cfi->vals[op->dest.reg].offset = \ 2926 -cfi->stack_size + op->src.offset; 2927 2928 break; 2929 } 2930 2931 if (cfi->drap && op->dest.reg == CFI_SP && 2932 op->src.reg == cfi->drap_reg) { 2933 2934 /* drap: lea disp(%drap), %rsp */ 2935 cfa->base = CFI_SP; 2936 cfa->offset = cfi->stack_size = -op->src.offset; 2937 cfi->drap_reg = CFI_UNDEFINED; 2938 cfi->drap = false; 2939 break; 2940 } 2941 2942 if (op->dest.reg == cfi->cfa.base && !(next_insn && next_insn->hint)) { 2943 WARN_FUNC("unsupported stack register modification", 2944 insn->sec, insn->offset); 2945 return -1; 2946 } 2947 2948 break; 2949 2950 case OP_SRC_AND: 2951 if (op->dest.reg != CFI_SP || 2952 (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) || 2953 (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) { 2954 WARN_FUNC("unsupported stack pointer realignment", 2955 insn->sec, insn->offset); 2956 return -1; 2957 } 2958 2959 if (cfi->drap_reg != CFI_UNDEFINED) { 2960 /* drap: and imm, %rsp */ 2961 cfa->base = cfi->drap_reg; 2962 cfa->offset = cfi->stack_size = 0; 2963 cfi->drap = true; 2964 } 2965 2966 /* 2967 * Older versions of GCC (4.8ish) realign the stack 2968 * without DRAP, with a frame pointer. 2969 */ 2970 2971 break; 2972 2973 case OP_SRC_POP: 2974 case OP_SRC_POPF: 2975 if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) { 2976 2977 /* pop %rsp; # restore from a stack swizzle */ 2978 cfa->base = CFI_SP; 2979 break; 2980 } 2981 2982 if (!cfi->drap && op->dest.reg == cfa->base) { 2983 2984 /* pop %rbp */ 2985 cfa->base = CFI_SP; 2986 } 2987 2988 if (cfi->drap && cfa->base == CFI_BP_INDIRECT && 2989 op->dest.reg == cfi->drap_reg && 2990 cfi->drap_offset == -cfi->stack_size) { 2991 2992 /* drap: pop %drap */ 2993 cfa->base = cfi->drap_reg; 2994 cfa->offset = 0; 2995 cfi->drap_offset = -1; 2996 2997 } else if (cfi->stack_size == -regs[op->dest.reg].offset) { 2998 2999 /* pop %reg */ 3000 restore_reg(cfi, op->dest.reg); 3001 } 3002 3003 cfi->stack_size -= 8; 3004 if (cfa->base == CFI_SP) 3005 cfa->offset -= 8; 3006 3007 break; 3008 3009 case OP_SRC_REG_INDIRECT: 3010 if (!cfi->drap && op->dest.reg == cfa->base && 3011 op->dest.reg == CFI_BP) { 3012 3013 /* mov disp(%rsp), %rbp */ 3014 cfa->base = CFI_SP; 3015 cfa->offset = cfi->stack_size; 3016 } 3017 3018 if (cfi->drap && op->src.reg == CFI_BP && 3019 op->src.offset == cfi->drap_offset) { 3020 3021 /* drap: mov disp(%rbp), %drap */ 3022 cfa->base = cfi->drap_reg; 3023 cfa->offset = 0; 3024 cfi->drap_offset = -1; 3025 } 3026 3027 if (cfi->drap && op->src.reg == CFI_BP && 3028 op->src.offset == regs[op->dest.reg].offset) { 3029 3030 /* drap: mov disp(%rbp), %reg */ 3031 restore_reg(cfi, op->dest.reg); 3032 3033 } else if (op->src.reg == cfa->base && 3034 op->src.offset == regs[op->dest.reg].offset + cfa->offset) { 3035 3036 /* mov disp(%rbp), %reg */ 3037 /* mov disp(%rsp), %reg */ 3038 restore_reg(cfi, op->dest.reg); 3039 3040 } else if (op->src.reg == CFI_SP && 3041 op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) { 3042 3043 /* mov disp(%rsp), %reg */ 3044 restore_reg(cfi, op->dest.reg); 3045 } 3046 3047 break; 3048 3049 default: 3050 WARN_FUNC("unknown stack-related instruction", 3051 insn->sec, insn->offset); 3052 return -1; 3053 } 3054 3055 break; 3056 3057 case OP_DEST_PUSH: 3058 case OP_DEST_PUSHF: 3059 cfi->stack_size += 8; 3060 if (cfa->base == CFI_SP) 3061 cfa->offset += 8; 3062 3063 if (op->src.type != OP_SRC_REG) 3064 break; 3065 3066 if (cfi->drap) { 3067 if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) { 3068 3069 /* drap: push %drap */ 3070 cfa->base = CFI_BP_INDIRECT; 3071 cfa->offset = -cfi->stack_size; 3072 3073 /* save drap so we know when to restore it */ 3074 cfi->drap_offset = -cfi->stack_size; 3075 3076 } else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) { 3077 3078 /* drap: push %rbp */ 3079 cfi->stack_size = 0; 3080 3081 } else { 3082 3083 /* drap: push %reg */ 3084 save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size); 3085 } 3086 3087 } else { 3088 3089 /* push %reg */ 3090 save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size); 3091 } 3092 3093 /* detect when asm code uses rbp as a scratch register */ 3094 if (opts.stackval && insn_func(insn) && op->src.reg == CFI_BP && 3095 cfa->base != CFI_BP) 3096 cfi->bp_scratch = true; 3097 break; 3098 3099 case OP_DEST_REG_INDIRECT: 3100 3101 if (cfi->drap) { 3102 if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) { 3103 3104 /* drap: mov %drap, disp(%rbp) */ 3105 cfa->base = CFI_BP_INDIRECT; 3106 cfa->offset = op->dest.offset; 3107 3108 /* save drap offset so we know when to restore it */ 3109 cfi->drap_offset = op->dest.offset; 3110 } else { 3111 3112 /* drap: mov reg, disp(%rbp) */ 3113 save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset); 3114 } 3115 3116 } else if (op->dest.reg == cfa->base) { 3117 3118 /* mov reg, disp(%rbp) */ 3119 /* mov reg, disp(%rsp) */ 3120 save_reg(cfi, op->src.reg, CFI_CFA, 3121 op->dest.offset - cfi->cfa.offset); 3122 3123 } else if (op->dest.reg == CFI_SP) { 3124 3125 /* mov reg, disp(%rsp) */ 3126 save_reg(cfi, op->src.reg, CFI_CFA, 3127 op->dest.offset - cfi->stack_size); 3128 3129 } else if (op->src.reg == CFI_SP && op->dest.offset == 0) { 3130 3131 /* mov %rsp, (%reg); # setup a stack swizzle. */ 3132 cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT; 3133 cfi->vals[op->dest.reg].offset = cfa->offset; 3134 } 3135 3136 break; 3137 3138 case OP_DEST_MEM: 3139 if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) { 3140 WARN_FUNC("unknown stack-related memory operation", 3141 insn->sec, insn->offset); 3142 return -1; 3143 } 3144 3145 /* pop mem */ 3146 cfi->stack_size -= 8; 3147 if (cfa->base == CFI_SP) 3148 cfa->offset -= 8; 3149 3150 break; 3151 3152 default: 3153 WARN_FUNC("unknown stack-related instruction", 3154 insn->sec, insn->offset); 3155 return -1; 3156 } 3157 3158 return 0; 3159 } 3160 3161 /* 3162 * The stack layouts of alternatives instructions can sometimes diverge when 3163 * they have stack modifications. That's fine as long as the potential stack 3164 * layouts don't conflict at any given potential instruction boundary. 3165 * 3166 * Flatten the CFIs of the different alternative code streams (both original 3167 * and replacement) into a single shared CFI array which can be used to detect 3168 * conflicts and nicely feed a linear array of ORC entries to the unwinder. 3169 */ 3170 static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn) 3171 { 3172 struct cfi_state **alt_cfi; 3173 int group_off; 3174 3175 if (!insn->alt_group) 3176 return 0; 3177 3178 if (!insn->cfi) { 3179 WARN("CFI missing"); 3180 return -1; 3181 } 3182 3183 alt_cfi = insn->alt_group->cfi; 3184 group_off = insn->offset - insn->alt_group->first_insn->offset; 3185 3186 if (!alt_cfi[group_off]) { 3187 alt_cfi[group_off] = insn->cfi; 3188 } else { 3189 if (cficmp(alt_cfi[group_off], insn->cfi)) { 3190 WARN_FUNC("stack layout conflict in alternatives", 3191 insn->sec, insn->offset); 3192 return -1; 3193 } 3194 } 3195 3196 return 0; 3197 } 3198 3199 static int handle_insn_ops(struct instruction *insn, 3200 struct instruction *next_insn, 3201 struct insn_state *state) 3202 { 3203 struct stack_op *op; 3204 3205 list_for_each_entry(op, &insn->stack_ops, list) { 3206 3207 if (update_cfi_state(insn, next_insn, &state->cfi, op)) 3208 return 1; 3209 3210 if (!insn->alt_group) 3211 continue; 3212 3213 if (op->dest.type == OP_DEST_PUSHF) { 3214 if (!state->uaccess_stack) { 3215 state->uaccess_stack = 1; 3216 } else if (state->uaccess_stack >> 31) { 3217 WARN_FUNC("PUSHF stack exhausted", 3218 insn->sec, insn->offset); 3219 return 1; 3220 } 3221 state->uaccess_stack <<= 1; 3222 state->uaccess_stack |= state->uaccess; 3223 } 3224 3225 if (op->src.type == OP_SRC_POPF) { 3226 if (state->uaccess_stack) { 3227 state->uaccess = state->uaccess_stack & 1; 3228 state->uaccess_stack >>= 1; 3229 if (state->uaccess_stack == 1) 3230 state->uaccess_stack = 0; 3231 } 3232 } 3233 } 3234 3235 return 0; 3236 } 3237 3238 static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2) 3239 { 3240 struct cfi_state *cfi1 = insn->cfi; 3241 int i; 3242 3243 if (!cfi1) { 3244 WARN("CFI missing"); 3245 return false; 3246 } 3247 3248 if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) { 3249 3250 WARN_FUNC("stack state mismatch: cfa1=%d%+d cfa2=%d%+d", 3251 insn->sec, insn->offset, 3252 cfi1->cfa.base, cfi1->cfa.offset, 3253 cfi2->cfa.base, cfi2->cfa.offset); 3254 3255 } else if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) { 3256 for (i = 0; i < CFI_NUM_REGS; i++) { 3257 if (!memcmp(&cfi1->regs[i], &cfi2->regs[i], 3258 sizeof(struct cfi_reg))) 3259 continue; 3260 3261 WARN_FUNC("stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d", 3262 insn->sec, insn->offset, 3263 i, cfi1->regs[i].base, cfi1->regs[i].offset, 3264 i, cfi2->regs[i].base, cfi2->regs[i].offset); 3265 break; 3266 } 3267 3268 } else if (cfi1->type != cfi2->type) { 3269 3270 WARN_FUNC("stack state mismatch: type1=%d type2=%d", 3271 insn->sec, insn->offset, cfi1->type, cfi2->type); 3272 3273 } else if (cfi1->drap != cfi2->drap || 3274 (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) || 3275 (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) { 3276 3277 WARN_FUNC("stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)", 3278 insn->sec, insn->offset, 3279 cfi1->drap, cfi1->drap_reg, cfi1->drap_offset, 3280 cfi2->drap, cfi2->drap_reg, cfi2->drap_offset); 3281 3282 } else 3283 return true; 3284 3285 return false; 3286 } 3287 3288 static inline bool func_uaccess_safe(struct symbol *func) 3289 { 3290 if (func) 3291 return func->uaccess_safe; 3292 3293 return false; 3294 } 3295 3296 static inline const char *call_dest_name(struct instruction *insn) 3297 { 3298 static char pvname[19]; 3299 struct reloc *rel; 3300 int idx; 3301 3302 if (insn->call_dest) 3303 return insn->call_dest->name; 3304 3305 rel = insn_reloc(NULL, insn); 3306 if (rel && !strcmp(rel->sym->name, "pv_ops")) { 3307 idx = (rel->addend / sizeof(void *)); 3308 snprintf(pvname, sizeof(pvname), "pv_ops[%d]", idx); 3309 return pvname; 3310 } 3311 3312 return "{dynamic}"; 3313 } 3314 3315 static bool pv_call_dest(struct objtool_file *file, struct instruction *insn) 3316 { 3317 struct symbol *target; 3318 struct reloc *rel; 3319 int idx; 3320 3321 rel = insn_reloc(file, insn); 3322 if (!rel || strcmp(rel->sym->name, "pv_ops")) 3323 return false; 3324 3325 idx = (arch_dest_reloc_offset(rel->addend) / sizeof(void *)); 3326 3327 if (file->pv_ops[idx].clean) 3328 return true; 3329 3330 file->pv_ops[idx].clean = true; 3331 3332 list_for_each_entry(target, &file->pv_ops[idx].targets, pv_target) { 3333 if (!target->sec->noinstr) { 3334 WARN("pv_ops[%d]: %s", idx, target->name); 3335 file->pv_ops[idx].clean = false; 3336 } 3337 } 3338 3339 return file->pv_ops[idx].clean; 3340 } 3341 3342 static inline bool noinstr_call_dest(struct objtool_file *file, 3343 struct instruction *insn, 3344 struct symbol *func) 3345 { 3346 /* 3347 * We can't deal with indirect function calls at present; 3348 * assume they're instrumented. 3349 */ 3350 if (!func) { 3351 if (file->pv_ops) 3352 return pv_call_dest(file, insn); 3353 3354 return false; 3355 } 3356 3357 /* 3358 * If the symbol is from a noinstr section; we good. 3359 */ 3360 if (func->sec->noinstr) 3361 return true; 3362 3363 /* 3364 * The __ubsan_handle_*() calls are like WARN(), they only happen when 3365 * something 'BAD' happened. At the risk of taking the machine down, 3366 * let them proceed to get the message out. 3367 */ 3368 if (!strncmp(func->name, "__ubsan_handle_", 15)) 3369 return true; 3370 3371 return false; 3372 } 3373 3374 static int validate_call(struct objtool_file *file, 3375 struct instruction *insn, 3376 struct insn_state *state) 3377 { 3378 if (state->noinstr && state->instr <= 0 && 3379 !noinstr_call_dest(file, insn, insn->call_dest)) { 3380 WARN_FUNC("call to %s() leaves .noinstr.text section", 3381 insn->sec, insn->offset, call_dest_name(insn)); 3382 return 1; 3383 } 3384 3385 if (state->uaccess && !func_uaccess_safe(insn->call_dest)) { 3386 WARN_FUNC("call to %s() with UACCESS enabled", 3387 insn->sec, insn->offset, call_dest_name(insn)); 3388 return 1; 3389 } 3390 3391 if (state->df) { 3392 WARN_FUNC("call to %s() with DF set", 3393 insn->sec, insn->offset, call_dest_name(insn)); 3394 return 1; 3395 } 3396 3397 return 0; 3398 } 3399 3400 static int validate_sibling_call(struct objtool_file *file, 3401 struct instruction *insn, 3402 struct insn_state *state) 3403 { 3404 if (insn_func(insn) && has_modified_stack_frame(insn, state)) { 3405 WARN_FUNC("sibling call from callable instruction with modified stack frame", 3406 insn->sec, insn->offset); 3407 return 1; 3408 } 3409 3410 return validate_call(file, insn, state); 3411 } 3412 3413 static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state) 3414 { 3415 if (state->noinstr && state->instr > 0) { 3416 WARN_FUNC("return with instrumentation enabled", 3417 insn->sec, insn->offset); 3418 return 1; 3419 } 3420 3421 if (state->uaccess && !func_uaccess_safe(func)) { 3422 WARN_FUNC("return with UACCESS enabled", 3423 insn->sec, insn->offset); 3424 return 1; 3425 } 3426 3427 if (!state->uaccess && func_uaccess_safe(func)) { 3428 WARN_FUNC("return with UACCESS disabled from a UACCESS-safe function", 3429 insn->sec, insn->offset); 3430 return 1; 3431 } 3432 3433 if (state->df) { 3434 WARN_FUNC("return with DF set", 3435 insn->sec, insn->offset); 3436 return 1; 3437 } 3438 3439 if (func && has_modified_stack_frame(insn, state)) { 3440 WARN_FUNC("return with modified stack frame", 3441 insn->sec, insn->offset); 3442 return 1; 3443 } 3444 3445 if (state->cfi.bp_scratch) { 3446 WARN_FUNC("BP used as a scratch register", 3447 insn->sec, insn->offset); 3448 return 1; 3449 } 3450 3451 return 0; 3452 } 3453 3454 static struct instruction *next_insn_to_validate(struct objtool_file *file, 3455 struct instruction *insn) 3456 { 3457 struct alt_group *alt_group = insn->alt_group; 3458 3459 /* 3460 * Simulate the fact that alternatives are patched in-place. When the 3461 * end of a replacement alt_group is reached, redirect objtool flow to 3462 * the end of the original alt_group. 3463 */ 3464 if (alt_group && insn == alt_group->last_insn && alt_group->orig_group) 3465 return next_insn_same_sec(file, alt_group->orig_group->last_insn); 3466 3467 return next_insn_same_sec(file, insn); 3468 } 3469 3470 /* 3471 * Follow the branch starting at the given instruction, and recursively follow 3472 * any other branches (jumps). Meanwhile, track the frame pointer state at 3473 * each instruction and validate all the rules described in 3474 * tools/objtool/Documentation/objtool.txt. 3475 */ 3476 static int validate_branch(struct objtool_file *file, struct symbol *func, 3477 struct instruction *insn, struct insn_state state) 3478 { 3479 struct alternative *alt; 3480 struct instruction *next_insn, *prev_insn = NULL; 3481 struct section *sec; 3482 u8 visited; 3483 int ret; 3484 3485 sec = insn->sec; 3486 3487 while (1) { 3488 next_insn = next_insn_to_validate(file, insn); 3489 3490 if (func && insn_func(insn) && func != insn_func(insn)->pfunc) { 3491 /* Ignore KCFI type preambles, which always fall through */ 3492 if (!strncmp(func->name, "__cfi_", 6) || 3493 !strncmp(func->name, "__pfx_", 6)) 3494 return 0; 3495 3496 WARN("%s() falls through to next function %s()", 3497 func->name, insn_func(insn)->name); 3498 return 1; 3499 } 3500 3501 if (func && insn->ignore) { 3502 WARN_FUNC("BUG: why am I validating an ignored function?", 3503 sec, insn->offset); 3504 return 1; 3505 } 3506 3507 visited = VISITED_BRANCH << state.uaccess; 3508 if (insn->visited & VISITED_BRANCH_MASK) { 3509 if (!insn->hint && !insn_cfi_match(insn, &state.cfi)) 3510 return 1; 3511 3512 if (insn->visited & visited) 3513 return 0; 3514 } else { 3515 nr_insns_visited++; 3516 } 3517 3518 if (state.noinstr) 3519 state.instr += insn->instr; 3520 3521 if (insn->hint) { 3522 if (insn->restore) { 3523 struct instruction *save_insn, *i; 3524 3525 i = insn; 3526 save_insn = NULL; 3527 3528 sym_for_each_insn_continue_reverse(file, func, i) { 3529 if (i->save) { 3530 save_insn = i; 3531 break; 3532 } 3533 } 3534 3535 if (!save_insn) { 3536 WARN_FUNC("no corresponding CFI save for CFI restore", 3537 sec, insn->offset); 3538 return 1; 3539 } 3540 3541 if (!save_insn->visited) { 3542 WARN_FUNC("objtool isn't smart enough to handle this CFI save/restore combo", 3543 sec, insn->offset); 3544 return 1; 3545 } 3546 3547 insn->cfi = save_insn->cfi; 3548 nr_cfi_reused++; 3549 } 3550 3551 state.cfi = *insn->cfi; 3552 } else { 3553 /* XXX track if we actually changed state.cfi */ 3554 3555 if (prev_insn && !cficmp(prev_insn->cfi, &state.cfi)) { 3556 insn->cfi = prev_insn->cfi; 3557 nr_cfi_reused++; 3558 } else { 3559 insn->cfi = cfi_hash_find_or_add(&state.cfi); 3560 } 3561 } 3562 3563 insn->visited |= visited; 3564 3565 if (propagate_alt_cfi(file, insn)) 3566 return 1; 3567 3568 if (!insn->ignore_alts && !list_empty(&insn->alts)) { 3569 bool skip_orig = false; 3570 3571 list_for_each_entry(alt, &insn->alts, list) { 3572 if (alt->skip_orig) 3573 skip_orig = true; 3574 3575 ret = validate_branch(file, func, alt->insn, state); 3576 if (ret) { 3577 if (opts.backtrace) 3578 BT_FUNC("(alt)", insn); 3579 return ret; 3580 } 3581 } 3582 3583 if (skip_orig) 3584 return 0; 3585 } 3586 3587 if (handle_insn_ops(insn, next_insn, &state)) 3588 return 1; 3589 3590 switch (insn->type) { 3591 3592 case INSN_RETURN: 3593 return validate_return(func, insn, &state); 3594 3595 case INSN_CALL: 3596 case INSN_CALL_DYNAMIC: 3597 ret = validate_call(file, insn, &state); 3598 if (ret) 3599 return ret; 3600 3601 if (opts.stackval && func && !is_fentry_call(insn) && 3602 !has_valid_stack_frame(&state)) { 3603 WARN_FUNC("call without frame pointer save/setup", 3604 sec, insn->offset); 3605 return 1; 3606 } 3607 3608 if (insn->dead_end) 3609 return 0; 3610 3611 break; 3612 3613 case INSN_JUMP_CONDITIONAL: 3614 case INSN_JUMP_UNCONDITIONAL: 3615 if (is_sibling_call(insn)) { 3616 ret = validate_sibling_call(file, insn, &state); 3617 if (ret) 3618 return ret; 3619 3620 } else if (insn->jump_dest) { 3621 ret = validate_branch(file, func, 3622 insn->jump_dest, state); 3623 if (ret) { 3624 if (opts.backtrace) 3625 BT_FUNC("(branch)", insn); 3626 return ret; 3627 } 3628 } 3629 3630 if (insn->type == INSN_JUMP_UNCONDITIONAL) 3631 return 0; 3632 3633 break; 3634 3635 case INSN_JUMP_DYNAMIC: 3636 case INSN_JUMP_DYNAMIC_CONDITIONAL: 3637 if (is_sibling_call(insn)) { 3638 ret = validate_sibling_call(file, insn, &state); 3639 if (ret) 3640 return ret; 3641 } 3642 3643 if (insn->type == INSN_JUMP_DYNAMIC) 3644 return 0; 3645 3646 break; 3647 3648 case INSN_CONTEXT_SWITCH: 3649 if (func && (!next_insn || !next_insn->hint)) { 3650 WARN_FUNC("unsupported instruction in callable function", 3651 sec, insn->offset); 3652 return 1; 3653 } 3654 return 0; 3655 3656 case INSN_STAC: 3657 if (state.uaccess) { 3658 WARN_FUNC("recursive UACCESS enable", sec, insn->offset); 3659 return 1; 3660 } 3661 3662 state.uaccess = true; 3663 break; 3664 3665 case INSN_CLAC: 3666 if (!state.uaccess && func) { 3667 WARN_FUNC("redundant UACCESS disable", sec, insn->offset); 3668 return 1; 3669 } 3670 3671 if (func_uaccess_safe(func) && !state.uaccess_stack) { 3672 WARN_FUNC("UACCESS-safe disables UACCESS", sec, insn->offset); 3673 return 1; 3674 } 3675 3676 state.uaccess = false; 3677 break; 3678 3679 case INSN_STD: 3680 if (state.df) { 3681 WARN_FUNC("recursive STD", sec, insn->offset); 3682 return 1; 3683 } 3684 3685 state.df = true; 3686 break; 3687 3688 case INSN_CLD: 3689 if (!state.df && func) { 3690 WARN_FUNC("redundant CLD", sec, insn->offset); 3691 return 1; 3692 } 3693 3694 state.df = false; 3695 break; 3696 3697 default: 3698 break; 3699 } 3700 3701 if (insn->dead_end) 3702 return 0; 3703 3704 if (!next_insn) { 3705 if (state.cfi.cfa.base == CFI_UNDEFINED) 3706 return 0; 3707 WARN("%s: unexpected end of section", sec->name); 3708 return 1; 3709 } 3710 3711 prev_insn = insn; 3712 insn = next_insn; 3713 } 3714 3715 return 0; 3716 } 3717 3718 static int validate_unwind_hints(struct objtool_file *file, struct section *sec) 3719 { 3720 struct instruction *insn; 3721 struct insn_state state; 3722 int ret, warnings = 0; 3723 3724 if (!file->hints) 3725 return 0; 3726 3727 init_insn_state(file, &state, sec); 3728 3729 if (sec) { 3730 insn = find_insn(file, sec, 0); 3731 if (!insn) 3732 return 0; 3733 } else { 3734 insn = list_first_entry(&file->insn_list, typeof(*insn), list); 3735 } 3736 3737 while (&insn->list != &file->insn_list && (!sec || insn->sec == sec)) { 3738 if (insn->hint && !insn->visited && !insn->ignore) { 3739 ret = validate_branch(file, insn_func(insn), insn, state); 3740 if (ret && opts.backtrace) 3741 BT_FUNC("<=== (hint)", insn); 3742 warnings += ret; 3743 } 3744 3745 insn = list_next_entry(insn, list); 3746 } 3747 3748 return warnings; 3749 } 3750 3751 /* 3752 * Validate rethunk entry constraint: must untrain RET before the first RET. 3753 * 3754 * Follow every branch (intra-function) and ensure ANNOTATE_UNRET_END comes 3755 * before an actual RET instruction. 3756 */ 3757 static int validate_entry(struct objtool_file *file, struct instruction *insn) 3758 { 3759 struct instruction *next, *dest; 3760 int ret, warnings = 0; 3761 3762 for (;;) { 3763 next = next_insn_to_validate(file, insn); 3764 3765 if (insn->visited & VISITED_ENTRY) 3766 return 0; 3767 3768 insn->visited |= VISITED_ENTRY; 3769 3770 if (!insn->ignore_alts && !list_empty(&insn->alts)) { 3771 struct alternative *alt; 3772 bool skip_orig = false; 3773 3774 list_for_each_entry(alt, &insn->alts, list) { 3775 if (alt->skip_orig) 3776 skip_orig = true; 3777 3778 ret = validate_entry(file, alt->insn); 3779 if (ret) { 3780 if (opts.backtrace) 3781 BT_FUNC("(alt)", insn); 3782 return ret; 3783 } 3784 } 3785 3786 if (skip_orig) 3787 return 0; 3788 } 3789 3790 switch (insn->type) { 3791 3792 case INSN_CALL_DYNAMIC: 3793 case INSN_JUMP_DYNAMIC: 3794 case INSN_JUMP_DYNAMIC_CONDITIONAL: 3795 WARN_FUNC("early indirect call", insn->sec, insn->offset); 3796 return 1; 3797 3798 case INSN_JUMP_UNCONDITIONAL: 3799 case INSN_JUMP_CONDITIONAL: 3800 if (!is_sibling_call(insn)) { 3801 if (!insn->jump_dest) { 3802 WARN_FUNC("unresolved jump target after linking?!?", 3803 insn->sec, insn->offset); 3804 return -1; 3805 } 3806 ret = validate_entry(file, insn->jump_dest); 3807 if (ret) { 3808 if (opts.backtrace) { 3809 BT_FUNC("(branch%s)", insn, 3810 insn->type == INSN_JUMP_CONDITIONAL ? "-cond" : ""); 3811 } 3812 return ret; 3813 } 3814 3815 if (insn->type == INSN_JUMP_UNCONDITIONAL) 3816 return 0; 3817 3818 break; 3819 } 3820 3821 /* fallthrough */ 3822 case INSN_CALL: 3823 dest = find_insn(file, insn->call_dest->sec, 3824 insn->call_dest->offset); 3825 if (!dest) { 3826 WARN("Unresolved function after linking!?: %s", 3827 insn->call_dest->name); 3828 return -1; 3829 } 3830 3831 ret = validate_entry(file, dest); 3832 if (ret) { 3833 if (opts.backtrace) 3834 BT_FUNC("(call)", insn); 3835 return ret; 3836 } 3837 /* 3838 * If a call returns without error, it must have seen UNTRAIN_RET. 3839 * Therefore any non-error return is a success. 3840 */ 3841 return 0; 3842 3843 case INSN_RETURN: 3844 WARN_FUNC("RET before UNTRAIN", insn->sec, insn->offset); 3845 return 1; 3846 3847 case INSN_NOP: 3848 if (insn->retpoline_safe) 3849 return 0; 3850 break; 3851 3852 default: 3853 break; 3854 } 3855 3856 if (!next) { 3857 WARN_FUNC("teh end!", insn->sec, insn->offset); 3858 return -1; 3859 } 3860 insn = next; 3861 } 3862 3863 return warnings; 3864 } 3865 3866 /* 3867 * Validate that all branches starting at 'insn->entry' encounter UNRET_END 3868 * before RET. 3869 */ 3870 static int validate_unret(struct objtool_file *file) 3871 { 3872 struct instruction *insn; 3873 int ret, warnings = 0; 3874 3875 for_each_insn(file, insn) { 3876 if (!insn->entry) 3877 continue; 3878 3879 ret = validate_entry(file, insn); 3880 if (ret < 0) { 3881 WARN_FUNC("Failed UNRET validation", insn->sec, insn->offset); 3882 return ret; 3883 } 3884 warnings += ret; 3885 } 3886 3887 return warnings; 3888 } 3889 3890 static int validate_retpoline(struct objtool_file *file) 3891 { 3892 struct instruction *insn; 3893 int warnings = 0; 3894 3895 for_each_insn(file, insn) { 3896 if (insn->type != INSN_JUMP_DYNAMIC && 3897 insn->type != INSN_CALL_DYNAMIC && 3898 insn->type != INSN_RETURN) 3899 continue; 3900 3901 if (insn->retpoline_safe) 3902 continue; 3903 3904 if (insn->sec->init) 3905 continue; 3906 3907 if (insn->type == INSN_RETURN) { 3908 if (opts.rethunk) { 3909 WARN_FUNC("'naked' return found in RETHUNK build", 3910 insn->sec, insn->offset); 3911 } else 3912 continue; 3913 } else { 3914 WARN_FUNC("indirect %s found in RETPOLINE build", 3915 insn->sec, insn->offset, 3916 insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call"); 3917 } 3918 3919 warnings++; 3920 } 3921 3922 return warnings; 3923 } 3924 3925 static bool is_kasan_insn(struct instruction *insn) 3926 { 3927 return (insn->type == INSN_CALL && 3928 !strcmp(insn->call_dest->name, "__asan_handle_no_return")); 3929 } 3930 3931 static bool is_ubsan_insn(struct instruction *insn) 3932 { 3933 return (insn->type == INSN_CALL && 3934 !strcmp(insn->call_dest->name, 3935 "__ubsan_handle_builtin_unreachable")); 3936 } 3937 3938 static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn) 3939 { 3940 int i; 3941 struct instruction *prev_insn; 3942 3943 if (insn->ignore || insn->type == INSN_NOP || insn->type == INSN_TRAP) 3944 return true; 3945 3946 /* 3947 * Ignore alternative replacement instructions. This can happen 3948 * when a whitelisted function uses one of the ALTERNATIVE macros. 3949 */ 3950 if (!strcmp(insn->sec->name, ".altinstr_replacement") || 3951 !strcmp(insn->sec->name, ".altinstr_aux")) 3952 return true; 3953 3954 /* 3955 * Whole archive runs might encounter dead code from weak symbols. 3956 * This is where the linker will have dropped the weak symbol in 3957 * favour of a regular symbol, but leaves the code in place. 3958 * 3959 * In this case we'll find a piece of code (whole function) that is not 3960 * covered by a !section symbol. Ignore them. 3961 */ 3962 if (opts.link && !insn_func(insn)) { 3963 int size = find_symbol_hole_containing(insn->sec, insn->offset); 3964 unsigned long end = insn->offset + size; 3965 3966 if (!size) /* not a hole */ 3967 return false; 3968 3969 if (size < 0) /* hole until the end */ 3970 return true; 3971 3972 sec_for_each_insn_continue(file, insn) { 3973 /* 3974 * If we reach a visited instruction at or before the 3975 * end of the hole, ignore the unreachable. 3976 */ 3977 if (insn->visited) 3978 return true; 3979 3980 if (insn->offset >= end) 3981 break; 3982 3983 /* 3984 * If this hole jumps to a .cold function, mark it ignore too. 3985 */ 3986 if (insn->jump_dest && insn_func(insn->jump_dest) && 3987 strstr(insn_func(insn->jump_dest)->name, ".cold")) { 3988 struct instruction *dest = insn->jump_dest; 3989 func_for_each_insn(file, insn_func(dest), dest) 3990 dest->ignore = true; 3991 } 3992 } 3993 3994 return false; 3995 } 3996 3997 if (!insn_func(insn)) 3998 return false; 3999 4000 if (insn_func(insn)->static_call_tramp) 4001 return true; 4002 4003 /* 4004 * CONFIG_UBSAN_TRAP inserts a UD2 when it sees 4005 * __builtin_unreachable(). The BUG() macro has an unreachable() after 4006 * the UD2, which causes GCC's undefined trap logic to emit another UD2 4007 * (or occasionally a JMP to UD2). 4008 * 4009 * It may also insert a UD2 after calling a __noreturn function. 4010 */ 4011 prev_insn = list_prev_entry(insn, list); 4012 if ((prev_insn->dead_end || dead_end_function(file, prev_insn->call_dest)) && 4013 (insn->type == INSN_BUG || 4014 (insn->type == INSN_JUMP_UNCONDITIONAL && 4015 insn->jump_dest && insn->jump_dest->type == INSN_BUG))) 4016 return true; 4017 4018 /* 4019 * Check if this (or a subsequent) instruction is related to 4020 * CONFIG_UBSAN or CONFIG_KASAN. 4021 * 4022 * End the search at 5 instructions to avoid going into the weeds. 4023 */ 4024 for (i = 0; i < 5; i++) { 4025 4026 if (is_kasan_insn(insn) || is_ubsan_insn(insn)) 4027 return true; 4028 4029 if (insn->type == INSN_JUMP_UNCONDITIONAL) { 4030 if (insn->jump_dest && 4031 insn_func(insn->jump_dest) == insn_func(insn)) { 4032 insn = insn->jump_dest; 4033 continue; 4034 } 4035 4036 break; 4037 } 4038 4039 if (insn->offset + insn->len >= insn_func(insn)->offset + insn_func(insn)->len) 4040 break; 4041 4042 insn = list_next_entry(insn, list); 4043 } 4044 4045 return false; 4046 } 4047 4048 static int add_prefix_symbol(struct objtool_file *file, struct symbol *func, 4049 struct instruction *insn) 4050 { 4051 if (!opts.prefix) 4052 return 0; 4053 4054 for (;;) { 4055 struct instruction *prev = list_prev_entry(insn, list); 4056 u64 offset; 4057 4058 if (&prev->list == &file->insn_list) 4059 break; 4060 4061 if (prev->type != INSN_NOP) 4062 break; 4063 4064 offset = func->offset - prev->offset; 4065 if (offset >= opts.prefix) { 4066 if (offset == opts.prefix) { 4067 /* 4068 * Since the sec->symbol_list is ordered by 4069 * offset (see elf_add_symbol()) the added 4070 * symbol will not be seen by the iteration in 4071 * validate_section(). 4072 * 4073 * Hence the lack of list_for_each_entry_safe() 4074 * there. 4075 * 4076 * The direct concequence is that prefix symbols 4077 * don't get visited (because pointless), except 4078 * for the logic in ignore_unreachable_insn() 4079 * that needs the terminating insn to be visited 4080 * otherwise it will report the hole. 4081 * 4082 * Hence mark the first instruction of the 4083 * prefix symbol as visisted. 4084 */ 4085 prev->visited |= VISITED_BRANCH; 4086 elf_create_prefix_symbol(file->elf, func, opts.prefix); 4087 } 4088 break; 4089 } 4090 insn = prev; 4091 } 4092 4093 return 0; 4094 } 4095 4096 static int validate_symbol(struct objtool_file *file, struct section *sec, 4097 struct symbol *sym, struct insn_state *state) 4098 { 4099 struct instruction *insn; 4100 int ret; 4101 4102 if (!sym->len) { 4103 WARN("%s() is missing an ELF size annotation", sym->name); 4104 return 1; 4105 } 4106 4107 if (sym->pfunc != sym || sym->alias != sym) 4108 return 0; 4109 4110 insn = find_insn(file, sec, sym->offset); 4111 if (!insn || insn->ignore || insn->visited) 4112 return 0; 4113 4114 add_prefix_symbol(file, sym, insn); 4115 4116 state->uaccess = sym->uaccess_safe; 4117 4118 ret = validate_branch(file, insn_func(insn), insn, *state); 4119 if (ret && opts.backtrace) 4120 BT_FUNC("<=== (sym)", insn); 4121 return ret; 4122 } 4123 4124 static int validate_section(struct objtool_file *file, struct section *sec) 4125 { 4126 struct insn_state state; 4127 struct symbol *func; 4128 int warnings = 0; 4129 4130 list_for_each_entry(func, &sec->symbol_list, list) { 4131 if (func->type != STT_FUNC) 4132 continue; 4133 4134 init_insn_state(file, &state, sec); 4135 set_func_state(&state.cfi); 4136 4137 warnings += validate_symbol(file, sec, func, &state); 4138 } 4139 4140 return warnings; 4141 } 4142 4143 static int validate_noinstr_sections(struct objtool_file *file) 4144 { 4145 struct section *sec; 4146 int warnings = 0; 4147 4148 sec = find_section_by_name(file->elf, ".noinstr.text"); 4149 if (sec) { 4150 warnings += validate_section(file, sec); 4151 warnings += validate_unwind_hints(file, sec); 4152 } 4153 4154 sec = find_section_by_name(file->elf, ".entry.text"); 4155 if (sec) { 4156 warnings += validate_section(file, sec); 4157 warnings += validate_unwind_hints(file, sec); 4158 } 4159 4160 return warnings; 4161 } 4162 4163 static int validate_functions(struct objtool_file *file) 4164 { 4165 struct section *sec; 4166 int warnings = 0; 4167 4168 for_each_sec(file, sec) { 4169 if (!(sec->sh.sh_flags & SHF_EXECINSTR)) 4170 continue; 4171 4172 warnings += validate_section(file, sec); 4173 } 4174 4175 return warnings; 4176 } 4177 4178 static void mark_endbr_used(struct instruction *insn) 4179 { 4180 if (!list_empty(&insn->call_node)) 4181 list_del_init(&insn->call_node); 4182 } 4183 4184 static bool noendbr_range(struct objtool_file *file, struct instruction *insn) 4185 { 4186 struct symbol *sym = find_symbol_containing(insn->sec, insn->offset-1); 4187 struct instruction *first; 4188 4189 if (!sym) 4190 return false; 4191 4192 first = find_insn(file, sym->sec, sym->offset); 4193 if (!first) 4194 return false; 4195 4196 if (first->type != INSN_ENDBR && !first->noendbr) 4197 return false; 4198 4199 return insn->offset == sym->offset + sym->len; 4200 } 4201 4202 static int validate_ibt_insn(struct objtool_file *file, struct instruction *insn) 4203 { 4204 struct instruction *dest; 4205 struct reloc *reloc; 4206 unsigned long off; 4207 int warnings = 0; 4208 4209 /* 4210 * Looking for function pointer load relocations. Ignore 4211 * direct/indirect branches: 4212 */ 4213 switch (insn->type) { 4214 case INSN_CALL: 4215 case INSN_CALL_DYNAMIC: 4216 case INSN_JUMP_CONDITIONAL: 4217 case INSN_JUMP_UNCONDITIONAL: 4218 case INSN_JUMP_DYNAMIC: 4219 case INSN_JUMP_DYNAMIC_CONDITIONAL: 4220 case INSN_RETURN: 4221 case INSN_NOP: 4222 return 0; 4223 default: 4224 break; 4225 } 4226 4227 for (reloc = insn_reloc(file, insn); 4228 reloc; 4229 reloc = find_reloc_by_dest_range(file->elf, insn->sec, 4230 reloc->offset + 1, 4231 (insn->offset + insn->len) - (reloc->offset + 1))) { 4232 4233 /* 4234 * static_call_update() references the trampoline, which 4235 * doesn't have (or need) ENDBR. Skip warning in that case. 4236 */ 4237 if (reloc->sym->static_call_tramp) 4238 continue; 4239 4240 off = reloc->sym->offset; 4241 if (reloc->type == R_X86_64_PC32 || reloc->type == R_X86_64_PLT32) 4242 off += arch_dest_reloc_offset(reloc->addend); 4243 else 4244 off += reloc->addend; 4245 4246 dest = find_insn(file, reloc->sym->sec, off); 4247 if (!dest) 4248 continue; 4249 4250 if (dest->type == INSN_ENDBR) { 4251 mark_endbr_used(dest); 4252 continue; 4253 } 4254 4255 if (insn_func(dest) && insn_func(dest) == insn_func(insn)) { 4256 /* 4257 * Anything from->to self is either _THIS_IP_ or 4258 * IRET-to-self. 4259 * 4260 * There is no sane way to annotate _THIS_IP_ since the 4261 * compiler treats the relocation as a constant and is 4262 * happy to fold in offsets, skewing any annotation we 4263 * do, leading to vast amounts of false-positives. 4264 * 4265 * There's also compiler generated _THIS_IP_ through 4266 * KCOV and such which we have no hope of annotating. 4267 * 4268 * As such, blanket accept self-references without 4269 * issue. 4270 */ 4271 continue; 4272 } 4273 4274 /* 4275 * Accept anything ANNOTATE_NOENDBR. 4276 */ 4277 if (dest->noendbr) 4278 continue; 4279 4280 /* 4281 * Accept if this is the instruction after a symbol 4282 * that is (no)endbr -- typical code-range usage. 4283 */ 4284 if (noendbr_range(file, dest)) 4285 continue; 4286 4287 WARN_FUNC("relocation to !ENDBR: %s", 4288 insn->sec, insn->offset, 4289 offstr(dest->sec, dest->offset)); 4290 4291 warnings++; 4292 } 4293 4294 return warnings; 4295 } 4296 4297 static int validate_ibt_data_reloc(struct objtool_file *file, 4298 struct reloc *reloc) 4299 { 4300 struct instruction *dest; 4301 4302 dest = find_insn(file, reloc->sym->sec, 4303 reloc->sym->offset + reloc->addend); 4304 if (!dest) 4305 return 0; 4306 4307 if (dest->type == INSN_ENDBR) { 4308 mark_endbr_used(dest); 4309 return 0; 4310 } 4311 4312 if (dest->noendbr) 4313 return 0; 4314 4315 WARN_FUNC("data relocation to !ENDBR: %s", 4316 reloc->sec->base, reloc->offset, 4317 offstr(dest->sec, dest->offset)); 4318 4319 return 1; 4320 } 4321 4322 /* 4323 * Validate IBT rules and remove used ENDBR instructions from the seal list. 4324 * Unused ENDBR instructions will be annotated for sealing (i.e., replaced with 4325 * NOPs) later, in create_ibt_endbr_seal_sections(). 4326 */ 4327 static int validate_ibt(struct objtool_file *file) 4328 { 4329 struct section *sec; 4330 struct reloc *reloc; 4331 struct instruction *insn; 4332 int warnings = 0; 4333 4334 for_each_insn(file, insn) 4335 warnings += validate_ibt_insn(file, insn); 4336 4337 for_each_sec(file, sec) { 4338 4339 /* Already done by validate_ibt_insn() */ 4340 if (sec->sh.sh_flags & SHF_EXECINSTR) 4341 continue; 4342 4343 if (!sec->reloc) 4344 continue; 4345 4346 /* 4347 * These sections can reference text addresses, but not with 4348 * the intent to indirect branch to them. 4349 */ 4350 if ((!strncmp(sec->name, ".discard", 8) && 4351 strcmp(sec->name, ".discard.ibt_endbr_noseal")) || 4352 !strncmp(sec->name, ".debug", 6) || 4353 !strcmp(sec->name, ".altinstructions") || 4354 !strcmp(sec->name, ".ibt_endbr_seal") || 4355 !strcmp(sec->name, ".orc_unwind_ip") || 4356 !strcmp(sec->name, ".parainstructions") || 4357 !strcmp(sec->name, ".retpoline_sites") || 4358 !strcmp(sec->name, ".smp_locks") || 4359 !strcmp(sec->name, ".static_call_sites") || 4360 !strcmp(sec->name, "_error_injection_whitelist") || 4361 !strcmp(sec->name, "_kprobe_blacklist") || 4362 !strcmp(sec->name, "__bug_table") || 4363 !strcmp(sec->name, "__ex_table") || 4364 !strcmp(sec->name, "__jump_table") || 4365 !strcmp(sec->name, "__mcount_loc") || 4366 !strcmp(sec->name, ".kcfi_traps") || 4367 strstr(sec->name, "__patchable_function_entries")) 4368 continue; 4369 4370 list_for_each_entry(reloc, &sec->reloc->reloc_list, list) 4371 warnings += validate_ibt_data_reloc(file, reloc); 4372 } 4373 4374 return warnings; 4375 } 4376 4377 static int validate_sls(struct objtool_file *file) 4378 { 4379 struct instruction *insn, *next_insn; 4380 int warnings = 0; 4381 4382 for_each_insn(file, insn) { 4383 next_insn = next_insn_same_sec(file, insn); 4384 4385 if (insn->retpoline_safe) 4386 continue; 4387 4388 switch (insn->type) { 4389 case INSN_RETURN: 4390 if (!next_insn || next_insn->type != INSN_TRAP) { 4391 WARN_FUNC("missing int3 after ret", 4392 insn->sec, insn->offset); 4393 warnings++; 4394 } 4395 4396 break; 4397 case INSN_JUMP_DYNAMIC: 4398 if (!next_insn || next_insn->type != INSN_TRAP) { 4399 WARN_FUNC("missing int3 after indirect jump", 4400 insn->sec, insn->offset); 4401 warnings++; 4402 } 4403 break; 4404 default: 4405 break; 4406 } 4407 } 4408 4409 return warnings; 4410 } 4411 4412 static int validate_reachable_instructions(struct objtool_file *file) 4413 { 4414 struct instruction *insn; 4415 4416 if (file->ignore_unreachables) 4417 return 0; 4418 4419 for_each_insn(file, insn) { 4420 if (insn->visited || ignore_unreachable_insn(file, insn)) 4421 continue; 4422 4423 WARN_FUNC("unreachable instruction", insn->sec, insn->offset); 4424 return 1; 4425 } 4426 4427 return 0; 4428 } 4429 4430 int check(struct objtool_file *file) 4431 { 4432 int ret, warnings = 0; 4433 4434 arch_initial_func_cfi_state(&initial_func_cfi); 4435 init_cfi_state(&init_cfi); 4436 init_cfi_state(&func_cfi); 4437 set_func_state(&func_cfi); 4438 4439 if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3))) 4440 goto out; 4441 4442 cfi_hash_add(&init_cfi); 4443 cfi_hash_add(&func_cfi); 4444 4445 ret = decode_sections(file); 4446 if (ret < 0) 4447 goto out; 4448 4449 warnings += ret; 4450 4451 if (list_empty(&file->insn_list)) 4452 goto out; 4453 4454 if (opts.retpoline) { 4455 ret = validate_retpoline(file); 4456 if (ret < 0) 4457 return ret; 4458 warnings += ret; 4459 } 4460 4461 if (opts.stackval || opts.orc || opts.uaccess) { 4462 ret = validate_functions(file); 4463 if (ret < 0) 4464 goto out; 4465 warnings += ret; 4466 4467 ret = validate_unwind_hints(file, NULL); 4468 if (ret < 0) 4469 goto out; 4470 warnings += ret; 4471 4472 if (!warnings) { 4473 ret = validate_reachable_instructions(file); 4474 if (ret < 0) 4475 goto out; 4476 warnings += ret; 4477 } 4478 4479 } else if (opts.noinstr) { 4480 ret = validate_noinstr_sections(file); 4481 if (ret < 0) 4482 goto out; 4483 warnings += ret; 4484 } 4485 4486 if (opts.unret) { 4487 /* 4488 * Must be after validate_branch() and friends, it plays 4489 * further games with insn->visited. 4490 */ 4491 ret = validate_unret(file); 4492 if (ret < 0) 4493 return ret; 4494 warnings += ret; 4495 } 4496 4497 if (opts.ibt) { 4498 ret = validate_ibt(file); 4499 if (ret < 0) 4500 goto out; 4501 warnings += ret; 4502 } 4503 4504 if (opts.sls) { 4505 ret = validate_sls(file); 4506 if (ret < 0) 4507 goto out; 4508 warnings += ret; 4509 } 4510 4511 if (opts.static_call) { 4512 ret = create_static_call_sections(file); 4513 if (ret < 0) 4514 goto out; 4515 warnings += ret; 4516 } 4517 4518 if (opts.retpoline) { 4519 ret = create_retpoline_sites_sections(file); 4520 if (ret < 0) 4521 goto out; 4522 warnings += ret; 4523 } 4524 4525 if (opts.cfi) { 4526 ret = create_cfi_sections(file); 4527 if (ret < 0) 4528 goto out; 4529 warnings += ret; 4530 } 4531 4532 if (opts.rethunk) { 4533 ret = create_return_sites_sections(file); 4534 if (ret < 0) 4535 goto out; 4536 warnings += ret; 4537 4538 if (opts.hack_skylake) { 4539 ret = create_direct_call_sections(file); 4540 if (ret < 0) 4541 goto out; 4542 warnings += ret; 4543 } 4544 } 4545 4546 if (opts.mcount) { 4547 ret = create_mcount_loc_sections(file); 4548 if (ret < 0) 4549 goto out; 4550 warnings += ret; 4551 } 4552 4553 if (opts.ibt) { 4554 ret = create_ibt_endbr_seal_sections(file); 4555 if (ret < 0) 4556 goto out; 4557 warnings += ret; 4558 } 4559 4560 if (opts.orc && !list_empty(&file->insn_list)) { 4561 ret = orc_create(file); 4562 if (ret < 0) 4563 goto out; 4564 warnings += ret; 4565 } 4566 4567 4568 if (opts.stats) { 4569 printf("nr_insns_visited: %ld\n", nr_insns_visited); 4570 printf("nr_cfi: %ld\n", nr_cfi); 4571 printf("nr_cfi_reused: %ld\n", nr_cfi_reused); 4572 printf("nr_cfi_cache: %ld\n", nr_cfi_cache); 4573 } 4574 4575 out: 4576 /* 4577 * For now, don't fail the kernel build on fatal warnings. These 4578 * errors are still fairly common due to the growing matrix of 4579 * supported toolchains and their recent pace of change. 4580 */ 4581 return 0; 4582 } 4583