// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#include <string.h>
#include <stdlib.h>
#include <inttypes.h>
#include <sys/mman.h>

#include <arch/elf.h>
#include <objtool/builtin.h>
#include <objtool/cfi.h>
#include <objtool/arch.h>
#include <objtool/check.h>
#include <objtool/special.h>
#include <objtool/warn.h>
#include <objtool/endianness.h>

#include <linux/objtool.h>
#include <linux/hashtable.h>
#include <linux/kernel.h>
#include <linux/static_call_types.h>

struct alternative {
	struct list_head list;
	struct instruction *insn;
	bool skip_orig;
};

static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache;

static struct cfi_init_state initial_func_cfi;
static struct cfi_state init_cfi;
static struct cfi_state func_cfi;

struct instruction *find_insn(struct objtool_file *file,
			      struct section *sec, unsigned long offset)
{
	struct instruction *insn;

	hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) {
		if (insn->sec == sec && insn->offset == offset)
			return insn;
	}

	return NULL;
}

static struct instruction *next_insn_same_sec(struct objtool_file *file,
					      struct instruction *insn)
{
	struct instruction *next = list_next_entry(insn, list);

	if (!next || &next->list == &file->insn_list || next->sec != insn->sec)
		return NULL;

	return next;
}

static struct instruction *next_insn_same_func(struct objtool_file *file,
					       struct instruction *insn)
{
	struct instruction *next = list_next_entry(insn, list);
	struct symbol *func = insn->func;

	if (!func)
		return NULL;

	if (&next->list != &file->insn_list && next->func == func)
		return next;

	/* Check if we're already in the subfunction: */
	if (func == func->cfunc)
		return NULL;

	/* Move to the subfunction: */
	return find_insn(file, func->cfunc->sec, func->cfunc->offset);
}

static struct instruction *prev_insn_same_sym(struct objtool_file *file,
					      struct instruction *insn)
{
	struct instruction *prev = list_prev_entry(insn, list);

	if (&prev->list != &file->insn_list && prev->func == insn->func)
		return prev;

	return NULL;
}

#define func_for_each_insn(file, func, insn)				\
	for (insn = find_insn(file, func->sec, func->offset);		\
	     insn;							\
	     insn = next_insn_same_func(file, insn))

#define sym_for_each_insn(file, sym, insn)				\
	for (insn = find_insn(file, sym->sec, sym->offset);		\
	     insn && &insn->list != &file->insn_list &&			\
	     insn->sec == sym->sec &&					\
	     insn->offset < sym->offset + sym->len;			\
	     insn = list_next_entry(insn, list))

#define sym_for_each_insn_continue_reverse(file, sym, insn)		\
	for (insn = list_prev_entry(insn, list);			\
	     &insn->list != &file->insn_list &&				\
	     insn->sec == sym->sec && insn->offset >= sym->offset;	\
	     insn = list_prev_entry(insn, list))

#define sec_for_each_insn_from(file, insn)				\
	for (; insn; insn = next_insn_same_sec(file, insn))

#define sec_for_each_insn_continue(file, insn)				\
	for (insn = next_insn_same_sec(file, insn); insn;		\
	     insn = next_insn_same_sec(file, insn))

static bool is_jump_table_jump(struct instruction *insn)
{
	struct alt_group *alt_group = insn->alt_group;

	if (insn->jump_table)
		return true;

	/* Retpoline alternative for a jump table? */
	return alt_group && alt_group->orig_group &&
	       alt_group->orig_group->first_insn->jump_table;
}

static bool is_sibling_call(struct instruction *insn)
{
	/*
	 * Assume only ELF functions can make sibling calls. This ensures
	 * sibling call detection consistency between vmlinux.o and individual
	 * objects.
	 */
	if (!insn->func)
		return false;

	/* An indirect jump is either a sibling call or a jump to a table. */
	if (insn->type == INSN_JUMP_DYNAMIC)
		return !is_jump_table_jump(insn);

	/* add_jump_destinations() sets insn->call_dest for sibling calls. */
	return (is_static_jump(insn) && insn->call_dest);
}

/*
 * This checks to see if the given function is a "noreturn" function.
 *
 * For global functions which are outside the scope of this object file, we
 * have to keep a manual list of them.
 *
 * For local functions, we have to detect them manually by simply looking for
 * the lack of a return instruction.
 */
static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
				int recursion)
{
	int i;
	struct instruction *insn;
	bool empty = true;

	/*
	 * Unfortunately these have to be hard coded because the noreturn
	 * attribute isn't provided in ELF data. Keep 'em sorted.
	 */
	static const char * const global_noreturns[] = {
		"__invalid_creds",
		"__module_put_and_kthread_exit",
		"__reiserfs_panic",
		"__stack_chk_fail",
		"__ubsan_handle_builtin_unreachable",
		"cpu_bringup_and_idle",
		"cpu_startup_entry",
		"do_exit",
		"do_group_exit",
		"do_task_dead",
		"ex_handler_msr_mce",
		"fortify_panic",
		"kthread_complete_and_exit",
		"kthread_exit",
		"kunit_try_catch_throw",
		"lbug_with_loc",
		"machine_real_restart",
		"make_task_dead",
		"panic",
		"rewind_stack_and_make_dead",
		"sev_es_terminate",
		"snp_abort",
		"stop_this_cpu",
		"usercopy_abort",
		"xen_start_kernel",
	};

	if (!func)
		return false;

	if (func->bind == STB_WEAK)
		return false;

	if (func->bind == STB_GLOBAL)
		for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
			if (!strcmp(func->name, global_noreturns[i]))
				return true;

	if (!func->len)
		return false;

	insn = find_insn(file, func->sec, func->offset);
	if (!insn->func)
		return false;

	func_for_each_insn(file, func, insn) {
		empty = false;

		if (insn->type == INSN_RETURN)
			return false;
	}

	if (empty)
		return false;

	/*
	 * A function can have a sibling call instead of a return. In that
	 * case, the function's dead-end status depends on whether the target
	 * of the sibling call returns.
	 */
	func_for_each_insn(file, func, insn) {
		if (is_sibling_call(insn)) {
			struct instruction *dest = insn->jump_dest;

			if (!dest)
				/* sibling call to another file */
				return false;

			/* local sibling call */
			if (recursion == 5) {
				/*
				 * Infinite recursion: two functions have
				 * sibling calls to each other. This is a very
				 * rare case. It means they aren't dead ends.
				 */
242 */ 243 return false; 244 } 245 246 return __dead_end_function(file, dest->func, recursion+1); 247 } 248 } 249 250 return true; 251 } 252 253 static bool dead_end_function(struct objtool_file *file, struct symbol *func) 254 { 255 return __dead_end_function(file, func, 0); 256 } 257 258 static void init_cfi_state(struct cfi_state *cfi) 259 { 260 int i; 261 262 for (i = 0; i < CFI_NUM_REGS; i++) { 263 cfi->regs[i].base = CFI_UNDEFINED; 264 cfi->vals[i].base = CFI_UNDEFINED; 265 } 266 cfi->cfa.base = CFI_UNDEFINED; 267 cfi->drap_reg = CFI_UNDEFINED; 268 cfi->drap_offset = -1; 269 } 270 271 static void init_insn_state(struct objtool_file *file, struct insn_state *state, 272 struct section *sec) 273 { 274 memset(state, 0, sizeof(*state)); 275 init_cfi_state(&state->cfi); 276 277 /* 278 * We need the full vmlinux for noinstr validation, otherwise we can 279 * not correctly determine insn->call_dest->sec (external symbols do 280 * not have a section). 281 */ 282 if (opts.link && opts.noinstr && sec) 283 state->noinstr = sec->noinstr; 284 } 285 286 static struct cfi_state *cfi_alloc(void) 287 { 288 struct cfi_state *cfi = calloc(sizeof(struct cfi_state), 1); 289 if (!cfi) { 290 WARN("calloc failed"); 291 exit(1); 292 } 293 nr_cfi++; 294 return cfi; 295 } 296 297 static int cfi_bits; 298 static struct hlist_head *cfi_hash; 299 300 static inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2) 301 { 302 return memcmp((void *)cfi1 + sizeof(cfi1->hash), 303 (void *)cfi2 + sizeof(cfi2->hash), 304 sizeof(struct cfi_state) - sizeof(struct hlist_node)); 305 } 306 307 static inline u32 cfi_key(struct cfi_state *cfi) 308 { 309 return jhash((void *)cfi + sizeof(cfi->hash), 310 sizeof(*cfi) - sizeof(cfi->hash), 0); 311 } 312 313 static struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi) 314 { 315 struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)]; 316 struct cfi_state *obj; 317 318 hlist_for_each_entry(obj, head, hash) { 319 if (!cficmp(cfi, obj)) { 320 nr_cfi_cache++; 321 return obj; 322 } 323 } 324 325 obj = cfi_alloc(); 326 *obj = *cfi; 327 hlist_add_head(&obj->hash, head); 328 329 return obj; 330 } 331 332 static void cfi_hash_add(struct cfi_state *cfi) 333 { 334 struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)]; 335 336 hlist_add_head(&cfi->hash, head); 337 } 338 339 static void *cfi_hash_alloc(unsigned long size) 340 { 341 cfi_bits = max(10, ilog2(size)); 342 cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits, 343 PROT_READ|PROT_WRITE, 344 MAP_PRIVATE|MAP_ANON, -1, 0); 345 if (cfi_hash == (void *)-1L) { 346 WARN("mmap fail cfi_hash"); 347 cfi_hash = NULL; 348 } else if (opts.stats) { 349 printf("cfi_bits: %d\n", cfi_bits); 350 } 351 352 return cfi_hash; 353 } 354 355 static unsigned long nr_insns; 356 static unsigned long nr_insns_visited; 357 358 /* 359 * Call the arch-specific instruction decoder for all the instructions and add 360 * them to the global instruction list. 
361 */ 362 static int decode_instructions(struct objtool_file *file) 363 { 364 struct section *sec; 365 struct symbol *func; 366 unsigned long offset; 367 struct instruction *insn; 368 int ret; 369 370 for_each_sec(file, sec) { 371 372 if (!(sec->sh.sh_flags & SHF_EXECINSTR)) 373 continue; 374 375 if (strcmp(sec->name, ".altinstr_replacement") && 376 strcmp(sec->name, ".altinstr_aux") && 377 strncmp(sec->name, ".discard.", 9)) 378 sec->text = true; 379 380 if (!strcmp(sec->name, ".noinstr.text") || 381 !strcmp(sec->name, ".entry.text") || 382 !strncmp(sec->name, ".text.__x86.", 12)) 383 sec->noinstr = true; 384 385 for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) { 386 insn = malloc(sizeof(*insn)); 387 if (!insn) { 388 WARN("malloc failed"); 389 return -1; 390 } 391 memset(insn, 0, sizeof(*insn)); 392 INIT_LIST_HEAD(&insn->alts); 393 INIT_LIST_HEAD(&insn->stack_ops); 394 INIT_LIST_HEAD(&insn->call_node); 395 396 insn->sec = sec; 397 insn->offset = offset; 398 399 ret = arch_decode_instruction(file, sec, offset, 400 sec->sh.sh_size - offset, 401 &insn->len, &insn->type, 402 &insn->immediate, 403 &insn->stack_ops); 404 if (ret) 405 goto err; 406 407 /* 408 * By default, "ud2" is a dead end unless otherwise 409 * annotated, because GCC 7 inserts it for certain 410 * divide-by-zero cases. 411 */ 412 if (insn->type == INSN_BUG) 413 insn->dead_end = true; 414 415 hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset)); 416 list_add_tail(&insn->list, &file->insn_list); 417 nr_insns++; 418 } 419 420 list_for_each_entry(func, &sec->symbol_list, list) { 421 if (func->type != STT_FUNC || func->alias != func) 422 continue; 423 424 if (!find_insn(file, sec, func->offset)) { 425 WARN("%s(): can't find starting instruction", 426 func->name); 427 return -1; 428 } 429 430 sym_for_each_insn(file, func, insn) { 431 insn->func = func; 432 if (insn->type == INSN_ENDBR && list_empty(&insn->call_node)) { 433 if (insn->offset == insn->func->offset) { 434 list_add_tail(&insn->call_node, &file->endbr_list); 435 file->nr_endbr++; 436 } else { 437 file->nr_endbr_int++; 438 } 439 } 440 } 441 } 442 } 443 444 if (opts.stats) 445 printf("nr_insns: %lu\n", nr_insns); 446 447 return 0; 448 449 err: 450 free(insn); 451 return ret; 452 } 453 454 /* 455 * Read the pv_ops[] .data table to find the static initialized values. 456 */ 457 static int add_pv_ops(struct objtool_file *file, const char *symname) 458 { 459 struct symbol *sym, *func; 460 unsigned long off, end; 461 struct reloc *rel; 462 int idx; 463 464 sym = find_symbol_by_name(file->elf, symname); 465 if (!sym) 466 return 0; 467 468 off = sym->offset; 469 end = off + sym->len; 470 for (;;) { 471 rel = find_reloc_by_dest_range(file->elf, sym->sec, off, end - off); 472 if (!rel) 473 break; 474 475 func = rel->sym; 476 if (func->type == STT_SECTION) 477 func = find_symbol_by_offset(rel->sym->sec, rel->addend); 478 479 idx = (rel->offset - sym->offset) / sizeof(unsigned long); 480 481 objtool_pv_add(file, idx, func); 482 483 off = rel->offset + 1; 484 if (off > end) 485 break; 486 } 487 488 return 0; 489 } 490 491 /* 492 * Allocate and initialize file->pv_ops[]. 
493 */ 494 static int init_pv_ops(struct objtool_file *file) 495 { 496 static const char *pv_ops_tables[] = { 497 "pv_ops", 498 "xen_cpu_ops", 499 "xen_irq_ops", 500 "xen_mmu_ops", 501 NULL, 502 }; 503 const char *pv_ops; 504 struct symbol *sym; 505 int idx, nr; 506 507 if (!opts.noinstr) 508 return 0; 509 510 file->pv_ops = NULL; 511 512 sym = find_symbol_by_name(file->elf, "pv_ops"); 513 if (!sym) 514 return 0; 515 516 nr = sym->len / sizeof(unsigned long); 517 file->pv_ops = calloc(sizeof(struct pv_state), nr); 518 if (!file->pv_ops) 519 return -1; 520 521 for (idx = 0; idx < nr; idx++) 522 INIT_LIST_HEAD(&file->pv_ops[idx].targets); 523 524 for (idx = 0; (pv_ops = pv_ops_tables[idx]); idx++) 525 add_pv_ops(file, pv_ops); 526 527 return 0; 528 } 529 530 static struct instruction *find_last_insn(struct objtool_file *file, 531 struct section *sec) 532 { 533 struct instruction *insn = NULL; 534 unsigned int offset; 535 unsigned int end = (sec->sh.sh_size > 10) ? sec->sh.sh_size - 10 : 0; 536 537 for (offset = sec->sh.sh_size - 1; offset >= end && !insn; offset--) 538 insn = find_insn(file, sec, offset); 539 540 return insn; 541 } 542 543 /* 544 * Mark "ud2" instructions and manually annotated dead ends. 545 */ 546 static int add_dead_ends(struct objtool_file *file) 547 { 548 struct section *sec; 549 struct reloc *reloc; 550 struct instruction *insn; 551 552 /* 553 * Check for manually annotated dead ends. 554 */ 555 sec = find_section_by_name(file->elf, ".rela.discard.unreachable"); 556 if (!sec) 557 goto reachable; 558 559 list_for_each_entry(reloc, &sec->reloc_list, list) { 560 if (reloc->sym->type != STT_SECTION) { 561 WARN("unexpected relocation symbol type in %s", sec->name); 562 return -1; 563 } 564 insn = find_insn(file, reloc->sym->sec, reloc->addend); 565 if (insn) 566 insn = list_prev_entry(insn, list); 567 else if (reloc->addend == reloc->sym->sec->sh.sh_size) { 568 insn = find_last_insn(file, reloc->sym->sec); 569 if (!insn) { 570 WARN("can't find unreachable insn at %s+0x%" PRIx64, 571 reloc->sym->sec->name, reloc->addend); 572 return -1; 573 } 574 } else { 575 WARN("can't find unreachable insn at %s+0x%" PRIx64, 576 reloc->sym->sec->name, reloc->addend); 577 return -1; 578 } 579 580 insn->dead_end = true; 581 } 582 583 reachable: 584 /* 585 * These manually annotated reachable checks are needed for GCC 4.4, 586 * where the Linux unreachable() macro isn't supported. In that case 587 * GCC doesn't know the "ud2" is fatal, so it generates code as if it's 588 * not a dead end. 
589 */ 590 sec = find_section_by_name(file->elf, ".rela.discard.reachable"); 591 if (!sec) 592 return 0; 593 594 list_for_each_entry(reloc, &sec->reloc_list, list) { 595 if (reloc->sym->type != STT_SECTION) { 596 WARN("unexpected relocation symbol type in %s", sec->name); 597 return -1; 598 } 599 insn = find_insn(file, reloc->sym->sec, reloc->addend); 600 if (insn) 601 insn = list_prev_entry(insn, list); 602 else if (reloc->addend == reloc->sym->sec->sh.sh_size) { 603 insn = find_last_insn(file, reloc->sym->sec); 604 if (!insn) { 605 WARN("can't find reachable insn at %s+0x%" PRIx64, 606 reloc->sym->sec->name, reloc->addend); 607 return -1; 608 } 609 } else { 610 WARN("can't find reachable insn at %s+0x%" PRIx64, 611 reloc->sym->sec->name, reloc->addend); 612 return -1; 613 } 614 615 insn->dead_end = false; 616 } 617 618 return 0; 619 } 620 621 static int create_static_call_sections(struct objtool_file *file) 622 { 623 struct section *sec; 624 struct static_call_site *site; 625 struct instruction *insn; 626 struct symbol *key_sym; 627 char *key_name, *tmp; 628 int idx; 629 630 sec = find_section_by_name(file->elf, ".static_call_sites"); 631 if (sec) { 632 INIT_LIST_HEAD(&file->static_call_list); 633 WARN("file already has .static_call_sites section, skipping"); 634 return 0; 635 } 636 637 if (list_empty(&file->static_call_list)) 638 return 0; 639 640 idx = 0; 641 list_for_each_entry(insn, &file->static_call_list, call_node) 642 idx++; 643 644 sec = elf_create_section(file->elf, ".static_call_sites", SHF_WRITE, 645 sizeof(struct static_call_site), idx); 646 if (!sec) 647 return -1; 648 649 idx = 0; 650 list_for_each_entry(insn, &file->static_call_list, call_node) { 651 652 site = (struct static_call_site *)sec->data->d_buf + idx; 653 memset(site, 0, sizeof(struct static_call_site)); 654 655 /* populate reloc for 'addr' */ 656 if (elf_add_reloc_to_insn(file->elf, sec, 657 idx * sizeof(struct static_call_site), 658 R_X86_64_PC32, 659 insn->sec, insn->offset)) 660 return -1; 661 662 /* find key symbol */ 663 key_name = strdup(insn->call_dest->name); 664 if (!key_name) { 665 perror("strdup"); 666 return -1; 667 } 668 if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR, 669 STATIC_CALL_TRAMP_PREFIX_LEN)) { 670 WARN("static_call: trampoline name malformed: %s", key_name); 671 return -1; 672 } 673 tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN; 674 memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN); 675 676 key_sym = find_symbol_by_name(file->elf, tmp); 677 if (!key_sym) { 678 if (!opts.module) { 679 WARN("static_call: can't find static_call_key symbol: %s", tmp); 680 return -1; 681 } 682 683 /* 684 * For modules(), the key might not be exported, which 685 * means the module can make static calls but isn't 686 * allowed to change them. 687 * 688 * In that case we temporarily set the key to be the 689 * trampoline address. This is fixed up in 690 * static_call_add_module(). 
691 */ 692 key_sym = insn->call_dest; 693 } 694 free(key_name); 695 696 /* populate reloc for 'key' */ 697 if (elf_add_reloc(file->elf, sec, 698 idx * sizeof(struct static_call_site) + 4, 699 R_X86_64_PC32, key_sym, 700 is_sibling_call(insn) * STATIC_CALL_SITE_TAIL)) 701 return -1; 702 703 idx++; 704 } 705 706 return 0; 707 } 708 709 static int create_retpoline_sites_sections(struct objtool_file *file) 710 { 711 struct instruction *insn; 712 struct section *sec; 713 int idx; 714 715 sec = find_section_by_name(file->elf, ".retpoline_sites"); 716 if (sec) { 717 WARN("file already has .retpoline_sites, skipping"); 718 return 0; 719 } 720 721 idx = 0; 722 list_for_each_entry(insn, &file->retpoline_call_list, call_node) 723 idx++; 724 725 if (!idx) 726 return 0; 727 728 sec = elf_create_section(file->elf, ".retpoline_sites", 0, 729 sizeof(int), idx); 730 if (!sec) { 731 WARN("elf_create_section: .retpoline_sites"); 732 return -1; 733 } 734 735 idx = 0; 736 list_for_each_entry(insn, &file->retpoline_call_list, call_node) { 737 738 int *site = (int *)sec->data->d_buf + idx; 739 *site = 0; 740 741 if (elf_add_reloc_to_insn(file->elf, sec, 742 idx * sizeof(int), 743 R_X86_64_PC32, 744 insn->sec, insn->offset)) { 745 WARN("elf_add_reloc_to_insn: .retpoline_sites"); 746 return -1; 747 } 748 749 idx++; 750 } 751 752 return 0; 753 } 754 755 static int create_return_sites_sections(struct objtool_file *file) 756 { 757 struct instruction *insn; 758 struct section *sec; 759 int idx; 760 761 sec = find_section_by_name(file->elf, ".return_sites"); 762 if (sec) { 763 WARN("file already has .return_sites, skipping"); 764 return 0; 765 } 766 767 idx = 0; 768 list_for_each_entry(insn, &file->return_thunk_list, call_node) 769 idx++; 770 771 if (!idx) 772 return 0; 773 774 sec = elf_create_section(file->elf, ".return_sites", 0, 775 sizeof(int), idx); 776 if (!sec) { 777 WARN("elf_create_section: .return_sites"); 778 return -1; 779 } 780 781 idx = 0; 782 list_for_each_entry(insn, &file->return_thunk_list, call_node) { 783 784 int *site = (int *)sec->data->d_buf + idx; 785 *site = 0; 786 787 if (elf_add_reloc_to_insn(file->elf, sec, 788 idx * sizeof(int), 789 R_X86_64_PC32, 790 insn->sec, insn->offset)) { 791 WARN("elf_add_reloc_to_insn: .return_sites"); 792 return -1; 793 } 794 795 idx++; 796 } 797 798 return 0; 799 } 800 801 static int create_ibt_endbr_seal_sections(struct objtool_file *file) 802 { 803 struct instruction *insn; 804 struct section *sec; 805 int idx; 806 807 sec = find_section_by_name(file->elf, ".ibt_endbr_seal"); 808 if (sec) { 809 WARN("file already has .ibt_endbr_seal, skipping"); 810 return 0; 811 } 812 813 idx = 0; 814 list_for_each_entry(insn, &file->endbr_list, call_node) 815 idx++; 816 817 if (opts.stats) { 818 printf("ibt: ENDBR at function start: %d\n", file->nr_endbr); 819 printf("ibt: ENDBR inside functions: %d\n", file->nr_endbr_int); 820 printf("ibt: superfluous ENDBR: %d\n", idx); 821 } 822 823 if (!idx) 824 return 0; 825 826 sec = elf_create_section(file->elf, ".ibt_endbr_seal", 0, 827 sizeof(int), idx); 828 if (!sec) { 829 WARN("elf_create_section: .ibt_endbr_seal"); 830 return -1; 831 } 832 833 idx = 0; 834 list_for_each_entry(insn, &file->endbr_list, call_node) { 835 836 int *site = (int *)sec->data->d_buf + idx; 837 *site = 0; 838 839 if (elf_add_reloc_to_insn(file->elf, sec, 840 idx * sizeof(int), 841 R_X86_64_PC32, 842 insn->sec, insn->offset)) { 843 WARN("elf_add_reloc_to_insn: .ibt_endbr_seal"); 844 return -1; 845 } 846 847 idx++; 848 } 849 850 return 0; 851 } 852 853 static 
static int create_mcount_loc_sections(struct objtool_file *file)
{
	struct section *sec;
	unsigned long *loc;
	struct instruction *insn;
	int idx;

	sec = find_section_by_name(file->elf, "__mcount_loc");
	if (sec) {
		INIT_LIST_HEAD(&file->mcount_loc_list);
		WARN("file already has __mcount_loc section, skipping");
		return 0;
	}

	if (list_empty(&file->mcount_loc_list))
		return 0;

	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, call_node)
		idx++;

	sec = elf_create_section(file->elf, "__mcount_loc", 0, sizeof(unsigned long), idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, call_node) {

		loc = (unsigned long *)sec->data->d_buf + idx;
		memset(loc, 0, sizeof(unsigned long));

		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(unsigned long),
					  R_X86_64_64,
					  insn->sec, insn->offset))
			return -1;

		idx++;
	}

	return 0;
}

/*
 * Warnings shouldn't be reported for ignored functions.
 */
static void add_ignores(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	struct symbol *func;
	struct reloc *reloc;

	sec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
	if (!sec)
		return;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		switch (reloc->sym->type) {
		case STT_FUNC:
			func = reloc->sym;
			break;

		case STT_SECTION:
			func = find_func_by_offset(reloc->sym->sec, reloc->addend);
			if (!func)
				continue;
			break;

		default:
			WARN("unexpected relocation symbol type in %s: %d", sec->name, reloc->sym->type);
			continue;
		}

		func_for_each_insn(file, func, insn)
			insn->ignore = true;
	}
}
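/*
 * Editor's note: the __mcount_loc table built above is how ftrace learns
 * about the __fentry__ call sites that annotate_call_site() NOPs out below;
 * each entry carries an R_X86_64_64 relocation back to the patched
 * instruction so the kernel can re-enable tracing at those addresses at
 * runtime.
 */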
938 */ 939 static const char *uaccess_safe_builtin[] = { 940 /* KASAN */ 941 "kasan_report", 942 "kasan_check_range", 943 /* KASAN out-of-line */ 944 "__asan_loadN_noabort", 945 "__asan_load1_noabort", 946 "__asan_load2_noabort", 947 "__asan_load4_noabort", 948 "__asan_load8_noabort", 949 "__asan_load16_noabort", 950 "__asan_storeN_noabort", 951 "__asan_store1_noabort", 952 "__asan_store2_noabort", 953 "__asan_store4_noabort", 954 "__asan_store8_noabort", 955 "__asan_store16_noabort", 956 "__kasan_check_read", 957 "__kasan_check_write", 958 /* KASAN in-line */ 959 "__asan_report_load_n_noabort", 960 "__asan_report_load1_noabort", 961 "__asan_report_load2_noabort", 962 "__asan_report_load4_noabort", 963 "__asan_report_load8_noabort", 964 "__asan_report_load16_noabort", 965 "__asan_report_store_n_noabort", 966 "__asan_report_store1_noabort", 967 "__asan_report_store2_noabort", 968 "__asan_report_store4_noabort", 969 "__asan_report_store8_noabort", 970 "__asan_report_store16_noabort", 971 /* KCSAN */ 972 "__kcsan_check_access", 973 "__kcsan_mb", 974 "__kcsan_wmb", 975 "__kcsan_rmb", 976 "__kcsan_release", 977 "kcsan_found_watchpoint", 978 "kcsan_setup_watchpoint", 979 "kcsan_check_scoped_accesses", 980 "kcsan_disable_current", 981 "kcsan_enable_current_nowarn", 982 /* KCSAN/TSAN */ 983 "__tsan_func_entry", 984 "__tsan_func_exit", 985 "__tsan_read_range", 986 "__tsan_write_range", 987 "__tsan_read1", 988 "__tsan_read2", 989 "__tsan_read4", 990 "__tsan_read8", 991 "__tsan_read16", 992 "__tsan_write1", 993 "__tsan_write2", 994 "__tsan_write4", 995 "__tsan_write8", 996 "__tsan_write16", 997 "__tsan_read_write1", 998 "__tsan_read_write2", 999 "__tsan_read_write4", 1000 "__tsan_read_write8", 1001 "__tsan_read_write16", 1002 "__tsan_atomic8_load", 1003 "__tsan_atomic16_load", 1004 "__tsan_atomic32_load", 1005 "__tsan_atomic64_load", 1006 "__tsan_atomic8_store", 1007 "__tsan_atomic16_store", 1008 "__tsan_atomic32_store", 1009 "__tsan_atomic64_store", 1010 "__tsan_atomic8_exchange", 1011 "__tsan_atomic16_exchange", 1012 "__tsan_atomic32_exchange", 1013 "__tsan_atomic64_exchange", 1014 "__tsan_atomic8_fetch_add", 1015 "__tsan_atomic16_fetch_add", 1016 "__tsan_atomic32_fetch_add", 1017 "__tsan_atomic64_fetch_add", 1018 "__tsan_atomic8_fetch_sub", 1019 "__tsan_atomic16_fetch_sub", 1020 "__tsan_atomic32_fetch_sub", 1021 "__tsan_atomic64_fetch_sub", 1022 "__tsan_atomic8_fetch_and", 1023 "__tsan_atomic16_fetch_and", 1024 "__tsan_atomic32_fetch_and", 1025 "__tsan_atomic64_fetch_and", 1026 "__tsan_atomic8_fetch_or", 1027 "__tsan_atomic16_fetch_or", 1028 "__tsan_atomic32_fetch_or", 1029 "__tsan_atomic64_fetch_or", 1030 "__tsan_atomic8_fetch_xor", 1031 "__tsan_atomic16_fetch_xor", 1032 "__tsan_atomic32_fetch_xor", 1033 "__tsan_atomic64_fetch_xor", 1034 "__tsan_atomic8_fetch_nand", 1035 "__tsan_atomic16_fetch_nand", 1036 "__tsan_atomic32_fetch_nand", 1037 "__tsan_atomic64_fetch_nand", 1038 "__tsan_atomic8_compare_exchange_strong", 1039 "__tsan_atomic16_compare_exchange_strong", 1040 "__tsan_atomic32_compare_exchange_strong", 1041 "__tsan_atomic64_compare_exchange_strong", 1042 "__tsan_atomic8_compare_exchange_weak", 1043 "__tsan_atomic16_compare_exchange_weak", 1044 "__tsan_atomic32_compare_exchange_weak", 1045 "__tsan_atomic64_compare_exchange_weak", 1046 "__tsan_atomic8_compare_exchange_val", 1047 "__tsan_atomic16_compare_exchange_val", 1048 "__tsan_atomic32_compare_exchange_val", 1049 "__tsan_atomic64_compare_exchange_val", 1050 "__tsan_atomic_thread_fence", 1051 "__tsan_atomic_signal_fence", 1052 /* KCOV */ 
	"write_comp_data",
	"check_kcov_mode",
	"__sanitizer_cov_trace_pc",
	"__sanitizer_cov_trace_const_cmp1",
	"__sanitizer_cov_trace_const_cmp2",
	"__sanitizer_cov_trace_const_cmp4",
	"__sanitizer_cov_trace_const_cmp8",
	"__sanitizer_cov_trace_cmp1",
	"__sanitizer_cov_trace_cmp2",
	"__sanitizer_cov_trace_cmp4",
	"__sanitizer_cov_trace_cmp8",
	"__sanitizer_cov_trace_switch",
	/* UBSAN */
	"ubsan_type_mismatch_common",
	"__ubsan_handle_type_mismatch",
	"__ubsan_handle_type_mismatch_v1",
	"__ubsan_handle_shift_out_of_bounds",
	/* misc */
	"csum_partial_copy_generic",
	"copy_mc_fragile",
	"copy_mc_fragile_handle_tail",
	"copy_mc_enhanced_fast_string",
	"ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */
	NULL
};

static void add_uaccess_safe(struct objtool_file *file)
{
	struct symbol *func;
	const char **name;

	if (!opts.uaccess)
		return;

	for (name = uaccess_safe_builtin; *name; name++) {
		func = find_symbol_by_name(file->elf, *name);
		if (!func)
			continue;

		func->uaccess_safe = true;
	}
}

/*
 * FIXME: For now, just ignore any alternatives which add retpolines. This is
 * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
 * But it at least allows objtool to understand the control flow *around* the
 * retpoline.
 */
static int add_ignore_alternatives(struct objtool_file *file)
{
	struct section *sec;
	struct reloc *reloc;
	struct instruction *insn;

	sec = find_section_by_name(file->elf, ".rela.discard.ignore_alts");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.ignore_alts entry");
			return -1;
		}

		insn->ignore_alts = true;
	}

	return 0;
}

__weak bool arch_is_retpoline(struct symbol *sym)
{
	return false;
}

__weak bool arch_is_rethunk(struct symbol *sym)
{
	return false;
}

#define NEGATIVE_RELOC	((void *)-1L)

static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
{
	if (insn->reloc == NEGATIVE_RELOC)
		return NULL;

	if (!insn->reloc) {
		if (!file)
			return NULL;

		/*
		 * Editor's note: NEGATIVE_RELOC is a sentinel cached in
		 * insn->reloc so a failed lookup isn't repeated on every
		 * call; it is mapped back to NULL for callers above.
		 */
		insn->reloc = find_reloc_by_dest_range(file->elf, insn->sec,
						       insn->offset, insn->len);
		if (!insn->reloc) {
			insn->reloc = NEGATIVE_RELOC;
			return NULL;
		}
	}

	return insn->reloc;
}

static void remove_insn_ops(struct instruction *insn)
{
	struct stack_op *op, *tmp;

	list_for_each_entry_safe(op, tmp, &insn->stack_ops, list) {
		list_del(&op->list);
		free(op);
	}
}

static void annotate_call_site(struct objtool_file *file,
			       struct instruction *insn, bool sibling)
{
	struct reloc *reloc = insn_reloc(file, insn);
	struct symbol *sym = insn->call_dest;

	if (!sym)
		sym = reloc->sym;

	/*
	 * Alternative replacement code is just template code which is
	 * sometimes copied to the original instruction. For now, don't
	 * annotate it.
	 * (In the future we might consider annotating the original
	 * instruction if/when it ever makes sense to do so.)
	 */
	if (!strcmp(insn->sec->name, ".altinstr_replacement"))
		return;

	if (sym->static_call_tramp) {
		list_add_tail(&insn->call_node, &file->static_call_list);
		return;
	}

	if (sym->retpoline_thunk) {
		list_add_tail(&insn->call_node, &file->retpoline_call_list);
		return;
	}

	/*
	 * Many compilers cannot disable KCOV or sanitizer calls with a
	 * function attribute, so they need a little help: NOP out any such
	 * calls from noinstr text.
	 */
	if (opts.hack_noinstr && insn->sec->noinstr && sym->profiling_func) {
		if (reloc) {
			reloc->type = R_NONE;
			elf_write_reloc(file->elf, reloc);
		}

		elf_write_insn(file->elf, insn->sec,
			       insn->offset, insn->len,
			       sibling ? arch_ret_insn(insn->len)
				       : arch_nop_insn(insn->len));

		insn->type = sibling ? INSN_RETURN : INSN_NOP;

		if (sibling) {
			/*
			 * We've replaced the tail-call JMP insn by two new
			 * insn: RET; INT3, except we only have a single struct
			 * insn here. Mark it retpoline_safe to avoid the SLS
			 * warning, instead of adding another insn.
			 */
			insn->retpoline_safe = true;
		}

		return;
	}

	if (opts.mcount && sym->fentry) {
		if (sibling)
			WARN_FUNC("Tail call to __fentry__ !?!?", insn->sec, insn->offset);

		if (reloc) {
			reloc->type = R_NONE;
			elf_write_reloc(file->elf, reloc);
		}

		elf_write_insn(file->elf, insn->sec,
			       insn->offset, insn->len,
			       arch_nop_insn(insn->len));

		insn->type = INSN_NOP;

		list_add_tail(&insn->call_node, &file->mcount_loc_list);
		return;
	}

	if (!sibling && dead_end_function(file, sym))
		insn->dead_end = true;
}

static void add_call_dest(struct objtool_file *file, struct instruction *insn,
			  struct symbol *dest, bool sibling)
{
	insn->call_dest = dest;
	if (!dest)
		return;

	/*
	 * Whatever stack impact regular CALLs have, should be undone
	 * by the RETURN of the called function.
	 *
	 * Annotated intra-function calls retain the stack_ops but
	 * are converted to JUMP, see read_intra_function_calls().
	 */
	remove_insn_ops(insn);

	annotate_call_site(file, insn, sibling);
}

static void add_retpoline_call(struct objtool_file *file, struct instruction *insn)
{
	/*
	 * Retpoline calls/jumps are really dynamic calls/jumps in disguise,
	 * so convert them accordingly.
	 */
	switch (insn->type) {
	case INSN_CALL:
		insn->type = INSN_CALL_DYNAMIC;
		break;
	case INSN_JUMP_UNCONDITIONAL:
		insn->type = INSN_JUMP_DYNAMIC;
		break;
	case INSN_JUMP_CONDITIONAL:
		insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;
		break;
	default:
		return;
	}

	insn->retpoline_safe = true;

	/*
	 * Whatever stack impact regular CALLs have, should be undone
	 * by the RETURN of the called function.
	 *
	 * Annotated intra-function calls retain the stack_ops but
	 * are converted to JUMP, see read_intra_function_calls().
	 */
1301 */ 1302 remove_insn_ops(insn); 1303 1304 annotate_call_site(file, insn, false); 1305 } 1306 1307 static void add_return_call(struct objtool_file *file, struct instruction *insn, bool add) 1308 { 1309 /* 1310 * Return thunk tail calls are really just returns in disguise, 1311 * so convert them accordingly. 1312 */ 1313 insn->type = INSN_RETURN; 1314 insn->retpoline_safe = true; 1315 1316 if (add) 1317 list_add_tail(&insn->call_node, &file->return_thunk_list); 1318 } 1319 1320 static bool same_function(struct instruction *insn1, struct instruction *insn2) 1321 { 1322 return insn1->func->pfunc == insn2->func->pfunc; 1323 } 1324 1325 static bool is_first_func_insn(struct objtool_file *file, struct instruction *insn) 1326 { 1327 if (insn->offset == insn->func->offset) 1328 return true; 1329 1330 if (opts.ibt) { 1331 struct instruction *prev = prev_insn_same_sym(file, insn); 1332 1333 if (prev && prev->type == INSN_ENDBR && 1334 insn->offset == insn->func->offset + prev->len) 1335 return true; 1336 } 1337 1338 return false; 1339 } 1340 1341 /* 1342 * Find the destination instructions for all jumps. 1343 */ 1344 static int add_jump_destinations(struct objtool_file *file) 1345 { 1346 struct instruction *insn, *jump_dest; 1347 struct reloc *reloc; 1348 struct section *dest_sec; 1349 unsigned long dest_off; 1350 1351 for_each_insn(file, insn) { 1352 if (insn->jump_dest) { 1353 /* 1354 * handle_group_alt() may have previously set 1355 * 'jump_dest' for some alternatives. 1356 */ 1357 continue; 1358 } 1359 if (!is_static_jump(insn)) 1360 continue; 1361 1362 reloc = insn_reloc(file, insn); 1363 if (!reloc) { 1364 dest_sec = insn->sec; 1365 dest_off = arch_jump_destination(insn); 1366 } else if (reloc->sym->type == STT_SECTION) { 1367 dest_sec = reloc->sym->sec; 1368 dest_off = arch_dest_reloc_offset(reloc->addend); 1369 } else if (reloc->sym->retpoline_thunk) { 1370 add_retpoline_call(file, insn); 1371 continue; 1372 } else if (reloc->sym->return_thunk) { 1373 add_return_call(file, insn, true); 1374 continue; 1375 } else if (insn->func) { 1376 /* 1377 * External sibling call or internal sibling call with 1378 * STT_FUNC reloc. 1379 */ 1380 add_call_dest(file, insn, reloc->sym, true); 1381 continue; 1382 } else if (reloc->sym->sec->idx) { 1383 dest_sec = reloc->sym->sec; 1384 dest_off = reloc->sym->sym.st_value + 1385 arch_dest_reloc_offset(reloc->addend); 1386 } else { 1387 /* non-func asm code jumping to another file */ 1388 continue; 1389 } 1390 1391 jump_dest = find_insn(file, dest_sec, dest_off); 1392 if (!jump_dest) { 1393 struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off); 1394 1395 /* 1396 * This is a special case for zen_untrain_ret(). 1397 * It jumps to __x86_return_thunk(), but objtool 1398 * can't find the thunk's starting RET 1399 * instruction, because the RET is also in the 1400 * middle of another instruction. Objtool only 1401 * knows about the outer instruction. 1402 */ 1403 if (sym && sym->return_thunk) { 1404 add_return_call(file, insn, false); 1405 continue; 1406 } 1407 1408 WARN_FUNC("can't find jump dest instruction at %s+0x%lx", 1409 insn->sec, insn->offset, dest_sec->name, 1410 dest_off); 1411 return -1; 1412 } 1413 1414 /* 1415 * Cross-function jump. 1416 */ 1417 if (insn->func && jump_dest->func && 1418 insn->func != jump_dest->func) { 1419 1420 /* 1421 * For GCC 8+, create parent/child links for any cold 1422 * subfunctions. This is _mostly_ redundant with a 1423 * similar initialization in read_symbols(). 
1424 * 1425 * If a function has aliases, we want the *first* such 1426 * function in the symbol table to be the subfunction's 1427 * parent. In that case we overwrite the 1428 * initialization done in read_symbols(). 1429 * 1430 * However this code can't completely replace the 1431 * read_symbols() code because this doesn't detect the 1432 * case where the parent function's only reference to a 1433 * subfunction is through a jump table. 1434 */ 1435 if (!strstr(insn->func->name, ".cold") && 1436 strstr(jump_dest->func->name, ".cold")) { 1437 insn->func->cfunc = jump_dest->func; 1438 jump_dest->func->pfunc = insn->func; 1439 1440 } else if (!same_function(insn, jump_dest) && 1441 is_first_func_insn(file, jump_dest)) { 1442 /* 1443 * Internal sibling call without reloc or with 1444 * STT_SECTION reloc. 1445 */ 1446 add_call_dest(file, insn, jump_dest->func, true); 1447 continue; 1448 } 1449 } 1450 1451 insn->jump_dest = jump_dest; 1452 } 1453 1454 return 0; 1455 } 1456 1457 static struct symbol *find_call_destination(struct section *sec, unsigned long offset) 1458 { 1459 struct symbol *call_dest; 1460 1461 call_dest = find_func_by_offset(sec, offset); 1462 if (!call_dest) 1463 call_dest = find_symbol_by_offset(sec, offset); 1464 1465 return call_dest; 1466 } 1467 1468 /* 1469 * Find the destination instructions for all calls. 1470 */ 1471 static int add_call_destinations(struct objtool_file *file) 1472 { 1473 struct instruction *insn; 1474 unsigned long dest_off; 1475 struct symbol *dest; 1476 struct reloc *reloc; 1477 1478 for_each_insn(file, insn) { 1479 if (insn->type != INSN_CALL) 1480 continue; 1481 1482 reloc = insn_reloc(file, insn); 1483 if (!reloc) { 1484 dest_off = arch_jump_destination(insn); 1485 dest = find_call_destination(insn->sec, dest_off); 1486 1487 add_call_dest(file, insn, dest, false); 1488 1489 if (insn->ignore) 1490 continue; 1491 1492 if (!insn->call_dest) { 1493 WARN_FUNC("unannotated intra-function call", insn->sec, insn->offset); 1494 return -1; 1495 } 1496 1497 if (insn->func && insn->call_dest->type != STT_FUNC) { 1498 WARN_FUNC("unsupported call to non-function", 1499 insn->sec, insn->offset); 1500 return -1; 1501 } 1502 1503 } else if (reloc->sym->type == STT_SECTION) { 1504 dest_off = arch_dest_reloc_offset(reloc->addend); 1505 dest = find_call_destination(reloc->sym->sec, dest_off); 1506 if (!dest) { 1507 WARN_FUNC("can't find call dest symbol at %s+0x%lx", 1508 insn->sec, insn->offset, 1509 reloc->sym->sec->name, 1510 dest_off); 1511 return -1; 1512 } 1513 1514 add_call_dest(file, insn, dest, false); 1515 1516 } else if (reloc->sym->retpoline_thunk) { 1517 add_retpoline_call(file, insn); 1518 1519 } else 1520 add_call_dest(file, insn, reloc->sym, false); 1521 } 1522 1523 return 0; 1524 } 1525 1526 /* 1527 * The .alternatives section requires some extra special care over and above 1528 * other special sections because alternatives are patched in place. 
1529 */ 1530 static int handle_group_alt(struct objtool_file *file, 1531 struct special_alt *special_alt, 1532 struct instruction *orig_insn, 1533 struct instruction **new_insn) 1534 { 1535 struct instruction *last_orig_insn, *last_new_insn = NULL, *insn, *nop = NULL; 1536 struct alt_group *orig_alt_group, *new_alt_group; 1537 unsigned long dest_off; 1538 1539 1540 orig_alt_group = malloc(sizeof(*orig_alt_group)); 1541 if (!orig_alt_group) { 1542 WARN("malloc failed"); 1543 return -1; 1544 } 1545 orig_alt_group->cfi = calloc(special_alt->orig_len, 1546 sizeof(struct cfi_state *)); 1547 if (!orig_alt_group->cfi) { 1548 WARN("calloc failed"); 1549 return -1; 1550 } 1551 1552 last_orig_insn = NULL; 1553 insn = orig_insn; 1554 sec_for_each_insn_from(file, insn) { 1555 if (insn->offset >= special_alt->orig_off + special_alt->orig_len) 1556 break; 1557 1558 insn->alt_group = orig_alt_group; 1559 last_orig_insn = insn; 1560 } 1561 orig_alt_group->orig_group = NULL; 1562 orig_alt_group->first_insn = orig_insn; 1563 orig_alt_group->last_insn = last_orig_insn; 1564 1565 1566 new_alt_group = malloc(sizeof(*new_alt_group)); 1567 if (!new_alt_group) { 1568 WARN("malloc failed"); 1569 return -1; 1570 } 1571 1572 if (special_alt->new_len < special_alt->orig_len) { 1573 /* 1574 * Insert a fake nop at the end to make the replacement 1575 * alt_group the same size as the original. This is needed to 1576 * allow propagate_alt_cfi() to do its magic. When the last 1577 * instruction affects the stack, the instruction after it (the 1578 * nop) will propagate the new state to the shared CFI array. 1579 */ 1580 nop = malloc(sizeof(*nop)); 1581 if (!nop) { 1582 WARN("malloc failed"); 1583 return -1; 1584 } 1585 memset(nop, 0, sizeof(*nop)); 1586 INIT_LIST_HEAD(&nop->alts); 1587 INIT_LIST_HEAD(&nop->stack_ops); 1588 1589 nop->sec = special_alt->new_sec; 1590 nop->offset = special_alt->new_off + special_alt->new_len; 1591 nop->len = special_alt->orig_len - special_alt->new_len; 1592 nop->type = INSN_NOP; 1593 nop->func = orig_insn->func; 1594 nop->alt_group = new_alt_group; 1595 nop->ignore = orig_insn->ignore_alts; 1596 } 1597 1598 if (!special_alt->new_len) { 1599 *new_insn = nop; 1600 goto end; 1601 } 1602 1603 insn = *new_insn; 1604 sec_for_each_insn_from(file, insn) { 1605 struct reloc *alt_reloc; 1606 1607 if (insn->offset >= special_alt->new_off + special_alt->new_len) 1608 break; 1609 1610 last_new_insn = insn; 1611 1612 insn->ignore = orig_insn->ignore_alts; 1613 insn->func = orig_insn->func; 1614 insn->alt_group = new_alt_group; 1615 1616 /* 1617 * Since alternative replacement code is copy/pasted by the 1618 * kernel after applying relocations, generally such code can't 1619 * have relative-address relocation references to outside the 1620 * .altinstr_replacement section, unless the arch's 1621 * alternatives code can adjust the relative offsets 1622 * accordingly. 
1623 */ 1624 alt_reloc = insn_reloc(file, insn); 1625 if (alt_reloc && 1626 !arch_support_alt_relocation(special_alt, insn, alt_reloc)) { 1627 1628 WARN_FUNC("unsupported relocation in alternatives section", 1629 insn->sec, insn->offset); 1630 return -1; 1631 } 1632 1633 if (!is_static_jump(insn)) 1634 continue; 1635 1636 if (!insn->immediate) 1637 continue; 1638 1639 dest_off = arch_jump_destination(insn); 1640 if (dest_off == special_alt->new_off + special_alt->new_len) { 1641 insn->jump_dest = next_insn_same_sec(file, last_orig_insn); 1642 if (!insn->jump_dest) { 1643 WARN_FUNC("can't find alternative jump destination", 1644 insn->sec, insn->offset); 1645 return -1; 1646 } 1647 } 1648 } 1649 1650 if (!last_new_insn) { 1651 WARN_FUNC("can't find last new alternative instruction", 1652 special_alt->new_sec, special_alt->new_off); 1653 return -1; 1654 } 1655 1656 if (nop) 1657 list_add(&nop->list, &last_new_insn->list); 1658 end: 1659 new_alt_group->orig_group = orig_alt_group; 1660 new_alt_group->first_insn = *new_insn; 1661 new_alt_group->last_insn = nop ? : last_new_insn; 1662 new_alt_group->cfi = orig_alt_group->cfi; 1663 return 0; 1664 } 1665 1666 /* 1667 * A jump table entry can either convert a nop to a jump or a jump to a nop. 1668 * If the original instruction is a jump, make the alt entry an effective nop 1669 * by just skipping the original instruction. 1670 */ 1671 static int handle_jump_alt(struct objtool_file *file, 1672 struct special_alt *special_alt, 1673 struct instruction *orig_insn, 1674 struct instruction **new_insn) 1675 { 1676 if (orig_insn->type != INSN_JUMP_UNCONDITIONAL && 1677 orig_insn->type != INSN_NOP) { 1678 1679 WARN_FUNC("unsupported instruction at jump label", 1680 orig_insn->sec, orig_insn->offset); 1681 return -1; 1682 } 1683 1684 if (opts.hack_jump_label && special_alt->key_addend & 2) { 1685 struct reloc *reloc = insn_reloc(file, orig_insn); 1686 1687 if (reloc) { 1688 reloc->type = R_NONE; 1689 elf_write_reloc(file->elf, reloc); 1690 } 1691 elf_write_insn(file->elf, orig_insn->sec, 1692 orig_insn->offset, orig_insn->len, 1693 arch_nop_insn(orig_insn->len)); 1694 orig_insn->type = INSN_NOP; 1695 } 1696 1697 if (orig_insn->type == INSN_NOP) { 1698 if (orig_insn->len == 2) 1699 file->jl_nop_short++; 1700 else 1701 file->jl_nop_long++; 1702 1703 return 0; 1704 } 1705 1706 if (orig_insn->len == 2) 1707 file->jl_short++; 1708 else 1709 file->jl_long++; 1710 1711 *new_insn = list_next_entry(orig_insn, list); 1712 return 0; 1713 } 1714 1715 /* 1716 * Read all the special sections which have alternate instructions which can be 1717 * patched in or redirected to at runtime. Each instruction having alternate 1718 * instruction(s) has them added to its insn->alts list, which will be 1719 * traversed in validate_branch(). 
1720 */ 1721 static int add_special_section_alts(struct objtool_file *file) 1722 { 1723 struct list_head special_alts; 1724 struct instruction *orig_insn, *new_insn; 1725 struct special_alt *special_alt, *tmp; 1726 struct alternative *alt; 1727 int ret; 1728 1729 ret = special_get_alts(file->elf, &special_alts); 1730 if (ret) 1731 return ret; 1732 1733 list_for_each_entry_safe(special_alt, tmp, &special_alts, list) { 1734 1735 orig_insn = find_insn(file, special_alt->orig_sec, 1736 special_alt->orig_off); 1737 if (!orig_insn) { 1738 WARN_FUNC("special: can't find orig instruction", 1739 special_alt->orig_sec, special_alt->orig_off); 1740 ret = -1; 1741 goto out; 1742 } 1743 1744 new_insn = NULL; 1745 if (!special_alt->group || special_alt->new_len) { 1746 new_insn = find_insn(file, special_alt->new_sec, 1747 special_alt->new_off); 1748 if (!new_insn) { 1749 WARN_FUNC("special: can't find new instruction", 1750 special_alt->new_sec, 1751 special_alt->new_off); 1752 ret = -1; 1753 goto out; 1754 } 1755 } 1756 1757 if (special_alt->group) { 1758 if (!special_alt->orig_len) { 1759 WARN_FUNC("empty alternative entry", 1760 orig_insn->sec, orig_insn->offset); 1761 continue; 1762 } 1763 1764 ret = handle_group_alt(file, special_alt, orig_insn, 1765 &new_insn); 1766 if (ret) 1767 goto out; 1768 } else if (special_alt->jump_or_nop) { 1769 ret = handle_jump_alt(file, special_alt, orig_insn, 1770 &new_insn); 1771 if (ret) 1772 goto out; 1773 } 1774 1775 alt = malloc(sizeof(*alt)); 1776 if (!alt) { 1777 WARN("malloc failed"); 1778 ret = -1; 1779 goto out; 1780 } 1781 1782 alt->insn = new_insn; 1783 alt->skip_orig = special_alt->skip_orig; 1784 orig_insn->ignore_alts |= special_alt->skip_alt; 1785 list_add_tail(&alt->list, &orig_insn->alts); 1786 1787 list_del(&special_alt->list); 1788 free(special_alt); 1789 } 1790 1791 if (opts.stats) { 1792 printf("jl\\\tNOP\tJMP\n"); 1793 printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short); 1794 printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long); 1795 } 1796 1797 out: 1798 return ret; 1799 } 1800 1801 static int add_jump_table(struct objtool_file *file, struct instruction *insn, 1802 struct reloc *table) 1803 { 1804 struct reloc *reloc = table; 1805 struct instruction *dest_insn; 1806 struct alternative *alt; 1807 struct symbol *pfunc = insn->func->pfunc; 1808 unsigned int prev_offset = 0; 1809 1810 /* 1811 * Each @reloc is a switch table relocation which points to the target 1812 * instruction. 
1813 */ 1814 list_for_each_entry_from(reloc, &table->sec->reloc_list, list) { 1815 1816 /* Check for the end of the table: */ 1817 if (reloc != table && reloc->jump_table_start) 1818 break; 1819 1820 /* Make sure the table entries are consecutive: */ 1821 if (prev_offset && reloc->offset != prev_offset + 8) 1822 break; 1823 1824 /* Detect function pointers from contiguous objects: */ 1825 if (reloc->sym->sec == pfunc->sec && 1826 reloc->addend == pfunc->offset) 1827 break; 1828 1829 dest_insn = find_insn(file, reloc->sym->sec, reloc->addend); 1830 if (!dest_insn) 1831 break; 1832 1833 /* Make sure the destination is in the same function: */ 1834 if (!dest_insn->func || dest_insn->func->pfunc != pfunc) 1835 break; 1836 1837 alt = malloc(sizeof(*alt)); 1838 if (!alt) { 1839 WARN("malloc failed"); 1840 return -1; 1841 } 1842 1843 alt->insn = dest_insn; 1844 list_add_tail(&alt->list, &insn->alts); 1845 prev_offset = reloc->offset; 1846 } 1847 1848 if (!prev_offset) { 1849 WARN_FUNC("can't find switch jump table", 1850 insn->sec, insn->offset); 1851 return -1; 1852 } 1853 1854 return 0; 1855 } 1856 1857 /* 1858 * find_jump_table() - Given a dynamic jump, find the switch jump table 1859 * associated with it. 1860 */ 1861 static struct reloc *find_jump_table(struct objtool_file *file, 1862 struct symbol *func, 1863 struct instruction *insn) 1864 { 1865 struct reloc *table_reloc; 1866 struct instruction *dest_insn, *orig_insn = insn; 1867 1868 /* 1869 * Backward search using the @first_jump_src links, these help avoid 1870 * much of the 'in between' code. Which avoids us getting confused by 1871 * it. 1872 */ 1873 for (; 1874 insn && insn->func && insn->func->pfunc == func; 1875 insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) { 1876 1877 if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC) 1878 break; 1879 1880 /* allow small jumps within the range */ 1881 if (insn->type == INSN_JUMP_UNCONDITIONAL && 1882 insn->jump_dest && 1883 (insn->jump_dest->offset <= insn->offset || 1884 insn->jump_dest->offset > orig_insn->offset)) 1885 break; 1886 1887 table_reloc = arch_find_switch_table(file, insn); 1888 if (!table_reloc) 1889 continue; 1890 dest_insn = find_insn(file, table_reloc->sym->sec, table_reloc->addend); 1891 if (!dest_insn || !dest_insn->func || dest_insn->func->pfunc != func) 1892 continue; 1893 1894 return table_reloc; 1895 } 1896 1897 return NULL; 1898 } 1899 1900 /* 1901 * First pass: Mark the head of each jump table so that in the next pass, 1902 * we know when a given jump table ends and the next one starts. 1903 */ 1904 static void mark_func_jump_tables(struct objtool_file *file, 1905 struct symbol *func) 1906 { 1907 struct instruction *insn, *last = NULL; 1908 struct reloc *reloc; 1909 1910 func_for_each_insn(file, func, insn) { 1911 if (!last) 1912 last = insn; 1913 1914 /* 1915 * Store back-pointers for unconditional forward jumps such 1916 * that find_jump_table() can back-track using those and 1917 * avoid some potentially confusing code. 
1918 */ 1919 if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest && 1920 insn->offset > last->offset && 1921 insn->jump_dest->offset > insn->offset && 1922 !insn->jump_dest->first_jump_src) { 1923 1924 insn->jump_dest->first_jump_src = insn; 1925 last = insn->jump_dest; 1926 } 1927 1928 if (insn->type != INSN_JUMP_DYNAMIC) 1929 continue; 1930 1931 reloc = find_jump_table(file, func, insn); 1932 if (reloc) { 1933 reloc->jump_table_start = true; 1934 insn->jump_table = reloc; 1935 } 1936 } 1937 } 1938 1939 static int add_func_jump_tables(struct objtool_file *file, 1940 struct symbol *func) 1941 { 1942 struct instruction *insn; 1943 int ret; 1944 1945 func_for_each_insn(file, func, insn) { 1946 if (!insn->jump_table) 1947 continue; 1948 1949 ret = add_jump_table(file, insn, insn->jump_table); 1950 if (ret) 1951 return ret; 1952 } 1953 1954 return 0; 1955 } 1956 1957 /* 1958 * For some switch statements, gcc generates a jump table in the .rodata 1959 * section which contains a list of addresses within the function to jump to. 1960 * This finds these jump tables and adds them to the insn->alts lists. 1961 */ 1962 static int add_jump_table_alts(struct objtool_file *file) 1963 { 1964 struct section *sec; 1965 struct symbol *func; 1966 int ret; 1967 1968 if (!file->rodata) 1969 return 0; 1970 1971 for_each_sec(file, sec) { 1972 list_for_each_entry(func, &sec->symbol_list, list) { 1973 if (func->type != STT_FUNC) 1974 continue; 1975 1976 mark_func_jump_tables(file, func); 1977 ret = add_func_jump_tables(file, func); 1978 if (ret) 1979 return ret; 1980 } 1981 } 1982 1983 return 0; 1984 } 1985 1986 static void set_func_state(struct cfi_state *state) 1987 { 1988 state->cfa = initial_func_cfi.cfa; 1989 memcpy(&state->regs, &initial_func_cfi.regs, 1990 CFI_NUM_REGS * sizeof(struct cfi_reg)); 1991 state->stack_size = initial_func_cfi.cfa.offset; 1992 } 1993 1994 static int read_unwind_hints(struct objtool_file *file) 1995 { 1996 struct cfi_state cfi = init_cfi; 1997 struct section *sec, *relocsec; 1998 struct unwind_hint *hint; 1999 struct instruction *insn; 2000 struct reloc *reloc; 2001 int i; 2002 2003 sec = find_section_by_name(file->elf, ".discard.unwind_hints"); 2004 if (!sec) 2005 return 0; 2006 2007 relocsec = sec->reloc; 2008 if (!relocsec) { 2009 WARN("missing .rela.discard.unwind_hints section"); 2010 return -1; 2011 } 2012 2013 if (sec->sh.sh_size % sizeof(struct unwind_hint)) { 2014 WARN("struct unwind_hint size mismatch"); 2015 return -1; 2016 } 2017 2018 file->hints = true; 2019 2020 for (i = 0; i < sec->sh.sh_size / sizeof(struct unwind_hint); i++) { 2021 hint = (struct unwind_hint *)sec->data->d_buf + i; 2022 2023 reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint)); 2024 if (!reloc) { 2025 WARN("can't find reloc for unwind_hints[%d]", i); 2026 return -1; 2027 } 2028 2029 insn = find_insn(file, reloc->sym->sec, reloc->addend); 2030 if (!insn) { 2031 WARN("can't find insn for unwind_hints[%d]", i); 2032 return -1; 2033 } 2034 2035 insn->hint = true; 2036 2037 if (hint->type == UNWIND_HINT_TYPE_SAVE) { 2038 insn->hint = false; 2039 insn->save = true; 2040 continue; 2041 } 2042 2043 if (hint->type == UNWIND_HINT_TYPE_RESTORE) { 2044 insn->restore = true; 2045 continue; 2046 } 2047 2048 if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) { 2049 struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset); 2050 2051 if (sym && sym->bind == STB_GLOBAL) { 2052 if (opts.ibt && insn->type != INSN_ENDBR && !insn->noendbr) { 2053 WARN_FUNC("UNWIND_HINT_IRET_REGS without 
ENDBR", 2054 insn->sec, insn->offset); 2055 } 2056 2057 insn->entry = 1; 2058 } 2059 } 2060 2061 if (hint->type == UNWIND_HINT_TYPE_ENTRY) { 2062 hint->type = UNWIND_HINT_TYPE_CALL; 2063 insn->entry = 1; 2064 } 2065 2066 if (hint->type == UNWIND_HINT_TYPE_FUNC) { 2067 insn->cfi = &func_cfi; 2068 continue; 2069 } 2070 2071 if (insn->cfi) 2072 cfi = *(insn->cfi); 2073 2074 if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) { 2075 WARN_FUNC("unsupported unwind_hint sp base reg %d", 2076 insn->sec, insn->offset, hint->sp_reg); 2077 return -1; 2078 } 2079 2080 cfi.cfa.offset = bswap_if_needed(hint->sp_offset); 2081 cfi.type = hint->type; 2082 cfi.end = hint->end; 2083 2084 insn->cfi = cfi_hash_find_or_add(&cfi); 2085 } 2086 2087 return 0; 2088 } 2089 2090 static int read_noendbr_hints(struct objtool_file *file) 2091 { 2092 struct section *sec; 2093 struct instruction *insn; 2094 struct reloc *reloc; 2095 2096 sec = find_section_by_name(file->elf, ".rela.discard.noendbr"); 2097 if (!sec) 2098 return 0; 2099 2100 list_for_each_entry(reloc, &sec->reloc_list, list) { 2101 insn = find_insn(file, reloc->sym->sec, reloc->sym->offset + reloc->addend); 2102 if (!insn) { 2103 WARN("bad .discard.noendbr entry"); 2104 return -1; 2105 } 2106 2107 if (insn->type == INSN_ENDBR) 2108 WARN_FUNC("ANNOTATE_NOENDBR on ENDBR", insn->sec, insn->offset); 2109 2110 insn->noendbr = 1; 2111 } 2112 2113 return 0; 2114 } 2115 2116 static int read_retpoline_hints(struct objtool_file *file) 2117 { 2118 struct section *sec; 2119 struct instruction *insn; 2120 struct reloc *reloc; 2121 2122 sec = find_section_by_name(file->elf, ".rela.discard.retpoline_safe"); 2123 if (!sec) 2124 return 0; 2125 2126 list_for_each_entry(reloc, &sec->reloc_list, list) { 2127 if (reloc->sym->type != STT_SECTION) { 2128 WARN("unexpected relocation symbol type in %s", sec->name); 2129 return -1; 2130 } 2131 2132 insn = find_insn(file, reloc->sym->sec, reloc->addend); 2133 if (!insn) { 2134 WARN("bad .discard.retpoline_safe entry"); 2135 return -1; 2136 } 2137 2138 if (insn->type != INSN_JUMP_DYNAMIC && 2139 insn->type != INSN_CALL_DYNAMIC && 2140 insn->type != INSN_RETURN && 2141 insn->type != INSN_NOP) { 2142 WARN_FUNC("retpoline_safe hint not an indirect jump/call/ret/nop", 2143 insn->sec, insn->offset); 2144 return -1; 2145 } 2146 2147 insn->retpoline_safe = true; 2148 } 2149 2150 return 0; 2151 } 2152 2153 static int read_instr_hints(struct objtool_file *file) 2154 { 2155 struct section *sec; 2156 struct instruction *insn; 2157 struct reloc *reloc; 2158 2159 sec = find_section_by_name(file->elf, ".rela.discard.instr_end"); 2160 if (!sec) 2161 return 0; 2162 2163 list_for_each_entry(reloc, &sec->reloc_list, list) { 2164 if (reloc->sym->type != STT_SECTION) { 2165 WARN("unexpected relocation symbol type in %s", sec->name); 2166 return -1; 2167 } 2168 2169 insn = find_insn(file, reloc->sym->sec, reloc->addend); 2170 if (!insn) { 2171 WARN("bad .discard.instr_end entry"); 2172 return -1; 2173 } 2174 2175 insn->instr--; 2176 } 2177 2178 sec = find_section_by_name(file->elf, ".rela.discard.instr_begin"); 2179 if (!sec) 2180 return 0; 2181 2182 list_for_each_entry(reloc, &sec->reloc_list, list) { 2183 if (reloc->sym->type != STT_SECTION) { 2184 WARN("unexpected relocation symbol type in %s", sec->name); 2185 return -1; 2186 } 2187 2188 insn = find_insn(file, reloc->sym->sec, reloc->addend); 2189 if (!insn) { 2190 WARN("bad .discard.instr_begin entry"); 2191 return -1; 2192 } 2193 2194 insn->instr++; 2195 } 2196 2197 return 0; 2198 } 2199 2200 
static int read_intra_function_calls(struct objtool_file *file) 2201 { 2202 struct instruction *insn; 2203 struct section *sec; 2204 struct reloc *reloc; 2205 2206 sec = find_section_by_name(file->elf, ".rela.discard.intra_function_calls"); 2207 if (!sec) 2208 return 0; 2209 2210 list_for_each_entry(reloc, &sec->reloc_list, list) { 2211 unsigned long dest_off; 2212 2213 if (reloc->sym->type != STT_SECTION) { 2214 WARN("unexpected relocation symbol type in %s", 2215 sec->name); 2216 return -1; 2217 } 2218 2219 insn = find_insn(file, reloc->sym->sec, reloc->addend); 2220 if (!insn) { 2221 WARN("bad .discard.intra_function_calls entry"); 2222 return -1; 2223 } 2224 2225 if (insn->type != INSN_CALL) { 2226 WARN_FUNC("intra_function_call not a direct call", 2227 insn->sec, insn->offset); 2228 return -1; 2229 } 2230 2231 /* 2232 * Treat intra-function CALLs as JMPs, but with a stack_op. 2233 * See add_call_destinations(), which strips stack_ops from 2234 * normal CALLs. 2235 */ 2236 insn->type = INSN_JUMP_UNCONDITIONAL; 2237 2238 dest_off = insn->offset + insn->len + insn->immediate; 2239 insn->jump_dest = find_insn(file, insn->sec, dest_off); 2240 if (!insn->jump_dest) { 2241 WARN_FUNC("can't find call dest at %s+0x%lx", 2242 insn->sec, insn->offset, 2243 insn->sec->name, dest_off); 2244 return -1; 2245 } 2246 } 2247 2248 return 0; 2249 } 2250 2251 /* 2252 * Return true if name matches an instrumentation function, where calls to that 2253 * function from noinstr code can safely be removed, but compilers won't do so. 2254 */ 2255 static bool is_profiling_func(const char *name) 2256 { 2257 /* 2258 * Many compilers cannot disable KCOV with a function attribute. 2259 */ 2260 if (!strncmp(name, "__sanitizer_cov_", 16)) 2261 return true; 2262 2263 /* 2264 * Some compilers currently do not remove __tsan_func_entry/exit nor 2265 * __tsan_atomic_signal_fence (used for barrier instrumentation) even 2266 * with the __no_sanitize_thread attribute, so remove them here. Once 2267 * the kernel's minimum Clang version is 14.0, this can be removed. 2268 */ 2269 if (!strncmp(name, "__tsan_func_", 12) || 2270 !strcmp(name, "__tsan_atomic_signal_fence")) 2271 return true; 2272 2273 return false; 2274 } 2275 2276 static int classify_symbols(struct objtool_file *file) 2277 { 2278 struct section *sec; 2279 struct symbol *func; 2280 2281 for_each_sec(file, sec) { 2282 list_for_each_entry(func, &sec->symbol_list, list) { 2283 if (func->bind != STB_GLOBAL) 2284 continue; 2285 2286 if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR, 2287 strlen(STATIC_CALL_TRAMP_PREFIX_STR))) 2288 func->static_call_tramp = true; 2289 2290 if (arch_is_retpoline(func)) 2291 func->retpoline_thunk = true; 2292 2293 if (arch_is_rethunk(func)) 2294 func->return_thunk = true; 2295 2296 if (!strcmp(func->name, "__fentry__")) 2297 func->fentry = true; 2298 2299 if (is_profiling_func(func->name)) 2300 func->profiling_func = true; 2301 } 2302 } 2303 2304 return 0; 2305 } 2306 2307 static void mark_rodata(struct objtool_file *file) 2308 { 2309 struct section *sec; 2310 bool found = false; 2311 2312 /* 2313 * Search for the following rodata sections, each of which can 2314 * potentially contain jump tables: 2315 * 2316 * - .rodata: can contain GCC switch tables 2317 * - .rodata.<func>: same, if -fdata-sections is being used 2318 * - .rodata..c_jump_table: contains C annotated jump tables 2319 * 2320 * .rodata.str1.* sections are ignored; they don't contain jump tables.
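 *
 * As a sketch of what is being searched for: a C switch with dense cases,
 * such as
 *
 *	switch (x) {
 *	case 0: ...
 *	...
 *	case 7: ...
 *	}
 *
 * may compile (assuming the compiler chooses a table) to an indirect jump
 * like "jmp *table(,%rax,8)", with "table" living in one of the sections
 * above as an array of code addresses.  The exact code shape varies by
 * compiler and flags; only the section placement matters here.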
2321 */ 2322 for_each_sec(file, sec) { 2323 if (!strncmp(sec->name, ".rodata", 7) && 2324 !strstr(sec->name, ".str1.")) { 2325 sec->rodata = true; 2326 found = true; 2327 } 2328 } 2329 2330 file->rodata = found; 2331 } 2332 2333 static int decode_sections(struct objtool_file *file) 2334 { 2335 int ret; 2336 2337 mark_rodata(file); 2338 2339 ret = init_pv_ops(file); 2340 if (ret) 2341 return ret; 2342 2343 ret = decode_instructions(file); 2344 if (ret) 2345 return ret; 2346 2347 add_ignores(file); 2348 add_uaccess_safe(file); 2349 2350 ret = add_ignore_alternatives(file); 2351 if (ret) 2352 return ret; 2353 2354 /* 2355 * Must be before read_unwind_hints() since that needs insn->noendbr. 2356 */ 2357 ret = read_noendbr_hints(file); 2358 if (ret) 2359 return ret; 2360 2361 /* 2362 * Must be before add_{jump_call}_destination. 2363 */ 2364 ret = classify_symbols(file); 2365 if (ret) 2366 return ret; 2367 2368 /* 2369 * Must be before add_jump_destinations(), which depends on 'func' 2370 * being set for alternatives, to enable proper sibling call detection. 2371 */ 2372 ret = add_special_section_alts(file); 2373 if (ret) 2374 return ret; 2375 2376 ret = add_jump_destinations(file); 2377 if (ret) 2378 return ret; 2379 2380 /* 2381 * Must be before add_call_destination(); it changes INSN_CALL to 2382 * INSN_JUMP. 2383 */ 2384 ret = read_intra_function_calls(file); 2385 if (ret) 2386 return ret; 2387 2388 ret = add_call_destinations(file); 2389 if (ret) 2390 return ret; 2391 2392 /* 2393 * Must be after add_call_destinations() such that it can override 2394 * dead_end_function() marks. 2395 */ 2396 ret = add_dead_ends(file); 2397 if (ret) 2398 return ret; 2399 2400 ret = add_jump_table_alts(file); 2401 if (ret) 2402 return ret; 2403 2404 ret = read_unwind_hints(file); 2405 if (ret) 2406 return ret; 2407 2408 ret = read_retpoline_hints(file); 2409 if (ret) 2410 return ret; 2411 2412 ret = read_instr_hints(file); 2413 if (ret) 2414 return ret; 2415 2416 return 0; 2417 } 2418 2419 static bool is_fentry_call(struct instruction *insn) 2420 { 2421 if (insn->type == INSN_CALL && 2422 insn->call_dest && 2423 insn->call_dest->fentry) 2424 return true; 2425 2426 return false; 2427 } 2428 2429 static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state) 2430 { 2431 struct cfi_state *cfi = &state->cfi; 2432 int i; 2433 2434 if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap) 2435 return true; 2436 2437 if (cfi->cfa.offset != initial_func_cfi.cfa.offset) 2438 return true; 2439 2440 if (cfi->stack_size != initial_func_cfi.cfa.offset) 2441 return true; 2442 2443 for (i = 0; i < CFI_NUM_REGS; i++) { 2444 if (cfi->regs[i].base != initial_func_cfi.regs[i].base || 2445 cfi->regs[i].offset != initial_func_cfi.regs[i].offset) 2446 return true; 2447 } 2448 2449 return false; 2450 } 2451 2452 static bool check_reg_frame_pos(const struct cfi_reg *reg, 2453 int expected_offset) 2454 { 2455 return reg->base == CFI_CFA && 2456 reg->offset == expected_offset; 2457 } 2458 2459 static bool has_valid_stack_frame(struct insn_state *state) 2460 { 2461 struct cfi_state *cfi = &state->cfi; 2462 2463 if (cfi->cfa.base == CFI_BP && 2464 check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) && 2465 check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8)) 2466 return true; 2467 2468 if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP) 2469 return true; 2470 2471 return false; 2472 } 2473 2474 static int update_cfi_state_regs(struct instruction *insn, 2475 struct cfi_state *cfi, 2476 struct 
stack_op *op) 2477 { 2478 struct cfi_reg *cfa = &cfi->cfa; 2479 2480 if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT) 2481 return 0; 2482 2483 /* push */ 2484 if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF) 2485 cfa->offset += 8; 2486 2487 /* pop */ 2488 if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF) 2489 cfa->offset -= 8; 2490 2491 /* add immediate to sp */ 2492 if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD && 2493 op->dest.reg == CFI_SP && op->src.reg == CFI_SP) 2494 cfa->offset -= op->src.offset; 2495 2496 return 0; 2497 } 2498 2499 static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset) 2500 { 2501 if (arch_callee_saved_reg(reg) && 2502 cfi->regs[reg].base == CFI_UNDEFINED) { 2503 cfi->regs[reg].base = base; 2504 cfi->regs[reg].offset = offset; 2505 } 2506 } 2507 2508 static void restore_reg(struct cfi_state *cfi, unsigned char reg) 2509 { 2510 cfi->regs[reg].base = initial_func_cfi.regs[reg].base; 2511 cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset; 2512 } 2513 2514 /* 2515 * A note about DRAP stack alignment: 2516 * 2517 * GCC has the concept of a DRAP register, which is used to help keep track of 2518 * the stack pointer when aligning the stack. r10 or r13 is used as the DRAP 2519 * register. The typical DRAP pattern is: 2520 * 2521 * 4c 8d 54 24 08 lea 0x8(%rsp),%r10 2522 * 48 83 e4 c0 and $0xffffffffffffffc0,%rsp 2523 * 41 ff 72 f8 pushq -0x8(%r10) 2524 * 55 push %rbp 2525 * 48 89 e5 mov %rsp,%rbp 2526 * (more pushes) 2527 * 41 52 push %r10 2528 * ... 2529 * 41 5a pop %r10 2530 * (more pops) 2531 * 5d pop %rbp 2532 * 49 8d 62 f8 lea -0x8(%r10),%rsp 2533 * c3 retq 2534 * 2535 * There are some variations in the epilogues, like: 2536 * 2537 * 5b pop %rbx 2538 * 41 5a pop %r10 2539 * 41 5c pop %r12 2540 * 41 5d pop %r13 2541 * 41 5e pop %r14 2542 * c9 leaveq 2543 * 49 8d 62 f8 lea -0x8(%r10),%rsp 2544 * c3 retq 2545 * 2546 * and: 2547 * 2548 * 4c 8b 55 e8 mov -0x18(%rbp),%r10 2549 * 48 8b 5d e0 mov -0x20(%rbp),%rbx 2550 * 4c 8b 65 f0 mov -0x10(%rbp),%r12 2551 * 4c 8b 6d f8 mov -0x8(%rbp),%r13 2552 * c9 leaveq 2553 * 49 8d 62 f8 lea -0x8(%r10),%rsp 2554 * c3 retq 2555 * 2556 * Sometimes r13 is used as the DRAP register, in which case it's saved and 2557 * restored beforehand: 2558 * 2559 * 41 55 push %r13 2560 * 4c 8d 6c 24 10 lea 0x10(%rsp),%r13 2561 * 48 83 e4 f0 and $0xfffffffffffffff0,%rsp 2562 * ... 
2563 * 49 8d 65 f0 lea -0x10(%r13),%rsp 2564 * 41 5d pop %r13 2565 * c3 retq 2566 */ 2567 static int update_cfi_state(struct instruction *insn, 2568 struct instruction *next_insn, 2569 struct cfi_state *cfi, struct stack_op *op) 2570 { 2571 struct cfi_reg *cfa = &cfi->cfa; 2572 struct cfi_reg *regs = cfi->regs; 2573 2574 /* stack operations don't make sense with an undefined CFA */ 2575 if (cfa->base == CFI_UNDEFINED) { 2576 if (insn->func) { 2577 WARN_FUNC("undefined stack state", insn->sec, insn->offset); 2578 return -1; 2579 } 2580 return 0; 2581 } 2582 2583 if (cfi->type == UNWIND_HINT_TYPE_REGS || 2584 cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL) 2585 return update_cfi_state_regs(insn, cfi, op); 2586 2587 switch (op->dest.type) { 2588 2589 case OP_DEST_REG: 2590 switch (op->src.type) { 2591 2592 case OP_SRC_REG: 2593 if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP && 2594 cfa->base == CFI_SP && 2595 check_reg_frame_pos(&regs[CFI_BP], -cfa->offset)) { 2596 2597 /* mov %rsp, %rbp */ 2598 cfa->base = op->dest.reg; 2599 cfi->bp_scratch = false; 2600 } 2601 2602 else if (op->src.reg == CFI_SP && 2603 op->dest.reg == CFI_BP && cfi->drap) { 2604 2605 /* drap: mov %rsp, %rbp */ 2606 regs[CFI_BP].base = CFI_BP; 2607 regs[CFI_BP].offset = -cfi->stack_size; 2608 cfi->bp_scratch = false; 2609 } 2610 2611 else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) { 2612 2613 /* 2614 * mov %rsp, %reg 2615 * 2616 * This is needed for the rare case where GCC 2617 * does: 2618 * 2619 * mov %rsp, %rax 2620 * ... 2621 * mov %rax, %rsp 2622 */ 2623 cfi->vals[op->dest.reg].base = CFI_CFA; 2624 cfi->vals[op->dest.reg].offset = -cfi->stack_size; 2625 } 2626 2627 else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP && 2628 (cfa->base == CFI_BP || cfa->base == cfi->drap_reg)) { 2629 2630 /* 2631 * mov %rbp, %rsp 2632 * 2633 * Restore the original stack pointer (Clang). 2634 */ 2635 cfi->stack_size = -cfi->regs[CFI_BP].offset; 2636 } 2637 2638 else if (op->dest.reg == cfa->base) { 2639 2640 /* mov %reg, %rsp */ 2641 if (cfa->base == CFI_SP && 2642 cfi->vals[op->src.reg].base == CFI_CFA) { 2643 2644 /* 2645 * This is needed for the rare case 2646 * where GCC does something dumb like: 2647 * 2648 * lea 0x8(%rsp), %rcx 2649 * ... 2650 * mov %rcx, %rsp 2651 */ 2652 cfa->offset = -cfi->vals[op->src.reg].offset; 2653 cfi->stack_size = cfa->offset; 2654 2655 } else if (cfa->base == CFI_SP && 2656 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT && 2657 cfi->vals[op->src.reg].offset == cfa->offset) { 2658 2659 /* 2660 * Stack swizzle: 2661 * 2662 * 1: mov %rsp, (%[tos]) 2663 * 2: mov %[tos], %rsp 2664 * ... 2665 * 3: pop %rsp 2666 * 2667 * Where: 2668 * 2669 * 1 - places a pointer to the previous 2670 * stack at the Top-of-Stack of the 2671 * new stack. 2672 * 2673 * 2 - switches to the new stack. 2674 * 2675 * 3 - pops the Top-of-Stack to restore 2676 * the original stack. 2677 * 2678 * Note: we set base to SP_INDIRECT 2679 * here and preserve offset. Therefore 2680 * when the unwinder reaches ToS it 2681 * will dereference SP and then add the 2682 * offset to find the next frame, IOW: 2683 * (%rsp) + offset. 2684 */ 2685 cfa->base = CFI_SP_INDIRECT; 2686 2687 } else { 2688 cfa->base = CFI_UNDEFINED; 2689 cfa->offset = 0; 2690 } 2691 } 2692 2693 else if (op->dest.reg == CFI_SP && 2694 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT && 2695 cfi->vals[op->src.reg].offset == cfa->offset) { 2696 2697 /* 2698 * The same stack swizzle case 2) as above.
But 2699 * because we can't change cfa->base, case 3) 2700 * will become a regular POP. Pretend we're a 2701 * PUSH so things don't go unbalanced. 2702 */ 2703 cfi->stack_size += 8; 2704 } 2705 2706 2707 break; 2708 2709 case OP_SRC_ADD: 2710 if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) { 2711 2712 /* add imm, %rsp */ 2713 cfi->stack_size -= op->src.offset; 2714 if (cfa->base == CFI_SP) 2715 cfa->offset -= op->src.offset; 2716 break; 2717 } 2718 2719 if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) { 2720 2721 /* lea disp(%rbp), %rsp */ 2722 cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset); 2723 break; 2724 } 2725 2726 if (!cfi->drap && op->src.reg == CFI_SP && 2727 op->dest.reg == CFI_BP && cfa->base == CFI_SP && 2728 check_reg_frame_pos(&regs[CFI_BP], -cfa->offset + op->src.offset)) { 2729 2730 /* lea disp(%rsp), %rbp */ 2731 cfa->base = CFI_BP; 2732 cfa->offset -= op->src.offset; 2733 cfi->bp_scratch = false; 2734 break; 2735 } 2736 2737 if (op->src.reg == CFI_SP && cfa->base == CFI_SP) { 2738 2739 /* drap: lea disp(%rsp), %drap */ 2740 cfi->drap_reg = op->dest.reg; 2741 2742 /* 2743 * lea disp(%rsp), %reg 2744 * 2745 * This is needed for the rare case where GCC 2746 * does something dumb like: 2747 * 2748 * lea 0x8(%rsp), %rcx 2749 * ... 2750 * mov %rcx, %rsp 2751 */ 2752 cfi->vals[op->dest.reg].base = CFI_CFA; 2753 cfi->vals[op->dest.reg].offset = 2754 -cfi->stack_size + op->src.offset; 2755 2756 break; 2757 } 2758 2759 if (cfi->drap && op->dest.reg == CFI_SP && 2760 op->src.reg == cfi->drap_reg) { 2761 2762 /* drap: lea disp(%drap), %rsp */ 2763 cfa->base = CFI_SP; 2764 cfa->offset = cfi->stack_size = -op->src.offset; 2765 cfi->drap_reg = CFI_UNDEFINED; 2766 cfi->drap = false; 2767 break; 2768 } 2769 2770 if (op->dest.reg == cfi->cfa.base && !(next_insn && next_insn->hint)) { 2771 WARN_FUNC("unsupported stack register modification", 2772 insn->sec, insn->offset); 2773 return -1; 2774 } 2775 2776 break; 2777 2778 case OP_SRC_AND: 2779 if (op->dest.reg != CFI_SP || 2780 (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) || 2781 (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) { 2782 WARN_FUNC("unsupported stack pointer realignment", 2783 insn->sec, insn->offset); 2784 return -1; 2785 } 2786 2787 if (cfi->drap_reg != CFI_UNDEFINED) { 2788 /* drap: and imm, %rsp */ 2789 cfa->base = cfi->drap_reg; 2790 cfa->offset = cfi->stack_size = 0; 2791 cfi->drap = true; 2792 } 2793 2794 /* 2795 * Older versions of GCC (4.8ish) realign the stack 2796 * without DRAP, with a frame pointer.
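 *
 * That older, non-DRAP shape is roughly (a sketch from memory of
 * GCC 4.8-era output, not any specific binary):
 *
 *	55			push   %rbp
 *	48 89 e5		mov    %rsp,%rbp
 *	48 83 e4 f0		and    $0xfffffffffffffff0,%rsp
 *	...
 *	c9			leaveq
 *	c3			retq
 *
 * With the CFA anchored to %rbp, the AND on %rsp needs no extra
 * tracking: falling through here with cfa->base == CFI_BP is enough.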
2797 */ 2798 2799 break; 2800 2801 case OP_SRC_POP: 2802 case OP_SRC_POPF: 2803 if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) { 2804 2805 /* pop %rsp; # restore from a stack swizzle */ 2806 cfa->base = CFI_SP; 2807 break; 2808 } 2809 2810 if (!cfi->drap && op->dest.reg == cfa->base) { 2811 2812 /* pop %rbp */ 2813 cfa->base = CFI_SP; 2814 } 2815 2816 if (cfi->drap && cfa->base == CFI_BP_INDIRECT && 2817 op->dest.reg == cfi->drap_reg && 2818 cfi->drap_offset == -cfi->stack_size) { 2819 2820 /* drap: pop %drap */ 2821 cfa->base = cfi->drap_reg; 2822 cfa->offset = 0; 2823 cfi->drap_offset = -1; 2824 2825 } else if (cfi->stack_size == -regs[op->dest.reg].offset) { 2826 2827 /* pop %reg */ 2828 restore_reg(cfi, op->dest.reg); 2829 } 2830 2831 cfi->stack_size -= 8; 2832 if (cfa->base == CFI_SP) 2833 cfa->offset -= 8; 2834 2835 break; 2836 2837 case OP_SRC_REG_INDIRECT: 2838 if (!cfi->drap && op->dest.reg == cfa->base && 2839 op->dest.reg == CFI_BP) { 2840 2841 /* mov disp(%rsp), %rbp */ 2842 cfa->base = CFI_SP; 2843 cfa->offset = cfi->stack_size; 2844 } 2845 2846 if (cfi->drap && op->src.reg == CFI_BP && 2847 op->src.offset == cfi->drap_offset) { 2848 2849 /* drap: mov disp(%rbp), %drap */ 2850 cfa->base = cfi->drap_reg; 2851 cfa->offset = 0; 2852 cfi->drap_offset = -1; 2853 } 2854 2855 if (cfi->drap && op->src.reg == CFI_BP && 2856 op->src.offset == regs[op->dest.reg].offset) { 2857 2858 /* drap: mov disp(%rbp), %reg */ 2859 restore_reg(cfi, op->dest.reg); 2860 2861 } else if (op->src.reg == cfa->base && 2862 op->src.offset == regs[op->dest.reg].offset + cfa->offset) { 2863 2864 /* mov disp(%rbp), %reg */ 2865 /* mov disp(%rsp), %reg */ 2866 restore_reg(cfi, op->dest.reg); 2867 2868 } else if (op->src.reg == CFI_SP && 2869 op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) { 2870 2871 /* mov disp(%rsp), %reg */ 2872 restore_reg(cfi, op->dest.reg); 2873 } 2874 2875 break; 2876 2877 default: 2878 WARN_FUNC("unknown stack-related instruction", 2879 insn->sec, insn->offset); 2880 return -1; 2881 } 2882 2883 break; 2884 2885 case OP_DEST_PUSH: 2886 case OP_DEST_PUSHF: 2887 cfi->stack_size += 8; 2888 if (cfa->base == CFI_SP) 2889 cfa->offset += 8; 2890 2891 if (op->src.type != OP_SRC_REG) 2892 break; 2893 2894 if (cfi->drap) { 2895 if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) { 2896 2897 /* drap: push %drap */ 2898 cfa->base = CFI_BP_INDIRECT; 2899 cfa->offset = -cfi->stack_size; 2900 2901 /* save drap so we know when to restore it */ 2902 cfi->drap_offset = -cfi->stack_size; 2903 2904 } else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) { 2905 2906 /* drap: push %rbp */ 2907 cfi->stack_size = 0; 2908 2909 } else { 2910 2911 /* drap: push %reg */ 2912 save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size); 2913 } 2914 2915 } else { 2916 2917 /* push %reg */ 2918 save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size); 2919 } 2920 2921 /* detect when asm code uses rbp as a scratch register */ 2922 if (opts.stackval && insn->func && op->src.reg == CFI_BP && 2923 cfa->base != CFI_BP) 2924 cfi->bp_scratch = true; 2925 break; 2926 2927 case OP_DEST_REG_INDIRECT: 2928 2929 if (cfi->drap) { 2930 if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) { 2931 2932 /* drap: mov %drap, disp(%rbp) */ 2933 cfa->base = CFI_BP_INDIRECT; 2934 cfa->offset = op->dest.offset; 2935 2936 /* save drap offset so we know when to restore it */ 2937 cfi->drap_offset = op->dest.offset; 2938 } else { 2939 2940 /* drap: mov reg, disp(%rbp) */ 2941 save_reg(cfi, op->src.reg, 
CFI_BP, op->dest.offset); 2942 } 2943 2944 } else if (op->dest.reg == cfa->base) { 2945 2946 /* mov reg, disp(%rbp) */ 2947 /* mov reg, disp(%rsp) */ 2948 save_reg(cfi, op->src.reg, CFI_CFA, 2949 op->dest.offset - cfi->cfa.offset); 2950 2951 } else if (op->dest.reg == CFI_SP) { 2952 2953 /* mov reg, disp(%rsp) */ 2954 save_reg(cfi, op->src.reg, CFI_CFA, 2955 op->dest.offset - cfi->stack_size); 2956 2957 } else if (op->src.reg == CFI_SP && op->dest.offset == 0) { 2958 2959 /* mov %rsp, (%reg); # setup a stack swizzle. */ 2960 cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT; 2961 cfi->vals[op->dest.reg].offset = cfa->offset; 2962 } 2963 2964 break; 2965 2966 case OP_DEST_MEM: 2967 if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) { 2968 WARN_FUNC("unknown stack-related memory operation", 2969 insn->sec, insn->offset); 2970 return -1; 2971 } 2972 2973 /* pop mem */ 2974 cfi->stack_size -= 8; 2975 if (cfa->base == CFI_SP) 2976 cfa->offset -= 8; 2977 2978 break; 2979 2980 default: 2981 WARN_FUNC("unknown stack-related instruction", 2982 insn->sec, insn->offset); 2983 return -1; 2984 } 2985 2986 return 0; 2987 } 2988 2989 /* 2990 * The stack layouts of alternatives instructions can sometimes diverge when 2991 * they have stack modifications. That's fine as long as the potential stack 2992 * layouts don't conflict at any given potential instruction boundary. 2993 * 2994 * Flatten the CFIs of the different alternative code streams (both original 2995 * and replacement) into a single shared CFI array which can be used to detect 2996 * conflicts and nicely feed a linear array of ORC entries to the unwinder. 2997 */ 2998 static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn) 2999 { 3000 struct cfi_state **alt_cfi; 3001 int group_off; 3002 3003 if (!insn->alt_group) 3004 return 0; 3005 3006 if (!insn->cfi) { 3007 WARN("CFI missing"); 3008 return -1; 3009 } 3010 3011 alt_cfi = insn->alt_group->cfi; 3012 group_off = insn->offset - insn->alt_group->first_insn->offset; 3013 3014 if (!alt_cfi[group_off]) { 3015 alt_cfi[group_off] = insn->cfi; 3016 } else { 3017 if (cficmp(alt_cfi[group_off], insn->cfi)) { 3018 WARN_FUNC("stack layout conflict in alternatives", 3019 insn->sec, insn->offset); 3020 return -1; 3021 } 3022 } 3023 3024 return 0; 3025 } 3026 3027 static int handle_insn_ops(struct instruction *insn, 3028 struct instruction *next_insn, 3029 struct insn_state *state) 3030 { 3031 struct stack_op *op; 3032 3033 list_for_each_entry(op, &insn->stack_ops, list) { 3034 3035 if (update_cfi_state(insn, next_insn, &state->cfi, op)) 3036 return 1; 3037 3038 if (!insn->alt_group) 3039 continue; 3040 3041 if (op->dest.type == OP_DEST_PUSHF) { 3042 if (!state->uaccess_stack) { 3043 state->uaccess_stack = 1; 3044 } else if (state->uaccess_stack >> 31) { 3045 WARN_FUNC("PUSHF stack exhausted", 3046 insn->sec, insn->offset); 3047 return 1; 3048 } 3049 state->uaccess_stack <<= 1; 3050 state->uaccess_stack |= state->uaccess; 3051 } 3052 3053 if (op->src.type == OP_SRC_POPF) { 3054 if (state->uaccess_stack) { 3055 state->uaccess = state->uaccess_stack & 1; 3056 state->uaccess_stack >>= 1; 3057 if (state->uaccess_stack == 1) 3058 state->uaccess_stack = 0; 3059 } 3060 } 3061 } 3062 3063 return 0; 3064 } 3065 3066 static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2) 3067 { 3068 struct cfi_state *cfi1 = insn->cfi; 3069 int i; 3070 3071 if (!cfi1) { 3072 WARN("CFI missing"); 3073 return false; 3074 } 3075 3076 if (memcmp(&cfi1->cfa, &cfi2->cfa, 
sizeof(cfi1->cfa))) { 3077 3078 WARN_FUNC("stack state mismatch: cfa1=%d%+d cfa2=%d%+d", 3079 insn->sec, insn->offset, 3080 cfi1->cfa.base, cfi1->cfa.offset, 3081 cfi2->cfa.base, cfi2->cfa.offset); 3082 3083 } else if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) { 3084 for (i = 0; i < CFI_NUM_REGS; i++) { 3085 if (!memcmp(&cfi1->regs[i], &cfi2->regs[i], 3086 sizeof(struct cfi_reg))) 3087 continue; 3088 3089 WARN_FUNC("stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d", 3090 insn->sec, insn->offset, 3091 i, cfi1->regs[i].base, cfi1->regs[i].offset, 3092 i, cfi2->regs[i].base, cfi2->regs[i].offset); 3093 break; 3094 } 3095 3096 } else if (cfi1->type != cfi2->type) { 3097 3098 WARN_FUNC("stack state mismatch: type1=%d type2=%d", 3099 insn->sec, insn->offset, cfi1->type, cfi2->type); 3100 3101 } else if (cfi1->drap != cfi2->drap || 3102 (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) || 3103 (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) { 3104 3105 WARN_FUNC("stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)", 3106 insn->sec, insn->offset, 3107 cfi1->drap, cfi1->drap_reg, cfi1->drap_offset, 3108 cfi2->drap, cfi2->drap_reg, cfi2->drap_offset); 3109 3110 } else 3111 return true; 3112 3113 return false; 3114 } 3115 3116 static inline bool func_uaccess_safe(struct symbol *func) 3117 { 3118 if (func) 3119 return func->uaccess_safe; 3120 3121 return false; 3122 } 3123 3124 static inline const char *call_dest_name(struct instruction *insn) 3125 { 3126 static char pvname[19]; 3127 struct reloc *rel; 3128 int idx; 3129 3130 if (insn->call_dest) 3131 return insn->call_dest->name; 3132 3133 rel = insn_reloc(NULL, insn); 3134 if (rel && !strcmp(rel->sym->name, "pv_ops")) { 3135 idx = (rel->addend / sizeof(void *)); 3136 snprintf(pvname, sizeof(pvname), "pv_ops[%d]", idx); 3137 return pvname; 3138 } 3139 3140 return "{dynamic}"; 3141 } 3142 3143 static bool pv_call_dest(struct objtool_file *file, struct instruction *insn) 3144 { 3145 struct symbol *target; 3146 struct reloc *rel; 3147 int idx; 3148 3149 rel = insn_reloc(file, insn); 3150 if (!rel || strcmp(rel->sym->name, "pv_ops")) 3151 return false; 3152 3153 idx = (arch_dest_reloc_offset(rel->addend) / sizeof(void *)); 3154 3155 if (file->pv_ops[idx].clean) 3156 return true; 3157 3158 file->pv_ops[idx].clean = true; 3159 3160 list_for_each_entry(target, &file->pv_ops[idx].targets, pv_target) { 3161 if (!target->sec->noinstr) { 3162 WARN("pv_ops[%d]: %s", idx, target->name); 3163 file->pv_ops[idx].clean = false; 3164 } 3165 } 3166 3167 return file->pv_ops[idx].clean; 3168 } 3169 3170 static inline bool noinstr_call_dest(struct objtool_file *file, 3171 struct instruction *insn, 3172 struct symbol *func) 3173 { 3174 /* 3175 * We can't deal with indirect function calls at present; 3176 * assume they're instrumented. 3177 */ 3178 if (!func) { 3179 if (file->pv_ops) 3180 return pv_call_dest(file, insn); 3181 3182 return false; 3183 } 3184 3185 /* 3186 * If the symbol is from a noinstr section, we're good. 3187 */ 3188 if (func->sec->noinstr) 3189 return true; 3190 3191 /* 3192 * The __ubsan_handle_*() calls are like WARN(): they only happen when 3193 * something 'BAD' happened. At the risk of taking the machine down, 3194 * let them proceed to get the message out.
3195 */ 3196 if (!strncmp(func->name, "__ubsan_handle_", 15)) 3197 return true; 3198 3199 return false; 3200 } 3201 3202 static int validate_call(struct objtool_file *file, 3203 struct instruction *insn, 3204 struct insn_state *state) 3205 { 3206 if (state->noinstr && state->instr <= 0 && 3207 !noinstr_call_dest(file, insn, insn->call_dest)) { 3208 WARN_FUNC("call to %s() leaves .noinstr.text section", 3209 insn->sec, insn->offset, call_dest_name(insn)); 3210 return 1; 3211 } 3212 3213 if (state->uaccess && !func_uaccess_safe(insn->call_dest)) { 3214 WARN_FUNC("call to %s() with UACCESS enabled", 3215 insn->sec, insn->offset, call_dest_name(insn)); 3216 return 1; 3217 } 3218 3219 if (state->df) { 3220 WARN_FUNC("call to %s() with DF set", 3221 insn->sec, insn->offset, call_dest_name(insn)); 3222 return 1; 3223 } 3224 3225 return 0; 3226 } 3227 3228 static int validate_sibling_call(struct objtool_file *file, 3229 struct instruction *insn, 3230 struct insn_state *state) 3231 { 3232 if (has_modified_stack_frame(insn, state)) { 3233 WARN_FUNC("sibling call from callable instruction with modified stack frame", 3234 insn->sec, insn->offset); 3235 return 1; 3236 } 3237 3238 return validate_call(file, insn, state); 3239 } 3240 3241 static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state) 3242 { 3243 if (state->noinstr && state->instr > 0) { 3244 WARN_FUNC("return with instrumentation enabled", 3245 insn->sec, insn->offset); 3246 return 1; 3247 } 3248 3249 if (state->uaccess && !func_uaccess_safe(func)) { 3250 WARN_FUNC("return with UACCESS enabled", 3251 insn->sec, insn->offset); 3252 return 1; 3253 } 3254 3255 if (!state->uaccess && func_uaccess_safe(func)) { 3256 WARN_FUNC("return with UACCESS disabled from a UACCESS-safe function", 3257 insn->sec, insn->offset); 3258 return 1; 3259 } 3260 3261 if (state->df) { 3262 WARN_FUNC("return with DF set", 3263 insn->sec, insn->offset); 3264 return 1; 3265 } 3266 3267 if (func && has_modified_stack_frame(insn, state)) { 3268 WARN_FUNC("return with modified stack frame", 3269 insn->sec, insn->offset); 3270 return 1; 3271 } 3272 3273 if (state->cfi.bp_scratch) { 3274 WARN_FUNC("BP used as a scratch register", 3275 insn->sec, insn->offset); 3276 return 1; 3277 } 3278 3279 return 0; 3280 } 3281 3282 static struct instruction *next_insn_to_validate(struct objtool_file *file, 3283 struct instruction *insn) 3284 { 3285 struct alt_group *alt_group = insn->alt_group; 3286 3287 /* 3288 * Simulate the fact that alternatives are patched in-place. When the 3289 * end of a replacement alt_group is reached, redirect objtool flow to 3290 * the end of the original alt_group. 3291 */ 3292 if (alt_group && insn == alt_group->last_insn && alt_group->orig_group) 3293 return next_insn_same_sec(file, alt_group->orig_group->last_insn); 3294 3295 return next_insn_same_sec(file, insn); 3296 } 3297 3298 /* 3299 * Follow the branch starting at the given instruction, and recursively follow 3300 * any other branches (jumps). Meanwhile, track the frame pointer state at 3301 * each instruction and validate all the rules described in 3302 * tools/objtool/Documentation/objtool.txt. 
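 *
 * In outline (a simplified sketch of the loop below, not its literal
 * control flow):
 *
 *	while (insn) {
 *		for each alternative:
 *			validate_branch(file, func, alt->insn, state);
 *		handle_insn_ops(insn, next_insn, &state);  // stack_ops -> state.cfi
 *		switch (insn->type) {
 *		case conditional jump:
 *			validate_branch(file, func, insn->jump_dest, state);
 *			break;
 *		case ret / unconditional jump / dead end:
 *			return;
 *		}
 *		insn = next_insn_to_validate(file, insn);
 *	}
 *
 * Note that 'state' is passed by value, so every recursion forks an
 * independent copy; diverging paths are only cross-checked when they
 * revisit an instruction, via insn_cfi_match().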
3303 */ 3304 static int validate_branch(struct objtool_file *file, struct symbol *func, 3305 struct instruction *insn, struct insn_state state) 3306 { 3307 struct alternative *alt; 3308 struct instruction *next_insn, *prev_insn = NULL; 3309 struct section *sec; 3310 u8 visited; 3311 int ret; 3312 3313 sec = insn->sec; 3314 3315 while (1) { 3316 next_insn = next_insn_to_validate(file, insn); 3317 3318 if (func && insn->func && func != insn->func->pfunc) { 3319 WARN("%s() falls through to next function %s()", 3320 func->name, insn->func->name); 3321 return 1; 3322 } 3323 3324 if (func && insn->ignore) { 3325 WARN_FUNC("BUG: why am I validating an ignored function?", 3326 sec, insn->offset); 3327 return 1; 3328 } 3329 3330 visited = VISITED_BRANCH << state.uaccess; 3331 if (insn->visited & VISITED_BRANCH_MASK) { 3332 if (!insn->hint && !insn_cfi_match(insn, &state.cfi)) 3333 return 1; 3334 3335 if (insn->visited & visited) 3336 return 0; 3337 } else { 3338 nr_insns_visited++; 3339 } 3340 3341 if (state.noinstr) 3342 state.instr += insn->instr; 3343 3344 if (insn->hint) { 3345 if (insn->restore) { 3346 struct instruction *save_insn, *i; 3347 3348 i = insn; 3349 save_insn = NULL; 3350 3351 sym_for_each_insn_continue_reverse(file, func, i) { 3352 if (i->save) { 3353 save_insn = i; 3354 break; 3355 } 3356 } 3357 3358 if (!save_insn) { 3359 WARN_FUNC("no corresponding CFI save for CFI restore", 3360 sec, insn->offset); 3361 return 1; 3362 } 3363 3364 if (!save_insn->visited) { 3365 WARN_FUNC("objtool isn't smart enough to handle this CFI save/restore combo", 3366 sec, insn->offset); 3367 return 1; 3368 } 3369 3370 insn->cfi = save_insn->cfi; 3371 nr_cfi_reused++; 3372 } 3373 3374 state.cfi = *insn->cfi; 3375 } else { 3376 /* XXX track if we actually changed state.cfi */ 3377 3378 if (prev_insn && !cficmp(prev_insn->cfi, &state.cfi)) { 3379 insn->cfi = prev_insn->cfi; 3380 nr_cfi_reused++; 3381 } else { 3382 insn->cfi = cfi_hash_find_or_add(&state.cfi); 3383 } 3384 } 3385 3386 insn->visited |= visited; 3387 3388 if (propagate_alt_cfi(file, insn)) 3389 return 1; 3390 3391 if (!insn->ignore_alts && !list_empty(&insn->alts)) { 3392 bool skip_orig = false; 3393 3394 list_for_each_entry(alt, &insn->alts, list) { 3395 if (alt->skip_orig) 3396 skip_orig = true; 3397 3398 ret = validate_branch(file, func, alt->insn, state); 3399 if (ret) { 3400 if (opts.backtrace) 3401 BT_FUNC("(alt)", insn); 3402 return ret; 3403 } 3404 } 3405 3406 if (skip_orig) 3407 return 0; 3408 } 3409 3410 if (handle_insn_ops(insn, next_insn, &state)) 3411 return 1; 3412 3413 switch (insn->type) { 3414 3415 case INSN_RETURN: 3416 return validate_return(func, insn, &state); 3417 3418 case INSN_CALL: 3419 case INSN_CALL_DYNAMIC: 3420 ret = validate_call(file, insn, &state); 3421 if (ret) 3422 return ret; 3423 3424 if (opts.stackval && func && !is_fentry_call(insn) && 3425 !has_valid_stack_frame(&state)) { 3426 WARN_FUNC("call without frame pointer save/setup", 3427 sec, insn->offset); 3428 return 1; 3429 } 3430 3431 if (insn->dead_end) 3432 return 0; 3433 3434 break; 3435 3436 case INSN_JUMP_CONDITIONAL: 3437 case INSN_JUMP_UNCONDITIONAL: 3438 if (is_sibling_call(insn)) { 3439 ret = validate_sibling_call(file, insn, &state); 3440 if (ret) 3441 return ret; 3442 3443 } else if (insn->jump_dest) { 3444 ret = validate_branch(file, func, 3445 insn->jump_dest, state); 3446 if (ret) { 3447 if (opts.backtrace) 3448 BT_FUNC("(branch)", insn); 3449 return ret; 3450 } 3451 } 3452 3453 if (insn->type == INSN_JUMP_UNCONDITIONAL) 3454 return 0; 3455 
3456 break; 3457 3458 case INSN_JUMP_DYNAMIC: 3459 case INSN_JUMP_DYNAMIC_CONDITIONAL: 3460 if (is_sibling_call(insn)) { 3461 ret = validate_sibling_call(file, insn, &state); 3462 if (ret) 3463 return ret; 3464 } 3465 3466 if (insn->type == INSN_JUMP_DYNAMIC) 3467 return 0; 3468 3469 break; 3470 3471 case INSN_CONTEXT_SWITCH: 3472 if (func && (!next_insn || !next_insn->hint)) { 3473 WARN_FUNC("unsupported instruction in callable function", 3474 sec, insn->offset); 3475 return 1; 3476 } 3477 return 0; 3478 3479 case INSN_STAC: 3480 if (state.uaccess) { 3481 WARN_FUNC("recursive UACCESS enable", sec, insn->offset); 3482 return 1; 3483 } 3484 3485 state.uaccess = true; 3486 break; 3487 3488 case INSN_CLAC: 3489 if (!state.uaccess && func) { 3490 WARN_FUNC("redundant UACCESS disable", sec, insn->offset); 3491 return 1; 3492 } 3493 3494 if (func_uaccess_safe(func) && !state.uaccess_stack) { 3495 WARN_FUNC("UACCESS-safe disables UACCESS", sec, insn->offset); 3496 return 1; 3497 } 3498 3499 state.uaccess = false; 3500 break; 3501 3502 case INSN_STD: 3503 if (state.df) { 3504 WARN_FUNC("recursive STD", sec, insn->offset); 3505 return 1; 3506 } 3507 3508 state.df = true; 3509 break; 3510 3511 case INSN_CLD: 3512 if (!state.df && func) { 3513 WARN_FUNC("redundant CLD", sec, insn->offset); 3514 return 1; 3515 } 3516 3517 state.df = false; 3518 break; 3519 3520 default: 3521 break; 3522 } 3523 3524 if (insn->dead_end) 3525 return 0; 3526 3527 if (!next_insn) { 3528 if (state.cfi.cfa.base == CFI_UNDEFINED) 3529 return 0; 3530 WARN("%s: unexpected end of section", sec->name); 3531 return 1; 3532 } 3533 3534 prev_insn = insn; 3535 insn = next_insn; 3536 } 3537 3538 return 0; 3539 } 3540 3541 static int validate_unwind_hints(struct objtool_file *file, struct section *sec) 3542 { 3543 struct instruction *insn; 3544 struct insn_state state; 3545 int ret, warnings = 0; 3546 3547 if (!file->hints) 3548 return 0; 3549 3550 init_insn_state(file, &state, sec); 3551 3552 if (sec) { 3553 insn = find_insn(file, sec, 0); 3554 if (!insn) 3555 return 0; 3556 } else { 3557 insn = list_first_entry(&file->insn_list, typeof(*insn), list); 3558 } 3559 3560 while (&insn->list != &file->insn_list && (!sec || insn->sec == sec)) { 3561 if (insn->hint && !insn->visited && !insn->ignore) { 3562 ret = validate_branch(file, insn->func, insn, state); 3563 if (ret && opts.backtrace) 3564 BT_FUNC("<=== (hint)", insn); 3565 warnings += ret; 3566 } 3567 3568 insn = list_next_entry(insn, list); 3569 } 3570 3571 return warnings; 3572 } 3573 3574 /* 3575 * Validate rethunk entry constraint: must untrain RET before the first RET. 3576 * 3577 * Follow every branch (intra-function) and ensure ANNOTATE_UNRET_END comes 3578 * before an actual RET instruction. 
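 *
 * The expected entry shape is roughly (a sketch of the x86 idiom; exact
 * macro names and code vary by kernel version):
 *
 *	UNTRAIN_RET		// e.g. call zen_untrain_ret
 *	ANNOTATE_UNRET_END	// marks a NOP retpoline_safe
 *	...
 *	ret			// only valid after the annotation
 *
 * which is why a retpoline_safe NOP terminates the walk below while a
 * bare RET warns.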
3579 */ 3580 static int validate_entry(struct objtool_file *file, struct instruction *insn) 3581 { 3582 struct instruction *next, *dest; 3583 int ret, warnings = 0; 3584 3585 for (;;) { 3586 next = next_insn_to_validate(file, insn); 3587 3588 if (insn->visited & VISITED_ENTRY) 3589 return 0; 3590 3591 insn->visited |= VISITED_ENTRY; 3592 3593 if (!insn->ignore_alts && !list_empty(&insn->alts)) { 3594 struct alternative *alt; 3595 bool skip_orig = false; 3596 3597 list_for_each_entry(alt, &insn->alts, list) { 3598 if (alt->skip_orig) 3599 skip_orig = true; 3600 3601 ret = validate_entry(file, alt->insn); 3602 if (ret) { 3603 if (opts.backtrace) 3604 BT_FUNC("(alt)", insn); 3605 return ret; 3606 } 3607 } 3608 3609 if (skip_orig) 3610 return 0; 3611 } 3612 3613 switch (insn->type) { 3614 3615 case INSN_CALL_DYNAMIC: 3616 case INSN_JUMP_DYNAMIC: 3617 case INSN_JUMP_DYNAMIC_CONDITIONAL: 3618 WARN_FUNC("early indirect call", insn->sec, insn->offset); 3619 return 1; 3620 3621 case INSN_JUMP_UNCONDITIONAL: 3622 case INSN_JUMP_CONDITIONAL: 3623 if (!is_sibling_call(insn)) { 3624 if (!insn->jump_dest) { 3625 WARN_FUNC("unresolved jump target after linking?!?", 3626 insn->sec, insn->offset); 3627 return -1; 3628 } 3629 ret = validate_entry(file, insn->jump_dest); 3630 if (ret) { 3631 if (opts.backtrace) { 3632 BT_FUNC("(branch%s)", insn, 3633 insn->type == INSN_JUMP_CONDITIONAL ? "-cond" : ""); 3634 } 3635 return ret; 3636 } 3637 3638 if (insn->type == INSN_JUMP_UNCONDITIONAL) 3639 return 0; 3640 3641 break; 3642 } 3643 3644 /* fallthrough */ 3645 case INSN_CALL: 3646 dest = find_insn(file, insn->call_dest->sec, 3647 insn->call_dest->offset); 3648 if (!dest) { 3649 WARN("Unresolved function after linking!?: %s", 3650 insn->call_dest->name); 3651 return -1; 3652 } 3653 3654 ret = validate_entry(file, dest); 3655 if (ret) { 3656 if (opts.backtrace) 3657 BT_FUNC("(call)", insn); 3658 return ret; 3659 } 3660 /* 3661 * If a call returns without error, it must have seen UNTRAIN_RET. 3662 * Therefore any non-error return is a success. 3663 */ 3664 return 0; 3665 3666 case INSN_RETURN: 3667 WARN_FUNC("RET before UNTRAIN", insn->sec, insn->offset); 3668 return 1; 3669 3670 case INSN_NOP: 3671 if (insn->retpoline_safe) 3672 return 0; 3673 break; 3674 3675 default: 3676 break; 3677 } 3678 3679 if (!next) { 3680 WARN_FUNC("unexpected end of section", insn->sec, insn->offset); 3681 return -1; 3682 } 3683 insn = next; 3684 } 3685 3686 return warnings; 3687 } 3688 3689 /* 3690 * Validate that all branches starting at 'insn->entry' encounter UNRET_END 3691 * before RET.
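 *
 * (insn->entry is set in read_unwind_hints(): either an explicit
 * UNWIND_HINT_TYPE_ENTRY hint, or an UNWIND_HINT_TYPE_REGS_PARTIAL hint
 * at a global symbol, i.e. an exception entry point.)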
3692 */ 3693 static int validate_unret(struct objtool_file *file) 3694 { 3695 struct instruction *insn; 3696 int ret, warnings = 0; 3697 3698 for_each_insn(file, insn) { 3699 if (!insn->entry) 3700 continue; 3701 3702 ret = validate_entry(file, insn); 3703 if (ret < 0) { 3704 WARN_FUNC("Failed UNRET validation", insn->sec, insn->offset); 3705 return ret; 3706 } 3707 warnings += ret; 3708 } 3709 3710 return warnings; 3711 } 3712 3713 static int validate_retpoline(struct objtool_file *file) 3714 { 3715 struct instruction *insn; 3716 int warnings = 0; 3717 3718 for_each_insn(file, insn) { 3719 if (insn->type != INSN_JUMP_DYNAMIC && 3720 insn->type != INSN_CALL_DYNAMIC && 3721 insn->type != INSN_RETURN) 3722 continue; 3723 3724 if (insn->retpoline_safe) 3725 continue; 3726 3727 /* 3728 * .init.text code is run before userspace and thus doesn't 3729 * strictly need retpolines, except for modules: since they are 3730 * loaded late, they very much do need retpolines in their 3731 * .init.text. 3732 */ 3733 if (!strcmp(insn->sec->name, ".init.text") && !opts.module) 3734 continue; 3735 3736 if (insn->type == INSN_RETURN) { 3737 if (opts.rethunk) { 3738 WARN_FUNC("'naked' return found in RETHUNK build", 3739 insn->sec, insn->offset); 3740 } else 3741 continue; 3742 } else { 3743 WARN_FUNC("indirect %s found in RETPOLINE build", 3744 insn->sec, insn->offset, 3745 insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call"); 3746 } 3747 3748 warnings++; 3749 } 3750 3751 return warnings; 3752 } 3753 3754 static bool is_kasan_insn(struct instruction *insn) 3755 { 3756 return (insn->type == INSN_CALL && 3757 !strcmp(insn->call_dest->name, "__asan_handle_no_return")); 3758 } 3759 3760 static bool is_ubsan_insn(struct instruction *insn) 3761 { 3762 return (insn->type == INSN_CALL && 3763 !strcmp(insn->call_dest->name, 3764 "__ubsan_handle_builtin_unreachable")); 3765 } 3766 3767 static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn) 3768 { 3769 int i; 3770 struct instruction *prev_insn; 3771 3772 if (insn->ignore || insn->type == INSN_NOP || insn->type == INSN_TRAP) 3773 return true; 3774 3775 /* 3776 * Ignore alternative replacement instructions. This can happen 3777 * when a whitelisted function uses one of the ALTERNATIVE macros. 3778 */ 3779 if (!strcmp(insn->sec->name, ".altinstr_replacement") || 3780 !strcmp(insn->sec->name, ".altinstr_aux")) 3781 return true; 3782 3783 /* 3784 * Whole archive runs might encounter dead code from weak symbols. 3785 * This is where the linker will have dropped the weak symbol in 3786 * favour of a regular symbol, but left the code in place. 3787 * 3788 * In this case we'll find a piece of code (a whole function) that is 3789 * not covered by a non-section symbol. Ignore it. 3790 */ 3791 if (opts.link && !insn->func) { 3792 int size = find_symbol_hole_containing(insn->sec, insn->offset); 3793 unsigned long end = insn->offset + size; 3794 3795 if (!size) /* not a hole */ 3796 return false; 3797 3798 if (size < 0) /* hole until the end */ 3799 return true; 3800 3801 sec_for_each_insn_continue(file, insn) { 3802 /* 3803 * If we reach a visited instruction at or before the 3804 * end of the hole, ignore the unreachable. 3805 */ 3806 if (insn->visited) 3807 return true; 3808 3809 if (insn->offset >= end) 3810 break; 3811 3812 /* 3813 * If this hole jumps to a .cold function, mark it ignore too.
3814 */ 3815 if (insn->jump_dest && insn->jump_dest->func && 3816 strstr(insn->jump_dest->func->name, ".cold")) { 3817 struct instruction *dest = insn->jump_dest; 3818 func_for_each_insn(file, dest->func, dest) 3819 dest->ignore = true; 3820 } 3821 } 3822 3823 return false; 3824 } 3825 3826 if (!insn->func) 3827 return false; 3828 3829 if (insn->func->static_call_tramp) 3830 return true; 3831 3832 /* 3833 * CONFIG_UBSAN_TRAP inserts a UD2 when it sees 3834 * __builtin_unreachable(). The BUG() macro has an unreachable() after 3835 * the UD2, which causes GCC's undefined trap logic to emit another UD2 3836 * (or occasionally a JMP to UD2). 3837 * 3838 * It may also insert a UD2 after calling a __noreturn function. 3839 */ 3840 prev_insn = list_prev_entry(insn, list); 3841 if ((prev_insn->dead_end || dead_end_function(file, prev_insn->call_dest)) && 3842 (insn->type == INSN_BUG || 3843 (insn->type == INSN_JUMP_UNCONDITIONAL && 3844 insn->jump_dest && insn->jump_dest->type == INSN_BUG))) 3845 return true; 3846 3847 /* 3848 * Check if this (or a subsequent) instruction is related to 3849 * CONFIG_UBSAN or CONFIG_KASAN. 3850 * 3851 * End the search at 5 instructions to avoid going into the weeds. 3852 */ 3853 for (i = 0; i < 5; i++) { 3854 3855 if (is_kasan_insn(insn) || is_ubsan_insn(insn)) 3856 return true; 3857 3858 if (insn->type == INSN_JUMP_UNCONDITIONAL) { 3859 if (insn->jump_dest && 3860 insn->jump_dest->func == insn->func) { 3861 insn = insn->jump_dest; 3862 continue; 3863 } 3864 3865 break; 3866 } 3867 3868 if (insn->offset + insn->len >= insn->func->offset + insn->func->len) 3869 break; 3870 3871 insn = list_next_entry(insn, list); 3872 } 3873 3874 return false; 3875 } 3876 3877 static int validate_symbol(struct objtool_file *file, struct section *sec, 3878 struct symbol *sym, struct insn_state *state) 3879 { 3880 struct instruction *insn; 3881 int ret; 3882 3883 if (!sym->len) { 3884 WARN("%s() is missing an ELF size annotation", sym->name); 3885 return 1; 3886 } 3887 3888 if (sym->pfunc != sym || sym->alias != sym) 3889 return 0; 3890 3891 insn = find_insn(file, sec, sym->offset); 3892 if (!insn || insn->ignore || insn->visited) 3893 return 0; 3894 3895 state->uaccess = sym->uaccess_safe; 3896 3897 ret = validate_branch(file, insn->func, insn, *state); 3898 if (ret && opts.backtrace) 3899 BT_FUNC("<=== (sym)", insn); 3900 return ret; 3901 } 3902 3903 static int validate_section(struct objtool_file *file, struct section *sec) 3904 { 3905 struct insn_state state; 3906 struct symbol *func; 3907 int warnings = 0; 3908 3909 list_for_each_entry(func, &sec->symbol_list, list) { 3910 if (func->type != STT_FUNC) 3911 continue; 3912 3913 init_insn_state(file, &state, sec); 3914 set_func_state(&state.cfi); 3915 3916 warnings += validate_symbol(file, sec, func, &state); 3917 } 3918 3919 return warnings; 3920 } 3921 3922 static int validate_noinstr_sections(struct objtool_file *file) 3923 { 3924 struct section *sec; 3925 int warnings = 0; 3926 3927 sec = find_section_by_name(file->elf, ".noinstr.text"); 3928 if (sec) { 3929 warnings += validate_section(file, sec); 3930 warnings += validate_unwind_hints(file, sec); 3931 } 3932 3933 sec = find_section_by_name(file->elf, ".entry.text"); 3934 if (sec) { 3935 warnings += validate_section(file, sec); 3936 warnings += validate_unwind_hints(file, sec); 3937 } 3938 3939 return warnings; 3940 } 3941 3942 static int validate_functions(struct objtool_file *file) 3943 { 3944 struct section *sec; 3945 int warnings = 0; 3946 3947 for_each_sec(file, sec) { 3948 
if (!(sec->sh.sh_flags & SHF_EXECINSTR)) 3949 continue; 3950 3951 warnings += validate_section(file, sec); 3952 } 3953 3954 return warnings; 3955 } 3956 3957 static void mark_endbr_used(struct instruction *insn) 3958 { 3959 if (!list_empty(&insn->call_node)) 3960 list_del_init(&insn->call_node); 3961 } 3962 3963 static int validate_ibt_insn(struct objtool_file *file, struct instruction *insn) 3964 { 3965 struct instruction *dest; 3966 struct reloc *reloc; 3967 unsigned long off; 3968 int warnings = 0; 3969 3970 /* 3971 * Looking for function pointer load relocations. Ignore 3972 * direct/indirect branches: 3973 */ 3974 switch (insn->type) { 3975 case INSN_CALL: 3976 case INSN_CALL_DYNAMIC: 3977 case INSN_JUMP_CONDITIONAL: 3978 case INSN_JUMP_UNCONDITIONAL: 3979 case INSN_JUMP_DYNAMIC: 3980 case INSN_JUMP_DYNAMIC_CONDITIONAL: 3981 case INSN_RETURN: 3982 case INSN_NOP: 3983 return 0; 3984 default: 3985 break; 3986 } 3987 3988 for (reloc = insn_reloc(file, insn); 3989 reloc; 3990 reloc = find_reloc_by_dest_range(file->elf, insn->sec, 3991 reloc->offset + 1, 3992 (insn->offset + insn->len) - (reloc->offset + 1))) { 3993 3994 /* 3995 * static_call_update() references the trampoline, which 3996 * doesn't have (or need) ENDBR. Skip warning in that case. 3997 */ 3998 if (reloc->sym->static_call_tramp) 3999 continue; 4000 4001 off = reloc->sym->offset; 4002 if (reloc->type == R_X86_64_PC32 || reloc->type == R_X86_64_PLT32) 4003 off += arch_dest_reloc_offset(reloc->addend); 4004 else 4005 off += reloc->addend; 4006 4007 dest = find_insn(file, reloc->sym->sec, off); 4008 if (!dest) 4009 continue; 4010 4011 if (dest->type == INSN_ENDBR) { 4012 mark_endbr_used(dest); 4013 continue; 4014 } 4015 4016 if (dest->func && dest->func == insn->func) { 4017 /* 4018 * Anything from->to self is either _THIS_IP_ or 4019 * IRET-to-self. 4020 * 4021 * There is no sane way to annotate _THIS_IP_ since the 4022 * compiler treats the relocation as a constant and is 4023 * happy to fold in offsets, skewing any annotation we 4024 * do, leading to vast amounts of false-positives. 4025 * 4026 * There's also compiler generated _THIS_IP_ through 4027 * KCOV and such which we have no hope of annotating. 4028 * 4029 * As such, blanket accept self-references without 4030 * issue. 4031 */ 4032 continue; 4033 } 4034 4035 if (dest->noendbr) 4036 continue; 4037 4038 WARN_FUNC("relocation to !ENDBR: %s", 4039 insn->sec, insn->offset, 4040 offstr(dest->sec, dest->offset)); 4041 4042 warnings++; 4043 } 4044 4045 return warnings; 4046 } 4047 4048 static int validate_ibt_data_reloc(struct objtool_file *file, 4049 struct reloc *reloc) 4050 { 4051 struct instruction *dest; 4052 4053 dest = find_insn(file, reloc->sym->sec, 4054 reloc->sym->offset + reloc->addend); 4055 if (!dest) 4056 return 0; 4057 4058 if (dest->type == INSN_ENDBR) { 4059 mark_endbr_used(dest); 4060 return 0; 4061 } 4062 4063 if (dest->noendbr) 4064 return 0; 4065 4066 WARN_FUNC("data relocation to !ENDBR: %s", 4067 reloc->sec->base, reloc->offset, 4068 offstr(dest->sec, dest->offset)); 4069 4070 return 1; 4071 } 4072 4073 /* 4074 * Validate IBT rules and remove used ENDBR instructions from the seal list. 4075 * Unused ENDBR instructions will be annotated for sealing (i.e., replaced with 4076 * NOPs) later, in create_ibt_endbr_seal_sections(). 
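 *
 * As a sketch of the end result: if foo() is only ever called directly,
 * no text or data relocation marks its leading ENDBR as used, so that
 * ENDBR lands in .ibt_endbr_seal and can later be overwritten with a
 * NOP, making a stray indirect "call *%rax" into foo() fault with #CP.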
4077 */ 4078 static int validate_ibt(struct objtool_file *file) 4079 { 4080 struct section *sec; 4081 struct reloc *reloc; 4082 struct instruction *insn; 4083 int warnings = 0; 4084 4085 for_each_insn(file, insn) 4086 warnings += validate_ibt_insn(file, insn); 4087 4088 for_each_sec(file, sec) { 4089 4090 /* Already done by validate_ibt_insn() */ 4091 if (sec->sh.sh_flags & SHF_EXECINSTR) 4092 continue; 4093 4094 if (!sec->reloc) 4095 continue; 4096 4097 /* 4098 * These sections can reference text addresses, but not with 4099 * the intent to indirect branch to them. 4100 */ 4101 if ((!strncmp(sec->name, ".discard", 8) && 4102 strcmp(sec->name, ".discard.ibt_endbr_noseal")) || 4103 !strncmp(sec->name, ".debug", 6) || 4104 !strcmp(sec->name, ".altinstructions") || 4105 !strcmp(sec->name, ".ibt_endbr_seal") || 4106 !strcmp(sec->name, ".orc_unwind_ip") || 4107 !strcmp(sec->name, ".parainstructions") || 4108 !strcmp(sec->name, ".retpoline_sites") || 4109 !strcmp(sec->name, ".smp_locks") || 4110 !strcmp(sec->name, ".static_call_sites") || 4111 !strcmp(sec->name, "_error_injection_whitelist") || 4112 !strcmp(sec->name, "_kprobe_blacklist") || 4113 !strcmp(sec->name, "__bug_table") || 4114 !strcmp(sec->name, "__ex_table") || 4115 !strcmp(sec->name, "__jump_table") || 4116 !strcmp(sec->name, "__mcount_loc")) 4117 continue; 4118 4119 list_for_each_entry(reloc, &sec->reloc->reloc_list, list) 4120 warnings += validate_ibt_data_reloc(file, reloc); 4121 } 4122 4123 return warnings; 4124 } 4125 4126 static int validate_sls(struct objtool_file *file) 4127 { 4128 struct instruction *insn, *next_insn; 4129 int warnings = 0; 4130 4131 for_each_insn(file, insn) { 4132 next_insn = next_insn_same_sec(file, insn); 4133 4134 if (insn->retpoline_safe) 4135 continue; 4136 4137 switch (insn->type) { 4138 case INSN_RETURN: 4139 if (!next_insn || next_insn->type != INSN_TRAP) { 4140 WARN_FUNC("missing int3 after ret", 4141 insn->sec, insn->offset); 4142 warnings++; 4143 } 4144 4145 break; 4146 case INSN_JUMP_DYNAMIC: 4147 if (!next_insn || next_insn->type != INSN_TRAP) { 4148 WARN_FUNC("missing int3 after indirect jump", 4149 insn->sec, insn->offset); 4150 warnings++; 4151 } 4152 break; 4153 default: 4154 break; 4155 } 4156 } 4157 4158 return warnings; 4159 } 4160 4161 static int validate_reachable_instructions(struct objtool_file *file) 4162 { 4163 struct instruction *insn; 4164 4165 if (file->ignore_unreachables) 4166 return 0; 4167 4168 for_each_insn(file, insn) { 4169 if (insn->visited || ignore_unreachable_insn(file, insn)) 4170 continue; 4171 4172 WARN_FUNC("unreachable instruction", insn->sec, insn->offset); 4173 return 1; 4174 } 4175 4176 return 0; 4177 } 4178 4179 int check(struct objtool_file *file) 4180 { 4181 int ret, warnings = 0; 4182 4183 arch_initial_func_cfi_state(&initial_func_cfi); 4184 init_cfi_state(&init_cfi); 4185 init_cfi_state(&func_cfi); 4186 set_func_state(&func_cfi); 4187 4188 if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3))) 4189 goto out; 4190 4191 cfi_hash_add(&init_cfi); 4192 cfi_hash_add(&func_cfi); 4193 4194 ret = decode_sections(file); 4195 if (ret < 0) 4196 goto out; 4197 4198 warnings += ret; 4199 4200 if (list_empty(&file->insn_list)) 4201 goto out; 4202 4203 if (opts.retpoline) { 4204 ret = validate_retpoline(file); 4205 if (ret < 0) 4206 return ret; 4207 warnings += ret; 4208 } 4209 4210 if (opts.stackval || opts.orc || opts.uaccess) { 4211 ret = validate_functions(file); 4212 if (ret < 0) 4213 goto out; 4214 warnings += ret; 4215 4216 ret = validate_unwind_hints(file, 
NULL); 4217 if (ret < 0) 4218 goto out; 4219 warnings += ret; 4220 4221 if (!warnings) { 4222 ret = validate_reachable_instructions(file); 4223 if (ret < 0) 4224 goto out; 4225 warnings += ret; 4226 } 4227 4228 } else if (opts.noinstr) { 4229 ret = validate_noinstr_sections(file); 4230 if (ret < 0) 4231 goto out; 4232 warnings += ret; 4233 } 4234 4235 if (opts.unret) { 4236 /* 4237 * Must be after validate_branch() and friends, it plays 4238 * further games with insn->visited. 4239 */ 4240 ret = validate_unret(file); 4241 if (ret < 0) 4242 return ret; 4243 warnings += ret; 4244 } 4245 4246 if (opts.ibt) { 4247 ret = validate_ibt(file); 4248 if (ret < 0) 4249 goto out; 4250 warnings += ret; 4251 } 4252 4253 if (opts.sls) { 4254 ret = validate_sls(file); 4255 if (ret < 0) 4256 goto out; 4257 warnings += ret; 4258 } 4259 4260 if (opts.static_call) { 4261 ret = create_static_call_sections(file); 4262 if (ret < 0) 4263 goto out; 4264 warnings += ret; 4265 } 4266 4267 if (opts.retpoline) { 4268 ret = create_retpoline_sites_sections(file); 4269 if (ret < 0) 4270 goto out; 4271 warnings += ret; 4272 } 4273 4274 if (opts.rethunk) { 4275 ret = create_return_sites_sections(file); 4276 if (ret < 0) 4277 goto out; 4278 warnings += ret; 4279 } 4280 4281 if (opts.mcount) { 4282 ret = create_mcount_loc_sections(file); 4283 if (ret < 0) 4284 goto out; 4285 warnings += ret; 4286 } 4287 4288 if (opts.ibt) { 4289 ret = create_ibt_endbr_seal_sections(file); 4290 if (ret < 0) 4291 goto out; 4292 warnings += ret; 4293 } 4294 4295 if (opts.orc && !list_empty(&file->insn_list)) { 4296 ret = orc_create(file); 4297 if (ret < 0) 4298 goto out; 4299 warnings += ret; 4300 } 4301 4302 4303 if (opts.stats) { 4304 printf("nr_insns_visited: %ld\n", nr_insns_visited); 4305 printf("nr_cfi: %ld\n", nr_cfi); 4306 printf("nr_cfi_reused: %ld\n", nr_cfi_reused); 4307 printf("nr_cfi_cache: %ld\n", nr_cfi_cache); 4308 } 4309 4310 out: 4311 /* 4312 * For now, don't fail the kernel build on fatal warnings. These 4313 * errors are still fairly common due to the growing matrix of 4314 * supported toolchains and their recent pace of change. 4315 */ 4316 return 0; 4317 } 4318
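/*
 * Usage sketch: check() is invoked once per object file by objtool's
 * command-line front end, roughly as follows (flag spellings live in
 * tools/objtool/builtin-check.c; an illustrative example, not an
 * exhaustive list):
 *
 *	$ objtool --stackval --orc --retpoline --uaccess vmlinux.o
 *
 * Each opts.* flag tested above corresponds to one such option, and the
 * ordering constraints between the decode passes are documented inline
 * in decode_sections().
 */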