1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com> 4 */ 5 6 #include <string.h> 7 #include <stdlib.h> 8 #include <inttypes.h> 9 #include <sys/mman.h> 10 11 #include <arch/elf.h> 12 #include <objtool/builtin.h> 13 #include <objtool/cfi.h> 14 #include <objtool/arch.h> 15 #include <objtool/check.h> 16 #include <objtool/special.h> 17 #include <objtool/warn.h> 18 #include <objtool/endianness.h> 19 20 #include <linux/objtool.h> 21 #include <linux/hashtable.h> 22 #include <linux/kernel.h> 23 #include <linux/static_call_types.h> 24 25 struct alternative { 26 struct list_head list; 27 struct instruction *insn; 28 bool skip_orig; 29 }; 30 31 static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache; 32 33 static struct cfi_init_state initial_func_cfi; 34 static struct cfi_state init_cfi; 35 static struct cfi_state func_cfi; 36 37 struct instruction *find_insn(struct objtool_file *file, 38 struct section *sec, unsigned long offset) 39 { 40 struct instruction *insn; 41 42 hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) { 43 if (insn->sec == sec && insn->offset == offset) 44 return insn; 45 } 46 47 return NULL; 48 } 49 50 static struct instruction *next_insn_same_sec(struct objtool_file *file, 51 struct instruction *insn) 52 { 53 struct instruction *next = list_next_entry(insn, list); 54 55 if (!next || &next->list == &file->insn_list || next->sec != insn->sec) 56 return NULL; 57 58 return next; 59 } 60 61 static struct instruction *next_insn_same_func(struct objtool_file *file, 62 struct instruction *insn) 63 { 64 struct instruction *next = list_next_entry(insn, list); 65 struct symbol *func = insn->func; 66 67 if (!func) 68 return NULL; 69 70 if (&next->list != &file->insn_list && next->func == func) 71 return next; 72 73 /* Check if we're already in the subfunction: */ 74 if (func == func->cfunc) 75 return NULL; 76 77 /* Move to the subfunction: */ 78 return find_insn(file, func->cfunc->sec, func->cfunc->offset); 79 } 80 81 static struct instruction *prev_insn_same_sym(struct objtool_file *file, 82 struct instruction *insn) 83 { 84 struct instruction *prev = list_prev_entry(insn, list); 85 86 if (&prev->list != &file->insn_list && prev->func == insn->func) 87 return prev; 88 89 return NULL; 90 } 91 92 #define func_for_each_insn(file, func, insn) \ 93 for (insn = find_insn(file, func->sec, func->offset); \ 94 insn; \ 95 insn = next_insn_same_func(file, insn)) 96 97 #define sym_for_each_insn(file, sym, insn) \ 98 for (insn = find_insn(file, sym->sec, sym->offset); \ 99 insn && &insn->list != &file->insn_list && \ 100 insn->sec == sym->sec && \ 101 insn->offset < sym->offset + sym->len; \ 102 insn = list_next_entry(insn, list)) 103 104 #define sym_for_each_insn_continue_reverse(file, sym, insn) \ 105 for (insn = list_prev_entry(insn, list); \ 106 &insn->list != &file->insn_list && \ 107 insn->sec == sym->sec && insn->offset >= sym->offset; \ 108 insn = list_prev_entry(insn, list)) 109 110 #define sec_for_each_insn_from(file, insn) \ 111 for (; insn; insn = next_insn_same_sec(file, insn)) 112 113 #define sec_for_each_insn_continue(file, insn) \ 114 for (insn = next_insn_same_sec(file, insn); insn; \ 115 insn = next_insn_same_sec(file, insn)) 116 117 static bool is_jump_table_jump(struct instruction *insn) 118 { 119 struct alt_group *alt_group = insn->alt_group; 120 121 if (insn->jump_table) 122 return true; 123 124 /* Retpoline alternative for a jump table? */ 125 return alt_group && alt_group->orig_group && 126 alt_group->orig_group->first_insn->jump_table; 127 } 128 129 static bool is_sibling_call(struct instruction *insn) 130 { 131 /* 132 * Assume only ELF functions can make sibling calls. This ensures 133 * sibling call detection consistency between vmlinux.o and individual 134 * objects. 135 */ 136 if (!insn->func) 137 return false; 138 139 /* An indirect jump is either a sibling call or a jump to a table. */ 140 if (insn->type == INSN_JUMP_DYNAMIC) 141 return !is_jump_table_jump(insn); 142 143 /* add_jump_destinations() sets insn->call_dest for sibling calls. */ 144 return (is_static_jump(insn) && insn->call_dest); 145 } 146 147 /* 148 * This checks to see if the given function is a "noreturn" function. 149 * 150 * For global functions which are outside the scope of this object file, we 151 * have to keep a manual list of them. 152 * 153 * For local functions, we have to detect them manually by simply looking for 154 * the lack of a return instruction. 155 */ 156 static bool __dead_end_function(struct objtool_file *file, struct symbol *func, 157 int recursion) 158 { 159 int i; 160 struct instruction *insn; 161 bool empty = true; 162 163 /* 164 * Unfortunately these have to be hard coded because the noreturn 165 * attribute isn't provided in ELF data. Keep 'em sorted. 166 */ 167 static const char * const global_noreturns[] = { 168 "__invalid_creds", 169 "__module_put_and_kthread_exit", 170 "__reiserfs_panic", 171 "__stack_chk_fail", 172 "__ubsan_handle_builtin_unreachable", 173 "cpu_bringup_and_idle", 174 "cpu_startup_entry", 175 "do_exit", 176 "do_group_exit", 177 "do_task_dead", 178 "ex_handler_msr_mce", 179 "fortify_panic", 180 "kthread_complete_and_exit", 181 "kthread_exit", 182 "kunit_try_catch_throw", 183 "lbug_with_loc", 184 "machine_real_restart", 185 "make_task_dead", 186 "panic", 187 "rewind_stack_and_make_dead", 188 "sev_es_terminate", 189 "snp_abort", 190 "stop_this_cpu", 191 "usercopy_abort", 192 "xen_start_kernel", 193 }; 194 195 if (!func) 196 return false; 197 198 if (func->bind == STB_WEAK) 199 return false; 200 201 if (func->bind == STB_GLOBAL) 202 for (i = 0; i < ARRAY_SIZE(global_noreturns); i++) 203 if (!strcmp(func->name, global_noreturns[i])) 204 return true; 205 206 if (!func->len) 207 return false; 208 209 insn = find_insn(file, func->sec, func->offset); 210 if (!insn->func) 211 return false; 212 213 func_for_each_insn(file, func, insn) { 214 empty = false; 215 216 if (insn->type == INSN_RETURN) 217 return false; 218 } 219 220 if (empty) 221 return false; 222 223 /* 224 * A function can have a sibling call instead of a return. In that 225 * case, the function's dead-end status depends on whether the target 226 * of the sibling call returns. 227 */ 228 func_for_each_insn(file, func, insn) { 229 if (is_sibling_call(insn)) { 230 struct instruction *dest = insn->jump_dest; 231 232 if (!dest) 233 /* sibling call to another file */ 234 return false; 235 236 /* local sibling call */ 237 if (recursion == 5) { 238 /* 239 * Infinite recursion: two functions have 240 * sibling calls to each other. This is a very 241 * rare case. It means they aren't dead ends. 242 */ 243 return false; 244 } 245 246 return __dead_end_function(file, dest->func, recursion+1); 247 } 248 } 249 250 return true; 251 } 252 253 static bool dead_end_function(struct objtool_file *file, struct symbol *func) 254 { 255 return __dead_end_function(file, func, 0); 256 } 257 258 static void init_cfi_state(struct cfi_state *cfi) 259 { 260 int i; 261 262 for (i = 0; i < CFI_NUM_REGS; i++) { 263 cfi->regs[i].base = CFI_UNDEFINED; 264 cfi->vals[i].base = CFI_UNDEFINED; 265 } 266 cfi->cfa.base = CFI_UNDEFINED; 267 cfi->drap_reg = CFI_UNDEFINED; 268 cfi->drap_offset = -1; 269 } 270 271 static void init_insn_state(struct objtool_file *file, struct insn_state *state, 272 struct section *sec) 273 { 274 memset(state, 0, sizeof(*state)); 275 init_cfi_state(&state->cfi); 276 277 /* 278 * We need the full vmlinux for noinstr validation, otherwise we can 279 * not correctly determine insn->call_dest->sec (external symbols do 280 * not have a section). 281 */ 282 if (opts.link && opts.noinstr && sec) 283 state->noinstr = sec->noinstr; 284 } 285 286 static struct cfi_state *cfi_alloc(void) 287 { 288 struct cfi_state *cfi = calloc(sizeof(struct cfi_state), 1); 289 if (!cfi) { 290 WARN("calloc failed"); 291 exit(1); 292 } 293 nr_cfi++; 294 return cfi; 295 } 296 297 static int cfi_bits; 298 static struct hlist_head *cfi_hash; 299 300 static inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2) 301 { 302 return memcmp((void *)cfi1 + sizeof(cfi1->hash), 303 (void *)cfi2 + sizeof(cfi2->hash), 304 sizeof(struct cfi_state) - sizeof(struct hlist_node)); 305 } 306 307 static inline u32 cfi_key(struct cfi_state *cfi) 308 { 309 return jhash((void *)cfi + sizeof(cfi->hash), 310 sizeof(*cfi) - sizeof(cfi->hash), 0); 311 } 312 313 static struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi) 314 { 315 struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)]; 316 struct cfi_state *obj; 317 318 hlist_for_each_entry(obj, head, hash) { 319 if (!cficmp(cfi, obj)) { 320 nr_cfi_cache++; 321 return obj; 322 } 323 } 324 325 obj = cfi_alloc(); 326 *obj = *cfi; 327 hlist_add_head(&obj->hash, head); 328 329 return obj; 330 } 331 332 static void cfi_hash_add(struct cfi_state *cfi) 333 { 334 struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)]; 335 336 hlist_add_head(&cfi->hash, head); 337 } 338 339 static void *cfi_hash_alloc(unsigned long size) 340 { 341 cfi_bits = max(10, ilog2(size)); 342 cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits, 343 PROT_READ|PROT_WRITE, 344 MAP_PRIVATE|MAP_ANON, -1, 0); 345 if (cfi_hash == (void *)-1L) { 346 WARN("mmap fail cfi_hash"); 347 cfi_hash = NULL; 348 } else if (opts.stats) { 349 printf("cfi_bits: %d\n", cfi_bits); 350 } 351 352 return cfi_hash; 353 } 354 355 static unsigned long nr_insns; 356 static unsigned long nr_insns_visited; 357 358 /* 359 * Call the arch-specific instruction decoder for all the instructions and add 360 * them to the global instruction list. 361 */ 362 static int decode_instructions(struct objtool_file *file) 363 { 364 struct section *sec; 365 struct symbol *func; 366 unsigned long offset; 367 struct instruction *insn; 368 int ret; 369 370 for_each_sec(file, sec) { 371 372 if (!(sec->sh.sh_flags & SHF_EXECINSTR)) 373 continue; 374 375 if (strcmp(sec->name, ".altinstr_replacement") && 376 strcmp(sec->name, ".altinstr_aux") && 377 strncmp(sec->name, ".discard.", 9)) 378 sec->text = true; 379 380 if (!strcmp(sec->name, ".noinstr.text") || 381 !strcmp(sec->name, ".entry.text") || 382 !strncmp(sec->name, ".text.__x86.", 12)) 383 sec->noinstr = true; 384 385 for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) { 386 insn = malloc(sizeof(*insn)); 387 if (!insn) { 388 WARN("malloc failed"); 389 return -1; 390 } 391 memset(insn, 0, sizeof(*insn)); 392 INIT_LIST_HEAD(&insn->alts); 393 INIT_LIST_HEAD(&insn->stack_ops); 394 INIT_LIST_HEAD(&insn->call_node); 395 396 insn->sec = sec; 397 insn->offset = offset; 398 399 ret = arch_decode_instruction(file, sec, offset, 400 sec->sh.sh_size - offset, 401 &insn->len, &insn->type, 402 &insn->immediate, 403 &insn->stack_ops); 404 if (ret) 405 goto err; 406 407 /* 408 * By default, "ud2" is a dead end unless otherwise 409 * annotated, because GCC 7 inserts it for certain 410 * divide-by-zero cases. 411 */ 412 if (insn->type == INSN_BUG) 413 insn->dead_end = true; 414 415 hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset)); 416 list_add_tail(&insn->list, &file->insn_list); 417 nr_insns++; 418 } 419 420 list_for_each_entry(func, &sec->symbol_list, list) { 421 if (func->type != STT_FUNC || func->alias != func) 422 continue; 423 424 if (!find_insn(file, sec, func->offset)) { 425 WARN("%s(): can't find starting instruction", 426 func->name); 427 return -1; 428 } 429 430 sym_for_each_insn(file, func, insn) { 431 insn->func = func; 432 if (insn->type == INSN_ENDBR && list_empty(&insn->call_node)) { 433 if (insn->offset == insn->func->offset) { 434 list_add_tail(&insn->call_node, &file->endbr_list); 435 file->nr_endbr++; 436 } else { 437 file->nr_endbr_int++; 438 } 439 } 440 } 441 } 442 } 443 444 if (opts.stats) 445 printf("nr_insns: %lu\n", nr_insns); 446 447 return 0; 448 449 err: 450 free(insn); 451 return ret; 452 } 453 454 /* 455 * Read the pv_ops[] .data table to find the static initialized values. 456 */ 457 static int add_pv_ops(struct objtool_file *file, const char *symname) 458 { 459 struct symbol *sym, *func; 460 unsigned long off, end; 461 struct reloc *rel; 462 int idx; 463 464 sym = find_symbol_by_name(file->elf, symname); 465 if (!sym) 466 return 0; 467 468 off = sym->offset; 469 end = off + sym->len; 470 for (;;) { 471 rel = find_reloc_by_dest_range(file->elf, sym->sec, off, end - off); 472 if (!rel) 473 break; 474 475 func = rel->sym; 476 if (func->type == STT_SECTION) 477 func = find_symbol_by_offset(rel->sym->sec, rel->addend); 478 479 idx = (rel->offset - sym->offset) / sizeof(unsigned long); 480 481 objtool_pv_add(file, idx, func); 482 483 off = rel->offset + 1; 484 if (off > end) 485 break; 486 } 487 488 return 0; 489 } 490 491 /* 492 * Allocate and initialize file->pv_ops[]. 493 */ 494 static int init_pv_ops(struct objtool_file *file) 495 { 496 static const char *pv_ops_tables[] = { 497 "pv_ops", 498 "xen_cpu_ops", 499 "xen_irq_ops", 500 "xen_mmu_ops", 501 NULL, 502 }; 503 const char *pv_ops; 504 struct symbol *sym; 505 int idx, nr; 506 507 if (!opts.noinstr) 508 return 0; 509 510 file->pv_ops = NULL; 511 512 sym = find_symbol_by_name(file->elf, "pv_ops"); 513 if (!sym) 514 return 0; 515 516 nr = sym->len / sizeof(unsigned long); 517 file->pv_ops = calloc(sizeof(struct pv_state), nr); 518 if (!file->pv_ops) 519 return -1; 520 521 for (idx = 0; idx < nr; idx++) 522 INIT_LIST_HEAD(&file->pv_ops[idx].targets); 523 524 for (idx = 0; (pv_ops = pv_ops_tables[idx]); idx++) 525 add_pv_ops(file, pv_ops); 526 527 return 0; 528 } 529 530 static struct instruction *find_last_insn(struct objtool_file *file, 531 struct section *sec) 532 { 533 struct instruction *insn = NULL; 534 unsigned int offset; 535 unsigned int end = (sec->sh.sh_size > 10) ? sec->sh.sh_size - 10 : 0; 536 537 for (offset = sec->sh.sh_size - 1; offset >= end && !insn; offset--) 538 insn = find_insn(file, sec, offset); 539 540 return insn; 541 } 542 543 /* 544 * Mark "ud2" instructions and manually annotated dead ends. 545 */ 546 static int add_dead_ends(struct objtool_file *file) 547 { 548 struct section *sec; 549 struct reloc *reloc; 550 struct instruction *insn; 551 552 /* 553 * Check for manually annotated dead ends. 554 */ 555 sec = find_section_by_name(file->elf, ".rela.discard.unreachable"); 556 if (!sec) 557 goto reachable; 558 559 list_for_each_entry(reloc, &sec->reloc_list, list) { 560 if (reloc->sym->type != STT_SECTION) { 561 WARN("unexpected relocation symbol type in %s", sec->name); 562 return -1; 563 } 564 insn = find_insn(file, reloc->sym->sec, reloc->addend); 565 if (insn) 566 insn = list_prev_entry(insn, list); 567 else if (reloc->addend == reloc->sym->sec->sh.sh_size) { 568 insn = find_last_insn(file, reloc->sym->sec); 569 if (!insn) { 570 WARN("can't find unreachable insn at %s+0x%" PRIx64, 571 reloc->sym->sec->name, reloc->addend); 572 return -1; 573 } 574 } else { 575 WARN("can't find unreachable insn at %s+0x%" PRIx64, 576 reloc->sym->sec->name, reloc->addend); 577 return -1; 578 } 579 580 insn->dead_end = true; 581 } 582 583 reachable: 584 /* 585 * These manually annotated reachable checks are needed for GCC 4.4, 586 * where the Linux unreachable() macro isn't supported. In that case 587 * GCC doesn't know the "ud2" is fatal, so it generates code as if it's 588 * not a dead end. 589 */ 590 sec = find_section_by_name(file->elf, ".rela.discard.reachable"); 591 if (!sec) 592 return 0; 593 594 list_for_each_entry(reloc, &sec->reloc_list, list) { 595 if (reloc->sym->type != STT_SECTION) { 596 WARN("unexpected relocation symbol type in %s", sec->name); 597 return -1; 598 } 599 insn = find_insn(file, reloc->sym->sec, reloc->addend); 600 if (insn) 601 insn = list_prev_entry(insn, list); 602 else if (reloc->addend == reloc->sym->sec->sh.sh_size) { 603 insn = find_last_insn(file, reloc->sym->sec); 604 if (!insn) { 605 WARN("can't find reachable insn at %s+0x%" PRIx64, 606 reloc->sym->sec->name, reloc->addend); 607 return -1; 608 } 609 } else { 610 WARN("can't find reachable insn at %s+0x%" PRIx64, 611 reloc->sym->sec->name, reloc->addend); 612 return -1; 613 } 614 615 insn->dead_end = false; 616 } 617 618 return 0; 619 } 620 621 static int create_static_call_sections(struct objtool_file *file) 622 { 623 struct section *sec; 624 struct static_call_site *site; 625 struct instruction *insn; 626 struct symbol *key_sym; 627 char *key_name, *tmp; 628 int idx; 629 630 sec = find_section_by_name(file->elf, ".static_call_sites"); 631 if (sec) { 632 INIT_LIST_HEAD(&file->static_call_list); 633 WARN("file already has .static_call_sites section, skipping"); 634 return 0; 635 } 636 637 if (list_empty(&file->static_call_list)) 638 return 0; 639 640 idx = 0; 641 list_for_each_entry(insn, &file->static_call_list, call_node) 642 idx++; 643 644 sec = elf_create_section(file->elf, ".static_call_sites", SHF_WRITE, 645 sizeof(struct static_call_site), idx); 646 if (!sec) 647 return -1; 648 649 idx = 0; 650 list_for_each_entry(insn, &file->static_call_list, call_node) { 651 652 site = (struct static_call_site *)sec->data->d_buf + idx; 653 memset(site, 0, sizeof(struct static_call_site)); 654 655 /* populate reloc for 'addr' */ 656 if (elf_add_reloc_to_insn(file->elf, sec, 657 idx * sizeof(struct static_call_site), 658 R_X86_64_PC32, 659 insn->sec, insn->offset)) 660 return -1; 661 662 /* find key symbol */ 663 key_name = strdup(insn->call_dest->name); 664 if (!key_name) { 665 perror("strdup"); 666 return -1; 667 } 668 if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR, 669 STATIC_CALL_TRAMP_PREFIX_LEN)) { 670 WARN("static_call: trampoline name malformed: %s", key_name); 671 return -1; 672 } 673 tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN; 674 memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN); 675 676 key_sym = find_symbol_by_name(file->elf, tmp); 677 if (!key_sym) { 678 if (!opts.module) { 679 WARN("static_call: can't find static_call_key symbol: %s", tmp); 680 return -1; 681 } 682 683 /* 684 * For modules(), the key might not be exported, which 685 * means the module can make static calls but isn't 686 * allowed to change them. 687 * 688 * In that case we temporarily set the key to be the 689 * trampoline address. This is fixed up in 690 * static_call_add_module(). 691 */ 692 key_sym = insn->call_dest; 693 } 694 free(key_name); 695 696 /* populate reloc for 'key' */ 697 if (elf_add_reloc(file->elf, sec, 698 idx * sizeof(struct static_call_site) + 4, 699 R_X86_64_PC32, key_sym, 700 is_sibling_call(insn) * STATIC_CALL_SITE_TAIL)) 701 return -1; 702 703 idx++; 704 } 705 706 return 0; 707 } 708 709 static int create_retpoline_sites_sections(struct objtool_file *file) 710 { 711 struct instruction *insn; 712 struct section *sec; 713 int idx; 714 715 sec = find_section_by_name(file->elf, ".retpoline_sites"); 716 if (sec) { 717 WARN("file already has .retpoline_sites, skipping"); 718 return 0; 719 } 720 721 idx = 0; 722 list_for_each_entry(insn, &file->retpoline_call_list, call_node) 723 idx++; 724 725 if (!idx) 726 return 0; 727 728 sec = elf_create_section(file->elf, ".retpoline_sites", 0, 729 sizeof(int), idx); 730 if (!sec) { 731 WARN("elf_create_section: .retpoline_sites"); 732 return -1; 733 } 734 735 idx = 0; 736 list_for_each_entry(insn, &file->retpoline_call_list, call_node) { 737 738 int *site = (int *)sec->data->d_buf + idx; 739 *site = 0; 740 741 if (elf_add_reloc_to_insn(file->elf, sec, 742 idx * sizeof(int), 743 R_X86_64_PC32, 744 insn->sec, insn->offset)) { 745 WARN("elf_add_reloc_to_insn: .retpoline_sites"); 746 return -1; 747 } 748 749 idx++; 750 } 751 752 return 0; 753 } 754 755 static int create_return_sites_sections(struct objtool_file *file) 756 { 757 struct instruction *insn; 758 struct section *sec; 759 int idx; 760 761 sec = find_section_by_name(file->elf, ".return_sites"); 762 if (sec) { 763 WARN("file already has .return_sites, skipping"); 764 return 0; 765 } 766 767 idx = 0; 768 list_for_each_entry(insn, &file->return_thunk_list, call_node) 769 idx++; 770 771 if (!idx) 772 return 0; 773 774 sec = elf_create_section(file->elf, ".return_sites", 0, 775 sizeof(int), idx); 776 if (!sec) { 777 WARN("elf_create_section: .return_sites"); 778 return -1; 779 } 780 781 idx = 0; 782 list_for_each_entry(insn, &file->return_thunk_list, call_node) { 783 784 int *site = (int *)sec->data->d_buf + idx; 785 *site = 0; 786 787 if (elf_add_reloc_to_insn(file->elf, sec, 788 idx * sizeof(int), 789 R_X86_64_PC32, 790 insn->sec, insn->offset)) { 791 WARN("elf_add_reloc_to_insn: .return_sites"); 792 return -1; 793 } 794 795 idx++; 796 } 797 798 return 0; 799 } 800 801 static int create_ibt_endbr_seal_sections(struct objtool_file *file) 802 { 803 struct instruction *insn; 804 struct section *sec; 805 int idx; 806 807 sec = find_section_by_name(file->elf, ".ibt_endbr_seal"); 808 if (sec) { 809 WARN("file already has .ibt_endbr_seal, skipping"); 810 return 0; 811 } 812 813 idx = 0; 814 list_for_each_entry(insn, &file->endbr_list, call_node) 815 idx++; 816 817 if (opts.stats) { 818 printf("ibt: ENDBR at function start: %d\n", file->nr_endbr); 819 printf("ibt: ENDBR inside functions: %d\n", file->nr_endbr_int); 820 printf("ibt: superfluous ENDBR: %d\n", idx); 821 } 822 823 if (!idx) 824 return 0; 825 826 sec = elf_create_section(file->elf, ".ibt_endbr_seal", 0, 827 sizeof(int), idx); 828 if (!sec) { 829 WARN("elf_create_section: .ibt_endbr_seal"); 830 return -1; 831 } 832 833 idx = 0; 834 list_for_each_entry(insn, &file->endbr_list, call_node) { 835 836 int *site = (int *)sec->data->d_buf + idx; 837 *site = 0; 838 839 if (elf_add_reloc_to_insn(file->elf, sec, 840 idx * sizeof(int), 841 R_X86_64_PC32, 842 insn->sec, insn->offset)) { 843 WARN("elf_add_reloc_to_insn: .ibt_endbr_seal"); 844 return -1; 845 } 846 847 idx++; 848 } 849 850 return 0; 851 } 852 853 static int create_mcount_loc_sections(struct objtool_file *file) 854 { 855 struct section *sec; 856 unsigned long *loc; 857 struct instruction *insn; 858 int idx; 859 860 sec = find_section_by_name(file->elf, "__mcount_loc"); 861 if (sec) { 862 INIT_LIST_HEAD(&file->mcount_loc_list); 863 WARN("file already has __mcount_loc section, skipping"); 864 return 0; 865 } 866 867 if (list_empty(&file->mcount_loc_list)) 868 return 0; 869 870 idx = 0; 871 list_for_each_entry(insn, &file->mcount_loc_list, call_node) 872 idx++; 873 874 sec = elf_create_section(file->elf, "__mcount_loc", 0, sizeof(unsigned long), idx); 875 if (!sec) 876 return -1; 877 878 idx = 0; 879 list_for_each_entry(insn, &file->mcount_loc_list, call_node) { 880 881 loc = (unsigned long *)sec->data->d_buf + idx; 882 memset(loc, 0, sizeof(unsigned long)); 883 884 if (elf_add_reloc_to_insn(file->elf, sec, 885 idx * sizeof(unsigned long), 886 R_X86_64_64, 887 insn->sec, insn->offset)) 888 return -1; 889 890 idx++; 891 } 892 893 return 0; 894 } 895 896 /* 897 * Warnings shouldn't be reported for ignored functions. 898 */ 899 static void add_ignores(struct objtool_file *file) 900 { 901 struct instruction *insn; 902 struct section *sec; 903 struct symbol *func; 904 struct reloc *reloc; 905 906 sec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard"); 907 if (!sec) 908 return; 909 910 list_for_each_entry(reloc, &sec->reloc_list, list) { 911 switch (reloc->sym->type) { 912 case STT_FUNC: 913 func = reloc->sym; 914 break; 915 916 case STT_SECTION: 917 func = find_func_by_offset(reloc->sym->sec, reloc->addend); 918 if (!func) 919 continue; 920 break; 921 922 default: 923 WARN("unexpected relocation symbol type in %s: %d", sec->name, reloc->sym->type); 924 continue; 925 } 926 927 func_for_each_insn(file, func, insn) 928 insn->ignore = true; 929 } 930 } 931 932 /* 933 * This is a whitelist of functions that is allowed to be called with AC set. 934 * The list is meant to be minimal and only contains compiler instrumentation 935 * ABI and a few functions used to implement *_{to,from}_user() functions. 936 * 937 * These functions must not directly change AC, but may PUSHF/POPF. 938 */ 939 static const char *uaccess_safe_builtin[] = { 940 /* KASAN */ 941 "kasan_report", 942 "kasan_check_range", 943 /* KASAN out-of-line */ 944 "__asan_loadN_noabort", 945 "__asan_load1_noabort", 946 "__asan_load2_noabort", 947 "__asan_load4_noabort", 948 "__asan_load8_noabort", 949 "__asan_load16_noabort", 950 "__asan_storeN_noabort", 951 "__asan_store1_noabort", 952 "__asan_store2_noabort", 953 "__asan_store4_noabort", 954 "__asan_store8_noabort", 955 "__asan_store16_noabort", 956 "__kasan_check_read", 957 "__kasan_check_write", 958 /* KASAN in-line */ 959 "__asan_report_load_n_noabort", 960 "__asan_report_load1_noabort", 961 "__asan_report_load2_noabort", 962 "__asan_report_load4_noabort", 963 "__asan_report_load8_noabort", 964 "__asan_report_load16_noabort", 965 "__asan_report_store_n_noabort", 966 "__asan_report_store1_noabort", 967 "__asan_report_store2_noabort", 968 "__asan_report_store4_noabort", 969 "__asan_report_store8_noabort", 970 "__asan_report_store16_noabort", 971 /* KCSAN */ 972 "__kcsan_check_access", 973 "__kcsan_mb", 974 "__kcsan_wmb", 975 "__kcsan_rmb", 976 "__kcsan_release", 977 "kcsan_found_watchpoint", 978 "kcsan_setup_watchpoint", 979 "kcsan_check_scoped_accesses", 980 "kcsan_disable_current", 981 "kcsan_enable_current_nowarn", 982 /* KCSAN/TSAN */ 983 "__tsan_func_entry", 984 "__tsan_func_exit", 985 "__tsan_read_range", 986 "__tsan_write_range", 987 "__tsan_read1", 988 "__tsan_read2", 989 "__tsan_read4", 990 "__tsan_read8", 991 "__tsan_read16", 992 "__tsan_write1", 993 "__tsan_write2", 994 "__tsan_write4", 995 "__tsan_write8", 996 "__tsan_write16", 997 "__tsan_read_write1", 998 "__tsan_read_write2", 999 "__tsan_read_write4", 1000 "__tsan_read_write8", 1001 "__tsan_read_write16", 1002 "__tsan_atomic8_load", 1003 "__tsan_atomic16_load", 1004 "__tsan_atomic32_load", 1005 "__tsan_atomic64_load", 1006 "__tsan_atomic8_store", 1007 "__tsan_atomic16_store", 1008 "__tsan_atomic32_store", 1009 "__tsan_atomic64_store", 1010 "__tsan_atomic8_exchange", 1011 "__tsan_atomic16_exchange", 1012 "__tsan_atomic32_exchange", 1013 "__tsan_atomic64_exchange", 1014 "__tsan_atomic8_fetch_add", 1015 "__tsan_atomic16_fetch_add", 1016 "__tsan_atomic32_fetch_add", 1017 "__tsan_atomic64_fetch_add", 1018 "__tsan_atomic8_fetch_sub", 1019 "__tsan_atomic16_fetch_sub", 1020 "__tsan_atomic32_fetch_sub", 1021 "__tsan_atomic64_fetch_sub", 1022 "__tsan_atomic8_fetch_and", 1023 "__tsan_atomic16_fetch_and", 1024 "__tsan_atomic32_fetch_and", 1025 "__tsan_atomic64_fetch_and", 1026 "__tsan_atomic8_fetch_or", 1027 "__tsan_atomic16_fetch_or", 1028 "__tsan_atomic32_fetch_or", 1029 "__tsan_atomic64_fetch_or", 1030 "__tsan_atomic8_fetch_xor", 1031 "__tsan_atomic16_fetch_xor", 1032 "__tsan_atomic32_fetch_xor", 1033 "__tsan_atomic64_fetch_xor", 1034 "__tsan_atomic8_fetch_nand", 1035 "__tsan_atomic16_fetch_nand", 1036 "__tsan_atomic32_fetch_nand", 1037 "__tsan_atomic64_fetch_nand", 1038 "__tsan_atomic8_compare_exchange_strong", 1039 "__tsan_atomic16_compare_exchange_strong", 1040 "__tsan_atomic32_compare_exchange_strong", 1041 "__tsan_atomic64_compare_exchange_strong", 1042 "__tsan_atomic8_compare_exchange_weak", 1043 "__tsan_atomic16_compare_exchange_weak", 1044 "__tsan_atomic32_compare_exchange_weak", 1045 "__tsan_atomic64_compare_exchange_weak", 1046 "__tsan_atomic8_compare_exchange_val", 1047 "__tsan_atomic16_compare_exchange_val", 1048 "__tsan_atomic32_compare_exchange_val", 1049 "__tsan_atomic64_compare_exchange_val", 1050 "__tsan_atomic_thread_fence", 1051 "__tsan_atomic_signal_fence", 1052 /* KCOV */ 1053 "write_comp_data", 1054 "check_kcov_mode", 1055 "__sanitizer_cov_trace_pc", 1056 "__sanitizer_cov_trace_const_cmp1", 1057 "__sanitizer_cov_trace_const_cmp2", 1058 "__sanitizer_cov_trace_const_cmp4", 1059 "__sanitizer_cov_trace_const_cmp8", 1060 "__sanitizer_cov_trace_cmp1", 1061 "__sanitizer_cov_trace_cmp2", 1062 "__sanitizer_cov_trace_cmp4", 1063 "__sanitizer_cov_trace_cmp8", 1064 "__sanitizer_cov_trace_switch", 1065 /* KMSAN */ 1066 "kmsan_copy_to_user", 1067 "kmsan_report", 1068 "kmsan_unpoison_entry_regs", 1069 "kmsan_unpoison_memory", 1070 "__msan_chain_origin", 1071 "__msan_get_context_state", 1072 "__msan_instrument_asm_store", 1073 "__msan_metadata_ptr_for_load_1", 1074 "__msan_metadata_ptr_for_load_2", 1075 "__msan_metadata_ptr_for_load_4", 1076 "__msan_metadata_ptr_for_load_8", 1077 "__msan_metadata_ptr_for_load_n", 1078 "__msan_metadata_ptr_for_store_1", 1079 "__msan_metadata_ptr_for_store_2", 1080 "__msan_metadata_ptr_for_store_4", 1081 "__msan_metadata_ptr_for_store_8", 1082 "__msan_metadata_ptr_for_store_n", 1083 "__msan_poison_alloca", 1084 "__msan_warning", 1085 /* UBSAN */ 1086 "ubsan_type_mismatch_common", 1087 "__ubsan_handle_type_mismatch", 1088 "__ubsan_handle_type_mismatch_v1", 1089 "__ubsan_handle_shift_out_of_bounds", 1090 /* misc */ 1091 "csum_partial_copy_generic", 1092 "copy_mc_fragile", 1093 "copy_mc_fragile_handle_tail", 1094 "copy_mc_enhanced_fast_string", 1095 "ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */ 1096 "clear_user_erms", 1097 "clear_user_rep_good", 1098 "clear_user_original", 1099 NULL 1100 }; 1101 1102 static void add_uaccess_safe(struct objtool_file *file) 1103 { 1104 struct symbol *func; 1105 const char **name; 1106 1107 if (!opts.uaccess) 1108 return; 1109 1110 for (name = uaccess_safe_builtin; *name; name++) { 1111 func = find_symbol_by_name(file->elf, *name); 1112 if (!func) 1113 continue; 1114 1115 func->uaccess_safe = true; 1116 } 1117 } 1118 1119 /* 1120 * FIXME: For now, just ignore any alternatives which add retpolines. This is 1121 * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline. 1122 * But it at least allows objtool to understand the control flow *around* the 1123 * retpoline. 1124 */ 1125 static int add_ignore_alternatives(struct objtool_file *file) 1126 { 1127 struct section *sec; 1128 struct reloc *reloc; 1129 struct instruction *insn; 1130 1131 sec = find_section_by_name(file->elf, ".rela.discard.ignore_alts"); 1132 if (!sec) 1133 return 0; 1134 1135 list_for_each_entry(reloc, &sec->reloc_list, list) { 1136 if (reloc->sym->type != STT_SECTION) { 1137 WARN("unexpected relocation symbol type in %s", sec->name); 1138 return -1; 1139 } 1140 1141 insn = find_insn(file, reloc->sym->sec, reloc->addend); 1142 if (!insn) { 1143 WARN("bad .discard.ignore_alts entry"); 1144 return -1; 1145 } 1146 1147 insn->ignore_alts = true; 1148 } 1149 1150 return 0; 1151 } 1152 1153 __weak bool arch_is_retpoline(struct symbol *sym) 1154 { 1155 return false; 1156 } 1157 1158 __weak bool arch_is_rethunk(struct symbol *sym) 1159 { 1160 return false; 1161 } 1162 1163 #define NEGATIVE_RELOC ((void *)-1L) 1164 1165 static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn) 1166 { 1167 if (insn->reloc == NEGATIVE_RELOC) 1168 return NULL; 1169 1170 if (!insn->reloc) { 1171 if (!file) 1172 return NULL; 1173 1174 insn->reloc = find_reloc_by_dest_range(file->elf, insn->sec, 1175 insn->offset, insn->len); 1176 if (!insn->reloc) { 1177 insn->reloc = NEGATIVE_RELOC; 1178 return NULL; 1179 } 1180 } 1181 1182 return insn->reloc; 1183 } 1184 1185 static void remove_insn_ops(struct instruction *insn) 1186 { 1187 struct stack_op *op, *tmp; 1188 1189 list_for_each_entry_safe(op, tmp, &insn->stack_ops, list) { 1190 list_del(&op->list); 1191 free(op); 1192 } 1193 } 1194 1195 static void annotate_call_site(struct objtool_file *file, 1196 struct instruction *insn, bool sibling) 1197 { 1198 struct reloc *reloc = insn_reloc(file, insn); 1199 struct symbol *sym = insn->call_dest; 1200 1201 if (!sym) 1202 sym = reloc->sym; 1203 1204 /* 1205 * Alternative replacement code is just template code which is 1206 * sometimes copied to the original instruction. For now, don't 1207 * annotate it. (In the future we might consider annotating the 1208 * original instruction if/when it ever makes sense to do so.) 1209 */ 1210 if (!strcmp(insn->sec->name, ".altinstr_replacement")) 1211 return; 1212 1213 if (sym->static_call_tramp) { 1214 list_add_tail(&insn->call_node, &file->static_call_list); 1215 return; 1216 } 1217 1218 if (sym->retpoline_thunk) { 1219 list_add_tail(&insn->call_node, &file->retpoline_call_list); 1220 return; 1221 } 1222 1223 /* 1224 * Many compilers cannot disable KCOV or sanitizer calls with a function 1225 * attribute so they need a little help, NOP out any such calls from 1226 * noinstr text. 1227 */ 1228 if (opts.hack_noinstr && insn->sec->noinstr && sym->profiling_func) { 1229 if (reloc) { 1230 reloc->type = R_NONE; 1231 elf_write_reloc(file->elf, reloc); 1232 } 1233 1234 elf_write_insn(file->elf, insn->sec, 1235 insn->offset, insn->len, 1236 sibling ? arch_ret_insn(insn->len) 1237 : arch_nop_insn(insn->len)); 1238 1239 insn->type = sibling ? INSN_RETURN : INSN_NOP; 1240 1241 if (sibling) { 1242 /* 1243 * We've replaced the tail-call JMP insn by two new 1244 * insn: RET; INT3, except we only have a single struct 1245 * insn here. Mark it retpoline_safe to avoid the SLS 1246 * warning, instead of adding another insn. 1247 */ 1248 insn->retpoline_safe = true; 1249 } 1250 1251 return; 1252 } 1253 1254 if (opts.mcount && sym->fentry) { 1255 if (sibling) 1256 WARN_FUNC("Tail call to __fentry__ !?!?", insn->sec, insn->offset); 1257 1258 if (reloc) { 1259 reloc->type = R_NONE; 1260 elf_write_reloc(file->elf, reloc); 1261 } 1262 1263 elf_write_insn(file->elf, insn->sec, 1264 insn->offset, insn->len, 1265 arch_nop_insn(insn->len)); 1266 1267 insn->type = INSN_NOP; 1268 1269 list_add_tail(&insn->call_node, &file->mcount_loc_list); 1270 return; 1271 } 1272 1273 if (!sibling && dead_end_function(file, sym)) 1274 insn->dead_end = true; 1275 } 1276 1277 static void add_call_dest(struct objtool_file *file, struct instruction *insn, 1278 struct symbol *dest, bool sibling) 1279 { 1280 insn->call_dest = dest; 1281 if (!dest) 1282 return; 1283 1284 /* 1285 * Whatever stack impact regular CALLs have, should be undone 1286 * by the RETURN of the called function. 1287 * 1288 * Annotated intra-function calls retain the stack_ops but 1289 * are converted to JUMP, see read_intra_function_calls(). 1290 */ 1291 remove_insn_ops(insn); 1292 1293 annotate_call_site(file, insn, sibling); 1294 } 1295 1296 static void add_retpoline_call(struct objtool_file *file, struct instruction *insn) 1297 { 1298 /* 1299 * Retpoline calls/jumps are really dynamic calls/jumps in disguise, 1300 * so convert them accordingly. 1301 */ 1302 switch (insn->type) { 1303 case INSN_CALL: 1304 insn->type = INSN_CALL_DYNAMIC; 1305 break; 1306 case INSN_JUMP_UNCONDITIONAL: 1307 insn->type = INSN_JUMP_DYNAMIC; 1308 break; 1309 case INSN_JUMP_CONDITIONAL: 1310 insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL; 1311 break; 1312 default: 1313 return; 1314 } 1315 1316 insn->retpoline_safe = true; 1317 1318 /* 1319 * Whatever stack impact regular CALLs have, should be undone 1320 * by the RETURN of the called function. 1321 * 1322 * Annotated intra-function calls retain the stack_ops but 1323 * are converted to JUMP, see read_intra_function_calls(). 1324 */ 1325 remove_insn_ops(insn); 1326 1327 annotate_call_site(file, insn, false); 1328 } 1329 1330 static void add_return_call(struct objtool_file *file, struct instruction *insn, bool add) 1331 { 1332 /* 1333 * Return thunk tail calls are really just returns in disguise, 1334 * so convert them accordingly. 1335 */ 1336 insn->type = INSN_RETURN; 1337 insn->retpoline_safe = true; 1338 1339 if (add) 1340 list_add_tail(&insn->call_node, &file->return_thunk_list); 1341 } 1342 1343 static bool same_function(struct instruction *insn1, struct instruction *insn2) 1344 { 1345 return insn1->func->pfunc == insn2->func->pfunc; 1346 } 1347 1348 static bool is_first_func_insn(struct objtool_file *file, struct instruction *insn) 1349 { 1350 if (insn->offset == insn->func->offset) 1351 return true; 1352 1353 if (opts.ibt) { 1354 struct instruction *prev = prev_insn_same_sym(file, insn); 1355 1356 if (prev && prev->type == INSN_ENDBR && 1357 insn->offset == insn->func->offset + prev->len) 1358 return true; 1359 } 1360 1361 return false; 1362 } 1363 1364 /* 1365 * Find the destination instructions for all jumps. 1366 */ 1367 static int add_jump_destinations(struct objtool_file *file) 1368 { 1369 struct instruction *insn, *jump_dest; 1370 struct reloc *reloc; 1371 struct section *dest_sec; 1372 unsigned long dest_off; 1373 1374 for_each_insn(file, insn) { 1375 if (insn->jump_dest) { 1376 /* 1377 * handle_group_alt() may have previously set 1378 * 'jump_dest' for some alternatives. 1379 */ 1380 continue; 1381 } 1382 if (!is_static_jump(insn)) 1383 continue; 1384 1385 reloc = insn_reloc(file, insn); 1386 if (!reloc) { 1387 dest_sec = insn->sec; 1388 dest_off = arch_jump_destination(insn); 1389 } else if (reloc->sym->type == STT_SECTION) { 1390 dest_sec = reloc->sym->sec; 1391 dest_off = arch_dest_reloc_offset(reloc->addend); 1392 } else if (reloc->sym->retpoline_thunk) { 1393 add_retpoline_call(file, insn); 1394 continue; 1395 } else if (reloc->sym->return_thunk) { 1396 add_return_call(file, insn, true); 1397 continue; 1398 } else if (insn->func) { 1399 /* 1400 * External sibling call or internal sibling call with 1401 * STT_FUNC reloc. 1402 */ 1403 add_call_dest(file, insn, reloc->sym, true); 1404 continue; 1405 } else if (reloc->sym->sec->idx) { 1406 dest_sec = reloc->sym->sec; 1407 dest_off = reloc->sym->sym.st_value + 1408 arch_dest_reloc_offset(reloc->addend); 1409 } else { 1410 /* non-func asm code jumping to another file */ 1411 continue; 1412 } 1413 1414 jump_dest = find_insn(file, dest_sec, dest_off); 1415 if (!jump_dest) { 1416 struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off); 1417 1418 /* 1419 * This is a special case for zen_untrain_ret(). 1420 * It jumps to __x86_return_thunk(), but objtool 1421 * can't find the thunk's starting RET 1422 * instruction, because the RET is also in the 1423 * middle of another instruction. Objtool only 1424 * knows about the outer instruction. 1425 */ 1426 if (sym && sym->return_thunk) { 1427 add_return_call(file, insn, false); 1428 continue; 1429 } 1430 1431 WARN_FUNC("can't find jump dest instruction at %s+0x%lx", 1432 insn->sec, insn->offset, dest_sec->name, 1433 dest_off); 1434 return -1; 1435 } 1436 1437 /* 1438 * Cross-function jump. 1439 */ 1440 if (insn->func && jump_dest->func && 1441 insn->func != jump_dest->func) { 1442 1443 /* 1444 * For GCC 8+, create parent/child links for any cold 1445 * subfunctions. This is _mostly_ redundant with a 1446 * similar initialization in read_symbols(). 1447 * 1448 * If a function has aliases, we want the *first* such 1449 * function in the symbol table to be the subfunction's 1450 * parent. In that case we overwrite the 1451 * initialization done in read_symbols(). 1452 * 1453 * However this code can't completely replace the 1454 * read_symbols() code because this doesn't detect the 1455 * case where the parent function's only reference to a 1456 * subfunction is through a jump table. 1457 */ 1458 if (!strstr(insn->func->name, ".cold") && 1459 strstr(jump_dest->func->name, ".cold")) { 1460 insn->func->cfunc = jump_dest->func; 1461 jump_dest->func->pfunc = insn->func; 1462 1463 } else if (!same_function(insn, jump_dest) && 1464 is_first_func_insn(file, jump_dest)) { 1465 /* 1466 * Internal sibling call without reloc or with 1467 * STT_SECTION reloc. 1468 */ 1469 add_call_dest(file, insn, jump_dest->func, true); 1470 continue; 1471 } 1472 } 1473 1474 insn->jump_dest = jump_dest; 1475 } 1476 1477 return 0; 1478 } 1479 1480 static struct symbol *find_call_destination(struct section *sec, unsigned long offset) 1481 { 1482 struct symbol *call_dest; 1483 1484 call_dest = find_func_by_offset(sec, offset); 1485 if (!call_dest) 1486 call_dest = find_symbol_by_offset(sec, offset); 1487 1488 return call_dest; 1489 } 1490 1491 /* 1492 * Find the destination instructions for all calls. 1493 */ 1494 static int add_call_destinations(struct objtool_file *file) 1495 { 1496 struct instruction *insn; 1497 unsigned long dest_off; 1498 struct symbol *dest; 1499 struct reloc *reloc; 1500 1501 for_each_insn(file, insn) { 1502 if (insn->type != INSN_CALL) 1503 continue; 1504 1505 reloc = insn_reloc(file, insn); 1506 if (!reloc) { 1507 dest_off = arch_jump_destination(insn); 1508 dest = find_call_destination(insn->sec, dest_off); 1509 1510 add_call_dest(file, insn, dest, false); 1511 1512 if (insn->ignore) 1513 continue; 1514 1515 if (!insn->call_dest) { 1516 WARN_FUNC("unannotated intra-function call", insn->sec, insn->offset); 1517 return -1; 1518 } 1519 1520 if (insn->func && insn->call_dest->type != STT_FUNC) { 1521 WARN_FUNC("unsupported call to non-function", 1522 insn->sec, insn->offset); 1523 return -1; 1524 } 1525 1526 } else if (reloc->sym->type == STT_SECTION) { 1527 dest_off = arch_dest_reloc_offset(reloc->addend); 1528 dest = find_call_destination(reloc->sym->sec, dest_off); 1529 if (!dest) { 1530 WARN_FUNC("can't find call dest symbol at %s+0x%lx", 1531 insn->sec, insn->offset, 1532 reloc->sym->sec->name, 1533 dest_off); 1534 return -1; 1535 } 1536 1537 add_call_dest(file, insn, dest, false); 1538 1539 } else if (reloc->sym->retpoline_thunk) { 1540 add_retpoline_call(file, insn); 1541 1542 } else 1543 add_call_dest(file, insn, reloc->sym, false); 1544 } 1545 1546 return 0; 1547 } 1548 1549 /* 1550 * The .alternatives section requires some extra special care over and above 1551 * other special sections because alternatives are patched in place. 1552 */ 1553 static int handle_group_alt(struct objtool_file *file, 1554 struct special_alt *special_alt, 1555 struct instruction *orig_insn, 1556 struct instruction **new_insn) 1557 { 1558 struct instruction *last_orig_insn, *last_new_insn = NULL, *insn, *nop = NULL; 1559 struct alt_group *orig_alt_group, *new_alt_group; 1560 unsigned long dest_off; 1561 1562 1563 orig_alt_group = malloc(sizeof(*orig_alt_group)); 1564 if (!orig_alt_group) { 1565 WARN("malloc failed"); 1566 return -1; 1567 } 1568 orig_alt_group->cfi = calloc(special_alt->orig_len, 1569 sizeof(struct cfi_state *)); 1570 if (!orig_alt_group->cfi) { 1571 WARN("calloc failed"); 1572 return -1; 1573 } 1574 1575 last_orig_insn = NULL; 1576 insn = orig_insn; 1577 sec_for_each_insn_from(file, insn) { 1578 if (insn->offset >= special_alt->orig_off + special_alt->orig_len) 1579 break; 1580 1581 insn->alt_group = orig_alt_group; 1582 last_orig_insn = insn; 1583 } 1584 orig_alt_group->orig_group = NULL; 1585 orig_alt_group->first_insn = orig_insn; 1586 orig_alt_group->last_insn = last_orig_insn; 1587 1588 1589 new_alt_group = malloc(sizeof(*new_alt_group)); 1590 if (!new_alt_group) { 1591 WARN("malloc failed"); 1592 return -1; 1593 } 1594 1595 if (special_alt->new_len < special_alt->orig_len) { 1596 /* 1597 * Insert a fake nop at the end to make the replacement 1598 * alt_group the same size as the original. This is needed to 1599 * allow propagate_alt_cfi() to do its magic. When the last 1600 * instruction affects the stack, the instruction after it (the 1601 * nop) will propagate the new state to the shared CFI array. 1602 */ 1603 nop = malloc(sizeof(*nop)); 1604 if (!nop) { 1605 WARN("malloc failed"); 1606 return -1; 1607 } 1608 memset(nop, 0, sizeof(*nop)); 1609 INIT_LIST_HEAD(&nop->alts); 1610 INIT_LIST_HEAD(&nop->stack_ops); 1611 1612 nop->sec = special_alt->new_sec; 1613 nop->offset = special_alt->new_off + special_alt->new_len; 1614 nop->len = special_alt->orig_len - special_alt->new_len; 1615 nop->type = INSN_NOP; 1616 nop->func = orig_insn->func; 1617 nop->alt_group = new_alt_group; 1618 nop->ignore = orig_insn->ignore_alts; 1619 } 1620 1621 if (!special_alt->new_len) { 1622 *new_insn = nop; 1623 goto end; 1624 } 1625 1626 insn = *new_insn; 1627 sec_for_each_insn_from(file, insn) { 1628 struct reloc *alt_reloc; 1629 1630 if (insn->offset >= special_alt->new_off + special_alt->new_len) 1631 break; 1632 1633 last_new_insn = insn; 1634 1635 insn->ignore = orig_insn->ignore_alts; 1636 insn->func = orig_insn->func; 1637 insn->alt_group = new_alt_group; 1638 1639 /* 1640 * Since alternative replacement code is copy/pasted by the 1641 * kernel after applying relocations, generally such code can't 1642 * have relative-address relocation references to outside the 1643 * .altinstr_replacement section, unless the arch's 1644 * alternatives code can adjust the relative offsets 1645 * accordingly. 1646 */ 1647 alt_reloc = insn_reloc(file, insn); 1648 if (alt_reloc && 1649 !arch_support_alt_relocation(special_alt, insn, alt_reloc)) { 1650 1651 WARN_FUNC("unsupported relocation in alternatives section", 1652 insn->sec, insn->offset); 1653 return -1; 1654 } 1655 1656 if (!is_static_jump(insn)) 1657 continue; 1658 1659 if (!insn->immediate) 1660 continue; 1661 1662 dest_off = arch_jump_destination(insn); 1663 if (dest_off == special_alt->new_off + special_alt->new_len) { 1664 insn->jump_dest = next_insn_same_sec(file, last_orig_insn); 1665 if (!insn->jump_dest) { 1666 WARN_FUNC("can't find alternative jump destination", 1667 insn->sec, insn->offset); 1668 return -1; 1669 } 1670 } 1671 } 1672 1673 if (!last_new_insn) { 1674 WARN_FUNC("can't find last new alternative instruction", 1675 special_alt->new_sec, special_alt->new_off); 1676 return -1; 1677 } 1678 1679 if (nop) 1680 list_add(&nop->list, &last_new_insn->list); 1681 end: 1682 new_alt_group->orig_group = orig_alt_group; 1683 new_alt_group->first_insn = *new_insn; 1684 new_alt_group->last_insn = nop ? : last_new_insn; 1685 new_alt_group->cfi = orig_alt_group->cfi; 1686 return 0; 1687 } 1688 1689 /* 1690 * A jump table entry can either convert a nop to a jump or a jump to a nop. 1691 * If the original instruction is a jump, make the alt entry an effective nop 1692 * by just skipping the original instruction. 1693 */ 1694 static int handle_jump_alt(struct objtool_file *file, 1695 struct special_alt *special_alt, 1696 struct instruction *orig_insn, 1697 struct instruction **new_insn) 1698 { 1699 if (orig_insn->type != INSN_JUMP_UNCONDITIONAL && 1700 orig_insn->type != INSN_NOP) { 1701 1702 WARN_FUNC("unsupported instruction at jump label", 1703 orig_insn->sec, orig_insn->offset); 1704 return -1; 1705 } 1706 1707 if (opts.hack_jump_label && special_alt->key_addend & 2) { 1708 struct reloc *reloc = insn_reloc(file, orig_insn); 1709 1710 if (reloc) { 1711 reloc->type = R_NONE; 1712 elf_write_reloc(file->elf, reloc); 1713 } 1714 elf_write_insn(file->elf, orig_insn->sec, 1715 orig_insn->offset, orig_insn->len, 1716 arch_nop_insn(orig_insn->len)); 1717 orig_insn->type = INSN_NOP; 1718 } 1719 1720 if (orig_insn->type == INSN_NOP) { 1721 if (orig_insn->len == 2) 1722 file->jl_nop_short++; 1723 else 1724 file->jl_nop_long++; 1725 1726 return 0; 1727 } 1728 1729 if (orig_insn->len == 2) 1730 file->jl_short++; 1731 else 1732 file->jl_long++; 1733 1734 *new_insn = list_next_entry(orig_insn, list); 1735 return 0; 1736 } 1737 1738 /* 1739 * Read all the special sections which have alternate instructions which can be 1740 * patched in or redirected to at runtime. Each instruction having alternate 1741 * instruction(s) has them added to its insn->alts list, which will be 1742 * traversed in validate_branch(). 1743 */ 1744 static int add_special_section_alts(struct objtool_file *file) 1745 { 1746 struct list_head special_alts; 1747 struct instruction *orig_insn, *new_insn; 1748 struct special_alt *special_alt, *tmp; 1749 struct alternative *alt; 1750 int ret; 1751 1752 ret = special_get_alts(file->elf, &special_alts); 1753 if (ret) 1754 return ret; 1755 1756 list_for_each_entry_safe(special_alt, tmp, &special_alts, list) { 1757 1758 orig_insn = find_insn(file, special_alt->orig_sec, 1759 special_alt->orig_off); 1760 if (!orig_insn) { 1761 WARN_FUNC("special: can't find orig instruction", 1762 special_alt->orig_sec, special_alt->orig_off); 1763 ret = -1; 1764 goto out; 1765 } 1766 1767 new_insn = NULL; 1768 if (!special_alt->group || special_alt->new_len) { 1769 new_insn = find_insn(file, special_alt->new_sec, 1770 special_alt->new_off); 1771 if (!new_insn) { 1772 WARN_FUNC("special: can't find new instruction", 1773 special_alt->new_sec, 1774 special_alt->new_off); 1775 ret = -1; 1776 goto out; 1777 } 1778 } 1779 1780 if (special_alt->group) { 1781 if (!special_alt->orig_len) { 1782 WARN_FUNC("empty alternative entry", 1783 orig_insn->sec, orig_insn->offset); 1784 continue; 1785 } 1786 1787 ret = handle_group_alt(file, special_alt, orig_insn, 1788 &new_insn); 1789 if (ret) 1790 goto out; 1791 } else if (special_alt->jump_or_nop) { 1792 ret = handle_jump_alt(file, special_alt, orig_insn, 1793 &new_insn); 1794 if (ret) 1795 goto out; 1796 } 1797 1798 alt = malloc(sizeof(*alt)); 1799 if (!alt) { 1800 WARN("malloc failed"); 1801 ret = -1; 1802 goto out; 1803 } 1804 1805 alt->insn = new_insn; 1806 alt->skip_orig = special_alt->skip_orig; 1807 orig_insn->ignore_alts |= special_alt->skip_alt; 1808 list_add_tail(&alt->list, &orig_insn->alts); 1809 1810 list_del(&special_alt->list); 1811 free(special_alt); 1812 } 1813 1814 if (opts.stats) { 1815 printf("jl\\\tNOP\tJMP\n"); 1816 printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short); 1817 printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long); 1818 } 1819 1820 out: 1821 return ret; 1822 } 1823 1824 static int add_jump_table(struct objtool_file *file, struct instruction *insn, 1825 struct reloc *table) 1826 { 1827 struct reloc *reloc = table; 1828 struct instruction *dest_insn; 1829 struct alternative *alt; 1830 struct symbol *pfunc = insn->func->pfunc; 1831 unsigned int prev_offset = 0; 1832 1833 /* 1834 * Each @reloc is a switch table relocation which points to the target 1835 * instruction. 1836 */ 1837 list_for_each_entry_from(reloc, &table->sec->reloc_list, list) { 1838 1839 /* Check for the end of the table: */ 1840 if (reloc != table && reloc->jump_table_start) 1841 break; 1842 1843 /* Make sure the table entries are consecutive: */ 1844 if (prev_offset && reloc->offset != prev_offset + 8) 1845 break; 1846 1847 /* Detect function pointers from contiguous objects: */ 1848 if (reloc->sym->sec == pfunc->sec && 1849 reloc->addend == pfunc->offset) 1850 break; 1851 1852 dest_insn = find_insn(file, reloc->sym->sec, reloc->addend); 1853 if (!dest_insn) 1854 break; 1855 1856 /* Make sure the destination is in the same function: */ 1857 if (!dest_insn->func || dest_insn->func->pfunc != pfunc) 1858 break; 1859 1860 alt = malloc(sizeof(*alt)); 1861 if (!alt) { 1862 WARN("malloc failed"); 1863 return -1; 1864 } 1865 1866 alt->insn = dest_insn; 1867 list_add_tail(&alt->list, &insn->alts); 1868 prev_offset = reloc->offset; 1869 } 1870 1871 if (!prev_offset) { 1872 WARN_FUNC("can't find switch jump table", 1873 insn->sec, insn->offset); 1874 return -1; 1875 } 1876 1877 return 0; 1878 } 1879 1880 /* 1881 * find_jump_table() - Given a dynamic jump, find the switch jump table 1882 * associated with it. 1883 */ 1884 static struct reloc *find_jump_table(struct objtool_file *file, 1885 struct symbol *func, 1886 struct instruction *insn) 1887 { 1888 struct reloc *table_reloc; 1889 struct instruction *dest_insn, *orig_insn = insn; 1890 1891 /* 1892 * Backward search using the @first_jump_src links, these help avoid 1893 * much of the 'in between' code. Which avoids us getting confused by 1894 * it. 1895 */ 1896 for (; 1897 insn && insn->func && insn->func->pfunc == func; 1898 insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) { 1899 1900 if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC) 1901 break; 1902 1903 /* allow small jumps within the range */ 1904 if (insn->type == INSN_JUMP_UNCONDITIONAL && 1905 insn->jump_dest && 1906 (insn->jump_dest->offset <= insn->offset || 1907 insn->jump_dest->offset > orig_insn->offset)) 1908 break; 1909 1910 table_reloc = arch_find_switch_table(file, insn); 1911 if (!table_reloc) 1912 continue; 1913 dest_insn = find_insn(file, table_reloc->sym->sec, table_reloc->addend); 1914 if (!dest_insn || !dest_insn->func || dest_insn->func->pfunc != func) 1915 continue; 1916 1917 return table_reloc; 1918 } 1919 1920 return NULL; 1921 } 1922 1923 /* 1924 * First pass: Mark the head of each jump table so that in the next pass, 1925 * we know when a given jump table ends and the next one starts. 1926 */ 1927 static void mark_func_jump_tables(struct objtool_file *file, 1928 struct symbol *func) 1929 { 1930 struct instruction *insn, *last = NULL; 1931 struct reloc *reloc; 1932 1933 func_for_each_insn(file, func, insn) { 1934 if (!last) 1935 last = insn; 1936 1937 /* 1938 * Store back-pointers for unconditional forward jumps such 1939 * that find_jump_table() can back-track using those and 1940 * avoid some potentially confusing code. 1941 */ 1942 if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest && 1943 insn->offset > last->offset && 1944 insn->jump_dest->offset > insn->offset && 1945 !insn->jump_dest->first_jump_src) { 1946 1947 insn->jump_dest->first_jump_src = insn; 1948 last = insn->jump_dest; 1949 } 1950 1951 if (insn->type != INSN_JUMP_DYNAMIC) 1952 continue; 1953 1954 reloc = find_jump_table(file, func, insn); 1955 if (reloc) { 1956 reloc->jump_table_start = true; 1957 insn->jump_table = reloc; 1958 } 1959 } 1960 } 1961 1962 static int add_func_jump_tables(struct objtool_file *file, 1963 struct symbol *func) 1964 { 1965 struct instruction *insn; 1966 int ret; 1967 1968 func_for_each_insn(file, func, insn) { 1969 if (!insn->jump_table) 1970 continue; 1971 1972 ret = add_jump_table(file, insn, insn->jump_table); 1973 if (ret) 1974 return ret; 1975 } 1976 1977 return 0; 1978 } 1979 1980 /* 1981 * For some switch statements, gcc generates a jump table in the .rodata 1982 * section which contains a list of addresses within the function to jump to. 1983 * This finds these jump tables and adds them to the insn->alts lists. 1984 */ 1985 static int add_jump_table_alts(struct objtool_file *file) 1986 { 1987 struct section *sec; 1988 struct symbol *func; 1989 int ret; 1990 1991 if (!file->rodata) 1992 return 0; 1993 1994 for_each_sec(file, sec) { 1995 list_for_each_entry(func, &sec->symbol_list, list) { 1996 if (func->type != STT_FUNC) 1997 continue; 1998 1999 mark_func_jump_tables(file, func); 2000 ret = add_func_jump_tables(file, func); 2001 if (ret) 2002 return ret; 2003 } 2004 } 2005 2006 return 0; 2007 } 2008 2009 static void set_func_state(struct cfi_state *state) 2010 { 2011 state->cfa = initial_func_cfi.cfa; 2012 memcpy(&state->regs, &initial_func_cfi.regs, 2013 CFI_NUM_REGS * sizeof(struct cfi_reg)); 2014 state->stack_size = initial_func_cfi.cfa.offset; 2015 } 2016 2017 static int read_unwind_hints(struct objtool_file *file) 2018 { 2019 struct cfi_state cfi = init_cfi; 2020 struct section *sec, *relocsec; 2021 struct unwind_hint *hint; 2022 struct instruction *insn; 2023 struct reloc *reloc; 2024 int i; 2025 2026 sec = find_section_by_name(file->elf, ".discard.unwind_hints"); 2027 if (!sec) 2028 return 0; 2029 2030 relocsec = sec->reloc; 2031 if (!relocsec) { 2032 WARN("missing .rela.discard.unwind_hints section"); 2033 return -1; 2034 } 2035 2036 if (sec->sh.sh_size % sizeof(struct unwind_hint)) { 2037 WARN("struct unwind_hint size mismatch"); 2038 return -1; 2039 } 2040 2041 file->hints = true; 2042 2043 for (i = 0; i < sec->sh.sh_size / sizeof(struct unwind_hint); i++) { 2044 hint = (struct unwind_hint *)sec->data->d_buf + i; 2045 2046 reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint)); 2047 if (!reloc) { 2048 WARN("can't find reloc for unwind_hints[%d]", i); 2049 return -1; 2050 } 2051 2052 insn = find_insn(file, reloc->sym->sec, reloc->addend); 2053 if (!insn) { 2054 WARN("can't find insn for unwind_hints[%d]", i); 2055 return -1; 2056 } 2057 2058 insn->hint = true; 2059 2060 if (hint->type == UNWIND_HINT_TYPE_SAVE) { 2061 insn->hint = false; 2062 insn->save = true; 2063 continue; 2064 } 2065 2066 if (hint->type == UNWIND_HINT_TYPE_RESTORE) { 2067 insn->restore = true; 2068 continue; 2069 } 2070 2071 if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) { 2072 struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset); 2073 2074 if (sym && sym->bind == STB_GLOBAL) { 2075 if (opts.ibt && insn->type != INSN_ENDBR && !insn->noendbr) { 2076 WARN_FUNC("UNWIND_HINT_IRET_REGS without ENDBR", 2077 insn->sec, insn->offset); 2078 } 2079 2080 insn->entry = 1; 2081 } 2082 } 2083 2084 if (hint->type == UNWIND_HINT_TYPE_ENTRY) { 2085 hint->type = UNWIND_HINT_TYPE_CALL; 2086 insn->entry = 1; 2087 } 2088 2089 if (hint->type == UNWIND_HINT_TYPE_FUNC) { 2090 insn->cfi = &func_cfi; 2091 continue; 2092 } 2093 2094 if (insn->cfi) 2095 cfi = *(insn->cfi); 2096 2097 if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) { 2098 WARN_FUNC("unsupported unwind_hint sp base reg %d", 2099 insn->sec, insn->offset, hint->sp_reg); 2100 return -1; 2101 } 2102 2103 cfi.cfa.offset = bswap_if_needed(hint->sp_offset); 2104 cfi.type = hint->type; 2105 cfi.end = hint->end; 2106 2107 insn->cfi = cfi_hash_find_or_add(&cfi); 2108 } 2109 2110 return 0; 2111 } 2112 2113 static int read_noendbr_hints(struct objtool_file *file) 2114 { 2115 struct section *sec; 2116 struct instruction *insn; 2117 struct reloc *reloc; 2118 2119 sec = find_section_by_name(file->elf, ".rela.discard.noendbr"); 2120 if (!sec) 2121 return 0; 2122 2123 list_for_each_entry(reloc, &sec->reloc_list, list) { 2124 insn = find_insn(file, reloc->sym->sec, reloc->sym->offset + reloc->addend); 2125 if (!insn) { 2126 WARN("bad .discard.noendbr entry"); 2127 return -1; 2128 } 2129 2130 insn->noendbr = 1; 2131 } 2132 2133 return 0; 2134 } 2135 2136 static int read_retpoline_hints(struct objtool_file *file) 2137 { 2138 struct section *sec; 2139 struct instruction *insn; 2140 struct reloc *reloc; 2141 2142 sec = find_section_by_name(file->elf, ".rela.discard.retpoline_safe"); 2143 if (!sec) 2144 return 0; 2145 2146 list_for_each_entry(reloc, &sec->reloc_list, list) { 2147 if (reloc->sym->type != STT_SECTION) { 2148 WARN("unexpected relocation symbol type in %s", sec->name); 2149 return -1; 2150 } 2151 2152 insn = find_insn(file, reloc->sym->sec, reloc->addend); 2153 if (!insn) { 2154 WARN("bad .discard.retpoline_safe entry"); 2155 return -1; 2156 } 2157 2158 if (insn->type != INSN_JUMP_DYNAMIC && 2159 insn->type != INSN_CALL_DYNAMIC && 2160 insn->type != INSN_RETURN && 2161 insn->type != INSN_NOP) { 2162 WARN_FUNC("retpoline_safe hint not an indirect jump/call/ret/nop", 2163 insn->sec, insn->offset); 2164 return -1; 2165 } 2166 2167 insn->retpoline_safe = true; 2168 } 2169 2170 return 0; 2171 } 2172 2173 static int read_instr_hints(struct objtool_file *file) 2174 { 2175 struct section *sec; 2176 struct instruction *insn; 2177 struct reloc *reloc; 2178 2179 sec = find_section_by_name(file->elf, ".rela.discard.instr_end"); 2180 if (!sec) 2181 return 0; 2182 2183 list_for_each_entry(reloc, &sec->reloc_list, list) { 2184 if (reloc->sym->type != STT_SECTION) { 2185 WARN("unexpected relocation symbol type in %s", sec->name); 2186 return -1; 2187 } 2188 2189 insn = find_insn(file, reloc->sym->sec, reloc->addend); 2190 if (!insn) { 2191 WARN("bad .discard.instr_end entry"); 2192 return -1; 2193 } 2194 2195 insn->instr--; 2196 } 2197 2198 sec = find_section_by_name(file->elf, ".rela.discard.instr_begin"); 2199 if (!sec) 2200 return 0; 2201 2202 list_for_each_entry(reloc, &sec->reloc_list, list) { 2203 if (reloc->sym->type != STT_SECTION) { 2204 WARN("unexpected relocation symbol type in %s", sec->name); 2205 return -1; 2206 } 2207 2208 insn = find_insn(file, reloc->sym->sec, reloc->addend); 2209 if (!insn) { 2210 WARN("bad .discard.instr_begin entry"); 2211 return -1; 2212 } 2213 2214 insn->instr++; 2215 } 2216 2217 return 0; 2218 } 2219 2220 static int read_intra_function_calls(struct objtool_file *file) 2221 { 2222 struct instruction *insn; 2223 struct section *sec; 2224 struct reloc *reloc; 2225 2226 sec = find_section_by_name(file->elf, ".rela.discard.intra_function_calls"); 2227 if (!sec) 2228 return 0; 2229 2230 list_for_each_entry(reloc, &sec->reloc_list, list) { 2231 unsigned long dest_off; 2232 2233 if (reloc->sym->type != STT_SECTION) { 2234 WARN("unexpected relocation symbol type in %s", 2235 sec->name); 2236 return -1; 2237 } 2238 2239 insn = find_insn(file, reloc->sym->sec, reloc->addend); 2240 if (!insn) { 2241 WARN("bad .discard.intra_function_call entry"); 2242 return -1; 2243 } 2244 2245 if (insn->type != INSN_CALL) { 2246 WARN_FUNC("intra_function_call not a direct call", 2247 insn->sec, insn->offset); 2248 return -1; 2249 } 2250 2251 /* 2252 * Treat intra-function CALLs as JMPs, but with a stack_op. 2253 * See add_call_destinations(), which strips stack_ops from 2254 * normal CALLs. 2255 */ 2256 insn->type = INSN_JUMP_UNCONDITIONAL; 2257 2258 dest_off = arch_jump_destination(insn); 2259 insn->jump_dest = find_insn(file, insn->sec, dest_off); 2260 if (!insn->jump_dest) { 2261 WARN_FUNC("can't find call dest at %s+0x%lx", 2262 insn->sec, insn->offset, 2263 insn->sec->name, dest_off); 2264 return -1; 2265 } 2266 } 2267 2268 return 0; 2269 } 2270 2271 /* 2272 * Return true if name matches an instrumentation function, where calls to that 2273 * function from noinstr code can safely be removed, but compilers won't do so. 2274 */ 2275 static bool is_profiling_func(const char *name) 2276 { 2277 /* 2278 * Many compilers cannot disable KCOV with a function attribute. 2279 */ 2280 if (!strncmp(name, "__sanitizer_cov_", 16)) 2281 return true; 2282 2283 /* 2284 * Some compilers currently do not remove __tsan_func_entry/exit nor 2285 * __tsan_atomic_signal_fence (used for barrier instrumentation) with 2286 * the __no_sanitize_thread attribute, remove them. Once the kernel's 2287 * minimum Clang version is 14.0, this can be removed. 2288 */ 2289 if (!strncmp(name, "__tsan_func_", 12) || 2290 !strcmp(name, "__tsan_atomic_signal_fence")) 2291 return true; 2292 2293 return false; 2294 } 2295 2296 static int classify_symbols(struct objtool_file *file) 2297 { 2298 struct section *sec; 2299 struct symbol *func; 2300 2301 for_each_sec(file, sec) { 2302 list_for_each_entry(func, &sec->symbol_list, list) { 2303 if (func->bind != STB_GLOBAL) 2304 continue; 2305 2306 if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR, 2307 strlen(STATIC_CALL_TRAMP_PREFIX_STR))) 2308 func->static_call_tramp = true; 2309 2310 if (arch_is_retpoline(func)) 2311 func->retpoline_thunk = true; 2312 2313 if (arch_is_rethunk(func)) 2314 func->return_thunk = true; 2315 2316 if (!strcmp(func->name, "__fentry__")) 2317 func->fentry = true; 2318 2319 if (is_profiling_func(func->name)) 2320 func->profiling_func = true; 2321 } 2322 } 2323 2324 return 0; 2325 } 2326 2327 static void mark_rodata(struct objtool_file *file) 2328 { 2329 struct section *sec; 2330 bool found = false; 2331 2332 /* 2333 * Search for the following rodata sections, each of which can 2334 * potentially contain jump tables: 2335 * 2336 * - .rodata: can contain GCC switch tables 2337 * - .rodata.<func>: same, if -fdata-sections is being used 2338 * - .rodata..c_jump_table: contains C annotated jump tables 2339 * 2340 * .rodata.str1.* sections are ignored; they don't contain jump tables. 2341 */ 2342 for_each_sec(file, sec) { 2343 if (!strncmp(sec->name, ".rodata", 7) && 2344 !strstr(sec->name, ".str1.")) { 2345 sec->rodata = true; 2346 found = true; 2347 } 2348 } 2349 2350 file->rodata = found; 2351 } 2352 2353 static int decode_sections(struct objtool_file *file) 2354 { 2355 int ret; 2356 2357 mark_rodata(file); 2358 2359 ret = init_pv_ops(file); 2360 if (ret) 2361 return ret; 2362 2363 ret = decode_instructions(file); 2364 if (ret) 2365 return ret; 2366 2367 add_ignores(file); 2368 add_uaccess_safe(file); 2369 2370 ret = add_ignore_alternatives(file); 2371 if (ret) 2372 return ret; 2373 2374 /* 2375 * Must be before read_unwind_hints() since that needs insn->noendbr. 2376 */ 2377 ret = read_noendbr_hints(file); 2378 if (ret) 2379 return ret; 2380 2381 /* 2382 * Must be before add_{jump_call}_destination. 2383 */ 2384 ret = classify_symbols(file); 2385 if (ret) 2386 return ret; 2387 2388 /* 2389 * Must be before add_jump_destinations(), which depends on 'func' 2390 * being set for alternatives, to enable proper sibling call detection. 2391 */ 2392 ret = add_special_section_alts(file); 2393 if (ret) 2394 return ret; 2395 2396 ret = add_jump_destinations(file); 2397 if (ret) 2398 return ret; 2399 2400 /* 2401 * Must be before add_call_destination(); it changes INSN_CALL to 2402 * INSN_JUMP. 2403 */ 2404 ret = read_intra_function_calls(file); 2405 if (ret) 2406 return ret; 2407 2408 ret = add_call_destinations(file); 2409 if (ret) 2410 return ret; 2411 2412 /* 2413 * Must be after add_call_destinations() such that it can override 2414 * dead_end_function() marks. 2415 */ 2416 ret = add_dead_ends(file); 2417 if (ret) 2418 return ret; 2419 2420 ret = add_jump_table_alts(file); 2421 if (ret) 2422 return ret; 2423 2424 ret = read_unwind_hints(file); 2425 if (ret) 2426 return ret; 2427 2428 ret = read_retpoline_hints(file); 2429 if (ret) 2430 return ret; 2431 2432 ret = read_instr_hints(file); 2433 if (ret) 2434 return ret; 2435 2436 return 0; 2437 } 2438 2439 static bool is_fentry_call(struct instruction *insn) 2440 { 2441 if (insn->type == INSN_CALL && 2442 insn->call_dest && 2443 insn->call_dest->fentry) 2444 return true; 2445 2446 return false; 2447 } 2448 2449 static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state) 2450 { 2451 struct cfi_state *cfi = &state->cfi; 2452 int i; 2453 2454 if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap) 2455 return true; 2456 2457 if (cfi->cfa.offset != initial_func_cfi.cfa.offset) 2458 return true; 2459 2460 if (cfi->stack_size != initial_func_cfi.cfa.offset) 2461 return true; 2462 2463 for (i = 0; i < CFI_NUM_REGS; i++) { 2464 if (cfi->regs[i].base != initial_func_cfi.regs[i].base || 2465 cfi->regs[i].offset != initial_func_cfi.regs[i].offset) 2466 return true; 2467 } 2468 2469 return false; 2470 } 2471 2472 static bool check_reg_frame_pos(const struct cfi_reg *reg, 2473 int expected_offset) 2474 { 2475 return reg->base == CFI_CFA && 2476 reg->offset == expected_offset; 2477 } 2478 2479 static bool has_valid_stack_frame(struct insn_state *state) 2480 { 2481 struct cfi_state *cfi = &state->cfi; 2482 2483 if (cfi->cfa.base == CFI_BP && 2484 check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) && 2485 check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8)) 2486 return true; 2487 2488 if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP) 2489 return true; 2490 2491 return false; 2492 } 2493 2494 static int update_cfi_state_regs(struct instruction *insn, 2495 struct cfi_state *cfi, 2496 struct stack_op *op) 2497 { 2498 struct cfi_reg *cfa = &cfi->cfa; 2499 2500 if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT) 2501 return 0; 2502 2503 /* push */ 2504 if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF) 2505 cfa->offset += 8; 2506 2507 /* pop */ 2508 if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF) 2509 cfa->offset -= 8; 2510 2511 /* add immediate to sp */ 2512 if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD && 2513 op->dest.reg == CFI_SP && op->src.reg == CFI_SP) 2514 cfa->offset -= op->src.offset; 2515 2516 return 0; 2517 } 2518 2519 static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset) 2520 { 2521 if (arch_callee_saved_reg(reg) && 2522 cfi->regs[reg].base == CFI_UNDEFINED) { 2523 cfi->regs[reg].base = base; 2524 cfi->regs[reg].offset = offset; 2525 } 2526 } 2527 2528 static void restore_reg(struct cfi_state *cfi, unsigned char reg) 2529 { 2530 cfi->regs[reg].base = initial_func_cfi.regs[reg].base; 2531 cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset; 2532 } 2533 2534 /* 2535 * A note about DRAP stack alignment: 2536 * 2537 * GCC has the concept of a DRAP register, which is used to help keep track of 2538 * the stack pointer when aligning the stack. r10 or r13 is used as the DRAP 2539 * register. The typical DRAP pattern is: 2540 * 2541 * 4c 8d 54 24 08 lea 0x8(%rsp),%r10 2542 * 48 83 e4 c0 and $0xffffffffffffffc0,%rsp 2543 * 41 ff 72 f8 pushq -0x8(%r10) 2544 * 55 push %rbp 2545 * 48 89 e5 mov %rsp,%rbp 2546 * (more pushes) 2547 * 41 52 push %r10 2548 * ... 2549 * 41 5a pop %r10 2550 * (more pops) 2551 * 5d pop %rbp 2552 * 49 8d 62 f8 lea -0x8(%r10),%rsp 2553 * c3 retq 2554 * 2555 * There are some variations in the epilogues, like: 2556 * 2557 * 5b pop %rbx 2558 * 41 5a pop %r10 2559 * 41 5c pop %r12 2560 * 41 5d pop %r13 2561 * 41 5e pop %r14 2562 * c9 leaveq 2563 * 49 8d 62 f8 lea -0x8(%r10),%rsp 2564 * c3 retq 2565 * 2566 * and: 2567 * 2568 * 4c 8b 55 e8 mov -0x18(%rbp),%r10 2569 * 48 8b 5d e0 mov -0x20(%rbp),%rbx 2570 * 4c 8b 65 f0 mov -0x10(%rbp),%r12 2571 * 4c 8b 6d f8 mov -0x8(%rbp),%r13 2572 * c9 leaveq 2573 * 49 8d 62 f8 lea -0x8(%r10),%rsp 2574 * c3 retq 2575 * 2576 * Sometimes r13 is used as the DRAP register, in which case it's saved and 2577 * restored beforehand: 2578 * 2579 * 41 55 push %r13 2580 * 4c 8d 6c 24 10 lea 0x10(%rsp),%r13 2581 * 48 83 e4 f0 and $0xfffffffffffffff0,%rsp 2582 * ... 2583 * 49 8d 65 f0 lea -0x10(%r13),%rsp 2584 * 41 5d pop %r13 2585 * c3 retq 2586 */ 2587 static int update_cfi_state(struct instruction *insn, 2588 struct instruction *next_insn, 2589 struct cfi_state *cfi, struct stack_op *op) 2590 { 2591 struct cfi_reg *cfa = &cfi->cfa; 2592 struct cfi_reg *regs = cfi->regs; 2593 2594 /* stack operations don't make sense with an undefined CFA */ 2595 if (cfa->base == CFI_UNDEFINED) { 2596 if (insn->func) { 2597 WARN_FUNC("undefined stack state", insn->sec, insn->offset); 2598 return -1; 2599 } 2600 return 0; 2601 } 2602 2603 if (cfi->type == UNWIND_HINT_TYPE_REGS || 2604 cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL) 2605 return update_cfi_state_regs(insn, cfi, op); 2606 2607 switch (op->dest.type) { 2608 2609 case OP_DEST_REG: 2610 switch (op->src.type) { 2611 2612 case OP_SRC_REG: 2613 if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP && 2614 cfa->base == CFI_SP && 2615 check_reg_frame_pos(®s[CFI_BP], -cfa->offset)) { 2616 2617 /* mov %rsp, %rbp */ 2618 cfa->base = op->dest.reg; 2619 cfi->bp_scratch = false; 2620 } 2621 2622 else if (op->src.reg == CFI_SP && 2623 op->dest.reg == CFI_BP && cfi->drap) { 2624 2625 /* drap: mov %rsp, %rbp */ 2626 regs[CFI_BP].base = CFI_BP; 2627 regs[CFI_BP].offset = -cfi->stack_size; 2628 cfi->bp_scratch = false; 2629 } 2630 2631 else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) { 2632 2633 /* 2634 * mov %rsp, %reg 2635 * 2636 * This is needed for the rare case where GCC 2637 * does: 2638 * 2639 * mov %rsp, %rax 2640 * ... 2641 * mov %rax, %rsp 2642 */ 2643 cfi->vals[op->dest.reg].base = CFI_CFA; 2644 cfi->vals[op->dest.reg].offset = -cfi->stack_size; 2645 } 2646 2647 else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP && 2648 (cfa->base == CFI_BP || cfa->base == cfi->drap_reg)) { 2649 2650 /* 2651 * mov %rbp, %rsp 2652 * 2653 * Restore the original stack pointer (Clang). 2654 */ 2655 cfi->stack_size = -cfi->regs[CFI_BP].offset; 2656 } 2657 2658 else if (op->dest.reg == cfa->base) { 2659 2660 /* mov %reg, %rsp */ 2661 if (cfa->base == CFI_SP && 2662 cfi->vals[op->src.reg].base == CFI_CFA) { 2663 2664 /* 2665 * This is needed for the rare case 2666 * where GCC does something dumb like: 2667 * 2668 * lea 0x8(%rsp), %rcx 2669 * ... 2670 * mov %rcx, %rsp 2671 */ 2672 cfa->offset = -cfi->vals[op->src.reg].offset; 2673 cfi->stack_size = cfa->offset; 2674 2675 } else if (cfa->base == CFI_SP && 2676 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT && 2677 cfi->vals[op->src.reg].offset == cfa->offset) { 2678 2679 /* 2680 * Stack swizzle: 2681 * 2682 * 1: mov %rsp, (%[tos]) 2683 * 2: mov %[tos], %rsp 2684 * ... 2685 * 3: pop %rsp 2686 * 2687 * Where: 2688 * 2689 * 1 - places a pointer to the previous 2690 * stack at the Top-of-Stack of the 2691 * new stack. 2692 * 2693 * 2 - switches to the new stack. 2694 * 2695 * 3 - pops the Top-of-Stack to restore 2696 * the original stack. 2697 * 2698 * Note: we set base to SP_INDIRECT 2699 * here and preserve offset. Therefore 2700 * when the unwinder reaches ToS it 2701 * will dereference SP and then add the 2702 * offset to find the next frame, IOW: 2703 * (%rsp) + offset. 2704 */ 2705 cfa->base = CFI_SP_INDIRECT; 2706 2707 } else { 2708 cfa->base = CFI_UNDEFINED; 2709 cfa->offset = 0; 2710 } 2711 } 2712 2713 else if (op->dest.reg == CFI_SP && 2714 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT && 2715 cfi->vals[op->src.reg].offset == cfa->offset) { 2716 2717 /* 2718 * The same stack swizzle case 2) as above. But 2719 * because we can't change cfa->base, case 3) 2720 * will become a regular POP. Pretend we're a 2721 * PUSH so things don't go unbalanced. 2722 */ 2723 cfi->stack_size += 8; 2724 } 2725 2726 2727 break; 2728 2729 case OP_SRC_ADD: 2730 if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) { 2731 2732 /* add imm, %rsp */ 2733 cfi->stack_size -= op->src.offset; 2734 if (cfa->base == CFI_SP) 2735 cfa->offset -= op->src.offset; 2736 break; 2737 } 2738 2739 if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) { 2740 2741 /* lea disp(%rbp), %rsp */ 2742 cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset); 2743 break; 2744 } 2745 2746 if (!cfi->drap && op->src.reg == CFI_SP && 2747 op->dest.reg == CFI_BP && cfa->base == CFI_SP && 2748 check_reg_frame_pos(®s[CFI_BP], -cfa->offset + op->src.offset)) { 2749 2750 /* lea disp(%rsp), %rbp */ 2751 cfa->base = CFI_BP; 2752 cfa->offset -= op->src.offset; 2753 cfi->bp_scratch = false; 2754 break; 2755 } 2756 2757 if (op->src.reg == CFI_SP && cfa->base == CFI_SP) { 2758 2759 /* drap: lea disp(%rsp), %drap */ 2760 cfi->drap_reg = op->dest.reg; 2761 2762 /* 2763 * lea disp(%rsp), %reg 2764 * 2765 * This is needed for the rare case where GCC 2766 * does something dumb like: 2767 * 2768 * lea 0x8(%rsp), %rcx 2769 * ... 2770 * mov %rcx, %rsp 2771 */ 2772 cfi->vals[op->dest.reg].base = CFI_CFA; 2773 cfi->vals[op->dest.reg].offset = \ 2774 -cfi->stack_size + op->src.offset; 2775 2776 break; 2777 } 2778 2779 if (cfi->drap && op->dest.reg == CFI_SP && 2780 op->src.reg == cfi->drap_reg) { 2781 2782 /* drap: lea disp(%drap), %rsp */ 2783 cfa->base = CFI_SP; 2784 cfa->offset = cfi->stack_size = -op->src.offset; 2785 cfi->drap_reg = CFI_UNDEFINED; 2786 cfi->drap = false; 2787 break; 2788 } 2789 2790 if (op->dest.reg == cfi->cfa.base && !(next_insn && next_insn->hint)) { 2791 WARN_FUNC("unsupported stack register modification", 2792 insn->sec, insn->offset); 2793 return -1; 2794 } 2795 2796 break; 2797 2798 case OP_SRC_AND: 2799 if (op->dest.reg != CFI_SP || 2800 (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) || 2801 (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) { 2802 WARN_FUNC("unsupported stack pointer realignment", 2803 insn->sec, insn->offset); 2804 return -1; 2805 } 2806 2807 if (cfi->drap_reg != CFI_UNDEFINED) { 2808 /* drap: and imm, %rsp */ 2809 cfa->base = cfi->drap_reg; 2810 cfa->offset = cfi->stack_size = 0; 2811 cfi->drap = true; 2812 } 2813 2814 /* 2815 * Older versions of GCC (4.8ish) realign the stack 2816 * without DRAP, with a frame pointer. 2817 */ 2818 2819 break; 2820 2821 case OP_SRC_POP: 2822 case OP_SRC_POPF: 2823 if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) { 2824 2825 /* pop %rsp; # restore from a stack swizzle */ 2826 cfa->base = CFI_SP; 2827 break; 2828 } 2829 2830 if (!cfi->drap && op->dest.reg == cfa->base) { 2831 2832 /* pop %rbp */ 2833 cfa->base = CFI_SP; 2834 } 2835 2836 if (cfi->drap && cfa->base == CFI_BP_INDIRECT && 2837 op->dest.reg == cfi->drap_reg && 2838 cfi->drap_offset == -cfi->stack_size) { 2839 2840 /* drap: pop %drap */ 2841 cfa->base = cfi->drap_reg; 2842 cfa->offset = 0; 2843 cfi->drap_offset = -1; 2844 2845 } else if (cfi->stack_size == -regs[op->dest.reg].offset) { 2846 2847 /* pop %reg */ 2848 restore_reg(cfi, op->dest.reg); 2849 } 2850 2851 cfi->stack_size -= 8; 2852 if (cfa->base == CFI_SP) 2853 cfa->offset -= 8; 2854 2855 break; 2856 2857 case OP_SRC_REG_INDIRECT: 2858 if (!cfi->drap && op->dest.reg == cfa->base && 2859 op->dest.reg == CFI_BP) { 2860 2861 /* mov disp(%rsp), %rbp */ 2862 cfa->base = CFI_SP; 2863 cfa->offset = cfi->stack_size; 2864 } 2865 2866 if (cfi->drap && op->src.reg == CFI_BP && 2867 op->src.offset == cfi->drap_offset) { 2868 2869 /* drap: mov disp(%rbp), %drap */ 2870 cfa->base = cfi->drap_reg; 2871 cfa->offset = 0; 2872 cfi->drap_offset = -1; 2873 } 2874 2875 if (cfi->drap && op->src.reg == CFI_BP && 2876 op->src.offset == regs[op->dest.reg].offset) { 2877 2878 /* drap: mov disp(%rbp), %reg */ 2879 restore_reg(cfi, op->dest.reg); 2880 2881 } else if (op->src.reg == cfa->base && 2882 op->src.offset == regs[op->dest.reg].offset + cfa->offset) { 2883 2884 /* mov disp(%rbp), %reg */ 2885 /* mov disp(%rsp), %reg */ 2886 restore_reg(cfi, op->dest.reg); 2887 2888 } else if (op->src.reg == CFI_SP && 2889 op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) { 2890 2891 /* mov disp(%rsp), %reg */ 2892 restore_reg(cfi, op->dest.reg); 2893 } 2894 2895 break; 2896 2897 default: 2898 WARN_FUNC("unknown stack-related instruction", 2899 insn->sec, insn->offset); 2900 return -1; 2901 } 2902 2903 break; 2904 2905 case OP_DEST_PUSH: 2906 case OP_DEST_PUSHF: 2907 cfi->stack_size += 8; 2908 if (cfa->base == CFI_SP) 2909 cfa->offset += 8; 2910 2911 if (op->src.type != OP_SRC_REG) 2912 break; 2913 2914 if (cfi->drap) { 2915 if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) { 2916 2917 /* drap: push %drap */ 2918 cfa->base = CFI_BP_INDIRECT; 2919 cfa->offset = -cfi->stack_size; 2920 2921 /* save drap so we know when to restore it */ 2922 cfi->drap_offset = -cfi->stack_size; 2923 2924 } else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) { 2925 2926 /* drap: push %rbp */ 2927 cfi->stack_size = 0; 2928 2929 } else { 2930 2931 /* drap: push %reg */ 2932 save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size); 2933 } 2934 2935 } else { 2936 2937 /* push %reg */ 2938 save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size); 2939 } 2940 2941 /* detect when asm code uses rbp as a scratch register */ 2942 if (opts.stackval && insn->func && op->src.reg == CFI_BP && 2943 cfa->base != CFI_BP) 2944 cfi->bp_scratch = true; 2945 break; 2946 2947 case OP_DEST_REG_INDIRECT: 2948 2949 if (cfi->drap) { 2950 if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) { 2951 2952 /* drap: mov %drap, disp(%rbp) */ 2953 cfa->base = CFI_BP_INDIRECT; 2954 cfa->offset = op->dest.offset; 2955 2956 /* save drap offset so we know when to restore it */ 2957 cfi->drap_offset = op->dest.offset; 2958 } else { 2959 2960 /* drap: mov reg, disp(%rbp) */ 2961 save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset); 2962 } 2963 2964 } else if (op->dest.reg == cfa->base) { 2965 2966 /* mov reg, disp(%rbp) */ 2967 /* mov reg, disp(%rsp) */ 2968 save_reg(cfi, op->src.reg, CFI_CFA, 2969 op->dest.offset - cfi->cfa.offset); 2970 2971 } else if (op->dest.reg == CFI_SP) { 2972 2973 /* mov reg, disp(%rsp) */ 2974 save_reg(cfi, op->src.reg, CFI_CFA, 2975 op->dest.offset - cfi->stack_size); 2976 2977 } else if (op->src.reg == CFI_SP && op->dest.offset == 0) { 2978 2979 /* mov %rsp, (%reg); # setup a stack swizzle. */ 2980 cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT; 2981 cfi->vals[op->dest.reg].offset = cfa->offset; 2982 } 2983 2984 break; 2985 2986 case OP_DEST_MEM: 2987 if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) { 2988 WARN_FUNC("unknown stack-related memory operation", 2989 insn->sec, insn->offset); 2990 return -1; 2991 } 2992 2993 /* pop mem */ 2994 cfi->stack_size -= 8; 2995 if (cfa->base == CFI_SP) 2996 cfa->offset -= 8; 2997 2998 break; 2999 3000 default: 3001 WARN_FUNC("unknown stack-related instruction", 3002 insn->sec, insn->offset); 3003 return -1; 3004 } 3005 3006 return 0; 3007 } 3008 3009 /* 3010 * The stack layouts of alternatives instructions can sometimes diverge when 3011 * they have stack modifications. That's fine as long as the potential stack 3012 * layouts don't conflict at any given potential instruction boundary. 3013 * 3014 * Flatten the CFIs of the different alternative code streams (both original 3015 * and replacement) into a single shared CFI array which can be used to detect 3016 * conflicts and nicely feed a linear array of ORC entries to the unwinder. 3017 */ 3018 static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn) 3019 { 3020 struct cfi_state **alt_cfi; 3021 int group_off; 3022 3023 if (!insn->alt_group) 3024 return 0; 3025 3026 if (!insn->cfi) { 3027 WARN("CFI missing"); 3028 return -1; 3029 } 3030 3031 alt_cfi = insn->alt_group->cfi; 3032 group_off = insn->offset - insn->alt_group->first_insn->offset; 3033 3034 if (!alt_cfi[group_off]) { 3035 alt_cfi[group_off] = insn->cfi; 3036 } else { 3037 if (cficmp(alt_cfi[group_off], insn->cfi)) { 3038 WARN_FUNC("stack layout conflict in alternatives", 3039 insn->sec, insn->offset); 3040 return -1; 3041 } 3042 } 3043 3044 return 0; 3045 } 3046 3047 static int handle_insn_ops(struct instruction *insn, 3048 struct instruction *next_insn, 3049 struct insn_state *state) 3050 { 3051 struct stack_op *op; 3052 3053 list_for_each_entry(op, &insn->stack_ops, list) { 3054 3055 if (update_cfi_state(insn, next_insn, &state->cfi, op)) 3056 return 1; 3057 3058 if (!insn->alt_group) 3059 continue; 3060 3061 if (op->dest.type == OP_DEST_PUSHF) { 3062 if (!state->uaccess_stack) { 3063 state->uaccess_stack = 1; 3064 } else if (state->uaccess_stack >> 31) { 3065 WARN_FUNC("PUSHF stack exhausted", 3066 insn->sec, insn->offset); 3067 return 1; 3068 } 3069 state->uaccess_stack <<= 1; 3070 state->uaccess_stack |= state->uaccess; 3071 } 3072 3073 if (op->src.type == OP_SRC_POPF) { 3074 if (state->uaccess_stack) { 3075 state->uaccess = state->uaccess_stack & 1; 3076 state->uaccess_stack >>= 1; 3077 if (state->uaccess_stack == 1) 3078 state->uaccess_stack = 0; 3079 } 3080 } 3081 } 3082 3083 return 0; 3084 } 3085 3086 static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2) 3087 { 3088 struct cfi_state *cfi1 = insn->cfi; 3089 int i; 3090 3091 if (!cfi1) { 3092 WARN("CFI missing"); 3093 return false; 3094 } 3095 3096 if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) { 3097 3098 WARN_FUNC("stack state mismatch: cfa1=%d%+d cfa2=%d%+d", 3099 insn->sec, insn->offset, 3100 cfi1->cfa.base, cfi1->cfa.offset, 3101 cfi2->cfa.base, cfi2->cfa.offset); 3102 3103 } else if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) { 3104 for (i = 0; i < CFI_NUM_REGS; i++) { 3105 if (!memcmp(&cfi1->regs[i], &cfi2->regs[i], 3106 sizeof(struct cfi_reg))) 3107 continue; 3108 3109 WARN_FUNC("stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d", 3110 insn->sec, insn->offset, 3111 i, cfi1->regs[i].base, cfi1->regs[i].offset, 3112 i, cfi2->regs[i].base, cfi2->regs[i].offset); 3113 break; 3114 } 3115 3116 } else if (cfi1->type != cfi2->type) { 3117 3118 WARN_FUNC("stack state mismatch: type1=%d type2=%d", 3119 insn->sec, insn->offset, cfi1->type, cfi2->type); 3120 3121 } else if (cfi1->drap != cfi2->drap || 3122 (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) || 3123 (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) { 3124 3125 WARN_FUNC("stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)", 3126 insn->sec, insn->offset, 3127 cfi1->drap, cfi1->drap_reg, cfi1->drap_offset, 3128 cfi2->drap, cfi2->drap_reg, cfi2->drap_offset); 3129 3130 } else 3131 return true; 3132 3133 return false; 3134 } 3135 3136 static inline bool func_uaccess_safe(struct symbol *func) 3137 { 3138 if (func) 3139 return func->uaccess_safe; 3140 3141 return false; 3142 } 3143 3144 static inline const char *call_dest_name(struct instruction *insn) 3145 { 3146 static char pvname[19]; 3147 struct reloc *rel; 3148 int idx; 3149 3150 if (insn->call_dest) 3151 return insn->call_dest->name; 3152 3153 rel = insn_reloc(NULL, insn); 3154 if (rel && !strcmp(rel->sym->name, "pv_ops")) { 3155 idx = (rel->addend / sizeof(void *)); 3156 snprintf(pvname, sizeof(pvname), "pv_ops[%d]", idx); 3157 return pvname; 3158 } 3159 3160 return "{dynamic}"; 3161 } 3162 3163 static bool pv_call_dest(struct objtool_file *file, struct instruction *insn) 3164 { 3165 struct symbol *target; 3166 struct reloc *rel; 3167 int idx; 3168 3169 rel = insn_reloc(file, insn); 3170 if (!rel || strcmp(rel->sym->name, "pv_ops")) 3171 return false; 3172 3173 idx = (arch_dest_reloc_offset(rel->addend) / sizeof(void *)); 3174 3175 if (file->pv_ops[idx].clean) 3176 return true; 3177 3178 file->pv_ops[idx].clean = true; 3179 3180 list_for_each_entry(target, &file->pv_ops[idx].targets, pv_target) { 3181 if (!target->sec->noinstr) { 3182 WARN("pv_ops[%d]: %s", idx, target->name); 3183 file->pv_ops[idx].clean = false; 3184 } 3185 } 3186 3187 return file->pv_ops[idx].clean; 3188 } 3189 3190 static inline bool noinstr_call_dest(struct objtool_file *file, 3191 struct instruction *insn, 3192 struct symbol *func) 3193 { 3194 /* 3195 * We can't deal with indirect function calls at present; 3196 * assume they're instrumented. 3197 */ 3198 if (!func) { 3199 if (file->pv_ops) 3200 return pv_call_dest(file, insn); 3201 3202 return false; 3203 } 3204 3205 /* 3206 * If the symbol is from a noinstr section; we good. 3207 */ 3208 if (func->sec->noinstr) 3209 return true; 3210 3211 /* 3212 * The __ubsan_handle_*() calls are like WARN(), they only happen when 3213 * something 'BAD' happened. At the risk of taking the machine down, 3214 * let them proceed to get the message out. 3215 */ 3216 if (!strncmp(func->name, "__ubsan_handle_", 15)) 3217 return true; 3218 3219 return false; 3220 } 3221 3222 static int validate_call(struct objtool_file *file, 3223 struct instruction *insn, 3224 struct insn_state *state) 3225 { 3226 if (state->noinstr && state->instr <= 0 && 3227 !noinstr_call_dest(file, insn, insn->call_dest)) { 3228 WARN_FUNC("call to %s() leaves .noinstr.text section", 3229 insn->sec, insn->offset, call_dest_name(insn)); 3230 return 1; 3231 } 3232 3233 if (state->uaccess && !func_uaccess_safe(insn->call_dest)) { 3234 WARN_FUNC("call to %s() with UACCESS enabled", 3235 insn->sec, insn->offset, call_dest_name(insn)); 3236 return 1; 3237 } 3238 3239 if (state->df) { 3240 WARN_FUNC("call to %s() with DF set", 3241 insn->sec, insn->offset, call_dest_name(insn)); 3242 return 1; 3243 } 3244 3245 return 0; 3246 } 3247 3248 static int validate_sibling_call(struct objtool_file *file, 3249 struct instruction *insn, 3250 struct insn_state *state) 3251 { 3252 if (has_modified_stack_frame(insn, state)) { 3253 WARN_FUNC("sibling call from callable instruction with modified stack frame", 3254 insn->sec, insn->offset); 3255 return 1; 3256 } 3257 3258 return validate_call(file, insn, state); 3259 } 3260 3261 static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state) 3262 { 3263 if (state->noinstr && state->instr > 0) { 3264 WARN_FUNC("return with instrumentation enabled", 3265 insn->sec, insn->offset); 3266 return 1; 3267 } 3268 3269 if (state->uaccess && !func_uaccess_safe(func)) { 3270 WARN_FUNC("return with UACCESS enabled", 3271 insn->sec, insn->offset); 3272 return 1; 3273 } 3274 3275 if (!state->uaccess && func_uaccess_safe(func)) { 3276 WARN_FUNC("return with UACCESS disabled from a UACCESS-safe function", 3277 insn->sec, insn->offset); 3278 return 1; 3279 } 3280 3281 if (state->df) { 3282 WARN_FUNC("return with DF set", 3283 insn->sec, insn->offset); 3284 return 1; 3285 } 3286 3287 if (func && has_modified_stack_frame(insn, state)) { 3288 WARN_FUNC("return with modified stack frame", 3289 insn->sec, insn->offset); 3290 return 1; 3291 } 3292 3293 if (state->cfi.bp_scratch) { 3294 WARN_FUNC("BP used as a scratch register", 3295 insn->sec, insn->offset); 3296 return 1; 3297 } 3298 3299 return 0; 3300 } 3301 3302 static struct instruction *next_insn_to_validate(struct objtool_file *file, 3303 struct instruction *insn) 3304 { 3305 struct alt_group *alt_group = insn->alt_group; 3306 3307 /* 3308 * Simulate the fact that alternatives are patched in-place. When the 3309 * end of a replacement alt_group is reached, redirect objtool flow to 3310 * the end of the original alt_group. 3311 */ 3312 if (alt_group && insn == alt_group->last_insn && alt_group->orig_group) 3313 return next_insn_same_sec(file, alt_group->orig_group->last_insn); 3314 3315 return next_insn_same_sec(file, insn); 3316 } 3317 3318 /* 3319 * Follow the branch starting at the given instruction, and recursively follow 3320 * any other branches (jumps). Meanwhile, track the frame pointer state at 3321 * each instruction and validate all the rules described in 3322 * tools/objtool/Documentation/objtool.txt. 3323 */ 3324 static int validate_branch(struct objtool_file *file, struct symbol *func, 3325 struct instruction *insn, struct insn_state state) 3326 { 3327 struct alternative *alt; 3328 struct instruction *next_insn, *prev_insn = NULL; 3329 struct section *sec; 3330 u8 visited; 3331 int ret; 3332 3333 sec = insn->sec; 3334 3335 while (1) { 3336 next_insn = next_insn_to_validate(file, insn); 3337 3338 if (func && insn->func && func != insn->func->pfunc) { 3339 /* Ignore KCFI type preambles, which always fall through */ 3340 if (!strncmp(func->name, "__cfi_", 6)) 3341 return 0; 3342 3343 WARN("%s() falls through to next function %s()", 3344 func->name, insn->func->name); 3345 return 1; 3346 } 3347 3348 if (func && insn->ignore) { 3349 WARN_FUNC("BUG: why am I validating an ignored function?", 3350 sec, insn->offset); 3351 return 1; 3352 } 3353 3354 visited = VISITED_BRANCH << state.uaccess; 3355 if (insn->visited & VISITED_BRANCH_MASK) { 3356 if (!insn->hint && !insn_cfi_match(insn, &state.cfi)) 3357 return 1; 3358 3359 if (insn->visited & visited) 3360 return 0; 3361 } else { 3362 nr_insns_visited++; 3363 } 3364 3365 if (state.noinstr) 3366 state.instr += insn->instr; 3367 3368 if (insn->hint) { 3369 if (insn->restore) { 3370 struct instruction *save_insn, *i; 3371 3372 i = insn; 3373 save_insn = NULL; 3374 3375 sym_for_each_insn_continue_reverse(file, func, i) { 3376 if (i->save) { 3377 save_insn = i; 3378 break; 3379 } 3380 } 3381 3382 if (!save_insn) { 3383 WARN_FUNC("no corresponding CFI save for CFI restore", 3384 sec, insn->offset); 3385 return 1; 3386 } 3387 3388 if (!save_insn->visited) { 3389 WARN_FUNC("objtool isn't smart enough to handle this CFI save/restore combo", 3390 sec, insn->offset); 3391 return 1; 3392 } 3393 3394 insn->cfi = save_insn->cfi; 3395 nr_cfi_reused++; 3396 } 3397 3398 state.cfi = *insn->cfi; 3399 } else { 3400 /* XXX track if we actually changed state.cfi */ 3401 3402 if (prev_insn && !cficmp(prev_insn->cfi, &state.cfi)) { 3403 insn->cfi = prev_insn->cfi; 3404 nr_cfi_reused++; 3405 } else { 3406 insn->cfi = cfi_hash_find_or_add(&state.cfi); 3407 } 3408 } 3409 3410 insn->visited |= visited; 3411 3412 if (propagate_alt_cfi(file, insn)) 3413 return 1; 3414 3415 if (!insn->ignore_alts && !list_empty(&insn->alts)) { 3416 bool skip_orig = false; 3417 3418 list_for_each_entry(alt, &insn->alts, list) { 3419 if (alt->skip_orig) 3420 skip_orig = true; 3421 3422 ret = validate_branch(file, func, alt->insn, state); 3423 if (ret) { 3424 if (opts.backtrace) 3425 BT_FUNC("(alt)", insn); 3426 return ret; 3427 } 3428 } 3429 3430 if (skip_orig) 3431 return 0; 3432 } 3433 3434 if (handle_insn_ops(insn, next_insn, &state)) 3435 return 1; 3436 3437 switch (insn->type) { 3438 3439 case INSN_RETURN: 3440 return validate_return(func, insn, &state); 3441 3442 case INSN_CALL: 3443 case INSN_CALL_DYNAMIC: 3444 ret = validate_call(file, insn, &state); 3445 if (ret) 3446 return ret; 3447 3448 if (opts.stackval && func && !is_fentry_call(insn) && 3449 !has_valid_stack_frame(&state)) { 3450 WARN_FUNC("call without frame pointer save/setup", 3451 sec, insn->offset); 3452 return 1; 3453 } 3454 3455 if (insn->dead_end) 3456 return 0; 3457 3458 break; 3459 3460 case INSN_JUMP_CONDITIONAL: 3461 case INSN_JUMP_UNCONDITIONAL: 3462 if (is_sibling_call(insn)) { 3463 ret = validate_sibling_call(file, insn, &state); 3464 if (ret) 3465 return ret; 3466 3467 } else if (insn->jump_dest) { 3468 ret = validate_branch(file, func, 3469 insn->jump_dest, state); 3470 if (ret) { 3471 if (opts.backtrace) 3472 BT_FUNC("(branch)", insn); 3473 return ret; 3474 } 3475 } 3476 3477 if (insn->type == INSN_JUMP_UNCONDITIONAL) 3478 return 0; 3479 3480 break; 3481 3482 case INSN_JUMP_DYNAMIC: 3483 case INSN_JUMP_DYNAMIC_CONDITIONAL: 3484 if (is_sibling_call(insn)) { 3485 ret = validate_sibling_call(file, insn, &state); 3486 if (ret) 3487 return ret; 3488 } 3489 3490 if (insn->type == INSN_JUMP_DYNAMIC) 3491 return 0; 3492 3493 break; 3494 3495 case INSN_CONTEXT_SWITCH: 3496 if (func && (!next_insn || !next_insn->hint)) { 3497 WARN_FUNC("unsupported instruction in callable function", 3498 sec, insn->offset); 3499 return 1; 3500 } 3501 return 0; 3502 3503 case INSN_STAC: 3504 if (state.uaccess) { 3505 WARN_FUNC("recursive UACCESS enable", sec, insn->offset); 3506 return 1; 3507 } 3508 3509 state.uaccess = true; 3510 break; 3511 3512 case INSN_CLAC: 3513 if (!state.uaccess && func) { 3514 WARN_FUNC("redundant UACCESS disable", sec, insn->offset); 3515 return 1; 3516 } 3517 3518 if (func_uaccess_safe(func) && !state.uaccess_stack) { 3519 WARN_FUNC("UACCESS-safe disables UACCESS", sec, insn->offset); 3520 return 1; 3521 } 3522 3523 state.uaccess = false; 3524 break; 3525 3526 case INSN_STD: 3527 if (state.df) { 3528 WARN_FUNC("recursive STD", sec, insn->offset); 3529 return 1; 3530 } 3531 3532 state.df = true; 3533 break; 3534 3535 case INSN_CLD: 3536 if (!state.df && func) { 3537 WARN_FUNC("redundant CLD", sec, insn->offset); 3538 return 1; 3539 } 3540 3541 state.df = false; 3542 break; 3543 3544 default: 3545 break; 3546 } 3547 3548 if (insn->dead_end) 3549 return 0; 3550 3551 if (!next_insn) { 3552 if (state.cfi.cfa.base == CFI_UNDEFINED) 3553 return 0; 3554 WARN("%s: unexpected end of section", sec->name); 3555 return 1; 3556 } 3557 3558 prev_insn = insn; 3559 insn = next_insn; 3560 } 3561 3562 return 0; 3563 } 3564 3565 static int validate_unwind_hints(struct objtool_file *file, struct section *sec) 3566 { 3567 struct instruction *insn; 3568 struct insn_state state; 3569 int ret, warnings = 0; 3570 3571 if (!file->hints) 3572 return 0; 3573 3574 init_insn_state(file, &state, sec); 3575 3576 if (sec) { 3577 insn = find_insn(file, sec, 0); 3578 if (!insn) 3579 return 0; 3580 } else { 3581 insn = list_first_entry(&file->insn_list, typeof(*insn), list); 3582 } 3583 3584 while (&insn->list != &file->insn_list && (!sec || insn->sec == sec)) { 3585 if (insn->hint && !insn->visited && !insn->ignore) { 3586 ret = validate_branch(file, insn->func, insn, state); 3587 if (ret && opts.backtrace) 3588 BT_FUNC("<=== (hint)", insn); 3589 warnings += ret; 3590 } 3591 3592 insn = list_next_entry(insn, list); 3593 } 3594 3595 return warnings; 3596 } 3597 3598 /* 3599 * Validate rethunk entry constraint: must untrain RET before the first RET. 3600 * 3601 * Follow every branch (intra-function) and ensure ANNOTATE_UNRET_END comes 3602 * before an actual RET instruction. 3603 */ 3604 static int validate_entry(struct objtool_file *file, struct instruction *insn) 3605 { 3606 struct instruction *next, *dest; 3607 int ret, warnings = 0; 3608 3609 for (;;) { 3610 next = next_insn_to_validate(file, insn); 3611 3612 if (insn->visited & VISITED_ENTRY) 3613 return 0; 3614 3615 insn->visited |= VISITED_ENTRY; 3616 3617 if (!insn->ignore_alts && !list_empty(&insn->alts)) { 3618 struct alternative *alt; 3619 bool skip_orig = false; 3620 3621 list_for_each_entry(alt, &insn->alts, list) { 3622 if (alt->skip_orig) 3623 skip_orig = true; 3624 3625 ret = validate_entry(file, alt->insn); 3626 if (ret) { 3627 if (opts.backtrace) 3628 BT_FUNC("(alt)", insn); 3629 return ret; 3630 } 3631 } 3632 3633 if (skip_orig) 3634 return 0; 3635 } 3636 3637 switch (insn->type) { 3638 3639 case INSN_CALL_DYNAMIC: 3640 case INSN_JUMP_DYNAMIC: 3641 case INSN_JUMP_DYNAMIC_CONDITIONAL: 3642 WARN_FUNC("early indirect call", insn->sec, insn->offset); 3643 return 1; 3644 3645 case INSN_JUMP_UNCONDITIONAL: 3646 case INSN_JUMP_CONDITIONAL: 3647 if (!is_sibling_call(insn)) { 3648 if (!insn->jump_dest) { 3649 WARN_FUNC("unresolved jump target after linking?!?", 3650 insn->sec, insn->offset); 3651 return -1; 3652 } 3653 ret = validate_entry(file, insn->jump_dest); 3654 if (ret) { 3655 if (opts.backtrace) { 3656 BT_FUNC("(branch%s)", insn, 3657 insn->type == INSN_JUMP_CONDITIONAL ? "-cond" : ""); 3658 } 3659 return ret; 3660 } 3661 3662 if (insn->type == INSN_JUMP_UNCONDITIONAL) 3663 return 0; 3664 3665 break; 3666 } 3667 3668 /* fallthrough */ 3669 case INSN_CALL: 3670 dest = find_insn(file, insn->call_dest->sec, 3671 insn->call_dest->offset); 3672 if (!dest) { 3673 WARN("Unresolved function after linking!?: %s", 3674 insn->call_dest->name); 3675 return -1; 3676 } 3677 3678 ret = validate_entry(file, dest); 3679 if (ret) { 3680 if (opts.backtrace) 3681 BT_FUNC("(call)", insn); 3682 return ret; 3683 } 3684 /* 3685 * If a call returns without error, it must have seen UNTRAIN_RET. 3686 * Therefore any non-error return is a success. 3687 */ 3688 return 0; 3689 3690 case INSN_RETURN: 3691 WARN_FUNC("RET before UNTRAIN", insn->sec, insn->offset); 3692 return 1; 3693 3694 case INSN_NOP: 3695 if (insn->retpoline_safe) 3696 return 0; 3697 break; 3698 3699 default: 3700 break; 3701 } 3702 3703 if (!next) { 3704 WARN_FUNC("teh end!", insn->sec, insn->offset); 3705 return -1; 3706 } 3707 insn = next; 3708 } 3709 3710 return warnings; 3711 } 3712 3713 /* 3714 * Validate that all branches starting at 'insn->entry' encounter UNRET_END 3715 * before RET. 3716 */ 3717 static int validate_unret(struct objtool_file *file) 3718 { 3719 struct instruction *insn; 3720 int ret, warnings = 0; 3721 3722 for_each_insn(file, insn) { 3723 if (!insn->entry) 3724 continue; 3725 3726 ret = validate_entry(file, insn); 3727 if (ret < 0) { 3728 WARN_FUNC("Failed UNRET validation", insn->sec, insn->offset); 3729 return ret; 3730 } 3731 warnings += ret; 3732 } 3733 3734 return warnings; 3735 } 3736 3737 static int validate_retpoline(struct objtool_file *file) 3738 { 3739 struct instruction *insn; 3740 int warnings = 0; 3741 3742 for_each_insn(file, insn) { 3743 if (insn->type != INSN_JUMP_DYNAMIC && 3744 insn->type != INSN_CALL_DYNAMIC && 3745 insn->type != INSN_RETURN) 3746 continue; 3747 3748 if (insn->retpoline_safe) 3749 continue; 3750 3751 /* 3752 * .init.text code is ran before userspace and thus doesn't 3753 * strictly need retpolines, except for modules which are 3754 * loaded late, they very much do need retpoline in their 3755 * .init.text 3756 */ 3757 if (!strcmp(insn->sec->name, ".init.text") && !opts.module) 3758 continue; 3759 3760 if (insn->type == INSN_RETURN) { 3761 if (opts.rethunk) { 3762 WARN_FUNC("'naked' return found in RETHUNK build", 3763 insn->sec, insn->offset); 3764 } else 3765 continue; 3766 } else { 3767 WARN_FUNC("indirect %s found in RETPOLINE build", 3768 insn->sec, insn->offset, 3769 insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call"); 3770 } 3771 3772 warnings++; 3773 } 3774 3775 return warnings; 3776 } 3777 3778 static bool is_kasan_insn(struct instruction *insn) 3779 { 3780 return (insn->type == INSN_CALL && 3781 !strcmp(insn->call_dest->name, "__asan_handle_no_return")); 3782 } 3783 3784 static bool is_ubsan_insn(struct instruction *insn) 3785 { 3786 return (insn->type == INSN_CALL && 3787 !strcmp(insn->call_dest->name, 3788 "__ubsan_handle_builtin_unreachable")); 3789 } 3790 3791 static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn) 3792 { 3793 int i; 3794 struct instruction *prev_insn; 3795 3796 if (insn->ignore || insn->type == INSN_NOP || insn->type == INSN_TRAP) 3797 return true; 3798 3799 /* 3800 * Ignore alternative replacement instructions. This can happen 3801 * when a whitelisted function uses one of the ALTERNATIVE macros. 3802 */ 3803 if (!strcmp(insn->sec->name, ".altinstr_replacement") || 3804 !strcmp(insn->sec->name, ".altinstr_aux")) 3805 return true; 3806 3807 /* 3808 * Whole archive runs might encounter dead code from weak symbols. 3809 * This is where the linker will have dropped the weak symbol in 3810 * favour of a regular symbol, but leaves the code in place. 3811 * 3812 * In this case we'll find a piece of code (whole function) that is not 3813 * covered by a !section symbol. Ignore them. 3814 */ 3815 if (opts.link && !insn->func) { 3816 int size = find_symbol_hole_containing(insn->sec, insn->offset); 3817 unsigned long end = insn->offset + size; 3818 3819 if (!size) /* not a hole */ 3820 return false; 3821 3822 if (size < 0) /* hole until the end */ 3823 return true; 3824 3825 sec_for_each_insn_continue(file, insn) { 3826 /* 3827 * If we reach a visited instruction at or before the 3828 * end of the hole, ignore the unreachable. 3829 */ 3830 if (insn->visited) 3831 return true; 3832 3833 if (insn->offset >= end) 3834 break; 3835 3836 /* 3837 * If this hole jumps to a .cold function, mark it ignore too. 3838 */ 3839 if (insn->jump_dest && insn->jump_dest->func && 3840 strstr(insn->jump_dest->func->name, ".cold")) { 3841 struct instruction *dest = insn->jump_dest; 3842 func_for_each_insn(file, dest->func, dest) 3843 dest->ignore = true; 3844 } 3845 } 3846 3847 return false; 3848 } 3849 3850 if (!insn->func) 3851 return false; 3852 3853 if (insn->func->static_call_tramp) 3854 return true; 3855 3856 /* 3857 * CONFIG_UBSAN_TRAP inserts a UD2 when it sees 3858 * __builtin_unreachable(). The BUG() macro has an unreachable() after 3859 * the UD2, which causes GCC's undefined trap logic to emit another UD2 3860 * (or occasionally a JMP to UD2). 3861 * 3862 * It may also insert a UD2 after calling a __noreturn function. 3863 */ 3864 prev_insn = list_prev_entry(insn, list); 3865 if ((prev_insn->dead_end || dead_end_function(file, prev_insn->call_dest)) && 3866 (insn->type == INSN_BUG || 3867 (insn->type == INSN_JUMP_UNCONDITIONAL && 3868 insn->jump_dest && insn->jump_dest->type == INSN_BUG))) 3869 return true; 3870 3871 /* 3872 * Check if this (or a subsequent) instruction is related to 3873 * CONFIG_UBSAN or CONFIG_KASAN. 3874 * 3875 * End the search at 5 instructions to avoid going into the weeds. 3876 */ 3877 for (i = 0; i < 5; i++) { 3878 3879 if (is_kasan_insn(insn) || is_ubsan_insn(insn)) 3880 return true; 3881 3882 if (insn->type == INSN_JUMP_UNCONDITIONAL) { 3883 if (insn->jump_dest && 3884 insn->jump_dest->func == insn->func) { 3885 insn = insn->jump_dest; 3886 continue; 3887 } 3888 3889 break; 3890 } 3891 3892 if (insn->offset + insn->len >= insn->func->offset + insn->func->len) 3893 break; 3894 3895 insn = list_next_entry(insn, list); 3896 } 3897 3898 return false; 3899 } 3900 3901 static int validate_symbol(struct objtool_file *file, struct section *sec, 3902 struct symbol *sym, struct insn_state *state) 3903 { 3904 struct instruction *insn; 3905 int ret; 3906 3907 if (!sym->len) { 3908 WARN("%s() is missing an ELF size annotation", sym->name); 3909 return 1; 3910 } 3911 3912 if (sym->pfunc != sym || sym->alias != sym) 3913 return 0; 3914 3915 insn = find_insn(file, sec, sym->offset); 3916 if (!insn || insn->ignore || insn->visited) 3917 return 0; 3918 3919 state->uaccess = sym->uaccess_safe; 3920 3921 ret = validate_branch(file, insn->func, insn, *state); 3922 if (ret && opts.backtrace) 3923 BT_FUNC("<=== (sym)", insn); 3924 return ret; 3925 } 3926 3927 static int validate_section(struct objtool_file *file, struct section *sec) 3928 { 3929 struct insn_state state; 3930 struct symbol *func; 3931 int warnings = 0; 3932 3933 list_for_each_entry(func, &sec->symbol_list, list) { 3934 if (func->type != STT_FUNC) 3935 continue; 3936 3937 init_insn_state(file, &state, sec); 3938 set_func_state(&state.cfi); 3939 3940 warnings += validate_symbol(file, sec, func, &state); 3941 } 3942 3943 return warnings; 3944 } 3945 3946 static int validate_noinstr_sections(struct objtool_file *file) 3947 { 3948 struct section *sec; 3949 int warnings = 0; 3950 3951 sec = find_section_by_name(file->elf, ".noinstr.text"); 3952 if (sec) { 3953 warnings += validate_section(file, sec); 3954 warnings += validate_unwind_hints(file, sec); 3955 } 3956 3957 sec = find_section_by_name(file->elf, ".entry.text"); 3958 if (sec) { 3959 warnings += validate_section(file, sec); 3960 warnings += validate_unwind_hints(file, sec); 3961 } 3962 3963 return warnings; 3964 } 3965 3966 static int validate_functions(struct objtool_file *file) 3967 { 3968 struct section *sec; 3969 int warnings = 0; 3970 3971 for_each_sec(file, sec) { 3972 if (!(sec->sh.sh_flags & SHF_EXECINSTR)) 3973 continue; 3974 3975 warnings += validate_section(file, sec); 3976 } 3977 3978 return warnings; 3979 } 3980 3981 static void mark_endbr_used(struct instruction *insn) 3982 { 3983 if (!list_empty(&insn->call_node)) 3984 list_del_init(&insn->call_node); 3985 } 3986 3987 static int validate_ibt_insn(struct objtool_file *file, struct instruction *insn) 3988 { 3989 struct instruction *dest; 3990 struct reloc *reloc; 3991 unsigned long off; 3992 int warnings = 0; 3993 3994 /* 3995 * Looking for function pointer load relocations. Ignore 3996 * direct/indirect branches: 3997 */ 3998 switch (insn->type) { 3999 case INSN_CALL: 4000 case INSN_CALL_DYNAMIC: 4001 case INSN_JUMP_CONDITIONAL: 4002 case INSN_JUMP_UNCONDITIONAL: 4003 case INSN_JUMP_DYNAMIC: 4004 case INSN_JUMP_DYNAMIC_CONDITIONAL: 4005 case INSN_RETURN: 4006 case INSN_NOP: 4007 return 0; 4008 default: 4009 break; 4010 } 4011 4012 for (reloc = insn_reloc(file, insn); 4013 reloc; 4014 reloc = find_reloc_by_dest_range(file->elf, insn->sec, 4015 reloc->offset + 1, 4016 (insn->offset + insn->len) - (reloc->offset + 1))) { 4017 4018 /* 4019 * static_call_update() references the trampoline, which 4020 * doesn't have (or need) ENDBR. Skip warning in that case. 4021 */ 4022 if (reloc->sym->static_call_tramp) 4023 continue; 4024 4025 off = reloc->sym->offset; 4026 if (reloc->type == R_X86_64_PC32 || reloc->type == R_X86_64_PLT32) 4027 off += arch_dest_reloc_offset(reloc->addend); 4028 else 4029 off += reloc->addend; 4030 4031 dest = find_insn(file, reloc->sym->sec, off); 4032 if (!dest) 4033 continue; 4034 4035 if (dest->type == INSN_ENDBR) { 4036 mark_endbr_used(dest); 4037 continue; 4038 } 4039 4040 if (dest->func && dest->func == insn->func) { 4041 /* 4042 * Anything from->to self is either _THIS_IP_ or 4043 * IRET-to-self. 4044 * 4045 * There is no sane way to annotate _THIS_IP_ since the 4046 * compiler treats the relocation as a constant and is 4047 * happy to fold in offsets, skewing any annotation we 4048 * do, leading to vast amounts of false-positives. 4049 * 4050 * There's also compiler generated _THIS_IP_ through 4051 * KCOV and such which we have no hope of annotating. 4052 * 4053 * As such, blanket accept self-references without 4054 * issue. 4055 */ 4056 continue; 4057 } 4058 4059 if (dest->noendbr) 4060 continue; 4061 4062 WARN_FUNC("relocation to !ENDBR: %s", 4063 insn->sec, insn->offset, 4064 offstr(dest->sec, dest->offset)); 4065 4066 warnings++; 4067 } 4068 4069 return warnings; 4070 } 4071 4072 static int validate_ibt_data_reloc(struct objtool_file *file, 4073 struct reloc *reloc) 4074 { 4075 struct instruction *dest; 4076 4077 dest = find_insn(file, reloc->sym->sec, 4078 reloc->sym->offset + reloc->addend); 4079 if (!dest) 4080 return 0; 4081 4082 if (dest->type == INSN_ENDBR) { 4083 mark_endbr_used(dest); 4084 return 0; 4085 } 4086 4087 if (dest->noendbr) 4088 return 0; 4089 4090 WARN_FUNC("data relocation to !ENDBR: %s", 4091 reloc->sec->base, reloc->offset, 4092 offstr(dest->sec, dest->offset)); 4093 4094 return 1; 4095 } 4096 4097 /* 4098 * Validate IBT rules and remove used ENDBR instructions from the seal list. 4099 * Unused ENDBR instructions will be annotated for sealing (i.e., replaced with 4100 * NOPs) later, in create_ibt_endbr_seal_sections(). 4101 */ 4102 static int validate_ibt(struct objtool_file *file) 4103 { 4104 struct section *sec; 4105 struct reloc *reloc; 4106 struct instruction *insn; 4107 int warnings = 0; 4108 4109 for_each_insn(file, insn) 4110 warnings += validate_ibt_insn(file, insn); 4111 4112 for_each_sec(file, sec) { 4113 4114 /* Already done by validate_ibt_insn() */ 4115 if (sec->sh.sh_flags & SHF_EXECINSTR) 4116 continue; 4117 4118 if (!sec->reloc) 4119 continue; 4120 4121 /* 4122 * These sections can reference text addresses, but not with 4123 * the intent to indirect branch to them. 4124 */ 4125 if ((!strncmp(sec->name, ".discard", 8) && 4126 strcmp(sec->name, ".discard.ibt_endbr_noseal")) || 4127 !strncmp(sec->name, ".debug", 6) || 4128 !strcmp(sec->name, ".altinstructions") || 4129 !strcmp(sec->name, ".ibt_endbr_seal") || 4130 !strcmp(sec->name, ".orc_unwind_ip") || 4131 !strcmp(sec->name, ".parainstructions") || 4132 !strcmp(sec->name, ".retpoline_sites") || 4133 !strcmp(sec->name, ".smp_locks") || 4134 !strcmp(sec->name, ".static_call_sites") || 4135 !strcmp(sec->name, "_error_injection_whitelist") || 4136 !strcmp(sec->name, "_kprobe_blacklist") || 4137 !strcmp(sec->name, "__bug_table") || 4138 !strcmp(sec->name, "__ex_table") || 4139 !strcmp(sec->name, "__jump_table") || 4140 !strcmp(sec->name, "__mcount_loc") || 4141 !strcmp(sec->name, ".kcfi_traps") || 4142 strstr(sec->name, "__patchable_function_entries")) 4143 continue; 4144 4145 list_for_each_entry(reloc, &sec->reloc->reloc_list, list) 4146 warnings += validate_ibt_data_reloc(file, reloc); 4147 } 4148 4149 return warnings; 4150 } 4151 4152 static int validate_sls(struct objtool_file *file) 4153 { 4154 struct instruction *insn, *next_insn; 4155 int warnings = 0; 4156 4157 for_each_insn(file, insn) { 4158 next_insn = next_insn_same_sec(file, insn); 4159 4160 if (insn->retpoline_safe) 4161 continue; 4162 4163 switch (insn->type) { 4164 case INSN_RETURN: 4165 if (!next_insn || next_insn->type != INSN_TRAP) { 4166 WARN_FUNC("missing int3 after ret", 4167 insn->sec, insn->offset); 4168 warnings++; 4169 } 4170 4171 break; 4172 case INSN_JUMP_DYNAMIC: 4173 if (!next_insn || next_insn->type != INSN_TRAP) { 4174 WARN_FUNC("missing int3 after indirect jump", 4175 insn->sec, insn->offset); 4176 warnings++; 4177 } 4178 break; 4179 default: 4180 break; 4181 } 4182 } 4183 4184 return warnings; 4185 } 4186 4187 static int validate_reachable_instructions(struct objtool_file *file) 4188 { 4189 struct instruction *insn; 4190 4191 if (file->ignore_unreachables) 4192 return 0; 4193 4194 for_each_insn(file, insn) { 4195 if (insn->visited || ignore_unreachable_insn(file, insn)) 4196 continue; 4197 4198 WARN_FUNC("unreachable instruction", insn->sec, insn->offset); 4199 return 1; 4200 } 4201 4202 return 0; 4203 } 4204 4205 int check(struct objtool_file *file) 4206 { 4207 int ret, warnings = 0; 4208 4209 arch_initial_func_cfi_state(&initial_func_cfi); 4210 init_cfi_state(&init_cfi); 4211 init_cfi_state(&func_cfi); 4212 set_func_state(&func_cfi); 4213 4214 if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3))) 4215 goto out; 4216 4217 cfi_hash_add(&init_cfi); 4218 cfi_hash_add(&func_cfi); 4219 4220 ret = decode_sections(file); 4221 if (ret < 0) 4222 goto out; 4223 4224 warnings += ret; 4225 4226 if (list_empty(&file->insn_list)) 4227 goto out; 4228 4229 if (opts.retpoline) { 4230 ret = validate_retpoline(file); 4231 if (ret < 0) 4232 return ret; 4233 warnings += ret; 4234 } 4235 4236 if (opts.stackval || opts.orc || opts.uaccess) { 4237 ret = validate_functions(file); 4238 if (ret < 0) 4239 goto out; 4240 warnings += ret; 4241 4242 ret = validate_unwind_hints(file, NULL); 4243 if (ret < 0) 4244 goto out; 4245 warnings += ret; 4246 4247 if (!warnings) { 4248 ret = validate_reachable_instructions(file); 4249 if (ret < 0) 4250 goto out; 4251 warnings += ret; 4252 } 4253 4254 } else if (opts.noinstr) { 4255 ret = validate_noinstr_sections(file); 4256 if (ret < 0) 4257 goto out; 4258 warnings += ret; 4259 } 4260 4261 if (opts.unret) { 4262 /* 4263 * Must be after validate_branch() and friends, it plays 4264 * further games with insn->visited. 4265 */ 4266 ret = validate_unret(file); 4267 if (ret < 0) 4268 return ret; 4269 warnings += ret; 4270 } 4271 4272 if (opts.ibt) { 4273 ret = validate_ibt(file); 4274 if (ret < 0) 4275 goto out; 4276 warnings += ret; 4277 } 4278 4279 if (opts.sls) { 4280 ret = validate_sls(file); 4281 if (ret < 0) 4282 goto out; 4283 warnings += ret; 4284 } 4285 4286 if (opts.static_call) { 4287 ret = create_static_call_sections(file); 4288 if (ret < 0) 4289 goto out; 4290 warnings += ret; 4291 } 4292 4293 if (opts.retpoline) { 4294 ret = create_retpoline_sites_sections(file); 4295 if (ret < 0) 4296 goto out; 4297 warnings += ret; 4298 } 4299 4300 if (opts.rethunk) { 4301 ret = create_return_sites_sections(file); 4302 if (ret < 0) 4303 goto out; 4304 warnings += ret; 4305 } 4306 4307 if (opts.mcount) { 4308 ret = create_mcount_loc_sections(file); 4309 if (ret < 0) 4310 goto out; 4311 warnings += ret; 4312 } 4313 4314 if (opts.ibt) { 4315 ret = create_ibt_endbr_seal_sections(file); 4316 if (ret < 0) 4317 goto out; 4318 warnings += ret; 4319 } 4320 4321 if (opts.orc && !list_empty(&file->insn_list)) { 4322 ret = orc_create(file); 4323 if (ret < 0) 4324 goto out; 4325 warnings += ret; 4326 } 4327 4328 4329 if (opts.stats) { 4330 printf("nr_insns_visited: %ld\n", nr_insns_visited); 4331 printf("nr_cfi: %ld\n", nr_cfi); 4332 printf("nr_cfi_reused: %ld\n", nr_cfi_reused); 4333 printf("nr_cfi_cache: %ld\n", nr_cfi_cache); 4334 } 4335 4336 out: 4337 /* 4338 * For now, don't fail the kernel build on fatal warnings. These 4339 * errors are still fairly common due to the growing matrix of 4340 * supported toolchains and their recent pace of change. 4341 */ 4342 return 0; 4343 } 4344