// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#include <string.h>
#include <stdlib.h>
#include <inttypes.h>
#include <sys/mman.h>

#include <arch/elf.h>
#include <objtool/builtin.h>
#include <objtool/cfi.h>
#include <objtool/arch.h>
#include <objtool/check.h>
#include <objtool/special.h>
#include <objtool/warn.h>
#include <objtool/endianness.h>

#include <linux/objtool.h>
#include <linux/hashtable.h>
#include <linux/kernel.h>
#include <linux/static_call_types.h>

/* One entry in an instruction's list of alternative code sequences. */
struct alternative {
	struct alternative *next;
	struct instruction *insn;
	bool skip_orig;		/* don't validate the original instruction stream */
};

/* cfi_state allocation/caching statistics, printed with --stats */
static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache;

static struct cfi_init_state initial_func_cfi;
static struct cfi_state init_cfi;
static struct cfi_state func_cfi;

/*
 * Look up the decoded instruction starting at exactly @sec + @offset.
 * Returns NULL if no instruction starts there.
 */
struct instruction *find_insn(struct objtool_file *file,
			      struct section *sec, unsigned long offset)
{
	struct instruction *insn;

	hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) {
		if (insn->sec == sec && insn->offset == offset)
			return insn;
	}

	return NULL;
}

/*
 * Next instruction in the same section.  Instructions are stored in
 * contiguous chunks, so within a chunk the next one is simply insn+1;
 * at a chunk boundary (idx == INSN_CHUNK_MAX) fall back to a hash lookup
 * by offset.
 */
struct instruction *next_insn_same_sec(struct objtool_file *file,
				       struct instruction *insn)
{
	if (insn->idx == INSN_CHUNK_MAX)
		return find_insn(file, insn->sec, insn->offset + insn->len);

	insn++;
	if (!insn->len)		/* unused (zeroed) slot at end of chunk */
		return NULL;

	return insn;
}

/*
 * Next instruction belonging to the same function, following the parent
 * into its cold subfunction (func->cfunc) when the parent's instructions
 * run out.
 */
static struct instruction *next_insn_same_func(struct objtool_file *file,
					       struct instruction *insn)
{
	struct instruction *next = next_insn_same_sec(file, insn);
	struct symbol *func = insn_func(insn);

	if (!func)
		return NULL;

	if (next && insn_func(next) == func)
		return next;

	/* Check if we're already in the subfunction: */
	if (func == func->cfunc)
		return NULL;

	/* Move to the subfunction: */
	return find_insn(file, func->cfunc->sec, func->cfunc->offset);
}

/*
 * Previous instruction in the same section, or NULL at the section start.
 * prev_len is only recorded for the first slot of a chunk; other slots use
 * simple pointer arithmetic.
 */
static struct instruction *prev_insn_same_sec(struct objtool_file *file,
					      struct instruction *insn)
{
	if (insn->idx == 0) {
		if (insn->prev_len)
			return find_insn(file, insn->sec, insn->offset - insn->prev_len);
		return NULL;
	}

	return insn - 1;
}

/* Previous instruction belonging to the same symbol, or NULL. */
static struct instruction *prev_insn_same_sym(struct objtool_file *file,
					      struct instruction *insn)
{
	struct instruction *prev = prev_insn_same_sec(file, insn);

	if (prev && insn_func(prev) == insn_func(insn))
		return prev;

	return NULL;
}

/*
 * The one-shot __fake loop exists only so the macro can declare a
 * section-local variable while remaining usable as a single statement.
 */
#define for_each_insn(file, insn)					\
	for (struct section *__sec, *__fake = (struct section *)1;	\
	     __fake; __fake = NULL)					\
		for_each_sec(file, __sec)				\
			sec_for_each_insn(file, __sec, insn)

#define func_for_each_insn(file, func, insn)				\
	for (insn = find_insn(file, func->sec, func->offset);		\
	     insn;							\
	     insn = next_insn_same_func(file, insn))

#define sym_for_each_insn(file, sym, insn)				\
	for (insn = find_insn(file, sym->sec, sym->offset);		\
	     insn && insn->offset < sym->offset + sym->len;		\
	     insn = next_insn_same_sec(file, insn))

#define sym_for_each_insn_continue_reverse(file, sym, insn)		\
	for (insn = prev_insn_same_sec(file, insn);			\
	     insn && insn->offset >= sym->offset;			\
	     insn = prev_insn_same_sec(file, insn))

#define sec_for_each_insn_from(file, insn)				\
	for (; insn; insn = next_insn_same_sec(file, insn))

#define sec_for_each_insn_continue(file, insn)				\
	for (insn = next_insn_same_sec(file, insn); insn;		\
	     insn = next_insn_same_sec(file, insn))

/*
 * Direct call/jump destination.  For dynamic calls/jumps the same storage
 * holds the jump table reloc instead (see insn_jump_table()), so NULL is
 * returned for those.
 */
static inline struct symbol *insn_call_dest(struct instruction *insn)
{
	if (insn->type == INSN_JUMP_DYNAMIC ||
	    insn->type == INSN_CALL_DYNAMIC)
		return NULL;

	return insn->_call_dest;
}

static inline struct reloc *insn_jump_table(struct instruction *insn)
{
	if (insn->type == INSN_JUMP_DYNAMIC ||
	    insn->type == INSN_CALL_DYNAMIC)
		return insn->_jump_table;

	return NULL;
}

/*
 * Does this indirect jump implement a switch jump table?  True when a
 * jump-table reloc is attached to the instruction itself, or to the first
 * instruction of the original group when this is a retpoline alternative.
 */
static bool is_jump_table_jump(struct instruction *insn)
{
	struct alt_group *alt_group = insn->alt_group;

	if (insn_jump_table(insn))
		return true;

	/* Retpoline alternative for a jump table? */
	return alt_group && alt_group->orig_group &&
	       insn_jump_table(alt_group->orig_group->first_insn);
}

static bool is_sibling_call(struct instruction *insn)
{
	/*
	 * Assume only STT_FUNC calls have jump-tables.
	 */
	if (insn_func(insn)) {
		/* An indirect jump is either a sibling call or a jump to a table. */
		if (insn->type == INSN_JUMP_DYNAMIC)
			return !is_jump_table_jump(insn);
	}

	/* add_jump_destinations() sets insn_call_dest(insn) for sibling calls. */
	return (is_static_jump(insn) && insn_call_dest(insn));
}

/*
 * This checks to see if the given function is a "noreturn" function.
 *
 * For global functions which are outside the scope of this object file, we
 * have to keep a manual list of them.
 *
 * For local functions, we have to detect them manually by simply looking for
 * the lack of a return instruction.
 */
static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
				int recursion)
{
	int i;
	struct instruction *insn;
	bool empty = true;

	/*
	 * Unfortunately these have to be hard coded because the noreturn
	 * attribute isn't provided in ELF data. Keep 'em sorted.
	 */
	static const char * const global_noreturns[] = {
		"__invalid_creds",
		"__module_put_and_kthread_exit",
		"__reiserfs_panic",
		"__stack_chk_fail",
		"__ubsan_handle_builtin_unreachable",
		"btrfs_assertfail",
		"cpu_bringup_and_idle",
		"cpu_startup_entry",
		"do_exit",
		"do_group_exit",
		"do_task_dead",
		"ex_handler_msr_mce",
		"fortify_panic",
		"kthread_complete_and_exit",
		"kthread_exit",
		"kunit_try_catch_throw",
		"lbug_with_loc",
		"machine_real_restart",
		"make_task_dead",
		"panic",
		"rewind_stack_and_make_dead",
		"sev_es_terminate",
		"snp_abort",
		"stop_this_cpu",
		"usercopy_abort",
		"xen_cpu_bringup_again",
		"xen_start_kernel",
	};

	if (!func)
		return false;

	/* A weak symbol may be overridden by a strong, returning one. */
	if (func->bind == STB_WEAK)
		return false;

	if (func->bind == STB_GLOBAL)
		for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
			if (!strcmp(func->name, global_noreturns[i]))
				return true;

	if (!func->len)
		return false;

	insn = find_insn(file, func->sec, func->offset);
	if (!insn || !insn_func(insn))
		return false;

	func_for_each_insn(file, func, insn) {
		empty = false;

		if (insn->type == INSN_RETURN)
			return false;
	}

	if (empty)
		return false;

	/*
	 * A function can have a sibling call instead of a return. In that
	 * case, the function's dead-end status depends on whether the target
	 * of the sibling call returns.
	 */
	func_for_each_insn(file, func, insn) {
		if (is_sibling_call(insn)) {
			struct instruction *dest = insn->jump_dest;

			if (!dest)
				/* sibling call to another file */
				return false;

			/* local sibling call */
			if (recursion == 5) {
				/*
				 * Infinite recursion: two functions have
				 * sibling calls to each other. This is a very
				 * rare case. It means they aren't dead ends.
276 */ 277 return false; 278 } 279 280 return __dead_end_function(file, insn_func(dest), recursion+1); 281 } 282 } 283 284 return true; 285 } 286 287 static bool dead_end_function(struct objtool_file *file, struct symbol *func) 288 { 289 return __dead_end_function(file, func, 0); 290 } 291 292 static void init_cfi_state(struct cfi_state *cfi) 293 { 294 int i; 295 296 for (i = 0; i < CFI_NUM_REGS; i++) { 297 cfi->regs[i].base = CFI_UNDEFINED; 298 cfi->vals[i].base = CFI_UNDEFINED; 299 } 300 cfi->cfa.base = CFI_UNDEFINED; 301 cfi->drap_reg = CFI_UNDEFINED; 302 cfi->drap_offset = -1; 303 } 304 305 static void init_insn_state(struct objtool_file *file, struct insn_state *state, 306 struct section *sec) 307 { 308 memset(state, 0, sizeof(*state)); 309 init_cfi_state(&state->cfi); 310 311 /* 312 * We need the full vmlinux for noinstr validation, otherwise we can 313 * not correctly determine insn_call_dest(insn)->sec (external symbols 314 * do not have a section). 315 */ 316 if (opts.link && opts.noinstr && sec) 317 state->noinstr = sec->noinstr; 318 } 319 320 static struct cfi_state *cfi_alloc(void) 321 { 322 struct cfi_state *cfi = calloc(sizeof(struct cfi_state), 1); 323 if (!cfi) { 324 WARN("calloc failed"); 325 exit(1); 326 } 327 nr_cfi++; 328 return cfi; 329 } 330 331 static int cfi_bits; 332 static struct hlist_head *cfi_hash; 333 334 static inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2) 335 { 336 return memcmp((void *)cfi1 + sizeof(cfi1->hash), 337 (void *)cfi2 + sizeof(cfi2->hash), 338 sizeof(struct cfi_state) - sizeof(struct hlist_node)); 339 } 340 341 static inline u32 cfi_key(struct cfi_state *cfi) 342 { 343 return jhash((void *)cfi + sizeof(cfi->hash), 344 sizeof(*cfi) - sizeof(cfi->hash), 0); 345 } 346 347 static struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi) 348 { 349 struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)]; 350 struct cfi_state *obj; 351 352 hlist_for_each_entry(obj, head, hash) { 353 if 
(!cficmp(cfi, obj)) { 354 nr_cfi_cache++; 355 return obj; 356 } 357 } 358 359 obj = cfi_alloc(); 360 *obj = *cfi; 361 hlist_add_head(&obj->hash, head); 362 363 return obj; 364 } 365 366 static void cfi_hash_add(struct cfi_state *cfi) 367 { 368 struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)]; 369 370 hlist_add_head(&cfi->hash, head); 371 } 372 373 static void *cfi_hash_alloc(unsigned long size) 374 { 375 cfi_bits = max(10, ilog2(size)); 376 cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits, 377 PROT_READ|PROT_WRITE, 378 MAP_PRIVATE|MAP_ANON, -1, 0); 379 if (cfi_hash == (void *)-1L) { 380 WARN("mmap fail cfi_hash"); 381 cfi_hash = NULL; 382 } else if (opts.stats) { 383 printf("cfi_bits: %d\n", cfi_bits); 384 } 385 386 return cfi_hash; 387 } 388 389 static unsigned long nr_insns; 390 static unsigned long nr_insns_visited; 391 392 /* 393 * Call the arch-specific instruction decoder for all the instructions and add 394 * them to the global instruction list. 395 */ 396 static int decode_instructions(struct objtool_file *file) 397 { 398 struct section *sec; 399 struct symbol *func; 400 unsigned long offset; 401 struct instruction *insn; 402 int ret; 403 404 for_each_sec(file, sec) { 405 struct instruction *insns = NULL; 406 u8 prev_len = 0; 407 u8 idx = 0; 408 409 if (!(sec->sh.sh_flags & SHF_EXECINSTR)) 410 continue; 411 412 if (strcmp(sec->name, ".altinstr_replacement") && 413 strcmp(sec->name, ".altinstr_aux") && 414 strncmp(sec->name, ".discard.", 9)) 415 sec->text = true; 416 417 if (!strcmp(sec->name, ".noinstr.text") || 418 !strcmp(sec->name, ".entry.text") || 419 !strcmp(sec->name, ".cpuidle.text") || 420 !strncmp(sec->name, ".text.__x86.", 12)) 421 sec->noinstr = true; 422 423 /* 424 * .init.text code is ran before userspace and thus doesn't 425 * strictly need retpolines, except for modules which are 426 * loaded late, they very much do need retpoline in their 427 * .init.text 428 */ 429 if (!strcmp(sec->name, ".init.text") && 
!opts.module) 430 sec->init = true; 431 432 for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) { 433 if (!insns || idx == INSN_CHUNK_MAX) { 434 insns = calloc(sizeof(*insn), INSN_CHUNK_SIZE); 435 if (!insns) { 436 WARN("malloc failed"); 437 return -1; 438 } 439 idx = 0; 440 } else { 441 idx++; 442 } 443 insn = &insns[idx]; 444 insn->idx = idx; 445 446 INIT_LIST_HEAD(&insn->call_node); 447 insn->sec = sec; 448 insn->offset = offset; 449 insn->prev_len = prev_len; 450 451 ret = arch_decode_instruction(file, sec, offset, 452 sec->sh.sh_size - offset, 453 insn); 454 if (ret) 455 return ret; 456 457 prev_len = insn->len; 458 459 /* 460 * By default, "ud2" is a dead end unless otherwise 461 * annotated, because GCC 7 inserts it for certain 462 * divide-by-zero cases. 463 */ 464 if (insn->type == INSN_BUG) 465 insn->dead_end = true; 466 467 hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset)); 468 nr_insns++; 469 } 470 471 // printf("%s: last chunk used: %d\n", sec->name, (int)idx); 472 473 list_for_each_entry(func, &sec->symbol_list, list) { 474 if (func->type != STT_NOTYPE && func->type != STT_FUNC) 475 continue; 476 477 if (func->offset == sec->sh.sh_size) { 478 /* Heuristic: likely an "end" symbol */ 479 if (func->type == STT_NOTYPE) 480 continue; 481 WARN("%s(): STT_FUNC at end of section", 482 func->name); 483 return -1; 484 } 485 486 if (func->return_thunk || func->alias != func) 487 continue; 488 489 if (!find_insn(file, sec, func->offset)) { 490 WARN("%s(): can't find starting instruction", 491 func->name); 492 return -1; 493 } 494 495 sym_for_each_insn(file, func, insn) { 496 insn->sym = func; 497 if (func->type == STT_FUNC && 498 insn->type == INSN_ENDBR && 499 list_empty(&insn->call_node)) { 500 if (insn->offset == func->offset) { 501 list_add_tail(&insn->call_node, &file->endbr_list); 502 file->nr_endbr++; 503 } else { 504 file->nr_endbr_int++; 505 } 506 } 507 } 508 } 509 } 510 511 if (opts.stats) 512 printf("nr_insns: 
%lu\n", nr_insns); 513 514 return 0; 515 } 516 517 /* 518 * Read the pv_ops[] .data table to find the static initialized values. 519 */ 520 static int add_pv_ops(struct objtool_file *file, const char *symname) 521 { 522 struct symbol *sym, *func; 523 unsigned long off, end; 524 struct reloc *rel; 525 int idx; 526 527 sym = find_symbol_by_name(file->elf, symname); 528 if (!sym) 529 return 0; 530 531 off = sym->offset; 532 end = off + sym->len; 533 for (;;) { 534 rel = find_reloc_by_dest_range(file->elf, sym->sec, off, end - off); 535 if (!rel) 536 break; 537 538 func = rel->sym; 539 if (func->type == STT_SECTION) 540 func = find_symbol_by_offset(rel->sym->sec, rel->addend); 541 542 idx = (rel->offset - sym->offset) / sizeof(unsigned long); 543 544 objtool_pv_add(file, idx, func); 545 546 off = rel->offset + 1; 547 if (off > end) 548 break; 549 } 550 551 return 0; 552 } 553 554 /* 555 * Allocate and initialize file->pv_ops[]. 556 */ 557 static int init_pv_ops(struct objtool_file *file) 558 { 559 static const char *pv_ops_tables[] = { 560 "pv_ops", 561 "xen_cpu_ops", 562 "xen_irq_ops", 563 "xen_mmu_ops", 564 NULL, 565 }; 566 const char *pv_ops; 567 struct symbol *sym; 568 int idx, nr; 569 570 if (!opts.noinstr) 571 return 0; 572 573 file->pv_ops = NULL; 574 575 sym = find_symbol_by_name(file->elf, "pv_ops"); 576 if (!sym) 577 return 0; 578 579 nr = sym->len / sizeof(unsigned long); 580 file->pv_ops = calloc(sizeof(struct pv_state), nr); 581 if (!file->pv_ops) 582 return -1; 583 584 for (idx = 0; idx < nr; idx++) 585 INIT_LIST_HEAD(&file->pv_ops[idx].targets); 586 587 for (idx = 0; (pv_ops = pv_ops_tables[idx]); idx++) 588 add_pv_ops(file, pv_ops); 589 590 return 0; 591 } 592 593 static struct instruction *find_last_insn(struct objtool_file *file, 594 struct section *sec) 595 { 596 struct instruction *insn = NULL; 597 unsigned int offset; 598 unsigned int end = (sec->sh.sh_size > 10) ? 
sec->sh.sh_size - 10 : 0; 599 600 for (offset = sec->sh.sh_size - 1; offset >= end && !insn; offset--) 601 insn = find_insn(file, sec, offset); 602 603 return insn; 604 } 605 606 /* 607 * Mark "ud2" instructions and manually annotated dead ends. 608 */ 609 static int add_dead_ends(struct objtool_file *file) 610 { 611 struct section *sec; 612 struct reloc *reloc; 613 struct instruction *insn; 614 615 /* 616 * Check for manually annotated dead ends. 617 */ 618 sec = find_section_by_name(file->elf, ".rela.discard.unreachable"); 619 if (!sec) 620 goto reachable; 621 622 list_for_each_entry(reloc, &sec->reloc_list, list) { 623 if (reloc->sym->type != STT_SECTION) { 624 WARN("unexpected relocation symbol type in %s", sec->name); 625 return -1; 626 } 627 insn = find_insn(file, reloc->sym->sec, reloc->addend); 628 if (insn) 629 insn = prev_insn_same_sec(file, insn); 630 else if (reloc->addend == reloc->sym->sec->sh.sh_size) { 631 insn = find_last_insn(file, reloc->sym->sec); 632 if (!insn) { 633 WARN("can't find unreachable insn at %s+0x%" PRIx64, 634 reloc->sym->sec->name, reloc->addend); 635 return -1; 636 } 637 } else { 638 WARN("can't find unreachable insn at %s+0x%" PRIx64, 639 reloc->sym->sec->name, reloc->addend); 640 return -1; 641 } 642 643 insn->dead_end = true; 644 } 645 646 reachable: 647 /* 648 * These manually annotated reachable checks are needed for GCC 4.4, 649 * where the Linux unreachable() macro isn't supported. In that case 650 * GCC doesn't know the "ud2" is fatal, so it generates code as if it's 651 * not a dead end. 
652 */ 653 sec = find_section_by_name(file->elf, ".rela.discard.reachable"); 654 if (!sec) 655 return 0; 656 657 list_for_each_entry(reloc, &sec->reloc_list, list) { 658 if (reloc->sym->type != STT_SECTION) { 659 WARN("unexpected relocation symbol type in %s", sec->name); 660 return -1; 661 } 662 insn = find_insn(file, reloc->sym->sec, reloc->addend); 663 if (insn) 664 insn = prev_insn_same_sec(file, insn); 665 else if (reloc->addend == reloc->sym->sec->sh.sh_size) { 666 insn = find_last_insn(file, reloc->sym->sec); 667 if (!insn) { 668 WARN("can't find reachable insn at %s+0x%" PRIx64, 669 reloc->sym->sec->name, reloc->addend); 670 return -1; 671 } 672 } else { 673 WARN("can't find reachable insn at %s+0x%" PRIx64, 674 reloc->sym->sec->name, reloc->addend); 675 return -1; 676 } 677 678 insn->dead_end = false; 679 } 680 681 return 0; 682 } 683 684 static int create_static_call_sections(struct objtool_file *file) 685 { 686 struct section *sec; 687 struct static_call_site *site; 688 struct instruction *insn; 689 struct symbol *key_sym; 690 char *key_name, *tmp; 691 int idx; 692 693 sec = find_section_by_name(file->elf, ".static_call_sites"); 694 if (sec) { 695 INIT_LIST_HEAD(&file->static_call_list); 696 WARN("file already has .static_call_sites section, skipping"); 697 return 0; 698 } 699 700 if (list_empty(&file->static_call_list)) 701 return 0; 702 703 idx = 0; 704 list_for_each_entry(insn, &file->static_call_list, call_node) 705 idx++; 706 707 sec = elf_create_section(file->elf, ".static_call_sites", SHF_WRITE, 708 sizeof(struct static_call_site), idx); 709 if (!sec) 710 return -1; 711 712 idx = 0; 713 list_for_each_entry(insn, &file->static_call_list, call_node) { 714 715 site = (struct static_call_site *)sec->data->d_buf + idx; 716 memset(site, 0, sizeof(struct static_call_site)); 717 718 /* populate reloc for 'addr' */ 719 if (elf_add_reloc_to_insn(file->elf, sec, 720 idx * sizeof(struct static_call_site), 721 R_X86_64_PC32, 722 insn->sec, insn->offset)) 723 
			return -1;

		/* find key symbol */
		key_name = strdup(insn_call_dest(insn)->name);
		if (!key_name) {
			perror("strdup");
			return -1;
		}
		if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR,
			    STATIC_CALL_TRAMP_PREFIX_LEN)) {
			WARN("static_call: trampoline name malformed: %s", key_name);
			free(key_name);
			return -1;
		}
		/* overwrite the trampoline prefix with the key prefix, in place */
		tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN;
		memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN);

		key_sym = find_symbol_by_name(file->elf, tmp);
		if (!key_sym) {
			if (!opts.module) {
				WARN("static_call: can't find static_call_key symbol: %s", tmp);
				free(key_name);
				return -1;
			}

			/*
			 * For modules(), the key might not be exported, which
			 * means the module can make static calls but isn't
			 * allowed to change them.
			 *
			 * In that case we temporarily set the key to be the
			 * trampoline address. This is fixed up in
			 * static_call_add_module().
			 */
			key_sym = insn_call_dest(insn);
		}
		free(key_name);

		/* populate reloc for 'key' */
		if (elf_add_reloc(file->elf, sec,
				  idx * sizeof(struct static_call_site) + 4,
				  R_X86_64_PC32, key_sym,
				  is_sibling_call(insn) * STATIC_CALL_SITE_TAIL))
			return -1;

		idx++;
	}

	return 0;
}

/*
 * Emit .retpoline_sites: one 32-bit PC-relative entry per retpoline call,
 * so the kernel can patch them at boot.
 */
static int create_retpoline_sites_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".retpoline_sites");
	if (sec) {
		WARN("file already has .retpoline_sites, skipping");
		return 0;
	}

	idx = 0;
	list_for_each_entry(insn, &file->retpoline_call_list, call_node)
		idx++;

	if (!idx)
		return 0;

	sec = elf_create_section(file->elf, ".retpoline_sites", 0,
				 sizeof(int), idx);
	if (!sec) {
		WARN("elf_create_section: .retpoline_sites");
		return -1;
	}

	idx = 0;
	list_for_each_entry(insn, &file->retpoline_call_list, call_node) {

		int *site = (int *)sec->data->d_buf + idx;
		*site = 0;

		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(int),
					  R_X86_64_PC32,
					  insn->sec, insn->offset)) {
			WARN("elf_add_reloc_to_insn: .retpoline_sites");
			return -1;
		}

		idx++;
	}

	return 0;
}

/*
 * Emit .return_sites: one 32-bit PC-relative entry per return-thunk call
 * site, for boot-time patching.
 */
static int create_return_sites_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".return_sites");
	if (sec) {
		WARN("file already has .return_sites, skipping");
		return 0;
	}

	idx = 0;
	list_for_each_entry(insn, &file->return_thunk_list, call_node)
		idx++;

	if (!idx)
		return 0;

	sec = elf_create_section(file->elf, ".return_sites", 0,
				 sizeof(int), idx);
	if (!sec) {
		WARN("elf_create_section: .return_sites");
		return -1;
	}

	idx = 0;
	list_for_each_entry(insn,
 &file->return_thunk_list, call_node) {

		int *site = (int *)sec->data->d_buf + idx;
		*site = 0;

		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(int),
					  R_X86_64_PC32,
					  insn->sec, insn->offset)) {
			WARN("elf_add_reloc_to_insn: .return_sites");
			return -1;
		}

		idx++;
	}

	return 0;
}

/*
 * Emit .ibt_endbr_seal: locations of superfluous ENDBR instructions that
 * the kernel may seal (poison) at boot.
 */
static int create_ibt_endbr_seal_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".ibt_endbr_seal");
	if (sec) {
		WARN("file already has .ibt_endbr_seal, skipping");
		return 0;
	}

	idx = 0;
	list_for_each_entry(insn, &file->endbr_list, call_node)
		idx++;

	if (opts.stats) {
		printf("ibt: ENDBR at function start: %d\n", file->nr_endbr);
		printf("ibt: ENDBR inside functions: %d\n", file->nr_endbr_int);
		printf("ibt: superfluous ENDBR: %d\n", idx);
	}

	if (!idx)
		return 0;

	sec = elf_create_section(file->elf, ".ibt_endbr_seal", 0,
				 sizeof(int), idx);
	if (!sec) {
		WARN("elf_create_section: .ibt_endbr_seal");
		return -1;
	}

	idx = 0;
	list_for_each_entry(insn, &file->endbr_list, call_node) {

		int *site = (int *)sec->data->d_buf + idx;
		struct symbol *sym = insn->sym;
		*site = 0;

		if (opts.module && sym && sym->type == STT_FUNC &&
		    insn->offset == sym->offset &&
		    (!strcmp(sym->name, "init_module") ||
		     !strcmp(sym->name, "cleanup_module")))
			WARN("%s(): not an indirect call target", sym->name);

		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(int),
					  R_X86_64_PC32,
					  insn->sec, insn->offset)) {
			WARN("elf_add_reloc_to_insn: .ibt_endbr_seal");
			return -1;
		}

		idx++;
	}

	return 0;
}

/*
 * Emit .cfi_sites: the location of every "__cfi_" preamble symbol found in
 * the text sections.
 */
static int create_cfi_sections(struct objtool_file *file)
{
	struct section *sec, *s;
	struct symbol *sym;
	unsigned int *loc;
	int idx;

	sec = find_section_by_name(file->elf, ".cfi_sites");
	if (sec) {
		/*
		 * NOTE(review): resetting call_list here looks copy-pasted
		 * from create_direct_call_sections() -- confirm it is
		 * intended for the .cfi_sites case.
		 */
		INIT_LIST_HEAD(&file->call_list);
		WARN("file already has .cfi_sites section, skipping");
		return 0;
	}

	/* first pass: count the __cfi_ symbols to size the section */
	idx = 0;
	for_each_sec(file, s) {
		if (!s->text)
			continue;

		list_for_each_entry(sym, &s->symbol_list, list) {
			if (sym->type != STT_FUNC)
				continue;

			if (strncmp(sym->name, "__cfi_", 6))
				continue;

			idx++;
		}
	}

	sec = elf_create_section(file->elf, ".cfi_sites", 0, sizeof(unsigned int), idx);
	if (!sec)
		return -1;

	/* second pass: emit one reloc'd slot per __cfi_ symbol */
	idx = 0;
	for_each_sec(file, s) {
		if (!s->text)
			continue;

		list_for_each_entry(sym, &s->symbol_list, list) {
			if (sym->type != STT_FUNC)
				continue;

			if (strncmp(sym->name, "__cfi_", 6))
				continue;

			loc = (unsigned int *)sec->data->d_buf + idx;
			memset(loc, 0, sizeof(unsigned int));

			if (elf_add_reloc_to_insn(file->elf, sec,
						  idx * sizeof(unsigned int),
						  R_X86_64_PC32,
						  s, sym->offset))
				return -1;

			idx++;
		}
	}

	return 0;
}

/*
 * Emit __mcount_loc: one pointer-sized absolute-reloc'd entry per mcount
 * call site, for ftrace.
 */
static int create_mcount_loc_sections(struct objtool_file *file)
{
	int addrsize = elf_class_addrsize(file->elf);
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, "__mcount_loc");
	if (sec) {
		INIT_LIST_HEAD(&file->mcount_loc_list);
		WARN("file already has __mcount_loc section, skipping");
		return 0;
	}

	if (list_empty(&file->mcount_loc_list))
		return 0;

	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, call_node)
		idx++;

	sec = elf_create_section(file->elf, "__mcount_loc", 0, addrsize, idx);
	if (!sec)
		return -1;

	sec->sh.sh_addralign = addrsize;

	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, call_node) {
		void *loc;

		/* unlike the other emitters, idx counts bytes here */
		loc = sec->data->d_buf + idx;
		memset(loc, 0, addrsize);

		if
 (elf_add_reloc_to_insn(file->elf, sec, idx,
				       addrsize == sizeof(u64) ? R_ABS64 : R_ABS32,
				       insn->sec, insn->offset))
			return -1;

		idx += addrsize;
	}

	return 0;
}

/*
 * Emit .call_sites: the location of every direct call, for boot-time call
 * depth accounting/patching.
 */
static int create_direct_call_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	unsigned int *loc;
	int idx;

	sec = find_section_by_name(file->elf, ".call_sites");
	if (sec) {
		INIT_LIST_HEAD(&file->call_list);
		WARN("file already has .call_sites section, skipping");
		return 0;
	}

	if (list_empty(&file->call_list))
		return 0;

	idx = 0;
	list_for_each_entry(insn, &file->call_list, call_node)
		idx++;

	sec = elf_create_section(file->elf, ".call_sites", 0, sizeof(unsigned int), idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->call_list, call_node) {

		loc = (unsigned int *)sec->data->d_buf + idx;
		memset(loc, 0, sizeof(unsigned int));

		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(unsigned int),
					  R_X86_64_PC32,
					  insn->sec, insn->offset))
			return -1;

		idx++;
	}

	return 0;
}

/*
 * Warnings shouldn't be reported for ignored functions.
 */
static void add_ignores(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	struct symbol *func;
	struct reloc *reloc;

	sec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
	if (!sec)
		return;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		switch (reloc->sym->type) {
		case STT_FUNC:
			func = reloc->sym;
			break;

		case STT_SECTION:
			func = find_func_by_offset(reloc->sym->sec, reloc->addend);
			if (!func)
				continue;
			break;

		default:
			WARN("unexpected relocation symbol type in %s: %d", sec->name, reloc->sym->type);
			continue;
		}

		/* mark every instruction of the function as ignored */
		func_for_each_insn(file, func, insn)
			insn->ignore = true;
	}
}

/*
 * This is a whitelist of functions that is allowed to be called with AC set.
 * The list is meant to be minimal and only contains compiler instrumentation
 * ABI and a few functions used to implement *_{to,from}_user() functions.
 *
 * These functions must not directly change AC, but may PUSHF/POPF.
 */
static const char *uaccess_safe_builtin[] = {
	/* KASAN */
	"kasan_report",
	"kasan_check_range",
	/* KASAN out-of-line */
	"__asan_loadN_noabort",
	"__asan_load1_noabort",
	"__asan_load2_noabort",
	"__asan_load4_noabort",
	"__asan_load8_noabort",
	"__asan_load16_noabort",
	"__asan_storeN_noabort",
	"__asan_store1_noabort",
	"__asan_store2_noabort",
	"__asan_store4_noabort",
	"__asan_store8_noabort",
	"__asan_store16_noabort",
	"__kasan_check_read",
	"__kasan_check_write",
	/* KASAN in-line */
	"__asan_report_load_n_noabort",
	"__asan_report_load1_noabort",
	"__asan_report_load2_noabort",
	"__asan_report_load4_noabort",
	"__asan_report_load8_noabort",
	"__asan_report_load16_noabort",
	"__asan_report_store_n_noabort",
	"__asan_report_store1_noabort",
	"__asan_report_store2_noabort",
	"__asan_report_store4_noabort",
	"__asan_report_store8_noabort",
	"__asan_report_store16_noabort",
	/* KCSAN */
	"__kcsan_check_access",
	"__kcsan_mb",
	"__kcsan_wmb",
	"__kcsan_rmb",
	"__kcsan_release",
	"kcsan_found_watchpoint",
	"kcsan_setup_watchpoint",
	"kcsan_check_scoped_accesses",
	"kcsan_disable_current",
	"kcsan_enable_current_nowarn",
	/* KCSAN/TSAN */
	"__tsan_func_entry",
	"__tsan_func_exit",
	"__tsan_read_range",
	"__tsan_write_range",
	"__tsan_read1",
	"__tsan_read2",
	"__tsan_read4",
	"__tsan_read8",
	"__tsan_read16",
	"__tsan_write1",
	"__tsan_write2",
	"__tsan_write4",
	"__tsan_write8",
	"__tsan_write16",
	"__tsan_read_write1",
	"__tsan_read_write2",
	"__tsan_read_write4",
	"__tsan_read_write8",
	"__tsan_read_write16",
	"__tsan_volatile_read1",
	"__tsan_volatile_read2",
	"__tsan_volatile_read4",
	"__tsan_volatile_read8",
	"__tsan_volatile_read16",
	"__tsan_volatile_write1",
	"__tsan_volatile_write2",
	"__tsan_volatile_write4",
	"__tsan_volatile_write8",
	"__tsan_volatile_write16",
	"__tsan_atomic8_load",
	"__tsan_atomic16_load",
	"__tsan_atomic32_load",
	"__tsan_atomic64_load",
	"__tsan_atomic8_store",
	"__tsan_atomic16_store",
	"__tsan_atomic32_store",
	"__tsan_atomic64_store",
	"__tsan_atomic8_exchange",
	"__tsan_atomic16_exchange",
	"__tsan_atomic32_exchange",
	"__tsan_atomic64_exchange",
	"__tsan_atomic8_fetch_add",
	"__tsan_atomic16_fetch_add",
	"__tsan_atomic32_fetch_add",
	"__tsan_atomic64_fetch_add",
	"__tsan_atomic8_fetch_sub",
	"__tsan_atomic16_fetch_sub",
	"__tsan_atomic32_fetch_sub",
	"__tsan_atomic64_fetch_sub",
	"__tsan_atomic8_fetch_and",
	"__tsan_atomic16_fetch_and",
	"__tsan_atomic32_fetch_and",
	"__tsan_atomic64_fetch_and",
	"__tsan_atomic8_fetch_or",
	"__tsan_atomic16_fetch_or",
	"__tsan_atomic32_fetch_or",
	"__tsan_atomic64_fetch_or",
	"__tsan_atomic8_fetch_xor",
	"__tsan_atomic16_fetch_xor",
	"__tsan_atomic32_fetch_xor",
	"__tsan_atomic64_fetch_xor",
	"__tsan_atomic8_fetch_nand",
	"__tsan_atomic16_fetch_nand",
	"__tsan_atomic32_fetch_nand",
	"__tsan_atomic64_fetch_nand",
	"__tsan_atomic8_compare_exchange_strong",
	"__tsan_atomic16_compare_exchange_strong",
	"__tsan_atomic32_compare_exchange_strong",
	"__tsan_atomic64_compare_exchange_strong",
	"__tsan_atomic8_compare_exchange_weak",
	"__tsan_atomic16_compare_exchange_weak",
	"__tsan_atomic32_compare_exchange_weak",
	"__tsan_atomic64_compare_exchange_weak",
	"__tsan_atomic8_compare_exchange_val",
	"__tsan_atomic16_compare_exchange_val",
	"__tsan_atomic32_compare_exchange_val",
	"__tsan_atomic64_compare_exchange_val",
	"__tsan_atomic_thread_fence",
	"__tsan_atomic_signal_fence",
	"__tsan_unaligned_read16",
	"__tsan_unaligned_write16",
	/* KCOV */
	"write_comp_data",
	"check_kcov_mode",
	"__sanitizer_cov_trace_pc",
	"__sanitizer_cov_trace_const_cmp1",
	"__sanitizer_cov_trace_const_cmp2",
	"__sanitizer_cov_trace_const_cmp4",
	"__sanitizer_cov_trace_const_cmp8",
	"__sanitizer_cov_trace_cmp1",
	"__sanitizer_cov_trace_cmp2",
	"__sanitizer_cov_trace_cmp4",
	"__sanitizer_cov_trace_cmp8",
	"__sanitizer_cov_trace_switch",
	/* KMSAN */
	"kmsan_copy_to_user",
	"kmsan_report",
	"kmsan_unpoison_entry_regs",
	"kmsan_unpoison_memory",
	"__msan_chain_origin",
	"__msan_get_context_state",
	"__msan_instrument_asm_store",
	"__msan_metadata_ptr_for_load_1",
	"__msan_metadata_ptr_for_load_2",
	"__msan_metadata_ptr_for_load_4",
	"__msan_metadata_ptr_for_load_8",
	"__msan_metadata_ptr_for_load_n",
	"__msan_metadata_ptr_for_store_1",
	"__msan_metadata_ptr_for_store_2",
	"__msan_metadata_ptr_for_store_4",
	"__msan_metadata_ptr_for_store_8",
	"__msan_metadata_ptr_for_store_n",
	"__msan_poison_alloca",
	"__msan_warning",
	/* UBSAN */
	"ubsan_type_mismatch_common",
	"__ubsan_handle_type_mismatch",
	"__ubsan_handle_type_mismatch_v1",
	"__ubsan_handle_shift_out_of_bounds",
	"__ubsan_handle_load_invalid_value",
	/* misc */
	"csum_partial_copy_generic",
	"copy_mc_fragile",
	"copy_mc_fragile_handle_tail",
	"copy_mc_enhanced_fast_string",
	"ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */
	"rep_stos_alternative",
	"rep_movs_alternative",
	"__copy_user_nocache",
	NULL
};

/* Mark every whitelisted symbol present in this object as uaccess-safe. */
static void add_uaccess_safe(struct objtool_file *file)
{
	struct symbol *func;
	const char **name;

	if (!opts.uaccess)
		return;

	for (name = uaccess_safe_builtin; *name; name++) {
		func = find_symbol_by_name(file->elf, *name);
		if (!func)
			continue;

		func->uaccess_safe = true;
	}
}

/*
 * FIXME: For now, just ignore any alternatives which add retpolines. This is
 * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
 * But it at least allows objtool to understand the control flow *around* the
 * retpoline.
 */
static int add_ignore_alternatives(struct objtool_file *file)
{
	struct section *sec;
	struct reloc *reloc;
	struct instruction *insn;

	sec = find_section_by_name(file->elf, ".rela.discard.ignore_alts");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.ignore_alts entry");
			return -1;
		}

		insn->ignore_alts = true;
	}

	return 0;
}

/* Arch hooks; overridden by arches that have retpolines/return thunks. */
__weak bool arch_is_retpoline(struct symbol *sym)
{
	return false;
}

__weak bool arch_is_rethunk(struct symbol *sym)
{
	return false;
}

/*
 * Reloc attached to @insn's byte range, if any.  A negative result is
 * cached in insn->no_reloc to avoid repeated lookups.
 */
static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
{
	struct reloc *reloc;

	if (insn->no_reloc)
		return NULL;

	if (!file)
		return NULL;

	reloc = find_reloc_by_dest_range(file->elf, insn->sec,
					 insn->offset, insn->len);
	if (!reloc) {
		insn->no_reloc = 1;
		return NULL;
	}

	return reloc;
}

/* Free an instruction's stack-op list. */
static void remove_insn_ops(struct instruction *insn)
{
	struct stack_op *op, *next;

	for (op = insn->stack_ops; op; op = next) {
		next = op->next;
		free(op);
	}
	insn->stack_ops = NULL;
}

static void annotate_call_site(struct objtool_file *file,
			       struct instruction *insn, bool sibling)
{
	struct reloc *reloc = insn_reloc(file, insn);
	struct symbol *sym =
1312 * FIXME: For now, just ignore any alternatives which add retpolines. This is 1313 * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline. 1314 * But it at least allows objtool to understand the control flow *around* the 1315 * retpoline. 1316 */ 1317 static int add_ignore_alternatives(struct objtool_file *file) 1318 { 1319 struct section *sec; 1320 struct reloc *reloc; 1321 struct instruction *insn; 1322 1323 sec = find_section_by_name(file->elf, ".rela.discard.ignore_alts"); 1324 if (!sec) 1325 return 0; 1326 1327 list_for_each_entry(reloc, &sec->reloc_list, list) { 1328 if (reloc->sym->type != STT_SECTION) { 1329 WARN("unexpected relocation symbol type in %s", sec->name); 1330 return -1; 1331 } 1332 1333 insn = find_insn(file, reloc->sym->sec, reloc->addend); 1334 if (!insn) { 1335 WARN("bad .discard.ignore_alts entry"); 1336 return -1; 1337 } 1338 1339 insn->ignore_alts = true; 1340 } 1341 1342 return 0; 1343 } 1344 1345 __weak bool arch_is_retpoline(struct symbol *sym) 1346 { 1347 return false; 1348 } 1349 1350 __weak bool arch_is_rethunk(struct symbol *sym) 1351 { 1352 return false; 1353 } 1354 1355 static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn) 1356 { 1357 struct reloc *reloc; 1358 1359 if (insn->no_reloc) 1360 return NULL; 1361 1362 if (!file) 1363 return NULL; 1364 1365 reloc = find_reloc_by_dest_range(file->elf, insn->sec, 1366 insn->offset, insn->len); 1367 if (!reloc) { 1368 insn->no_reloc = 1; 1369 return NULL; 1370 } 1371 1372 return reloc; 1373 } 1374 1375 static void remove_insn_ops(struct instruction *insn) 1376 { 1377 struct stack_op *op, *next; 1378 1379 for (op = insn->stack_ops; op; op = next) { 1380 next = op->next; 1381 free(op); 1382 } 1383 insn->stack_ops = NULL; 1384 } 1385 1386 static void annotate_call_site(struct objtool_file *file, 1387 struct instruction *insn, bool sibling) 1388 { 1389 struct reloc *reloc = insn_reloc(file, insn); 1390 struct symbol *sym = 
insn_call_dest(insn); 1391 1392 if (!sym) 1393 sym = reloc->sym; 1394 1395 /* 1396 * Alternative replacement code is just template code which is 1397 * sometimes copied to the original instruction. For now, don't 1398 * annotate it. (In the future we might consider annotating the 1399 * original instruction if/when it ever makes sense to do so.) 1400 */ 1401 if (!strcmp(insn->sec->name, ".altinstr_replacement")) 1402 return; 1403 1404 if (sym->static_call_tramp) { 1405 list_add_tail(&insn->call_node, &file->static_call_list); 1406 return; 1407 } 1408 1409 if (sym->retpoline_thunk) { 1410 list_add_tail(&insn->call_node, &file->retpoline_call_list); 1411 return; 1412 } 1413 1414 /* 1415 * Many compilers cannot disable KCOV or sanitizer calls with a function 1416 * attribute so they need a little help, NOP out any such calls from 1417 * noinstr text. 1418 */ 1419 if (opts.hack_noinstr && insn->sec->noinstr && sym->profiling_func) { 1420 if (reloc) { 1421 reloc->type = R_NONE; 1422 elf_write_reloc(file->elf, reloc); 1423 } 1424 1425 elf_write_insn(file->elf, insn->sec, 1426 insn->offset, insn->len, 1427 sibling ? arch_ret_insn(insn->len) 1428 : arch_nop_insn(insn->len)); 1429 1430 insn->type = sibling ? INSN_RETURN : INSN_NOP; 1431 1432 if (sibling) { 1433 /* 1434 * We've replaced the tail-call JMP insn by two new 1435 * insn: RET; INT3, except we only have a single struct 1436 * insn here. Mark it retpoline_safe to avoid the SLS 1437 * warning, instead of adding another insn. 
1438 */ 1439 insn->retpoline_safe = true; 1440 } 1441 1442 return; 1443 } 1444 1445 if (opts.mcount && sym->fentry) { 1446 if (sibling) 1447 WARN_FUNC("Tail call to __fentry__ !?!?", insn->sec, insn->offset); 1448 if (opts.mnop) { 1449 if (reloc) { 1450 reloc->type = R_NONE; 1451 elf_write_reloc(file->elf, reloc); 1452 } 1453 1454 elf_write_insn(file->elf, insn->sec, 1455 insn->offset, insn->len, 1456 arch_nop_insn(insn->len)); 1457 1458 insn->type = INSN_NOP; 1459 } 1460 1461 list_add_tail(&insn->call_node, &file->mcount_loc_list); 1462 return; 1463 } 1464 1465 if (insn->type == INSN_CALL && !insn->sec->init) 1466 list_add_tail(&insn->call_node, &file->call_list); 1467 1468 if (!sibling && dead_end_function(file, sym)) 1469 insn->dead_end = true; 1470 } 1471 1472 static void add_call_dest(struct objtool_file *file, struct instruction *insn, 1473 struct symbol *dest, bool sibling) 1474 { 1475 insn->_call_dest = dest; 1476 if (!dest) 1477 return; 1478 1479 /* 1480 * Whatever stack impact regular CALLs have, should be undone 1481 * by the RETURN of the called function. 1482 * 1483 * Annotated intra-function calls retain the stack_ops but 1484 * are converted to JUMP, see read_intra_function_calls(). 1485 */ 1486 remove_insn_ops(insn); 1487 1488 annotate_call_site(file, insn, sibling); 1489 } 1490 1491 static void add_retpoline_call(struct objtool_file *file, struct instruction *insn) 1492 { 1493 /* 1494 * Retpoline calls/jumps are really dynamic calls/jumps in disguise, 1495 * so convert them accordingly. 
1496 */ 1497 switch (insn->type) { 1498 case INSN_CALL: 1499 insn->type = INSN_CALL_DYNAMIC; 1500 break; 1501 case INSN_JUMP_UNCONDITIONAL: 1502 insn->type = INSN_JUMP_DYNAMIC; 1503 break; 1504 case INSN_JUMP_CONDITIONAL: 1505 insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL; 1506 break; 1507 default: 1508 return; 1509 } 1510 1511 insn->retpoline_safe = true; 1512 1513 /* 1514 * Whatever stack impact regular CALLs have, should be undone 1515 * by the RETURN of the called function. 1516 * 1517 * Annotated intra-function calls retain the stack_ops but 1518 * are converted to JUMP, see read_intra_function_calls(). 1519 */ 1520 remove_insn_ops(insn); 1521 1522 annotate_call_site(file, insn, false); 1523 } 1524 1525 static void add_return_call(struct objtool_file *file, struct instruction *insn, bool add) 1526 { 1527 /* 1528 * Return thunk tail calls are really just returns in disguise, 1529 * so convert them accordingly. 1530 */ 1531 insn->type = INSN_RETURN; 1532 insn->retpoline_safe = true; 1533 1534 if (add) 1535 list_add_tail(&insn->call_node, &file->return_thunk_list); 1536 } 1537 1538 static bool is_first_func_insn(struct objtool_file *file, 1539 struct instruction *insn, struct symbol *sym) 1540 { 1541 if (insn->offset == sym->offset) 1542 return true; 1543 1544 /* Allow direct CALL/JMP past ENDBR */ 1545 if (opts.ibt) { 1546 struct instruction *prev = prev_insn_same_sym(file, insn); 1547 1548 if (prev && prev->type == INSN_ENDBR && 1549 insn->offset == sym->offset + prev->len) 1550 return true; 1551 } 1552 1553 return false; 1554 } 1555 1556 /* 1557 * A sibling call is a tail-call to another symbol -- to differentiate from a 1558 * recursive tail-call which is to the same symbol. 
1559 */ 1560 static bool jump_is_sibling_call(struct objtool_file *file, 1561 struct instruction *from, struct instruction *to) 1562 { 1563 struct symbol *fs = from->sym; 1564 struct symbol *ts = to->sym; 1565 1566 /* Not a sibling call if from/to a symbol hole */ 1567 if (!fs || !ts) 1568 return false; 1569 1570 /* Not a sibling call if not targeting the start of a symbol. */ 1571 if (!is_first_func_insn(file, to, ts)) 1572 return false; 1573 1574 /* Disallow sibling calls into STT_NOTYPE */ 1575 if (ts->type == STT_NOTYPE) 1576 return false; 1577 1578 /* Must not be self to be a sibling */ 1579 return fs->pfunc != ts->pfunc; 1580 } 1581 1582 /* 1583 * Find the destination instructions for all jumps. 1584 */ 1585 static int add_jump_destinations(struct objtool_file *file) 1586 { 1587 struct instruction *insn, *jump_dest; 1588 struct reloc *reloc; 1589 struct section *dest_sec; 1590 unsigned long dest_off; 1591 1592 for_each_insn(file, insn) { 1593 if (insn->jump_dest) { 1594 /* 1595 * handle_group_alt() may have previously set 1596 * 'jump_dest' for some alternatives. 1597 */ 1598 continue; 1599 } 1600 if (!is_static_jump(insn)) 1601 continue; 1602 1603 reloc = insn_reloc(file, insn); 1604 if (!reloc) { 1605 dest_sec = insn->sec; 1606 dest_off = arch_jump_destination(insn); 1607 } else if (reloc->sym->type == STT_SECTION) { 1608 dest_sec = reloc->sym->sec; 1609 dest_off = arch_dest_reloc_offset(reloc->addend); 1610 } else if (reloc->sym->retpoline_thunk) { 1611 add_retpoline_call(file, insn); 1612 continue; 1613 } else if (reloc->sym->return_thunk) { 1614 add_return_call(file, insn, true); 1615 continue; 1616 } else if (insn_func(insn)) { 1617 /* 1618 * External sibling call or internal sibling call with 1619 * STT_FUNC reloc. 
1620 */ 1621 add_call_dest(file, insn, reloc->sym, true); 1622 continue; 1623 } else if (reloc->sym->sec->idx) { 1624 dest_sec = reloc->sym->sec; 1625 dest_off = reloc->sym->sym.st_value + 1626 arch_dest_reloc_offset(reloc->addend); 1627 } else { 1628 /* non-func asm code jumping to another file */ 1629 continue; 1630 } 1631 1632 jump_dest = find_insn(file, dest_sec, dest_off); 1633 if (!jump_dest) { 1634 struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off); 1635 1636 /* 1637 * This is a special case for zen_untrain_ret(). 1638 * It jumps to __x86_return_thunk(), but objtool 1639 * can't find the thunk's starting RET 1640 * instruction, because the RET is also in the 1641 * middle of another instruction. Objtool only 1642 * knows about the outer instruction. 1643 */ 1644 if (sym && sym->return_thunk) { 1645 add_return_call(file, insn, false); 1646 continue; 1647 } 1648 1649 WARN_FUNC("can't find jump dest instruction at %s+0x%lx", 1650 insn->sec, insn->offset, dest_sec->name, 1651 dest_off); 1652 return -1; 1653 } 1654 1655 /* 1656 * Cross-function jump. 1657 */ 1658 if (insn_func(insn) && insn_func(jump_dest) && 1659 insn_func(insn) != insn_func(jump_dest)) { 1660 1661 /* 1662 * For GCC 8+, create parent/child links for any cold 1663 * subfunctions. This is _mostly_ redundant with a 1664 * similar initialization in read_symbols(). 1665 * 1666 * If a function has aliases, we want the *first* such 1667 * function in the symbol table to be the subfunction's 1668 * parent. In that case we overwrite the 1669 * initialization done in read_symbols(). 1670 * 1671 * However this code can't completely replace the 1672 * read_symbols() code because this doesn't detect the 1673 * case where the parent function's only reference to a 1674 * subfunction is through a jump table. 
1675 */ 1676 if (!strstr(insn_func(insn)->name, ".cold") && 1677 strstr(insn_func(jump_dest)->name, ".cold")) { 1678 insn_func(insn)->cfunc = insn_func(jump_dest); 1679 insn_func(jump_dest)->pfunc = insn_func(insn); 1680 } 1681 } 1682 1683 if (jump_is_sibling_call(file, insn, jump_dest)) { 1684 /* 1685 * Internal sibling call without reloc or with 1686 * STT_SECTION reloc. 1687 */ 1688 add_call_dest(file, insn, insn_func(jump_dest), true); 1689 continue; 1690 } 1691 1692 insn->jump_dest = jump_dest; 1693 } 1694 1695 return 0; 1696 } 1697 1698 static struct symbol *find_call_destination(struct section *sec, unsigned long offset) 1699 { 1700 struct symbol *call_dest; 1701 1702 call_dest = find_func_by_offset(sec, offset); 1703 if (!call_dest) 1704 call_dest = find_symbol_by_offset(sec, offset); 1705 1706 return call_dest; 1707 } 1708 1709 /* 1710 * Find the destination instructions for all calls. 1711 */ 1712 static int add_call_destinations(struct objtool_file *file) 1713 { 1714 struct instruction *insn; 1715 unsigned long dest_off; 1716 struct symbol *dest; 1717 struct reloc *reloc; 1718 1719 for_each_insn(file, insn) { 1720 if (insn->type != INSN_CALL) 1721 continue; 1722 1723 reloc = insn_reloc(file, insn); 1724 if (!reloc) { 1725 dest_off = arch_jump_destination(insn); 1726 dest = find_call_destination(insn->sec, dest_off); 1727 1728 add_call_dest(file, insn, dest, false); 1729 1730 if (insn->ignore) 1731 continue; 1732 1733 if (!insn_call_dest(insn)) { 1734 WARN_FUNC("unannotated intra-function call", insn->sec, insn->offset); 1735 return -1; 1736 } 1737 1738 if (insn_func(insn) && insn_call_dest(insn)->type != STT_FUNC) { 1739 WARN_FUNC("unsupported call to non-function", 1740 insn->sec, insn->offset); 1741 return -1; 1742 } 1743 1744 } else if (reloc->sym->type == STT_SECTION) { 1745 dest_off = arch_dest_reloc_offset(reloc->addend); 1746 dest = find_call_destination(reloc->sym->sec, dest_off); 1747 if (!dest) { 1748 WARN_FUNC("can't find call dest symbol at 
%s+0x%lx", 1749 insn->sec, insn->offset, 1750 reloc->sym->sec->name, 1751 dest_off); 1752 return -1; 1753 } 1754 1755 add_call_dest(file, insn, dest, false); 1756 1757 } else if (reloc->sym->retpoline_thunk) { 1758 add_retpoline_call(file, insn); 1759 1760 } else 1761 add_call_dest(file, insn, reloc->sym, false); 1762 } 1763 1764 return 0; 1765 } 1766 1767 /* 1768 * The .alternatives section requires some extra special care over and above 1769 * other special sections because alternatives are patched in place. 1770 */ 1771 static int handle_group_alt(struct objtool_file *file, 1772 struct special_alt *special_alt, 1773 struct instruction *orig_insn, 1774 struct instruction **new_insn) 1775 { 1776 struct instruction *last_new_insn = NULL, *insn, *nop = NULL; 1777 struct alt_group *orig_alt_group, *new_alt_group; 1778 unsigned long dest_off; 1779 1780 orig_alt_group = orig_insn->alt_group; 1781 if (!orig_alt_group) { 1782 struct instruction *last_orig_insn = NULL; 1783 1784 orig_alt_group = malloc(sizeof(*orig_alt_group)); 1785 if (!orig_alt_group) { 1786 WARN("malloc failed"); 1787 return -1; 1788 } 1789 orig_alt_group->cfi = calloc(special_alt->orig_len, 1790 sizeof(struct cfi_state *)); 1791 if (!orig_alt_group->cfi) { 1792 WARN("calloc failed"); 1793 return -1; 1794 } 1795 1796 insn = orig_insn; 1797 sec_for_each_insn_from(file, insn) { 1798 if (insn->offset >= special_alt->orig_off + special_alt->orig_len) 1799 break; 1800 1801 insn->alt_group = orig_alt_group; 1802 last_orig_insn = insn; 1803 } 1804 orig_alt_group->orig_group = NULL; 1805 orig_alt_group->first_insn = orig_insn; 1806 orig_alt_group->last_insn = last_orig_insn; 1807 orig_alt_group->nop = NULL; 1808 } else { 1809 if (orig_alt_group->last_insn->offset + orig_alt_group->last_insn->len - 1810 orig_alt_group->first_insn->offset != special_alt->orig_len) { 1811 WARN_FUNC("weirdly overlapping alternative! 
%ld != %d", 1812 orig_insn->sec, orig_insn->offset, 1813 orig_alt_group->last_insn->offset + 1814 orig_alt_group->last_insn->len - 1815 orig_alt_group->first_insn->offset, 1816 special_alt->orig_len); 1817 return -1; 1818 } 1819 } 1820 1821 new_alt_group = malloc(sizeof(*new_alt_group)); 1822 if (!new_alt_group) { 1823 WARN("malloc failed"); 1824 return -1; 1825 } 1826 1827 if (special_alt->new_len < special_alt->orig_len) { 1828 /* 1829 * Insert a fake nop at the end to make the replacement 1830 * alt_group the same size as the original. This is needed to 1831 * allow propagate_alt_cfi() to do its magic. When the last 1832 * instruction affects the stack, the instruction after it (the 1833 * nop) will propagate the new state to the shared CFI array. 1834 */ 1835 nop = malloc(sizeof(*nop)); 1836 if (!nop) { 1837 WARN("malloc failed"); 1838 return -1; 1839 } 1840 memset(nop, 0, sizeof(*nop)); 1841 1842 nop->sec = special_alt->new_sec; 1843 nop->offset = special_alt->new_off + special_alt->new_len; 1844 nop->len = special_alt->orig_len - special_alt->new_len; 1845 nop->type = INSN_NOP; 1846 nop->sym = orig_insn->sym; 1847 nop->alt_group = new_alt_group; 1848 nop->ignore = orig_insn->ignore_alts; 1849 } 1850 1851 if (!special_alt->new_len) { 1852 *new_insn = nop; 1853 goto end; 1854 } 1855 1856 insn = *new_insn; 1857 sec_for_each_insn_from(file, insn) { 1858 struct reloc *alt_reloc; 1859 1860 if (insn->offset >= special_alt->new_off + special_alt->new_len) 1861 break; 1862 1863 last_new_insn = insn; 1864 1865 insn->ignore = orig_insn->ignore_alts; 1866 insn->sym = orig_insn->sym; 1867 insn->alt_group = new_alt_group; 1868 1869 /* 1870 * Since alternative replacement code is copy/pasted by the 1871 * kernel after applying relocations, generally such code can't 1872 * have relative-address relocation references to outside the 1873 * .altinstr_replacement section, unless the arch's 1874 * alternatives code can adjust the relative offsets 1875 * accordingly. 
1876 */ 1877 alt_reloc = insn_reloc(file, insn); 1878 if (alt_reloc && arch_pc_relative_reloc(alt_reloc) && 1879 !arch_support_alt_relocation(special_alt, insn, alt_reloc)) { 1880 1881 WARN_FUNC("unsupported relocation in alternatives section", 1882 insn->sec, insn->offset); 1883 return -1; 1884 } 1885 1886 if (!is_static_jump(insn)) 1887 continue; 1888 1889 if (!insn->immediate) 1890 continue; 1891 1892 dest_off = arch_jump_destination(insn); 1893 if (dest_off == special_alt->new_off + special_alt->new_len) { 1894 insn->jump_dest = next_insn_same_sec(file, orig_alt_group->last_insn); 1895 if (!insn->jump_dest) { 1896 WARN_FUNC("can't find alternative jump destination", 1897 insn->sec, insn->offset); 1898 return -1; 1899 } 1900 } 1901 } 1902 1903 if (!last_new_insn) { 1904 WARN_FUNC("can't find last new alternative instruction", 1905 special_alt->new_sec, special_alt->new_off); 1906 return -1; 1907 } 1908 1909 end: 1910 new_alt_group->orig_group = orig_alt_group; 1911 new_alt_group->first_insn = *new_insn; 1912 new_alt_group->last_insn = last_new_insn; 1913 new_alt_group->nop = nop; 1914 new_alt_group->cfi = orig_alt_group->cfi; 1915 return 0; 1916 } 1917 1918 /* 1919 * A jump table entry can either convert a nop to a jump or a jump to a nop. 1920 * If the original instruction is a jump, make the alt entry an effective nop 1921 * by just skipping the original instruction. 
1922 */ 1923 static int handle_jump_alt(struct objtool_file *file, 1924 struct special_alt *special_alt, 1925 struct instruction *orig_insn, 1926 struct instruction **new_insn) 1927 { 1928 if (orig_insn->type != INSN_JUMP_UNCONDITIONAL && 1929 orig_insn->type != INSN_NOP) { 1930 1931 WARN_FUNC("unsupported instruction at jump label", 1932 orig_insn->sec, orig_insn->offset); 1933 return -1; 1934 } 1935 1936 if (opts.hack_jump_label && special_alt->key_addend & 2) { 1937 struct reloc *reloc = insn_reloc(file, orig_insn); 1938 1939 if (reloc) { 1940 reloc->type = R_NONE; 1941 elf_write_reloc(file->elf, reloc); 1942 } 1943 elf_write_insn(file->elf, orig_insn->sec, 1944 orig_insn->offset, orig_insn->len, 1945 arch_nop_insn(orig_insn->len)); 1946 orig_insn->type = INSN_NOP; 1947 } 1948 1949 if (orig_insn->type == INSN_NOP) { 1950 if (orig_insn->len == 2) 1951 file->jl_nop_short++; 1952 else 1953 file->jl_nop_long++; 1954 1955 return 0; 1956 } 1957 1958 if (orig_insn->len == 2) 1959 file->jl_short++; 1960 else 1961 file->jl_long++; 1962 1963 *new_insn = next_insn_same_sec(file, orig_insn); 1964 return 0; 1965 } 1966 1967 /* 1968 * Read all the special sections which have alternate instructions which can be 1969 * patched in or redirected to at runtime. Each instruction having alternate 1970 * instruction(s) has them added to its insn->alts list, which will be 1971 * traversed in validate_branch(). 
1972 */ 1973 static int add_special_section_alts(struct objtool_file *file) 1974 { 1975 struct list_head special_alts; 1976 struct instruction *orig_insn, *new_insn; 1977 struct special_alt *special_alt, *tmp; 1978 struct alternative *alt; 1979 int ret; 1980 1981 ret = special_get_alts(file->elf, &special_alts); 1982 if (ret) 1983 return ret; 1984 1985 list_for_each_entry_safe(special_alt, tmp, &special_alts, list) { 1986 1987 orig_insn = find_insn(file, special_alt->orig_sec, 1988 special_alt->orig_off); 1989 if (!orig_insn) { 1990 WARN_FUNC("special: can't find orig instruction", 1991 special_alt->orig_sec, special_alt->orig_off); 1992 ret = -1; 1993 goto out; 1994 } 1995 1996 new_insn = NULL; 1997 if (!special_alt->group || special_alt->new_len) { 1998 new_insn = find_insn(file, special_alt->new_sec, 1999 special_alt->new_off); 2000 if (!new_insn) { 2001 WARN_FUNC("special: can't find new instruction", 2002 special_alt->new_sec, 2003 special_alt->new_off); 2004 ret = -1; 2005 goto out; 2006 } 2007 } 2008 2009 if (special_alt->group) { 2010 if (!special_alt->orig_len) { 2011 WARN_FUNC("empty alternative entry", 2012 orig_insn->sec, orig_insn->offset); 2013 continue; 2014 } 2015 2016 ret = handle_group_alt(file, special_alt, orig_insn, 2017 &new_insn); 2018 if (ret) 2019 goto out; 2020 } else if (special_alt->jump_or_nop) { 2021 ret = handle_jump_alt(file, special_alt, orig_insn, 2022 &new_insn); 2023 if (ret) 2024 goto out; 2025 } 2026 2027 alt = malloc(sizeof(*alt)); 2028 if (!alt) { 2029 WARN("malloc failed"); 2030 ret = -1; 2031 goto out; 2032 } 2033 2034 alt->insn = new_insn; 2035 alt->skip_orig = special_alt->skip_orig; 2036 orig_insn->ignore_alts |= special_alt->skip_alt; 2037 alt->next = orig_insn->alts; 2038 orig_insn->alts = alt; 2039 2040 list_del(&special_alt->list); 2041 free(special_alt); 2042 } 2043 2044 if (opts.stats) { 2045 printf("jl\\\tNOP\tJMP\n"); 2046 printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short); 2047 
printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long); 2048 } 2049 2050 out: 2051 return ret; 2052 } 2053 2054 static int add_jump_table(struct objtool_file *file, struct instruction *insn, 2055 struct reloc *table) 2056 { 2057 struct reloc *reloc = table; 2058 struct instruction *dest_insn; 2059 struct alternative *alt; 2060 struct symbol *pfunc = insn_func(insn)->pfunc; 2061 unsigned int prev_offset = 0; 2062 2063 /* 2064 * Each @reloc is a switch table relocation which points to the target 2065 * instruction. 2066 */ 2067 list_for_each_entry_from(reloc, &table->sec->reloc_list, list) { 2068 2069 /* Check for the end of the table: */ 2070 if (reloc != table && reloc->jump_table_start) 2071 break; 2072 2073 /* Make sure the table entries are consecutive: */ 2074 if (prev_offset && reloc->offset != prev_offset + 8) 2075 break; 2076 2077 /* Detect function pointers from contiguous objects: */ 2078 if (reloc->sym->sec == pfunc->sec && 2079 reloc->addend == pfunc->offset) 2080 break; 2081 2082 dest_insn = find_insn(file, reloc->sym->sec, reloc->addend); 2083 if (!dest_insn) 2084 break; 2085 2086 /* Make sure the destination is in the same function: */ 2087 if (!insn_func(dest_insn) || insn_func(dest_insn)->pfunc != pfunc) 2088 break; 2089 2090 alt = malloc(sizeof(*alt)); 2091 if (!alt) { 2092 WARN("malloc failed"); 2093 return -1; 2094 } 2095 2096 alt->insn = dest_insn; 2097 alt->next = insn->alts; 2098 insn->alts = alt; 2099 prev_offset = reloc->offset; 2100 } 2101 2102 if (!prev_offset) { 2103 WARN_FUNC("can't find switch jump table", 2104 insn->sec, insn->offset); 2105 return -1; 2106 } 2107 2108 return 0; 2109 } 2110 2111 /* 2112 * find_jump_table() - Given a dynamic jump, find the switch jump table 2113 * associated with it. 
2114 */ 2115 static struct reloc *find_jump_table(struct objtool_file *file, 2116 struct symbol *func, 2117 struct instruction *insn) 2118 { 2119 struct reloc *table_reloc; 2120 struct instruction *dest_insn, *orig_insn = insn; 2121 2122 /* 2123 * Backward search using the @first_jump_src links, these help avoid 2124 * much of the 'in between' code. Which avoids us getting confused by 2125 * it. 2126 */ 2127 for (; 2128 insn && insn_func(insn) && insn_func(insn)->pfunc == func; 2129 insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) { 2130 2131 if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC) 2132 break; 2133 2134 /* allow small jumps within the range */ 2135 if (insn->type == INSN_JUMP_UNCONDITIONAL && 2136 insn->jump_dest && 2137 (insn->jump_dest->offset <= insn->offset || 2138 insn->jump_dest->offset > orig_insn->offset)) 2139 break; 2140 2141 table_reloc = arch_find_switch_table(file, insn); 2142 if (!table_reloc) 2143 continue; 2144 dest_insn = find_insn(file, table_reloc->sym->sec, table_reloc->addend); 2145 if (!dest_insn || !insn_func(dest_insn) || insn_func(dest_insn)->pfunc != func) 2146 continue; 2147 2148 return table_reloc; 2149 } 2150 2151 return NULL; 2152 } 2153 2154 /* 2155 * First pass: Mark the head of each jump table so that in the next pass, 2156 * we know when a given jump table ends and the next one starts. 2157 */ 2158 static void mark_func_jump_tables(struct objtool_file *file, 2159 struct symbol *func) 2160 { 2161 struct instruction *insn, *last = NULL; 2162 struct reloc *reloc; 2163 2164 func_for_each_insn(file, func, insn) { 2165 if (!last) 2166 last = insn; 2167 2168 /* 2169 * Store back-pointers for unconditional forward jumps such 2170 * that find_jump_table() can back-track using those and 2171 * avoid some potentially confusing code. 
2172 */ 2173 if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest && 2174 insn->offset > last->offset && 2175 insn->jump_dest->offset > insn->offset && 2176 !insn->jump_dest->first_jump_src) { 2177 2178 insn->jump_dest->first_jump_src = insn; 2179 last = insn->jump_dest; 2180 } 2181 2182 if (insn->type != INSN_JUMP_DYNAMIC) 2183 continue; 2184 2185 reloc = find_jump_table(file, func, insn); 2186 if (reloc) { 2187 reloc->jump_table_start = true; 2188 insn->_jump_table = reloc; 2189 } 2190 } 2191 } 2192 2193 static int add_func_jump_tables(struct objtool_file *file, 2194 struct symbol *func) 2195 { 2196 struct instruction *insn; 2197 int ret; 2198 2199 func_for_each_insn(file, func, insn) { 2200 if (!insn_jump_table(insn)) 2201 continue; 2202 2203 ret = add_jump_table(file, insn, insn_jump_table(insn)); 2204 if (ret) 2205 return ret; 2206 } 2207 2208 return 0; 2209 } 2210 2211 /* 2212 * For some switch statements, gcc generates a jump table in the .rodata 2213 * section which contains a list of addresses within the function to jump to. 2214 * This finds these jump tables and adds them to the insn->alts lists. 
2215 */ 2216 static int add_jump_table_alts(struct objtool_file *file) 2217 { 2218 struct section *sec; 2219 struct symbol *func; 2220 int ret; 2221 2222 if (!file->rodata) 2223 return 0; 2224 2225 for_each_sec(file, sec) { 2226 list_for_each_entry(func, &sec->symbol_list, list) { 2227 if (func->type != STT_FUNC) 2228 continue; 2229 2230 mark_func_jump_tables(file, func); 2231 ret = add_func_jump_tables(file, func); 2232 if (ret) 2233 return ret; 2234 } 2235 } 2236 2237 return 0; 2238 } 2239 2240 static void set_func_state(struct cfi_state *state) 2241 { 2242 state->cfa = initial_func_cfi.cfa; 2243 memcpy(&state->regs, &initial_func_cfi.regs, 2244 CFI_NUM_REGS * sizeof(struct cfi_reg)); 2245 state->stack_size = initial_func_cfi.cfa.offset; 2246 } 2247 2248 static int read_unwind_hints(struct objtool_file *file) 2249 { 2250 struct cfi_state cfi = init_cfi; 2251 struct section *sec, *relocsec; 2252 struct unwind_hint *hint; 2253 struct instruction *insn; 2254 struct reloc *reloc; 2255 int i; 2256 2257 sec = find_section_by_name(file->elf, ".discard.unwind_hints"); 2258 if (!sec) 2259 return 0; 2260 2261 relocsec = sec->reloc; 2262 if (!relocsec) { 2263 WARN("missing .rela.discard.unwind_hints section"); 2264 return -1; 2265 } 2266 2267 if (sec->sh.sh_size % sizeof(struct unwind_hint)) { 2268 WARN("struct unwind_hint size mismatch"); 2269 return -1; 2270 } 2271 2272 file->hints = true; 2273 2274 for (i = 0; i < sec->sh.sh_size / sizeof(struct unwind_hint); i++) { 2275 hint = (struct unwind_hint *)sec->data->d_buf + i; 2276 2277 reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint)); 2278 if (!reloc) { 2279 WARN("can't find reloc for unwind_hints[%d]", i); 2280 return -1; 2281 } 2282 2283 insn = find_insn(file, reloc->sym->sec, reloc->addend); 2284 if (!insn) { 2285 WARN("can't find insn for unwind_hints[%d]", i); 2286 return -1; 2287 } 2288 2289 insn->hint = true; 2290 2291 if (hint->type == UNWIND_HINT_TYPE_SAVE) { 2292 insn->hint = false; 2293 insn->save = 
true; 2294 continue; 2295 } 2296 2297 if (hint->type == UNWIND_HINT_TYPE_RESTORE) { 2298 insn->restore = true; 2299 continue; 2300 } 2301 2302 if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) { 2303 struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset); 2304 2305 if (sym && sym->bind == STB_GLOBAL) { 2306 if (opts.ibt && insn->type != INSN_ENDBR && !insn->noendbr) { 2307 WARN_FUNC("UNWIND_HINT_IRET_REGS without ENDBR", 2308 insn->sec, insn->offset); 2309 } 2310 2311 insn->entry = 1; 2312 } 2313 } 2314 2315 if (hint->type == UNWIND_HINT_TYPE_ENTRY) { 2316 hint->type = UNWIND_HINT_TYPE_CALL; 2317 insn->entry = 1; 2318 } 2319 2320 if (hint->type == UNWIND_HINT_TYPE_FUNC) { 2321 insn->cfi = &func_cfi; 2322 continue; 2323 } 2324 2325 if (insn->cfi) 2326 cfi = *(insn->cfi); 2327 2328 if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) { 2329 WARN_FUNC("unsupported unwind_hint sp base reg %d", 2330 insn->sec, insn->offset, hint->sp_reg); 2331 return -1; 2332 } 2333 2334 cfi.cfa.offset = bswap_if_needed(file->elf, hint->sp_offset); 2335 cfi.type = hint->type; 2336 cfi.signal = hint->signal; 2337 cfi.end = hint->end; 2338 2339 insn->cfi = cfi_hash_find_or_add(&cfi); 2340 } 2341 2342 return 0; 2343 } 2344 2345 static int read_noendbr_hints(struct objtool_file *file) 2346 { 2347 struct section *sec; 2348 struct instruction *insn; 2349 struct reloc *reloc; 2350 2351 sec = find_section_by_name(file->elf, ".rela.discard.noendbr"); 2352 if (!sec) 2353 return 0; 2354 2355 list_for_each_entry(reloc, &sec->reloc_list, list) { 2356 insn = find_insn(file, reloc->sym->sec, reloc->sym->offset + reloc->addend); 2357 if (!insn) { 2358 WARN("bad .discard.noendbr entry"); 2359 return -1; 2360 } 2361 2362 insn->noendbr = 1; 2363 } 2364 2365 return 0; 2366 } 2367 2368 static int read_retpoline_hints(struct objtool_file *file) 2369 { 2370 struct section *sec; 2371 struct instruction *insn; 2372 struct reloc *reloc; 2373 2374 sec = find_section_by_name(file->elf, 
".rela.discard.retpoline_safe");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.retpoline_safe entry");
			return -1;
		}

		/* The hint is only meaningful on these instruction types. */
		if (insn->type != INSN_JUMP_DYNAMIC &&
		    insn->type != INSN_CALL_DYNAMIC &&
		    insn->type != INSN_RETURN &&
		    insn->type != INSN_NOP) {
			WARN_FUNC("retpoline_safe hint not an indirect jump/call/ret/nop",
				  insn->sec, insn->offset);
			return -1;
		}

		insn->retpoline_safe = true;
	}

	return 0;
}

/*
 * Apply the .discard.instr_begin/.discard.instr_end annotations: each
 * instr_end reloc decrements and each instr_begin reloc increments the
 * instruction's ->instr nesting counter, which the noinstr validation in
 * validate_branch() accumulates along each branch.
 */
static int read_instr_hints(struct objtool_file *file)
{
	struct section *sec;
	struct instruction *insn;
	struct reloc *reloc;

	sec = find_section_by_name(file->elf, ".rela.discard.instr_end");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.instr_end entry");
			return -1;
		}

		insn->instr--;
	}

	sec = find_section_by_name(file->elf, ".rela.discard.instr_begin");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.instr_begin entry");
			return -1;
		}

		insn->instr++;
	}

	return 0;
}

/*
 * Convert calls annotated in .discard.intra_function_calls from INSN_CALL
 * into INSN_JUMP_UNCONDITIONAL with a resolved jump_dest, so they are
 * followed as branches rather than treated as function calls.
 */
static int read_intra_function_calls(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	struct reloc *reloc;

	sec = find_section_by_name(file->elf, ".rela.discard.intra_function_calls");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		unsigned long dest_off;

		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s",
			     sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.intra_function_call entry");
			return -1;
		}

		if (insn->type != INSN_CALL) {
			WARN_FUNC("intra_function_call not a direct call",
				  insn->sec, insn->offset);
			return -1;
		}

		/*
		 * Treat intra-function CALLs as JMPs, but with a stack_op.
		 * See add_call_destinations(), which strips stack_ops from
		 * normal CALLs.
		 */
		insn->type = INSN_JUMP_UNCONDITIONAL;

		dest_off = arch_jump_destination(insn);
		insn->jump_dest = find_insn(file, insn->sec, dest_off);
		if (!insn->jump_dest) {
			WARN_FUNC("can't find call dest at %s+0x%lx",
				  insn->sec, insn->offset,
				  insn->sec->name, dest_off);
			return -1;
		}
	}

	return 0;
}

/*
 * Return true if name matches an instrumentation function, where calls to that
 * function from noinstr code can safely be removed, but compilers won't do so.
 */
static bool is_profiling_func(const char *name)
{
	/*
	 * Many compilers cannot disable KCOV with a function attribute.
	 */
	if (!strncmp(name, "__sanitizer_cov_", 16))
		return true;

	/*
	 * Some compilers currently do not remove __tsan_func_entry/exit nor
	 * __tsan_atomic_signal_fence (used for barrier instrumentation) with
	 * the __no_sanitize_thread attribute, remove them. Once the kernel's
	 * minimum Clang version is 14.0, this can be removed.
 */
	if (!strncmp(name, "__tsan_func_", 12) ||
	    !strcmp(name, "__tsan_atomic_signal_fence"))
		return true;

	return false;
}

/*
 * Tag global symbols that later passes treat specially: static_call
 * trampolines, retpoline/return thunks, the arch's ftrace entry symbol,
 * and compiler profiling/instrumentation helpers.
 */
static int classify_symbols(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;

	for_each_sec(file, sec) {
		list_for_each_entry(func, &sec->symbol_list, list) {
			if (func->bind != STB_GLOBAL)
				continue;

			if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR,
				     strlen(STATIC_CALL_TRAMP_PREFIX_STR)))
				func->static_call_tramp = true;

			if (arch_is_retpoline(func))
				func->retpoline_thunk = true;

			if (arch_is_rethunk(func))
				func->return_thunk = true;

			if (arch_ftrace_match(func->name))
				func->fentry = true;

			if (is_profiling_func(func->name))
				func->profiling_func = true;
		}
	}

	return 0;
}

static void mark_rodata(struct objtool_file *file)
{
	struct section *sec;
	bool found = false;

	/*
	 * Search for the following rodata sections, each of which can
	 * potentially contain jump tables:
	 *
	 * - .rodata: can contain GCC switch tables
	 * - .rodata.<func>: same, if -fdata-sections is being used
	 * - .rodata..c_jump_table: contains C annotated jump tables
	 *
	 * .rodata.str1.* sections are ignored; they don't contain jump tables.
	 */
	for_each_sec(file, sec) {
		if (!strncmp(sec->name, ".rodata", 7) &&
		    !strstr(sec->name, ".str1.")) {
			sec->rodata = true;
			found = true;
		}
	}

	file->rodata = found;
}

/*
 * Decode all sections and annotate their instructions.  The ordering of the
 * individual passes matters; the constraints are documented inline.
 */
static int decode_sections(struct objtool_file *file)
{
	int ret;

	mark_rodata(file);

	ret = init_pv_ops(file);
	if (ret)
		return ret;

	/*
	 * Must be before add_{jump_call}_destination.
	 */
	ret = classify_symbols(file);
	if (ret)
		return ret;

	ret = decode_instructions(file);
	if (ret)
		return ret;

	add_ignores(file);
	add_uaccess_safe(file);

	ret = add_ignore_alternatives(file);
	if (ret)
		return ret;

	/*
	 * Must be before read_unwind_hints() since that needs insn->noendbr.
	 */
	ret = read_noendbr_hints(file);
	if (ret)
		return ret;

	/*
	 * Must be before add_jump_destinations(), which depends on 'func'
	 * being set for alternatives, to enable proper sibling call detection.
	 */
	if (opts.stackval || opts.orc || opts.uaccess || opts.noinstr) {
		ret = add_special_section_alts(file);
		if (ret)
			return ret;
	}

	ret = add_jump_destinations(file);
	if (ret)
		return ret;

	/*
	 * Must be before add_call_destination(); it changes INSN_CALL to
	 * INSN_JUMP.
	 */
	ret = read_intra_function_calls(file);
	if (ret)
		return ret;

	ret = add_call_destinations(file);
	if (ret)
		return ret;

	/*
	 * Must be after add_call_destinations() such that it can override
	 * dead_end_function() marks.
 */
	ret = add_dead_ends(file);
	if (ret)
		return ret;

	ret = add_jump_table_alts(file);
	if (ret)
		return ret;

	ret = read_unwind_hints(file);
	if (ret)
		return ret;

	ret = read_retpoline_hints(file);
	if (ret)
		return ret;

	ret = read_instr_hints(file);
	if (ret)
		return ret;

	return 0;
}

/* Direct call whose destination symbol was marked ->fentry by classify_symbols(). */
static bool is_fentry_call(struct instruction *insn)
{
	if (insn->type == INSN_CALL &&
	    insn_call_dest(insn) &&
	    insn_call_dest(insn)->fentry)
		return true;

	return false;
}

/*
 * Return true if the current CFI state diverges from the initial function
 * CFI in any way: CFA base/offset, DRAP in effect, tracked stack size, or
 * any callee-saved register location.
 */
static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state)
{
	struct cfi_state *cfi = &state->cfi;
	int i;

	if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap)
		return true;

	if (cfi->cfa.offset != initial_func_cfi.cfa.offset)
		return true;

	if (cfi->stack_size != initial_func_cfi.cfa.offset)
		return true;

	for (i = 0; i < CFI_NUM_REGS; i++) {
		if (cfi->regs[i].base != initial_func_cfi.regs[i].base ||
		    cfi->regs[i].offset != initial_func_cfi.regs[i].offset)
			return true;
	}

	return false;
}

/* Is the register saved at the expected CFA-relative frame position? */
static bool check_reg_frame_pos(const struct cfi_reg *reg,
				int expected_offset)
{
	return reg->base == CFI_CFA &&
	       reg->offset == expected_offset;
}

/*
 * Return true if a conventional frame-pointer setup is in place: CFA based
 * on BP with BP/RA saved at the expected frame slots, or a DRAP frame with
 * BP-based BP.
 */
static bool has_valid_stack_frame(struct insn_state *state)
{
	struct cfi_state *cfi = &state->cfi;

	if (cfi->cfa.base == CFI_BP &&
	    check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) &&
	    check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8))
		return true;

	if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP)
		return true;

	return false;
}

/*
 * Reduced CFA tracking used while a REGS/REGS_PARTIAL unwind hint is in
 * effect: only SP-relative pushes, pops and immediate adds adjust the CFA
 * offset; everything else is ignored.
 */
static int update_cfi_state_regs(struct instruction *insn,
				 struct cfi_state *cfi,
				 struct stack_op *op)
{
	struct cfi_reg *cfa = &cfi->cfa;

	if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
		return 0;

	/* push */
	if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
		cfa->offset += 8;

	/* pop */
	if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
		cfa->offset -= 8;

	/* add immediate to sp */
	if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD &&
	    op->dest.reg == CFI_SP && op->src.reg == CFI_SP)
		cfa->offset -= op->src.offset;

	return 0;
}

/*
 * Record where a callee-saved register was stored.  Only the first save
 * location is kept (base stays CFI_UNDEFINED check); non-callee-saved
 * registers are ignored.
 */
static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset)
{
	if (arch_callee_saved_reg(reg) &&
	    cfi->regs[reg].base == CFI_UNDEFINED) {
		cfi->regs[reg].base = base;
		cfi->regs[reg].offset = offset;
	}
}

/* Reset a register's CFI back to its function-entry state. */
static void restore_reg(struct cfi_state *cfi, unsigned char reg)
{
	cfi->regs[reg].base = initial_func_cfi.regs[reg].base;
	cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset;
}

/*
 * A note about DRAP stack alignment:
 *
 * GCC has the concept of a DRAP register, which is used to help keep track of
 * the stack pointer when aligning the stack.  r10 or r13 is used as the DRAP
 * register.  The typical DRAP pattern is:
 *
 *   4c 8d 54 24 08		lea    0x8(%rsp),%r10
 *   48 83 e4 c0		and    $0xffffffffffffffc0,%rsp
 *   41 ff 72 f8		pushq  -0x8(%r10)
 *   55				push   %rbp
 *   48 89 e5			mov    %rsp,%rbp
 *				(more pushes)
 *   41 52			push   %r10
 *				...
 *   41 5a			pop    %r10
 *				(more pops)
 *   5d				pop    %rbp
 *   49 8d 62 f8		lea    -0x8(%r10),%rsp
 *   c3				retq
 *
 * There are some variations in the epilogues, like:
 *
 *   5b				pop    %rbx
 *   41 5a			pop    %r10
 *   41 5c			pop    %r12
 *   41 5d			pop    %r13
 *   41 5e			pop    %r14
 *   c9				leaveq
 *   49 8d 62 f8		lea    -0x8(%r10),%rsp
 *   c3				retq
 *
 * and:
 *
 *   4c 8b 55 e8		mov    -0x18(%rbp),%r10
 *   48 8b 5d e0		mov    -0x20(%rbp),%rbx
 *   4c 8b 65 f0		mov    -0x10(%rbp),%r12
 *   4c 8b 6d f8		mov    -0x8(%rbp),%r13
 *   c9				leaveq
 *   49 8d 62 f8		lea    -0x8(%r10),%rsp
 *   c3				retq
 *
 * Sometimes r13 is used as the DRAP register, in which case it's saved and
 * restored beforehand:
 *
 *   41 55			push   %r13
 *   4c 8d 6c 24 10		lea    0x10(%rsp),%r13
 *   48 83 e4 f0		and    $0xfffffffffffffff0,%rsp
 *				...
 *   49 8d 65 f0		lea    -0x10(%r13),%rsp
 *   41 5d			pop    %r13
 *   c3				retq
 */
/*
 * Apply a single stack_op to the CFI state.  Returns -1 (with a warning)
 * on stack operations objtool can't model.
 */
static int update_cfi_state(struct instruction *insn,
			    struct instruction *next_insn,
			    struct cfi_state *cfi, struct stack_op *op)
{
	struct cfi_reg *cfa = &cfi->cfa;
	struct cfi_reg *regs = cfi->regs;

	/* stack operations don't make sense with an undefined CFA */
	if (cfa->base == CFI_UNDEFINED) {
		if (insn_func(insn)) {
			WARN_FUNC("undefined stack state", insn->sec, insn->offset);
			return -1;
		}
		return 0;
	}

	if (cfi->type == UNWIND_HINT_TYPE_REGS ||
	    cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL)
		return update_cfi_state_regs(insn, cfi, op);

	switch (op->dest.type) {

	case OP_DEST_REG:
		switch (op->src.type) {

		case OP_SRC_REG:
			if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
			    cfa->base == CFI_SP &&
			    check_reg_frame_pos(&regs[CFI_BP], -cfa->offset)) {

				/* mov %rsp, %rbp */
				cfa->base = op->dest.reg;
				cfi->bp_scratch = false;
			}

			else if (op->src.reg == CFI_SP &&
				 op->dest.reg == CFI_BP && cfi->drap) {

				/* drap: mov %rsp, %rbp */
				regs[CFI_BP].base = CFI_BP;
				regs[CFI_BP].offset = -cfi->stack_size;
				cfi->bp_scratch = false;
			}

			else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {

				/*
				 * mov %rsp, %reg
				 *
				 * This is needed for the rare case where GCC
				 * does:
				 *
				 *   mov    %rsp, %rax
				 *   ...
				 *   mov    %rax, %rsp
				 */
				cfi->vals[op->dest.reg].base = CFI_CFA;
				cfi->vals[op->dest.reg].offset = -cfi->stack_size;
			}

			else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
				 (cfa->base == CFI_BP || cfa->base == cfi->drap_reg)) {

				/*
				 * mov %rbp, %rsp
				 *
				 * Restore the original stack pointer (Clang).
				 */
				cfi->stack_size = -cfi->regs[CFI_BP].offset;
			}

			else if (op->dest.reg == cfa->base) {

				/* mov %reg, %rsp */
				if (cfa->base == CFI_SP &&
				    cfi->vals[op->src.reg].base == CFI_CFA) {

					/*
					 * This is needed for the rare case
					 * where GCC does something dumb like:
					 *
					 *   lea    0x8(%rsp), %rcx
					 *   ...
					 *   mov    %rcx, %rsp
					 */
					cfa->offset = -cfi->vals[op->src.reg].offset;
					cfi->stack_size = cfa->offset;

				} else if (cfa->base == CFI_SP &&
					   cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
					   cfi->vals[op->src.reg].offset == cfa->offset) {

					/*
					 * Stack swizzle:
					 *
					 * 1: mov %rsp, (%[tos])
					 * 2: mov %[tos], %rsp
					 *    ...
					 * 3: pop %rsp
					 *
					 * Where:
					 *
					 * 1 - places a pointer to the previous
					 *     stack at the Top-of-Stack of the
					 *     new stack.
					 *
					 * 2 - switches to the new stack.
					 *
					 * 3 - pops the Top-of-Stack to restore
					 *     the original stack.
					 *
					 * Note: we set base to SP_INDIRECT
					 * here and preserve offset. Therefore
					 * when the unwinder reaches ToS it
					 * will dereference SP and then add the
					 * offset to find the next frame, IOW:
					 * (%rsp) + offset.
					 */
					cfa->base = CFI_SP_INDIRECT;

				} else {
					cfa->base = CFI_UNDEFINED;
					cfa->offset = 0;
				}
			}

			else if (op->dest.reg == CFI_SP &&
				 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
				 cfi->vals[op->src.reg].offset == cfa->offset) {

				/*
				 * The same stack swizzle case 2) as above. But
				 * because we can't change cfa->base, case 3)
				 * will become a regular POP. Pretend we're a
				 * PUSH so things don't go unbalanced.
				 */
				cfi->stack_size += 8;
			}


			break;

		case OP_SRC_ADD:
			if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) {

				/* add imm, %rsp */
				cfi->stack_size -= op->src.offset;
				if (cfa->base == CFI_SP)
					cfa->offset -= op->src.offset;
				break;
			}

			if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {

				/* lea disp(%rbp), %rsp */
				cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
				break;
			}

			if (!cfi->drap && op->src.reg == CFI_SP &&
			    op->dest.reg == CFI_BP && cfa->base == CFI_SP &&
			    check_reg_frame_pos(&regs[CFI_BP], -cfa->offset + op->src.offset)) {

				/* lea disp(%rsp), %rbp */
				cfa->base = CFI_BP;
				cfa->offset -= op->src.offset;
				cfi->bp_scratch = false;
				break;
			}

			if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {

				/* drap: lea disp(%rsp), %drap */
				cfi->drap_reg = op->dest.reg;

				/*
				 * lea disp(%rsp), %reg
				 *
				 * This is needed for the rare case where GCC
				 * does something dumb like:
				 *
				 *   lea    0x8(%rsp), %rcx
				 *   ...
				 *   mov    %rcx, %rsp
				 */
				cfi->vals[op->dest.reg].base = CFI_CFA;
				cfi->vals[op->dest.reg].offset = \
					-cfi->stack_size + op->src.offset;

				break;
			}

			if (cfi->drap && op->dest.reg == CFI_SP &&
			    op->src.reg == cfi->drap_reg) {

				/* drap: lea disp(%drap), %rsp */
				cfa->base = CFI_SP;
				cfa->offset = cfi->stack_size = -op->src.offset;
				cfi->drap_reg = CFI_UNDEFINED;
				cfi->drap = false;
				break;
			}

			/* A hinted next insn means the state gets reset anyway. */
			if (op->dest.reg == cfi->cfa.base && !(next_insn && next_insn->hint)) {
				WARN_FUNC("unsupported stack register modification",
					  insn->sec, insn->offset);
				return -1;
			}

			break;

		case OP_SRC_AND:
			if (op->dest.reg != CFI_SP ||
			    (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
			    (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
				WARN_FUNC("unsupported stack pointer realignment",
					  insn->sec, insn->offset);
				return -1;
			}

			if (cfi->drap_reg != CFI_UNDEFINED) {
				/* drap: and imm, %rsp */
				cfa->base = cfi->drap_reg;
				cfa->offset = cfi->stack_size = 0;
				cfi->drap = true;
			}

			/*
			 * Older versions of GCC (4.8ish) realign the stack
			 * without DRAP, with a frame pointer.
			 */

			break;

		case OP_SRC_POP:
		case OP_SRC_POPF:
			if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) {

				/* pop %rsp; # restore from a stack swizzle */
				cfa->base = CFI_SP;
				break;
			}

			if (!cfi->drap && op->dest.reg == cfa->base) {

				/* pop %rbp */
				cfa->base = CFI_SP;
			}

			if (cfi->drap && cfa->base == CFI_BP_INDIRECT &&
			    op->dest.reg == cfi->drap_reg &&
			    cfi->drap_offset == -cfi->stack_size) {

				/* drap: pop %drap */
				cfa->base = cfi->drap_reg;
				cfa->offset = 0;
				cfi->drap_offset = -1;

			} else if (cfi->stack_size == -regs[op->dest.reg].offset) {

				/* pop %reg */
				restore_reg(cfi, op->dest.reg);
			}

			cfi->stack_size -= 8;
			if (cfa->base == CFI_SP)
				cfa->offset -= 8;

			break;

		case OP_SRC_REG_INDIRECT:
			if (!cfi->drap && op->dest.reg == cfa->base &&
			    op->dest.reg == CFI_BP) {

				/* mov disp(%rsp), %rbp */
				cfa->base = CFI_SP;
				cfa->offset = cfi->stack_size;
			}

			if (cfi->drap && op->src.reg == CFI_BP &&
			    op->src.offset == cfi->drap_offset) {

				/* drap: mov disp(%rbp), %drap */
				cfa->base = cfi->drap_reg;
				cfa->offset = 0;
				cfi->drap_offset = -1;
			}

			if (cfi->drap && op->src.reg == CFI_BP &&
			    op->src.offset == regs[op->dest.reg].offset) {

				/* drap: mov disp(%rbp), %reg */
				restore_reg(cfi, op->dest.reg);

			} else if (op->src.reg == cfa->base &&
				   op->src.offset == regs[op->dest.reg].offset + cfa->offset) {

				/* mov disp(%rbp), %reg */
				/* mov disp(%rsp), %reg */
				restore_reg(cfi, op->dest.reg);

			} else if (op->src.reg == CFI_SP &&
				   op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) {

				/* mov disp(%rsp), %reg */
				restore_reg(cfi, op->dest.reg);
			}

			break;

		default:
			WARN_FUNC("unknown stack-related instruction",
				  insn->sec, insn->offset);
			return -1;
		}

		break;

	case OP_DEST_PUSH:
	case OP_DEST_PUSHF:
		cfi->stack_size += 8;
		if (cfa->base == CFI_SP)
			cfa->offset += 8;

		if (op->src.type != OP_SRC_REG)
			break;

		if (cfi->drap) {
			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {

				/* drap: push %drap */
				cfa->base = CFI_BP_INDIRECT;
				cfa->offset = -cfi->stack_size;

				/* save drap so we know when to restore it */
				cfi->drap_offset = -cfi->stack_size;

			} else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) {

				/* drap: push %rbp */
				cfi->stack_size = 0;

			} else {

				/* drap: push %reg */
				save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size);
			}

		} else {

			/* push %reg */
			save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size);
		}

		/* detect when asm code uses rbp as a scratch register */
		if (opts.stackval && insn_func(insn) && op->src.reg == CFI_BP &&
		    cfa->base != CFI_BP)
			cfi->bp_scratch = true;
		break;

	case OP_DEST_REG_INDIRECT:

		if (cfi->drap) {
			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {

				/* drap: mov %drap, disp(%rbp) */
				cfa->base = CFI_BP_INDIRECT;
				cfa->offset = op->dest.offset;

				/* save drap offset so we know when to restore it */
				cfi->drap_offset = op->dest.offset;
			} else {

				/* drap: mov reg, disp(%rbp) */
				save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset);
			}

		} else if (op->dest.reg == cfa->base) {

			/* mov reg, disp(%rbp) */
			/* mov reg, disp(%rsp) */
			save_reg(cfi, op->src.reg, CFI_CFA,
				 op->dest.offset - cfi->cfa.offset);

		} else if (op->dest.reg == CFI_SP) {

			/* mov reg, disp(%rsp) */
			save_reg(cfi, op->src.reg, CFI_CFA,
				 op->dest.offset - cfi->stack_size);

		} else if (op->src.reg == CFI_SP && op->dest.offset == 0) {

			/* mov %rsp, (%reg); # setup a stack swizzle. */
			cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT;
			cfi->vals[op->dest.reg].offset = cfa->offset;
		}

		break;

	case OP_DEST_MEM:
		if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
			WARN_FUNC("unknown stack-related memory operation",
				  insn->sec, insn->offset);
			return -1;
		}

		/* pop mem */
		cfi->stack_size -= 8;
		if (cfa->base == CFI_SP)
			cfa->offset -= 8;

		break;

	default:
		WARN_FUNC("unknown stack-related instruction",
			  insn->sec, insn->offset);
		return -1;
	}

	return 0;
}

/*
 * The stack layouts of alternatives instructions can sometimes diverge when
 * they have stack modifications. That's fine as long as the potential stack
 * layouts don't conflict at any given potential instruction boundary.
 *
 * Flatten the CFIs of the different alternative code streams (both original
 * and replacement) into a single shared CFI array which can be used to detect
 * conflicts and nicely feed a linear array of ORC entries to the unwinder.
 */
static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn)
{
	struct cfi_state **alt_cfi;
	int group_off;

	if (!insn->alt_group)
		return 0;

	if (!insn->cfi) {
		WARN("CFI missing");
		return -1;
	}

	alt_cfi = insn->alt_group->cfi;
	group_off = insn->offset - insn->alt_group->first_insn->offset;

	if (!alt_cfi[group_off]) {
		/* First stream to reach this offset: record its CFI. */
		alt_cfi[group_off] = insn->cfi;
	} else {
		/* Subsequent streams must agree with the recorded CFI. */
		if (cficmp(alt_cfi[group_off], insn->cfi)) {
			struct alt_group *orig_group = insn->alt_group->orig_group ?: insn->alt_group;
			struct instruction *orig = orig_group->first_insn;
			char *where = offstr(insn->sec, insn->offset);
			WARN_FUNC("stack layout conflict in alternatives: %s",
				  orig->sec, orig->offset, where);
			free(where);
			return -1;
		}
	}

	return 0;
}

/*
 * Apply all of an instruction's stack_ops to the branch state.  Inside
 * alternatives, PUSHF/POPF also save/restore the uaccess flag on a small
 * shift-register stack (uaccess_stack) so AC state survives the pushf/popf
 * pair.
 */
static int handle_insn_ops(struct instruction *insn,
			   struct instruction *next_insn,
			   struct insn_state *state)
{
	struct stack_op *op;

	for (op = insn->stack_ops; op; op = op->next) {

		if (update_cfi_state(insn, next_insn, &state->cfi, op))
			return 1;

		if (!insn->alt_group)
			continue;

		if (op->dest.type == OP_DEST_PUSHF) {
			if (!state->uaccess_stack) {
				state->uaccess_stack = 1;
			} else if (state->uaccess_stack >> 31) {
				WARN_FUNC("PUSHF stack exhausted",
					  insn->sec, insn->offset);
				return 1;
			}
			state->uaccess_stack <<= 1;
			state->uaccess_stack |= state->uaccess;
		}

		if (op->src.type == OP_SRC_POPF) {
			if (state->uaccess_stack) {
				state->uaccess = state->uaccess_stack & 1;
				state->uaccess_stack >>= 1;
				if (state->uaccess_stack == 1)
					state->uaccess_stack = 0;
			}
		}
	}

	return 0;
}

/*
 * Compare an instruction's recorded CFI against the incoming branch state,
 * warning (with details) on the first mismatching component.  Returns true
 * only when the states fully match.
 */
static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
{
	struct cfi_state *cfi1 = insn->cfi;
	int i;

	if (!cfi1) {
		WARN("CFI missing");
		return false;
	}

	if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) {

		WARN_FUNC("stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
			  insn->sec, insn->offset,
			  cfi1->cfa.base, cfi1->cfa.offset,
			  cfi2->cfa.base, cfi2->cfa.offset);

	} else if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
		for (i = 0; i < CFI_NUM_REGS; i++) {
			if (!memcmp(&cfi1->regs[i], &cfi2->regs[i],
				    sizeof(struct cfi_reg)))
				continue;

			WARN_FUNC("stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
				  insn->sec, insn->offset,
				  i, cfi1->regs[i].base, cfi1->regs[i].offset,
				  i, cfi2->regs[i].base, cfi2->regs[i].offset);
			break;
		}

	} else if (cfi1->type != cfi2->type) {

		WARN_FUNC("stack state mismatch: type1=%d type2=%d",
			  insn->sec, insn->offset, cfi1->type, cfi2->type);

	} else if (cfi1->drap != cfi2->drap ||
		   (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) ||
		   (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) {

		WARN_FUNC("stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
			  insn->sec, insn->offset,
			  cfi1->drap, cfi1->drap_reg, cfi1->drap_offset,
			  cfi2->drap, cfi2->drap_reg, cfi2->drap_offset);

	} else
		return true;

	return false;
}

/* NULL func (e.g. indirect destination) is treated as not uaccess-safe. */
static inline bool func_uaccess_safe(struct symbol *func)
{
	if (func)
		return func->uaccess_safe;

	return false;
}

/*
 * Best-effort destination name for warning messages.  For pv_ops indirect
 * calls, resolves the slot index from the relocation.  pvname[19] fits
 * "pv_ops[" + a 10-digit int + "]" + NUL; note it's static, so the returned
 * pointer is only valid until the next call.
 */
static inline const char *call_dest_name(struct instruction *insn)
{
	static char pvname[19];
	struct reloc *rel;
	int idx;

	if (insn_call_dest(insn))
		return insn_call_dest(insn)->name;

	rel = insn_reloc(NULL, insn);
	if (rel && !strcmp(rel->sym->name, "pv_ops")) {
		idx = (rel->addend / sizeof(void *));
		snprintf(pvname, sizeof(pvname), "pv_ops[%d]", idx);
		return pvname;
	}

	return
"{dynamic}";
}

/*
 * An indirect call through pv_ops is considered noinstr-clean when every
 * registered target for that slot lives in a noinstr section.  The result
 * is cached in file->pv_ops[idx].clean (set optimistically, cleared while
 * scanning targets).
 */
static bool pv_call_dest(struct objtool_file *file, struct instruction *insn)
{
	struct symbol *target;
	struct reloc *rel;
	int idx;

	rel = insn_reloc(file, insn);
	if (!rel || strcmp(rel->sym->name, "pv_ops"))
		return false;

	idx = (arch_dest_reloc_offset(rel->addend) / sizeof(void *));

	if (file->pv_ops[idx].clean)
		return true;

	file->pv_ops[idx].clean = true;

	list_for_each_entry(target, &file->pv_ops[idx].targets, pv_target) {
		if (!target->sec->noinstr) {
			WARN("pv_ops[%d]: %s", idx, target->name);
			file->pv_ops[idx].clean = false;
		}
	}

	return file->pv_ops[idx].clean;
}

/* May a call to 'func' (NULL for indirect) be made from noinstr code? */
static inline bool noinstr_call_dest(struct objtool_file *file,
				     struct instruction *insn,
				     struct symbol *func)
{
	/*
	 * We can't deal with indirect function calls at present;
	 * assume they're instrumented.
	 */
	if (!func) {
		if (file->pv_ops)
			return pv_call_dest(file, insn);

		return false;
	}

	/*
	 * If the symbol is from a noinstr section; we good.
	 */
	if (func->sec->noinstr)
		return true;

	/*
	 * If the symbol is a static_call trampoline, we can't tell.
	 */
	if (func->static_call_tramp)
		return true;

	/*
	 * The __ubsan_handle_*() calls are like WARN(), they only happen when
	 * something 'BAD' happened. At the risk of taking the machine down,
	 * let them proceed to get the message out.
	 */
	if (!strncmp(func->name, "__ubsan_handle_", 15))
		return true;

	return false;
}

/*
 * Check a call against the current branch state: noinstr containment,
 * UACCESS (AC set) and DF constraints.  Returns non-zero on violation.
 */
static int validate_call(struct objtool_file *file,
			 struct instruction *insn,
			 struct insn_state *state)
{
	if (state->noinstr && state->instr <= 0 &&
	    !noinstr_call_dest(file, insn, insn_call_dest(insn))) {
		WARN_FUNC("call to %s() leaves .noinstr.text section",
			  insn->sec, insn->offset, call_dest_name(insn));
		return 1;
	}

	if (state->uaccess && !func_uaccess_safe(insn_call_dest(insn))) {
		WARN_FUNC("call to %s() with UACCESS enabled",
			  insn->sec, insn->offset, call_dest_name(insn));
		return 1;
	}

	if (state->df) {
		WARN_FUNC("call to %s() with DF set",
			  insn->sec, insn->offset, call_dest_name(insn));
		return 1;
	}

	return 0;
}

/*
 * A sibling call (tail call) must additionally leave the stack frame in its
 * function-entry state, since the callee will return on the caller's behalf.
 */
static int validate_sibling_call(struct objtool_file *file,
				 struct instruction *insn,
				 struct insn_state *state)
{
	if (insn_func(insn) && has_modified_stack_frame(insn, state)) {
		WARN_FUNC("sibling call from callable instruction with modified stack frame",
			  insn->sec, insn->offset);
		return 1;
	}

	return validate_call(file, insn, state);
}

/*
 * Check a RETURN against the branch state: instrumentation balance for
 * noinstr, UACCESS symmetry with the function's uaccess_safe marking, DF
 * cleared, and (below) an unmodified stack frame.
 */
static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state)
{
	if (state->noinstr && state->instr > 0) {
		WARN_FUNC("return with instrumentation enabled",
			  insn->sec, insn->offset);
		return 1;
	}

	if (state->uaccess && !func_uaccess_safe(func)) {
		WARN_FUNC("return with UACCESS enabled",
			  insn->sec, insn->offset);
		return 1;
	}

	if (!state->uaccess && func_uaccess_safe(func)) {
		WARN_FUNC("return with UACCESS disabled from a UACCESS-safe function",
			  insn->sec, insn->offset);
		return 1;
	}

	if (state->df) {
		WARN_FUNC("return with DF set",
			  insn->sec, insn->offset);
		return 1;
	}

	if (func && has_modified_stack_frame(insn, state)) {
		WARN_FUNC("return with modified stack frame",
			  insn->sec, insn->offset);
		return 1;
	}

	if (state->cfi.bp_scratch) {
		WARN_FUNC("BP used as a scratch register",
			  insn->sec, insn->offset);
		return 1;
	}

	return 0;
}

static struct instruction *next_insn_to_validate(struct objtool_file *file,
						 struct instruction *insn)
{
	struct alt_group *alt_group = insn->alt_group;

	/*
	 * Simulate the fact that alternatives are patched in-place. When the
	 * end of a replacement alt_group is reached, redirect objtool flow to
	 * the end of the original alt_group.
	 *
	 * insn->alts->insn -> alt_group->first_insn
	 *		       ...
	 *		       alt_group->last_insn
	 *		       [alt_group->nop] -> next(orig_group->last_insn)
	 */
	if (alt_group) {
		if (alt_group->nop) {
			/* ->nop implies ->orig_group */
			if (insn == alt_group->last_insn)
				return alt_group->nop;
			if (insn == alt_group->nop)
				goto next_orig;
		}
		if (insn == alt_group->last_insn && alt_group->orig_group)
			goto next_orig;
	}

	return next_insn_same_sec(file, insn);

next_orig:
	return next_insn_same_sec(file, alt_group->orig_group->last_insn);
}

/*
 * Follow the branch starting at the given instruction, and recursively follow
 * any other branches (jumps).  Meanwhile, track the frame pointer state at
 * each instruction and validate all the rules described in
 * tools/objtool/Documentation/objtool.txt.
 */
static int validate_branch(struct objtool_file *file, struct symbol *func,
			   struct instruction *insn, struct insn_state state)
{
	struct alternative *alt;
	struct instruction *next_insn, *prev_insn = NULL;
	struct section *sec;
	u8 visited;
	int ret;

	sec = insn->sec;

	while (1) {
		next_insn = next_insn_to_validate(file, insn);

		if (func && insn_func(insn) && func != insn_func(insn)->pfunc) {
			/* Ignore KCFI type preambles, which always fall through */
			if (!strncmp(func->name, "__cfi_", 6) ||
			    !strncmp(func->name, "__pfx_", 6))
				return 0;

			WARN("%s() falls through to next function %s()",
			     func->name, insn_func(insn)->name);
			return 1;
		}

		if (func && insn->ignore) {
			WARN_FUNC("BUG: why am I validating an ignored function?",
				  sec, insn->offset);
			return 1;
		}

		/* One visited bit per uaccess state (AC clear/set). */
		visited = VISITED_BRANCH << state.uaccess;
		if (insn->visited & VISITED_BRANCH_MASK) {
			/* Re-visit: the recorded CFI must match ours. */
			if (!insn->hint && !insn_cfi_match(insn, &state.cfi))
				return 1;

			if (insn->visited & visited)
				return 0;
		} else {
			nr_insns_visited++;
		}

		if (state.noinstr)
			state.instr += insn->instr;

		if (insn->hint) {
			if (insn->restore) {
				struct instruction *save_insn, *i;

				/* Find the matching CFI save, scanning backwards. */
				i = insn;
				save_insn = NULL;

				sym_for_each_insn_continue_reverse(file, func, i) {
					if (i->save) {
						save_insn = i;
						break;
					}
				}

				if (!save_insn) {
					WARN_FUNC("no corresponding CFI save for CFI restore",
						  sec, insn->offset);
					return 1;
				}

				if (!save_insn->visited) {
					WARN_FUNC("objtool isn't smart enough to handle this CFI save/restore combo",
						  sec, insn->offset);
					return 1;
				}

				insn->cfi = save_insn->cfi;
				nr_cfi_reused++;
			}

			state.cfi = *insn->cfi;
		} else {
			/* XXX track if we actually changed state.cfi */

			if (prev_insn && !cficmp(prev_insn->cfi, &state.cfi)) {
				insn->cfi = prev_insn->cfi;
				nr_cfi_reused++;
			} else {
				insn->cfi = cfi_hash_find_or_add(&state.cfi);
			}
		}

		insn->visited |= visited;

		if (propagate_alt_cfi(file, insn))
			return 1;

		if (!insn->ignore_alts && insn->alts) {
			bool skip_orig = false;

			/* Validate every alternative replacement stream. */
			for (alt = insn->alts; alt; alt = alt->next) {
				if (alt->skip_orig)
					skip_orig = true;

				ret = validate_branch(file, func, alt->insn, state);
				if (ret) {
					if (opts.backtrace)
						BT_FUNC("(alt)", insn);
					return ret;
				}
			}

			if (skip_orig)
				return 0;
		}

		if (handle_insn_ops(insn, next_insn, &state))
			return 1;

		switch (insn->type) {

		case INSN_RETURN:
			return validate_return(func, insn, &state);

		case INSN_CALL:
		case INSN_CALL_DYNAMIC:
			ret = validate_call(file, insn, &state);
			if (ret)
				return ret;

			if (opts.stackval && func && !is_fentry_call(insn) &&
			    !has_valid_stack_frame(&state)) {
				WARN_FUNC("call without frame pointer save/setup",
					  sec, insn->offset);
				return 1;
			}

			if (insn->dead_end)
				return 0;

			break;

		case INSN_JUMP_CONDITIONAL:
		case INSN_JUMP_UNCONDITIONAL:
			if (is_sibling_call(insn)) {
				ret = validate_sibling_call(file, insn, &state);
				if (ret)
					return ret;

			} else if (insn->jump_dest) {
				ret = validate_branch(file, func,
						      insn->jump_dest, state);
				if (ret) {
					if (opts.backtrace)
						BT_FUNC("(branch)", insn);
					return ret;
				}
			}

			if (insn->type == INSN_JUMP_UNCONDITIONAL)
				return 0;

			break;

		case INSN_JUMP_DYNAMIC:
		case INSN_JUMP_DYNAMIC_CONDITIONAL:
			if (is_sibling_call(insn)) {
				ret = validate_sibling_call(file, insn, &state);
				if (ret)
					return ret;
			}

			if (insn->type == INSN_JUMP_DYNAMIC)
				return 0;

			break;

		case INSN_CONTEXT_SWITCH:
			if (func && (!next_insn || !next_insn->hint)) {
				WARN_FUNC("unsupported instruction in callable function",
					  sec, insn->offset);
				return 1;
			}
			return 0;

		case INSN_STAC:
			if (state.uaccess) {
				WARN_FUNC("recursive UACCESS enable", sec, insn->offset);
				return 1;
			}

			state.uaccess = true;
			break;

		case INSN_CLAC:
			if (!state.uaccess && func) {
				WARN_FUNC("redundant UACCESS disable", sec, insn->offset);
				return 1;
			}

			if (func_uaccess_safe(func) && !state.uaccess_stack) {
				WARN_FUNC("UACCESS-safe disables UACCESS", sec, insn->offset);
				return 1;
			}

			state.uaccess = false;
			break;

		case INSN_STD:
			if (state.df) {
				WARN_FUNC("recursive STD", sec, insn->offset);
				return 1;
			}

			state.df = true;
			break;

		case INSN_CLD:
			if (!state.df && func) {
				WARN_FUNC("redundant CLD", sec, insn->offset);
				return 1;
			}

			state.df = false;
			break;

		default:
			break;
		}

		if (insn->dead_end)
			return 0;

		if (!next_insn) {
			if (state.cfi.cfa.base == CFI_UNDEFINED)
				return 0;
			WARN("%s: unexpected end of section", sec->name);
			return 1;
		}

		prev_insn = insn;
		insn = next_insn;
	}

	return 0;
}

/* Validate one unwind-hinted entry point, unless already visited/ignored. */
static int validate_unwind_hint(struct objtool_file *file,
				struct instruction *insn,
				struct insn_state *state)
{
	if (insn->hint && !insn->visited && !insn->ignore) {
		int ret = validate_branch(file, insn_func(insn), insn, *state);
		if (ret && opts.backtrace)
			BT_FUNC("<=== (hint)", insn);
		return ret;
	}

	return 0;
}

/* Validate all unwind-hinted entry points in 'sec' (or the whole file if NULL). */
static int validate_unwind_hints(struct objtool_file *file, struct section *sec)
{
	struct instruction *insn;
	struct insn_state state;
	int warnings =
0; 3846 3847 if (!file->hints) 3848 return 0; 3849 3850 init_insn_state(file, &state, sec); 3851 3852 if (sec) { 3853 sec_for_each_insn(file, sec, insn) 3854 warnings += validate_unwind_hint(file, insn, &state); 3855 } else { 3856 for_each_insn(file, insn) 3857 warnings += validate_unwind_hint(file, insn, &state); 3858 } 3859 3860 return warnings; 3861 } 3862 3863 /* 3864 * Validate rethunk entry constraint: must untrain RET before the first RET. 3865 * 3866 * Follow every branch (intra-function) and ensure ANNOTATE_UNRET_END comes 3867 * before an actual RET instruction. 3868 */ 3869 static int validate_entry(struct objtool_file *file, struct instruction *insn) 3870 { 3871 struct instruction *next, *dest; 3872 int ret, warnings = 0; 3873 3874 for (;;) { 3875 next = next_insn_to_validate(file, insn); 3876 3877 if (insn->visited & VISITED_ENTRY) 3878 return 0; 3879 3880 insn->visited |= VISITED_ENTRY; 3881 3882 if (!insn->ignore_alts && insn->alts) { 3883 struct alternative *alt; 3884 bool skip_orig = false; 3885 3886 for (alt = insn->alts; alt; alt = alt->next) { 3887 if (alt->skip_orig) 3888 skip_orig = true; 3889 3890 ret = validate_entry(file, alt->insn); 3891 if (ret) { 3892 if (opts.backtrace) 3893 BT_FUNC("(alt)", insn); 3894 return ret; 3895 } 3896 } 3897 3898 if (skip_orig) 3899 return 0; 3900 } 3901 3902 switch (insn->type) { 3903 3904 case INSN_CALL_DYNAMIC: 3905 case INSN_JUMP_DYNAMIC: 3906 case INSN_JUMP_DYNAMIC_CONDITIONAL: 3907 WARN_FUNC("early indirect call", insn->sec, insn->offset); 3908 return 1; 3909 3910 case INSN_JUMP_UNCONDITIONAL: 3911 case INSN_JUMP_CONDITIONAL: 3912 if (!is_sibling_call(insn)) { 3913 if (!insn->jump_dest) { 3914 WARN_FUNC("unresolved jump target after linking?!?", 3915 insn->sec, insn->offset); 3916 return -1; 3917 } 3918 ret = validate_entry(file, insn->jump_dest); 3919 if (ret) { 3920 if (opts.backtrace) { 3921 BT_FUNC("(branch%s)", insn, 3922 insn->type == INSN_JUMP_CONDITIONAL ? 
"-cond" : ""); 3923 } 3924 return ret; 3925 } 3926 3927 if (insn->type == INSN_JUMP_UNCONDITIONAL) 3928 return 0; 3929 3930 break; 3931 } 3932 3933 /* fallthrough */ 3934 case INSN_CALL: 3935 dest = find_insn(file, insn_call_dest(insn)->sec, 3936 insn_call_dest(insn)->offset); 3937 if (!dest) { 3938 WARN("Unresolved function after linking!?: %s", 3939 insn_call_dest(insn)->name); 3940 return -1; 3941 } 3942 3943 ret = validate_entry(file, dest); 3944 if (ret) { 3945 if (opts.backtrace) 3946 BT_FUNC("(call)", insn); 3947 return ret; 3948 } 3949 /* 3950 * If a call returns without error, it must have seen UNTRAIN_RET. 3951 * Therefore any non-error return is a success. 3952 */ 3953 return 0; 3954 3955 case INSN_RETURN: 3956 WARN_FUNC("RET before UNTRAIN", insn->sec, insn->offset); 3957 return 1; 3958 3959 case INSN_NOP: 3960 if (insn->retpoline_safe) 3961 return 0; 3962 break; 3963 3964 default: 3965 break; 3966 } 3967 3968 if (!next) { 3969 WARN_FUNC("teh end!", insn->sec, insn->offset); 3970 return -1; 3971 } 3972 insn = next; 3973 } 3974 3975 return warnings; 3976 } 3977 3978 /* 3979 * Validate that all branches starting at 'insn->entry' encounter UNRET_END 3980 * before RET. 
 */
static int validate_unret(struct objtool_file *file)
{
	struct instruction *insn;
	int ret, warnings = 0;

	for_each_insn(file, insn) {
		if (!insn->entry)
			continue;

		ret = validate_entry(file, insn);
		if (ret < 0) {
			WARN_FUNC("Failed UNRET validation", insn->sec, insn->offset);
			return ret;
		}
		warnings += ret;
	}

	return warnings;
}

/*
 * In a retpoline/rethunk build, flag every indirect jump/call and (with
 * opts.rethunk) every bare RET that isn't explicitly annotated
 * retpoline-safe.  Init text is exempt.  Returns the warning count.
 */
static int validate_retpoline(struct objtool_file *file)
{
	struct instruction *insn;
	int warnings = 0;

	for_each_insn(file, insn) {
		if (insn->type != INSN_JUMP_DYNAMIC &&
		    insn->type != INSN_CALL_DYNAMIC &&
		    insn->type != INSN_RETURN)
			continue;

		if (insn->retpoline_safe)
			continue;

		/*
		 * .init.text code is ran before userspace and thus doesn't
		 * strictly need retpolines, except for modules which are
		 * loaded late, they very much do need retpoline in their
		 * .init.text
		 */
		if (insn->sec->init)
			continue;

		if (insn->type == INSN_RETURN) {
			if (opts.rethunk) {
				WARN_FUNC("'naked' return found in RETHUNK build",
					  insn->sec, insn->offset);
			} else
				continue;
		} else {
			WARN_FUNC("indirect %s found in RETPOLINE build",
				  insn->sec, insn->offset,
				  insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
		}

		warnings++;
	}

	return warnings;
}

/* Is this a KASAN no-return-handling call? */
static bool is_kasan_insn(struct instruction *insn)
{
	return (insn->type == INSN_CALL &&
		!strcmp(insn_call_dest(insn)->name, "__asan_handle_no_return"));
}

/* Is this a UBSAN unreachable-builtin call? */
static bool is_ubsan_insn(struct instruction *insn)
{
	return (insn->type == INSN_CALL &&
		!strcmp(insn_call_dest(insn)->name,
			"__ubsan_handle_builtin_unreachable"));
}

/*
 * Decide whether an unvisited (unreachable) instruction is benign and should
 * not be warned about: NOPs/traps, alternative replacement text, dead weak
 * symbol remnants, static call trampolines, and compiler-generated
 * UBSAN/KASAN/BUG() tails.
 */
static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn)
{
	int i;
	struct instruction *prev_insn;

	if (insn->ignore || insn->type == INSN_NOP || insn->type == INSN_TRAP)
		return true;

	/*
	 * Ignore alternative replacement instructions.  This can happen
	 * when a whitelisted function uses one of the ALTERNATIVE macros.
	 */
	if (!strcmp(insn->sec->name, ".altinstr_replacement") ||
	    !strcmp(insn->sec->name, ".altinstr_aux"))
		return true;

	/*
	 * Whole archive runs might encounter dead code from weak symbols.
	 * This is where the linker will have dropped the weak symbol in
	 * favour of a regular symbol, but leaves the code in place.
	 *
	 * In this case we'll find a piece of code (whole function) that is not
	 * covered by a !section symbol.  Ignore them.
	 */
	if (opts.link && !insn_func(insn)) {
		int size = find_symbol_hole_containing(insn->sec, insn->offset);
		unsigned long end = insn->offset + size;

		if (!size) /* not a hole */
			return false;

		if (size < 0) /* hole until the end */
			return true;

		sec_for_each_insn_continue(file, insn) {
			/*
			 * If we reach a visited instruction at or before the
			 * end of the hole, ignore the unreachable.
			 */
			if (insn->visited)
				return true;

			if (insn->offset >= end)
				break;

			/*
			 * If this hole jumps to a .cold function, mark it ignore too.
			 */
			if (insn->jump_dest && insn_func(insn->jump_dest) &&
			    strstr(insn_func(insn->jump_dest)->name, ".cold")) {
				struct instruction *dest = insn->jump_dest;
				func_for_each_insn(file, insn_func(dest), dest)
					dest->ignore = true;
			}
		}

		return false;
	}

	if (!insn_func(insn))
		return false;

	if (insn_func(insn)->static_call_tramp)
		return true;

	/*
	 * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
	 * __builtin_unreachable().  The BUG() macro has an unreachable() after
	 * the UD2, which causes GCC's undefined trap logic to emit another UD2
	 * (or occasionally a JMP to UD2).
	 *
	 * It may also insert a UD2 after calling a __noreturn function.
	 */
	/*
	 * NOTE(review): prev_insn is dereferenced without a NULL check; this
	 * presumably relies on insn never being the first instruction of its
	 * section here (it is inside a function) — confirm upstream.
	 */
	prev_insn = prev_insn_same_sec(file, insn);
	if ((prev_insn->dead_end ||
	     dead_end_function(file, insn_call_dest(prev_insn))) &&
	    (insn->type == INSN_BUG ||
	     (insn->type == INSN_JUMP_UNCONDITIONAL &&
	      insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
		return true;

	/*
	 * Check if this (or a subsequent) instruction is related to
	 * CONFIG_UBSAN or CONFIG_KASAN.
	 *
	 * End the search at 5 instructions to avoid going into the weeds.
	 */
	for (i = 0; i < 5; i++) {

		if (is_kasan_insn(insn) || is_ubsan_insn(insn))
			return true;

		if (insn->type == INSN_JUMP_UNCONDITIONAL) {
			if (insn->jump_dest &&
			    insn_func(insn->jump_dest) == insn_func(insn)) {
				insn = insn->jump_dest;
				continue;
			}

			break;
		}

		if (insn->offset + insn->len >= insn_func(insn)->offset + insn_func(insn)->len)
			break;

		insn = next_insn_same_sec(file, insn);
	}

	return false;
}

/*
 * If opts.prefix is set, look for exactly opts.prefix bytes of NOP padding
 * before @func and, if found, create a __pfx_ prefix symbol covering it.
 * Always returns 0.
 */
static int add_prefix_symbol(struct objtool_file *file, struct symbol *func,
			     struct instruction *insn)
{
	if (!opts.prefix)
		return 0;

	/* Walk backwards through the NOP padding preceding the function. */
	for (;;) {
		struct instruction *prev = prev_insn_same_sec(file, insn);
		u64 offset;

		if (!prev)
			break;

		if (prev->type != INSN_NOP)
			break;

		offset = func->offset - prev->offset;
		if (offset >= opts.prefix) {
			if (offset == opts.prefix) {
				/*
				 * Since the sec->symbol_list is ordered by
				 * offset (see elf_add_symbol()) the added
				 * symbol will not be seen by the iteration in
				 * validate_section().
				 *
				 * Hence the lack of list_for_each_entry_safe()
				 * there.
				 *
				 * The direct consequence is that prefix symbols
				 * don't get visited (because pointless), except
				 * for the logic in ignore_unreachable_insn()
				 * that needs the terminating insn to be visited
				 * otherwise it will report the hole.
				 *
				 * Hence mark the first instruction of the
				 * prefix symbol as visited.
				 */
				prev->visited |= VISITED_BRANCH;
				elf_create_prefix_symbol(file->elf, func, opts.prefix);
			}
			break;
		}
		insn = prev;
	}

	return 0;
}

/*
 * Validate one STT_FUNC symbol: check it has a size, skip aliases and
 * sub-functions, then run validate_branch() from its first instruction.
 * Returns non-zero on warnings/errors.
 */
static int validate_symbol(struct objtool_file *file, struct section *sec,
			   struct symbol *sym, struct insn_state *state)
{
	struct instruction *insn;
	int ret;

	if (!sym->len) {
		WARN("%s() is missing an ELF size annotation", sym->name);
		return 1;
	}

	/* Only validate the parent/canonical symbol, not aliases. */
	if (sym->pfunc != sym || sym->alias != sym)
		return 0;

	insn = find_insn(file, sec, sym->offset);
	if (!insn || insn->ignore || insn->visited)
		return 0;

	add_prefix_symbol(file, sym, insn);

	state->uaccess = sym->uaccess_safe;

	ret = validate_branch(file, insn_func(insn), insn, *state);
	if (ret && opts.backtrace)
		BT_FUNC("<=== (sym)", insn);
	return ret;
}

/*
 * Validate every function symbol in @sec with a fresh per-function
 * insn_state.  Returns the accumulated warning count.
 */
static int validate_section(struct objtool_file *file, struct section *sec)
{
	struct insn_state state;
	struct symbol *func;
	int warnings = 0;

	list_for_each_entry(func, &sec->symbol_list, list) {
		if (func->type != STT_FUNC)
			continue;

		init_insn_state(file, &state, sec);
		set_func_state(&state.cfi);

		warnings += validate_symbol(file, sec, func, &state);
	}

	return warnings;
}

/*
 * noinstr mode: only validate the sections that must stay
 * instrumentation-free (.noinstr.text, .entry.text, .cpuidle.text).
 */
static int validate_noinstr_sections(struct objtool_file *file)
{
	struct section *sec;
	int warnings = 0;

	sec = find_section_by_name(file->elf, ".noinstr.text");
	if (sec) {
		warnings += validate_section(file, sec);
		warnings += validate_unwind_hints(file, sec);
	}

	sec = find_section_by_name(file->elf, ".entry.text");
	if (sec) {
		warnings += validate_section(file, sec);
		warnings += validate_unwind_hints(file, sec);
	}

	sec = find_section_by_name(file->elf, ".cpuidle.text");
	if (sec) {
		warnings += validate_section(file, sec);
		warnings += validate_unwind_hints(file, sec);
	}

	return warnings;
}

/* Validate every executable section in the file. */
static int validate_functions(struct objtool_file *file)
{
	struct section *sec;
	int warnings = 0;

	for_each_sec(file, sec) {
		if (!(sec->sh.sh_flags & SHF_EXECINSTR))
			continue;

		warnings += validate_section(file, sec);
	}

	return warnings;
}

/*
 * Remove an ENDBR from the seal candidate list: it has a legitimate
 * indirect-branch reference and must not be sealed.
 */
static void mark_endbr_used(struct instruction *insn)
{
	if (!list_empty(&insn->call_node))
		list_del_init(&insn->call_node);
}

/*
 * Accept a reference to the first byte past a symbol whose first
 * instruction is ENDBR (or annotated noendbr) — typical code-range usage
 * like "start .. end" pairs.
 */
static bool noendbr_range(struct objtool_file *file, struct instruction *insn)
{
	struct symbol *sym = find_symbol_containing(insn->sec, insn->offset-1);
	struct instruction *first;

	if (!sym)
		return false;

	first = find_insn(file, sym->sec, sym->offset);
	if (!first)
		return false;

	if (first->type != INSN_ENDBR && !first->noendbr)
		return false;

	return insn->offset == sym->offset + sym->len;
}

/*
 * IBT: check each non-branch instruction's relocations (function pointer
 * loads) and warn when the target code is not ENDBR and not whitelisted.
 * Marks referenced ENDBRs as used.  Returns the warning count.
 */
static int validate_ibt_insn(struct objtool_file *file, struct instruction *insn)
{
	struct instruction *dest;
	struct reloc *reloc;
	unsigned long off;
	int warnings = 0;

	/*
	 * Looking for function pointer load relocations.  Ignore
	 * direct/indirect branches:
	 */
	switch (insn->type) {
	case INSN_CALL:
	case INSN_CALL_DYNAMIC:
	case INSN_JUMP_CONDITIONAL:
	case INSN_JUMP_UNCONDITIONAL:
	case INSN_JUMP_DYNAMIC:
	case INSN_JUMP_DYNAMIC_CONDITIONAL:
	case INSN_RETURN:
	case INSN_NOP:
		return 0;
	default:
		break;
	}

	/* Iterate over every relocation inside this instruction's bytes. */
	for (reloc = insn_reloc(file, insn);
	     reloc;
	     reloc = find_reloc_by_dest_range(file->elf, insn->sec,
					      reloc->offset + 1,
					      (insn->offset + insn->len) - (reloc->offset + 1))) {

		/*
		 * static_call_update() references the trampoline, which
		 * doesn't have (or need) ENDBR.  Skip warning in that case.
		 */
		if (reloc->sym->static_call_tramp)
			continue;

		off = reloc->sym->offset;
		/* PC-relative relocations need the arch-specific adjustment. */
		if (reloc->type == R_X86_64_PC32 || reloc->type == R_X86_64_PLT32)
			off += arch_dest_reloc_offset(reloc->addend);
		else
			off += reloc->addend;

		dest = find_insn(file, reloc->sym->sec, off);
		if (!dest)
			continue;

		if (dest->type == INSN_ENDBR) {
			mark_endbr_used(dest);
			continue;
		}

		if (insn_func(dest) && insn_func(dest) == insn_func(insn)) {
			/*
			 * Anything from->to self is either _THIS_IP_ or
			 * IRET-to-self.
			 *
			 * There is no sane way to annotate _THIS_IP_ since the
			 * compiler treats the relocation as a constant and is
			 * happy to fold in offsets, skewing any annotation we
			 * do, leading to vast amounts of false-positives.
			 *
			 * There's also compiler generated _THIS_IP_ through
			 * KCOV and such which we have no hope of annotating.
			 *
			 * As such, blanket accept self-references without
			 * issue.
			 */
			continue;
		}

		/*
		 * Accept anything ANNOTATE_NOENDBR.
		 */
		if (dest->noendbr)
			continue;

		/*
		 * Accept if this is the instruction after a symbol
		 * that is (no)endbr -- typical code-range usage.
		 */
		if (noendbr_range(file, dest))
			continue;

		WARN_FUNC("relocation to !ENDBR: %s",
			  insn->sec, insn->offset,
			  offstr(dest->sec, dest->offset));

		warnings++;
	}

	return warnings;
}

/*
 * IBT: warn about a data relocation that targets non-ENDBR code, unless the
 * target is annotated noendbr.  Returns 1 on warning, else 0.
 */
static int validate_ibt_data_reloc(struct objtool_file *file,
				   struct reloc *reloc)
{
	struct instruction *dest;

	dest = find_insn(file, reloc->sym->sec,
			 reloc->sym->offset + reloc->addend);
	if (!dest)
		return 0;

	if (dest->type == INSN_ENDBR) {
		mark_endbr_used(dest);
		return 0;
	}

	if (dest->noendbr)
		return 0;

	WARN_FUNC("data relocation to !ENDBR: %s",
		  reloc->sec->base, reloc->offset,
		  offstr(dest->sec, dest->offset));

	return 1;
}

/*
 * Validate IBT rules and remove used ENDBR instructions from the seal list.
 * Unused ENDBR instructions will be annotated for sealing (i.e., replaced with
 * NOPs) later, in create_ibt_endbr_seal_sections().
 */
static int validate_ibt(struct objtool_file *file)
{
	struct section *sec;
	struct reloc *reloc;
	struct instruction *insn;
	int warnings = 0;

	for_each_insn(file, insn)
		warnings += validate_ibt_insn(file, insn);

	for_each_sec(file, sec) {

		/* Already done by validate_ibt_insn() */
		if (sec->sh.sh_flags & SHF_EXECINSTR)
			continue;

		if (!sec->reloc)
			continue;

		/*
		 * These sections can reference text addresses, but not with
		 * the intent to indirect branch to them.
		 */
		if ((!strncmp(sec->name, ".discard", 8) &&
		     strcmp(sec->name, ".discard.ibt_endbr_noseal")) ||
		    !strncmp(sec->name, ".debug", 6) ||
		    !strcmp(sec->name, ".altinstructions") ||
		    !strcmp(sec->name, ".ibt_endbr_seal") ||
		    !strcmp(sec->name, ".orc_unwind_ip") ||
		    !strcmp(sec->name, ".parainstructions") ||
		    !strcmp(sec->name, ".retpoline_sites") ||
		    !strcmp(sec->name, ".smp_locks") ||
		    !strcmp(sec->name, ".static_call_sites") ||
		    !strcmp(sec->name, "_error_injection_whitelist") ||
		    !strcmp(sec->name, "_kprobe_blacklist") ||
		    !strcmp(sec->name, "__bug_table") ||
		    !strcmp(sec->name, "__ex_table") ||
		    !strcmp(sec->name, "__jump_table") ||
		    !strcmp(sec->name, "__mcount_loc") ||
		    !strcmp(sec->name, ".kcfi_traps") ||
		    strstr(sec->name, "__patchable_function_entries"))
			continue;

		list_for_each_entry(reloc, &sec->reloc->reloc_list, list)
			warnings += validate_ibt_data_reloc(file, reloc);
	}

	return warnings;
}

/*
 * SLS (straight-line speculation) mitigation: every RET and indirect jump
 * that isn't retpoline-safe must be followed by a trap (int3).
 */
static int validate_sls(struct objtool_file *file)
{
	struct instruction *insn, *next_insn;
	int warnings = 0;

	for_each_insn(file, insn) {
		next_insn = next_insn_same_sec(file, insn);

		if (insn->retpoline_safe)
			continue;

		switch (insn->type) {
		case INSN_RETURN:
			if (!next_insn || next_insn->type != INSN_TRAP) {
				WARN_FUNC("missing int3 after ret",
					  insn->sec, insn->offset);
				warnings++;
			}

			break;
		case INSN_JUMP_DYNAMIC:
			if (!next_insn || next_insn->type != INSN_TRAP) {
				WARN_FUNC("missing int3 after indirect jump",
					  insn->sec, insn->offset);
				warnings++;
			}
			break;
		default:
			break;
		}
	}

	return warnings;
}

/*
 * After validation, any instruction never visited and not covered by
 * ignore_unreachable_insn() is dead code — warn about the first one found.
 */
static int validate_reachable_instructions(struct objtool_file *file)
{
	struct instruction *insn;

	if (file->ignore_unreachables)
		return 0;

	for_each_insn(file, insn) {
		if (insn->visited || ignore_unreachable_insn(file, insn))
			continue;

		WARN_FUNC("unreachable instruction", insn->sec, insn->offset);
		return 1;
	}

	return 0;
}

/*
 * Main objtool entry point: decode the object file, run every validation
 * pass enabled in opts, then emit the requested annotation sections and
 * ORC data.  Always returns 0 (see the comment at "out").
 */
int check(struct objtool_file *file)
{
	int ret, warnings = 0;

	/* Set up the shared CFI state and the CFI dedup hash. */
	arch_initial_func_cfi_state(&initial_func_cfi);
	init_cfi_state(&init_cfi);
	init_cfi_state(&func_cfi);
	set_func_state(&func_cfi);

	if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3)))
		goto out;

	cfi_hash_add(&init_cfi);
	cfi_hash_add(&func_cfi);

	ret = decode_sections(file);
	if (ret < 0)
		goto out;

	warnings += ret;

	if (!nr_insns)
		goto out;

	if (opts.retpoline) {
		ret = validate_retpoline(file);
		if (ret < 0)
			return ret;
		warnings += ret;
	}

	if (opts.stackval || opts.orc || opts.uaccess) {
		ret = validate_functions(file);
		if (ret < 0)
			goto out;
		warnings += ret;

		ret = validate_unwind_hints(file, NULL);
		if (ret < 0)
			goto out;
		warnings += ret;

		/* Only meaningful when the earlier passes were clean. */
		if (!warnings) {
			ret = validate_reachable_instructions(file);
			if (ret < 0)
				goto out;
			warnings += ret;
		}

	} else if (opts.noinstr) {
		ret = validate_noinstr_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (opts.unret) {
		/*
		 * Must be after validate_branch() and friends, it plays
		 * further games with insn->visited.
		 */
		ret = validate_unret(file);
		if (ret < 0)
			return ret;
		warnings += ret;
	}

	if (opts.ibt) {
		ret = validate_ibt(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (opts.sls) {
		ret = validate_sls(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	/* Generation passes: write out the annotation sections. */
	if (opts.static_call) {
		ret = create_static_call_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (opts.retpoline) {
		ret = create_retpoline_sites_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (opts.cfi) {
		ret = create_cfi_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (opts.rethunk) {
		ret = create_return_sites_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;

		if (opts.hack_skylake) {
			ret = create_direct_call_sections(file);
			if (ret < 0)
				goto out;
			warnings += ret;
		}
	}

	if (opts.mcount) {
		ret = create_mcount_loc_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (opts.ibt) {
		ret = create_ibt_endbr_seal_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (opts.orc && nr_insns) {
		ret = orc_create(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}


	if (opts.stats) {
		printf("nr_insns_visited: %ld\n", nr_insns_visited);
		printf("nr_cfi: %ld\n", nr_cfi);
		printf("nr_cfi_reused: %ld\n", nr_cfi_reused);
		printf("nr_cfi_cache: %ld\n", nr_cfi_cache);
	}

out:
	/*
	 *  For now, don't fail the kernel build on fatal warnings.  These
	 *  errors are still fairly common due to the growing matrix of
	 *  supported toolchains and their recent pace of change.
	 */
	return 0;
}