// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#define _GNU_SOURCE /* memmem() */
#include <string.h>
#include <stdlib.h>
#include <inttypes.h>
#include <sys/mman.h>

#include <objtool/builtin.h>
#include <objtool/cfi.h>
#include <objtool/arch.h>
#include <objtool/check.h>
#include <objtool/special.h>
#include <objtool/warn.h>
#include <objtool/checksum.h>
#include <objtool/util.h>

#include <linux/objtool_types.h>
#include <linux/hashtable.h>
#include <linux/kernel.h>
#include <linux/static_call_types.h>
#include <linux/string.h>

/* Singly-linked list node pairing an alternative with its instruction. */
struct alternative {
	struct alternative *next;
	struct instruction *insn;
};

/* CFI state allocation counters, reported when opts.stats is set. */
static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache;

static struct cfi_init_state initial_func_cfi;
static struct cfi_state init_cfi;
static struct cfi_state func_cfi;
static struct cfi_state force_undefined_cfi;

/*
 * Look up the instruction decoded at (sec, offset) in the file-wide
 * instruction hash table.  Returns NULL if no instruction was decoded
 * at that exact location.
 */
struct instruction *find_insn(struct objtool_file *file,
			      struct section *sec, unsigned long offset)
{
	struct instruction *insn;

	hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) {
		if (insn->sec == sec && insn->offset == offset)
			return insn;
	}

	return NULL;
}

/*
 * Return the next decoded instruction in the same section, or NULL at the
 * end of the section.  Instructions are stored in INSN_CHUNK_SIZE arrays;
 * at a chunk boundary (idx == INSN_CHUNK_MAX) fall back to a hash lookup,
 * otherwise the next array slot is the next instruction.  A slot with a
 * zero length marks the end of the decoded instructions in the chunk.
 */
struct instruction *next_insn_same_sec(struct objtool_file *file,
				       struct instruction *insn)
{
	if (insn->idx == INSN_CHUNK_MAX)
		return find_insn(file, insn->sec, insn->offset + insn->len);

	insn++;
	if (!insn->len)
		return NULL;

	return insn;
}

/*
 * Return the next instruction belonging to the same function, following
 * the parent function into its cold subfunction (func->cfunc) when the
 * parent's instructions are exhausted.  Returns NULL outside any function
 * or at the very end of the function.
 */
static struct instruction *next_insn_same_func(struct objtool_file *file,
					       struct instruction *insn)
{
	struct instruction *next = next_insn_same_sec(file, insn);
	struct symbol *func = insn_func(insn);

	if (!func)
		return NULL;

	if (next && insn_func(next) == func)
		return next;

	/* Check if we're already in the subfunction: */
	if (func == func->cfunc)
		return NULL;

	/* Move to the subfunction: */
	return find_insn(file, func->cfunc->sec, func->cfunc->offset);
}

/*
 * Return the previous instruction in the same section, or NULL at the
 * start.  At a chunk start (idx == 0) the previous instruction lives in
 * another chunk and is located via its recorded length (prev_len).
 */
static struct instruction *prev_insn_same_sec(struct objtool_file *file,
					      struct instruction *insn)
{
	if (insn->idx == 0) {
		if (insn->prev_len)
			return find_insn(file, insn->sec, insn->offset - insn->prev_len);
		return NULL;
	}

	return insn - 1;
}

/*
 * Like prev_insn_same_sec(), but only if the previous instruction belongs
 * to the same function symbol.
 */
static struct instruction *prev_insn_same_sym(struct objtool_file *file,
					      struct instruction *insn)
{
	struct instruction *prev = prev_insn_same_sec(file, insn);

	if (prev && insn_func(prev) == insn_func(insn))
		return prev;

	return NULL;
}

/*
 * Iterate over every decoded instruction in the file.  The __fake dummy
 * exists only to declare __sec in a scope usable by the nested loops.
 */
#define for_each_insn(file, insn)					\
	for (struct section *__sec, *__fake = (struct section *)1;	\
	     __fake; __fake = NULL)					\
		for_each_sec(file->elf, __sec)				\
			sec_for_each_insn(file, __sec, insn)

/* Iterate over a function's instructions, following into its subfunction. */
#define func_for_each_insn(file, func, insn)				\
	for (insn = find_insn(file, func->sec, func->offset);		\
	     insn;							\
	     insn = next_insn_same_func(file, insn))

/* Iterate over the instructions covered by a symbol's [offset, offset+len). */
#define sym_for_each_insn(file, sym, insn)				\
	for (insn = find_insn(file, sym->sec, sym->offset);		\
	     insn && insn->offset < sym->offset + sym->len;		\
	     insn = next_insn_same_sec(file, insn))

/* Walk backwards from (but not including) insn to the symbol's start. */
#define sym_for_each_insn_continue_reverse(file, sym, insn)		\
	for (insn = prev_insn_same_sec(file, insn);			\
	     insn && insn->offset >= sym->offset;			\
	     insn = prev_insn_same_sec(file, insn))

/* Continue a section walk starting at (and including) insn. */
#define sec_for_each_insn_from(file, insn)				\
	for (; insn; insn = next_insn_same_sec(file, insn))

/* Continue a section walk starting after insn. */
#define sec_for_each_insn_continue(file, insn)				\
	for (insn = next_insn_same_sec(file, insn); insn;		\
	     insn = next_insn_same_sec(file, insn))

/*
 * Call destination accessor: _call_dest shares storage with the jump
 * table pointer, so it is only meaningful for non-dynamic branches.
 */
static inline struct symbol *insn_call_dest(struct instruction *insn)
{
	if (insn->type == INSN_JUMP_DYNAMIC ||
	    insn->type == INSN_CALL_DYNAMIC)
		return NULL;

	return insn->_call_dest;
}

/* Jump table reloc accessor: only valid for dynamic jumps/calls. */
static inline struct reloc *insn_jump_table(struct instruction *insn)
{
	if (insn->type == INSN_JUMP_DYNAMIC ||
	    insn->type == INSN_CALL_DYNAMIC)
		return insn->_jump_table;

	return NULL;
}

/* Jump table size accessor: only valid for dynamic jumps/calls. */
static inline unsigned long insn_jump_table_size(struct instruction *insn)
{
	if (insn->type == INSN_JUMP_DYNAMIC ||
	    insn->type == INSN_CALL_DYNAMIC)
		return insn->_jump_table_size;

	return 0;
}

/*
 * Does this instruction dispatch through a jump table, either directly or
 * as the retpoline alternative of an instruction that does?
 */
static bool is_jump_table_jump(struct instruction *insn)
{
	struct alt_group *alt_group = insn->alt_group;

	if (insn_jump_table(insn))
		return true;

	/* Retpoline alternative for a jump table? */
	return alt_group && alt_group->orig_group &&
	       insn_jump_table(alt_group->orig_group->first_insn);
}

/*
 * Is this instruction a sibling (tail) call?  Indirect jumps inside a
 * function are sibling calls unless they go through a jump table; direct
 * jumps are sibling calls when add_jump_destinations() resolved a call
 * destination for them.
 */
static bool is_sibling_call(struct instruction *insn)
{
	/*
	 * Assume only STT_FUNC calls have jump-tables.
	 */
	if (insn_func(insn)) {
		/* An indirect jump is either a sibling call or a jump to a table. */
		if (insn->type == INSN_JUMP_DYNAMIC)
			return !is_jump_table_jump(insn);
	}

	/* add_jump_destinations() sets insn_call_dest(insn) for sibling calls. */
	return (is_static_jump(insn) && insn_call_dest(insn));
}

/*
 * Checks if a function is a Rust "noreturn" one.
 */
static bool is_rust_noreturn(const struct symbol *func)
{
	/*
	 * If it does not start with "_R", then it is not a Rust symbol.
	 */
	if (strncmp(func->name, "_R", 2))
		return false;

	/*
	 * These are just heuristics -- we do not control the precise symbol
	 * name, due to the crate disambiguators (which depend on the compiler)
	 * as well as changes to the source code itself between versions (since
	 * these come from the Rust standard library).
	 */
	return str_ends_with(func->name, "_4core5sliceSp15copy_from_slice17len_mismatch_fail")	||
	       str_ends_with(func->name, "_4core6option13expect_failed")			||
	       str_ends_with(func->name, "_4core6option13unwrap_failed")			||
	       str_ends_with(func->name, "_4core6result13unwrap_failed")			||
	       str_ends_with(func->name, "_4core9panicking5panic")				||
	       str_ends_with(func->name, "_4core9panicking9panic_fmt")				||
	       str_ends_with(func->name, "_4core9panicking14panic_explicit")			||
	       str_ends_with(func->name, "_4core9panicking14panic_nounwind")			||
	       str_ends_with(func->name, "_4core9panicking18panic_bounds_check")		||
	       str_ends_with(func->name, "_4core9panicking18panic_nounwind_fmt")		||
	       str_ends_with(func->name, "_4core9panicking19assert_failed_inner")		||
	       str_ends_with(func->name, "_4core9panicking30panic_null_pointer_dereference")	||
	       str_ends_with(func->name, "_4core9panicking36panic_misaligned_pointer_dereference") ||
	       str_ends_with(func->name, "_7___rustc17rust_begin_unwind")			||
	       strstr(func->name, "_4core9panicking13assert_failed")				||
	       strstr(func->name, "_4core9panicking11panic_const24panic_const_")		||
	       (strstr(func->name, "_4core5slice5index") &&
		strstr(func->name, "slice_") &&
		str_ends_with(func->name, "_fail"));
}

/*
 * This checks to see if the given function is a "noreturn" function.
 *
 * For global functions which are outside the scope of this object file, we
 * have to keep a manual list of them.
 *
 * For local functions, we have to detect them manually by simply looking for
 * the lack of a return instruction.
 */
static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
				int recursion)
{
	int i;
	struct instruction *insn;
	bool empty = true;

	/* Stringify the noreturns.h entries into a name table. */
#define NORETURN(func) __stringify(func),
	static const char * const global_noreturns[] = {
#include "noreturns.h"
	};
#undef NORETURN

	if (!func)
		return false;

	/* Global symbols: match against the known-noreturn lists. */
	if (!is_local_sym(func)) {
		if (is_rust_noreturn(func))
			return true;

		for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
			if (!strcmp(func->name, global_noreturns[i]))
				return true;
	}

	/* A weak symbol can be overridden, so don't trust its body. */
	if (is_weak_sym(func))
		return false;

	if (!func->len)
		return false;

	insn = find_insn(file, func->sec, func->offset);
	if (!insn || !insn_func(insn))
		return false;

	/* Any RET means the function can return: not a dead end. */
	func_for_each_insn(file, func, insn) {
		empty = false;

		if (insn->type == INSN_RETURN)
			return false;
	}

	if (empty)
		return false;

	/*
	 * A function can have a sibling call instead of a return. In that
	 * case, the function's dead-end status depends on whether the target
	 * of the sibling call returns.
	 */
	func_for_each_insn(file, func, insn) {
		if (is_sibling_call(insn)) {
			struct instruction *dest = insn->jump_dest;

			if (!dest)
				/* sibling call to another file */
				return false;

			/* local sibling call */
			if (recursion == 5) {
				/*
				 * Infinite recursion: two functions have
				 * sibling calls to each other. This is a very
				 * rare case. It means they aren't dead ends.
				 */
				return false;
			}

			return __dead_end_function(file, insn_func(dest), recursion+1);
		}
	}

	return true;
}

/* Public entry point: does @func never return to its caller? */
static bool dead_end_function(struct objtool_file *file, struct symbol *func)
{
	return __dead_end_function(file, func, 0);
}

/* Reset a CFI state: all registers and the CFA become undefined. */
static void init_cfi_state(struct cfi_state *cfi)
{
	int i;

	for (i = 0; i < CFI_NUM_REGS; i++) {
		cfi->regs[i].base = CFI_UNDEFINED;
		cfi->vals[i].base = CFI_UNDEFINED;
	}
	cfi->cfa.base = CFI_UNDEFINED;
	cfi->drap_reg = CFI_UNDEFINED;
	cfi->drap_offset = -1;
}

/*
 * Zero an insn_state and give it an undefined CFI; inherit the section's
 * noinstr flag when noinstr validation is enabled.
 */
static void init_insn_state(struct objtool_file *file, struct insn_state *state,
			    struct section *sec)
{
	memset(state, 0, sizeof(*state));
	init_cfi_state(&state->cfi);

	if (opts.noinstr && sec)
		state->noinstr = sec->noinstr;
}

/* Allocate a zeroed cfi_state; allocation failure is fatal. */
static struct cfi_state *cfi_alloc(void)
{
	struct cfi_state *cfi = calloc(1, sizeof(struct cfi_state));
	if (!cfi) {
		ERROR_GLIBC("calloc");
		exit(1);
	}
	nr_cfi++;
	return cfi;
}

static int cfi_bits;
static struct hlist_head *cfi_hash;

/*
 * Compare two CFI states, skipping the leading hash node.  Returns
 * nonzero when they differ (memcmp semantics).
 */
static inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2)
{
	return memcmp((void *)cfi1 + sizeof(cfi1->hash),
		      (void *)cfi2 + sizeof(cfi2->hash),
		      sizeof(struct cfi_state) - sizeof(struct hlist_node));
}

/* Hash a CFI state's payload (everything after the hash node). */
static inline u32 cfi_key(struct cfi_state *cfi)
{
	return jhash((void *)cfi + sizeof(cfi->hash),
		     sizeof(*cfi) - sizeof(cfi->hash), 0);
}

/*
 * Deduplicate CFI states: return the cached copy if an identical state
 * already exists, otherwise insert a freshly allocated copy of @cfi.
 */
static struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi)
{
	struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
	struct cfi_state *obj;

	hlist_for_each_entry(obj, head, hash) {
		if (!cficmp(cfi, obj)) {
			nr_cfi_cache++;
			return obj;
		}
	}

	obj = cfi_alloc();
	*obj = *cfi;
	hlist_add_head(&obj->hash, head);

	return obj;
}

/* Insert @cfi into the hash without checking for duplicates. */
static void cfi_hash_add(struct cfi_state *cfi)
{
	struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];

	hlist_add_head(&cfi->hash, head);
}

/*
 * Size the CFI hash table (at least 2^10 buckets, scaled by ilog2(size))
 * and mmap it.  Returns NULL on mmap failure.
 * NOTE(review): compares against (void *)-1L rather than MAP_FAILED -- same
 * value on Linux, but MAP_FAILED would be the canonical spelling.
 */
static void *cfi_hash_alloc(unsigned long size)
{
	cfi_bits = max(10, ilog2(size));
	cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits,
			PROT_READ|PROT_WRITE,
			MAP_PRIVATE|MAP_ANON, -1, 0);
	if (cfi_hash == (void *)-1L) {
		ERROR_GLIBC("mmap fail cfi_hash");
		cfi_hash = NULL;
	} else if (opts.stats) {
		printf("cfi_bits: %d\n", cfi_bits);
	}

	return cfi_hash;
}

static unsigned long nr_insns;
static unsigned long nr_insns_visited;

/*
 * Call the arch-specific instruction decoder for all the instructions and add
 * them to the global instruction list.
 */
static int decode_instructions(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;
	unsigned long offset;
	struct instruction *insn;

	for_each_sec(file->elf, sec) {
		struct instruction *insns = NULL;
		u8 prev_len = 0;
		u8 idx = 0;

		if (!is_text_sec(sec))
			continue;

		/* Alternative/discard sections don't count as real text. */
		if (strcmp(sec->name, ".altinstr_replacement") &&
		    strcmp(sec->name, ".altinstr_aux") &&
		    strncmp(sec->name, ".discard.", 9))
			sec->text = true;

		if (!strcmp(sec->name, ".noinstr.text") ||
		    !strcmp(sec->name, ".entry.text") ||
		    !strcmp(sec->name, ".cpuidle.text") ||
		    !strncmp(sec->name, ".text..__x86.", 13))
			sec->noinstr = true;

		/*
		 * .init.text code is ran before userspace and thus doesn't
		 * strictly need retpolines, except for modules which are
		 * loaded late, they very much do need retpoline in their
		 * .init.text
		 */
		if (!strcmp(sec->name, ".init.text") && !opts.module)
			sec->init = true;

		/*
		 * Decode the section sequentially; each iteration advances by
		 * the length of the instruction just decoded.  Instructions
		 * are allocated in chunks of INSN_CHUNK_SIZE.
		 */
		for (offset = 0; offset < sec_size(sec); offset += insn->len) {
			if (!insns || idx == INSN_CHUNK_MAX) {
				insns = calloc(INSN_CHUNK_SIZE, sizeof(*insn));
				if (!insns) {
					ERROR_GLIBC("calloc");
					return -1;
				}
				idx = 0;
			} else {
				idx++;
			}
			insn = &insns[idx];
			insn->idx = idx;

			INIT_LIST_HEAD(&insn->call_node);
			insn->sec = sec;
			insn->offset = offset;
			insn->prev_len = prev_len;

			if (arch_decode_instruction(file, sec, offset, sec_size(sec) - offset, insn))
				return -1;

			prev_len = insn->len;

			/*
			 * By default, "ud2" is a dead end unless otherwise
			 * annotated, because GCC 7 inserts it for certain
			 * divide-by-zero cases.
			 */
			if (insn->type == INSN_BUG)
				insn->dead_end = true;

			hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset));
			nr_insns++;
		}

		/* Attach each function/notype symbol to its instructions. */
		sec_for_each_sym(sec, func) {
			if (!is_notype_sym(func) && !is_func_sym(func))
				continue;

			if (func->offset == sec_size(sec)) {
				/* Heuristic: likely an "end" symbol */
				if (is_notype_sym(func))
					continue;
				ERROR("%s(): STT_FUNC at end of section", func->name);
				return -1;
			}

			if (func->embedded_insn || func->alias != func)
				continue;

			if (!find_insn(file, sec, func->offset)) {
				ERROR("%s(): can't find starting instruction", func->name);
				return -1;
			}

			sym_for_each_insn(file, func, insn) {
				insn->sym = func;
				/*
				 * Track ENDBRs: one at the function start is
				 * a candidate for sealing; one in the middle
				 * is only counted.
				 */
				if (is_func_sym(func) &&
				    insn->type == INSN_ENDBR &&
				    list_empty(&insn->call_node)) {
					if (insn->offset == func->offset) {
						list_add_tail(&insn->call_node, &file->endbr_list);
						file->nr_endbr++;
					} else {
						file->nr_endbr_int++;
					}
				}
			}
		}
	}

	if (opts.stats)
		printf("nr_insns: %lu\n", nr_insns);

	return 0;
}

/*
 * Read the pv_ops[] .data table to find the static initialized values.
 */
static int add_pv_ops(struct objtool_file *file, const char *symname)
{
	struct symbol *sym, *func;
	unsigned long off, end;
	struct reloc *reloc;
	int idx;

	sym = find_symbol_by_name(file->elf, symname);
	if (!sym)
		return 0;

	/* Walk the relocs covering the table, one slot at a time. */
	off = sym->offset;
	end = off + sym->len;
	for (;;) {
		reloc = find_reloc_by_dest_range(file->elf, sym->sec, off, end - off);
		if (!reloc)
			break;

		/* Slot index within the pointer table. */
		idx = (reloc_offset(reloc) - sym->offset) / sizeof(unsigned long);

		func = reloc->sym;
		if (is_sec_sym(func))
			func = find_symbol_by_offset(reloc->sym->sec,
						     reloc_addend(reloc));
		if (!func) {
			ERROR_FUNC(reloc->sym->sec, reloc_addend(reloc),
				   "can't find func at %s[%d]", symname, idx);
			return -1;
		}

		if (objtool_pv_add(file, idx, func))
			return -1;

		/* Guard against off passing end (end - off would underflow). */
		off = reloc_offset(reloc) + 1;
		if (off > end)
			break;
	}

	return 0;
}

/*
 * Allocate and initialize file->pv_ops[].
 */
static int init_pv_ops(struct objtool_file *file)
{
	static const char *pv_ops_tables[] = {
		"pv_ops",
		"xen_cpu_ops",
		"xen_irq_ops",
		"xen_mmu_ops",
		NULL,
	};
	const char *pv_ops;
	struct symbol *sym;
	int idx, nr;

	/* pv_ops tracking is only needed for noinstr validation. */
	if (!opts.noinstr)
		return 0;

	file->pv_ops = NULL;

	sym = find_symbol_by_name(file->elf, "pv_ops");
	if (!sym)
		return 0;

	/* One pv_state per pointer-sized slot in pv_ops. */
	nr = sym->len / sizeof(unsigned long);
	file->pv_ops = calloc(nr, sizeof(struct pv_state));
	if (!file->pv_ops) {
		ERROR_GLIBC("calloc");
		return -1;
	}

	for (idx = 0; idx < nr; idx++)
		INIT_LIST_HEAD(&file->pv_ops[idx].targets);

	for (idx = 0; (pv_ops = pv_ops_tables[idx]); idx++) {
		if (add_pv_ops(file, pv_ops))
			return -1;
	}

	return 0;
}

/*
 * A module is a livepatch module when its .modinfo section contains a
 * "livepatch=Y" entry (entries are NUL-separated, hence the leading '\0'
 * in the needle).
 */
static bool is_livepatch_module(struct objtool_file *file)
{
	struct section *sec;

	if (!opts.module)
		return false;

	sec = find_section_by_name(file->elf, ".modinfo");
	if (!sec)
		return false;

	return memmem(sec->data->d_buf, sec_size(sec), "\0livepatch=Y", 12);
}

/*
 * Emit the .static_call_sites section: one static_call_site entry (addr +
 * key relocs) per collected static call.
 */
static int create_static_call_sections(struct objtool_file *file)
{
	struct static_call_site *site;
	struct section *sec;
	struct instruction *insn;
	struct symbol *key_sym;
	char *key_name, *tmp;
	int idx;

	sec = find_section_by_name(file->elf, ".static_call_sites");
	if (sec) {
		/*
		 * Livepatch modules may have already extracted the static call
		 * site entries to take advantage of vmlinux static call
		 * privileges.
		 */
		if (!file->klp)
			WARN("file already has .static_call_sites section, skipping");

		return 0;
	}

	if (list_empty(&file->static_call_list))
		return 0;

	idx = 0;
	list_for_each_entry(insn, &file->static_call_list, call_node)
		idx++;

	sec = elf_create_section_pair(file->elf, ".static_call_sites",
				      sizeof(*site), idx, idx * 2);
	if (!sec)
		return -1;

	/* Allow modules to modify the low bits of static_call_site::key */
	sec->sh.sh_flags |= SHF_WRITE;

	idx = 0;
	list_for_each_entry(insn, &file->static_call_list, call_node) {

		/* populate reloc for 'addr' */
		if (!elf_init_reloc_text_sym(file->elf, sec,
					     idx * sizeof(*site), idx * 2,
					     insn->sec, insn->offset))
			return -1;

		/*
		 * Derive the key symbol name by rewriting the trampoline
		 * prefix in place to the key prefix.
		 */
		key_name = strdup(insn_call_dest(insn)->name);
		if (!key_name) {
			ERROR_GLIBC("strdup");
			return -1;
		}
		if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR,
			    STATIC_CALL_TRAMP_PREFIX_LEN)) {
			ERROR("static_call: trampoline name malformed: %s", key_name);
			return -1;
		}
		tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN;
		memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN);

		key_sym = find_symbol_by_name(file->elf, tmp);
		if (!key_sym) {
			if (!opts.module || file->klp) {
				ERROR("static_call: can't find static_call_key symbol: %s", tmp);
				return -1;
			}

			/*
			 * For modules(), the key might not be exported, which
			 * means the module can make static calls but isn't
			 * allowed to change them.
			 *
			 * In that case we temporarily set the key to be the
			 * trampoline address. This is fixed up in
			 * static_call_add_module().
			 */
			key_sym = insn_call_dest(insn);
		}

		/* populate reloc for 'key' */
		if (!elf_init_reloc_data_sym(file->elf, sec,
					     idx * sizeof(*site) + 4,
					     (idx * 2) + 1, key_sym,
					     is_sibling_call(insn) * STATIC_CALL_SITE_TAIL))
			return -1;

		idx++;
	}

	return 0;
}

/*
 * Emit .retpoline_sites: one 32-bit text reloc per retpoline call site.
 */
static int create_retpoline_sites_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".retpoline_sites");
	if (sec) {
		WARN("file already has .retpoline_sites, skipping");
		return 0;
	}

	idx = 0;
	list_for_each_entry(insn, &file->retpoline_call_list, call_node)
		idx++;

	if (!idx)
		return 0;

	sec = elf_create_section_pair(file->elf, ".retpoline_sites",
				      sizeof(int), idx, idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->retpoline_call_list, call_node) {

		if (!elf_init_reloc_text_sym(file->elf, sec,
					     idx * sizeof(int), idx,
					     insn->sec, insn->offset))
			return -1;

		idx++;
	}

	return 0;
}

/*
 * Emit .return_sites: one 32-bit text reloc per return-thunk call site.
 */
static int create_return_sites_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".return_sites");
	if (sec) {
		WARN("file already has .return_sites, skipping");
		return 0;
	}

	idx = 0;
	list_for_each_entry(insn, &file->return_thunk_list, call_node)
		idx++;

	if (!idx)
		return 0;

	sec = elf_create_section_pair(file->elf, ".return_sites",
				      sizeof(int), idx, idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->return_thunk_list, call_node) {

		if (!elf_init_reloc_text_sym(file->elf, sec,
					     idx * sizeof(int), idx,
					     insn->sec, insn->offset))
			return -1;

		idx++;
	}

	return 0;
}

/*
 * Emit .ibt_endbr_seal: one 32-bit text reloc per superfluous ENDBR that
 * can be sealed at boot.
 */
static int create_ibt_endbr_seal_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".ibt_endbr_seal");
	if (sec) {
		WARN("file already has .ibt_endbr_seal, skipping");
		return 0;
	}

	idx = 0;
	list_for_each_entry(insn, &file->endbr_list, call_node)
		idx++;

	if (opts.stats) {
		printf("ibt: ENDBR at function start: %d\n", file->nr_endbr);
		printf("ibt: ENDBR inside functions: %d\n", file->nr_endbr_int);
		printf("ibt: superfluous ENDBR: %d\n", idx);
	}

	if (!idx)
		return 0;

	sec = elf_create_section_pair(file->elf, ".ibt_endbr_seal",
				      sizeof(int), idx, idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->endbr_list, call_node) {

		int *site = (int *)sec->data->d_buf + idx;
		struct symbol *sym = insn->sym;
		*site = 0;

		/* Sealing a magic init/cleanup entry point would break it. */
		if (opts.module && sym && is_func_sym(sym) &&
		    insn->offset == sym->offset &&
		    (!strcmp(sym->name, "init_module") ||
		     !strcmp(sym->name, "cleanup_module"))) {
			ERROR("%s(): Magic init_module() function name is deprecated, use module_init(fn) instead",
			      sym->name);
			return -1;
		}

		if (!elf_init_reloc_text_sym(file->elf, sec,
					     idx * sizeof(int), idx,
					     insn->sec, insn->offset))
			return -1;

		idx++;
	}

	return 0;
}

/*
 * Emit .cfi_sites: one 32-bit text reloc per __cfi_* preamble symbol.
 * NOTE(review): unlike the sibling create_*_sections() helpers, this one
 * creates the section even when the count is zero -- confirm intentional.
 */
static int create_cfi_sections(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *sym;
	int idx;

	sec = find_section_by_name(file->elf, ".cfi_sites");
	if (sec) {
		WARN("file already has .cfi_sites section, skipping");
		return 0;
	}

	idx = 0;
	for_each_sym(file->elf, sym) {
		if (!is_func_sym(sym))
			continue;

		if (strncmp(sym->name, "__cfi_", 6))
			continue;

		idx++;
	}

	sec = elf_create_section_pair(file->elf, ".cfi_sites",
				      sizeof(unsigned int), idx, idx);
	if (!sec)
		return -1;

	idx = 0;
	for_each_sym(file->elf, sym) {
		if (!is_func_sym(sym))
			continue;

		if (strncmp(sym->name, "__cfi_", 6))
			continue;

		if (!elf_init_reloc_text_sym(file->elf, sec,
					     idx * sizeof(unsigned int), idx,
					     sym->sec, sym->offset))
			return -1;

		idx++;
	}

	return 0;
}

/*
 * Emit __mcount_loc: one address-sized absolute reloc per __fentry__
 * call site, for ftrace.
 */
static int create_mcount_loc_sections(struct objtool_file *file)
{
	size_t addr_size = elf_addr_size(file->elf);
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, "__mcount_loc");
	if (sec) {
		/*
		 * Livepatch modules have already extracted their __mcount_loc
		 * entries to cover the !CONFIG_FTRACE_MCOUNT_USE_OBJTOOL case.
		 */
		if (!file->klp)
			WARN("file already has __mcount_loc section, skipping");

		return 0;
	}

	if (list_empty(&file->mcount_loc_list))
		return 0;

	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, call_node)
		idx++;

	sec = elf_create_section_pair(file->elf, "__mcount_loc", addr_size,
				      idx, idx);
	if (!sec)
		return -1;

	sec->sh.sh_addralign = addr_size;

	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, call_node) {

		struct reloc *reloc;

		reloc = elf_init_reloc_text_sym(file->elf, sec, idx * addr_size, idx,
						insn->sec, insn->offset);
		if (!reloc)
			return -1;

		/* Absolute reloc type matches the file's pointer width. */
		set_reloc_type(file->elf, reloc, addr_size == 8 ? R_ABS64 : R_ABS32);

		idx++;
	}

	return 0;
}

/*
 * Emit .call_sites: one 32-bit text reloc per collected direct call.
 */
static int create_direct_call_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".call_sites");
	if (sec) {
		WARN("file already has .call_sites section, skipping");
		return 0;
	}

	if (list_empty(&file->call_list))
		return 0;

	idx = 0;
	list_for_each_entry(insn, &file->call_list, call_node)
		idx++;

	sec = elf_create_section_pair(file->elf, ".call_sites",
				      sizeof(unsigned int), idx, idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->call_list, call_node) {

		if (!elf_init_reloc_text_sym(file->elf, sec,
					     idx * sizeof(unsigned int), idx,
					     insn->sec, insn->offset))
			return -1;

		idx++;
	}

	return 0;
}

#ifdef BUILD_KLP
/*
 * Emit .discard.sym_checksum: one sym_checksum entry (symbol reloc +
 * checksum value) per symbol that has a computed checksum.
 */
static int create_sym_checksum_section(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *sym;
	unsigned int idx = 0;
	struct sym_checksum *checksum;
	size_t entsize = sizeof(struct sym_checksum);

	sec = find_section_by_name(file->elf, ".discard.sym_checksum");
	if (sec) {
		if (!opts.dryrun)
			WARN("file already has .discard.sym_checksum section, skipping");

		return 0;
	}

	for_each_sym(file->elf, sym)
		if (sym->csum.checksum)
			idx++;

	if (!idx)
		return 0;

	sec = elf_create_section_pair(file->elf, ".discard.sym_checksum", entsize,
				      idx, idx);
	if (!sec)
		return -1;

	idx = 0;
	for_each_sym(file->elf, sym) {
		if (!sym->csum.checksum)
			continue;

		if (!elf_init_reloc(file->elf, sec->rsec, idx, idx * entsize,
				    sym, 0, R_TEXT64))
			return -1;

		checksum = (struct sym_checksum *)sec->data->d_buf + idx;
		checksum->addr = 0; /* reloc */
		checksum->checksum = sym->csum.checksum;

		mark_sec_changed(file->elf, sec, true);

		idx++;
	}

	return 0;
}
#else
/* Checksums are only supported in BUILD_KLP builds. */
static int create_sym_checksum_section(struct objtool_file *file) { return -EINVAL; }
#endif

/*
 * Warnings shouldn't be reported for ignored functions.
 */
static int add_ignores(struct objtool_file *file)
{
	struct section *rsec;
	struct symbol *func;
	struct reloc *reloc;

	rsec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
	if (!rsec)
		return 0;

	for_each_reloc(rsec, reloc) {
		switch (reloc->sym->type) {
		case STT_FUNC:
			func = reloc->sym;
			break;

		case STT_SECTION:
			func = find_func_by_offset(reloc->sym->sec, reloc_addend(reloc));
			if (!func)
				continue;
			break;

		default:
			ERROR("unexpected relocation symbol type in %s: %d",
			      rsec->name, reloc->sym->type);
			return -1;
		}

		/* Also ignore the function's cold subfunction, if any. */
		func->ignore = true;
		if (func->cfunc)
			func->cfunc->ignore = true;
	}

	return 0;
}

/*
 * This is a whitelist of functions that is allowed to be called with AC set.
 * The list is meant to be minimal and only contains compiler instrumentation
 * ABI and a few functions used to implement *_{to,from}_user() functions.
 *
 * These functions must not directly change AC, but may PUSHF/POPF.
 */
static const char *uaccess_safe_builtin[] = {
	/* KASAN */
	"kasan_report",
	"kasan_check_range",
	/* KASAN out-of-line */
	"__asan_loadN_noabort",
	"__asan_load1_noabort",
	"__asan_load2_noabort",
	"__asan_load4_noabort",
	"__asan_load8_noabort",
	"__asan_load16_noabort",
	"__asan_storeN_noabort",
	"__asan_store1_noabort",
	"__asan_store2_noabort",
	"__asan_store4_noabort",
	"__asan_store8_noabort",
	"__asan_store16_noabort",
	"__kasan_check_read",
	"__kasan_check_write",
	/* KASAN in-line */
	"__asan_report_load_n_noabort",
	"__asan_report_load1_noabort",
	"__asan_report_load2_noabort",
	"__asan_report_load4_noabort",
	"__asan_report_load8_noabort",
	"__asan_report_load16_noabort",
	"__asan_report_store_n_noabort",
	"__asan_report_store1_noabort",
	"__asan_report_store2_noabort",
	"__asan_report_store4_noabort",
	"__asan_report_store8_noabort",
	"__asan_report_store16_noabort",
	/* KCSAN */
	"__kcsan_check_access",
	"__kcsan_mb",
	"__kcsan_wmb",
	"__kcsan_rmb",
	"__kcsan_release",
	"kcsan_found_watchpoint",
	"kcsan_setup_watchpoint",
	"kcsan_check_scoped_accesses",
	"kcsan_disable_current",
	"kcsan_enable_current_nowarn",
	/* KCSAN/TSAN */
	"__tsan_func_entry",
	"__tsan_func_exit",
	"__tsan_read_range",
	"__tsan_write_range",
	"__tsan_read1",
	"__tsan_read2",
	"__tsan_read4",
	"__tsan_read8",
	"__tsan_read16",
	"__tsan_write1",
	"__tsan_write2",
	"__tsan_write4",
	"__tsan_write8",
	"__tsan_write16",
	"__tsan_read_write1",
	"__tsan_read_write2",
	"__tsan_read_write4",
	"__tsan_read_write8",
	"__tsan_read_write16",
	"__tsan_volatile_read1",
	"__tsan_volatile_read2",
	"__tsan_volatile_read4",
	"__tsan_volatile_read8",
	"__tsan_volatile_read16",
	"__tsan_volatile_write1",
	"__tsan_volatile_write2",
	"__tsan_volatile_write4",
	"__tsan_volatile_write8",
	"__tsan_volatile_write16",
	"__tsan_atomic8_load",
	"__tsan_atomic16_load",
	"__tsan_atomic32_load",
	"__tsan_atomic64_load",
	"__tsan_atomic8_store",
	"__tsan_atomic16_store",
	"__tsan_atomic32_store",
	"__tsan_atomic64_store",
	"__tsan_atomic8_exchange",
	"__tsan_atomic16_exchange",
	"__tsan_atomic32_exchange",
	"__tsan_atomic64_exchange",
	"__tsan_atomic8_fetch_add",
	"__tsan_atomic16_fetch_add",
	"__tsan_atomic32_fetch_add",
	"__tsan_atomic64_fetch_add",
	"__tsan_atomic8_fetch_sub",
	"__tsan_atomic16_fetch_sub",
	"__tsan_atomic32_fetch_sub",
	"__tsan_atomic64_fetch_sub",
	"__tsan_atomic8_fetch_and",
	"__tsan_atomic16_fetch_and",
	"__tsan_atomic32_fetch_and",
	"__tsan_atomic64_fetch_and",
	"__tsan_atomic8_fetch_or",
	"__tsan_atomic16_fetch_or",
	"__tsan_atomic32_fetch_or",
	"__tsan_atomic64_fetch_or",
	"__tsan_atomic8_fetch_xor",
	"__tsan_atomic16_fetch_xor",
	"__tsan_atomic32_fetch_xor",
	"__tsan_atomic64_fetch_xor",
	"__tsan_atomic8_fetch_nand",
	"__tsan_atomic16_fetch_nand",
	"__tsan_atomic32_fetch_nand",
	"__tsan_atomic64_fetch_nand",
	"__tsan_atomic8_compare_exchange_strong",
	"__tsan_atomic16_compare_exchange_strong",
	"__tsan_atomic32_compare_exchange_strong",
	"__tsan_atomic64_compare_exchange_strong",
	"__tsan_atomic8_compare_exchange_weak",
	"__tsan_atomic16_compare_exchange_weak",
	"__tsan_atomic32_compare_exchange_weak",
	"__tsan_atomic64_compare_exchange_weak",
	"__tsan_atomic8_compare_exchange_val",
	"__tsan_atomic16_compare_exchange_val",
	"__tsan_atomic32_compare_exchange_val",
	"__tsan_atomic64_compare_exchange_val",
	"__tsan_atomic_thread_fence",
	"__tsan_atomic_signal_fence",
	"__tsan_unaligned_read16",
	"__tsan_unaligned_write16",
	/* KCOV */
	"write_comp_data",
	"check_kcov_mode",
	"__sanitizer_cov_trace_pc",
	"__sanitizer_cov_trace_const_cmp1",
	"__sanitizer_cov_trace_const_cmp2",
	"__sanitizer_cov_trace_const_cmp4",
	"__sanitizer_cov_trace_const_cmp8",
	"__sanitizer_cov_trace_cmp1",
	"__sanitizer_cov_trace_cmp2",
	"__sanitizer_cov_trace_cmp4",
	"__sanitizer_cov_trace_cmp8",
	"__sanitizer_cov_trace_switch",
	/* KMSAN */
	"kmsan_copy_to_user",
	"kmsan_disable_current",
	"kmsan_enable_current",
	"kmsan_report",
	"kmsan_unpoison_entry_regs",
	"kmsan_unpoison_memory",
	"__msan_chain_origin",
	"__msan_get_context_state",
	"__msan_instrument_asm_store",
	"__msan_metadata_ptr_for_load_1",
	"__msan_metadata_ptr_for_load_2",
	"__msan_metadata_ptr_for_load_4",
	"__msan_metadata_ptr_for_load_8",
	"__msan_metadata_ptr_for_load_n",
	"__msan_metadata_ptr_for_store_1",
	"__msan_metadata_ptr_for_store_2",
	"__msan_metadata_ptr_for_store_4",
	"__msan_metadata_ptr_for_store_8",
	"__msan_metadata_ptr_for_store_n",
	"__msan_poison_alloca",
	"__msan_warning",
	/* UBSAN */
	"ubsan_type_mismatch_common",
	"__ubsan_handle_type_mismatch",
	"__ubsan_handle_type_mismatch_v1",
	"__ubsan_handle_shift_out_of_bounds",
	"__ubsan_handle_load_invalid_value",
	/* KSTACK_ERASE */
	"__sanitizer_cov_stack_depth",
	/* TRACE_BRANCH_PROFILING */
	"ftrace_likely_update",
	/* STACKPROTECTOR */
	"__stack_chk_fail",
	/* misc */
	"csum_partial_copy_generic",
	"copy_mc_fragile",
	"copy_mc_fragile_handle_tail",
	"copy_mc_enhanced_fast_string",
	"rep_stos_alternative",
	"rep_movs_alternative",
	"__copy_user_nocache",
	NULL
};

/*
 * Mark every symbol from the whitelist above as safe to call with AC set.
 * Missing symbols are simply skipped (the object may not define them all).
 */
static void add_uaccess_safe(struct objtool_file *file)
{
	struct symbol *func;
	const char **name;

	if (!opts.uaccess)
		return;

	for (name = uaccess_safe_builtin; *name; name++) {
		func = find_symbol_by_name(file->elf, *name);
		if (!func)
			continue;

		func->uaccess_safe = true;
	}
}

/*
 * Symbols that replace INSN_CALL_DYNAMIC, every (tail) call to such a symbol
 * will be added to the .retpoline_sites section.
 */
__weak bool arch_is_retpoline(struct symbol *sym)
{
	return false;
}

/*
 * Symbols that replace INSN_RETURN, every (tail) call to such a symbol
 * will be added to the .return_sites section.
 */
__weak bool arch_is_rethunk(struct symbol *sym)
{
	return false;
}

/*
 * Symbols that are embedded inside other instructions, because sometimes crazy
 * code exists. These are mostly ignored for validation purposes.
 */
__weak bool arch_is_embedded_insn(struct symbol *sym)
{
	return false;
}

/*
 * Find the relocation applying to this instruction's byte range.  Caches
 * a negative result in insn->no_reloc to avoid repeated lookups.
 */
static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
{
	struct reloc *reloc;

	if (insn->no_reloc)
		return NULL;

	if (!file)
		return NULL;

	reloc = find_reloc_by_dest_range(file->elf, insn->sec,
					 insn->offset, insn->len);
	if (!reloc) {
		insn->no_reloc = 1;
		return NULL;
	}

	return reloc;
}

/* Free an instruction's stack_op list and reset the head. */
static void remove_insn_ops(struct instruction *insn)
{
	struct stack_op *op, *next;

	for (op = insn->stack_ops; op; op = next) {
		next = op->next;
		free(op);
	}
	insn->stack_ops = NULL;
}

/*
 * Classify a call site: route it onto the static-call, retpoline, mcount
 * or direct-call lists, and NOP/RET-patch instrumentation calls out of
 * noinstr text.  @sibling indicates a tail call.
 */
static int annotate_call_site(struct objtool_file *file,
			      struct instruction *insn, bool sibling)
{
	struct reloc *reloc = insn_reloc(file, insn);
	struct symbol *sym = insn_call_dest(insn);

	/*
	 * NOTE(review): assumes a call with no resolved destination always
	 * has a reloc here -- reloc is not NULL-checked before dereference;
	 * verify against callers.
	 */
	if (!sym)
		sym = reloc->sym;

	if (sym->static_call_tramp) {
		list_add_tail(&insn->call_node, &file->static_call_list);
		return 0;
	}

	if (sym->retpoline_thunk) {
		list_add_tail(&insn->call_node, &file->retpoline_call_list);
		return 0;
	}

	/*
	 * Many compilers cannot disable KCOV or sanitizer calls with a function
	 * attribute so they need a little help, NOP out any such calls from
	 * noinstr text.
	 */
	if (opts.hack_noinstr && insn->sec->noinstr && sym->profiling_func) {
		if (reloc)
			set_reloc_type(file->elf, reloc, R_NONE);

		if (elf_write_insn(file->elf, insn->sec,
				   insn->offset, insn->len,
				   sibling ? arch_ret_insn(insn->len)
					   : arch_nop_insn(insn->len))) {
			return -1;
		}

		insn->type = sibling ? INSN_RETURN : INSN_NOP;

		if (sibling) {
			/*
			 * We've replaced the tail-call JMP insn by two new
			 * insn: RET; INT3, except we only have a single struct
			 * insn here. Mark it retpoline_safe to avoid the SLS
			 * warning, instead of adding another insn.
			 */
			insn->retpoline_safe = true;
		}

		return 0;
	}

	if (opts.mcount && sym->fentry) {
		if (sibling)
			WARN_INSN(insn, "tail call to __fentry__ !?!?");
		if (opts.mnop) {
			if (reloc)
				set_reloc_type(file->elf, reloc, R_NONE);

			if (elf_write_insn(file->elf, insn->sec,
					   insn->offset, insn->len,
					   arch_nop_insn(insn->len))) {
				return -1;
			}

			insn->type = INSN_NOP;
		}

		list_add_tail(&insn->call_node, &file->mcount_loc_list);
		return 0;
	}

	if (insn->type == INSN_CALL && !insn->sec->init &&
	    !insn->_call_dest->embedded_insn)
		list_add_tail(&insn->call_node, &file->call_list);

	if (!sibling && dead_end_function(file, sym))
		insn->dead_end = true;

	return 0;
}

static int add_call_dest(struct objtool_file *file, struct instruction *insn,
			 struct symbol *dest, bool sibling)
{
	insn->_call_dest = dest;
	if (!dest)
		return 0;

	/*
	 * Whatever stack impact regular CALLs have, should be undone
	 *
by the RETURN of the called function. 1438 * 1439 * Annotated intra-function calls retain the stack_ops but 1440 * are converted to JUMP, see read_intra_function_calls(). 1441 */ 1442 remove_insn_ops(insn); 1443 1444 return annotate_call_site(file, insn, sibling); 1445 } 1446 1447 static int add_retpoline_call(struct objtool_file *file, struct instruction *insn) 1448 { 1449 /* 1450 * Retpoline calls/jumps are really dynamic calls/jumps in disguise, 1451 * so convert them accordingly. 1452 */ 1453 switch (insn->type) { 1454 case INSN_CALL: 1455 insn->type = INSN_CALL_DYNAMIC; 1456 break; 1457 case INSN_JUMP_UNCONDITIONAL: 1458 insn->type = INSN_JUMP_DYNAMIC; 1459 break; 1460 case INSN_JUMP_CONDITIONAL: 1461 insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL; 1462 break; 1463 default: 1464 return 0; 1465 } 1466 1467 insn->retpoline_safe = true; 1468 1469 /* 1470 * Whatever stack impact regular CALLs have, should be undone 1471 * by the RETURN of the called function. 1472 * 1473 * Annotated intra-function calls retain the stack_ops but 1474 * are converted to JUMP, see read_intra_function_calls(). 1475 */ 1476 remove_insn_ops(insn); 1477 1478 return annotate_call_site(file, insn, false); 1479 } 1480 1481 static void add_return_call(struct objtool_file *file, struct instruction *insn, bool add) 1482 { 1483 /* 1484 * Return thunk tail calls are really just returns in disguise, 1485 * so convert them accordingly. 
1486 */ 1487 insn->type = INSN_RETURN; 1488 insn->retpoline_safe = true; 1489 1490 if (add) 1491 list_add_tail(&insn->call_node, &file->return_thunk_list); 1492 } 1493 1494 static bool is_first_func_insn(struct objtool_file *file, 1495 struct instruction *insn) 1496 { 1497 struct symbol *func = insn_func(insn); 1498 1499 if (!func) 1500 return false; 1501 1502 if (insn->offset == func->offset) 1503 return true; 1504 1505 /* Allow direct CALL/JMP past ENDBR */ 1506 if (opts.ibt) { 1507 struct instruction *prev = prev_insn_same_sym(file, insn); 1508 1509 if (prev && prev->type == INSN_ENDBR && 1510 insn->offset == func->offset + prev->len) 1511 return true; 1512 } 1513 1514 return false; 1515 } 1516 1517 /* 1518 * Find the destination instructions for all jumps. 1519 */ 1520 static int add_jump_destinations(struct objtool_file *file) 1521 { 1522 struct instruction *insn; 1523 struct reloc *reloc; 1524 1525 for_each_insn(file, insn) { 1526 struct symbol *func = insn_func(insn); 1527 struct instruction *dest_insn; 1528 struct section *dest_sec; 1529 struct symbol *dest_sym; 1530 unsigned long dest_off; 1531 1532 if (!is_static_jump(insn)) 1533 continue; 1534 1535 if (insn->jump_dest) { 1536 /* 1537 * handle_group_alt() may have previously set 1538 * 'jump_dest' for some alternatives. 
1539 */ 1540 continue; 1541 } 1542 1543 reloc = insn_reloc(file, insn); 1544 if (!reloc) { 1545 dest_sec = insn->sec; 1546 dest_off = arch_jump_destination(insn); 1547 dest_sym = dest_sec->sym; 1548 } else { 1549 dest_sym = reloc->sym; 1550 if (is_undef_sym(dest_sym)) { 1551 if (dest_sym->retpoline_thunk) { 1552 if (add_retpoline_call(file, insn)) 1553 return -1; 1554 continue; 1555 } 1556 1557 if (dest_sym->return_thunk) { 1558 add_return_call(file, insn, true); 1559 continue; 1560 } 1561 1562 /* External symbol */ 1563 if (func) { 1564 /* External sibling call */ 1565 if (add_call_dest(file, insn, dest_sym, true)) 1566 return -1; 1567 continue; 1568 } 1569 1570 /* Non-func asm code jumping to external symbol */ 1571 continue; 1572 } 1573 1574 dest_sec = dest_sym->sec; 1575 dest_off = dest_sym->offset + arch_insn_adjusted_addend(insn, reloc); 1576 } 1577 1578 dest_insn = find_insn(file, dest_sec, dest_off); 1579 if (!dest_insn) { 1580 struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off); 1581 1582 /* 1583 * retbleed_untrain_ret() jumps to 1584 * __x86_return_thunk(), but objtool can't find 1585 * the thunk's starting RET instruction, 1586 * because the RET is also in the middle of 1587 * another instruction. Objtool only knows 1588 * about the outer instruction. 1589 */ 1590 if (sym && sym->embedded_insn) { 1591 add_return_call(file, insn, false); 1592 continue; 1593 } 1594 1595 /* 1596 * GCOV/KCOV dead code can jump to the end of 1597 * the function/section. 
1598 */ 1599 if (file->ignore_unreachables && func && 1600 dest_sec == insn->sec && 1601 dest_off == func->offset + func->len) 1602 continue; 1603 1604 ERROR_INSN(insn, "can't find jump dest instruction at %s", 1605 offstr(dest_sec, dest_off)); 1606 return -1; 1607 } 1608 1609 if (!dest_sym || is_sec_sym(dest_sym)) { 1610 dest_sym = dest_insn->sym; 1611 if (!dest_sym) 1612 goto set_jump_dest; 1613 } 1614 1615 if (dest_sym->retpoline_thunk && dest_insn->offset == dest_sym->offset) { 1616 if (add_retpoline_call(file, insn)) 1617 return -1; 1618 continue; 1619 } 1620 1621 if (dest_sym->return_thunk && dest_insn->offset == dest_sym->offset) { 1622 add_return_call(file, insn, true); 1623 continue; 1624 } 1625 1626 if (!insn->sym || insn->sym->pfunc == dest_sym->pfunc) 1627 goto set_jump_dest; 1628 1629 /* 1630 * Internal cross-function jump. 1631 */ 1632 1633 if (is_first_func_insn(file, dest_insn)) { 1634 /* Internal sibling call */ 1635 if (add_call_dest(file, insn, dest_sym, true)) 1636 return -1; 1637 continue; 1638 } 1639 1640 set_jump_dest: 1641 insn->jump_dest = dest_insn; 1642 } 1643 1644 return 0; 1645 } 1646 1647 static struct symbol *find_call_destination(struct section *sec, unsigned long offset) 1648 { 1649 struct symbol *call_dest; 1650 1651 call_dest = find_func_by_offset(sec, offset); 1652 if (!call_dest) 1653 call_dest = find_symbol_by_offset(sec, offset); 1654 1655 return call_dest; 1656 } 1657 1658 /* 1659 * Find the destination instructions for all calls. 
 */
static int add_call_destinations(struct objtool_file *file)
{
	struct instruction *insn;
	unsigned long dest_off;
	struct symbol *dest;
	struct reloc *reloc;

	for_each_insn(file, insn) {
		struct symbol *func = insn_func(insn);

		if (insn->type != INSN_CALL)
			continue;

		reloc = insn_reloc(file, insn);
		if (!reloc) {
			/*
			 * No reloc: the destination is encoded in the
			 * instruction itself and lies in the same section.
			 */
			dest_off = arch_jump_destination(insn);
			dest = find_call_destination(insn->sec, dest_off);

			if (add_call_dest(file, insn, dest, false))
				return -1;

			/* Ignored functions get no further diagnostics. */
			if (func && func->ignore)
				continue;

			/*
			 * A reloc-less call with no resolvable destination
			 * must carry an intra-function-call annotation (see
			 * __annotate_ifc()); otherwise it's an error.
			 */
			if (!insn_call_dest(insn)) {
				ERROR_INSN(insn, "unannotated intra-function call");
				return -1;
			}

			if (func && !is_func_sym(insn_call_dest(insn))) {
				ERROR_INSN(insn, "unsupported call to non-function");
				return -1;
			}

		} else if (is_sec_sym(reloc->sym)) {
			/*
			 * Section-relative reloc: resolve section + addend to
			 * the containing symbol.
			 */
			dest_off = arch_insn_adjusted_addend(insn, reloc);
			dest = find_call_destination(reloc->sym->sec, dest_off);
			if (!dest) {
				ERROR_INSN(insn, "can't find call dest symbol at %s+0x%lx",
					   reloc->sym->sec->name, dest_off);
				return -1;
			}

			if (add_call_dest(file, insn, dest, false))
				return -1;

		} else if (reloc->sym->retpoline_thunk) {
			/* Calls to retpoline thunks become indirect calls. */
			if (add_retpoline_call(file, insn))
				return -1;

		} else {
			/* Direct call to a named symbol. */
			if (add_call_dest(file, insn, reloc->sym, false))
				return -1;
		}
	}

	return 0;
}

/*
 * The .alternatives section requires some extra special care over and above
 * other special sections because alternatives are patched in place.
1722 */ 1723 static int handle_group_alt(struct objtool_file *file, 1724 struct special_alt *special_alt, 1725 struct instruction *orig_insn, 1726 struct instruction **new_insn) 1727 { 1728 struct instruction *last_new_insn = NULL, *insn, *nop = NULL; 1729 struct alt_group *orig_alt_group, *new_alt_group; 1730 unsigned long dest_off; 1731 1732 orig_alt_group = orig_insn->alt_group; 1733 if (!orig_alt_group) { 1734 struct instruction *last_orig_insn = NULL; 1735 1736 orig_alt_group = calloc(1, sizeof(*orig_alt_group)); 1737 if (!orig_alt_group) { 1738 ERROR_GLIBC("calloc"); 1739 return -1; 1740 } 1741 orig_alt_group->cfi = calloc(special_alt->orig_len, 1742 sizeof(struct cfi_state *)); 1743 if (!orig_alt_group->cfi) { 1744 ERROR_GLIBC("calloc"); 1745 return -1; 1746 } 1747 1748 insn = orig_insn; 1749 sec_for_each_insn_from(file, insn) { 1750 if (insn->offset >= special_alt->orig_off + special_alt->orig_len) 1751 break; 1752 1753 insn->alt_group = orig_alt_group; 1754 last_orig_insn = insn; 1755 } 1756 orig_alt_group->orig_group = NULL; 1757 orig_alt_group->first_insn = orig_insn; 1758 orig_alt_group->last_insn = last_orig_insn; 1759 orig_alt_group->nop = NULL; 1760 orig_alt_group->ignore = orig_insn->ignore_alts; 1761 } else { 1762 if (orig_alt_group->last_insn->offset + orig_alt_group->last_insn->len - 1763 orig_alt_group->first_insn->offset != special_alt->orig_len) { 1764 ERROR_INSN(orig_insn, "weirdly overlapping alternative! %ld != %d", 1765 orig_alt_group->last_insn->offset + 1766 orig_alt_group->last_insn->len - 1767 orig_alt_group->first_insn->offset, 1768 special_alt->orig_len); 1769 return -1; 1770 } 1771 } 1772 1773 new_alt_group = calloc(1, sizeof(*new_alt_group)); 1774 if (!new_alt_group) { 1775 ERROR_GLIBC("calloc"); 1776 return -1; 1777 } 1778 1779 if (special_alt->new_len < special_alt->orig_len) { 1780 /* 1781 * Insert a fake nop at the end to make the replacement 1782 * alt_group the same size as the original. 
This is needed to 1783 * allow propagate_alt_cfi() to do its magic. When the last 1784 * instruction affects the stack, the instruction after it (the 1785 * nop) will propagate the new state to the shared CFI array. 1786 */ 1787 nop = calloc(1, sizeof(*nop)); 1788 if (!nop) { 1789 ERROR_GLIBC("calloc"); 1790 return -1; 1791 } 1792 memset(nop, 0, sizeof(*nop)); 1793 1794 nop->sec = special_alt->new_sec; 1795 nop->offset = special_alt->new_off + special_alt->new_len; 1796 nop->len = special_alt->orig_len - special_alt->new_len; 1797 nop->type = INSN_NOP; 1798 nop->sym = orig_insn->sym; 1799 nop->alt_group = new_alt_group; 1800 nop->fake = 1; 1801 } 1802 1803 if (!special_alt->new_len) { 1804 *new_insn = nop; 1805 goto end; 1806 } 1807 1808 insn = *new_insn; 1809 sec_for_each_insn_from(file, insn) { 1810 struct reloc *alt_reloc; 1811 1812 if (insn->offset >= special_alt->new_off + special_alt->new_len) 1813 break; 1814 1815 last_new_insn = insn; 1816 1817 insn->sym = orig_insn->sym; 1818 insn->alt_group = new_alt_group; 1819 1820 /* 1821 * Since alternative replacement code is copy/pasted by the 1822 * kernel after applying relocations, generally such code can't 1823 * have relative-address relocation references to outside the 1824 * .altinstr_replacement section, unless the arch's 1825 * alternatives code can adjust the relative offsets 1826 * accordingly. 
1827 */ 1828 alt_reloc = insn_reloc(file, insn); 1829 if (alt_reloc && arch_pc_relative_reloc(alt_reloc) && 1830 !arch_support_alt_relocation(special_alt, insn, alt_reloc)) { 1831 1832 ERROR_INSN(insn, "unsupported relocation in alternatives section"); 1833 return -1; 1834 } 1835 1836 if (!is_static_jump(insn)) 1837 continue; 1838 1839 if (!insn->immediate) 1840 continue; 1841 1842 dest_off = arch_jump_destination(insn); 1843 if (dest_off == special_alt->new_off + special_alt->new_len) { 1844 insn->jump_dest = next_insn_same_sec(file, orig_alt_group->last_insn); 1845 if (!insn->jump_dest) { 1846 ERROR_INSN(insn, "can't find alternative jump destination"); 1847 return -1; 1848 } 1849 } 1850 } 1851 1852 if (!last_new_insn) { 1853 ERROR_FUNC(special_alt->new_sec, special_alt->new_off, 1854 "can't find last new alternative instruction"); 1855 return -1; 1856 } 1857 1858 end: 1859 new_alt_group->orig_group = orig_alt_group; 1860 new_alt_group->first_insn = *new_insn; 1861 new_alt_group->last_insn = last_new_insn; 1862 new_alt_group->nop = nop; 1863 new_alt_group->ignore = (*new_insn)->ignore_alts; 1864 new_alt_group->cfi = orig_alt_group->cfi; 1865 return 0; 1866 } 1867 1868 /* 1869 * A jump table entry can either convert a nop to a jump or a jump to a nop. 1870 * If the original instruction is a jump, make the alt entry an effective nop 1871 * by just skipping the original instruction. 
 */
static int handle_jump_alt(struct objtool_file *file,
			   struct special_alt *special_alt,
			   struct instruction *orig_insn,
			   struct instruction **new_insn)
{
	/* Only an unconditional JMP or a NOP can sit at a jump label site. */
	if (orig_insn->type != INSN_JUMP_UNCONDITIONAL &&
	    orig_insn->type != INSN_NOP) {

		ERROR_INSN(orig_insn, "unsupported instruction at jump label");
		return -1;
	}

	/*
	 * With --hack-jump-label and bit 1 of the key addend set, rewrite the
	 * jump to a NOP directly in the object file and drop its reloc.
	 */
	if (opts.hack_jump_label && special_alt->key_addend & 2) {
		struct reloc *reloc = insn_reloc(file, orig_insn);

		if (reloc)
			set_reloc_type(file->elf, reloc, R_NONE);

		if (elf_write_insn(file->elf, orig_insn->sec,
				   orig_insn->offset, orig_insn->len,
				   arch_nop_insn(orig_insn->len))) {
			return -1;
		}

		orig_insn->type = INSN_NOP;
	}

	if (orig_insn->type == INSN_NOP) {
		/* Statistics only; a NOP site needs no alternative insn. */
		if (orig_insn->len == 2)
			file->jl_nop_short++;
		else
			file->jl_nop_long++;

		return 0;
	}

	if (orig_insn->len == 2)
		file->jl_short++;
	else
		file->jl_long++;

	/*
	 * Original is a jump: the "alternative" is simply falling through to
	 * the next instruction, i.e. skipping the jump.
	 */
	*new_insn = next_insn_same_sec(file, orig_insn);
	return 0;
}

/*
 * Read all the special sections which have alternate instructions which can be
 * patched in or redirected to at runtime. Each instruction having alternate
 * instruction(s) has them added to its insn->alts list, which will be
 * traversed in validate_branch().
 */
static int add_special_section_alts(struct objtool_file *file)
{
	struct list_head special_alts;
	struct instruction *orig_insn, *new_insn;
	struct special_alt *special_alt, *tmp;
	struct alternative *alt;

	/* Collect all alternative entries from the ELF special sections. */
	if (special_get_alts(file->elf, &special_alts))
		return -1;

	list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {

		orig_insn = find_insn(file, special_alt->orig_sec,
				      special_alt->orig_off);
		if (!orig_insn) {
			ERROR_FUNC(special_alt->orig_sec, special_alt->orig_off,
				   "special: can't find orig instruction");
			return -1;
		}

		/*
		 * Group alternatives with a zero-length replacement have no
		 * new instruction; handle_group_alt() fabricates a nop.
		 */
		new_insn = NULL;
		if (!special_alt->group || special_alt->new_len) {
			new_insn = find_insn(file, special_alt->new_sec,
					     special_alt->new_off);
			if (!new_insn) {
				ERROR_FUNC(special_alt->new_sec, special_alt->new_off,
					   "special: can't find new instruction");
				return -1;
			}
		}

		if (special_alt->group) {
			if (!special_alt->orig_len) {
				ERROR_INSN(orig_insn, "empty alternative entry");
				continue;
			}

			if (handle_group_alt(file, special_alt, orig_insn,
					     &new_insn))
				return -1;

		} else if (special_alt->jump_or_nop) {
			if (handle_jump_alt(file, special_alt, orig_insn,
					    &new_insn))
				return -1;
		}

		/* Prepend the alternative to the original insn's alts list. */
		alt = calloc(1, sizeof(*alt));
		if (!alt) {
			ERROR_GLIBC("calloc");
			return -1;
		}

		alt->insn = new_insn;
		alt->next = orig_insn->alts;
		orig_insn->alts = alt;

		list_del(&special_alt->list);
		free(special_alt);
	}

	if (opts.stats) {
		printf("jl\\\tNOP\tJMP\n");
		printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short);
		printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long);
	}

	return 0;
}

/* Weak default: jump table entries are sym + addend; arches may override. */
__weak unsigned long arch_jump_table_sym_offset(struct reloc *reloc, struct reloc *table)
{
	return reloc->sym->offset +
reloc_addend(reloc); 1995 } 1996 1997 static int add_jump_table(struct objtool_file *file, struct instruction *insn) 1998 { 1999 unsigned long table_size = insn_jump_table_size(insn); 2000 struct symbol *pfunc = insn_func(insn)->pfunc; 2001 struct reloc *table = insn_jump_table(insn); 2002 struct instruction *dest_insn; 2003 unsigned int prev_offset = 0; 2004 struct reloc *reloc = table; 2005 struct alternative *alt; 2006 unsigned long sym_offset; 2007 2008 /* 2009 * Each @reloc is a switch table relocation which points to the target 2010 * instruction. 2011 */ 2012 for_each_reloc_from(table->sec, reloc) { 2013 2014 /* Check for the end of the table: */ 2015 if (table_size && reloc_offset(reloc) - reloc_offset(table) >= table_size) 2016 break; 2017 if (reloc != table && is_jump_table(reloc)) 2018 break; 2019 2020 /* Make sure the table entries are consecutive: */ 2021 if (prev_offset && reloc_offset(reloc) != prev_offset + arch_reloc_size(reloc)) 2022 break; 2023 2024 sym_offset = arch_jump_table_sym_offset(reloc, table); 2025 2026 /* Detect function pointers from contiguous objects: */ 2027 if (reloc->sym->sec == pfunc->sec && sym_offset == pfunc->offset) 2028 break; 2029 2030 /* 2031 * Clang sometimes leaves dangling unused jump table entries 2032 * which point to the end of the function. Ignore them. 
2033 */ 2034 if (reloc->sym->sec == pfunc->sec && 2035 sym_offset == pfunc->offset + pfunc->len) 2036 goto next; 2037 2038 dest_insn = find_insn(file, reloc->sym->sec, sym_offset); 2039 if (!dest_insn) 2040 break; 2041 2042 /* Make sure the destination is in the same function: */ 2043 if (!insn_func(dest_insn) || insn_func(dest_insn)->pfunc != pfunc) 2044 break; 2045 2046 alt = calloc(1, sizeof(*alt)); 2047 if (!alt) { 2048 ERROR_GLIBC("calloc"); 2049 return -1; 2050 } 2051 2052 alt->insn = dest_insn; 2053 alt->next = insn->alts; 2054 insn->alts = alt; 2055 next: 2056 prev_offset = reloc_offset(reloc); 2057 } 2058 2059 if (!prev_offset) { 2060 ERROR_INSN(insn, "can't find switch jump table"); 2061 return -1; 2062 } 2063 2064 return 0; 2065 } 2066 2067 /* 2068 * find_jump_table() - Given a dynamic jump, find the switch jump table 2069 * associated with it. 2070 */ 2071 static void find_jump_table(struct objtool_file *file, struct symbol *func, 2072 struct instruction *insn) 2073 { 2074 struct reloc *table_reloc; 2075 struct instruction *dest_insn, *orig_insn = insn; 2076 unsigned long table_size; 2077 unsigned long sym_offset; 2078 2079 /* 2080 * Backward search using the @first_jump_src links, these help avoid 2081 * much of the 'in between' code. Which avoids us getting confused by 2082 * it. 
2083 */ 2084 for (; 2085 insn && insn_func(insn) && insn_func(insn)->pfunc == func; 2086 insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) { 2087 2088 if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC) 2089 break; 2090 2091 /* allow small jumps within the range */ 2092 if (insn->type == INSN_JUMP_UNCONDITIONAL && 2093 insn->jump_dest && 2094 (insn->jump_dest->offset <= insn->offset || 2095 insn->jump_dest->offset > orig_insn->offset)) 2096 break; 2097 2098 table_reloc = arch_find_switch_table(file, insn, &table_size); 2099 if (!table_reloc) 2100 continue; 2101 2102 sym_offset = table_reloc->sym->offset + reloc_addend(table_reloc); 2103 2104 dest_insn = find_insn(file, table_reloc->sym->sec, sym_offset); 2105 if (!dest_insn || !insn_func(dest_insn) || insn_func(dest_insn)->pfunc != func) 2106 continue; 2107 2108 set_jump_table(table_reloc); 2109 orig_insn->_jump_table = table_reloc; 2110 orig_insn->_jump_table_size = table_size; 2111 2112 break; 2113 } 2114 } 2115 2116 /* 2117 * First pass: Mark the head of each jump table so that in the next pass, 2118 * we know when a given jump table ends and the next one starts. 2119 */ 2120 static void mark_func_jump_tables(struct objtool_file *file, 2121 struct symbol *func) 2122 { 2123 struct instruction *insn, *last = NULL; 2124 2125 func_for_each_insn(file, func, insn) { 2126 if (!last) 2127 last = insn; 2128 2129 /* 2130 * Store back-pointers for unconditional forward jumps such 2131 * that find_jump_table() can back-track using those and 2132 * avoid some potentially confusing code. 
2133 */ 2134 if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest && 2135 insn->offset > last->offset && 2136 insn->jump_dest->offset > insn->offset && 2137 !insn->jump_dest->first_jump_src) { 2138 2139 insn->jump_dest->first_jump_src = insn; 2140 last = insn->jump_dest; 2141 } 2142 2143 if (insn->type != INSN_JUMP_DYNAMIC) 2144 continue; 2145 2146 find_jump_table(file, func, insn); 2147 } 2148 } 2149 2150 static int add_func_jump_tables(struct objtool_file *file, 2151 struct symbol *func) 2152 { 2153 struct instruction *insn; 2154 2155 func_for_each_insn(file, func, insn) { 2156 if (!insn_jump_table(insn)) 2157 continue; 2158 2159 if (add_jump_table(file, insn)) 2160 return -1; 2161 } 2162 2163 return 0; 2164 } 2165 2166 /* 2167 * For some switch statements, gcc generates a jump table in the .rodata 2168 * section which contains a list of addresses within the function to jump to. 2169 * This finds these jump tables and adds them to the insn->alts lists. 2170 */ 2171 static int add_jump_table_alts(struct objtool_file *file) 2172 { 2173 struct symbol *func; 2174 2175 if (!file->rodata) 2176 return 0; 2177 2178 for_each_sym(file->elf, func) { 2179 if (!is_func_sym(func) || func->alias != func) 2180 continue; 2181 2182 mark_func_jump_tables(file, func); 2183 if (add_func_jump_tables(file, func)) 2184 return -1; 2185 } 2186 2187 return 0; 2188 } 2189 2190 static void set_func_state(struct cfi_state *state) 2191 { 2192 state->cfa = initial_func_cfi.cfa; 2193 memcpy(&state->regs, &initial_func_cfi.regs, 2194 CFI_NUM_REGS * sizeof(struct cfi_reg)); 2195 state->stack_size = initial_func_cfi.cfa.offset; 2196 state->type = UNWIND_HINT_TYPE_CALL; 2197 } 2198 2199 static int read_unwind_hints(struct objtool_file *file) 2200 { 2201 struct cfi_state cfi = init_cfi; 2202 struct section *sec; 2203 struct unwind_hint *hint; 2204 struct instruction *insn; 2205 struct reloc *reloc; 2206 unsigned long offset; 2207 int i; 2208 2209 sec = find_section_by_name(file->elf, 
".discard.unwind_hints"); 2210 if (!sec) 2211 return 0; 2212 2213 if (!sec->rsec) { 2214 ERROR("missing .rela.discard.unwind_hints section"); 2215 return -1; 2216 } 2217 2218 if (sec_size(sec) % sizeof(struct unwind_hint)) { 2219 ERROR("struct unwind_hint size mismatch"); 2220 return -1; 2221 } 2222 2223 file->hints = true; 2224 2225 for (i = 0; i < sec_size(sec) / sizeof(struct unwind_hint); i++) { 2226 hint = (struct unwind_hint *)sec->data->d_buf + i; 2227 2228 reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint)); 2229 if (!reloc) { 2230 ERROR("can't find reloc for unwind_hints[%d]", i); 2231 return -1; 2232 } 2233 2234 offset = reloc->sym->offset + reloc_addend(reloc); 2235 2236 insn = find_insn(file, reloc->sym->sec, offset); 2237 if (!insn) { 2238 ERROR("can't find insn for unwind_hints[%d]", i); 2239 return -1; 2240 } 2241 2242 insn->hint = true; 2243 2244 if (hint->type == UNWIND_HINT_TYPE_UNDEFINED) { 2245 insn->cfi = &force_undefined_cfi; 2246 continue; 2247 } 2248 2249 if (hint->type == UNWIND_HINT_TYPE_SAVE) { 2250 insn->hint = false; 2251 insn->save = true; 2252 continue; 2253 } 2254 2255 if (hint->type == UNWIND_HINT_TYPE_RESTORE) { 2256 insn->restore = true; 2257 continue; 2258 } 2259 2260 if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) { 2261 struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset); 2262 2263 if (sym && is_global_sym(sym)) { 2264 if (opts.ibt && insn->type != INSN_ENDBR && !insn->noendbr) { 2265 ERROR_INSN(insn, "UNWIND_HINT_IRET_REGS without ENDBR"); 2266 return -1; 2267 } 2268 } 2269 } 2270 2271 if (hint->type == UNWIND_HINT_TYPE_FUNC) { 2272 insn->cfi = &func_cfi; 2273 continue; 2274 } 2275 2276 if (insn->cfi) 2277 cfi = *(insn->cfi); 2278 2279 if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) { 2280 ERROR_INSN(insn, "unsupported unwind_hint sp base reg %d", hint->sp_reg); 2281 return -1; 2282 } 2283 2284 cfi.cfa.offset = bswap_if_needed(file->elf, hint->sp_offset); 2285 cfi.type = hint->type; 2286 
cfi.signal = hint->signal; 2287 2288 insn->cfi = cfi_hash_find_or_add(&cfi); 2289 } 2290 2291 return 0; 2292 } 2293 2294 static int read_annotate(struct objtool_file *file, 2295 int (*func)(struct objtool_file *file, int type, struct instruction *insn)) 2296 { 2297 struct section *sec; 2298 struct instruction *insn; 2299 struct reloc *reloc; 2300 uint64_t offset; 2301 int type; 2302 2303 sec = find_section_by_name(file->elf, ".discard.annotate_insn"); 2304 if (!sec) 2305 return 0; 2306 2307 if (!sec->rsec) 2308 return 0; 2309 2310 if (sec->sh.sh_entsize != 8) { 2311 static bool warned = false; 2312 if (!warned && opts.verbose) { 2313 WARN("%s: dodgy linker, sh_entsize != 8", sec->name); 2314 warned = true; 2315 } 2316 sec->sh.sh_entsize = 8; 2317 } 2318 2319 if (sec_num_entries(sec) != sec_num_entries(sec->rsec)) { 2320 ERROR("bad .discard.annotate_insn section: missing relocs"); 2321 return -1; 2322 } 2323 2324 for_each_reloc(sec->rsec, reloc) { 2325 type = annotype(file->elf, sec, reloc); 2326 offset = reloc->sym->offset + reloc_addend(reloc); 2327 insn = find_insn(file, reloc->sym->sec, offset); 2328 2329 if (!insn) { 2330 ERROR("bad .discard.annotate_insn entry: %d of type %d", reloc_idx(reloc), type); 2331 return -1; 2332 } 2333 2334 if (func(file, type, insn)) 2335 return -1; 2336 } 2337 2338 return 0; 2339 } 2340 2341 static int __annotate_early(struct objtool_file *file, int type, struct instruction *insn) 2342 { 2343 switch (type) { 2344 2345 /* Must be before add_special_section_alts() */ 2346 case ANNOTYPE_IGNORE_ALTS: 2347 insn->ignore_alts = true; 2348 break; 2349 2350 /* 2351 * Must be before read_unwind_hints() since that needs insn->noendbr. 
 */
	case ANNOTYPE_NOENDBR:
		insn->noendbr = 1;
		break;

	default:
		break;
	}

	return 0;
}

/*
 * Handle ANNOTYPE_INTRA_FUNCTION_CALL annotations: convert the annotated
 * direct CALL into an unconditional JMP (keeping its stack_ops) and resolve
 * its destination within the same section.
 */
static int __annotate_ifc(struct objtool_file *file, int type, struct instruction *insn)
{
	unsigned long dest_off;

	if (type != ANNOTYPE_INTRA_FUNCTION_CALL)
		return 0;

	if (insn->type != INSN_CALL) {
		ERROR_INSN(insn, "intra_function_call not a direct call");
		return -1;
	}

	/*
	 * Treat intra-function CALLs as JMPs, but with a stack_op.
	 * See add_call_destinations(), which strips stack_ops from
	 * normal CALLs.
	 */
	insn->type = INSN_JUMP_UNCONDITIONAL;

	dest_off = arch_jump_destination(insn);
	insn->jump_dest = find_insn(file, insn->sec, dest_off);
	if (!insn->jump_dest) {
		ERROR_INSN(insn, "can't find call dest at %s+0x%lx",
			   insn->sec->name, dest_off);
		return -1;
	}

	return 0;
}

/*
 * Apply the remaining annotation types after jump/call destinations are
 * known; NOENDBR, IGNORE_ALTS and INTRA_FUNCTION_CALL were consumed by the
 * earlier passes.
 */
static int __annotate_late(struct objtool_file *file, int type, struct instruction *insn)
{
	struct symbol *sym;

	switch (type) {
	case ANNOTYPE_NOENDBR:
		/* early */
		break;

	case ANNOTYPE_RETPOLINE_SAFE:
		/* Only meaningful on indirect jump/call, return or nop. */
		if (insn->type != INSN_JUMP_DYNAMIC &&
		    insn->type != INSN_CALL_DYNAMIC &&
		    insn->type != INSN_RETURN &&
		    insn->type != INSN_NOP) {
			ERROR_INSN(insn, "retpoline_safe hint not an indirect jump/call/ret/nop");
			return -1;
		}

		insn->retpoline_safe = true;
		break;

	case ANNOTYPE_INSTR_BEGIN:
		insn->instr++;
		break;

	case ANNOTYPE_INSTR_END:
		insn->instr--;
		break;

	case ANNOTYPE_UNRET_BEGIN:
		insn->unret = 1;
		break;

	case ANNOTYPE_IGNORE_ALTS:
		/* early */
		break;

	case ANNOTYPE_INTRA_FUNCTION_CALL:
		/* ifc */
		break;

	case ANNOTYPE_REACHABLE:
		insn->dead_end = false;
		break;

	case ANNOTYPE_NOCFI:
		sym = insn->sym;
		if (!sym) {
			ERROR_INSN(insn, "dodgy NOCFI annotation");
			return -1;
		}
		insn->sym->nocfi = 1;
		break;

	default:
		ERROR_INSN(insn, "Unknown annotation type: %d", type);
		return -1;
	}

	return 0;
}

/*
 * Return true if name matches an instrumentation function, where calls to that
 * function from noinstr code can safely be removed, but compilers won't do so.
 */
static bool is_profiling_func(const char *name)
{
	/*
	 * Many compilers cannot disable KCOV with a function attribute.
	 */
	if (!strncmp(name, "__sanitizer_cov_", 16))
		return true;

	return false;
}

/*
 * Walk the symbol table once and tag symbols with the attributes the later
 * passes depend on (static-call trampolines, retpoline/return thunks,
 * embedded instructions, ftrace entries, profiling functions).
 */
static int classify_symbols(struct objtool_file *file)
{
	struct symbol *func;

	for_each_sym(file->elf, func) {
		/* Compiler-generated local labels start with ".L". */
		if (is_notype_sym(func) && strstarts(func->name, ".L"))
			func->local_label = true;

		/* The remaining classifications apply to global symbols only. */
		if (!is_global_sym(func))
			continue;

		if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR,
			     strlen(STATIC_CALL_TRAMP_PREFIX_STR)))
			func->static_call_tramp = true;

		if (arch_is_retpoline(func))
			func->retpoline_thunk = true;

		if (arch_is_rethunk(func))
			func->return_thunk = true;

		if (arch_is_embedded_insn(func))
			func->embedded_insn = true;

		if (arch_ftrace_match(func->name))
			func->fentry = true;

		if (is_profiling_func(func->name))
			func->profiling_func = true;
	}

	return 0;
}

/* Flag the sections that may contain jump tables; see the comment below. */
static void mark_rodata(struct objtool_file *file)
{
	struct section *sec;
	bool found = false;

	/*
	 * Search for the following rodata sections, each of which can
	 * potentially contain jump tables:
	 *
	 * - .rodata: can contain GCC switch tables
	 * - .rodata.<func>: same, if -fdata-sections is being used
	 * - .data.rel.ro.c_jump_table: contains C annotated jump tables
	 *
	 * .rodata.str1.* sections are ignored; they
don't contain jump tables. 2519 */ 2520 for_each_sec(file->elf, sec) { 2521 if ((!strncmp(sec->name, ".rodata", 7) && 2522 !strstr(sec->name, ".str1.")) || 2523 !strncmp(sec->name, ".data.rel.ro", 12)) { 2524 sec->rodata = true; 2525 found = true; 2526 } 2527 } 2528 2529 file->rodata = found; 2530 } 2531 2532 static void mark_holes(struct objtool_file *file) 2533 { 2534 struct instruction *insn; 2535 bool in_hole = false; 2536 2537 if (!opts.link) 2538 return; 2539 2540 /* 2541 * Whole archive runs might encounter dead code from weak symbols. 2542 * This is where the linker will have dropped the weak symbol in 2543 * favour of a regular symbol, but leaves the code in place. 2544 */ 2545 for_each_insn(file, insn) { 2546 if (insn->sym || !find_symbol_hole_containing(insn->sec, insn->offset)) { 2547 in_hole = false; 2548 continue; 2549 } 2550 2551 /* Skip function padding and pfx code */ 2552 if (!in_hole && insn->type == INSN_NOP) 2553 continue; 2554 2555 in_hole = true; 2556 insn->hole = 1; 2557 2558 /* 2559 * If this hole jumps to a .cold function, mark it ignore. 2560 */ 2561 if (insn->jump_dest) { 2562 struct symbol *dest_func = insn_func(insn->jump_dest); 2563 2564 if (dest_func && dest_func->cold) 2565 dest_func->ignore = true; 2566 } 2567 } 2568 } 2569 2570 static bool validate_branch_enabled(void) 2571 { 2572 return opts.stackval || 2573 opts.orc || 2574 opts.uaccess || 2575 opts.checksum; 2576 } 2577 2578 static int decode_sections(struct objtool_file *file) 2579 { 2580 file->klp = is_livepatch_module(file); 2581 2582 mark_rodata(file); 2583 2584 if (init_pv_ops(file)) 2585 return -1; 2586 2587 /* 2588 * Must be before add_{jump_call}_destination. 
	 */
	if (classify_symbols(file))
		return -1;

	if (decode_instructions(file))
		return -1;

	if (add_ignores(file))
		return -1;

	add_uaccess_safe(file);

	if (read_annotate(file, __annotate_early))
		return -1;

	/*
	 * Must be before add_jump_destinations(), which depends on 'func'
	 * being set for alternatives, to enable proper sibling call detection.
	 */
	if (validate_branch_enabled() || opts.noinstr || opts.hack_jump_label) {
		if (add_special_section_alts(file))
			return -1;
	}

	if (add_jump_destinations(file))
		return -1;

	/*
	 * Must be before add_call_destination(); it changes INSN_CALL to
	 * INSN_JUMP.
	 */
	if (read_annotate(file, __annotate_ifc))
		return -1;

	if (add_call_destinations(file))
		return -1;

	if (add_jump_table_alts(file))
		return -1;

	if (read_unwind_hints(file))
		return -1;

	/* Must be after add_jump_destinations() */
	mark_holes(file);

	/*
	 * Must be after add_call_destinations() such that it can override
	 * dead_end_function() marks.
	 */
	if (read_annotate(file, __annotate_late))
		return -1;

	return 0;
}

/*
 * Calls to fentry and embedded-insn destinations don't follow the normal
 * calling convention, so the frame pointer check doesn't apply to them.
 */
static bool is_special_call(struct instruction *insn)
{
	if (insn->type == INSN_CALL) {
		struct symbol *dest = insn_call_dest(insn);

		if (!dest)
			return false;

		if (dest->fentry || dest->embedded_insn)
			return true;
	}

	return false;
}

/* Has the stack state diverged from the initial function-entry state? */
static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state)
{
	struct cfi_state *cfi = &state->cfi;
	int i;

	if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap)
		return true;

	if (cfi->cfa.offset != initial_func_cfi.cfa.offset)
		return true;

	if (cfi->stack_size != initial_func_cfi.cfa.offset)
		return true;

	for (i = 0; i < CFI_NUM_REGS; i++) {
		if (cfi->regs[i].base != initial_func_cfi.regs[i].base ||
		    cfi->regs[i].offset != initial_func_cfi.regs[i].offset)
			return true;
	}

	return false;
}

/* Is the register saved at the expected CFA-relative position? */
static bool check_reg_frame_pos(const struct cfi_reg *reg,
				int expected_offset)
{
	return reg->base == CFI_CFA &&
	       reg->offset == expected_offset;
}

static bool has_valid_stack_frame(struct insn_state *state)
{
	struct cfi_state *cfi = &state->cfi;

	/* Standard frame: saved BP and return address at the top of the frame. */
	if (cfi->cfa.base == CFI_BP &&
	    check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) &&
	    check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8))
		return true;

	/* DRAP frame: BP saved relative to itself. */
	if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP)
		return true;

	return false;
}

/*
 * Simplified stack tracking for UNWIND_HINT_TYPE_REGS{,_PARTIAL} regions:
 * only the CFA offset is adjusted for pushes/pops/SP arithmetic.
 */
static int update_cfi_state_regs(struct instruction *insn,
				 struct cfi_state *cfi,
				 struct stack_op *op)
{
	struct cfi_reg *cfa = &cfi->cfa;

	if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
		return 0;

	/* push */
	if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
		cfa->offset += 8;

	/* pop */
	if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
		cfa->offset -= 8;

	/* add immediate to sp */
	if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD &&
	    op->dest.reg == CFI_SP && op->src.reg == CFI_SP)
		cfa->offset -= op->src.offset;

	return 0;
}

/* Record where a callee-saved register was stored; first save wins. */
static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset)
{
	if (arch_callee_saved_reg(reg) &&
	    cfi->regs[reg].base == CFI_UNDEFINED) {
		cfi->regs[reg].base = base;
		cfi->regs[reg].offset = offset;
	}
}

/* Reset a register's CFI to its function-entry (restored) state. */
static void restore_reg(struct cfi_state *cfi, unsigned char reg)
{
	cfi->regs[reg].base = initial_func_cfi.regs[reg].base;
	cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset;
}

/*
 * A note about DRAP stack alignment:
 *
 * GCC has the concept of a DRAP register, which is used to help keep track of
 * the stack pointer when aligning the stack.  r10 or r13 is used as the DRAP
 * register.  The typical DRAP pattern is:
 *
 *   4c 8d 54 24 08		lea    0x8(%rsp),%r10
 *   48 83 e4 c0		and    $0xffffffffffffffc0,%rsp
 *   41 ff 72 f8		pushq  -0x8(%r10)
 *   55				push   %rbp
 *   48 89 e5			mov    %rsp,%rbp
 *				(more pushes)
 *   41 52			push   %r10
 *				...
 *   41 5a			pop    %r10
 *				(more pops)
 *   5d				pop    %rbp
 *   49 8d 62 f8		lea    -0x8(%r10),%rsp
 *   c3				retq
 *
 * There are some variations in the epilogues, like:
 *
 *   5b				pop    %rbx
 *   41 5a			pop    %r10
 *   41 5c			pop    %r12
 *   41 5d			pop    %r13
 *   41 5e			pop    %r14
 *   c9				leaveq
 *   49 8d 62 f8		lea    -0x8(%r10),%rsp
 *   c3				retq
 *
 * and:
 *
 *   4c 8b 55 e8		mov    -0x18(%rbp),%r10
 *   48 8b 5d e0		mov    -0x20(%rbp),%rbx
 *   4c 8b 65 f0		mov    -0x10(%rbp),%r12
 *   4c 8b 6d f8		mov    -0x8(%rbp),%r13
 *   c9				leaveq
 *   49 8d 62 f8		lea    -0x8(%r10),%rsp
 *   c3				retq
 *
 * Sometimes r13 is used as the DRAP register, in which case it's saved and
 * restored beforehand:
 *
 *   41 55			push   %r13
 *   4c 8d 6c 24 10		lea    0x10(%rsp),%r13
 *   48 83 e4 f0		and    $0xfffffffffffffff0,%rsp
 *				...
 *   49 8d 65 f0		lea    -0x10(%r13),%rsp
 *   41 5d			pop    %r13
 *   c3				retq
 */
static int update_cfi_state(struct instruction *insn,
			    struct instruction *next_insn,
			    struct cfi_state *cfi, struct stack_op *op)
{
	struct cfi_reg *cfa = &cfi->cfa;
	struct cfi_reg *regs = cfi->regs;

	/* ignore UNWIND_HINT_UNDEFINED regions */
	if (cfi->force_undefined)
		return 0;

	/* stack operations don't make sense with an undefined CFA */
	if (cfa->base == CFI_UNDEFINED) {
		if (insn_func(insn)) {
			WARN_INSN(insn, "undefined stack state");
			return 1;
		}
		return 0;
	}

	if (cfi->type == UNWIND_HINT_TYPE_REGS ||
	    cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL)
		return update_cfi_state_regs(insn, cfi, op);

	switch (op->dest.type) {

	case OP_DEST_REG:
		switch (op->src.type) {

		case OP_SRC_REG:
			if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
			    cfa->base == CFI_SP &&
			    check_reg_frame_pos(&regs[CFI_BP], -cfa->offset)) {

				/* mov %rsp, %rbp */
				cfa->base = op->dest.reg;
				cfi->bp_scratch = false;
			}

			else if (op->src.reg == CFI_SP &&
				 op->dest.reg == CFI_BP && cfi->drap) {

				/* drap: mov %rsp, %rbp */
				regs[CFI_BP].base = CFI_BP;
				regs[CFI_BP].offset = -cfi->stack_size;
				cfi->bp_scratch = false;
			}

			else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {

				/*
				 * mov %rsp, %reg
				 *
				 * This is needed for the rare case where GCC
				 * does:
				 *
				 *   mov    %rsp, %rax
				 *   ...
				 *   mov    %rax, %rsp
				 */
				cfi->vals[op->dest.reg].base = CFI_CFA;
				cfi->vals[op->dest.reg].offset = -cfi->stack_size;
			}

			else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
				 (cfa->base == CFI_BP || cfa->base == cfi->drap_reg)) {

				/*
				 * mov %rbp, %rsp
				 *
				 * Restore the original stack pointer (Clang).
				 */
				cfi->stack_size = -cfi->regs[CFI_BP].offset;
			}

			else if (op->dest.reg == cfa->base) {

				/* mov %reg, %rsp */
				if (cfa->base == CFI_SP &&
				    cfi->vals[op->src.reg].base == CFI_CFA) {

					/*
					 * This is needed for the rare case
					 * where GCC does something dumb like:
					 *
					 *   lea    0x8(%rsp), %rcx
					 *   ...
					 *   mov    %rcx, %rsp
					 */
					cfa->offset = -cfi->vals[op->src.reg].offset;
					cfi->stack_size = cfa->offset;

				} else if (cfa->base == CFI_SP &&
					   cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
					   cfi->vals[op->src.reg].offset == cfa->offset) {

					/*
					 * Stack swizzle:
					 *
					 * 1: mov %rsp, (%[tos])
					 * 2: mov %[tos], %rsp
					 *    ...
					 * 3: pop %rsp
					 *
					 * Where:
					 *
					 * 1 - places a pointer to the previous
					 *     stack at the Top-of-Stack of the
					 *     new stack.
					 *
					 * 2 - switches to the new stack.
					 *
					 * 3 - pops the Top-of-Stack to restore
					 *     the original stack.
					 *
					 * Note: we set base to SP_INDIRECT
					 * here and preserve offset. Therefore
					 * when the unwinder reaches ToS it
					 * will dereference SP and then add the
					 * offset to find the next frame, IOW:
					 * (%rsp) + offset.
					 */
					cfa->base = CFI_SP_INDIRECT;

				} else {
					cfa->base = CFI_UNDEFINED;
					cfa->offset = 0;
				}
			}

			else if (op->dest.reg == CFI_SP &&
				 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
				 cfi->vals[op->src.reg].offset == cfa->offset) {

				/*
				 * The same stack swizzle case 2) as above. But
				 * because we can't change cfa->base, case 3)
				 * will become a regular POP. Pretend we're a
				 * PUSH so things don't go unbalanced.
				 */
				cfi->stack_size += 8;
			}


			break;

		case OP_SRC_ADD:
			if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) {

				/* add imm, %rsp */
				cfi->stack_size -= op->src.offset;
				if (cfa->base == CFI_SP)
					cfa->offset -= op->src.offset;
				break;
			}

			if (op->dest.reg == CFI_BP && op->src.reg == CFI_SP &&
			    insn->sym->frame_pointer) {
				/* addi.d fp,sp,imm on LoongArch */
				if (cfa->base == CFI_SP && cfa->offset == op->src.offset) {
					cfa->base = CFI_BP;
					cfa->offset = 0;
				}
				break;
			}

			if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {
				/* addi.d sp,fp,imm on LoongArch */
				if (cfa->base == CFI_BP && cfa->offset == 0) {
					if (insn->sym->frame_pointer) {
						cfa->base = CFI_SP;
						cfa->offset = -op->src.offset;
					}
				} else {
					/* lea disp(%rbp), %rsp */
					cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
				}
				break;
			}

			if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {

				/* drap: lea disp(%rsp), %drap */
				cfi->drap_reg = op->dest.reg;

				/*
				 * lea disp(%rsp), %reg
				 *
				 * This is needed for the rare case where GCC
				 * does something dumb like:
				 *
				 *   lea    0x8(%rsp), %rcx
				 *   ...
				 *   mov    %rcx, %rsp
				 */
				cfi->vals[op->dest.reg].base = CFI_CFA;
				cfi->vals[op->dest.reg].offset = \
					-cfi->stack_size + op->src.offset;

				break;
			}

			if (cfi->drap && op->dest.reg == CFI_SP &&
			    op->src.reg == cfi->drap_reg) {

				/* drap: lea disp(%drap), %rsp */
				cfa->base = CFI_SP;
				cfa->offset = cfi->stack_size = -op->src.offset;
				cfi->drap_reg = CFI_UNDEFINED;
				cfi->drap = false;
				break;
			}

			if (op->dest.reg == cfi->cfa.base && !(next_insn && next_insn->hint)) {
				WARN_INSN(insn, "unsupported stack register modification");
				return -1;
			}

			break;

		case OP_SRC_AND:
			if (op->dest.reg != CFI_SP ||
			    (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
			    (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
				WARN_INSN(insn, "unsupported stack pointer realignment");
				return -1;
			}

			if (cfi->drap_reg != CFI_UNDEFINED) {
				/* drap: and imm, %rsp */
				cfa->base = cfi->drap_reg;
				cfa->offset = cfi->stack_size = 0;
				cfi->drap = true;
			}

			/*
			 * Older versions of GCC (4.8ish) realign the stack
			 * without DRAP, with a frame pointer.
			 */

			break;

		case OP_SRC_POP:
		case OP_SRC_POPF:
			if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) {

				/* pop %rsp; # restore from a stack swizzle */
				cfa->base = CFI_SP;
				break;
			}

			if (!cfi->drap && op->dest.reg == cfa->base) {

				/* pop %rbp */
				cfa->base = CFI_SP;
			}

			if (cfi->drap && cfa->base == CFI_BP_INDIRECT &&
			    op->dest.reg == cfi->drap_reg &&
			    cfi->drap_offset == -cfi->stack_size) {

				/* drap: pop %drap */
				cfa->base = cfi->drap_reg;
				cfa->offset = 0;
				cfi->drap_offset = -1;

			} else if (cfi->stack_size == -regs[op->dest.reg].offset) {

				/* pop %reg */
				restore_reg(cfi, op->dest.reg);
			}

			cfi->stack_size -= 8;
			if (cfa->base == CFI_SP)
				cfa->offset -= 8;

			break;

		case OP_SRC_REG_INDIRECT:
			if (!cfi->drap && op->dest.reg == cfa->base &&
			    op->dest.reg == CFI_BP) {

				/* mov disp(%rsp), %rbp */
				cfa->base = CFI_SP;
				cfa->offset = cfi->stack_size;
			}

			if (cfi->drap && op->src.reg == CFI_BP &&
			    op->src.offset == cfi->drap_offset) {

				/* drap: mov disp(%rbp), %drap */
				cfa->base = cfi->drap_reg;
				cfa->offset = 0;
				cfi->drap_offset = -1;
			}

			if (cfi->drap && op->src.reg == CFI_BP &&
			    op->src.offset == regs[op->dest.reg].offset) {

				/* drap: mov disp(%rbp), %reg */
				restore_reg(cfi, op->dest.reg);

			} else if (op->src.reg == cfa->base &&
			    op->src.offset == regs[op->dest.reg].offset + cfa->offset) {

				/* mov disp(%rbp), %reg */
				/* mov disp(%rsp), %reg */
				restore_reg(cfi, op->dest.reg);

			} else if (op->src.reg == CFI_SP &&
			    op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) {

				/* mov disp(%rsp), %reg */
				restore_reg(cfi, op->dest.reg);
			}

			break;

		default:
			WARN_INSN(insn, "unknown stack-related instruction");
			return -1;
		}

		break;

	case OP_DEST_PUSH:
	case OP_DEST_PUSHF:
		cfi->stack_size += 8;
		if (cfa->base == CFI_SP)
			cfa->offset += 8;

		if (op->src.type != OP_SRC_REG)
			break;

		if (cfi->drap) {
			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {

				/* drap: push %drap */
				cfa->base = CFI_BP_INDIRECT;
				cfa->offset = -cfi->stack_size;

				/* save drap so we know when to restore it */
				cfi->drap_offset = -cfi->stack_size;

			} else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) {

				/* drap: push %rbp */
				cfi->stack_size = 0;

			} else {

				/* drap: push %reg */
				save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size);
			}

		} else {

			/* push %reg */
			save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size);
		}

		/* detect when asm code uses rbp as a scratch register */
		if (opts.stackval && insn_func(insn) && op->src.reg == CFI_BP &&
		    cfa->base != CFI_BP)
			cfi->bp_scratch = true;
		break;

	case OP_DEST_REG_INDIRECT:

		if (cfi->drap) {
			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {

				/* drap: mov %drap, disp(%rbp) */
				cfa->base = CFI_BP_INDIRECT;
				cfa->offset = op->dest.offset;

				/* save drap offset so we know when to restore it */
				cfi->drap_offset = op->dest.offset;
			} else {

				/* drap: mov reg, disp(%rbp) */
				save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset);
			}

		} else if (op->dest.reg == cfa->base) {

			/* mov reg, disp(%rbp) */
			/* mov reg, disp(%rsp) */
			save_reg(cfi, op->src.reg, CFI_CFA,
				 op->dest.offset - cfi->cfa.offset);

		} else if (op->dest.reg == CFI_SP) {

			/* mov reg, disp(%rsp) */
			save_reg(cfi, op->src.reg, CFI_CFA,
				 op->dest.offset - cfi->stack_size);

		} else if (op->src.reg == CFI_SP && op->dest.offset == 0) {

			/* mov %rsp, (%reg); # setup a stack swizzle. */
			cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT;
			cfi->vals[op->dest.reg].offset = cfa->offset;
		}

		break;

	case OP_DEST_MEM:
		if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
			WARN_INSN(insn, "unknown stack-related memory operation");
			return -1;
		}

		/* pop mem */
		cfi->stack_size -= 8;
		if (cfa->base == CFI_SP)
			cfa->offset -= 8;

		break;

	default:
		WARN_INSN(insn, "unknown stack-related instruction");
		return -1;
	}

	return 0;
}

/*
 * The stack layouts of alternatives instructions can sometimes diverge when
 * they have stack modifications.  That's fine as long as the potential stack
 * layouts don't conflict at any given potential instruction boundary.
 *
 * Flatten the CFIs of the different alternative code streams (both original
 * and replacement) into a single shared CFI array which can be used to detect
 * conflicts and nicely feed a linear array of ORC entries to the unwinder.
 */
static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn)
{
	struct cfi_state **alt_cfi;
	int group_off;

	if (!insn->alt_group)
		return 0;

	if (!insn->cfi) {
		WARN("CFI missing");
		return -1;
	}

	alt_cfi = insn->alt_group->cfi;
	group_off = insn->offset - insn->alt_group->first_insn->offset;

	if (!alt_cfi[group_off]) {
		alt_cfi[group_off] = insn->cfi;
	} else {
		if (cficmp(alt_cfi[group_off], insn->cfi)) {
			struct alt_group *orig_group = insn->alt_group->orig_group ?: insn->alt_group;
			struct instruction *orig = orig_group->first_insn;

			WARN_INSN(orig, "stack layout conflict in alternatives: %s",
				  offstr(insn->sec, insn->offset));
			return -1;
		}
	}

	return 0;
}

/*
 * Apply all of an instruction's stack ops to the CFI state, and track the
 * PUSHF/POPF save/restore stack for uaccess (AC flag) validation.
 */
static int handle_insn_ops(struct instruction *insn,
			   struct instruction *next_insn,
			   struct insn_state *state)
{
	struct stack_op *op;
	int ret;

	for (op = insn->stack_ops; op; op = op->next) {

		ret = update_cfi_state(insn, next_insn, &state->cfi, op);
		if (ret)
			return ret;

		if (!opts.uaccess || !insn->alt_group)
			continue;

		if (op->dest.type == OP_DEST_PUSHF) {
			if (!state->uaccess_stack) {
				state->uaccess_stack = 1;
			} else if (state->uaccess_stack >> 31) {
				WARN_INSN(insn, "PUSHF stack exhausted");
				return 1;
			}
			state->uaccess_stack <<= 1;
			state->uaccess_stack |= state->uaccess;
		}

		if (op->src.type == OP_SRC_POPF) {
			if (state->uaccess_stack) {
				state->uaccess = state->uaccess_stack & 1;
				state->uaccess_stack >>= 1;
				if (state->uaccess_stack == 1)
					state->uaccess_stack = 0;
			}
		}
	}

	return 0;
}

/* Compare an instruction's recorded CFI against the incoming branch state. */
static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
{
	struct cfi_state *cfi1 = insn->cfi;
	int i;

	if (!cfi1) {
		WARN("CFI missing");
		return false;
	}

	if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) {

		WARN_INSN(insn, "stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
			  cfi1->cfa.base, cfi1->cfa.offset,
			  cfi2->cfa.base, cfi2->cfa.offset);
		return false;

	}

	if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
		for (i = 0; i < CFI_NUM_REGS; i++) {

			if (!memcmp(&cfi1->regs[i], &cfi2->regs[i], sizeof(struct cfi_reg)))
				continue;

			WARN_INSN(insn, "stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
				  i, cfi1->regs[i].base, cfi1->regs[i].offset,
				  i, cfi2->regs[i].base, cfi2->regs[i].offset);
		}
		return false;
	}

	if (cfi1->type != cfi2->type) {

		WARN_INSN(insn, "stack state mismatch: type1=%d type2=%d",
			  cfi1->type, cfi2->type);
		return false;
	}

	if (cfi1->drap != cfi2->drap ||
	    (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) ||
	    (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) {

		WARN_INSN(insn, "stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
			  cfi1->drap, cfi1->drap_reg, cfi1->drap_offset,
			  cfi2->drap, cfi2->drap_reg, cfi2->drap_offset);
		return false;
	}

	return true;
}

static inline bool func_uaccess_safe(struct symbol *func)
{
	if (func)
		return func->uaccess_safe;

	return false;
}

/* Best-effort name of a call's destination, for warning messages. */
static inline const char *call_dest_name(struct instruction *insn)
{
	static char pvname[19];
	struct reloc *reloc;
	int idx;

	if (insn_call_dest(insn))
		return insn_call_dest(insn)->name;

	reloc = insn_reloc(NULL, insn);
	if (reloc && !strcmp(reloc->sym->name, "pv_ops")) {
		idx = (reloc_addend(reloc) / sizeof(void *));
		snprintf(pvname, sizeof(pvname), "pv_ops[%d]", idx);
		return pvname;
	}

	return "{dynamic}";
}

static bool pv_call_dest(struct objtool_file *file,
			 struct instruction *insn)
{
	struct symbol *target;
	struct reloc *reloc;
	int idx;

	reloc = insn_reloc(file, insn);
	if (!reloc || strcmp(reloc->sym->name, "pv_ops"))
		return false;

	idx = arch_insn_adjusted_addend(insn, reloc) / sizeof(void *);

	/* Result is cached per pv_ops slot; warn only on the first visit. */
	if (file->pv_ops[idx].clean)
		return true;

	file->pv_ops[idx].clean = true;

	list_for_each_entry(target, &file->pv_ops[idx].targets, pv_target) {
		if (!target->sec->noinstr) {
			WARN("pv_ops[%d]: %s", idx, target->name);
			file->pv_ops[idx].clean = false;
		}
	}

	return file->pv_ops[idx].clean;
}

static inline bool noinstr_call_dest(struct objtool_file *file,
				     struct instruction *insn,
				     struct symbol *func)
{
	/*
	 * We can't deal with indirect function calls at present;
	 * assume they're instrumented.
	 */
	if (!func) {
		if (file->pv_ops)
			return pv_call_dest(file, insn);

		return false;
	}

	/*
	 * If the symbol is from a noinstr section; we good.
	 */
	if (func->sec->noinstr)
		return true;

	/*
	 * If the symbol is a static_call trampoline, we can't tell.
	 */
	if (func->static_call_tramp)
		return true;

	/*
	 * The __ubsan_handle_*() calls are like WARN(), they only happen when
	 * something 'BAD' happened.  At the risk of taking the machine down,
	 * let them proceed to get the message out.
	 */
	if (!strncmp(func->name, "__ubsan_handle_", 15))
		return true;

	return false;
}

/* Enforce the noinstr / uaccess / DF rules at a call site. */
static int validate_call(struct objtool_file *file,
			 struct instruction *insn,
			 struct insn_state *state)
{
	if (state->noinstr && state->instr <= 0 &&
	    !noinstr_call_dest(file, insn, insn_call_dest(insn))) {
		WARN_INSN(insn, "call to %s() leaves .noinstr.text section", call_dest_name(insn));
		return 1;
	}

	if (state->uaccess && !func_uaccess_safe(insn_call_dest(insn))) {
		WARN_INSN(insn, "call to %s() with UACCESS enabled", call_dest_name(insn));
		return 1;
	}

	if (state->df) {
		WARN_INSN(insn, "call to %s() with DF set", call_dest_name(insn));
		return 1;
	}

	return 0;
}

/* A sibling call additionally requires the original stack frame intact. */
static int validate_sibling_call(struct objtool_file *file,
				 struct instruction *insn,
				 struct insn_state *state)
{
	if (insn_func(insn) && has_modified_stack_frame(insn, state)) {
		WARN_INSN(insn, "sibling call from callable instruction with modified stack frame");
		return 1;
	}

	return validate_call(file, insn, state);
}

/* Enforce the rules that must hold at every return site. */
static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state)
{
	if (state->noinstr && state->instr > 0) {
		WARN_INSN(insn, "return with instrumentation enabled");
		return 1;
	}

	if (state->uaccess && !func_uaccess_safe(func)) {
		WARN_INSN(insn, "return with UACCESS enabled");
		return 1;
	}

	if (!state->uaccess && func_uaccess_safe(func)) {
		WARN_INSN(insn, "return with UACCESS disabled from a UACCESS-safe function");
		return 1;
	}

	if (state->df) {
		WARN_INSN(insn, "return with DF set");
		return 1;
	}

	if (func && has_modified_stack_frame(insn, state)) {
		WARN_INSN(insn, "return with modified stack frame");
		return 1;
	}

	if (state->cfi.bp_scratch) {
		WARN_INSN(insn, "BP used as a scratch register");
		return 1;
	}

	return 0;
}

static struct instruction *next_insn_to_validate(struct objtool_file *file,
						 struct instruction *insn)
{
	struct alt_group *alt_group = insn->alt_group;

	/*
	 * Simulate the fact that alternatives are patched in-place.  When the
	 * end of a replacement alt_group is reached, redirect objtool flow to
	 * the end of the original alt_group.
	 *
	 * insn->alts->insn -> alt_group->first_insn
	 *		       ...
	 *		       alt_group->last_insn
	 *		       [alt_group->nop] -> next(orig_group->last_insn)
	 */
	if (alt_group) {
		if (alt_group->nop) {
			/* ->nop implies ->orig_group */
			if (insn == alt_group->last_insn)
				return alt_group->nop;
			if (insn == alt_group->nop)
				goto next_orig;
		}
		if (insn == alt_group->last_insn && alt_group->orig_group)
			goto next_orig;
	}

	return next_insn_same_sec(file, insn);

next_orig:
	return next_insn_same_sec(file, alt_group->orig_group->last_insn);
}

static bool skip_alt_group(struct instruction *insn)
{
	struct instruction *alt_insn = insn->alts ? insn->alts->insn : NULL;

	if (!insn->alt_group)
		return false;

	/* ANNOTATE_IGNORE_ALTERNATIVE */
	if (insn->alt_group->ignore)
		return true;

	/*
	 * For NOP patched with CLAC/STAC, only follow the latter to avoid
	 * impossible code paths combining patched CLAC with unpatched STAC
	 * or vice versa.
	 *
	 * ANNOTATE_IGNORE_ALTERNATIVE could have been used here, but Linus
	 * requested not to do that to avoid hurting .s file readability
	 * around CLAC/STAC alternative sites.
	 */

	if (!alt_insn)
		return false;

	/* Don't override ASM_{CLAC,STAC}_UNSAFE */
	if (alt_insn->alt_group && alt_insn->alt_group->ignore)
		return false;

	return alt_insn->type == INSN_CLAC || alt_insn->type == INSN_STAC;
}

/* Parse --debug-checksum= (comma-separated function names) into symbol flags. */
static int checksum_debug_init(struct objtool_file *file)
{
	char *dup, *s;

	if (!opts.debug_checksum)
		return 0;

	dup = strdup(opts.debug_checksum);
	if (!dup) {
		ERROR_GLIBC("strdup");
		return -1;
	}

	s = dup;
	while (*s) {
		struct symbol *func;
		char *comma;

		comma = strchr(s, ',');
		if (comma)
			*comma = '\0';

		func = find_symbol_by_name(file->elf, s);
		if (!func || !is_func_sym(func))
			WARN("--debug-checksum: can't find '%s'", s);
		else
			func->debug_checksum = 1;

		if (!comma)
			break;

		s = comma + 1;
	}

	free(dup);
	return 0;
}

/*
 * Fold an instruction's bytes -- and the name(s) of whatever its relocation
 * or call destination refers to -- into the function's checksum.
 */
static void checksum_update_insn(struct objtool_file *file, struct symbol *func,
				 struct instruction *insn)
{
	struct reloc *reloc = insn_reloc(file, insn);
	unsigned long offset;
	struct symbol *sym;

	if (insn->fake)
		return;

	checksum_update(func, insn, insn->sec->data->d_buf + insn->offset, insn->len);

	if (!reloc) {
		struct symbol *call_dest = insn_call_dest(insn);

		if (call_dest)
			checksum_update(func, insn, call_dest->demangled_name,
					strlen(call_dest->demangled_name));
		return;
	}

	sym = reloc->sym;
	offset = arch_insn_adjusted_addend(insn, reloc);

	if (is_string_sec(sym->sec)) {
		char *str;

		str = sym->sec->data->d_buf + sym->offset + offset;
		checksum_update(func, insn, str, strlen(str));
		return;
	}

	/* Resolve a section symbol to the contained symbol + offset. */
	if (is_sec_sym(sym)) {
		sym = find_symbol_containing(reloc->sym->sec, offset);
		if (!sym)
			return;

		offset -= sym->offset;
	}

	checksum_update(func, insn, sym->demangled_name, strlen(sym->demangled_name));
	checksum_update(func, insn, &offset, sizeof(offset));
}

/*
 * Follow the branch starting at the given instruction, and recursively follow
 * any other branches (jumps).  Meanwhile, track the frame pointer state at
 * each instruction and validate all the rules described in
 * tools/objtool/Documentation/objtool.txt.
 */
static int validate_branch(struct objtool_file *file, struct symbol *func,
			   struct instruction *insn, struct insn_state state)
{
	struct alternative *alt;
	struct instruction *next_insn, *prev_insn = NULL;
	u8 visited;
	int ret;

	if (func && func->ignore)
		return 0;

	while (1) {
		next_insn = next_insn_to_validate(file, insn);

		if (opts.checksum && func && insn->sec)
			checksum_update_insn(file, func, insn);

		if (func && insn_func(insn) && func != insn_func(insn)->pfunc) {
			/* Ignore KCFI type preambles, which always fall through */
			if (is_prefix_func(func))
				return 0;

			if (file->ignore_unreachables)
				return 0;

			WARN("%s() falls through to next function %s()",
			     func->name, insn_func(insn)->name);
			func->warned = 1;

			return 1;
		}

		/* One visited bit per uaccess state, so both paths get followed. */
		visited = VISITED_BRANCH << state.uaccess;
		if (insn->visited & VISITED_BRANCH_MASK) {
			if (!insn->hint && !insn_cfi_match(insn, &state.cfi))
				return 1;

			if (insn->visited & visited)
				return 0;
		} else {
			nr_insns_visited++;
		}

		if (state.noinstr)
			state.instr += insn->instr;

		if (insn->hint) {
			if (insn->restore) {
				struct instruction *save_insn, *i;

				i = insn;
				save_insn = NULL;

				sym_for_each_insn_continue_reverse(file, func, i) {
					if (i->save) {
						save_insn = i;
						break;
					}
				}

				if (!save_insn) {
					WARN_INSN(insn, "no corresponding CFI save for CFI restore");
					return 1;
				}

				if (!save_insn->visited) {
					/*
					 * If the restore hint insn is at the
					 * beginning of a basic block and was
					 * branched to from elsewhere, and the
					 * save insn hasn't been visited yet,
					 * defer following this branch for now.
					 * It will be seen later via the
					 * straight-line path.
					 */
					if (!prev_insn)
						return 0;

					WARN_INSN(insn, "objtool isn't smart enough to handle this CFI save/restore combo");
					return 1;
				}

				insn->cfi = save_insn->cfi;
				nr_cfi_reused++;
			}

			state.cfi = *insn->cfi;
		} else {
			/* XXX track if we actually changed state.cfi */

			if (prev_insn && !cficmp(prev_insn->cfi, &state.cfi)) {
				insn->cfi = prev_insn->cfi;
				nr_cfi_reused++;
			} else {
				insn->cfi = cfi_hash_find_or_add(&state.cfi);
			}
		}

		insn->visited |= visited;

		if (propagate_alt_cfi(file, insn))
			return 1;

		if (insn->alts) {
			for (alt = insn->alts; alt; alt = alt->next) {
				ret = validate_branch(file, func, alt->insn, state);
				if (ret) {
					BT_INSN(insn, "(alt)");
					return ret;
				}
			}
		}

		if (skip_alt_group(insn))
			return 0;

		if (handle_insn_ops(insn, next_insn, &state))
			return 1;

		switch (insn->type) {

		case INSN_RETURN:
			return validate_return(func, insn, &state);

		case INSN_CALL:
		case INSN_CALL_DYNAMIC:
			ret = validate_call(file, insn, &state);
			if (ret)
				return ret;

			if (opts.stackval && func && !is_special_call(insn) &&
			    !has_valid_stack_frame(&state)) {
				WARN_INSN(insn, "call without frame pointer save/setup");
				return 1;
			}

			break;

		case INSN_JUMP_CONDITIONAL:
		case INSN_JUMP_UNCONDITIONAL:
			if (is_sibling_call(insn)) {
				ret = validate_sibling_call(file, insn, &state);
				if (ret)
					return ret;

			} else if (insn->jump_dest) {
				ret = validate_branch(file, func,
						      insn->jump_dest, state);
				if (ret) {
					BT_INSN(insn, "(branch)");
					return ret;
				}
			}

			if (insn->type == INSN_JUMP_UNCONDITIONAL)
				return 0;

			break;

		case INSN_JUMP_DYNAMIC:
		case INSN_JUMP_DYNAMIC_CONDITIONAL:
			if (is_sibling_call(insn)) {
				ret = validate_sibling_call(file, insn, &state);
				if (ret)
					return ret;
			}

			if (insn->type == INSN_JUMP_DYNAMIC)
				return 0;

			break;

		case INSN_SYSCALL:
			if (func && (!next_insn || !next_insn->hint)) {
				WARN_INSN(insn, "unsupported instruction in callable function");
				return 1;
			}

			break;

		case INSN_SYSRET:
			if (func && (!next_insn || !next_insn->hint)) {
				WARN_INSN(insn, "unsupported instruction in callable function");
				return 1;
			}

			return 0;

		case INSN_STAC:
			if (!opts.uaccess)
				break;

			if (state.uaccess) {
				WARN_INSN(insn, "recursive UACCESS enable");
				return 1;
			}

			state.uaccess = true;
			break;

		case INSN_CLAC:
			if (!opts.uaccess)
				break;

			if (!state.uaccess && func) {
				WARN_INSN(insn, "redundant UACCESS disable");
				return 1;
			}

			if (func_uaccess_safe(func) && !state.uaccess_stack) {
				WARN_INSN(insn, "UACCESS-safe disables UACCESS");
				return 1;
			}

			state.uaccess = false;
			break;

		case INSN_STD:
			if (state.df) {
				WARN_INSN(insn, "recursive STD");
				return 1;
			}

			state.df = true;
			break;

		case INSN_CLD:
			if (!state.df && func) {
				WARN_INSN(insn, "redundant CLD");
				return 1;
			}

			state.df = false;
			break;

		default:
			break;
		}

		if (insn->dead_end)
			return 0;

		if (!next_insn) {
			if (state.cfi.cfa.base == CFI_UNDEFINED)
				return 0;
			if (file->ignore_unreachables)
				return 0;

			WARN("%s%sunexpected end of section %s",
			     func ? func->name : "", func ? "(): " : "",
			     insn->sec->name);
			return 1;
		}

		prev_insn = insn;
		insn = next_insn;
	}

	return 0;
}

/* Validate one unwind hint's branch, if it wasn't already reached. */
static int validate_unwind_hint(struct objtool_file *file,
				struct instruction *insn,
				struct insn_state *state)
{
	if (insn->hint && !insn->visited) {
		struct symbol *func = insn_func(insn);
		int ret;

		if (opts.checksum)
			checksum_init(func);

		ret = validate_branch(file, func, insn, *state);
		if (ret)
			BT_INSN(insn, "<=== (hint)");
		return ret;
	}

	return 0;
}

/* Validate all unwind hints in a section (or the whole file if sec is NULL). */
static int validate_unwind_hints(struct objtool_file *file, struct section *sec)
{
	struct instruction *insn;
	struct insn_state state;
	int warnings = 0;

	if (!file->hints)
		return 0;

	init_insn_state(file, &state, sec);

	if (sec) {
		sec_for_each_insn(file, sec, insn)
			warnings += validate_unwind_hint(file, insn, &state);
	} else {
		for_each_insn(file, insn)
			warnings += validate_unwind_hint(file, insn, &state);
	}

	return warnings;
}

/*
 * Validate rethunk entry constraint: must untrain RET before the first RET.
 *
 * Follow every branch (intra-function) and ensure VALIDATE_UNRET_END comes
 * before an actual RET instruction.
3979 */ 3980 static int validate_unret(struct objtool_file *file, struct instruction *insn) 3981 { 3982 struct instruction *next, *dest; 3983 int ret; 3984 3985 for (;;) { 3986 next = next_insn_to_validate(file, insn); 3987 3988 if (insn->visited & VISITED_UNRET) 3989 return 0; 3990 3991 insn->visited |= VISITED_UNRET; 3992 3993 if (insn->alts) { 3994 struct alternative *alt; 3995 for (alt = insn->alts; alt; alt = alt->next) { 3996 ret = validate_unret(file, alt->insn); 3997 if (ret) { 3998 BT_INSN(insn, "(alt)"); 3999 return ret; 4000 } 4001 } 4002 } 4003 4004 switch (insn->type) { 4005 4006 case INSN_CALL_DYNAMIC: 4007 case INSN_JUMP_DYNAMIC: 4008 case INSN_JUMP_DYNAMIC_CONDITIONAL: 4009 WARN_INSN(insn, "early indirect call"); 4010 return 1; 4011 4012 case INSN_JUMP_UNCONDITIONAL: 4013 case INSN_JUMP_CONDITIONAL: 4014 if (!is_sibling_call(insn)) { 4015 if (!insn->jump_dest) { 4016 WARN_INSN(insn, "unresolved jump target after linking?!?"); 4017 return 1; 4018 } 4019 ret = validate_unret(file, insn->jump_dest); 4020 if (ret) { 4021 BT_INSN(insn, "(branch%s)", 4022 insn->type == INSN_JUMP_CONDITIONAL ? "-cond" : ""); 4023 return ret; 4024 } 4025 4026 if (insn->type == INSN_JUMP_UNCONDITIONAL) 4027 return 0; 4028 4029 break; 4030 } 4031 4032 /* fallthrough */ 4033 case INSN_CALL: 4034 dest = find_insn(file, insn_call_dest(insn)->sec, 4035 insn_call_dest(insn)->offset); 4036 if (!dest) { 4037 WARN("Unresolved function after linking!?: %s", 4038 insn_call_dest(insn)->name); 4039 return 1; 4040 } 4041 4042 ret = validate_unret(file, dest); 4043 if (ret) { 4044 BT_INSN(insn, "(call)"); 4045 return ret; 4046 } 4047 /* 4048 * If a call returns without error, it must have seen UNTRAIN_RET. 4049 * Therefore any non-error return is a success. 
4050 */ 4051 return 0; 4052 4053 case INSN_RETURN: 4054 WARN_INSN(insn, "RET before UNTRAIN"); 4055 return 1; 4056 4057 case INSN_SYSCALL: 4058 break; 4059 4060 case INSN_SYSRET: 4061 return 0; 4062 4063 case INSN_NOP: 4064 if (insn->retpoline_safe) 4065 return 0; 4066 break; 4067 4068 default: 4069 break; 4070 } 4071 4072 if (insn->dead_end) 4073 return 0; 4074 4075 if (!next) { 4076 WARN_INSN(insn, "teh end!"); 4077 return 1; 4078 } 4079 insn = next; 4080 } 4081 4082 return 0; 4083 } 4084 4085 /* 4086 * Validate that all branches starting at VALIDATE_UNRET_BEGIN encounter 4087 * VALIDATE_UNRET_END before RET. 4088 */ 4089 static int validate_unrets(struct objtool_file *file) 4090 { 4091 struct instruction *insn; 4092 int warnings = 0; 4093 4094 for_each_insn(file, insn) { 4095 if (!insn->unret) 4096 continue; 4097 4098 warnings += validate_unret(file, insn); 4099 } 4100 4101 return warnings; 4102 } 4103 4104 static int validate_retpoline(struct objtool_file *file) 4105 { 4106 struct instruction *insn; 4107 int warnings = 0; 4108 4109 for_each_insn(file, insn) { 4110 if (insn->type != INSN_JUMP_DYNAMIC && 4111 insn->type != INSN_CALL_DYNAMIC && 4112 insn->type != INSN_RETURN) 4113 continue; 4114 4115 if (insn->retpoline_safe) 4116 continue; 4117 4118 if (insn->sec->init) 4119 continue; 4120 4121 if (insn->type == INSN_RETURN) { 4122 if (opts.rethunk) { 4123 WARN_INSN(insn, "'naked' return found in MITIGATION_RETHUNK build"); 4124 warnings++; 4125 } 4126 continue; 4127 } 4128 4129 WARN_INSN(insn, "indirect %s found in MITIGATION_RETPOLINE build", 4130 insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call"); 4131 warnings++; 4132 } 4133 4134 if (!opts.cfi) 4135 return warnings; 4136 4137 /* 4138 * kCFI call sites look like: 4139 * 4140 * movl $(-0x12345678), %r10d 4141 * addl -4(%r11), %r10d 4142 * jz 1f 4143 * ud2 4144 * 1: cs call __x86_indirect_thunk_r11 4145 * 4146 * Verify all indirect calls are kCFI adorned by checking for the 4147 * UD2. 
Notably, doing __nocfi calls to regular (cfi) functions is 4148 * broken. 4149 */ 4150 list_for_each_entry(insn, &file->retpoline_call_list, call_node) { 4151 struct symbol *sym = insn->sym; 4152 4153 if (sym && (sym->type == STT_NOTYPE || 4154 sym->type == STT_FUNC) && !sym->nocfi) { 4155 struct instruction *prev = 4156 prev_insn_same_sym(file, insn); 4157 4158 if (!prev || prev->type != INSN_BUG) { 4159 WARN_INSN(insn, "no-cfi indirect call!"); 4160 warnings++; 4161 } 4162 } 4163 } 4164 4165 return warnings; 4166 } 4167 4168 static bool is_kasan_insn(struct instruction *insn) 4169 { 4170 return (insn->type == INSN_CALL && 4171 !strcmp(insn_call_dest(insn)->name, "__asan_handle_no_return")); 4172 } 4173 4174 static bool is_ubsan_insn(struct instruction *insn) 4175 { 4176 return (insn->type == INSN_CALL && 4177 !strcmp(insn_call_dest(insn)->name, 4178 "__ubsan_handle_builtin_unreachable")); 4179 } 4180 4181 static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn) 4182 { 4183 struct symbol *func = insn_func(insn); 4184 struct instruction *prev_insn; 4185 int i; 4186 4187 if (insn->type == INSN_NOP || insn->type == INSN_TRAP || 4188 insn->hole || (func && func->ignore)) 4189 return true; 4190 4191 /* 4192 * Ignore alternative replacement instructions. This can happen 4193 * when a whitelisted function uses one of the ALTERNATIVE macros. 4194 */ 4195 if (!strcmp(insn->sec->name, ".altinstr_replacement") || 4196 !strcmp(insn->sec->name, ".altinstr_aux")) 4197 return true; 4198 4199 if (!func) 4200 return false; 4201 4202 if (func->static_call_tramp) 4203 return true; 4204 4205 /* 4206 * CONFIG_UBSAN_TRAP inserts a UD2 when it sees 4207 * __builtin_unreachable(). The BUG() macro has an unreachable() after 4208 * the UD2, which causes GCC's undefined trap logic to emit another UD2 4209 * (or occasionally a JMP to UD2). 4210 * 4211 * It may also insert a UD2 after calling a __noreturn function. 
4212 */ 4213 prev_insn = prev_insn_same_sec(file, insn); 4214 if (prev_insn && prev_insn->dead_end && 4215 (insn->type == INSN_BUG || 4216 (insn->type == INSN_JUMP_UNCONDITIONAL && 4217 insn->jump_dest && insn->jump_dest->type == INSN_BUG))) 4218 return true; 4219 4220 /* 4221 * Check if this (or a subsequent) instruction is related to 4222 * CONFIG_UBSAN or CONFIG_KASAN. 4223 * 4224 * End the search at 5 instructions to avoid going into the weeds. 4225 */ 4226 for (i = 0; i < 5; i++) { 4227 4228 if (is_kasan_insn(insn) || is_ubsan_insn(insn)) 4229 return true; 4230 4231 if (insn->type == INSN_JUMP_UNCONDITIONAL) { 4232 if (insn->jump_dest && 4233 insn_func(insn->jump_dest) == func) { 4234 insn = insn->jump_dest; 4235 continue; 4236 } 4237 4238 break; 4239 } 4240 4241 if (insn->offset + insn->len >= func->offset + func->len) 4242 break; 4243 4244 insn = next_insn_same_sec(file, insn); 4245 } 4246 4247 return false; 4248 } 4249 4250 /* 4251 * For FineIBT or kCFI, a certain number of bytes preceding the function may be 4252 * NOPs. Those NOPs may be rewritten at runtime and executed, so give them a 4253 * proper function name: __pfx_<func>. 4254 * 4255 * The NOPs may not exist for the following cases: 4256 * 4257 * - compiler cloned functions (*.cold, *.part0, etc) 4258 * - asm functions created with inline asm or without SYM_FUNC_START() 4259 * 4260 * Also, the function may already have a prefix from a previous objtool run 4261 * (livepatch extracted functions, or manually running objtool multiple times). 4262 * 4263 * So return 0 if the NOPs are missing or the function already has a prefix 4264 * symbol. 
4265 */ 4266 static int create_prefix_symbol(struct objtool_file *file, struct symbol *func) 4267 { 4268 struct instruction *insn, *prev; 4269 char name[SYM_NAME_LEN]; 4270 struct cfi_state *cfi; 4271 4272 if (!is_func_sym(func) || is_prefix_func(func) || 4273 func->cold || func->static_call_tramp) 4274 return 0; 4275 4276 if ((strlen(func->name) + sizeof("__pfx_") > SYM_NAME_LEN)) { 4277 WARN("%s: symbol name too long, can't create __pfx_ symbol", 4278 func->name); 4279 return 0; 4280 } 4281 4282 if (snprintf_check(name, SYM_NAME_LEN, "__pfx_%s", func->name)) 4283 return -1; 4284 4285 if (file->klp) { 4286 struct symbol *pfx; 4287 4288 pfx = find_symbol_by_offset(func->sec, func->offset - opts.prefix); 4289 if (pfx && is_prefix_func(pfx) && !strcmp(pfx->name, name)) 4290 return 0; 4291 } 4292 4293 insn = find_insn(file, func->sec, func->offset); 4294 if (!insn) { 4295 WARN("%s: can't find starting instruction", func->name); 4296 return -1; 4297 } 4298 4299 for (prev = prev_insn_same_sec(file, insn); 4300 prev; 4301 prev = prev_insn_same_sec(file, prev)) { 4302 u64 offset; 4303 4304 if (prev->type != INSN_NOP) 4305 return 0; 4306 4307 offset = func->offset - prev->offset; 4308 4309 if (offset > opts.prefix) 4310 return 0; 4311 4312 if (offset < opts.prefix) 4313 continue; 4314 4315 if (!elf_create_symbol(file->elf, name, func->sec, 4316 GELF_ST_BIND(func->sym.st_info), 4317 GELF_ST_TYPE(func->sym.st_info), 4318 prev->offset, opts.prefix)) 4319 return -1; 4320 4321 break; 4322 } 4323 4324 if (!prev) 4325 return 0; 4326 4327 if (!insn->cfi) { 4328 /* 4329 * This can happen if stack validation isn't enabled or the 4330 * function is annotated with STACK_FRAME_NON_STANDARD. 
4331 */ 4332 return 0; 4333 } 4334 4335 /* Propagate insn->cfi to the prefix code */ 4336 cfi = cfi_hash_find_or_add(insn->cfi); 4337 for (; prev != insn; prev = next_insn_same_sec(file, prev)) 4338 prev->cfi = cfi; 4339 4340 return 0; 4341 } 4342 4343 static int create_prefix_symbols(struct objtool_file *file) 4344 { 4345 struct section *sec; 4346 struct symbol *func; 4347 4348 for_each_sec(file->elf, sec) { 4349 if (!is_text_sec(sec)) 4350 continue; 4351 4352 sec_for_each_sym(sec, func) { 4353 if (create_prefix_symbol(file, func)) 4354 return -1; 4355 } 4356 } 4357 4358 return 0; 4359 } 4360 4361 static int validate_symbol(struct objtool_file *file, struct section *sec, 4362 struct symbol *sym, struct insn_state *state) 4363 { 4364 struct instruction *insn; 4365 struct symbol *func; 4366 int ret; 4367 4368 if (!sym->len) { 4369 WARN("%s() is missing an ELF size annotation", sym->name); 4370 return 1; 4371 } 4372 4373 if (sym->pfunc != sym || sym->alias != sym) 4374 return 0; 4375 4376 insn = find_insn(file, sec, sym->offset); 4377 if (!insn || insn->visited) 4378 return 0; 4379 4380 if (opts.uaccess) 4381 state->uaccess = sym->uaccess_safe; 4382 4383 func = insn_func(insn); 4384 4385 if (opts.checksum) 4386 checksum_init(func); 4387 4388 ret = validate_branch(file, func, insn, *state); 4389 if (ret) 4390 BT_INSN(insn, "<=== (sym)"); 4391 4392 if (opts.checksum) 4393 checksum_finish(func); 4394 4395 return ret; 4396 } 4397 4398 static int validate_section(struct objtool_file *file, struct section *sec) 4399 { 4400 struct insn_state state; 4401 struct symbol *func; 4402 int warnings = 0; 4403 4404 sec_for_each_sym(sec, func) { 4405 if (!is_func_sym(func)) 4406 continue; 4407 4408 init_insn_state(file, &state, sec); 4409 set_func_state(&state.cfi); 4410 4411 warnings += validate_symbol(file, sec, func, &state); 4412 } 4413 4414 return warnings; 4415 } 4416 4417 static int validate_noinstr_sections(struct objtool_file *file) 4418 { 4419 struct section *sec; 4420 int 
warnings = 0; 4421 4422 sec = find_section_by_name(file->elf, ".noinstr.text"); 4423 if (sec) { 4424 warnings += validate_section(file, sec); 4425 warnings += validate_unwind_hints(file, sec); 4426 } 4427 4428 sec = find_section_by_name(file->elf, ".entry.text"); 4429 if (sec) { 4430 warnings += validate_section(file, sec); 4431 warnings += validate_unwind_hints(file, sec); 4432 } 4433 4434 sec = find_section_by_name(file->elf, ".cpuidle.text"); 4435 if (sec) { 4436 warnings += validate_section(file, sec); 4437 warnings += validate_unwind_hints(file, sec); 4438 } 4439 4440 return warnings; 4441 } 4442 4443 static int validate_functions(struct objtool_file *file) 4444 { 4445 struct section *sec; 4446 int warnings = 0; 4447 4448 for_each_sec(file->elf, sec) { 4449 if (!is_text_sec(sec)) 4450 continue; 4451 4452 warnings += validate_section(file, sec); 4453 } 4454 4455 return warnings; 4456 } 4457 4458 static void mark_endbr_used(struct instruction *insn) 4459 { 4460 if (!list_empty(&insn->call_node)) 4461 list_del_init(&insn->call_node); 4462 } 4463 4464 static bool noendbr_range(struct objtool_file *file, struct instruction *insn) 4465 { 4466 struct symbol *sym = find_symbol_containing(insn->sec, insn->offset-1); 4467 struct instruction *first; 4468 4469 if (!sym) 4470 return false; 4471 4472 first = find_insn(file, sym->sec, sym->offset); 4473 if (!first) 4474 return false; 4475 4476 if (first->type != INSN_ENDBR && !first->noendbr) 4477 return false; 4478 4479 return insn->offset == sym->offset + sym->len; 4480 } 4481 4482 static int __validate_ibt_insn(struct objtool_file *file, struct instruction *insn, 4483 struct instruction *dest) 4484 { 4485 if (dest->type == INSN_ENDBR) { 4486 mark_endbr_used(dest); 4487 return 0; 4488 } 4489 4490 if (insn_func(dest) && insn_func(insn) && 4491 insn_func(dest)->pfunc == insn_func(insn)->pfunc) { 4492 /* 4493 * Anything from->to self is either _THIS_IP_ or 4494 * IRET-to-self. 
4495 * 4496 * There is no sane way to annotate _THIS_IP_ since the 4497 * compiler treats the relocation as a constant and is 4498 * happy to fold in offsets, skewing any annotation we 4499 * do, leading to vast amounts of false-positives. 4500 * 4501 * There's also compiler generated _THIS_IP_ through 4502 * KCOV and such which we have no hope of annotating. 4503 * 4504 * As such, blanket accept self-references without 4505 * issue. 4506 */ 4507 return 0; 4508 } 4509 4510 /* 4511 * Accept anything ANNOTATE_NOENDBR. 4512 */ 4513 if (dest->noendbr) 4514 return 0; 4515 4516 /* 4517 * Accept if this is the instruction after a symbol 4518 * that is (no)endbr -- typical code-range usage. 4519 */ 4520 if (noendbr_range(file, dest)) 4521 return 0; 4522 4523 WARN_INSN(insn, "relocation to !ENDBR: %s", offstr(dest->sec, dest->offset)); 4524 return 1; 4525 } 4526 4527 static int validate_ibt_insn(struct objtool_file *file, struct instruction *insn) 4528 { 4529 struct instruction *dest; 4530 struct reloc *reloc; 4531 unsigned long off; 4532 int warnings = 0; 4533 4534 /* 4535 * Looking for function pointer load relocations. 
Ignore 4536 * direct/indirect branches: 4537 */ 4538 switch (insn->type) { 4539 4540 case INSN_CALL: 4541 case INSN_CALL_DYNAMIC: 4542 case INSN_JUMP_CONDITIONAL: 4543 case INSN_JUMP_UNCONDITIONAL: 4544 case INSN_JUMP_DYNAMIC: 4545 case INSN_JUMP_DYNAMIC_CONDITIONAL: 4546 case INSN_RETURN: 4547 case INSN_NOP: 4548 return 0; 4549 4550 case INSN_LEA_RIP: 4551 if (!insn_reloc(file, insn)) { 4552 /* local function pointer reference without reloc */ 4553 4554 off = arch_jump_destination(insn); 4555 4556 dest = find_insn(file, insn->sec, off); 4557 if (!dest) { 4558 WARN_INSN(insn, "corrupt function pointer reference"); 4559 return 1; 4560 } 4561 4562 return __validate_ibt_insn(file, insn, dest); 4563 } 4564 break; 4565 4566 default: 4567 break; 4568 } 4569 4570 for (reloc = insn_reloc(file, insn); 4571 reloc; 4572 reloc = find_reloc_by_dest_range(file->elf, insn->sec, 4573 reloc_offset(reloc) + 1, 4574 (insn->offset + insn->len) - (reloc_offset(reloc) + 1))) { 4575 4576 off = reloc->sym->offset + arch_insn_adjusted_addend(insn, reloc); 4577 4578 dest = find_insn(file, reloc->sym->sec, off); 4579 if (!dest) 4580 continue; 4581 4582 warnings += __validate_ibt_insn(file, insn, dest); 4583 } 4584 4585 return warnings; 4586 } 4587 4588 static int validate_ibt_data_reloc(struct objtool_file *file, 4589 struct reloc *reloc) 4590 { 4591 struct instruction *dest; 4592 4593 dest = find_insn(file, reloc->sym->sec, 4594 reloc->sym->offset + reloc_addend(reloc)); 4595 if (!dest) 4596 return 0; 4597 4598 if (dest->type == INSN_ENDBR) { 4599 mark_endbr_used(dest); 4600 return 0; 4601 } 4602 4603 if (dest->noendbr) 4604 return 0; 4605 4606 WARN_FUNC(reloc->sec->base, reloc_offset(reloc), 4607 "data relocation to !ENDBR: %s", offstr(dest->sec, dest->offset)); 4608 4609 return 1; 4610 } 4611 4612 /* 4613 * Validate IBT rules and remove used ENDBR instructions from the seal list. 
 * Unused ENDBR instructions will be annotated for sealing (i.e., replaced with
 * NOPs) later, in create_ibt_endbr_seal_sections().
 */
static int validate_ibt(struct objtool_file *file)
{
	struct section *sec;
	struct reloc *reloc;
	struct instruction *insn;
	int warnings = 0;

	for_each_insn(file, insn)
		warnings += validate_ibt_insn(file, insn);

	for_each_sec(file->elf, sec) {

		/* Already done by validate_ibt_insn() */
		if (is_text_sec(sec))
			continue;

		if (!sec->rsec)
			continue;

		/*
		 * These sections can reference text addresses, but not with
		 * the intent to indirect branch to them.
		 */
		if ((!strncmp(sec->name, ".discard", 8) &&
		     strcmp(sec->name, ".discard.ibt_endbr_noseal")) ||
		    !strncmp(sec->name, ".debug", 6) ||
		    !strcmp(sec->name, ".altinstructions") ||
		    !strcmp(sec->name, ".ibt_endbr_seal") ||
		    !strcmp(sec->name, ".kcfi_traps") ||
		    !strcmp(sec->name, ".orc_unwind_ip") ||
		    !strcmp(sec->name, ".retpoline_sites") ||
		    !strcmp(sec->name, ".smp_locks") ||
		    !strcmp(sec->name, ".static_call_sites") ||
		    !strcmp(sec->name, "_error_injection_whitelist") ||
		    !strcmp(sec->name, "_kprobe_blacklist") ||
		    !strcmp(sec->name, "__bug_table") ||
		    !strcmp(sec->name, "__ex_table") ||
		    !strcmp(sec->name, "__jump_table") ||
		    !strcmp(sec->name, "__klp_funcs") ||
		    !strcmp(sec->name, "__mcount_loc") ||
		    !strcmp(sec->name, ".llvm.call-graph-profile") ||
		    !strcmp(sec->name, ".llvm_bb_addr_map") ||
		    !strcmp(sec->name, "__tracepoints") ||
		    !strcmp(sec->name, "__patchable_function_entries"))
			continue;

		for_each_reloc(sec->rsec, reloc)
			warnings += validate_ibt_data_reloc(file, reloc);
	}

	return warnings;
}

/*
 * Straight-Line-Speculation: every RET and indirect jump must be followed
 * by an INT3 speculation trap.  Returns the warning count.
 */
static int validate_sls(struct objtool_file *file)
{
	struct instruction *insn, *next_insn;
	int warnings = 0;

	for_each_insn(file, insn) {
		next_insn = next_insn_same_sec(file, insn);

		if (insn->retpoline_safe)
			continue;

		switch (insn->type) {
		case INSN_RETURN:
			if (!next_insn || next_insn->type != INSN_TRAP) {
				WARN_INSN(insn, "missing int3 after ret");
				warnings++;
			}

			break;
		case INSN_JUMP_DYNAMIC:
			if (!next_insn || next_insn->type != INSN_TRAP) {
				WARN_INSN(insn, "missing int3 after indirect jump");
				warnings++;
			}
			break;
		default:
			break;
		}
	}

	return warnings;
}

/*
 * After all branch validation: warn about instructions that were never
 * reached and aren't known-benign padding/sanitizer artifacts.
 */
static int validate_reachable_instructions(struct objtool_file *file)
{
	struct instruction *insn, *prev_insn;
	struct symbol *call_dest;
	int warnings = 0;

	if (file->ignore_unreachables)
		return 0;

	for_each_insn(file, insn) {
		if (insn->visited || ignore_unreachable_insn(file, insn))
			continue;

		/*
		 * An unreachable insn right after a dead-end call usually
		 * means the callee is missing a __noreturn annotation.
		 */
		prev_insn = prev_insn_same_sec(file, insn);
		if (prev_insn && prev_insn->dead_end) {
			call_dest = insn_call_dest(prev_insn);
			if (call_dest) {
				WARN_INSN(insn, "%s() missing __noreturn in .c/.h or NORETURN() in noreturns.h",
					  call_dest->name);
				warnings++;
				continue;
			}
		}

		WARN_INSN(insn, "unreachable instruction");
		warnings++;
	}

	return warnings;
}

/*
 * Default (arch-overridable) test for an absolute relocation: plain
 * 64- or 32-bit absolute type depending on the ELF address size.
 */
__weak bool arch_absolute_reloc(struct elf *elf, struct reloc *reloc)
{
	unsigned int type = reloc_type(reloc);
	size_t sz = elf_addr_size(elf);

	return (sz == 8) ? (type == R_ABS64) : (type == R_ABS32);
}

/*
 * Warn about absolute relocations in loadable sections (they break
 * position-independent/relocatable images).  Returns the warning count.
 */
static int check_abs_references(struct objtool_file *file)
{
	struct section *sec;
	struct reloc *reloc;
	int ret = 0;

	for_each_sec(file->elf, sec) {
		/* absolute references in non-loadable sections are fine */
		if (!(sec->sh.sh_flags & SHF_ALLOC))
			continue;

		/* section must have an associated .rela section */
		if (!sec->rsec)
			continue;

		/*
		 * Special case for compiler generated metadata that is not
		 * consumed until after boot.
		 */
		if (!strcmp(sec->name, "__patchable_function_entries"))
			continue;

		for_each_reloc(sec->rsec, reloc) {
			if (arch_absolute_reloc(file->elf, reloc)) {
				WARN("section %s has absolute relocation at offset 0x%llx",
				     sec->name, (unsigned long long)reloc_offset(reloc));
				ret++;
			}
		}
	}
	return ret;
}

/* One contiguously-allocated chunk of instructions, for bulk freeing. */
struct insn_chunk {
	void *addr;
	struct insn_chunk *next;
};

/*
 * Reduce peak RSS usage by freeing insns memory before writing the ELF file,
 * which can trigger more allocations for .debug_* sections whose data hasn't
 * been read yet.
4784 */ 4785 static void free_insns(struct objtool_file *file) 4786 { 4787 struct instruction *insn; 4788 struct insn_chunk *chunks = NULL, *chunk; 4789 4790 for_each_insn(file, insn) { 4791 if (!insn->idx) { 4792 chunk = malloc(sizeof(*chunk)); 4793 chunk->addr = insn; 4794 chunk->next = chunks; 4795 chunks = chunk; 4796 } 4797 } 4798 4799 for (chunk = chunks; chunk; chunk = chunk->next) 4800 free(chunk->addr); 4801 } 4802 4803 int check(struct objtool_file *file) 4804 { 4805 int ret = 0, warnings = 0; 4806 4807 arch_initial_func_cfi_state(&initial_func_cfi); 4808 init_cfi_state(&init_cfi); 4809 init_cfi_state(&func_cfi); 4810 set_func_state(&func_cfi); 4811 init_cfi_state(&force_undefined_cfi); 4812 force_undefined_cfi.force_undefined = true; 4813 4814 if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3))) { 4815 ret = -1; 4816 goto out; 4817 } 4818 4819 cfi_hash_add(&init_cfi); 4820 cfi_hash_add(&func_cfi); 4821 4822 ret = checksum_debug_init(file); 4823 if (ret) 4824 goto out; 4825 4826 ret = decode_sections(file); 4827 if (ret) 4828 goto out; 4829 4830 if (!nr_insns) 4831 goto out; 4832 4833 if (opts.retpoline) 4834 warnings += validate_retpoline(file); 4835 4836 if (validate_branch_enabled()) { 4837 int w = 0; 4838 4839 w += validate_functions(file); 4840 w += validate_unwind_hints(file, NULL); 4841 if (!w) 4842 w += validate_reachable_instructions(file); 4843 4844 warnings += w; 4845 4846 } else if (opts.noinstr) { 4847 warnings += validate_noinstr_sections(file); 4848 } 4849 4850 if (opts.unret) { 4851 /* 4852 * Must be after validate_branch() and friends, it plays 4853 * further games with insn->visited. 
4854 */ 4855 warnings += validate_unrets(file); 4856 } 4857 4858 if (opts.ibt) 4859 warnings += validate_ibt(file); 4860 4861 if (opts.sls) 4862 warnings += validate_sls(file); 4863 4864 if (opts.static_call) { 4865 ret = create_static_call_sections(file); 4866 if (ret) 4867 goto out; 4868 } 4869 4870 if (opts.retpoline) { 4871 ret = create_retpoline_sites_sections(file); 4872 if (ret) 4873 goto out; 4874 } 4875 4876 if (opts.cfi) { 4877 ret = create_cfi_sections(file); 4878 if (ret) 4879 goto out; 4880 } 4881 4882 if (opts.rethunk) { 4883 ret = create_return_sites_sections(file); 4884 if (ret) 4885 goto out; 4886 4887 if (opts.hack_skylake) { 4888 ret = create_direct_call_sections(file); 4889 if (ret) 4890 goto out; 4891 } 4892 } 4893 4894 if (opts.mcount) { 4895 ret = create_mcount_loc_sections(file); 4896 if (ret) 4897 goto out; 4898 } 4899 4900 if (opts.prefix) { 4901 ret = create_prefix_symbols(file); 4902 if (ret) 4903 goto out; 4904 } 4905 4906 if (opts.ibt) { 4907 ret = create_ibt_endbr_seal_sections(file); 4908 if (ret) 4909 goto out; 4910 } 4911 4912 if (opts.noabs) 4913 warnings += check_abs_references(file); 4914 4915 if (opts.checksum) { 4916 ret = create_sym_checksum_section(file); 4917 if (ret) 4918 goto out; 4919 } 4920 4921 if (opts.orc && nr_insns) { 4922 ret = orc_create(file); 4923 if (ret) 4924 goto out; 4925 } 4926 4927 free_insns(file); 4928 4929 if (opts.stats) { 4930 printf("nr_insns_visited: %ld\n", nr_insns_visited); 4931 printf("nr_cfi: %ld\n", nr_cfi); 4932 printf("nr_cfi_reused: %ld\n", nr_cfi_reused); 4933 printf("nr_cfi_cache: %ld\n", nr_cfi_cache); 4934 } 4935 4936 out: 4937 if (!ret && !warnings) 4938 return 0; 4939 4940 if (opts.werror && warnings) 4941 ret = 1; 4942 4943 if (opts.verbose) { 4944 if (opts.werror && warnings) 4945 WARN("%d warning(s) upgraded to errors", warnings); 4946 disas_warned_funcs(file); 4947 } 4948 4949 if (opts.backup && make_backup()) 4950 return 1; 4951 4952 return ret; 4953 } 4954