1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com> 4 */ 5 6 #define _GNU_SOURCE /* memmem() */ 7 #include <fnmatch.h> 8 #include <string.h> 9 #include <stdlib.h> 10 #include <inttypes.h> 11 #include <sys/mman.h> 12 13 #include <objtool/builtin.h> 14 #include <objtool/cfi.h> 15 #include <objtool/arch.h> 16 #include <objtool/disas.h> 17 #include <objtool/check.h> 18 #include <objtool/special.h> 19 #include <objtool/trace.h> 20 #include <objtool/warn.h> 21 #include <objtool/checksum.h> 22 #include <objtool/util.h> 23 24 #include <linux/objtool_types.h> 25 #include <linux/hashtable.h> 26 #include <linux/kernel.h> 27 #include <linux/static_call_types.h> 28 #include <linux/string.h> 29 30 static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache; 31 32 static struct cfi_init_state initial_func_cfi; 33 static struct cfi_state init_cfi; 34 static struct cfi_state func_cfi; 35 static struct cfi_state force_undefined_cfi; 36 37 struct disas_context *objtool_disas_ctx; 38 39 size_t sym_name_max_len; 40 41 struct instruction *find_insn(struct objtool_file *file, 42 struct section *sec, unsigned long offset) 43 { 44 struct instruction *insn; 45 46 hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) { 47 if (insn->sec == sec && insn->offset == offset) 48 return insn; 49 } 50 51 return NULL; 52 } 53 54 struct instruction *next_insn_same_sec(struct objtool_file *file, 55 struct instruction *insn) 56 { 57 if (insn->idx == INSN_CHUNK_MAX) 58 return find_insn(file, insn->sec, insn->offset + insn->len); 59 60 insn++; 61 if (!insn->len) 62 return NULL; 63 64 return insn; 65 } 66 67 static struct instruction *next_insn_same_func(struct objtool_file *file, 68 struct instruction *insn) 69 { 70 struct instruction *next = next_insn_same_sec(file, insn); 71 struct symbol *func = insn_func(insn); 72 73 if (!func) 74 return NULL; 75 76 if (next && insn_func(next) == func) 77 return next; 78 79 /* 
Check if we're already in the subfunction: */ 80 if (func == func->cfunc) 81 return NULL; 82 83 /* Move to the subfunction: */ 84 return find_insn(file, func->cfunc->sec, func->cfunc->offset); 85 } 86 87 static struct instruction *prev_insn_same_sec(struct objtool_file *file, 88 struct instruction *insn) 89 { 90 if (insn->idx == 0) { 91 if (insn->prev_len) 92 return find_insn(file, insn->sec, insn->offset - insn->prev_len); 93 return NULL; 94 } 95 96 return insn - 1; 97 } 98 99 static struct instruction *prev_insn_same_sym(struct objtool_file *file, 100 struct instruction *insn) 101 { 102 struct instruction *prev = prev_insn_same_sec(file, insn); 103 104 if (prev && insn_func(prev) == insn_func(insn)) 105 return prev; 106 107 return NULL; 108 } 109 110 #define for_each_insn(file, insn) \ 111 for (struct section *__sec, *__fake = (struct section *)1; \ 112 __fake; __fake = NULL) \ 113 for_each_sec(file->elf, __sec) \ 114 sec_for_each_insn(file, __sec, insn) 115 116 #define func_for_each_insn(file, func, insn) \ 117 for (insn = find_insn(file, func->sec, func->offset); \ 118 insn; \ 119 insn = next_insn_same_func(file, insn)) 120 121 #define sym_for_each_insn(file, sym, insn) \ 122 for (insn = find_insn(file, sym->sec, sym->offset); \ 123 insn && insn->offset < sym->offset + sym->len; \ 124 insn = next_insn_same_sec(file, insn)) 125 126 #define sym_for_each_insn_continue_reverse(file, sym, insn) \ 127 for (insn = prev_insn_same_sec(file, insn); \ 128 insn && insn->offset >= sym->offset; \ 129 insn = prev_insn_same_sec(file, insn)) 130 131 #define sec_for_each_insn_from(file, insn) \ 132 for (; insn; insn = next_insn_same_sec(file, insn)) 133 134 #define sec_for_each_insn_continue(file, insn) \ 135 for (insn = next_insn_same_sec(file, insn); insn; \ 136 insn = next_insn_same_sec(file, insn)) 137 138 static inline struct reloc *insn_jump_table(struct instruction *insn) 139 { 140 if (insn->type == INSN_JUMP_DYNAMIC || 141 insn->type == INSN_CALL_DYNAMIC) 142 return 
insn->_jump_table; 143 144 return NULL; 145 } 146 147 static inline unsigned long insn_jump_table_size(struct instruction *insn) 148 { 149 if (insn->type == INSN_JUMP_DYNAMIC || 150 insn->type == INSN_CALL_DYNAMIC) 151 return insn->_jump_table_size; 152 153 return 0; 154 } 155 156 static bool is_jump_table_jump(struct instruction *insn) 157 { 158 struct alt_group *alt_group = insn->alt_group; 159 160 if (insn_jump_table(insn)) 161 return true; 162 163 /* Retpoline alternative for a jump table? */ 164 return alt_group && alt_group->orig_group && 165 insn_jump_table(alt_group->orig_group->first_insn); 166 } 167 168 static bool is_sibling_call(struct instruction *insn) 169 { 170 /* 171 * Assume only STT_FUNC calls have jump-tables. 172 */ 173 if (insn_func(insn)) { 174 /* An indirect jump is either a sibling call or a jump to a table. */ 175 if (insn->type == INSN_JUMP_DYNAMIC) 176 return !is_jump_table_jump(insn); 177 } 178 179 /* add_jump_destinations() sets insn_call_dest(insn) for sibling calls. */ 180 return (is_static_jump(insn) && insn_call_dest(insn)); 181 } 182 183 /* 184 * Checks if a function is a Rust "noreturn" one. 185 */ 186 static bool is_rust_noreturn(const struct symbol *func) 187 { 188 /* 189 * If it does not start with "_R", then it is not a Rust symbol. 190 */ 191 if (strncmp(func->name, "_R", 2)) 192 return false; 193 194 /* 195 * These are just heuristics -- we do not control the precise symbol 196 * name, due to the crate disambiguators (which depend on the compiler) 197 * as well as changes to the source code itself between versions (since 198 * these come from the Rust standard library). 
199 */ 200 return str_ends_with(func->name, "_4core3num22from_ascii_radix_panic") || 201 str_ends_with(func->name, "_4core5sliceSp15copy_from_slice17len_mismatch_fail") || 202 str_ends_with(func->name, "_4core6option13expect_failed") || 203 str_ends_with(func->name, "_4core6option13unwrap_failed") || 204 str_ends_with(func->name, "_4core6result13unwrap_failed") || 205 str_ends_with(func->name, "_4core9panicking5panic") || 206 str_ends_with(func->name, "_4core9panicking9panic_fmt") || 207 str_ends_with(func->name, "_4core9panicking14panic_explicit") || 208 str_ends_with(func->name, "_4core9panicking14panic_nounwind") || 209 str_ends_with(func->name, "_4core9panicking18panic_bounds_check") || 210 str_ends_with(func->name, "_4core9panicking18panic_nounwind_fmt") || 211 str_ends_with(func->name, "_4core9panicking19assert_failed_inner") || 212 str_ends_with(func->name, "_4core9panicking30panic_null_pointer_dereference") || 213 str_ends_with(func->name, "_4core9panicking36panic_misaligned_pointer_dereference") || 214 str_ends_with(func->name, "_7___rustc17rust_begin_unwind") || 215 strstr(func->name, "_4core9panicking13assert_failed") || 216 strstr(func->name, "_4core9panicking11panic_const24panic_const_") || 217 (strstr(func->name, "_4core5slice5index") && 218 strstr(func->name, "slice_") && 219 str_ends_with(func->name, "_fail")); 220 } 221 222 /* 223 * This checks to see if the given function is a "noreturn" function. 224 * 225 * For global functions which are outside the scope of this object file, we 226 * have to keep a manual list of them. 227 * 228 * For local functions, we have to detect them manually by simply looking for 229 * the lack of a return instruction. 
230 */ 231 static bool __dead_end_function(struct objtool_file *file, struct symbol *func, 232 int recursion) 233 { 234 int i; 235 struct instruction *insn; 236 bool empty = true; 237 238 #define NORETURN(func) __stringify(func), 239 static const char * const global_noreturns[] = { 240 #include "noreturns.h" 241 }; 242 #undef NORETURN 243 244 if (!func) 245 return false; 246 247 if (!is_local_sym(func)) { 248 if (is_rust_noreturn(func)) 249 return true; 250 251 for (i = 0; i < ARRAY_SIZE(global_noreturns); i++) 252 if (!strcmp(func->name, global_noreturns[i])) 253 return true; 254 } 255 256 if (is_weak_sym(func)) 257 return false; 258 259 if (!func->len) 260 return false; 261 262 insn = find_insn(file, func->sec, func->offset); 263 if (!insn || !insn_func(insn)) 264 return false; 265 266 func_for_each_insn(file, func, insn) { 267 empty = false; 268 269 if (insn->type == INSN_RETURN) 270 return false; 271 } 272 273 if (empty) 274 return false; 275 276 /* 277 * A function can have a sibling call instead of a return. In that 278 * case, the function's dead-end status depends on whether the target 279 * of the sibling call returns. 280 */ 281 func_for_each_insn(file, func, insn) { 282 if (is_sibling_call(insn)) { 283 struct instruction *dest = insn->jump_dest; 284 285 if (!dest) 286 /* sibling call to another file */ 287 return false; 288 289 /* local sibling call */ 290 if (recursion == 5) { 291 /* 292 * Infinite recursion: two functions have 293 * sibling calls to each other. This is a very 294 * rare case. It means they aren't dead ends. 
295 */ 296 return false; 297 } 298 299 return __dead_end_function(file, insn_func(dest), recursion+1); 300 } 301 } 302 303 return true; 304 } 305 306 static bool dead_end_function(struct objtool_file *file, struct symbol *func) 307 { 308 return __dead_end_function(file, func, 0); 309 } 310 311 static void init_cfi_state(struct cfi_state *cfi) 312 { 313 int i; 314 315 for (i = 0; i < CFI_NUM_REGS; i++) { 316 cfi->regs[i].base = CFI_UNDEFINED; 317 cfi->vals[i].base = CFI_UNDEFINED; 318 } 319 cfi->cfa.base = CFI_UNDEFINED; 320 cfi->drap_reg = CFI_UNDEFINED; 321 cfi->drap_offset = -1; 322 } 323 324 static void init_insn_state(struct objtool_file *file, struct insn_state *state, 325 struct section *sec) 326 { 327 memset(state, 0, sizeof(*state)); 328 init_cfi_state(&state->cfi); 329 330 if (opts.noinstr && sec) 331 state->noinstr = sec->noinstr; 332 } 333 334 static struct cfi_state *cfi_alloc(void) 335 { 336 struct cfi_state *cfi = calloc(1, sizeof(struct cfi_state)); 337 if (!cfi) { 338 ERROR_GLIBC("calloc"); 339 exit(1); 340 } 341 nr_cfi++; 342 return cfi; 343 } 344 345 static int cfi_bits; 346 static struct hlist_head *cfi_hash; 347 348 static inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2) 349 { 350 return memcmp((void *)cfi1 + sizeof(cfi1->hash), 351 (void *)cfi2 + sizeof(cfi2->hash), 352 sizeof(struct cfi_state) - sizeof(struct hlist_node)); 353 } 354 355 static inline u32 cfi_key(struct cfi_state *cfi) 356 { 357 return jhash((void *)cfi + sizeof(cfi->hash), 358 sizeof(*cfi) - sizeof(cfi->hash), 0); 359 } 360 361 static struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi) 362 { 363 struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)]; 364 struct cfi_state *obj; 365 366 hlist_for_each_entry(obj, head, hash) { 367 if (!cficmp(cfi, obj)) { 368 nr_cfi_cache++; 369 return obj; 370 } 371 } 372 373 obj = cfi_alloc(); 374 *obj = *cfi; 375 hlist_add_head(&obj->hash, head); 376 377 return obj; 378 } 379 380 static void 
cfi_hash_add(struct cfi_state *cfi) 381 { 382 struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)]; 383 384 hlist_add_head(&cfi->hash, head); 385 } 386 387 static void *cfi_hash_alloc(unsigned long size) 388 { 389 cfi_bits = max(10, ilog2(size)); 390 cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits, 391 PROT_READ|PROT_WRITE, 392 MAP_PRIVATE|MAP_ANON, -1, 0); 393 if (cfi_hash == (void *)-1L) { 394 ERROR_GLIBC("mmap fail cfi_hash"); 395 cfi_hash = NULL; 396 } else if (opts.stats) { 397 printf("cfi_bits: %d\n", cfi_bits); 398 } 399 400 return cfi_hash; 401 } 402 403 static unsigned long nr_insns; 404 static unsigned long nr_insns_visited; 405 406 /* 407 * Call the arch-specific instruction decoder for all the instructions and add 408 * them to the global instruction list. 409 */ 410 static int decode_instructions(struct objtool_file *file) 411 { 412 struct section *sec; 413 struct symbol *func; 414 unsigned long offset; 415 struct instruction *insn; 416 417 for_each_sec(file->elf, sec) { 418 struct instruction *insns = NULL; 419 u8 prev_len = 0; 420 u8 idx = 0; 421 422 if (!is_text_sec(sec)) 423 continue; 424 425 if (strcmp(sec->name, ".altinstr_replacement") && 426 strcmp(sec->name, ".altinstr_aux") && 427 strncmp(sec->name, ".discard.", 9)) 428 sec->text = true; 429 430 if (!strcmp(sec->name, ".noinstr.text") || 431 !strcmp(sec->name, ".entry.text") || 432 !strcmp(sec->name, ".cpuidle.text") || 433 !strncmp(sec->name, ".text..__x86.", 13)) 434 sec->noinstr = true; 435 436 /* 437 * .init.text code is ran before userspace and thus doesn't 438 * strictly need retpolines, except for modules which are 439 * loaded late, they very much do need retpoline in their 440 * .init.text 441 */ 442 if (!strcmp(sec->name, ".init.text") && !opts.module) 443 sec->init = true; 444 445 for (offset = 0; offset < sec_size(sec); offset += insn->len) { 446 if (!insns || idx == INSN_CHUNK_MAX) { 447 insns = calloc(INSN_CHUNK_SIZE, sizeof(*insn)); 448 if (!insns) 
{ 449 ERROR_GLIBC("calloc"); 450 return -1; 451 } 452 idx = 0; 453 } else { 454 idx++; 455 } 456 insn = &insns[idx]; 457 insn->idx = idx; 458 459 INIT_LIST_HEAD(&insn->call_node); 460 insn->sec = sec; 461 insn->offset = offset; 462 insn->prev_len = prev_len; 463 464 if (arch_decode_instruction(file, sec, offset, sec_size(sec) - offset, insn)) 465 return -1; 466 467 prev_len = insn->len; 468 469 /* 470 * By default, "ud2" is a dead end unless otherwise 471 * annotated, because GCC 7 inserts it for certain 472 * divide-by-zero cases. 473 */ 474 if (insn->type == INSN_BUG) 475 insn->dead_end = true; 476 477 hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset)); 478 nr_insns++; 479 } 480 481 sec_for_each_sym(sec, func) { 482 if (!is_notype_sym(func) && !is_func_sym(func)) 483 continue; 484 485 if (func->offset == sec_size(sec)) { 486 /* Heuristic: likely an "end" symbol */ 487 if (is_notype_sym(func)) 488 continue; 489 ERROR("%s(): STT_FUNC at end of section", func->name); 490 return -1; 491 } 492 493 if (func->embedded_insn || func->alias != func) 494 continue; 495 496 if (!find_insn(file, sec, func->offset)) { 497 ERROR("%s(): can't find starting instruction", func->name); 498 return -1; 499 } 500 501 sym_for_each_insn(file, func, insn) { 502 insn->sym = func; 503 if (is_func_sym(func) && 504 insn->type == INSN_ENDBR && 505 list_empty(&insn->call_node)) { 506 if (insn->offset == func->offset) { 507 list_add_tail(&insn->call_node, &file->endbr_list); 508 file->nr_endbr++; 509 } else { 510 file->nr_endbr_int++; 511 } 512 } 513 } 514 } 515 } 516 517 if (opts.stats) 518 printf("nr_insns: %lu\n", nr_insns); 519 520 return 0; 521 } 522 523 /* 524 * Known pv_ops*[] arrays. 525 */ 526 static struct { 527 const char *name; 528 int idx_off; 529 } pv_ops_tables[] = { 530 { .name = "pv_ops", }, 531 { .name = "pv_ops_lock", }, 532 { .name = NULL, .idx_off = -1 } 533 }; 534 535 /* 536 * Get index offset for a pv_ops* array. 
537 */ 538 int pv_ops_idx_off(const char *symname) 539 { 540 int idx; 541 542 for (idx = 0; pv_ops_tables[idx].name; idx++) { 543 if (!strcmp(symname, pv_ops_tables[idx].name)) 544 break; 545 } 546 547 return pv_ops_tables[idx].idx_off; 548 } 549 550 /* 551 * Read a pv_ops*[] .data table to find the static initialized values. 552 */ 553 static int add_pv_ops(struct objtool_file *file, int pv_ops_idx) 554 { 555 struct symbol *sym, *func; 556 unsigned long off, end; 557 struct reloc *reloc; 558 int idx, idx_off; 559 const char *symname; 560 561 symname = pv_ops_tables[pv_ops_idx].name; 562 sym = find_symbol_by_name(file->elf, symname); 563 if (!sym) { 564 ERROR("Unknown pv_ops array %s", symname); 565 return -1; 566 } 567 568 off = sym->offset; 569 end = off + sym->len; 570 idx_off = pv_ops_tables[pv_ops_idx].idx_off; 571 if (idx_off < 0) { 572 ERROR("pv_ops array %s has unknown index offset", symname); 573 return -1; 574 } 575 576 for (;;) { 577 reloc = find_reloc_by_dest_range(file->elf, sym->sec, off, end - off); 578 if (!reloc) 579 break; 580 581 idx = (reloc_offset(reloc) - sym->offset) / sizeof(unsigned long); 582 583 func = reloc->sym; 584 if (is_sec_sym(func)) 585 func = find_symbol_by_offset(reloc->sym->sec, 586 reloc_addend(reloc)); 587 if (!func) { 588 ERROR_FUNC(reloc->sym->sec, reloc_addend(reloc), 589 "can't find func at %s[%d]", symname, idx); 590 return -1; 591 } 592 593 if (objtool_pv_add(file, idx + idx_off, func)) 594 return -1; 595 596 off = reloc_offset(reloc) + 1; 597 if (off > end) 598 break; 599 } 600 601 return 0; 602 } 603 604 /* 605 * Allocate and initialize file->pv_ops[]. 
606 */ 607 static int init_pv_ops(struct objtool_file *file) 608 { 609 struct symbol *sym; 610 int idx, nr; 611 612 if (!opts.noinstr) 613 return 0; 614 615 file->pv_ops = NULL; 616 617 nr = 0; 618 for (idx = 0; pv_ops_tables[idx].name; idx++) { 619 sym = find_symbol_by_name(file->elf, pv_ops_tables[idx].name); 620 if (!sym) { 621 pv_ops_tables[idx].idx_off = -1; 622 continue; 623 } 624 pv_ops_tables[idx].idx_off = nr; 625 nr += sym->len / sizeof(unsigned long); 626 } 627 628 if (nr == 0) 629 return 0; 630 631 file->pv_ops = calloc(nr, sizeof(struct pv_state)); 632 if (!file->pv_ops) { 633 ERROR_GLIBC("calloc"); 634 return -1; 635 } 636 637 for (idx = 0; idx < nr; idx++) 638 INIT_LIST_HEAD(&file->pv_ops[idx].targets); 639 640 for (idx = 0; pv_ops_tables[idx].name; idx++) { 641 if (pv_ops_tables[idx].idx_off < 0) 642 continue; 643 if (add_pv_ops(file, idx)) 644 return -1; 645 } 646 647 return 0; 648 } 649 650 static bool is_livepatch_module(struct objtool_file *file) 651 { 652 struct section *sec; 653 654 if (!opts.module) 655 return false; 656 657 sec = find_section_by_name(file->elf, ".modinfo"); 658 if (!sec) 659 return false; 660 661 return memmem(sec->data->d_buf, sec_size(sec), "\0livepatch=Y", 12); 662 } 663 664 static int create_static_call_sections(struct objtool_file *file) 665 { 666 struct static_call_site *site; 667 struct section *sec; 668 struct instruction *insn; 669 struct symbol *key_sym; 670 char *key_name, *tmp; 671 int idx; 672 673 sec = find_section_by_name(file->elf, ".static_call_sites"); 674 if (sec) { 675 /* 676 * Livepatch modules may have already extracted the static call 677 * site entries to take advantage of vmlinux static call 678 * privileges. 
679 */ 680 if (!file->klp) 681 WARN("file already has .static_call_sites section, skipping"); 682 683 return 0; 684 } 685 686 if (list_empty(&file->static_call_list)) 687 return 0; 688 689 idx = 0; 690 list_for_each_entry(insn, &file->static_call_list, call_node) 691 idx++; 692 693 sec = elf_create_section_pair(file->elf, ".static_call_sites", 694 sizeof(*site), idx, idx * 2); 695 if (!sec) 696 return -1; 697 698 /* Allow modules to modify the low bits of static_call_site::key */ 699 sec->sh.sh_flags |= SHF_WRITE; 700 701 idx = 0; 702 list_for_each_entry(insn, &file->static_call_list, call_node) { 703 704 /* populate reloc for 'addr' */ 705 if (!elf_init_reloc_text_sym(file->elf, sec, 706 idx * sizeof(*site), idx * 2, 707 insn->sec, insn->offset)) 708 return -1; 709 710 /* find key symbol */ 711 key_name = strdup(insn_call_dest(insn)->name); 712 if (!key_name) { 713 ERROR_GLIBC("strdup"); 714 return -1; 715 } 716 if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR, 717 STATIC_CALL_TRAMP_PREFIX_LEN)) { 718 ERROR("static_call: trampoline name malformed: %s", key_name); 719 return -1; 720 } 721 tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN; 722 memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN); 723 724 key_sym = find_symbol_by_name(file->elf, tmp); 725 if (!key_sym) { 726 if (!opts.module) { 727 ERROR("static_call: can't find static_call_key symbol: %s", tmp); 728 return -1; 729 } 730 731 /* 732 * For modules(), the key might not be exported, which 733 * means the module can make static calls but isn't 734 * allowed to change them. 735 * 736 * In that case we temporarily set the key to be the 737 * trampoline address. This is fixed up in 738 * static_call_add_module(). 
739 */ 740 key_sym = insn_call_dest(insn); 741 } 742 743 /* populate reloc for 'key' */ 744 if (!elf_init_reloc_data_sym(file->elf, sec, 745 idx * sizeof(*site) + 4, 746 (idx * 2) + 1, key_sym, 747 is_sibling_call(insn) * STATIC_CALL_SITE_TAIL)) 748 return -1; 749 750 idx++; 751 } 752 753 return 0; 754 } 755 756 static int create_retpoline_sites_sections(struct objtool_file *file) 757 { 758 struct instruction *insn; 759 struct section *sec; 760 int idx; 761 762 sec = find_section_by_name(file->elf, ".retpoline_sites"); 763 if (sec) { 764 WARN("file already has .retpoline_sites, skipping"); 765 return 0; 766 } 767 768 idx = 0; 769 list_for_each_entry(insn, &file->retpoline_call_list, call_node) 770 idx++; 771 772 if (!idx) 773 return 0; 774 775 sec = elf_create_section_pair(file->elf, ".retpoline_sites", 776 sizeof(int), idx, idx); 777 if (!sec) 778 return -1; 779 780 idx = 0; 781 list_for_each_entry(insn, &file->retpoline_call_list, call_node) { 782 783 if (!elf_init_reloc_text_sym(file->elf, sec, 784 idx * sizeof(int), idx, 785 insn->sec, insn->offset)) 786 return -1; 787 788 idx++; 789 } 790 791 return 0; 792 } 793 794 static int create_return_sites_sections(struct objtool_file *file) 795 { 796 struct instruction *insn; 797 struct section *sec; 798 int idx; 799 800 sec = find_section_by_name(file->elf, ".return_sites"); 801 if (sec) { 802 WARN("file already has .return_sites, skipping"); 803 return 0; 804 } 805 806 idx = 0; 807 list_for_each_entry(insn, &file->return_thunk_list, call_node) 808 idx++; 809 810 if (!idx) 811 return 0; 812 813 sec = elf_create_section_pair(file->elf, ".return_sites", 814 sizeof(int), idx, idx); 815 if (!sec) 816 return -1; 817 818 idx = 0; 819 list_for_each_entry(insn, &file->return_thunk_list, call_node) { 820 821 if (!elf_init_reloc_text_sym(file->elf, sec, 822 idx * sizeof(int), idx, 823 insn->sec, insn->offset)) 824 return -1; 825 826 idx++; 827 } 828 829 return 0; 830 } 831 832 static int create_ibt_endbr_seal_sections(struct 
objtool_file *file) 833 { 834 struct instruction *insn; 835 struct section *sec; 836 int idx; 837 838 sec = find_section_by_name(file->elf, ".ibt_endbr_seal"); 839 if (sec) { 840 WARN("file already has .ibt_endbr_seal, skipping"); 841 return 0; 842 } 843 844 idx = 0; 845 list_for_each_entry(insn, &file->endbr_list, call_node) 846 idx++; 847 848 if (opts.stats) { 849 printf("ibt: ENDBR at function start: %d\n", file->nr_endbr); 850 printf("ibt: ENDBR inside functions: %d\n", file->nr_endbr_int); 851 printf("ibt: superfluous ENDBR: %d\n", idx); 852 } 853 854 if (!idx) 855 return 0; 856 857 sec = elf_create_section_pair(file->elf, ".ibt_endbr_seal", 858 sizeof(int), idx, idx); 859 if (!sec) 860 return -1; 861 862 idx = 0; 863 list_for_each_entry(insn, &file->endbr_list, call_node) { 864 865 int *site = (int *)sec->data->d_buf + idx; 866 struct symbol *sym = insn->sym; 867 *site = 0; 868 869 if (opts.module && sym && is_func_sym(sym) && 870 insn->offset == sym->offset && 871 (!strcmp(sym->name, "init_module") || 872 !strcmp(sym->name, "cleanup_module"))) { 873 ERROR("%s(): Magic init_module() function name is deprecated, use module_init(fn) instead", 874 sym->name); 875 return -1; 876 } 877 878 if (!elf_init_reloc_text_sym(file->elf, sec, 879 idx * sizeof(int), idx, 880 insn->sec, insn->offset)) 881 return -1; 882 883 idx++; 884 } 885 886 return 0; 887 } 888 889 static int create_cfi_sections(struct objtool_file *file) 890 { 891 struct section *sec; 892 struct symbol *sym; 893 int idx; 894 895 sec = find_section_by_name(file->elf, ".cfi_sites"); 896 if (sec) { 897 WARN("file already has .cfi_sites section, skipping"); 898 return 0; 899 } 900 901 idx = 0; 902 for_each_sym(file->elf, sym) { 903 if (!is_func_sym(sym)) 904 continue; 905 906 if (strncmp(sym->name, "__cfi_", 6)) 907 continue; 908 909 idx++; 910 } 911 912 sec = elf_create_section_pair(file->elf, ".cfi_sites", 913 sizeof(unsigned int), idx, idx); 914 if (!sec) 915 return -1; 916 917 idx = 0; 918 
for_each_sym(file->elf, sym) { 919 if (!is_func_sym(sym)) 920 continue; 921 922 if (strncmp(sym->name, "__cfi_", 6)) 923 continue; 924 925 if (!elf_init_reloc_text_sym(file->elf, sec, 926 idx * sizeof(unsigned int), idx, 927 sym->sec, sym->offset)) 928 return -1; 929 930 idx++; 931 } 932 933 return 0; 934 } 935 936 static int create_mcount_loc_sections(struct objtool_file *file) 937 { 938 size_t addr_size = elf_addr_size(file->elf); 939 struct instruction *insn; 940 struct section *sec; 941 int idx; 942 943 sec = find_section_by_name(file->elf, "__mcount_loc"); 944 if (sec) { 945 /* 946 * Livepatch modules have already extracted their __mcount_loc 947 * entries to cover the !CONFIG_FTRACE_MCOUNT_USE_OBJTOOL case. 948 */ 949 if (!file->klp) 950 WARN("file already has __mcount_loc section, skipping"); 951 952 return 0; 953 } 954 955 if (list_empty(&file->mcount_loc_list)) 956 return 0; 957 958 idx = 0; 959 list_for_each_entry(insn, &file->mcount_loc_list, call_node) 960 idx++; 961 962 sec = elf_create_section_pair(file->elf, "__mcount_loc", addr_size, 963 idx, idx); 964 if (!sec) 965 return -1; 966 967 sec->sh.sh_addralign = addr_size; 968 969 idx = 0; 970 list_for_each_entry(insn, &file->mcount_loc_list, call_node) { 971 972 struct reloc *reloc; 973 974 reloc = elf_init_reloc_text_sym(file->elf, sec, idx * addr_size, idx, 975 insn->sec, insn->offset); 976 if (!reloc) 977 return -1; 978 979 set_reloc_type(file->elf, reloc, addr_size == 8 ? 
R_ABS64 : R_ABS32); 980 981 idx++; 982 } 983 984 return 0; 985 } 986 987 static int create_direct_call_sections(struct objtool_file *file) 988 { 989 struct instruction *insn; 990 struct section *sec; 991 int idx; 992 993 sec = find_section_by_name(file->elf, ".call_sites"); 994 if (sec) { 995 WARN("file already has .call_sites section, skipping"); 996 return 0; 997 } 998 999 if (list_empty(&file->call_list)) 1000 return 0; 1001 1002 idx = 0; 1003 list_for_each_entry(insn, &file->call_list, call_node) 1004 idx++; 1005 1006 sec = elf_create_section_pair(file->elf, ".call_sites", 1007 sizeof(unsigned int), idx, idx); 1008 if (!sec) 1009 return -1; 1010 1011 idx = 0; 1012 list_for_each_entry(insn, &file->call_list, call_node) { 1013 1014 if (!elf_init_reloc_text_sym(file->elf, sec, 1015 idx * sizeof(unsigned int), idx, 1016 insn->sec, insn->offset)) 1017 return -1; 1018 1019 idx++; 1020 } 1021 1022 return 0; 1023 } 1024 1025 #ifdef BUILD_KLP 1026 static int create_sym_checksum_section(struct objtool_file *file) 1027 { 1028 struct section *sec; 1029 struct symbol *sym; 1030 unsigned int idx = 0; 1031 struct sym_checksum *checksum; 1032 size_t entsize = sizeof(struct sym_checksum); 1033 1034 sec = find_section_by_name(file->elf, ".discard.sym_checksum"); 1035 if (sec) { 1036 if (!opts.dryrun) 1037 WARN("file already has .discard.sym_checksum section, skipping"); 1038 1039 return 0; 1040 } 1041 1042 for_each_sym(file->elf, sym) 1043 if (sym->csum.checksum) 1044 idx++; 1045 1046 if (!idx) 1047 return 0; 1048 1049 sec = elf_create_section_pair(file->elf, ".discard.sym_checksum", entsize, 1050 idx, idx); 1051 if (!sec) 1052 return -1; 1053 1054 idx = 0; 1055 for_each_sym(file->elf, sym) { 1056 if (!sym->csum.checksum) 1057 continue; 1058 1059 if (!elf_init_reloc(file->elf, sec->rsec, idx, idx * entsize, 1060 sym, 0, R_TEXT64)) 1061 return -1; 1062 1063 checksum = (struct sym_checksum *)sec->data->d_buf + idx; 1064 checksum->addr = 0; /* reloc */ 1065 checksum->checksum = 
sym->csum.checksum; 1066 1067 mark_sec_changed(file->elf, sec, true); 1068 1069 idx++; 1070 } 1071 1072 return 0; 1073 } 1074 #else 1075 static int create_sym_checksum_section(struct objtool_file *file) { return -EINVAL; } 1076 #endif 1077 1078 /* 1079 * Warnings shouldn't be reported for ignored functions. 1080 */ 1081 static int add_ignores(struct objtool_file *file) 1082 { 1083 struct section *rsec; 1084 struct symbol *func; 1085 struct reloc *reloc; 1086 1087 rsec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard"); 1088 if (!rsec) 1089 return 0; 1090 1091 for_each_reloc(rsec, reloc) { 1092 switch (reloc->sym->type) { 1093 case STT_FUNC: 1094 func = reloc->sym; 1095 break; 1096 1097 case STT_SECTION: 1098 func = find_func_by_offset(reloc->sym->sec, reloc_addend(reloc)); 1099 if (!func) 1100 continue; 1101 break; 1102 1103 default: 1104 ERROR("unexpected relocation symbol type in %s: %d", 1105 rsec->name, reloc->sym->type); 1106 return -1; 1107 } 1108 1109 func->ignore = true; 1110 if (func->cfunc) 1111 func->cfunc->ignore = true; 1112 } 1113 1114 return 0; 1115 } 1116 1117 /* 1118 * This is a whitelist of functions that is allowed to be called with AC set. 1119 * The list is meant to be minimal and only contains compiler instrumentation 1120 * ABI and a few functions used to implement *_{to,from}_user() functions. 1121 * 1122 * These functions must not directly change AC, but may PUSHF/POPF. 
1123 */ 1124 static const char *uaccess_safe_builtin[] = { 1125 /* KASAN */ 1126 "kasan_report", 1127 "kasan_check_range", 1128 /* KASAN out-of-line */ 1129 "__asan_loadN_noabort", 1130 "__asan_load1_noabort", 1131 "__asan_load2_noabort", 1132 "__asan_load4_noabort", 1133 "__asan_load8_noabort", 1134 "__asan_load16_noabort", 1135 "__asan_storeN_noabort", 1136 "__asan_store1_noabort", 1137 "__asan_store2_noabort", 1138 "__asan_store4_noabort", 1139 "__asan_store8_noabort", 1140 "__asan_store16_noabort", 1141 "__kasan_check_read", 1142 "__kasan_check_write", 1143 /* KASAN in-line */ 1144 "__asan_report_load_n_noabort", 1145 "__asan_report_load1_noabort", 1146 "__asan_report_load2_noabort", 1147 "__asan_report_load4_noabort", 1148 "__asan_report_load8_noabort", 1149 "__asan_report_load16_noabort", 1150 "__asan_report_store_n_noabort", 1151 "__asan_report_store1_noabort", 1152 "__asan_report_store2_noabort", 1153 "__asan_report_store4_noabort", 1154 "__asan_report_store8_noabort", 1155 "__asan_report_store16_noabort", 1156 /* KCSAN */ 1157 "__kcsan_check_access", 1158 "__kcsan_mb", 1159 "__kcsan_wmb", 1160 "__kcsan_rmb", 1161 "__kcsan_release", 1162 "kcsan_found_watchpoint", 1163 "kcsan_setup_watchpoint", 1164 "kcsan_check_scoped_accesses", 1165 "kcsan_disable_current", 1166 "kcsan_enable_current_nowarn", 1167 /* KCSAN/TSAN */ 1168 "__tsan_func_entry", 1169 "__tsan_func_exit", 1170 "__tsan_read_range", 1171 "__tsan_write_range", 1172 "__tsan_read1", 1173 "__tsan_read2", 1174 "__tsan_read4", 1175 "__tsan_read8", 1176 "__tsan_read16", 1177 "__tsan_write1", 1178 "__tsan_write2", 1179 "__tsan_write4", 1180 "__tsan_write8", 1181 "__tsan_write16", 1182 "__tsan_read_write1", 1183 "__tsan_read_write2", 1184 "__tsan_read_write4", 1185 "__tsan_read_write8", 1186 "__tsan_read_write16", 1187 "__tsan_volatile_read1", 1188 "__tsan_volatile_read2", 1189 "__tsan_volatile_read4", 1190 "__tsan_volatile_read8", 1191 "__tsan_volatile_read16", 1192 "__tsan_volatile_write1", 1193 
"__tsan_volatile_write2", 1194 "__tsan_volatile_write4", 1195 "__tsan_volatile_write8", 1196 "__tsan_volatile_write16", 1197 "__tsan_atomic8_load", 1198 "__tsan_atomic16_load", 1199 "__tsan_atomic32_load", 1200 "__tsan_atomic64_load", 1201 "__tsan_atomic8_store", 1202 "__tsan_atomic16_store", 1203 "__tsan_atomic32_store", 1204 "__tsan_atomic64_store", 1205 "__tsan_atomic8_exchange", 1206 "__tsan_atomic16_exchange", 1207 "__tsan_atomic32_exchange", 1208 "__tsan_atomic64_exchange", 1209 "__tsan_atomic8_fetch_add", 1210 "__tsan_atomic16_fetch_add", 1211 "__tsan_atomic32_fetch_add", 1212 "__tsan_atomic64_fetch_add", 1213 "__tsan_atomic8_fetch_sub", 1214 "__tsan_atomic16_fetch_sub", 1215 "__tsan_atomic32_fetch_sub", 1216 "__tsan_atomic64_fetch_sub", 1217 "__tsan_atomic8_fetch_and", 1218 "__tsan_atomic16_fetch_and", 1219 "__tsan_atomic32_fetch_and", 1220 "__tsan_atomic64_fetch_and", 1221 "__tsan_atomic8_fetch_or", 1222 "__tsan_atomic16_fetch_or", 1223 "__tsan_atomic32_fetch_or", 1224 "__tsan_atomic64_fetch_or", 1225 "__tsan_atomic8_fetch_xor", 1226 "__tsan_atomic16_fetch_xor", 1227 "__tsan_atomic32_fetch_xor", 1228 "__tsan_atomic64_fetch_xor", 1229 "__tsan_atomic8_fetch_nand", 1230 "__tsan_atomic16_fetch_nand", 1231 "__tsan_atomic32_fetch_nand", 1232 "__tsan_atomic64_fetch_nand", 1233 "__tsan_atomic8_compare_exchange_strong", 1234 "__tsan_atomic16_compare_exchange_strong", 1235 "__tsan_atomic32_compare_exchange_strong", 1236 "__tsan_atomic64_compare_exchange_strong", 1237 "__tsan_atomic8_compare_exchange_weak", 1238 "__tsan_atomic16_compare_exchange_weak", 1239 "__tsan_atomic32_compare_exchange_weak", 1240 "__tsan_atomic64_compare_exchange_weak", 1241 "__tsan_atomic8_compare_exchange_val", 1242 "__tsan_atomic16_compare_exchange_val", 1243 "__tsan_atomic32_compare_exchange_val", 1244 "__tsan_atomic64_compare_exchange_val", 1245 "__tsan_atomic_thread_fence", 1246 "__tsan_atomic_signal_fence", 1247 "__tsan_unaligned_read16", 1248 "__tsan_unaligned_write16", 1249 /* KCOV */ 
	"write_comp_data",
	"check_kcov_mode",
	"__sanitizer_cov_trace_pc",
	"__sanitizer_cov_trace_const_cmp1",
	"__sanitizer_cov_trace_const_cmp2",
	"__sanitizer_cov_trace_const_cmp4",
	"__sanitizer_cov_trace_const_cmp8",
	"__sanitizer_cov_trace_cmp1",
	"__sanitizer_cov_trace_cmp2",
	"__sanitizer_cov_trace_cmp4",
	"__sanitizer_cov_trace_cmp8",
	"__sanitizer_cov_trace_switch",
	/* KMSAN */
	"kmsan_copy_to_user",
	"kmsan_disable_current",
	"kmsan_enable_current",
	"kmsan_report",
	"kmsan_unpoison_entry_regs",
	"kmsan_unpoison_memory",
	"__msan_chain_origin",
	"__msan_get_context_state",
	"__msan_instrument_asm_store",
	"__msan_metadata_ptr_for_load_1",
	"__msan_metadata_ptr_for_load_2",
	"__msan_metadata_ptr_for_load_4",
	"__msan_metadata_ptr_for_load_8",
	"__msan_metadata_ptr_for_load_n",
	"__msan_metadata_ptr_for_store_1",
	"__msan_metadata_ptr_for_store_2",
	"__msan_metadata_ptr_for_store_4",
	"__msan_metadata_ptr_for_store_8",
	"__msan_metadata_ptr_for_store_n",
	"__msan_poison_alloca",
	"__msan_warning",
	/* UBSAN */
	"ubsan_type_mismatch_common",
	"__ubsan_handle_type_mismatch",
	"__ubsan_handle_type_mismatch_v1",
	"__ubsan_handle_shift_out_of_bounds",
	"__ubsan_handle_load_invalid_value",
	/* KSTACK_ERASE */
	"__sanitizer_cov_stack_depth",
	/* TRACE_BRANCH_PROFILING */
	"ftrace_likely_update",
	/* STACKPROTECTOR */
	"__stack_chk_fail",
	/* misc */
	"csum_partial_copy_generic",
	"copy_mc_fragile",
	"copy_mc_fragile_handle_tail",
	"copy_mc_enhanced_fast_string",
	"rep_stos_alternative",
	"rep_movs_alternative",
	"__copy_user_nocache",
	NULL
};

/*
 * Mark every symbol named in uaccess_safe_builtin[] as safe to call with
 * uaccess enabled.  Only relevant when running with --uaccess.
 */
static void add_uaccess_safe(struct objtool_file *file)
{
	struct symbol *func;
	const char **name;

	if (!opts.uaccess)
		return;

	for (name = uaccess_safe_builtin; *name; name++) {
		func = find_symbol_by_name(file->elf, *name);
		if (!func)
			continue;

		func->uaccess_safe = true;
	}
}

/*
 * Symbols that replace INSN_CALL_DYNAMIC, every (tail) call to such a symbol
 * will be added to the .retpoline_sites section.
 */
__weak bool arch_is_retpoline(struct symbol *sym)
{
	return false;
}

/*
 * Symbols that replace INSN_RETURN, every (tail) call to such a symbol
 * will be added to the .return_sites section.
 */
__weak bool arch_is_rethunk(struct symbol *sym)
{
	return false;
}

/*
 * Symbols that are embedded inside other instructions, because sometimes crazy
 * code exists. These are mostly ignored for validation purposes.
 */
__weak bool arch_is_embedded_insn(struct symbol *sym)
{
	return false;
}

/*
 * Find the relocation covering @insn's bytes, caching a negative result in
 * insn->no_reloc so subsequent lookups are cheap.
 */
static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
{
	struct reloc *reloc;

	if (insn->no_reloc)
		return NULL;

	if (!file)
		return NULL;

	reloc = find_reloc_by_dest_range(file->elf, insn->sec,
					 insn->offset, insn->len);
	if (!reloc) {
		/* Remember the miss so we never search this insn again. */
		insn->no_reloc = 1;
		return NULL;
	}

	return reloc;
}

/* Free @insn's stack_op list and reset it to empty. */
static void remove_insn_ops(struct instruction *insn)
{
	struct stack_op *op, *next;

	for (op = insn->stack_ops; op; op = next) {
		next = op->next;
		free(op);
	}
	insn->stack_ops = NULL;
}

/*
 * Classify a call site per its destination symbol: queue it on the
 * static call / retpoline / mcount lists, NOP out profiling calls in
 * noinstr text, and flag calls to dead-end functions.  @sibling is true
 * for tail calls.  Returns 0 on success, -1 on error.
 */
static int annotate_call_site(struct objtool_file *file,
			      struct instruction *insn, bool sibling)
{
	struct reloc *reloc = insn_reloc(file, insn);
	struct symbol *sym = insn_call_dest(insn);

	if (!sym)
		sym = reloc->sym;

	if (sym->static_call_tramp) {
		list_add_tail(&insn->call_node, &file->static_call_list);
		return 0;
	}

	if (sym->retpoline_thunk) {
		list_add_tail(&insn->call_node, &file->retpoline_call_list);
		return 0;
	}

	/*
	 * Many compilers cannot disable KCOV or sanitizer calls with a function
	 * attribute so they need a little help, NOP out any such calls from
	 * noinstr text.
	 */
	if (opts.hack_noinstr && insn->sec->noinstr && sym->profiling_func) {
		if (reloc)
			set_reloc_type(file->elf, reloc, R_NONE);

		if (elf_write_insn(file->elf, insn->sec,
				   insn->offset, insn->len,
				   sibling ? arch_ret_insn(insn->len)
					   : arch_nop_insn(insn->len))) {
			return -1;
		}

		insn->type = sibling ? INSN_RETURN : INSN_NOP;

		if (sibling) {
			/*
			 * We've replaced the tail-call JMP insn by two new
			 * insn: RET; INT3, except we only have a single struct
			 * insn here. Mark it retpoline_safe to avoid the SLS
			 * warning, instead of adding another insn.
			 */
			insn->retpoline_safe = true;
		}

		return 0;
	}

	if (opts.mcount && sym->fentry) {
		if (sibling)
			WARN_INSN(insn, "tail call to __fentry__ !?!?");
		if (opts.mnop) {
			if (reloc)
				set_reloc_type(file->elf, reloc, R_NONE);

			if (elf_write_insn(file->elf, insn->sec,
					   insn->offset, insn->len,
					   arch_nop_insn(insn->len))) {
				return -1;
			}

			insn->type = INSN_NOP;
		}

		list_add_tail(&insn->call_node, &file->mcount_loc_list);
		return 0;
	}

	if (insn->type == INSN_CALL && !insn->sec->init &&
	    !insn->_call_dest->embedded_insn)
		list_add_tail(&insn->call_node, &file->call_list);

	if (!sibling && dead_end_function(file, sym))
		insn->dead_end = true;

	return 0;
}

/*
 * Record @dest as @insn's call destination and run the per-site
 * annotations.  A NULL @dest is accepted and recorded as-is.
 */
static int add_call_dest(struct objtool_file *file, struct instruction *insn,
			 struct symbol *dest, bool sibling)
{
	insn->_call_dest = dest;
	if (!dest)
		return 0;

	/*
	 * Whatever stack impact regular CALLs have, should be undone
	 * by the RETURN of the called function.
	 *
	 * Annotated intra-function calls retain the stack_ops but
	 * are converted to JUMP, see read_intra_function_calls().
	 */
	remove_insn_ops(insn);

	return annotate_call_site(file, insn, sibling);
}

static int add_retpoline_call(struct objtool_file *file, struct instruction *insn)
{
	/*
	 * Retpoline calls/jumps are really dynamic calls/jumps in disguise,
	 * so convert them accordingly.
	 */
	switch (insn->type) {
	case INSN_CALL:
		insn->type = INSN_CALL_DYNAMIC;
		break;
	case INSN_JUMP_UNCONDITIONAL:
		insn->type = INSN_JUMP_DYNAMIC;
		break;
	case INSN_JUMP_CONDITIONAL:
		insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;
		break;
	default:
		return 0;
	}

	insn->retpoline_safe = true;

	/*
	 * Whatever stack impact regular CALLs have, should be undone
	 * by the RETURN of the called function.
	 *
	 * Annotated intra-function calls retain the stack_ops but
	 * are converted to JUMP, see read_intra_function_calls().
	 */
	remove_insn_ops(insn);

	return annotate_call_site(file, insn, false);
}

static void add_return_call(struct objtool_file *file, struct instruction *insn, bool add)
{
	/*
	 * Return thunk tail calls are really just returns in disguise,
	 * so convert them accordingly.
	 */
	insn->type = INSN_RETURN;
	insn->retpoline_safe = true;

	/* @add is false for embedded-insn thunk targets, which aren't listed. */
	if (add)
		list_add_tail(&insn->call_node, &file->return_thunk_list);
}

/*
 * Is @insn the first real instruction of its function?  With IBT an
 * ENDBR may precede it, and direct CALL/JMP past the ENDBR still counts.
 */
static bool is_first_func_insn(struct objtool_file *file,
			       struct instruction *insn)
{
	struct symbol *func = insn_func(insn);

	if (!func)
		return false;

	if (insn->offset == func->offset)
		return true;

	/* Allow direct CALL/JMP past ENDBR */
	if (opts.ibt) {
		struct instruction *prev = prev_insn_same_sym(file, insn);

		if (prev && prev->type == INSN_ENDBR &&
		    insn->offset == func->offset + prev->len)
			return true;
	}

	return false;
}

/*
 * Find the destination instructions for all jumps.
 */
static int add_jump_destinations(struct objtool_file *file)
{
	struct instruction *insn;
	struct reloc *reloc;

	for_each_insn(file, insn) {
		struct symbol *func = insn_func(insn);
		struct instruction *dest_insn;
		struct section *dest_sec;
		struct symbol *dest_sym;
		unsigned long dest_off;

		if (!is_static_jump(insn))
			continue;

		if (insn->jump_dest) {
			/*
			 * handle_group_alt() may have previously set
			 * 'jump_dest' for some alternatives.
			 */
			continue;
		}

		reloc = insn_reloc(file, insn);
		if (!reloc) {
			/* Destination is encoded in the instruction itself. */
			dest_sec = insn->sec;
			dest_off = arch_jump_destination(insn);
			dest_sym = dest_sec->sym;
		} else {
			dest_sym = reloc->sym;
			if (is_undef_sym(dest_sym)) {
				if (dest_sym->retpoline_thunk) {
					if (add_retpoline_call(file, insn))
						return -1;
					continue;
				}

				if (dest_sym->return_thunk) {
					add_return_call(file, insn, true);
					continue;
				}

				/* External symbol */
				if (func) {
					/* External sibling call */
					if (add_call_dest(file, insn, dest_sym, true))
						return -1;
					continue;
				}

				/* Non-func asm code jumping to external symbol */
				continue;
			}

			dest_sec = dest_sym->sec;
			dest_off = dest_sym->offset + arch_insn_adjusted_addend(insn, reloc);
		}

		dest_insn = find_insn(file, dest_sec, dest_off);
		if (!dest_insn) {
			struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off);

			/*
			 * retbleed_untrain_ret() jumps to
			 * __x86_return_thunk(), but objtool can't find
			 * the thunk's starting RET instruction,
			 * because the RET is also in the middle of
			 * another instruction.  Objtool only knows
			 * about the outer instruction.
			 */
			if (sym && sym->embedded_insn) {
				add_return_call(file, insn, false);
				continue;
			}

			/*
			 * GCOV/KCOV dead code can jump to the end of
			 * the function/section.
			 */
			if (file->ignore_unreachables && func &&
			    dest_sec == insn->sec &&
			    dest_off == func->offset + func->len)
				continue;

			ERROR_INSN(insn, "can't find jump dest instruction at %s",
				   offstr(dest_sec, dest_off));
			return -1;
		}

		if (!dest_sym || is_sec_sym(dest_sym)) {
			dest_sym = dest_insn->sym;
			if (!dest_sym)
				goto set_jump_dest;
		}

		if (dest_sym->retpoline_thunk && dest_insn->offset == dest_sym->offset) {
			if (add_retpoline_call(file, insn))
				return -1;
			continue;
		}

		if (dest_sym->return_thunk && dest_insn->offset == dest_sym->offset) {
			add_return_call(file, insn, true);
			continue;
		}

		if (!insn->sym || insn->sym->pfunc == dest_sym->pfunc)
			goto set_jump_dest;

		/*
		 * Internal cross-function jump.
		 */

		if (is_first_func_insn(file, dest_insn)) {
			/* Internal sibling call */
			if (add_call_dest(file, insn, dest_sym, true))
				return -1;
			continue;
		}

set_jump_dest:
		insn->jump_dest = dest_insn;
	}

	return 0;
}

/*
 * Resolve @sec+@offset to a symbol, preferring a function symbol over a
 * plain symbol at the same location.
 */
static struct symbol *find_call_destination(struct section *sec, unsigned long offset)
{
	struct symbol *call_dest;

	call_dest = find_func_by_offset(sec, offset);
	if (!call_dest)
		call_dest = find_symbol_by_offset(sec, offset);

	return call_dest;
}

/*
 * Find the destination instructions for all calls.
 */
static int add_call_destinations(struct objtool_file *file)
{
	struct instruction *insn;
	unsigned long dest_off;
	struct symbol *dest;
	struct reloc *reloc;

	for_each_insn(file, insn) {
		struct symbol *func = insn_func(insn);
		if (insn->type != INSN_CALL)
			continue;

		reloc = insn_reloc(file, insn);
		if (!reloc) {
			/* No reloc: destination is encoded in the insn itself. */
			dest_off = arch_jump_destination(insn);
			dest = find_call_destination(insn->sec, dest_off);

			if (add_call_dest(file, insn, dest, false))
				return -1;

			if (func && func->ignore)
				continue;

			/*
			 * A call within a function which resolves to no symbol
			 * must carry an intra-function-call annotation.
			 */
			if (!insn_call_dest(insn)) {
				ERROR_INSN(insn, "unannotated intra-function call");
				return -1;
			}

			if (func && !is_func_sym(insn_call_dest(insn))) {
				ERROR_INSN(insn, "unsupported call to non-function");
				return -1;
			}

		} else if (is_sec_sym(reloc->sym)) {
			/* Section-relative reloc: resolve via the addend. */
			dest_off = arch_insn_adjusted_addend(insn, reloc);
			dest = find_call_destination(reloc->sym->sec, dest_off);
			if (!dest) {
				ERROR_INSN(insn, "can't find call dest symbol at %s+0x%lx",
					   reloc->sym->sec->name, dest_off);
				return -1;
			}

			if (add_call_dest(file, insn, dest, false))
				return -1;

		} else if (reloc->sym->retpoline_thunk) {
			if (add_retpoline_call(file, insn))
				return -1;

		} else {
			/* Direct symbol reloc. */
			if (add_call_dest(file, insn, reloc->sym, false))
				return -1;
		}
	}

	return 0;
}

/*
 * The .alternatives section requires some extra special care over and above
 * other special sections because alternatives are patched in place.
1756 */ 1757 static int handle_group_alt(struct objtool_file *file, 1758 struct special_alt *special_alt, 1759 struct instruction *orig_insn, 1760 struct instruction **new_insn) 1761 { 1762 struct instruction *last_new_insn = NULL, *insn, *nop = NULL; 1763 struct alt_group *orig_alt_group, *new_alt_group; 1764 unsigned long dest_off; 1765 1766 orig_alt_group = orig_insn->alt_group; 1767 if (!orig_alt_group) { 1768 struct instruction *last_orig_insn = NULL; 1769 1770 orig_alt_group = calloc(1, sizeof(*orig_alt_group)); 1771 if (!orig_alt_group) { 1772 ERROR_GLIBC("calloc"); 1773 return -1; 1774 } 1775 orig_alt_group->cfi = calloc(special_alt->orig_len, 1776 sizeof(struct cfi_state *)); 1777 if (!orig_alt_group->cfi) { 1778 ERROR_GLIBC("calloc"); 1779 return -1; 1780 } 1781 1782 insn = orig_insn; 1783 sec_for_each_insn_from(file, insn) { 1784 if (insn->offset >= special_alt->orig_off + special_alt->orig_len) 1785 break; 1786 1787 insn->alt_group = orig_alt_group; 1788 last_orig_insn = insn; 1789 } 1790 orig_alt_group->orig_group = NULL; 1791 orig_alt_group->first_insn = orig_insn; 1792 orig_alt_group->last_insn = last_orig_insn; 1793 orig_alt_group->nop = NULL; 1794 orig_alt_group->ignore = orig_insn->ignore_alts; 1795 orig_alt_group->feature = 0; 1796 } else { 1797 if (orig_alt_group->last_insn->offset + orig_alt_group->last_insn->len - 1798 orig_alt_group->first_insn->offset != special_alt->orig_len) { 1799 ERROR_INSN(orig_insn, "weirdly overlapping alternative! %ld != %d", 1800 orig_alt_group->last_insn->offset + 1801 orig_alt_group->last_insn->len - 1802 orig_alt_group->first_insn->offset, 1803 special_alt->orig_len); 1804 return -1; 1805 } 1806 } 1807 1808 new_alt_group = calloc(1, sizeof(*new_alt_group)); 1809 if (!new_alt_group) { 1810 ERROR_GLIBC("calloc"); 1811 return -1; 1812 } 1813 1814 if (special_alt->new_len < special_alt->orig_len) { 1815 /* 1816 * Insert a fake nop at the end to make the replacement 1817 * alt_group the same size as the original. 
This is needed to 1818 * allow propagate_alt_cfi() to do its magic. When the last 1819 * instruction affects the stack, the instruction after it (the 1820 * nop) will propagate the new state to the shared CFI array. 1821 */ 1822 nop = calloc(1, sizeof(*nop)); 1823 if (!nop) { 1824 ERROR_GLIBC("calloc"); 1825 return -1; 1826 } 1827 memset(nop, 0, sizeof(*nop)); 1828 1829 nop->sec = special_alt->new_sec; 1830 nop->offset = special_alt->new_off + special_alt->new_len; 1831 nop->len = special_alt->orig_len - special_alt->new_len; 1832 nop->type = INSN_NOP; 1833 nop->sym = orig_insn->sym; 1834 nop->alt_group = new_alt_group; 1835 nop->fake = 1; 1836 } 1837 1838 if (!special_alt->new_len) { 1839 *new_insn = nop; 1840 goto end; 1841 } 1842 1843 insn = *new_insn; 1844 sec_for_each_insn_from(file, insn) { 1845 struct reloc *alt_reloc; 1846 1847 if (insn->offset >= special_alt->new_off + special_alt->new_len) 1848 break; 1849 1850 last_new_insn = insn; 1851 1852 insn->sym = orig_insn->sym; 1853 insn->alt_group = new_alt_group; 1854 1855 /* 1856 * Since alternative replacement code is copy/pasted by the 1857 * kernel after applying relocations, generally such code can't 1858 * have relative-address relocation references to outside the 1859 * .altinstr_replacement section, unless the arch's 1860 * alternatives code can adjust the relative offsets 1861 * accordingly. 
1862 */ 1863 alt_reloc = insn_reloc(file, insn); 1864 if (alt_reloc && arch_pc_relative_reloc(alt_reloc) && 1865 !arch_support_alt_relocation(special_alt, insn, alt_reloc)) { 1866 1867 ERROR_INSN(insn, "unsupported relocation in alternatives section"); 1868 return -1; 1869 } 1870 1871 if (!is_static_jump(insn)) 1872 continue; 1873 1874 if (!insn->immediate) 1875 continue; 1876 1877 dest_off = arch_jump_destination(insn); 1878 if (dest_off == special_alt->new_off + special_alt->new_len) { 1879 insn->jump_dest = next_insn_same_sec(file, orig_alt_group->last_insn); 1880 if (!insn->jump_dest) { 1881 ERROR_INSN(insn, "can't find alternative jump destination"); 1882 return -1; 1883 } 1884 } 1885 } 1886 1887 if (!last_new_insn) { 1888 ERROR_FUNC(special_alt->new_sec, special_alt->new_off, 1889 "can't find last new alternative instruction"); 1890 return -1; 1891 } 1892 1893 end: 1894 new_alt_group->orig_group = orig_alt_group; 1895 new_alt_group->first_insn = *new_insn; 1896 new_alt_group->last_insn = last_new_insn; 1897 new_alt_group->nop = nop; 1898 new_alt_group->ignore = (*new_insn)->ignore_alts; 1899 new_alt_group->cfi = orig_alt_group->cfi; 1900 new_alt_group->feature = special_alt->feature; 1901 return 0; 1902 } 1903 1904 /* 1905 * A jump table entry can either convert a nop to a jump or a jump to a nop. 1906 * If the original instruction is a jump, make the alt entry an effective nop 1907 * by just skipping the original instruction. 
1908 */ 1909 static int handle_jump_alt(struct objtool_file *file, 1910 struct special_alt *special_alt, 1911 struct instruction *orig_insn, 1912 struct instruction **new_insn) 1913 { 1914 if (orig_insn->type != INSN_JUMP_UNCONDITIONAL && 1915 orig_insn->type != INSN_NOP) { 1916 1917 ERROR_INSN(orig_insn, "unsupported instruction at jump label"); 1918 return -1; 1919 } 1920 1921 if (opts.hack_jump_label && special_alt->key_addend & 2) { 1922 struct reloc *reloc = insn_reloc(file, orig_insn); 1923 1924 if (reloc) 1925 set_reloc_type(file->elf, reloc, R_NONE); 1926 1927 if (elf_write_insn(file->elf, orig_insn->sec, 1928 orig_insn->offset, orig_insn->len, 1929 arch_nop_insn(orig_insn->len))) { 1930 return -1; 1931 } 1932 1933 orig_insn->type = INSN_NOP; 1934 } 1935 1936 if (orig_insn->type == INSN_NOP) { 1937 if (orig_insn->len == 2) 1938 file->jl_nop_short++; 1939 else 1940 file->jl_nop_long++; 1941 1942 return 0; 1943 } 1944 1945 if (orig_insn->len == 2) 1946 file->jl_short++; 1947 else 1948 file->jl_long++; 1949 1950 *new_insn = next_insn_same_sec(file, orig_insn); 1951 return 0; 1952 } 1953 1954 /* 1955 * Read all the special sections which have alternate instructions which can be 1956 * patched in or redirected to at runtime. Each instruction having alternate 1957 * instruction(s) has them added to its insn->alts list, which will be 1958 * traversed in validate_branch(). 
 */
static int add_special_section_alts(struct objtool_file *file)
{
	struct list_head special_alts;
	struct instruction *orig_insn, *new_insn;
	struct special_alt *special_alt, *tmp;
	enum alternative_type alt_type;
	struct alternative *alt;
	struct alternative *a;

	if (special_get_alts(file->elf, &special_alts))
		return -1;

	list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {

		orig_insn = find_insn(file, special_alt->orig_sec,
				      special_alt->orig_off);
		if (!orig_insn) {
			ERROR_FUNC(special_alt->orig_sec, special_alt->orig_off,
				   "special: can't find orig instruction");
			return -1;
		}

		new_insn = NULL;
		if (!special_alt->group || special_alt->new_len) {
			new_insn = find_insn(file, special_alt->new_sec,
					     special_alt->new_off);
			if (!new_insn) {
				ERROR_FUNC(special_alt->new_sec, special_alt->new_off,
					   "special: can't find new instruction");
				return -1;
			}
		}

		/* Classify the entry and do its type-specific handling. */
		if (special_alt->group) {
			if (!special_alt->orig_len) {
				ERROR_INSN(orig_insn, "empty alternative entry");
				continue;
			}

			if (handle_group_alt(file, special_alt, orig_insn, &new_insn))
				return -1;

			alt_type = ALT_TYPE_INSTRUCTIONS;

		} else if (special_alt->jump_or_nop) {
			if (handle_jump_alt(file, special_alt, orig_insn, &new_insn))
				return -1;

			alt_type = ALT_TYPE_JUMP_TABLE;
		} else {
			alt_type = ALT_TYPE_EX_TABLE;
		}

		alt = calloc(1, sizeof(*alt));
		if (!alt) {
			ERROR_GLIBC("calloc");
			return -1;
		}

		alt->insn = new_insn;
		alt->type = alt_type;
		alt->next = NULL;

		/*
		 * Store alternatives in the same order they have been
		 * defined.
		 */
		if (!orig_insn->alts) {
			orig_insn->alts = alt;
		} else {
			for (a = orig_insn->alts; a->next; a = a->next)
				;
			a->next = alt;
		}

		list_del(&special_alt->list);
		free(special_alt);
	}

	if (opts.stats) {
		printf("jl\\\tNOP\tJMP\n");
		printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short);
		printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long);
	}

	return 0;
}

/* Default jump table entry resolution; arches may override. */
__weak unsigned long arch_jump_table_sym_offset(struct reloc *reloc, struct reloc *table)
{
	return reloc->sym->offset + reloc_addend(reloc);
}

/*
 * Walk the relocations of @insn's jump table and add one alternative per
 * valid entry.  Stops at the first reloc that no longer looks like part
 * of this table.
 */
static int add_jump_table(struct objtool_file *file, struct instruction *insn)
{
	unsigned long table_size = insn_jump_table_size(insn);
	struct symbol *pfunc = insn_func(insn)->pfunc;
	struct reloc *table = insn_jump_table(insn);
	struct instruction *dest_insn;
	unsigned int prev_offset = 0;
	struct reloc *reloc = table;
	struct alternative *alt;
	unsigned long sym_offset;

	/*
	 * Each @reloc is a switch table relocation which points to the target
	 * instruction.
	 */
	for_each_reloc_from(table->sec, reloc) {

		/* Check for the end of the table: */
		if (table_size && reloc_offset(reloc) - reloc_offset(table) >= table_size)
			break;
		if (reloc != table && is_jump_table(reloc))
			break;

		/* Make sure the table entries are consecutive: */
		if (prev_offset && reloc_offset(reloc) != prev_offset + arch_reloc_size(reloc))
			break;

		sym_offset = arch_jump_table_sym_offset(reloc, table);

		/* Detect function pointers from contiguous objects: */
		if (reloc->sym->sec == pfunc->sec && sym_offset == pfunc->offset)
			break;

		/*
		 * Clang sometimes leaves dangling unused jump table entries
		 * which point to the end of the function.  Ignore them.
		 */
		if (reloc->sym->sec == pfunc->sec &&
		    sym_offset == pfunc->offset + pfunc->len)
			goto next;

		dest_insn = find_insn(file, reloc->sym->sec, sym_offset);
		if (!dest_insn)
			break;

		/* Make sure the destination is in the same function: */
		if (!insn_func(dest_insn) || insn_func(dest_insn)->pfunc != pfunc)
			break;

		alt = calloc(1, sizeof(*alt));
		if (!alt) {
			ERROR_GLIBC("calloc");
			return -1;
		}

		alt->insn = dest_insn;
		alt->next = insn->alts;
		insn->alts = alt;
next:
		prev_offset = reloc_offset(reloc);
	}

	/* No entry was consumed at all: the table reference was bogus. */
	if (!prev_offset) {
		ERROR_INSN(insn, "can't find switch jump table");
		return -1;
	}

	return 0;
}

/*
 * find_jump_table() - Given a dynamic jump, find the switch jump table
 * associated with it.
 */
static void find_jump_table(struct objtool_file *file, struct symbol *func,
			    struct instruction *insn)
{
	struct reloc *table_reloc;
	struct instruction *dest_insn, *orig_insn = insn;
	unsigned long table_size;
	unsigned long sym_offset;

	/*
	 * Backward search using the @first_jump_src links, these help avoid
	 * much of the 'in between' code. Which avoids us getting confused by
	 * it.
	 */
	for (;
	     insn && insn_func(insn) && insn_func(insn)->pfunc == func;
	     insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) {

		/* A prior dynamic jump means we've left this table's region. */
		if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
			break;

		/* allow small jumps within the range */
		if (insn->type == INSN_JUMP_UNCONDITIONAL &&
		    insn->jump_dest &&
		    (insn->jump_dest->offset <= insn->offset ||
		     insn->jump_dest->offset > orig_insn->offset))
			break;

		table_reloc = arch_find_switch_table(file, insn, &table_size);
		if (!table_reloc)
			continue;

		sym_offset = table_reloc->sym->offset + reloc_addend(table_reloc);

		/* The first entry must land inside this function. */
		dest_insn = find_insn(file, table_reloc->sym->sec, sym_offset);
		if (!dest_insn || !insn_func(dest_insn) || insn_func(dest_insn)->pfunc != func)
			continue;

		set_jump_table(table_reloc);
		orig_insn->_jump_table = table_reloc;
		orig_insn->_jump_table_size = table_size;

		break;
	}
}

/*
 * First pass: Mark the head of each jump table so that in the next pass,
 * we know when a given jump table ends and the next one starts.
 */
static void mark_func_jump_tables(struct objtool_file *file,
				  struct symbol *func)
{
	struct instruction *insn, *last = NULL;

	func_for_each_insn(file, func, insn) {
		if (!last)
			last = insn;

		/*
		 * Store back-pointers for unconditional forward jumps such
		 * that find_jump_table() can back-track using those and
		 * avoid some potentially confusing code.
		 */
		if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
		    insn->offset > last->offset &&
		    insn->jump_dest->offset > insn->offset &&
		    !insn->jump_dest->first_jump_src) {

			insn->jump_dest->first_jump_src = insn;
			last = insn->jump_dest;
		}

		if (insn->type != INSN_JUMP_DYNAMIC)
			continue;

		find_jump_table(file, func, insn);
	}
}

/* Second pass: add the alternatives for every jump table found above. */
static int add_func_jump_tables(struct objtool_file *file,
				struct symbol *func)
{
	struct instruction *insn;

	func_for_each_insn(file, func, insn) {
		if (!insn_jump_table(insn))
			continue;

		if (add_jump_table(file, insn))
			return -1;
	}

	return 0;
}

/*
 * For some switch statements, gcc generates a jump table in the .rodata
 * section which contains a list of addresses within the function to jump to.
 * This finds these jump tables and adds them to the insn->alts lists.
 */
static int add_jump_table_alts(struct objtool_file *file)
{
	struct symbol *func;

	if (!file->rodata)
		return 0;

	for_each_sym(file->elf, func) {
		if (!is_func_sym(func) || func->alias != func)
			continue;

		mark_func_jump_tables(file, func);
		if (add_func_jump_tables(file, func))
			return -1;
	}

	return 0;
}

/* Initialize @state to the CFI state at function entry. */
static void set_func_state(struct cfi_state *state)
{
	state->cfa = initial_func_cfi.cfa;
	memcpy(&state->regs, &initial_func_cfi.regs,
	       CFI_NUM_REGS * sizeof(struct cfi_reg));
	state->stack_size = initial_func_cfi.cfa.offset;
	state->type = UNWIND_HINT_TYPE_CALL;
}

/*
 * Parse .discard.unwind_hints and attach the hinted CFI state to each
 * annotated instruction.
 */
static int read_unwind_hints(struct objtool_file *file)
{
	struct cfi_state cfi = init_cfi;
	struct section *sec;
	struct unwind_hint *hint;
	struct instruction *insn;
	struct reloc *reloc;
	unsigned long offset;
	int i;

	sec = find_section_by_name(file->elf, ".discard.unwind_hints");
	if (!sec)
		return 0;

	if (!sec->rsec) {
		ERROR("missing .rela.discard.unwind_hints section");
		return -1;
	}

	if (sec_size(sec) % sizeof(struct unwind_hint)) {
		ERROR("struct unwind_hint size mismatch");
		return -1;
	}

	file->hints = true;

	for (i = 0; i < sec_size(sec) / sizeof(struct unwind_hint); i++) {
		hint = (struct unwind_hint *)sec->data->d_buf + i;

		reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
		if (!reloc) {
			ERROR("can't find reloc for unwind_hints[%d]", i);
			return -1;
		}

		offset = reloc->sym->offset + reloc_addend(reloc);

		insn = find_insn(file, reloc->sym->sec, offset);
		if (!insn) {
			ERROR("can't find insn for unwind_hints[%d]", i);
			return -1;
		}

		insn->hint = true;

		if (hint->type == UNWIND_HINT_TYPE_UNDEFINED) {
			insn->cfi = &force_undefined_cfi;
			continue;
		}

		if (hint->type == UNWIND_HINT_TYPE_SAVE) {
			insn->hint = false;
			insn->save = true;
			continue;
		}

		if (hint->type == UNWIND_HINT_TYPE_RESTORE) {
			insn->restore = true;
			continue;
		}

		if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) {
			struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset);

			if (sym && is_global_sym(sym)) {
				if (opts.ibt && insn->type != INSN_ENDBR && !insn->noendbr) {
					ERROR_INSN(insn, "UNWIND_HINT_IRET_REGS without ENDBR");
					return -1;
				}
			}
		}

		if (hint->type == UNWIND_HINT_TYPE_FUNC) {
			insn->cfi = &func_cfi;
			continue;
		}

		/* Start from the insn's current CFI, if any, and apply the hint. */
		if (insn->cfi)
			cfi = *(insn->cfi);

		if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) {
			ERROR_INSN(insn, "unsupported unwind_hint sp base reg %d", hint->sp_reg);
			return -1;
		}

		cfi.cfa.offset = bswap_if_needed(file->elf, hint->sp_offset);
		cfi.type = hint->type;
		cfi.signal = hint->signal;

		insn->cfi = cfi_hash_find_or_add(&cfi);
	}

	return 0;
}

/*
 * Walk .discard.annotate_insn and invoke @func for every annotated
 * instruction, passing the annotation type.
 */
static int read_annotate(struct objtool_file *file,
			 int (*func)(struct objtool_file *file, int type, struct instruction *insn))
{
	struct section *sec;
	struct instruction *insn;
	struct reloc *reloc;
	uint64_t offset;
	int type;

	sec = find_section_by_name(file->elf, ".discard.annotate_insn");
	if (!sec)
		return 0;

	if (!sec->rsec)
		return 0;

	if (sec->sh.sh_entsize != 8) {
		static bool warned = false;
		if (!warned && opts.verbose) {
			WARN("%s: dodgy linker, sh_entsize != 8", sec->name);
			warned = true;
		}
		sec->sh.sh_entsize = 8;
	}

	if (sec_num_entries(sec) != sec_num_entries(sec->rsec)) {
		ERROR("bad .discard.annotate_insn section: missing relocs");
		return -1;
	}

	for_each_reloc(sec->rsec, reloc) {
		type = annotype(file->elf, sec, reloc);
		offset = reloc->sym->offset + reloc_addend(reloc);
		insn = find_insn(file, reloc->sym->sec, offset);

		if (!insn) {
			ERROR("bad .discard.annotate_insn entry: %d of type %d", reloc_idx(reloc), type);
			return -1;
		}

		if (func(file, type, insn))
			return -1;
	}

	return 0;
}

/* Annotations that must be applied before other decode passes. */
static int __annotate_early(struct objtool_file *file, int type, struct instruction *insn)
{
	switch (type) {

	/* Must be before add_special_section_alts() */
	case ANNOTYPE_IGNORE_ALTS:
		insn->ignore_alts = true;
		break;

	/*
	 * Must be before read_unwind_hints() since that needs insn->noendbr.
	 */
	case ANNOTYPE_NOENDBR:
		insn->noendbr = 1;
		break;

	default:
		/* All other annotations are handled in later passes. */
		break;
	}

	return 0;
}

/* Handle intra-function-call annotations: convert the CALL to a JMP. */
static int __annotate_ifc(struct objtool_file *file, int type, struct instruction *insn)
{
	unsigned long dest_off;

	if (type != ANNOTYPE_INTRA_FUNCTION_CALL)
		return 0;

	if (insn->type != INSN_CALL) {
		ERROR_INSN(insn, "intra_function_call not a direct call");
		return -1;
	}

	/*
	 * Treat intra-function CALLs as JMPs, but with a stack_op.
	 * See add_call_destinations(), which strips stack_ops from
	 * normal CALLs.
	 */
	insn->type = INSN_JUMP_UNCONDITIONAL;

	dest_off = arch_jump_destination(insn);
	insn->jump_dest = find_insn(file, insn->sec, dest_off);
	if (!insn->jump_dest) {
		ERROR_INSN(insn, "can't find call dest at %s+0x%lx",
			   insn->sec->name, dest_off);
		return -1;
	}

	return 0;
}

/* Annotations applied after jump/call destinations have been resolved. */
static int __annotate_late(struct objtool_file *file, int type, struct instruction *insn)
{
	struct symbol *sym;

	switch (type) {
	case ANNOTYPE_NOENDBR:
		/* early */
		break;

	case ANNOTYPE_RETPOLINE_SAFE:
		if (insn->type != INSN_JUMP_DYNAMIC &&
		    insn->type != INSN_CALL_DYNAMIC &&
		    insn->type != INSN_RETURN &&
		    insn->type != INSN_NOP) {
			ERROR_INSN(insn, "retpoline_safe hint not an indirect jump/call/ret/nop");
			return -1;
		}

		insn->retpoline_safe = true;
		break;

	case ANNOTYPE_INSTR_BEGIN:
		insn->instr++;
		break;

	case ANNOTYPE_INSTR_END:
		insn->instr--;
		break;

	case ANNOTYPE_UNRET_BEGIN:
		insn->unret = 1;
		break;

	case ANNOTYPE_IGNORE_ALTS:
		/* early */
		break;

	case ANNOTYPE_INTRA_FUNCTION_CALL:
		/* ifc */
		break;

	case ANNOTYPE_REACHABLE:
		insn->dead_end = false;
		break;

	case ANNOTYPE_NOCFI:
		sym = insn->sym;
if (!sym) { 2498 ERROR_INSN(insn, "dodgy NOCFI annotation"); 2499 return -1; 2500 } 2501 insn->sym->nocfi = 1; 2502 break; 2503 2504 default: 2505 ERROR_INSN(insn, "Unknown annotation type: %d", type); 2506 return -1; 2507 } 2508 2509 return 0; 2510 } 2511 2512 /* 2513 * Return true if name matches an instrumentation function, where calls to that 2514 * function from noinstr code can safely be removed, but compilers won't do so. 2515 */ 2516 static bool is_profiling_func(const char *name) 2517 { 2518 /* 2519 * Many compilers cannot disable KCOV with a function attribute. 2520 */ 2521 if (!strncmp(name, "__sanitizer_cov_", 16)) 2522 return true; 2523 2524 return false; 2525 } 2526 2527 static int classify_symbols(struct objtool_file *file) 2528 { 2529 struct symbol *func; 2530 size_t len; 2531 2532 for_each_sym(file->elf, func) { 2533 if (is_notype_sym(func) && strstarts(func->name, ".L")) 2534 func->local_label = true; 2535 2536 if (!is_global_sym(func)) 2537 continue; 2538 2539 if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR, 2540 strlen(STATIC_CALL_TRAMP_PREFIX_STR))) 2541 func->static_call_tramp = true; 2542 2543 if (arch_is_retpoline(func)) 2544 func->retpoline_thunk = true; 2545 2546 if (arch_is_rethunk(func)) 2547 func->return_thunk = true; 2548 2549 if (arch_is_embedded_insn(func)) 2550 func->embedded_insn = true; 2551 2552 if (arch_ftrace_match(func->name)) 2553 func->fentry = true; 2554 2555 if (is_profiling_func(func->name)) 2556 func->profiling_func = true; 2557 2558 len = strlen(func->name); 2559 if (len > sym_name_max_len) 2560 sym_name_max_len = len; 2561 } 2562 2563 return 0; 2564 } 2565 2566 static void mark_rodata(struct objtool_file *file) 2567 { 2568 struct section *sec; 2569 bool found = false; 2570 2571 /* 2572 * Search for the following rodata sections, each of which can 2573 * potentially contain jump tables: 2574 * 2575 * - .rodata: can contain GCC switch tables 2576 * - .rodata.<func>: same, if -fdata-sections is being used 2577 * - 
	 * - .data.rel.ro.c_jump_table: contains C annotated jump tables
	 *
	 * .rodata.str1.* sections are ignored; they don't contain jump tables.
	 */
	for_each_sec(file->elf, sec) {
		if ((!strncmp(sec->name, ".rodata", 7) &&
		     !strstr(sec->name, ".str1.")) ||
		    !strncmp(sec->name, ".data.rel.ro", 12)) {
			sec->rodata = true;
			found = true;
		}
	}

	file->rodata = found;
}

/*
 * Mark instructions which live in symbol "holes" (dropped weak symbols) so
 * later passes can skip them.  Only meaningful for whole-archive (vmlinux)
 * runs, hence the opts.link check.
 */
static void mark_holes(struct objtool_file *file)
{
	struct instruction *insn;
	bool in_hole = false;

	if (!opts.link)
		return;

	/*
	 * Whole archive runs might encounter dead code from weak symbols.
	 * This is where the linker will have dropped the weak symbol in
	 * favour of a regular symbol, but leaves the code in place.
	 */
	for_each_insn(file, insn) {
		if (insn->sym || !find_symbol_hole_containing(insn->sec, insn->offset)) {
			in_hole = false;
			continue;
		}

		/* Skip function padding and pfx code */
		if (!in_hole && insn->type == INSN_NOP)
			continue;

		in_hole = true;
		insn->hole = 1;

		/*
		 * If this hole jumps to a .cold function, mark it ignore.
		 */
		if (insn->jump_dest) {
			struct symbol *dest_func = insn_func(insn->jump_dest);

			if (dest_func && dest_func->cold)
				dest_func->ignore = true;
		}
	}
}

/* True if any option that requires the validate_branch() pass is enabled. */
static bool validate_branch_enabled(void)
{
	return opts.stackval ||
	       opts.orc ||
	       opts.uaccess ||
	       opts.checksum;
}

/*
 * Top-level decode driver.  NOTE: the pass ordering below is load-bearing;
 * see the individual comments for the inter-pass dependencies.
 */
static int decode_sections(struct objtool_file *file)
{
	file->klp = is_livepatch_module(file);

	mark_rodata(file);

	if (init_pv_ops(file))
		return -1;

	/*
	 * Must be before add_{jump_call}_destination.
	 */
	if (classify_symbols(file))
		return -1;

	if (decode_instructions(file))
		return -1;

	if (add_ignores(file))
		return -1;

	add_uaccess_safe(file);

	if (read_annotate(file, __annotate_early))
		return -1;

	/*
	 * Must be before add_jump_destinations(), which depends on 'func'
	 * being set for alternatives, to enable proper sibling call detection.
	 */
	if (validate_branch_enabled() || opts.noinstr || opts.hack_jump_label || opts.disas) {
		if (add_special_section_alts(file))
			return -1;
	}

	if (add_jump_destinations(file))
		return -1;

	/*
	 * Must be before add_call_destination(); it changes INSN_CALL to
	 * INSN_JUMP.
	 */
	if (read_annotate(file, __annotate_ifc))
		return -1;

	if (add_call_destinations(file))
		return -1;

	if (add_jump_table_alts(file))
		return -1;

	if (read_unwind_hints(file))
		return -1;

	/* Must be after add_jump_destinations() */
	mark_holes(file);

	/*
	 * Must be after add_call_destinations() such that it can override
	 * dead_end_function() marks.
	 */
	if (read_annotate(file, __annotate_late))
		return -1;

	return 0;
}

/* Calls to ftrace entries / embedded insns don't need a full stack frame. */
static bool is_special_call(struct instruction *insn)
{
	if (insn->type == INSN_CALL) {
		struct symbol *dest = insn_call_dest(insn);

		if (!dest)
			return false;

		if (dest->fentry || dest->embedded_insn)
			return true;
	}

	return false;
}

/* Has the CFI state diverged from the function-entry (initial) state? */
static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state)
{
	struct cfi_state *cfi = &state->cfi;
	int i;

	if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap)
		return true;

	if (cfi->cfa.offset != initial_func_cfi.cfa.offset)
		return true;

	if (cfi->stack_size != initial_func_cfi.cfa.offset)
		return true;

	for (i = 0; i < CFI_NUM_REGS; i++) {
		if (cfi->regs[i].base != initial_func_cfi.regs[i].base ||
		    cfi->regs[i].offset != initial_func_cfi.regs[i].offset)
			return true;
	}

	return false;
}

/* Is the register saved at the expected CFA-relative frame position? */
static bool check_reg_frame_pos(const struct cfi_reg *reg,
				int expected_offset)
{
	return reg->base == CFI_CFA &&
	       reg->offset == expected_offset;
}

/* True if a conventional frame-pointer (or DRAP) frame has been set up. */
static bool has_valid_stack_frame(struct insn_state *state)
{
	struct cfi_state *cfi = &state->cfi;

	if (cfi->cfa.base == CFI_BP &&
	    check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) &&
	    check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8))
		return true;

	if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP)
		return true;

	return false;
}

/*
 * Simplified CFI tracking for UNWIND_HINT_TYPE_REGS regions: only the
 * CFA offset is adjusted for pushes/pops/SP arithmetic.
 */
static int update_cfi_state_regs(struct instruction *insn,
				 struct cfi_state *cfi,
				 struct stack_op *op)
{
	struct cfi_reg *cfa = &cfi->cfa;

	if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
		return 0;

	/* push */
	if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
		cfa->offset += 8;

	/* pop */
	if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
		cfa->offset -= 8;

	/* add immediate to sp */
	if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD &&
	    op->dest.reg == CFI_SP && op->src.reg == CFI_SP)
		cfa->offset -= op->src.offset;

	return 0;
}

/* Record a callee-saved register's save slot, first save wins. */
static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset)
{
	if (arch_callee_saved_reg(reg) &&
	    cfi->regs[reg].base == CFI_UNDEFINED) {
		cfi->regs[reg].base = base;
		cfi->regs[reg].offset = offset;
	}
}

/* Reset a register's CFI tracking back to the function-entry state. */
static void restore_reg(struct cfi_state *cfi, unsigned char reg)
{
	cfi->regs[reg].base = initial_func_cfi.regs[reg].base;
	cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset;
}

/*
 * A note about DRAP stack alignment:
 *
 * GCC has the concept of a DRAP register, which is used to help keep track of
 * the stack pointer when aligning the stack.  r10 or r13 is used as the DRAP
 * register.  The typical DRAP pattern is:
 *
 *   4c 8d 54 24 08		lea    0x8(%rsp),%r10
 *   48 83 e4 c0		and    $0xffffffffffffffc0,%rsp
 *   41 ff 72 f8		pushq  -0x8(%r10)
 *   55				push   %rbp
 *   48 89 e5			mov    %rsp,%rbp
 *				(more pushes)
 *   41 52			push   %r10
 *				...
 *   41 5a			pop    %r10
 *				(more pops)
 *   5d				pop    %rbp
 *   49 8d 62 f8		lea    -0x8(%r10),%rsp
 *   c3				retq
 *
 * There are some variations in the epilogues, like:
 *
 *   5b				pop    %rbx
 *   41 5a			pop    %r10
 *   41 5c			pop    %r12
 *   41 5d			pop    %r13
 *   41 5e			pop    %r14
 *   c9				leaveq
 *   49 8d 62 f8		lea    -0x8(%r10),%rsp
 *   c3				retq
 *
 * and:
 *
 *   4c 8b 55 e8		mov    -0x18(%rbp),%r10
 *   48 8b 5d e0		mov    -0x20(%rbp),%rbx
 *   4c 8b 65 f0		mov    -0x10(%rbp),%r12
 *   4c 8b 6d f8		mov    -0x8(%rbp),%r13
 *   c9				leaveq
 *   49 8d 62 f8		lea    -0x8(%r10),%rsp
 *   c3				retq
 *
 * Sometimes r13 is used as the DRAP register, in which case it's saved and
 * restored beforehand:
 *
 *   41 55			push   %r13
 *   4c 8d 6c 24 10		lea    0x10(%rsp),%r13
 *   48 83 e4 f0		and    $0xfffffffffffffff0,%rsp
 *				...
 *   49 8d 65 f0		lea    -0x10(%r13),%rsp
 *   41 5d			pop    %r13
 *   c3				retq
 */

/*
 * Apply a single stack_op to the CFI state: track the CFA, stack size, and
 * callee-saved register save slots across pushes, pops, moves and stack
 * pointer arithmetic.  Returns 0 on success, 1 for a recoverable warning,
 * -1 on a fatal decode problem.
 */
static int update_cfi_state(struct instruction *insn,
			    struct instruction *next_insn,
			    struct cfi_state *cfi, struct stack_op *op)
{
	struct cfi_reg *cfa = &cfi->cfa;
	struct cfi_reg *regs = cfi->regs;

	/* ignore UNWIND_HINT_UNDEFINED regions */
	if (cfi->force_undefined)
		return 0;

	/* stack operations don't make sense with an undefined CFA */
	if (cfa->base == CFI_UNDEFINED) {
		if (insn_func(insn)) {
			WARN_INSN(insn, "undefined stack state");
			return 1;
		}
		return 0;
	}

	if (cfi->type == UNWIND_HINT_TYPE_REGS ||
	    cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL)
		return update_cfi_state_regs(insn, cfi, op);

	switch (op->dest.type) {

	case OP_DEST_REG:
		switch (op->src.type) {

		case OP_SRC_REG:
			if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
			    cfa->base == CFI_SP &&
			    check_reg_frame_pos(&regs[CFI_BP], -cfa->offset)) {

				/* mov %rsp, %rbp */
				cfa->base = op->dest.reg;
				cfi->bp_scratch = false;
			}

			else if (op->src.reg == CFI_SP &&
				 op->dest.reg == CFI_BP && cfi->drap) {

				/* drap: mov %rsp, %rbp */
				regs[CFI_BP].base = CFI_BP;
				regs[CFI_BP].offset = -cfi->stack_size;
				cfi->bp_scratch = false;
			}

			else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {

				/*
				 * mov %rsp, %reg
				 *
				 * This is needed for the rare case where GCC
				 * does:
				 *
				 *   mov    %rsp, %rax
				 *   ...
				 *   mov    %rax, %rsp
				 */
				cfi->vals[op->dest.reg].base = CFI_CFA;
				cfi->vals[op->dest.reg].offset = -cfi->stack_size;
			}

			else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
				 (cfa->base == CFI_BP || cfa->base == cfi->drap_reg)) {

				/*
				 * mov %rbp, %rsp
				 *
				 * Restore the original stack pointer (Clang).
				 */
				cfi->stack_size = -cfi->regs[CFI_BP].offset;
			}

			else if (op->dest.reg == cfa->base) {

				/* mov %reg, %rsp */
				if (cfa->base == CFI_SP &&
				    cfi->vals[op->src.reg].base == CFI_CFA) {

					/*
					 * This is needed for the rare case
					 * where GCC does something dumb like:
					 *
					 *   lea    0x8(%rsp), %rcx
					 *   ...
					 *   mov    %rcx, %rsp
					 */
					cfa->offset = -cfi->vals[op->src.reg].offset;
					cfi->stack_size = cfa->offset;

				} else if (cfa->base == CFI_SP &&
					   cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
					   cfi->vals[op->src.reg].offset == cfa->offset) {

					/*
					 * Stack swizzle:
					 *
					 * 1: mov %rsp, (%[tos])
					 * 2: mov %[tos], %rsp
					 *    ...
					 * 3: pop %rsp
					 *
					 * Where:
					 *
					 * 1 - places a pointer to the previous
					 *     stack at the Top-of-Stack of the
					 *     new stack.
					 *
					 * 2 - switches to the new stack.
					 *
					 * 3 - pops the Top-of-Stack to restore
					 *     the original stack.
					 *
					 * Note: we set base to SP_INDIRECT
					 * here and preserve offset. Therefore
					 * when the unwinder reaches ToS it
					 * will dereference SP and then add the
					 * offset to find the next frame, IOW:
					 * (%rsp) + offset.
					 */
					cfa->base = CFI_SP_INDIRECT;

				} else {
					cfa->base = CFI_UNDEFINED;
					cfa->offset = 0;
				}
			}

			else if (op->dest.reg == CFI_SP &&
				 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
				 cfi->vals[op->src.reg].offset == cfa->offset) {

				/*
				 * The same stack swizzle case 2) as above. But
				 * because we can't change cfa->base, case 3)
				 * will become a regular POP. Pretend we're a
				 * PUSH so things don't go unbalanced.
				 */
				cfi->stack_size += 8;
			}


			break;

		case OP_SRC_ADD:
			if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) {

				/* add imm, %rsp */
				cfi->stack_size -= op->src.offset;
				if (cfa->base == CFI_SP)
					cfa->offset -= op->src.offset;
				break;
			}

			if (op->dest.reg == CFI_BP && op->src.reg == CFI_SP &&
			    insn->sym->frame_pointer) {
				/* addi.d fp,sp,imm on LoongArch */
				if (cfa->base == CFI_SP && cfa->offset == op->src.offset) {
					cfa->base = CFI_BP;
					cfa->offset = 0;
				}
				break;
			}

			if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {
				/* addi.d sp,fp,imm on LoongArch */
				if (cfa->base == CFI_BP && cfa->offset == 0) {
					if (insn->sym->frame_pointer) {
						cfa->base = CFI_SP;
						cfa->offset = -op->src.offset;
					}
				} else {
					/* lea disp(%rbp), %rsp */
					cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
				}
				break;
			}

			if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {

				/* drap: lea disp(%rsp), %drap */
				cfi->drap_reg = op->dest.reg;

				/*
				 * lea disp(%rsp), %reg
				 *
				 * This is needed for the rare case where GCC
				 * does something dumb like:
				 *
				 *   lea    0x8(%rsp), %rcx
				 *   ...
				 *   mov    %rcx, %rsp
				 */
				cfi->vals[op->dest.reg].base = CFI_CFA;
				cfi->vals[op->dest.reg].offset = \
					-cfi->stack_size + op->src.offset;

				break;
			}

			if (cfi->drap && op->dest.reg == CFI_SP &&
			    op->src.reg == cfi->drap_reg) {

				/* drap: lea disp(%drap), %rsp */
				cfa->base = CFI_SP;
				cfa->offset = cfi->stack_size = -op->src.offset;
				cfi->drap_reg = CFI_UNDEFINED;
				cfi->drap = false;
				break;
			}

			if (op->dest.reg == cfi->cfa.base && !(next_insn && next_insn->hint)) {
				WARN_INSN(insn, "unsupported stack register modification");
				return -1;
			}

			break;

		case OP_SRC_AND:
			if (op->dest.reg != CFI_SP ||
			    (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
			    (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
				WARN_INSN(insn, "unsupported stack pointer realignment");
				return -1;
			}

			if (cfi->drap_reg != CFI_UNDEFINED) {
				/* drap: and imm, %rsp */
				cfa->base = cfi->drap_reg;
				cfa->offset = cfi->stack_size = 0;
				cfi->drap = true;
			}

			/*
			 * Older versions of GCC (4.8ish) realign the stack
			 * without DRAP, with a frame pointer.
			 */

			break;

		case OP_SRC_POP:
		case OP_SRC_POPF:
			if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) {

				/* pop %rsp; # restore from a stack swizzle */
				cfa->base = CFI_SP;
				break;
			}

			if (!cfi->drap && op->dest.reg == cfa->base) {

				/* pop %rbp */
				cfa->base = CFI_SP;
			}

			if (cfi->drap && cfa->base == CFI_BP_INDIRECT &&
			    op->dest.reg == cfi->drap_reg &&
			    cfi->drap_offset == -cfi->stack_size) {

				/* drap: pop %drap */
				cfa->base = cfi->drap_reg;
				cfa->offset = 0;
				cfi->drap_offset = -1;

			} else if (cfi->stack_size == -regs[op->dest.reg].offset) {

				/* pop %reg */
				restore_reg(cfi, op->dest.reg);
			}

			cfi->stack_size -= 8;
			if (cfa->base == CFI_SP)
				cfa->offset -= 8;

			break;

		case OP_SRC_REG_INDIRECT:
			if (!cfi->drap && op->dest.reg == cfa->base &&
			    op->dest.reg == CFI_BP) {

				/* mov disp(%rsp), %rbp */
				cfa->base = CFI_SP;
				cfa->offset = cfi->stack_size;
			}

			if (cfi->drap && op->src.reg == CFI_BP &&
			    op->src.offset == cfi->drap_offset) {

				/* drap: mov disp(%rbp), %drap */
				cfa->base = cfi->drap_reg;
				cfa->offset = 0;
				cfi->drap_offset = -1;
			}

			if (cfi->drap && op->src.reg == CFI_BP &&
			    op->src.offset == regs[op->dest.reg].offset) {

				/* drap: mov disp(%rbp), %reg */
				restore_reg(cfi, op->dest.reg);

			} else if (op->src.reg == cfa->base &&
				   op->src.offset == regs[op->dest.reg].offset + cfa->offset) {

				/* mov disp(%rbp), %reg */
				/* mov disp(%rsp), %reg */
				restore_reg(cfi, op->dest.reg);

			} else if (op->src.reg == CFI_SP &&
				   op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) {

				/* mov disp(%rsp), %reg */
				restore_reg(cfi, op->dest.reg);
			}

			break;

		default:
			WARN_INSN(insn, "unknown stack-related instruction");
			return -1;
		}

		break;

	case OP_DEST_PUSH:
	case OP_DEST_PUSHF:
		cfi->stack_size += 8;
		if (cfa->base == CFI_SP)
			cfa->offset += 8;

		if (op->src.type != OP_SRC_REG)
			break;

		if (cfi->drap) {
			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {

				/* drap: push %drap */
				cfa->base = CFI_BP_INDIRECT;
				cfa->offset = -cfi->stack_size;

				/* save drap so we know when to restore it */
				cfi->drap_offset = -cfi->stack_size;

			} else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) {

				/* drap: push %rbp */
				cfi->stack_size = 0;

			} else {

				/* drap: push %reg */
				save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size);
			}

		} else {

			/* push %reg */
			save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size);
		}

		/* detect when asm code uses rbp as a scratch register */
		if (opts.stackval && insn_func(insn) && op->src.reg == CFI_BP &&
		    cfa->base != CFI_BP)
			cfi->bp_scratch = true;
		break;

	case OP_DEST_REG_INDIRECT:

		if (cfi->drap) {
			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {

				/* drap: mov %drap, disp(%rbp) */
				cfa->base = CFI_BP_INDIRECT;
				cfa->offset = op->dest.offset;

				/* save drap offset so we know when to restore it */
				cfi->drap_offset = op->dest.offset;
			} else {

				/* drap: mov reg, disp(%rbp) */
				save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset);
			}

		} else if (op->dest.reg == cfa->base) {

			/* mov reg, disp(%rbp) */
			/* mov reg, disp(%rsp) */
			save_reg(cfi, op->src.reg, CFI_CFA,
				 op->dest.offset - cfi->cfa.offset);

		} else if (op->dest.reg == CFI_SP) {

			/* mov reg, disp(%rsp) */
			save_reg(cfi, op->src.reg, CFI_CFA,
				 op->dest.offset - cfi->stack_size);

		} else if (op->src.reg == CFI_SP && op->dest.offset == 0) {

			/* mov %rsp, (%reg); # setup a stack swizzle. */
			cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT;
			cfi->vals[op->dest.reg].offset = cfa->offset;
		}

		break;

	case OP_DEST_MEM:
		if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
			WARN_INSN(insn, "unknown stack-related memory operation");
			return -1;
		}

		/* pop mem */
		cfi->stack_size -= 8;
		if (cfa->base == CFI_SP)
			cfa->offset -= 8;

		break;

	default:
		WARN_INSN(insn, "unknown stack-related instruction");
		return -1;
	}

	return 0;
}

/*
 * The stack layouts of alternatives instructions can sometimes diverge when
 * they have stack modifications. That's fine as long as the potential stack
 * layouts don't conflict at any given potential instruction boundary.
 *
 * Flatten the CFIs of the different alternative code streams (both original
 * and replacement) into a single shared CFI array which can be used to detect
 * conflicts and nicely feed a linear array of ORC entries to the unwinder.
 */
static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn)
{
	struct cfi_state **alt_cfi;
	int group_off;

	if (!insn->alt_group)
		return 0;

	if (!insn->cfi) {
		WARN("CFI missing");
		return -1;
	}

	alt_cfi = insn->alt_group->cfi;
	/* Index into the shared per-group CFI array by byte offset. */
	group_off = insn->offset - insn->alt_group->first_insn->offset;

	if (!alt_cfi[group_off]) {
		alt_cfi[group_off] = insn->cfi;
	} else {
		if (cficmp(alt_cfi[group_off], insn->cfi)) {
			struct alt_group *orig_group = insn->alt_group->orig_group ?: insn->alt_group;
			struct instruction *orig = orig_group->first_insn;
			WARN_INSN(orig, "stack layout conflict in alternatives: %s",
				  offstr(insn->sec, insn->offset));
			return -1;
		}
	}

	return 0;
}

/*
 * Apply all of an instruction's stack_ops to the branch state, and track the
 * PUSHF/POPF save/restore of the uaccess (AC) flag via a small bit-stack.
 */
static int noinline handle_insn_ops(struct instruction *insn,
				    struct instruction *next_insn,
				    struct insn_state *state)
{
	struct insn_state prev_state __maybe_unused = *state;
	struct stack_op *op;
	int ret = 0;

	for (op = insn->stack_ops; op; op = op->next) {

		ret = update_cfi_state(insn, next_insn, &state->cfi, op);
		if (ret)
			goto done;

		if (!opts.uaccess || !insn->alt_group)
			continue;

		if (op->dest.type == OP_DEST_PUSHF) {
			if (!state->uaccess_stack) {
				/* Seed with a sentinel bit marking stack bottom. */
				state->uaccess_stack = 1;
			} else if (state->uaccess_stack >> 31) {
				WARN_INSN(insn, "PUSHF stack exhausted");
				ret = 1;
				goto done;
			}
			state->uaccess_stack <<= 1;
			state->uaccess_stack |= state->uaccess;
		}

		if (op->src.type == OP_SRC_POPF) {
			if (state->uaccess_stack) {
				state->uaccess = state->uaccess_stack & 1;
				state->uaccess_stack >>= 1;
				if (state->uaccess_stack == 1)
					state->uaccess_stack = 0;
			}
		}
	}

done:
	TRACE_INSN_STATE(insn, &prev_state, state);

	return ret;
}

/*
 * Compare the instruction's previously recorded CFI against the state of the
 * current branch; warn in detail on any divergence.  Used when two control
 * flow paths meet at the same instruction.
 */
static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
{
	struct cfi_state *cfi1 = insn->cfi;
	int i;

	if (!cfi1) {
		WARN("CFI missing");
		return false;
	}

	if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) {

		WARN_INSN(insn, "stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
			  cfi1->cfa.base, cfi1->cfa.offset,
			  cfi2->cfa.base, cfi2->cfa.offset);
		return false;

	}

	if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
		for (i = 0; i < CFI_NUM_REGS; i++) {

			if (!memcmp(&cfi1->regs[i], &cfi2->regs[i], sizeof(struct cfi_reg)))
				continue;

			WARN_INSN(insn, "stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
				  i, cfi1->regs[i].base, cfi1->regs[i].offset,
				  i, cfi2->regs[i].base, cfi2->regs[i].offset);
		}
		return false;
	}

	if (cfi1->type != cfi2->type) {

		WARN_INSN(insn, "stack state mismatch: type1=%d type2=%d",
			  cfi1->type, cfi2->type);
		return false;
	}

	if (cfi1->drap != cfi2->drap ||
	    (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) ||
	    (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) {

		WARN_INSN(insn, "stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
			  cfi1->drap, cfi1->drap_reg, cfi1->drap_offset,
			  cfi2->drap, cfi2->drap_reg, cfi2->drap_offset);
		return false;
	}

	return true;
}

/* NULL func (indirect call target) is treated as not uaccess-safe. */
static inline bool func_uaccess_safe(struct symbol *func)
{
	if (func)
		return func->uaccess_safe;

	return false;
}

/*
 * Human-readable name of a call destination for warnings.  Falls back to
 * "pv_ops[N]" for paravirt indirect calls, "{dynamic}" otherwise.  Note the
 * pvname buffer is static, so the returned pointer is only valid until the
 * next call.
 */
static inline const char *call_dest_name(struct instruction *insn)
{
	static char pvname[19];
	struct reloc *reloc;
	int idx;

	if (insn_call_dest(insn))
		return insn_call_dest(insn)->name;

	reloc = insn_reloc(NULL, insn);
	if (reloc && !strcmp(reloc->sym->name, "pv_ops")) {
		idx = (reloc_addend(reloc) / sizeof(void *));
		snprintf(pvname, sizeof(pvname), "pv_ops[%d]", idx);
		return pvname;
	}

	return "{dynamic}";
}

/*
 * Check whether all registered targets of a pv_ops[] slot live in noinstr
 * sections.  The result is cached in pv_ops[idx].clean (warns only once).
 */
static bool pv_call_dest(struct objtool_file *file, struct instruction *insn)
{
	struct symbol *target;
	struct reloc *reloc;
	int idx;

	reloc = insn_reloc(file, insn);
	if (!reloc || strcmp(reloc->sym->name, "pv_ops"))
		return false;

	idx = arch_insn_adjusted_addend(insn, reloc) / sizeof(void *);

	if (file->pv_ops[idx].clean)
		return true;

	file->pv_ops[idx].clean = true;

	list_for_each_entry(target, &file->pv_ops[idx].targets, pv_target) {
		if (!target->sec->noinstr) {
			WARN("pv_ops[%d]: %s", idx, target->name);
			file->pv_ops[idx].clean = false;
		}
	}

	return file->pv_ops[idx].clean;
}

/* Is this call destination safe to reach from noinstr code? */
static inline bool noinstr_call_dest(struct objtool_file *file,
				     struct instruction *insn,
				     struct symbol *func)
{
	/*
	 * We can't deal with indirect function calls at present;
	 * assume they're instrumented.
	 */
	if (!func) {
		if (file->pv_ops)
			return pv_call_dest(file, insn);

		return false;
	}

	/*
	 * If the symbol is from a noinstr section; we good.
	 */
	if (func->sec->noinstr)
		return true;

	/*
	 * If the symbol is a static_call trampoline, we can't tell.
	 */
	if (func->static_call_tramp)
		return true;

	/*
	 * The __ubsan_handle_*() calls are like WARN(), they only happen when
	 * something 'BAD' happened. At the risk of taking the machine down,
	 * let them proceed to get the message out.
	 */
	if (!strncmp(func->name, "__ubsan_handle_", 15))
		return true;

	return false;
}

/*
 * Validate a call instruction against the current branch state:
 * noinstr containment, UACCESS (AC flag), and DF must all be clean.
 * Returns non-zero (after warning) on a violation.
 */
static int validate_call(struct objtool_file *file,
			 struct instruction *insn,
			 struct insn_state *state)
{
	if (state->noinstr && state->instr <= 0 &&
	    !noinstr_call_dest(file, insn, insn_call_dest(insn))) {
		WARN_INSN(insn, "call to %s() leaves .noinstr.text section", call_dest_name(insn));
		return 1;
	}

	if (state->uaccess && !func_uaccess_safe(insn_call_dest(insn))) {
		WARN_INSN(insn, "call to %s() with UACCESS enabled", call_dest_name(insn));
		return 1;
	}

	if (state->df) {
		WARN_INSN(insn, "call to %s() with DF set", call_dest_name(insn));
		return 1;
	}

	return 0;
}

/* A sibling call is a tail-call: also requires a pristine stack frame. */
static int validate_sibling_call(struct objtool_file *file,
				 struct instruction *insn,
				 struct insn_state *state)
{
	if (insn_func(insn) && has_modified_stack_frame(insn, state)) {
		WARN_INSN(insn, "sibling call from callable instruction with modified stack frame");
		return 1;
	}

	return validate_call(file, insn, state);
}

/*
 * Validate a return instruction: instrumentation, UACCESS, DF and the stack
 * frame must all be back to their function-entry state.
 */
static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state)
{
	if (state->noinstr && state->instr > 0) {
		WARN_INSN(insn, "return with instrumentation enabled");
		return 1;
	}

	if (state->uaccess && !func_uaccess_safe(func)) {
		WARN_INSN(insn, "return with UACCESS enabled");
		return 1;
	}

	if (!state->uaccess && func_uaccess_safe(func)) {
		WARN_INSN(insn, "return with UACCESS disabled from a UACCESS-safe function");
		return 1;
	}

	if (state->df) {
		WARN_INSN(insn, "return with DF set");
		return 1;
	}

	if (func && has_modified_stack_frame(insn, state)) {
		WARN_INSN(insn, "return with modified stack frame");
		return 1;
	}

	if (state->cfi.bp_scratch) {
		WARN_INSN(insn, "BP used as a scratch register");
		return 1;
	}

	return 0;
}

static struct instruction *next_insn_to_validate(struct objtool_file *file,
						 struct instruction *insn)
{
	struct alt_group *alt_group = insn->alt_group;

	/*
	 * Simulate the fact that alternatives are patched in-place.  When the
	 * end of a replacement alt_group is reached, redirect objtool flow to
	 * the end of the original alt_group.
	 *
	 * insn->alts->insn -> alt_group->first_insn
	 *		  ...
	 *		  alt_group->last_insn
	 *		  [alt_group->nop]      -> next(orig_group->last_insn)
	 */
	if (alt_group) {
		if (alt_group->nop) {
			/* ->nop implies ->orig_group */
			if (insn == alt_group->last_insn)
				return alt_group->nop;
			if (insn == alt_group->nop)
				goto next_orig;
		}
		if (insn == alt_group->last_insn && alt_group->orig_group)
			goto next_orig;
	}

	return next_insn_same_sec(file, insn);

next_orig:
	return next_insn_same_sec(file, alt_group->orig_group->last_insn);
}

/*
 * Decide whether the original (unpatched) instruction stream of an alt group
 * should be skipped during validation.
 */
static bool skip_alt_group(struct instruction *insn)
{
	struct instruction *alt_insn = insn->alts ? insn->alts->insn : NULL;

	if (!insn->alt_group)
		return false;

	/* ANNOTATE_IGNORE_ALTERNATIVE */
	if (insn->alt_group->ignore) {
		TRACE_ALT(insn, "alt group ignored");
		return true;
	}

	/*
	 * For NOP patched with CLAC/STAC, only follow the latter to avoid
	 * impossible code paths combining patched CLAC with unpatched STAC
	 * or vice versa.
	 *
	 * ANNOTATE_IGNORE_ALTERNATIVE could have been used here, but Linus
	 * requested not to do that to avoid hurting .s file readability
	 * around CLAC/STAC alternative sites.
	 */

	if (!alt_insn)
		return false;

	/* Don't override ASM_{CLAC,STAC}_UNSAFE */
	if (alt_insn->alt_group && alt_insn->alt_group->ignore)
		return false;

	return alt_insn->type == INSN_CLAC || alt_insn->type == INSN_STAC;
}

/*
 * Parse the --debug-checksum option: a comma-separated list of function names
 * whose checksum computation should be debugged.  Unknown names only warn.
 */
static int checksum_debug_init(struct objtool_file *file)
{
	char *dup, *s;

	if (!opts.debug_checksum)
		return 0;

	/* Duplicate so the list can be tokenized destructively. */
	dup = strdup(opts.debug_checksum);
	if (!dup) {
		ERROR_GLIBC("strdup");
		return -1;
	}

	s = dup;
	while (*s) {
		struct symbol *func;
		char *comma;

		comma = strchr(s, ',');
		if (comma)
			*comma = '\0';

		func = find_symbol_by_name(file->elf, s);
		if (!func || !is_func_sym(func))
			WARN("--debug-checksum: can't find '%s'", s);
		else
			func->debug_checksum = 1;

		if (!comma)
			break;

		s = comma + 1;
	}

	free(dup);
	return 0;
}

/*
 * Fold an instruction into the function's checksum: its raw bytes, plus a
 * relocation-independent representation of any referenced symbol (name and
 * offset, or the referenced string's contents), so the checksum is stable
 * across layout changes.
 */
static void checksum_update_insn(struct objtool_file *file, struct symbol *func,
				 struct instruction *insn)
{
	struct reloc *reloc = insn_reloc(file, insn);
	unsigned long offset;
	struct symbol *sym;

	if (insn->fake)
		return;

	checksum_update(func, insn, insn->sec->data->d_buf + insn->offset, insn->len);

	if (!reloc) {
		struct symbol *call_dest = insn_call_dest(insn);

		if (call_dest)
			checksum_update(func, insn, call_dest->demangled_name,
					strlen(call_dest->demangled_name));
		return;
	}

	sym = reloc->sym;
	offset = arch_insn_adjusted_addend(insn, reloc);

	if (is_string_sec(sym->sec)) {
		char *str;

		/* Hash the string contents rather than its address. */
		str = sym->sec->data->d_buf + sym->offset + offset;
		checksum_update(func, insn, str, strlen(str));
		return;
	}

	if (is_sec_sym(sym)) {
		/* Resolve section-relative relocs to the contained symbol. */
		sym = find_symbol_containing(reloc->sym->sec, offset);
		if (!sym)
			return;

		offset -= sym->offset;
	}

	checksum_update(func, insn, sym->demangled_name, strlen(sym->demangled_name));
	checksum_update(func, insn, &offset, sizeof(offset));
}

static int validate_branch(struct objtool_file *file, struct symbol *func,
			   struct instruction *insn, struct insn_state state);
static int do_validate_branch(struct objtool_file *file, struct symbol *func,
			      struct instruction *insn, struct insn_state state);

/*
 * Validate a single instruction within a branch walk: CFI bookkeeping,
 * alternative recursion, and per-insn-type rules.  *dead_end reports whether
 * the caller's straight-line walk should stop here.
 */
static int validate_insn(struct objtool_file *file, struct symbol *func,
			 struct instruction *insn, struct insn_state *statep,
			 struct instruction *prev_insn, struct instruction *next_insn,
			 bool *dead_end)
{
	char *alt_name __maybe_unused = NULL;
	struct alternative *alt;
	u8 visited;
	int ret;

	/*
	 * Any returns before the end of this function are effectively dead
	 * ends, i.e. validate_branch() has reached the end of the branch.
	 */
	*dead_end = true;

	visited = VISITED_BRANCH << statep->uaccess;
	if (insn->visited & VISITED_BRANCH_MASK) {
		/* Converging paths must agree on the stack state. */
		if (!insn->hint && !insn_cfi_match(insn, &statep->cfi))
			return 1;

		if (insn->visited & visited) {
			TRACE_INSN(insn, "already visited");
			return 0;
		}
	} else {
		nr_insns_visited++;
	}

	if (statep->noinstr)
		statep->instr += insn->instr;

	if (insn->hint) {
		if (insn->restore) {
			struct instruction *save_insn, *i;

			/* Search backwards for the matching CFI save hint. */
			i = insn;
			save_insn = NULL;

			sym_for_each_insn_continue_reverse(file, func, i) {
				if (i->save) {
					save_insn = i;
					break;
				}
			}

			if (!save_insn) {
				WARN_INSN(insn, "no corresponding CFI save for CFI restore");
				return 1;
			}

			if (!save_insn->visited) {
				/*
				 * If the restore hint insn is at the
				 * beginning of a basic block and was
				 * branched to from elsewhere, and the
				 * save insn hasn't been visited yet,
				 * defer following this branch for now.
3796 * It will be seen later via the 3797 * straight-line path. 3798 */ 3799 if (!prev_insn) { 3800 TRACE_INSN(insn, "defer restore"); 3801 return 0; 3802 } 3803 3804 WARN_INSN(insn, "objtool isn't smart enough to handle this CFI save/restore combo"); 3805 return 1; 3806 } 3807 3808 insn->cfi = save_insn->cfi; 3809 nr_cfi_reused++; 3810 } 3811 3812 statep->cfi = *insn->cfi; 3813 } else { 3814 /* XXX track if we actually changed statep->cfi */ 3815 3816 if (prev_insn && !cficmp(prev_insn->cfi, &statep->cfi)) { 3817 insn->cfi = prev_insn->cfi; 3818 nr_cfi_reused++; 3819 } else { 3820 insn->cfi = cfi_hash_find_or_add(&statep->cfi); 3821 } 3822 } 3823 3824 insn->visited |= visited; 3825 3826 if (propagate_alt_cfi(file, insn)) 3827 return 1; 3828 3829 if (insn->alts) { 3830 for (alt = insn->alts; alt; alt = alt->next) { 3831 TRACE_ALT_BEGIN(insn, alt, alt_name); 3832 ret = validate_branch(file, func, alt->insn, *statep); 3833 TRACE_ALT_END(insn, alt, alt_name); 3834 if (ret) { 3835 BT_INSN(insn, "(alt)"); 3836 return ret; 3837 } 3838 } 3839 TRACE_ALT_INFO_NOADDR(insn, "/ ", "DEFAULT"); 3840 } 3841 3842 if (skip_alt_group(insn)) 3843 return 0; 3844 3845 if (handle_insn_ops(insn, next_insn, statep)) 3846 return 1; 3847 3848 switch (insn->type) { 3849 3850 case INSN_RETURN: 3851 TRACE_INSN(insn, "return"); 3852 return validate_return(func, insn, statep); 3853 3854 case INSN_CALL: 3855 case INSN_CALL_DYNAMIC: 3856 if (insn->type == INSN_CALL) 3857 TRACE_INSN(insn, "call"); 3858 else 3859 TRACE_INSN(insn, "indirect call"); 3860 3861 ret = validate_call(file, insn, statep); 3862 if (ret) 3863 return ret; 3864 3865 if (opts.stackval && func && !is_special_call(insn) && 3866 !has_valid_stack_frame(statep)) { 3867 WARN_INSN(insn, "call without frame pointer save/setup"); 3868 return 1; 3869 } 3870 3871 break; 3872 3873 case INSN_JUMP_CONDITIONAL: 3874 case INSN_JUMP_UNCONDITIONAL: 3875 if (is_sibling_call(insn)) { 3876 TRACE_INSN(insn, "sibling call"); 3877 ret = 
validate_sibling_call(file, insn, statep); 3878 if (ret) 3879 return ret; 3880 3881 } else if (insn->jump_dest) { 3882 if (insn->type == INSN_JUMP_UNCONDITIONAL) 3883 TRACE_INSN(insn, "unconditional jump"); 3884 else 3885 TRACE_INSN(insn, "jump taken"); 3886 3887 ret = validate_branch(file, func, insn->jump_dest, *statep); 3888 if (ret) { 3889 BT_INSN(insn, "(branch)"); 3890 return ret; 3891 } 3892 } 3893 3894 if (insn->type == INSN_JUMP_UNCONDITIONAL) 3895 return 0; 3896 3897 TRACE_INSN(insn, "jump not taken"); 3898 break; 3899 3900 case INSN_JUMP_DYNAMIC: 3901 case INSN_JUMP_DYNAMIC_CONDITIONAL: 3902 TRACE_INSN(insn, "indirect jump"); 3903 if (is_sibling_call(insn)) { 3904 ret = validate_sibling_call(file, insn, statep); 3905 if (ret) 3906 return ret; 3907 } 3908 3909 if (insn->type == INSN_JUMP_DYNAMIC) 3910 return 0; 3911 3912 break; 3913 3914 case INSN_SYSCALL: 3915 TRACE_INSN(insn, "syscall"); 3916 if (func && (!next_insn || !next_insn->hint)) { 3917 WARN_INSN(insn, "unsupported instruction in callable function"); 3918 return 1; 3919 } 3920 3921 break; 3922 3923 case INSN_SYSRET: 3924 TRACE_INSN(insn, "sysret"); 3925 if (func && (!next_insn || !next_insn->hint)) { 3926 WARN_INSN(insn, "unsupported instruction in callable function"); 3927 return 1; 3928 } 3929 3930 return 0; 3931 3932 case INSN_STAC: 3933 TRACE_INSN(insn, "stac"); 3934 if (!opts.uaccess) 3935 break; 3936 3937 if (statep->uaccess) { 3938 WARN_INSN(insn, "recursive UACCESS enable"); 3939 return 1; 3940 } 3941 3942 statep->uaccess = true; 3943 break; 3944 3945 case INSN_CLAC: 3946 TRACE_INSN(insn, "clac"); 3947 if (!opts.uaccess) 3948 break; 3949 3950 if (!statep->uaccess && func) { 3951 WARN_INSN(insn, "redundant UACCESS disable"); 3952 return 1; 3953 } 3954 3955 if (func_uaccess_safe(func) && !statep->uaccess_stack) { 3956 WARN_INSN(insn, "UACCESS-safe disables UACCESS"); 3957 return 1; 3958 } 3959 3960 statep->uaccess = false; 3961 break; 3962 3963 case INSN_STD: 3964 TRACE_INSN(insn, "std"); 
		if (statep->df) {
			WARN_INSN(insn, "recursive STD");
			return 1;
		}

		statep->df = true;
		break;

	case INSN_CLD:
		TRACE_INSN(insn, "cld");
		if (!statep->df && func) {
			WARN_INSN(insn, "redundant CLD");
			return 1;
		}

		statep->df = false;
		break;

	default:
		break;
	}

	if (insn->dead_end)
		TRACE_INSN(insn, "dead end");

	/* tell the caller whether the straight-line walk stops here */
	*dead_end = insn->dead_end;
	return 0;
}

/*
 * Follow the branch starting at the given instruction, and recursively follow
 * any other branches (jumps). Meanwhile, track the frame pointer state at
 * each instruction and validate all the rules described in
 * tools/objtool/Documentation/objtool.txt.
 */
static int do_validate_branch(struct objtool_file *file, struct symbol *func,
			      struct instruction *insn, struct insn_state state)
{
	struct instruction *next_insn, *prev_insn = NULL;
	bool dead_end;
	int ret;

	if (func && func->ignore)
		return 0;

	do {
		insn->trace = 0;
		next_insn = next_insn_to_validate(file, insn);

		if (opts.checksum && func && insn->sec)
			checksum_update_insn(file, func, insn);

		/* detect falling through into a different function */
		if (func && insn_func(insn) && func != insn_func(insn)->pfunc) {
			/* Ignore KCFI type preambles, which always fall through */
			if (is_prefix_func(func))
				return 0;

			if (file->ignore_unreachables)
				return 0;

			WARN("%s() falls through to next function %s()",
			     func->name, insn_func(insn)->name);
			func->warned = 1;

			return 1;
		}

		ret = validate_insn(file, func, insn, &state, prev_insn, next_insn,
				    &dead_end);

		if (!insn->trace) {
			if (ret)
				TRACE_INSN(insn, "warning (%d)", ret);
			else
				TRACE_INSN(insn, NULL);
		}

		/* running off the end of a section is only OK with undefined CFA */
		if (!dead_end && !next_insn) {
			if (state.cfi.cfa.base == CFI_UNDEFINED)
				return 0;
			if (file->ignore_unreachables)
				return 0;

			WARN("%s%sunexpected end of section %s",
			     func ? func->name : "", func ? "(): " : "",
			     insn->sec->name);
			return 1;
		}

		prev_insn = insn;
		insn = next_insn;

	} while (!dead_end);

	return ret;
}

/*
 * Wrapper for do_validate_branch() that maintains the trace nesting depth
 * around the recursion.
 */
static int validate_branch(struct objtool_file *file, struct symbol *func,
			   struct instruction *insn, struct insn_state state)
{
	int ret;

	trace_depth_inc();
	ret = do_validate_branch(file, func, insn, state);
	trace_depth_dec();

	return ret;
}

/*
 * Start a branch walk at an unwind-hinted instruction that wasn't already
 * reached via normal function validation.
 */
static int validate_unwind_hint(struct objtool_file *file,
				struct instruction *insn,
				struct insn_state *state)
{
	if (insn->hint && !insn->visited) {
		struct symbol *func = insn_func(insn);
		int ret;

		if (opts.checksum)
			checksum_init(func);

		ret = validate_branch(file, func, insn, *state);
		if (ret)
			BT_INSN(insn, "<=== (hint)");
		return ret;
	}

	return 0;
}

/*
 * Validate all unwind hints in @sec, or in the whole file when @sec is NULL.
 * Returns the number of warnings.
 */
static int validate_unwind_hints(struct objtool_file *file, struct section *sec)
{
	struct instruction *insn;
	struct insn_state state;
	int warnings = 0;

	if (!file->hints)
		return 0;

	init_insn_state(file, &state, sec);

	if (sec) {
		sec_for_each_insn(file, sec, insn)
			warnings += validate_unwind_hint(file, insn, &state);
	} else {
		for_each_insn(file, insn)
			warnings += validate_unwind_hint(file, insn, &state);
	}

	return warnings;
}

/*
 * Validate rethunk entry constraint: must untrain RET before the first RET.
 *
 * Follow every branch (intra-function) and ensure VALIDATE_UNRET_END comes
 * before an actual RET instruction.
4121 */ 4122 static int validate_unret(struct objtool_file *file, struct instruction *insn) 4123 { 4124 struct instruction *next, *dest; 4125 int ret; 4126 4127 for (;;) { 4128 next = next_insn_to_validate(file, insn); 4129 4130 if (insn->visited & VISITED_UNRET) 4131 return 0; 4132 4133 insn->visited |= VISITED_UNRET; 4134 4135 if (insn->alts) { 4136 struct alternative *alt; 4137 for (alt = insn->alts; alt; alt = alt->next) { 4138 ret = validate_unret(file, alt->insn); 4139 if (ret) { 4140 BT_INSN(insn, "(alt)"); 4141 return ret; 4142 } 4143 } 4144 } 4145 4146 switch (insn->type) { 4147 4148 case INSN_CALL_DYNAMIC: 4149 case INSN_JUMP_DYNAMIC: 4150 case INSN_JUMP_DYNAMIC_CONDITIONAL: 4151 WARN_INSN(insn, "early indirect call"); 4152 return 1; 4153 4154 case INSN_JUMP_UNCONDITIONAL: 4155 case INSN_JUMP_CONDITIONAL: 4156 if (!is_sibling_call(insn)) { 4157 if (!insn->jump_dest) { 4158 WARN_INSN(insn, "unresolved jump target after linking?!?"); 4159 return 1; 4160 } 4161 ret = validate_unret(file, insn->jump_dest); 4162 if (ret) { 4163 BT_INSN(insn, "(branch%s)", 4164 insn->type == INSN_JUMP_CONDITIONAL ? "-cond" : ""); 4165 return ret; 4166 } 4167 4168 if (insn->type == INSN_JUMP_UNCONDITIONAL) 4169 return 0; 4170 4171 break; 4172 } 4173 4174 /* fallthrough */ 4175 case INSN_CALL: 4176 dest = find_insn(file, insn_call_dest(insn)->sec, 4177 insn_call_dest(insn)->offset); 4178 if (!dest) { 4179 WARN("Unresolved function after linking!?: %s", 4180 insn_call_dest(insn)->name); 4181 return 1; 4182 } 4183 4184 ret = validate_unret(file, dest); 4185 if (ret) { 4186 BT_INSN(insn, "(call)"); 4187 return ret; 4188 } 4189 /* 4190 * If a call returns without error, it must have seen UNTRAIN_RET. 4191 * Therefore any non-error return is a success. 
4192 */ 4193 return 0; 4194 4195 case INSN_RETURN: 4196 WARN_INSN(insn, "RET before UNTRAIN"); 4197 return 1; 4198 4199 case INSN_SYSCALL: 4200 break; 4201 4202 case INSN_SYSRET: 4203 return 0; 4204 4205 case INSN_NOP: 4206 if (insn->retpoline_safe) 4207 return 0; 4208 break; 4209 4210 default: 4211 break; 4212 } 4213 4214 if (insn->dead_end) 4215 return 0; 4216 4217 if (!next) { 4218 WARN_INSN(insn, "teh end!"); 4219 return 1; 4220 } 4221 insn = next; 4222 } 4223 4224 return 0; 4225 } 4226 4227 /* 4228 * Validate that all branches starting at VALIDATE_UNRET_BEGIN encounter 4229 * VALIDATE_UNRET_END before RET. 4230 */ 4231 static int validate_unrets(struct objtool_file *file) 4232 { 4233 struct instruction *insn; 4234 int warnings = 0; 4235 4236 for_each_insn(file, insn) { 4237 if (!insn->unret) 4238 continue; 4239 4240 warnings += validate_unret(file, insn); 4241 } 4242 4243 return warnings; 4244 } 4245 4246 static int validate_retpoline(struct objtool_file *file) 4247 { 4248 struct instruction *insn; 4249 int warnings = 0; 4250 4251 for_each_insn(file, insn) { 4252 if (insn->type != INSN_JUMP_DYNAMIC && 4253 insn->type != INSN_CALL_DYNAMIC && 4254 insn->type != INSN_RETURN) 4255 continue; 4256 4257 if (insn->retpoline_safe) 4258 continue; 4259 4260 if (insn->sec->init) 4261 continue; 4262 4263 if (insn->type == INSN_RETURN) { 4264 if (opts.rethunk) { 4265 WARN_INSN(insn, "'naked' return found in MITIGATION_RETHUNK build"); 4266 warnings++; 4267 } 4268 continue; 4269 } 4270 4271 WARN_INSN(insn, "indirect %s found in MITIGATION_RETPOLINE build", 4272 insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call"); 4273 warnings++; 4274 } 4275 4276 if (!opts.cfi) 4277 return warnings; 4278 4279 /* 4280 * kCFI call sites look like: 4281 * 4282 * movl $(-0x12345678), %r10d 4283 * addl -4(%r11), %r10d 4284 * jz 1f 4285 * ud2 4286 * 1: cs call __x86_indirect_thunk_r11 4287 * 4288 * Verify all indirect calls are kCFI adorned by checking for the 4289 * UD2. 
Notably, doing __nocfi calls to regular (cfi) functions is
	 * broken.
	 */
	list_for_each_entry(insn, &file->retpoline_call_list, call_node) {
		struct symbol *sym = insn->sym;

		if (sym && (sym->type == STT_NOTYPE ||
			    sym->type == STT_FUNC) && !sym->nocfi) {
			struct instruction *prev =
				prev_insn_same_sym(file, insn);

			/* the UD2 (INSN_BUG) must immediately precede the call */
			if (!prev || prev->type != INSN_BUG) {
				WARN_INSN(insn, "no-cfi indirect call!");
				warnings++;
			}
		}
	}

	return warnings;
}

/* Is this a call to KASAN's no-return handler? */
static bool is_kasan_insn(struct instruction *insn)
{
	return (insn->type == INSN_CALL &&
		!strcmp(insn_call_dest(insn)->name, "__asan_handle_no_return"));
}

/* Is this a call to UBSAN's unreachable handler? */
static bool is_ubsan_insn(struct instruction *insn)
{
	return (insn->type == INSN_CALL &&
		!strcmp(insn_call_dest(insn)->name,
			"__ubsan_handle_builtin_unreachable"));
}

/*
 * Decide whether an instruction that was never reached by validation is
 * benign (padding, compiler-inserted traps, sanitizer artifacts, ...) and
 * should not trigger an "unreachable instruction" warning.
 */
static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn)
{
	struct symbol *func = insn_func(insn);
	struct instruction *prev_insn;
	int i;

	if (insn->type == INSN_NOP || insn->type == INSN_TRAP ||
	    insn->hole || (func && func->ignore))
		return true;

	/*
	 * Ignore alternative replacement instructions. This can happen
	 * when a whitelisted function uses one of the ALTERNATIVE macros.
	 */
	if (!strcmp(insn->sec->name, ".altinstr_replacement") ||
	    !strcmp(insn->sec->name, ".altinstr_aux"))
		return true;

	if (!func)
		return false;

	if (func->static_call_tramp)
		return true;

	/*
	 * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
	 * __builtin_unreachable(). The BUG() macro has an unreachable() after
	 * the UD2, which causes GCC's undefined trap logic to emit another UD2
	 * (or occasionally a JMP to UD2).
	 *
	 * It may also insert a UD2 after calling a __noreturn function.
	 */
	prev_insn = prev_insn_same_sec(file, insn);
	if (prev_insn && prev_insn->dead_end &&
	    (insn->type == INSN_BUG ||
	     (insn->type == INSN_JUMP_UNCONDITIONAL &&
	      insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
		return true;

	/*
	 * Check if this (or a subsequent) instruction is related to
	 * CONFIG_UBSAN or CONFIG_KASAN.
	 *
	 * End the search at 5 instructions to avoid going into the weeds.
	 */
	for (i = 0; i < 5; i++) {

		if (is_kasan_insn(insn) || is_ubsan_insn(insn))
			return true;

		if (insn->type == INSN_JUMP_UNCONDITIONAL) {
			/* only follow jumps that stay within this function */
			if (insn->jump_dest &&
			    insn_func(insn->jump_dest) == func) {
				insn = insn->jump_dest;
				continue;
			}

			break;
		}

		if (insn->offset + insn->len >= func->offset + func->len)
			break;

		insn = next_insn_same_sec(file, insn);
	}

	return false;
}

/*
 * For FineIBT or kCFI, a certain number of bytes preceding the function may be
 * NOPs. Those NOPs may be rewritten at runtime and executed, so give them a
 * proper function name: __pfx_<func>.
 *
 * The NOPs may not exist for the following cases:
 *
 * - compiler cloned functions (*.cold, *.part0, etc)
 * - asm functions created with inline asm or without SYM_FUNC_START()
 *
 * Also, the function may already have a prefix from a previous objtool run
 * (livepatch extracted functions, or manually running objtool multiple times).
 *
 * So return 0 if the NOPs are missing or the function already has a prefix
 * symbol.
 */
static int create_prefix_symbol(struct objtool_file *file, struct symbol *func)
{
	struct instruction *insn, *prev;
	char name[SYM_NAME_LEN];
	struct cfi_state *cfi;

	if (!is_func_sym(func) || is_prefix_func(func) ||
	    func->cold || func->static_call_tramp)
		return 0;

	/* sizeof("__pfx_") includes the NUL, covering the final string */
	if ((strlen(func->name) + sizeof("__pfx_") > SYM_NAME_LEN)) {
		WARN("%s: symbol name too long, can't create __pfx_ symbol",
		     func->name);
		return 0;
	}

	if (snprintf_check(name, SYM_NAME_LEN, "__pfx_%s", func->name))
		return -1;

	/* livepatch objects may already carry the prefix symbol */
	if (file->klp) {
		struct symbol *pfx;

		pfx = find_symbol_by_offset(func->sec, func->offset - opts.prefix);
		if (pfx && is_prefix_func(pfx) && !strcmp(pfx->name, name))
			return 0;
	}

	insn = find_insn(file, func->sec, func->offset);
	if (!insn) {
		WARN("%s: can't find starting instruction", func->name);
		return -1;
	}

	/*
	 * Walk backwards over the padding; create the symbol only if there
	 * are exactly opts.prefix bytes of NOPs in front of the function.
	 */
	for (prev = prev_insn_same_sec(file, insn);
	     prev;
	     prev = prev_insn_same_sec(file, prev)) {
		u64 offset;

		if (prev->type != INSN_NOP)
			return 0;

		offset = func->offset - prev->offset;

		if (offset > opts.prefix)
			return 0;

		if (offset < opts.prefix)
			continue;

		if (!elf_create_symbol(file->elf, name, func->sec,
				       GELF_ST_BIND(func->sym.st_info),
				       GELF_ST_TYPE(func->sym.st_info),
				       prev->offset, opts.prefix))
			return -1;

		break;
	}

	if (!prev)
		return 0;

	if (!insn->cfi) {
		/*
		 * This can happen if stack validation isn't enabled or the
		 * function is annotated with STACK_FRAME_NON_STANDARD.
		 */
		return 0;
	}

	/* Propagate insn->cfi to the prefix code */
	cfi = cfi_hash_find_or_add(insn->cfi);
	for (; prev != insn; prev = next_insn_same_sec(file, prev))
		prev->cfi = cfi;

	return 0;
}

/*
 * Create __pfx_<func> symbols for every function in every text section.
 * Returns 0 on success, -1 on error.
 */
static int create_prefix_symbols(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;

	for_each_sec(file->elf, sec) {
		if (!is_text_sec(sec))
			continue;

		sec_for_each_sym(sec, func) {
			if (create_prefix_symbol(file, func))
				return -1;
		}
	}

	return 0;
}

/*
 * Validate one function symbol: sanity-check its size annotation, skip
 * aliases/subfunctions, then run a branch walk from its first instruction.
 * Returns non-zero when warnings were issued.
 */
static int validate_symbol(struct objtool_file *file, struct section *sec,
			   struct symbol *sym, struct insn_state *state)
{
	struct instruction *insn;
	struct symbol *func;
	int ret;

	if (!sym->len) {
		WARN("%s() is missing an ELF size annotation", sym->name);
		return 1;
	}

	/* only validate parent functions via their canonical symbol */
	if (sym->pfunc != sym || sym->alias != sym)
		return 0;

	insn = find_insn(file, sec, sym->offset);
	if (!insn || insn->visited)
		return 0;

	if (opts.uaccess)
		state->uaccess = sym->uaccess_safe;

	func = insn_func(insn);

	if (opts.checksum)
		checksum_init(func);

	/* --trace: enable per-insn tracing for matching symbol names */
	if (opts.trace && !fnmatch(opts.trace, sym->name, 0)) {
		trace_enable();
		TRACE("%s: validation begin\n", sym->name);
	}

	ret = validate_branch(file, func, insn, *state);
	if (ret)
		BT_INSN(insn, "<=== (sym)");

	TRACE("%s: validation %s\n\n", sym->name, ret ?
"failed" : "end");
	trace_disable();

	if (opts.checksum)
		checksum_finish(func);

	return ret;
}

/*
 * Validate every function symbol in @sec, each with a fresh per-function
 * insn state.  Returns the warning count.
 */
static int validate_section(struct objtool_file *file, struct section *sec)
{
	struct insn_state state;
	struct symbol *func;
	int warnings = 0;

	sec_for_each_sym(sec, func) {
		if (!is_func_sym(func))
			continue;

		init_insn_state(file, &state, sec);
		set_func_state(&state.cfi);

		warnings += validate_symbol(file, sec, func, &state);
	}

	return warnings;
}

/*
 * --noinstr: only validate the sections that must stay instrumentation-free.
 */
static int validate_noinstr_sections(struct objtool_file *file)
{
	struct section *sec;
	int warnings = 0;

	sec = find_section_by_name(file->elf, ".noinstr.text");
	if (sec) {
		warnings += validate_section(file, sec);
		warnings += validate_unwind_hints(file, sec);
	}

	sec = find_section_by_name(file->elf, ".entry.text");
	if (sec) {
		warnings += validate_section(file, sec);
		warnings += validate_unwind_hints(file, sec);
	}

	sec = find_section_by_name(file->elf, ".cpuidle.text");
	if (sec) {
		warnings += validate_section(file, sec);
		warnings += validate_unwind_hints(file, sec);
	}

	return warnings;
}

/* Validate every text section in the object.  Returns the warning count. */
static int validate_functions(struct objtool_file *file)
{
	struct section *sec;
	int warnings = 0;

	for_each_sec(file->elf, sec) {
		if (!is_text_sec(sec))
			continue;

		warnings += validate_section(file, sec);
	}

	return warnings;
}

/* Remove an ENDBR from the seal list: it has a legitimate referencer. */
static void mark_endbr_used(struct instruction *insn)
{
	if (!list_empty(&insn->call_node))
		list_del_init(&insn->call_node);
}

/*
 * Return true if @insn is the first byte after a symbol whose first
 * instruction is ENDBR or annotated noendbr -- the typical way code ranges
 * are referenced by their one-past-the-end address.
 */
static bool noendbr_range(struct objtool_file *file, struct instruction *insn)
{
	/* offset-1 so an end-of-symbol address maps to the symbol itself */
	struct symbol *sym = find_symbol_containing(insn->sec, insn->offset-1);
	struct instruction *first;

	if (!sym)
		return false;

	first = find_insn(file, sym->sec, sym->offset);
	if (!first)
		return false;

	if (first->type != INSN_ENDBR && !first->noendbr)
		return false;

	return insn->offset == sym->offset + sym->len;
}

/*
 * Check a single code reference from @insn to @dest against the IBT rules;
 * warn (and return 1) unless the destination is an ENDBR, a self-reference,
 * or explicitly annotated/implied noendbr.
 */
static int __validate_ibt_insn(struct objtool_file *file, struct instruction *insn,
			       struct instruction *dest)
{
	if (dest->type == INSN_ENDBR) {
		mark_endbr_used(dest);
		return 0;
	}

	if (insn_func(dest) && insn_func(insn) &&
	    insn_func(dest)->pfunc == insn_func(insn)->pfunc) {
		/*
		 * Anything from->to self is either _THIS_IP_ or
		 * IRET-to-self.
		 *
		 * There is no sane way to annotate _THIS_IP_ since the
		 * compiler treats the relocation as a constant and is
		 * happy to fold in offsets, skewing any annotation we
		 * do, leading to vast amounts of false-positives.
		 *
		 * There's also compiler generated _THIS_IP_ through
		 * KCOV and such which we have no hope of annotating.
		 *
		 * As such, blanket accept self-references without
		 * issue.
		 */
		return 0;
	}

	/*
	 * Accept anything ANNOTATE_NOENDBR.
	 */
	if (dest->noendbr)
		return 0;

	/*
	 * Accept if this is the instruction after a symbol
	 * that is (no)endbr -- typical code-range usage.
	 */
	if (noendbr_range(file, dest))
		return 0;

	WARN_INSN(insn, "relocation to !ENDBR: %s", offstr(dest->sec, dest->offset));
	return 1;
}

/*
 * Check all code references made by @insn (function pointer loads, not
 * branches) against the IBT rules.  Returns the warning count.
 */
static int validate_ibt_insn(struct objtool_file *file, struct instruction *insn)
{
	struct instruction *dest;
	struct reloc *reloc;
	unsigned long off;
	int warnings = 0;

	/*
	 * Looking for function pointer load relocations. Ignore
	 * direct/indirect branches:
	 */
	switch (insn->type) {

	case INSN_CALL:
	case INSN_CALL_DYNAMIC:
	case INSN_JUMP_CONDITIONAL:
	case INSN_JUMP_UNCONDITIONAL:
	case INSN_JUMP_DYNAMIC:
	case INSN_JUMP_DYNAMIC_CONDITIONAL:
	case INSN_RETURN:
	case INSN_NOP:
		return 0;

	case INSN_LEA_RIP:
		if (!insn_reloc(file, insn)) {
			/* local function pointer reference without reloc */

			off = arch_jump_destination(insn);

			dest = find_insn(file, insn->sec, off);
			if (!dest) {
				WARN_INSN(insn, "corrupt function pointer reference");
				return 1;
			}

			return __validate_ibt_insn(file, insn, dest);
		}
		break;

	default:
		break;
	}

	/* an instruction may carry several relocations; check each one */
	for (reloc = insn_reloc(file, insn);
	     reloc;
	     reloc = find_reloc_by_dest_range(file->elf, insn->sec,
					      reloc_offset(reloc) + 1,
					      (insn->offset + insn->len) - (reloc_offset(reloc) + 1))) {

		off = reloc->sym->offset + arch_insn_adjusted_addend(insn, reloc);

		dest = find_insn(file, reloc->sym->sec, off);
		if (!dest)
			continue;

		warnings += __validate_ibt_insn(file, insn, dest);
	}

	return warnings;
}

/*
 * Check a data-section relocation that targets text against the IBT rules.
 * Returns 1 when a warning was issued.
 */
static int validate_ibt_data_reloc(struct objtool_file *file,
				   struct reloc *reloc)
{
	struct instruction *dest;

	dest = find_insn(file, reloc->sym->sec,
			 reloc->sym->offset + reloc_addend(reloc));
	if (!dest)
		return 0;

	if (dest->type == INSN_ENDBR) {
		mark_endbr_used(dest);
		return 0;
	}

	if (dest->noendbr)
		return 0;

	WARN_FUNC(reloc->sec->base, reloc_offset(reloc),
		  "data relocation to !ENDBR: %s", offstr(dest->sec, dest->offset));

	return 1;
}

/*
 * Validate IBT rules and remove used ENDBR instructions from the seal list.
 * Unused ENDBR instructions will be annotated for sealing (i.e., replaced with
 * NOPs) later, in create_ibt_endbr_seal_sections().
 */
static int validate_ibt(struct objtool_file *file)
{
	struct section *sec;
	struct reloc *reloc;
	struct instruction *insn;
	int warnings = 0;

	for_each_insn(file, insn)
		warnings += validate_ibt_insn(file, insn);

	for_each_insn(file->elf, sec) {

		/* Already done by validate_ibt_insn() */
		if (is_text_sec(sec))
			continue;

		if (!sec->rsec)
			continue;

		/*
		 * These sections can reference text addresses, but not with
		 * the intent to indirect branch to them.
		 */
		if ((!strncmp(sec->name, ".discard", 8) &&
		     strcmp(sec->name, ".discard.ibt_endbr_noseal")) ||
		    !strncmp(sec->name, ".debug", 6) ||
		    !strcmp(sec->name, ".altinstructions") ||
		    !strcmp(sec->name, ".ibt_endbr_seal") ||
		    !strcmp(sec->name, ".kcfi_traps") ||
		    !strcmp(sec->name, ".orc_unwind_ip") ||
		    !strcmp(sec->name, ".retpoline_sites") ||
		    !strcmp(sec->name, ".smp_locks") ||
		    !strcmp(sec->name, ".static_call_sites") ||
		    !strcmp(sec->name, "_error_injection_whitelist") ||
		    !strcmp(sec->name, "_kprobe_blacklist") ||
		    !strcmp(sec->name, "__bug_table") ||
		    !strcmp(sec->name, "__ex_table") ||
		    !strcmp(sec->name, "__jump_table") ||
		    !strcmp(sec->name, ".init.klp_funcs") ||
		    !strcmp(sec->name, "__mcount_loc") ||
		    !strcmp(sec->name, ".llvm.call-graph-profile") ||
		    !strcmp(sec->name, ".llvm_bb_addr_map") ||
		    !strcmp(sec->name, "__tracepoints") ||
		    !strcmp(sec->name, ".return_sites") ||
		    !strcmp(sec->name, ".call_sites") ||
		    !strcmp(sec->name, "__patchable_function_entries"))
			continue;

		for_each_reloc(sec->rsec, reloc)
			warnings += validate_ibt_data_reloc(file, reloc);
	}

	return warnings;
}

/*
 * SLS mitigation: every RET and indirect JMP must be followed by an INT3
 * speculation trap.  Returns the warning count.
 */
static int validate_sls(struct objtool_file *file)
{
	struct instruction *insn, *next_insn;
	int warnings = 0;

	for_each_insn(file, insn) {
		next_insn = next_insn_same_sec(file, insn);

		if (insn->retpoline_safe)
			continue;

		switch (insn->type) {
		case INSN_RETURN:
			if (!next_insn || next_insn->type != INSN_TRAP) {
				WARN_INSN(insn, "missing int3 after ret");
				warnings++;
			}

			break;
		case INSN_JUMP_DYNAMIC:
			if (!next_insn || next_insn->type != INSN_TRAP) {
				WARN_INSN(insn, "missing int3 after indirect jump");
				warnings++;
			}
			break;
		default:
			break;
		}
	}

	return warnings;
}

/*
 * After all branch walks, warn about instructions that were never visited
 * and can't be excused by ignore_unreachable_insn().  A dead-end call just
 * before the unreachable insn suggests a missing __noreturn annotation.
 */
static int validate_reachable_instructions(struct objtool_file *file)
{
	struct instruction *insn, *prev_insn;
	struct symbol *call_dest;
	int warnings = 0;

	if (file->ignore_unreachables)
		return 0;

	for_each_insn(file, insn) {
		if (insn->visited || ignore_unreachable_insn(file, insn))
			continue;

		prev_insn = prev_insn_same_sec(file, insn);
		if (prev_insn && prev_insn->dead_end) {
			call_dest = insn_call_dest(prev_insn);
			if (call_dest) {
				WARN_INSN(insn, "%s() missing __noreturn in .c/.h or NORETURN() in noreturns.h",
					  call_dest->name);
				warnings++;
				continue;
			}
		}

		WARN_INSN(insn, "unreachable instruction");
		warnings++;
	}

	return warnings;
}

/*
 * Default (arch-overridable) test for an absolute relocation type, keyed
 * off the ELF address size.
 */
__weak bool arch_absolute_reloc(struct elf *elf, struct reloc *reloc)
{
	unsigned int type = reloc_type(reloc);
	size_t sz = elf_addr_size(elf);

	return (sz == 8) ? (type == R_ABS64) : (type == R_ABS32);
}

/*
 * --noabs: flag absolute relocations in loadable sections.  Returns the
 * number of offending relocations.
 */
static int check_abs_references(struct objtool_file *file)
{
	struct section *sec;
	struct reloc *reloc;
	int ret = 0;

	for_each_sec(file->elf, sec) {
		/* absolute references in non-loadable sections are fine */
		if (!(sec->sh.sh_flags & SHF_ALLOC))
			continue;

		/* section must have an associated .rela section */
		if (!sec->rsec)
			continue;

		/*
		 * Special case for compiler generated metadata that is not
		 * consumed until after boot.
		 */
		if (!strcmp(sec->name, "__patchable_function_entries"))
			continue;

		for_each_reloc(sec->rsec, reloc) {
			if (arch_absolute_reloc(file->elf, reloc)) {
				WARN("section %s has absolute relocation at offset 0x%llx",
				     sec->name, (unsigned long long)reloc_offset(reloc));
				ret++;
			}
		}
	}
	return ret;
}

/* One allocation chunk of instructions; see free_insns(). */
struct insn_chunk {
	void *addr;
	struct insn_chunk *next;
};

/*
 * Reduce peak RSS usage by freeing insns memory before writing the ELF file,
 * which can trigger more allocations for .debug_* sections whose data hasn't
 * been read yet.
4936 */ 4937 static void free_insns(struct objtool_file *file) 4938 { 4939 struct instruction *insn; 4940 struct insn_chunk *chunks = NULL, *chunk; 4941 4942 for_each_insn(file, insn) { 4943 if (!insn->idx) { 4944 chunk = malloc(sizeof(*chunk)); 4945 chunk->addr = insn; 4946 chunk->next = chunks; 4947 chunks = chunk; 4948 } 4949 } 4950 4951 for (chunk = chunks; chunk; chunk = chunk->next) 4952 free(chunk->addr); 4953 } 4954 4955 const char *objtool_disas_insn(struct instruction *insn) 4956 { 4957 struct disas_context *dctx = objtool_disas_ctx; 4958 4959 if (!dctx) 4960 return ""; 4961 4962 disas_insn(dctx, insn); 4963 return disas_result(dctx); 4964 } 4965 4966 int check(struct objtool_file *file) 4967 { 4968 struct disas_context *disas_ctx = NULL; 4969 int ret = 0, warnings = 0; 4970 4971 /* 4972 * Create a disassembly context if we might disassemble any 4973 * instruction or function. 4974 */ 4975 if (opts.verbose || opts.backtrace || opts.trace || opts.disas) { 4976 disas_ctx = disas_context_create(file); 4977 if (!disas_ctx) { 4978 opts.disas = false; 4979 opts.trace = false; 4980 } 4981 objtool_disas_ctx = disas_ctx; 4982 } 4983 4984 arch_initial_func_cfi_state(&initial_func_cfi); 4985 init_cfi_state(&init_cfi); 4986 init_cfi_state(&func_cfi); 4987 set_func_state(&func_cfi); 4988 init_cfi_state(&force_undefined_cfi); 4989 force_undefined_cfi.force_undefined = true; 4990 4991 if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3))) { 4992 ret = -1; 4993 goto out; 4994 } 4995 4996 cfi_hash_add(&init_cfi); 4997 cfi_hash_add(&func_cfi); 4998 4999 ret = checksum_debug_init(file); 5000 if (ret) 5001 goto out; 5002 5003 ret = decode_sections(file); 5004 if (ret) 5005 goto out; 5006 5007 if (!nr_insns) 5008 goto out; 5009 5010 if (opts.retpoline) 5011 warnings += validate_retpoline(file); 5012 5013 if (validate_branch_enabled()) { 5014 int w = 0; 5015 5016 w += validate_functions(file); 5017 w += validate_unwind_hints(file, NULL); 5018 if (!w) 5019 w += 
validate_reachable_instructions(file);

		warnings += w;

	} else if (opts.noinstr) {
		warnings += validate_noinstr_sections(file);
	}

	if (opts.unret) {
		/*
		 * Must be after validate_branch() and friends, it plays
		 * further games with insn->visited.
		 */
		warnings += validate_unrets(file);
	}

	if (opts.ibt)
		warnings += validate_ibt(file);

	if (opts.sls)
		warnings += validate_sls(file);

	/* generation passes: any failure aborts via 'out' */
	if (opts.static_call) {
		ret = create_static_call_sections(file);
		if (ret)
			goto out;
	}

	if (opts.retpoline) {
		ret = create_retpoline_sites_sections(file);
		if (ret)
			goto out;
	}

	if (opts.cfi) {
		ret = create_cfi_sections(file);
		if (ret)
			goto out;
	}

	if (opts.rethunk) {
		ret = create_return_sites_sections(file);
		if (ret)
			goto out;

		if (opts.hack_skylake) {
			ret = create_direct_call_sections(file);
			if (ret)
				goto out;
		}
	}

	if (opts.mcount) {
		ret = create_mcount_loc_sections(file);
		if (ret)
			goto out;
	}

	if (opts.prefix) {
		ret = create_prefix_symbols(file);
		if (ret)
			goto out;
	}

	if (opts.ibt) {
		ret = create_ibt_endbr_seal_sections(file);
		if (ret)
			goto out;
	}

	if (opts.noabs)
		warnings += check_abs_references(file);

	if (opts.checksum) {
		ret = create_sym_checksum_section(file);
		if (ret)
			goto out;
	}

	if (opts.orc && nr_insns) {
		ret = orc_create(file);
		if (ret)
			goto out;
	}

	if (opts.stats) {
		printf("nr_insns_visited: %ld\n", nr_insns_visited);
		printf("nr_cfi: %ld\n", nr_cfi);
		printf("nr_cfi_reused: %ld\n", nr_cfi_reused);
		printf("nr_cfi_cache: %ld\n", nr_cfi_cache);
	}

out:
	if (ret || warnings) {
		/* --Werror: warnings become a failing exit status */
		if (opts.werror && warnings)
			ret = 1;

		if (opts.verbose) {
			if (opts.werror && warnings)
				WARN("%d warning(s) upgraded to errors", warnings);
			disas_warned_funcs(disas_ctx);
		}
	}

	if (opts.disas)
		disas_funcs(disas_ctx);

	if (disas_ctx) {
		disas_context_destroy(disas_ctx);
		objtool_disas_ctx = NULL;
	}

	free_insns(file);

	if (!ret && !warnings)
		return 0;

	if (opts.backup && make_backup())
		return 1;

	return ret;
}