// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#define _GNU_SOURCE /* memmem() */
#include <fnmatch.h>
#include <string.h>
#include <stdlib.h>
#include <inttypes.h>
#include <sys/mman.h>

#include <objtool/builtin.h>
#include <objtool/cfi.h>
#include <objtool/arch.h>
#include <objtool/disas.h>
#include <objtool/check.h>
#include <objtool/special.h>
#include <objtool/trace.h>
#include <objtool/warn.h>
#include <objtool/checksum.h>
#include <objtool/util.h>

#include <linux/objtool_types.h>
#include <linux/hashtable.h>
#include <linux/kernel.h>
#include <linux/static_call_types.h>
#include <linux/string.h>

/* CFI state dedup statistics, printed with --stats. */
static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache;

static struct cfi_init_state initial_func_cfi;
static struct cfi_state init_cfi;
static struct cfi_state func_cfi;
static struct cfi_state force_undefined_cfi;

struct disas_context *objtool_disas_ctx;

size_t sym_name_max_len;

/*
 * Look up the decoded instruction at (sec, offset) in the file-wide
 * instruction hash; returns NULL if no instruction starts there.
 */
struct instruction *find_insn(struct objtool_file *file,
			      struct section *sec, unsigned long offset)
{
	struct instruction *insn;

	hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) {
		if (insn->sec == sec && insn->offset == offset)
			return insn;
	}

	return NULL;
}

/*
 * Return the next decoded instruction in the same section, or NULL at the
 * end.  Instructions are stored in fixed-size chunks; at a chunk boundary
 * (idx == INSN_CHUNK_MAX) the successor is found via the hash instead of
 * pointer arithmetic.
 */
struct instruction *next_insn_same_sec(struct objtool_file *file,
				       struct instruction *insn)
{
	if (insn->idx == INSN_CHUNK_MAX)
		return find_insn(file, insn->sec, insn->offset + insn->len);

	insn++;
	if (!insn->len)
		return NULL;

	return insn;
}

/*
 * Next instruction belonging to the same function, following the jump from
 * a parent function into its compiler-generated ".cold" subfunction.
 */
static struct instruction *next_insn_same_func(struct objtool_file *file,
					       struct instruction *insn)
{
	struct instruction *next = next_insn_same_sec(file, insn);
	struct symbol *func = insn_func(insn);

	if (!func)
		return NULL;

	if (next && insn_func(next) == func)
		return next;

	/* Check if we're already in the subfunction: */
	if (func == func->cfunc)
		return NULL;

	/* Move to the subfunction: */
	return find_insn(file, func->cfunc->sec, func->cfunc->offset);
}

/*
 * Previous instruction in the same section; crosses a chunk boundary
 * (idx == 0) using the cached length of the preceding instruction.
 */
static struct instruction *prev_insn_same_sec(struct objtool_file *file,
					      struct instruction *insn)
{
	if (insn->idx == 0) {
		if (insn->prev_len)
			return find_insn(file, insn->sec, insn->offset - insn->prev_len);
		return NULL;
	}

	return insn - 1;
}

/* Previous instruction, but only if it belongs to the same function. */
static struct instruction *prev_insn_same_sym(struct objtool_file *file,
					      struct instruction *insn)
{
	struct instruction *prev = prev_insn_same_sec(file, insn);

	if (prev && insn_func(prev) == insn_func(insn))
		return prev;

	return NULL;
}

/*
 * Iterate all instructions in the file.  The __fake one-trip outer loop
 * exists only to declare __sec in a for-scope.
 */
#define for_each_insn(file, insn)					\
	for (struct section *__sec, *__fake = (struct section *)1;	\
	     __fake; __fake = NULL)					\
		for_each_sec(file->elf, __sec)				\
			sec_for_each_insn(file, __sec, insn)

#define func_for_each_insn(file, func, insn)				\
	for (insn = find_insn(file, func->sec, func->offset);		\
	     insn;							\
	     insn = next_insn_same_func(file, insn))

#define sym_for_each_insn(file, sym, insn)				\
	for (insn = find_insn(file, sym->sec, sym->offset);		\
	     insn && insn->offset < sym->offset + sym->len;		\
	     insn = next_insn_same_sec(file, insn))

#define sym_for_each_insn_continue_reverse(file, sym, insn)		\
	for (insn = prev_insn_same_sec(file, insn);			\
	     insn && insn->offset >= sym->offset;			\
	     insn = prev_insn_same_sec(file, insn))

#define sec_for_each_insn_from(file, insn)				\
	for (; insn; insn = next_insn_same_sec(file, insn))

#define sec_for_each_insn_continue(file, insn)				\
	for (insn = next_insn_same_sec(file, insn); insn;		\
	     insn = next_insn_same_sec(file, insn))

/*
 * The jump table reloc is only meaningful for dynamic jumps/calls; for any
 * other instruction type the union member holds something else.
 */
static inline struct reloc *insn_jump_table(struct instruction *insn)
{
	if (insn->type == INSN_JUMP_DYNAMIC ||
	    insn->type == INSN_CALL_DYNAMIC)
		return insn->_jump_table;

	return NULL;
}

static inline unsigned long insn_jump_table_size(struct instruction *insn)
{
	if (insn->type == INSN_JUMP_DYNAMIC ||
	    insn->type == INSN_CALL_DYNAMIC)
		return insn->_jump_table_size;

	return 0;
}

static bool is_jump_table_jump(struct instruction *insn)
{
	struct alt_group *alt_group = insn->alt_group;

	if (insn_jump_table(insn))
		return true;

	/* Retpoline alternative for a jump table? */
	return alt_group && alt_group->orig_group &&
	       insn_jump_table(alt_group->orig_group->first_insn);
}

static bool is_sibling_call(struct instruction *insn)
{
	/*
	 * Assume only STT_FUNC calls have jump-tables.
	 */
	if (insn_func(insn)) {
		/* An indirect jump is either a sibling call or a jump to a table. */
		if (insn->type == INSN_JUMP_DYNAMIC)
			return !is_jump_table_jump(insn);
	}

	/* add_jump_destinations() sets insn_call_dest(insn) for sibling calls. */
	return (is_static_jump(insn) && insn_call_dest(insn));
}

/*
 * Checks if a function is a Rust "noreturn" one.
 */
static bool is_rust_noreturn(const struct symbol *func)
{
	/*
	 * If it does not start with "_R", then it is not a Rust symbol.
	 */
	if (strncmp(func->name, "_R", 2))
		return false;

	/*
	 * These are just heuristics -- we do not control the precise symbol
	 * name, due to the crate disambiguators (which depend on the compiler)
	 * as well as changes to the source code itself between versions (since
	 * these come from the Rust standard library).
	 */
	return str_ends_with(func->name, "_4core3num20from_str_radix_panic") ||
	       str_ends_with(func->name, "_4core3num22from_ascii_radix_panic") ||
	       str_ends_with(func->name, "_4core5sliceSp15copy_from_slice17len_mismatch_fail") ||
	       str_ends_with(func->name, "_4core6option13expect_failed") ||
	       str_ends_with(func->name, "_4core6option13unwrap_failed") ||
	       str_ends_with(func->name, "_4core6result13unwrap_failed") ||
	       str_ends_with(func->name, "_4core9panicking5panic") ||
	       str_ends_with(func->name, "_4core9panicking9panic_fmt") ||
	       str_ends_with(func->name, "_4core9panicking14panic_explicit") ||
	       str_ends_with(func->name, "_4core9panicking14panic_nounwind") ||
	       str_ends_with(func->name, "_4core9panicking18panic_bounds_check") ||
	       str_ends_with(func->name, "_4core9panicking18panic_nounwind_fmt") ||
	       str_ends_with(func->name, "_4core9panicking19assert_failed_inner") ||
	       str_ends_with(func->name, "_4core9panicking30panic_null_pointer_dereference") ||
	       str_ends_with(func->name, "_4core9panicking36panic_misaligned_pointer_dereference") ||
	       str_ends_with(func->name, "_7___rustc17rust_begin_unwind") ||
	       strstr(func->name, "_4core9panicking13assert_failed") ||
	       strstr(func->name, "_4core9panicking11panic_const24panic_const_") ||
	       (strstr(func->name, "_4core5slice5index") &&
		strstr(func->name, "slice_") &&
		str_ends_with(func->name, "_fail"));
}

/*
 * This checks to see if the given function is a "noreturn" function.
 *
 * For global functions which are outside the scope of this object file, we
 * have to keep a manual list of them.
 *
 * For local functions, we have to detect them manually by simply looking for
 * the lack of a return instruction.
 */
static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
				int recursion)
{
	int i;
	struct instruction *insn;
	bool empty = true;

	/* Table of known global noreturn function names, one per line. */
#define NORETURN(func) __stringify(func),
	static const char * const global_noreturns[] = {
#include "noreturns.h"
	};
#undef NORETURN

	if (!func)
		return false;

	if (!is_local_sym(func)) {
		if (is_rust_noreturn(func))
			return true;

		for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
			if (!strcmp(func->name, global_noreturns[i]))
				return true;
	}

	/* A weak symbol can be overridden; don't trust its body. */
	if (is_weak_sym(func))
		return false;

	if (!func->len)
		return false;

	insn = find_insn(file, func->sec, func->offset);
	if (!insn || !insn_func(insn))
		return false;

	func_for_each_insn(file, func, insn) {
		empty = false;

		if (insn->type == INSN_RETURN)
			return false;
	}

	/* No decodable instructions at all: not provably dead-end. */
	if (empty)
		return false;

	/*
	 * A function can have a sibling call instead of a return.  In that
	 * case, the function's dead-end status depends on whether the target
	 * of the sibling call returns.
	 */
	func_for_each_insn(file, func, insn) {
		if (is_sibling_call(insn)) {
			struct instruction *dest = insn->jump_dest;

			if (!dest)
				/* sibling call to another file */
				return false;

			/* local sibling call */
			if (recursion == 5) {
				/*
				 * Infinite recursion: two functions have
				 * sibling calls to each other.  This is a very
				 * rare case.  It means they aren't dead ends.
				 */
				return false;
			}

			return __dead_end_function(file, insn_func(dest), recursion+1);
		}
	}

	return true;
}

static bool dead_end_function(struct objtool_file *file, struct symbol *func)
{
	return __dead_end_function(file, func, 0);
}

/* Reset a CFI state so every register/CFA location is "unknown". */
static void init_cfi_state(struct cfi_state *cfi)
{
	int i;

	for (i = 0; i < CFI_NUM_REGS; i++) {
		cfi->regs[i].base = CFI_UNDEFINED;
		cfi->vals[i].base = CFI_UNDEFINED;
	}
	cfi->cfa.base = CFI_UNDEFINED;
	cfi->drap_reg = CFI_UNDEFINED;
	cfi->drap_offset = -1;
}

static void init_insn_state(struct objtool_file *file, struct insn_state *state,
			    struct section *sec)
{
	memset(state, 0, sizeof(*state));
	init_cfi_state(&state->cfi);

	if (opts.noinstr && sec)
		state->noinstr = sec->noinstr;
}

static struct cfi_state *cfi_alloc(void)
{
	struct cfi_state *cfi = calloc(1, sizeof(struct cfi_state));
	if (!cfi) {
		ERROR_GLIBC("calloc");
		exit(1);
	}
	nr_cfi++;
	return cfi;
}

static int cfi_bits;
static struct hlist_head *cfi_hash;

/*
 * Compare two CFI states, skipping the embedded hlist node at the start of
 * the struct.  NOTE(review): this relies on 'hash' being the first member
 * of struct cfi_state, so the comparison covers everything after it.
 */
static inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2)
{
	return memcmp((void *)cfi1 + sizeof(cfi1->hash),
		      (void *)cfi2 + sizeof(cfi2->hash),
		      sizeof(struct cfi_state) - sizeof(struct hlist_node));
}

/* Hash of a CFI state over the same byte range that cficmp() compares. */
static inline u32 cfi_key(struct cfi_state *cfi)
{
	return jhash((void *)cfi + sizeof(cfi->hash),
		     sizeof(*cfi) - sizeof(cfi->hash), 0);
}

/*
 * Deduplicate CFI states: return an existing identical state from the hash,
 * or insert a freshly allocated copy of *cfi.
 */
static struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi)
{
	struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
	struct cfi_state *obj;

	hlist_for_each_entry(obj, head, hash) {
		if (!cficmp(cfi, obj)) {
			nr_cfi_cache++;
			return obj;
		}
	}

	obj = cfi_alloc();
	*obj = *cfi;
	hlist_add_head(&obj->hash, head);

	return obj;
}

static void
cfi_hash_add(struct cfi_state *cfi)
{
	struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];

	hlist_add_head(&cfi->hash, head);
}

/*
 * Allocate the CFI dedup hash table, sized by the instruction count but at
 * least 2^10 buckets.  Returns NULL on mmap failure.
 */
static void *cfi_hash_alloc(unsigned long size)
{
	cfi_bits = max(10, ilog2(size));
	cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits,
			PROT_READ|PROT_WRITE,
			MAP_PRIVATE|MAP_ANON, -1, 0);
	if (cfi_hash == (void *)-1L) {
		ERROR_GLIBC("mmap fail cfi_hash");
		cfi_hash = NULL;
	} else if (opts.stats) {
		printf("cfi_bits: %d\n", cfi_bits);
	}

	return cfi_hash;
}

static unsigned long nr_insns;
static unsigned long nr_insns_visited;

/*
 * Call the arch-specific instruction decoder for all the instructions and add
 * them to the global instruction list.
 */
static int decode_instructions(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;
	unsigned long offset;
	struct instruction *insn;

	for_each_sec(file->elf, sec) {
		struct instruction *insns = NULL;
		u8 prev_len = 0;
		u8 idx = 0;

		if (!is_text_sec(sec))
			continue;

		/* Alternative-replacement text isn't "real" text. */
		if (strcmp(sec->name, ".altinstr_replacement") &&
		    strcmp(sec->name, ".altinstr_aux") &&
		    strncmp(sec->name, ".discard.", 9))
			sec->text = true;

		if (!strcmp(sec->name, ".noinstr.text") ||
		    !strcmp(sec->name, ".entry.text") ||
		    !strcmp(sec->name, ".cpuidle.text") ||
		    !strncmp(sec->name, ".text..__x86.", 13))
			sec->noinstr = true;

		/*
		 * .init.text code is ran before userspace and thus doesn't
		 * strictly need retpolines, except for modules which are
		 * loaded late, they very much do need retpoline in their
		 * .init.text
		 */
		if (!strcmp(sec->name, ".init.text") && !opts.module)
			sec->init = true;

		for (offset = 0; offset < sec_size(sec); offset += insn->len) {
			/* Instructions are allocated in chunks of INSN_CHUNK_SIZE. */
			if (!insns || idx == INSN_CHUNK_MAX) {
				insns = calloc(INSN_CHUNK_SIZE, sizeof(*insn));
				if (!insns) {
					ERROR_GLIBC("calloc");
					return -1;
				}
				idx = 0;
			} else {
				idx++;
			}
			insn = &insns[idx];
			insn->idx = idx;

			INIT_LIST_HEAD(&insn->call_node);
			insn->sec = sec;
			insn->offset = offset;
			insn->prev_len = prev_len;

			if (arch_decode_instruction(file, sec, offset, sec_size(sec) - offset, insn))
				return -1;

			prev_len = insn->len;

			/*
			 * By default, "ud2" is a dead end unless otherwise
			 * annotated, because GCC 7 inserts it for certain
			 * divide-by-zero cases.
			 */
			if (insn->type == INSN_BUG)
				insn->dead_end = true;

			hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset));
			nr_insns++;
		}

		sec_for_each_sym(sec, func) {
			if (!is_notype_sym(func) && !is_func_sym(func))
				continue;

			if (func->offset == sec_size(sec)) {
				/* Heuristic: likely an "end" symbol */
				if (is_notype_sym(func))
					continue;
				ERROR("%s(): STT_FUNC at end of section", func->name);
				return -1;
			}

			if (func->embedded_insn || func->alias != func)
				continue;

			if (!find_insn(file, sec, func->offset)) {
				ERROR("%s(): can't find starting instruction", func->name);
				return -1;
			}

			sym_for_each_insn(file, func, insn) {
				insn->sym = func;
				/* Track function-entry vs interior ENDBR for IBT sealing. */
				if (is_func_sym(func) &&
				    insn->type == INSN_ENDBR &&
				    list_empty(&insn->call_node)) {
					if (insn->offset == func->offset) {
						list_add_tail(&insn->call_node, &file->endbr_list);
						file->nr_endbr++;
					} else {
						file->nr_endbr_int++;
					}
				}
			}
		}
	}

	if (opts.stats)
		printf("nr_insns: %lu\n", nr_insns);

	return 0;
}

/*
 * Known pv_ops*[] arrays.  idx_off is filled in by init_pv_ops(); the
 * NULL-named entry is the sentinel returned for unknown names.
 */
static struct {
	const char *name;
	int idx_off;
} pv_ops_tables[] = {
	{ .name = "pv_ops", },
	{ .name = "pv_ops_lock", },
	{ .name = NULL, .idx_off = -1 }
};

/*
 * Get index offset for a pv_ops* array.
538 */ 539 int pv_ops_idx_off(const char *symname) 540 { 541 int idx; 542 543 for (idx = 0; pv_ops_tables[idx].name; idx++) { 544 if (!strcmp(symname, pv_ops_tables[idx].name)) 545 break; 546 } 547 548 return pv_ops_tables[idx].idx_off; 549 } 550 551 /* 552 * Read a pv_ops*[] .data table to find the static initialized values. 553 */ 554 static int add_pv_ops(struct objtool_file *file, int pv_ops_idx) 555 { 556 struct symbol *sym, *func; 557 unsigned long off, end; 558 struct reloc *reloc; 559 int idx, idx_off; 560 const char *symname; 561 562 symname = pv_ops_tables[pv_ops_idx].name; 563 sym = find_symbol_by_name(file->elf, symname); 564 if (!sym) { 565 ERROR("Unknown pv_ops array %s", symname); 566 return -1; 567 } 568 569 off = sym->offset; 570 end = off + sym->len; 571 idx_off = pv_ops_tables[pv_ops_idx].idx_off; 572 if (idx_off < 0) { 573 ERROR("pv_ops array %s has unknown index offset", symname); 574 return -1; 575 } 576 577 for (;;) { 578 reloc = find_reloc_by_dest_range(file->elf, sym->sec, off, end - off); 579 if (!reloc) 580 break; 581 582 idx = (reloc_offset(reloc) - sym->offset) / sizeof(unsigned long); 583 584 func = reloc->sym; 585 if (is_sec_sym(func)) 586 func = find_symbol_by_offset(reloc->sym->sec, 587 reloc_addend(reloc)); 588 if (!func) { 589 ERROR_FUNC(reloc->sym->sec, reloc_addend(reloc), 590 "can't find func at %s[%d]", symname, idx); 591 return -1; 592 } 593 594 if (objtool_pv_add(file, idx + idx_off, func)) 595 return -1; 596 597 off = reloc_offset(reloc) + 1; 598 if (off > end) 599 break; 600 } 601 602 return 0; 603 } 604 605 /* 606 * Allocate and initialize file->pv_ops[]. 
607 */ 608 static int init_pv_ops(struct objtool_file *file) 609 { 610 struct symbol *sym; 611 int idx, nr; 612 613 if (!opts.noinstr) 614 return 0; 615 616 file->pv_ops = NULL; 617 618 nr = 0; 619 for (idx = 0; pv_ops_tables[idx].name; idx++) { 620 sym = find_symbol_by_name(file->elf, pv_ops_tables[idx].name); 621 if (!sym) { 622 pv_ops_tables[idx].idx_off = -1; 623 continue; 624 } 625 pv_ops_tables[idx].idx_off = nr; 626 nr += sym->len / sizeof(unsigned long); 627 } 628 629 if (nr == 0) 630 return 0; 631 632 file->pv_ops = calloc(nr, sizeof(struct pv_state)); 633 if (!file->pv_ops) { 634 ERROR_GLIBC("calloc"); 635 return -1; 636 } 637 638 for (idx = 0; idx < nr; idx++) 639 INIT_LIST_HEAD(&file->pv_ops[idx].targets); 640 641 for (idx = 0; pv_ops_tables[idx].name; idx++) { 642 if (pv_ops_tables[idx].idx_off < 0) 643 continue; 644 if (add_pv_ops(file, idx)) 645 return -1; 646 } 647 648 return 0; 649 } 650 651 static bool is_livepatch_module(struct objtool_file *file) 652 { 653 struct section *sec; 654 655 if (!opts.module) 656 return false; 657 658 sec = find_section_by_name(file->elf, ".modinfo"); 659 if (!sec) 660 return false; 661 662 return memmem(sec->data->d_buf, sec_size(sec), "\0livepatch=Y", 12); 663 } 664 665 static int create_static_call_sections(struct objtool_file *file) 666 { 667 struct static_call_site *site; 668 struct section *sec; 669 struct instruction *insn; 670 struct symbol *key_sym; 671 char *key_name, *tmp; 672 int idx; 673 674 sec = find_section_by_name(file->elf, ".static_call_sites"); 675 if (sec) { 676 /* 677 * Livepatch modules may have already extracted the static call 678 * site entries to take advantage of vmlinux static call 679 * privileges. 
680 */ 681 if (!file->klp) 682 WARN("file already has .static_call_sites section, skipping"); 683 684 return 0; 685 } 686 687 if (list_empty(&file->static_call_list)) 688 return 0; 689 690 idx = 0; 691 list_for_each_entry(insn, &file->static_call_list, call_node) 692 idx++; 693 694 sec = elf_create_section_pair(file->elf, ".static_call_sites", 695 sizeof(*site), idx, idx * 2); 696 if (!sec) 697 return -1; 698 699 /* Allow modules to modify the low bits of static_call_site::key */ 700 sec->sh.sh_flags |= SHF_WRITE; 701 702 idx = 0; 703 list_for_each_entry(insn, &file->static_call_list, call_node) { 704 705 /* populate reloc for 'addr' */ 706 if (!elf_init_reloc_text_sym(file->elf, sec, 707 idx * sizeof(*site), idx * 2, 708 insn->sec, insn->offset)) 709 return -1; 710 711 /* find key symbol */ 712 key_name = strdup(insn_call_dest(insn)->name); 713 if (!key_name) { 714 ERROR_GLIBC("strdup"); 715 return -1; 716 } 717 if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR, 718 STATIC_CALL_TRAMP_PREFIX_LEN)) { 719 ERROR("static_call: trampoline name malformed: %s", key_name); 720 return -1; 721 } 722 tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN; 723 memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN); 724 725 key_sym = find_symbol_by_name(file->elf, tmp); 726 if (!key_sym) { 727 if (!opts.module) { 728 ERROR("static_call: can't find static_call_key symbol: %s", tmp); 729 return -1; 730 } 731 732 /* 733 * For modules(), the key might not be exported, which 734 * means the module can make static calls but isn't 735 * allowed to change them. 736 * 737 * In that case we temporarily set the key to be the 738 * trampoline address. This is fixed up in 739 * static_call_add_module(). 
740 */ 741 key_sym = insn_call_dest(insn); 742 } 743 744 /* populate reloc for 'key' */ 745 if (!elf_init_reloc_data_sym(file->elf, sec, 746 idx * sizeof(*site) + 4, 747 (idx * 2) + 1, key_sym, 748 is_sibling_call(insn) * STATIC_CALL_SITE_TAIL)) 749 return -1; 750 751 idx++; 752 } 753 754 return 0; 755 } 756 757 static int create_retpoline_sites_sections(struct objtool_file *file) 758 { 759 struct instruction *insn; 760 struct section *sec; 761 int idx; 762 763 sec = find_section_by_name(file->elf, ".retpoline_sites"); 764 if (sec) { 765 WARN("file already has .retpoline_sites, skipping"); 766 return 0; 767 } 768 769 idx = 0; 770 list_for_each_entry(insn, &file->retpoline_call_list, call_node) 771 idx++; 772 773 if (!idx) 774 return 0; 775 776 sec = elf_create_section_pair(file->elf, ".retpoline_sites", 777 sizeof(int), idx, idx); 778 if (!sec) 779 return -1; 780 781 idx = 0; 782 list_for_each_entry(insn, &file->retpoline_call_list, call_node) { 783 784 if (!elf_init_reloc_text_sym(file->elf, sec, 785 idx * sizeof(int), idx, 786 insn->sec, insn->offset)) 787 return -1; 788 789 idx++; 790 } 791 792 return 0; 793 } 794 795 static int create_return_sites_sections(struct objtool_file *file) 796 { 797 struct instruction *insn; 798 struct section *sec; 799 int idx; 800 801 sec = find_section_by_name(file->elf, ".return_sites"); 802 if (sec) { 803 WARN("file already has .return_sites, skipping"); 804 return 0; 805 } 806 807 idx = 0; 808 list_for_each_entry(insn, &file->return_thunk_list, call_node) 809 idx++; 810 811 if (!idx) 812 return 0; 813 814 sec = elf_create_section_pair(file->elf, ".return_sites", 815 sizeof(int), idx, idx); 816 if (!sec) 817 return -1; 818 819 idx = 0; 820 list_for_each_entry(insn, &file->return_thunk_list, call_node) { 821 822 if (!elf_init_reloc_text_sym(file->elf, sec, 823 idx * sizeof(int), idx, 824 insn->sec, insn->offset)) 825 return -1; 826 827 idx++; 828 } 829 830 return 0; 831 } 832 833 static int create_ibt_endbr_seal_sections(struct 
objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".ibt_endbr_seal");
	if (sec) {
		WARN("file already has .ibt_endbr_seal, skipping");
		return 0;
	}

	idx = 0;
	list_for_each_entry(insn, &file->endbr_list, call_node)
		idx++;

	if (opts.stats) {
		printf("ibt: ENDBR at function start: %d\n", file->nr_endbr);
		printf("ibt: ENDBR inside functions: %d\n", file->nr_endbr_int);
		printf("ibt: superfluous ENDBR: %d\n", idx);
	}

	if (!idx)
		return 0;

	sec = elf_create_section_pair(file->elf, ".ibt_endbr_seal",
				      sizeof(int), idx, idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->endbr_list, call_node) {

		int *site = (int *)sec->data->d_buf + idx;
		struct symbol *sym = insn->sym;
		*site = 0;

		/*
		 * An ENDBR at the entry of init_module()/cleanup_module()
		 * would be sealed; reject the deprecated magic names instead.
		 */
		if (opts.module && sym && is_func_sym(sym) &&
		    insn->offset == sym->offset &&
		    (!strcmp(sym->name, "init_module") ||
		     !strcmp(sym->name, "cleanup_module"))) {
			ERROR("%s(): Magic init_module() function name is deprecated, use module_init(fn) instead",
			      sym->name);
			return -1;
		}

		if (!elf_init_reloc_text_sym(file->elf, sec,
					     idx * sizeof(int), idx,
					     insn->sec, insn->offset))
			return -1;

		idx++;
	}

	return 0;
}

/* Emit .cfi_sites: the address of every __cfi_* preamble symbol. */
static int create_cfi_sections(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *sym;
	int idx;

	sec = find_section_by_name(file->elf, ".cfi_sites");
	if (sec) {
		WARN("file already has .cfi_sites section, skipping");
		return 0;
	}

	idx = 0;
	for_each_sym(file->elf, sym) {
		if (!is_func_sym(sym))
			continue;

		if (strncmp(sym->name, "__cfi_", 6))
			continue;

		idx++;
	}

	sec = elf_create_section_pair(file->elf, ".cfi_sites",
				      sizeof(unsigned int), idx, idx);
	if (!sec)
		return -1;

	idx = 0;
	for_each_sym(file->elf, sym) {
		if (!is_func_sym(sym))
			continue;

		if (strncmp(sym->name, "__cfi_", 6))
			continue;

		if (!elf_init_reloc_text_sym(file->elf, sec,
					     idx * sizeof(unsigned int), idx,
					     sym->sec, sym->offset))
			return -1;

		idx++;
	}

	return 0;
}

/* Emit __mcount_loc: one pointer-sized entry per mcount call site. */
static int create_mcount_loc_sections(struct objtool_file *file)
{
	size_t addr_size = elf_addr_size(file->elf);
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, "__mcount_loc");
	if (sec) {
		/*
		 * Livepatch modules have already extracted their __mcount_loc
		 * entries to cover the !CONFIG_FTRACE_MCOUNT_USE_OBJTOOL case.
		 */
		if (!file->klp)
			WARN("file already has __mcount_loc section, skipping");

		return 0;
	}

	if (list_empty(&file->mcount_loc_list))
		return 0;

	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, call_node)
		idx++;

	sec = elf_create_section_pair(file->elf, "__mcount_loc", addr_size,
				      idx, idx);
	if (!sec)
		return -1;

	sec->sh.sh_addralign = addr_size;

	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, call_node) {

		struct reloc *reloc;

		reloc = elf_init_reloc_text_sym(file->elf, sec, idx * addr_size, idx,
						insn->sec, insn->offset);
		if (!reloc)
			return -1;

		/* Absolute reloc, width matching the ELF class. */
		set_reloc_type(file->elf, reloc, addr_size == 8 ? R_ABS64 : R_ABS32);

		idx++;
	}

	return 0;
}

/* Emit .call_sites: the address of every direct call instruction. */
static int create_direct_call_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".call_sites");
	if (sec) {
		WARN("file already has .call_sites section, skipping");
		return 0;
	}

	if (list_empty(&file->call_list))
		return 0;

	idx = 0;
	list_for_each_entry(insn, &file->call_list, call_node)
		idx++;

	sec = elf_create_section_pair(file->elf, ".call_sites",
				      sizeof(unsigned int), idx, idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->call_list, call_node) {

		if (!elf_init_reloc_text_sym(file->elf, sec,
					     idx * sizeof(unsigned int), idx,
					     insn->sec, insn->offset))
			return -1;

		idx++;
	}

	return 0;
}

#ifdef BUILD_KLP
/*
 * Emit .discard.sym_checksum: {addr, checksum} per symbol that has a
 * computed checksum, for livepatch tooling.
 */
static int create_sym_checksum_section(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *sym;
	unsigned int idx = 0;
	struct sym_checksum *checksum;
	size_t entsize = sizeof(struct sym_checksum);

	sec = find_section_by_name(file->elf, ".discard.sym_checksum");
	if (sec) {
		if (!opts.dryrun)
			WARN("file already has .discard.sym_checksum section, skipping");

		return 0;
	}

	for_each_sym(file->elf, sym)
		if (sym->csum.checksum)
			idx++;

	if (!idx)
		return 0;

	sec = elf_create_section_pair(file->elf, ".discard.sym_checksum", entsize,
				      idx, idx);
	if (!sec)
		return -1;

	idx = 0;
	for_each_sym(file->elf, sym) {
		if (!sym->csum.checksum)
			continue;

		if (!elf_init_reloc(file->elf, sec->rsec, idx, idx * entsize,
				    sym, 0, R_TEXT64))
			return -1;

		checksum = (struct sym_checksum *)sec->data->d_buf + idx;
		checksum->addr = 0; /* reloc */
		checksum->checksum = sym->csum.checksum;

		mark_sec_changed(file->elf, sec, true);

		idx++;
	}

	return 0;
}
#else
static int create_sym_checksum_section(struct objtool_file *file) { return -EINVAL; }
#endif

/*
 * Warnings shouldn't be reported for ignored functions.
 */
static int add_ignores(struct objtool_file *file)
{
	struct section *rsec;
	struct symbol *func;
	struct reloc *reloc;

	rsec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
	if (!rsec)
		return 0;

	for_each_reloc(rsec, reloc) {
		switch (reloc->sym->type) {
		case STT_FUNC:
			func = reloc->sym;
			break;

		case STT_SECTION:
			func = find_func_by_offset(reloc->sym->sec, reloc_addend(reloc));
			if (!func)
				continue;
			break;

		default:
			ERROR("unexpected relocation symbol type in %s: %d",
			      rsec->name, reloc->sym->type);
			return -1;
		}

		func->ignore = true;
		/* The .cold subfunction inherits the ignore flag. */
		if (func->cfunc)
			func->cfunc->ignore = true;
	}

	return 0;
}

/*
 * This is a whitelist of functions that is allowed to be called with AC set.
 * The list is meant to be minimal and only contains compiler instrumentation
 * ABI and a few functions used to implement *_{to,from}_user() functions.
 *
 * These functions must not directly change AC, but may PUSHF/POPF.
1124 */ 1125 static const char *uaccess_safe_builtin[] = { 1126 /* KASAN */ 1127 "kasan_report", 1128 "kasan_check_range", 1129 /* KASAN out-of-line */ 1130 "__asan_loadN_noabort", 1131 "__asan_load1_noabort", 1132 "__asan_load2_noabort", 1133 "__asan_load4_noabort", 1134 "__asan_load8_noabort", 1135 "__asan_load16_noabort", 1136 "__asan_storeN_noabort", 1137 "__asan_store1_noabort", 1138 "__asan_store2_noabort", 1139 "__asan_store4_noabort", 1140 "__asan_store8_noabort", 1141 "__asan_store16_noabort", 1142 "__kasan_check_read", 1143 "__kasan_check_write", 1144 /* KASAN in-line */ 1145 "__asan_report_load_n_noabort", 1146 "__asan_report_load1_noabort", 1147 "__asan_report_load2_noabort", 1148 "__asan_report_load4_noabort", 1149 "__asan_report_load8_noabort", 1150 "__asan_report_load16_noabort", 1151 "__asan_report_store_n_noabort", 1152 "__asan_report_store1_noabort", 1153 "__asan_report_store2_noabort", 1154 "__asan_report_store4_noabort", 1155 "__asan_report_store8_noabort", 1156 "__asan_report_store16_noabort", 1157 /* KCSAN */ 1158 "__kcsan_check_access", 1159 "__kcsan_mb", 1160 "__kcsan_wmb", 1161 "__kcsan_rmb", 1162 "__kcsan_release", 1163 "kcsan_found_watchpoint", 1164 "kcsan_setup_watchpoint", 1165 "kcsan_check_scoped_accesses", 1166 "kcsan_disable_current", 1167 "kcsan_enable_current_nowarn", 1168 /* KCSAN/TSAN */ 1169 "__tsan_func_entry", 1170 "__tsan_func_exit", 1171 "__tsan_read_range", 1172 "__tsan_write_range", 1173 "__tsan_read1", 1174 "__tsan_read2", 1175 "__tsan_read4", 1176 "__tsan_read8", 1177 "__tsan_read16", 1178 "__tsan_write1", 1179 "__tsan_write2", 1180 "__tsan_write4", 1181 "__tsan_write8", 1182 "__tsan_write16", 1183 "__tsan_read_write1", 1184 "__tsan_read_write2", 1185 "__tsan_read_write4", 1186 "__tsan_read_write8", 1187 "__tsan_read_write16", 1188 "__tsan_volatile_read1", 1189 "__tsan_volatile_read2", 1190 "__tsan_volatile_read4", 1191 "__tsan_volatile_read8", 1192 "__tsan_volatile_read16", 1193 "__tsan_volatile_write1", 1194 
"__tsan_volatile_write2", 1195 "__tsan_volatile_write4", 1196 "__tsan_volatile_write8", 1197 "__tsan_volatile_write16", 1198 "__tsan_atomic8_load", 1199 "__tsan_atomic16_load", 1200 "__tsan_atomic32_load", 1201 "__tsan_atomic64_load", 1202 "__tsan_atomic8_store", 1203 "__tsan_atomic16_store", 1204 "__tsan_atomic32_store", 1205 "__tsan_atomic64_store", 1206 "__tsan_atomic8_exchange", 1207 "__tsan_atomic16_exchange", 1208 "__tsan_atomic32_exchange", 1209 "__tsan_atomic64_exchange", 1210 "__tsan_atomic8_fetch_add", 1211 "__tsan_atomic16_fetch_add", 1212 "__tsan_atomic32_fetch_add", 1213 "__tsan_atomic64_fetch_add", 1214 "__tsan_atomic8_fetch_sub", 1215 "__tsan_atomic16_fetch_sub", 1216 "__tsan_atomic32_fetch_sub", 1217 "__tsan_atomic64_fetch_sub", 1218 "__tsan_atomic8_fetch_and", 1219 "__tsan_atomic16_fetch_and", 1220 "__tsan_atomic32_fetch_and", 1221 "__tsan_atomic64_fetch_and", 1222 "__tsan_atomic8_fetch_or", 1223 "__tsan_atomic16_fetch_or", 1224 "__tsan_atomic32_fetch_or", 1225 "__tsan_atomic64_fetch_or", 1226 "__tsan_atomic8_fetch_xor", 1227 "__tsan_atomic16_fetch_xor", 1228 "__tsan_atomic32_fetch_xor", 1229 "__tsan_atomic64_fetch_xor", 1230 "__tsan_atomic8_fetch_nand", 1231 "__tsan_atomic16_fetch_nand", 1232 "__tsan_atomic32_fetch_nand", 1233 "__tsan_atomic64_fetch_nand", 1234 "__tsan_atomic8_compare_exchange_strong", 1235 "__tsan_atomic16_compare_exchange_strong", 1236 "__tsan_atomic32_compare_exchange_strong", 1237 "__tsan_atomic64_compare_exchange_strong", 1238 "__tsan_atomic8_compare_exchange_weak", 1239 "__tsan_atomic16_compare_exchange_weak", 1240 "__tsan_atomic32_compare_exchange_weak", 1241 "__tsan_atomic64_compare_exchange_weak", 1242 "__tsan_atomic8_compare_exchange_val", 1243 "__tsan_atomic16_compare_exchange_val", 1244 "__tsan_atomic32_compare_exchange_val", 1245 "__tsan_atomic64_compare_exchange_val", 1246 "__tsan_atomic_thread_fence", 1247 "__tsan_atomic_signal_fence", 1248 "__tsan_unaligned_read16", 1249 "__tsan_unaligned_write16", 1250 /* KCOV */ 
	"write_comp_data",
	"check_kcov_mode",
	"__sanitizer_cov_trace_pc",
	"__sanitizer_cov_trace_const_cmp1",
	"__sanitizer_cov_trace_const_cmp2",
	"__sanitizer_cov_trace_const_cmp4",
	"__sanitizer_cov_trace_const_cmp8",
	"__sanitizer_cov_trace_cmp1",
	"__sanitizer_cov_trace_cmp2",
	"__sanitizer_cov_trace_cmp4",
	"__sanitizer_cov_trace_cmp8",
	"__sanitizer_cov_trace_switch",
	/* KMSAN */
	"kmsan_copy_to_user",
	"kmsan_disable_current",
	"kmsan_enable_current",
	"kmsan_report",
	"kmsan_unpoison_entry_regs",
	"kmsan_unpoison_memory",
	"__msan_chain_origin",
	"__msan_get_context_state",
	"__msan_instrument_asm_store",
	"__msan_metadata_ptr_for_load_1",
	"__msan_metadata_ptr_for_load_2",
	"__msan_metadata_ptr_for_load_4",
	"__msan_metadata_ptr_for_load_8",
	"__msan_metadata_ptr_for_load_n",
	"__msan_metadata_ptr_for_store_1",
	"__msan_metadata_ptr_for_store_2",
	"__msan_metadata_ptr_for_store_4",
	"__msan_metadata_ptr_for_store_8",
	"__msan_metadata_ptr_for_store_n",
	"__msan_poison_alloca",
	"__msan_warning",
	/* UBSAN */
	"ubsan_type_mismatch_common",
	"__ubsan_handle_type_mismatch",
	"__ubsan_handle_type_mismatch_v1",
	"__ubsan_handle_shift_out_of_bounds",
	"__ubsan_handle_load_invalid_value",
	/* KSTACK_ERASE */
	"__sanitizer_cov_stack_depth",
	/* TRACE_BRANCH_PROFILING */
	"ftrace_likely_update",
	/* STACKPROTECTOR */
	"__stack_chk_fail",
	/* misc */
	"csum_partial_copy_generic",
	"copy_mc_fragile",
	"copy_mc_fragile_handle_tail",
	"copy_mc_enhanced_fast_string",
	"rep_stos_alternative",
	"rep_movs_alternative",
	"__copy_user_nocache",
	NULL
};

/*
 * Mark every symbol named in uaccess_safe_builtin[] as safe to call while
 * userspace access is enabled.  Only relevant when --uaccess checking is on.
 */
static void add_uaccess_safe(struct objtool_file *file)
{
	struct symbol *func;
	const char **name;

	if (!opts.uaccess)
		return;

	for (name = uaccess_safe_builtin; *name; name++) {
		func = find_symbol_by_name(file->elf, *name);
		if (!func)
			continue;

		func->uaccess_safe = true;
	}
}

/*
 * Symbols that replace INSN_CALL_DYNAMIC, every (tail) call to such a symbol
 * will be added to the .retpoline_sites section.
 */
__weak bool arch_is_retpoline(struct symbol *sym)
{
	return false;
}

/*
 * Symbols that replace INSN_RETURN, every (tail) call to such a symbol
 * will be added to the .return_sites section.
 */
__weak bool arch_is_rethunk(struct symbol *sym)
{
	return false;
}

/*
 * Symbols that are embedded inside other instructions, because sometimes crazy
 * code exists. These are mostly ignored for validation purposes.
 */
__weak bool arch_is_embedded_insn(struct symbol *sym)
{
	return false;
}

/*
 * Find the relocation covering @insn, caching a negative result in
 * insn->no_reloc so repeated lookups stay cheap.
 */
static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
{
	struct reloc *reloc;

	if (insn->no_reloc)
		return NULL;

	if (!file)
		return NULL;

	reloc = find_reloc_by_dest_range(file->elf, insn->sec,
					 insn->offset, insn->len);
	if (!reloc) {
		insn->no_reloc = 1;
		return NULL;
	}

	return reloc;
}

/* Free the stack_op list attached to @insn. */
static void remove_insn_ops(struct instruction *insn)
{
	struct stack_op *op, *next;

	for (op = insn->stack_ops; op; op = next) {
		next = op->next;
		free(op);
	}
	insn->stack_ops = NULL;
}

/*
 * Classify a (sibling) call site: queue it on the static call / retpoline /
 * mcount lists, NOP out profiling calls in noinstr text, and mark dead ends.
 */
static int annotate_call_site(struct objtool_file *file,
			      struct instruction *insn, bool sibling)
{
	struct reloc *reloc = insn_reloc(file, insn);
	struct symbol *sym = insn_call_dest(insn);

	if (!sym)
		sym = reloc->sym;

	if (sym->static_call_tramp) {
		list_add_tail(&insn->call_node, &file->static_call_list);
		return 0;
	}

	if (sym->retpoline_thunk) {
		list_add_tail(&insn->call_node, &file->retpoline_call_list);
		return 0;
	}

	/*
	 * Many compilers cannot disable KCOV or sanitizer calls with a function
	 * attribute so they need a little help, NOP out any such calls from
	 * noinstr text.
	 */
	if (opts.hack_noinstr && insn->sec->noinstr && sym->profiling_func) {
		if (reloc)
			set_reloc_type(file->elf, reloc, R_NONE);

		if (elf_write_insn(file->elf, insn->sec,
				   insn->offset, insn->len,
				   sibling ? arch_ret_insn(insn->len)
					   : arch_nop_insn(insn->len))) {
			return -1;
		}

		insn->type = sibling ? INSN_RETURN : INSN_NOP;

		if (sibling) {
			/*
			 * We've replaced the tail-call JMP insn by two new
			 * insn: RET; INT3, except we only have a single struct
			 * insn here. Mark it retpoline_safe to avoid the SLS
			 * warning, instead of adding another insn.
			 */
			insn->retpoline_safe = true;
		}

		return 0;
	}

	if (opts.mcount && sym->fentry) {
		if (sibling)
			WARN_INSN(insn, "tail call to __fentry__ !?!?");
		if (opts.mnop) {
			if (reloc)
				set_reloc_type(file->elf, reloc, R_NONE);

			if (elf_write_insn(file->elf, insn->sec,
					   insn->offset, insn->len,
					   arch_nop_insn(insn->len))) {
				return -1;
			}

			insn->type = INSN_NOP;
		}

		list_add_tail(&insn->call_node, &file->mcount_loc_list);
		return 0;
	}

	if (insn->type == INSN_CALL && !insn->sec->init &&
	    !insn->_call_dest->embedded_insn)
		list_add_tail(&insn->call_node, &file->call_list);

	if (!sibling && dead_end_function(file, sym))
		insn->dead_end = true;

	return 0;
}

/* Record @dest as @insn's call destination and classify the call site. */
static int add_call_dest(struct objtool_file *file, struct instruction *insn,
			 struct symbol *dest, bool sibling)
{
	insn->_call_dest = dest;
	if (!dest)
		return 0;

	/*
	 * Whatever stack impact regular CALLs have, should be undone
	 * by the RETURN of the called function.
	 *
	 * Annotated intra-function calls retain the stack_ops but
	 * are converted to JUMP, see read_intra_function_calls().
	 */
	remove_insn_ops(insn);

	return annotate_call_site(file, insn, sibling);
}

static int add_retpoline_call(struct objtool_file *file, struct instruction *insn)
{
	/*
	 * Retpoline calls/jumps are really dynamic calls/jumps in disguise,
	 * so convert them accordingly.
	 */
	switch (insn->type) {
	case INSN_CALL:
		insn->type = INSN_CALL_DYNAMIC;
		break;
	case INSN_JUMP_UNCONDITIONAL:
		insn->type = INSN_JUMP_DYNAMIC;
		break;
	case INSN_JUMP_CONDITIONAL:
		insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;
		break;
	default:
		return 0;
	}

	insn->retpoline_safe = true;

	/*
	 * Whatever stack impact regular CALLs have, should be undone
	 * by the RETURN of the called function.
	 *
	 * Annotated intra-function calls retain the stack_ops but
	 * are converted to JUMP, see read_intra_function_calls().
	 */
	remove_insn_ops(insn);

	return annotate_call_site(file, insn, false);
}

static void add_return_call(struct objtool_file *file, struct instruction *insn, bool add)
{
	/*
	 * Return thunk tail calls are really just returns in disguise,
	 * so convert them accordingly.
	 */
	insn->type = INSN_RETURN;
	insn->retpoline_safe = true;

	/* @add is false for embedded-insn thunk targets which have no reloc */
	if (add)
		list_add_tail(&insn->call_node, &file->return_thunk_list);
}

/*
 * Return true if @insn is the first real instruction of its function,
 * optionally allowing for a leading ENDBR when IBT is enabled.
 */
static bool is_first_func_insn(struct objtool_file *file,
			       struct instruction *insn)
{
	struct symbol *func = insn_func(insn);

	if (!func)
		return false;

	if (insn->offset == func->offset)
		return true;

	/* Allow direct CALL/JMP past ENDBR */
	if (opts.ibt) {
		struct instruction *prev = prev_insn_same_sym(file, insn);

		if (prev && prev->type == INSN_ENDBR &&
		    insn->offset == func->offset + prev->len)
			return true;
	}

	return false;
}

/*
 * Find the destination instructions for all jumps.
 */
static int add_jump_destinations(struct objtool_file *file)
{
	struct instruction *insn;
	struct reloc *reloc;

	for_each_insn(file, insn) {
		struct symbol *func = insn_func(insn);
		struct instruction *dest_insn;
		struct section *dest_sec;
		struct symbol *dest_sym;
		unsigned long dest_off;

		if (!is_static_jump(insn))
			continue;

		if (insn->jump_dest) {
			/*
			 * handle_group_alt() may have previously set
			 * 'jump_dest' for some alternatives.
			 */
			continue;
		}

		reloc = insn_reloc(file, insn);
		if (!reloc) {
			dest_sec = insn->sec;
			dest_off = arch_jump_destination(insn);
			dest_sym = dest_sec->sym;
		} else {
			dest_sym = reloc->sym;
			if (is_undef_sym(dest_sym)) {
				if (dest_sym->retpoline_thunk) {
					if (add_retpoline_call(file, insn))
						return -1;
					continue;
				}

				if (dest_sym->return_thunk) {
					add_return_call(file, insn, true);
					continue;
				}

				/* External symbol */
				if (func) {
					/* External sibling call */
					if (add_call_dest(file, insn, dest_sym, true))
						return -1;
					continue;
				}

				/* Non-func asm code jumping to external symbol */
				continue;
			}

			dest_sec = dest_sym->sec;
			dest_off = dest_sym->offset + arch_insn_adjusted_addend(insn, reloc);
		}

		dest_insn = find_insn(file, dest_sec, dest_off);
		if (!dest_insn) {
			struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off);

			/*
			 * retbleed_untrain_ret() jumps to
			 * __x86_return_thunk(), but objtool can't find
			 * the thunk's starting RET instruction,
			 * because the RET is also in the middle of
			 * another instruction.  Objtool only knows
			 * about the outer instruction.
			 */
			if (sym && sym->embedded_insn) {
				add_return_call(file, insn, false);
				continue;
			}

			/*
			 * GCOV/KCOV dead code can jump to the end of
			 * the function/section.
			 */
			if (file->ignore_unreachables && func &&
			    dest_sec == insn->sec &&
			    dest_off == func->offset + func->len)
				continue;

			ERROR_INSN(insn, "can't find jump dest instruction at %s",
				   offstr(dest_sec, dest_off));
			return -1;
		}

		if (!dest_sym || is_sec_sym(dest_sym)) {
			dest_sym = dest_insn->sym;
			if (!dest_sym)
				goto set_jump_dest;
		}

		if (dest_sym->retpoline_thunk && dest_insn->offset == dest_sym->offset) {
			if (add_retpoline_call(file, insn))
				return -1;
			continue;
		}

		if (dest_sym->return_thunk && dest_insn->offset == dest_sym->offset) {
			add_return_call(file, insn, true);
			continue;
		}

		if (!insn->sym || insn->sym->pfunc == dest_sym->pfunc)
			goto set_jump_dest;

		/*
		 * Internal cross-function jump.
		 */

		if (is_first_func_insn(file, dest_insn)) {
			/* Internal sibling call */
			if (add_call_dest(file, insn, dest_sym, true))
				return -1;
			continue;
		}

set_jump_dest:
		insn->jump_dest = dest_insn;
	}

	return 0;
}

/* Prefer a function symbol at @offset, falling back to any symbol there. */
static struct symbol *find_call_destination(struct section *sec, unsigned long offset)
{
	struct symbol *call_dest;

	call_dest = find_func_by_offset(sec, offset);
	if (!call_dest)
		call_dest = find_symbol_by_offset(sec, offset);

	return call_dest;
}

/*
 * Find the destination instructions for all calls.
 */
static int add_call_destinations(struct objtool_file *file)
{
	struct instruction *insn;
	unsigned long dest_off;
	struct symbol *dest;
	struct reloc *reloc;

	for_each_insn(file, insn) {
		struct symbol *func = insn_func(insn);
		if (insn->type != INSN_CALL)
			continue;

		reloc = insn_reloc(file, insn);
		if (!reloc) {
			/* Direct call without a reloc: destination is encoded in the insn. */
			dest_off = arch_jump_destination(insn);
			dest = find_call_destination(insn->sec, dest_off);

			if (add_call_dest(file, insn, dest, false))
				return -1;

			if (func && func->ignore)
				continue;

			if (!insn_call_dest(insn)) {
				ERROR_INSN(insn, "unannotated intra-function call");
				return -1;
			}

			if (func && !is_func_sym(insn_call_dest(insn))) {
				ERROR_INSN(insn, "unsupported call to non-function");
				return -1;
			}

		} else if (is_sec_sym(reloc->sym)) {
			/* Section-relative reloc: resolve to the symbol at that offset. */
			dest_off = arch_insn_adjusted_addend(insn, reloc);
			dest = find_call_destination(reloc->sym->sec, dest_off);
			if (!dest) {
				ERROR_INSN(insn, "can't find call dest symbol at %s+0x%lx",
					   reloc->sym->sec->name, dest_off);
				return -1;
			}

			if (add_call_dest(file, insn, dest, false))
				return -1;

		} else if (reloc->sym->retpoline_thunk) {
			if (add_retpoline_call(file, insn))
				return -1;

		} else {
			if (add_call_dest(file, insn, reloc->sym, false))
				return -1;
		}
	}

	return 0;
}

/*
 * The .alternatives section requires some extra special care over and above
 * other special sections because alternatives are patched in place.
1757 */ 1758 static int handle_group_alt(struct objtool_file *file, 1759 struct special_alt *special_alt, 1760 struct instruction *orig_insn, 1761 struct instruction **new_insn) 1762 { 1763 struct instruction *last_new_insn = NULL, *insn, *nop = NULL; 1764 struct alt_group *orig_alt_group, *new_alt_group; 1765 unsigned long dest_off; 1766 1767 orig_alt_group = orig_insn->alt_group; 1768 if (!orig_alt_group) { 1769 struct instruction *last_orig_insn = NULL; 1770 1771 orig_alt_group = calloc(1, sizeof(*orig_alt_group)); 1772 if (!orig_alt_group) { 1773 ERROR_GLIBC("calloc"); 1774 return -1; 1775 } 1776 orig_alt_group->cfi = calloc(special_alt->orig_len, 1777 sizeof(struct cfi_state *)); 1778 if (!orig_alt_group->cfi) { 1779 ERROR_GLIBC("calloc"); 1780 return -1; 1781 } 1782 1783 insn = orig_insn; 1784 sec_for_each_insn_from(file, insn) { 1785 if (insn->offset >= special_alt->orig_off + special_alt->orig_len) 1786 break; 1787 1788 insn->alt_group = orig_alt_group; 1789 last_orig_insn = insn; 1790 } 1791 orig_alt_group->orig_group = NULL; 1792 orig_alt_group->first_insn = orig_insn; 1793 orig_alt_group->last_insn = last_orig_insn; 1794 orig_alt_group->nop = NULL; 1795 orig_alt_group->ignore = orig_insn->ignore_alts; 1796 orig_alt_group->feature = 0; 1797 } else { 1798 if (orig_alt_group->last_insn->offset + orig_alt_group->last_insn->len - 1799 orig_alt_group->first_insn->offset != special_alt->orig_len) { 1800 ERROR_INSN(orig_insn, "weirdly overlapping alternative! %ld != %d", 1801 orig_alt_group->last_insn->offset + 1802 orig_alt_group->last_insn->len - 1803 orig_alt_group->first_insn->offset, 1804 special_alt->orig_len); 1805 return -1; 1806 } 1807 } 1808 1809 new_alt_group = calloc(1, sizeof(*new_alt_group)); 1810 if (!new_alt_group) { 1811 ERROR_GLIBC("calloc"); 1812 return -1; 1813 } 1814 1815 if (special_alt->new_len < special_alt->orig_len) { 1816 /* 1817 * Insert a fake nop at the end to make the replacement 1818 * alt_group the same size as the original. 
This is needed to 1819 * allow propagate_alt_cfi() to do its magic. When the last 1820 * instruction affects the stack, the instruction after it (the 1821 * nop) will propagate the new state to the shared CFI array. 1822 */ 1823 nop = calloc(1, sizeof(*nop)); 1824 if (!nop) { 1825 ERROR_GLIBC("calloc"); 1826 return -1; 1827 } 1828 memset(nop, 0, sizeof(*nop)); 1829 1830 nop->sec = special_alt->new_sec; 1831 nop->offset = special_alt->new_off + special_alt->new_len; 1832 nop->len = special_alt->orig_len - special_alt->new_len; 1833 nop->type = INSN_NOP; 1834 nop->sym = orig_insn->sym; 1835 nop->alt_group = new_alt_group; 1836 nop->fake = 1; 1837 } 1838 1839 if (!special_alt->new_len) { 1840 *new_insn = nop; 1841 goto end; 1842 } 1843 1844 insn = *new_insn; 1845 sec_for_each_insn_from(file, insn) { 1846 struct reloc *alt_reloc; 1847 1848 if (insn->offset >= special_alt->new_off + special_alt->new_len) 1849 break; 1850 1851 last_new_insn = insn; 1852 1853 insn->sym = orig_insn->sym; 1854 insn->alt_group = new_alt_group; 1855 1856 /* 1857 * Since alternative replacement code is copy/pasted by the 1858 * kernel after applying relocations, generally such code can't 1859 * have relative-address relocation references to outside the 1860 * .altinstr_replacement section, unless the arch's 1861 * alternatives code can adjust the relative offsets 1862 * accordingly. 
1863 */ 1864 alt_reloc = insn_reloc(file, insn); 1865 if (alt_reloc && arch_pc_relative_reloc(alt_reloc) && 1866 !arch_support_alt_relocation(special_alt, insn, alt_reloc)) { 1867 1868 ERROR_INSN(insn, "unsupported relocation in alternatives section"); 1869 return -1; 1870 } 1871 1872 if (!is_static_jump(insn)) 1873 continue; 1874 1875 if (!insn->immediate) 1876 continue; 1877 1878 dest_off = arch_jump_destination(insn); 1879 if (dest_off == special_alt->new_off + special_alt->new_len) { 1880 insn->jump_dest = next_insn_same_sec(file, orig_alt_group->last_insn); 1881 if (!insn->jump_dest) { 1882 ERROR_INSN(insn, "can't find alternative jump destination"); 1883 return -1; 1884 } 1885 } 1886 } 1887 1888 if (!last_new_insn) { 1889 ERROR_FUNC(special_alt->new_sec, special_alt->new_off, 1890 "can't find last new alternative instruction"); 1891 return -1; 1892 } 1893 1894 end: 1895 new_alt_group->orig_group = orig_alt_group; 1896 new_alt_group->first_insn = *new_insn; 1897 new_alt_group->last_insn = last_new_insn; 1898 new_alt_group->nop = nop; 1899 new_alt_group->ignore = (*new_insn)->ignore_alts; 1900 new_alt_group->cfi = orig_alt_group->cfi; 1901 new_alt_group->feature = special_alt->feature; 1902 return 0; 1903 } 1904 1905 /* 1906 * A jump table entry can either convert a nop to a jump or a jump to a nop. 1907 * If the original instruction is a jump, make the alt entry an effective nop 1908 * by just skipping the original instruction. 
 */
static int handle_jump_alt(struct objtool_file *file,
			   struct special_alt *special_alt,
			   struct instruction *orig_insn,
			   struct instruction **new_insn)
{
	if (orig_insn->type != INSN_JUMP_UNCONDITIONAL &&
	    orig_insn->type != INSN_NOP) {

		ERROR_INSN(orig_insn, "unsupported instruction at jump label");
		return -1;
	}

	/*
	 * NOTE(review): key_addend bit 1 appears to request converting the
	 * site to a NOP under --hack-jump-label — confirm against the jump
	 * label key flag definitions.
	 */
	if (opts.hack_jump_label && special_alt->key_addend & 2) {
		struct reloc *reloc = insn_reloc(file, orig_insn);

		if (reloc)
			set_reloc_type(file->elf, reloc, R_NONE);

		if (elf_write_insn(file->elf, orig_insn->sec,
				   orig_insn->offset, orig_insn->len,
				   arch_nop_insn(orig_insn->len))) {
			return -1;
		}

		orig_insn->type = INSN_NOP;
	}

	if (orig_insn->type == INSN_NOP) {
		/* Track NOP jump-label sites by size for --stats reporting. */
		if (orig_insn->len == 2)
			file->jl_nop_short++;
		else
			file->jl_nop_long++;

		return 0;
	}

	if (orig_insn->len == 2)
		file->jl_short++;
	else
		file->jl_long++;

	/* Original is a JMP: the "alternative" is simply falling through. */
	*new_insn = next_insn_same_sec(file, orig_insn);
	return 0;
}

/*
 * Read all the special sections which have alternate instructions which can be
 * patched in or redirected to at runtime. Each instruction having alternate
 * instruction(s) has them added to its insn->alts list, which will be
 * traversed in validate_branch().
 */
static int add_special_section_alts(struct objtool_file *file)
{
	struct list_head special_alts;
	struct instruction *orig_insn, *new_insn;
	struct special_alt *special_alt, *tmp;
	enum alternative_type alt_type;
	struct alternative *alt;
	struct alternative *a;

	if (special_get_alts(file->elf, &special_alts))
		return -1;

	list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {

		orig_insn = find_insn(file, special_alt->orig_sec,
				      special_alt->orig_off);
		if (!orig_insn) {
			ERROR_FUNC(special_alt->orig_sec, special_alt->orig_off,
				   "special: can't find orig instruction");
			return -1;
		}

		new_insn = NULL;
		if (!special_alt->group || special_alt->new_len) {
			new_insn = find_insn(file, special_alt->new_sec,
					     special_alt->new_off);
			if (!new_insn) {
				ERROR_FUNC(special_alt->new_sec, special_alt->new_off,
					   "special: can't find new instruction");
				return -1;
			}
		}

		if (special_alt->group) {
			if (!special_alt->orig_len) {
				ERROR_INSN(orig_insn, "empty alternative entry");
				continue;
			}

			if (handle_group_alt(file, special_alt, orig_insn, &new_insn))
				return -1;

			alt_type = ALT_TYPE_INSTRUCTIONS;

		} else if (special_alt->jump_or_nop) {
			if (handle_jump_alt(file, special_alt, orig_insn, &new_insn))
				return -1;

			alt_type = ALT_TYPE_JUMP_TABLE;
		} else {
			alt_type = ALT_TYPE_EX_TABLE;
		}

		alt = calloc(1, sizeof(*alt));
		if (!alt) {
			ERROR_GLIBC("calloc");
			return -1;
		}

		alt->insn = new_insn;
		alt->type = alt_type;
		alt->next = NULL;

		/*
		 * Store alternatives in the same order they have been
		 * defined.
		 */
		if (!orig_insn->alts) {
			orig_insn->alts = alt;
		} else {
			for (a = orig_insn->alts; a->next; a = a->next)
				;
			a->next = alt;
		}

		list_del(&special_alt->list);
		free(special_alt);
	}

	if (opts.stats) {
		printf("jl\\\tNOP\tJMP\n");
		printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short);
		printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long);
	}

	return 0;
}

/* Default jump-table entry target: symbol offset plus reloc addend. */
__weak unsigned long arch_jump_table_sym_offset(struct reloc *reloc, struct reloc *table)
{
	return reloc->sym->offset + reloc_addend(reloc);
}

/*
 * Walk the relocations of @insn's jump table and add each in-function target
 * as an alternative branch destination on @insn.
 */
static int add_jump_table(struct objtool_file *file, struct instruction *insn)
{
	unsigned long table_size = insn_jump_table_size(insn);
	struct symbol *pfunc = insn_func(insn)->pfunc;
	struct reloc *table = insn_jump_table(insn);
	struct instruction *dest_insn;
	unsigned int prev_offset = 0;
	struct reloc *reloc = table;
	struct alternative *alt;
	unsigned long sym_offset;

	/*
	 * Each @reloc is a switch table relocation which points to the target
	 * instruction.
	 */
	for_each_reloc_from(table->sec, reloc) {

		/* Check for the end of the table: */
		if (table_size && reloc_offset(reloc) - reloc_offset(table) >= table_size)
			break;
		if (reloc != table && is_jump_table(reloc))
			break;

		/* Make sure the table entries are consecutive: */
		if (prev_offset && reloc_offset(reloc) != prev_offset + arch_reloc_size(reloc))
			break;

		sym_offset = arch_jump_table_sym_offset(reloc, table);

		/* Detect function pointers from contiguous objects: */
		if (reloc->sym->sec == pfunc->sec && sym_offset == pfunc->offset)
			break;

		/*
		 * Clang sometimes leaves dangling unused jump table entries
		 * which point to the end of the function. Ignore them.
		 */
		if (reloc->sym->sec == pfunc->sec &&
		    sym_offset == pfunc->offset + pfunc->len)
			goto next;

		dest_insn = find_insn(file, reloc->sym->sec, sym_offset);
		if (!dest_insn)
			break;

		/* Make sure the destination is in the same function: */
		if (!insn_func(dest_insn) || insn_func(dest_insn)->pfunc != pfunc)
			break;

		alt = calloc(1, sizeof(*alt));
		if (!alt) {
			ERROR_GLIBC("calloc");
			return -1;
		}

		alt->insn = dest_insn;
		alt->next = insn->alts;
		insn->alts = alt;
next:
		prev_offset = reloc_offset(reloc);
	}

	if (!prev_offset) {
		ERROR_INSN(insn, "can't find switch jump table");
		return -1;
	}

	return 0;
}

/*
 * find_jump_table() - Given a dynamic jump, find the switch jump table
 * associated with it.
 */
static void find_jump_table(struct objtool_file *file, struct symbol *func,
			    struct instruction *insn)
{
	struct reloc *table_reloc;
	struct instruction *dest_insn, *orig_insn = insn;
	unsigned long table_size;
	unsigned long sym_offset;

	/*
	 * Backward search using the @first_jump_src links, these help avoid
	 * much of the 'in between' code. Which avoids us getting confused by
	 * it.
	 */
	for (;
	     insn && insn_func(insn) && insn_func(insn)->pfunc == func;
	     insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) {

		/* Don't walk past another dynamic jump; its table isn't ours. */
		if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
			break;

		/* allow small jumps within the range */
		if (insn->type == INSN_JUMP_UNCONDITIONAL &&
		    insn->jump_dest &&
		    (insn->jump_dest->offset <= insn->offset ||
		     insn->jump_dest->offset > orig_insn->offset))
			break;

		table_reloc = arch_find_switch_table(file, insn, &table_size);
		if (!table_reloc)
			continue;

		sym_offset = table_reloc->sym->offset + reloc_addend(table_reloc);

		/* The first table entry must land inside this function. */
		dest_insn = find_insn(file, table_reloc->sym->sec, sym_offset);
		if (!dest_insn || !insn_func(dest_insn) || insn_func(dest_insn)->pfunc != func)
			continue;

		set_jump_table(table_reloc);
		orig_insn->_jump_table = table_reloc;
		orig_insn->_jump_table_size = table_size;

		break;
	}
}

/*
 * First pass: Mark the head of each jump table so that in the next pass,
 * we know when a given jump table ends and the next one starts.
 */
static void mark_func_jump_tables(struct objtool_file *file,
				  struct symbol *func)
{
	struct instruction *insn, *last = NULL;

	func_for_each_insn(file, func, insn) {
		if (!last)
			last = insn;

		/*
		 * Store back-pointers for unconditional forward jumps such
		 * that find_jump_table() can back-track using those and
		 * avoid some potentially confusing code.
		 */
		if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
		    insn->offset > last->offset &&
		    insn->jump_dest->offset > insn->offset &&
		    !insn->jump_dest->first_jump_src) {

			insn->jump_dest->first_jump_src = insn;
			last = insn->jump_dest;
		}

		if (insn->type != INSN_JUMP_DYNAMIC)
			continue;

		find_jump_table(file, func, insn);
	}
}

/* Second pass: expand each marked jump table into insn->alts entries. */
static int add_func_jump_tables(struct objtool_file *file,
				struct symbol *func)
{
	struct instruction *insn;

	func_for_each_insn(file, func, insn) {
		if (!insn_jump_table(insn))
			continue;

		if (add_jump_table(file, insn))
			return -1;
	}

	return 0;
}

/*
 * For some switch statements, gcc generates a jump table in the .rodata
 * section which contains a list of addresses within the function to jump to.
 * This finds these jump tables and adds them to the insn->alts lists.
 */
static int add_jump_table_alts(struct objtool_file *file)
{
	struct symbol *func;

	if (!file->rodata)
		return 0;

	for_each_sym(file->elf, func) {
		if (!is_func_sym(func) || func->alias != func)
			continue;

		mark_func_jump_tables(file, func);
		if (add_func_jump_tables(file, func))
			return -1;
	}

	return 0;
}

/* Initialize @state to the architecture's default function-entry CFI. */
static void set_func_state(struct cfi_state *state)
{
	state->cfa = initial_func_cfi.cfa;
	memcpy(&state->regs, &initial_func_cfi.regs,
	       CFI_NUM_REGS * sizeof(struct cfi_reg));
	state->stack_size = initial_func_cfi.cfa.offset;
	state->type = UNWIND_HINT_TYPE_CALL;
}

/*
 * Parse .discard.unwind_hints and attach the resulting CFI state to the
 * instruction each hint points at.
 */
static int read_unwind_hints(struct objtool_file *file)
{
	struct cfi_state cfi = init_cfi;
	struct section *sec;
	struct unwind_hint *hint;
	struct instruction *insn;
	struct reloc *reloc;
	unsigned long offset;
	int i;

	sec = find_section_by_name(file->elf,
".discard.unwind_hints"); 2267 if (!sec) 2268 return 0; 2269 2270 if (!sec->rsec) { 2271 ERROR("missing .rela.discard.unwind_hints section"); 2272 return -1; 2273 } 2274 2275 if (sec_size(sec) % sizeof(struct unwind_hint)) { 2276 ERROR("struct unwind_hint size mismatch"); 2277 return -1; 2278 } 2279 2280 file->hints = true; 2281 2282 for (i = 0; i < sec_size(sec) / sizeof(struct unwind_hint); i++) { 2283 hint = (struct unwind_hint *)sec->data->d_buf + i; 2284 2285 reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint)); 2286 if (!reloc) { 2287 ERROR("can't find reloc for unwind_hints[%d]", i); 2288 return -1; 2289 } 2290 2291 offset = reloc->sym->offset + reloc_addend(reloc); 2292 2293 insn = find_insn(file, reloc->sym->sec, offset); 2294 if (!insn) { 2295 ERROR("can't find insn for unwind_hints[%d]", i); 2296 return -1; 2297 } 2298 2299 insn->hint = true; 2300 2301 if (hint->type == UNWIND_HINT_TYPE_UNDEFINED) { 2302 insn->cfi = &force_undefined_cfi; 2303 continue; 2304 } 2305 2306 if (hint->type == UNWIND_HINT_TYPE_SAVE) { 2307 insn->hint = false; 2308 insn->save = true; 2309 continue; 2310 } 2311 2312 if (hint->type == UNWIND_HINT_TYPE_RESTORE) { 2313 insn->restore = true; 2314 continue; 2315 } 2316 2317 if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) { 2318 struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset); 2319 2320 if (sym && is_global_sym(sym)) { 2321 if (opts.ibt && insn->type != INSN_ENDBR && !insn->noendbr) { 2322 ERROR_INSN(insn, "UNWIND_HINT_IRET_REGS without ENDBR"); 2323 return -1; 2324 } 2325 } 2326 } 2327 2328 if (hint->type == UNWIND_HINT_TYPE_FUNC) { 2329 insn->cfi = &func_cfi; 2330 continue; 2331 } 2332 2333 if (insn->cfi) 2334 cfi = *(insn->cfi); 2335 2336 if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) { 2337 ERROR_INSN(insn, "unsupported unwind_hint sp base reg %d", hint->sp_reg); 2338 return -1; 2339 } 2340 2341 cfi.cfa.offset = bswap_if_needed(file->elf, hint->sp_offset); 2342 cfi.type = hint->type; 2343 
cfi.signal = hint->signal; 2344 2345 insn->cfi = cfi_hash_find_or_add(&cfi); 2346 } 2347 2348 return 0; 2349 } 2350 2351 static int read_annotate(struct objtool_file *file, 2352 int (*func)(struct objtool_file *file, int type, struct instruction *insn)) 2353 { 2354 struct section *sec; 2355 struct instruction *insn; 2356 struct reloc *reloc; 2357 uint64_t offset; 2358 int type; 2359 2360 sec = find_section_by_name(file->elf, ".discard.annotate_insn"); 2361 if (!sec) 2362 return 0; 2363 2364 if (!sec->rsec) 2365 return 0; 2366 2367 if (sec->sh.sh_entsize != 8) { 2368 static bool warned = false; 2369 if (!warned && opts.verbose) { 2370 WARN("%s: dodgy linker, sh_entsize != 8", sec->name); 2371 warned = true; 2372 } 2373 sec->sh.sh_entsize = 8; 2374 } 2375 2376 if (sec_num_entries(sec) != sec_num_entries(sec->rsec)) { 2377 ERROR("bad .discard.annotate_insn section: missing relocs"); 2378 return -1; 2379 } 2380 2381 for_each_reloc(sec->rsec, reloc) { 2382 type = annotype(file->elf, sec, reloc); 2383 offset = reloc->sym->offset + reloc_addend(reloc); 2384 insn = find_insn(file, reloc->sym->sec, offset); 2385 2386 if (!insn) { 2387 ERROR("bad .discard.annotate_insn entry: %d of type %d", reloc_idx(reloc), type); 2388 return -1; 2389 } 2390 2391 if (func(file, type, insn)) 2392 return -1; 2393 } 2394 2395 return 0; 2396 } 2397 2398 static int __annotate_early(struct objtool_file *file, int type, struct instruction *insn) 2399 { 2400 switch (type) { 2401 2402 /* Must be before add_special_section_alts() */ 2403 case ANNOTYPE_IGNORE_ALTS: 2404 insn->ignore_alts = true; 2405 break; 2406 2407 /* 2408 * Must be before read_unwind_hints() since that needs insn->noendbr. 
2409 */ 2410 case ANNOTYPE_NOENDBR: 2411 insn->noendbr = 1; 2412 break; 2413 2414 default: 2415 break; 2416 } 2417 2418 return 0; 2419 } 2420 2421 static int __annotate_ifc(struct objtool_file *file, int type, struct instruction *insn) 2422 { 2423 unsigned long dest_off; 2424 2425 if (type != ANNOTYPE_INTRA_FUNCTION_CALL) 2426 return 0; 2427 2428 if (insn->type != INSN_CALL) { 2429 ERROR_INSN(insn, "intra_function_call not a direct call"); 2430 return -1; 2431 } 2432 2433 /* 2434 * Treat intra-function CALLs as JMPs, but with a stack_op. 2435 * See add_call_destinations(), which strips stack_ops from 2436 * normal CALLs. 2437 */ 2438 insn->type = INSN_JUMP_UNCONDITIONAL; 2439 2440 dest_off = arch_jump_destination(insn); 2441 insn->jump_dest = find_insn(file, insn->sec, dest_off); 2442 if (!insn->jump_dest) { 2443 ERROR_INSN(insn, "can't find call dest at %s+0x%lx", 2444 insn->sec->name, dest_off); 2445 return -1; 2446 } 2447 2448 return 0; 2449 } 2450 2451 static int __annotate_late(struct objtool_file *file, int type, struct instruction *insn) 2452 { 2453 struct symbol *sym; 2454 2455 switch (type) { 2456 case ANNOTYPE_NOENDBR: 2457 /* early */ 2458 break; 2459 2460 case ANNOTYPE_RETPOLINE_SAFE: 2461 if (insn->type != INSN_JUMP_DYNAMIC && 2462 insn->type != INSN_CALL_DYNAMIC && 2463 insn->type != INSN_RETURN && 2464 insn->type != INSN_NOP) { 2465 ERROR_INSN(insn, "retpoline_safe hint not an indirect jump/call/ret/nop"); 2466 return -1; 2467 } 2468 2469 insn->retpoline_safe = true; 2470 break; 2471 2472 case ANNOTYPE_INSTR_BEGIN: 2473 insn->instr++; 2474 break; 2475 2476 case ANNOTYPE_INSTR_END: 2477 insn->instr--; 2478 break; 2479 2480 case ANNOTYPE_UNRET_BEGIN: 2481 insn->unret = 1; 2482 break; 2483 2484 case ANNOTYPE_IGNORE_ALTS: 2485 /* early */ 2486 break; 2487 2488 case ANNOTYPE_INTRA_FUNCTION_CALL: 2489 /* ifc */ 2490 break; 2491 2492 case ANNOTYPE_REACHABLE: 2493 insn->dead_end = false; 2494 break; 2495 2496 case ANNOTYPE_NOCFI: 2497 sym = insn->sym; 2498 
if (!sym) { 2499 ERROR_INSN(insn, "dodgy NOCFI annotation"); 2500 return -1; 2501 } 2502 insn->sym->nocfi = 1; 2503 break; 2504 2505 default: 2506 ERROR_INSN(insn, "Unknown annotation type: %d", type); 2507 return -1; 2508 } 2509 2510 return 0; 2511 } 2512 2513 /* 2514 * Return true if name matches an instrumentation function, where calls to that 2515 * function from noinstr code can safely be removed, but compilers won't do so. 2516 */ 2517 static bool is_profiling_func(const char *name) 2518 { 2519 /* 2520 * Many compilers cannot disable KCOV with a function attribute. 2521 */ 2522 if (!strncmp(name, "__sanitizer_cov_", 16)) 2523 return true; 2524 2525 return false; 2526 } 2527 2528 static int classify_symbols(struct objtool_file *file) 2529 { 2530 struct symbol *func; 2531 size_t len; 2532 2533 for_each_sym(file->elf, func) { 2534 if (is_notype_sym(func) && strstarts(func->name, ".L")) 2535 func->local_label = true; 2536 2537 if (!is_global_sym(func)) 2538 continue; 2539 2540 if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR, 2541 strlen(STATIC_CALL_TRAMP_PREFIX_STR))) 2542 func->static_call_tramp = true; 2543 2544 if (arch_is_retpoline(func)) 2545 func->retpoline_thunk = true; 2546 2547 if (arch_is_rethunk(func)) 2548 func->return_thunk = true; 2549 2550 if (arch_is_embedded_insn(func)) 2551 func->embedded_insn = true; 2552 2553 if (arch_ftrace_match(func->name)) 2554 func->fentry = true; 2555 2556 if (is_profiling_func(func->name)) 2557 func->profiling_func = true; 2558 2559 len = strlen(func->name); 2560 if (len > sym_name_max_len) 2561 sym_name_max_len = len; 2562 } 2563 2564 return 0; 2565 } 2566 2567 static void mark_rodata(struct objtool_file *file) 2568 { 2569 struct section *sec; 2570 bool found = false; 2571 2572 /* 2573 * Search for the following rodata sections, each of which can 2574 * potentially contain jump tables: 2575 * 2576 * - .rodata: can contain GCC switch tables 2577 * - .rodata.<func>: same, if -fdata-sections is being used 2578 * - 
.data.rel.ro.c_jump_table: contains C annotated jump tables
	 *
	 * .rodata.str1.* sections are ignored; they don't contain jump tables.
	 */
	for_each_sec(file->elf, sec) {
		if ((!strncmp(sec->name, ".rodata", 7) &&
		     !strstr(sec->name, ".str1.")) ||
		    !strncmp(sec->name, ".data.rel.ro", 12)) {
			sec->rodata = true;
			found = true;
		}
	}

	file->rodata = found;
}

/*
 * Flag instructions which live in a symbol "hole" (not covered by any
 * symbol) as dead code left behind by the linker dropping a weak symbol.
 * Only meaningful for whole-archive (linked object) runs.
 */
static void mark_holes(struct objtool_file *file)
{
	struct instruction *insn;
	bool in_hole = false;

	if (!opts.link)
		return;

	/*
	 * Whole archive runs might encounter dead code from weak symbols.
	 * This is where the linker will have dropped the weak symbol in
	 * favour of a regular symbol, but leaves the code in place.
	 */
	for_each_insn(file, insn) {
		if (insn->sym || !find_symbol_hole_containing(insn->sec, insn->offset)) {
			in_hole = false;
			continue;
		}

		/* Skip function padding and pfx code */
		if (!in_hole && insn->type == INSN_NOP)
			continue;

		in_hole = true;
		insn->hole = 1;

		/*
		 * If this hole jumps to a .cold function, mark it ignore.
		 */
		if (insn->jump_dest) {
			struct symbol *dest_func = insn_func(insn->jump_dest);

			if (dest_func && dest_func->cold)
				dest_func->ignore = true;
		}
	}
}

/* True when any enabled option requires the validate_branch() pass. */
static bool validate_branch_enabled(void)
{
	return opts.stackval ||
	       opts.orc ||
	       opts.uaccess ||
	       opts.checksum;
}

/*
 * Decode the object into instructions and wire up all cross references:
 * jump/call destinations, alternatives, jump tables, unwind hints and
 * annotations.  The ordering of the steps below matters; see the
 * comments at the individual calls.
 */
static int decode_sections(struct objtool_file *file)
{
	file->klp = is_livepatch_module(file);

	mark_rodata(file);

	if (init_pv_ops(file))
		return -1;

	/*
	 * Must be before add_{jump_call}_destination.
	 */
	if (classify_symbols(file))
		return -1;

	if (decode_instructions(file))
		return -1;

	if (add_ignores(file))
		return -1;

	add_uaccess_safe(file);

	if (read_annotate(file, __annotate_early))
		return -1;

	/*
	 * Must be before add_jump_destinations(), which depends on 'func'
	 * being set for alternatives, to enable proper sibling call detection.
	 */
	if (validate_branch_enabled() || opts.noinstr || opts.hack_jump_label || opts.disas) {
		if (add_special_section_alts(file))
			return -1;
	}

	if (add_jump_destinations(file))
		return -1;

	/*
	 * Must be before add_call_destination(); it changes INSN_CALL to
	 * INSN_JUMP.
	 */
	if (read_annotate(file, __annotate_ifc))
		return -1;

	if (add_call_destinations(file))
		return -1;

	if (add_jump_table_alts(file))
		return -1;

	if (read_unwind_hints(file))
		return -1;

	/* Must be after add_jump_destinations() */
	mark_holes(file);

	/*
	 * Must be after add_call_destinations() such that it can override
	 * dead_end_function() marks.
	 */
	if (read_annotate(file, __annotate_late))
		return -1;

	return 0;
}

/*
 * Direct calls whose targets don't need a proper frame pointer setup
 * at the call site (ftrace entries, embedded instructions).
 */
static bool is_special_call(struct instruction *insn)
{
	if (insn->type == INSN_CALL) {
		struct symbol *dest = insn_call_dest(insn);

		if (!dest)
			return false;

		if (dest->fentry || dest->embedded_insn)
			return true;
	}

	return false;
}

/*
 * True if the tracked CFI state no longer matches the state a function
 * has on entry; used to reject returns/sibling calls with a modified
 * stack frame.
 */
static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state)
{
	struct cfi_state *cfi = &state->cfi;
	int i;

	if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap)
		return true;

	if (cfi->cfa.offset != initial_func_cfi.cfa.offset)
		return true;

	/*
	 * NOTE(review): stack_size is compared against the initial CFA
	 * *offset* -- presumably the two are equal at function entry, so
	 * this is intentional rather than a typo; confirm against the
	 * arch's initial_func_cfi setup.
	 */
	if (cfi->stack_size != initial_func_cfi.cfa.offset)
		return true;

	for (i = 0; i < CFI_NUM_REGS; i++) {
		if (cfi->regs[i].base != initial_func_cfi.regs[i].base ||
		    cfi->regs[i].offset != initial_func_cfi.regs[i].offset)
			return true;
	}

	return false;
}

/* Is the register saved at the expected CFA-relative slot? */
static bool check_reg_frame_pos(const struct cfi_reg *reg,
				int expected_offset)
{
	return reg->base == CFI_CFA &&
	       reg->offset == expected_offset;
}

/*
 * True if a proper frame (saved BP and return address at their expected
 * CFA-relative slots, or an established DRAP frame) is in place.
 */
static bool has_valid_stack_frame(struct insn_state *state)
{
	struct cfi_state *cfi = &state->cfi;

	if (cfi->cfa.base == CFI_BP &&
	    check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) &&
	    check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8))
		return true;

	if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP)
		return true;

	return false;
}

/*
 * Simplified CFI tracking for UNWIND_HINT_TYPE_REGS{,_PARTIAL} regions:
 * only the CFA offset is maintained, register saves aren't tracked.
 */
static int update_cfi_state_regs(struct instruction *insn,
				 struct cfi_state *cfi,
				 struct stack_op *op)
{
	struct cfi_reg *cfa = &cfi->cfa;

	if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
		return 0;

	/* push */
	if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
		cfa->offset += 8;

	/* pop */
	if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
		cfa->offset -= 8;

	/* add immediate to sp */
	if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD &&
	    op->dest.reg == CFI_SP && op->src.reg == CFI_SP)
		cfa->offset -= op->src.offset;

	return 0;
}

/*
 * Record where a callee-saved register was stored.  Only the first save
 * is recorded; a nested save keeps the original location.
 */
static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset)
{
	if (arch_callee_saved_reg(reg) &&
	    cfi->regs[reg].base == CFI_UNDEFINED) {
		cfi->regs[reg].base = base;
		cfi->regs[reg].offset = offset;
	}
}

/* Reset a register to its function-entry (unsaved) state. */
static void restore_reg(struct cfi_state *cfi, unsigned char reg)
{
	cfi->regs[reg].base = initial_func_cfi.regs[reg].base;
	cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset;
}

/*
 * A note about DRAP stack alignment:
 *
 * GCC has the concept of a DRAP register, which is used to help keep track of
 * the stack pointer when aligning the stack.  r10 or r13 is used as the DRAP
 * register.  The typical DRAP pattern is:
 *
 *   4c 8d 54 24 08		lea    0x8(%rsp),%r10
 *   48 83 e4 c0		and    $0xffffffffffffffc0,%rsp
 *   41 ff 72 f8		pushq  -0x8(%r10)
 *   55				push   %rbp
 *   48 89 e5			mov    %rsp,%rbp
 *				(more pushes)
 *   41 52			push   %r10
 *				...
 *   41 5a			pop    %r10
 *				(more pops)
 *   5d				pop    %rbp
 *   49 8d 62 f8		lea    -0x8(%r10),%rsp
 *   c3				retq
 *
 * There are some variations in the epilogues, like:
 *
 *   5b				pop    %rbx
 *   41 5a			pop    %r10
 *   41 5c			pop    %r12
 *   41 5d			pop    %r13
 *   41 5e			pop    %r14
 *   c9				leaveq
 *   49 8d 62 f8		lea    -0x8(%r10),%rsp
 *   c3				retq
 *
 * and:
 *
 *   4c 8b 55 e8		mov    -0x18(%rbp),%r10
 *   48 8b 5d e0		mov    -0x20(%rbp),%rbx
 *   4c 8b 65 f0		mov    -0x10(%rbp),%r12
 *   4c 8b 6d f8		mov    -0x8(%rbp),%r13
 *   c9				leaveq
 *   49 8d 62 f8		lea    -0x8(%r10),%rsp
 *   c3				retq
 *
 * Sometimes r13 is used as the DRAP register, in which case it's saved and
 * restored beforehand:
 *
 *   41 55			push   %r13
 *   4c 8d 6c 24 10		lea    0x10(%rsp),%r13
 *   48 83 e4 f0		and    $0xfffffffffffffff0,%rsp
 *				...
 *   49 8d 65 f0		lea    -0x10(%r13),%rsp
 *   41 5d			pop    %r13
 *   c3				retq
 */

/*
 * Apply a single stack_op to the tracked CFI state: maintain the CFA,
 * the stack size, and the recorded save slots of callee-saved registers.
 * Returns non-zero on an unsupported stack modification.
 */
static int update_cfi_state(struct instruction *insn,
			    struct instruction *next_insn,
			    struct cfi_state *cfi, struct stack_op *op)
{
	struct cfi_reg *cfa = &cfi->cfa;
	struct cfi_reg *regs = cfi->regs;

	/* ignore UNWIND_HINT_UNDEFINED regions */
	if (cfi->force_undefined)
		return 0;

	/* stack operations don't make sense with an undefined CFA */
	if (cfa->base == CFI_UNDEFINED) {
		if (insn_func(insn)) {
			WARN_INSN(insn, "undefined stack state");
			return 1;
		}
		return 0;
	}

	if (cfi->type == UNWIND_HINT_TYPE_REGS ||
	    cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL)
		return update_cfi_state_regs(insn, cfi, op);

	switch (op->dest.type) {

	case OP_DEST_REG:
		switch (op->src.type) {

		case OP_SRC_REG:
			if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
			    cfa->base == CFI_SP &&
			    check_reg_frame_pos(&regs[CFI_BP], -cfa->offset)) {

				/* mov %rsp, %rbp */
				cfa->base = op->dest.reg;
				cfi->bp_scratch = false;
			}

			else if (op->src.reg == CFI_SP &&
				 op->dest.reg == CFI_BP && cfi->drap) {

				/* drap: mov %rsp, %rbp */
				regs[CFI_BP].base = CFI_BP;
				regs[CFI_BP].offset = -cfi->stack_size;
				cfi->bp_scratch = false;
			}

			else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {

				/*
				 * mov %rsp, %reg
				 *
				 * This is needed for the rare case where GCC
				 * does:
				 *
				 *   mov    %rsp, %rax
				 *   ...
				 *   mov    %rax, %rsp
				 */
				cfi->vals[op->dest.reg].base = CFI_CFA;
				cfi->vals[op->dest.reg].offset = -cfi->stack_size;
			}

			else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
				 (cfa->base == CFI_BP || cfa->base == cfi->drap_reg)) {

				/*
				 * mov %rbp, %rsp
				 *
				 * Restore the original stack pointer (Clang).
				 */
				cfi->stack_size = -cfi->regs[CFI_BP].offset;
			}

			else if (op->dest.reg == cfa->base) {

				/* mov %reg, %rsp */
				if (cfa->base == CFI_SP &&
				    cfi->vals[op->src.reg].base == CFI_CFA) {

					/*
					 * This is needed for the rare case
					 * where GCC does something dumb like:
					 *
					 *   lea    0x8(%rsp), %rcx
					 *   ...
					 *   mov    %rcx, %rsp
					 */
					cfa->offset = -cfi->vals[op->src.reg].offset;
					cfi->stack_size = cfa->offset;

				} else if (cfa->base == CFI_SP &&
					   cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
					   cfi->vals[op->src.reg].offset == cfa->offset) {

					/*
					 * Stack swizzle:
					 *
					 * 1: mov %rsp, (%[tos])
					 * 2: mov %[tos], %rsp
					 *    ...
					 * 3: pop %rsp
					 *
					 * Where:
					 *
					 * 1 - places a pointer to the previous
					 *     stack at the Top-of-Stack of the
					 *     new stack.
					 *
					 * 2 - switches to the new stack.
					 *
					 * 3 - pops the Top-of-Stack to restore
					 *     the original stack.
					 *
					 * Note: we set base to SP_INDIRECT
					 * here and preserve offset.  Therefore
					 * when the unwinder reaches ToS it
					 * will dereference SP and then add the
					 * offset to find the next frame, IOW:
					 * (%rsp) + offset.
					 */
					cfa->base = CFI_SP_INDIRECT;

				} else {
					/* CFA base clobbered: give up tracking */
					cfa->base = CFI_UNDEFINED;
					cfa->offset = 0;
				}
			}

			else if (op->dest.reg == CFI_SP &&
				 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
				 cfi->vals[op->src.reg].offset == cfa->offset) {

				/*
				 * The same stack swizzle case 2) as above. But
				 * because we can't change cfa->base, case 3)
				 * will become a regular POP. Pretend we're a
				 * PUSH so things don't go unbalanced.
				 */
				cfi->stack_size += 8;
			}


			break;

		case OP_SRC_ADD:
			if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) {

				/* add imm, %rsp */
				cfi->stack_size -= op->src.offset;
				if (cfa->base == CFI_SP)
					cfa->offset -= op->src.offset;
				break;
			}

			if (op->dest.reg == CFI_BP && op->src.reg == CFI_SP &&
			    insn->sym->frame_pointer) {
				/* addi.d fp,sp,imm on LoongArch */
				if (cfa->base == CFI_SP && cfa->offset == op->src.offset) {
					cfa->base = CFI_BP;
					cfa->offset = 0;
				}
				break;
			}

			if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {
				/* addi.d sp,fp,imm on LoongArch */
				if (cfa->base == CFI_BP && cfa->offset == 0) {
					if (insn->sym->frame_pointer) {
						cfa->base = CFI_SP;
						cfa->offset = -op->src.offset;
					}
				} else {
					/* lea disp(%rbp), %rsp */
					cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
				}
				break;
			}

			if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {

				/* drap: lea disp(%rsp), %drap */
				cfi->drap_reg = op->dest.reg;

				/*
				 * lea disp(%rsp), %reg
				 *
				 * This is needed for the rare case where GCC
				 * does something dumb like:
				 *
				 *   lea    0x8(%rsp), %rcx
				 *   ...
				 *   mov    %rcx, %rsp
				 */
				cfi->vals[op->dest.reg].base = CFI_CFA;
				cfi->vals[op->dest.reg].offset = \
					-cfi->stack_size + op->src.offset;

				break;
			}

			if (cfi->drap && op->dest.reg == CFI_SP &&
			    op->src.reg == cfi->drap_reg) {

				/* drap: lea disp(%drap), %rsp */
				cfa->base = CFI_SP;
				cfa->offset = cfi->stack_size = -op->src.offset;
				cfi->drap_reg = CFI_UNDEFINED;
				cfi->drap = false;
				break;
			}

			if (op->dest.reg == cfi->cfa.base && !(next_insn && next_insn->hint)) {
				WARN_INSN(insn, "unsupported stack register modification");
				return -1;
			}

			break;

		case OP_SRC_AND:
			if (op->dest.reg != CFI_SP ||
			    (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
			    (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
				WARN_INSN(insn, "unsupported stack pointer realignment");
				return -1;
			}

			if (cfi->drap_reg != CFI_UNDEFINED) {
				/* drap: and imm, %rsp */
				cfa->base = cfi->drap_reg;
				cfa->offset = cfi->stack_size = 0;
				cfi->drap = true;
			}

			/*
			 * Older versions of GCC (4.8ish) realign the stack
			 * without DRAP, with a frame pointer.
			 */

			break;

		case OP_SRC_POP:
		case OP_SRC_POPF:
			if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) {

				/* pop %rsp; # restore from a stack swizzle */
				cfa->base = CFI_SP;
				break;
			}

			if (!cfi->drap && op->dest.reg == cfa->base) {

				/* pop %rbp */
				cfa->base = CFI_SP;
			}

			if (cfi->drap && cfa->base == CFI_BP_INDIRECT &&
			    op->dest.reg == cfi->drap_reg &&
			    cfi->drap_offset == -cfi->stack_size) {

				/* drap: pop %drap */
				cfa->base = cfi->drap_reg;
				cfa->offset = 0;
				cfi->drap_offset = -1;

			} else if (cfi->stack_size == -regs[op->dest.reg].offset) {

				/* pop %reg */
				restore_reg(cfi, op->dest.reg);
			}

			cfi->stack_size -= 8;
			if (cfa->base == CFI_SP)
				cfa->offset -= 8;

			break;

		case OP_SRC_REG_INDIRECT:
			if (!cfi->drap && op->dest.reg == cfa->base &&
			    op->dest.reg == CFI_BP) {

				/* mov disp(%rsp), %rbp */
				cfa->base = CFI_SP;
				cfa->offset = cfi->stack_size;
			}

			if (cfi->drap && op->src.reg == CFI_BP &&
			    op->src.offset == cfi->drap_offset) {

				/* drap: mov disp(%rbp), %drap */
				cfa->base = cfi->drap_reg;
				cfa->offset = 0;
				cfi->drap_offset = -1;
			}

			if (cfi->drap && op->src.reg == CFI_BP &&
			    op->src.offset == regs[op->dest.reg].offset) {

				/* drap: mov disp(%rbp), %reg */
				restore_reg(cfi, op->dest.reg);

			} else if (op->src.reg == cfa->base &&
				   op->src.offset == regs[op->dest.reg].offset + cfa->offset) {

				/* mov disp(%rbp), %reg */
				/* mov disp(%rsp), %reg */
				restore_reg(cfi, op->dest.reg);

			} else if (op->src.reg == CFI_SP &&
				   op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) {

				/* mov disp(%rsp), %reg */
				restore_reg(cfi, op->dest.reg);
			}

			break;

		default:
			WARN_INSN(insn, "unknown stack-related instruction");
			return -1;
		}

		break;

	case OP_DEST_PUSH:
	case OP_DEST_PUSHF:
		cfi->stack_size += 8;
		if (cfa->base == CFI_SP)
			cfa->offset += 8;

		if (op->src.type != OP_SRC_REG)
			break;

		if (cfi->drap) {
			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {

				/* drap: push %drap */
				cfa->base = CFI_BP_INDIRECT;
				cfa->offset = -cfi->stack_size;

				/* save drap so we know when to restore it */
				cfi->drap_offset = -cfi->stack_size;

			} else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) {

				/* drap: push %rbp */
				cfi->stack_size = 0;

			} else {

				/* drap: push %reg */
				save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size);
			}

		} else {

			/* push %reg */
			save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size);
		}

		/* detect when asm code uses rbp as a scratch register */
		if (opts.stackval && insn_func(insn) && op->src.reg == CFI_BP &&
		    cfa->base != CFI_BP)
			cfi->bp_scratch = true;
		break;

	case OP_DEST_REG_INDIRECT:

		if (cfi->drap) {
			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {

				/* drap: mov %drap, disp(%rbp) */
				cfa->base = CFI_BP_INDIRECT;
				cfa->offset = op->dest.offset;

				/* save drap offset so we know when to restore it */
				cfi->drap_offset = op->dest.offset;
			} else {

				/* drap: mov reg, disp(%rbp) */
				save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset);
			}

		} else if (op->dest.reg == cfa->base) {

			/* mov reg, disp(%rbp) */
			/* mov reg, disp(%rsp) */
			save_reg(cfi, op->src.reg, CFI_CFA,
				 op->dest.offset - cfi->cfa.offset);

		} else if (op->dest.reg == CFI_SP) {

			/* mov reg, disp(%rsp) */
			save_reg(cfi, op->src.reg, CFI_CFA,
				 op->dest.offset - cfi->stack_size);

		} else if (op->src.reg == CFI_SP && op->dest.offset == 0) {

			/* mov %rsp, (%reg); # setup a stack swizzle. */
			cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT;
			cfi->vals[op->dest.reg].offset = cfa->offset;
		}

		break;

	case OP_DEST_MEM:
		if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
			WARN_INSN(insn, "unknown stack-related memory operation");
			return -1;
		}

		/* pop mem */
		cfi->stack_size -= 8;
		if (cfa->base == CFI_SP)
			cfa->offset -= 8;

		break;

	default:
		WARN_INSN(insn, "unknown stack-related instruction");
		return -1;
	}

	return 0;
}

/*
 * The stack layouts of alternatives instructions can sometimes diverge when
 * they have stack modifications. That's fine as long as the potential stack
 * layouts don't conflict at any given potential instruction boundary.
 *
 * Flatten the CFIs of the different alternative code streams (both original
 * and replacement) into a single shared CFI array which can be used to detect
 * conflicts and nicely feed a linear array of ORC entries to the unwinder.
 */
static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn)
{
	struct cfi_state **alt_cfi;
	int group_off;

	if (!insn->alt_group)
		return 0;

	if (!insn->cfi) {
		WARN("CFI missing");
		return -1;
	}

	alt_cfi = insn->alt_group->cfi;
	/* index into the shared per-group CFI array by byte offset */
	group_off = insn->offset - insn->alt_group->first_insn->offset;

	if (!alt_cfi[group_off]) {
		alt_cfi[group_off] = insn->cfi;
	} else {
		if (cficmp(alt_cfi[group_off], insn->cfi)) {
			struct alt_group *orig_group = insn->alt_group->orig_group ?: insn->alt_group;
			struct instruction *orig = orig_group->first_insn;

			WARN_INSN(orig, "stack layout conflict in alternatives: %s",
				  offstr(insn->sec, insn->offset));
			return -1;
		}
	}

	return 0;
}

/*
 * Apply all of an instruction's stack_ops to the tracked CFI state and
 * maintain the PUSHF/POPF uaccess shadow stack (a bitstack of saved
 * uaccess states, with a sentinel 1 bit at the bottom).
 */
static int noinline handle_insn_ops(struct instruction *insn,
				    struct instruction *next_insn,
				    struct insn_state *state)
{
	struct insn_state prev_state __maybe_unused = *state;
	struct stack_op *op;
	int ret = 0;

	for (op = insn->stack_ops; op; op = op->next) {

		ret = update_cfi_state(insn, next_insn, &state->cfi, op);
		if (ret)
			goto done;

		if (!opts.uaccess || !insn->alt_group)
			continue;

		if (op->dest.type == OP_DEST_PUSHF) {
			if (!state->uaccess_stack) {
				state->uaccess_stack = 1;
			} else if (state->uaccess_stack >> 31) {
				WARN_INSN(insn, "PUSHF stack exhausted");
				ret = 1;
				goto done;
			}
			state->uaccess_stack <<= 1;
			state->uaccess_stack |= state->uaccess;
		}

		if (op->src.type == OP_SRC_POPF) {
			if (state->uaccess_stack) {
				state->uaccess = state->uaccess_stack & 1;
				state->uaccess_stack >>= 1;
				if (state->uaccess_stack == 1)
					state->uaccess_stack = 0;
			}
		}
	}

done:
	TRACE_INSN_STATE(insn, &prev_state, state);

	return ret;
}

static bool
insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
{
	struct cfi_state *cfi1 = insn->cfi;
	int i;

	if (!cfi1) {
		WARN("CFI missing");
		return false;
	}

	if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) {

		WARN_INSN(insn, "stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
			  cfi1->cfa.base, cfi1->cfa.offset,
			  cfi2->cfa.base, cfi2->cfa.offset);
		return false;

	}

	if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
		/* report every differing register before failing */
		for (i = 0; i < CFI_NUM_REGS; i++) {

			if (!memcmp(&cfi1->regs[i], &cfi2->regs[i], sizeof(struct cfi_reg)))
				continue;

			WARN_INSN(insn, "stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
				  i, cfi1->regs[i].base, cfi1->regs[i].offset,
				  i, cfi2->regs[i].base, cfi2->regs[i].offset);
		}
		return false;
	}

	if (cfi1->type != cfi2->type) {

		WARN_INSN(insn, "stack state mismatch: type1=%d type2=%d",
			  cfi1->type, cfi2->type);
		return false;
	}

	if (cfi1->drap != cfi2->drap ||
	    (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) ||
	    (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) {

		WARN_INSN(insn, "stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
			  cfi1->drap, cfi1->drap_reg, cfi1->drap_offset,
			  cfi2->drap, cfi2->drap_reg, cfi2->drap_offset);
		return false;
	}

	return true;
}

/* May this function legitimately execute with UACCESS enabled? */
static inline bool func_uaccess_safe(struct symbol *func)
{
	if (func)
		return func->uaccess_safe;

	return false;
}

/*
 * Best-effort destination name for diagnostics; indirect pv_ops calls
 * are rendered as "pv_ops[idx]", anything else unknown as "{dynamic}".
 */
static inline const char *call_dest_name(struct instruction *insn)
{
	static char pvname[19];
	struct reloc *reloc;
	int idx;

	if (insn_call_dest(insn))
		return insn_call_dest(insn)->name;

	reloc = insn_reloc(NULL, insn);
	if (reloc && !strcmp(reloc->sym->name, "pv_ops")) {
		idx = (reloc_addend(reloc) / sizeof(void *));
		snprintf(pvname,
 sizeof(pvname), "pv_ops[%d]", idx);
		return pvname;
	}

	return "{dynamic}";
}

/*
 * Check whether an indirect pv_ops[] call can only target noinstr code;
 * the verdict is cached in file->pv_ops[idx].clean.
 */
static bool pv_call_dest(struct objtool_file *file, struct instruction *insn)
{
	struct symbol *target;
	struct reloc *reloc;
	int idx;

	reloc = insn_reloc(file, insn);
	if (!reloc || strcmp(reloc->sym->name, "pv_ops"))
		return false;

	idx = arch_insn_adjusted_addend(insn, reloc) / sizeof(void *);

	if (file->pv_ops[idx].clean)
		return true;

	/* optimistic; any dirty target below flips it back */
	file->pv_ops[idx].clean = true;

	list_for_each_entry(target, &file->pv_ops[idx].targets, pv_target) {
		if (!target->sec->noinstr) {
			WARN("pv_ops[%d]: %s", idx, target->name);
			file->pv_ops[idx].clean = false;
		}
	}

	return file->pv_ops[idx].clean;
}

/*
 * Decide whether a call destination is acceptable from noinstr code.
 */
static inline bool noinstr_call_dest(struct objtool_file *file,
				     struct instruction *insn,
				     struct symbol *func)
{
	/*
	 * We can't deal with indirect function calls at present;
	 * assume they're instrumented.
	 */
	if (!func) {
		if (file->pv_ops)
			return pv_call_dest(file, insn);

		return false;
	}

	/*
	 * If the symbol is from a noinstr section; we good.
	 */
	if (func->sec->noinstr)
		return true;

	/*
	 * If the symbol is a static_call trampoline, we can't tell.
	 */
	if (func->static_call_tramp)
		return true;

	/*
	 * The __ubsan_handle_*() calls are like WARN(), they only happen when
	 * something 'BAD' happened. At the risk of taking the machine down,
	 * let them proceed to get the message out.
	 */
	if (!strncmp(func->name, "__ubsan_handle_", 15))
		return true;

	return false;
}

/*
 * Enforce call-site rules: no calls leaving .noinstr.text while
 * instrumentation is disabled, no calls to non-uaccess-safe functions
 * with UACCESS enabled, and no calls with DF set.
 */
static int validate_call(struct objtool_file *file,
			 struct instruction *insn,
			 struct insn_state *state)
{
	if (state->noinstr && state->instr <= 0 &&
	    !noinstr_call_dest(file, insn, insn_call_dest(insn))) {
		WARN_INSN(insn, "call to %s() leaves .noinstr.text section", call_dest_name(insn));
		return 1;
	}

	if (state->uaccess && !func_uaccess_safe(insn_call_dest(insn))) {
		WARN_INSN(insn, "call to %s() with UACCESS enabled", call_dest_name(insn));
		return 1;
	}

	if (state->df) {
		WARN_INSN(insn, "call to %s() with DF set", call_dest_name(insn));
		return 1;
	}

	return 0;
}

/*
 * A sibling call is validated like a call, but additionally must not
 * have a modified stack frame when made from callable code.
 */
static int validate_sibling_call(struct objtool_file *file,
				 struct instruction *insn,
				 struct insn_state *state)
{
	if (insn_func(insn) && has_modified_stack_frame(insn, state)) {
		WARN_INSN(insn, "sibling call from callable instruction with modified stack frame");
		return 1;
	}

	return validate_call(file, insn, state);
}

/*
 * Enforce return-site rules: instrumentation/UACCESS/DF state must be
 * consistent and the stack frame restored to its entry state.
 */
static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state)
{
	if (state->noinstr && state->instr > 0) {
		WARN_INSN(insn, "return with instrumentation enabled");
		return 1;
	}

	if (state->uaccess && !func_uaccess_safe(func)) {
		WARN_INSN(insn, "return with UACCESS enabled");
		return 1;
	}

	if (!state->uaccess && func_uaccess_safe(func)) {
		WARN_INSN(insn, "return with UACCESS disabled from a UACCESS-safe function");
		return 1;
	}

	if (state->df) {
		WARN_INSN(insn, "return with DF set");
		return 1;
	}

	if (func && has_modified_stack_frame(insn, state)) {
		WARN_INSN(insn, "return with modified stack frame");
		return 1;
	}

	if (state->cfi.bp_scratch) {
		WARN_INSN(insn, "BP used as a scratch register");
		return 1;
	}

	return 0;
}

static struct instruction *next_insn_to_validate(struct objtool_file *file,
						 struct instruction *insn)
{
	struct alt_group *alt_group = insn->alt_group;

	/*
	 * Simulate the fact that alternatives are patched in-place.  When the
	 * end of a replacement alt_group is reached, redirect objtool flow to
	 * the end of the original alt_group.
	 *
	 * insn->alts->insn -> alt_group->first_insn
	 *		       ...
	 *		       alt_group->last_insn
	 *		       [alt_group->nop] -> next(orig_group->last_insn)
	 */
	if (alt_group) {
		if (alt_group->nop) {
			/* ->nop implies ->orig_group */
			if (insn == alt_group->last_insn)
				return alt_group->nop;
			if (insn == alt_group->nop)
				goto next_orig;
		}
		if (insn == alt_group->last_insn && alt_group->orig_group)
			goto next_orig;
	}

	return next_insn_same_sec(file, insn);

next_orig:
	return next_insn_same_sec(file, alt_group->orig_group->last_insn);
}

/*
 * Should validation skip this instruction's alternative group and only
 * follow the replacement stream?
 */
static bool skip_alt_group(struct instruction *insn)
{
	struct instruction *alt_insn = insn->alts ? insn->alts->insn : NULL;

	if (!insn->alt_group)
		return false;

	/* ANNOTATE_IGNORE_ALTERNATIVE */
	if (insn->alt_group->ignore) {
		TRACE_ALT(insn, "alt group ignored");
		return true;
	}

	/*
	 * For NOP patched with CLAC/STAC, only follow the latter to avoid
	 * impossible code paths combining patched CLAC with unpatched STAC
	 * or vice versa.
	 *
	 * ANNOTATE_IGNORE_ALTERNATIVE could have been used here, but Linus
	 * requested not to do that to avoid hurting .s file readability
	 * around CLAC/STAC alternative sites.
	 */

	if (!alt_insn)
		return false;

	/* Don't override ASM_{CLAC,STAC}_UNSAFE */
	if (alt_insn->alt_group && alt_insn->alt_group->ignore)
		return false;

	return alt_insn->type == INSN_CLAC || alt_insn->type == INSN_STAC;
}

/*
 * Parse the comma-separated function list given via --debug-checksum
 * and flag each named function for checksum debug output.  Unknown
 * names only warn; parsing continues.
 */
static int checksum_debug_init(struct objtool_file *file)
{
	char *dup, *s;

	if (!opts.debug_checksum)
		return 0;

	/* strdup so the option string itself isn't mutated */
	dup = strdup(opts.debug_checksum);
	if (!dup) {
		ERROR_GLIBC("strdup");
		return -1;
	}

	s = dup;
	while (*s) {
		struct symbol *func;
		char *comma;

		comma = strchr(s, ',');
		if (comma)
			*comma = '\0';

		func = find_symbol_by_name(file->elf, s);
		if (!func || !is_func_sym(func))
			WARN("--debug-checksum: can't find '%s'", s);
		else
			func->debug_checksum = 1;

		if (!comma)
			break;

		s = comma + 1;
	}

	free(dup);
	return 0;
}

/*
 * Fold an instruction's bytes, plus a stable rendition of its relocation
 * target (symbol name and addend, or referenced string contents), into
 * its function's checksum.  Fake instructions are excluded.
 */
static void checksum_update_insn(struct objtool_file *file, struct symbol *func,
				 struct instruction *insn)
{
	struct reloc *reloc = insn_reloc(file, insn);
	unsigned long offset;
	struct symbol *sym;

	if (insn->fake)
		return;

	checksum_update(func, insn, insn->sec->data->d_buf + insn->offset, insn->len);

	if (!reloc) {
		struct symbol *call_dest = insn_call_dest(insn);

		if (call_dest)
			checksum_update(func, insn, call_dest->demangled_name,
					strlen(call_dest->demangled_name));
		return;
	}

	sym = reloc->sym;
	offset = arch_insn_adjusted_addend(insn, reloc);

	if (is_string_sec(sym->sec)) {
		char *str;

		/* hash the referenced string contents, not its address */
		str = sym->sec->data->d_buf + sym->offset + offset;
		checksum_update(func, insn, str, strlen(str));
		return;
	}

	if (is_sec_sym(sym)) {
		/* resolve a section symbol to the contained symbol */
		sym = find_symbol_containing(reloc->sym->sec, offset);
		if (!sym)
			return;

		offset -= sym->offset;
	}
checksum_update(func, insn, sym->demangled_name, strlen(sym->demangled_name)); 3731 checksum_update(func, insn, &offset, sizeof(offset)); 3732 } 3733 3734 static int validate_branch(struct objtool_file *file, struct symbol *func, 3735 struct instruction *insn, struct insn_state state); 3736 static int do_validate_branch(struct objtool_file *file, struct symbol *func, 3737 struct instruction *insn, struct insn_state state); 3738 3739 static int validate_insn(struct objtool_file *file, struct symbol *func, 3740 struct instruction *insn, struct insn_state *statep, 3741 struct instruction *prev_insn, struct instruction *next_insn, 3742 bool *dead_end) 3743 { 3744 char *alt_name __maybe_unused = NULL; 3745 struct alternative *alt; 3746 u8 visited; 3747 int ret; 3748 3749 /* 3750 * Any returns before the end of this function are effectively dead 3751 * ends, i.e. validate_branch() has reached the end of the branch. 3752 */ 3753 *dead_end = true; 3754 3755 visited = VISITED_BRANCH << statep->uaccess; 3756 if (insn->visited & VISITED_BRANCH_MASK) { 3757 if (!insn->hint && !insn_cfi_match(insn, &statep->cfi)) 3758 return 1; 3759 3760 if (insn->visited & visited) { 3761 TRACE_INSN(insn, "already visited"); 3762 return 0; 3763 } 3764 } else { 3765 nr_insns_visited++; 3766 } 3767 3768 if (statep->noinstr) 3769 statep->instr += insn->instr; 3770 3771 if (insn->hint) { 3772 if (insn->restore) { 3773 struct instruction *save_insn, *i; 3774 3775 i = insn; 3776 save_insn = NULL; 3777 3778 sym_for_each_insn_continue_reverse(file, func, i) { 3779 if (i->save) { 3780 save_insn = i; 3781 break; 3782 } 3783 } 3784 3785 if (!save_insn) { 3786 WARN_INSN(insn, "no corresponding CFI save for CFI restore"); 3787 return 1; 3788 } 3789 3790 if (!save_insn->visited) { 3791 /* 3792 * If the restore hint insn is at the 3793 * beginning of a basic block and was 3794 * branched to from elsewhere, and the 3795 * save insn hasn't been visited yet, 3796 * defer following this branch for now. 
3797 * It will be seen later via the 3798 * straight-line path. 3799 */ 3800 if (!prev_insn) { 3801 TRACE_INSN(insn, "defer restore"); 3802 return 0; 3803 } 3804 3805 WARN_INSN(insn, "objtool isn't smart enough to handle this CFI save/restore combo"); 3806 return 1; 3807 } 3808 3809 insn->cfi = save_insn->cfi; 3810 nr_cfi_reused++; 3811 } 3812 3813 statep->cfi = *insn->cfi; 3814 } else { 3815 /* XXX track if we actually changed statep->cfi */ 3816 3817 if (prev_insn && !cficmp(prev_insn->cfi, &statep->cfi)) { 3818 insn->cfi = prev_insn->cfi; 3819 nr_cfi_reused++; 3820 } else { 3821 insn->cfi = cfi_hash_find_or_add(&statep->cfi); 3822 } 3823 } 3824 3825 insn->visited |= visited; 3826 3827 if (propagate_alt_cfi(file, insn)) 3828 return 1; 3829 3830 if (insn->alts) { 3831 for (alt = insn->alts; alt; alt = alt->next) { 3832 TRACE_ALT_BEGIN(insn, alt, alt_name); 3833 ret = validate_branch(file, func, alt->insn, *statep); 3834 TRACE_ALT_END(insn, alt, alt_name); 3835 if (ret) { 3836 BT_INSN(insn, "(alt)"); 3837 return ret; 3838 } 3839 } 3840 TRACE_ALT_INFO_NOADDR(insn, "/ ", "DEFAULT"); 3841 } 3842 3843 if (skip_alt_group(insn)) 3844 return 0; 3845 3846 if (handle_insn_ops(insn, next_insn, statep)) 3847 return 1; 3848 3849 switch (insn->type) { 3850 3851 case INSN_RETURN: 3852 TRACE_INSN(insn, "return"); 3853 return validate_return(func, insn, statep); 3854 3855 case INSN_CALL: 3856 case INSN_CALL_DYNAMIC: 3857 if (insn->type == INSN_CALL) 3858 TRACE_INSN(insn, "call"); 3859 else 3860 TRACE_INSN(insn, "indirect call"); 3861 3862 ret = validate_call(file, insn, statep); 3863 if (ret) 3864 return ret; 3865 3866 if (opts.stackval && func && !is_special_call(insn) && 3867 !has_valid_stack_frame(statep)) { 3868 WARN_INSN(insn, "call without frame pointer save/setup"); 3869 return 1; 3870 } 3871 3872 break; 3873 3874 case INSN_JUMP_CONDITIONAL: 3875 case INSN_JUMP_UNCONDITIONAL: 3876 if (is_sibling_call(insn)) { 3877 TRACE_INSN(insn, "sibling call"); 3878 ret = 
validate_sibling_call(file, insn, statep); 3879 if (ret) 3880 return ret; 3881 3882 } else if (insn->jump_dest) { 3883 if (insn->type == INSN_JUMP_UNCONDITIONAL) 3884 TRACE_INSN(insn, "unconditional jump"); 3885 else 3886 TRACE_INSN(insn, "jump taken"); 3887 3888 ret = validate_branch(file, func, insn->jump_dest, *statep); 3889 if (ret) { 3890 BT_INSN(insn, "(branch)"); 3891 return ret; 3892 } 3893 } 3894 3895 if (insn->type == INSN_JUMP_UNCONDITIONAL) 3896 return 0; 3897 3898 TRACE_INSN(insn, "jump not taken"); 3899 break; 3900 3901 case INSN_JUMP_DYNAMIC: 3902 case INSN_JUMP_DYNAMIC_CONDITIONAL: 3903 TRACE_INSN(insn, "indirect jump"); 3904 if (is_sibling_call(insn)) { 3905 ret = validate_sibling_call(file, insn, statep); 3906 if (ret) 3907 return ret; 3908 } 3909 3910 if (insn->type == INSN_JUMP_DYNAMIC) 3911 return 0; 3912 3913 break; 3914 3915 case INSN_SYSCALL: 3916 TRACE_INSN(insn, "syscall"); 3917 if (func && (!next_insn || !next_insn->hint)) { 3918 WARN_INSN(insn, "unsupported instruction in callable function"); 3919 return 1; 3920 } 3921 3922 break; 3923 3924 case INSN_SYSRET: 3925 TRACE_INSN(insn, "sysret"); 3926 if (func && (!next_insn || !next_insn->hint)) { 3927 WARN_INSN(insn, "unsupported instruction in callable function"); 3928 return 1; 3929 } 3930 3931 return 0; 3932 3933 case INSN_STAC: 3934 TRACE_INSN(insn, "stac"); 3935 if (!opts.uaccess) 3936 break; 3937 3938 if (statep->uaccess) { 3939 WARN_INSN(insn, "recursive UACCESS enable"); 3940 return 1; 3941 } 3942 3943 statep->uaccess = true; 3944 break; 3945 3946 case INSN_CLAC: 3947 TRACE_INSN(insn, "clac"); 3948 if (!opts.uaccess) 3949 break; 3950 3951 if (!statep->uaccess && func) { 3952 WARN_INSN(insn, "redundant UACCESS disable"); 3953 return 1; 3954 } 3955 3956 if (func_uaccess_safe(func) && !statep->uaccess_stack) { 3957 WARN_INSN(insn, "UACCESS-safe disables UACCESS"); 3958 return 1; 3959 } 3960 3961 statep->uaccess = false; 3962 break; 3963 3964 case INSN_STD: 3965 TRACE_INSN(insn, "std"); 
3966 if (statep->df) { 3967 WARN_INSN(insn, "recursive STD"); 3968 return 1; 3969 } 3970 3971 statep->df = true; 3972 break; 3973 3974 case INSN_CLD: 3975 TRACE_INSN(insn, "cld"); 3976 if (!statep->df && func) { 3977 WARN_INSN(insn, "redundant CLD"); 3978 return 1; 3979 } 3980 3981 statep->df = false; 3982 break; 3983 3984 default: 3985 break; 3986 } 3987 3988 if (insn->dead_end) 3989 TRACE_INSN(insn, "dead end"); 3990 3991 *dead_end = insn->dead_end; 3992 return 0; 3993 } 3994 3995 /* 3996 * Follow the branch starting at the given instruction, and recursively follow 3997 * any other branches (jumps). Meanwhile, track the frame pointer state at 3998 * each instruction and validate all the rules described in 3999 * tools/objtool/Documentation/objtool.txt. 4000 */ 4001 static int do_validate_branch(struct objtool_file *file, struct symbol *func, 4002 struct instruction *insn, struct insn_state state) 4003 { 4004 struct instruction *next_insn, *prev_insn = NULL; 4005 bool dead_end; 4006 int ret; 4007 4008 if (func && func->ignore) 4009 return 0; 4010 4011 do { 4012 insn->trace = 0; 4013 next_insn = next_insn_to_validate(file, insn); 4014 4015 if (opts.checksum && func && insn->sec) 4016 checksum_update_insn(file, func, insn); 4017 4018 if (func && insn_func(insn) && func != insn_func(insn)->pfunc) { 4019 /* Ignore KCFI type preambles, which always fall through */ 4020 if (is_prefix_func(func)) 4021 return 0; 4022 4023 if (file->ignore_unreachables) 4024 return 0; 4025 4026 WARN("%s() falls through to next function %s()", 4027 func->name, insn_func(insn)->name); 4028 func->warned = 1; 4029 4030 return 1; 4031 } 4032 4033 ret = validate_insn(file, func, insn, &state, prev_insn, next_insn, 4034 &dead_end); 4035 4036 if (!insn->trace) { 4037 if (ret) 4038 TRACE_INSN(insn, "warning (%d)", ret); 4039 else 4040 TRACE_INSN(insn, NULL); 4041 } 4042 4043 if (!dead_end && !next_insn) { 4044 if (state.cfi.cfa.base == CFI_UNDEFINED) 4045 return 0; 4046 if 
/*
 * Wrapper around do_validate_branch() which maintains the trace nesting
 * depth so TRACE_*() output is indented one level per followed branch.
 */
static int validate_branch(struct objtool_file *file, struct symbol *func,
			   struct instruction *insn, struct insn_state state)
{
	int ret;

	trace_depth_inc();
	ret = do_validate_branch(file, func, insn, state);
	trace_depth_dec();

	return ret;
}

/*
 * Validate a single UNWIND_HINT-annotated instruction, but only if normal
 * control-flow validation didn't already reach it (insn->visited).
 *
 * Returns the number of warnings (0 or the validate_branch() result).
 */
static int validate_unwind_hint(struct objtool_file *file,
				struct instruction *insn,
				struct insn_state *state)
{
	if (insn->hint && !insn->visited) {
		struct symbol *func = insn_func(insn);
		int ret;

		if (opts.checksum)
			checksum_init(func);

		ret = validate_branch(file, func, insn, *state);
		if (ret)
			BT_INSN(insn, "<=== (hint)");
		return ret;
	}

	return 0;
}

/*
 * Validate all UNWIND_HINT-annotated instructions, either in the given
 * section or, if @sec is NULL, across the whole file.
 *
 * Returns the accumulated warning count.
 */
static int validate_unwind_hints(struct objtool_file *file, struct section *sec)
{
	struct instruction *insn;
	struct insn_state state;
	int warnings = 0;

	/* Nothing to do if the file has no unwind hints at all. */
	if (!file->hints)
		return 0;

	init_insn_state(file, &state, sec);

	if (sec) {
		sec_for_each_insn(file, sec, insn)
			warnings += validate_unwind_hint(file, insn, &state);
	} else {
		for_each_insn(file, insn)
			warnings += validate_unwind_hint(file, insn, &state);
	}

	return warnings;
}
4122 */ 4123 static int validate_unret(struct objtool_file *file, struct instruction *insn) 4124 { 4125 struct instruction *next, *dest; 4126 int ret; 4127 4128 for (;;) { 4129 next = next_insn_to_validate(file, insn); 4130 4131 if (insn->visited & VISITED_UNRET) 4132 return 0; 4133 4134 insn->visited |= VISITED_UNRET; 4135 4136 if (insn->alts) { 4137 struct alternative *alt; 4138 for (alt = insn->alts; alt; alt = alt->next) { 4139 ret = validate_unret(file, alt->insn); 4140 if (ret) { 4141 BT_INSN(insn, "(alt)"); 4142 return ret; 4143 } 4144 } 4145 } 4146 4147 switch (insn->type) { 4148 4149 case INSN_CALL_DYNAMIC: 4150 case INSN_JUMP_DYNAMIC: 4151 case INSN_JUMP_DYNAMIC_CONDITIONAL: 4152 WARN_INSN(insn, "early indirect call"); 4153 return 1; 4154 4155 case INSN_JUMP_UNCONDITIONAL: 4156 case INSN_JUMP_CONDITIONAL: 4157 if (!is_sibling_call(insn)) { 4158 if (!insn->jump_dest) { 4159 WARN_INSN(insn, "unresolved jump target after linking?!?"); 4160 return 1; 4161 } 4162 ret = validate_unret(file, insn->jump_dest); 4163 if (ret) { 4164 BT_INSN(insn, "(branch%s)", 4165 insn->type == INSN_JUMP_CONDITIONAL ? "-cond" : ""); 4166 return ret; 4167 } 4168 4169 if (insn->type == INSN_JUMP_UNCONDITIONAL) 4170 return 0; 4171 4172 break; 4173 } 4174 4175 /* fallthrough */ 4176 case INSN_CALL: 4177 dest = find_insn(file, insn_call_dest(insn)->sec, 4178 insn_call_dest(insn)->offset); 4179 if (!dest) { 4180 WARN("Unresolved function after linking!?: %s", 4181 insn_call_dest(insn)->name); 4182 return 1; 4183 } 4184 4185 ret = validate_unret(file, dest); 4186 if (ret) { 4187 BT_INSN(insn, "(call)"); 4188 return ret; 4189 } 4190 /* 4191 * If a call returns without error, it must have seen UNTRAIN_RET. 4192 * Therefore any non-error return is a success. 
4193 */ 4194 return 0; 4195 4196 case INSN_RETURN: 4197 WARN_INSN(insn, "RET before UNTRAIN"); 4198 return 1; 4199 4200 case INSN_SYSCALL: 4201 break; 4202 4203 case INSN_SYSRET: 4204 return 0; 4205 4206 case INSN_NOP: 4207 if (insn->retpoline_safe) 4208 return 0; 4209 break; 4210 4211 default: 4212 break; 4213 } 4214 4215 if (insn->dead_end) 4216 return 0; 4217 4218 if (!next) { 4219 WARN_INSN(insn, "teh end!"); 4220 return 1; 4221 } 4222 insn = next; 4223 } 4224 4225 return 0; 4226 } 4227 4228 /* 4229 * Validate that all branches starting at VALIDATE_UNRET_BEGIN encounter 4230 * VALIDATE_UNRET_END before RET. 4231 */ 4232 static int validate_unrets(struct objtool_file *file) 4233 { 4234 struct instruction *insn; 4235 int warnings = 0; 4236 4237 for_each_insn(file, insn) { 4238 if (!insn->unret) 4239 continue; 4240 4241 warnings += validate_unret(file, insn); 4242 } 4243 4244 return warnings; 4245 } 4246 4247 static int validate_retpoline(struct objtool_file *file) 4248 { 4249 struct instruction *insn; 4250 int warnings = 0; 4251 4252 for_each_insn(file, insn) { 4253 if (insn->type != INSN_JUMP_DYNAMIC && 4254 insn->type != INSN_CALL_DYNAMIC && 4255 insn->type != INSN_RETURN) 4256 continue; 4257 4258 if (insn->retpoline_safe) 4259 continue; 4260 4261 if (insn->sec->init) 4262 continue; 4263 4264 if (insn->type == INSN_RETURN) { 4265 if (opts.rethunk) { 4266 WARN_INSN(insn, "'naked' return found in MITIGATION_RETHUNK build"); 4267 warnings++; 4268 } 4269 continue; 4270 } 4271 4272 WARN_INSN(insn, "indirect %s found in MITIGATION_RETPOLINE build", 4273 insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call"); 4274 warnings++; 4275 } 4276 4277 if (!opts.cfi) 4278 return warnings; 4279 4280 /* 4281 * kCFI call sites look like: 4282 * 4283 * movl $(-0x12345678), %r10d 4284 * addl -4(%r11), %r10d 4285 * jz 1f 4286 * ud2 4287 * 1: cs call __x86_indirect_thunk_r11 4288 * 4289 * Verify all indirect calls are kCFI adorned by checking for the 4290 * UD2. 
Notably, doing __nocfi calls to regular (cfi) functions is 4291 * broken. 4292 */ 4293 list_for_each_entry(insn, &file->retpoline_call_list, call_node) { 4294 struct symbol *sym = insn->sym; 4295 4296 if (sym && (sym->type == STT_NOTYPE || 4297 sym->type == STT_FUNC) && !sym->nocfi) { 4298 struct instruction *prev = 4299 prev_insn_same_sym(file, insn); 4300 4301 if (!prev || prev->type != INSN_BUG) { 4302 WARN_INSN(insn, "no-cfi indirect call!"); 4303 warnings++; 4304 } 4305 } 4306 } 4307 4308 return warnings; 4309 } 4310 4311 static bool is_kasan_insn(struct instruction *insn) 4312 { 4313 return (insn->type == INSN_CALL && 4314 !strcmp(insn_call_dest(insn)->name, "__asan_handle_no_return")); 4315 } 4316 4317 static bool is_ubsan_insn(struct instruction *insn) 4318 { 4319 return (insn->type == INSN_CALL && 4320 !strcmp(insn_call_dest(insn)->name, 4321 "__ubsan_handle_builtin_unreachable")); 4322 } 4323 4324 static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn) 4325 { 4326 struct symbol *func = insn_func(insn); 4327 struct instruction *prev_insn; 4328 int i; 4329 4330 if (insn->type == INSN_NOP || insn->type == INSN_TRAP || 4331 insn->hole || (func && func->ignore)) 4332 return true; 4333 4334 /* 4335 * Ignore alternative replacement instructions. This can happen 4336 * when a whitelisted function uses one of the ALTERNATIVE macros. 4337 */ 4338 if (!strcmp(insn->sec->name, ".altinstr_replacement") || 4339 !strcmp(insn->sec->name, ".altinstr_aux")) 4340 return true; 4341 4342 if (!func) 4343 return false; 4344 4345 if (func->static_call_tramp) 4346 return true; 4347 4348 /* 4349 * CONFIG_UBSAN_TRAP inserts a UD2 when it sees 4350 * __builtin_unreachable(). The BUG() macro has an unreachable() after 4351 * the UD2, which causes GCC's undefined trap logic to emit another UD2 4352 * (or occasionally a JMP to UD2). 4353 * 4354 * It may also insert a UD2 after calling a __noreturn function. 
4355 */ 4356 prev_insn = prev_insn_same_sec(file, insn); 4357 if (prev_insn && prev_insn->dead_end && 4358 (insn->type == INSN_BUG || 4359 (insn->type == INSN_JUMP_UNCONDITIONAL && 4360 insn->jump_dest && insn->jump_dest->type == INSN_BUG))) 4361 return true; 4362 4363 /* 4364 * Check if this (or a subsequent) instruction is related to 4365 * CONFIG_UBSAN or CONFIG_KASAN. 4366 * 4367 * End the search at 5 instructions to avoid going into the weeds. 4368 */ 4369 for (i = 0; i < 5; i++) { 4370 4371 if (is_kasan_insn(insn) || is_ubsan_insn(insn)) 4372 return true; 4373 4374 if (insn->type == INSN_JUMP_UNCONDITIONAL) { 4375 if (insn->jump_dest && 4376 insn_func(insn->jump_dest) == func) { 4377 insn = insn->jump_dest; 4378 continue; 4379 } 4380 4381 break; 4382 } 4383 4384 if (insn->offset + insn->len >= func->offset + func->len) 4385 break; 4386 4387 insn = next_insn_same_sec(file, insn); 4388 } 4389 4390 return false; 4391 } 4392 4393 /* 4394 * For FineIBT or kCFI, a certain number of bytes preceding the function may be 4395 * NOPs. Those NOPs may be rewritten at runtime and executed, so give them a 4396 * proper function name: __pfx_<func>. 4397 * 4398 * The NOPs may not exist for the following cases: 4399 * 4400 * - compiler cloned functions (*.cold, *.part0, etc) 4401 * - asm functions created with inline asm or without SYM_FUNC_START() 4402 * 4403 * Also, the function may already have a prefix from a previous objtool run 4404 * (livepatch extracted functions, or manually running objtool multiple times). 4405 * 4406 * So return 0 if the NOPs are missing or the function already has a prefix 4407 * symbol. 
/*
 * Create a __pfx_<func> symbol covering the opts.prefix bytes of NOP padding
 * that FineIBT/kCFI builds emit immediately before @func.
 *
 * Returns 0 on success *and* on the benign skip cases (no NOPs, name too
 * long, prefix already present); -1 only on ELF manipulation failure.
 */
static int create_prefix_symbol(struct objtool_file *file, struct symbol *func)
{
	struct instruction *insn, *prev;
	char name[SYM_NAME_LEN];
	struct cfi_state *cfi;

	/* Only plain function symbols get a prefix symbol: */
	if (!is_func_sym(func) || is_prefix_func(func) ||
	    func->cold || func->static_call_tramp)
		return 0;

	if ((strlen(func->name) + sizeof("__pfx_") > SYM_NAME_LEN)) {
		WARN("%s: symbol name too long, can't create __pfx_ symbol",
		     func->name);
		return 0;
	}

	if (snprintf_check(name, SYM_NAME_LEN, "__pfx_%s", func->name))
		return -1;

	/*
	 * Livepatch-extracted objects may already carry a prefix symbol from
	 * a previous objtool run; don't create a duplicate.
	 */
	if (file->klp) {
		struct symbol *pfx;

		pfx = find_symbol_by_offset(func->sec, func->offset - opts.prefix);
		if (pfx && is_prefix_func(pfx) && !strcmp(pfx->name, name))
			return 0;
	}

	insn = find_insn(file, func->sec, func->offset);
	if (!insn) {
		WARN("%s: can't find starting instruction", func->name);
		return -1;
	}

	/*
	 * Walk backwards from the function start; the prefix exists only if
	 * there are exactly opts.prefix bytes of contiguous NOPs with an
	 * instruction boundary at func->offset - opts.prefix.
	 */
	for (prev = prev_insn_same_sec(file, insn);
	     prev;
	     prev = prev_insn_same_sec(file, prev)) {
		u64 offset;

		if (prev->type != INSN_NOP)
			return 0;

		offset = func->offset - prev->offset;

		if (offset > opts.prefix)
			return 0;

		if (offset < opts.prefix)
			continue;

		if (!elf_create_symbol(file->elf, name, func->sec,
				       GELF_ST_BIND(func->sym.st_info),
				       GELF_ST_TYPE(func->sym.st_info),
				       prev->offset, opts.prefix))
			return -1;

		break;
	}

	/* No NOPs found (compiler clones, hand-written asm, ...): */
	if (!prev)
		return 0;

	if (!insn->cfi) {
		/*
		 * This can happen if stack validation isn't enabled or the
		 * function is annotated with STACK_FRAME_NON_STANDARD.
		 */
		return 0;
	}

	/* Propagate insn->cfi to the prefix code */
	cfi = cfi_hash_find_or_add(insn->cfi);
	for (; prev != insn; prev = next_insn_same_sec(file, prev))
		prev->cfi = cfi;

	return 0;
}
"failed" : "end"); 4541 trace_disable(); 4542 4543 if (opts.checksum) 4544 checksum_finish(func); 4545 4546 return ret; 4547 } 4548 4549 static int validate_section(struct objtool_file *file, struct section *sec) 4550 { 4551 struct insn_state state; 4552 struct symbol *func; 4553 int warnings = 0; 4554 4555 sec_for_each_sym(sec, func) { 4556 if (!is_func_sym(func)) 4557 continue; 4558 4559 init_insn_state(file, &state, sec); 4560 set_func_state(&state.cfi); 4561 4562 warnings += validate_symbol(file, sec, func, &state); 4563 } 4564 4565 return warnings; 4566 } 4567 4568 static int validate_noinstr_sections(struct objtool_file *file) 4569 { 4570 struct section *sec; 4571 int warnings = 0; 4572 4573 sec = find_section_by_name(file->elf, ".noinstr.text"); 4574 if (sec) { 4575 warnings += validate_section(file, sec); 4576 warnings += validate_unwind_hints(file, sec); 4577 } 4578 4579 sec = find_section_by_name(file->elf, ".entry.text"); 4580 if (sec) { 4581 warnings += validate_section(file, sec); 4582 warnings += validate_unwind_hints(file, sec); 4583 } 4584 4585 sec = find_section_by_name(file->elf, ".cpuidle.text"); 4586 if (sec) { 4587 warnings += validate_section(file, sec); 4588 warnings += validate_unwind_hints(file, sec); 4589 } 4590 4591 return warnings; 4592 } 4593 4594 static int validate_functions(struct objtool_file *file) 4595 { 4596 struct section *sec; 4597 int warnings = 0; 4598 4599 for_each_sec(file->elf, sec) { 4600 if (!is_text_sec(sec)) 4601 continue; 4602 4603 warnings += validate_section(file, sec); 4604 } 4605 4606 return warnings; 4607 } 4608 4609 static void mark_endbr_used(struct instruction *insn) 4610 { 4611 if (!list_empty(&insn->call_node)) 4612 list_del_init(&insn->call_node); 4613 } 4614 4615 static bool noendbr_range(struct objtool_file *file, struct instruction *insn) 4616 { 4617 struct symbol *sym = find_symbol_containing(insn->sec, insn->offset-1); 4618 struct instruction *first; 4619 4620 if (!sym) 4621 return false; 4622 4623 
/*
 * Check a single code reference from @insn to @dest against the IBT rules.
 * A @dest that is an ENDBR is marked used (removed from the seal list).
 *
 * Returns 0 if the reference is acceptable, 1 after printing a warning.
 */
static int __validate_ibt_insn(struct objtool_file *file, struct instruction *insn,
			       struct instruction *dest)
{
	if (dest->type == INSN_ENDBR) {
		/* Keep this ENDBR: drop it from the sealing candidate list. */
		mark_endbr_used(dest);
		return 0;
	}

	if (insn_func(dest) && insn_func(insn) &&
	    insn_func(dest)->pfunc == insn_func(insn)->pfunc) {
		/*
		 * Anything from->to self is either _THIS_IP_ or
		 * IRET-to-self.
		 *
		 * There is no sane way to annotate _THIS_IP_ since the
		 * compiler treats the relocation as a constant and is
		 * happy to fold in offsets, skewing any annotation we
		 * do, leading to vast amounts of false-positives.
		 *
		 * There's also compiler generated _THIS_IP_ through
		 * KCOV and such which we have no hope of annotating.
		 *
		 * As such, blanket accept self-references without
		 * issue.
		 */
		return 0;
	}

	/*
	 * Accept anything ANNOTATE_NOENDBR.
	 */
	if (dest->noendbr)
		return 0;

	/*
	 * Accept if this is the instruction after a symbol
	 * that is (no)endbr -- typical code-range usage.
	 */
	if (noendbr_range(file, dest))
		return 0;

	WARN_INSN(insn, "relocation to !ENDBR: %s", offstr(dest->sec, dest->offset));
	return 1;
}
Ignore 4687 * direct/indirect branches: 4688 */ 4689 switch (insn->type) { 4690 4691 case INSN_CALL: 4692 case INSN_CALL_DYNAMIC: 4693 case INSN_JUMP_CONDITIONAL: 4694 case INSN_JUMP_UNCONDITIONAL: 4695 case INSN_JUMP_DYNAMIC: 4696 case INSN_JUMP_DYNAMIC_CONDITIONAL: 4697 case INSN_RETURN: 4698 case INSN_NOP: 4699 return 0; 4700 4701 case INSN_LEA_RIP: 4702 if (!insn_reloc(file, insn)) { 4703 /* local function pointer reference without reloc */ 4704 4705 off = arch_jump_destination(insn); 4706 4707 dest = find_insn(file, insn->sec, off); 4708 if (!dest) { 4709 WARN_INSN(insn, "corrupt function pointer reference"); 4710 return 1; 4711 } 4712 4713 return __validate_ibt_insn(file, insn, dest); 4714 } 4715 break; 4716 4717 default: 4718 break; 4719 } 4720 4721 for (reloc = insn_reloc(file, insn); 4722 reloc; 4723 reloc = find_reloc_by_dest_range(file->elf, insn->sec, 4724 reloc_offset(reloc) + 1, 4725 (insn->offset + insn->len) - (reloc_offset(reloc) + 1))) { 4726 4727 off = reloc->sym->offset + arch_insn_adjusted_addend(insn, reloc); 4728 4729 dest = find_insn(file, reloc->sym->sec, off); 4730 if (!dest) 4731 continue; 4732 4733 warnings += __validate_ibt_insn(file, insn, dest); 4734 } 4735 4736 return warnings; 4737 } 4738 4739 static int validate_ibt_data_reloc(struct objtool_file *file, 4740 struct reloc *reloc) 4741 { 4742 struct instruction *dest; 4743 4744 dest = find_insn(file, reloc->sym->sec, 4745 reloc->sym->offset + reloc_addend(reloc)); 4746 if (!dest) 4747 return 0; 4748 4749 if (dest->type == INSN_ENDBR) { 4750 mark_endbr_used(dest); 4751 return 0; 4752 } 4753 4754 if (dest->noendbr) 4755 return 0; 4756 4757 WARN_FUNC(reloc->sec->base, reloc_offset(reloc), 4758 "data relocation to !ENDBR: %s", offstr(dest->sec, dest->offset)); 4759 4760 return 1; 4761 } 4762 4763 /* 4764 * Validate IBT rules and remove used ENDBR instructions from the seal list. 
/*
 * Validate IBT rules and remove used ENDBR instructions from the seal list.
 * Unused ENDBR instructions will be annotated for sealing (i.e., replaced
 * with NOPs) later, in create_ibt_endbr_seal_sections().
 *
 * Returns the accumulated warning count.
 */
static int validate_ibt(struct objtool_file *file)
{
	struct section *sec;
	struct reloc *reloc;
	struct instruction *insn;
	int warnings = 0;

	/* First pass: code references (calls, jumps, RIP-relative loads). */
	for_each_insn(file, insn)
		warnings += validate_ibt_insn(file, insn);

	/* Second pass: data-section relocations into text. */
	for_each_sec(file->elf, sec) {

		/* Already done by validate_ibt_insn() */
		if (is_text_sec(sec))
			continue;

		if (!sec->rsec)
			continue;

		/*
		 * These sections can reference text addresses, but not with
		 * the intent to indirect branch to them.
		 */
		if ((!strncmp(sec->name, ".discard", 8) &&
		     strcmp(sec->name, ".discard.ibt_endbr_noseal")) ||
		    !strncmp(sec->name, ".debug", 6) ||
		    !strcmp(sec->name, ".altinstructions") ||
		    !strcmp(sec->name, ".ibt_endbr_seal") ||
		    !strcmp(sec->name, ".kcfi_traps") ||
		    !strcmp(sec->name, ".orc_unwind_ip") ||
		    !strcmp(sec->name, ".retpoline_sites") ||
		    !strcmp(sec->name, ".smp_locks") ||
		    !strcmp(sec->name, ".static_call_sites") ||
		    !strcmp(sec->name, "_error_injection_whitelist") ||
		    !strcmp(sec->name, "_kprobe_blacklist") ||
		    !strcmp(sec->name, "__bug_table") ||
		    !strcmp(sec->name, "__ex_table") ||
		    !strcmp(sec->name, "__jump_table") ||
		    !strcmp(sec->name, ".init.klp_funcs") ||
		    !strcmp(sec->name, "__mcount_loc") ||
		    !strcmp(sec->name, ".llvm.call-graph-profile") ||
		    !strcmp(sec->name, ".llvm_bb_addr_map") ||
		    !strcmp(sec->name, "__tracepoints") ||
		    !strcmp(sec->name, ".return_sites") ||
		    !strcmp(sec->name, ".call_sites") ||
		    !strcmp(sec->name, "__patchable_function_entries"))
			continue;

		for_each_reloc(sec->rsec, reloc)
			warnings += validate_ibt_data_reloc(file, reloc);
	}

	return warnings;
}
4824 { 4825 struct instruction *insn, *next_insn; 4826 int warnings = 0; 4827 4828 for_each_insn(file, insn) { 4829 next_insn = next_insn_same_sec(file, insn); 4830 4831 if (insn->retpoline_safe) 4832 continue; 4833 4834 switch (insn->type) { 4835 case INSN_RETURN: 4836 if (!next_insn || next_insn->type != INSN_TRAP) { 4837 WARN_INSN(insn, "missing int3 after ret"); 4838 warnings++; 4839 } 4840 4841 break; 4842 case INSN_JUMP_DYNAMIC: 4843 if (!next_insn || next_insn->type != INSN_TRAP) { 4844 WARN_INSN(insn, "missing int3 after indirect jump"); 4845 warnings++; 4846 } 4847 break; 4848 default: 4849 break; 4850 } 4851 } 4852 4853 return warnings; 4854 } 4855 4856 static int validate_reachable_instructions(struct objtool_file *file) 4857 { 4858 struct instruction *insn, *prev_insn; 4859 struct symbol *call_dest; 4860 int warnings = 0; 4861 4862 if (file->ignore_unreachables) 4863 return 0; 4864 4865 for_each_insn(file, insn) { 4866 if (insn->visited || ignore_unreachable_insn(file, insn)) 4867 continue; 4868 4869 prev_insn = prev_insn_same_sec(file, insn); 4870 if (prev_insn && prev_insn->dead_end) { 4871 call_dest = insn_call_dest(prev_insn); 4872 if (call_dest) { 4873 WARN_INSN(insn, "%s() missing __noreturn in .c/.h or NORETURN() in noreturns.h", 4874 call_dest->name); 4875 warnings++; 4876 continue; 4877 } 4878 } 4879 4880 WARN_INSN(insn, "unreachable instruction"); 4881 warnings++; 4882 } 4883 4884 return warnings; 4885 } 4886 4887 __weak bool arch_absolute_reloc(struct elf *elf, struct reloc *reloc) 4888 { 4889 unsigned int type = reloc_type(reloc); 4890 size_t sz = elf_addr_size(elf); 4891 4892 return (sz == 8) ? 
(type == R_ABS64) : (type == R_ABS32); 4893 } 4894 4895 static int check_abs_references(struct objtool_file *file) 4896 { 4897 struct section *sec; 4898 struct reloc *reloc; 4899 int ret = 0; 4900 4901 for_each_sec(file->elf, sec) { 4902 /* absolute references in non-loadable sections are fine */ 4903 if (!(sec->sh.sh_flags & SHF_ALLOC)) 4904 continue; 4905 4906 /* section must have an associated .rela section */ 4907 if (!sec->rsec) 4908 continue; 4909 4910 /* 4911 * Special case for compiler generated metadata that is not 4912 * consumed until after boot. 4913 */ 4914 if (!strcmp(sec->name, "__patchable_function_entries")) 4915 continue; 4916 4917 for_each_reloc(sec->rsec, reloc) { 4918 if (arch_absolute_reloc(file->elf, reloc)) { 4919 WARN("section %s has absolute relocation at offset 0x%llx", 4920 sec->name, (unsigned long long)reloc_offset(reloc)); 4921 ret++; 4922 } 4923 } 4924 } 4925 return ret; 4926 } 4927 4928 struct insn_chunk { 4929 void *addr; 4930 struct insn_chunk *next; 4931 }; 4932 4933 /* 4934 * Reduce peak RSS usage by freeing insns memory before writing the ELF file, 4935 * which can trigger more allocations for .debug_* sections whose data hasn't 4936 * been read yet. 
4937 */ 4938 static void free_insns(struct objtool_file *file) 4939 { 4940 struct instruction *insn; 4941 struct insn_chunk *chunks = NULL, *chunk; 4942 4943 for_each_insn(file, insn) { 4944 if (!insn->idx) { 4945 chunk = malloc(sizeof(*chunk)); 4946 chunk->addr = insn; 4947 chunk->next = chunks; 4948 chunks = chunk; 4949 } 4950 } 4951 4952 for (chunk = chunks; chunk; chunk = chunk->next) 4953 free(chunk->addr); 4954 } 4955 4956 const char *objtool_disas_insn(struct instruction *insn) 4957 { 4958 struct disas_context *dctx = objtool_disas_ctx; 4959 4960 if (!dctx) 4961 return ""; 4962 4963 disas_insn(dctx, insn); 4964 return disas_result(dctx); 4965 } 4966 4967 int check(struct objtool_file *file) 4968 { 4969 struct disas_context *disas_ctx = NULL; 4970 int ret = 0, warnings = 0; 4971 4972 /* 4973 * Create a disassembly context if we might disassemble any 4974 * instruction or function. 4975 */ 4976 if (opts.verbose || opts.backtrace || opts.trace || opts.disas) { 4977 disas_ctx = disas_context_create(file); 4978 if (!disas_ctx) { 4979 opts.disas = false; 4980 opts.trace = false; 4981 } 4982 objtool_disas_ctx = disas_ctx; 4983 } 4984 4985 arch_initial_func_cfi_state(&initial_func_cfi); 4986 init_cfi_state(&init_cfi); 4987 init_cfi_state(&func_cfi); 4988 set_func_state(&func_cfi); 4989 init_cfi_state(&force_undefined_cfi); 4990 force_undefined_cfi.force_undefined = true; 4991 4992 if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3))) { 4993 ret = -1; 4994 goto out; 4995 } 4996 4997 cfi_hash_add(&init_cfi); 4998 cfi_hash_add(&func_cfi); 4999 5000 ret = checksum_debug_init(file); 5001 if (ret) 5002 goto out; 5003 5004 ret = decode_sections(file); 5005 if (ret) 5006 goto out; 5007 5008 if (!nr_insns) 5009 goto out; 5010 5011 if (opts.retpoline) 5012 warnings += validate_retpoline(file); 5013 5014 if (validate_branch_enabled()) { 5015 int w = 0; 5016 5017 w += validate_functions(file); 5018 w += validate_unwind_hints(file, NULL); 5019 if (!w) 5020 w += 
validate_reachable_instructions(file); 5021 5022 warnings += w; 5023 5024 } else if (opts.noinstr) { 5025 warnings += validate_noinstr_sections(file); 5026 } 5027 5028 if (opts.unret) { 5029 /* 5030 * Must be after validate_branch() and friends, it plays 5031 * further games with insn->visited. 5032 */ 5033 warnings += validate_unrets(file); 5034 } 5035 5036 if (opts.ibt) 5037 warnings += validate_ibt(file); 5038 5039 if (opts.sls) 5040 warnings += validate_sls(file); 5041 5042 if (opts.static_call) { 5043 ret = create_static_call_sections(file); 5044 if (ret) 5045 goto out; 5046 } 5047 5048 if (opts.retpoline) { 5049 ret = create_retpoline_sites_sections(file); 5050 if (ret) 5051 goto out; 5052 } 5053 5054 if (opts.cfi) { 5055 ret = create_cfi_sections(file); 5056 if (ret) 5057 goto out; 5058 } 5059 5060 if (opts.rethunk) { 5061 ret = create_return_sites_sections(file); 5062 if (ret) 5063 goto out; 5064 5065 if (opts.hack_skylake) { 5066 ret = create_direct_call_sections(file); 5067 if (ret) 5068 goto out; 5069 } 5070 } 5071 5072 if (opts.mcount) { 5073 ret = create_mcount_loc_sections(file); 5074 if (ret) 5075 goto out; 5076 } 5077 5078 if (opts.prefix) { 5079 ret = create_prefix_symbols(file); 5080 if (ret) 5081 goto out; 5082 } 5083 5084 if (opts.ibt) { 5085 ret = create_ibt_endbr_seal_sections(file); 5086 if (ret) 5087 goto out; 5088 } 5089 5090 if (opts.noabs) 5091 warnings += check_abs_references(file); 5092 5093 if (opts.checksum) { 5094 ret = create_sym_checksum_section(file); 5095 if (ret) 5096 goto out; 5097 } 5098 5099 if (opts.orc && nr_insns) { 5100 ret = orc_create(file); 5101 if (ret) 5102 goto out; 5103 } 5104 5105 if (opts.stats) { 5106 printf("nr_insns_visited: %ld\n", nr_insns_visited); 5107 printf("nr_cfi: %ld\n", nr_cfi); 5108 printf("nr_cfi_reused: %ld\n", nr_cfi_reused); 5109 printf("nr_cfi_cache: %ld\n", nr_cfi_cache); 5110 } 5111 5112 out: 5113 if (ret || warnings) { 5114 if (opts.werror && warnings) 5115 ret = 1; 5116 5117 if 
(opts.verbose) { 5118 if (opts.werror && warnings) 5119 WARN("%d warning(s) upgraded to errors", warnings); 5120 disas_warned_funcs(disas_ctx); 5121 } 5122 } 5123 5124 if (opts.disas) 5125 disas_funcs(disas_ctx); 5126 5127 if (disas_ctx) { 5128 disas_context_destroy(disas_ctx); 5129 objtool_disas_ctx = NULL; 5130 } 5131 5132 free_insns(file); 5133 5134 if (!ret && !warnings) 5135 return 0; 5136 5137 if (opts.backup && make_backup()) 5138 return 1; 5139 5140 return ret; 5141 } 5142