1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
4 */
5
6 #define _GNU_SOURCE /* memmem() */
7 #include <fnmatch.h>
8 #include <string.h>
9 #include <stdlib.h>
10 #include <inttypes.h>
11 #include <sys/mman.h>
12
13 #include <objtool/builtin.h>
14 #include <objtool/cfi.h>
15 #include <objtool/arch.h>
16 #include <objtool/disas.h>
17 #include <objtool/check.h>
18 #include <objtool/special.h>
19 #include <objtool/trace.h>
20 #include <objtool/warn.h>
21 #include <objtool/checksum.h>
22 #include <objtool/util.h>
23
24 #include <linux/objtool_types.h>
25 #include <linux/hashtable.h>
26 #include <linux/kernel.h>
27 #include <linux/static_call_types.h>
28 #include <linux/string.h>
29
/* CFI-state interning statistics, reported with --stats. */
static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache;

/* Canonical CFI states used to seed and compare per-instruction state. */
static struct cfi_init_state initial_func_cfi;
static struct cfi_state init_cfi;
static struct cfi_state func_cfi;
static struct cfi_state force_undefined_cfi;

/* Shared disassembly context for the disas/trace helpers. */
struct disas_context *objtool_disas_ctx;

/* Longest symbol name seen; presumably used to align diagnostic output -- confirm at users. */
size_t sym_name_max_len;
40
/*
 * Look up the decoded instruction starting at exactly @sec + @offset via the
 * file-wide instruction hash table.  Returns NULL if no instruction begins at
 * that address.
 */
struct instruction *find_insn(struct objtool_file *file,
			      struct section *sec, unsigned long offset)
{
	struct instruction *insn;

	hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) {
		/* Buckets can collide: confirm the exact section and offset. */
		if (insn->sec == sec && insn->offset == offset)
			return insn;
	}

	return NULL;
}
53
next_insn_same_sec(struct objtool_file * file,struct instruction * insn)54 struct instruction *next_insn_same_sec(struct objtool_file *file,
55 struct instruction *insn)
56 {
57 if (insn->idx == INSN_CHUNK_MAX)
58 return find_insn(file, insn->sec, insn->offset + insn->len);
59
60 insn++;
61 if (!insn->len)
62 return NULL;
63
64 return insn;
65 }
66
next_insn_same_func(struct objtool_file * file,struct instruction * insn)67 static struct instruction *next_insn_same_func(struct objtool_file *file,
68 struct instruction *insn)
69 {
70 struct instruction *next = next_insn_same_sec(file, insn);
71 struct symbol *func = insn_func(insn);
72
73 if (!func)
74 return NULL;
75
76 if (next && insn_func(next) == func)
77 return next;
78
79 /* Check if we're already in the subfunction: */
80 if (func == func->cfunc)
81 return NULL;
82
83 /* Move to the subfunction: */
84 return find_insn(file, func->cfunc->sec, func->cfunc->offset);
85 }
86
prev_insn_same_sec(struct objtool_file * file,struct instruction * insn)87 static struct instruction *prev_insn_same_sec(struct objtool_file *file,
88 struct instruction *insn)
89 {
90 if (insn->idx == 0) {
91 if (insn->prev_len)
92 return find_insn(file, insn->sec, insn->offset - insn->prev_len);
93 return NULL;
94 }
95
96 return insn - 1;
97 }
98
prev_insn_same_sym(struct objtool_file * file,struct instruction * insn)99 static struct instruction *prev_insn_same_sym(struct objtool_file *file,
100 struct instruction *insn)
101 {
102 struct instruction *prev = prev_insn_same_sec(file, insn);
103
104 if (prev && insn_func(prev) == insn_func(insn))
105 return prev;
106
107 return NULL;
108 }
109
/* Iterate over every decoded instruction in the file, section by section. */
#define for_each_insn(file, insn)					\
	for (struct section *__sec, *__fake = (struct section *)1;	\
	     __fake; __fake = NULL)					\
		for_each_sec(file->elf, __sec)				\
			sec_for_each_insn(file, __sec, insn)

/* Iterate a function's instructions, following into its cold subfunction. */
#define func_for_each_insn(file, func, insn)				\
	for (insn = find_insn(file, func->sec, func->offset);		\
	     insn;							\
	     insn = next_insn_same_func(file, insn))

/* Iterate the instructions covered by @sym, staying within one section. */
#define sym_for_each_insn(file, sym, insn)				\
	for (insn = find_insn(file, sym->sec, sym->offset);		\
	     insn && insn->offset < sym->offset + sym->len;		\
	     insn = next_insn_same_sec(file, insn))

/* Walk backwards from (but excluding) @insn to the start of @sym. */
#define sym_for_each_insn_continue_reverse(file, sym, insn)		\
	for (insn = prev_insn_same_sec(file, insn);			\
	     insn && insn->offset >= sym->offset;			\
	     insn = prev_insn_same_sec(file, insn))

/* Continue iterating a section's instructions from @insn (inclusive). */
#define sec_for_each_insn_from(file, insn)				\
	for (; insn; insn = next_insn_same_sec(file, insn))

/* Continue iterating a section's instructions after @insn (exclusive). */
#define sec_for_each_insn_continue(file, insn)				\
	for (insn = next_insn_same_sec(file, insn); insn;		\
	     insn = next_insn_same_sec(file, insn))
137
insn_jump_table(struct instruction * insn)138 static inline struct reloc *insn_jump_table(struct instruction *insn)
139 {
140 if (insn->type == INSN_JUMP_DYNAMIC ||
141 insn->type == INSN_CALL_DYNAMIC)
142 return insn->_jump_table;
143
144 return NULL;
145 }
146
insn_jump_table_size(struct instruction * insn)147 static inline unsigned long insn_jump_table_size(struct instruction *insn)
148 {
149 if (insn->type == INSN_JUMP_DYNAMIC ||
150 insn->type == INSN_CALL_DYNAMIC)
151 return insn->_jump_table_size;
152
153 return 0;
154 }
155
is_jump_table_jump(struct instruction * insn)156 static bool is_jump_table_jump(struct instruction *insn)
157 {
158 struct alt_group *alt_group = insn->alt_group;
159
160 if (insn_jump_table(insn))
161 return true;
162
163 /* Retpoline alternative for a jump table? */
164 return alt_group && alt_group->orig_group &&
165 insn_jump_table(alt_group->orig_group->first_insn);
166 }
167
/*
 * Is @insn a sibling (tail) call?  An indirect jump inside a function is a
 * sibling call unless it is a jump-table dispatch; a direct jump is a sibling
 * call when add_jump_destinations() resolved its target to a call destination.
 */
static bool is_sibling_call(struct instruction *insn)
{
	/*
	 * Assume only STT_FUNC calls have jump-tables.
	 */
	if (insn_func(insn)) {
		/* An indirect jump is either a sibling call or a jump to a table. */
		if (insn->type == INSN_JUMP_DYNAMIC)
			return !is_jump_table_jump(insn);
	}

	/* add_jump_destinations() sets insn_call_dest(insn) for sibling calls. */
	return (is_static_jump(insn) && insn_call_dest(insn));
}
182
183 /*
184 * Checks if a function is a Rust "noreturn" one.
185 */
is_rust_noreturn(const struct symbol * func)186 static bool is_rust_noreturn(const struct symbol *func)
187 {
188 /*
189 * If it does not start with "_R", then it is not a Rust symbol.
190 */
191 if (strncmp(func->name, "_R", 2))
192 return false;
193
194 /*
195 * These are just heuristics -- we do not control the precise symbol
196 * name, due to the crate disambiguators (which depend on the compiler)
197 * as well as changes to the source code itself between versions (since
198 * these come from the Rust standard library).
199 */
200 return str_ends_with(func->name, "_4core3num20from_str_radix_panic") ||
201 str_ends_with(func->name, "_4core3num22from_ascii_radix_panic") ||
202 str_ends_with(func->name, "_4core5sliceSp15copy_from_slice17len_mismatch_fail") ||
203 str_ends_with(func->name, "_4core6option13expect_failed") ||
204 str_ends_with(func->name, "_4core6option13unwrap_failed") ||
205 str_ends_with(func->name, "_4core6result13unwrap_failed") ||
206 str_ends_with(func->name, "_4core9panicking5panic") ||
207 str_ends_with(func->name, "_4core9panicking9panic_fmt") ||
208 str_ends_with(func->name, "_4core9panicking14panic_explicit") ||
209 str_ends_with(func->name, "_4core9panicking14panic_nounwind") ||
210 str_ends_with(func->name, "_4core9panicking18panic_bounds_check") ||
211 str_ends_with(func->name, "_4core9panicking18panic_nounwind_fmt") ||
212 str_ends_with(func->name, "_4core9panicking19assert_failed_inner") ||
213 str_ends_with(func->name, "_4core9panicking30panic_null_pointer_dereference") ||
214 str_ends_with(func->name, "_4core9panicking36panic_misaligned_pointer_dereference") ||
215 str_ends_with(func->name, "_7___rustc17rust_begin_unwind") ||
216 strstr(func->name, "_4core9panicking13assert_failed") ||
217 strstr(func->name, "_4core9panicking11panic_const24panic_const_") ||
218 (strstr(func->name, "_4core5slice5index") &&
219 strstr(func->name, "slice_") &&
220 str_ends_with(func->name, "_fail"));
221 }
222
/*
 * This checks to see if the given function is a "noreturn" function.
 *
 * For global functions which are outside the scope of this object file, we
 * have to keep a manual list of them.
 *
 * For local functions, we have to detect them manually by simply looking for
 * the lack of a return instruction.
 *
 * @recursion bounds the depth of sibling-call chasing (see below).
 */
static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
				int recursion)
{
	int i;
	struct instruction *insn;
	bool empty = true;

	/* Table of known global noreturn functions, one name per NORETURN(). */
#define NORETURN(func) __stringify(func),
	static const char * const global_noreturns[] = {
#include "noreturns.h"
	};
#undef NORETURN

	if (!func)
		return false;

	/* Global symbols: consult the Rust heuristics and the manual list. */
	if (!is_local_sym(func)) {
		if (is_rust_noreturn(func))
			return true;

		for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
			if (!strcmp(func->name, global_noreturns[i]))
				return true;
	}

	/* A weak symbol may be overridden by a returning implementation. */
	if (is_weak_sym(func))
		return false;

	if (!func->len)
		return false;

	insn = find_insn(file, func->sec, func->offset);
	if (!insn || !insn_func(insn))
		return false;

	/* Any RETURN instruction means the function can return. */
	func_for_each_insn(file, func, insn) {
		empty = false;

		if (insn->type == INSN_RETURN)
			return false;
	}

	if (empty)
		return false;

	/*
	 * A function can have a sibling call instead of a return.  In that
	 * case, the function's dead-end status depends on whether the target
	 * of the sibling call returns.
	 */
	func_for_each_insn(file, func, insn) {
		if (is_sibling_call(insn)) {
			struct instruction *dest = insn->jump_dest;

			if (!dest)
				/* sibling call to another file */
				return false;

			/* local sibling call */
			if (recursion == 5) {
				/*
				 * Infinite recursion: two functions have
				 * sibling calls to each other.  This is a very
				 * rare case.  It means they aren't dead ends.
				 */
				return false;
			}

			return __dead_end_function(file, insn_func(dest), recursion+1);
		}
	}

	return true;
}
306
/* Does @func never return (directly or via its sibling-call chain)? */
static bool dead_end_function(struct objtool_file *file, struct symbol *func)
{
	return __dead_end_function(file, func, 0);
}
311
init_cfi_state(struct cfi_state * cfi)312 static void init_cfi_state(struct cfi_state *cfi)
313 {
314 int i;
315
316 for (i = 0; i < CFI_NUM_REGS; i++) {
317 cfi->regs[i].base = CFI_UNDEFINED;
318 cfi->vals[i].base = CFI_UNDEFINED;
319 }
320 cfi->cfa.base = CFI_UNDEFINED;
321 cfi->drap_reg = CFI_UNDEFINED;
322 cfi->drap_offset = -1;
323 }
324
init_insn_state(struct objtool_file * file,struct insn_state * state,struct section * sec)325 static void init_insn_state(struct objtool_file *file, struct insn_state *state,
326 struct section *sec)
327 {
328 memset(state, 0, sizeof(*state));
329 init_cfi_state(&state->cfi);
330
331 if (opts.noinstr && sec)
332 state->noinstr = sec->noinstr;
333 }
334
cfi_alloc(void)335 static struct cfi_state *cfi_alloc(void)
336 {
337 struct cfi_state *cfi = calloc(1, sizeof(struct cfi_state));
338 if (!cfi) {
339 ERROR_GLIBC("calloc");
340 exit(1);
341 }
342 nr_cfi++;
343 return cfi;
344 }
345
/* Hash table interning identical CFI states (see cfi_hash_find_or_add()). */
static int cfi_bits;
static struct hlist_head *cfi_hash;
348
/*
 * Compare two CFI states, skipping the embedded hlist node ('hash') at the
 * start of the struct so only the actual state contents are compared.
 * Like memcmp(), returns false (0) on a match.
 */
static inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2)
{
	return memcmp((void *)cfi1 + sizeof(cfi1->hash),
		      (void *)cfi2 + sizeof(cfi2->hash),
		      sizeof(struct cfi_state) - sizeof(struct hlist_node));
}
355
/* Hash a CFI state's contents, excluding the embedded hlist node. */
static inline u32 cfi_key(struct cfi_state *cfi)
{
	return jhash((void *)cfi + sizeof(cfi->hash),
		     sizeof(*cfi) - sizeof(cfi->hash), 0);
}
361
cfi_hash_find_or_add(struct cfi_state * cfi)362 static struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi)
363 {
364 struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
365 struct cfi_state *obj;
366
367 hlist_for_each_entry(obj, head, hash) {
368 if (!cficmp(cfi, obj)) {
369 nr_cfi_cache++;
370 return obj;
371 }
372 }
373
374 obj = cfi_alloc();
375 *obj = *cfi;
376 hlist_add_head(&obj->hash, head);
377
378 return obj;
379 }
380
/* Insert @cfi into the interning hash table without checking for duplicates. */
static void cfi_hash_add(struct cfi_state *cfi)
{
	struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];

	hlist_add_head(&cfi->hash, head);
}
387
cfi_hash_alloc(unsigned long size)388 static void *cfi_hash_alloc(unsigned long size)
389 {
390 cfi_bits = max(10, ilog2(size));
391 cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits,
392 PROT_READ|PROT_WRITE,
393 MAP_PRIVATE|MAP_ANON, -1, 0);
394 if (cfi_hash == (void *)-1L) {
395 ERROR_GLIBC("mmap fail cfi_hash");
396 cfi_hash = NULL;
397 } else if (opts.stats) {
398 printf("cfi_bits: %d\n", cfi_bits);
399 }
400
401 return cfi_hash;
402 }
403
/* Decoded / validated instruction counts, reported with --stats. */
static unsigned long nr_insns;
static unsigned long nr_insns_visited;
406
/*
 * Call the arch-specific instruction decoder for all the instructions and add
 * them to the global instruction list.
 */
static int decode_instructions(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;
	unsigned long offset;
	struct instruction *insn;

	for_each_sec(file->elf, sec) {
		/* Instructions are allocated in fixed-size chunks. */
		struct instruction *insns = NULL;
		u8 prev_len = 0;
		u8 idx = 0;

		if (!is_text_sec(sec))
			continue;

		/* Alternative-replacement and .discard sections aren't "real" text. */
		if (strcmp(sec->name, ".altinstr_replacement") &&
		    strcmp(sec->name, ".altinstr_aux") &&
		    strncmp(sec->name, ".discard.", 9))
			sec->text = true;

		if (!strcmp(sec->name, ".noinstr.text") ||
		    !strcmp(sec->name, ".entry.text") ||
		    !strcmp(sec->name, ".cpuidle.text") ||
		    !strncmp(sec->name, ".text..__x86.", 13))
			sec->noinstr = true;

		/*
		 * .init.text code is ran before userspace and thus doesn't
		 * strictly need retpolines, except for modules which are
		 * loaded late, they very much do need retpoline in their
		 * .init.text
		 */
		if (!strcmp(sec->name, ".init.text") && !opts.module)
			sec->init = true;

		/* Decode every instruction in the section, chunk by chunk. */
		for (offset = 0; offset < sec_size(sec); offset += insn->len) {
			if (!insns || idx == INSN_CHUNK_MAX) {
				insns = calloc(INSN_CHUNK_SIZE, sizeof(*insn));
				if (!insns) {
					ERROR_GLIBC("calloc");
					return -1;
				}
				idx = 0;
			} else {
				idx++;
			}
			insn = &insns[idx];
			insn->idx = idx;

			INIT_LIST_HEAD(&insn->call_node);
			insn->sec = sec;
			insn->offset = offset;
			insn->prev_len = prev_len;

			if (arch_decode_instruction(file, sec, offset, sec_size(sec) - offset, insn))
				return -1;

			prev_len = insn->len;

			/*
			 * By default, "ud2" is a dead end unless otherwise
			 * annotated, because GCC 7 inserts it for certain
			 * divide-by-zero cases.
			 */
			if (insn->type == INSN_BUG)
				insn->dead_end = true;

			hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset));
			nr_insns++;
		}

		/* Associate each function symbol with its instructions. */
		sec_for_each_sym(sec, func) {
			if (!is_notype_sym(func) && !is_func_sym(func))
				continue;

			if (func->offset == sec_size(sec)) {
				/* Heuristic: likely an "end" symbol */
				if (is_notype_sym(func))
					continue;
				ERROR("%s(): STT_FUNC at end of section", func->name);
				return -1;
			}

			if (func->embedded_insn || func->alias != func)
				continue;

			if (!find_insn(file, sec, func->offset)) {
				ERROR("%s(): can't find starting instruction", func->name);
				return -1;
			}

			sym_for_each_insn(file, func, insn) {
				insn->sym = func;
				/* Track ENDBRs: function entry vs. interior (IBT sealing). */
				if (is_func_sym(func) &&
				    insn->type == INSN_ENDBR &&
				    list_empty(&insn->call_node)) {
					if (insn->offset == func->offset) {
						list_add_tail(&insn->call_node, &file->endbr_list);
						file->nr_endbr++;
					} else {
						file->nr_endbr_int++;
					}
				}
			}
		}
	}

	if (opts.stats)
		printf("nr_insns: %lu\n", nr_insns);

	return 0;
}
523
/*
 * Known pv_ops*[] arrays.  idx_off is the base index of each array within
 * file->pv_ops[], filled in by init_pv_ops(); the NULL-name sentinel carries
 * -1 so lookups of unknown arrays fail cleanly.
 */
static struct {
	const char *name;
	int idx_off;
} pv_ops_tables[] = {
	{ .name = "pv_ops", },
	{ .name = "pv_ops_lock", },
	{ .name = NULL, .idx_off = -1 }
};
535
536 /*
537 * Get index offset for a pv_ops* array.
538 */
pv_ops_idx_off(const char * symname)539 int pv_ops_idx_off(const char *symname)
540 {
541 int idx;
542
543 for (idx = 0; pv_ops_tables[idx].name; idx++) {
544 if (!strcmp(symname, pv_ops_tables[idx].name))
545 break;
546 }
547
548 return pv_ops_tables[idx].idx_off;
549 }
550
/*
 * Read a pv_ops*[] .data table to find the static initialized values.
 */
static int add_pv_ops(struct objtool_file *file, int pv_ops_idx)
{
	struct symbol *sym, *func;
	unsigned long off, end;
	struct reloc *reloc;
	int idx, idx_off;
	const char *symname;

	symname = pv_ops_tables[pv_ops_idx].name;
	sym = find_symbol_by_name(file->elf, symname);
	if (!sym) {
		ERROR("Unknown pv_ops array %s", symname);
		return -1;
	}

	off = sym->offset;
	end = off + sym->len;
	idx_off = pv_ops_tables[pv_ops_idx].idx_off;
	if (idx_off < 0) {
		ERROR("pv_ops array %s has unknown index offset", symname);
		return -1;
	}

	/* Walk every relocation inside the array; each one is an initializer. */
	for (;;) {
		reloc = find_reloc_by_dest_range(file->elf, sym->sec, off, end - off);
		if (!reloc)
			break;

		/* Slot index within this array, assuming pointer-sized entries. */
		idx = (reloc_offset(reloc) - sym->offset) / sizeof(unsigned long);

		func = reloc->sym;
		/* Section-relative relocs need resolving to the actual function. */
		if (is_sec_sym(func))
			func = find_symbol_by_offset(reloc->sym->sec,
						     reloc_addend(reloc));
		if (!func) {
			ERROR_FUNC(reloc->sym->sec, reloc_addend(reloc),
				   "can't find func at %s[%d]", symname, idx);
			return -1;
		}

		if (objtool_pv_add(file, idx + idx_off, func))
			return -1;

		/* Continue the search just past this reloc. */
		off = reloc_offset(reloc) + 1;
		if (off > end)
			break;
	}

	return 0;
}
604
/*
 * Allocate and initialize file->pv_ops[].  Only needed for noinstr
 * validation; assigns each known pv_ops array a contiguous index range and
 * then records its statically-initialized targets.
 */
static int init_pv_ops(struct objtool_file *file)
{
	struct symbol *sym;
	int idx, nr;

	if (!opts.noinstr)
		return 0;

	file->pv_ops = NULL;

	/* First pass: assign base offsets and count total slots. */
	nr = 0;
	for (idx = 0; pv_ops_tables[idx].name; idx++) {
		sym = find_symbol_by_name(file->elf, pv_ops_tables[idx].name);
		if (!sym) {
			/* Array not present in this object: mark unavailable. */
			pv_ops_tables[idx].idx_off = -1;
			continue;
		}
		pv_ops_tables[idx].idx_off = nr;
		nr += sym->len / sizeof(unsigned long);
	}

	if (nr == 0)
		return 0;

	file->pv_ops = calloc(nr, sizeof(struct pv_state));
	if (!file->pv_ops) {
		ERROR_GLIBC("calloc");
		return -1;
	}

	for (idx = 0; idx < nr; idx++)
		INIT_LIST_HEAD(&file->pv_ops[idx].targets);

	/* Second pass: record the initial target functions of each array. */
	for (idx = 0; pv_ops_tables[idx].name; idx++) {
		if (pv_ops_tables[idx].idx_off < 0)
			continue;
		if (add_pv_ops(file, idx))
			return -1;
	}

	return 0;
}
650
is_livepatch_module(struct objtool_file * file)651 static bool is_livepatch_module(struct objtool_file *file)
652 {
653 struct section *sec;
654
655 if (!opts.module)
656 return false;
657
658 sec = find_section_by_name(file->elf, ".modinfo");
659 if (!sec)
660 return false;
661
662 return memmem(sec->data->d_buf, sec_size(sec), "\0livepatch=Y", 12);
663 }
664
create_static_call_sections(struct objtool_file * file)665 static int create_static_call_sections(struct objtool_file *file)
666 {
667 struct static_call_site *site;
668 struct section *sec;
669 struct instruction *insn;
670 struct symbol *key_sym;
671 char *key_name, *tmp;
672 int idx;
673
674 sec = find_section_by_name(file->elf, ".static_call_sites");
675 if (sec) {
676 /*
677 * Livepatch modules may have already extracted the static call
678 * site entries to take advantage of vmlinux static call
679 * privileges.
680 */
681 if (!file->klp)
682 WARN("file already has .static_call_sites section, skipping");
683
684 return 0;
685 }
686
687 if (list_empty(&file->static_call_list))
688 return 0;
689
690 idx = 0;
691 list_for_each_entry(insn, &file->static_call_list, call_node)
692 idx++;
693
694 sec = elf_create_section_pair(file->elf, ".static_call_sites",
695 sizeof(*site), idx, idx * 2);
696 if (!sec)
697 return -1;
698
699 /* Allow modules to modify the low bits of static_call_site::key */
700 sec->sh.sh_flags |= SHF_WRITE;
701
702 idx = 0;
703 list_for_each_entry(insn, &file->static_call_list, call_node) {
704
705 /* populate reloc for 'addr' */
706 if (!elf_init_reloc_text_sym(file->elf, sec,
707 idx * sizeof(*site), idx * 2,
708 insn->sec, insn->offset))
709 return -1;
710
711 /* find key symbol */
712 key_name = strdup(insn_call_dest(insn)->name);
713 if (!key_name) {
714 ERROR_GLIBC("strdup");
715 return -1;
716 }
717 if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR,
718 STATIC_CALL_TRAMP_PREFIX_LEN)) {
719 ERROR("static_call: trampoline name malformed: %s", key_name);
720 return -1;
721 }
722 tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN;
723 memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN);
724
725 key_sym = find_symbol_by_name(file->elf, tmp);
726 if (!key_sym) {
727 if (!opts.module) {
728 ERROR("static_call: can't find static_call_key symbol: %s", tmp);
729 return -1;
730 }
731
732 /*
733 * For modules(), the key might not be exported, which
734 * means the module can make static calls but isn't
735 * allowed to change them.
736 *
737 * In that case we temporarily set the key to be the
738 * trampoline address. This is fixed up in
739 * static_call_add_module().
740 */
741 key_sym = insn_call_dest(insn);
742 }
743
744 /* populate reloc for 'key' */
745 if (!elf_init_reloc_data_sym(file->elf, sec,
746 idx * sizeof(*site) + 4,
747 (idx * 2) + 1, key_sym,
748 is_sibling_call(insn) * STATIC_CALL_SITE_TAIL))
749 return -1;
750
751 idx++;
752 }
753
754 return 0;
755 }
756
create_retpoline_sites_sections(struct objtool_file * file)757 static int create_retpoline_sites_sections(struct objtool_file *file)
758 {
759 struct instruction *insn;
760 struct section *sec;
761 int idx;
762
763 sec = find_section_by_name(file->elf, ".retpoline_sites");
764 if (sec) {
765 WARN("file already has .retpoline_sites, skipping");
766 return 0;
767 }
768
769 idx = 0;
770 list_for_each_entry(insn, &file->retpoline_call_list, call_node)
771 idx++;
772
773 if (!idx)
774 return 0;
775
776 sec = elf_create_section_pair(file->elf, ".retpoline_sites",
777 sizeof(int), idx, idx);
778 if (!sec)
779 return -1;
780
781 idx = 0;
782 list_for_each_entry(insn, &file->retpoline_call_list, call_node) {
783
784 if (!elf_init_reloc_text_sym(file->elf, sec,
785 idx * sizeof(int), idx,
786 insn->sec, insn->offset))
787 return -1;
788
789 idx++;
790 }
791
792 return 0;
793 }
794
create_return_sites_sections(struct objtool_file * file)795 static int create_return_sites_sections(struct objtool_file *file)
796 {
797 struct instruction *insn;
798 struct section *sec;
799 int idx;
800
801 sec = find_section_by_name(file->elf, ".return_sites");
802 if (sec) {
803 WARN("file already has .return_sites, skipping");
804 return 0;
805 }
806
807 idx = 0;
808 list_for_each_entry(insn, &file->return_thunk_list, call_node)
809 idx++;
810
811 if (!idx)
812 return 0;
813
814 sec = elf_create_section_pair(file->elf, ".return_sites",
815 sizeof(int), idx, idx);
816 if (!sec)
817 return -1;
818
819 idx = 0;
820 list_for_each_entry(insn, &file->return_thunk_list, call_node) {
821
822 if (!elf_init_reloc_text_sym(file->elf, sec,
823 idx * sizeof(int), idx,
824 insn->sec, insn->offset))
825 return -1;
826
827 idx++;
828 }
829
830 return 0;
831 }
832
/*
 * Emit the .ibt_endbr_seal section: one entry per superfluous ENDBR that the
 * kernel may overwrite (seal) at boot.
 */
static int create_ibt_endbr_seal_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".ibt_endbr_seal");
	if (sec) {
		WARN("file already has .ibt_endbr_seal, skipping");
		return 0;
	}

	idx = 0;
	list_for_each_entry(insn, &file->endbr_list, call_node)
		idx++;

	if (opts.stats) {
		printf("ibt: ENDBR at function start: %d\n", file->nr_endbr);
		printf("ibt: ENDBR inside functions: %d\n", file->nr_endbr_int);
		printf("ibt: superfluous ENDBR: %d\n", idx);
	}

	if (!idx)
		return 0;

	sec = elf_create_section_pair(file->elf, ".ibt_endbr_seal",
				      sizeof(int), idx, idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->endbr_list, call_node) {

		int *site = (int *)sec->data->d_buf + idx;
		struct symbol *sym = insn->sym;
		/* Placeholder value; the reloc below provides the address. */
		*site = 0;

		/*
		 * An ENDBR at the start of init_module()/cleanup_module() means
		 * the module relies on the deprecated magic function names.
		 */
		if (opts.module && sym && is_func_sym(sym) &&
		    insn->offset == sym->offset &&
		    (!strcmp(sym->name, "init_module") ||
		     !strcmp(sym->name, "cleanup_module"))) {
			ERROR("%s(): Magic init_module() function name is deprecated, use module_init(fn) instead",
			      sym->name);
			return -1;
		}

		if (!elf_init_reloc_text_sym(file->elf, sec,
					     idx * sizeof(int), idx,
					     insn->sec, insn->offset))
			return -1;

		idx++;
	}

	return 0;
}
889
create_cfi_sections(struct objtool_file * file)890 static int create_cfi_sections(struct objtool_file *file)
891 {
892 struct section *sec;
893 struct symbol *sym;
894 int idx;
895
896 sec = find_section_by_name(file->elf, ".cfi_sites");
897 if (sec) {
898 WARN("file already has .cfi_sites section, skipping");
899 return 0;
900 }
901
902 idx = 0;
903 for_each_sym(file->elf, sym) {
904 if (!is_func_sym(sym))
905 continue;
906
907 if (strncmp(sym->name, "__cfi_", 6))
908 continue;
909
910 idx++;
911 }
912
913 sec = elf_create_section_pair(file->elf, ".cfi_sites",
914 sizeof(unsigned int), idx, idx);
915 if (!sec)
916 return -1;
917
918 idx = 0;
919 for_each_sym(file->elf, sym) {
920 if (!is_func_sym(sym))
921 continue;
922
923 if (strncmp(sym->name, "__cfi_", 6))
924 continue;
925
926 if (!elf_init_reloc_text_sym(file->elf, sec,
927 idx * sizeof(unsigned int), idx,
928 sym->sec, sym->offset))
929 return -1;
930
931 idx++;
932 }
933
934 return 0;
935 }
936
/*
 * Emit the __mcount_loc section: one address-sized absolute reloc per
 * __fentry__/mcount call site, for ftrace.
 */
static int create_mcount_loc_sections(struct objtool_file *file)
{
	size_t addr_size = elf_addr_size(file->elf);
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, "__mcount_loc");
	if (sec) {
		/*
		 * Livepatch modules have already extracted their __mcount_loc
		 * entries to cover the !CONFIG_FTRACE_MCOUNT_USE_OBJTOOL case.
		 */
		if (!file->klp)
			WARN("file already has __mcount_loc section, skipping");

		return 0;
	}

	if (list_empty(&file->mcount_loc_list))
		return 0;

	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, call_node)
		idx++;

	sec = elf_create_section_pair(file->elf, "__mcount_loc", addr_size,
				      idx, idx);
	if (!sec)
		return -1;

	sec->sh.sh_addralign = addr_size;

	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, call_node) {

		struct reloc *reloc;

		reloc = elf_init_reloc_text_sym(file->elf, sec, idx * addr_size, idx,
						insn->sec, insn->offset);
		if (!reloc)
			return -1;

		/* Entries are absolute addresses, sized for the target ELF class. */
		set_reloc_type(file->elf, reloc, addr_size == 8 ? R_ABS64 : R_ABS32);

		idx++;
	}

	return 0;
}
987
create_direct_call_sections(struct objtool_file * file)988 static int create_direct_call_sections(struct objtool_file *file)
989 {
990 struct instruction *insn;
991 struct section *sec;
992 int idx;
993
994 sec = find_section_by_name(file->elf, ".call_sites");
995 if (sec) {
996 WARN("file already has .call_sites section, skipping");
997 return 0;
998 }
999
1000 if (list_empty(&file->call_list))
1001 return 0;
1002
1003 idx = 0;
1004 list_for_each_entry(insn, &file->call_list, call_node)
1005 idx++;
1006
1007 sec = elf_create_section_pair(file->elf, ".call_sites",
1008 sizeof(unsigned int), idx, idx);
1009 if (!sec)
1010 return -1;
1011
1012 idx = 0;
1013 list_for_each_entry(insn, &file->call_list, call_node) {
1014
1015 if (!elf_init_reloc_text_sym(file->elf, sec,
1016 idx * sizeof(unsigned int), idx,
1017 insn->sec, insn->offset))
1018 return -1;
1019
1020 idx++;
1021 }
1022
1023 return 0;
1024 }
1025
#ifdef BUILD_KLP
/*
 * Emit the .discard.sym_checksum section: one {addr, checksum} entry per
 * symbol with a computed checksum, used by livepatch tooling.
 */
static int create_sym_checksum_section(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *sym;
	unsigned int idx = 0;
	struct sym_checksum *checksum;
	size_t entsize = sizeof(struct sym_checksum);

	sec = find_section_by_name(file->elf, ".discard.sym_checksum");
	if (sec) {
		if (!opts.dryrun)
			WARN("file already has .discard.sym_checksum section, skipping");

		return 0;
	}

	/* Count symbols that carry a checksum. */
	for_each_sym(file->elf, sym)
		if (sym->csum.checksum)
			idx++;

	if (!idx)
		return 0;

	sec = elf_create_section_pair(file->elf, ".discard.sym_checksum", entsize,
				      idx, idx);
	if (!sec)
		return -1;

	idx = 0;
	for_each_sym(file->elf, sym) {
		if (!sym->csum.checksum)
			continue;

		if (!elf_init_reloc(file->elf, sec->rsec, idx, idx * entsize,
				    sym, 0, R_TEXT64))
			return -1;

		checksum = (struct sym_checksum *)sec->data->d_buf + idx;
		checksum->addr = 0; /* reloc */
		checksum->checksum = sym->csum.checksum;

		mark_sec_changed(file->elf, sec, true);

		idx++;
	}

	return 0;
}
#else
/* Stub when KLP support isn't built in; callers get -EINVAL. */
static int create_sym_checksum_section(struct objtool_file *file) { return -EINVAL; }
#endif
1078
1079 /*
1080 * Warnings shouldn't be reported for ignored functions.
1081 */
add_ignores(struct objtool_file * file)1082 static int add_ignores(struct objtool_file *file)
1083 {
1084 struct section *rsec;
1085 struct symbol *func;
1086 struct reloc *reloc;
1087
1088 rsec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
1089 if (!rsec)
1090 return 0;
1091
1092 for_each_reloc(rsec, reloc) {
1093 switch (reloc->sym->type) {
1094 case STT_FUNC:
1095 func = reloc->sym;
1096 break;
1097
1098 case STT_SECTION:
1099 func = find_func_by_offset(reloc->sym->sec, reloc_addend(reloc));
1100 if (!func)
1101 continue;
1102 break;
1103
1104 default:
1105 ERROR("unexpected relocation symbol type in %s: %d",
1106 rsec->name, reloc->sym->type);
1107 return -1;
1108 }
1109
1110 func->ignore = true;
1111 if (func->cfunc)
1112 func->cfunc->ignore = true;
1113 }
1114
1115 return 0;
1116 }
1117
1118 /*
1119 * This is a whitelist of functions that is allowed to be called with AC set.
1120 * The list is meant to be minimal and only contains compiler instrumentation
1121 * ABI and a few functions used to implement *_{to,from}_user() functions.
1122 *
1123 * These functions must not directly change AC, but may PUSHF/POPF.
1124 */
static const char *uaccess_safe_builtin[] = {
	/* KASAN */
	"kasan_report",
	"kasan_check_range",
	/* KASAN out-of-line */
	"__asan_loadN_noabort",
	"__asan_load1_noabort",
	"__asan_load2_noabort",
	"__asan_load4_noabort",
	"__asan_load8_noabort",
	"__asan_load16_noabort",
	"__asan_storeN_noabort",
	"__asan_store1_noabort",
	"__asan_store2_noabort",
	"__asan_store4_noabort",
	"__asan_store8_noabort",
	"__asan_store16_noabort",
	"__kasan_check_read",
	"__kasan_check_write",
	/* KASAN in-line */
	"__asan_report_load_n_noabort",
	"__asan_report_load1_noabort",
	"__asan_report_load2_noabort",
	"__asan_report_load4_noabort",
	"__asan_report_load8_noabort",
	"__asan_report_load16_noabort",
	"__asan_report_store_n_noabort",
	"__asan_report_store1_noabort",
	"__asan_report_store2_noabort",
	"__asan_report_store4_noabort",
	"__asan_report_store8_noabort",
	"__asan_report_store16_noabort",
	/* KCSAN */
	"__kcsan_check_access",
	"__kcsan_mb",
	"__kcsan_wmb",
	"__kcsan_rmb",
	"__kcsan_release",
	"kcsan_found_watchpoint",
	"kcsan_setup_watchpoint",
	"kcsan_check_scoped_accesses",
	"kcsan_disable_current",
	"kcsan_enable_current_nowarn",
	/* KCSAN/TSAN */
	"__tsan_func_entry",
	"__tsan_func_exit",
	"__tsan_read_range",
	"__tsan_write_range",
	"__tsan_read1",
	"__tsan_read2",
	"__tsan_read4",
	"__tsan_read8",
	"__tsan_read16",
	"__tsan_write1",
	"__tsan_write2",
	"__tsan_write4",
	"__tsan_write8",
	"__tsan_write16",
	"__tsan_read_write1",
	"__tsan_read_write2",
	"__tsan_read_write4",
	"__tsan_read_write8",
	"__tsan_read_write16",
	"__tsan_volatile_read1",
	"__tsan_volatile_read2",
	"__tsan_volatile_read4",
	"__tsan_volatile_read8",
	"__tsan_volatile_read16",
	"__tsan_volatile_write1",
	"__tsan_volatile_write2",
	"__tsan_volatile_write4",
	"__tsan_volatile_write8",
	"__tsan_volatile_write16",
	"__tsan_atomic8_load",
	"__tsan_atomic16_load",
	"__tsan_atomic32_load",
	"__tsan_atomic64_load",
	"__tsan_atomic8_store",
	"__tsan_atomic16_store",
	"__tsan_atomic32_store",
	"__tsan_atomic64_store",
	"__tsan_atomic8_exchange",
	"__tsan_atomic16_exchange",
	"__tsan_atomic32_exchange",
	"__tsan_atomic64_exchange",
	"__tsan_atomic8_fetch_add",
	"__tsan_atomic16_fetch_add",
	"__tsan_atomic32_fetch_add",
	"__tsan_atomic64_fetch_add",
	"__tsan_atomic8_fetch_sub",
	"__tsan_atomic16_fetch_sub",
	"__tsan_atomic32_fetch_sub",
	"__tsan_atomic64_fetch_sub",
	"__tsan_atomic8_fetch_and",
	"__tsan_atomic16_fetch_and",
	"__tsan_atomic32_fetch_and",
	"__tsan_atomic64_fetch_and",
	"__tsan_atomic8_fetch_or",
	"__tsan_atomic16_fetch_or",
	"__tsan_atomic32_fetch_or",
	"__tsan_atomic64_fetch_or",
	"__tsan_atomic8_fetch_xor",
	"__tsan_atomic16_fetch_xor",
	"__tsan_atomic32_fetch_xor",
	"__tsan_atomic64_fetch_xor",
	"__tsan_atomic8_fetch_nand",
	"__tsan_atomic16_fetch_nand",
	"__tsan_atomic32_fetch_nand",
	"__tsan_atomic64_fetch_nand",
	"__tsan_atomic8_compare_exchange_strong",
	"__tsan_atomic16_compare_exchange_strong",
	"__tsan_atomic32_compare_exchange_strong",
	"__tsan_atomic64_compare_exchange_strong",
	"__tsan_atomic8_compare_exchange_weak",
	"__tsan_atomic16_compare_exchange_weak",
	"__tsan_atomic32_compare_exchange_weak",
	"__tsan_atomic64_compare_exchange_weak",
	"__tsan_atomic8_compare_exchange_val",
	"__tsan_atomic16_compare_exchange_val",
	"__tsan_atomic32_compare_exchange_val",
	"__tsan_atomic64_compare_exchange_val",
	"__tsan_atomic_thread_fence",
	"__tsan_atomic_signal_fence",
	"__tsan_unaligned_read16",
	"__tsan_unaligned_write16",
	/* KCOV */
	"write_comp_data",
	"check_kcov_mode",
	"__sanitizer_cov_trace_pc",
	"__sanitizer_cov_trace_const_cmp1",
	"__sanitizer_cov_trace_const_cmp2",
	"__sanitizer_cov_trace_const_cmp4",
	"__sanitizer_cov_trace_const_cmp8",
	"__sanitizer_cov_trace_cmp1",
	"__sanitizer_cov_trace_cmp2",
	"__sanitizer_cov_trace_cmp4",
	"__sanitizer_cov_trace_cmp8",
	"__sanitizer_cov_trace_switch",
	/* KMSAN */
	"kmsan_copy_to_user",
	"kmsan_disable_current",
	"kmsan_enable_current",
	"kmsan_report",
	"kmsan_unpoison_entry_regs",
	"kmsan_unpoison_memory",
	"__msan_chain_origin",
	"__msan_get_context_state",
	"__msan_instrument_asm_store",
	"__msan_metadata_ptr_for_load_1",
	"__msan_metadata_ptr_for_load_2",
	"__msan_metadata_ptr_for_load_4",
	"__msan_metadata_ptr_for_load_8",
	"__msan_metadata_ptr_for_load_n",
	"__msan_metadata_ptr_for_store_1",
	"__msan_metadata_ptr_for_store_2",
	"__msan_metadata_ptr_for_store_4",
	"__msan_metadata_ptr_for_store_8",
	"__msan_metadata_ptr_for_store_n",
	"__msan_poison_alloca",
	"__msan_warning",
	/* UBSAN */
	"ubsan_type_mismatch_common",
	"__ubsan_handle_type_mismatch",
	"__ubsan_handle_type_mismatch_v1",
	"__ubsan_handle_shift_out_of_bounds",
	"__ubsan_handle_load_invalid_value",
	/* KSTACK_ERASE */
	"__sanitizer_cov_stack_depth",
	/* TRACE_BRANCH_PROFILING */
	"ftrace_likely_update",
	/* STACKPROTECTOR */
	"__stack_chk_fail",
	/* misc */
	"csum_partial_copy_generic",
	"copy_mc_fragile",
	"copy_mc_fragile_handle_tail",
	"copy_mc_enhanced_fast_string",
	"rep_stos_alternative",
	"rep_movs_alternative",
	"__copy_user_nocache",
	/* sentinel: add_uaccess_safe() iterates until this NULL — keep it last */
	NULL
};
1307
add_uaccess_safe(struct objtool_file * file)1308 static void add_uaccess_safe(struct objtool_file *file)
1309 {
1310 struct symbol *func;
1311 const char **name;
1312
1313 if (!opts.uaccess)
1314 return;
1315
1316 for (name = uaccess_safe_builtin; *name; name++) {
1317 func = find_symbol_by_name(file->elf, *name);
1318 if (!func)
1319 continue;
1320
1321 func->uaccess_safe = true;
1322 }
1323 }
1324
1325 /*
1326 * Symbols that replace INSN_CALL_DYNAMIC, every (tail) call to such a symbol
1327 * will be added to the .retpoline_sites section.
1328 */
arch_is_retpoline(struct symbol * sym)1329 __weak bool arch_is_retpoline(struct symbol *sym)
1330 {
1331 return false;
1332 }
1333
1334 /*
1335 * Symbols that replace INSN_RETURN, every (tail) call to such a symbol
1336 * will be added to the .return_sites section.
1337 */
arch_is_rethunk(struct symbol * sym)1338 __weak bool arch_is_rethunk(struct symbol *sym)
1339 {
1340 return false;
1341 }
1342
1343 /*
1344 * Symbols that are embedded inside other instructions, because sometimes crazy
1345 * code exists. These are mostly ignored for validation purposes.
1346 */
arch_is_embedded_insn(struct symbol * sym)1347 __weak bool arch_is_embedded_insn(struct symbol *sym)
1348 {
1349 return false;
1350 }
1351
insn_reloc(struct objtool_file * file,struct instruction * insn)1352 static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
1353 {
1354 struct reloc *reloc;
1355
1356 if (insn->no_reloc)
1357 return NULL;
1358
1359 if (!file)
1360 return NULL;
1361
1362 reloc = find_reloc_by_dest_range(file->elf, insn->sec,
1363 insn->offset, insn->len);
1364 if (!reloc) {
1365 insn->no_reloc = 1;
1366 return NULL;
1367 }
1368
1369 return reloc;
1370 }
1371
remove_insn_ops(struct instruction * insn)1372 static void remove_insn_ops(struct instruction *insn)
1373 {
1374 struct stack_op *op, *next;
1375
1376 for (op = insn->stack_ops; op; op = next) {
1377 next = op->next;
1378 free(op);
1379 }
1380 insn->stack_ops = NULL;
1381 }
1382
annotate_call_site(struct objtool_file * file,struct instruction * insn,bool sibling)1383 static int annotate_call_site(struct objtool_file *file,
1384 struct instruction *insn, bool sibling)
1385 {
1386 struct reloc *reloc = insn_reloc(file, insn);
1387 struct symbol *sym = insn_call_dest(insn);
1388
1389 if (!sym)
1390 sym = reloc->sym;
1391
1392 if (sym->static_call_tramp) {
1393 list_add_tail(&insn->call_node, &file->static_call_list);
1394 return 0;
1395 }
1396
1397 if (sym->retpoline_thunk) {
1398 list_add_tail(&insn->call_node, &file->retpoline_call_list);
1399 return 0;
1400 }
1401
1402 /*
1403 * Many compilers cannot disable KCOV or sanitizer calls with a function
1404 * attribute so they need a little help, NOP out any such calls from
1405 * noinstr text.
1406 */
1407 if (opts.hack_noinstr && insn->sec->noinstr && sym->profiling_func) {
1408 if (reloc)
1409 set_reloc_type(file->elf, reloc, R_NONE);
1410
1411 if (elf_write_insn(file->elf, insn->sec,
1412 insn->offset, insn->len,
1413 sibling ? arch_ret_insn(insn->len)
1414 : arch_nop_insn(insn->len))) {
1415 return -1;
1416 }
1417
1418 insn->type = sibling ? INSN_RETURN : INSN_NOP;
1419
1420 if (sibling) {
1421 /*
1422 * We've replaced the tail-call JMP insn by two new
1423 * insn: RET; INT3, except we only have a single struct
1424 * insn here. Mark it retpoline_safe to avoid the SLS
1425 * warning, instead of adding another insn.
1426 */
1427 insn->retpoline_safe = true;
1428 }
1429
1430 return 0;
1431 }
1432
1433 if (opts.mcount && sym->fentry) {
1434 if (sibling)
1435 WARN_INSN(insn, "tail call to __fentry__ !?!?");
1436 if (opts.mnop) {
1437 if (reloc)
1438 set_reloc_type(file->elf, reloc, R_NONE);
1439
1440 if (elf_write_insn(file->elf, insn->sec,
1441 insn->offset, insn->len,
1442 arch_nop_insn(insn->len))) {
1443 return -1;
1444 }
1445
1446 insn->type = INSN_NOP;
1447 }
1448
1449 list_add_tail(&insn->call_node, &file->mcount_loc_list);
1450 return 0;
1451 }
1452
1453 if (insn->type == INSN_CALL && !insn->sec->init &&
1454 !insn->_call_dest->embedded_insn)
1455 list_add_tail(&insn->call_node, &file->call_list);
1456
1457 if (!sibling && dead_end_function(file, sym))
1458 insn->dead_end = true;
1459
1460 return 0;
1461 }
1462
add_call_dest(struct objtool_file * file,struct instruction * insn,struct symbol * dest,bool sibling)1463 static int add_call_dest(struct objtool_file *file, struct instruction *insn,
1464 struct symbol *dest, bool sibling)
1465 {
1466 insn->_call_dest = dest;
1467 if (!dest)
1468 return 0;
1469
1470 /*
1471 * Whatever stack impact regular CALLs have, should be undone
1472 * by the RETURN of the called function.
1473 *
1474 * Annotated intra-function calls retain the stack_ops but
1475 * are converted to JUMP, see read_intra_function_calls().
1476 */
1477 remove_insn_ops(insn);
1478
1479 return annotate_call_site(file, insn, sibling);
1480 }
1481
add_retpoline_call(struct objtool_file * file,struct instruction * insn)1482 static int add_retpoline_call(struct objtool_file *file, struct instruction *insn)
1483 {
1484 /*
1485 * Retpoline calls/jumps are really dynamic calls/jumps in disguise,
1486 * so convert them accordingly.
1487 */
1488 switch (insn->type) {
1489 case INSN_CALL:
1490 insn->type = INSN_CALL_DYNAMIC;
1491 break;
1492 case INSN_JUMP_UNCONDITIONAL:
1493 insn->type = INSN_JUMP_DYNAMIC;
1494 break;
1495 case INSN_JUMP_CONDITIONAL:
1496 insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;
1497 break;
1498 default:
1499 return 0;
1500 }
1501
1502 insn->retpoline_safe = true;
1503
1504 /*
1505 * Whatever stack impact regular CALLs have, should be undone
1506 * by the RETURN of the called function.
1507 *
1508 * Annotated intra-function calls retain the stack_ops but
1509 * are converted to JUMP, see read_intra_function_calls().
1510 */
1511 remove_insn_ops(insn);
1512
1513 return annotate_call_site(file, insn, false);
1514 }
1515
add_return_call(struct objtool_file * file,struct instruction * insn,bool add)1516 static void add_return_call(struct objtool_file *file, struct instruction *insn, bool add)
1517 {
1518 /*
1519 * Return thunk tail calls are really just returns in disguise,
1520 * so convert them accordingly.
1521 */
1522 insn->type = INSN_RETURN;
1523 insn->retpoline_safe = true;
1524
1525 if (add)
1526 list_add_tail(&insn->call_node, &file->return_thunk_list);
1527 }
1528
is_first_func_insn(struct objtool_file * file,struct instruction * insn)1529 static bool is_first_func_insn(struct objtool_file *file,
1530 struct instruction *insn)
1531 {
1532 struct symbol *func = insn_func(insn);
1533
1534 if (!func)
1535 return false;
1536
1537 if (insn->offset == func->offset)
1538 return true;
1539
1540 /* Allow direct CALL/JMP past ENDBR */
1541 if (opts.ibt) {
1542 struct instruction *prev = prev_insn_same_sym(file, insn);
1543
1544 if (prev && prev->type == INSN_ENDBR &&
1545 insn->offset == func->offset + prev->len)
1546 return true;
1547 }
1548
1549 return false;
1550 }
1551
1552 /*
1553 * Find the destination instructions for all jumps.
1554 */
add_jump_destinations(struct objtool_file * file)1555 static int add_jump_destinations(struct objtool_file *file)
1556 {
1557 struct instruction *insn;
1558 struct reloc *reloc;
1559
1560 for_each_insn(file, insn) {
1561 struct symbol *func = insn_func(insn);
1562 struct instruction *dest_insn;
1563 struct section *dest_sec;
1564 struct symbol *dest_sym;
1565 unsigned long dest_off;
1566
1567 if (!is_static_jump(insn))
1568 continue;
1569
1570 if (insn->jump_dest) {
1571 /*
1572 * handle_group_alt() may have previously set
1573 * 'jump_dest' for some alternatives.
1574 */
1575 continue;
1576 }
1577
1578 reloc = insn_reloc(file, insn);
1579 if (!reloc) {
1580 dest_sec = insn->sec;
1581 dest_off = arch_jump_destination(insn);
1582 dest_sym = dest_sec->sym;
1583 } else {
1584 dest_sym = reloc->sym;
1585 if (is_undef_sym(dest_sym)) {
1586 if (dest_sym->retpoline_thunk) {
1587 if (add_retpoline_call(file, insn))
1588 return -1;
1589 continue;
1590 }
1591
1592 if (dest_sym->return_thunk) {
1593 add_return_call(file, insn, true);
1594 continue;
1595 }
1596
1597 /* External symbol */
1598 if (func) {
1599 /* External sibling call */
1600 if (add_call_dest(file, insn, dest_sym, true))
1601 return -1;
1602 continue;
1603 }
1604
1605 /* Non-func asm code jumping to external symbol */
1606 continue;
1607 }
1608
1609 dest_sec = dest_sym->sec;
1610 dest_off = dest_sym->offset + arch_insn_adjusted_addend(insn, reloc);
1611 }
1612
1613 dest_insn = find_insn(file, dest_sec, dest_off);
1614 if (!dest_insn) {
1615 struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off);
1616
1617 /*
1618 * retbleed_untrain_ret() jumps to
1619 * __x86_return_thunk(), but objtool can't find
1620 * the thunk's starting RET instruction,
1621 * because the RET is also in the middle of
1622 * another instruction. Objtool only knows
1623 * about the outer instruction.
1624 */
1625 if (sym && sym->embedded_insn) {
1626 add_return_call(file, insn, false);
1627 continue;
1628 }
1629
1630 /*
1631 * GCOV/KCOV dead code can jump to the end of
1632 * the function/section.
1633 */
1634 if (file->ignore_unreachables && func &&
1635 dest_sec == insn->sec &&
1636 dest_off == func->offset + func->len)
1637 continue;
1638
1639 ERROR_INSN(insn, "can't find jump dest instruction at %s",
1640 offstr(dest_sec, dest_off));
1641 return -1;
1642 }
1643
1644 if (!dest_sym || is_sec_sym(dest_sym)) {
1645 dest_sym = dest_insn->sym;
1646 if (!dest_sym)
1647 goto set_jump_dest;
1648 }
1649
1650 if (dest_sym->retpoline_thunk && dest_insn->offset == dest_sym->offset) {
1651 if (add_retpoline_call(file, insn))
1652 return -1;
1653 continue;
1654 }
1655
1656 if (dest_sym->return_thunk && dest_insn->offset == dest_sym->offset) {
1657 add_return_call(file, insn, true);
1658 continue;
1659 }
1660
1661 if (!insn->sym || insn->sym->pfunc == dest_sym->pfunc)
1662 goto set_jump_dest;
1663
1664 /*
1665 * Internal cross-function jump.
1666 */
1667
1668 if (is_first_func_insn(file, dest_insn)) {
1669 /* Internal sibling call */
1670 if (add_call_dest(file, insn, dest_sym, true))
1671 return -1;
1672 continue;
1673 }
1674
1675 set_jump_dest:
1676 insn->jump_dest = dest_insn;
1677 }
1678
1679 return 0;
1680 }
1681
find_call_destination(struct section * sec,unsigned long offset)1682 static struct symbol *find_call_destination(struct section *sec, unsigned long offset)
1683 {
1684 struct symbol *call_dest;
1685
1686 call_dest = find_func_by_offset(sec, offset);
1687 if (!call_dest)
1688 call_dest = find_symbol_by_offset(sec, offset);
1689
1690 return call_dest;
1691 }
1692
1693 /*
1694 * Find the destination instructions for all calls.
1695 */
add_call_destinations(struct objtool_file * file)1696 static int add_call_destinations(struct objtool_file *file)
1697 {
1698 struct instruction *insn;
1699 unsigned long dest_off;
1700 struct symbol *dest;
1701 struct reloc *reloc;
1702
1703 for_each_insn(file, insn) {
1704 struct symbol *func = insn_func(insn);
1705 if (insn->type != INSN_CALL)
1706 continue;
1707
1708 reloc = insn_reloc(file, insn);
1709 if (!reloc) {
1710 dest_off = arch_jump_destination(insn);
1711 dest = find_call_destination(insn->sec, dest_off);
1712
1713 if (add_call_dest(file, insn, dest, false))
1714 return -1;
1715
1716 if (func && func->ignore)
1717 continue;
1718
1719 if (!insn_call_dest(insn)) {
1720 ERROR_INSN(insn, "unannotated intra-function call");
1721 return -1;
1722 }
1723
1724 if (func && !is_func_sym(insn_call_dest(insn))) {
1725 ERROR_INSN(insn, "unsupported call to non-function");
1726 return -1;
1727 }
1728
1729 } else if (is_sec_sym(reloc->sym)) {
1730 dest_off = arch_insn_adjusted_addend(insn, reloc);
1731 dest = find_call_destination(reloc->sym->sec, dest_off);
1732 if (!dest) {
1733 ERROR_INSN(insn, "can't find call dest symbol at %s+0x%lx",
1734 reloc->sym->sec->name, dest_off);
1735 return -1;
1736 }
1737
1738 if (add_call_dest(file, insn, dest, false))
1739 return -1;
1740
1741 } else if (reloc->sym->retpoline_thunk) {
1742 if (add_retpoline_call(file, insn))
1743 return -1;
1744
1745 } else {
1746 if (add_call_dest(file, insn, reloc->sym, false))
1747 return -1;
1748 }
1749 }
1750
1751 return 0;
1752 }
1753
1754 /*
1755 * The .alternatives section requires some extra special care over and above
1756 * other special sections because alternatives are patched in place.
1757 */
handle_group_alt(struct objtool_file * file,struct special_alt * special_alt,struct instruction * orig_insn,struct instruction ** new_insn)1758 static int handle_group_alt(struct objtool_file *file,
1759 struct special_alt *special_alt,
1760 struct instruction *orig_insn,
1761 struct instruction **new_insn)
1762 {
1763 struct instruction *last_new_insn = NULL, *insn, *nop = NULL;
1764 struct alt_group *orig_alt_group, *new_alt_group;
1765 unsigned long dest_off;
1766
1767 orig_alt_group = orig_insn->alt_group;
1768 if (!orig_alt_group) {
1769 struct instruction *last_orig_insn = NULL;
1770
1771 orig_alt_group = calloc(1, sizeof(*orig_alt_group));
1772 if (!orig_alt_group) {
1773 ERROR_GLIBC("calloc");
1774 return -1;
1775 }
1776 orig_alt_group->cfi = calloc(special_alt->orig_len,
1777 sizeof(struct cfi_state *));
1778 if (!orig_alt_group->cfi) {
1779 ERROR_GLIBC("calloc");
1780 return -1;
1781 }
1782
1783 insn = orig_insn;
1784 sec_for_each_insn_from(file, insn) {
1785 if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
1786 break;
1787
1788 insn->alt_group = orig_alt_group;
1789 last_orig_insn = insn;
1790 }
1791 orig_alt_group->orig_group = NULL;
1792 orig_alt_group->first_insn = orig_insn;
1793 orig_alt_group->last_insn = last_orig_insn;
1794 orig_alt_group->nop = NULL;
1795 orig_alt_group->ignore = orig_insn->ignore_alts;
1796 orig_alt_group->feature = 0;
1797 } else {
1798 if (orig_alt_group->last_insn->offset + orig_alt_group->last_insn->len -
1799 orig_alt_group->first_insn->offset != special_alt->orig_len) {
1800 ERROR_INSN(orig_insn, "weirdly overlapping alternative! %ld != %d",
1801 orig_alt_group->last_insn->offset +
1802 orig_alt_group->last_insn->len -
1803 orig_alt_group->first_insn->offset,
1804 special_alt->orig_len);
1805 return -1;
1806 }
1807 }
1808
1809 new_alt_group = calloc(1, sizeof(*new_alt_group));
1810 if (!new_alt_group) {
1811 ERROR_GLIBC("calloc");
1812 return -1;
1813 }
1814
1815 if (special_alt->new_len < special_alt->orig_len) {
1816 /*
1817 * Insert a fake nop at the end to make the replacement
1818 * alt_group the same size as the original. This is needed to
1819 * allow propagate_alt_cfi() to do its magic. When the last
1820 * instruction affects the stack, the instruction after it (the
1821 * nop) will propagate the new state to the shared CFI array.
1822 */
1823 nop = calloc(1, sizeof(*nop));
1824 if (!nop) {
1825 ERROR_GLIBC("calloc");
1826 return -1;
1827 }
1828 memset(nop, 0, sizeof(*nop));
1829
1830 nop->sec = special_alt->new_sec;
1831 nop->offset = special_alt->new_off + special_alt->new_len;
1832 nop->len = special_alt->orig_len - special_alt->new_len;
1833 nop->type = INSN_NOP;
1834 nop->sym = orig_insn->sym;
1835 nop->alt_group = new_alt_group;
1836 nop->fake = 1;
1837 }
1838
1839 if (!special_alt->new_len) {
1840 *new_insn = nop;
1841 goto end;
1842 }
1843
1844 insn = *new_insn;
1845 sec_for_each_insn_from(file, insn) {
1846 struct reloc *alt_reloc;
1847
1848 if (insn->offset >= special_alt->new_off + special_alt->new_len)
1849 break;
1850
1851 last_new_insn = insn;
1852
1853 insn->sym = orig_insn->sym;
1854 insn->alt_group = new_alt_group;
1855
1856 /*
1857 * Since alternative replacement code is copy/pasted by the
1858 * kernel after applying relocations, generally such code can't
1859 * have relative-address relocation references to outside the
1860 * .altinstr_replacement section, unless the arch's
1861 * alternatives code can adjust the relative offsets
1862 * accordingly.
1863 */
1864 alt_reloc = insn_reloc(file, insn);
1865 if (alt_reloc && arch_pc_relative_reloc(alt_reloc) &&
1866 !arch_support_alt_relocation(special_alt, insn, alt_reloc)) {
1867
1868 ERROR_INSN(insn, "unsupported relocation in alternatives section");
1869 return -1;
1870 }
1871
1872 if (!is_static_jump(insn))
1873 continue;
1874
1875 if (!insn->immediate)
1876 continue;
1877
1878 dest_off = arch_jump_destination(insn);
1879 if (dest_off == special_alt->new_off + special_alt->new_len) {
1880 insn->jump_dest = next_insn_same_sec(file, orig_alt_group->last_insn);
1881 if (!insn->jump_dest) {
1882 ERROR_INSN(insn, "can't find alternative jump destination");
1883 return -1;
1884 }
1885 }
1886 }
1887
1888 if (!last_new_insn) {
1889 ERROR_FUNC(special_alt->new_sec, special_alt->new_off,
1890 "can't find last new alternative instruction");
1891 return -1;
1892 }
1893
1894 end:
1895 new_alt_group->orig_group = orig_alt_group;
1896 new_alt_group->first_insn = *new_insn;
1897 new_alt_group->last_insn = last_new_insn;
1898 new_alt_group->nop = nop;
1899 new_alt_group->ignore = (*new_insn)->ignore_alts;
1900 new_alt_group->cfi = orig_alt_group->cfi;
1901 new_alt_group->feature = special_alt->feature;
1902 return 0;
1903 }
1904
1905 /*
1906 * A jump table entry can either convert a nop to a jump or a jump to a nop.
1907 * If the original instruction is a jump, make the alt entry an effective nop
1908 * by just skipping the original instruction.
1909 */
handle_jump_alt(struct objtool_file * file,struct special_alt * special_alt,struct instruction * orig_insn,struct instruction ** new_insn)1910 static int handle_jump_alt(struct objtool_file *file,
1911 struct special_alt *special_alt,
1912 struct instruction *orig_insn,
1913 struct instruction **new_insn)
1914 {
1915 if (orig_insn->type != INSN_JUMP_UNCONDITIONAL &&
1916 orig_insn->type != INSN_NOP) {
1917
1918 ERROR_INSN(orig_insn, "unsupported instruction at jump label");
1919 return -1;
1920 }
1921
1922 if (opts.hack_jump_label && special_alt->key_addend & 2) {
1923 struct reloc *reloc = insn_reloc(file, orig_insn);
1924
1925 if (reloc)
1926 set_reloc_type(file->elf, reloc, R_NONE);
1927
1928 if (elf_write_insn(file->elf, orig_insn->sec,
1929 orig_insn->offset, orig_insn->len,
1930 arch_nop_insn(orig_insn->len))) {
1931 return -1;
1932 }
1933
1934 orig_insn->type = INSN_NOP;
1935 }
1936
1937 if (orig_insn->type == INSN_NOP) {
1938 if (orig_insn->len == 2)
1939 file->jl_nop_short++;
1940 else
1941 file->jl_nop_long++;
1942
1943 return 0;
1944 }
1945
1946 if (orig_insn->len == 2)
1947 file->jl_short++;
1948 else
1949 file->jl_long++;
1950
1951 *new_insn = next_insn_same_sec(file, orig_insn);
1952 return 0;
1953 }
1954
1955 /*
1956 * Read all the special sections which have alternate instructions which can be
1957 * patched in or redirected to at runtime. Each instruction having alternate
1958 * instruction(s) has them added to its insn->alts list, which will be
1959 * traversed in validate_branch().
1960 */
add_special_section_alts(struct objtool_file * file)1961 static int add_special_section_alts(struct objtool_file *file)
1962 {
1963 struct list_head special_alts;
1964 struct instruction *orig_insn, *new_insn;
1965 struct special_alt *special_alt, *tmp;
1966 enum alternative_type alt_type;
1967 struct alternative *alt;
1968 struct alternative *a;
1969
1970 if (special_get_alts(file->elf, &special_alts))
1971 return -1;
1972
1973 list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {
1974
1975 orig_insn = find_insn(file, special_alt->orig_sec,
1976 special_alt->orig_off);
1977 if (!orig_insn) {
1978 ERROR_FUNC(special_alt->orig_sec, special_alt->orig_off,
1979 "special: can't find orig instruction");
1980 return -1;
1981 }
1982
1983 new_insn = NULL;
1984 if (!special_alt->group || special_alt->new_len) {
1985 new_insn = find_insn(file, special_alt->new_sec,
1986 special_alt->new_off);
1987 if (!new_insn) {
1988 ERROR_FUNC(special_alt->new_sec, special_alt->new_off,
1989 "special: can't find new instruction");
1990 return -1;
1991 }
1992 }
1993
1994 if (special_alt->group) {
1995 if (!special_alt->orig_len) {
1996 ERROR_INSN(orig_insn, "empty alternative entry");
1997 continue;
1998 }
1999
2000 if (handle_group_alt(file, special_alt, orig_insn, &new_insn))
2001 return -1;
2002
2003 alt_type = ALT_TYPE_INSTRUCTIONS;
2004
2005 } else if (special_alt->jump_or_nop) {
2006 if (handle_jump_alt(file, special_alt, orig_insn, &new_insn))
2007 return -1;
2008
2009 alt_type = ALT_TYPE_JUMP_TABLE;
2010 } else {
2011 alt_type = ALT_TYPE_EX_TABLE;
2012 }
2013
2014 alt = calloc(1, sizeof(*alt));
2015 if (!alt) {
2016 ERROR_GLIBC("calloc");
2017 return -1;
2018 }
2019
2020 alt->insn = new_insn;
2021 alt->type = alt_type;
2022 alt->next = NULL;
2023
2024 /*
2025 * Store alternatives in the same order they have been
2026 * defined.
2027 */
2028 if (!orig_insn->alts) {
2029 orig_insn->alts = alt;
2030 } else {
2031 for (a = orig_insn->alts; a->next; a = a->next)
2032 ;
2033 a->next = alt;
2034 }
2035
2036 list_del(&special_alt->list);
2037 free(special_alt);
2038 }
2039
2040 if (opts.stats) {
2041 printf("jl\\\tNOP\tJMP\n");
2042 printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short);
2043 printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long);
2044 }
2045
2046 return 0;
2047 }
2048
arch_jump_table_sym_offset(struct reloc * reloc,struct reloc * table)2049 __weak unsigned long arch_jump_table_sym_offset(struct reloc *reloc, struct reloc *table)
2050 {
2051 return reloc->sym->offset + reloc_addend(reloc);
2052 }
2053
add_jump_table(struct objtool_file * file,struct instruction * insn)2054 static int add_jump_table(struct objtool_file *file, struct instruction *insn)
2055 {
2056 unsigned long table_size = insn_jump_table_size(insn);
2057 struct symbol *pfunc = insn_func(insn)->pfunc;
2058 struct reloc *table = insn_jump_table(insn);
2059 struct instruction *dest_insn;
2060 unsigned int prev_offset = 0;
2061 struct reloc *reloc = table;
2062 struct alternative *alt;
2063 unsigned long sym_offset;
2064
2065 /*
2066 * Each @reloc is a switch table relocation which points to the target
2067 * instruction.
2068 */
2069 for_each_reloc_from(table->sec, reloc) {
2070
2071 /* Check for the end of the table: */
2072 if (table_size && reloc_offset(reloc) - reloc_offset(table) >= table_size)
2073 break;
2074 if (reloc != table && is_jump_table(reloc))
2075 break;
2076
2077 /* Make sure the table entries are consecutive: */
2078 if (prev_offset && reloc_offset(reloc) != prev_offset + arch_reloc_size(reloc))
2079 break;
2080
2081 sym_offset = arch_jump_table_sym_offset(reloc, table);
2082
2083 /* Detect function pointers from contiguous objects: */
2084 if (reloc->sym->sec == pfunc->sec && sym_offset == pfunc->offset)
2085 break;
2086
2087 /*
2088 * Clang sometimes leaves dangling unused jump table entries
2089 * which point to the end of the function. Ignore them.
2090 */
2091 if (reloc->sym->sec == pfunc->sec &&
2092 sym_offset == pfunc->offset + pfunc->len)
2093 goto next;
2094
2095 dest_insn = find_insn(file, reloc->sym->sec, sym_offset);
2096 if (!dest_insn)
2097 break;
2098
2099 /* Make sure the destination is in the same function: */
2100 if (!insn_func(dest_insn) || insn_func(dest_insn)->pfunc != pfunc)
2101 break;
2102
2103 alt = calloc(1, sizeof(*alt));
2104 if (!alt) {
2105 ERROR_GLIBC("calloc");
2106 return -1;
2107 }
2108
2109 alt->insn = dest_insn;
2110 alt->next = insn->alts;
2111 insn->alts = alt;
2112 next:
2113 prev_offset = reloc_offset(reloc);
2114 }
2115
2116 if (!prev_offset) {
2117 ERROR_INSN(insn, "can't find switch jump table");
2118 return -1;
2119 }
2120
2121 return 0;
2122 }
2123
2124 /*
2125 * find_jump_table() - Given a dynamic jump, find the switch jump table
2126 * associated with it.
2127 */
find_jump_table(struct objtool_file * file,struct symbol * func,struct instruction * insn)2128 static void find_jump_table(struct objtool_file *file, struct symbol *func,
2129 struct instruction *insn)
2130 {
2131 struct reloc *table_reloc;
2132 struct instruction *dest_insn, *orig_insn = insn;
2133 unsigned long table_size;
2134 unsigned long sym_offset;
2135
2136 /*
2137 * Backward search using the @first_jump_src links, these help avoid
2138 * much of the 'in between' code. Which avoids us getting confused by
2139 * it.
2140 */
2141 for (;
2142 insn && insn_func(insn) && insn_func(insn)->pfunc == func;
2143 insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) {
2144
2145 if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
2146 break;
2147
2148 /* allow small jumps within the range */
2149 if (insn->type == INSN_JUMP_UNCONDITIONAL &&
2150 insn->jump_dest &&
2151 (insn->jump_dest->offset <= insn->offset ||
2152 insn->jump_dest->offset > orig_insn->offset))
2153 break;
2154
2155 table_reloc = arch_find_switch_table(file, insn, &table_size);
2156 if (!table_reloc)
2157 continue;
2158
2159 sym_offset = table_reloc->sym->offset + reloc_addend(table_reloc);
2160
2161 dest_insn = find_insn(file, table_reloc->sym->sec, sym_offset);
2162 if (!dest_insn || !insn_func(dest_insn) || insn_func(dest_insn)->pfunc != func)
2163 continue;
2164
2165 set_jump_table(table_reloc);
2166 orig_insn->_jump_table = table_reloc;
2167 orig_insn->_jump_table_size = table_size;
2168
2169 break;
2170 }
2171 }
2172
2173 /*
2174 * First pass: Mark the head of each jump table so that in the next pass,
2175 * we know when a given jump table ends and the next one starts.
2176 */
mark_func_jump_tables(struct objtool_file * file,struct symbol * func)2177 static void mark_func_jump_tables(struct objtool_file *file,
2178 struct symbol *func)
2179 {
2180 struct instruction *insn, *last = NULL;
2181
2182 func_for_each_insn(file, func, insn) {
2183 if (!last)
2184 last = insn;
2185
2186 /*
2187 * Store back-pointers for unconditional forward jumps such
2188 * that find_jump_table() can back-track using those and
2189 * avoid some potentially confusing code.
2190 */
2191 if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
2192 insn->offset > last->offset &&
2193 insn->jump_dest->offset > insn->offset &&
2194 !insn->jump_dest->first_jump_src) {
2195
2196 insn->jump_dest->first_jump_src = insn;
2197 last = insn->jump_dest;
2198 }
2199
2200 if (insn->type != INSN_JUMP_DYNAMIC)
2201 continue;
2202
2203 find_jump_table(file, func, insn);
2204 }
2205 }
2206
add_func_jump_tables(struct objtool_file * file,struct symbol * func)2207 static int add_func_jump_tables(struct objtool_file *file,
2208 struct symbol *func)
2209 {
2210 struct instruction *insn;
2211
2212 func_for_each_insn(file, func, insn) {
2213 if (!insn_jump_table(insn))
2214 continue;
2215
2216 if (add_jump_table(file, insn))
2217 return -1;
2218 }
2219
2220 return 0;
2221 }
2222
2223 /*
2224 * For some switch statements, gcc generates a jump table in the .rodata
2225 * section which contains a list of addresses within the function to jump to.
2226 * This finds these jump tables and adds them to the insn->alts lists.
2227 */
add_jump_table_alts(struct objtool_file * file)2228 static int add_jump_table_alts(struct objtool_file *file)
2229 {
2230 struct symbol *func;
2231
2232 if (!file->rodata)
2233 return 0;
2234
2235 for_each_sym(file->elf, func) {
2236 if (!is_func_sym(func) || func->alias != func)
2237 continue;
2238
2239 mark_func_jump_tables(file, func);
2240 if (add_func_jump_tables(file, func))
2241 return -1;
2242 }
2243
2244 return 0;
2245 }
2246
set_func_state(struct cfi_state * state)2247 static void set_func_state(struct cfi_state *state)
2248 {
2249 state->cfa = initial_func_cfi.cfa;
2250 memcpy(&state->regs, &initial_func_cfi.regs,
2251 CFI_NUM_REGS * sizeof(struct cfi_reg));
2252 state->stack_size = initial_func_cfi.cfa.offset;
2253 state->type = UNWIND_HINT_TYPE_CALL;
2254 }
2255
/*
 * Parse the .discard.unwind_hints section (emitted by the UNWIND_HINT_*
 * asm annotations) and attach the resulting CFI state to each hinted
 * instruction.
 *
 * Returns 0 on success (including when no hint section exists), -1 on a
 * malformed section or an unresolvable hint.
 */
static int read_unwind_hints(struct objtool_file *file)
{
	struct cfi_state cfi = init_cfi;
	struct section *sec;
	struct unwind_hint *hint;
	struct instruction *insn;
	struct reloc *reloc;
	unsigned long offset;
	int i;

	sec = find_section_by_name(file->elf, ".discard.unwind_hints");
	if (!sec)
		return 0;

	if (!sec->rsec) {
		ERROR("missing .rela.discard.unwind_hints section");
		return -1;
	}

	if (sec_size(sec) % sizeof(struct unwind_hint)) {
		ERROR("struct unwind_hint size mismatch");
		return -1;
	}

	file->hints = true;

	for (i = 0; i < sec_size(sec) / sizeof(struct unwind_hint); i++) {
		hint = (struct unwind_hint *)sec->data->d_buf + i;

		/* Each hint entry carries one reloc pointing at the hinted insn. */
		reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
		if (!reloc) {
			ERROR("can't find reloc for unwind_hints[%d]", i);
			return -1;
		}

		offset = reloc->sym->offset + reloc_addend(reloc);

		insn = find_insn(file, reloc->sym->sec, offset);
		if (!insn) {
			ERROR("can't find insn for unwind_hints[%d]", i);
			return -1;
		}

		insn->hint = true;

		/* UNDEFINED: unwinding must not proceed past this point. */
		if (hint->type == UNWIND_HINT_TYPE_UNDEFINED) {
			insn->cfi = &force_undefined_cfi;
			continue;
		}

		/* SAVE/RESTORE are markers for validate_branch(), not CFI. */
		if (hint->type == UNWIND_HINT_TYPE_SAVE) {
			insn->hint = false;
			insn->save = true;
			continue;
		}

		if (hint->type == UNWIND_HINT_TYPE_RESTORE) {
			insn->restore = true;
			continue;
		}

		if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) {
			struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset);

			/*
			 * With IBT, a global entry point carrying an IRET_REGS
			 * hint must start with ENDBR (or be explicitly marked
			 * noendbr).
			 */
			if (sym && is_global_sym(sym)) {
				if (opts.ibt && insn->type != INSN_ENDBR && !insn->noendbr) {
					ERROR_INSN(insn, "UNWIND_HINT_IRET_REGS without ENDBR");
					return -1;
				}
			}
		}

		if (hint->type == UNWIND_HINT_TYPE_FUNC) {
			insn->cfi = &func_cfi;
			continue;
		}

		/* Start from the insn's existing CFI, if it already has one. */
		if (insn->cfi)
			cfi = *(insn->cfi);

		if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) {
			ERROR_INSN(insn, "unsupported unwind_hint sp base reg %d", hint->sp_reg);
			return -1;
		}

		cfi.cfa.offset = bswap_if_needed(file->elf, hint->sp_offset);
		cfi.type = hint->type;
		cfi.signal = hint->signal;

		insn->cfi = cfi_hash_find_or_add(&cfi);
	}

	return 0;
}
2350
read_annotate(struct objtool_file * file,int (* func)(struct objtool_file * file,int type,struct instruction * insn))2351 static int read_annotate(struct objtool_file *file,
2352 int (*func)(struct objtool_file *file, int type, struct instruction *insn))
2353 {
2354 struct section *sec;
2355 struct instruction *insn;
2356 struct reloc *reloc;
2357 uint64_t offset;
2358 int type;
2359
2360 sec = find_section_by_name(file->elf, ".discard.annotate_insn");
2361 if (!sec)
2362 return 0;
2363
2364 if (!sec->rsec)
2365 return 0;
2366
2367 if (sec->sh.sh_entsize != 8) {
2368 static bool warned = false;
2369 if (!warned && opts.verbose) {
2370 WARN("%s: dodgy linker, sh_entsize != 8", sec->name);
2371 warned = true;
2372 }
2373 sec->sh.sh_entsize = 8;
2374 }
2375
2376 if (sec_num_entries(sec) != sec_num_entries(sec->rsec)) {
2377 ERROR("bad .discard.annotate_insn section: missing relocs");
2378 return -1;
2379 }
2380
2381 for_each_reloc(sec->rsec, reloc) {
2382 type = annotype(file->elf, sec, reloc);
2383 offset = reloc->sym->offset + reloc_addend(reloc);
2384 insn = find_insn(file, reloc->sym->sec, offset);
2385
2386 if (!insn) {
2387 ERROR("bad .discard.annotate_insn entry: %d of type %d", reloc_idx(reloc), type);
2388 return -1;
2389 }
2390
2391 if (func(file, type, insn))
2392 return -1;
2393 }
2394
2395 return 0;
2396 }
2397
__annotate_early(struct objtool_file * file,int type,struct instruction * insn)2398 static int __annotate_early(struct objtool_file *file, int type, struct instruction *insn)
2399 {
2400 switch (type) {
2401
2402 /* Must be before add_special_section_alts() */
2403 case ANNOTYPE_IGNORE_ALTS:
2404 insn->ignore_alts = true;
2405 break;
2406
2407 /*
2408 * Must be before read_unwind_hints() since that needs insn->noendbr.
2409 */
2410 case ANNOTYPE_NOENDBR:
2411 insn->noendbr = 1;
2412 break;
2413
2414 default:
2415 break;
2416 }
2417
2418 return 0;
2419 }
2420
__annotate_ifc(struct objtool_file * file,int type,struct instruction * insn)2421 static int __annotate_ifc(struct objtool_file *file, int type, struct instruction *insn)
2422 {
2423 unsigned long dest_off;
2424
2425 if (type != ANNOTYPE_INTRA_FUNCTION_CALL)
2426 return 0;
2427
2428 if (insn->type != INSN_CALL) {
2429 ERROR_INSN(insn, "intra_function_call not a direct call");
2430 return -1;
2431 }
2432
2433 /*
2434 * Treat intra-function CALLs as JMPs, but with a stack_op.
2435 * See add_call_destinations(), which strips stack_ops from
2436 * normal CALLs.
2437 */
2438 insn->type = INSN_JUMP_UNCONDITIONAL;
2439
2440 dest_off = arch_jump_destination(insn);
2441 insn->jump_dest = find_insn(file, insn->sec, dest_off);
2442 if (!insn->jump_dest) {
2443 ERROR_INSN(insn, "can't find call dest at %s+0x%lx",
2444 insn->sec->name, dest_off);
2445 return -1;
2446 }
2447
2448 return 0;
2449 }
2450
__annotate_late(struct objtool_file * file,int type,struct instruction * insn)2451 static int __annotate_late(struct objtool_file *file, int type, struct instruction *insn)
2452 {
2453 struct symbol *sym;
2454
2455 switch (type) {
2456 case ANNOTYPE_NOENDBR:
2457 /* early */
2458 break;
2459
2460 case ANNOTYPE_RETPOLINE_SAFE:
2461 if (insn->type != INSN_JUMP_DYNAMIC &&
2462 insn->type != INSN_CALL_DYNAMIC &&
2463 insn->type != INSN_RETURN &&
2464 insn->type != INSN_NOP) {
2465 ERROR_INSN(insn, "retpoline_safe hint not an indirect jump/call/ret/nop");
2466 return -1;
2467 }
2468
2469 insn->retpoline_safe = true;
2470 break;
2471
2472 case ANNOTYPE_INSTR_BEGIN:
2473 insn->instr++;
2474 break;
2475
2476 case ANNOTYPE_INSTR_END:
2477 insn->instr--;
2478 break;
2479
2480 case ANNOTYPE_UNRET_BEGIN:
2481 insn->unret = 1;
2482 break;
2483
2484 case ANNOTYPE_IGNORE_ALTS:
2485 /* early */
2486 break;
2487
2488 case ANNOTYPE_INTRA_FUNCTION_CALL:
2489 /* ifc */
2490 break;
2491
2492 case ANNOTYPE_REACHABLE:
2493 insn->dead_end = false;
2494 break;
2495
2496 case ANNOTYPE_NOCFI:
2497 sym = insn->sym;
2498 if (!sym) {
2499 ERROR_INSN(insn, "dodgy NOCFI annotation");
2500 return -1;
2501 }
2502 insn->sym->nocfi = 1;
2503 break;
2504
2505 default:
2506 ERROR_INSN(insn, "Unknown annotation type: %d", type);
2507 return -1;
2508 }
2509
2510 return 0;
2511 }
2512
2513 /*
2514 * Return true if name matches an instrumentation function, where calls to that
2515 * function from noinstr code can safely be removed, but compilers won't do so.
2516 */
/*
 * Return true if name matches an instrumentation function, where calls to that
 * function from noinstr code can safely be removed, but compilers won't do so.
 */
static bool is_profiling_func(const char *name)
{
	/*
	 * Many compilers cannot disable KCOV with a function attribute.
	 *
	 * Derive the prefix length from the literal (the compiler folds the
	 * strlen) instead of a hard-coded 16, matching the
	 * STATIC_CALL_TRAMP_PREFIX_STR comparison in classify_symbols().
	 */
	if (!strncmp(name, "__sanitizer_cov_", strlen("__sanitizer_cov_")))
		return true;

	return false;
}
2527
classify_symbols(struct objtool_file * file)2528 static int classify_symbols(struct objtool_file *file)
2529 {
2530 struct symbol *func;
2531 size_t len;
2532
2533 for_each_sym(file->elf, func) {
2534 if (is_notype_sym(func) && strstarts(func->name, ".L"))
2535 func->local_label = true;
2536
2537 if (!is_global_sym(func))
2538 continue;
2539
2540 if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR,
2541 strlen(STATIC_CALL_TRAMP_PREFIX_STR)))
2542 func->static_call_tramp = true;
2543
2544 if (arch_is_retpoline(func))
2545 func->retpoline_thunk = true;
2546
2547 if (arch_is_rethunk(func))
2548 func->return_thunk = true;
2549
2550 if (arch_is_embedded_insn(func))
2551 func->embedded_insn = true;
2552
2553 if (arch_ftrace_match(func->name))
2554 func->fentry = true;
2555
2556 if (is_profiling_func(func->name))
2557 func->profiling_func = true;
2558
2559 len = strlen(func->name);
2560 if (len > sym_name_max_len)
2561 sym_name_max_len = len;
2562 }
2563
2564 return 0;
2565 }
2566
mark_rodata(struct objtool_file * file)2567 static void mark_rodata(struct objtool_file *file)
2568 {
2569 struct section *sec;
2570 bool found = false;
2571
2572 /*
2573 * Search for the following rodata sections, each of which can
2574 * potentially contain jump tables:
2575 *
2576 * - .rodata: can contain GCC switch tables
2577 * - .rodata.<func>: same, if -fdata-sections is being used
2578 * - .data.rel.ro.c_jump_table: contains C annotated jump tables
2579 *
2580 * .rodata.str1.* sections are ignored; they don't contain jump tables.
2581 */
2582 for_each_sec(file->elf, sec) {
2583 if ((!strncmp(sec->name, ".rodata", 7) &&
2584 !strstr(sec->name, ".str1.")) ||
2585 !strncmp(sec->name, ".data.rel.ro", 12)) {
2586 sec->rodata = true;
2587 found = true;
2588 }
2589 }
2590
2591 file->rodata = found;
2592 }
2593
/*
 * Mark instructions that live in symbol "holes" (code not covered by any
 * symbol) so later passes can skip them.  Only meaningful for whole-archive
 * (vmlinux) runs.
 */
static void mark_holes(struct objtool_file *file)
{
	struct instruction *insn;
	bool in_hole = false;

	if (!opts.link)
		return;

	/*
	 * Whole archive runs might encounter dead code from weak symbols.
	 * This is where the linker will have dropped the weak symbol in
	 * favour of a regular symbol, but leaves the code in place.
	 */
	for_each_insn(file, insn) {
		/* An insn with a symbol (or outside any hole) ends the hole. */
		if (insn->sym || !find_symbol_hole_containing(insn->sec, insn->offset)) {
			in_hole = false;
			continue;
		}

		/* Skip function padding and pfx code */
		if (!in_hole && insn->type == INSN_NOP)
			continue;

		in_hole = true;
		insn->hole = 1;

		/*
		 * If this hole jumps to a .cold function, mark it ignore.
		 */
		if (insn->jump_dest) {
			struct symbol *dest_func = insn_func(insn->jump_dest);

			if (dest_func && dest_func->cold)
				dest_func->ignore = true;
		}
	}
}
2631
validate_branch_enabled(void)2632 static bool validate_branch_enabled(void)
2633 {
2634 return opts.stackval ||
2635 opts.orc ||
2636 opts.uaccess ||
2637 opts.checksum;
2638 }
2639
/*
 * Top-level decode pipeline: decode all instructions and wire up their
 * cross-references.  NOTE: the pass ordering below is load-bearing; several
 * passes depend on state established by earlier ones (see the inline
 * comments).
 */
static int decode_sections(struct objtool_file *file)
{
	file->klp = is_livepatch_module(file);

	mark_rodata(file);

	if (init_pv_ops(file))
		return -1;

	/*
	 * Must be before add_{jump_call}_destination.
	 */
	if (classify_symbols(file))
		return -1;

	if (decode_instructions(file))
		return -1;

	if (add_ignores(file))
		return -1;

	add_uaccess_safe(file);

	if (read_annotate(file, __annotate_early))
		return -1;

	/*
	 * Must be before add_jump_destinations(), which depends on 'func'
	 * being set for alternatives, to enable proper sibling call detection.
	 */
	if (validate_branch_enabled() || opts.noinstr || opts.hack_jump_label || opts.disas) {
		if (add_special_section_alts(file))
			return -1;
	}

	if (add_jump_destinations(file))
		return -1;

	/*
	 * Must be before add_call_destination(); it changes INSN_CALL to
	 * INSN_JUMP.
	 */
	if (read_annotate(file, __annotate_ifc))
		return -1;

	if (add_call_destinations(file))
		return -1;

	if (add_jump_table_alts(file))
		return -1;

	if (read_unwind_hints(file))
		return -1;

	/* Must be after add_jump_destinations() */
	mark_holes(file);

	/*
	 * Must be after add_call_destinations() such that it can override
	 * dead_end_function() marks.
	 */
	if (read_annotate(file, __annotate_late))
		return -1;

	return 0;
}
2706
is_special_call(struct instruction * insn)2707 static bool is_special_call(struct instruction *insn)
2708 {
2709 if (insn->type == INSN_CALL) {
2710 struct symbol *dest = insn_call_dest(insn);
2711
2712 if (!dest)
2713 return false;
2714
2715 if (dest->fentry || dest->embedded_insn)
2716 return true;
2717 }
2718
2719 return false;
2720 }
2721
has_modified_stack_frame(struct instruction * insn,struct insn_state * state)2722 static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state)
2723 {
2724 struct cfi_state *cfi = &state->cfi;
2725 int i;
2726
2727 if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap)
2728 return true;
2729
2730 if (cfi->cfa.offset != initial_func_cfi.cfa.offset)
2731 return true;
2732
2733 if (cfi->stack_size != initial_func_cfi.cfa.offset)
2734 return true;
2735
2736 for (i = 0; i < CFI_NUM_REGS; i++) {
2737 if (cfi->regs[i].base != initial_func_cfi.regs[i].base ||
2738 cfi->regs[i].offset != initial_func_cfi.regs[i].offset)
2739 return true;
2740 }
2741
2742 return false;
2743 }
2744
check_reg_frame_pos(const struct cfi_reg * reg,int expected_offset)2745 static bool check_reg_frame_pos(const struct cfi_reg *reg,
2746 int expected_offset)
2747 {
2748 return reg->base == CFI_CFA &&
2749 reg->offset == expected_offset;
2750 }
2751
has_valid_stack_frame(struct insn_state * state)2752 static bool has_valid_stack_frame(struct insn_state *state)
2753 {
2754 struct cfi_state *cfi = &state->cfi;
2755
2756 if (cfi->cfa.base == CFI_BP &&
2757 check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) &&
2758 check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8))
2759 return true;
2760
2761 if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP)
2762 return true;
2763
2764 return false;
2765 }
2766
update_cfi_state_regs(struct instruction * insn,struct cfi_state * cfi,struct stack_op * op)2767 static int update_cfi_state_regs(struct instruction *insn,
2768 struct cfi_state *cfi,
2769 struct stack_op *op)
2770 {
2771 struct cfi_reg *cfa = &cfi->cfa;
2772
2773 if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
2774 return 0;
2775
2776 /* push */
2777 if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
2778 cfa->offset += 8;
2779
2780 /* pop */
2781 if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
2782 cfa->offset -= 8;
2783
2784 /* add immediate to sp */
2785 if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD &&
2786 op->dest.reg == CFI_SP && op->src.reg == CFI_SP)
2787 cfa->offset -= op->src.offset;
2788
2789 return 0;
2790 }
2791
save_reg(struct cfi_state * cfi,unsigned char reg,int base,int offset)2792 static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset)
2793 {
2794 if (arch_callee_saved_reg(reg) &&
2795 cfi->regs[reg].base == CFI_UNDEFINED) {
2796 cfi->regs[reg].base = base;
2797 cfi->regs[reg].offset = offset;
2798 }
2799 }
2800
restore_reg(struct cfi_state * cfi,unsigned char reg)2801 static void restore_reg(struct cfi_state *cfi, unsigned char reg)
2802 {
2803 cfi->regs[reg].base = initial_func_cfi.regs[reg].base;
2804 cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset;
2805 }
2806
2807 /*
2808 * A note about DRAP stack alignment:
2809 *
2810 * GCC has the concept of a DRAP register, which is used to help keep track of
2811 * the stack pointer when aligning the stack. r10 or r13 is used as the DRAP
2812 * register. The typical DRAP pattern is:
2813 *
2814 * 4c 8d 54 24 08 lea 0x8(%rsp),%r10
2815 * 48 83 e4 c0 and $0xffffffffffffffc0,%rsp
2816 * 41 ff 72 f8 pushq -0x8(%r10)
2817 * 55 push %rbp
2818 * 48 89 e5 mov %rsp,%rbp
2819 * (more pushes)
2820 * 41 52 push %r10
2821 * ...
2822 * 41 5a pop %r10
2823 * (more pops)
2824 * 5d pop %rbp
2825 * 49 8d 62 f8 lea -0x8(%r10),%rsp
2826 * c3 retq
2827 *
2828 * There are some variations in the epilogues, like:
2829 *
2830 * 5b pop %rbx
2831 * 41 5a pop %r10
2832 * 41 5c pop %r12
2833 * 41 5d pop %r13
2834 * 41 5e pop %r14
2835 * c9 leaveq
2836 * 49 8d 62 f8 lea -0x8(%r10),%rsp
2837 * c3 retq
2838 *
2839 * and:
2840 *
2841 * 4c 8b 55 e8 mov -0x18(%rbp),%r10
2842 * 48 8b 5d e0 mov -0x20(%rbp),%rbx
2843 * 4c 8b 65 f0 mov -0x10(%rbp),%r12
2844 * 4c 8b 6d f8 mov -0x8(%rbp),%r13
2845 * c9 leaveq
2846 * 49 8d 62 f8 lea -0x8(%r10),%rsp
2847 * c3 retq
2848 *
2849 * Sometimes r13 is used as the DRAP register, in which case it's saved and
2850 * restored beforehand:
2851 *
2852 * 41 55 push %r13
2853 * 4c 8d 6c 24 10 lea 0x10(%rsp),%r13
2854 * 48 83 e4 f0 and $0xfffffffffffffff0,%rsp
2855 * ...
2856 * 49 8d 65 f0 lea -0x10(%r13),%rsp
2857 * 41 5d pop %r13
2858 * c3 retq
2859 */
/*
 * Apply the effect of a single decoded stack operation @op to the CFI state
 * @cfi: track the CFA (base register + offset), where callee-saved registers
 * live, the tracked stack size, and GCC's DRAP stack-realignment protocol
 * (see the big comment above).
 *
 * Returns 0 on success, non-zero for an unsupported or malformed stack
 * operation (a warning is printed).  The branch order below is deliberate;
 * several cases deliberately shadow later ones.
 */
static int update_cfi_state(struct instruction *insn,
			    struct instruction *next_insn,
			    struct cfi_state *cfi, struct stack_op *op)
{
	struct cfi_reg *cfa = &cfi->cfa;
	struct cfi_reg *regs = cfi->regs;

	/* ignore UNWIND_HINT_UNDEFINED regions */
	if (cfi->force_undefined)
		return 0;

	/* stack operations don't make sense with an undefined CFA */
	if (cfa->base == CFI_UNDEFINED) {
		if (insn_func(insn)) {
			WARN_INSN(insn, "undefined stack state");
			return 1;
		}
		return 0;
	}

	/* In REGS regions, only the simplified push/pop tracking applies. */
	if (cfi->type == UNWIND_HINT_TYPE_REGS ||
	    cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL)
		return update_cfi_state_regs(insn, cfi, op);

	switch (op->dest.type) {

	case OP_DEST_REG:
		switch (op->src.type) {

		case OP_SRC_REG:
			if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
			    cfa->base == CFI_SP &&
			    check_reg_frame_pos(&regs[CFI_BP], -cfa->offset)) {

				/* mov %rsp, %rbp */
				cfa->base = op->dest.reg;
				cfi->bp_scratch = false;
			}

			else if (op->src.reg == CFI_SP &&
				 op->dest.reg == CFI_BP && cfi->drap) {

				/* drap: mov %rsp, %rbp */
				regs[CFI_BP].base = CFI_BP;
				regs[CFI_BP].offset = -cfi->stack_size;
				cfi->bp_scratch = false;
			}

			else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {

				/*
				 * mov %rsp, %reg
				 *
				 * This is needed for the rare case where GCC
				 * does:
				 *
				 *   mov    %rsp, %rax
				 *   ...
				 *   mov    %rax, %rsp
				 */
				cfi->vals[op->dest.reg].base = CFI_CFA;
				cfi->vals[op->dest.reg].offset = -cfi->stack_size;
			}

			else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
				 (cfa->base == CFI_BP || cfa->base == cfi->drap_reg)) {

				/*
				 * mov %rbp, %rsp
				 *
				 * Restore the original stack pointer (Clang).
				 */
				cfi->stack_size = -cfi->regs[CFI_BP].offset;
			}

			else if (op->dest.reg == cfa->base) {

				/* mov %reg, %rsp */
				if (cfa->base == CFI_SP &&
				    cfi->vals[op->src.reg].base == CFI_CFA) {

					/*
					 * This is needed for the rare case
					 * where GCC does something dumb like:
					 *
					 *   lea    0x8(%rsp), %rcx
					 *   ...
					 *   mov    %rcx, %rsp
					 */
					cfa->offset = -cfi->vals[op->src.reg].offset;
					cfi->stack_size = cfa->offset;

				} else if (cfa->base == CFI_SP &&
					   cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
					   cfi->vals[op->src.reg].offset == cfa->offset) {

					/*
					 * Stack swizzle:
					 *
					 * 1: mov %rsp, (%[tos])
					 * 2: mov %[tos], %rsp
					 *    ...
					 * 3: pop %rsp
					 *
					 * Where:
					 *
					 * 1 - places a pointer to the previous
					 *     stack at the Top-of-Stack of the
					 *     new stack.
					 *
					 * 2 - switches to the new stack.
					 *
					 * 3 - pops the Top-of-Stack to restore
					 *     the original stack.
					 *
					 * Note: we set base to SP_INDIRECT
					 * here and preserve offset. Therefore
					 * when the unwinder reaches ToS it
					 * will dereference SP and then add the
					 * offset to find the next frame, IOW:
					 * (%rsp) + offset.
					 */
					cfa->base = CFI_SP_INDIRECT;

				} else {
					cfa->base = CFI_UNDEFINED;
					cfa->offset = 0;
				}
			}

			else if (op->dest.reg == CFI_SP &&
				 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
				 cfi->vals[op->src.reg].offset == cfa->offset) {

				/*
				 * The same stack swizzle case 2) as above. But
				 * because we can't change cfa->base, case 3)
				 * will become a regular POP. Pretend we're a
				 * PUSH so things don't go unbalanced.
				 */
				cfi->stack_size += 8;
			}

			else if (cfi->vals[op->src.reg].base == CFI_CFA) {
				/*
				 * Clang RSP musical chairs:
				 *
				 * mov %rsp, %rdx [handled above]
				 * ...
				 * mov %rdx, %rbx [handled here]
				 * ...
				 * mov %rbx, %rsp [handled above]
				 */
				cfi->vals[op->dest.reg].base = CFI_CFA;
				cfi->vals[op->dest.reg].offset = cfi->vals[op->src.reg].offset;
			}


			break;

		case OP_SRC_ADD:
			if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) {

				/* add imm, %rsp */
				cfi->stack_size -= op->src.offset;
				if (cfa->base == CFI_SP)
					cfa->offset -= op->src.offset;
				break;
			}

			if (op->dest.reg == CFI_BP && op->src.reg == CFI_SP &&
			    insn->sym->frame_pointer) {
				/* addi.d fp,sp,imm on LoongArch */
				if (cfa->base == CFI_SP && cfa->offset == op->src.offset) {
					cfa->base = CFI_BP;
					cfa->offset = 0;
				}
				break;
			}

			if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {
				/* addi.d sp,fp,imm on LoongArch */
				if (cfa->base == CFI_BP && cfa->offset == 0) {
					if (insn->sym->frame_pointer) {
						cfa->base = CFI_SP;
						cfa->offset = -op->src.offset;
					}
				} else {
					/* lea disp(%rbp), %rsp */
					cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
				}
				break;
			}

			if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {

				/* drap: lea disp(%rsp), %drap */
				cfi->drap_reg = op->dest.reg;

				/*
				 * lea disp(%rsp), %reg
				 *
				 * This is needed for the rare case where GCC
				 * does something dumb like:
				 *
				 *   lea    0x8(%rsp), %rcx
				 *   ...
				 *   mov    %rcx, %rsp
				 */
				cfi->vals[op->dest.reg].base = CFI_CFA;
				cfi->vals[op->dest.reg].offset = \
					-cfi->stack_size + op->src.offset;

				break;
			}

			if (cfi->drap && op->dest.reg == CFI_SP &&
			    op->src.reg == cfi->drap_reg) {

				 /* drap: lea disp(%drap), %rsp */
				cfa->base = CFI_SP;
				cfa->offset = cfi->stack_size = -op->src.offset;
				cfi->drap_reg = CFI_UNDEFINED;
				cfi->drap = false;
				break;
			}

			if (op->dest.reg == cfi->cfa.base && !(next_insn && next_insn->hint)) {
				WARN_INSN(insn, "unsupported stack register modification");
				return -1;
			}

			break;

		case OP_SRC_AND:
			if (op->dest.reg != CFI_SP ||
			    (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
			    (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
				WARN_INSN(insn, "unsupported stack pointer realignment");
				return -1;
			}

			if (cfi->drap_reg != CFI_UNDEFINED) {
				/* drap: and imm, %rsp */
				cfa->base = cfi->drap_reg;
				cfa->offset = cfi->stack_size = 0;
				cfi->drap = true;
			}

			/*
			 * Older versions of GCC (4.8ish) realign the stack
			 * without DRAP, with a frame pointer.
			 */

			break;

		case OP_SRC_POP:
		case OP_SRC_POPF:
			if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) {

				/* pop %rsp; # restore from a stack swizzle */
				cfa->base = CFI_SP;
				break;
			}

			if (!cfi->drap && op->dest.reg == cfa->base) {

				/* pop %rbp */
				cfa->base = CFI_SP;
			}

			if (cfi->drap && cfa->base == CFI_BP_INDIRECT &&
			    op->dest.reg == cfi->drap_reg &&
			    cfi->drap_offset == -cfi->stack_size) {

				/* drap: pop %drap */
				cfa->base = cfi->drap_reg;
				cfa->offset = 0;
				cfi->drap_offset = -1;

			} else if (cfi->stack_size == -regs[op->dest.reg].offset) {

				/* pop %reg */
				restore_reg(cfi, op->dest.reg);
			}

			cfi->stack_size -= 8;
			if (cfa->base == CFI_SP)
				cfa->offset -= 8;

			break;

		case OP_SRC_REG_INDIRECT:
			if (!cfi->drap && op->dest.reg == cfa->base &&
			    op->dest.reg == CFI_BP) {

				/* mov disp(%rsp), %rbp */
				cfa->base = CFI_SP;
				cfa->offset = cfi->stack_size;
			}

			if (cfi->drap && op->src.reg == CFI_BP &&
			    op->src.offset == cfi->drap_offset) {

				/* drap: mov disp(%rbp), %drap */
				cfa->base = cfi->drap_reg;
				cfa->offset = 0;
				cfi->drap_offset = -1;
			}

			if (cfi->drap && op->src.reg == CFI_BP &&
			    op->src.offset == regs[op->dest.reg].offset) {

				/* drap: mov disp(%rbp), %reg */
				restore_reg(cfi, op->dest.reg);

			} else if (op->src.reg == cfa->base &&
			    op->src.offset == regs[op->dest.reg].offset + cfa->offset) {

				/* mov disp(%rbp), %reg */
				/* mov disp(%rsp), %reg */
				restore_reg(cfi, op->dest.reg);

			} else if (op->src.reg == CFI_SP &&
				   op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) {

				/* mov disp(%rsp), %reg */
				restore_reg(cfi, op->dest.reg);
			}

			break;

		default:
			WARN_INSN(insn, "unknown stack-related instruction");
			return -1;
		}

		break;

	case OP_DEST_PUSH:
	case OP_DEST_PUSHF:
		cfi->stack_size += 8;
		if (cfa->base == CFI_SP)
			cfa->offset += 8;

		if (op->src.type != OP_SRC_REG)
			break;

		if (cfi->drap) {
			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {

				/* drap: push %drap */
				cfa->base = CFI_BP_INDIRECT;
				cfa->offset = -cfi->stack_size;

				/* save drap so we know when to restore it */
				cfi->drap_offset = -cfi->stack_size;

			} else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) {

				/* drap: push %rbp */
				cfi->stack_size = 0;

			} else {

				/* drap: push %reg */
				save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size);
			}

		} else {

			/* push %reg */
			save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size);
		}

		/* detect when asm code uses rbp as a scratch register */
		if (opts.stackval && insn_func(insn) && op->src.reg == CFI_BP &&
		    cfa->base != CFI_BP)
			cfi->bp_scratch = true;
		break;

	case OP_DEST_REG_INDIRECT:

		if (cfi->drap) {
			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {

				/* drap: mov %drap, disp(%rbp) */
				cfa->base = CFI_BP_INDIRECT;
				cfa->offset = op->dest.offset;

				/* save drap offset so we know when to restore it */
				cfi->drap_offset = op->dest.offset;
			} else {

				/* drap: mov reg, disp(%rbp) */
				save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset);
			}

		} else if (op->dest.reg == cfa->base) {

			/* mov reg, disp(%rbp) */
			/* mov reg, disp(%rsp) */
			save_reg(cfi, op->src.reg, CFI_CFA,
				 op->dest.offset - cfi->cfa.offset);

		} else if (op->dest.reg == CFI_SP) {

			/* mov reg, disp(%rsp) */
			save_reg(cfi, op->src.reg, CFI_CFA,
				 op->dest.offset - cfi->stack_size);

		} else if (op->src.reg == CFI_SP && op->dest.offset == 0) {

			/* mov %rsp, (%reg); # setup a stack swizzle. */
			cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT;
			cfi->vals[op->dest.reg].offset = cfa->offset;
		}

		break;

	case OP_DEST_MEM:
		if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
			WARN_INSN(insn, "unknown stack-related memory operation");
			return -1;
		}

		/* pop mem */
		cfi->stack_size -= 8;
		if (cfa->base == CFI_SP)
			cfa->offset -= 8;

		break;

	default:
		WARN_INSN(insn, "unknown stack-related instruction");
		return -1;
	}

	return 0;
}
3300
3301 /*
3302 * The stack layouts of alternatives instructions can sometimes diverge when
3303 * they have stack modifications. That's fine as long as the potential stack
3304 * layouts don't conflict at any given potential instruction boundary.
3305 *
3306 * Flatten the CFIs of the different alternative code streams (both original
3307 * and replacement) into a single shared CFI array which can be used to detect
3308 * conflicts and nicely feed a linear array of ORC entries to the unwinder.
3309 */
propagate_alt_cfi(struct objtool_file * file,struct instruction * insn)3310 static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn)
3311 {
3312 struct cfi_state **alt_cfi;
3313 int group_off;
3314
3315 if (!insn->alt_group)
3316 return 0;
3317
3318 if (!insn->cfi) {
3319 WARN("CFI missing");
3320 return -1;
3321 }
3322
3323 alt_cfi = insn->alt_group->cfi;
3324 group_off = insn->offset - insn->alt_group->first_insn->offset;
3325
3326 if (!alt_cfi[group_off]) {
3327 alt_cfi[group_off] = insn->cfi;
3328 } else {
3329 if (cficmp(alt_cfi[group_off], insn->cfi)) {
3330 struct alt_group *orig_group = insn->alt_group->orig_group ?: insn->alt_group;
3331 struct instruction *orig = orig_group->first_insn;
3332 WARN_INSN(orig, "stack layout conflict in alternatives: %s",
3333 offstr(insn->sec, insn->offset));
3334 return -1;
3335 }
3336 }
3337
3338 return 0;
3339 }
3340
/*
 * Process all stack_ops attached to @insn: update the CFI state for each,
 * and (for uaccess validation) track the uaccess flag across PUSHF/POPF
 * pairs inside alternatives using a small bit-stack in state->uaccess_stack.
 */
static int noinline handle_insn_ops(struct instruction *insn,
				    struct instruction *next_insn,
				    struct insn_state *state)
{
	struct insn_state prev_state __maybe_unused = *state;
	struct stack_op *op;
	int ret = 0;

	for (op = insn->stack_ops; op; op = op->next) {

		ret = update_cfi_state(insn, next_insn, &state->cfi, op);
		if (ret)
			goto done;

		/* PUSHF/POPF tracking only matters inside alternatives. */
		if (!opts.uaccess || !insn->alt_group)
			continue;

		if (op->dest.type == OP_DEST_PUSHF) {
			/*
			 * The low bit of uaccess_stack records the saved
			 * uaccess state; the shifted-in 1 acts as a sentinel
			 * marking the bottom of the stack.
			 */
			if (!state->uaccess_stack) {
				state->uaccess_stack = 1;
			} else if (state->uaccess_stack >> 31) {
				WARN_INSN(insn, "PUSHF stack exhausted");
				ret = 1;
				goto done;
			}
			state->uaccess_stack <<= 1;
			state->uaccess_stack |= state->uaccess;
		}

		if (op->src.type == OP_SRC_POPF) {
			if (state->uaccess_stack) {
				state->uaccess = state->uaccess_stack & 1;
				state->uaccess_stack >>= 1;
				/* Only the sentinel left: stack is empty again. */
				if (state->uaccess_stack == 1)
					state->uaccess_stack = 0;
			}
		}
	}

done:
	TRACE_INSN_STATE(insn, &prev_state, state);

	return ret;
}
3385
/*
 * Compare @insn's recorded CFI against @cfi2, warning about the first
 * category of mismatch found (CFA, then registers, then hint type, then
 * DRAP state).  Returns true only when the two states fully agree.
 */
static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
{
	struct cfi_state *cfi1 = insn->cfi;
	int i;

	if (!cfi1) {
		WARN("CFI missing");
		return false;
	}

	if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) {

		WARN_INSN(insn, "stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
			  cfi1->cfa.base, cfi1->cfa.offset,
			  cfi2->cfa.base, cfi2->cfa.offset);
		return false;

	}

	if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
		/* Report each individual register that differs. */
		for (i = 0; i < CFI_NUM_REGS; i++) {

			if (!memcmp(&cfi1->regs[i], &cfi2->regs[i], sizeof(struct cfi_reg)))
				continue;

			WARN_INSN(insn, "stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
				  i, cfi1->regs[i].base, cfi1->regs[i].offset,
				  i, cfi2->regs[i].base, cfi2->regs[i].offset);
		}
		return false;
	}

	if (cfi1->type != cfi2->type) {

		WARN_INSN(insn, "stack state mismatch: type1=%d type2=%d",
			  cfi1->type, cfi2->type);
		return false;
	}

	if (cfi1->drap != cfi2->drap ||
	    (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) ||
	    (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) {

		WARN_INSN(insn, "stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
			  cfi1->drap, cfi1->drap_reg, cfi1->drap_offset,
			  cfi2->drap, cfi2->drap_reg, cfi2->drap_offset);
		return false;
	}

	return true;
}
3437
func_uaccess_safe(struct symbol * func)3438 static inline bool func_uaccess_safe(struct symbol *func)
3439 {
3440 if (func)
3441 return func->uaccess_safe;
3442
3443 return false;
3444 }
3445
call_dest_name(struct instruction * insn)3446 static inline const char *call_dest_name(struct instruction *insn)
3447 {
3448 static char pvname[19];
3449 struct reloc *reloc;
3450 int idx;
3451
3452 if (insn_call_dest(insn))
3453 return insn_call_dest(insn)->name;
3454
3455 reloc = insn_reloc(NULL, insn);
3456 if (reloc && !strcmp(reloc->sym->name, "pv_ops")) {
3457 idx = (reloc_addend(reloc) / sizeof(void *));
3458 snprintf(pvname, sizeof(pvname), "pv_ops[%d]", idx);
3459 return pvname;
3460 }
3461
3462 return "{dynamic}";
3463 }
3464
/*
 * Check whether a call through the pv_ops[] paravirt table is
 * noinstr-safe: every registered target for the referenced slot must
 * live in a noinstr section.  A slot already proven clean is accepted
 * without re-scanning; setting ->clean before the scan lets the loop
 * clear it again on the first offending target.
 */
static bool pv_call_dest(struct objtool_file *file, struct instruction *insn)
{
	struct symbol *target;
	struct reloc *reloc;
	int idx;

	/* Only relocations against the pv_ops symbol are of interest. */
	reloc = insn_reloc(file, insn);
	if (!reloc || strcmp(reloc->sym->name, "pv_ops"))
		return false;

	idx = arch_insn_adjusted_addend(insn, reloc) / sizeof(void *);

	if (file->pv_ops[idx].clean)
		return true;

	file->pv_ops[idx].clean = true;

	list_for_each_entry(target, &file->pv_ops[idx].targets, pv_target) {
		if (!target->sec->noinstr) {
			WARN("pv_ops[%d]: %s", idx, target->name);
			file->pv_ops[idx].clean = false;
		}
	}

	return file->pv_ops[idx].clean;
}
3491
/*
 * Decide whether a call destination is acceptable from noinstr code.
 * @func is the destination symbol, or NULL for an indirect call.
 */
static inline bool noinstr_call_dest(struct objtool_file *file,
				     struct instruction *insn,
				     struct symbol *func)
{
	/*
	 * We can't deal with indirect function calls at present;
	 * assume they're instrumented.
	 */
	if (!func) {
		/* pv_ops[] calls can still be proven clean per-slot. */
		if (file->pv_ops)
			return pv_call_dest(file, insn);

		return false;
	}

	/*
	 * If the symbol is from a noinstr section; we good.
	 */
	if (func->sec->noinstr)
		return true;

	/*
	 * If the symbol is a static_call trampoline, we can't tell.
	 */
	if (func->static_call_tramp)
		return true;

	/*
	 * The __ubsan_handle_*() calls are like WARN(), they only happen when
	 * something 'BAD' happened. At the risk of taking the machine down,
	 * let them proceed to get the message out.
	 */
	if (!strncmp(func->name, "__ubsan_handle_", 15))
		return true;

	return false;
}
3529
validate_call(struct objtool_file * file,struct instruction * insn,struct insn_state * state)3530 static int validate_call(struct objtool_file *file,
3531 struct instruction *insn,
3532 struct insn_state *state)
3533 {
3534 if (state->noinstr && state->instr <= 0 &&
3535 !noinstr_call_dest(file, insn, insn_call_dest(insn))) {
3536 WARN_INSN(insn, "call to %s() leaves .noinstr.text section", call_dest_name(insn));
3537 return 1;
3538 }
3539
3540 if (state->uaccess && !func_uaccess_safe(insn_call_dest(insn))) {
3541 WARN_INSN(insn, "call to %s() with UACCESS enabled", call_dest_name(insn));
3542 return 1;
3543 }
3544
3545 if (state->df) {
3546 WARN_INSN(insn, "call to %s() with DF set", call_dest_name(insn));
3547 return 1;
3548 }
3549
3550 return 0;
3551 }
3552
/*
 * A sibling (tail) call must not happen with a modified stack frame;
 * otherwise it is subject to the same rules as a regular call.
 */
static int validate_sibling_call(struct objtool_file *file,
				 struct instruction *insn,
				 struct insn_state *state)
{
	bool bad_frame = insn_func(insn) && has_modified_stack_frame(insn, state);

	if (bad_frame) {
		WARN_INSN(insn, "sibling call from callable instruction with modified stack frame");
		return 1;
	}

	return validate_call(file, insn, state);
}
3564
/*
 * Validate the state at a return instruction: instrumentation, UACCESS
 * and DF must be off (with UACCESS polarity matching the function's
 * uaccess-safe marking), the stack frame must be restored, and BP must
 * not have been used as a scratch register.
 */
static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state)
{
	if (state->noinstr && state->instr > 0) {
		WARN_INSN(insn, "return with instrumentation enabled");
		return 1;
	}

	if (state->uaccess && !func_uaccess_safe(func)) {
		WARN_INSN(insn, "return with UACCESS enabled");
		return 1;
	}

	/* Conversely, a uaccess-safe function must keep UACCESS on. */
	if (!state->uaccess && func_uaccess_safe(func)) {
		WARN_INSN(insn, "return with UACCESS disabled from a UACCESS-safe function");
		return 1;
	}

	if (state->df) {
		WARN_INSN(insn, "return with DF set");
		return 1;
	}

	if (func && has_modified_stack_frame(insn, state)) {
		WARN_INSN(insn, "return with modified stack frame");
		return 1;
	}

	if (state->cfi.bp_scratch) {
		WARN_INSN(insn, "BP used as a scratch register");
		return 1;
	}

	return 0;
}
3599
next_insn_to_validate(struct objtool_file * file,struct instruction * insn)3600 static struct instruction *next_insn_to_validate(struct objtool_file *file,
3601 struct instruction *insn)
3602 {
3603 struct alt_group *alt_group = insn->alt_group;
3604
3605 /*
3606 * Simulate the fact that alternatives are patched in-place. When the
3607 * end of a replacement alt_group is reached, redirect objtool flow to
3608 * the end of the original alt_group.
3609 *
3610 * insn->alts->insn -> alt_group->first_insn
3611 * ...
3612 * alt_group->last_insn
3613 * [alt_group->nop] -> next(orig_group->last_insn)
3614 */
3615 if (alt_group) {
3616 if (alt_group->nop) {
3617 /* ->nop implies ->orig_group */
3618 if (insn == alt_group->last_insn)
3619 return alt_group->nop;
3620 if (insn == alt_group->nop)
3621 goto next_orig;
3622 }
3623 if (insn == alt_group->last_insn && alt_group->orig_group)
3624 goto next_orig;
3625 }
3626
3627 return next_insn_same_sec(file, insn);
3628
3629 next_orig:
3630 return next_insn_same_sec(file, alt_group->orig_group->last_insn);
3631 }
3632
/*
 * Decide whether validation should skip the original instructions of
 * an alternative group and follow only the replacement(s).
 */
static bool skip_alt_group(struct instruction *insn)
{
	struct instruction *alt_insn = insn->alts ? insn->alts->insn : NULL;

	if (!insn->alt_group)
		return false;

	/* ANNOTATE_IGNORE_ALTERNATIVE */
	if (insn->alt_group->ignore) {
		TRACE_ALT(insn, "alt group ignored");
		return true;
	}

	/*
	 * For NOP patched with CLAC/STAC, only follow the latter to avoid
	 * impossible code paths combining patched CLAC with unpatched STAC
	 * or vice versa.
	 *
	 * ANNOTATE_IGNORE_ALTERNATIVE could have been used here, but Linus
	 * requested not to do that to avoid hurting .s file readability
	 * around CLAC/STAC alternative sites.
	 */

	if (!alt_insn)
		return false;

	/* Don't override ASM_{CLAC,STAC}_UNSAFE */
	if (alt_insn->alt_group && alt_insn->alt_group->ignore)
		return false;

	return alt_insn->type == INSN_CLAC || alt_insn->type == INSN_STAC;
}
3665
checksum_debug_init(struct objtool_file * file)3666 static int checksum_debug_init(struct objtool_file *file)
3667 {
3668 char *dup, *s;
3669
3670 if (!opts.debug_checksum)
3671 return 0;
3672
3673 dup = strdup(opts.debug_checksum);
3674 if (!dup) {
3675 ERROR_GLIBC("strdup");
3676 return -1;
3677 }
3678
3679 s = dup;
3680 while (*s) {
3681 struct symbol *func;
3682 char *comma;
3683
3684 comma = strchr(s, ',');
3685 if (comma)
3686 *comma = '\0';
3687
3688 func = find_symbol_by_name(file->elf, s);
3689 if (!func || !is_func_sym(func))
3690 WARN("--debug-checksum: can't find '%s'", s);
3691 else
3692 func->debug_checksum = 1;
3693
3694 if (!comma)
3695 break;
3696
3697 s = comma + 1;
3698 }
3699
3700 free(dup);
3701 return 0;
3702 }
3703
/*
 * Fold @insn into @func's checksum.  Beyond the raw instruction bytes,
 * mix in a stable representation of what the instruction references:
 * the referenced string contents for string sections, otherwise the
 * (demangled) symbol name plus offset, so the checksum survives
 * layout-only changes.
 */
static void checksum_update_insn(struct objtool_file *file, struct symbol *func,
				 struct instruction *insn)
{
	struct reloc *reloc = insn_reloc(file, insn);
	unsigned long offset;
	struct symbol *sym;

	/* Fake instructions have no backing bytes to hash. */
	if (insn->fake)
		return;

	checksum_update(func, insn, insn->sec->data->d_buf + insn->offset, insn->len);

	if (!reloc) {
		struct symbol *call_dest = insn_call_dest(insn);

		/* No reloc: still account for a direct call destination. */
		if (call_dest)
			checksum_update(func, insn, call_dest->demangled_name,
					strlen(call_dest->demangled_name));
		return;
	}

	sym = reloc->sym;
	offset = arch_insn_adjusted_addend(insn, reloc);

	/* For string sections, hash the referenced string itself. */
	if (is_string_sec(sym->sec)) {
		char *str;

		str = sym->sec->data->d_buf + sym->offset + offset;
		checksum_update(func, insn, str, strlen(str));
		return;
	}

	/* Resolve section symbols to the contained symbol + offset. */
	if (is_sec_sym(sym)) {
		sym = find_symbol_containing(reloc->sym->sec, offset);
		if (!sym)
			return;

		offset -= sym->offset;
	}

	checksum_update(func, insn, sym->demangled_name, strlen(sym->demangled_name));
	checksum_update(func, insn, &offset, sizeof(offset));
}
3747
3748 static int validate_branch(struct objtool_file *file, struct symbol *func,
3749 struct instruction *insn, struct insn_state state);
3750 static int do_validate_branch(struct objtool_file *file, struct symbol *func,
3751 struct instruction *insn, struct insn_state *state);
3752
/*
 * Validate a single instruction along a branch: merge/record CFI,
 * recurse into alternatives and taken jump targets, and enforce the
 * per-instruction-type rules (calls, returns, UACCESS, DF).
 *
 * Sets *@dead_end to indicate whether the branch ends at this
 * instruction; returns non-zero when a warning was reported.
 */
static int validate_insn(struct objtool_file *file, struct symbol *func,
			 struct instruction *insn, struct insn_state *statep,
			 struct instruction *prev_insn, struct instruction *next_insn,
			 bool *dead_end)
{
	char *alt_name __maybe_unused = NULL;
	struct alternative *alt;
	u8 visited;
	int ret;

	/*
	 * Any returns before the end of this function are effectively dead
	 * ends, i.e. validate_branch() has reached the end of the branch.
	 */
	*dead_end = true;

	/* Separate visited bits depending on the current UACCESS state. */
	visited = VISITED_BRANCH << statep->uaccess;
	if (insn->visited & VISITED_BRANCH_MASK) {
		/* Revisited without a hint: the CFI states must agree. */
		if (!insn->hint && !insn_cfi_match(insn, &statep->cfi))
			return 1;

		if (insn->visited & visited) {
			TRACE_INSN(insn, "already visited");
			return 0;
		}
	} else {
		nr_insns_visited++;
	}

	if (statep->noinstr)
		statep->instr += insn->instr;

	if (insn->hint) {
		if (insn->restore) {
			struct instruction *save_insn, *i;

			/* Search backwards for the matching CFI save hint. */
			i = insn;
			save_insn = NULL;

			sym_for_each_insn_continue_reverse(file, func, i) {
				if (i->save) {
					save_insn = i;
					break;
				}
			}

			if (!save_insn) {
				WARN_INSN(insn, "no corresponding CFI save for CFI restore");
				return 1;
			}

			if (!save_insn->visited) {
				/*
				 * If the restore hint insn is at the
				 * beginning of a basic block and was
				 * branched to from elsewhere, and the
				 * save insn hasn't been visited yet,
				 * defer following this branch for now.
				 * It will be seen later via the
				 * straight-line path.
				 */
				if (!prev_insn) {
					TRACE_INSN(insn, "defer restore");
					return 0;
				}

				WARN_INSN(insn, "objtool isn't smart enough to handle this CFI save/restore combo");
				return 1;
			}

			insn->cfi = save_insn->cfi;
			nr_cfi_reused++;
		}

		statep->cfi = *insn->cfi;
	} else {
		/* XXX track if we actually changed statep->cfi */

		/* Reuse the previous insn's CFI object when unchanged. */
		if (prev_insn && !cficmp(prev_insn->cfi, &statep->cfi)) {
			insn->cfi = prev_insn->cfi;
			nr_cfi_reused++;
		} else {
			insn->cfi = cfi_hash_find_or_add(&statep->cfi);
		}
	}

	insn->visited |= visited;

	if (propagate_alt_cfi(file, insn))
		return 1;

	/* Validate every alternative replacement before the default path. */
	if (insn->alts) {
		for (alt = insn->alts; alt; alt = alt->next) {
			TRACE_ALT_BEGIN(insn, alt, alt_name);
			ret = validate_branch(file, func, alt->insn, *statep);
			TRACE_ALT_END(insn, alt, alt_name);
			if (ret) {
				BT_INSN(insn, "(alt)");
				return ret;
			}
		}
		TRACE_ALT_INFO_NOADDR(insn, "/ ", "DEFAULT");
	}

	if (skip_alt_group(insn))
		return 0;

	if (handle_insn_ops(insn, next_insn, statep))
		return 1;

	switch (insn->type) {

	case INSN_RETURN:
		TRACE_INSN(insn, "return");
		return validate_return(func, insn, statep);

	case INSN_CALL:
	case INSN_CALL_DYNAMIC:
		if (insn->type == INSN_CALL)
			TRACE_INSN(insn, "call");
		else
			TRACE_INSN(insn, "indirect call");

		ret = validate_call(file, insn, statep);
		if (ret)
			return ret;

		/* With --stackval, callable code must have a frame set up. */
		if (opts.stackval && func && !is_special_call(insn) &&
		    !has_valid_stack_frame(statep)) {
			WARN_INSN(insn, "call without frame pointer save/setup");
			return 1;
		}

		break;

	case INSN_JUMP_CONDITIONAL:
	case INSN_JUMP_UNCONDITIONAL:
		if (is_sibling_call(insn)) {
			TRACE_INSN(insn, "sibling call");
			ret = validate_sibling_call(file, insn, statep);
			if (ret)
				return ret;

		} else if (insn->jump_dest) {
			if (insn->type == INSN_JUMP_UNCONDITIONAL)
				TRACE_INSN(insn, "unconditional jump");
			else
				TRACE_INSN(insn, "jump taken");

			/* Recursively follow the taken branch. */
			ret = validate_branch(file, func, insn->jump_dest, *statep);
			if (ret) {
				BT_INSN(insn, "(branch)");
				return ret;
			}
		}

		/* Unconditional jump: nothing falls through. */
		if (insn->type == INSN_JUMP_UNCONDITIONAL)
			return 0;

		TRACE_INSN(insn, "jump not taken");
		break;

	case INSN_JUMP_DYNAMIC:
	case INSN_JUMP_DYNAMIC_CONDITIONAL:
		TRACE_INSN(insn, "indirect jump");
		if (is_sibling_call(insn)) {
			ret = validate_sibling_call(file, insn, statep);
			if (ret)
				return ret;
		}

		if (insn->type == INSN_JUMP_DYNAMIC)
			return 0;

		break;

	case INSN_SYSCALL:
		/* Only allowed in callable code when followed by a hint. */
		TRACE_INSN(insn, "syscall");
		if (func && (!next_insn || !next_insn->hint)) {
			WARN_INSN(insn, "unsupported instruction in callable function");
			return 1;
		}

		break;

	case INSN_SYSRET:
		TRACE_INSN(insn, "sysret");
		if (func && (!next_insn || !next_insn->hint)) {
			WARN_INSN(insn, "unsupported instruction in callable function");
			return 1;
		}

		return 0;

	case INSN_STAC:
		TRACE_INSN(insn, "stac");
		if (!opts.uaccess)
			break;

		if (statep->uaccess) {
			WARN_INSN(insn, "recursive UACCESS enable");
			return 1;
		}

		statep->uaccess = true;
		break;

	case INSN_CLAC:
		TRACE_INSN(insn, "clac");
		if (!opts.uaccess)
			break;

		if (!statep->uaccess && func) {
			WARN_INSN(insn, "redundant UACCESS disable");
			return 1;
		}

		if (func_uaccess_safe(func) && !statep->uaccess_stack) {
			WARN_INSN(insn, "UACCESS-safe disables UACCESS");
			return 1;
		}

		statep->uaccess = false;
		break;

	case INSN_STD:
		TRACE_INSN(insn, "std");
		if (statep->df) {
			WARN_INSN(insn, "recursive STD");
			return 1;
		}

		statep->df = true;
		break;

	case INSN_CLD:
		TRACE_INSN(insn, "cld");
		if (!statep->df && func) {
			WARN_INSN(insn, "redundant CLD");
			return 1;
		}

		statep->df = false;
		break;

	default:
		break;
	}

	if (insn->dead_end)
		TRACE_INSN(insn, "dead end");

	*dead_end = insn->dead_end;
	return 0;
}
4008
/*
 * Follow the branch starting at the given instruction, and recursively follow
 * any other branches (jumps). Meanwhile, track the frame pointer state at
 * each instruction and validate all the rules described in
 * tools/objtool/Documentation/objtool.txt.
 */
static int do_validate_branch(struct objtool_file *file, struct symbol *func,
			      struct instruction *insn, struct insn_state *state)
{
	struct instruction *next_insn, *prev_insn = NULL;
	bool dead_end;
	int ret;

	if (func && func->ignore)
		return 0;

	do {
		insn->trace = 0;
		next_insn = next_insn_to_validate(file, insn);

		if (opts.checksum && func && insn->sec)
			checksum_update_insn(file, func, insn);

		/* Crossing into a different (parent) function is an error. */
		if (func && insn_func(insn) && func != insn_func(insn)->pfunc) {
			/* Ignore KCFI type preambles, which always fall through */
			if (is_prefix_func(func))
				return 0;

			if (file->ignore_unreachables)
				return 0;

			WARN("%s() falls through to next function %s()",
			     func->name, insn_func(insn)->name);
			func->warned = 1;

			return 1;
		}

		ret = validate_insn(file, func, insn, state, prev_insn, next_insn,
				    &dead_end);

		/* Emit a default trace line if validate_insn() didn't. */
		if (!insn->trace) {
			if (ret)
				TRACE_INSN(insn, "warning (%d)", ret);
			else
				TRACE_INSN(insn, NULL);
		}

		/* Running off the end of a section is only OK if unreachable. */
		if (!dead_end && !next_insn) {
			if (state->cfi.cfa.base == CFI_UNDEFINED)
				return 0;
			if (file->ignore_unreachables)
				return 0;

			WARN("%s%sunexpected end of section %s",
			     func ? func->name : "", func ? "(): " : "",
			     insn->sec->name);
			return 1;
		}

		prev_insn = insn;
		insn = next_insn;

	} while (!dead_end);

	return ret;
}
4076
/*
 * Entry point for branch validation: takes the insn_state by value so
 * every branch works on its own copy, and keeps the trace depth
 * balanced around the recursive walk.
 */
static int validate_branch(struct objtool_file *file, struct symbol *func,
			   struct instruction *insn, struct insn_state state)
{
	int warnings;

	trace_depth_inc();
	warnings = do_validate_branch(file, func, insn, &state);
	trace_depth_dec();
	return warnings;
}
4088
validate_unwind_hint(struct objtool_file * file,struct instruction * insn,struct insn_state * state)4089 static int validate_unwind_hint(struct objtool_file *file,
4090 struct instruction *insn,
4091 struct insn_state *state)
4092 {
4093 if (insn->hint && !insn->visited) {
4094 struct symbol *func = insn_func(insn);
4095 int ret;
4096
4097 if (opts.checksum)
4098 checksum_init(func);
4099
4100 ret = validate_branch(file, func, insn, *state);
4101 if (ret)
4102 BT_INSN(insn, "<=== (hint)");
4103 return ret;
4104 }
4105
4106 return 0;
4107 }
4108
validate_unwind_hints(struct objtool_file * file,struct section * sec)4109 static int validate_unwind_hints(struct objtool_file *file, struct section *sec)
4110 {
4111 struct instruction *insn;
4112 struct insn_state state;
4113 int warnings = 0;
4114
4115 if (!file->hints)
4116 return 0;
4117
4118 init_insn_state(file, &state, sec);
4119
4120 if (sec) {
4121 sec_for_each_insn(file, sec, insn)
4122 warnings += validate_unwind_hint(file, insn, &state);
4123 } else {
4124 for_each_insn(file, insn)
4125 warnings += validate_unwind_hint(file, insn, &state);
4126 }
4127
4128 return warnings;
4129 }
4130
/*
 * Validate rethunk entry constraint: must untrain RET before the first RET.
 *
 * Follow every branch (intra-function) and ensure VALIDATE_UNRET_END comes
 * before an actual RET instruction.
 */
static int validate_unret(struct objtool_file *file, struct instruction *insn)
{
	struct instruction *next, *dest;
	int ret;

	for (;;) {
		next = next_insn_to_validate(file, insn);

		/* Each insn only needs to be checked once. */
		if (insn->visited & VISITED_UNRET)
			return 0;

		insn->visited |= VISITED_UNRET;

		/* Every alternative path must also satisfy the constraint. */
		if (insn->alts) {
			struct alternative *alt;
			for (alt = insn->alts; alt; alt = alt->next) {
				ret = validate_unret(file, alt->insn);
				if (ret) {
					BT_INSN(insn, "(alt)");
					return ret;
				}
			}
		}

		switch (insn->type) {

		case INSN_CALL_DYNAMIC:
		case INSN_JUMP_DYNAMIC:
		case INSN_JUMP_DYNAMIC_CONDITIONAL:
			WARN_INSN(insn, "early indirect call");
			return 1;

		case INSN_JUMP_UNCONDITIONAL:
		case INSN_JUMP_CONDITIONAL:
			if (!is_sibling_call(insn)) {
				if (!insn->jump_dest) {
					WARN_INSN(insn, "unresolved jump target after linking?!?");
					return 1;
				}
				ret = validate_unret(file, insn->jump_dest);
				if (ret) {
					BT_INSN(insn, "(branch%s)",
						insn->type == INSN_JUMP_CONDITIONAL ? "-cond" : "");
					return ret;
				}

				if (insn->type == INSN_JUMP_UNCONDITIONAL)
					return 0;

				break;
			}

			/* fallthrough */
		case INSN_CALL:
			dest = find_insn(file, insn_call_dest(insn)->sec,
					 insn_call_dest(insn)->offset);
			if (!dest) {
				WARN("Unresolved function after linking!?: %s",
				     insn_call_dest(insn)->name);
				return 1;
			}

			ret = validate_unret(file, dest);
			if (ret) {
				BT_INSN(insn, "(call)");
				return ret;
			}
			/*
			 * If a call returns without error, it must have seen UNTRAIN_RET.
			 * Therefore any non-error return is a success.
			 */
			return 0;

		case INSN_RETURN:
			WARN_INSN(insn, "RET before UNTRAIN");
			return 1;

		case INSN_SYSCALL:
			break;

		case INSN_SYSRET:
			return 0;

		case INSN_NOP:
			if (insn->retpoline_safe)
				return 0;
			break;

		default:
			break;
		}

		if (insn->dead_end)
			return 0;

		/* NOTE(review): "teh" typo is in the emitted message; left as-is. */
		if (!next) {
			WARN_INSN(insn, "teh end!");
			return 1;
		}
		insn = next;
	}

	return 0;
}
4241
4242 /*
4243 * Validate that all branches starting at VALIDATE_UNRET_BEGIN encounter
4244 * VALIDATE_UNRET_END before RET.
4245 */
validate_unrets(struct objtool_file * file)4246 static int validate_unrets(struct objtool_file *file)
4247 {
4248 struct instruction *insn;
4249 int warnings = 0;
4250
4251 for_each_insn(file, insn) {
4252 if (!insn->unret)
4253 continue;
4254
4255 warnings += validate_unret(file, insn);
4256 }
4257
4258 return warnings;
4259 }
4260
/*
 * With retpolines/rethunks enabled, flag every naked indirect jump,
 * indirect call or return outside .init sections; with --cfi, also
 * verify that every retpoline call site carries a kCFI check.
 */
static int validate_retpoline(struct objtool_file *file)
{
	struct instruction *insn;
	int warnings = 0;

	for_each_insn(file, insn) {
		if (insn->type != INSN_JUMP_DYNAMIC &&
		    insn->type != INSN_CALL_DYNAMIC &&
		    insn->type != INSN_RETURN)
			continue;

		/* Explicitly annotated as safe. */
		if (insn->retpoline_safe)
			continue;

		/* .init code is discarded and not an attack surface here. */
		if (insn->sec->init)
			continue;

		if (insn->type == INSN_RETURN) {
			if (opts.rethunk) {
				WARN_INSN(insn, "'naked' return found in MITIGATION_RETHUNK build");
				warnings++;
			}
			continue;
		}

		WARN_INSN(insn, "indirect %s found in MITIGATION_RETPOLINE build",
			  insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
		warnings++;
	}

	if (!opts.cfi)
		return warnings;

	/*
	 * kCFI call sites look like:
	 *
	 *     movl $(-0x12345678), %r10d
	 *     addl -4(%r11), %r10d
	 *     jz   1f
	 *     ud2
	 * 1:  cs call __x86_indirect_thunk_r11
	 *
	 * Verify all indirect calls are kCFI adorned by checking for the
	 * UD2. Notably, doing __nocfi calls to regular (cfi) functions is
	 * broken.
	 */
	list_for_each_entry(insn, &file->retpoline_call_list, call_node) {
		struct symbol *sym = insn->sym;

		if (sym && (sym->type == STT_NOTYPE ||
			    sym->type == STT_FUNC) && !sym->nocfi) {
			struct instruction *prev =
				prev_insn_same_sym(file, insn);

			/* The insn before the thunk call must be the UD2. */
			if (!prev || prev->type != INSN_BUG) {
				WARN_INSN(insn, "no-cfi indirect call!");
				warnings++;
			}
		}
	}

	return warnings;
}
4324
is_kasan_insn(struct instruction * insn)4325 static bool is_kasan_insn(struct instruction *insn)
4326 {
4327 return (insn->type == INSN_CALL &&
4328 !strcmp(insn_call_dest(insn)->name, "__asan_handle_no_return"));
4329 }
4330
is_ubsan_insn(struct instruction * insn)4331 static bool is_ubsan_insn(struct instruction *insn)
4332 {
4333 return (insn->type == INSN_CALL &&
4334 !strcmp(insn_call_dest(insn)->name,
4335 "__ubsan_handle_builtin_unreachable"));
4336 }
4337
/*
 * Decide whether an unreachable instruction should be tolerated
 * rather than warned about: padding, alternative replacements,
 * compiler-inserted traps after noreturn calls, and KASAN/UBSAN
 * artifacts are all expected to be unreachable.
 */
static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn)
{
	struct symbol *func = insn_func(insn);
	struct instruction *prev_insn;
	int i;

	if (insn->type == INSN_NOP || insn->type == INSN_TRAP ||
	    insn->hole || (func && func->ignore))
		return true;

	/*
	 * Ignore alternative replacement instructions. This can happen
	 * when a whitelisted function uses one of the ALTERNATIVE macros.
	 */
	if (!strcmp(insn->sec->name, ".altinstr_replacement") ||
	    !strcmp(insn->sec->name, ".altinstr_aux"))
		return true;

	if (!func)
		return false;

	if (func->static_call_tramp)
		return true;

	/*
	 * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
	 * __builtin_unreachable(). The BUG() macro has an unreachable() after
	 * the UD2, which causes GCC's undefined trap logic to emit another UD2
	 * (or occasionally a JMP to UD2).
	 *
	 * It may also insert a UD2 after calling a __noreturn function.
	 */
	prev_insn = prev_insn_same_sec(file, insn);
	if (prev_insn && prev_insn->dead_end &&
	    (insn->type == INSN_BUG ||
	     (insn->type == INSN_JUMP_UNCONDITIONAL &&
	      insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
		return true;

	/*
	 * Check if this (or a subsequent) instruction is related to
	 * CONFIG_UBSAN or CONFIG_KASAN.
	 *
	 * End the search at 5 instructions to avoid going into the weeds.
	 */
	for (i = 0; i < 5; i++) {

		if (is_kasan_insn(insn) || is_ubsan_insn(insn))
			return true;

		if (insn->type == INSN_JUMP_UNCONDITIONAL) {
			/* Follow intra-function jumps only. */
			if (insn->jump_dest &&
			    insn_func(insn->jump_dest) == func) {
				insn = insn->jump_dest;
				continue;
			}

			break;
		}

		/* Stop at the end of the function. */
		if (insn->offset + insn->len >= func->offset + func->len)
			break;

		insn = next_insn_same_sec(file, insn);
	}

	return false;
}
4406
/*
 * For FineIBT or kCFI, a certain number of bytes preceding the function may be
 * NOPs. Those NOPs may be rewritten at runtime and executed, so give them a
 * proper function name: __pfx_<func>.
 *
 * The NOPs may not exist for the following cases:
 *
 *  - compiler cloned functions (*.cold, *.part0, etc)
 *  - asm functions created with inline asm or without SYM_FUNC_START()
 *
 * Also, the function may already have a prefix from a previous objtool run
 * (livepatch extracted functions, or manually running objtool multiple times).
 *
 * So return 0 if the NOPs are missing or the function already has a prefix
 * symbol.
 */
static int create_prefix_symbol(struct objtool_file *file, struct symbol *func)
{
	struct instruction *insn, *prev;
	char name[SYM_NAME_LEN];
	struct cfi_state *cfi;

	if (!is_func_sym(func) || is_prefix_func(func) ||
	    func->cold || func->static_call_tramp)
		return 0;

	/* Account for the "__pfx_" prefix when checking the length limit. */
	if ((strlen(func->name) + sizeof("__pfx_") > SYM_NAME_LEN)) {
		WARN("%s: symbol name too long, can't create __pfx_ symbol",
		     func->name);
		return 0;
	}

	if (snprintf_check(name, SYM_NAME_LEN, "__pfx_%s", func->name))
		return -1;

	/* Livepatch objects may already carry the prefix symbol. */
	if (file->klp) {
		struct symbol *pfx;

		pfx = find_symbol_by_offset(func->sec, func->offset - opts.prefix);
		if (pfx && is_prefix_func(pfx) && !strcmp(pfx->name, name))
			return 0;
	}

	insn = find_insn(file, func->sec, func->offset);
	if (!insn) {
		WARN("%s: can't find starting instruction", func->name);
		return -1;
	}

	/*
	 * Walk backwards through NOPs until exactly opts.prefix bytes
	 * precede the function; bail out if anything else is found first.
	 */
	for (prev = prev_insn_same_sec(file, insn);
	     prev;
	     prev = prev_insn_same_sec(file, prev)) {
		u64 offset;

		if (prev->type != INSN_NOP)
			return 0;

		offset = func->offset - prev->offset;

		if (offset > opts.prefix)
			return 0;

		if (offset < opts.prefix)
			continue;

		if (!elf_create_symbol(file->elf, name, func->sec,
				       GELF_ST_BIND(func->sym.st_info),
				       GELF_ST_TYPE(func->sym.st_info),
				       prev->offset, opts.prefix))
			return -1;

		break;
	}

	if (!prev)
		return 0;

	if (!insn->cfi) {
		/*
		 * This can happen if stack validation isn't enabled or the
		 * function is annotated with STACK_FRAME_NON_STANDARD.
		 */
		return 0;
	}

	/* Propagate insn->cfi to the prefix code */
	cfi = cfi_hash_find_or_add(insn->cfi);
	for (; prev != insn; prev = next_insn_same_sec(file, prev))
		prev->cfi = cfi;

	return 0;
}
4499
create_prefix_symbols(struct objtool_file * file)4500 static int create_prefix_symbols(struct objtool_file *file)
4501 {
4502 struct section *sec;
4503 struct symbol *func;
4504
4505 for_each_sec(file->elf, sec) {
4506 if (!is_text_sec(sec))
4507 continue;
4508
4509 sec_for_each_sym(sec, func) {
4510 if (create_prefix_symbol(file, func))
4511 return -1;
4512 }
4513 }
4514
4515 return 0;
4516 }
4517
/*
 * Validate one function symbol: set up UACCESS/trace/checksum state
 * and walk all branches starting at its first instruction.  Aliases
 * and sub-functions (sym->pfunc != sym) are reached via their parent
 * and skipped here.
 */
static int validate_symbol(struct objtool_file *file, struct section *sec,
			   struct symbol *sym, struct insn_state *state)
{
	struct instruction *insn;
	struct symbol *func;
	int ret;

	if (!sym->len) {
		WARN("%s() is missing an ELF size annotation", sym->name);
		return 1;
	}

	if (sym->pfunc != sym || sym->alias != sym)
		return 0;

	insn = find_insn(file, sec, sym->offset);
	if (!insn || insn->visited)
		return 0;

	if (opts.uaccess)
		state->uaccess = sym->uaccess_safe;

	func = insn_func(insn);

	if (opts.checksum)
		checksum_init(func);

	/* --trace: enable per-insn tracing for symbols matching the glob. */
	if (opts.trace && !fnmatch(opts.trace, sym->name, 0)) {
		trace_enable();
		TRACE("%s: validation begin\n", sym->name);
	}

	ret = validate_branch(file, func, insn, *state);
	if (ret)
		BT_INSN(insn, "<=== (sym)");

	TRACE("%s: validation %s\n\n", sym->name, ret ? "failed" : "end");
	trace_disable();

	if (opts.checksum)
		checksum_finish(func);

	return ret;
}
4562
validate_section(struct objtool_file * file,struct section * sec)4563 static int validate_section(struct objtool_file *file, struct section *sec)
4564 {
4565 struct insn_state state;
4566 struct symbol *func;
4567 int warnings = 0;
4568
4569 sec_for_each_sym(sec, func) {
4570 if (!is_func_sym(func))
4571 continue;
4572
4573 init_insn_state(file, &state, sec);
4574 set_func_state(&state.cfi);
4575
4576 warnings += validate_symbol(file, sec, func, &state);
4577 }
4578
4579 return warnings;
4580 }
4581
validate_noinstr_sections(struct objtool_file * file)4582 static int validate_noinstr_sections(struct objtool_file *file)
4583 {
4584 struct section *sec;
4585 int warnings = 0;
4586
4587 sec = find_section_by_name(file->elf, ".noinstr.text");
4588 if (sec) {
4589 warnings += validate_section(file, sec);
4590 warnings += validate_unwind_hints(file, sec);
4591 }
4592
4593 sec = find_section_by_name(file->elf, ".entry.text");
4594 if (sec) {
4595 warnings += validate_section(file, sec);
4596 warnings += validate_unwind_hints(file, sec);
4597 }
4598
4599 sec = find_section_by_name(file->elf, ".cpuidle.text");
4600 if (sec) {
4601 warnings += validate_section(file, sec);
4602 warnings += validate_unwind_hints(file, sec);
4603 }
4604
4605 return warnings;
4606 }
4607
validate_functions(struct objtool_file * file)4608 static int validate_functions(struct objtool_file *file)
4609 {
4610 struct section *sec;
4611 int warnings = 0;
4612
4613 for_each_sec(file->elf, sec) {
4614 if (!is_text_sec(sec))
4615 continue;
4616
4617 warnings += validate_section(file, sec);
4618 }
4619
4620 return warnings;
4621 }
4622
mark_endbr_used(struct instruction * insn)4623 static void mark_endbr_used(struct instruction *insn)
4624 {
4625 if (!list_empty(&insn->call_node))
4626 list_del_init(&insn->call_node);
4627 }
4628
noendbr_range(struct objtool_file * file,struct instruction * insn)4629 static bool noendbr_range(struct objtool_file *file, struct instruction *insn)
4630 {
4631 struct symbol *sym = find_symbol_containing(insn->sec, insn->offset-1);
4632 struct instruction *first;
4633
4634 if (!sym)
4635 return false;
4636
4637 first = find_insn(file, sym->sec, sym->offset);
4638 if (!first)
4639 return false;
4640
4641 if (first->type != INSN_ENDBR && !first->noendbr)
4642 return false;
4643
4644 return insn->offset == sym->offset + sym->len;
4645 }
4646
__validate_ibt_insn(struct objtool_file * file,struct instruction * insn,struct instruction * dest)4647 static int __validate_ibt_insn(struct objtool_file *file, struct instruction *insn,
4648 struct instruction *dest)
4649 {
4650 if (dest->type == INSN_ENDBR) {
4651 mark_endbr_used(dest);
4652 return 0;
4653 }
4654
4655 if (insn_func(dest) && insn_func(insn) &&
4656 insn_func(dest)->pfunc == insn_func(insn)->pfunc) {
4657 /*
4658 * Anything from->to self is either _THIS_IP_ or
4659 * IRET-to-self.
4660 *
4661 * There is no sane way to annotate _THIS_IP_ since the
4662 * compiler treats the relocation as a constant and is
4663 * happy to fold in offsets, skewing any annotation we
4664 * do, leading to vast amounts of false-positives.
4665 *
4666 * There's also compiler generated _THIS_IP_ through
4667 * KCOV and such which we have no hope of annotating.
4668 *
4669 * As such, blanket accept self-references without
4670 * issue.
4671 */
4672 return 0;
4673 }
4674
4675 /*
4676 * Accept anything ANNOTATE_NOENDBR.
4677 */
4678 if (dest->noendbr)
4679 return 0;
4680
4681 /*
4682 * Accept if this is the instruction after a symbol
4683 * that is (no)endbr -- typical code-range usage.
4684 */
4685 if (noendbr_range(file, dest))
4686 return 0;
4687
4688 WARN_INSN(insn, "relocation to !ENDBR: %s", offstr(dest->sec, dest->offset));
4689 return 1;
4690 }
4691
/*
 * Validate all code references made by @insn against the IBT rules.
 * Direct and indirect branches are skipped here (they are handled during
 * branch validation); this pass looks for function-pointer-style loads of
 * text addresses.  Returns the number of warnings emitted.
 */
static int validate_ibt_insn(struct objtool_file *file, struct instruction *insn)
{
	struct instruction *dest;
	struct reloc *reloc;
	unsigned long off;
	int warnings = 0;

	/*
	 * Looking for function pointer load relocations.  Ignore
	 * direct/indirect branches:
	 */
	switch (insn->type) {

	case INSN_CALL:
	case INSN_CALL_DYNAMIC:
	case INSN_JUMP_CONDITIONAL:
	case INSN_JUMP_UNCONDITIONAL:
	case INSN_JUMP_DYNAMIC:
	case INSN_JUMP_DYNAMIC_CONDITIONAL:
	case INSN_RETURN:
	case INSN_NOP:
		return 0;

	case INSN_LEA_RIP:
		if (!insn_reloc(file, insn)) {
			/* local function pointer reference without reloc */

			/* RIP-relative target computed from the insn itself. */
			off = arch_jump_destination(insn);

			dest = find_insn(file, insn->sec, off);
			if (!dest) {
				WARN_INSN(insn, "corrupt function pointer reference");
				return 1;
			}

			return __validate_ibt_insn(file, insn, dest);
		}
		/* Has a reloc: fall through to the generic reloc walk below. */
		break;

	default:
		break;
	}

	/*
	 * An instruction can carry more than one relocation; walk them all,
	 * each time searching the remainder of the instruction's byte range.
	 */
	for (reloc = insn_reloc(file, insn);
	     reloc;
	     reloc = find_reloc_by_dest_range(file->elf, insn->sec,
					      reloc_offset(reloc) + 1,
					      (insn->offset + insn->len) - (reloc_offset(reloc) + 1))) {

		/* Arch hook adjusts the addend (e.g. for PC-relative forms). */
		off = reloc->sym->offset + arch_insn_adjusted_addend(insn, reloc);

		/* References to non-instruction bytes (data) are not checked here. */
		dest = find_insn(file, reloc->sym->sec, off);
		if (!dest)
			continue;

		warnings += __validate_ibt_insn(file, insn, dest);
	}

	return warnings;
}
4752
validate_ibt_data_reloc(struct objtool_file * file,struct reloc * reloc)4753 static int validate_ibt_data_reloc(struct objtool_file *file,
4754 struct reloc *reloc)
4755 {
4756 struct instruction *dest;
4757
4758 dest = find_insn(file, reloc->sym->sec,
4759 reloc->sym->offset + reloc_addend(reloc));
4760 if (!dest)
4761 return 0;
4762
4763 if (dest->type == INSN_ENDBR) {
4764 mark_endbr_used(dest);
4765 return 0;
4766 }
4767
4768 if (dest->noendbr)
4769 return 0;
4770
4771 WARN_FUNC(reloc->sec->base, reloc_offset(reloc),
4772 "data relocation to !ENDBR: %s", offstr(dest->sec, dest->offset));
4773
4774 return 1;
4775 }
4776
4777 /*
4778 * Validate IBT rules and remove used ENDBR instructions from the seal list.
4779 * Unused ENDBR instructions will be annotated for sealing (i.e., replaced with
4780 * NOPs) later, in create_ibt_endbr_seal_sections().
4781 */
validate_ibt(struct objtool_file * file)4782 static int validate_ibt(struct objtool_file *file)
4783 {
4784 struct section *sec;
4785 struct reloc *reloc;
4786 struct instruction *insn;
4787 int warnings = 0;
4788
4789 for_each_insn(file, insn)
4790 warnings += validate_ibt_insn(file, insn);
4791
4792 for_each_sec(file->elf, sec) {
4793
4794 /* Already done by validate_ibt_insn() */
4795 if (is_text_sec(sec))
4796 continue;
4797
4798 if (!sec->rsec)
4799 continue;
4800
4801 /*
4802 * These sections can reference text addresses, but not with
4803 * the intent to indirect branch to them.
4804 */
4805 if ((!strncmp(sec->name, ".discard", 8) &&
4806 strcmp(sec->name, ".discard.ibt_endbr_noseal")) ||
4807 !strncmp(sec->name, ".debug", 6) ||
4808 !strcmp(sec->name, ".altinstructions") ||
4809 !strcmp(sec->name, ".ibt_endbr_seal") ||
4810 !strcmp(sec->name, ".kcfi_traps") ||
4811 !strcmp(sec->name, ".orc_unwind_ip") ||
4812 !strcmp(sec->name, ".retpoline_sites") ||
4813 !strcmp(sec->name, ".smp_locks") ||
4814 !strcmp(sec->name, ".static_call_sites") ||
4815 !strcmp(sec->name, "_error_injection_whitelist") ||
4816 !strcmp(sec->name, "_kprobe_blacklist") ||
4817 !strcmp(sec->name, "__bug_table") ||
4818 !strcmp(sec->name, "__ex_table") ||
4819 !strcmp(sec->name, "__jump_table") ||
4820 !strcmp(sec->name, ".init.klp_funcs") ||
4821 !strcmp(sec->name, "__mcount_loc") ||
4822 !strcmp(sec->name, ".llvm.call-graph-profile") ||
4823 !strcmp(sec->name, ".llvm_bb_addr_map") ||
4824 !strcmp(sec->name, "__tracepoints") ||
4825 !strcmp(sec->name, ".return_sites") ||
4826 !strcmp(sec->name, ".call_sites") ||
4827 !strcmp(sec->name, "__patchable_function_entries"))
4828 continue;
4829
4830 for_each_reloc(sec->rsec, reloc)
4831 warnings += validate_ibt_data_reloc(file, reloc);
4832 }
4833
4834 return warnings;
4835 }
4836
validate_sls(struct objtool_file * file)4837 static int validate_sls(struct objtool_file *file)
4838 {
4839 struct instruction *insn, *next_insn;
4840 int warnings = 0;
4841
4842 for_each_insn(file, insn) {
4843 next_insn = next_insn_same_sec(file, insn);
4844
4845 if (insn->retpoline_safe)
4846 continue;
4847
4848 switch (insn->type) {
4849 case INSN_RETURN:
4850 if (!next_insn || next_insn->type != INSN_TRAP) {
4851 WARN_INSN(insn, "missing int3 after ret");
4852 warnings++;
4853 }
4854
4855 break;
4856 case INSN_JUMP_DYNAMIC:
4857 if (!next_insn || next_insn->type != INSN_TRAP) {
4858 WARN_INSN(insn, "missing int3 after indirect jump");
4859 warnings++;
4860 }
4861 break;
4862 default:
4863 break;
4864 }
4865 }
4866
4867 return warnings;
4868 }
4869
validate_reachable_instructions(struct objtool_file * file)4870 static int validate_reachable_instructions(struct objtool_file *file)
4871 {
4872 struct instruction *insn, *prev_insn;
4873 struct symbol *call_dest;
4874 int warnings = 0;
4875
4876 if (file->ignore_unreachables)
4877 return 0;
4878
4879 for_each_insn(file, insn) {
4880 if (insn->visited || ignore_unreachable_insn(file, insn))
4881 continue;
4882
4883 prev_insn = prev_insn_same_sec(file, insn);
4884 if (prev_insn && prev_insn->dead_end) {
4885 call_dest = insn_call_dest(prev_insn);
4886 if (call_dest) {
4887 WARN_INSN(insn, "%s() missing __noreturn in .c/.h or NORETURN() in noreturns.h",
4888 call_dest->name);
4889 warnings++;
4890 continue;
4891 }
4892 }
4893
4894 WARN_INSN(insn, "unreachable instruction");
4895 warnings++;
4896 }
4897
4898 return warnings;
4899 }
4900
arch_absolute_reloc(struct elf * elf,struct reloc * reloc)4901 __weak bool arch_absolute_reloc(struct elf *elf, struct reloc *reloc)
4902 {
4903 unsigned int type = reloc_type(reloc);
4904 size_t sz = elf_addr_size(elf);
4905
4906 return (sz == 8) ? (type == R_ABS64) : (type == R_ABS32);
4907 }
4908
check_abs_references(struct objtool_file * file)4909 static int check_abs_references(struct objtool_file *file)
4910 {
4911 struct section *sec;
4912 struct reloc *reloc;
4913 int ret = 0;
4914
4915 for_each_sec(file->elf, sec) {
4916 /* absolute references in non-loadable sections are fine */
4917 if (!(sec->sh.sh_flags & SHF_ALLOC))
4918 continue;
4919
4920 /* section must have an associated .rela section */
4921 if (!sec->rsec)
4922 continue;
4923
4924 /*
4925 * Special case for compiler generated metadata that is not
4926 * consumed until after boot.
4927 */
4928 if (!strcmp(sec->name, "__patchable_function_entries"))
4929 continue;
4930
4931 for_each_reloc(sec->rsec, reloc) {
4932 if (arch_absolute_reloc(file->elf, reloc)) {
4933 WARN("section %s has absolute relocation at offset 0x%llx",
4934 sec->name, (unsigned long long)reloc_offset(reloc));
4935 ret++;
4936 }
4937 }
4938 }
4939 return ret;
4940 }
4941
/*
 * Bookkeeping node used by free_insns(): records the base address of one
 * contiguous allocation of instructions (the element with idx == 0) so the
 * whole chunk can be freed in a second pass.
 */
struct insn_chunk {
	void *addr;
	struct insn_chunk *next;
};
4946
4947 /*
4948 * Reduce peak RSS usage by freeing insns memory before writing the ELF file,
4949 * which can trigger more allocations for .debug_* sections whose data hasn't
4950 * been read yet.
4951 */
free_insns(struct objtool_file * file)4952 static void free_insns(struct objtool_file *file)
4953 {
4954 struct instruction *insn;
4955 struct insn_chunk *chunks = NULL, *chunk;
4956
4957 for_each_insn(file, insn) {
4958 if (!insn->idx) {
4959 chunk = malloc(sizeof(*chunk));
4960 chunk->addr = insn;
4961 chunk->next = chunks;
4962 chunks = chunk;
4963 }
4964 }
4965
4966 for (chunk = chunks; chunk; chunk = chunk->next)
4967 free(chunk->addr);
4968 }
4969
objtool_disas_insn(struct instruction * insn)4970 const char *objtool_disas_insn(struct instruction *insn)
4971 {
4972 struct disas_context *dctx = objtool_disas_ctx;
4973
4974 if (!dctx)
4975 return "";
4976
4977 disas_insn(dctx, insn);
4978 return disas_result(dctx);
4979 }
4980
/*
 * Main objtool entry point: run all enabled validation passes over @file
 * and generate the requested output sections.
 *
 * Rough pipeline:
 *   1. set up disassembly context, CFI state and hashes
 *   2. decode_sections() - build the instruction/reloc model
 *   3. validation passes (retpoline, branches, unwind hints, unret, IBT,
 *      SLS, reachability) accumulating 'warnings'
 *   4. output-section generation (static calls, retpoline/return sites,
 *      mcount, ORC, ...) which set 'ret' on hard failure
 *
 * Returns 0 on success, non-zero on error (or on warnings with --Werror).
 */
int check(struct objtool_file *file)
{
	struct disas_context *disas_ctx = NULL;
	int ret = 0, warnings = 0;

	/*
	 * Create a disassembly context if we might disassemble any
	 * instruction or function.
	 */
	if (opts.verbose || opts.backtrace || opts.trace || opts.disas) {
		disas_ctx = disas_context_create(file);
		if (!disas_ctx) {
			/* Degrade gracefully: disable features that need it. */
			opts.disas = false;
			opts.trace = false;
		}
		objtool_disas_ctx = disas_ctx;
	}

	/* Initialize the global CFI template states. */
	arch_initial_func_cfi_state(&initial_func_cfi);
	init_cfi_state(&init_cfi);
	init_cfi_state(&func_cfi);
	set_func_state(&func_cfi);
	init_cfi_state(&force_undefined_cfi);
	force_undefined_cfi.force_undefined = true;

	/* Size the CFI hash relative to the symbol count. */
	if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3))) {
		ret = -1;
		goto out;
	}

	cfi_hash_add(&init_cfi);
	cfi_hash_add(&func_cfi);

	ret = checksum_debug_init(file);
	if (ret)
		goto out;

	ret = decode_sections(file);
	if (ret)
		goto out;

	/* Nothing decoded: no validation or output generation needed. */
	if (!nr_insns)
		goto out;

	if (opts.retpoline)
		warnings += validate_retpoline(file);

	if (validate_branch_enabled()) {
		int w = 0;

		w += validate_functions(file);
		w += validate_unwind_hints(file, NULL);
		/*
		 * Only check reachability when the earlier passes were
		 * clean; their warnings would cause false positives here.
		 */
		if (!w)
			w += validate_reachable_instructions(file);

		warnings += w;

	} else if (opts.noinstr) {
		warnings += validate_noinstr_sections(file);
	}

	if (opts.unret) {
		/*
		 * Must be after validate_branch() and friends, it plays
		 * further games with insn->visited.
		 */
		warnings += validate_unrets(file);
	}

	if (opts.ibt)
		warnings += validate_ibt(file);

	if (opts.sls)
		warnings += validate_sls(file);

	/* From here on: output section generation; failures are fatal. */
	if (opts.static_call) {
		ret = create_static_call_sections(file);
		if (ret)
			goto out;
	}

	if (opts.retpoline) {
		ret = create_retpoline_sites_sections(file);
		if (ret)
			goto out;
	}

	if (opts.cfi) {
		ret = create_cfi_sections(file);
		if (ret)
			goto out;
	}

	if (opts.rethunk) {
		ret = create_return_sites_sections(file);
		if (ret)
			goto out;

		if (opts.hack_skylake) {
			ret = create_direct_call_sections(file);
			if (ret)
				goto out;
		}
	}

	if (opts.mcount) {
		ret = create_mcount_loc_sections(file);
		if (ret)
			goto out;
	}

	if (opts.prefix) {
		ret = create_prefix_symbols(file);
		if (ret)
			goto out;
	}

	if (opts.ibt) {
		/* Seal the ENDBRs still on the list after validate_ibt(). */
		ret = create_ibt_endbr_seal_sections(file);
		if (ret)
			goto out;
	}

	if (opts.noabs)
		warnings += check_abs_references(file);

	if (opts.checksum) {
		ret = create_sym_checksum_section(file);
		if (ret)
			goto out;
	}

	if (opts.orc && nr_insns) {
		ret = orc_create(file);
		if (ret)
			goto out;
	}

	if (opts.stats) {
		printf("nr_insns_visited: %ld\n", nr_insns_visited);
		printf("nr_cfi: %ld\n", nr_cfi);
		printf("nr_cfi_reused: %ld\n", nr_cfi_reused);
		printf("nr_cfi_cache: %ld\n", nr_cfi_cache);
	}

out:
	if (ret || warnings) {
		/* --Werror upgrades warnings to a failing exit status. */
		if (opts.werror && warnings)
			ret = 1;

		if (opts.verbose) {
			if (opts.werror && warnings)
				WARN("%d warning(s) upgraded to errors", warnings);
			disas_warned_funcs(disas_ctx);
		}
	}

	if (opts.disas)
		disas_funcs(disas_ctx);

	if (disas_ctx) {
		disas_context_destroy(disas_ctx);
		objtool_disas_ctx = NULL;
	}

	/* Free insn memory before ELF write to reduce peak RSS. */
	free_insns(file);

	if (!ret && !warnings)
		return 0;

	if (opts.backup && make_backup())
		return 1;

	return ret;
}
5156