1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
4 */
5
6 #include <string.h>
7 #include <stdlib.h>
8 #include <inttypes.h>
9 #include <sys/mman.h>
10
11 #include <objtool/builtin.h>
12 #include <objtool/cfi.h>
13 #include <objtool/arch.h>
14 #include <objtool/check.h>
15 #include <objtool/special.h>
16 #include <objtool/warn.h>
17 #include <objtool/endianness.h>
18
19 #include <linux/objtool_types.h>
20 #include <linux/hashtable.h>
21 #include <linux/kernel.h>
22 #include <linux/static_call_types.h>
23 #include <linux/string.h>
24
25 struct alternative {
26 struct alternative *next;
27 struct instruction *insn;
28 };
29
30 static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache;
31
32 static struct cfi_init_state initial_func_cfi;
33 static struct cfi_state init_cfi;
34 static struct cfi_state func_cfi;
35 static struct cfi_state force_undefined_cfi;
36
find_insn(struct objtool_file * file,struct section * sec,unsigned long offset)37 struct instruction *find_insn(struct objtool_file *file,
38 struct section *sec, unsigned long offset)
39 {
40 struct instruction *insn;
41
42 hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) {
43 if (insn->sec == sec && insn->offset == offset)
44 return insn;
45 }
46
47 return NULL;
48 }
49
next_insn_same_sec(struct objtool_file * file,struct instruction * insn)50 struct instruction *next_insn_same_sec(struct objtool_file *file,
51 struct instruction *insn)
52 {
53 if (insn->idx == INSN_CHUNK_MAX)
54 return find_insn(file, insn->sec, insn->offset + insn->len);
55
56 insn++;
57 if (!insn->len)
58 return NULL;
59
60 return insn;
61 }
62
next_insn_same_func(struct objtool_file * file,struct instruction * insn)63 static struct instruction *next_insn_same_func(struct objtool_file *file,
64 struct instruction *insn)
65 {
66 struct instruction *next = next_insn_same_sec(file, insn);
67 struct symbol *func = insn_func(insn);
68
69 if (!func)
70 return NULL;
71
72 if (next && insn_func(next) == func)
73 return next;
74
75 /* Check if we're already in the subfunction: */
76 if (func == func->cfunc)
77 return NULL;
78
79 /* Move to the subfunction: */
80 return find_insn(file, func->cfunc->sec, func->cfunc->offset);
81 }
82
prev_insn_same_sec(struct objtool_file * file,struct instruction * insn)83 static struct instruction *prev_insn_same_sec(struct objtool_file *file,
84 struct instruction *insn)
85 {
86 if (insn->idx == 0) {
87 if (insn->prev_len)
88 return find_insn(file, insn->sec, insn->offset - insn->prev_len);
89 return NULL;
90 }
91
92 return insn - 1;
93 }
94
prev_insn_same_sym(struct objtool_file * file,struct instruction * insn)95 static struct instruction *prev_insn_same_sym(struct objtool_file *file,
96 struct instruction *insn)
97 {
98 struct instruction *prev = prev_insn_same_sec(file, insn);
99
100 if (prev && insn_func(prev) == insn_func(insn))
101 return prev;
102
103 return NULL;
104 }
105
/* Iterate over every decoded instruction in every section of the file. */
#define for_each_insn(file, insn)					\
	for (struct section *__sec, *__fake = (struct section *)1;	\
	     __fake; __fake = NULL)					\
		for_each_sec(file, __sec)				\
			sec_for_each_insn(file, __sec, insn)

/*
 * Iterate over a function's instructions, following into its
 * subfunction (func->cfunc) via next_insn_same_func().
 */
#define func_for_each_insn(file, func, insn)				\
	for (insn = find_insn(file, func->sec, func->offset);		\
	     insn;							\
	     insn = next_insn_same_func(file, insn))

/* Iterate over the instructions covered by @sym, staying in its section. */
#define sym_for_each_insn(file, sym, insn)				\
	for (insn = find_insn(file, sym->sec, sym->offset);		\
	     insn && insn->offset < sym->offset + sym->len;		\
	     insn = next_insn_same_sec(file, insn))

/* Walk backwards from (excluding) @insn down to the start of @sym. */
#define sym_for_each_insn_continue_reverse(file, sym, insn)		\
	for (insn = prev_insn_same_sec(file, insn);			\
	     insn && insn->offset >= sym->offset;			\
	     insn = prev_insn_same_sec(file, insn))

/* Continue a section walk from @insn itself (inclusive). */
#define sec_for_each_insn_from(file, insn)				\
	for (; insn; insn = next_insn_same_sec(file, insn))

/* Continue a section walk from the instruction after @insn. */
#define sec_for_each_insn_continue(file, insn)				\
	for (insn = next_insn_same_sec(file, insn); insn;		\
	     insn = next_insn_same_sec(file, insn))
133
insn_call_dest(struct instruction * insn)134 static inline struct symbol *insn_call_dest(struct instruction *insn)
135 {
136 if (insn->type == INSN_JUMP_DYNAMIC ||
137 insn->type == INSN_CALL_DYNAMIC)
138 return NULL;
139
140 return insn->_call_dest;
141 }
142
insn_jump_table(struct instruction * insn)143 static inline struct reloc *insn_jump_table(struct instruction *insn)
144 {
145 if (insn->type == INSN_JUMP_DYNAMIC ||
146 insn->type == INSN_CALL_DYNAMIC)
147 return insn->_jump_table;
148
149 return NULL;
150 }
151
insn_jump_table_size(struct instruction * insn)152 static inline unsigned long insn_jump_table_size(struct instruction *insn)
153 {
154 if (insn->type == INSN_JUMP_DYNAMIC ||
155 insn->type == INSN_CALL_DYNAMIC)
156 return insn->_jump_table_size;
157
158 return 0;
159 }
160
is_jump_table_jump(struct instruction * insn)161 static bool is_jump_table_jump(struct instruction *insn)
162 {
163 struct alt_group *alt_group = insn->alt_group;
164
165 if (insn_jump_table(insn))
166 return true;
167
168 /* Retpoline alternative for a jump table? */
169 return alt_group && alt_group->orig_group &&
170 insn_jump_table(alt_group->orig_group->first_insn);
171 }
172
is_sibling_call(struct instruction * insn)173 static bool is_sibling_call(struct instruction *insn)
174 {
175 /*
176 * Assume only STT_FUNC calls have jump-tables.
177 */
178 if (insn_func(insn)) {
179 /* An indirect jump is either a sibling call or a jump to a table. */
180 if (insn->type == INSN_JUMP_DYNAMIC)
181 return !is_jump_table_jump(insn);
182 }
183
184 /* add_jump_destinations() sets insn_call_dest(insn) for sibling calls. */
185 return (is_static_jump(insn) && insn_call_dest(insn));
186 }
187
188 /*
189 * Checks if a string ends with another.
190 */
/*
 * Check whether string @s ends with the suffix @sub.
 *
 * Fixes: strlen() returns size_t, not int (silent narrowing on very long
 * names and a signed/unsigned comparison hazard); also return proper bool
 * values instead of a literal 0.
 */
static bool str_ends_with(const char *s, const char *sub)
{
	const size_t slen = strlen(s);
	const size_t sublen = strlen(sub);

	if (sublen > slen)
		return false;

	/* Compare the tail of @s against @sub. */
	return !memcmp(s + slen - sublen, sub, sublen);
}
201
202 /*
203 * Checks if a function is a Rust "noreturn" one.
204 */
static bool is_rust_noreturn(const struct symbol *func)
{
	/*
	 * If it does not start with "_R", then it is not a Rust symbol.
	 */
	if (strncmp(func->name, "_R", 2))
		return false;

	/*
	 * These are just heuristics -- we do not control the precise symbol
	 * name, due to the crate disambiguators (which depend on the compiler)
	 * as well as changes to the source code itself between versions (since
	 * these come from the Rust standard library).
	 *
	 * NOTE(review): suffix/substring matching on mangled names; keep in
	 * sync with the Rust core panicking/slice-index helpers when the
	 * toolchain is updated.
	 */
	return str_ends_with(func->name, "_4core5sliceSp15copy_from_slice17len_mismatch_fail") ||
	       str_ends_with(func->name, "_4core6option13expect_failed") ||
	       str_ends_with(func->name, "_4core6option13unwrap_failed") ||
	       str_ends_with(func->name, "_4core6result13unwrap_failed") ||
	       str_ends_with(func->name, "_4core9panicking5panic") ||
	       str_ends_with(func->name, "_4core9panicking9panic_fmt") ||
	       str_ends_with(func->name, "_4core9panicking14panic_explicit") ||
	       str_ends_with(func->name, "_4core9panicking14panic_nounwind") ||
	       str_ends_with(func->name, "_4core9panicking18panic_bounds_check") ||
	       str_ends_with(func->name, "_4core9panicking18panic_nounwind_fmt") ||
	       str_ends_with(func->name, "_4core9panicking19assert_failed_inner") ||
	       str_ends_with(func->name, "_4core9panicking30panic_null_pointer_dereference") ||
	       str_ends_with(func->name, "_4core9panicking36panic_misaligned_pointer_dereference") ||
	       str_ends_with(func->name, "_7___rustc17rust_begin_unwind") ||
	       strstr(func->name, "_4core9panicking13assert_failed") ||
	       strstr(func->name, "_4core9panicking11panic_const24panic_const_") ||
	       (strstr(func->name, "_4core5slice5index") &&
		strstr(func->name, "slice_") &&
		str_ends_with(func->name, "_fail"));
}
239
240 /*
241 * This checks to see if the given function is a "noreturn" function.
242 *
243 * For global functions which are outside the scope of this object file, we
244 * have to keep a manual list of them.
245 *
246 * For local functions, we have to detect them manually by simply looking for
247 * the lack of a return instruction.
248 */
static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
				int recursion)
{
	int i;
	struct instruction *insn;
	bool empty = true;

	/* Table of known-noreturn global function names, generated from noreturns.h. */
#define NORETURN(func) __stringify(func),
	static const char * const global_noreturns[] = {
#include "noreturns.h"
	};
#undef NORETURN

	if (!func)
		return false;

	/* Global/weak symbols: consult the manual noreturn lists first. */
	if (func->bind == STB_GLOBAL || func->bind == STB_WEAK) {
		if (is_rust_noreturn(func))
			return true;

		for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
			if (!strcmp(func->name, global_noreturns[i]))
				return true;
	}

	/* A weak symbol can be overridden; don't trust its local body. */
	if (func->bind == STB_WEAK)
		return false;

	if (!func->len)
		return false;

	insn = find_insn(file, func->sec, func->offset);
	if (!insn || !insn_func(insn))
		return false;

	/* Any return instruction anywhere in the function disproves dead-end. */
	func_for_each_insn(file, func, insn) {
		empty = false;

		if (insn->type == INSN_RETURN)
			return false;
	}

	if (empty)
		return false;

	/*
	 * A function can have a sibling call instead of a return. In that
	 * case, the function's dead-end status depends on whether the target
	 * of the sibling call returns.
	 */
	func_for_each_insn(file, func, insn) {
		if (is_sibling_call(insn)) {
			struct instruction *dest = insn->jump_dest;

			if (!dest)
				/* sibling call to another file */
				return false;

			/* local sibling call */
			if (recursion == 5) {
				/*
				 * Infinite recursion: two functions have
				 * sibling calls to each other. This is a very
				 * rare case. It means they aren't dead ends.
				 */
				return false;
			}

			return __dead_end_function(file, insn_func(dest), recursion+1);
		}
	}

	/* No return and no returning sibling call: a genuine dead end. */
	return true;
}
323
dead_end_function(struct objtool_file * file,struct symbol * func)324 static bool dead_end_function(struct objtool_file *file, struct symbol *func)
325 {
326 return __dead_end_function(file, func, 0);
327 }
328
init_cfi_state(struct cfi_state * cfi)329 static void init_cfi_state(struct cfi_state *cfi)
330 {
331 int i;
332
333 for (i = 0; i < CFI_NUM_REGS; i++) {
334 cfi->regs[i].base = CFI_UNDEFINED;
335 cfi->vals[i].base = CFI_UNDEFINED;
336 }
337 cfi->cfa.base = CFI_UNDEFINED;
338 cfi->drap_reg = CFI_UNDEFINED;
339 cfi->drap_offset = -1;
340 }
341
init_insn_state(struct objtool_file * file,struct insn_state * state,struct section * sec)342 static void init_insn_state(struct objtool_file *file, struct insn_state *state,
343 struct section *sec)
344 {
345 memset(state, 0, sizeof(*state));
346 init_cfi_state(&state->cfi);
347
348 if (opts.noinstr && sec)
349 state->noinstr = sec->noinstr;
350 }
351
cfi_alloc(void)352 static struct cfi_state *cfi_alloc(void)
353 {
354 struct cfi_state *cfi = calloc(1, sizeof(struct cfi_state));
355 if (!cfi) {
356 ERROR_GLIBC("calloc");
357 exit(1);
358 }
359 nr_cfi++;
360 return cfi;
361 }
362
363 static int cfi_bits;
364 static struct hlist_head *cfi_hash;
365
/*
 * Compare two cfi_state objects, skipping the leading hash-node member so
 * only the CFI payload is compared.  Returns true when the states differ
 * (memcmp() result coerced to bool), false when they are identical.
 *
 * NOTE(review): the skip offset uses sizeof(cfi->hash) while the length
 * uses sizeof(struct hlist_node) -- these must refer to the same member.
 */
static inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2)
{
	return memcmp((void *)cfi1 + sizeof(cfi1->hash),
		      (void *)cfi2 + sizeof(cfi2->hash),
		      sizeof(struct cfi_state) - sizeof(struct hlist_node));
}
372
/* Hash the cfi_state payload (everything after the hash node) into a key. */
static inline u32 cfi_key(struct cfi_state *cfi)
{
	return jhash((void *)cfi + sizeof(cfi->hash),
		     sizeof(*cfi) - sizeof(cfi->hash), 0);
}
378
cfi_hash_find_or_add(struct cfi_state * cfi)379 static struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi)
380 {
381 struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
382 struct cfi_state *obj;
383
384 hlist_for_each_entry(obj, head, hash) {
385 if (!cficmp(cfi, obj)) {
386 nr_cfi_cache++;
387 return obj;
388 }
389 }
390
391 obj = cfi_alloc();
392 *obj = *cfi;
393 hlist_add_head(&obj->hash, head);
394
395 return obj;
396 }
397
cfi_hash_add(struct cfi_state * cfi)398 static void cfi_hash_add(struct cfi_state *cfi)
399 {
400 struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
401
402 hlist_add_head(&cfi->hash, head);
403 }
404
cfi_hash_alloc(unsigned long size)405 static void *cfi_hash_alloc(unsigned long size)
406 {
407 cfi_bits = max(10, ilog2(size));
408 cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits,
409 PROT_READ|PROT_WRITE,
410 MAP_PRIVATE|MAP_ANON, -1, 0);
411 if (cfi_hash == (void *)-1L) {
412 ERROR_GLIBC("mmap fail cfi_hash");
413 cfi_hash = NULL;
414 } else if (opts.stats) {
415 printf("cfi_bits: %d\n", cfi_bits);
416 }
417
418 return cfi_hash;
419 }
420
421 static unsigned long nr_insns;
422 static unsigned long nr_insns_visited;
423
424 /*
425 * Call the arch-specific instruction decoder for all the instructions and add
426 * them to the global instruction list.
427 */
static int decode_instructions(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;
	unsigned long offset;
	struct instruction *insn;
	int ret;

	for_each_sec(file, sec) {
		/* Per-section chunked instruction storage. */
		struct instruction *insns = NULL;
		u8 prev_len = 0;
		u8 idx = 0;

		/* Only decode executable sections. */
		if (!(sec->sh.sh_flags & SHF_EXECINSTR))
			continue;

		if (strcmp(sec->name, ".altinstr_replacement") &&
		    strcmp(sec->name, ".altinstr_aux") &&
		    strncmp(sec->name, ".discard.", 9))
			sec->text = true;

		if (!strcmp(sec->name, ".noinstr.text") ||
		    !strcmp(sec->name, ".entry.text") ||
		    !strcmp(sec->name, ".cpuidle.text") ||
		    !strncmp(sec->name, ".text..__x86.", 13))
			sec->noinstr = true;

		/*
		 * .init.text code is ran before userspace and thus doesn't
		 * strictly need retpolines, except for modules which are
		 * loaded late, they very much do need retpoline in their
		 * .init.text
		 */
		if (!strcmp(sec->name, ".init.text") && !opts.module)
			sec->init = true;

		/* Decode the section linearly, one instruction at a time. */
		for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) {
			/* Allocate a fresh chunk when full (or at the start). */
			if (!insns || idx == INSN_CHUNK_MAX) {
				insns = calloc(sizeof(*insn), INSN_CHUNK_SIZE);
				if (!insns) {
					ERROR_GLIBC("calloc");
					return -1;
				}
				idx = 0;
			} else {
				idx++;
			}
			insn = &insns[idx];
			insn->idx = idx;

			INIT_LIST_HEAD(&insn->call_node);
			insn->sec = sec;
			insn->offset = offset;
			/* Needed by prev_insn_same_sec() at chunk boundaries. */
			insn->prev_len = prev_len;

			ret = arch_decode_instruction(file, sec, offset,
						      sec->sh.sh_size - offset,
						      insn);
			if (ret)
				return ret;

			prev_len = insn->len;

			/*
			 * By default, "ud2" is a dead end unless otherwise
			 * annotated, because GCC 7 inserts it for certain
			 * divide-by-zero cases.
			 */
			if (insn->type == INSN_BUG)
				insn->dead_end = true;

			hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset));
			nr_insns++;
		}

		/* Associate each section symbol with its instructions. */
		sec_for_each_sym(sec, func) {
			if (func->type != STT_NOTYPE && func->type != STT_FUNC)
				continue;

			if (func->offset == sec->sh.sh_size) {
				/* Heuristic: likely an "end" symbol */
				if (func->type == STT_NOTYPE)
					continue;
				ERROR("%s(): STT_FUNC at end of section", func->name);
				return -1;
			}

			if (func->embedded_insn || func->alias != func)
				continue;

			if (!find_insn(file, sec, func->offset)) {
				ERROR("%s(): can't find starting instruction", func->name);
				return -1;
			}

			sym_for_each_insn(file, func, insn) {
				insn->sym = func;
				/* Record ENDBR landing pads for later IBT sealing. */
				if (func->type == STT_FUNC &&
				    insn->type == INSN_ENDBR &&
				    list_empty(&insn->call_node)) {
					if (insn->offset == func->offset) {
						list_add_tail(&insn->call_node, &file->endbr_list);
						file->nr_endbr++;
					} else {
						file->nr_endbr_int++;
					}
				}
			}
		}
	}

	if (opts.stats)
		printf("nr_insns: %lu\n", nr_insns);

	return 0;
}
544
545 /*
546 * Read the pv_ops[] .data table to find the static initialized values.
547 */
static int add_pv_ops(struct objtool_file *file, const char *symname)
{
	struct symbol *sym, *func;
	unsigned long off, end;
	struct reloc *reloc;
	int idx;

	sym = find_symbol_by_name(file->elf, symname);
	if (!sym)
		return 0;

	/* Walk every relocation within the table's data range. */
	off = sym->offset;
	end = off + sym->len;
	for (;;) {
		reloc = find_reloc_by_dest_range(file->elf, sym->sec, off, end - off);
		if (!reloc)
			break;

		/* Slot index within the pointer-sized ops array. */
		idx = (reloc_offset(reloc) - sym->offset) / sizeof(unsigned long);

		/* Section-relative relocs need a symbol lookup by addend. */
		func = reloc->sym;
		if (func->type == STT_SECTION)
			func = find_symbol_by_offset(reloc->sym->sec,
						     reloc_addend(reloc));
		if (!func) {
			ERROR_FUNC(reloc->sym->sec, reloc_addend(reloc),
				   "can't find func at %s[%d]", symname, idx);
			return -1;
		}

		if (objtool_pv_add(file, idx, func))
			return -1;

		/* Continue the search just past this reloc. */
		off = reloc_offset(reloc) + 1;
		if (off > end)
			break;
	}

	return 0;
}
588
589 /*
590 * Allocate and initialize file->pv_ops[].
591 */
init_pv_ops(struct objtool_file * file)592 static int init_pv_ops(struct objtool_file *file)
593 {
594 static const char *pv_ops_tables[] = {
595 "pv_ops",
596 "xen_cpu_ops",
597 "xen_irq_ops",
598 "xen_mmu_ops",
599 NULL,
600 };
601 const char *pv_ops;
602 struct symbol *sym;
603 int idx, nr, ret;
604
605 if (!opts.noinstr)
606 return 0;
607
608 file->pv_ops = NULL;
609
610 sym = find_symbol_by_name(file->elf, "pv_ops");
611 if (!sym)
612 return 0;
613
614 nr = sym->len / sizeof(unsigned long);
615 file->pv_ops = calloc(sizeof(struct pv_state), nr);
616 if (!file->pv_ops) {
617 ERROR_GLIBC("calloc");
618 return -1;
619 }
620
621 for (idx = 0; idx < nr; idx++)
622 INIT_LIST_HEAD(&file->pv_ops[idx].targets);
623
624 for (idx = 0; (pv_ops = pv_ops_tables[idx]); idx++) {
625 ret = add_pv_ops(file, pv_ops);
626 if (ret)
627 return ret;
628 }
629
630 return 0;
631 }
632
create_static_call_sections(struct objtool_file * file)633 static int create_static_call_sections(struct objtool_file *file)
634 {
635 struct static_call_site *site;
636 struct section *sec;
637 struct instruction *insn;
638 struct symbol *key_sym;
639 char *key_name, *tmp;
640 int idx;
641
642 sec = find_section_by_name(file->elf, ".static_call_sites");
643 if (sec) {
644 INIT_LIST_HEAD(&file->static_call_list);
645 WARN("file already has .static_call_sites section, skipping");
646 return 0;
647 }
648
649 if (list_empty(&file->static_call_list))
650 return 0;
651
652 idx = 0;
653 list_for_each_entry(insn, &file->static_call_list, call_node)
654 idx++;
655
656 sec = elf_create_section_pair(file->elf, ".static_call_sites",
657 sizeof(*site), idx, idx * 2);
658 if (!sec)
659 return -1;
660
661 /* Allow modules to modify the low bits of static_call_site::key */
662 sec->sh.sh_flags |= SHF_WRITE;
663
664 idx = 0;
665 list_for_each_entry(insn, &file->static_call_list, call_node) {
666
667 /* populate reloc for 'addr' */
668 if (!elf_init_reloc_text_sym(file->elf, sec,
669 idx * sizeof(*site), idx * 2,
670 insn->sec, insn->offset))
671 return -1;
672
673 /* find key symbol */
674 key_name = strdup(insn_call_dest(insn)->name);
675 if (!key_name) {
676 ERROR_GLIBC("strdup");
677 return -1;
678 }
679 if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR,
680 STATIC_CALL_TRAMP_PREFIX_LEN)) {
681 ERROR("static_call: trampoline name malformed: %s", key_name);
682 return -1;
683 }
684 tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN;
685 memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN);
686
687 key_sym = find_symbol_by_name(file->elf, tmp);
688 if (!key_sym) {
689 if (!opts.module) {
690 ERROR("static_call: can't find static_call_key symbol: %s", tmp);
691 return -1;
692 }
693
694 /*
695 * For modules(), the key might not be exported, which
696 * means the module can make static calls but isn't
697 * allowed to change them.
698 *
699 * In that case we temporarily set the key to be the
700 * trampoline address. This is fixed up in
701 * static_call_add_module().
702 */
703 key_sym = insn_call_dest(insn);
704 }
705
706 /* populate reloc for 'key' */
707 if (!elf_init_reloc_data_sym(file->elf, sec,
708 idx * sizeof(*site) + 4,
709 (idx * 2) + 1, key_sym,
710 is_sibling_call(insn) * STATIC_CALL_SITE_TAIL))
711 return -1;
712
713 idx++;
714 }
715
716 return 0;
717 }
718
create_retpoline_sites_sections(struct objtool_file * file)719 static int create_retpoline_sites_sections(struct objtool_file *file)
720 {
721 struct instruction *insn;
722 struct section *sec;
723 int idx;
724
725 sec = find_section_by_name(file->elf, ".retpoline_sites");
726 if (sec) {
727 WARN("file already has .retpoline_sites, skipping");
728 return 0;
729 }
730
731 idx = 0;
732 list_for_each_entry(insn, &file->retpoline_call_list, call_node)
733 idx++;
734
735 if (!idx)
736 return 0;
737
738 sec = elf_create_section_pair(file->elf, ".retpoline_sites",
739 sizeof(int), idx, idx);
740 if (!sec)
741 return -1;
742
743 idx = 0;
744 list_for_each_entry(insn, &file->retpoline_call_list, call_node) {
745
746 if (!elf_init_reloc_text_sym(file->elf, sec,
747 idx * sizeof(int), idx,
748 insn->sec, insn->offset))
749 return -1;
750
751 idx++;
752 }
753
754 return 0;
755 }
756
create_return_sites_sections(struct objtool_file * file)757 static int create_return_sites_sections(struct objtool_file *file)
758 {
759 struct instruction *insn;
760 struct section *sec;
761 int idx;
762
763 sec = find_section_by_name(file->elf, ".return_sites");
764 if (sec) {
765 WARN("file already has .return_sites, skipping");
766 return 0;
767 }
768
769 idx = 0;
770 list_for_each_entry(insn, &file->return_thunk_list, call_node)
771 idx++;
772
773 if (!idx)
774 return 0;
775
776 sec = elf_create_section_pair(file->elf, ".return_sites",
777 sizeof(int), idx, idx);
778 if (!sec)
779 return -1;
780
781 idx = 0;
782 list_for_each_entry(insn, &file->return_thunk_list, call_node) {
783
784 if (!elf_init_reloc_text_sym(file->elf, sec,
785 idx * sizeof(int), idx,
786 insn->sec, insn->offset))
787 return -1;
788
789 idx++;
790 }
791
792 return 0;
793 }
794
static int create_ibt_endbr_seal_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".ibt_endbr_seal");
	if (sec) {
		WARN("file already has .ibt_endbr_seal, skipping");
		return 0;
	}

	/* Count the sealable ENDBR sites collected in endbr_list. */
	idx = 0;
	list_for_each_entry(insn, &file->endbr_list, call_node)
		idx++;

	if (opts.stats) {
		printf("ibt: ENDBR at function start: %d\n", file->nr_endbr);
		printf("ibt: ENDBR inside functions: %d\n", file->nr_endbr_int);
		printf("ibt: superfluous ENDBR: %d\n", idx);
	}

	if (!idx)
		return 0;

	sec = elf_create_section_pair(file->elf, ".ibt_endbr_seal",
				      sizeof(int), idx, idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->endbr_list, call_node) {

		/* Zero-init the slot; the reloc below supplies the address. */
		int *site = (int *)sec->data->d_buf + idx;
		struct symbol *sym = insn->sym;
		*site = 0;

		/* Reject the legacy magic module entry-point names. */
		if (opts.module && sym && sym->type == STT_FUNC &&
		    insn->offset == sym->offset &&
		    (!strcmp(sym->name, "init_module") ||
		     !strcmp(sym->name, "cleanup_module"))) {
			ERROR("%s(): Magic init_module() function name is deprecated, use module_init(fn) instead",
			      sym->name);
			return -1;
		}

		if (!elf_init_reloc_text_sym(file->elf, sec,
					     idx * sizeof(int), idx,
					     insn->sec, insn->offset))
			return -1;

		idx++;
	}

	return 0;
}
851
create_cfi_sections(struct objtool_file * file)852 static int create_cfi_sections(struct objtool_file *file)
853 {
854 struct section *sec;
855 struct symbol *sym;
856 int idx;
857
858 sec = find_section_by_name(file->elf, ".cfi_sites");
859 if (sec) {
860 INIT_LIST_HEAD(&file->call_list);
861 WARN("file already has .cfi_sites section, skipping");
862 return 0;
863 }
864
865 idx = 0;
866 for_each_sym(file, sym) {
867 if (sym->type != STT_FUNC)
868 continue;
869
870 if (strncmp(sym->name, "__cfi_", 6))
871 continue;
872
873 idx++;
874 }
875
876 sec = elf_create_section_pair(file->elf, ".cfi_sites",
877 sizeof(unsigned int), idx, idx);
878 if (!sec)
879 return -1;
880
881 idx = 0;
882 for_each_sym(file, sym) {
883 if (sym->type != STT_FUNC)
884 continue;
885
886 if (strncmp(sym->name, "__cfi_", 6))
887 continue;
888
889 if (!elf_init_reloc_text_sym(file->elf, sec,
890 idx * sizeof(unsigned int), idx,
891 sym->sec, sym->offset))
892 return -1;
893
894 idx++;
895 }
896
897 return 0;
898 }
899
static int create_mcount_loc_sections(struct objtool_file *file)
{
	/* Entry size follows the ELF address size (see R_ABS32/64 below). */
	size_t addr_size = elf_addr_size(file->elf);
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, "__mcount_loc");
	if (sec) {
		INIT_LIST_HEAD(&file->mcount_loc_list);
		WARN("file already has __mcount_loc section, skipping");
		return 0;
	}

	if (list_empty(&file->mcount_loc_list))
		return 0;

	/* Count the recorded mcount call sites to size the section. */
	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, call_node)
		idx++;

	sec = elf_create_section_pair(file->elf, "__mcount_loc", addr_size,
				      idx, idx);
	if (!sec)
		return -1;

	sec->sh.sh_addralign = addr_size;

	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, call_node) {

		struct reloc *reloc;

		reloc = elf_init_reloc_text_sym(file->elf, sec, idx * addr_size, idx,
						insn->sec, insn->offset);
		if (!reloc)
			return -1;

		/* Absolute relocation matching the entry's pointer width. */
		set_reloc_type(file->elf, reloc, addr_size == 8 ? R_ABS64 : R_ABS32);

		idx++;
	}

	return 0;
}
945
create_direct_call_sections(struct objtool_file * file)946 static int create_direct_call_sections(struct objtool_file *file)
947 {
948 struct instruction *insn;
949 struct section *sec;
950 int idx;
951
952 sec = find_section_by_name(file->elf, ".call_sites");
953 if (sec) {
954 INIT_LIST_HEAD(&file->call_list);
955 WARN("file already has .call_sites section, skipping");
956 return 0;
957 }
958
959 if (list_empty(&file->call_list))
960 return 0;
961
962 idx = 0;
963 list_for_each_entry(insn, &file->call_list, call_node)
964 idx++;
965
966 sec = elf_create_section_pair(file->elf, ".call_sites",
967 sizeof(unsigned int), idx, idx);
968 if (!sec)
969 return -1;
970
971 idx = 0;
972 list_for_each_entry(insn, &file->call_list, call_node) {
973
974 if (!elf_init_reloc_text_sym(file->elf, sec,
975 idx * sizeof(unsigned int), idx,
976 insn->sec, insn->offset))
977 return -1;
978
979 idx++;
980 }
981
982 return 0;
983 }
984
985 /*
986 * Warnings shouldn't be reported for ignored functions.
987 */
add_ignores(struct objtool_file * file)988 static int add_ignores(struct objtool_file *file)
989 {
990 struct section *rsec;
991 struct symbol *func;
992 struct reloc *reloc;
993
994 rsec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
995 if (!rsec)
996 return 0;
997
998 for_each_reloc(rsec, reloc) {
999 switch (reloc->sym->type) {
1000 case STT_FUNC:
1001 func = reloc->sym;
1002 break;
1003
1004 case STT_SECTION:
1005 func = find_func_by_offset(reloc->sym->sec, reloc_addend(reloc));
1006 if (!func)
1007 continue;
1008 break;
1009
1010 default:
1011 ERROR("unexpected relocation symbol type in %s: %d",
1012 rsec->name, reloc->sym->type);
1013 return -1;
1014 }
1015
1016 func->ignore = true;
1017 if (func->cfunc)
1018 func->cfunc->ignore = true;
1019 }
1020
1021 return 0;
1022 }
1023
1024 /*
1025 * This is a whitelist of functions that is allowed to be called with AC set.
1026 * The list is meant to be minimal and only contains compiler instrumentation
1027 * ABI and a few functions used to implement *_{to,from}_user() functions.
1028 *
1029 * These functions must not directly change AC, but may PUSHF/POPF.
1030 */
/* NULL-terminated; see the policy comment above. */
static const char *uaccess_safe_builtin[] = {
	/* KASAN */
	"kasan_report",
	"kasan_check_range",
	/* KASAN out-of-line */
	"__asan_loadN_noabort",
	"__asan_load1_noabort",
	"__asan_load2_noabort",
	"__asan_load4_noabort",
	"__asan_load8_noabort",
	"__asan_load16_noabort",
	"__asan_storeN_noabort",
	"__asan_store1_noabort",
	"__asan_store2_noabort",
	"__asan_store4_noabort",
	"__asan_store8_noabort",
	"__asan_store16_noabort",
	"__kasan_check_read",
	"__kasan_check_write",
	/* KASAN in-line */
	"__asan_report_load_n_noabort",
	"__asan_report_load1_noabort",
	"__asan_report_load2_noabort",
	"__asan_report_load4_noabort",
	"__asan_report_load8_noabort",
	"__asan_report_load16_noabort",
	"__asan_report_store_n_noabort",
	"__asan_report_store1_noabort",
	"__asan_report_store2_noabort",
	"__asan_report_store4_noabort",
	"__asan_report_store8_noabort",
	"__asan_report_store16_noabort",
	/* KCSAN */
	"__kcsan_check_access",
	"__kcsan_mb",
	"__kcsan_wmb",
	"__kcsan_rmb",
	"__kcsan_release",
	"kcsan_found_watchpoint",
	"kcsan_setup_watchpoint",
	"kcsan_check_scoped_accesses",
	"kcsan_disable_current",
	"kcsan_enable_current_nowarn",
	/* KCSAN/TSAN */
	"__tsan_func_entry",
	"__tsan_func_exit",
	"__tsan_read_range",
	"__tsan_write_range",
	"__tsan_read1",
	"__tsan_read2",
	"__tsan_read4",
	"__tsan_read8",
	"__tsan_read16",
	"__tsan_write1",
	"__tsan_write2",
	"__tsan_write4",
	"__tsan_write8",
	"__tsan_write16",
	"__tsan_read_write1",
	"__tsan_read_write2",
	"__tsan_read_write4",
	"__tsan_read_write8",
	"__tsan_read_write16",
	"__tsan_volatile_read1",
	"__tsan_volatile_read2",
	"__tsan_volatile_read4",
	"__tsan_volatile_read8",
	"__tsan_volatile_read16",
	"__tsan_volatile_write1",
	"__tsan_volatile_write2",
	"__tsan_volatile_write4",
	"__tsan_volatile_write8",
	"__tsan_volatile_write16",
	"__tsan_atomic8_load",
	"__tsan_atomic16_load",
	"__tsan_atomic32_load",
	"__tsan_atomic64_load",
	"__tsan_atomic8_store",
	"__tsan_atomic16_store",
	"__tsan_atomic32_store",
	"__tsan_atomic64_store",
	"__tsan_atomic8_exchange",
	"__tsan_atomic16_exchange",
	"__tsan_atomic32_exchange",
	"__tsan_atomic64_exchange",
	"__tsan_atomic8_fetch_add",
	"__tsan_atomic16_fetch_add",
	"__tsan_atomic32_fetch_add",
	"__tsan_atomic64_fetch_add",
	"__tsan_atomic8_fetch_sub",
	"__tsan_atomic16_fetch_sub",
	"__tsan_atomic32_fetch_sub",
	"__tsan_atomic64_fetch_sub",
	"__tsan_atomic8_fetch_and",
	"__tsan_atomic16_fetch_and",
	"__tsan_atomic32_fetch_and",
	"__tsan_atomic64_fetch_and",
	"__tsan_atomic8_fetch_or",
	"__tsan_atomic16_fetch_or",
	"__tsan_atomic32_fetch_or",
	"__tsan_atomic64_fetch_or",
	"__tsan_atomic8_fetch_xor",
	"__tsan_atomic16_fetch_xor",
	"__tsan_atomic32_fetch_xor",
	"__tsan_atomic64_fetch_xor",
	"__tsan_atomic8_fetch_nand",
	"__tsan_atomic16_fetch_nand",
	"__tsan_atomic32_fetch_nand",
	"__tsan_atomic64_fetch_nand",
	"__tsan_atomic8_compare_exchange_strong",
	"__tsan_atomic16_compare_exchange_strong",
	"__tsan_atomic32_compare_exchange_strong",
	"__tsan_atomic64_compare_exchange_strong",
	"__tsan_atomic8_compare_exchange_weak",
	"__tsan_atomic16_compare_exchange_weak",
	"__tsan_atomic32_compare_exchange_weak",
	"__tsan_atomic64_compare_exchange_weak",
	"__tsan_atomic8_compare_exchange_val",
	"__tsan_atomic16_compare_exchange_val",
	"__tsan_atomic32_compare_exchange_val",
	"__tsan_atomic64_compare_exchange_val",
	"__tsan_atomic_thread_fence",
	"__tsan_atomic_signal_fence",
	"__tsan_unaligned_read16",
	"__tsan_unaligned_write16",
	/* KCOV */
	"write_comp_data",
	"check_kcov_mode",
	"__sanitizer_cov_trace_pc",
	"__sanitizer_cov_trace_const_cmp1",
	"__sanitizer_cov_trace_const_cmp2",
	"__sanitizer_cov_trace_const_cmp4",
	"__sanitizer_cov_trace_const_cmp8",
	"__sanitizer_cov_trace_cmp1",
	"__sanitizer_cov_trace_cmp2",
	"__sanitizer_cov_trace_cmp4",
	"__sanitizer_cov_trace_cmp8",
	"__sanitizer_cov_trace_switch",
	/* KMSAN */
	"kmsan_copy_to_user",
	"kmsan_disable_current",
	"kmsan_enable_current",
	"kmsan_report",
	"kmsan_unpoison_entry_regs",
	"kmsan_unpoison_memory",
	"__msan_chain_origin",
	"__msan_get_context_state",
	"__msan_instrument_asm_store",
	"__msan_metadata_ptr_for_load_1",
	"__msan_metadata_ptr_for_load_2",
	"__msan_metadata_ptr_for_load_4",
	"__msan_metadata_ptr_for_load_8",
	"__msan_metadata_ptr_for_load_n",
	"__msan_metadata_ptr_for_store_1",
	"__msan_metadata_ptr_for_store_2",
	"__msan_metadata_ptr_for_store_4",
	"__msan_metadata_ptr_for_store_8",
	"__msan_metadata_ptr_for_store_n",
	"__msan_poison_alloca",
	"__msan_warning",
	/* UBSAN */
	"ubsan_type_mismatch_common",
	"__ubsan_handle_type_mismatch",
	"__ubsan_handle_type_mismatch_v1",
	"__ubsan_handle_shift_out_of_bounds",
	"__ubsan_handle_load_invalid_value",
	/* KSTACK_ERASE */
	"__sanitizer_cov_stack_depth",
	/* TRACE_BRANCH_PROFILING */
	"ftrace_likely_update",
	/* STACKPROTECTOR */
	"__stack_chk_fail",
	/* misc */
	"csum_partial_copy_generic",
	"copy_mc_fragile",
	"copy_mc_fragile_handle_tail",
	"copy_mc_enhanced_fast_string",
	"rep_stos_alternative",
	"rep_movs_alternative",
	"__copy_user_nocache",
	NULL
};
1213
add_uaccess_safe(struct objtool_file * file)1214 static void add_uaccess_safe(struct objtool_file *file)
1215 {
1216 struct symbol *func;
1217 const char **name;
1218
1219 if (!opts.uaccess)
1220 return;
1221
1222 for (name = uaccess_safe_builtin; *name; name++) {
1223 func = find_symbol_by_name(file->elf, *name);
1224 if (!func)
1225 continue;
1226
1227 func->uaccess_safe = true;
1228 }
1229 }
1230
/*
 * Symbols that replace INSN_CALL_DYNAMIC, every (tail) call to such a symbol
 * will be added to the .retpoline_sites section.
 */
__weak bool arch_is_retpoline(struct symbol *sym)
{
	/* Default: no retpoline thunks; arch code overrides this weak stub. */
	return false;
}
1239
/*
 * Symbols that replace INSN_RETURN, every (tail) call to such a symbol
 * will be added to the .return_sites section.
 */
__weak bool arch_is_rethunk(struct symbol *sym)
{
	/* Default: no return thunks; arch code overrides this weak stub. */
	return false;
}
1248
/*
 * Symbols that are embedded inside other instructions, because sometimes crazy
 * code exists. These are mostly ignored for validation purposes.
 */
__weak bool arch_is_embedded_insn(struct symbol *sym)
{
	/* Default: no embedded-instruction symbols; arch code may override. */
	return false;
}
1257
insn_reloc(struct objtool_file * file,struct instruction * insn)1258 static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
1259 {
1260 struct reloc *reloc;
1261
1262 if (insn->no_reloc)
1263 return NULL;
1264
1265 if (!file)
1266 return NULL;
1267
1268 reloc = find_reloc_by_dest_range(file->elf, insn->sec,
1269 insn->offset, insn->len);
1270 if (!reloc) {
1271 insn->no_reloc = 1;
1272 return NULL;
1273 }
1274
1275 return reloc;
1276 }
1277
remove_insn_ops(struct instruction * insn)1278 static void remove_insn_ops(struct instruction *insn)
1279 {
1280 struct stack_op *op, *next;
1281
1282 for (op = insn->stack_ops; op; op = next) {
1283 next = op->next;
1284 free(op);
1285 }
1286 insn->stack_ops = NULL;
1287 }
1288
/*
 * Classify the call site at @insn (a direct CALL, or a tail-call when
 * @sibling is set) and queue it on the matching objtool_file list for
 * later section generation.  May rewrite the instruction bytes in place
 * for the noinstr and mcount hacks.  Returns 0 on success, -1 on ELF
 * write failure.
 */
static int annotate_call_site(struct objtool_file *file,
			      struct instruction *insn, bool sibling)
{
	struct reloc *reloc = insn_reloc(file, insn);
	struct symbol *sym = insn_call_dest(insn);

	/*
	 * No resolved destination: fall back to the reloc's symbol.
	 * NOTE(review): assumes a reloc always exists when the call dest is
	 * unknown -- reloc is not NULL-checked here; confirm that invariant.
	 */
	if (!sym)
		sym = reloc->sym;

	/* Static call trampolines feed .static_call_sites. */
	if (sym->static_call_tramp) {
		list_add_tail(&insn->call_node, &file->static_call_list);
		return 0;
	}

	/* Retpoline thunk calls feed .retpoline_sites. */
	if (sym->retpoline_thunk) {
		list_add_tail(&insn->call_node, &file->retpoline_call_list);
		return 0;
	}

	/*
	 * Many compilers cannot disable KCOV or sanitizer calls with a function
	 * attribute so they need a little help, NOP out any such calls from
	 * noinstr text.
	 */
	if (opts.hack_noinstr && insn->sec->noinstr && sym->profiling_func) {
		/* Kill the reloc so the patched bytes stay as written. */
		if (reloc)
			set_reloc_type(file->elf, reloc, R_NONE);

		/* Tail calls become RET, regular calls become NOP. */
		if (elf_write_insn(file->elf, insn->sec,
				   insn->offset, insn->len,
				   sibling ? arch_ret_insn(insn->len)
					   : arch_nop_insn(insn->len))) {
			return -1;
		}

		insn->type = sibling ? INSN_RETURN : INSN_NOP;

		if (sibling) {
			/*
			 * We've replaced the tail-call JMP insn by two new
			 * insn: RET; INT3, except we only have a single struct
			 * insn here. Mark it retpoline_safe to avoid the SLS
			 * warning, instead of adding another insn.
			 */
			insn->retpoline_safe = true;
		}

		return 0;
	}

	/* __fentry__ call sites feed .mcount_loc, optionally NOPed out. */
	if (opts.mcount && sym->fentry) {
		if (sibling)
			WARN_INSN(insn, "tail call to __fentry__ !?!?");
		if (opts.mnop) {
			/* Kill the reloc so the NOP bytes stay as written. */
			if (reloc)
				set_reloc_type(file->elf, reloc, R_NONE);

			if (elf_write_insn(file->elf, insn->sec,
					   insn->offset, insn->len,
					   arch_nop_insn(insn->len))) {
				return -1;
			}

			insn->type = INSN_NOP;
		}

		list_add_tail(&insn->call_node, &file->mcount_loc_list);
		return 0;
	}

	/* Plain direct calls (outside .init, not embedded) feed call_list. */
	if (insn->type == INSN_CALL && !insn->sec->init &&
	    !insn->_call_dest->embedded_insn)
		list_add_tail(&insn->call_node, &file->call_list);

	/* A call to a noreturn function never comes back. */
	if (!sibling && dead_end_function(file, sym))
		insn->dead_end = true;

	return 0;
}
1368
/*
 * Record @dest as the call destination of @insn and, if it resolved,
 * annotate the call site.  Returns 0 on success or whatever
 * annotate_call_site() returns.
 */
static int add_call_dest(struct objtool_file *file, struct instruction *insn,
			 struct symbol *dest, bool sibling)
{
	insn->_call_dest = dest;
	if (!dest)
		return 0;

	/*
	 * A regular CALL's stack effects are cancelled out by the callee's
	 * RETURN, so drop the recorded stack ops here.  Annotated
	 * intra-function calls keep theirs but get converted to JUMP, see
	 * read_intra_function_calls().
	 */
	remove_insn_ops(insn);

	return annotate_call_site(file, insn, sibling);
}
1387
add_retpoline_call(struct objtool_file * file,struct instruction * insn)1388 static int add_retpoline_call(struct objtool_file *file, struct instruction *insn)
1389 {
1390 /*
1391 * Retpoline calls/jumps are really dynamic calls/jumps in disguise,
1392 * so convert them accordingly.
1393 */
1394 switch (insn->type) {
1395 case INSN_CALL:
1396 insn->type = INSN_CALL_DYNAMIC;
1397 break;
1398 case INSN_JUMP_UNCONDITIONAL:
1399 insn->type = INSN_JUMP_DYNAMIC;
1400 break;
1401 case INSN_JUMP_CONDITIONAL:
1402 insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;
1403 break;
1404 default:
1405 return 0;
1406 }
1407
1408 insn->retpoline_safe = true;
1409
1410 /*
1411 * Whatever stack impact regular CALLs have, should be undone
1412 * by the RETURN of the called function.
1413 *
1414 * Annotated intra-function calls retain the stack_ops but
1415 * are converted to JUMP, see read_intra_function_calls().
1416 */
1417 remove_insn_ops(insn);
1418
1419 return annotate_call_site(file, insn, false);
1420 }
1421
/*
 * A tail call to a return thunk is really just a RETURN in disguise:
 * retype @insn and, when @add, queue it for .return_sites generation.
 */
static void add_return_call(struct objtool_file *file, struct instruction *insn, bool add)
{
	insn->type = INSN_RETURN;
	insn->retpoline_safe = true;

	if (!add)
		return;

	list_add_tail(&insn->call_node, &file->return_thunk_list);
}
1434
is_first_func_insn(struct objtool_file * file,struct instruction * insn,struct symbol * sym)1435 static bool is_first_func_insn(struct objtool_file *file,
1436 struct instruction *insn, struct symbol *sym)
1437 {
1438 if (insn->offset == sym->offset)
1439 return true;
1440
1441 /* Allow direct CALL/JMP past ENDBR */
1442 if (opts.ibt) {
1443 struct instruction *prev = prev_insn_same_sym(file, insn);
1444
1445 if (prev && prev->type == INSN_ENDBR &&
1446 insn->offset == sym->offset + prev->len)
1447 return true;
1448 }
1449
1450 return false;
1451 }
1452
1453 /*
1454 * A sibling call is a tail-call to another symbol -- to differentiate from a
1455 * recursive tail-call which is to the same symbol.
1456 */
jump_is_sibling_call(struct objtool_file * file,struct instruction * from,struct instruction * to)1457 static bool jump_is_sibling_call(struct objtool_file *file,
1458 struct instruction *from, struct instruction *to)
1459 {
1460 struct symbol *fs = from->sym;
1461 struct symbol *ts = to->sym;
1462
1463 /* Not a sibling call if from/to a symbol hole */
1464 if (!fs || !ts)
1465 return false;
1466
1467 /* Not a sibling call if not targeting the start of a symbol. */
1468 if (!is_first_func_insn(file, to, ts))
1469 return false;
1470
1471 /* Disallow sibling calls into STT_NOTYPE */
1472 if (ts->type == STT_NOTYPE)
1473 return false;
1474
1475 /* Must not be self to be a sibling */
1476 return fs->pfunc != ts->pfunc;
1477 }
1478
/*
 * Find the destination instructions for all jumps.
 *
 * For each static jump, resolve its target: either directly from the
 * instruction encoding, or through its relocation.  Jumps to retpoline
 * and return thunks, as well as sibling calls, are dispatched to their
 * respective handlers instead of getting a jump_dest.
 */
static int add_jump_destinations(struct objtool_file *file)
{
	struct instruction *insn, *jump_dest;
	struct reloc *reloc;
	struct section *dest_sec;
	unsigned long dest_off;
	int ret;

	for_each_insn(file, insn) {
		struct symbol *func = insn_func(insn);

		if (insn->jump_dest) {
			/*
			 * handle_group_alt() may have previously set
			 * 'jump_dest' for some alternatives.
			 */
			continue;
		}
		if (!is_static_jump(insn))
			continue;

		/* Classify the target by how the jump is encoded/relocated. */
		reloc = insn_reloc(file, insn);
		if (!reloc) {
			/* No reloc: target encoded in the instruction itself. */
			dest_sec = insn->sec;
			dest_off = arch_jump_destination(insn);
		} else if (reloc->sym->type == STT_SECTION) {
			dest_sec = reloc->sym->sec;
			dest_off = arch_dest_reloc_offset(reloc_addend(reloc));
		} else if (reloc->sym->retpoline_thunk) {
			ret = add_retpoline_call(file, insn);
			if (ret)
				return ret;
			continue;
		} else if (reloc->sym->return_thunk) {
			add_return_call(file, insn, true);
			continue;
		} else if (func) {
			/*
			 * External sibling call or internal sibling call with
			 * STT_FUNC reloc.
			 */
			ret = add_call_dest(file, insn, reloc->sym, true);
			if (ret)
				return ret;
			continue;
		} else if (reloc->sym->sec->idx) {
			/* Non-function symbol in a real section. */
			dest_sec = reloc->sym->sec;
			dest_off = reloc->sym->sym.st_value +
				   arch_dest_reloc_offset(reloc_addend(reloc));
		} else {
			/* non-func asm code jumping to another file */
			continue;
		}

		jump_dest = find_insn(file, dest_sec, dest_off);
		if (!jump_dest) {
			struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off);

			/*
			 * This is a special case for retbleed_untrain_ret().
			 * It jumps to __x86_return_thunk(), but objtool
			 * can't find the thunk's starting RET
			 * instruction, because the RET is also in the
			 * middle of another instruction. Objtool only
			 * knows about the outer instruction.
			 */
			if (sym && sym->embedded_insn) {
				add_return_call(file, insn, false);
				continue;
			}

			/*
			 * GCOV/KCOV dead code can jump to the end of the
			 * function/section.
			 */
			if (file->ignore_unreachables && func &&
			    dest_sec == insn->sec &&
			    dest_off == func->offset + func->len)
				continue;

			ERROR_INSN(insn, "can't find jump dest instruction at %s+0x%lx",
				   dest_sec->name, dest_off);
			return -1;
		}

		/*
		 * An intra-TU jump in retpoline.o might not have a relocation
		 * for its jump dest, in which case the above
		 * add_{retpoline,return}_call() didn't happen.
		 */
		if (jump_dest->sym && jump_dest->offset == jump_dest->sym->offset) {
			if (jump_dest->sym->retpoline_thunk) {
				ret = add_retpoline_call(file, insn);
				if (ret)
					return ret;
				continue;
			}
			if (jump_dest->sym->return_thunk) {
				add_return_call(file, insn, true);
				continue;
			}
		}

		/*
		 * Cross-function jump.
		 */
		if (func && insn_func(jump_dest) && func != insn_func(jump_dest)) {

			/*
			 * For GCC 8+, create parent/child links for any cold
			 * subfunctions. This is _mostly_ redundant with a
			 * similar initialization in read_symbols().
			 *
			 * If a function has aliases, we want the *first* such
			 * function in the symbol table to be the subfunction's
			 * parent. In that case we overwrite the
			 * initialization done in read_symbols().
			 *
			 * However this code can't completely replace the
			 * read_symbols() code because this doesn't detect the
			 * case where the parent function's only reference to a
			 * subfunction is through a jump table.
			 */
			if (!strstr(func->name, ".cold") &&
			    strstr(insn_func(jump_dest)->name, ".cold")) {
				func->cfunc = insn_func(jump_dest);
				insn_func(jump_dest)->pfunc = func;
			}
		}

		if (jump_is_sibling_call(file, insn, jump_dest)) {
			/*
			 * Internal sibling call without reloc or with
			 * STT_SECTION reloc.
			 */
			ret = add_call_dest(file, insn, insn_func(jump_dest), true);
			if (ret)
				return ret;
			continue;
		}

		insn->jump_dest = jump_dest;
	}

	return 0;
}
1628
/*
 * Resolve the symbol called at @sec+@offset: prefer a proper STT_FUNC at
 * that offset, fall back to any symbol covering it.
 */
static struct symbol *find_call_destination(struct section *sec, unsigned long offset)
{
	return find_func_by_offset(sec, offset) ?:
	       find_symbol_by_offset(sec, offset);
}
1639
/*
 * Find the destination instructions for all calls.
 *
 * For each INSN_CALL, resolve the callee either from the instruction
 * encoding (no reloc) or from the relocation, and hand the result to
 * add_call_dest()/add_retpoline_call() for annotation.
 */
static int add_call_destinations(struct objtool_file *file)
{
	struct instruction *insn;
	unsigned long dest_off;
	struct symbol *dest;
	struct reloc *reloc;
	int ret;

	for_each_insn(file, insn) {
		struct symbol *func = insn_func(insn);
		if (insn->type != INSN_CALL)
			continue;

		reloc = insn_reloc(file, insn);
		if (!reloc) {
			/* Direct call, target encoded in the instruction. */
			dest_off = arch_jump_destination(insn);
			dest = find_call_destination(insn->sec, dest_off);

			ret = add_call_dest(file, insn, dest, false);
			if (ret)
				return ret;

			if (func && func->ignore)
				continue;

			/* A call into a symbol hole must be annotated. */
			if (!insn_call_dest(insn)) {
				ERROR_INSN(insn, "unannotated intra-function call");
				return -1;
			}

			if (func && insn_call_dest(insn)->type != STT_FUNC) {
				ERROR_INSN(insn, "unsupported call to non-function");
				return -1;
			}

		} else if (reloc->sym->type == STT_SECTION) {
			/* Section-relative reloc: resolve symbol by offset. */
			dest_off = arch_dest_reloc_offset(reloc_addend(reloc));
			dest = find_call_destination(reloc->sym->sec, dest_off);
			if (!dest) {
				ERROR_INSN(insn, "can't find call dest symbol at %s+0x%lx",
					   reloc->sym->sec->name, dest_off);
				return -1;
			}

			ret = add_call_dest(file, insn, dest, false);
			if (ret)
				return ret;

		} else if (reloc->sym->retpoline_thunk) {
			ret = add_retpoline_call(file, insn);
			if (ret)
				return ret;

		} else {
			/* Ordinary symbol reloc: the reloc symbol is the callee. */
			ret = add_call_dest(file, insn, reloc->sym, false);
			if (ret)
				return ret;
		}
	}

	return 0;
}
1705
1706 /*
1707 * The .alternatives section requires some extra special care over and above
1708 * other special sections because alternatives are patched in place.
1709 */
handle_group_alt(struct objtool_file * file,struct special_alt * special_alt,struct instruction * orig_insn,struct instruction ** new_insn)1710 static int handle_group_alt(struct objtool_file *file,
1711 struct special_alt *special_alt,
1712 struct instruction *orig_insn,
1713 struct instruction **new_insn)
1714 {
1715 struct instruction *last_new_insn = NULL, *insn, *nop = NULL;
1716 struct alt_group *orig_alt_group, *new_alt_group;
1717 unsigned long dest_off;
1718
1719 orig_alt_group = orig_insn->alt_group;
1720 if (!orig_alt_group) {
1721 struct instruction *last_orig_insn = NULL;
1722
1723 orig_alt_group = calloc(1, sizeof(*orig_alt_group));
1724 if (!orig_alt_group) {
1725 ERROR_GLIBC("calloc");
1726 return -1;
1727 }
1728 orig_alt_group->cfi = calloc(special_alt->orig_len,
1729 sizeof(struct cfi_state *));
1730 if (!orig_alt_group->cfi) {
1731 ERROR_GLIBC("calloc");
1732 return -1;
1733 }
1734
1735 insn = orig_insn;
1736 sec_for_each_insn_from(file, insn) {
1737 if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
1738 break;
1739
1740 insn->alt_group = orig_alt_group;
1741 last_orig_insn = insn;
1742 }
1743 orig_alt_group->orig_group = NULL;
1744 orig_alt_group->first_insn = orig_insn;
1745 orig_alt_group->last_insn = last_orig_insn;
1746 orig_alt_group->nop = NULL;
1747 orig_alt_group->ignore = orig_insn->ignore_alts;
1748 } else {
1749 if (orig_alt_group->last_insn->offset + orig_alt_group->last_insn->len -
1750 orig_alt_group->first_insn->offset != special_alt->orig_len) {
1751 ERROR_INSN(orig_insn, "weirdly overlapping alternative! %ld != %d",
1752 orig_alt_group->last_insn->offset +
1753 orig_alt_group->last_insn->len -
1754 orig_alt_group->first_insn->offset,
1755 special_alt->orig_len);
1756 return -1;
1757 }
1758 }
1759
1760 new_alt_group = calloc(1, sizeof(*new_alt_group));
1761 if (!new_alt_group) {
1762 ERROR_GLIBC("calloc");
1763 return -1;
1764 }
1765
1766 if (special_alt->new_len < special_alt->orig_len) {
1767 /*
1768 * Insert a fake nop at the end to make the replacement
1769 * alt_group the same size as the original. This is needed to
1770 * allow propagate_alt_cfi() to do its magic. When the last
1771 * instruction affects the stack, the instruction after it (the
1772 * nop) will propagate the new state to the shared CFI array.
1773 */
1774 nop = calloc(1, sizeof(*nop));
1775 if (!nop) {
1776 ERROR_GLIBC("calloc");
1777 return -1;
1778 }
1779 memset(nop, 0, sizeof(*nop));
1780
1781 nop->sec = special_alt->new_sec;
1782 nop->offset = special_alt->new_off + special_alt->new_len;
1783 nop->len = special_alt->orig_len - special_alt->new_len;
1784 nop->type = INSN_NOP;
1785 nop->sym = orig_insn->sym;
1786 nop->alt_group = new_alt_group;
1787 }
1788
1789 if (!special_alt->new_len) {
1790 *new_insn = nop;
1791 goto end;
1792 }
1793
1794 insn = *new_insn;
1795 sec_for_each_insn_from(file, insn) {
1796 struct reloc *alt_reloc;
1797
1798 if (insn->offset >= special_alt->new_off + special_alt->new_len)
1799 break;
1800
1801 last_new_insn = insn;
1802
1803 insn->sym = orig_insn->sym;
1804 insn->alt_group = new_alt_group;
1805
1806 /*
1807 * Since alternative replacement code is copy/pasted by the
1808 * kernel after applying relocations, generally such code can't
1809 * have relative-address relocation references to outside the
1810 * .altinstr_replacement section, unless the arch's
1811 * alternatives code can adjust the relative offsets
1812 * accordingly.
1813 */
1814 alt_reloc = insn_reloc(file, insn);
1815 if (alt_reloc && arch_pc_relative_reloc(alt_reloc) &&
1816 !arch_support_alt_relocation(special_alt, insn, alt_reloc)) {
1817
1818 ERROR_INSN(insn, "unsupported relocation in alternatives section");
1819 return -1;
1820 }
1821
1822 if (!is_static_jump(insn))
1823 continue;
1824
1825 if (!insn->immediate)
1826 continue;
1827
1828 dest_off = arch_jump_destination(insn);
1829 if (dest_off == special_alt->new_off + special_alt->new_len) {
1830 insn->jump_dest = next_insn_same_sec(file, orig_alt_group->last_insn);
1831 if (!insn->jump_dest) {
1832 ERROR_INSN(insn, "can't find alternative jump destination");
1833 return -1;
1834 }
1835 }
1836 }
1837
1838 if (!last_new_insn) {
1839 ERROR_FUNC(special_alt->new_sec, special_alt->new_off,
1840 "can't find last new alternative instruction");
1841 return -1;
1842 }
1843
1844 end:
1845 new_alt_group->orig_group = orig_alt_group;
1846 new_alt_group->first_insn = *new_insn;
1847 new_alt_group->last_insn = last_new_insn;
1848 new_alt_group->nop = nop;
1849 new_alt_group->ignore = (*new_insn)->ignore_alts;
1850 new_alt_group->cfi = orig_alt_group->cfi;
1851 return 0;
1852 }
1853
1854 /*
1855 * A jump table entry can either convert a nop to a jump or a jump to a nop.
1856 * If the original instruction is a jump, make the alt entry an effective nop
1857 * by just skipping the original instruction.
1858 */
handle_jump_alt(struct objtool_file * file,struct special_alt * special_alt,struct instruction * orig_insn,struct instruction ** new_insn)1859 static int handle_jump_alt(struct objtool_file *file,
1860 struct special_alt *special_alt,
1861 struct instruction *orig_insn,
1862 struct instruction **new_insn)
1863 {
1864 if (orig_insn->type != INSN_JUMP_UNCONDITIONAL &&
1865 orig_insn->type != INSN_NOP) {
1866
1867 ERROR_INSN(orig_insn, "unsupported instruction at jump label");
1868 return -1;
1869 }
1870
1871 if (opts.hack_jump_label && special_alt->key_addend & 2) {
1872 struct reloc *reloc = insn_reloc(file, orig_insn);
1873
1874 if (reloc)
1875 set_reloc_type(file->elf, reloc, R_NONE);
1876
1877 if (elf_write_insn(file->elf, orig_insn->sec,
1878 orig_insn->offset, orig_insn->len,
1879 arch_nop_insn(orig_insn->len))) {
1880 return -1;
1881 }
1882
1883 orig_insn->type = INSN_NOP;
1884 }
1885
1886 if (orig_insn->type == INSN_NOP) {
1887 if (orig_insn->len == 2)
1888 file->jl_nop_short++;
1889 else
1890 file->jl_nop_long++;
1891
1892 return 0;
1893 }
1894
1895 if (orig_insn->len == 2)
1896 file->jl_short++;
1897 else
1898 file->jl_long++;
1899
1900 *new_insn = next_insn_same_sec(file, orig_insn);
1901 return 0;
1902 }
1903
/*
 * Read all the special sections which have alternate instructions which can be
 * patched in or redirected to at runtime. Each instruction having alternate
 * instruction(s) has them added to its insn->alts list, which will be
 * traversed in validate_branch().
 */
static int add_special_section_alts(struct objtool_file *file)
{
	struct list_head special_alts;
	struct instruction *orig_insn, *new_insn;
	struct special_alt *special_alt, *tmp;
	struct alternative *alt;
	int ret;

	/* Collect all special-section entries into a local list. */
	if (special_get_alts(file->elf, &special_alts))
		return -1;

	list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {

		orig_insn = find_insn(file, special_alt->orig_sec,
				      special_alt->orig_off);
		if (!orig_insn) {
			ERROR_FUNC(special_alt->orig_sec, special_alt->orig_off,
				   "special: can't find orig instruction");
			return -1;
		}

		/* Group alts with new_len == 0 have no replacement insns. */
		new_insn = NULL;
		if (!special_alt->group || special_alt->new_len) {
			new_insn = find_insn(file, special_alt->new_sec,
					     special_alt->new_off);
			if (!new_insn) {
				ERROR_FUNC(special_alt->new_sec, special_alt->new_off,
					   "special: can't find new instruction");
				return -1;
			}
		}

		if (special_alt->group) {
			if (!special_alt->orig_len) {
				/*
				 * NOTE(review): this continue skips list_del()
				 * and free(), leaking the entry -- confirm
				 * that is intentional (objtool exits soon
				 * after errors anyway).
				 */
				ERROR_INSN(orig_insn, "empty alternative entry");
				continue;
			}

			ret = handle_group_alt(file, special_alt, orig_insn,
					       &new_insn);
			if (ret)
				return ret;

		} else if (special_alt->jump_or_nop) {
			ret = handle_jump_alt(file, special_alt, orig_insn,
					      &new_insn);
			if (ret)
				return ret;
		}

		/* Prepend the alternative onto orig_insn's alts list. */
		alt = calloc(1, sizeof(*alt));
		if (!alt) {
			ERROR_GLIBC("calloc");
			return -1;
		}

		alt->insn = new_insn;
		alt->next = orig_insn->alts;
		orig_insn->alts = alt;

		list_del(&special_alt->list);
		free(special_alt);
	}

	if (opts.stats) {
		printf("jl\\\tNOP\tJMP\n");
		printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short);
		printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long);
	}

	return 0;
}
1982
/*
 * Translate a jump-table entry reloc into the target's offset within the
 * reloc symbol's section.  Weak default; arch code may override.
 */
__weak unsigned long arch_jump_table_sym_offset(struct reloc *reloc, struct reloc *table)
{
	return reloc->sym->offset + reloc_addend(reloc);
}
1987
/*
 * Walk the jump table attached to dynamic-jump @insn, adding every valid
 * in-function target as an alternative branch destination on insn->alts.
 * Returns 0 on success, -1 if no usable table entry was found.
 */
static int add_jump_table(struct objtool_file *file, struct instruction *insn)
{
	unsigned long table_size = insn_jump_table_size(insn);
	struct symbol *pfunc = insn_func(insn)->pfunc;
	struct reloc *table = insn_jump_table(insn);
	struct instruction *dest_insn;
	unsigned int prev_offset = 0;
	struct reloc *reloc = table;
	struct alternative *alt;
	unsigned long sym_offset;

	/*
	 * Each @reloc is a switch table relocation which points to the target
	 * instruction.
	 */
	for_each_reloc_from(table->sec, reloc) {

		/* Check for the end of the table: */
		if (table_size && reloc_offset(reloc) - reloc_offset(table) >= table_size)
			break;
		/* Running into the head of the next table also ends this one. */
		if (reloc != table && is_jump_table(reloc))
			break;

		/* Make sure the table entries are consecutive: */
		if (prev_offset && reloc_offset(reloc) != prev_offset + arch_reloc_size(reloc))
			break;

		sym_offset = arch_jump_table_sym_offset(reloc, table);

		/* Detect function pointers from contiguous objects: */
		if (reloc->sym->sec == pfunc->sec && sym_offset == pfunc->offset)
			break;

		/*
		 * Clang sometimes leaves dangling unused jump table entries
		 * which point to the end of the function. Ignore them.
		 */
		if (reloc->sym->sec == pfunc->sec &&
		    sym_offset == pfunc->offset + pfunc->len)
			goto next;

		dest_insn = find_insn(file, reloc->sym->sec, sym_offset);
		if (!dest_insn)
			break;

		/* Make sure the destination is in the same function: */
		if (!insn_func(dest_insn) || insn_func(dest_insn)->pfunc != pfunc)
			break;

		/* Prepend this target onto the instruction's alts list. */
		alt = calloc(1, sizeof(*alt));
		if (!alt) {
			ERROR_GLIBC("calloc");
			return -1;
		}

		alt->insn = dest_insn;
		alt->next = insn->alts;
		insn->alts = alt;
next:
		prev_offset = reloc_offset(reloc);
	}

	/* prev_offset == 0 means not even one entry was accepted. */
	if (!prev_offset) {
		ERROR_INSN(insn, "can't find switch jump table");
		return -1;
	}

	return 0;
}
2057
/*
 * find_jump_table() - Given a dynamic jump, find the switch jump table
 * associated with it.
 *
 * On success, stores the table reloc and its size on @insn
 * (_jump_table/_jump_table_size); otherwise leaves @insn untouched.
 */
static void find_jump_table(struct objtool_file *file, struct symbol *func,
			    struct instruction *insn)
{
	struct reloc *table_reloc;
	struct instruction *dest_insn, *orig_insn = insn;
	unsigned long table_size;
	unsigned long sym_offset;

	/*
	 * Backward search using the @first_jump_src links, these help avoid
	 * much of the 'in between' code. Which avoids us getting confused by
	 * it.
	 */
	for (;
	     insn && insn_func(insn) && insn_func(insn)->pfunc == func;
	     insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) {

		/* Another dynamic jump means we crossed into its table setup. */
		if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
			break;

		/* allow small jumps within the range */
		if (insn->type == INSN_JUMP_UNCONDITIONAL &&
		    insn->jump_dest &&
		    (insn->jump_dest->offset <= insn->offset ||
		     insn->jump_dest->offset > orig_insn->offset))
			break;

		table_reloc = arch_find_switch_table(file, insn, &table_size);
		if (!table_reloc)
			continue;

		sym_offset = table_reloc->sym->offset + reloc_addend(table_reloc);

		/* The first table entry must land back inside @func. */
		dest_insn = find_insn(file, table_reloc->sym->sec, sym_offset);
		if (!dest_insn || !insn_func(dest_insn) || insn_func(dest_insn)->pfunc != func)
			continue;

		set_jump_table(table_reloc);
		orig_insn->_jump_table = table_reloc;
		orig_insn->_jump_table_size = table_size;

		break;
	}
}
2106
/*
 * First pass: Mark the head of each jump table so that in the next pass,
 * we know when a given jump table ends and the next one starts.
 */
static void mark_func_jump_tables(struct objtool_file *file,
				  struct symbol *func)
{
	struct instruction *insn, *last = NULL;

	func_for_each_insn(file, func, insn) {
		if (!last)
			last = insn;

		/*
		 * Store back-pointers for unconditional forward jumps such
		 * that find_jump_table() can back-track using those and
		 * avoid some potentially confusing code.
		 */
		if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
		    insn->offset > last->offset &&
		    insn->jump_dest->offset > insn->offset &&
		    !insn->jump_dest->first_jump_src) {

			insn->jump_dest->first_jump_src = insn;
			/* Track the furthest point reached via forward jumps. */
			last = insn->jump_dest;
		}

		if (insn->type != INSN_JUMP_DYNAMIC)
			continue;

		find_jump_table(file, func, insn);
	}
}
2140
add_func_jump_tables(struct objtool_file * file,struct symbol * func)2141 static int add_func_jump_tables(struct objtool_file *file,
2142 struct symbol *func)
2143 {
2144 struct instruction *insn;
2145 int ret;
2146
2147 func_for_each_insn(file, func, insn) {
2148 if (!insn_jump_table(insn))
2149 continue;
2150
2151 ret = add_jump_table(file, insn);
2152 if (ret)
2153 return ret;
2154 }
2155
2156 return 0;
2157 }
2158
2159 /*
2160 * For some switch statements, gcc generates a jump table in the .rodata
2161 * section which contains a list of addresses within the function to jump to.
2162 * This finds these jump tables and adds them to the insn->alts lists.
2163 */
add_jump_table_alts(struct objtool_file * file)2164 static int add_jump_table_alts(struct objtool_file *file)
2165 {
2166 struct symbol *func;
2167 int ret;
2168
2169 if (!file->rodata)
2170 return 0;
2171
2172 for_each_sym(file, func) {
2173 if (func->type != STT_FUNC)
2174 continue;
2175
2176 mark_func_jump_tables(file, func);
2177 ret = add_func_jump_tables(file, func);
2178 if (ret)
2179 return ret;
2180 }
2181
2182 return 0;
2183 }
2184
set_func_state(struct cfi_state * state)2185 static void set_func_state(struct cfi_state *state)
2186 {
2187 state->cfa = initial_func_cfi.cfa;
2188 memcpy(&state->regs, &initial_func_cfi.regs,
2189 CFI_NUM_REGS * sizeof(struct cfi_reg));
2190 state->stack_size = initial_func_cfi.cfa.offset;
2191 state->type = UNWIND_HINT_TYPE_CALL;
2192 }
2193
/*
 * Parse the .discard.unwind_hints section: attach a CFI state (or a
 * save/restore marker) to each hinted instruction.  Returns 0 on success,
 * -1 on malformed hints or unresolvable targets.
 */
static int read_unwind_hints(struct objtool_file *file)
{
	struct cfi_state cfi = init_cfi;
	struct section *sec;
	struct unwind_hint *hint;
	struct instruction *insn;
	struct reloc *reloc;
	unsigned long offset;
	int i;

	sec = find_section_by_name(file->elf, ".discard.unwind_hints");
	if (!sec)
		return 0;

	if (!sec->rsec) {
		ERROR("missing .rela.discard.unwind_hints section");
		return -1;
	}

	/* The section must be a whole number of unwind_hint records. */
	if (sec->sh.sh_size % sizeof(struct unwind_hint)) {
		ERROR("struct unwind_hint size mismatch");
		return -1;
	}

	file->hints = true;

	for (i = 0; i < sec->sh.sh_size / sizeof(struct unwind_hint); i++) {
		hint = (struct unwind_hint *)sec->data->d_buf + i;

		/* Each record has a reloc locating the hinted instruction. */
		reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
		if (!reloc) {
			ERROR("can't find reloc for unwind_hints[%d]", i);
			return -1;
		}

		if (reloc->sym->type == STT_SECTION) {
			offset = reloc_addend(reloc);
		} else if (reloc->sym->local_label) {
			offset = reloc->sym->offset;
		} else {
			ERROR("unexpected relocation symbol type in %s", sec->rsec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, offset);
		if (!insn) {
			ERROR("can't find insn for unwind_hints[%d]", i);
			return -1;
		}

		insn->hint = true;

		/* UNDEFINED: unwinding from here is explicitly impossible. */
		if (hint->type == UNWIND_HINT_TYPE_UNDEFINED) {
			insn->cfi = &force_undefined_cfi;
			continue;
		}

		/* SAVE marks a point whose state a later RESTORE reuses. */
		if (hint->type == UNWIND_HINT_TYPE_SAVE) {
			insn->hint = false;
			insn->save = true;
			continue;
		}

		if (hint->type == UNWIND_HINT_TYPE_RESTORE) {
			insn->restore = true;
			continue;
		}

		if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) {
			struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset);

			if (sym && sym->bind == STB_GLOBAL) {
				/* Global entry points need ENDBR under IBT. */
				if (opts.ibt && insn->type != INSN_ENDBR && !insn->noendbr) {
					ERROR_INSN(insn, "UNWIND_HINT_IRET_REGS without ENDBR");
					return -1;
				}
			}
		}

		/* FUNC hints use the canonical per-function CFI. */
		if (hint->type == UNWIND_HINT_TYPE_FUNC) {
			insn->cfi = &func_cfi;
			continue;
		}

		/* Start from the insn's existing CFI, then apply the hint. */
		if (insn->cfi)
			cfi = *(insn->cfi);

		if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) {
			ERROR_INSN(insn, "unsupported unwind_hint sp base reg %d", hint->sp_reg);
			return -1;
		}

		/* Hint fields are in the target's endianness. */
		cfi.cfa.offset = bswap_if_needed(file->elf, hint->sp_offset);
		cfi.type = hint->type;
		cfi.signal = hint->signal;

		/* Deduplicate CFI states through the shared hash. */
		insn->cfi = cfi_hash_find_or_add(&cfi);
	}

	return 0;
}
2295
read_annotate(struct objtool_file * file,int (* func)(struct objtool_file * file,int type,struct instruction * insn))2296 static int read_annotate(struct objtool_file *file,
2297 int (*func)(struct objtool_file *file, int type, struct instruction *insn))
2298 {
2299 struct section *sec;
2300 struct instruction *insn;
2301 struct reloc *reloc;
2302 uint64_t offset;
2303 int type, ret;
2304
2305 sec = find_section_by_name(file->elf, ".discard.annotate_insn");
2306 if (!sec)
2307 return 0;
2308
2309 if (!sec->rsec)
2310 return 0;
2311
2312 if (sec->sh.sh_entsize != 8) {
2313 static bool warned = false;
2314 if (!warned && opts.verbose) {
2315 WARN("%s: dodgy linker, sh_entsize != 8", sec->name);
2316 warned = true;
2317 }
2318 sec->sh.sh_entsize = 8;
2319 }
2320
2321 for_each_reloc(sec->rsec, reloc) {
2322 type = *(u32 *)(sec->data->d_buf + (reloc_idx(reloc) * sec->sh.sh_entsize) + 4);
2323 type = bswap_if_needed(file->elf, type);
2324
2325 offset = reloc->sym->offset + reloc_addend(reloc);
2326 insn = find_insn(file, reloc->sym->sec, offset);
2327
2328 if (!insn) {
2329 ERROR("bad .discard.annotate_insn entry: %d of type %d", reloc_idx(reloc), type);
2330 return -1;
2331 }
2332
2333 ret = func(file, type, insn);
2334 if (ret < 0)
2335 return ret;
2336 }
2337
2338 return 0;
2339 }
2340
__annotate_early(struct objtool_file * file,int type,struct instruction * insn)2341 static int __annotate_early(struct objtool_file *file, int type, struct instruction *insn)
2342 {
2343 switch (type) {
2344
2345 /* Must be before add_special_section_alts() */
2346 case ANNOTYPE_IGNORE_ALTS:
2347 insn->ignore_alts = true;
2348 break;
2349
2350 /*
2351 * Must be before read_unwind_hints() since that needs insn->noendbr.
2352 */
2353 case ANNOTYPE_NOENDBR:
2354 insn->noendbr = 1;
2355 break;
2356
2357 default:
2358 break;
2359 }
2360
2361 return 0;
2362 }
2363
__annotate_ifc(struct objtool_file * file,int type,struct instruction * insn)2364 static int __annotate_ifc(struct objtool_file *file, int type, struct instruction *insn)
2365 {
2366 unsigned long dest_off;
2367
2368 if (type != ANNOTYPE_INTRA_FUNCTION_CALL)
2369 return 0;
2370
2371 if (insn->type != INSN_CALL) {
2372 ERROR_INSN(insn, "intra_function_call not a direct call");
2373 return -1;
2374 }
2375
2376 /*
2377 * Treat intra-function CALLs as JMPs, but with a stack_op.
2378 * See add_call_destinations(), which strips stack_ops from
2379 * normal CALLs.
2380 */
2381 insn->type = INSN_JUMP_UNCONDITIONAL;
2382
2383 dest_off = arch_jump_destination(insn);
2384 insn->jump_dest = find_insn(file, insn->sec, dest_off);
2385 if (!insn->jump_dest) {
2386 ERROR_INSN(insn, "can't find call dest at %s+0x%lx",
2387 insn->sec->name, dest_off);
2388 return -1;
2389 }
2390
2391 return 0;
2392 }
2393
__annotate_late(struct objtool_file * file,int type,struct instruction * insn)2394 static int __annotate_late(struct objtool_file *file, int type, struct instruction *insn)
2395 {
2396 struct symbol *sym;
2397
2398 switch (type) {
2399 case ANNOTYPE_NOENDBR:
2400 /* early */
2401 break;
2402
2403 case ANNOTYPE_RETPOLINE_SAFE:
2404 if (insn->type != INSN_JUMP_DYNAMIC &&
2405 insn->type != INSN_CALL_DYNAMIC &&
2406 insn->type != INSN_RETURN &&
2407 insn->type != INSN_NOP) {
2408 ERROR_INSN(insn, "retpoline_safe hint not an indirect jump/call/ret/nop");
2409 return -1;
2410 }
2411
2412 insn->retpoline_safe = true;
2413 break;
2414
2415 case ANNOTYPE_INSTR_BEGIN:
2416 insn->instr++;
2417 break;
2418
2419 case ANNOTYPE_INSTR_END:
2420 insn->instr--;
2421 break;
2422
2423 case ANNOTYPE_UNRET_BEGIN:
2424 insn->unret = 1;
2425 break;
2426
2427 case ANNOTYPE_IGNORE_ALTS:
2428 /* early */
2429 break;
2430
2431 case ANNOTYPE_INTRA_FUNCTION_CALL:
2432 /* ifc */
2433 break;
2434
2435 case ANNOTYPE_REACHABLE:
2436 insn->dead_end = false;
2437 break;
2438
2439 case ANNOTYPE_NOCFI:
2440 sym = insn->sym;
2441 if (!sym) {
2442 ERROR_INSN(insn, "dodgy NOCFI annotation");
2443 return -1;
2444 }
2445 insn->sym->nocfi = 1;
2446 break;
2447
2448 default:
2449 ERROR_INSN(insn, "Unknown annotation type: %d", type);
2450 return -1;
2451 }
2452
2453 return 0;
2454 }
2455
2456 /*
2457 * Return true if name matches an instrumentation function, where calls to that
2458 * function from noinstr code can safely be removed, but compilers won't do so.
2459 */
static bool is_profiling_func(const char *name)
{
	/*
	 * Many compilers cannot disable KCOV with a function attribute, so
	 * any __sanitizer_cov_*() call is considered removable coverage
	 * instrumentation.
	 */
	return strncmp(name, "__sanitizer_cov_", 16) == 0;
}
2470
classify_symbols(struct objtool_file * file)2471 static int classify_symbols(struct objtool_file *file)
2472 {
2473 struct symbol *func;
2474
2475 for_each_sym(file, func) {
2476 if (func->type == STT_NOTYPE && strstarts(func->name, ".L"))
2477 func->local_label = true;
2478
2479 if (func->bind != STB_GLOBAL)
2480 continue;
2481
2482 if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR,
2483 strlen(STATIC_CALL_TRAMP_PREFIX_STR)))
2484 func->static_call_tramp = true;
2485
2486 if (arch_is_retpoline(func))
2487 func->retpoline_thunk = true;
2488
2489 if (arch_is_rethunk(func))
2490 func->return_thunk = true;
2491
2492 if (arch_is_embedded_insn(func))
2493 func->embedded_insn = true;
2494
2495 if (arch_ftrace_match(func->name))
2496 func->fentry = true;
2497
2498 if (is_profiling_func(func->name))
2499 func->profiling_func = true;
2500 }
2501
2502 return 0;
2503 }
2504
mark_rodata(struct objtool_file * file)2505 static void mark_rodata(struct objtool_file *file)
2506 {
2507 struct section *sec;
2508 bool found = false;
2509
2510 /*
2511 * Search for the following rodata sections, each of which can
2512 * potentially contain jump tables:
2513 *
2514 * - .rodata: can contain GCC switch tables
2515 * - .rodata.<func>: same, if -fdata-sections is being used
2516 * - .data.rel.ro.c_jump_table: contains C annotated jump tables
2517 *
2518 * .rodata.str1.* sections are ignored; they don't contain jump tables.
2519 */
2520 for_each_sec(file, sec) {
2521 if ((!strncmp(sec->name, ".rodata", 7) &&
2522 !strstr(sec->name, ".str1.")) ||
2523 !strncmp(sec->name, ".data.rel.ro", 12)) {
2524 sec->rodata = true;
2525 found = true;
2526 }
2527 }
2528
2529 file->rodata = found;
2530 }
2531
/*
 * Decode and annotate everything objtool needs from the object file.
 * The passes run in strict dependency order -- several rely on state
 * established by earlier ones, as noted inline.
 */
static int decode_sections(struct objtool_file *file)
{
	int ret;

	mark_rodata(file);

	ret = init_pv_ops(file);
	if (ret)
		return ret;

	/*
	 * Must be before add_{jump_call}_destination.
	 */
	ret = classify_symbols(file);
	if (ret)
		return ret;

	ret = decode_instructions(file);
	if (ret)
		return ret;

	ret = add_ignores(file);
	if (ret)
		return ret;

	add_uaccess_safe(file);

	ret = read_annotate(file, __annotate_early);
	if (ret)
		return ret;

	/*
	 * Must be before add_jump_destinations(), which depends on 'func'
	 * being set for alternatives, to enable proper sibling call detection.
	 */
	if (opts.stackval || opts.orc || opts.uaccess || opts.noinstr) {
		ret = add_special_section_alts(file);
		if (ret)
			return ret;
	}

	ret = add_jump_destinations(file);
	if (ret)
		return ret;

	/*
	 * Must be before add_call_destination(); it changes INSN_CALL to
	 * INSN_JUMP.
	 */
	ret = read_annotate(file, __annotate_ifc);
	if (ret)
		return ret;

	ret = add_call_destinations(file);
	if (ret)
		return ret;

	ret = add_jump_table_alts(file);
	if (ret)
		return ret;

	ret = read_unwind_hints(file);
	if (ret)
		return ret;

	/*
	 * Must be after add_call_destinations() such that it can override
	 * dead_end_function() marks.
	 */
	ret = read_annotate(file, __annotate_late);
	if (ret)
		return ret;

	return 0;
}
2607
is_special_call(struct instruction * insn)2608 static bool is_special_call(struct instruction *insn)
2609 {
2610 if (insn->type == INSN_CALL) {
2611 struct symbol *dest = insn_call_dest(insn);
2612
2613 if (!dest)
2614 return false;
2615
2616 if (dest->fentry || dest->embedded_insn)
2617 return true;
2618 }
2619
2620 return false;
2621 }
2622
has_modified_stack_frame(struct instruction * insn,struct insn_state * state)2623 static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state)
2624 {
2625 struct cfi_state *cfi = &state->cfi;
2626 int i;
2627
2628 if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap)
2629 return true;
2630
2631 if (cfi->cfa.offset != initial_func_cfi.cfa.offset)
2632 return true;
2633
2634 if (cfi->stack_size != initial_func_cfi.cfa.offset)
2635 return true;
2636
2637 for (i = 0; i < CFI_NUM_REGS; i++) {
2638 if (cfi->regs[i].base != initial_func_cfi.regs[i].base ||
2639 cfi->regs[i].offset != initial_func_cfi.regs[i].offset)
2640 return true;
2641 }
2642
2643 return false;
2644 }
2645
check_reg_frame_pos(const struct cfi_reg * reg,int expected_offset)2646 static bool check_reg_frame_pos(const struct cfi_reg *reg,
2647 int expected_offset)
2648 {
2649 return reg->base == CFI_CFA &&
2650 reg->offset == expected_offset;
2651 }
2652
has_valid_stack_frame(struct insn_state * state)2653 static bool has_valid_stack_frame(struct insn_state *state)
2654 {
2655 struct cfi_state *cfi = &state->cfi;
2656
2657 if (cfi->cfa.base == CFI_BP &&
2658 check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) &&
2659 check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8))
2660 return true;
2661
2662 if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP)
2663 return true;
2664
2665 return false;
2666 }
2667
update_cfi_state_regs(struct instruction * insn,struct cfi_state * cfi,struct stack_op * op)2668 static int update_cfi_state_regs(struct instruction *insn,
2669 struct cfi_state *cfi,
2670 struct stack_op *op)
2671 {
2672 struct cfi_reg *cfa = &cfi->cfa;
2673
2674 if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
2675 return 0;
2676
2677 /* push */
2678 if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
2679 cfa->offset += 8;
2680
2681 /* pop */
2682 if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
2683 cfa->offset -= 8;
2684
2685 /* add immediate to sp */
2686 if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD &&
2687 op->dest.reg == CFI_SP && op->src.reg == CFI_SP)
2688 cfa->offset -= op->src.offset;
2689
2690 return 0;
2691 }
2692
save_reg(struct cfi_state * cfi,unsigned char reg,int base,int offset)2693 static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset)
2694 {
2695 if (arch_callee_saved_reg(reg) &&
2696 cfi->regs[reg].base == CFI_UNDEFINED) {
2697 cfi->regs[reg].base = base;
2698 cfi->regs[reg].offset = offset;
2699 }
2700 }
2701
restore_reg(struct cfi_state * cfi,unsigned char reg)2702 static void restore_reg(struct cfi_state *cfi, unsigned char reg)
2703 {
2704 cfi->regs[reg].base = initial_func_cfi.regs[reg].base;
2705 cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset;
2706 }
2707
2708 /*
2709 * A note about DRAP stack alignment:
2710 *
2711 * GCC has the concept of a DRAP register, which is used to help keep track of
2712 * the stack pointer when aligning the stack. r10 or r13 is used as the DRAP
2713 * register. The typical DRAP pattern is:
2714 *
2715 * 4c 8d 54 24 08 lea 0x8(%rsp),%r10
2716 * 48 83 e4 c0 and $0xffffffffffffffc0,%rsp
2717 * 41 ff 72 f8 pushq -0x8(%r10)
2718 * 55 push %rbp
2719 * 48 89 e5 mov %rsp,%rbp
2720 * (more pushes)
2721 * 41 52 push %r10
2722 * ...
2723 * 41 5a pop %r10
2724 * (more pops)
2725 * 5d pop %rbp
2726 * 49 8d 62 f8 lea -0x8(%r10),%rsp
2727 * c3 retq
2728 *
2729 * There are some variations in the epilogues, like:
2730 *
2731 * 5b pop %rbx
2732 * 41 5a pop %r10
2733 * 41 5c pop %r12
2734 * 41 5d pop %r13
2735 * 41 5e pop %r14
2736 * c9 leaveq
2737 * 49 8d 62 f8 lea -0x8(%r10),%rsp
2738 * c3 retq
2739 *
2740 * and:
2741 *
2742 * 4c 8b 55 e8 mov -0x18(%rbp),%r10
2743 * 48 8b 5d e0 mov -0x20(%rbp),%rbx
2744 * 4c 8b 65 f0 mov -0x10(%rbp),%r12
2745 * 4c 8b 6d f8 mov -0x8(%rbp),%r13
2746 * c9 leaveq
2747 * 49 8d 62 f8 lea -0x8(%r10),%rsp
2748 * c3 retq
2749 *
2750 * Sometimes r13 is used as the DRAP register, in which case it's saved and
2751 * restored beforehand:
2752 *
2753 * 41 55 push %r13
2754 * 4c 8d 6c 24 10 lea 0x10(%rsp),%r13
2755 * 48 83 e4 f0 and $0xfffffffffffffff0,%rsp
2756 * ...
2757 * 49 8d 65 f0 lea -0x10(%r13),%rsp
2758 * 41 5d pop %r13
2759 * c3 retq
2760 */
/*
 * Apply one stack operation @op of @insn to @cfi: track the CFA base and
 * offset, the current stack size, callee-saved register save slots, and
 * the DRAP stack-realignment state machine (see the big comment above for
 * the DRAP code patterns being matched).
 *
 * Returns 0 on success, 1 for a non-fatal warning, -1 on error.
 */
static int update_cfi_state(struct instruction *insn,
			    struct instruction *next_insn,
			    struct cfi_state *cfi, struct stack_op *op)
{
	struct cfi_reg *cfa = &cfi->cfa;
	struct cfi_reg *regs = cfi->regs;

	/* ignore UNWIND_HINT_UNDEFINED regions */
	if (cfi->force_undefined)
		return 0;

	/* stack operations don't make sense with an undefined CFA */
	if (cfa->base == CFI_UNDEFINED) {
		if (insn_func(insn)) {
			WARN_INSN(insn, "undefined stack state");
			return 1;
		}
		return 0;
	}

	if (cfi->type == UNWIND_HINT_TYPE_REGS ||
	    cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL)
		return update_cfi_state_regs(insn, cfi, op);

	switch (op->dest.type) {

	case OP_DEST_REG:
		switch (op->src.type) {

		case OP_SRC_REG:
			if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
			    cfa->base == CFI_SP &&
			    check_reg_frame_pos(&regs[CFI_BP], -cfa->offset)) {

				/* mov %rsp, %rbp */
				cfa->base = op->dest.reg;
				cfi->bp_scratch = false;
			}

			else if (op->src.reg == CFI_SP &&
				 op->dest.reg == CFI_BP && cfi->drap) {

				/* drap: mov %rsp, %rbp */
				regs[CFI_BP].base = CFI_BP;
				regs[CFI_BP].offset = -cfi->stack_size;
				cfi->bp_scratch = false;
			}

			else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {

				/*
				 * mov %rsp, %reg
				 *
				 * This is needed for the rare case where GCC
				 * does:
				 *
				 *   mov    %rsp, %rax
				 *   ...
				 *   mov    %rax, %rsp
				 */
				cfi->vals[op->dest.reg].base = CFI_CFA;
				cfi->vals[op->dest.reg].offset = -cfi->stack_size;
			}

			else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
				 (cfa->base == CFI_BP || cfa->base == cfi->drap_reg)) {

				/*
				 * mov %rbp, %rsp
				 *
				 * Restore the original stack pointer (Clang).
				 */
				cfi->stack_size = -cfi->regs[CFI_BP].offset;
			}

			else if (op->dest.reg == cfa->base) {

				/* mov %reg, %rsp */
				if (cfa->base == CFI_SP &&
				    cfi->vals[op->src.reg].base == CFI_CFA) {

					/*
					 * This is needed for the rare case
					 * where GCC does something dumb like:
					 *
					 *   lea    0x8(%rsp), %rcx
					 *   ...
					 *   mov    %rcx, %rsp
					 */
					cfa->offset = -cfi->vals[op->src.reg].offset;
					cfi->stack_size = cfa->offset;

				} else if (cfa->base == CFI_SP &&
					   cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
					   cfi->vals[op->src.reg].offset == cfa->offset) {

					/*
					 * Stack swizzle:
					 *
					 * 1: mov %rsp, (%[tos])
					 * 2: mov %[tos], %rsp
					 *    ...
					 * 3: pop %rsp
					 *
					 * Where:
					 *
					 * 1 - places a pointer to the previous
					 *     stack at the Top-of-Stack of the
					 *     new stack.
					 *
					 * 2 - switches to the new stack.
					 *
					 * 3 - pops the Top-of-Stack to restore
					 *     the original stack.
					 *
					 * Note: we set base to SP_INDIRECT
					 * here and preserve offset. Therefore
					 * when the unwinder reaches ToS it
					 * will dereference SP and then add the
					 * offset to find the next frame, IOW:
					 * (%rsp) + offset.
					 */
					cfa->base = CFI_SP_INDIRECT;

				} else {
					cfa->base = CFI_UNDEFINED;
					cfa->offset = 0;
				}
			}

			else if (op->dest.reg == CFI_SP &&
				 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
				 cfi->vals[op->src.reg].offset == cfa->offset) {

				/*
				 * The same stack swizzle case 2) as above. But
				 * because we can't change cfa->base, case 3)
				 * will become a regular POP. Pretend we're a
				 * PUSH so things don't go unbalanced.
				 */
				cfi->stack_size += 8;
			}


			break;

		case OP_SRC_ADD:
			if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) {

				/* add imm, %rsp */
				cfi->stack_size -= op->src.offset;
				if (cfa->base == CFI_SP)
					cfa->offset -= op->src.offset;
				break;
			}

			if (op->dest.reg == CFI_BP && op->src.reg == CFI_SP &&
			    insn->sym->frame_pointer) {
				/* addi.d fp,sp,imm on LoongArch */
				if (cfa->base == CFI_SP && cfa->offset == op->src.offset) {
					cfa->base = CFI_BP;
					cfa->offset = 0;
				}
				break;
			}

			if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {
				/* addi.d sp,fp,imm on LoongArch */
				if (cfa->base == CFI_BP && cfa->offset == 0) {
					if (insn->sym->frame_pointer) {
						cfa->base = CFI_SP;
						cfa->offset = -op->src.offset;
					}
				} else {
					/* lea disp(%rbp), %rsp */
					cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
				}
				break;
			}

			if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {

				/* drap: lea disp(%rsp), %drap */
				cfi->drap_reg = op->dest.reg;

				/*
				 * lea disp(%rsp), %reg
				 *
				 * This is needed for the rare case where GCC
				 * does something dumb like:
				 *
				 *   lea    0x8(%rsp), %rcx
				 *   ...
				 *   mov    %rcx, %rsp
				 */
				cfi->vals[op->dest.reg].base = CFI_CFA;
				cfi->vals[op->dest.reg].offset = \
					-cfi->stack_size + op->src.offset;

				break;
			}

			if (cfi->drap && op->dest.reg == CFI_SP &&
			    op->src.reg == cfi->drap_reg) {

				/* drap: lea disp(%drap), %rsp */
				cfa->base = CFI_SP;
				cfa->offset = cfi->stack_size = -op->src.offset;
				cfi->drap_reg = CFI_UNDEFINED;
				cfi->drap = false;
				break;
			}

			/* Tolerated only when the very next insn carries an unwind hint. */
			if (op->dest.reg == cfi->cfa.base && !(next_insn && next_insn->hint)) {
				WARN_INSN(insn, "unsupported stack register modification");
				return -1;
			}

			break;

		case OP_SRC_AND:
			if (op->dest.reg != CFI_SP ||
			    (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
			    (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
				WARN_INSN(insn, "unsupported stack pointer realignment");
				return -1;
			}

			if (cfi->drap_reg != CFI_UNDEFINED) {
				/* drap: and imm, %rsp */
				cfa->base = cfi->drap_reg;
				cfa->offset = cfi->stack_size = 0;
				cfi->drap = true;
			}

			/*
			 * Older versions of GCC (4.8ish) realign the stack
			 * without DRAP, with a frame pointer.
			 */

			break;

		case OP_SRC_POP:
		case OP_SRC_POPF:
			if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) {

				/* pop %rsp; # restore from a stack swizzle */
				cfa->base = CFI_SP;
				break;
			}

			if (!cfi->drap && op->dest.reg == cfa->base) {

				/* pop %rbp */
				cfa->base = CFI_SP;
			}

			if (cfi->drap && cfa->base == CFI_BP_INDIRECT &&
			    op->dest.reg == cfi->drap_reg &&
			    cfi->drap_offset == -cfi->stack_size) {

				/* drap: pop %drap */
				cfa->base = cfi->drap_reg;
				cfa->offset = 0;
				cfi->drap_offset = -1;

			} else if (cfi->stack_size == -regs[op->dest.reg].offset) {

				/* pop %reg */
				restore_reg(cfi, op->dest.reg);
			}

			cfi->stack_size -= 8;
			if (cfa->base == CFI_SP)
				cfa->offset -= 8;

			break;

		case OP_SRC_REG_INDIRECT:
			if (!cfi->drap && op->dest.reg == cfa->base &&
			    op->dest.reg == CFI_BP) {

				/* mov disp(%rsp), %rbp */
				cfa->base = CFI_SP;
				cfa->offset = cfi->stack_size;
			}

			if (cfi->drap && op->src.reg == CFI_BP &&
			    op->src.offset == cfi->drap_offset) {

				/* drap: mov disp(%rbp), %drap */
				cfa->base = cfi->drap_reg;
				cfa->offset = 0;
				cfi->drap_offset = -1;
			}

			if (cfi->drap && op->src.reg == CFI_BP &&
			    op->src.offset == regs[op->dest.reg].offset) {

				/* drap: mov disp(%rbp), %reg */
				restore_reg(cfi, op->dest.reg);

			} else if (op->src.reg == cfa->base &&
			    op->src.offset == regs[op->dest.reg].offset + cfa->offset) {

				/* mov disp(%rbp), %reg */
				/* mov disp(%rsp), %reg */
				restore_reg(cfi, op->dest.reg);

			} else if (op->src.reg == CFI_SP &&
				   op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) {

				/* mov disp(%rsp), %reg */
				restore_reg(cfi, op->dest.reg);
			}

			break;

		default:
			WARN_INSN(insn, "unknown stack-related instruction");
			return -1;
		}

		break;

	case OP_DEST_PUSH:
	case OP_DEST_PUSHF:
		cfi->stack_size += 8;
		if (cfa->base == CFI_SP)
			cfa->offset += 8;

		if (op->src.type != OP_SRC_REG)
			break;

		if (cfi->drap) {
			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {

				/* drap: push %drap */
				cfa->base = CFI_BP_INDIRECT;
				cfa->offset = -cfi->stack_size;

				/* save drap so we know when to restore it */
				cfi->drap_offset = -cfi->stack_size;

			} else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) {

				/* drap: push %rbp */
				cfi->stack_size = 0;

			} else {

				/* drap: push %reg */
				save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size);
			}

		} else {

			/* push %reg */
			save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size);
		}

		/* detect when asm code uses rbp as a scratch register */
		if (opts.stackval && insn_func(insn) && op->src.reg == CFI_BP &&
		    cfa->base != CFI_BP)
			cfi->bp_scratch = true;
		break;

	case OP_DEST_REG_INDIRECT:

		if (cfi->drap) {
			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {

				/* drap: mov %drap, disp(%rbp) */
				cfa->base = CFI_BP_INDIRECT;
				cfa->offset = op->dest.offset;

				/* save drap offset so we know when to restore it */
				cfi->drap_offset = op->dest.offset;
			} else {

				/* drap: mov reg, disp(%rbp) */
				save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset);
			}

		} else if (op->dest.reg == cfa->base) {

			/* mov reg, disp(%rbp) */
			/* mov reg, disp(%rsp) */
			save_reg(cfi, op->src.reg, CFI_CFA,
				 op->dest.offset - cfi->cfa.offset);

		} else if (op->dest.reg == CFI_SP) {

			/* mov reg, disp(%rsp) */
			save_reg(cfi, op->src.reg, CFI_CFA,
				 op->dest.offset - cfi->stack_size);

		} else if (op->src.reg == CFI_SP && op->dest.offset == 0) {

			/* mov %rsp, (%reg); # setup a stack swizzle. */
			cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT;
			cfi->vals[op->dest.reg].offset = cfa->offset;
		}

		break;

	case OP_DEST_MEM:
		if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
			WARN_INSN(insn, "unknown stack-related memory operation");
			return -1;
		}

		/* pop mem */
		cfi->stack_size -= 8;
		if (cfa->base == CFI_SP)
			cfa->offset -= 8;

		break;

	default:
		WARN_INSN(insn, "unknown stack-related instruction");
		return -1;
	}

	return 0;
}
3187
3188 /*
3189 * The stack layouts of alternatives instructions can sometimes diverge when
3190 * they have stack modifications. That's fine as long as the potential stack
3191 * layouts don't conflict at any given potential instruction boundary.
3192 *
3193 * Flatten the CFIs of the different alternative code streams (both original
3194 * and replacement) into a single shared CFI array which can be used to detect
3195 * conflicts and nicely feed a linear array of ORC entries to the unwinder.
3196 */
propagate_alt_cfi(struct objtool_file * file,struct instruction * insn)3197 static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn)
3198 {
3199 struct cfi_state **alt_cfi;
3200 int group_off;
3201
3202 if (!insn->alt_group)
3203 return 0;
3204
3205 if (!insn->cfi) {
3206 WARN("CFI missing");
3207 return -1;
3208 }
3209
3210 alt_cfi = insn->alt_group->cfi;
3211 group_off = insn->offset - insn->alt_group->first_insn->offset;
3212
3213 if (!alt_cfi[group_off]) {
3214 alt_cfi[group_off] = insn->cfi;
3215 } else {
3216 if (cficmp(alt_cfi[group_off], insn->cfi)) {
3217 struct alt_group *orig_group = insn->alt_group->orig_group ?: insn->alt_group;
3218 struct instruction *orig = orig_group->first_insn;
3219 WARN_INSN(orig, "stack layout conflict in alternatives: %s",
3220 offstr(insn->sec, insn->offset));
3221 return -1;
3222 }
3223 }
3224
3225 return 0;
3226 }
3227
/*
 * Apply all of @insn's stack operations to the CFI state and, for
 * alternatives with uaccess checking enabled, track the uaccess (AC flag)
 * state across PUSHF/POPF pairs.
 */
static int handle_insn_ops(struct instruction *insn,
			   struct instruction *next_insn,
			   struct insn_state *state)
{
	struct stack_op *op;
	int ret;

	for (op = insn->stack_ops; op; op = op->next) {

		ret = update_cfi_state(insn, next_insn, &state->cfi, op);
		if (ret)
			return ret;

		/* uaccess tracking below only applies to alternatives. */
		if (!opts.uaccess || !insn->alt_group)
			continue;

		if (op->dest.type == OP_DEST_PUSHF) {
			/*
			 * uaccess_stack is a bit-stack of saved uaccess states;
			 * bit 0 is a bottom-of-stack marker, so shift the
			 * current uaccess bit in on every PUSHF.
			 */
			if (!state->uaccess_stack) {
				state->uaccess_stack = 1;
			} else if (state->uaccess_stack >> 31) {
				WARN_INSN(insn, "PUSHF stack exhausted");
				return 1;
			}
			state->uaccess_stack <<= 1;
			state->uaccess_stack |= state->uaccess;
		}

		if (op->src.type == OP_SRC_POPF) {
			if (state->uaccess_stack) {
				state->uaccess = state->uaccess_stack & 1;
				state->uaccess_stack >>= 1;
				/* only the bottom marker remains: stack is empty */
				if (state->uaccess_stack == 1)
					state->uaccess_stack = 0;
			}
		}
	}

	return 0;
}
3267
/*
 * Compare the CFI state recorded at @insn against @cfi2, warning about
 * the first category of mismatch found (CFA, saved registers, hint type,
 * or DRAP state).  Returns true only when the states fully agree.
 */
static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
{
	struct cfi_state *cfi1 = insn->cfi;
	int i;

	if (!cfi1) {
		WARN("CFI missing");
		return false;
	}

	if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) {

		WARN_INSN(insn, "stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
			  cfi1->cfa.base, cfi1->cfa.offset,
			  cfi2->cfa.base, cfi2->cfa.offset);
		return false;

	}

	if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
		/* Report each individual register that differs. */
		for (i = 0; i < CFI_NUM_REGS; i++) {

			if (!memcmp(&cfi1->regs[i], &cfi2->regs[i], sizeof(struct cfi_reg)))
				continue;

			WARN_INSN(insn, "stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
				  i, cfi1->regs[i].base, cfi1->regs[i].offset,
				  i, cfi2->regs[i].base, cfi2->regs[i].offset);
		}
		return false;
	}

	if (cfi1->type != cfi2->type) {

		WARN_INSN(insn, "stack state mismatch: type1=%d type2=%d",
			  cfi1->type, cfi2->type);
		return false;
	}

	/* drap_reg/drap_offset only matter while DRAP is active. */
	if (cfi1->drap != cfi2->drap ||
	    (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) ||
	    (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) {

		WARN_INSN(insn, "stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
			  cfi1->drap, cfi1->drap_reg, cfi1->drap_offset,
			  cfi2->drap, cfi2->drap_reg, cfi2->drap_offset);
		return false;
	}

	return true;
}
3319
func_uaccess_safe(struct symbol * func)3320 static inline bool func_uaccess_safe(struct symbol *func)
3321 {
3322 if (func)
3323 return func->uaccess_safe;
3324
3325 return false;
3326 }
3327
/*
 * Best-effort name of a call's destination, for use in warning messages.
 * For pv_ops[] calls this returns a pointer to a static buffer, so the
 * result is only valid until the next call (fine for single-threaded
 * warning output).
 */
static inline const char *call_dest_name(struct instruction *insn)
{
	static char pvname[19];
	struct reloc *reloc;
	int idx;

	if (insn_call_dest(insn))
		return insn_call_dest(insn)->name;

	/* NOTE(review): passes a NULL file; assumes insn_reloc() tolerates it -- confirm. */
	reloc = insn_reloc(NULL, insn);
	if (reloc && !strcmp(reloc->sym->name, "pv_ops")) {
		idx = (reloc_addend(reloc) / sizeof(void *));
		snprintf(pvname, sizeof(pvname), "pv_ops[%d]", idx);
		return pvname;
	}

	return "{dynamic}";
}
3346
/*
 * Check whether an indirect call through pv_ops[] can only reach noinstr
 * code.  A slot is "clean" when every registered target lives in a noinstr
 * section; the result is cached so each slot's targets are reported once.
 */
static bool pv_call_dest(struct objtool_file *file, struct instruction *insn)
{
	struct symbol *target;
	struct reloc *reloc;
	int idx;

	reloc = insn_reloc(file, insn);
	if (!reloc || strcmp(reloc->sym->name, "pv_ops"))
		return false;

	idx = (arch_dest_reloc_offset(reloc_addend(reloc)) / sizeof(void *));

	/* Cached verdict from an earlier call for this slot. */
	if (file->pv_ops[idx].clean)
		return true;

	/* Assume clean, then veto on the first non-noinstr target found. */
	file->pv_ops[idx].clean = true;

	list_for_each_entry(target, &file->pv_ops[idx].targets, pv_target) {
		if (!target->sec->noinstr) {
			WARN("pv_ops[%d]: %s", idx, target->name);
			file->pv_ops[idx].clean = false;
		}
	}

	return file->pv_ops[idx].clean;
}
3373
noinstr_call_dest(struct objtool_file * file,struct instruction * insn,struct symbol * func)3374 static inline bool noinstr_call_dest(struct objtool_file *file,
3375 struct instruction *insn,
3376 struct symbol *func)
3377 {
3378 /*
3379 * We can't deal with indirect function calls at present;
3380 * assume they're instrumented.
3381 */
3382 if (!func) {
3383 if (file->pv_ops)
3384 return pv_call_dest(file, insn);
3385
3386 return false;
3387 }
3388
3389 /*
3390 * If the symbol is from a noinstr section; we good.
3391 */
3392 if (func->sec->noinstr)
3393 return true;
3394
3395 /*
3396 * If the symbol is a static_call trampoline, we can't tell.
3397 */
3398 if (func->static_call_tramp)
3399 return true;
3400
3401 /*
3402 * The __ubsan_handle_*() calls are like WARN(), they only happen when
3403 * something 'BAD' happened. At the risk of taking the machine down,
3404 * let them proceed to get the message out.
3405 */
3406 if (!strncmp(func->name, "__ubsan_handle_", 15))
3407 return true;
3408
3409 return false;
3410 }
3411
validate_call(struct objtool_file * file,struct instruction * insn,struct insn_state * state)3412 static int validate_call(struct objtool_file *file,
3413 struct instruction *insn,
3414 struct insn_state *state)
3415 {
3416 if (state->noinstr && state->instr <= 0 &&
3417 !noinstr_call_dest(file, insn, insn_call_dest(insn))) {
3418 WARN_INSN(insn, "call to %s() leaves .noinstr.text section", call_dest_name(insn));
3419 return 1;
3420 }
3421
3422 if (state->uaccess && !func_uaccess_safe(insn_call_dest(insn))) {
3423 WARN_INSN(insn, "call to %s() with UACCESS enabled", call_dest_name(insn));
3424 return 1;
3425 }
3426
3427 if (state->df) {
3428 WARN_INSN(insn, "call to %s() with DF set", call_dest_name(insn));
3429 return 1;
3430 }
3431
3432 return 0;
3433 }
3434
/*
 * A sibling call (tail-call) must leave the stack frame untouched and
 * otherwise obeys the regular call rules.
 */
static int validate_sibling_call(struct objtool_file *file,
				 struct instruction *insn,
				 struct insn_state *state)
{
	if (insn_func(insn) && has_modified_stack_frame(insn, state)) {
		WARN_INSN(insn, "sibling call from callable instruction with modified stack frame");
		return 1;
	}

	return validate_call(file, insn, state);
}
3446
validate_return(struct symbol * func,struct instruction * insn,struct insn_state * state)3447 static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state)
3448 {
3449 if (state->noinstr && state->instr > 0) {
3450 WARN_INSN(insn, "return with instrumentation enabled");
3451 return 1;
3452 }
3453
3454 if (state->uaccess && !func_uaccess_safe(func)) {
3455 WARN_INSN(insn, "return with UACCESS enabled");
3456 return 1;
3457 }
3458
3459 if (!state->uaccess && func_uaccess_safe(func)) {
3460 WARN_INSN(insn, "return with UACCESS disabled from a UACCESS-safe function");
3461 return 1;
3462 }
3463
3464 if (state->df) {
3465 WARN_INSN(insn, "return with DF set");
3466 return 1;
3467 }
3468
3469 if (func && has_modified_stack_frame(insn, state)) {
3470 WARN_INSN(insn, "return with modified stack frame");
3471 return 1;
3472 }
3473
3474 if (state->cfi.bp_scratch) {
3475 WARN_INSN(insn, "BP used as a scratch register");
3476 return 1;
3477 }
3478
3479 return 0;
3480 }
3481
next_insn_to_validate(struct objtool_file * file,struct instruction * insn)3482 static struct instruction *next_insn_to_validate(struct objtool_file *file,
3483 struct instruction *insn)
3484 {
3485 struct alt_group *alt_group = insn->alt_group;
3486
3487 /*
3488 * Simulate the fact that alternatives are patched in-place. When the
3489 * end of a replacement alt_group is reached, redirect objtool flow to
3490 * the end of the original alt_group.
3491 *
3492 * insn->alts->insn -> alt_group->first_insn
3493 * ...
3494 * alt_group->last_insn
3495 * [alt_group->nop] -> next(orig_group->last_insn)
3496 */
3497 if (alt_group) {
3498 if (alt_group->nop) {
3499 /* ->nop implies ->orig_group */
3500 if (insn == alt_group->last_insn)
3501 return alt_group->nop;
3502 if (insn == alt_group->nop)
3503 goto next_orig;
3504 }
3505 if (insn == alt_group->last_insn && alt_group->orig_group)
3506 goto next_orig;
3507 }
3508
3509 return next_insn_same_sec(file, insn);
3510
3511 next_orig:
3512 return next_insn_same_sec(file, alt_group->orig_group->last_insn);
3513 }
3514
skip_alt_group(struct instruction * insn)3515 static bool skip_alt_group(struct instruction *insn)
3516 {
3517 struct instruction *alt_insn = insn->alts ? insn->alts->insn : NULL;
3518
3519 /* ANNOTATE_IGNORE_ALTERNATIVE */
3520 if (insn->alt_group && insn->alt_group->ignore)
3521 return true;
3522
3523 /*
3524 * For NOP patched with CLAC/STAC, only follow the latter to avoid
3525 * impossible code paths combining patched CLAC with unpatched STAC
3526 * or vice versa.
3527 *
3528 * ANNOTATE_IGNORE_ALTERNATIVE could have been used here, but Linus
3529 * requested not to do that to avoid hurting .s file readability
3530 * around CLAC/STAC alternative sites.
3531 */
3532
3533 if (!alt_insn)
3534 return false;
3535
3536 /* Don't override ASM_{CLAC,STAC}_UNSAFE */
3537 if (alt_insn->alt_group && alt_insn->alt_group->ignore)
3538 return false;
3539
3540 return alt_insn->type == INSN_CLAC || alt_insn->type == INSN_STAC;
3541 }
3542
3543 /*
3544 * Follow the branch starting at the given instruction, and recursively follow
3545 * any other branches (jumps). Meanwhile, track the frame pointer state at
3546 * each instruction and validate all the rules described in
3547 * tools/objtool/Documentation/objtool.txt.
3548 */
static int validate_branch(struct objtool_file *file, struct symbol *func,
			   struct instruction *insn, struct insn_state state)
{
	struct alternative *alt;
	struct instruction *next_insn, *prev_insn = NULL;
	struct section *sec;
	u8 visited;
	int ret;

	/* Functions explicitly marked to be ignored are not validated. */
	if (func && func->ignore)
		return 0;

	sec = insn->sec;

	/*
	 * Walk the straight-line instruction stream; branches are followed
	 * by recursing with a copy of 'state' (passed by value on purpose).
	 */
	while (1) {
		next_insn = next_insn_to_validate(file, insn);

		/* Crossed into a different function without a branch? */
		if (func && insn_func(insn) && func != insn_func(insn)->pfunc) {
			/* Ignore KCFI type preambles, which always fall through */
			if (!strncmp(func->name, "__cfi_", 6) ||
			    !strncmp(func->name, "__pfx_", 6) ||
			    !strncmp(func->name, "__pi___cfi_", 11) ||
			    !strncmp(func->name, "__pi___pfx_", 11))
				return 0;

			if (file->ignore_unreachables)
				return 0;

			WARN("%s() falls through to next function %s()",
			     func->name, insn_func(insn)->name);
			func->warned = 1;

			return 1;
		}

		/*
		 * Two "visited" bits per instruction, one per UACCESS state,
		 * so both UACCESS-on and UACCESS-off paths get explored.
		 */
		visited = VISITED_BRANCH << state.uaccess;
		if (insn->visited & VISITED_BRANCH_MASK) {
			/* Re-reaching an insn with a conflicting CFI is an error. */
			if (!insn->hint && !insn_cfi_match(insn, &state.cfi))
				return 1;

			/* Same UACCESS state already explored: nothing new here. */
			if (insn->visited & visited)
				return 0;
		} else {
			nr_insns_visited++;
		}

		/* Track the instrumentation-enable nesting depth in noinstr code. */
		if (state.noinstr)
			state.instr += insn->instr;

		if (insn->hint) {
			/*
			 * CFI restore hint: copy the CFI from the matching
			 * save hint earlier in the same function.
			 */
			if (insn->restore) {
				struct instruction *save_insn, *i;

				i = insn;
				save_insn = NULL;

				/* Scan backwards for the nearest save hint. */
				sym_for_each_insn_continue_reverse(file, func, i) {
					if (i->save) {
						save_insn = i;
						break;
					}
				}

				if (!save_insn) {
					WARN_INSN(insn, "no corresponding CFI save for CFI restore");
					return 1;
				}

				if (!save_insn->visited) {
					/*
					 * If the restore hint insn is at the
					 * beginning of a basic block and was
					 * branched to from elsewhere, and the
					 * save insn hasn't been visited yet,
					 * defer following this branch for now.
					 * It will be seen later via the
					 * straight-line path.
					 */
					if (!prev_insn)
						return 0;

					WARN_INSN(insn, "objtool isn't smart enough to handle this CFI save/restore combo");
					return 1;
				}

				insn->cfi = save_insn->cfi;
				nr_cfi_reused++;
			}

			/* A hint overrides the computed CFI state. */
			state.cfi = *insn->cfi;
		} else {
			/* XXX track if we actually changed state.cfi */

			/* Share the CFI object with the previous insn when unchanged. */
			if (prev_insn && !cficmp(prev_insn->cfi, &state.cfi)) {
				insn->cfi = prev_insn->cfi;
				nr_cfi_reused++;
			} else {
				insn->cfi = cfi_hash_find_or_add(&state.cfi);
			}
		}

		insn->visited |= visited;

		if (propagate_alt_cfi(file, insn))
			return 1;

		/* Validate every alternative replacement path from here. */
		if (insn->alts) {
			for (alt = insn->alts; alt; alt = alt->next) {
				ret = validate_branch(file, func, alt->insn, state);
				if (ret) {
					BT_INSN(insn, "(alt)");
					return ret;
				}
			}
		}

		/* Some alt groups terminate the original-stream walk. */
		if (skip_alt_group(insn))
			return 0;

		/* Apply this insn's stack/register ops to 'state'. */
		if (handle_insn_ops(insn, next_insn, &state))
			return 1;

		switch (insn->type) {

		case INSN_RETURN:
			return validate_return(func, insn, &state);

		case INSN_CALL:
		case INSN_CALL_DYNAMIC:
			ret = validate_call(file, insn, &state);
			if (ret)
				return ret;

			/* Frame-pointer builds require a frame before any call. */
			if (opts.stackval && func && !is_special_call(insn) &&
			    !has_valid_stack_frame(&state)) {
				WARN_INSN(insn, "call without frame pointer save/setup");
				return 1;
			}

			break;

		case INSN_JUMP_CONDITIONAL:
		case INSN_JUMP_UNCONDITIONAL:
			if (is_sibling_call(insn)) {
				ret = validate_sibling_call(file, insn, &state);
				if (ret)
					return ret;

			} else if (insn->jump_dest) {
				/* Follow the taken edge with a state copy. */
				ret = validate_branch(file, func,
						      insn->jump_dest, state);
				if (ret) {
					BT_INSN(insn, "(branch)");
					return ret;
				}
			}

			/* Unconditional jump: no fall-through to validate. */
			if (insn->type == INSN_JUMP_UNCONDITIONAL)
				return 0;

			break;

		case INSN_JUMP_DYNAMIC:
		case INSN_JUMP_DYNAMIC_CONDITIONAL:
			if (is_sibling_call(insn)) {
				ret = validate_sibling_call(file, insn, &state);
				if (ret)
					return ret;
			}

			if (insn->type == INSN_JUMP_DYNAMIC)
				return 0;

			break;

		case INSN_SYSCALL:
			/* Only allowed in non-function code or before a hinted insn. */
			if (func && (!next_insn || !next_insn->hint)) {
				WARN_INSN(insn, "unsupported instruction in callable function");
				return 1;
			}

			break;

		case INSN_SYSRET:
			if (func && (!next_insn || !next_insn->hint)) {
				WARN_INSN(insn, "unsupported instruction in callable function");
				return 1;
			}

			/* SYSRET does not fall through. */
			return 0;

		case INSN_STAC:
			if (!opts.uaccess)
				break;

			if (state.uaccess) {
				WARN_INSN(insn, "recursive UACCESS enable");
				return 1;
			}

			state.uaccess = true;
			break;

		case INSN_CLAC:
			if (!opts.uaccess)
				break;

			if (!state.uaccess && func) {
				WARN_INSN(insn, "redundant UACCESS disable");
				return 1;
			}

			if (func_uaccess_safe(func) && !state.uaccess_stack) {
				WARN_INSN(insn, "UACCESS-safe disables UACCESS");
				return 1;
			}

			state.uaccess = false;
			break;

		case INSN_STD:
			if (state.df) {
				WARN_INSN(insn, "recursive STD");
				return 1;
			}

			state.df = true;
			break;

		case INSN_CLD:
			if (!state.df && func) {
				WARN_INSN(insn, "redundant CLD");
				return 1;
			}

			state.df = false;
			break;

		default:
			break;
		}

		/* __noreturn call sites etc. terminate the walk. */
		if (insn->dead_end)
			return 0;

		if (!next_insn) {
			/* An undefined CFA means we've left trackable code; OK. */
			if (state.cfi.cfa.base == CFI_UNDEFINED)
				return 0;
			if (file->ignore_unreachables)
				return 0;

			WARN("%s%sunexpected end of section %s",
			     func ? func->name : "", func ? "(): " : "",
			     sec->name);
			return 1;
		}

		prev_insn = insn;
		insn = next_insn;
	}

	return 0;
}
3812
validate_unwind_hint(struct objtool_file * file,struct instruction * insn,struct insn_state * state)3813 static int validate_unwind_hint(struct objtool_file *file,
3814 struct instruction *insn,
3815 struct insn_state *state)
3816 {
3817 if (insn->hint && !insn->visited) {
3818 int ret = validate_branch(file, insn_func(insn), insn, *state);
3819 if (ret)
3820 BT_INSN(insn, "<=== (hint)");
3821 return ret;
3822 }
3823
3824 return 0;
3825 }
3826
validate_unwind_hints(struct objtool_file * file,struct section * sec)3827 static int validate_unwind_hints(struct objtool_file *file, struct section *sec)
3828 {
3829 struct instruction *insn;
3830 struct insn_state state;
3831 int warnings = 0;
3832
3833 if (!file->hints)
3834 return 0;
3835
3836 init_insn_state(file, &state, sec);
3837
3838 if (sec) {
3839 sec_for_each_insn(file, sec, insn)
3840 warnings += validate_unwind_hint(file, insn, &state);
3841 } else {
3842 for_each_insn(file, insn)
3843 warnings += validate_unwind_hint(file, insn, &state);
3844 }
3845
3846 return warnings;
3847 }
3848
3849 /*
3850 * Validate rethunk entry constraint: must untrain RET before the first RET.
3851 *
3852 * Follow every branch (intra-function) and ensure VALIDATE_UNRET_END comes
3853 * before an actual RET instruction.
3854 */
static int validate_unret(struct objtool_file *file, struct instruction *insn)
{
	struct instruction *next, *dest;
	int ret;

	/*
	 * Walk the straight-line stream; calls and branches recurse.
	 * Success (return 0) means every path hit VALIDATE_UNRET_END
	 * (or a terminating instruction) before any RET.
	 */
	for (;;) {
		next = next_insn_to_validate(file, insn);

		/* Path already explored. */
		if (insn->visited & VISITED_UNRET)
			return 0;

		insn->visited |= VISITED_UNRET;

		/* Alternative replacements must satisfy the constraint too. */
		if (insn->alts) {
			struct alternative *alt;
			for (alt = insn->alts; alt; alt = alt->next) {
				ret = validate_unret(file, alt->insn);
				if (ret) {
					BT_INSN(insn, "(alt)");
					return ret;
				}
			}
		}

		switch (insn->type) {

		case INSN_CALL_DYNAMIC:
		case INSN_JUMP_DYNAMIC:
		case INSN_JUMP_DYNAMIC_CONDITIONAL:
			/* Indirect targets can't be followed; reject before untraining. */
			WARN_INSN(insn, "early indirect call");
			return 1;

		case INSN_JUMP_UNCONDITIONAL:
		case INSN_JUMP_CONDITIONAL:
			if (!is_sibling_call(insn)) {
				if (!insn->jump_dest) {
					WARN_INSN(insn, "unresolved jump target after linking?!?");
					return 1;
				}
				/* Follow the taken edge. */
				ret = validate_unret(file, insn->jump_dest);
				if (ret) {
					BT_INSN(insn, "(branch%s)",
						insn->type == INSN_JUMP_CONDITIONAL ? "-cond" : "");
					return ret;
				}

				/* Unconditional jump: nothing falls through. */
				if (insn->type == INSN_JUMP_UNCONDITIONAL)
					return 0;

				break;
			}

			/* fallthrough */
		case INSN_CALL:
			/* Sibling calls and direct calls: validate the callee body. */
			dest = find_insn(file, insn_call_dest(insn)->sec,
					 insn_call_dest(insn)->offset);
			if (!dest) {
				WARN("Unresolved function after linking!?: %s",
				     insn_call_dest(insn)->name);
				return 1;
			}

			ret = validate_unret(file, dest);
			if (ret) {
				BT_INSN(insn, "(call)");
				return ret;
			}
			/*
			 * If a call returns without error, it must have seen UNTRAIN_RET.
			 * Therefore any non-error return is a success.
			 */
			return 0;

		case INSN_RETURN:
			/* A RET reached before untraining: the violation we're after. */
			WARN_INSN(insn, "RET before UNTRAIN");
			return 1;

		case INSN_SYSCALL:
			break;

		case INSN_SYSRET:
			/* Leaves the kernel; path is done. */
			return 0;

		case INSN_NOP:
			/* A retpoline-safe NOP marks VALIDATE_UNRET_END. */
			if (insn->retpoline_safe)
				return 0;
			break;

		default:
			break;
		}

		/* Dead ends (__noreturn call sites) terminate the path cleanly. */
		if (insn->dead_end)
			return 0;

		if (!next) {
			WARN_INSN(insn, "teh end!");
			return 1;
		}
		insn = next;
	}

	return 0;
}
3959
3960 /*
3961 * Validate that all branches starting at VALIDATE_UNRET_BEGIN encounter
3962 * VALIDATE_UNRET_END before RET.
3963 */
validate_unrets(struct objtool_file * file)3964 static int validate_unrets(struct objtool_file *file)
3965 {
3966 struct instruction *insn;
3967 int warnings = 0;
3968
3969 for_each_insn(file, insn) {
3970 if (!insn->unret)
3971 continue;
3972
3973 warnings += validate_unret(file, insn);
3974 }
3975
3976 return warnings;
3977 }
3978
validate_retpoline(struct objtool_file * file)3979 static int validate_retpoline(struct objtool_file *file)
3980 {
3981 struct instruction *insn;
3982 int warnings = 0;
3983
3984 for_each_insn(file, insn) {
3985 if (insn->type != INSN_JUMP_DYNAMIC &&
3986 insn->type != INSN_CALL_DYNAMIC &&
3987 insn->type != INSN_RETURN)
3988 continue;
3989
3990 if (insn->retpoline_safe)
3991 continue;
3992
3993 if (insn->sec->init)
3994 continue;
3995
3996 if (insn->type == INSN_RETURN) {
3997 if (opts.rethunk) {
3998 WARN_INSN(insn, "'naked' return found in MITIGATION_RETHUNK build");
3999 warnings++;
4000 }
4001 continue;
4002 }
4003
4004 WARN_INSN(insn, "indirect %s found in MITIGATION_RETPOLINE build",
4005 insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
4006 warnings++;
4007 }
4008
4009 if (!opts.cfi)
4010 return warnings;
4011
4012 /*
4013 * kCFI call sites look like:
4014 *
4015 * movl $(-0x12345678), %r10d
4016 * addl -4(%r11), %r10d
4017 * jz 1f
4018 * ud2
4019 * 1: cs call __x86_indirect_thunk_r11
4020 *
4021 * Verify all indirect calls are kCFI adorned by checking for the
4022 * UD2. Notably, doing __nocfi calls to regular (cfi) functions is
4023 * broken.
4024 */
4025 list_for_each_entry(insn, &file->retpoline_call_list, call_node) {
4026 struct symbol *sym = insn->sym;
4027
4028 if (sym && (sym->type == STT_NOTYPE ||
4029 sym->type == STT_FUNC) && !sym->nocfi) {
4030 struct instruction *prev =
4031 prev_insn_same_sym(file, insn);
4032
4033 if (!prev || prev->type != INSN_BUG) {
4034 WARN_INSN(insn, "no-cfi indirect call!");
4035 warnings++;
4036 }
4037 }
4038 }
4039
4040 return warnings;
4041 }
4042
is_kasan_insn(struct instruction * insn)4043 static bool is_kasan_insn(struct instruction *insn)
4044 {
4045 return (insn->type == INSN_CALL &&
4046 !strcmp(insn_call_dest(insn)->name, "__asan_handle_no_return"));
4047 }
4048
is_ubsan_insn(struct instruction * insn)4049 static bool is_ubsan_insn(struct instruction *insn)
4050 {
4051 return (insn->type == INSN_CALL &&
4052 !strcmp(insn_call_dest(insn)->name,
4053 "__ubsan_handle_builtin_unreachable"));
4054 }
4055
/*
 * Decide whether an instruction never visited by validate_branch() should
 * be excused from the "unreachable instruction" warning.
 */
static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn)
{
	struct symbol *func = insn_func(insn);
	struct instruction *prev_insn;
	int i;

	/* Padding/trap filler and explicitly-ignored functions are fine. */
	if (insn->type == INSN_NOP || insn->type == INSN_TRAP || (func && func->ignore))
		return true;

	/*
	 * Ignore alternative replacement instructions. This can happen
	 * when a whitelisted function uses one of the ALTERNATIVE macros.
	 */
	if (!strcmp(insn->sec->name, ".altinstr_replacement") ||
	    !strcmp(insn->sec->name, ".altinstr_aux"))
		return true;

	/*
	 * Whole archive runs might encounter dead code from weak symbols.
	 * This is where the linker will have dropped the weak symbol in
	 * favour of a regular symbol, but leaves the code in place.
	 *
	 * In this case we'll find a piece of code (whole function) that is not
	 * covered by a !section symbol. Ignore them.
	 */
	if (opts.link && !func) {
		/* size < 0 means "hole extends to end of section". */
		int size = find_symbol_hole_containing(insn->sec, insn->offset);
		/*
		 * NOTE(review): 'end' is computed before the size<0 check;
		 * it is only read when size > 0, so the wrapped value is
		 * never used — confirm when touching this.
		 */
		unsigned long end = insn->offset + size;

		if (!size) /* not a hole */
			return false;

		if (size < 0) /* hole until the end */
			return true;

		sec_for_each_insn_continue(file, insn) {
			/*
			 * If we reach a visited instruction at or before the
			 * end of the hole, ignore the unreachable.
			 */
			if (insn->visited)
				return true;

			if (insn->offset >= end)
				break;

			/*
			 * If this hole jumps to a .cold function, mark it ignore too.
			 */
			if (insn->jump_dest && insn_func(insn->jump_dest) &&
			    strstr(insn_func(insn->jump_dest)->name, ".cold")) {
				insn_func(insn->jump_dest)->ignore = true;
			}
		}

		return false;
	}

	if (!func)
		return false;

	/* Trampolines get patched at runtime; unreachability is expected. */
	if (func->static_call_tramp)
		return true;

	/*
	 * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
	 * __builtin_unreachable(). The BUG() macro has an unreachable() after
	 * the UD2, which causes GCC's undefined trap logic to emit another UD2
	 * (or occasionally a JMP to UD2).
	 *
	 * It may also insert a UD2 after calling a __noreturn function.
	 */
	prev_insn = prev_insn_same_sec(file, insn);
	if (prev_insn && prev_insn->dead_end &&
	    (insn->type == INSN_BUG ||
	     (insn->type == INSN_JUMP_UNCONDITIONAL &&
	      insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
		return true;

	/*
	 * Check if this (or a subsequent) instruction is related to
	 * CONFIG_UBSAN or CONFIG_KASAN.
	 *
	 * End the search at 5 instructions to avoid going into the weeds.
	 */
	for (i = 0; i < 5; i++) {

		if (is_kasan_insn(insn) || is_ubsan_insn(insn))
			return true;

		if (insn->type == INSN_JUMP_UNCONDITIONAL) {
			/* Only follow jumps that stay inside this function. */
			if (insn->jump_dest &&
			    insn_func(insn->jump_dest) == func) {
				insn = insn->jump_dest;
				continue;
			}

			break;
		}

		/* Stop at the end of the function. */
		if (insn->offset + insn->len >= func->offset + func->len)
			break;

		insn = next_insn_same_sec(file, insn);
	}

	return false;
}
4164
/*
 * If @func is preceded by exactly opts.prefix bytes of NOP padding, create
 * a __pfx_ symbol covering that padding and give it the function's CFI.
 * Returns -1 when no suitable padding exists.
 */
static int add_prefix_symbol(struct objtool_file *file, struct symbol *func)
{
	struct instruction *insn, *prev;
	struct cfi_state *cfi;

	insn = find_insn(file, func->sec, func->offset);
	if (!insn)
		return -1;

	/* Walk backwards through the NOPs immediately before the function. */
	for (prev = prev_insn_same_sec(file, insn);
	     prev;
	     prev = prev_insn_same_sec(file, prev)) {
		u64 offset;

		/* Padding must be all NOPs; anything else disqualifies. */
		if (prev->type != INSN_NOP)
			return -1;

		offset = func->offset - prev->offset;

		/* Walked past the expected prefix size: no exact match. */
		if (offset > opts.prefix)
			return -1;

		/* Not far enough back yet; keep scanning. */
		if (offset < opts.prefix)
			continue;

		/* Exactly opts.prefix bytes of NOPs: emit the prefix symbol. */
		elf_create_prefix_symbol(file->elf, func, opts.prefix);
		break;
	}

	if (!prev)
		return -1;

	if (!insn->cfi) {
		/*
		 * This can happen if stack validation isn't enabled or the
		 * function is annotated with STACK_FRAME_NON_STANDARD.
		 */
		return 0;
	}

	/* Propagate insn->cfi to the prefix code */
	cfi = cfi_hash_find_or_add(insn->cfi);
	for (; prev != insn; prev = next_insn_same_sec(file, prev))
		prev->cfi = cfi;

	return 0;
}
4212
add_prefix_symbols(struct objtool_file * file)4213 static int add_prefix_symbols(struct objtool_file *file)
4214 {
4215 struct section *sec;
4216 struct symbol *func;
4217
4218 for_each_sec(file, sec) {
4219 if (!(sec->sh.sh_flags & SHF_EXECINSTR))
4220 continue;
4221
4222 sec_for_each_sym(sec, func) {
4223 if (func->type != STT_FUNC)
4224 continue;
4225
4226 add_prefix_symbol(file, func);
4227 }
4228 }
4229
4230 return 0;
4231 }
4232
validate_symbol(struct objtool_file * file,struct section * sec,struct symbol * sym,struct insn_state * state)4233 static int validate_symbol(struct objtool_file *file, struct section *sec,
4234 struct symbol *sym, struct insn_state *state)
4235 {
4236 struct instruction *insn;
4237 int ret;
4238
4239 if (!sym->len) {
4240 WARN("%s() is missing an ELF size annotation", sym->name);
4241 return 1;
4242 }
4243
4244 if (sym->pfunc != sym || sym->alias != sym)
4245 return 0;
4246
4247 insn = find_insn(file, sec, sym->offset);
4248 if (!insn || insn->visited)
4249 return 0;
4250
4251 if (opts.uaccess)
4252 state->uaccess = sym->uaccess_safe;
4253
4254 ret = validate_branch(file, insn_func(insn), insn, *state);
4255 if (ret)
4256 BT_INSN(insn, "<=== (sym)");
4257 return ret;
4258 }
4259
validate_section(struct objtool_file * file,struct section * sec)4260 static int validate_section(struct objtool_file *file, struct section *sec)
4261 {
4262 struct insn_state state;
4263 struct symbol *func;
4264 int warnings = 0;
4265
4266 sec_for_each_sym(sec, func) {
4267 if (func->type != STT_FUNC)
4268 continue;
4269
4270 init_insn_state(file, &state, sec);
4271 set_func_state(&state.cfi);
4272
4273 warnings += validate_symbol(file, sec, func, &state);
4274 }
4275
4276 return warnings;
4277 }
4278
validate_noinstr_sections(struct objtool_file * file)4279 static int validate_noinstr_sections(struct objtool_file *file)
4280 {
4281 struct section *sec;
4282 int warnings = 0;
4283
4284 sec = find_section_by_name(file->elf, ".noinstr.text");
4285 if (sec) {
4286 warnings += validate_section(file, sec);
4287 warnings += validate_unwind_hints(file, sec);
4288 }
4289
4290 sec = find_section_by_name(file->elf, ".entry.text");
4291 if (sec) {
4292 warnings += validate_section(file, sec);
4293 warnings += validate_unwind_hints(file, sec);
4294 }
4295
4296 sec = find_section_by_name(file->elf, ".cpuidle.text");
4297 if (sec) {
4298 warnings += validate_section(file, sec);
4299 warnings += validate_unwind_hints(file, sec);
4300 }
4301
4302 return warnings;
4303 }
4304
validate_functions(struct objtool_file * file)4305 static int validate_functions(struct objtool_file *file)
4306 {
4307 struct section *sec;
4308 int warnings = 0;
4309
4310 for_each_sec(file, sec) {
4311 if (!(sec->sh.sh_flags & SHF_EXECINSTR))
4312 continue;
4313
4314 warnings += validate_section(file, sec);
4315 }
4316
4317 return warnings;
4318 }
4319
mark_endbr_used(struct instruction * insn)4320 static void mark_endbr_used(struct instruction *insn)
4321 {
4322 if (!list_empty(&insn->call_node))
4323 list_del_init(&insn->call_node);
4324 }
4325
noendbr_range(struct objtool_file * file,struct instruction * insn)4326 static bool noendbr_range(struct objtool_file *file, struct instruction *insn)
4327 {
4328 struct symbol *sym = find_symbol_containing(insn->sec, insn->offset-1);
4329 struct instruction *first;
4330
4331 if (!sym)
4332 return false;
4333
4334 first = find_insn(file, sym->sec, sym->offset);
4335 if (!first)
4336 return false;
4337
4338 if (first->type != INSN_ENDBR && !first->noendbr)
4339 return false;
4340
4341 return insn->offset == sym->offset + sym->len;
4342 }
4343
__validate_ibt_insn(struct objtool_file * file,struct instruction * insn,struct instruction * dest)4344 static int __validate_ibt_insn(struct objtool_file *file, struct instruction *insn,
4345 struct instruction *dest)
4346 {
4347 if (dest->type == INSN_ENDBR) {
4348 mark_endbr_used(dest);
4349 return 0;
4350 }
4351
4352 if (insn_func(dest) && insn_func(insn) &&
4353 insn_func(dest)->pfunc == insn_func(insn)->pfunc) {
4354 /*
4355 * Anything from->to self is either _THIS_IP_ or
4356 * IRET-to-self.
4357 *
4358 * There is no sane way to annotate _THIS_IP_ since the
4359 * compiler treats the relocation as a constant and is
4360 * happy to fold in offsets, skewing any annotation we
4361 * do, leading to vast amounts of false-positives.
4362 *
4363 * There's also compiler generated _THIS_IP_ through
4364 * KCOV and such which we have no hope of annotating.
4365 *
4366 * As such, blanket accept self-references without
4367 * issue.
4368 */
4369 return 0;
4370 }
4371
4372 /*
4373 * Accept anything ANNOTATE_NOENDBR.
4374 */
4375 if (dest->noendbr)
4376 return 0;
4377
4378 /*
4379 * Accept if this is the instruction after a symbol
4380 * that is (no)endbr -- typical code-range usage.
4381 */
4382 if (noendbr_range(file, dest))
4383 return 0;
4384
4385 WARN_INSN(insn, "relocation to !ENDBR: %s", offstr(dest->sec, dest->offset));
4386 return 1;
4387 }
4388
/*
 * Scan one instruction for function-pointer-producing relocations and
 * validate each referenced target against the IBT rules.
 */
static int validate_ibt_insn(struct objtool_file *file, struct instruction *insn)
{
	struct instruction *dest;
	struct reloc *reloc;
	unsigned long off;
	int warnings = 0;

	/*
	 * Looking for function pointer load relocations. Ignore
	 * direct/indirect branches:
	 */
	switch (insn->type) {

	case INSN_CALL:
	case INSN_CALL_DYNAMIC:
	case INSN_JUMP_CONDITIONAL:
	case INSN_JUMP_UNCONDITIONAL:
	case INSN_JUMP_DYNAMIC:
	case INSN_JUMP_DYNAMIC_CONDITIONAL:
	case INSN_RETURN:
	case INSN_NOP:
		return 0;

	case INSN_LEA_RIP:
		if (!insn_reloc(file, insn)) {
			/* local function pointer reference without reloc */

			off = arch_jump_destination(insn);

			dest = find_insn(file, insn->sec, off);
			if (!dest) {
				WARN_INSN(insn, "corrupt function pointer reference");
				return 1;
			}

			return __validate_ibt_insn(file, insn, dest);
		}
		/* With a reloc, fall through to the generic reloc scan below. */
		break;

	default:
		break;
	}

	/*
	 * Walk every relocation inside this instruction's byte range;
	 * one insn can carry more than one (e.g. immediate + displacement).
	 */
	for (reloc = insn_reloc(file, insn);
	     reloc;
	     reloc = find_reloc_by_dest_range(file->elf, insn->sec,
					      reloc_offset(reloc) + 1,
					      (insn->offset + insn->len) - (reloc_offset(reloc) + 1))) {

		/*
		 * Compute the referenced address; PC-relative types fold in
		 * the instruction-length bias.  (x86-64 reloc types -- this
		 * path presumably only runs for x86 objects; the IBT checks
		 * are x86-specific.)
		 */
		off = reloc->sym->offset;
		if (reloc_type(reloc) == R_X86_64_PC32 ||
		    reloc_type(reloc) == R_X86_64_PLT32)
			off += arch_dest_reloc_offset(reloc_addend(reloc));
		else
			off += reloc_addend(reloc);

		dest = find_insn(file, reloc->sym->sec, off);
		if (!dest)
			continue;

		warnings += __validate_ibt_insn(file, insn, dest);
	}

	return warnings;
}
4454
validate_ibt_data_reloc(struct objtool_file * file,struct reloc * reloc)4455 static int validate_ibt_data_reloc(struct objtool_file *file,
4456 struct reloc *reloc)
4457 {
4458 struct instruction *dest;
4459
4460 dest = find_insn(file, reloc->sym->sec,
4461 reloc->sym->offset + reloc_addend(reloc));
4462 if (!dest)
4463 return 0;
4464
4465 if (dest->type == INSN_ENDBR) {
4466 mark_endbr_used(dest);
4467 return 0;
4468 }
4469
4470 if (dest->noendbr)
4471 return 0;
4472
4473 WARN_FUNC(reloc->sec->base, reloc_offset(reloc),
4474 "data relocation to !ENDBR: %s", offstr(dest->sec, dest->offset));
4475
4476 return 1;
4477 }
4478
4479 /*
4480 * Validate IBT rules and remove used ENDBR instructions from the seal list.
4481 * Unused ENDBR instructions will be annotated for sealing (i.e., replaced with
4482 * NOPs) later, in create_ibt_endbr_seal_sections().
4483 */
validate_ibt(struct objtool_file * file)4484 static int validate_ibt(struct objtool_file *file)
4485 {
4486 struct section *sec;
4487 struct reloc *reloc;
4488 struct instruction *insn;
4489 int warnings = 0;
4490
4491 for_each_insn(file, insn)
4492 warnings += validate_ibt_insn(file, insn);
4493
4494 for_each_sec(file, sec) {
4495
4496 /* Already done by validate_ibt_insn() */
4497 if (sec->sh.sh_flags & SHF_EXECINSTR)
4498 continue;
4499
4500 if (!sec->rsec)
4501 continue;
4502
4503 /*
4504 * These sections can reference text addresses, but not with
4505 * the intent to indirect branch to them.
4506 */
4507 if ((!strncmp(sec->name, ".discard", 8) &&
4508 strcmp(sec->name, ".discard.ibt_endbr_noseal")) ||
4509 !strncmp(sec->name, ".debug", 6) ||
4510 !strcmp(sec->name, ".altinstructions") ||
4511 !strcmp(sec->name, ".ibt_endbr_seal") ||
4512 !strcmp(sec->name, ".orc_unwind_ip") ||
4513 !strcmp(sec->name, ".parainstructions") ||
4514 !strcmp(sec->name, ".retpoline_sites") ||
4515 !strcmp(sec->name, ".smp_locks") ||
4516 !strcmp(sec->name, ".static_call_sites") ||
4517 !strcmp(sec->name, "_error_injection_whitelist") ||
4518 !strcmp(sec->name, "_kprobe_blacklist") ||
4519 !strcmp(sec->name, "__bug_table") ||
4520 !strcmp(sec->name, "__ex_table") ||
4521 !strcmp(sec->name, "__jump_table") ||
4522 !strcmp(sec->name, "__mcount_loc") ||
4523 !strcmp(sec->name, ".kcfi_traps") ||
4524 !strcmp(sec->name, ".llvm.call-graph-profile") ||
4525 !strcmp(sec->name, ".llvm_bb_addr_map") ||
4526 !strcmp(sec->name, "__tracepoints") ||
4527 strstr(sec->name, "__patchable_function_entries"))
4528 continue;
4529
4530 for_each_reloc(sec->rsec, reloc)
4531 warnings += validate_ibt_data_reloc(file, reloc);
4532 }
4533
4534 return warnings;
4535 }
4536
validate_sls(struct objtool_file * file)4537 static int validate_sls(struct objtool_file *file)
4538 {
4539 struct instruction *insn, *next_insn;
4540 int warnings = 0;
4541
4542 for_each_insn(file, insn) {
4543 next_insn = next_insn_same_sec(file, insn);
4544
4545 if (insn->retpoline_safe)
4546 continue;
4547
4548 switch (insn->type) {
4549 case INSN_RETURN:
4550 if (!next_insn || next_insn->type != INSN_TRAP) {
4551 WARN_INSN(insn, "missing int3 after ret");
4552 warnings++;
4553 }
4554
4555 break;
4556 case INSN_JUMP_DYNAMIC:
4557 if (!next_insn || next_insn->type != INSN_TRAP) {
4558 WARN_INSN(insn, "missing int3 after indirect jump");
4559 warnings++;
4560 }
4561 break;
4562 default:
4563 break;
4564 }
4565 }
4566
4567 return warnings;
4568 }
4569
validate_reachable_instructions(struct objtool_file * file)4570 static int validate_reachable_instructions(struct objtool_file *file)
4571 {
4572 struct instruction *insn, *prev_insn;
4573 struct symbol *call_dest;
4574 int warnings = 0;
4575
4576 if (file->ignore_unreachables)
4577 return 0;
4578
4579 for_each_insn(file, insn) {
4580 if (insn->visited || ignore_unreachable_insn(file, insn))
4581 continue;
4582
4583 prev_insn = prev_insn_same_sec(file, insn);
4584 if (prev_insn && prev_insn->dead_end) {
4585 call_dest = insn_call_dest(prev_insn);
4586 if (call_dest) {
4587 WARN_INSN(insn, "%s() missing __noreturn in .c/.h or NORETURN() in noreturns.h",
4588 call_dest->name);
4589 warnings++;
4590 continue;
4591 }
4592 }
4593
4594 WARN_INSN(insn, "unreachable instruction");
4595 warnings++;
4596 }
4597
4598 return warnings;
4599 }
4600
4601 /* 'funcs' is a space-separated list of function names */
disas_funcs(const char * funcs)4602 static void disas_funcs(const char *funcs)
4603 {
4604 const char *objdump_str, *cross_compile;
4605 int size, ret;
4606 char *cmd;
4607
4608 cross_compile = getenv("CROSS_COMPILE");
4609 if (!cross_compile)
4610 cross_compile = "";
4611
4612 objdump_str = "%sobjdump -wdr %s | gawk -M -v _funcs='%s' '"
4613 "BEGIN { split(_funcs, funcs); }"
4614 "/^$/ { func_match = 0; }"
4615 "/<.*>:/ { "
4616 "f = gensub(/.*<(.*)>:/, \"\\\\1\", 1);"
4617 "for (i in funcs) {"
4618 "if (funcs[i] == f) {"
4619 "func_match = 1;"
4620 "base = strtonum(\"0x\" $1);"
4621 "break;"
4622 "}"
4623 "}"
4624 "}"
4625 "{"
4626 "if (func_match) {"
4627 "addr = strtonum(\"0x\" $1);"
4628 "printf(\"%%04x \", addr - base);"
4629 "print;"
4630 "}"
4631 "}' 1>&2";
4632
4633 /* fake snprintf() to calculate the size */
4634 size = snprintf(NULL, 0, objdump_str, cross_compile, objname, funcs) + 1;
4635 if (size <= 0) {
4636 WARN("objdump string size calculation failed");
4637 return;
4638 }
4639
4640 cmd = malloc(size);
4641
4642 /* real snprintf() */
4643 snprintf(cmd, size, objdump_str, cross_compile, objname, funcs);
4644 ret = system(cmd);
4645 if (ret) {
4646 WARN("disassembly failed: %d", ret);
4647 return;
4648 }
4649 }
4650
disas_warned_funcs(struct objtool_file * file)4651 static void disas_warned_funcs(struct objtool_file *file)
4652 {
4653 struct symbol *sym;
4654 char *funcs = NULL, *tmp;
4655
4656 for_each_sym(file, sym) {
4657 if (sym->warned) {
4658 if (!funcs) {
4659 funcs = malloc(strlen(sym->name) + 1);
4660 if (!funcs) {
4661 ERROR_GLIBC("malloc");
4662 return;
4663 }
4664 strcpy(funcs, sym->name);
4665 } else {
4666 tmp = malloc(strlen(funcs) + strlen(sym->name) + 2);
4667 if (!tmp) {
4668 ERROR_GLIBC("malloc");
4669 return;
4670 }
4671 sprintf(tmp, "%s %s", funcs, sym->name);
4672 free(funcs);
4673 funcs = tmp;
4674 }
4675 }
4676 }
4677
4678 if (funcs)
4679 disas_funcs(funcs);
4680 }
4681
arch_absolute_reloc(struct elf * elf,struct reloc * reloc)4682 __weak bool arch_absolute_reloc(struct elf *elf, struct reloc *reloc)
4683 {
4684 unsigned int type = reloc_type(reloc);
4685 size_t sz = elf_addr_size(elf);
4686
4687 return (sz == 8) ? (type == R_ABS64) : (type == R_ABS32);
4688 }
4689
check_abs_references(struct objtool_file * file)4690 static int check_abs_references(struct objtool_file *file)
4691 {
4692 struct section *sec;
4693 struct reloc *reloc;
4694 int ret = 0;
4695
4696 for_each_sec(file, sec) {
4697 /* absolute references in non-loadable sections are fine */
4698 if (!(sec->sh.sh_flags & SHF_ALLOC))
4699 continue;
4700
4701 /* section must have an associated .rela section */
4702 if (!sec->rsec)
4703 continue;
4704
4705 /*
4706 * Special case for compiler generated metadata that is not
4707 * consumed until after boot.
4708 */
4709 if (!strcmp(sec->name, "__patchable_function_entries"))
4710 continue;
4711
4712 for_each_reloc(sec->rsec, reloc) {
4713 if (arch_absolute_reloc(file->elf, reloc)) {
4714 WARN("section %s has absolute relocation at offset 0x%llx",
4715 sec->name, (unsigned long long)reloc_offset(reloc));
4716 ret++;
4717 }
4718 }
4719 }
4720 return ret;
4721 }
4722
/* Node tracking one contiguous allocation of instructions, for free_insns(). */
struct insn_chunk {
	void *addr;			/* base of the insn array to free */
	struct insn_chunk *next;	/* next chunk in the singly-linked list */
};
4727
4728 /*
4729 * Reduce peak RSS usage by freeing insns memory before writing the ELF file,
4730 * which can trigger more allocations for .debug_* sections whose data hasn't
4731 * been read yet.
4732 */
free_insns(struct objtool_file * file)4733 static void free_insns(struct objtool_file *file)
4734 {
4735 struct instruction *insn;
4736 struct insn_chunk *chunks = NULL, *chunk;
4737
4738 for_each_insn(file, insn) {
4739 if (!insn->idx) {
4740 chunk = malloc(sizeof(*chunk));
4741 chunk->addr = insn;
4742 chunk->next = chunks;
4743 chunks = chunk;
4744 }
4745 }
4746
4747 for (chunk = chunks; chunk; chunk = chunk->next)
4748 free(chunk->addr);
4749 }
4750
check(struct objtool_file * file)4751 int check(struct objtool_file *file)
4752 {
4753 int ret = 0, warnings = 0;
4754
4755 arch_initial_func_cfi_state(&initial_func_cfi);
4756 init_cfi_state(&init_cfi);
4757 init_cfi_state(&func_cfi);
4758 set_func_state(&func_cfi);
4759 init_cfi_state(&force_undefined_cfi);
4760 force_undefined_cfi.force_undefined = true;
4761
4762 if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3))) {
4763 ret = -1;
4764 goto out;
4765 }
4766
4767 cfi_hash_add(&init_cfi);
4768 cfi_hash_add(&func_cfi);
4769
4770 ret = decode_sections(file);
4771 if (ret)
4772 goto out;
4773
4774 if (!nr_insns)
4775 goto out;
4776
4777 if (opts.retpoline)
4778 warnings += validate_retpoline(file);
4779
4780 if (opts.stackval || opts.orc || opts.uaccess) {
4781 int w = 0;
4782
4783 w += validate_functions(file);
4784 w += validate_unwind_hints(file, NULL);
4785 if (!w)
4786 w += validate_reachable_instructions(file);
4787
4788 warnings += w;
4789
4790 } else if (opts.noinstr) {
4791 warnings += validate_noinstr_sections(file);
4792 }
4793
4794 if (opts.unret) {
4795 /*
4796 * Must be after validate_branch() and friends, it plays
4797 * further games with insn->visited.
4798 */
4799 warnings += validate_unrets(file);
4800 }
4801
4802 if (opts.ibt)
4803 warnings += validate_ibt(file);
4804
4805 if (opts.sls)
4806 warnings += validate_sls(file);
4807
4808 if (opts.static_call) {
4809 ret = create_static_call_sections(file);
4810 if (ret)
4811 goto out;
4812 }
4813
4814 if (opts.retpoline) {
4815 ret = create_retpoline_sites_sections(file);
4816 if (ret)
4817 goto out;
4818 }
4819
4820 if (opts.cfi) {
4821 ret = create_cfi_sections(file);
4822 if (ret)
4823 goto out;
4824 }
4825
4826 if (opts.rethunk) {
4827 ret = create_return_sites_sections(file);
4828 if (ret)
4829 goto out;
4830
4831 if (opts.hack_skylake) {
4832 ret = create_direct_call_sections(file);
4833 if (ret)
4834 goto out;
4835 }
4836 }
4837
4838 if (opts.mcount) {
4839 ret = create_mcount_loc_sections(file);
4840 if (ret)
4841 goto out;
4842 }
4843
4844 if (opts.prefix) {
4845 ret = add_prefix_symbols(file);
4846 if (ret)
4847 goto out;
4848 }
4849
4850 if (opts.ibt) {
4851 ret = create_ibt_endbr_seal_sections(file);
4852 if (ret)
4853 goto out;
4854 }
4855
4856 if (opts.noabs)
4857 warnings += check_abs_references(file);
4858
4859 if (opts.orc && nr_insns) {
4860 ret = orc_create(file);
4861 if (ret)
4862 goto out;
4863 }
4864
4865 free_insns(file);
4866
4867 if (opts.stats) {
4868 printf("nr_insns_visited: %ld\n", nr_insns_visited);
4869 printf("nr_cfi: %ld\n", nr_cfi);
4870 printf("nr_cfi_reused: %ld\n", nr_cfi_reused);
4871 printf("nr_cfi_cache: %ld\n", nr_cfi_cache);
4872 }
4873
4874 out:
4875 if (!ret && !warnings)
4876 return 0;
4877
4878 if (opts.werror && warnings)
4879 ret = 1;
4880
4881 if (opts.verbose) {
4882 if (opts.werror && warnings)
4883 WARN("%d warning(s) upgraded to errors", warnings);
4884 print_args();
4885 disas_warned_funcs(file);
4886 }
4887
4888 return ret;
4889 }
4890