xref: /linux/tools/objtool/check.c (revision f88dc319fcb6d6a155e94469a355ce456dd85441)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
4  */
5 
6 #define _GNU_SOURCE /* memmem() */
7 #include <fnmatch.h>
8 #include <string.h>
9 #include <stdlib.h>
10 #include <inttypes.h>
11 #include <sys/mman.h>
12 
13 #include <objtool/builtin.h>
14 #include <objtool/cfi.h>
15 #include <objtool/arch.h>
16 #include <objtool/disas.h>
17 #include <objtool/check.h>
18 #include <objtool/special.h>
19 #include <objtool/trace.h>
20 #include <objtool/warn.h>
21 #include <objtool/checksum.h>
22 #include <objtool/util.h>
23 
24 #include <linux/objtool_types.h>
25 #include <linux/hashtable.h>
26 #include <linux/kernel.h>
27 #include <linux/static_call_types.h>
28 #include <linux/string.h>
29 
/*
 * CFI state cache counters: nr_cfi counts allocations (see cfi_alloc()),
 * nr_cfi_cache counts hash hits (see cfi_hash_find_or_add()).
 * nr_cfi_reused is presumably updated elsewhere in this file — not in
 * this chunk.
 */
static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache;

/* Shared CFI state templates; initialized elsewhere in this file. */
static struct cfi_init_state initial_func_cfi;
static struct cfi_state init_cfi;
static struct cfi_state func_cfi;
static struct cfi_state force_undefined_cfi;

/* Global disassembly context (see objtool/disas.h for its users). */
struct disas_context *objtool_disas_ctx;

/* Widest symbol name seen; presumably used to align diagnostic output. */
size_t sym_name_max_len;
40 
41 struct instruction *find_insn(struct objtool_file *file,
42 			      struct section *sec, unsigned long offset)
43 {
44 	struct instruction *insn;
45 
46 	hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) {
47 		if (insn->sec == sec && insn->offset == offset)
48 			return insn;
49 	}
50 
51 	return NULL;
52 }
53 
54 struct instruction *next_insn_same_sec(struct objtool_file *file,
55 				       struct instruction *insn)
56 {
57 	if (insn->idx == INSN_CHUNK_MAX)
58 		return find_insn(file, insn->sec, insn->offset + insn->len);
59 
60 	insn++;
61 	if (!insn->len)
62 		return NULL;
63 
64 	return insn;
65 }
66 
67 static struct instruction *next_insn_same_func(struct objtool_file *file,
68 					       struct instruction *insn)
69 {
70 	struct instruction *next = next_insn_same_sec(file, insn);
71 	struct symbol *func = insn_func(insn);
72 
73 	if (!func)
74 		return NULL;
75 
76 	if (next && insn_func(next) == func)
77 		return next;
78 
79 	/* Check if we're already in the subfunction: */
80 	if (func == func->cfunc)
81 		return NULL;
82 
83 	/* Move to the subfunction: */
84 	return find_insn(file, func->cfunc->sec, func->cfunc->offset);
85 }
86 
87 static struct instruction *prev_insn_same_sec(struct objtool_file *file,
88 					      struct instruction *insn)
89 {
90 	if (insn->idx == 0) {
91 		if (insn->prev_len)
92 			return find_insn(file, insn->sec, insn->offset - insn->prev_len);
93 		return NULL;
94 	}
95 
96 	return insn - 1;
97 }
98 
99 static struct instruction *prev_insn_same_sym(struct objtool_file *file,
100 					      struct instruction *insn)
101 {
102 	struct instruction *prev = prev_insn_same_sec(file, insn);
103 
104 	if (prev && insn_func(prev) == insn_func(insn))
105 		return prev;
106 
107 	return NULL;
108 }
109 
/*
 * Iterate over every instruction in the file, section by section.  The
 * __fake dummy variable gives the nested loops a single enclosing for
 * statement so the macro composes like an ordinary loop.
 */
#define for_each_insn(file, insn)					\
	for (struct section *__sec, *__fake = (struct section *)1;	\
	     __fake; __fake = NULL)					\
		for_each_sec(file->elf, __sec)				\
			sec_for_each_insn(file, __sec, insn)

/* Iterate @func's instructions, following into its .cold subfunction. */
#define func_for_each_insn(file, func, insn)				\
	for (insn = find_insn(file, func->sec, func->offset);		\
	     insn;							\
	     insn = next_insn_same_func(file, insn))

/* Iterate @sym's instructions, bounded by [sym->offset, sym->offset + sym->len). */
#define sym_for_each_insn(file, sym, insn)				\
	for (insn = find_insn(file, sym->sec, sym->offset);		\
	     insn && insn->offset < sym->offset + sym->len;		\
	     insn = next_insn_same_sec(file, insn))

/* Walk backwards from (before) @insn while still inside @sym. */
#define sym_for_each_insn_continue_reverse(file, sym, insn)		\
	for (insn = prev_insn_same_sec(file, insn);			\
	     insn && insn->offset >= sym->offset;			\
	     insn = prev_insn_same_sec(file, insn))

/* Continue a section walk from @insn (inclusive). */
#define sec_for_each_insn_from(file, insn)				\
	for (; insn; insn = next_insn_same_sec(file, insn))

/* Continue a section walk from the instruction after @insn. */
#define sec_for_each_insn_continue(file, insn)				\
	for (insn = next_insn_same_sec(file, insn); insn;		\
	     insn = next_insn_same_sec(file, insn))
137 
138 static inline struct reloc *insn_jump_table(struct instruction *insn)
139 {
140 	if (insn->type == INSN_JUMP_DYNAMIC ||
141 	    insn->type == INSN_CALL_DYNAMIC)
142 		return insn->_jump_table;
143 
144 	return NULL;
145 }
146 
147 static inline unsigned long insn_jump_table_size(struct instruction *insn)
148 {
149 	if (insn->type == INSN_JUMP_DYNAMIC ||
150 	    insn->type == INSN_CALL_DYNAMIC)
151 		return insn->_jump_table_size;
152 
153 	return 0;
154 }
155 
156 static bool is_jump_table_jump(struct instruction *insn)
157 {
158 	struct alt_group *alt_group = insn->alt_group;
159 
160 	if (insn_jump_table(insn))
161 		return true;
162 
163 	/* Retpoline alternative for a jump table? */
164 	return alt_group && alt_group->orig_group &&
165 	       insn_jump_table(alt_group->orig_group->first_insn);
166 }
167 
168 static bool is_sibling_call(struct instruction *insn)
169 {
170 	/*
171 	 * Assume only STT_FUNC calls have jump-tables.
172 	 */
173 	if (insn_func(insn)) {
174 		/* An indirect jump is either a sibling call or a jump to a table. */
175 		if (insn->type == INSN_JUMP_DYNAMIC)
176 			return !is_jump_table_jump(insn);
177 	}
178 
179 	/* add_jump_destinations() sets insn_call_dest(insn) for sibling calls. */
180 	return (is_static_jump(insn) && insn_call_dest(insn));
181 }
182 
183 /*
184  * Checks if a function is a Rust "noreturn" one.
185  */
186 static bool is_rust_noreturn(const struct symbol *func)
187 {
188 	/*
189 	 * If it does not start with "_R", then it is not a Rust symbol.
190 	 */
191 	if (strncmp(func->name, "_R", 2))
192 		return false;
193 
194 	/*
195 	 * These are just heuristics -- we do not control the precise symbol
196 	 * name, due to the crate disambiguators (which depend on the compiler)
197 	 * as well as changes to the source code itself between versions (since
198 	 * these come from the Rust standard library).
199 	 */
200 	return str_ends_with(func->name, "_4core5sliceSp15copy_from_slice17len_mismatch_fail")		||
201 	       str_ends_with(func->name, "_4core6option13expect_failed")				||
202 	       str_ends_with(func->name, "_4core6option13unwrap_failed")				||
203 	       str_ends_with(func->name, "_4core6result13unwrap_failed")				||
204 	       str_ends_with(func->name, "_4core9panicking5panic")					||
205 	       str_ends_with(func->name, "_4core9panicking9panic_fmt")					||
206 	       str_ends_with(func->name, "_4core9panicking14panic_explicit")				||
207 	       str_ends_with(func->name, "_4core9panicking14panic_nounwind")				||
208 	       str_ends_with(func->name, "_4core9panicking18panic_bounds_check")			||
209 	       str_ends_with(func->name, "_4core9panicking18panic_nounwind_fmt")			||
210 	       str_ends_with(func->name, "_4core9panicking19assert_failed_inner")			||
211 	       str_ends_with(func->name, "_4core9panicking30panic_null_pointer_dereference")		||
212 	       str_ends_with(func->name, "_4core9panicking36panic_misaligned_pointer_dereference")	||
213 	       str_ends_with(func->name, "_7___rustc17rust_begin_unwind")				||
214 	       strstr(func->name, "_4core9panicking13assert_failed")					||
215 	       strstr(func->name, "_4core9panicking11panic_const24panic_const_")			||
216 	       (strstr(func->name, "_4core5slice5index") &&
217 		strstr(func->name, "slice_") &&
218 		str_ends_with(func->name, "_fail"));
219 }
220 
221 /*
222  * This checks to see if the given function is a "noreturn" function.
223  *
224  * For global functions which are outside the scope of this object file, we
225  * have to keep a manual list of them.
226  *
227  * For local functions, we have to detect them manually by simply looking for
228  * the lack of a return instruction.
229  */
static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
				int recursion)
{
	int i;
	struct instruction *insn;
	bool empty = true;

	/* Stringified list of known global noreturn functions. */
#define NORETURN(func) __stringify(func),
	static const char * const global_noreturns[] = {
#include "noreturns.h"
	};
#undef NORETURN

	if (!func)
		return false;

	/* Global functions: match against the known-noreturn lists. */
	if (!is_local_sym(func)) {
		if (is_rust_noreturn(func))
			return true;

		for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
			if (!strcmp(func->name, global_noreturns[i]))
				return true;
	}

	/* A weak symbol may be overridden by a returning implementation. */
	if (is_weak_sym(func))
		return false;

	if (!func->len)
		return false;

	insn = find_insn(file, func->sec, func->offset);
	if (!insn || !insn_func(insn))
		return false;

	/* First pass: any RETURN instruction means the function can return. */
	func_for_each_insn(file, func, insn) {
		empty = false;

		if (insn->type == INSN_RETURN)
			return false;
	}

	if (empty)
		return false;

	/*
	 * A function can have a sibling call instead of a return.  In that
	 * case, the function's dead-end status depends on whether the target
	 * of the sibling call returns.
	 */
	func_for_each_insn(file, func, insn) {
		if (is_sibling_call(insn)) {
			struct instruction *dest = insn->jump_dest;

			if (!dest)
				/* sibling call to another file */
				return false;

			/* local sibling call */
			if (recursion == 5) {
				/*
				 * Infinite recursion: two functions have
				 * sibling calls to each other.  This is a very
				 * rare case.  It means they aren't dead ends.
				 */
				return false;
			}

			return __dead_end_function(file, insn_func(dest), recursion+1);
		}
	}

	return true;
}
304 
/* Does @func never return to its caller?  (Recursion-bounded wrapper.) */
static bool dead_end_function(struct objtool_file *file, struct symbol *func)
{
	return __dead_end_function(file, func, 0);
}
309 
310 static void init_cfi_state(struct cfi_state *cfi)
311 {
312 	int i;
313 
314 	for (i = 0; i < CFI_NUM_REGS; i++) {
315 		cfi->regs[i].base = CFI_UNDEFINED;
316 		cfi->vals[i].base = CFI_UNDEFINED;
317 	}
318 	cfi->cfa.base = CFI_UNDEFINED;
319 	cfi->drap_reg = CFI_UNDEFINED;
320 	cfi->drap_offset = -1;
321 }
322 
323 static void init_insn_state(struct objtool_file *file, struct insn_state *state,
324 			    struct section *sec)
325 {
326 	memset(state, 0, sizeof(*state));
327 	init_cfi_state(&state->cfi);
328 
329 	if (opts.noinstr && sec)
330 		state->noinstr = sec->noinstr;
331 }
332 
333 static struct cfi_state *cfi_alloc(void)
334 {
335 	struct cfi_state *cfi = calloc(1, sizeof(struct cfi_state));
336 	if (!cfi) {
337 		ERROR_GLIBC("calloc");
338 		exit(1);
339 	}
340 	nr_cfi++;
341 	return cfi;
342 }
343 
/* Deduplication hash of CFI states; sized by cfi_hash_alloc(). */
static int cfi_bits;
static struct hlist_head *cfi_hash;
346 
/*
 * Compare two CFI states, skipping the embedded hlist node at the head
 * of the struct (it only links the object into cfi_hash and is not part
 * of the state).  memcmp() semantics: zero (false) means equal.
 */
static inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2)
{
	return memcmp((void *)cfi1 + sizeof(cfi1->hash),
		      (void *)cfi2 + sizeof(cfi2->hash),
		      sizeof(struct cfi_state) - sizeof(struct hlist_node));
}
353 
/* Hash a CFI state, skipping the hlist node for the same reason as cficmp(). */
static inline u32 cfi_key(struct cfi_state *cfi)
{
	return jhash((void *)cfi + sizeof(cfi->hash),
		     sizeof(*cfi) - sizeof(cfi->hash), 0);
}
359 
360 static struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi)
361 {
362 	struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
363 	struct cfi_state *obj;
364 
365 	hlist_for_each_entry(obj, head, hash) {
366 		if (!cficmp(cfi, obj)) {
367 			nr_cfi_cache++;
368 			return obj;
369 		}
370 	}
371 
372 	obj = cfi_alloc();
373 	*obj = *cfi;
374 	hlist_add_head(&obj->hash, head);
375 
376 	return obj;
377 }
378 
379 static void cfi_hash_add(struct cfi_state *cfi)
380 {
381 	struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
382 
383 	hlist_add_head(&cfi->hash, head);
384 }
385 
386 static void *cfi_hash_alloc(unsigned long size)
387 {
388 	cfi_bits = max(10, ilog2(size));
389 	cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits,
390 			PROT_READ|PROT_WRITE,
391 			MAP_PRIVATE|MAP_ANON, -1, 0);
392 	if (cfi_hash == (void *)-1L) {
393 		ERROR_GLIBC("mmap fail cfi_hash");
394 		cfi_hash = NULL;
395 	}  else if (opts.stats) {
396 		printf("cfi_bits: %d\n", cfi_bits);
397 	}
398 
399 	return cfi_hash;
400 }
401 
/*
 * Decode/validation statistics.  nr_insns is bumped in
 * decode_instructions(); nr_insns_visited is presumably updated during
 * validation elsewhere in this file.
 */
static unsigned long nr_insns;
static unsigned long nr_insns_visited;
404 
405 /*
406  * Call the arch-specific instruction decoder for all the instructions and add
407  * them to the global instruction list.
408  */
static int decode_instructions(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;
	unsigned long offset;
	struct instruction *insn;

	for_each_sec(file->elf, sec) {
		struct instruction *insns = NULL;
		u8 prev_len = 0;
		u8 idx = 0;

		if (!is_text_sec(sec))
			continue;

		/* Alternative-replacement and .discard sections aren't "real" text. */
		if (strcmp(sec->name, ".altinstr_replacement") &&
		    strcmp(sec->name, ".altinstr_aux") &&
		    strncmp(sec->name, ".discard.", 9))
			sec->text = true;

		/* Sections whose code must satisfy noinstr validation. */
		if (!strcmp(sec->name, ".noinstr.text") ||
		    !strcmp(sec->name, ".entry.text") ||
		    !strcmp(sec->name, ".cpuidle.text") ||
		    !strncmp(sec->name, ".text..__x86.", 13))
			sec->noinstr = true;

		/*
		 * .init.text code is ran before userspace and thus doesn't
		 * strictly need retpolines, except for modules which are
		 * loaded late, they very much do need retpoline in their
		 * .init.text
		 */
		if (!strcmp(sec->name, ".init.text") && !opts.module)
			sec->init = true;

		/*
		 * Decode the whole section.  Instructions are stored in
		 * chunks of INSN_CHUNK_SIZE array slots; a fresh chunk is
		 * allocated when the current one fills up.  Chunks are
		 * never freed: instructions live for the whole run.
		 */
		for (offset = 0; offset < sec_size(sec); offset += insn->len) {
			if (!insns || idx == INSN_CHUNK_MAX) {
				insns = calloc(INSN_CHUNK_SIZE, sizeof(*insn));
				if (!insns) {
					ERROR_GLIBC("calloc");
					return -1;
				}
				idx = 0;
			} else {
				idx++;
			}
			insn = &insns[idx];
			insn->idx = idx;

			INIT_LIST_HEAD(&insn->call_node);
			insn->sec = sec;
			insn->offset = offset;
			insn->prev_len = prev_len;

			if (arch_decode_instruction(file, sec, offset, sec_size(sec) - offset, insn))
				return -1;

			prev_len = insn->len;

			/*
			 * By default, "ud2" is a dead end unless otherwise
			 * annotated, because GCC 7 inserts it for certain
			 * divide-by-zero cases.
			 */
			if (insn->type == INSN_BUG)
				insn->dead_end = true;

			hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset));
			nr_insns++;
		}

		/* Associate each instruction with its enclosing symbol. */
		sec_for_each_sym(sec, func) {
			if (!is_notype_sym(func) && !is_func_sym(func))
				continue;

			if (func->offset == sec_size(sec)) {
				/* Heuristic: likely an "end" symbol */
				if (is_notype_sym(func))
					continue;
				ERROR("%s(): STT_FUNC at end of section", func->name);
				return -1;
			}

			/* Only process each function once, via its canonical alias. */
			if (func->embedded_insn || func->alias != func)
				continue;

			if (!find_insn(file, sec, func->offset)) {
				ERROR("%s(): can't find starting instruction", func->name);
				return -1;
			}

			sym_for_each_insn(file, func, insn) {
				insn->sym = func;
				/* Track ENDBRs for later IBT sealing. */
				if (is_func_sym(func) &&
				    insn->type == INSN_ENDBR &&
				    list_empty(&insn->call_node)) {
					if (insn->offset == func->offset) {
						list_add_tail(&insn->call_node, &file->endbr_list);
						file->nr_endbr++;
					} else {
						file->nr_endbr_int++;
					}
				}
			}
		}
	}

	if (opts.stats)
		printf("nr_insns: %lu\n", nr_insns);

	return 0;
}
521 
522 /*
523  * Known pv_ops*[] arrays.
524  */
static struct {
	const char *name;	/* array symbol name */
	int idx_off;		/* filled in by init_pv_ops(); -1 if absent */
} pv_ops_tables[] = {
	{ .name = "pv_ops", },
	{ .name = NULL, .idx_off = -1 }	/* sentinel, doubles as "not found" */
};
532 
533 /*
534  * Get index offset for a pv_ops* array.
535  */
536 int pv_ops_idx_off(const char *symname)
537 {
538 	int idx;
539 
540 	for (idx = 0; pv_ops_tables[idx].name; idx++) {
541 		if (!strcmp(symname, pv_ops_tables[idx].name))
542 			break;
543 	}
544 
545 	return pv_ops_tables[idx].idx_off;
546 }
547 
548 /*
549  * Read a pv_ops*[] .data table to find the static initialized values.
550  */
static int add_pv_ops(struct objtool_file *file, int pv_ops_idx)
{
	struct symbol *sym, *func;
	unsigned long off, end;
	struct reloc *reloc;
	int idx, idx_off;
	const char *symname;

	symname = pv_ops_tables[pv_ops_idx].name;
	sym = find_symbol_by_name(file->elf, symname);
	if (!sym) {
		ERROR("Unknown pv_ops array %s", symname);
		return -1;
	}

	off = sym->offset;
	end = off + sym->len;
	idx_off = pv_ops_tables[pv_ops_idx].idx_off;
	if (idx_off < 0) {
		ERROR("pv_ops array %s has unknown index offset", symname);
		return -1;
	}

	/* Walk every relocation that lands inside the array's data. */
	for (;;) {
		reloc = find_reloc_by_dest_range(file->elf, sym->sec, off, end - off);
		if (!reloc)
			break;

		/* Slot number within the array (pointer-sized entries). */
		idx = (reloc_offset(reloc) - sym->offset) / sizeof(unsigned long);

		/* Section-relative relocs need the target symbol resolved. */
		func = reloc->sym;
		if (is_sec_sym(func))
			func = find_symbol_by_offset(reloc->sym->sec,
						     reloc_addend(reloc));
		if (!func) {
			ERROR_FUNC(reloc->sym->sec, reloc_addend(reloc),
				   "can't find func at %s[%d]", symname, idx);
			return -1;
		}

		if (objtool_pv_add(file, idx + idx_off, func))
			return -1;

		/* Resume the search just past this reloc. */
		off = reloc_offset(reloc) + 1;
		if (off > end)
			break;
	}

	return 0;
}
601 
602 /*
603  * Allocate and initialize file->pv_ops[].
604  */
static int init_pv_ops(struct objtool_file *file)
{
	struct symbol *sym;
	int idx, nr;

	/* pv_ops tracking is only needed for noinstr validation. */
	if (!opts.noinstr)
		return 0;

	file->pv_ops = NULL;

	/*
	 * First pass: assign each present pv_ops* array a base offset into
	 * the combined file->pv_ops[] table, and total up the entry count.
	 */
	nr = 0;
	for (idx = 0; pv_ops_tables[idx].name; idx++) {
		sym = find_symbol_by_name(file->elf, pv_ops_tables[idx].name);
		if (!sym) {
			pv_ops_tables[idx].idx_off = -1;
			continue;
		}
		pv_ops_tables[idx].idx_off = nr;
		nr += sym->len / sizeof(unsigned long);
	}

	if (nr == 0)
		return 0;

	file->pv_ops = calloc(nr, sizeof(struct pv_state));
	if (!file->pv_ops) {
		ERROR_GLIBC("calloc");
		return -1;
	}

	for (idx = 0; idx < nr; idx++)
		INIT_LIST_HEAD(&file->pv_ops[idx].targets);

	/* Second pass: record the statically-initialized targets. */
	for (idx = 0; pv_ops_tables[idx].name; idx++) {
		if (pv_ops_tables[idx].idx_off < 0)
			continue;
		if (add_pv_ops(file, idx))
			return -1;
	}

	return 0;
}
647 
648 static bool is_livepatch_module(struct objtool_file *file)
649 {
650 	struct section *sec;
651 
652 	if (!opts.module)
653 		return false;
654 
655 	sec = find_section_by_name(file->elf, ".modinfo");
656 	if (!sec)
657 		return false;
658 
659 	return memmem(sec->data->d_buf, sec_size(sec), "\0livepatch=Y", 12);
660 }
661 
/*
 * Emit .static_call_sites: one { addr, key } record per static call,
 * with relocs for both fields.
 */
static int create_static_call_sections(struct objtool_file *file)
{
	struct static_call_site *site;
	struct section *sec;
	struct instruction *insn;
	struct symbol *key_sym;
	char *key_name, *tmp;
	int idx;

	sec = find_section_by_name(file->elf, ".static_call_sites");
	if (sec) {
		/*
		 * Livepatch modules may have already extracted the static call
		 * site entries to take advantage of vmlinux static call
		 * privileges.
		 */
		if (!file->klp)
			WARN("file already has .static_call_sites section, skipping");

		return 0;
	}

	if (list_empty(&file->static_call_list))
		return 0;

	/* Count the sites to size the section. */
	idx = 0;
	list_for_each_entry(insn, &file->static_call_list, call_node)
		idx++;

	sec = elf_create_section_pair(file->elf, ".static_call_sites",
				      sizeof(*site), idx, idx * 2);
	if (!sec)
		return -1;

	/* Allow modules to modify the low bits of static_call_site::key */
	sec->sh.sh_flags |= SHF_WRITE;

	idx = 0;
	list_for_each_entry(insn, &file->static_call_list, call_node) {

		/* populate reloc for 'addr' */
		if (!elf_init_reloc_text_sym(file->elf, sec,
					     idx * sizeof(*site), idx * 2,
					     insn->sec, insn->offset))
			return -1;

		/*
		 * Derive the key symbol name from the trampoline name by
		 * rewriting the prefix in place:
		 * __SCT__foo -> __SCK__foo (prefix lengths may differ, so
		 * tmp points at the adjusted start of the buffer).
		 *
		 * NOTE(review): key_name is never freed; tolerated since
		 * objtool is a short-lived one-shot tool.
		 */
		key_name = strdup(insn_call_dest(insn)->name);
		if (!key_name) {
			ERROR_GLIBC("strdup");
			return -1;
		}
		if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR,
			    STATIC_CALL_TRAMP_PREFIX_LEN)) {
			ERROR("static_call: trampoline name malformed: %s", key_name);
			return -1;
		}
		tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN;
		memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN);

		key_sym = find_symbol_by_name(file->elf, tmp);
		if (!key_sym) {
			if (!opts.module || file->klp) {
				ERROR("static_call: can't find static_call_key symbol: %s", tmp);
				return -1;
			}

			/*
			 * For modules(), the key might not be exported, which
			 * means the module can make static calls but isn't
			 * allowed to change them.
			 *
			 * In that case we temporarily set the key to be the
			 * trampoline address.  This is fixed up in
			 * static_call_add_module().
			 */
			key_sym = insn_call_dest(insn);
		}

		/* populate reloc for 'key' */
		if (!elf_init_reloc_data_sym(file->elf, sec,
					     idx * sizeof(*site) + 4,
					     (idx * 2) + 1, key_sym,
					     is_sibling_call(insn) * STATIC_CALL_SITE_TAIL))
			return -1;

		idx++;
	}

	return 0;
}
753 
754 static int create_retpoline_sites_sections(struct objtool_file *file)
755 {
756 	struct instruction *insn;
757 	struct section *sec;
758 	int idx;
759 
760 	sec = find_section_by_name(file->elf, ".retpoline_sites");
761 	if (sec) {
762 		WARN("file already has .retpoline_sites, skipping");
763 		return 0;
764 	}
765 
766 	idx = 0;
767 	list_for_each_entry(insn, &file->retpoline_call_list, call_node)
768 		idx++;
769 
770 	if (!idx)
771 		return 0;
772 
773 	sec = elf_create_section_pair(file->elf, ".retpoline_sites",
774 				      sizeof(int), idx, idx);
775 	if (!sec)
776 		return -1;
777 
778 	idx = 0;
779 	list_for_each_entry(insn, &file->retpoline_call_list, call_node) {
780 
781 		if (!elf_init_reloc_text_sym(file->elf, sec,
782 					     idx * sizeof(int), idx,
783 					     insn->sec, insn->offset))
784 			return -1;
785 
786 		idx++;
787 	}
788 
789 	return 0;
790 }
791 
792 static int create_return_sites_sections(struct objtool_file *file)
793 {
794 	struct instruction *insn;
795 	struct section *sec;
796 	int idx;
797 
798 	sec = find_section_by_name(file->elf, ".return_sites");
799 	if (sec) {
800 		WARN("file already has .return_sites, skipping");
801 		return 0;
802 	}
803 
804 	idx = 0;
805 	list_for_each_entry(insn, &file->return_thunk_list, call_node)
806 		idx++;
807 
808 	if (!idx)
809 		return 0;
810 
811 	sec = elf_create_section_pair(file->elf, ".return_sites",
812 				      sizeof(int), idx, idx);
813 	if (!sec)
814 		return -1;
815 
816 	idx = 0;
817 	list_for_each_entry(insn, &file->return_thunk_list, call_node) {
818 
819 		if (!elf_init_reloc_text_sym(file->elf, sec,
820 					     idx * sizeof(int), idx,
821 					     insn->sec, insn->offset))
822 			return -1;
823 
824 		idx++;
825 	}
826 
827 	return 0;
828 }
829 
/*
 * Emit .ibt_endbr_seal: the list of superfluous ENDBR instructions the
 * kernel may overwrite (seal) at boot.
 */
static int create_ibt_endbr_seal_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".ibt_endbr_seal");
	if (sec) {
		WARN("file already has .ibt_endbr_seal, skipping");
		return 0;
	}

	idx = 0;
	list_for_each_entry(insn, &file->endbr_list, call_node)
		idx++;

	if (opts.stats) {
		printf("ibt: ENDBR at function start: %d\n", file->nr_endbr);
		printf("ibt: ENDBR inside functions:  %d\n", file->nr_endbr_int);
		printf("ibt: superfluous ENDBR:       %d\n", idx);
	}

	if (!idx)
		return 0;

	sec = elf_create_section_pair(file->elf, ".ibt_endbr_seal",
				      sizeof(int), idx, idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->endbr_list, call_node) {

		/* Zero the slot; the address itself comes from the reloc. */
		int *site = (int *)sec->data->d_buf + idx;
		struct symbol *sym = insn->sym;
		*site = 0;

		/*
		 * A sealable ENDBR at the start of init_module()/
		 * cleanup_module() means nothing references these magic
		 * names — the deprecated naming is the likely cause.
		 */
		if (opts.module && sym && is_func_sym(sym) &&
		    insn->offset == sym->offset &&
		    (!strcmp(sym->name, "init_module") ||
		     !strcmp(sym->name, "cleanup_module"))) {
			ERROR("%s(): Magic init_module() function name is deprecated, use module_init(fn) instead",
			      sym->name);
			return -1;
		}

		if (!elf_init_reloc_text_sym(file->elf, sec,
					     idx * sizeof(int), idx,
					     insn->sec, insn->offset))
			return -1;

		idx++;
	}

	return 0;
}
886 
887 static int create_cfi_sections(struct objtool_file *file)
888 {
889 	struct section *sec;
890 	struct symbol *sym;
891 	int idx;
892 
893 	sec = find_section_by_name(file->elf, ".cfi_sites");
894 	if (sec) {
895 		WARN("file already has .cfi_sites section, skipping");
896 		return 0;
897 	}
898 
899 	idx = 0;
900 	for_each_sym(file->elf, sym) {
901 		if (!is_func_sym(sym))
902 			continue;
903 
904 		if (strncmp(sym->name, "__cfi_", 6))
905 			continue;
906 
907 		idx++;
908 	}
909 
910 	sec = elf_create_section_pair(file->elf, ".cfi_sites",
911 				      sizeof(unsigned int), idx, idx);
912 	if (!sec)
913 		return -1;
914 
915 	idx = 0;
916 	for_each_sym(file->elf, sym) {
917 		if (!is_func_sym(sym))
918 			continue;
919 
920 		if (strncmp(sym->name, "__cfi_", 6))
921 			continue;
922 
923 		if (!elf_init_reloc_text_sym(file->elf, sec,
924 					     idx * sizeof(unsigned int), idx,
925 					     sym->sec, sym->offset))
926 			return -1;
927 
928 		idx++;
929 	}
930 
931 	return 0;
932 }
933 
/*
 * Emit __mcount_loc: one absolute address per mcount/fentry call site,
 * for ftrace's boot-time patching.
 */
static int create_mcount_loc_sections(struct objtool_file *file)
{
	size_t addr_size = elf_addr_size(file->elf);
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, "__mcount_loc");
	if (sec) {
		/*
		 * Livepatch modules have already extracted their __mcount_loc
		 * entries to cover the !CONFIG_FTRACE_MCOUNT_USE_OBJTOOL case.
		 */
		if (!file->klp)
			WARN("file already has __mcount_loc section, skipping");

		return 0;
	}

	if (list_empty(&file->mcount_loc_list))
		return 0;

	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, call_node)
		idx++;

	sec = elf_create_section_pair(file->elf, "__mcount_loc", addr_size,
				      idx, idx);
	if (!sec)
		return -1;

	sec->sh.sh_addralign = addr_size;

	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, call_node) {

		struct reloc *reloc;

		reloc = elf_init_reloc_text_sym(file->elf, sec, idx * addr_size, idx,
					       insn->sec, insn->offset);
		if (!reloc)
			return -1;

		/* Entries are absolute addresses, sized to the ELF class. */
		set_reloc_type(file->elf, reloc, addr_size == 8 ? R_ABS64 : R_ABS32);

		idx++;
	}

	return 0;
}
984 
985 static int create_direct_call_sections(struct objtool_file *file)
986 {
987 	struct instruction *insn;
988 	struct section *sec;
989 	int idx;
990 
991 	sec = find_section_by_name(file->elf, ".call_sites");
992 	if (sec) {
993 		WARN("file already has .call_sites section, skipping");
994 		return 0;
995 	}
996 
997 	if (list_empty(&file->call_list))
998 		return 0;
999 
1000 	idx = 0;
1001 	list_for_each_entry(insn, &file->call_list, call_node)
1002 		idx++;
1003 
1004 	sec = elf_create_section_pair(file->elf, ".call_sites",
1005 				      sizeof(unsigned int), idx, idx);
1006 	if (!sec)
1007 		return -1;
1008 
1009 	idx = 0;
1010 	list_for_each_entry(insn, &file->call_list, call_node) {
1011 
1012 		if (!elf_init_reloc_text_sym(file->elf, sec,
1013 					     idx * sizeof(unsigned int), idx,
1014 					     insn->sec, insn->offset))
1015 			return -1;
1016 
1017 		idx++;
1018 	}
1019 
1020 	return 0;
1021 }
1022 
#ifdef BUILD_KLP
/*
 * Emit .discard.sym_checksum: one { addr, checksum } record per symbol
 * with a computed checksum, with a reloc filling in the address.
 */
static int create_sym_checksum_section(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *sym;
	unsigned int idx = 0;
	struct sym_checksum *checksum;
	size_t entsize = sizeof(struct sym_checksum);

	sec = find_section_by_name(file->elf, ".discard.sym_checksum");
	if (sec) {
		if (!opts.dryrun)
			WARN("file already has .discard.sym_checksum section, skipping");

		return 0;
	}

	/* Count symbols carrying a checksum to size the section. */
	for_each_sym(file->elf, sym)
		if (sym->csum.checksum)
			idx++;

	if (!idx)
		return 0;

	sec = elf_create_section_pair(file->elf, ".discard.sym_checksum", entsize,
				      idx, idx);
	if (!sec)
		return -1;

	idx = 0;
	for_each_sym(file->elf, sym) {
		if (!sym->csum.checksum)
			continue;

		if (!elf_init_reloc(file->elf, sec->rsec, idx, idx * entsize,
				    sym, 0, R_TEXT64))
			return -1;

		checksum = (struct sym_checksum *)sec->data->d_buf + idx;
		checksum->addr = 0; /* reloc */
		checksum->checksum = sym->csum.checksum;

		mark_sec_changed(file->elf, sec, true);

		idx++;
	}

	return 0;
}
#else
/* Checksums are only produced in klp-enabled (BUILD_KLP) builds. */
static int create_sym_checksum_section(struct objtool_file *file) { return -EINVAL; }
#endif
1075 
1076 /*
1077  * Warnings shouldn't be reported for ignored functions.
1078  */
1079 static int add_ignores(struct objtool_file *file)
1080 {
1081 	struct section *rsec;
1082 	struct symbol *func;
1083 	struct reloc *reloc;
1084 
1085 	rsec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
1086 	if (!rsec)
1087 		return 0;
1088 
1089 	for_each_reloc(rsec, reloc) {
1090 		switch (reloc->sym->type) {
1091 		case STT_FUNC:
1092 			func = reloc->sym;
1093 			break;
1094 
1095 		case STT_SECTION:
1096 			func = find_func_by_offset(reloc->sym->sec, reloc_addend(reloc));
1097 			if (!func)
1098 				continue;
1099 			break;
1100 
1101 		default:
1102 			ERROR("unexpected relocation symbol type in %s: %d",
1103 			      rsec->name, reloc->sym->type);
1104 			return -1;
1105 		}
1106 
1107 		func->ignore = true;
1108 		if (func->cfunc)
1109 			func->cfunc->ignore = true;
1110 	}
1111 
1112 	return 0;
1113 }
1114 
1115 /*
1116  * This is a whitelist of functions that is allowed to be called with AC set.
1117  * The list is meant to be minimal and only contains compiler instrumentation
1118  * ABI and a few functions used to implement *_{to,from}_user() functions.
1119  *
1120  * These functions must not directly change AC, but may PUSHF/POPF.
1121  */
static const char *uaccess_safe_builtin[] = {
	/* KASAN */
	"kasan_report",
	"kasan_check_range",
	/* KASAN out-of-line */
	"__asan_loadN_noabort",
	"__asan_load1_noabort",
	"__asan_load2_noabort",
	"__asan_load4_noabort",
	"__asan_load8_noabort",
	"__asan_load16_noabort",
	"__asan_storeN_noabort",
	"__asan_store1_noabort",
	"__asan_store2_noabort",
	"__asan_store4_noabort",
	"__asan_store8_noabort",
	"__asan_store16_noabort",
	"__kasan_check_read",
	"__kasan_check_write",
	/* KASAN in-line */
	"__asan_report_load_n_noabort",
	"__asan_report_load1_noabort",
	"__asan_report_load2_noabort",
	"__asan_report_load4_noabort",
	"__asan_report_load8_noabort",
	"__asan_report_load16_noabort",
	"__asan_report_store_n_noabort",
	"__asan_report_store1_noabort",
	"__asan_report_store2_noabort",
	"__asan_report_store4_noabort",
	"__asan_report_store8_noabort",
	"__asan_report_store16_noabort",
	/* KCSAN */
	"__kcsan_check_access",
	"__kcsan_mb",
	"__kcsan_wmb",
	"__kcsan_rmb",
	"__kcsan_release",
	"kcsan_found_watchpoint",
	"kcsan_setup_watchpoint",
	"kcsan_check_scoped_accesses",
	"kcsan_disable_current",
	"kcsan_enable_current_nowarn",
	/* KCSAN/TSAN */
	"__tsan_func_entry",
	"__tsan_func_exit",
	"__tsan_read_range",
	"__tsan_write_range",
	"__tsan_read1",
	"__tsan_read2",
	"__tsan_read4",
	"__tsan_read8",
	"__tsan_read16",
	"__tsan_write1",
	"__tsan_write2",
	"__tsan_write4",
	"__tsan_write8",
	"__tsan_write16",
	"__tsan_read_write1",
	"__tsan_read_write2",
	"__tsan_read_write4",
	"__tsan_read_write8",
	"__tsan_read_write16",
	"__tsan_volatile_read1",
	"__tsan_volatile_read2",
	"__tsan_volatile_read4",
	"__tsan_volatile_read8",
	"__tsan_volatile_read16",
	"__tsan_volatile_write1",
	"__tsan_volatile_write2",
	"__tsan_volatile_write4",
	"__tsan_volatile_write8",
	"__tsan_volatile_write16",
	"__tsan_atomic8_load",
	"__tsan_atomic16_load",
	"__tsan_atomic32_load",
	"__tsan_atomic64_load",
	"__tsan_atomic8_store",
	"__tsan_atomic16_store",
	"__tsan_atomic32_store",
	"__tsan_atomic64_store",
	"__tsan_atomic8_exchange",
	"__tsan_atomic16_exchange",
	"__tsan_atomic32_exchange",
	"__tsan_atomic64_exchange",
	"__tsan_atomic8_fetch_add",
	"__tsan_atomic16_fetch_add",
	"__tsan_atomic32_fetch_add",
	"__tsan_atomic64_fetch_add",
	"__tsan_atomic8_fetch_sub",
	"__tsan_atomic16_fetch_sub",
	"__tsan_atomic32_fetch_sub",
	"__tsan_atomic64_fetch_sub",
	"__tsan_atomic8_fetch_and",
	"__tsan_atomic16_fetch_and",
	"__tsan_atomic32_fetch_and",
	"__tsan_atomic64_fetch_and",
	"__tsan_atomic8_fetch_or",
	"__tsan_atomic16_fetch_or",
	"__tsan_atomic32_fetch_or",
	"__tsan_atomic64_fetch_or",
	"__tsan_atomic8_fetch_xor",
	"__tsan_atomic16_fetch_xor",
	"__tsan_atomic32_fetch_xor",
	"__tsan_atomic64_fetch_xor",
	"__tsan_atomic8_fetch_nand",
	"__tsan_atomic16_fetch_nand",
	"__tsan_atomic32_fetch_nand",
	"__tsan_atomic64_fetch_nand",
	"__tsan_atomic8_compare_exchange_strong",
	"__tsan_atomic16_compare_exchange_strong",
	"__tsan_atomic32_compare_exchange_strong",
	"__tsan_atomic64_compare_exchange_strong",
	"__tsan_atomic8_compare_exchange_weak",
	"__tsan_atomic16_compare_exchange_weak",
	"__tsan_atomic32_compare_exchange_weak",
	"__tsan_atomic64_compare_exchange_weak",
	"__tsan_atomic8_compare_exchange_val",
	"__tsan_atomic16_compare_exchange_val",
	"__tsan_atomic32_compare_exchange_val",
	"__tsan_atomic64_compare_exchange_val",
	"__tsan_atomic_thread_fence",
	"__tsan_atomic_signal_fence",
	"__tsan_unaligned_read16",
	"__tsan_unaligned_write16",
	/* KCOV */
	"write_comp_data",
	"check_kcov_mode",
	"__sanitizer_cov_trace_pc",
	"__sanitizer_cov_trace_const_cmp1",
	"__sanitizer_cov_trace_const_cmp2",
	"__sanitizer_cov_trace_const_cmp4",
	"__sanitizer_cov_trace_const_cmp8",
	"__sanitizer_cov_trace_cmp1",
	"__sanitizer_cov_trace_cmp2",
	"__sanitizer_cov_trace_cmp4",
	"__sanitizer_cov_trace_cmp8",
	"__sanitizer_cov_trace_switch",
	/* KMSAN */
	"kmsan_copy_to_user",
	"kmsan_disable_current",
	"kmsan_enable_current",
	"kmsan_report",
	"kmsan_unpoison_entry_regs",
	"kmsan_unpoison_memory",
	"__msan_chain_origin",
	"__msan_get_context_state",
	"__msan_instrument_asm_store",
	"__msan_metadata_ptr_for_load_1",
	"__msan_metadata_ptr_for_load_2",
	"__msan_metadata_ptr_for_load_4",
	"__msan_metadata_ptr_for_load_8",
	"__msan_metadata_ptr_for_load_n",
	"__msan_metadata_ptr_for_store_1",
	"__msan_metadata_ptr_for_store_2",
	"__msan_metadata_ptr_for_store_4",
	"__msan_metadata_ptr_for_store_8",
	"__msan_metadata_ptr_for_store_n",
	"__msan_poison_alloca",
	"__msan_warning",
	/* UBSAN */
	"ubsan_type_mismatch_common",
	"__ubsan_handle_type_mismatch",
	"__ubsan_handle_type_mismatch_v1",
	"__ubsan_handle_shift_out_of_bounds",
	"__ubsan_handle_load_invalid_value",
	/* KSTACK_ERASE */
	"__sanitizer_cov_stack_depth",
	/* TRACE_BRANCH_PROFILING */
	"ftrace_likely_update",
	/* STACKPROTECTOR */
	"__stack_chk_fail",
	/* misc */
	"csum_partial_copy_generic",
	"copy_mc_fragile",
	"copy_mc_fragile_handle_tail",
	"copy_mc_enhanced_fast_string",
	"rep_stos_alternative",
	"rep_movs_alternative",
	"__copy_user_nocache",
	NULL	/* sentinel: add_uaccess_safe() iterates until NULL */
};
1304 
1305 static void add_uaccess_safe(struct objtool_file *file)
1306 {
1307 	struct symbol *func;
1308 	const char **name;
1309 
1310 	if (!opts.uaccess)
1311 		return;
1312 
1313 	for (name = uaccess_safe_builtin; *name; name++) {
1314 		func = find_symbol_by_name(file->elf, *name);
1315 		if (!func)
1316 			continue;
1317 
1318 		func->uaccess_safe = true;
1319 	}
1320 }
1321 
1322 /*
1323  * Symbols that replace INSN_CALL_DYNAMIC, every (tail) call to such a symbol
1324  * will be added to the .retpoline_sites section.
1325  */
__weak bool arch_is_retpoline(struct symbol *sym)
{
	/* Default for arches with no retpoline thunk symbols. */
	return false;
}
1330 
1331 /*
1332  * Symbols that replace INSN_RETURN, every (tail) call to such a symbol
1333  * will be added to the .return_sites section.
1334  */
__weak bool arch_is_rethunk(struct symbol *sym)
{
	/* Default for arches with no return thunk symbols. */
	return false;
}
1339 
1340 /*
1341  * Symbols that are embedded inside other instructions, because sometimes crazy
1342  * code exists. These are mostly ignored for validation purposes.
1343  */
__weak bool arch_is_embedded_insn(struct symbol *sym)
{
	/* Default for arches with no instructions embedded in other insns. */
	return false;
}
1348 
1349 static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
1350 {
1351 	struct reloc *reloc;
1352 
1353 	if (insn->no_reloc)
1354 		return NULL;
1355 
1356 	if (!file)
1357 		return NULL;
1358 
1359 	reloc = find_reloc_by_dest_range(file->elf, insn->sec,
1360 					 insn->offset, insn->len);
1361 	if (!reloc) {
1362 		insn->no_reloc = 1;
1363 		return NULL;
1364 	}
1365 
1366 	return reloc;
1367 }
1368 
1369 static void remove_insn_ops(struct instruction *insn)
1370 {
1371 	struct stack_op *op, *next;
1372 
1373 	for (op = insn->stack_ops; op; op = next) {
1374 		next = op->next;
1375 		free(op);
1376 	}
1377 	insn->stack_ops = NULL;
1378 }
1379 
/*
 * Classify the call at @insn (a CALL, or a tail-call JMP when @sibling) and
 * queue it on the matching per-file list: static call, retpoline, mcount or
 * plain call.  Depending on options, some call sites are also rewritten in
 * place (NOPed out, or turned into a RET for sibling calls).
 *
 * Returns 0 on success, -1 on error.
 */
static int annotate_call_site(struct objtool_file *file,
			       struct instruction *insn, bool sibling)
{
	struct reloc *reloc = insn_reloc(file, insn);
	struct symbol *sym = insn_call_dest(insn);

	/*
	 * No resolved call destination: fall back to the reloc's symbol.
	 * NOTE(review): assumes a reloc exists whenever the call dest is
	 * unset -- confirm with callers.
	 */
	if (!sym)
		sym = reloc->sym;

	if (sym->static_call_tramp) {
		list_add_tail(&insn->call_node, &file->static_call_list);
		return 0;
	}

	if (sym->retpoline_thunk) {
		list_add_tail(&insn->call_node, &file->retpoline_call_list);
		return 0;
	}

	/*
	 * Many compilers cannot disable KCOV or sanitizer calls with a function
	 * attribute so they need a little help, NOP out any such calls from
	 * noinstr text.
	 */
	if (opts.hack_noinstr && insn->sec->noinstr && sym->profiling_func) {
		if (reloc)
			set_reloc_type(file->elf, reloc, R_NONE);

		/* A sibling call becomes a RET, a regular call becomes a NOP. */
		if (elf_write_insn(file->elf, insn->sec,
				   insn->offset, insn->len,
				   sibling ? arch_ret_insn(insn->len)
					   : arch_nop_insn(insn->len))) {
			return -1;
		}

		insn->type = sibling ? INSN_RETURN : INSN_NOP;

		if (sibling) {
			/*
			 * We've replaced the tail-call JMP insn by two new
			 * insn: RET; INT3, except we only have a single struct
			 * insn here. Mark it retpoline_safe to avoid the SLS
			 * warning, instead of adding another insn.
			 */
			insn->retpoline_safe = true;
		}

		return 0;
	}

	if (opts.mcount && sym->fentry) {
		if (sibling)
			WARN_INSN(insn, "tail call to __fentry__ !?!?");
		if (opts.mnop) {
			/* NOP the __fentry__ call site out in place. */
			if (reloc)
				set_reloc_type(file->elf, reloc, R_NONE);

			if (elf_write_insn(file->elf, insn->sec,
					   insn->offset, insn->len,
					   arch_nop_insn(insn->len))) {
				return -1;
			}

			insn->type = INSN_NOP;
		}

		/* Still record the site for the __mcount_loc table. */
		list_add_tail(&insn->call_node, &file->mcount_loc_list);
		return 0;
	}

	/* Ordinary direct calls (outside init text, not embedded insns). */
	if (insn->type == INSN_CALL && !insn->sec->init &&
	    !insn->_call_dest->embedded_insn)
		list_add_tail(&insn->call_node, &file->call_list);

	/* Only non-sibling calls can be dead ends; a JMP never returns here. */
	if (!sibling && dead_end_function(file, sym))
		insn->dead_end = true;

	return 0;
}
1459 
1460 static int add_call_dest(struct objtool_file *file, struct instruction *insn,
1461 			  struct symbol *dest, bool sibling)
1462 {
1463 	insn->_call_dest = dest;
1464 	if (!dest)
1465 		return 0;
1466 
1467 	/*
1468 	 * Whatever stack impact regular CALLs have, should be undone
1469 	 * by the RETURN of the called function.
1470 	 *
1471 	 * Annotated intra-function calls retain the stack_ops but
1472 	 * are converted to JUMP, see read_intra_function_calls().
1473 	 */
1474 	remove_insn_ops(insn);
1475 
1476 	return annotate_call_site(file, insn, sibling);
1477 }
1478 
1479 static int add_retpoline_call(struct objtool_file *file, struct instruction *insn)
1480 {
1481 	/*
1482 	 * Retpoline calls/jumps are really dynamic calls/jumps in disguise,
1483 	 * so convert them accordingly.
1484 	 */
1485 	switch (insn->type) {
1486 	case INSN_CALL:
1487 		insn->type = INSN_CALL_DYNAMIC;
1488 		break;
1489 	case INSN_JUMP_UNCONDITIONAL:
1490 		insn->type = INSN_JUMP_DYNAMIC;
1491 		break;
1492 	case INSN_JUMP_CONDITIONAL:
1493 		insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;
1494 		break;
1495 	default:
1496 		return 0;
1497 	}
1498 
1499 	insn->retpoline_safe = true;
1500 
1501 	/*
1502 	 * Whatever stack impact regular CALLs have, should be undone
1503 	 * by the RETURN of the called function.
1504 	 *
1505 	 * Annotated intra-function calls retain the stack_ops but
1506 	 * are converted to JUMP, see read_intra_function_calls().
1507 	 */
1508 	remove_insn_ops(insn);
1509 
1510 	return annotate_call_site(file, insn, false);
1511 }
1512 
1513 static void add_return_call(struct objtool_file *file, struct instruction *insn, bool add)
1514 {
1515 	/*
1516 	 * Return thunk tail calls are really just returns in disguise,
1517 	 * so convert them accordingly.
1518 	 */
1519 	insn->type = INSN_RETURN;
1520 	insn->retpoline_safe = true;
1521 
1522 	if (add)
1523 		list_add_tail(&insn->call_node, &file->return_thunk_list);
1524 }
1525 
1526 static bool is_first_func_insn(struct objtool_file *file,
1527 			       struct instruction *insn)
1528 {
1529 	struct symbol *func = insn_func(insn);
1530 
1531 	if (!func)
1532 		return false;
1533 
1534 	if (insn->offset == func->offset)
1535 		return true;
1536 
1537 	/* Allow direct CALL/JMP past ENDBR */
1538 	if (opts.ibt) {
1539 		struct instruction *prev = prev_insn_same_sym(file, insn);
1540 
1541 		if (prev && prev->type == INSN_ENDBR &&
1542 		    insn->offset == func->offset + prev->len)
1543 			return true;
1544 	}
1545 
1546 	return false;
1547 }
1548 
1549 /*
1550  * Find the destination instructions for all jumps.
1551  */
static int add_jump_destinations(struct objtool_file *file)
{
	struct instruction *insn;
	struct reloc *reloc;

	for_each_insn(file, insn) {
		struct symbol *func = insn_func(insn);
		struct instruction *dest_insn;
		struct section *dest_sec;
		struct symbol *dest_sym;
		unsigned long dest_off;

		if (!is_static_jump(insn))
			continue;

		if (insn->jump_dest) {
			/*
			 * handle_group_alt() may have previously set
			 * 'jump_dest' for some alternatives.
			 */
			continue;
		}

		reloc = insn_reloc(file, insn);
		if (!reloc) {
			/* No reloc: destination encoded in the insn, same section. */
			dest_sec = insn->sec;
			dest_off = arch_jump_destination(insn);
			dest_sym = dest_sec->sym;
		} else {
			dest_sym = reloc->sym;
			if (is_undef_sym(dest_sym)) {
				/* Undefined symbols: thunks or true externals. */
				if (dest_sym->retpoline_thunk) {
					if (add_retpoline_call(file, insn))
						return -1;
					continue;
				}

				if (dest_sym->return_thunk) {
					add_return_call(file, insn, true);
					continue;
				}

				/* External symbol */
				if (func) {
					/* External sibling call */
					if (add_call_dest(file, insn, dest_sym, true))
						return -1;
					continue;
				}

				/* Non-func asm code jumping to external symbol */
				continue;
			}

			dest_sec = dest_sym->sec;
			dest_off = dest_sym->offset + arch_insn_adjusted_addend(insn, reloc);
		}

		dest_insn = find_insn(file, dest_sec, dest_off);
		if (!dest_insn) {
			struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off);

			/*
			 * retbleed_untrain_ret() jumps to
			 * __x86_return_thunk(), but objtool can't find
			 * the thunk's starting RET instruction,
			 * because the RET is also in the middle of
			 * another instruction.  Objtool only knows
			 * about the outer instruction.
			 */
			if (sym && sym->embedded_insn) {
				add_return_call(file, insn, false);
				continue;
			}

			/*
			 * GCOV/KCOV dead code can jump to the end of
			 * the function/section.
			 */
			if (file->ignore_unreachables && func &&
			    dest_sec == insn->sec &&
			    dest_off == func->offset + func->len)
				continue;

			ERROR_INSN(insn, "can't find jump dest instruction at %s",
				   offstr(dest_sec, dest_off));
			return -1;
		}

		/* Resolve section symbols to the symbol owning the target insn. */
		if (!dest_sym || is_sec_sym(dest_sym)) {
			dest_sym = dest_insn->sym;
			if (!dest_sym)
				goto set_jump_dest;
		}

		/* Defined thunk symbols, when jumped to at their start. */
		if (dest_sym->retpoline_thunk && dest_insn->offset == dest_sym->offset) {
			if (add_retpoline_call(file, insn))
				return -1;
			continue;
		}

		if (dest_sym->return_thunk && dest_insn->offset == dest_sym->offset) {
			add_return_call(file, insn, true);
			continue;
		}

		if (!insn->sym || insn->sym->pfunc == dest_sym->pfunc)
			goto set_jump_dest;

		/*
		 * Internal cross-function jump.
		 */

		if (is_first_func_insn(file, dest_insn)) {
			/* Internal sibling call */
			if (add_call_dest(file, insn, dest_sym, true))
				return -1;
			continue;
		}

set_jump_dest:
		insn->jump_dest = dest_insn;
	}

	return 0;
}
1678 
/*
 * Resolve a call destination at @sec+@offset: prefer a function symbol,
 * fall back to any symbol covering the offset.
 */
static struct symbol *find_call_destination(struct section *sec, unsigned long offset)
{
	struct symbol *sym = find_func_by_offset(sec, offset);

	return sym ? sym : find_symbol_by_offset(sec, offset);
}
1689 
1690 /*
1691  * Find the destination instructions for all calls.
1692  */
static int add_call_destinations(struct objtool_file *file)
{
	struct instruction *insn;
	unsigned long dest_off;
	struct symbol *dest;
	struct reloc *reloc;

	for_each_insn(file, insn) {
		struct symbol *func = insn_func(insn);
		if (insn->type != INSN_CALL)
			continue;

		reloc = insn_reloc(file, insn);
		if (!reloc) {
			/* No reloc: destination encoded in the insn, same section. */
			dest_off = arch_jump_destination(insn);
			dest = find_call_destination(insn->sec, dest_off);

			if (add_call_dest(file, insn, dest, false))
				return -1;

			if (func && func->ignore)
				continue;

			/* A reloc-less call must still land on a known symbol. */
			if (!insn_call_dest(insn)) {
				ERROR_INSN(insn, "unannotated intra-function call");
				return -1;
			}

			if (func && !is_func_sym(insn_call_dest(insn))) {
				ERROR_INSN(insn, "unsupported call to non-function");
				return -1;
			}

		} else if (is_sec_sym(reloc->sym)) {
			/* Section-relative reloc: resolve the covering symbol. */
			dest_off = arch_insn_adjusted_addend(insn, reloc);
			dest = find_call_destination(reloc->sym->sec, dest_off);
			if (!dest) {
				ERROR_INSN(insn, "can't find call dest symbol at %s+0x%lx",
					   reloc->sym->sec->name, dest_off);
				return -1;
			}

			if (add_call_dest(file, insn, dest, false))
				return -1;

		} else if (reloc->sym->retpoline_thunk) {
			if (add_retpoline_call(file, insn))
				return -1;

		} else {
			/* Direct call to a named symbol. */
			if (add_call_dest(file, insn, reloc->sym, false))
				return -1;
		}
	}

	return 0;
}
1750 
1751 /*
1752  * The .alternatives section requires some extra special care over and above
1753  * other special sections because alternatives are patched in place.
1754  */
1755 static int handle_group_alt(struct objtool_file *file,
1756 			    struct special_alt *special_alt,
1757 			    struct instruction *orig_insn,
1758 			    struct instruction **new_insn)
1759 {
1760 	struct instruction *last_new_insn = NULL, *insn, *nop = NULL;
1761 	struct alt_group *orig_alt_group, *new_alt_group;
1762 	unsigned long dest_off;
1763 
1764 	orig_alt_group = orig_insn->alt_group;
1765 	if (!orig_alt_group) {
1766 		struct instruction *last_orig_insn = NULL;
1767 
1768 		orig_alt_group = calloc(1, sizeof(*orig_alt_group));
1769 		if (!orig_alt_group) {
1770 			ERROR_GLIBC("calloc");
1771 			return -1;
1772 		}
1773 		orig_alt_group->cfi = calloc(special_alt->orig_len,
1774 					     sizeof(struct cfi_state *));
1775 		if (!orig_alt_group->cfi) {
1776 			ERROR_GLIBC("calloc");
1777 			return -1;
1778 		}
1779 
1780 		insn = orig_insn;
1781 		sec_for_each_insn_from(file, insn) {
1782 			if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
1783 				break;
1784 
1785 			insn->alt_group = orig_alt_group;
1786 			last_orig_insn = insn;
1787 		}
1788 		orig_alt_group->orig_group = NULL;
1789 		orig_alt_group->first_insn = orig_insn;
1790 		orig_alt_group->last_insn = last_orig_insn;
1791 		orig_alt_group->nop = NULL;
1792 		orig_alt_group->ignore = orig_insn->ignore_alts;
1793 		orig_alt_group->feature = 0;
1794 	} else {
1795 		if (orig_alt_group->last_insn->offset + orig_alt_group->last_insn->len -
1796 		    orig_alt_group->first_insn->offset != special_alt->orig_len) {
1797 			ERROR_INSN(orig_insn, "weirdly overlapping alternative! %ld != %d",
1798 				   orig_alt_group->last_insn->offset +
1799 				   orig_alt_group->last_insn->len -
1800 				   orig_alt_group->first_insn->offset,
1801 				   special_alt->orig_len);
1802 			return -1;
1803 		}
1804 	}
1805 
1806 	new_alt_group = calloc(1, sizeof(*new_alt_group));
1807 	if (!new_alt_group) {
1808 		ERROR_GLIBC("calloc");
1809 		return -1;
1810 	}
1811 
1812 	if (special_alt->new_len < special_alt->orig_len) {
1813 		/*
1814 		 * Insert a fake nop at the end to make the replacement
1815 		 * alt_group the same size as the original.  This is needed to
1816 		 * allow propagate_alt_cfi() to do its magic.  When the last
1817 		 * instruction affects the stack, the instruction after it (the
1818 		 * nop) will propagate the new state to the shared CFI array.
1819 		 */
1820 		nop = calloc(1, sizeof(*nop));
1821 		if (!nop) {
1822 			ERROR_GLIBC("calloc");
1823 			return -1;
1824 		}
1825 		memset(nop, 0, sizeof(*nop));
1826 
1827 		nop->sec = special_alt->new_sec;
1828 		nop->offset = special_alt->new_off + special_alt->new_len;
1829 		nop->len = special_alt->orig_len - special_alt->new_len;
1830 		nop->type = INSN_NOP;
1831 		nop->sym = orig_insn->sym;
1832 		nop->alt_group = new_alt_group;
1833 		nop->fake = 1;
1834 	}
1835 
1836 	if (!special_alt->new_len) {
1837 		*new_insn = nop;
1838 		goto end;
1839 	}
1840 
1841 	insn = *new_insn;
1842 	sec_for_each_insn_from(file, insn) {
1843 		struct reloc *alt_reloc;
1844 
1845 		if (insn->offset >= special_alt->new_off + special_alt->new_len)
1846 			break;
1847 
1848 		last_new_insn = insn;
1849 
1850 		insn->sym = orig_insn->sym;
1851 		insn->alt_group = new_alt_group;
1852 
1853 		/*
1854 		 * Since alternative replacement code is copy/pasted by the
1855 		 * kernel after applying relocations, generally such code can't
1856 		 * have relative-address relocation references to outside the
1857 		 * .altinstr_replacement section, unless the arch's
1858 		 * alternatives code can adjust the relative offsets
1859 		 * accordingly.
1860 		 */
1861 		alt_reloc = insn_reloc(file, insn);
1862 		if (alt_reloc && arch_pc_relative_reloc(alt_reloc) &&
1863 		    !arch_support_alt_relocation(special_alt, insn, alt_reloc)) {
1864 
1865 			ERROR_INSN(insn, "unsupported relocation in alternatives section");
1866 			return -1;
1867 		}
1868 
1869 		if (!is_static_jump(insn))
1870 			continue;
1871 
1872 		if (!insn->immediate)
1873 			continue;
1874 
1875 		dest_off = arch_jump_destination(insn);
1876 		if (dest_off == special_alt->new_off + special_alt->new_len) {
1877 			insn->jump_dest = next_insn_same_sec(file, orig_alt_group->last_insn);
1878 			if (!insn->jump_dest) {
1879 				ERROR_INSN(insn, "can't find alternative jump destination");
1880 				return -1;
1881 			}
1882 		}
1883 	}
1884 
1885 	if (!last_new_insn) {
1886 		ERROR_FUNC(special_alt->new_sec, special_alt->new_off,
1887 			   "can't find last new alternative instruction");
1888 		return -1;
1889 	}
1890 
1891 end:
1892 	new_alt_group->orig_group = orig_alt_group;
1893 	new_alt_group->first_insn = *new_insn;
1894 	new_alt_group->last_insn = last_new_insn;
1895 	new_alt_group->nop = nop;
1896 	new_alt_group->ignore = (*new_insn)->ignore_alts;
1897 	new_alt_group->cfi = orig_alt_group->cfi;
1898 	new_alt_group->feature = special_alt->feature;
1899 	return 0;
1900 }
1901 
1902 /*
1903  * A jump table entry can either convert a nop to a jump or a jump to a nop.
1904  * If the original instruction is a jump, make the alt entry an effective nop
1905  * by just skipping the original instruction.
1906  */
1907 static int handle_jump_alt(struct objtool_file *file,
1908 			   struct special_alt *special_alt,
1909 			   struct instruction *orig_insn,
1910 			   struct instruction **new_insn)
1911 {
1912 	if (orig_insn->type != INSN_JUMP_UNCONDITIONAL &&
1913 	    orig_insn->type != INSN_NOP) {
1914 
1915 		ERROR_INSN(orig_insn, "unsupported instruction at jump label");
1916 		return -1;
1917 	}
1918 
1919 	if (opts.hack_jump_label && special_alt->key_addend & 2) {
1920 		struct reloc *reloc = insn_reloc(file, orig_insn);
1921 
1922 		if (reloc)
1923 			set_reloc_type(file->elf, reloc, R_NONE);
1924 
1925 		if (elf_write_insn(file->elf, orig_insn->sec,
1926 				   orig_insn->offset, orig_insn->len,
1927 				   arch_nop_insn(orig_insn->len))) {
1928 			return -1;
1929 		}
1930 
1931 		orig_insn->type = INSN_NOP;
1932 	}
1933 
1934 	if (orig_insn->type == INSN_NOP) {
1935 		if (orig_insn->len == 2)
1936 			file->jl_nop_short++;
1937 		else
1938 			file->jl_nop_long++;
1939 
1940 		return 0;
1941 	}
1942 
1943 	if (orig_insn->len == 2)
1944 		file->jl_short++;
1945 	else
1946 		file->jl_long++;
1947 
1948 	*new_insn = next_insn_same_sec(file, orig_insn);
1949 	return 0;
1950 }
1951 
1952 /*
1953  * Read all the special sections which have alternate instructions which can be
1954  * patched in or redirected to at runtime.  Each instruction having alternate
1955  * instruction(s) has them added to its insn->alts list, which will be
1956  * traversed in validate_branch().
1957  */
static int add_special_section_alts(struct objtool_file *file)
{
	struct list_head special_alts;
	struct instruction *orig_insn, *new_insn;
	struct special_alt *special_alt, *tmp;
	enum alternative_type alt_type;
	struct alternative *alt;
	struct alternative *a;

	if (special_get_alts(file->elf, &special_alts))
		return -1;

	list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {

		orig_insn = find_insn(file, special_alt->orig_sec,
				      special_alt->orig_off);
		if (!orig_insn) {
			ERROR_FUNC(special_alt->orig_sec, special_alt->orig_off,
				   "special: can't find orig instruction");
			return -1;
		}

		/* Only group alts may have an empty replacement. */
		new_insn = NULL;
		if (!special_alt->group || special_alt->new_len) {
			new_insn = find_insn(file, special_alt->new_sec,
					     special_alt->new_off);
			if (!new_insn) {
				ERROR_FUNC(special_alt->new_sec, special_alt->new_off,
					   "special: can't find new instruction");
				return -1;
			}
		}

		if (special_alt->group) {
			/*
			 * NOTE(review): this 'continue' skips the list_del/free
			 * at the loop bottom, leaving the entry on the list --
			 * harmless for a one-shot tool, but worth confirming.
			 */
			if (!special_alt->orig_len) {
				ERROR_INSN(orig_insn, "empty alternative entry");
				continue;
			}

			if (handle_group_alt(file, special_alt, orig_insn, &new_insn))
				return -1;

			alt_type = ALT_TYPE_INSTRUCTIONS;

		} else if (special_alt->jump_or_nop) {
			if (handle_jump_alt(file, special_alt, orig_insn, &new_insn))
				return -1;

			alt_type = ALT_TYPE_JUMP_TABLE;
		} else {
			alt_type = ALT_TYPE_EX_TABLE;
		}

		alt = calloc(1, sizeof(*alt));
		if (!alt) {
			ERROR_GLIBC("calloc");
			return -1;
		}

		alt->insn = new_insn;
		alt->type = alt_type;
		alt->next = NULL;

		/*
		 * Store alternatives in the same order they have been
		 * defined.
		 */
		if (!orig_insn->alts) {
			orig_insn->alts = alt;
		} else {
			/* Append: walk to the tail of the singly-linked list. */
			for (a = orig_insn->alts; a->next; a = a->next)
				;
			a->next = alt;
		}

		list_del(&special_alt->list);
		free(special_alt);
	}

	if (opts.stats) {
		printf("jl\\\tNOP\tJMP\n");
		printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short);
		printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long);
	}

	return 0;
}
2045 
/* Default: a table entry targets its reloc's symbol base plus the addend. */
__weak unsigned long arch_jump_table_sym_offset(struct reloc *reloc, struct reloc *table)
{
	return reloc->sym->offset + reloc_addend(reloc);
}
2050 
/*
 * Walk the relocations of the jump table associated with @insn and add each
 * in-function target as an alternative branch destination of @insn.
 *
 * Returns 0 on success, -1 if no valid table entry was found or on OOM.
 */
static int add_jump_table(struct objtool_file *file, struct instruction *insn)
{
	unsigned long table_size = insn_jump_table_size(insn);
	struct symbol *pfunc = insn_func(insn)->pfunc;
	struct reloc *table = insn_jump_table(insn);
	struct instruction *dest_insn;
	unsigned int prev_offset = 0;
	struct reloc *reloc = table;
	struct alternative *alt;
	unsigned long sym_offset;

	/*
	 * Each @reloc is a switch table relocation which points to the target
	 * instruction.
	 */
	for_each_reloc_from(table->sec, reloc) {

		/* Check for the end of the table: */
		if (table_size && reloc_offset(reloc) - reloc_offset(table) >= table_size)
			break;
		if (reloc != table && is_jump_table(reloc))
			break;

		/* Make sure the table entries are consecutive: */
		if (prev_offset && reloc_offset(reloc) != prev_offset + arch_reloc_size(reloc))
			break;

		sym_offset = arch_jump_table_sym_offset(reloc, table);

		/* Detect function pointers from contiguous objects: */
		if (reloc->sym->sec == pfunc->sec && sym_offset == pfunc->offset)
			break;

		/*
		 * Clang sometimes leaves dangling unused jump table entries
		 * which point to the end of the function.  Ignore them.
		 */
		if (reloc->sym->sec == pfunc->sec &&
		    sym_offset == pfunc->offset + pfunc->len)
			goto next;

		dest_insn = find_insn(file, reloc->sym->sec, sym_offset);
		if (!dest_insn)
			break;

		/* Make sure the destination is in the same function: */
		if (!insn_func(dest_insn) || insn_func(dest_insn)->pfunc != pfunc)
			break;

		alt = calloc(1, sizeof(*alt));
		if (!alt) {
			ERROR_GLIBC("calloc");
			return -1;
		}

		/* Prepend the target to @insn's alternatives list. */
		alt->insn = dest_insn;
		alt->next = insn->alts;
		insn->alts = alt;
next:
		prev_offset = reloc_offset(reloc);
	}

	/* prev_offset == 0 means not even the first entry was usable. */
	if (!prev_offset) {
		ERROR_INSN(insn, "can't find switch jump table");
		return -1;
	}

	return 0;
}
2120 
2121 /*
2122  * find_jump_table() - Given a dynamic jump, find the switch jump table
2123  * associated with it.
2124  */
static void find_jump_table(struct objtool_file *file, struct symbol *func,
			    struct instruction *insn)
{
	struct reloc *table_reloc;
	struct instruction *dest_insn, *orig_insn = insn;
	unsigned long table_size;
	unsigned long sym_offset;

	/*
	 * Backward search using the @first_jump_src links, these help avoid
	 * much of the 'in between' code. Which avoids us getting confused by
	 * it.
	 */
	for (;
	     insn && insn_func(insn) && insn_func(insn)->pfunc == func;
	     insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) {

		/* Stop at an earlier dynamic jump: it owns its own table. */
		if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
			break;

		/* allow small jumps within the range */
		if (insn->type == INSN_JUMP_UNCONDITIONAL &&
		    insn->jump_dest &&
		    (insn->jump_dest->offset <= insn->offset ||
		     insn->jump_dest->offset > orig_insn->offset))
			break;

		table_reloc = arch_find_switch_table(file, insn, &table_size);
		if (!table_reloc)
			continue;

		sym_offset = table_reloc->sym->offset + reloc_addend(table_reloc);

		/* The first table entry must target an insn in this function. */
		dest_insn = find_insn(file, table_reloc->sym->sec, sym_offset);
		if (!dest_insn || !insn_func(dest_insn) || insn_func(dest_insn)->pfunc != func)
			continue;

		/* Found it: record the table on the original dynamic jump. */
		set_jump_table(table_reloc);
		orig_insn->_jump_table = table_reloc;
		orig_insn->_jump_table_size = table_size;

		break;
	}
}
2169 
2170 /*
2171  * First pass: Mark the head of each jump table so that in the next pass,
2172  * we know when a given jump table ends and the next one starts.
2173  */
static void mark_func_jump_tables(struct objtool_file *file,
				    struct symbol *func)
{
	/* @last tracks the furthest point reached so far, to keep the
	 * first_jump_src links strictly forward-moving. */
	struct instruction *insn, *last = NULL;

	func_for_each_insn(file, func, insn) {
		if (!last)
			last = insn;

		/*
		 * Store back-pointers for unconditional forward jumps such
		 * that find_jump_table() can back-track using those and
		 * avoid some potentially confusing code.
		 */
		if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
		    insn->offset > last->offset &&
		    insn->jump_dest->offset > insn->offset &&
		    !insn->jump_dest->first_jump_src) {

			insn->jump_dest->first_jump_src = insn;
			last = insn->jump_dest;
		}

		if (insn->type != INSN_JUMP_DYNAMIC)
			continue;

		/* Each dynamic jump is a potential switch statement. */
		find_jump_table(file, func, insn);
	}
}
2203 
2204 static int add_func_jump_tables(struct objtool_file *file,
2205 				  struct symbol *func)
2206 {
2207 	struct instruction *insn;
2208 
2209 	func_for_each_insn(file, func, insn) {
2210 		if (!insn_jump_table(insn))
2211 			continue;
2212 
2213 		if (add_jump_table(file, insn))
2214 			return -1;
2215 	}
2216 
2217 	return 0;
2218 }
2219 
2220 /*
2221  * For some switch statements, gcc generates a jump table in the .rodata
2222  * section which contains a list of addresses within the function to jump to.
2223  * This finds these jump tables and adds them to the insn->alts lists.
2224  */
2225 static int add_jump_table_alts(struct objtool_file *file)
2226 {
2227 	struct symbol *func;
2228 
2229 	if (!file->rodata)
2230 		return 0;
2231 
2232 	for_each_sym(file->elf, func) {
2233 		if (!is_func_sym(func) || func->alias != func)
2234 			continue;
2235 
2236 		mark_func_jump_tables(file, func);
2237 		if (add_func_jump_tables(file, func))
2238 			return -1;
2239 	}
2240 
2241 	return 0;
2242 }
2243 
2244 static void set_func_state(struct cfi_state *state)
2245 {
2246 	state->cfa = initial_func_cfi.cfa;
2247 	memcpy(&state->regs, &initial_func_cfi.regs,
2248 	       CFI_NUM_REGS * sizeof(struct cfi_reg));
2249 	state->stack_size = initial_func_cfi.cfa.offset;
2250 	state->type = UNWIND_HINT_TYPE_CALL;
2251 }
2252 
/*
 * Parse the .discard.unwind_hints section and attach each hint's unwind
 * information to the instruction it annotates.
 *
 * Returns 0 on success (or if no hint section exists), -1 on a malformed
 * section or an unresolvable hint target.
 */
static int read_unwind_hints(struct objtool_file *file)
{
	struct cfi_state cfi = init_cfi;
	struct section *sec;
	struct unwind_hint *hint;
	struct instruction *insn;
	struct reloc *reloc;
	unsigned long offset;
	int i;

	sec = find_section_by_name(file->elf, ".discard.unwind_hints");
	if (!sec)
		return 0;

	if (!sec->rsec) {
		ERROR("missing .rela.discard.unwind_hints section");
		return -1;
	}

	/* The section must hold a whole number of hint entries. */
	if (sec_size(sec) % sizeof(struct unwind_hint)) {
		ERROR("struct unwind_hint size mismatch");
		return -1;
	}

	file->hints = true;

	for (i = 0; i < sec_size(sec) / sizeof(struct unwind_hint); i++) {
		hint = (struct unwind_hint *)sec->data->d_buf + i;

		reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
		if (!reloc) {
			ERROR("can't find reloc for unwind_hints[%d]", i);
			return -1;
		}

		/* Resolve the instruction this hint applies to. */
		offset = reloc->sym->offset + reloc_addend(reloc);

		insn = find_insn(file, reloc->sym->sec, offset);
		if (!insn) {
			ERROR("can't find insn for unwind_hints[%d]", i);
			return -1;
		}

		insn->hint = true;

		if (hint->type == UNWIND_HINT_TYPE_UNDEFINED) {
			insn->cfi = &force_undefined_cfi;
			continue;
		}

		/* SAVE/RESTORE hints are markers, not unwind state themselves. */
		if (hint->type == UNWIND_HINT_TYPE_SAVE) {
			insn->hint = false;
			insn->save = true;
			continue;
		}

		if (hint->type == UNWIND_HINT_TYPE_RESTORE) {
			insn->restore = true;
			continue;
		}

		if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) {
			struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset);

			if (sym && is_global_sym(sym)) {
				/* With IBT, a global entry point must start with ENDBR. */
				if (opts.ibt && insn->type != INSN_ENDBR && !insn->noendbr) {
					ERROR_INSN(insn, "UNWIND_HINT_IRET_REGS without ENDBR");
					return -1;
				}
			}
		}

		if (hint->type == UNWIND_HINT_TYPE_FUNC) {
			insn->cfi = &func_cfi;
			continue;
		}

		/* Start from the instruction's existing CFI, if it has one. */
		if (insn->cfi)
			cfi = *(insn->cfi);

		if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) {
			ERROR_INSN(insn, "unsupported unwind_hint sp base reg %d", hint->sp_reg);
			return -1;
		}

		cfi.cfa.offset = bswap_if_needed(file->elf, hint->sp_offset);
		cfi.type = hint->type;
		cfi.signal = hint->signal;

		insn->cfi = cfi_hash_find_or_add(&cfi);
	}

	return 0;
}
2347 
/*
 * Walk the .discard.annotate_insn section and invoke @func for each
 * (instruction, annotation type) pair found there.
 *
 * Returns 0 on success (or if the section is absent), -1 on error.
 */
static int read_annotate(struct objtool_file *file,
			 int (*func)(struct objtool_file *file, int type, struct instruction *insn))
{
	struct section *sec;
	struct instruction *insn;
	struct reloc *reloc;
	uint64_t offset;
	int type;

	sec = find_section_by_name(file->elf, ".discard.annotate_insn");
	if (!sec)
		return 0;

	if (!sec->rsec)
		return 0;

	if (sec->sh.sh_entsize != 8) {
		/* Some linkers emit a bogus entry size; warn once and fix it up. */
		static bool warned = false;
		if (!warned && opts.verbose) {
			WARN("%s: dodgy linker, sh_entsize != 8", sec->name);
			warned = true;
		}
		sec->sh.sh_entsize = 8;
	}

	/* Every annotation entry must have a matching relocation. */
	if (sec_num_entries(sec) != sec_num_entries(sec->rsec)) {
		ERROR("bad .discard.annotate_insn section: missing relocs");
		return -1;
	}

	for_each_reloc(sec->rsec, reloc) {
		type = annotype(file->elf, sec, reloc);
		offset = reloc->sym->offset + reloc_addend(reloc);
		insn = find_insn(file, reloc->sym->sec, offset);

		if (!insn) {
			ERROR("bad .discard.annotate_insn entry: %d of type %d", reloc_idx(reloc), type);
			return -1;
		}

		if (func(file, type, insn))
			return -1;
	}

	return 0;
}
2394 
2395 static int __annotate_early(struct objtool_file *file, int type, struct instruction *insn)
2396 {
2397 	switch (type) {
2398 
2399 	/* Must be before add_special_section_alts() */
2400 	case ANNOTYPE_IGNORE_ALTS:
2401 		insn->ignore_alts = true;
2402 		break;
2403 
2404 	/*
2405 	 * Must be before read_unwind_hints() since that needs insn->noendbr.
2406 	 */
2407 	case ANNOTYPE_NOENDBR:
2408 		insn->noendbr = 1;
2409 		break;
2410 
2411 	default:
2412 		break;
2413 	}
2414 
2415 	return 0;
2416 }
2417 
2418 static int __annotate_ifc(struct objtool_file *file, int type, struct instruction *insn)
2419 {
2420 	unsigned long dest_off;
2421 
2422 	if (type != ANNOTYPE_INTRA_FUNCTION_CALL)
2423 		return 0;
2424 
2425 	if (insn->type != INSN_CALL) {
2426 		ERROR_INSN(insn, "intra_function_call not a direct call");
2427 		return -1;
2428 	}
2429 
2430 	/*
2431 	 * Treat intra-function CALLs as JMPs, but with a stack_op.
2432 	 * See add_call_destinations(), which strips stack_ops from
2433 	 * normal CALLs.
2434 	 */
2435 	insn->type = INSN_JUMP_UNCONDITIONAL;
2436 
2437 	dest_off = arch_jump_destination(insn);
2438 	insn->jump_dest = find_insn(file, insn->sec, dest_off);
2439 	if (!insn->jump_dest) {
2440 		ERROR_INSN(insn, "can't find call dest at %s+0x%lx",
2441 			   insn->sec->name, dest_off);
2442 		return -1;
2443 	}
2444 
2445 	return 0;
2446 }
2447 
2448 static int __annotate_late(struct objtool_file *file, int type, struct instruction *insn)
2449 {
2450 	struct symbol *sym;
2451 
2452 	switch (type) {
2453 	case ANNOTYPE_NOENDBR:
2454 		/* early */
2455 		break;
2456 
2457 	case ANNOTYPE_RETPOLINE_SAFE:
2458 		if (insn->type != INSN_JUMP_DYNAMIC &&
2459 		    insn->type != INSN_CALL_DYNAMIC &&
2460 		    insn->type != INSN_RETURN &&
2461 		    insn->type != INSN_NOP) {
2462 			ERROR_INSN(insn, "retpoline_safe hint not an indirect jump/call/ret/nop");
2463 			return -1;
2464 		}
2465 
2466 		insn->retpoline_safe = true;
2467 		break;
2468 
2469 	case ANNOTYPE_INSTR_BEGIN:
2470 		insn->instr++;
2471 		break;
2472 
2473 	case ANNOTYPE_INSTR_END:
2474 		insn->instr--;
2475 		break;
2476 
2477 	case ANNOTYPE_UNRET_BEGIN:
2478 		insn->unret = 1;
2479 		break;
2480 
2481 	case ANNOTYPE_IGNORE_ALTS:
2482 		/* early */
2483 		break;
2484 
2485 	case ANNOTYPE_INTRA_FUNCTION_CALL:
2486 		/* ifc */
2487 		break;
2488 
2489 	case ANNOTYPE_REACHABLE:
2490 		insn->dead_end = false;
2491 		break;
2492 
2493 	case ANNOTYPE_NOCFI:
2494 		sym = insn->sym;
2495 		if (!sym) {
2496 			ERROR_INSN(insn, "dodgy NOCFI annotation");
2497 			return -1;
2498 		}
2499 		insn->sym->nocfi = 1;
2500 		break;
2501 
2502 	default:
2503 		ERROR_INSN(insn, "Unknown annotation type: %d", type);
2504 		return -1;
2505 	}
2506 
2507 	return 0;
2508 }
2509 
/*
 * Return true if name matches an instrumentation function, where calls to that
 * function from noinstr code can safely be removed, but compilers won't do so.
 */
static bool is_profiling_func(const char *name)
{
	/*
	 * Many compilers cannot disable KCOV with a function attribute.
	 *
	 * Use sizeof()-1 for the prefix length instead of a hand-counted
	 * constant, so the prefix string and its length can't drift apart.
	 */
	if (!strncmp(name, "__sanitizer_cov_", sizeof("__sanitizer_cov_") - 1))
		return true;

	return false;
}
2524 
2525 static int classify_symbols(struct objtool_file *file)
2526 {
2527 	struct symbol *func;
2528 	size_t len;
2529 
2530 	for_each_sym(file->elf, func) {
2531 		if (is_notype_sym(func) && strstarts(func->name, ".L"))
2532 			func->local_label = true;
2533 
2534 		if (!is_global_sym(func))
2535 			continue;
2536 
2537 		if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR,
2538 			     strlen(STATIC_CALL_TRAMP_PREFIX_STR)))
2539 			func->static_call_tramp = true;
2540 
2541 		if (arch_is_retpoline(func))
2542 			func->retpoline_thunk = true;
2543 
2544 		if (arch_is_rethunk(func))
2545 			func->return_thunk = true;
2546 
2547 		if (arch_is_embedded_insn(func))
2548 			func->embedded_insn = true;
2549 
2550 		if (arch_ftrace_match(func->name))
2551 			func->fentry = true;
2552 
2553 		if (is_profiling_func(func->name))
2554 			func->profiling_func = true;
2555 
2556 		len = strlen(func->name);
2557 		if (len > sym_name_max_len)
2558 			sym_name_max_len = len;
2559 	}
2560 
2561 	return 0;
2562 }
2563 
2564 static void mark_rodata(struct objtool_file *file)
2565 {
2566 	struct section *sec;
2567 	bool found = false;
2568 
2569 	/*
2570 	 * Search for the following rodata sections, each of which can
2571 	 * potentially contain jump tables:
2572 	 *
2573 	 * - .rodata: can contain GCC switch tables
2574 	 * - .rodata.<func>: same, if -fdata-sections is being used
2575 	 * - .data.rel.ro.c_jump_table: contains C annotated jump tables
2576 	 *
2577 	 * .rodata.str1.* sections are ignored; they don't contain jump tables.
2578 	 */
2579 	for_each_sec(file->elf, sec) {
2580 		if ((!strncmp(sec->name, ".rodata", 7) &&
2581 		     !strstr(sec->name, ".str1.")) ||
2582 		    !strncmp(sec->name, ".data.rel.ro", 12)) {
2583 			sec->rodata = true;
2584 			found = true;
2585 		}
2586 	}
2587 
2588 	file->rodata = found;
2589 }
2590 
2591 static void mark_holes(struct objtool_file *file)
2592 {
2593 	struct instruction *insn;
2594 	bool in_hole = false;
2595 
2596 	if (!opts.link)
2597 		return;
2598 
2599 	/*
2600 	 * Whole archive runs might encounter dead code from weak symbols.
2601 	 * This is where the linker will have dropped the weak symbol in
2602 	 * favour of a regular symbol, but leaves the code in place.
2603 	 */
2604 	for_each_insn(file, insn) {
2605 		if (insn->sym || !find_symbol_hole_containing(insn->sec, insn->offset)) {
2606 			in_hole = false;
2607 			continue;
2608 		}
2609 
2610 		/* Skip function padding and pfx code */
2611 		if (!in_hole && insn->type == INSN_NOP)
2612 			continue;
2613 
2614 		in_hole = true;
2615 		insn->hole = 1;
2616 
2617 		/*
2618 		 * If this hole jumps to a .cold function, mark it ignore.
2619 		 */
2620 		if (insn->jump_dest) {
2621 			struct symbol *dest_func = insn_func(insn->jump_dest);
2622 
2623 			if (dest_func && dest_func->cold)
2624 				dest_func->ignore = true;
2625 		}
2626 	}
2627 }
2628 
2629 static bool validate_branch_enabled(void)
2630 {
2631 	return opts.stackval ||
2632 	       opts.orc ||
2633 	       opts.uaccess ||
2634 	       opts.checksum;
2635 }
2636 
/*
 * Decode the object file and build objtool's model of the code:
 * instructions, jump/call destinations, alternatives, jump tables,
 * unwind hints and annotations.
 *
 * The relative ordering of these passes matters; see the inline comments.
 *
 * Returns 0 on success, -1 on error.
 */
static int decode_sections(struct objtool_file *file)
{
	file->klp = is_livepatch_module(file);

	mark_rodata(file);

	if (init_pv_ops(file))
		return -1;

	/*
	 * Must be before add_{jump_call}_destination.
	 */
	if (classify_symbols(file))
		return -1;

	if (decode_instructions(file))
		return -1;

	if (add_ignores(file))
		return -1;

	add_uaccess_safe(file);

	if (read_annotate(file, __annotate_early))
		return -1;

	/*
	 * Must be before add_jump_destinations(), which depends on 'func'
	 * being set for alternatives, to enable proper sibling call detection.
	 */
	if (validate_branch_enabled() || opts.noinstr || opts.hack_jump_label || opts.disas) {
		if (add_special_section_alts(file))
			return -1;
	}

	if (add_jump_destinations(file))
		return -1;

	/*
	 * Must be before add_call_destination(); it changes INSN_CALL to
	 * INSN_JUMP.
	 */
	if (read_annotate(file, __annotate_ifc))
		return -1;

	if (add_call_destinations(file))
		return -1;

	if (add_jump_table_alts(file))
		return -1;

	if (read_unwind_hints(file))
		return -1;

	/* Must be after add_jump_destinations() */
	mark_holes(file);

	/*
	 * Must be after add_call_destinations() such that it can override
	 * dead_end_function() marks.
	 */
	if (read_annotate(file, __annotate_late))
		return -1;

	return 0;
}
2703 
2704 static bool is_special_call(struct instruction *insn)
2705 {
2706 	if (insn->type == INSN_CALL) {
2707 		struct symbol *dest = insn_call_dest(insn);
2708 
2709 		if (!dest)
2710 			return false;
2711 
2712 		if (dest->fentry || dest->embedded_insn)
2713 			return true;
2714 	}
2715 
2716 	return false;
2717 }
2718 
2719 static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state)
2720 {
2721 	struct cfi_state *cfi = &state->cfi;
2722 	int i;
2723 
2724 	if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap)
2725 		return true;
2726 
2727 	if (cfi->cfa.offset != initial_func_cfi.cfa.offset)
2728 		return true;
2729 
2730 	if (cfi->stack_size != initial_func_cfi.cfa.offset)
2731 		return true;
2732 
2733 	for (i = 0; i < CFI_NUM_REGS; i++) {
2734 		if (cfi->regs[i].base != initial_func_cfi.regs[i].base ||
2735 		    cfi->regs[i].offset != initial_func_cfi.regs[i].offset)
2736 			return true;
2737 	}
2738 
2739 	return false;
2740 }
2741 
2742 static bool check_reg_frame_pos(const struct cfi_reg *reg,
2743 				int expected_offset)
2744 {
2745 	return reg->base == CFI_CFA &&
2746 	       reg->offset == expected_offset;
2747 }
2748 
2749 static bool has_valid_stack_frame(struct insn_state *state)
2750 {
2751 	struct cfi_state *cfi = &state->cfi;
2752 
2753 	if (cfi->cfa.base == CFI_BP &&
2754 	    check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) &&
2755 	    check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8))
2756 		return true;
2757 
2758 	if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP)
2759 		return true;
2760 
2761 	return false;
2762 }
2763 
/*
 * Simplified stack tracking for UNWIND_HINT_TYPE_REGS{,_PARTIAL} regions:
 * only the CFA offset is adjusted for pushes, pops and immediate adds to
 * the stack pointer; individual register saves are not tracked here.
 *
 * NOTE: the three checks below are intentionally independent ifs, not an
 * if/else chain.
 */
static int update_cfi_state_regs(struct instruction *insn,
				  struct cfi_state *cfi,
				  struct stack_op *op)
{
	struct cfi_reg *cfa = &cfi->cfa;

	/* Only SP-based frames are adjusted. */
	if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
		return 0;

	/* push */
	if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
		cfa->offset += 8;

	/* pop */
	if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
		cfa->offset -= 8;

	/* add immediate to sp */
	if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD &&
	    op->dest.reg == CFI_SP && op->src.reg == CFI_SP)
		cfa->offset -= op->src.offset;

	return 0;
}
2788 
2789 static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset)
2790 {
2791 	if (arch_callee_saved_reg(reg) &&
2792 	    cfi->regs[reg].base == CFI_UNDEFINED) {
2793 		cfi->regs[reg].base = base;
2794 		cfi->regs[reg].offset = offset;
2795 	}
2796 }
2797 
/* Reset a register's CFI state to its function-entry default. */
static void restore_reg(struct cfi_state *cfi, unsigned char reg)
{
	cfi->regs[reg].base = initial_func_cfi.regs[reg].base;
	cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset;
}
2803 
2804 /*
2805  * A note about DRAP stack alignment:
2806  *
2807  * GCC has the concept of a DRAP register, which is used to help keep track of
2808  * the stack pointer when aligning the stack.  r10 or r13 is used as the DRAP
2809  * register.  The typical DRAP pattern is:
2810  *
2811  *   4c 8d 54 24 08		lea    0x8(%rsp),%r10
2812  *   48 83 e4 c0		and    $0xffffffffffffffc0,%rsp
2813  *   41 ff 72 f8		pushq  -0x8(%r10)
2814  *   55				push   %rbp
2815  *   48 89 e5			mov    %rsp,%rbp
2816  *				(more pushes)
2817  *   41 52			push   %r10
2818  *				...
2819  *   41 5a			pop    %r10
2820  *				(more pops)
2821  *   5d				pop    %rbp
2822  *   49 8d 62 f8		lea    -0x8(%r10),%rsp
2823  *   c3				retq
2824  *
2825  * There are some variations in the epilogues, like:
2826  *
2827  *   5b				pop    %rbx
2828  *   41 5a			pop    %r10
2829  *   41 5c			pop    %r12
2830  *   41 5d			pop    %r13
2831  *   41 5e			pop    %r14
2832  *   c9				leaveq
2833  *   49 8d 62 f8		lea    -0x8(%r10),%rsp
2834  *   c3				retq
2835  *
2836  * and:
2837  *
2838  *   4c 8b 55 e8		mov    -0x18(%rbp),%r10
2839  *   48 8b 5d e0		mov    -0x20(%rbp),%rbx
2840  *   4c 8b 65 f0		mov    -0x10(%rbp),%r12
2841  *   4c 8b 6d f8		mov    -0x8(%rbp),%r13
2842  *   c9				leaveq
2843  *   49 8d 62 f8		lea    -0x8(%r10),%rsp
2844  *   c3				retq
2845  *
2846  * Sometimes r13 is used as the DRAP register, in which case it's saved and
2847  * restored beforehand:
2848  *
2849  *   41 55			push   %r13
2850  *   4c 8d 6c 24 10		lea    0x10(%rsp),%r13
2851  *   48 83 e4 f0		and    $0xfffffffffffffff0,%rsp
2852  *				...
2853  *   49 8d 65 f0		lea    -0x10(%r13),%rsp
2854  *   41 5d			pop    %r13
2855  *   c3				retq
2856  */
/*
 * Update the CFI state in @cfi for a single stack operation @op of @insn.
 *
 * @next_insn is used to suppress the "unsupported stack register
 * modification" warning when the very next instruction carries an unwind
 * hint that will re-establish the state anyway.
 *
 * Returns 0 on success, 1 to stop validating this branch (undefined stack
 * state inside a function), -1 on an unsupported/unknown stack operation.
 */
static int update_cfi_state(struct instruction *insn,
			    struct instruction *next_insn,
			    struct cfi_state *cfi, struct stack_op *op)
{
	struct cfi_reg *cfa = &cfi->cfa;
	struct cfi_reg *regs = cfi->regs;

	/* ignore UNWIND_HINT_UNDEFINED regions */
	if (cfi->force_undefined)
		return 0;

	/* stack operations don't make sense with an undefined CFA */
	if (cfa->base == CFI_UNDEFINED) {
		if (insn_func(insn)) {
			WARN_INSN(insn, "undefined stack state");
			return 1;
		}
		return 0;
	}

	/* REGS regions use the simplified CFA-offset-only tracking. */
	if (cfi->type == UNWIND_HINT_TYPE_REGS ||
	    cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL)
		return update_cfi_state_regs(insn, cfi, op);

	switch (op->dest.type) {

	case OP_DEST_REG:
		switch (op->src.type) {

		case OP_SRC_REG:
			if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
			    cfa->base == CFI_SP &&
			    check_reg_frame_pos(&regs[CFI_BP], -cfa->offset)) {

				/* mov %rsp, %rbp */
				cfa->base = op->dest.reg;
				cfi->bp_scratch = false;
			}

			else if (op->src.reg == CFI_SP &&
				 op->dest.reg == CFI_BP && cfi->drap) {

				/* drap: mov %rsp, %rbp */
				regs[CFI_BP].base = CFI_BP;
				regs[CFI_BP].offset = -cfi->stack_size;
				cfi->bp_scratch = false;
			}

			else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {

				/*
				 * mov %rsp, %reg
				 *
				 * This is needed for the rare case where GCC
				 * does:
				 *
				 *   mov    %rsp, %rax
				 *   ...
				 *   mov    %rax, %rsp
				 */
				cfi->vals[op->dest.reg].base = CFI_CFA;
				cfi->vals[op->dest.reg].offset = -cfi->stack_size;
			}

			else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
				 (cfa->base == CFI_BP || cfa->base == cfi->drap_reg)) {

				/*
				 * mov %rbp, %rsp
				 *
				 * Restore the original stack pointer (Clang).
				 */
				cfi->stack_size = -cfi->regs[CFI_BP].offset;
			}

			else if (op->dest.reg == cfa->base) {

				/* mov %reg, %rsp */
				if (cfa->base == CFI_SP &&
				    cfi->vals[op->src.reg].base == CFI_CFA) {

					/*
					 * This is needed for the rare case
					 * where GCC does something dumb like:
					 *
					 *   lea    0x8(%rsp), %rcx
					 *   ...
					 *   mov    %rcx, %rsp
					 */
					cfa->offset = -cfi->vals[op->src.reg].offset;
					cfi->stack_size = cfa->offset;

				} else if (cfa->base == CFI_SP &&
					   cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
					   cfi->vals[op->src.reg].offset == cfa->offset) {

					/*
					 * Stack swizzle:
					 *
					 * 1: mov %rsp, (%[tos])
					 * 2: mov %[tos], %rsp
					 *    ...
					 * 3: pop %rsp
					 *
					 * Where:
					 *
					 * 1 - places a pointer to the previous
					 *     stack at the Top-of-Stack of the
					 *     new stack.
					 *
					 * 2 - switches to the new stack.
					 *
					 * 3 - pops the Top-of-Stack to restore
					 *     the original stack.
					 *
					 * Note: we set base to SP_INDIRECT
					 * here and preserve offset. Therefore
					 * when the unwinder reaches ToS it
					 * will dereference SP and then add the
					 * offset to find the next frame, IOW:
					 * (%rsp) + offset.
					 */
					cfa->base = CFI_SP_INDIRECT;

				} else {
					/* Unknown SP source: give up on unwinding. */
					cfa->base = CFI_UNDEFINED;
					cfa->offset = 0;
				}
			}

			else if (op->dest.reg == CFI_SP &&
				 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
				 cfi->vals[op->src.reg].offset == cfa->offset) {

				/*
				 * The same stack swizzle case 2) as above. But
				 * because we can't change cfa->base, case 3)
				 * will become a regular POP. Pretend we're a
				 * PUSH so things don't go unbalanced.
				 */
				cfi->stack_size += 8;
			}


			break;

		case OP_SRC_ADD:
			if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) {

				/* add imm, %rsp */
				cfi->stack_size -= op->src.offset;
				if (cfa->base == CFI_SP)
					cfa->offset -= op->src.offset;
				break;
			}

			if (op->dest.reg == CFI_BP && op->src.reg == CFI_SP &&
			    insn->sym->frame_pointer) {
				/* addi.d fp,sp,imm on LoongArch */
				if (cfa->base == CFI_SP && cfa->offset == op->src.offset) {
					cfa->base = CFI_BP;
					cfa->offset = 0;
				}
				break;
			}

			if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {
				/* addi.d sp,fp,imm on LoongArch */
				if (cfa->base == CFI_BP && cfa->offset == 0) {
					if (insn->sym->frame_pointer) {
						cfa->base = CFI_SP;
						cfa->offset = -op->src.offset;
					}
				} else {
					/* lea disp(%rbp), %rsp */
					cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
				}
				break;
			}

			if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {

				/* drap: lea disp(%rsp), %drap */
				cfi->drap_reg = op->dest.reg;

				/*
				 * lea disp(%rsp), %reg
				 *
				 * This is needed for the rare case where GCC
				 * does something dumb like:
				 *
				 *   lea    0x8(%rsp), %rcx
				 *   ...
				 *   mov    %rcx, %rsp
				 */
				cfi->vals[op->dest.reg].base = CFI_CFA;
				cfi->vals[op->dest.reg].offset = \
					-cfi->stack_size + op->src.offset;

				break;
			}

			if (cfi->drap && op->dest.reg == CFI_SP &&
			    op->src.reg == cfi->drap_reg) {

				 /* drap: lea disp(%drap), %rsp */
				cfa->base = CFI_SP;
				cfa->offset = cfi->stack_size = -op->src.offset;
				cfi->drap_reg = CFI_UNDEFINED;
				cfi->drap = false;
				break;
			}

			if (op->dest.reg == cfi->cfa.base && !(next_insn && next_insn->hint)) {
				WARN_INSN(insn, "unsupported stack register modification");
				return -1;
			}

			break;

		case OP_SRC_AND:
			if (op->dest.reg != CFI_SP ||
			    (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
			    (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
				WARN_INSN(insn, "unsupported stack pointer realignment");
				return -1;
			}

			if (cfi->drap_reg != CFI_UNDEFINED) {
				/* drap: and imm, %rsp */
				cfa->base = cfi->drap_reg;
				cfa->offset = cfi->stack_size = 0;
				cfi->drap = true;
			}

			/*
			 * Older versions of GCC (4.8ish) realign the stack
			 * without DRAP, with a frame pointer.
			 */

			break;

		case OP_SRC_POP:
		case OP_SRC_POPF:
			if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) {

				/* pop %rsp; # restore from a stack swizzle */
				cfa->base = CFI_SP;
				break;
			}

			if (!cfi->drap && op->dest.reg == cfa->base) {

				/* pop %rbp */
				cfa->base = CFI_SP;
			}

			if (cfi->drap && cfa->base == CFI_BP_INDIRECT &&
			    op->dest.reg == cfi->drap_reg &&
			    cfi->drap_offset == -cfi->stack_size) {

				/* drap: pop %drap */
				cfa->base = cfi->drap_reg;
				cfa->offset = 0;
				cfi->drap_offset = -1;

			} else if (cfi->stack_size == -regs[op->dest.reg].offset) {

				/* pop %reg */
				restore_reg(cfi, op->dest.reg);
			}

			cfi->stack_size -= 8;
			if (cfa->base == CFI_SP)
				cfa->offset -= 8;

			break;

		case OP_SRC_REG_INDIRECT:
			if (!cfi->drap && op->dest.reg == cfa->base &&
			    op->dest.reg == CFI_BP) {

				/* mov disp(%rsp), %rbp */
				cfa->base = CFI_SP;
				cfa->offset = cfi->stack_size;
			}

			if (cfi->drap && op->src.reg == CFI_BP &&
			    op->src.offset == cfi->drap_offset) {

				/* drap: mov disp(%rbp), %drap */
				cfa->base = cfi->drap_reg;
				cfa->offset = 0;
				cfi->drap_offset = -1;
			}

			if (cfi->drap && op->src.reg == CFI_BP &&
			    op->src.offset == regs[op->dest.reg].offset) {

				/* drap: mov disp(%rbp), %reg */
				restore_reg(cfi, op->dest.reg);

			} else if (op->src.reg == cfa->base &&
			    op->src.offset == regs[op->dest.reg].offset + cfa->offset) {

				/* mov disp(%rbp), %reg */
				/* mov disp(%rsp), %reg */
				restore_reg(cfi, op->dest.reg);

			} else if (op->src.reg == CFI_SP &&
				   op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) {

				/* mov disp(%rsp), %reg */
				restore_reg(cfi, op->dest.reg);
			}

			break;

		default:
			WARN_INSN(insn, "unknown stack-related instruction");
			return -1;
		}

		break;

	case OP_DEST_PUSH:
	case OP_DEST_PUSHF:
		cfi->stack_size += 8;
		if (cfa->base == CFI_SP)
			cfa->offset += 8;

		if (op->src.type != OP_SRC_REG)
			break;

		if (cfi->drap) {
			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {

				/* drap: push %drap */
				cfa->base = CFI_BP_INDIRECT;
				cfa->offset = -cfi->stack_size;

				/* save drap so we know when to restore it */
				cfi->drap_offset = -cfi->stack_size;

			} else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) {

				/* drap: push %rbp */
				cfi->stack_size = 0;

			} else {

				/* drap: push %reg */
				save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size);
			}

		} else {

			/* push %reg */
			save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size);
		}

		/* detect when asm code uses rbp as a scratch register */
		if (opts.stackval && insn_func(insn) && op->src.reg == CFI_BP &&
		    cfa->base != CFI_BP)
			cfi->bp_scratch = true;
		break;

	case OP_DEST_REG_INDIRECT:

		if (cfi->drap) {
			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {

				/* drap: mov %drap, disp(%rbp) */
				cfa->base = CFI_BP_INDIRECT;
				cfa->offset = op->dest.offset;

				/* save drap offset so we know when to restore it */
				cfi->drap_offset = op->dest.offset;
			} else {

				/* drap: mov reg, disp(%rbp) */
				save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset);
			}

		} else if (op->dest.reg == cfa->base) {

			/* mov reg, disp(%rbp) */
			/* mov reg, disp(%rsp) */
			save_reg(cfi, op->src.reg, CFI_CFA,
				 op->dest.offset - cfi->cfa.offset);

		} else if (op->dest.reg == CFI_SP) {

			/* mov reg, disp(%rsp) */
			save_reg(cfi, op->src.reg, CFI_CFA,
				 op->dest.offset - cfi->stack_size);

		} else if (op->src.reg == CFI_SP && op->dest.offset == 0) {

			/* mov %rsp, (%reg); # setup a stack swizzle. */
			cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT;
			cfi->vals[op->dest.reg].offset = cfa->offset;
		}

		break;

	case OP_DEST_MEM:
		if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
			WARN_INSN(insn, "unknown stack-related memory operation");
			return -1;
		}

		/* pop mem */
		cfi->stack_size -= 8;
		if (cfa->base == CFI_SP)
			cfa->offset -= 8;

		break;

	default:
		WARN_INSN(insn, "unknown stack-related instruction");
		return -1;
	}

	return 0;
}
3283 
3284 /*
3285  * The stack layouts of alternatives instructions can sometimes diverge when
3286  * they have stack modifications.  That's fine as long as the potential stack
3287  * layouts don't conflict at any given potential instruction boundary.
3288  *
3289  * Flatten the CFIs of the different alternative code streams (both original
3290  * and replacement) into a single shared CFI array which can be used to detect
3291  * conflicts and nicely feed a linear array of ORC entries to the unwinder.
3292  */
3293 static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn)
3294 {
3295 	struct cfi_state **alt_cfi;
3296 	int group_off;
3297 
3298 	if (!insn->alt_group)
3299 		return 0;
3300 
3301 	if (!insn->cfi) {
3302 		WARN("CFI missing");
3303 		return -1;
3304 	}
3305 
3306 	alt_cfi = insn->alt_group->cfi;
3307 	group_off = insn->offset - insn->alt_group->first_insn->offset;
3308 
3309 	if (!alt_cfi[group_off]) {
3310 		alt_cfi[group_off] = insn->cfi;
3311 	} else {
3312 		if (cficmp(alt_cfi[group_off], insn->cfi)) {
3313 			struct alt_group *orig_group = insn->alt_group->orig_group ?: insn->alt_group;
3314 			struct instruction *orig = orig_group->first_insn;
3315 			WARN_INSN(orig, "stack layout conflict in alternatives: %s",
3316 				  offstr(insn->sec, insn->offset));
3317 			return -1;
3318 		}
3319 	}
3320 
3321 	return 0;
3322 }
3323 
/*
 * Apply all stack operations of @insn to @state, and track the uaccess
 * flag across PUSHF/POPF pairs inside alternatives.
 *
 * Returns 0 on success, non-zero on an invalid stack operation.
 */
static int noinline handle_insn_ops(struct instruction *insn,
				    struct instruction *next_insn,
				    struct insn_state *state)
{
	struct insn_state prev_state __maybe_unused = *state;
	struct stack_op *op;
	int ret = 0;

	for (op = insn->stack_ops; op; op = op->next) {

		ret = update_cfi_state(insn, next_insn, &state->cfi, op);
		if (ret)
			goto done;

		/* uaccess state is only tracked inside alternatives. */
		if (!opts.uaccess || !insn->alt_group)
			continue;

		if (op->dest.type == OP_DEST_PUSHF) {
			/*
			 * PUSHF saves the current uaccess bit onto a stack
			 * kept in a shifted bitfield; bit value 1 with an
			 * empty field marks the bottom of the stack.
			 */
			if (!state->uaccess_stack) {
				state->uaccess_stack = 1;
			} else if (state->uaccess_stack >> 31) {
				WARN_INSN(insn, "PUSHF stack exhausted");
				ret = 1;
				goto done;
			}
			state->uaccess_stack <<= 1;
			state->uaccess_stack  |= state->uaccess;
		}

		if (op->src.type == OP_SRC_POPF) {
			/* POPF restores the most recently pushed uaccess bit. */
			if (state->uaccess_stack) {
				state->uaccess = state->uaccess_stack & 1;
				state->uaccess_stack >>= 1;
				if (state->uaccess_stack == 1)
					state->uaccess_stack = 0;
			}
		}
	}

done:
	TRACE_INSN_STATE(insn, &prev_state, state);

	return ret;
}
3368 
/*
 * Compare @insn's recorded CFI state against @cfi2, the state computed when
 * reaching the same instruction via a different code path.  Warn and return
 * false on any mismatch in CFA, callee-saved registers, unwind type, or DRAP
 * state; return true when the states agree.
 */
static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
{
	struct cfi_state *cfi1 = insn->cfi;
	int i;

	if (!cfi1) {
		WARN("CFI missing");
		return false;
	}

	if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) {

		WARN_INSN(insn, "stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
			  cfi1->cfa.base, cfi1->cfa.offset,
			  cfi2->cfa.base, cfi2->cfa.offset);
		return false;

	}

	if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
		/* report each differing register individually */
		for (i = 0; i < CFI_NUM_REGS; i++) {

			if (!memcmp(&cfi1->regs[i], &cfi2->regs[i], sizeof(struct cfi_reg)))
				continue;

			WARN_INSN(insn, "stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
				  i, cfi1->regs[i].base, cfi1->regs[i].offset,
				  i, cfi2->regs[i].base, cfi2->regs[i].offset);
		}
		return false;
	}

	if (cfi1->type != cfi2->type) {

		WARN_INSN(insn, "stack state mismatch: type1=%d type2=%d",
			  cfi1->type, cfi2->type);
		return false;
	}

	/* drap_reg/drap_offset are only meaningful when DRAP is in use */
	if (cfi1->drap != cfi2->drap ||
		   (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) ||
		   (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) {

		WARN_INSN(insn, "stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
			  cfi1->drap, cfi1->drap_reg, cfi1->drap_offset,
			  cfi2->drap, cfi2->drap_reg, cfi2->drap_offset);
		return false;
	}

	return true;
}
3420 
3421 static inline bool func_uaccess_safe(struct symbol *func)
3422 {
3423 	if (func)
3424 		return func->uaccess_safe;
3425 
3426 	return false;
3427 }
3428 
3429 static inline const char *call_dest_name(struct instruction *insn)
3430 {
3431 	static char pvname[19];
3432 	struct reloc *reloc;
3433 	int idx;
3434 
3435 	if (insn_call_dest(insn))
3436 		return insn_call_dest(insn)->name;
3437 
3438 	reloc = insn_reloc(NULL, insn);
3439 	if (reloc && !strcmp(reloc->sym->name, "pv_ops")) {
3440 		idx = (reloc_addend(reloc) / sizeof(void *));
3441 		snprintf(pvname, sizeof(pvname), "pv_ops[%d]", idx);
3442 		return pvname;
3443 	}
3444 
3445 	return "{dynamic}";
3446 }
3447 
3448 static bool pv_call_dest(struct objtool_file *file, struct instruction *insn)
3449 {
3450 	struct symbol *target;
3451 	struct reloc *reloc;
3452 	int idx;
3453 
3454 	reloc = insn_reloc(file, insn);
3455 	if (!reloc || strcmp(reloc->sym->name, "pv_ops"))
3456 		return false;
3457 
3458 	idx = arch_insn_adjusted_addend(insn, reloc) / sizeof(void *);
3459 
3460 	if (file->pv_ops[idx].clean)
3461 		return true;
3462 
3463 	file->pv_ops[idx].clean = true;
3464 
3465 	list_for_each_entry(target, &file->pv_ops[idx].targets, pv_target) {
3466 		if (!target->sec->noinstr) {
3467 			WARN("pv_ops[%d]: %s", idx, target->name);
3468 			file->pv_ops[idx].clean = false;
3469 		}
3470 	}
3471 
3472 	return file->pv_ops[idx].clean;
3473 }
3474 
3475 static inline bool noinstr_call_dest(struct objtool_file *file,
3476 				     struct instruction *insn,
3477 				     struct symbol *func)
3478 {
3479 	/*
3480 	 * We can't deal with indirect function calls at present;
3481 	 * assume they're instrumented.
3482 	 */
3483 	if (!func) {
3484 		if (file->pv_ops)
3485 			return pv_call_dest(file, insn);
3486 
3487 		return false;
3488 	}
3489 
3490 	/*
3491 	 * If the symbol is from a noinstr section; we good.
3492 	 */
3493 	if (func->sec->noinstr)
3494 		return true;
3495 
3496 	/*
3497 	 * If the symbol is a static_call trampoline, we can't tell.
3498 	 */
3499 	if (func->static_call_tramp)
3500 		return true;
3501 
3502 	/*
3503 	 * The __ubsan_handle_*() calls are like WARN(), they only happen when
3504 	 * something 'BAD' happened. At the risk of taking the machine down,
3505 	 * let them proceed to get the message out.
3506 	 */
3507 	if (!strncmp(func->name, "__ubsan_handle_", 15))
3508 		return true;
3509 
3510 	return false;
3511 }
3512 
3513 static int validate_call(struct objtool_file *file,
3514 			 struct instruction *insn,
3515 			 struct insn_state *state)
3516 {
3517 	if (state->noinstr && state->instr <= 0 &&
3518 	    !noinstr_call_dest(file, insn, insn_call_dest(insn))) {
3519 		WARN_INSN(insn, "call to %s() leaves .noinstr.text section", call_dest_name(insn));
3520 		return 1;
3521 	}
3522 
3523 	if (state->uaccess && !func_uaccess_safe(insn_call_dest(insn))) {
3524 		WARN_INSN(insn, "call to %s() with UACCESS enabled", call_dest_name(insn));
3525 		return 1;
3526 	}
3527 
3528 	if (state->df) {
3529 		WARN_INSN(insn, "call to %s() with DF set", call_dest_name(insn));
3530 		return 1;
3531 	}
3532 
3533 	return 0;
3534 }
3535 
/*
 * A sibling (tail) call leaves the current function, so the stack frame must
 * still be in its original state; beyond that, normal call rules apply.
 */
static int validate_sibling_call(struct objtool_file *file,
				 struct instruction *insn,
				 struct insn_state *state)
{
	if (insn_func(insn) && has_modified_stack_frame(insn, state)) {
		WARN_INSN(insn, "sibling call from callable instruction with modified stack frame");
		return 1;
	}

	return validate_call(file, insn, state);
}
3547 
/*
 * Enforce return-site rules: instrumentation, UACCESS and DF must be back in
 * their expected state, the stack frame must be fully unwound, and BP must
 * not have been used as a scratch register.  Returns non-zero on warning.
 */
static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state)
{
	if (state->noinstr && state->instr > 0) {
		WARN_INSN(insn, "return with instrumentation enabled");
		return 1;
	}

	if (state->uaccess && !func_uaccess_safe(func)) {
		WARN_INSN(insn, "return with UACCESS enabled");
		return 1;
	}

	/* uaccess-safe functions must keep UACCESS enabled until return */
	if (!state->uaccess && func_uaccess_safe(func)) {
		WARN_INSN(insn, "return with UACCESS disabled from a UACCESS-safe function");
		return 1;
	}

	if (state->df) {
		WARN_INSN(insn, "return with DF set");
		return 1;
	}

	if (func && has_modified_stack_frame(insn, state)) {
		WARN_INSN(insn, "return with modified stack frame");
		return 1;
	}

	if (state->cfi.bp_scratch) {
		WARN_INSN(insn, "BP used as a scratch register");
		return 1;
	}

	return 0;
}
3582 
/*
 * Return the next instruction objtool should visit after @insn, taking
 * alternative patching into account.
 */
static struct instruction *next_insn_to_validate(struct objtool_file *file,
						 struct instruction *insn)
{
	struct alt_group *alt_group = insn->alt_group;

	/*
	 * Simulate the fact that alternatives are patched in-place.  When the
	 * end of a replacement alt_group is reached, redirect objtool flow to
	 * the end of the original alt_group.
	 *
	 * insn->alts->insn -> alt_group->first_insn
	 *		       ...
	 *		       alt_group->last_insn
	 *		       [alt_group->nop]      -> next(orig_group->last_insn)
	 */
	if (alt_group) {
		if (alt_group->nop) {
			/* ->nop implies ->orig_group */
			if (insn == alt_group->last_insn)
				return alt_group->nop;
			if (insn == alt_group->nop)
				goto next_orig;
		}
		if (insn == alt_group->last_insn && alt_group->orig_group)
			goto next_orig;
	}

	return next_insn_same_sec(file, insn);

next_orig:
	/* resume after the original group that this replacement patches */
	return next_insn_same_sec(file, alt_group->orig_group->last_insn);
}
3615 
/*
 * Decide whether validation of the default (original) instructions of an
 * alt group should be skipped: either it was annotated with
 * ANNOTATE_IGNORE_ALTERNATIVE, or it is a NOP patched with CLAC/STAC, in
 * which case only the patched path is followed.
 */
static bool skip_alt_group(struct instruction *insn)
{
	/* first instruction of the first replacement, if any */
	struct instruction *alt_insn = insn->alts ? insn->alts->insn : NULL;

	if (!insn->alt_group)
		return false;

	/* ANNOTATE_IGNORE_ALTERNATIVE */
	if (insn->alt_group->ignore) {
		TRACE_ALT(insn, "alt group ignored");
		return true;
	}

	/*
	 * For NOP patched with CLAC/STAC, only follow the latter to avoid
	 * impossible code paths combining patched CLAC with unpatched STAC
	 * or vice versa.
	 *
	 * ANNOTATE_IGNORE_ALTERNATIVE could have been used here, but Linus
	 * requested not to do that to avoid hurting .s file readability
	 * around CLAC/STAC alternative sites.
	 */

	if (!alt_insn)
		return false;

	/* Don't override ASM_{CLAC,STAC}_UNSAFE */
	if (alt_insn->alt_group && alt_insn->alt_group->ignore)
		return false;

	return alt_insn->type == INSN_CLAC || alt_insn->type == INSN_STAC;
}
3648 
3649 static int checksum_debug_init(struct objtool_file *file)
3650 {
3651 	char *dup, *s;
3652 
3653 	if (!opts.debug_checksum)
3654 		return 0;
3655 
3656 	dup = strdup(opts.debug_checksum);
3657 	if (!dup) {
3658 		ERROR_GLIBC("strdup");
3659 		return -1;
3660 	}
3661 
3662 	s = dup;
3663 	while (*s) {
3664 		struct symbol *func;
3665 		char *comma;
3666 
3667 		comma = strchr(s, ',');
3668 		if (comma)
3669 			*comma = '\0';
3670 
3671 		func = find_symbol_by_name(file->elf, s);
3672 		if (!func || !is_func_sym(func))
3673 			WARN("--debug-checksum: can't find '%s'", s);
3674 		else
3675 			func->debug_checksum = 1;
3676 
3677 		if (!comma)
3678 			break;
3679 
3680 		s = comma + 1;
3681 	}
3682 
3683 	free(dup);
3684 	return 0;
3685 }
3686 
/*
 * Fold @insn's raw bytes, plus a stable representation of whatever its
 * relocation references (call destination name, string contents, or symbol
 * name and offset), into @func's checksum so the result is independent of
 * link-time layout.
 */
static void checksum_update_insn(struct objtool_file *file, struct symbol *func,
				 struct instruction *insn)
{
	struct reloc *reloc = insn_reloc(file, insn);
	unsigned long offset;
	struct symbol *sym;

	/* Fake instructions have no backing section bytes. */
	if (insn->fake)
		return;

	checksum_update(func, insn, insn->sec->data->d_buf + insn->offset, insn->len);

	if (!reloc) {
		struct symbol *call_dest = insn_call_dest(insn);

		/* No reloc: hash the call destination's name, if any. */
		if (call_dest)
			checksum_update(func, insn, call_dest->demangled_name,
					strlen(call_dest->demangled_name));
		return;
	}

	sym = reloc->sym;
	offset = arch_insn_adjusted_addend(insn, reloc);

	/* For string-section references, hash the string contents themselves. */
	if (is_string_sec(sym->sec)) {
		char *str;

		str = sym->sec->data->d_buf + sym->offset + offset;
		checksum_update(func, insn, str, strlen(str));
		return;
	}

	/* Resolve section symbols to the contained symbol + relative offset. */
	if (is_sec_sym(sym)) {
		sym = find_symbol_containing(reloc->sym->sec, offset);
		if (!sym)
			return;

		offset -= sym->offset;
	}

	checksum_update(func, insn, sym->demangled_name, strlen(sym->demangled_name));
	checksum_update(func, insn, &offset, sizeof(offset));
}
3730 
static int validate_branch(struct objtool_file *file, struct symbol *func,
			   struct instruction *insn, struct insn_state state);
static int do_validate_branch(struct objtool_file *file, struct symbol *func,
			      struct instruction *insn, struct insn_state state);

/*
 * Validate a single instruction during a branch walk: check CFI consistency
 * with previous visits, recurse into alternatives and taken branches, apply
 * stack ops, and enforce the per-instruction-type rules (uaccess, DF, frame
 * pointer, noinstr, ...).
 *
 * Returns non-zero on warning/error.  *@dead_end is set true when the walk
 * must not continue to the next sequential instruction.
 */
static int validate_insn(struct objtool_file *file, struct symbol *func,
			 struct instruction *insn, struct insn_state *statep,
			 struct instruction *prev_insn, struct instruction *next_insn,
			 bool *dead_end)
{
	char *alt_name __maybe_unused = NULL;
	struct alternative *alt;
	u8 visited;
	int ret;

	/*
	 * Any returns before the end of this function are effectively dead
	 * ends, i.e. validate_branch() has reached the end of the branch.
	 */
	*dead_end = true;

	/* visits are tracked separately per uaccess state */
	visited = VISITED_BRANCH << statep->uaccess;
	if (insn->visited & VISITED_BRANCH_MASK) {
		/* revisit: the CFI state must agree with the earlier visit */
		if (!insn->hint && !insn_cfi_match(insn, &statep->cfi))
			return 1;

		if (insn->visited & visited) {
			TRACE_INSN(insn, "already visited");
			return 0;
		}
	} else {
		nr_insns_visited++;
	}

	if (statep->noinstr)
		statep->instr += insn->instr;

	if (insn->hint) {
		if (insn->restore) {
			struct instruction *save_insn, *i;

			/* find the matching CFI save hint earlier in the function */
			i = insn;
			save_insn = NULL;

			sym_for_each_insn_continue_reverse(file, func, i) {
				if (i->save) {
					save_insn = i;
					break;
				}
			}

			if (!save_insn) {
				WARN_INSN(insn, "no corresponding CFI save for CFI restore");
				return 1;
			}

			if (!save_insn->visited) {
				/*
				 * If the restore hint insn is at the
				 * beginning of a basic block and was
				 * branched to from elsewhere, and the
				 * save insn hasn't been visited yet,
				 * defer following this branch for now.
				 * It will be seen later via the
				 * straight-line path.
				 */
				if (!prev_insn) {
					TRACE_INSN(insn, "defer restore");
					return 0;
				}

				WARN_INSN(insn, "objtool isn't smart enough to handle this CFI save/restore combo");
				return 1;
			}

			insn->cfi = save_insn->cfi;
			nr_cfi_reused++;
		}

		statep->cfi = *insn->cfi;
	} else {
		/* XXX track if we actually changed statep->cfi */

		/* share the previous insn's CFI object when nothing changed */
		if (prev_insn && !cficmp(prev_insn->cfi, &statep->cfi)) {
			insn->cfi = prev_insn->cfi;
			nr_cfi_reused++;
		} else {
			insn->cfi = cfi_hash_find_or_add(&statep->cfi);
		}
	}

	insn->visited |= visited;

	if (propagate_alt_cfi(file, insn))
		return 1;

	/* recurse into each alternative replacement before the default path */
	if (insn->alts) {
		for (alt = insn->alts; alt; alt = alt->next) {
			TRACE_ALT_BEGIN(insn, alt, alt_name);
			ret = validate_branch(file, func, alt->insn, *statep);
			TRACE_ALT_END(insn, alt, alt_name);
			if (ret) {
				BT_INSN(insn, "(alt)");
				return ret;
			}
		}
		TRACE_ALT_INFO_NOADDR(insn, "/ ", "DEFAULT");
	}

	if (skip_alt_group(insn))
		return 0;

	if (handle_insn_ops(insn, next_insn, statep))
		return 1;

	switch (insn->type) {

	case INSN_RETURN:
		TRACE_INSN(insn, "return");
		return validate_return(func, insn, statep);

	case INSN_CALL:
	case INSN_CALL_DYNAMIC:
		if (insn->type == INSN_CALL)
			TRACE_INSN(insn, "call");
		else
			TRACE_INSN(insn, "indirect call");

		ret = validate_call(file, insn, statep);
		if (ret)
			return ret;

		if (opts.stackval && func && !is_special_call(insn) &&
		    !has_valid_stack_frame(statep)) {
			WARN_INSN(insn, "call without frame pointer save/setup");
			return 1;
		}

		break;

	case INSN_JUMP_CONDITIONAL:
	case INSN_JUMP_UNCONDITIONAL:
		if (is_sibling_call(insn)) {
			TRACE_INSN(insn, "sibling call");
			ret = validate_sibling_call(file, insn, statep);
			if (ret)
				return ret;

		} else if (insn->jump_dest) {
			if (insn->type == INSN_JUMP_UNCONDITIONAL)
				TRACE_INSN(insn, "unconditional jump");
			else
				TRACE_INSN(insn, "jump taken");

			/* follow the taken branch with a copy of the state */
			ret = validate_branch(file, func, insn->jump_dest, *statep);
			if (ret) {
				BT_INSN(insn, "(branch)");
				return ret;
			}
		}

		if (insn->type == INSN_JUMP_UNCONDITIONAL)
			return 0;

		TRACE_INSN(insn, "jump not taken");
		break;

	case INSN_JUMP_DYNAMIC:
	case INSN_JUMP_DYNAMIC_CONDITIONAL:
		TRACE_INSN(insn, "indirect jump");
		if (is_sibling_call(insn)) {
			ret = validate_sibling_call(file, insn, statep);
			if (ret)
				return ret;
		}

		if (insn->type == INSN_JUMP_DYNAMIC)
			return 0;

		break;

	case INSN_SYSCALL:
		TRACE_INSN(insn, "syscall");
		if (func && (!next_insn || !next_insn->hint)) {
			WARN_INSN(insn, "unsupported instruction in callable function");
			return 1;
		}

		break;

	case INSN_SYSRET:
		TRACE_INSN(insn, "sysret");
		if (func && (!next_insn || !next_insn->hint)) {
			WARN_INSN(insn, "unsupported instruction in callable function");
			return 1;
		}

		return 0;

	case INSN_STAC:
		TRACE_INSN(insn, "stac");
		if (!opts.uaccess)
			break;

		if (statep->uaccess) {
			WARN_INSN(insn, "recursive UACCESS enable");
			return 1;
		}

		statep->uaccess = true;
		break;

	case INSN_CLAC:
		TRACE_INSN(insn, "clac");
		if (!opts.uaccess)
			break;

		if (!statep->uaccess && func) {
			WARN_INSN(insn, "redundant UACCESS disable");
			return 1;
		}

		if (func_uaccess_safe(func) && !statep->uaccess_stack) {
			WARN_INSN(insn, "UACCESS-safe disables UACCESS");
			return 1;
		}

		statep->uaccess = false;
		break;

	case INSN_STD:
		TRACE_INSN(insn, "std");
		if (statep->df) {
			WARN_INSN(insn, "recursive STD");
			return 1;
		}

		statep->df = true;
		break;

	case INSN_CLD:
		TRACE_INSN(insn, "cld");
		if (!statep->df && func) {
			WARN_INSN(insn, "redundant CLD");
			return 1;
		}

		statep->df = false;
		break;

	default:
		break;
	}

	if (insn->dead_end)
		TRACE_INSN(insn, "dead end");

	*dead_end = insn->dead_end;
	return 0;
}
3991 
3992 /*
3993  * Follow the branch starting at the given instruction, and recursively follow
3994  * any other branches (jumps).  Meanwhile, track the frame pointer state at
3995  * each instruction and validate all the rules described in
3996  * tools/objtool/Documentation/objtool.txt.
3997  */
3998 static int do_validate_branch(struct objtool_file *file, struct symbol *func,
3999 			      struct instruction *insn, struct insn_state state)
4000 {
4001 	struct instruction *next_insn, *prev_insn = NULL;
4002 	bool dead_end;
4003 	int ret;
4004 
4005 	if (func && func->ignore)
4006 		return 0;
4007 
4008 	do {
4009 		insn->trace = 0;
4010 		next_insn = next_insn_to_validate(file, insn);
4011 
4012 		if (opts.checksum && func && insn->sec)
4013 			checksum_update_insn(file, func, insn);
4014 
4015 		if (func && insn_func(insn) && func != insn_func(insn)->pfunc) {
4016 			/* Ignore KCFI type preambles, which always fall through */
4017 			if (is_prefix_func(func))
4018 				return 0;
4019 
4020 			if (file->ignore_unreachables)
4021 				return 0;
4022 
4023 			WARN("%s() falls through to next function %s()",
4024 			     func->name, insn_func(insn)->name);
4025 			func->warned = 1;
4026 
4027 			return 1;
4028 		}
4029 
4030 		ret = validate_insn(file, func, insn, &state, prev_insn, next_insn,
4031 				    &dead_end);
4032 
4033 		if (!insn->trace) {
4034 			if (ret)
4035 				TRACE_INSN(insn, "warning (%d)", ret);
4036 			else
4037 				TRACE_INSN(insn, NULL);
4038 		}
4039 
4040 		if (!dead_end && !next_insn) {
4041 			if (state.cfi.cfa.base == CFI_UNDEFINED)
4042 				return 0;
4043 			if (file->ignore_unreachables)
4044 				return 0;
4045 
4046 			WARN("%s%sunexpected end of section %s",
4047 			     func ? func->name : "", func ? "(): " : "",
4048 			     insn->sec->name);
4049 			return 1;
4050 		}
4051 
4052 		prev_insn = insn;
4053 		insn = next_insn;
4054 
4055 	} while (!dead_end);
4056 
4057 	return ret;
4058 }
4059 
4060 static int validate_branch(struct objtool_file *file, struct symbol *func,
4061 			   struct instruction *insn, struct insn_state state)
4062 {
4063 	int ret;
4064 
4065 	trace_depth_inc();
4066 	ret = do_validate_branch(file, func, insn, state);
4067 	trace_depth_dec();
4068 
4069 	return ret;
4070 }
4071 
4072 static int validate_unwind_hint(struct objtool_file *file,
4073 				  struct instruction *insn,
4074 				  struct insn_state *state)
4075 {
4076 	if (insn->hint && !insn->visited) {
4077 		struct symbol *func = insn_func(insn);
4078 		int ret;
4079 
4080 		if (opts.checksum)
4081 			checksum_init(func);
4082 
4083 		ret = validate_branch(file, func, insn, *state);
4084 		if (ret)
4085 			BT_INSN(insn, "<=== (hint)");
4086 		return ret;
4087 	}
4088 
4089 	return 0;
4090 }
4091 
4092 static int validate_unwind_hints(struct objtool_file *file, struct section *sec)
4093 {
4094 	struct instruction *insn;
4095 	struct insn_state state;
4096 	int warnings = 0;
4097 
4098 	if (!file->hints)
4099 		return 0;
4100 
4101 	init_insn_state(file, &state, sec);
4102 
4103 	if (sec) {
4104 		sec_for_each_insn(file, sec, insn)
4105 			warnings += validate_unwind_hint(file, insn, &state);
4106 	} else {
4107 		for_each_insn(file, insn)
4108 			warnings += validate_unwind_hint(file, insn, &state);
4109 	}
4110 
4111 	return warnings;
4112 }
4113 
4114 /*
4115  * Validate rethunk entry constraint: must untrain RET before the first RET.
4116  *
4117  * Follow every branch (intra-function) and ensure VALIDATE_UNRET_END comes
4118  * before an actual RET instruction.
4119  */
4120 static int validate_unret(struct objtool_file *file, struct instruction *insn)
4121 {
4122 	struct instruction *next, *dest;
4123 	int ret;
4124 
4125 	for (;;) {
4126 		next = next_insn_to_validate(file, insn);
4127 
4128 		if (insn->visited & VISITED_UNRET)
4129 			return 0;
4130 
4131 		insn->visited |= VISITED_UNRET;
4132 
4133 		if (insn->alts) {
4134 			struct alternative *alt;
4135 			for (alt = insn->alts; alt; alt = alt->next) {
4136 				ret = validate_unret(file, alt->insn);
4137 				if (ret) {
4138 					BT_INSN(insn, "(alt)");
4139 					return ret;
4140 				}
4141 			}
4142 		}
4143 
4144 		switch (insn->type) {
4145 
4146 		case INSN_CALL_DYNAMIC:
4147 		case INSN_JUMP_DYNAMIC:
4148 		case INSN_JUMP_DYNAMIC_CONDITIONAL:
4149 			WARN_INSN(insn, "early indirect call");
4150 			return 1;
4151 
4152 		case INSN_JUMP_UNCONDITIONAL:
4153 		case INSN_JUMP_CONDITIONAL:
4154 			if (!is_sibling_call(insn)) {
4155 				if (!insn->jump_dest) {
4156 					WARN_INSN(insn, "unresolved jump target after linking?!?");
4157 					return 1;
4158 				}
4159 				ret = validate_unret(file, insn->jump_dest);
4160 				if (ret) {
4161 					BT_INSN(insn, "(branch%s)",
4162 						insn->type == INSN_JUMP_CONDITIONAL ? "-cond" : "");
4163 					return ret;
4164 				}
4165 
4166 				if (insn->type == INSN_JUMP_UNCONDITIONAL)
4167 					return 0;
4168 
4169 				break;
4170 			}
4171 
4172 			/* fallthrough */
4173 		case INSN_CALL:
4174 			dest = find_insn(file, insn_call_dest(insn)->sec,
4175 					 insn_call_dest(insn)->offset);
4176 			if (!dest) {
4177 				WARN("Unresolved function after linking!?: %s",
4178 				     insn_call_dest(insn)->name);
4179 				return 1;
4180 			}
4181 
4182 			ret = validate_unret(file, dest);
4183 			if (ret) {
4184 				BT_INSN(insn, "(call)");
4185 				return ret;
4186 			}
4187 			/*
4188 			 * If a call returns without error, it must have seen UNTRAIN_RET.
4189 			 * Therefore any non-error return is a success.
4190 			 */
4191 			return 0;
4192 
4193 		case INSN_RETURN:
4194 			WARN_INSN(insn, "RET before UNTRAIN");
4195 			return 1;
4196 
4197 		case INSN_SYSCALL:
4198 			break;
4199 
4200 		case INSN_SYSRET:
4201 			return 0;
4202 
4203 		case INSN_NOP:
4204 			if (insn->retpoline_safe)
4205 				return 0;
4206 			break;
4207 
4208 		default:
4209 			break;
4210 		}
4211 
4212 		if (insn->dead_end)
4213 			return 0;
4214 
4215 		if (!next) {
4216 			WARN_INSN(insn, "teh end!");
4217 			return 1;
4218 		}
4219 		insn = next;
4220 	}
4221 
4222 	return 0;
4223 }
4224 
4225 /*
4226  * Validate that all branches starting at VALIDATE_UNRET_BEGIN encounter
4227  * VALIDATE_UNRET_END before RET.
4228  */
4229 static int validate_unrets(struct objtool_file *file)
4230 {
4231 	struct instruction *insn;
4232 	int warnings = 0;
4233 
4234 	for_each_insn(file, insn) {
4235 		if (!insn->unret)
4236 			continue;
4237 
4238 		warnings += validate_unret(file, insn);
4239 	}
4240 
4241 	return warnings;
4242 }
4243 
/*
 * In a retpoline/rethunk build, flag every indirect jump/call and naked
 * return that hasn't been annotated retpoline-safe (init sections excluded).
 * With IBT/kCFI enabled, additionally verify every indirect call site is
 * preceded by the kCFI check's UD2.
 */
static int validate_retpoline(struct objtool_file *file)
{
	struct instruction *insn;
	int warnings = 0;

	for_each_insn(file, insn) {
		if (insn->type != INSN_JUMP_DYNAMIC &&
		    insn->type != INSN_CALL_DYNAMIC &&
		    insn->type != INSN_RETURN)
			continue;

		if (insn->retpoline_safe)
			continue;

		/* init code is discarded before userspace runs; exempt */
		if (insn->sec->init)
			continue;

		if (insn->type == INSN_RETURN) {
			if (opts.rethunk) {
				WARN_INSN(insn, "'naked' return found in MITIGATION_RETHUNK build");
				warnings++;
			}
			continue;
		}

		WARN_INSN(insn, "indirect %s found in MITIGATION_RETPOLINE build",
			  insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
		warnings++;
	}

	if (!opts.cfi)
		return warnings;

	/*
	 * kCFI call sites look like:
	 *
	 *     movl $(-0x12345678), %r10d
	 *     addl -4(%r11), %r10d
	 *     jz 1f
	 *     ud2
	 *  1: cs call __x86_indirect_thunk_r11
	 *
	 * Verify all indirect calls are kCFI adorned by checking for the
	 * UD2. Notably, doing __nocfi calls to regular (cfi) functions is
	 * broken.
	 */
	list_for_each_entry(insn, &file->retpoline_call_list, call_node) {
		struct symbol *sym = insn->sym;

		if (sym && (sym->type == STT_NOTYPE ||
			    sym->type == STT_FUNC) && !sym->nocfi) {
			struct instruction *prev =
				prev_insn_same_sym(file, insn);

			/* the UD2 must immediately precede the call */
			if (!prev || prev->type != INSN_BUG) {
				WARN_INSN(insn, "no-cfi indirect call!");
				warnings++;
			}
		}
	}

	return warnings;
}
4307 
4308 static bool is_kasan_insn(struct instruction *insn)
4309 {
4310 	return (insn->type == INSN_CALL &&
4311 		!strcmp(insn_call_dest(insn)->name, "__asan_handle_no_return"));
4312 }
4313 
4314 static bool is_ubsan_insn(struct instruction *insn)
4315 {
4316 	return (insn->type == INSN_CALL &&
4317 		!strcmp(insn_call_dest(insn)->name,
4318 			"__ubsan_handle_builtin_unreachable"));
4319 }
4320 
/*
 * Heuristically decide whether an unvisited instruction is expected to be
 * unreachable and should not trigger a warning: padding/trap bytes, alt
 * replacement sections, static call trampolines, and compiler-generated
 * UD2s from UBSAN/KASAN no-return handling.
 */
static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn)
{
	struct symbol *func = insn_func(insn);
	struct instruction *prev_insn;
	int i;

	if (insn->type == INSN_NOP || insn->type == INSN_TRAP ||
	    insn->hole || (func && func->ignore))
		return true;

	/*
	 * Ignore alternative replacement instructions.  This can happen
	 * when a whitelisted function uses one of the ALTERNATIVE macros.
	 */
	if (!strcmp(insn->sec->name, ".altinstr_replacement") ||
	    !strcmp(insn->sec->name, ".altinstr_aux"))
		return true;

	/* everything below only applies inside a function */
	if (!func)
		return false;

	if (func->static_call_tramp)
		return true;

	/*
	 * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
	 * __builtin_unreachable().  The BUG() macro has an unreachable() after
	 * the UD2, which causes GCC's undefined trap logic to emit another UD2
	 * (or occasionally a JMP to UD2).
	 *
	 * It may also insert a UD2 after calling a __noreturn function.
	 */
	prev_insn = prev_insn_same_sec(file, insn);
	if (prev_insn && prev_insn->dead_end &&
	    (insn->type == INSN_BUG ||
	     (insn->type == INSN_JUMP_UNCONDITIONAL &&
	      insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
		return true;

	/*
	 * Check if this (or a subsequent) instruction is related to
	 * CONFIG_UBSAN or CONFIG_KASAN.
	 *
	 * End the search at 5 instructions to avoid going into the weeds.
	 */
	for (i = 0; i < 5; i++) {

		if (is_kasan_insn(insn) || is_ubsan_insn(insn))
			return true;

		if (insn->type == INSN_JUMP_UNCONDITIONAL) {
			/* only follow jumps that stay within this function */
			if (insn->jump_dest &&
			    insn_func(insn->jump_dest) == func) {
				insn = insn->jump_dest;
				continue;
			}

			break;
		}

		/* stop at the end of the function */
		if (insn->offset + insn->len >= func->offset + func->len)
			break;

		insn = next_insn_same_sec(file, insn);
	}

	return false;
}
4389 
4390 /*
4391  * For FineIBT or kCFI, a certain number of bytes preceding the function may be
4392  * NOPs.  Those NOPs may be rewritten at runtime and executed, so give them a
4393  * proper function name: __pfx_<func>.
4394  *
4395  * The NOPs may not exist for the following cases:
4396  *
4397  *   - compiler cloned functions (*.cold, *.part0, etc)
4398  *   - asm functions created with inline asm or without SYM_FUNC_START()
4399  *
4400  * Also, the function may already have a prefix from a previous objtool run
4401  * (livepatch extracted functions, or manually running objtool multiple times).
4402  *
4403  * So return 0 if the NOPs are missing or the function already has a prefix
4404  * symbol.
4405  */
4406 static int create_prefix_symbol(struct objtool_file *file, struct symbol *func)
4407 {
4408 	struct instruction *insn, *prev;
4409 	char name[SYM_NAME_LEN];
4410 	struct cfi_state *cfi;
4411 
4412 	if (!is_func_sym(func) || is_prefix_func(func) ||
4413 	    func->cold || func->static_call_tramp)
4414 		return 0;
4415 
4416 	if ((strlen(func->name) + sizeof("__pfx_") > SYM_NAME_LEN)) {
4417 		WARN("%s: symbol name too long, can't create __pfx_ symbol",
4418 		      func->name);
4419 		return 0;
4420 	}
4421 
4422 	if (snprintf_check(name, SYM_NAME_LEN, "__pfx_%s", func->name))
4423 		return -1;
4424 
4425 	if (file->klp) {
4426 		struct symbol *pfx;
4427 
4428 		pfx = find_symbol_by_offset(func->sec, func->offset - opts.prefix);
4429 		if (pfx && is_prefix_func(pfx) && !strcmp(pfx->name, name))
4430 			return 0;
4431 	}
4432 
4433 	insn = find_insn(file, func->sec, func->offset);
4434 	if (!insn) {
4435 		WARN("%s: can't find starting instruction", func->name);
4436 		return -1;
4437 	}
4438 
4439 	for (prev = prev_insn_same_sec(file, insn);
4440 	     prev;
4441 	     prev = prev_insn_same_sec(file, prev)) {
4442 		u64 offset;
4443 
4444 		if (prev->type != INSN_NOP)
4445 			return 0;
4446 
4447 		offset = func->offset - prev->offset;
4448 
4449 		if (offset > opts.prefix)
4450 			return 0;
4451 
4452 		if (offset < opts.prefix)
4453 			continue;
4454 
4455 		if (!elf_create_symbol(file->elf, name, func->sec,
4456 				       GELF_ST_BIND(func->sym.st_info),
4457 				       GELF_ST_TYPE(func->sym.st_info),
4458 				       prev->offset, opts.prefix))
4459 			return -1;
4460 
4461 		break;
4462 	}
4463 
4464 	if (!prev)
4465 		return 0;
4466 
4467 	if (!insn->cfi) {
4468 		/*
4469 		 * This can happen if stack validation isn't enabled or the
4470 		 * function is annotated with STACK_FRAME_NON_STANDARD.
4471 		 */
4472 		return 0;
4473 	}
4474 
4475 	/* Propagate insn->cfi to the prefix code */
4476 	cfi = cfi_hash_find_or_add(insn->cfi);
4477 	for (; prev != insn; prev = next_insn_same_sec(file, prev))
4478 		prev->cfi = cfi;
4479 
4480 	return 0;
4481 }
4482 
4483 static int create_prefix_symbols(struct objtool_file *file)
4484 {
4485 	struct section *sec;
4486 	struct symbol *func;
4487 
4488 	for_each_sec(file->elf, sec) {
4489 		if (!is_text_sec(sec))
4490 			continue;
4491 
4492 		sec_for_each_sym(sec, func) {
4493 			if (create_prefix_symbol(file, func))
4494 				return -1;
4495 		}
4496 	}
4497 
4498 	return 0;
4499 }
4500 
/*
 * Validate the function symbol @sym: start a branch walk at its first
 * instruction, handling per-symbol uaccess-safety, checksumming, and
 * optional tracing (--trace pattern match).  Returns the warning count.
 */
static int validate_symbol(struct objtool_file *file, struct section *sec,
			   struct symbol *sym, struct insn_state *state)
{
	struct instruction *insn;
	struct symbol *func;
	int ret;

	if (!sym->len) {
		WARN("%s() is missing an ELF size annotation", sym->name);
		return 1;
	}

	/* only validate canonical (non-alias, parent) symbols once */
	if (sym->pfunc != sym || sym->alias != sym)
		return 0;

	insn = find_insn(file, sec, sym->offset);
	if (!insn || insn->visited)
		return 0;

	if (opts.uaccess)
		state->uaccess = sym->uaccess_safe;

	func = insn_func(insn);

	if (opts.checksum)
		checksum_init(func);

	/* enable tracing when the symbol matches the --trace glob */
	if (opts.trace && !fnmatch(opts.trace, sym->name, 0)) {
		trace_enable();
		TRACE("%s: validation begin\n", sym->name);
	}

	ret = validate_branch(file, func, insn, *state);
	if (ret)
		BT_INSN(insn, "<=== (sym)");

	TRACE("%s: validation %s\n\n", sym->name, ret ? "failed" : "end");
	trace_disable();

	if (opts.checksum)
		checksum_finish(func);

	return ret;
}
4545 
4546 static int validate_section(struct objtool_file *file, struct section *sec)
4547 {
4548 	struct insn_state state;
4549 	struct symbol *func;
4550 	int warnings = 0;
4551 
4552 	sec_for_each_sym(sec, func) {
4553 		if (!is_func_sym(func))
4554 			continue;
4555 
4556 		init_insn_state(file, &state, sec);
4557 		set_func_state(&state.cfi);
4558 
4559 		warnings += validate_symbol(file, sec, func, &state);
4560 	}
4561 
4562 	return warnings;
4563 }
4564 
4565 static int validate_noinstr_sections(struct objtool_file *file)
4566 {
4567 	struct section *sec;
4568 	int warnings = 0;
4569 
4570 	sec = find_section_by_name(file->elf, ".noinstr.text");
4571 	if (sec) {
4572 		warnings += validate_section(file, sec);
4573 		warnings += validate_unwind_hints(file, sec);
4574 	}
4575 
4576 	sec = find_section_by_name(file->elf, ".entry.text");
4577 	if (sec) {
4578 		warnings += validate_section(file, sec);
4579 		warnings += validate_unwind_hints(file, sec);
4580 	}
4581 
4582 	sec = find_section_by_name(file->elf, ".cpuidle.text");
4583 	if (sec) {
4584 		warnings += validate_section(file, sec);
4585 		warnings += validate_unwind_hints(file, sec);
4586 	}
4587 
4588 	return warnings;
4589 }
4590 
4591 static int validate_functions(struct objtool_file *file)
4592 {
4593 	struct section *sec;
4594 	int warnings = 0;
4595 
4596 	for_each_sec(file->elf, sec) {
4597 		if (!is_text_sec(sec))
4598 			continue;
4599 
4600 		warnings += validate_section(file, sec);
4601 	}
4602 
4603 	return warnings;
4604 }
4605 
4606 static void mark_endbr_used(struct instruction *insn)
4607 {
4608 	if (!list_empty(&insn->call_node))
4609 		list_del_init(&insn->call_node);
4610 }
4611 
/*
 * Return true if @insn sits exactly one byte past the end of a symbol
 * whose first instruction is ENDBR or is annotated noendbr -- the typical
 * code-range usage where a reference points just past the range.
 */
static bool noendbr_range(struct objtool_file *file, struct instruction *insn)
{
	/* Find the symbol covering the byte just before @insn. */
	struct symbol *sym = find_symbol_containing(insn->sec, insn->offset-1);
	struct instruction *first;

	if (!sym)
		return false;

	first = find_insn(file, sym->sec, sym->offset);
	if (!first)
		return false;

	if (first->type != INSN_ENDBR && !first->noendbr)
		return false;

	/* @insn must be exactly at the symbol's end, not merely inside it. */
	return insn->offset == sym->offset + sym->len;
}
4629 
/*
 * Check a single code reference from @insn to @dest for IBT compliance.
 * Returns 0 if the destination is an acceptable target, 1 after issuing
 * a warning otherwise.
 */
static int __validate_ibt_insn(struct objtool_file *file, struct instruction *insn,
			       struct instruction *dest)
{
	/* A real landing pad: mark it used so it won't be sealed. */
	if (dest->type == INSN_ENDBR) {
		mark_endbr_used(dest);
		return 0;
	}

	if (insn_func(dest) && insn_func(insn) &&
	    insn_func(dest)->pfunc == insn_func(insn)->pfunc) {
		/*
		 * Anything from->to self is either _THIS_IP_ or
		 * IRET-to-self.
		 *
		 * There is no sane way to annotate _THIS_IP_ since the
		 * compiler treats the relocation as a constant and is
		 * happy to fold in offsets, skewing any annotation we
		 * do, leading to vast amounts of false-positives.
		 *
		 * There's also compiler generated _THIS_IP_ through
		 * KCOV and such which we have no hope of annotating.
		 *
		 * As such, blanket accept self-references without
		 * issue.
		 */
		return 0;
	}

	/*
	 * Accept anything ANNOTATE_NOENDBR.
	 */
	if (dest->noendbr)
		return 0;

	/*
	 * Accept if this is the instruction after a symbol
	 * that is (no)endbr -- typical code-range usage.
	 */
	if (noendbr_range(file, dest))
		return 0;

	WARN_INSN(insn, "relocation to !ENDBR: %s", offstr(dest->sec, dest->offset));
	return 1;
}
4674 
/*
 * Find function pointer load relocations in @insn and validate each code
 * destination as an indirect branch target.  Returns the number of
 * warnings issued.
 */
static int validate_ibt_insn(struct objtool_file *file, struct instruction *insn)
{
	struct instruction *dest;
	struct reloc *reloc;
	unsigned long off;
	int warnings = 0;

	/*
	 * Looking for function pointer load relocations.  Ignore
	 * direct/indirect branches:
	 */
	switch (insn->type) {

	case INSN_CALL:
	case INSN_CALL_DYNAMIC:
	case INSN_JUMP_CONDITIONAL:
	case INSN_JUMP_UNCONDITIONAL:
	case INSN_JUMP_DYNAMIC:
	case INSN_JUMP_DYNAMIC_CONDITIONAL:
	case INSN_RETURN:
	case INSN_NOP:
		return 0;

	case INSN_LEA_RIP:
		if (!insn_reloc(file, insn)) {
			/* local function pointer reference without reloc */

			off = arch_jump_destination(insn);

			dest = find_insn(file, insn->sec, off);
			if (!dest) {
				WARN_INSN(insn, "corrupt function pointer reference");
				return 1;
			}

			return __validate_ibt_insn(file, insn, dest);
		}
		break;

	default:
		break;
	}

	/*
	 * An instruction can carry multiple relocations; walk every reloc
	 * that falls within the instruction's byte range.
	 */
	for (reloc = insn_reloc(file, insn);
	     reloc;
	     reloc = find_reloc_by_dest_range(file->elf, insn->sec,
					      reloc_offset(reloc) + 1,
					      (insn->offset + insn->len) - (reloc_offset(reloc) + 1))) {

		off = reloc->sym->offset + arch_insn_adjusted_addend(insn, reloc);

		/* Relocs not pointing at an instruction are of no interest here. */
		dest = find_insn(file, reloc->sym->sec, off);
		if (!dest)
			continue;

		warnings += __validate_ibt_insn(file, insn, dest);
	}

	return warnings;
}
4735 
4736 static int validate_ibt_data_reloc(struct objtool_file *file,
4737 				   struct reloc *reloc)
4738 {
4739 	struct instruction *dest;
4740 
4741 	dest = find_insn(file, reloc->sym->sec,
4742 			 reloc->sym->offset + reloc_addend(reloc));
4743 	if (!dest)
4744 		return 0;
4745 
4746 	if (dest->type == INSN_ENDBR) {
4747 		mark_endbr_used(dest);
4748 		return 0;
4749 	}
4750 
4751 	if (dest->noendbr)
4752 		return 0;
4753 
4754 	WARN_FUNC(reloc->sec->base, reloc_offset(reloc),
4755 		  "data relocation to !ENDBR: %s", offstr(dest->sec, dest->offset));
4756 
4757 	return 1;
4758 }
4759 
4760 /*
4761  * Validate IBT rules and remove used ENDBR instructions from the seal list.
4762  * Unused ENDBR instructions will be annotated for sealing (i.e., replaced with
4763  * NOPs) later, in create_ibt_endbr_seal_sections().
4764  */
4765 static int validate_ibt(struct objtool_file *file)
4766 {
4767 	struct section *sec;
4768 	struct reloc *reloc;
4769 	struct instruction *insn;
4770 	int warnings = 0;
4771 
4772 	for_each_insn(file, insn)
4773 		warnings += validate_ibt_insn(file, insn);
4774 
4775 	for_each_sec(file->elf, sec) {
4776 
4777 		/* Already done by validate_ibt_insn() */
4778 		if (is_text_sec(sec))
4779 			continue;
4780 
4781 		if (!sec->rsec)
4782 			continue;
4783 
4784 		/*
4785 		 * These sections can reference text addresses, but not with
4786 		 * the intent to indirect branch to them.
4787 		 */
4788 		if ((!strncmp(sec->name, ".discard", 8) &&
4789 		     strcmp(sec->name, ".discard.ibt_endbr_noseal"))	||
4790 		    !strncmp(sec->name, ".debug", 6)			||
4791 		    !strcmp(sec->name, ".altinstructions")		||
4792 		    !strcmp(sec->name, ".ibt_endbr_seal")		||
4793 		    !strcmp(sec->name, ".kcfi_traps")			||
4794 		    !strcmp(sec->name, ".orc_unwind_ip")		||
4795 		    !strcmp(sec->name, ".retpoline_sites")		||
4796 		    !strcmp(sec->name, ".smp_locks")			||
4797 		    !strcmp(sec->name, ".static_call_sites")		||
4798 		    !strcmp(sec->name, "_error_injection_whitelist")	||
4799 		    !strcmp(sec->name, "_kprobe_blacklist")		||
4800 		    !strcmp(sec->name, "__bug_table")			||
4801 		    !strcmp(sec->name, "__ex_table")			||
4802 		    !strcmp(sec->name, "__jump_table")			||
4803 		    !strcmp(sec->name, "__klp_funcs")			||
4804 		    !strcmp(sec->name, "__mcount_loc")			||
4805 		    !strcmp(sec->name, ".llvm.call-graph-profile")	||
4806 		    !strcmp(sec->name, ".llvm_bb_addr_map")		||
4807 		    !strcmp(sec->name, "__tracepoints")			||
4808 		    !strcmp(sec->name, ".return_sites")			||
4809 		    !strcmp(sec->name, ".call_sites")			||
4810 		    !strcmp(sec->name, "__patchable_function_entries"))
4811 			continue;
4812 
4813 		for_each_reloc(sec->rsec, reloc)
4814 			warnings += validate_ibt_data_reloc(file, reloc);
4815 	}
4816 
4817 	return warnings;
4818 }
4819 
4820 static int validate_sls(struct objtool_file *file)
4821 {
4822 	struct instruction *insn, *next_insn;
4823 	int warnings = 0;
4824 
4825 	for_each_insn(file, insn) {
4826 		next_insn = next_insn_same_sec(file, insn);
4827 
4828 		if (insn->retpoline_safe)
4829 			continue;
4830 
4831 		switch (insn->type) {
4832 		case INSN_RETURN:
4833 			if (!next_insn || next_insn->type != INSN_TRAP) {
4834 				WARN_INSN(insn, "missing int3 after ret");
4835 				warnings++;
4836 			}
4837 
4838 			break;
4839 		case INSN_JUMP_DYNAMIC:
4840 			if (!next_insn || next_insn->type != INSN_TRAP) {
4841 				WARN_INSN(insn, "missing int3 after indirect jump");
4842 				warnings++;
4843 			}
4844 			break;
4845 		default:
4846 			break;
4847 		}
4848 	}
4849 
4850 	return warnings;
4851 }
4852 
/*
 * Warn about instructions that validate_branch() never visited and which
 * can't otherwise be ignored.  An unreachable instruction directly after a
 * dead-end call usually means the callee lacks a __noreturn annotation.
 */
static int validate_reachable_instructions(struct objtool_file *file)
{
	struct instruction *insn, *prev_insn;
	struct symbol *call_dest;
	int warnings = 0;

	if (file->ignore_unreachables)
		return 0;

	for_each_insn(file, insn) {
		if (insn->visited || ignore_unreachable_insn(file, insn))
			continue;

		/* Give a more specific warning when a noreturn call precedes us. */
		prev_insn = prev_insn_same_sec(file, insn);
		if (prev_insn && prev_insn->dead_end) {
			call_dest = insn_call_dest(prev_insn);
			if (call_dest) {
				WARN_INSN(insn, "%s() missing __noreturn in .c/.h or NORETURN() in noreturns.h",
					  call_dest->name);
				warnings++;
				continue;
			}
		}

		WARN_INSN(insn, "unreachable instruction");
		warnings++;
	}

	return warnings;
}
4883 
4884 __weak bool arch_absolute_reloc(struct elf *elf, struct reloc *reloc)
4885 {
4886 	unsigned int type = reloc_type(reloc);
4887 	size_t sz = elf_addr_size(elf);
4888 
4889 	return (sz == 8) ? (type == R_ABS64) : (type == R_ABS32);
4890 }
4891 
4892 static int check_abs_references(struct objtool_file *file)
4893 {
4894 	struct section *sec;
4895 	struct reloc *reloc;
4896 	int ret = 0;
4897 
4898 	for_each_sec(file->elf, sec) {
4899 		/* absolute references in non-loadable sections are fine */
4900 		if (!(sec->sh.sh_flags & SHF_ALLOC))
4901 			continue;
4902 
4903 		/* section must have an associated .rela section */
4904 		if (!sec->rsec)
4905 			continue;
4906 
4907 		/*
4908 		 * Special case for compiler generated metadata that is not
4909 		 * consumed until after boot.
4910 		 */
4911 		if (!strcmp(sec->name, "__patchable_function_entries"))
4912 			continue;
4913 
4914 		for_each_reloc(sec->rsec, reloc) {
4915 			if (arch_absolute_reloc(file->elf, reloc)) {
4916 				WARN("section %s has absolute relocation at offset 0x%llx",
4917 				     sec->name, (unsigned long long)reloc_offset(reloc));
4918 				ret++;
4919 			}
4920 		}
4921 	}
4922 	return ret;
4923 }
4924 
/* Singly-linked list node tracking the base address of an insn allocation. */
struct insn_chunk {
	void *addr;			/* start of an instruction chunk to free */
	struct insn_chunk *next;	/* next tracked chunk, NULL terminates */
};
4929 
4930 /*
4931  * Reduce peak RSS usage by freeing insns memory before writing the ELF file,
4932  * which can trigger more allocations for .debug_* sections whose data hasn't
4933  * been read yet.
4934  */
4935 static void free_insns(struct objtool_file *file)
4936 {
4937 	struct instruction *insn;
4938 	struct insn_chunk *chunks = NULL, *chunk;
4939 
4940 	for_each_insn(file, insn) {
4941 		if (!insn->idx) {
4942 			chunk = malloc(sizeof(*chunk));
4943 			chunk->addr = insn;
4944 			chunk->next = chunks;
4945 			chunks = chunk;
4946 		}
4947 	}
4948 
4949 	for (chunk = chunks; chunk; chunk = chunk->next)
4950 		free(chunk->addr);
4951 }
4952 
4953 const char *objtool_disas_insn(struct instruction *insn)
4954 {
4955 	struct disas_context *dctx = objtool_disas_ctx;
4956 
4957 	if (!dctx)
4958 		return "";
4959 
4960 	disas_insn(dctx, insn);
4961 	return disas_result(dctx);
4962 }
4963 
/*
 * Main objtool entry point: decode the object file, run all enabled
 * validation passes, and generate the requested annotation sections.
 * Returns 0 on success; non-zero on error, or on warnings with --werror.
 */
int check(struct objtool_file *file)
{
	struct disas_context *disas_ctx = NULL;
	int ret = 0, warnings = 0;

	/*
	 * Create a disassembly context if we might disassemble any
	 * instruction or function.
	 */
	if (opts.verbose || opts.backtrace || opts.trace || opts.disas) {
		disas_ctx = disas_context_create(file);
		if (!disas_ctx) {
			/* No disassembler: drop the options which require one. */
			opts.disas = false;
			opts.trace = false;
		}
		objtool_disas_ctx = disas_ctx;
	}

	/* Set up the baseline CFI states used throughout validation. */
	arch_initial_func_cfi_state(&initial_func_cfi);
	init_cfi_state(&init_cfi);
	init_cfi_state(&func_cfi);
	set_func_state(&func_cfi);
	init_cfi_state(&force_undefined_cfi);
	force_undefined_cfi.force_undefined = true;

	if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3))) {
		ret = -1;
		goto out;
	}

	cfi_hash_add(&init_cfi);
	cfi_hash_add(&func_cfi);

	ret = checksum_debug_init(file);
	if (ret)
		goto out;

	ret = decode_sections(file);
	if (ret)
		goto out;

	/* Nothing to validate or annotate without instructions. */
	if (!nr_insns)
		goto out;

	if (opts.retpoline)
		warnings += validate_retpoline(file);

	if (validate_branch_enabled()) {
		int w = 0;

		w += validate_functions(file);
		w += validate_unwind_hints(file, NULL);
		/* Only check reachability when validation itself was clean. */
		if (!w)
			w += validate_reachable_instructions(file);

		warnings += w;

	} else if (opts.noinstr) {
		warnings += validate_noinstr_sections(file);
	}

	if (opts.unret) {
		/*
		 * Must be after validate_branch() and friends, it plays
		 * further games with insn->visited.
		 */
		warnings += validate_unrets(file);
	}

	if (opts.ibt)
		warnings += validate_ibt(file);

	if (opts.sls)
		warnings += validate_sls(file);

	/* Generation passes: each bails out on the first hard error. */
	if (opts.static_call) {
		ret = create_static_call_sections(file);
		if (ret)
			goto out;
	}

	if (opts.retpoline) {
		ret = create_retpoline_sites_sections(file);
		if (ret)
			goto out;
	}

	if (opts.cfi) {
		ret = create_cfi_sections(file);
		if (ret)
			goto out;
	}

	if (opts.rethunk) {
		ret = create_return_sites_sections(file);
		if (ret)
			goto out;

		if (opts.hack_skylake) {
			ret = create_direct_call_sections(file);
			if (ret)
				goto out;
		}
	}

	if (opts.mcount) {
		ret = create_mcount_loc_sections(file);
		if (ret)
			goto out;
	}

	if (opts.prefix) {
		ret = create_prefix_symbols(file);
		if (ret)
			goto out;
	}

	if (opts.ibt) {
		ret = create_ibt_endbr_seal_sections(file);
		if (ret)
			goto out;
	}

	if (opts.noabs)
		warnings += check_abs_references(file);

	if (opts.checksum) {
		ret = create_sym_checksum_section(file);
		if (ret)
			goto out;
	}

	if (opts.orc && nr_insns) {
		ret = orc_create(file);
		if (ret)
			goto out;
	}

	if (opts.stats) {
		printf("nr_insns_visited: %ld\n", nr_insns_visited);
		printf("nr_cfi: %ld\n", nr_cfi);
		printf("nr_cfi_reused: %ld\n", nr_cfi_reused);
		printf("nr_cfi_cache: %ld\n", nr_cfi_cache);
	}

out:
	if (ret || warnings) {
		/* --werror turns any warning into a failing exit code. */
		if (opts.werror && warnings)
			ret = 1;

		if (opts.verbose) {
			if (opts.werror && warnings)
				WARN("%d warning(s) upgraded to errors", warnings);
			disas_warned_funcs(disas_ctx);
		}
	}

	if (opts.disas)
		disas_funcs(disas_ctx);

	if (disas_ctx) {
		disas_context_destroy(disas_ctx);
		objtool_disas_ctx = NULL;
	}

	/* Release insn memory before ELF write to reduce peak RSS. */
	free_insns(file);

	if (!ret && !warnings)
		return 0;

	if (opts.backup && make_backup())
		return 1;

	return ret;
}
5139