xref: /linux/tools/objtool/check.c (revision 97b281d7edb2ae662365be2809cd728470119720)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
4  */
5 
6 #define _GNU_SOURCE /* memmem() */
7 #include <fnmatch.h>
8 #include <string.h>
9 #include <stdlib.h>
10 #include <inttypes.h>
11 #include <sys/mman.h>
12 
13 #include <objtool/builtin.h>
14 #include <objtool/cfi.h>
15 #include <objtool/arch.h>
16 #include <objtool/disas.h>
17 #include <objtool/check.h>
18 #include <objtool/special.h>
19 #include <objtool/trace.h>
20 #include <objtool/warn.h>
21 #include <objtool/checksum.h>
22 #include <objtool/util.h>
23 
24 #include <linux/objtool_types.h>
25 #include <linux/hashtable.h>
26 #include <linux/kernel.h>
27 #include <linux/static_call_types.h>
28 #include <linux/string.h>
29 
/* Counters for CFI state allocation/dedup statistics (see cfi_alloc() and
 * cfi_hash_find_or_add() below). */
static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache;

/* Shared CFI state templates; initialized elsewhere in this file —
 * NOTE(review): exact initialization not visible in this chunk. */
static struct cfi_init_state initial_func_cfi;
static struct cfi_state init_cfi;
static struct cfi_state func_cfi;
static struct cfi_state force_undefined_cfi;

/* Disassembly context shared with the disas/trace machinery. */
struct disas_context *objtool_disas_ctx;

/* Length of the longest symbol name seen — presumably for output
 * alignment; confirm against users outside this chunk. */
size_t sym_name_max_len;
40 
41 struct instruction *find_insn(struct objtool_file *file,
42 			      struct section *sec, unsigned long offset)
43 {
44 	struct instruction *insn;
45 
46 	hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) {
47 		if (insn->sec == sec && insn->offset == offset)
48 			return insn;
49 	}
50 
51 	return NULL;
52 }
53 
/*
 * Return the instruction immediately following @insn in its section, or
 * NULL at end of section.
 *
 * Instructions are stored in fixed-size chunks (see decode_instructions()):
 * within a chunk the successor is the adjacent array element; at the last
 * slot of a chunk (idx == INSN_CHUNK_MAX) fall back to a hash lookup of the
 * next offset.
 */
struct instruction *next_insn_same_sec(struct objtool_file *file,
				       struct instruction *insn)
{
	if (insn->idx == INSN_CHUNK_MAX)
		return find_insn(file, insn->sec, insn->offset + insn->len);

	insn++;
	/* A zero length marks an unused slot at the end of the chunk. */
	if (!insn->len)
		return NULL;

	return insn;
}
66 
/*
 * Return the next instruction belonging to @insn's function, following the
 * transition from a parent function into its compiler-split subfunction
 * (func->cfunc).  Returns NULL at the end of the function.
 */
static struct instruction *next_insn_same_func(struct objtool_file *file,
					       struct instruction *insn)
{
	struct instruction *next = next_insn_same_sec(file, insn);
	struct symbol *func = insn_func(insn);

	if (!func)
		return NULL;

	if (next && insn_func(next) == func)
		return next;

	/* Check if we're already in the subfunction: */
	if (func == func->cfunc)
		return NULL;

	/* Move to the subfunction: */
	return find_insn(file, func->cfunc->sec, func->cfunc->offset);
}
86 
87 static struct instruction *prev_insn_same_sec(struct objtool_file *file,
88 					      struct instruction *insn)
89 {
90 	if (insn->idx == 0) {
91 		if (insn->prev_len)
92 			return find_insn(file, insn->sec, insn->offset - insn->prev_len);
93 		return NULL;
94 	}
95 
96 	return insn - 1;
97 }
98 
99 static struct instruction *prev_insn_same_sym(struct objtool_file *file,
100 					      struct instruction *insn)
101 {
102 	struct instruction *prev = prev_insn_same_sec(file, insn);
103 
104 	if (prev && insn_func(prev) == insn_func(insn))
105 		return prev;
106 
107 	return NULL;
108 }
109 
/* Iterate every decoded instruction in every section of the file. */
#define for_each_insn(file, insn)					\
	for (struct section *__sec, *__fake = (struct section *)1;	\
	     __fake; __fake = NULL)					\
		for_each_sec(file->elf, __sec)				\
			sec_for_each_insn(file, __sec, insn)

/* Iterate a function's instructions, following the split subfunction. */
#define func_for_each_insn(file, func, insn)				\
	for (insn = find_insn(file, func->sec, func->offset);		\
	     insn;							\
	     insn = next_insn_same_func(file, insn))

/* Iterate the instructions inside a symbol's [offset, offset+len) range. */
#define sym_for_each_insn(file, sym, insn)				\
	for (insn = find_insn(file, sym->sec, sym->offset);		\
	     insn && insn->offset < sym->offset + sym->len;		\
	     insn = next_insn_same_sec(file, insn))

/* Walk backwards, starting before @insn, down to the symbol's start. */
#define sym_for_each_insn_continue_reverse(file, sym, insn)		\
	for (insn = prev_insn_same_sec(file, insn);			\
	     insn && insn->offset >= sym->offset;			\
	     insn = prev_insn_same_sec(file, insn))

/* Iterate from @insn (inclusive) to the end of its section. */
#define sec_for_each_insn_from(file, insn)				\
	for (; insn; insn = next_insn_same_sec(file, insn))

/* Iterate from the instruction after @insn to the end of its section. */
#define sec_for_each_insn_continue(file, insn)				\
	for (insn = next_insn_same_sec(file, insn); insn;		\
	     insn = next_insn_same_sec(file, insn))
137 
138 static inline struct reloc *insn_jump_table(struct instruction *insn)
139 {
140 	if (insn->type == INSN_JUMP_DYNAMIC ||
141 	    insn->type == INSN_CALL_DYNAMIC)
142 		return insn->_jump_table;
143 
144 	return NULL;
145 }
146 
147 static inline unsigned long insn_jump_table_size(struct instruction *insn)
148 {
149 	if (insn->type == INSN_JUMP_DYNAMIC ||
150 	    insn->type == INSN_CALL_DYNAMIC)
151 		return insn->_jump_table_size;
152 
153 	return 0;
154 }
155 
156 static bool is_jump_table_jump(struct instruction *insn)
157 {
158 	struct alt_group *alt_group = insn->alt_group;
159 
160 	if (insn_jump_table(insn))
161 		return true;
162 
163 	/* Retpoline alternative for a jump table? */
164 	return alt_group && alt_group->orig_group &&
165 	       insn_jump_table(alt_group->orig_group->first_insn);
166 }
167 
/*
 * Is @insn a sibling (tail) call — a jump that transfers control to another
 * function rather than continuing in the current one?
 */
static bool is_sibling_call(struct instruction *insn)
{
	/*
	 * Assume only STT_FUNC calls have jump-tables.
	 */
	if (insn_func(insn)) {
		/* An indirect jump is either a sibling call or a jump to a table. */
		if (insn->type == INSN_JUMP_DYNAMIC)
			return !is_jump_table_jump(insn);
	}

	/* add_jump_destinations() sets insn_call_dest(insn) for sibling calls. */
	return (is_static_jump(insn) && insn_call_dest(insn));
}
182 
/*
 * Checks if a function is a Rust "noreturn" one.
 */
static bool is_rust_noreturn(const struct symbol *func)
{
	/*
	 * If it does not start with "_R", then it is not a Rust symbol.
	 */
	if (strncmp(func->name, "_R", 2))
		return false;

	/*
	 * These are just heuristics -- we do not control the precise symbol
	 * name, due to the crate disambiguators (which depend on the compiler)
	 * as well as changes to the source code itself between versions (since
	 * these come from the Rust standard library).
	 *
	 * Suffix matching (str_ends_with) skips the disambiguator-laden
	 * prefix; the strstr() cases additionally tolerate trailing hash
	 * suffixes in the mangled name.
	 */
	return str_ends_with(func->name, "_4core3num20from_str_radix_panic")				||
	       str_ends_with(func->name, "_4core3num22from_ascii_radix_panic")				||
	       str_ends_with(func->name, "_4core5sliceSp15copy_from_slice17len_mismatch_fail")		||
	       str_ends_with(func->name, "_4core6option13expect_failed")				||
	       str_ends_with(func->name, "_4core6option13unwrap_failed")				||
	       str_ends_with(func->name, "_4core6result13unwrap_failed")				||
	       str_ends_with(func->name, "_4core9panicking5panic")					||
	       str_ends_with(func->name, "_4core9panicking9panic_fmt")					||
	       str_ends_with(func->name, "_4core9panicking14panic_explicit")				||
	       str_ends_with(func->name, "_4core9panicking14panic_nounwind")				||
	       str_ends_with(func->name, "_4core9panicking18panic_bounds_check")			||
	       str_ends_with(func->name, "_4core9panicking18panic_nounwind_fmt")			||
	       str_ends_with(func->name, "_4core9panicking19assert_failed_inner")			||
	       str_ends_with(func->name, "_4core9panicking30panic_null_pointer_dereference")		||
	       str_ends_with(func->name, "_4core9panicking36panic_misaligned_pointer_dereference")	||
	       str_ends_with(func->name, "_7___rustc17rust_begin_unwind")				||
	       strstr(func->name, "_4core9panicking13assert_failed")					||
	       strstr(func->name, "_4core9panicking11panic_const24panic_const_")			||
	       (strstr(func->name, "_4core5slice5index") &&
		strstr(func->name, "slice_") &&
		str_ends_with(func->name, "_fail"));
}
222 
/*
 * This checks to see if the given function is a "noreturn" function.
 *
 * For global functions which are outside the scope of this object file, we
 * have to keep a manual list of them.
 *
 * For local functions, we have to detect them manually by simply looking for
 * the lack of a return instruction.
 *
 * @recursion bounds the sibling-call chase below (hard limit of 5).
 */
static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
				int recursion)
{
	int i;
	struct instruction *insn;
	bool empty = true;

	/* Known global noreturn functions, stringified from noreturns.h. */
#define NORETURN(func) __stringify(func),
	static const char * const global_noreturns[] = {
#include "noreturns.h"
	};
#undef NORETURN

	if (!func)
		return false;

	if (!is_local_sym(func)) {
		if (is_rust_noreturn(func))
			return true;

		for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
			if (!strcmp(func->name, global_noreturns[i]))
				return true;
	}

	/* A weak symbol may be overridden by a definition that returns. */
	if (is_weak_sym(func))
		return false;

	if (!func->len)
		return false;

	insn = find_insn(file, func->sec, func->offset);
	if (!insn || !insn_func(insn))
		return false;

	/* Any reachable return instruction means the function can return. */
	func_for_each_insn(file, func, insn) {
		empty = false;

		if (insn->type == INSN_RETURN)
			return false;
	}

	if (empty)
		return false;

	/*
	 * A function can have a sibling call instead of a return.  In that
	 * case, the function's dead-end status depends on whether the target
	 * of the sibling call returns.
	 */
	func_for_each_insn(file, func, insn) {
		if (is_sibling_call(insn)) {
			struct instruction *dest = insn->jump_dest;

			if (!dest)
				/* sibling call to another file */
				return false;

			/* local sibling call */
			if (recursion == 5) {
				/*
				 * Infinite recursion: two functions have
				 * sibling calls to each other.  This is a very
				 * rare case.  It means they aren't dead ends.
				 */
				return false;
			}

			return __dead_end_function(file, insn_func(dest), recursion+1);
		}
	}

	return true;
}
306 
/* Entry point for the noreturn analysis: start at recursion depth 0. */
static bool dead_end_function(struct objtool_file *file, struct symbol *func)
{
	return __dead_end_function(file, func, 0);
}
311 
312 static void init_cfi_state(struct cfi_state *cfi)
313 {
314 	int i;
315 
316 	for (i = 0; i < CFI_NUM_REGS; i++) {
317 		cfi->regs[i].base = CFI_UNDEFINED;
318 		cfi->vals[i].base = CFI_UNDEFINED;
319 	}
320 	cfi->cfa.base = CFI_UNDEFINED;
321 	cfi->drap_reg = CFI_UNDEFINED;
322 	cfi->drap_offset = -1;
323 }
324 
325 static void init_insn_state(struct objtool_file *file, struct insn_state *state,
326 			    struct section *sec)
327 {
328 	memset(state, 0, sizeof(*state));
329 	init_cfi_state(&state->cfi);
330 
331 	if (opts.noinstr && sec)
332 		state->noinstr = sec->noinstr;
333 }
334 
335 static struct cfi_state *cfi_alloc(void)
336 {
337 	struct cfi_state *cfi = calloc(1, sizeof(struct cfi_state));
338 	if (!cfi) {
339 		ERROR_GLIBC("calloc");
340 		exit(1);
341 	}
342 	nr_cfi++;
343 	return cfi;
344 }
345 
/* Hash table interning identical CFI states (see cfi_hash_find_or_add()). */
static int cfi_bits;
static struct hlist_head *cfi_hash;
348 
/*
 * Compare two CFI states, skipping the 'hash' list node at the head of the
 * struct so only the CFI payload participates.  Note: memcmp semantics
 * despite the bool return — equality yields false; callers use !cficmp().
 */
static inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2)
{
	return memcmp((void *)cfi1 + sizeof(cfi1->hash),
		      (void *)cfi2 + sizeof(cfi2->hash),
		      sizeof(struct cfi_state) - sizeof(struct hlist_node));
}
355 
/* Hash the CFI payload (everything after the 'hash' node) into a 32-bit key. */
static inline u32 cfi_key(struct cfi_state *cfi)
{
	return jhash((void *)cfi + sizeof(cfi->hash),
		     sizeof(*cfi) - sizeof(cfi->hash), 0);
}
361 
362 static struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi)
363 {
364 	struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
365 	struct cfi_state *obj;
366 
367 	hlist_for_each_entry(obj, head, hash) {
368 		if (!cficmp(cfi, obj)) {
369 			nr_cfi_cache++;
370 			return obj;
371 		}
372 	}
373 
374 	obj = cfi_alloc();
375 	*obj = *cfi;
376 	hlist_add_head(&obj->hash, head);
377 
378 	return obj;
379 }
380 
381 static void cfi_hash_add(struct cfi_state *cfi)
382 {
383 	struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
384 
385 	hlist_add_head(&cfi->hash, head);
386 }
387 
388 static void *cfi_hash_alloc(unsigned long size)
389 {
390 	cfi_bits = max(10, ilog2(size));
391 	cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits,
392 			PROT_READ|PROT_WRITE,
393 			MAP_PRIVATE|MAP_ANON, -1, 0);
394 	if (cfi_hash == (void *)-1L) {
395 		ERROR_GLIBC("mmap fail cfi_hash");
396 		cfi_hash = NULL;
397 	}  else if (opts.stats) {
398 		printf("cfi_bits: %d\n", cfi_bits);
399 	}
400 
401 	return cfi_hash;
402 }
403 
/* Decode/validation counters; nr_insns is reported with --stats. */
static unsigned long nr_insns;
static unsigned long nr_insns_visited;
406 
/*
 * Call the arch-specific instruction decoder for all the instructions and add
 * them to the global instruction list.
 */
static int decode_instructions(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;
	unsigned long offset;
	struct instruction *insn;

	for_each_sec(file->elf, sec) {
		struct instruction *insns = NULL;
		u8 prev_len = 0;
		u8 idx = 0;

		if (!is_text_sec(sec))
			continue;

		/* Alternative and .discard sections aren't treated as normal text. */
		if (strcmp(sec->name, ".altinstr_replacement") &&
		    strcmp(sec->name, ".altinstr_aux") &&
		    strncmp(sec->name, ".discard.", 9))
			sec->text = true;

		/* Sections whose code must remain free of instrumentation. */
		if (!strcmp(sec->name, ".noinstr.text") ||
		    !strcmp(sec->name, ".entry.text") ||
		    !strcmp(sec->name, ".cpuidle.text") ||
		    !strncmp(sec->name, ".text..__x86.", 13))
			sec->noinstr = true;

		/*
		 * .init.text code is ran before userspace and thus doesn't
		 * strictly need retpolines, except for modules which are
		 * loaded late, they very much do need retpoline in their
		 * .init.text
		 */
		if (!strcmp(sec->name, ".init.text") && !opts.module)
			sec->init = true;

		/* Decode the section, allocating instructions in fixed-size chunks. */
		for (offset = 0; offset < sec_size(sec); offset += insn->len) {
			if (!insns || idx == INSN_CHUNK_MAX) {
				insns = calloc(INSN_CHUNK_SIZE, sizeof(*insn));
				if (!insns) {
					ERROR_GLIBC("calloc");
					return -1;
				}
				idx = 0;
			} else {
				idx++;
			}
			insn = &insns[idx];
			insn->idx = idx;

			INIT_LIST_HEAD(&insn->call_node);
			insn->sec = sec;
			insn->offset = offset;
			/* Remember the previous length so we can walk backwards. */
			insn->prev_len = prev_len;

			if (arch_decode_instruction(file, sec, offset, sec_size(sec) - offset, insn))
				return -1;

			prev_len = insn->len;

			/*
			 * By default, "ud2" is a dead end unless otherwise
			 * annotated, because GCC 7 inserts it for certain
			 * divide-by-zero cases.
			 */
			if (insn->type == INSN_BUG)
				insn->dead_end = true;

			hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset));
			nr_insns++;
		}

		/* Associate each instruction with its containing symbol. */
		sec_for_each_sym(sec, func) {
			if (!is_notype_sym(func) && !is_func_sym(func))
				continue;

			if (func->offset == sec_size(sec)) {
				/* Heuristic: likely an "end" symbol */
				if (is_notype_sym(func))
					continue;
				ERROR("%s(): STT_FUNC at end of section", func->name);
				return -1;
			}

			if (func->embedded_insn || func->alias != func)
				continue;

			if (!find_insn(file, sec, func->offset)) {
				ERROR("%s(): can't find starting instruction", func->name);
				return -1;
			}

			sym_for_each_insn(file, func, insn) {
				insn->sym = func;
				if (is_func_sym(func) &&
				    insn->type == INSN_ENDBR &&
				    list_empty(&insn->call_node)) {
					if (insn->offset == func->offset) {
						/* ENDBR at function entry: IBT seal candidate. */
						list_add_tail(&insn->call_node, &file->endbr_list);
						file->nr_endbr++;
					} else {
						file->nr_endbr_int++;
					}
				}
			}
		}
	}

	if (opts.stats)
		printf("nr_insns: %lu\n", nr_insns);

	return 0;
}
523 
/*
 * Read the pv_ops[] .data table to find the static initialized values.
 *
 * Walks the relocations covering @symname's data and records each target
 * function against its slot index.  Returns 0 on success, -1 on error;
 * a missing symbol is not an error.
 */
static int add_pv_ops(struct objtool_file *file, const char *symname)
{
	struct symbol *sym, *func;
	unsigned long off, end;
	struct reloc *reloc;
	int idx;

	sym = find_symbol_by_name(file->elf, symname);
	if (!sym)
		return 0;

	off = sym->offset;
	end = off + sym->len;
	for (;;) {
		reloc = find_reloc_by_dest_range(file->elf, sym->sec, off, end - off);
		if (!reloc)
			break;

		/* Slot index within the table (one pointer per entry). */
		idx = (reloc_offset(reloc) - sym->offset) / sizeof(unsigned long);

		func = reloc->sym;
		/* Section-relative reloc: resolve to the symbol at the addend. */
		if (is_sec_sym(func))
			func = find_symbol_by_offset(reloc->sym->sec,
						     reloc_addend(reloc));
		if (!func) {
			ERROR_FUNC(reloc->sym->sec, reloc_addend(reloc),
				   "can't find func at %s[%d]", symname, idx);
			return -1;
		}

		if (objtool_pv_add(file, idx, func))
			return -1;

		/* Resume the search just past this relocation. */
		off = reloc_offset(reloc) + 1;
		if (off > end)
			break;
	}

	return 0;
}
567 
/*
 * Allocate and initialize file->pv_ops[].
 *
 * Only needed for noinstr validation; records the statically initialized
 * targets of each paravirt ops table.
 */
static int init_pv_ops(struct objtool_file *file)
{
	static const char *pv_ops_tables[] = {
		"pv_ops",
		"xen_cpu_ops",
		"xen_irq_ops",
		"xen_mmu_ops",
		NULL,
	};
	const char *pv_ops;
	struct symbol *sym;
	int idx, nr;

	if (!opts.noinstr)
		return 0;

	file->pv_ops = NULL;

	sym = find_symbol_by_name(file->elf, "pv_ops");
	if (!sym)
		return 0;

	/* One pv_state per function-pointer slot in pv_ops[]. */
	nr = sym->len / sizeof(unsigned long);
	file->pv_ops = calloc(nr, sizeof(struct pv_state));
	if (!file->pv_ops) {
		ERROR_GLIBC("calloc");
		return -1;
	}

	for (idx = 0; idx < nr; idx++)
		INIT_LIST_HEAD(&file->pv_ops[idx].targets);

	/* Scan each table's initializers into file->pv_ops. */
	for (idx = 0; (pv_ops = pv_ops_tables[idx]); idx++) {
		if (add_pv_ops(file, pv_ops))
			return -1;
	}

	return 0;
}
610 
611 static bool is_livepatch_module(struct objtool_file *file)
612 {
613 	struct section *sec;
614 
615 	if (!opts.module)
616 		return false;
617 
618 	sec = find_section_by_name(file->elf, ".modinfo");
619 	if (!sec)
620 		return false;
621 
622 	return memmem(sec->data->d_buf, sec_size(sec), "\0livepatch=Y", 12);
623 }
624 
/*
 * Emit the .static_call_sites section: one static_call_site entry (with an
 * 'addr' and a 'key' relocation) per static call found in the file.
 */
static int create_static_call_sections(struct objtool_file *file)
{
	struct static_call_site *site;
	struct section *sec;
	struct instruction *insn;
	struct symbol *key_sym;
	char *key_name, *tmp;
	int idx;

	sec = find_section_by_name(file->elf, ".static_call_sites");
	if (sec) {
		/*
		 * Livepatch modules may have already extracted the static call
		 * site entries to take advantage of vmlinux static call
		 * privileges.
		 */
		if (!file->klp)
			WARN("file already has .static_call_sites section, skipping");

		return 0;
	}

	if (list_empty(&file->static_call_list))
		return 0;

	/* Count entries; each one needs two relocations (addr + key). */
	idx = 0;
	list_for_each_entry(insn, &file->static_call_list, call_node)
		idx++;

	sec = elf_create_section_pair(file->elf, ".static_call_sites",
				      sizeof(*site), idx, idx * 2);
	if (!sec)
		return -1;

	/* Allow modules to modify the low bits of static_call_site::key */
	sec->sh.sh_flags |= SHF_WRITE;

	idx = 0;
	list_for_each_entry(insn, &file->static_call_list, call_node) {

		/* populate reloc for 'addr' */
		if (!elf_init_reloc_text_sym(file->elf, sec,
					     idx * sizeof(*site), idx * 2,
					     insn->sec, insn->offset))
			return -1;

		/* find key symbol */
		key_name = strdup(insn_call_dest(insn)->name);
		if (!key_name) {
			ERROR_GLIBC("strdup");
			return -1;
		}
		if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR,
			    STATIC_CALL_TRAMP_PREFIX_LEN)) {
			ERROR("static_call: trampoline name malformed: %s", key_name);
			return -1;
		}
		/* Rewrite the trampoline prefix into the key prefix in place. */
		tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN;
		memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN);

		key_sym = find_symbol_by_name(file->elf, tmp);
		if (!key_sym) {
			if (!opts.module) {
				ERROR("static_call: can't find static_call_key symbol: %s", tmp);
				return -1;
			}

			/*
			 * For modules(), the key might not be exported, which
			 * means the module can make static calls but isn't
			 * allowed to change them.
			 *
			 * In that case we temporarily set the key to be the
			 * trampoline address.  This is fixed up in
			 * static_call_add_module().
			 */
			key_sym = insn_call_dest(insn);
		}

		/* populate reloc for 'key' */
		if (!elf_init_reloc_data_sym(file->elf, sec,
					     idx * sizeof(*site) + 4,
					     (idx * 2) + 1, key_sym,
					     is_sibling_call(insn) * STATIC_CALL_SITE_TAIL))
			return -1;

		idx++;
	}

	return 0;
}
716 
717 static int create_retpoline_sites_sections(struct objtool_file *file)
718 {
719 	struct instruction *insn;
720 	struct section *sec;
721 	int idx;
722 
723 	sec = find_section_by_name(file->elf, ".retpoline_sites");
724 	if (sec) {
725 		WARN("file already has .retpoline_sites, skipping");
726 		return 0;
727 	}
728 
729 	idx = 0;
730 	list_for_each_entry(insn, &file->retpoline_call_list, call_node)
731 		idx++;
732 
733 	if (!idx)
734 		return 0;
735 
736 	sec = elf_create_section_pair(file->elf, ".retpoline_sites",
737 				      sizeof(int), idx, idx);
738 	if (!sec)
739 		return -1;
740 
741 	idx = 0;
742 	list_for_each_entry(insn, &file->retpoline_call_list, call_node) {
743 
744 		if (!elf_init_reloc_text_sym(file->elf, sec,
745 					     idx * sizeof(int), idx,
746 					     insn->sec, insn->offset))
747 			return -1;
748 
749 		idx++;
750 	}
751 
752 	return 0;
753 }
754 
755 static int create_return_sites_sections(struct objtool_file *file)
756 {
757 	struct instruction *insn;
758 	struct section *sec;
759 	int idx;
760 
761 	sec = find_section_by_name(file->elf, ".return_sites");
762 	if (sec) {
763 		WARN("file already has .return_sites, skipping");
764 		return 0;
765 	}
766 
767 	idx = 0;
768 	list_for_each_entry(insn, &file->return_thunk_list, call_node)
769 		idx++;
770 
771 	if (!idx)
772 		return 0;
773 
774 	sec = elf_create_section_pair(file->elf, ".return_sites",
775 				      sizeof(int), idx, idx);
776 	if (!sec)
777 		return -1;
778 
779 	idx = 0;
780 	list_for_each_entry(insn, &file->return_thunk_list, call_node) {
781 
782 		if (!elf_init_reloc_text_sym(file->elf, sec,
783 					     idx * sizeof(int), idx,
784 					     insn->sec, insn->offset))
785 			return -1;
786 
787 		idx++;
788 	}
789 
790 	return 0;
791 }
792 
/*
 * Emit .ibt_endbr_seal: one text reloc per superfluous ENDBR (collected
 * into file->endbr_list during decode_instructions()) so the kernel can
 * overwrite them at boot.
 */
static int create_ibt_endbr_seal_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".ibt_endbr_seal");
	if (sec) {
		WARN("file already has .ibt_endbr_seal, skipping");
		return 0;
	}

	idx = 0;
	list_for_each_entry(insn, &file->endbr_list, call_node)
		idx++;

	if (opts.stats) {
		printf("ibt: ENDBR at function start: %d\n", file->nr_endbr);
		printf("ibt: ENDBR inside functions:  %d\n", file->nr_endbr_int);
		printf("ibt: superfluous ENDBR:       %d\n", idx);
	}

	if (!idx)
		return 0;

	sec = elf_create_section_pair(file->elf, ".ibt_endbr_seal",
				      sizeof(int), idx, idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->endbr_list, call_node) {

		int *site = (int *)sec->data->d_buf + idx;
		struct symbol *sym = insn->sym;
		*site = 0;

		/* Sealing init_module()'s entry ENDBR would break module loading. */
		if (opts.module && sym && is_func_sym(sym) &&
		    insn->offset == sym->offset &&
		    (!strcmp(sym->name, "init_module") ||
		     !strcmp(sym->name, "cleanup_module"))) {
			ERROR("%s(): Magic init_module() function name is deprecated, use module_init(fn) instead",
			      sym->name);
			return -1;
		}

		if (!elf_init_reloc_text_sym(file->elf, sec,
					     idx * sizeof(int), idx,
					     insn->sec, insn->offset))
			return -1;

		idx++;
	}

	return 0;
}
849 
850 static int create_cfi_sections(struct objtool_file *file)
851 {
852 	struct section *sec;
853 	struct symbol *sym;
854 	int idx;
855 
856 	sec = find_section_by_name(file->elf, ".cfi_sites");
857 	if (sec) {
858 		WARN("file already has .cfi_sites section, skipping");
859 		return 0;
860 	}
861 
862 	idx = 0;
863 	for_each_sym(file->elf, sym) {
864 		if (!is_func_sym(sym))
865 			continue;
866 
867 		if (strncmp(sym->name, "__cfi_", 6))
868 			continue;
869 
870 		idx++;
871 	}
872 
873 	sec = elf_create_section_pair(file->elf, ".cfi_sites",
874 				      sizeof(unsigned int), idx, idx);
875 	if (!sec)
876 		return -1;
877 
878 	idx = 0;
879 	for_each_sym(file->elf, sym) {
880 		if (!is_func_sym(sym))
881 			continue;
882 
883 		if (strncmp(sym->name, "__cfi_", 6))
884 			continue;
885 
886 		if (!elf_init_reloc_text_sym(file->elf, sec,
887 					     idx * sizeof(unsigned int), idx,
888 					     sym->sec, sym->offset))
889 			return -1;
890 
891 		idx++;
892 	}
893 
894 	return 0;
895 }
896 
/*
 * Emit __mcount_loc: one absolute address per mcount/fentry call site,
 * used by ftrace to locate patchable call sites.
 */
static int create_mcount_loc_sections(struct objtool_file *file)
{
	size_t addr_size = elf_addr_size(file->elf);
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, "__mcount_loc");
	if (sec) {
		/*
		 * Livepatch modules have already extracted their __mcount_loc
		 * entries to cover the !CONFIG_FTRACE_MCOUNT_USE_OBJTOOL case.
		 */
		if (!file->klp)
			WARN("file already has __mcount_loc section, skipping");

		return 0;
	}

	if (list_empty(&file->mcount_loc_list))
		return 0;

	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, call_node)
		idx++;

	sec = elf_create_section_pair(file->elf, "__mcount_loc", addr_size,
				      idx, idx);
	if (!sec)
		return -1;

	sec->sh.sh_addralign = addr_size;

	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, call_node) {

		struct reloc *reloc;

		reloc = elf_init_reloc_text_sym(file->elf, sec, idx * addr_size, idx,
					       insn->sec, insn->offset);
		if (!reloc)
			return -1;

		/* Entries are plain absolute addresses, sized to the ELF class. */
		set_reloc_type(file->elf, reloc, addr_size == 8 ? R_ABS64 : R_ABS32);

		idx++;
	}

	return 0;
}
947 
948 static int create_direct_call_sections(struct objtool_file *file)
949 {
950 	struct instruction *insn;
951 	struct section *sec;
952 	int idx;
953 
954 	sec = find_section_by_name(file->elf, ".call_sites");
955 	if (sec) {
956 		WARN("file already has .call_sites section, skipping");
957 		return 0;
958 	}
959 
960 	if (list_empty(&file->call_list))
961 		return 0;
962 
963 	idx = 0;
964 	list_for_each_entry(insn, &file->call_list, call_node)
965 		idx++;
966 
967 	sec = elf_create_section_pair(file->elf, ".call_sites",
968 				      sizeof(unsigned int), idx, idx);
969 	if (!sec)
970 		return -1;
971 
972 	idx = 0;
973 	list_for_each_entry(insn, &file->call_list, call_node) {
974 
975 		if (!elf_init_reloc_text_sym(file->elf, sec,
976 					     idx * sizeof(unsigned int), idx,
977 					     insn->sec, insn->offset))
978 			return -1;
979 
980 		idx++;
981 	}
982 
983 	return 0;
984 }
985 
986 #ifdef BUILD_KLP
/*
 * Emit .discard.sym_checksum: one {addr, checksum} pair per symbol with a
 * computed checksum (livepatch/KLP builds only — see BUILD_KLP).
 */
static int create_sym_checksum_section(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *sym;
	unsigned int idx = 0;
	struct sym_checksum *checksum;
	size_t entsize = sizeof(struct sym_checksum);

	sec = find_section_by_name(file->elf, ".discard.sym_checksum");
	if (sec) {
		if (!opts.dryrun)
			WARN("file already has .discard.sym_checksum section, skipping");

		return 0;
	}

	/* Count the symbols that carry a checksum. */
	for_each_sym(file->elf, sym)
		if (sym->csum.checksum)
			idx++;

	if (!idx)
		return 0;

	sec = elf_create_section_pair(file->elf, ".discard.sym_checksum", entsize,
				      idx, idx);
	if (!sec)
		return -1;

	idx = 0;
	for_each_sym(file->elf, sym) {
		if (!sym->csum.checksum)
			continue;

		/* 'addr' is resolved through this relocation at link time. */
		if (!elf_init_reloc(file->elf, sec->rsec, idx, idx * entsize,
				    sym, 0, R_TEXT64))
			return -1;

		checksum = (struct sym_checksum *)sec->data->d_buf + idx;
		checksum->addr = 0; /* reloc */
		checksum->checksum = sym->csum.checksum;

		mark_sec_changed(file->elf, sec, true);

		idx++;
	}

	return 0;
}
1035 #else
1036 static int create_sym_checksum_section(struct objtool_file *file) { return -EINVAL; }
1037 #endif
1038 
/*
 * Warnings shouldn't be reported for ignored functions.
 *
 * Each relocation in .discard.func_stack_frame_non_standard points at a
 * function the build marked as having a non-standard stack frame; flag it
 * (and any split subfunction) as ignored.
 */
static int add_ignores(struct objtool_file *file)
{
	struct section *rsec;
	struct symbol *func;
	struct reloc *reloc;

	rsec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
	if (!rsec)
		return 0;

	for_each_reloc(rsec, reloc) {
		switch (reloc->sym->type) {
		case STT_FUNC:
			func = reloc->sym;
			break;

		case STT_SECTION:
			/* Section-relative reloc: resolve the function at the addend. */
			func = find_func_by_offset(reloc->sym->sec, reloc_addend(reloc));
			if (!func)
				continue;
			break;

		default:
			ERROR("unexpected relocation symbol type in %s: %d",
			      rsec->name, reloc->sym->type);
			return -1;
		}

		func->ignore = true;
		/* Also ignore the split subfunction, if any. */
		if (func->cfunc)
			func->cfunc->ignore = true;
	}

	return 0;
}
1077 
1078 /*
1079  * This is a whitelist of functions that is allowed to be called with AC set.
1080  * The list is meant to be minimal and only contains compiler instrumentation
1081  * ABI and a few functions used to implement *_{to,from}_user() functions.
1082  *
1083  * These functions must not directly change AC, but may PUSHF/POPF.
1084  */
static const char *uaccess_safe_builtin[] = {
	/* KASAN */
	"kasan_report",
	"kasan_check_range",
	/* KASAN out-of-line */
	"__asan_loadN_noabort",
	"__asan_load1_noabort",
	"__asan_load2_noabort",
	"__asan_load4_noabort",
	"__asan_load8_noabort",
	"__asan_load16_noabort",
	"__asan_storeN_noabort",
	"__asan_store1_noabort",
	"__asan_store2_noabort",
	"__asan_store4_noabort",
	"__asan_store8_noabort",
	"__asan_store16_noabort",
	"__kasan_check_read",
	"__kasan_check_write",
	/* KASAN in-line */
	"__asan_report_load_n_noabort",
	"__asan_report_load1_noabort",
	"__asan_report_load2_noabort",
	"__asan_report_load4_noabort",
	"__asan_report_load8_noabort",
	"__asan_report_load16_noabort",
	"__asan_report_store_n_noabort",
	"__asan_report_store1_noabort",
	"__asan_report_store2_noabort",
	"__asan_report_store4_noabort",
	"__asan_report_store8_noabort",
	"__asan_report_store16_noabort",
	/* KCSAN */
	"__kcsan_check_access",
	"__kcsan_mb",
	"__kcsan_wmb",
	"__kcsan_rmb",
	"__kcsan_release",
	"kcsan_found_watchpoint",
	"kcsan_setup_watchpoint",
	"kcsan_check_scoped_accesses",
	"kcsan_disable_current",
	"kcsan_enable_current_nowarn",
	/* KCSAN/TSAN */
	"__tsan_func_entry",
	"__tsan_func_exit",
	"__tsan_read_range",
	"__tsan_write_range",
	"__tsan_read1",
	"__tsan_read2",
	"__tsan_read4",
	"__tsan_read8",
	"__tsan_read16",
	"__tsan_write1",
	"__tsan_write2",
	"__tsan_write4",
	"__tsan_write8",
	"__tsan_write16",
	"__tsan_read_write1",
	"__tsan_read_write2",
	"__tsan_read_write4",
	"__tsan_read_write8",
	"__tsan_read_write16",
	"__tsan_volatile_read1",
	"__tsan_volatile_read2",
	"__tsan_volatile_read4",
	"__tsan_volatile_read8",
	"__tsan_volatile_read16",
	"__tsan_volatile_write1",
	"__tsan_volatile_write2",
	"__tsan_volatile_write4",
	"__tsan_volatile_write8",
	"__tsan_volatile_write16",
	"__tsan_atomic8_load",
	"__tsan_atomic16_load",
	"__tsan_atomic32_load",
	"__tsan_atomic64_load",
	"__tsan_atomic8_store",
	"__tsan_atomic16_store",
	"__tsan_atomic32_store",
	"__tsan_atomic64_store",
	"__tsan_atomic8_exchange",
	"__tsan_atomic16_exchange",
	"__tsan_atomic32_exchange",
	"__tsan_atomic64_exchange",
	"__tsan_atomic8_fetch_add",
	"__tsan_atomic16_fetch_add",
	"__tsan_atomic32_fetch_add",
	"__tsan_atomic64_fetch_add",
	"__tsan_atomic8_fetch_sub",
	"__tsan_atomic16_fetch_sub",
	"__tsan_atomic32_fetch_sub",
	"__tsan_atomic64_fetch_sub",
	"__tsan_atomic8_fetch_and",
	"__tsan_atomic16_fetch_and",
	"__tsan_atomic32_fetch_and",
	"__tsan_atomic64_fetch_and",
	"__tsan_atomic8_fetch_or",
	"__tsan_atomic16_fetch_or",
	"__tsan_atomic32_fetch_or",
	"__tsan_atomic64_fetch_or",
	"__tsan_atomic8_fetch_xor",
	"__tsan_atomic16_fetch_xor",
	"__tsan_atomic32_fetch_xor",
	"__tsan_atomic64_fetch_xor",
	"__tsan_atomic8_fetch_nand",
	"__tsan_atomic16_fetch_nand",
	"__tsan_atomic32_fetch_nand",
	"__tsan_atomic64_fetch_nand",
	"__tsan_atomic8_compare_exchange_strong",
	"__tsan_atomic16_compare_exchange_strong",
	"__tsan_atomic32_compare_exchange_strong",
	"__tsan_atomic64_compare_exchange_strong",
	"__tsan_atomic8_compare_exchange_weak",
	"__tsan_atomic16_compare_exchange_weak",
	"__tsan_atomic32_compare_exchange_weak",
	"__tsan_atomic64_compare_exchange_weak",
	"__tsan_atomic8_compare_exchange_val",
	"__tsan_atomic16_compare_exchange_val",
	"__tsan_atomic32_compare_exchange_val",
	"__tsan_atomic64_compare_exchange_val",
	"__tsan_atomic_thread_fence",
	"__tsan_atomic_signal_fence",
	"__tsan_unaligned_read16",
	"__tsan_unaligned_write16",
	/* KCOV */
	"write_comp_data",
	"check_kcov_mode",
	"__sanitizer_cov_trace_pc",
	"__sanitizer_cov_trace_const_cmp1",
	"__sanitizer_cov_trace_const_cmp2",
	"__sanitizer_cov_trace_const_cmp4",
	"__sanitizer_cov_trace_const_cmp8",
	"__sanitizer_cov_trace_cmp1",
	"__sanitizer_cov_trace_cmp2",
	"__sanitizer_cov_trace_cmp4",
	"__sanitizer_cov_trace_cmp8",
	"__sanitizer_cov_trace_switch",
	/* KMSAN */
	"kmsan_copy_to_user",
	"kmsan_disable_current",
	"kmsan_enable_current",
	"kmsan_report",
	"kmsan_unpoison_entry_regs",
	"kmsan_unpoison_memory",
	"__msan_chain_origin",
	"__msan_get_context_state",
	"__msan_instrument_asm_store",
	"__msan_metadata_ptr_for_load_1",
	"__msan_metadata_ptr_for_load_2",
	"__msan_metadata_ptr_for_load_4",
	"__msan_metadata_ptr_for_load_8",
	"__msan_metadata_ptr_for_load_n",
	"__msan_metadata_ptr_for_store_1",
	"__msan_metadata_ptr_for_store_2",
	"__msan_metadata_ptr_for_store_4",
	"__msan_metadata_ptr_for_store_8",
	"__msan_metadata_ptr_for_store_n",
	"__msan_poison_alloca",
	"__msan_warning",
	/* UBSAN */
	"ubsan_type_mismatch_common",
	"__ubsan_handle_type_mismatch",
	"__ubsan_handle_type_mismatch_v1",
	"__ubsan_handle_shift_out_of_bounds",
	"__ubsan_handle_load_invalid_value",
	/* KSTACK_ERASE */
	"__sanitizer_cov_stack_depth",
	/* TRACE_BRANCH_PROFILING */
	"ftrace_likely_update",
	/* STACKPROTECTOR */
	"__stack_chk_fail",
	/* misc */
	"csum_partial_copy_generic",
	"copy_mc_fragile",
	"copy_mc_fragile_handle_tail",
	"copy_mc_enhanced_fast_string",
	"rep_stos_alternative",
	"rep_movs_alternative",
	"__copy_user_nocache",
	NULL	/* sentinel - must stay last, see add_uaccess_safe() */
};
1267 
1268 static void add_uaccess_safe(struct objtool_file *file)
1269 {
1270 	struct symbol *func;
1271 	const char **name;
1272 
1273 	if (!opts.uaccess)
1274 		return;
1275 
1276 	for (name = uaccess_safe_builtin; *name; name++) {
1277 		func = find_symbol_by_name(file->elf, *name);
1278 		if (!func)
1279 			continue;
1280 
1281 		func->uaccess_safe = true;
1282 	}
1283 }
1284 
1285 /*
1286  * Symbols that replace INSN_CALL_DYNAMIC, every (tail) call to such a symbol
1287  * will be added to the .retpoline_sites section.
1288  */
1289 __weak bool arch_is_retpoline(struct symbol *sym)
1290 {
1291 	return false;
1292 }
1293 
1294 /*
1295  * Symbols that replace INSN_RETURN, every (tail) call to such a symbol
1296  * will be added to the .return_sites section.
1297  */
1298 __weak bool arch_is_rethunk(struct symbol *sym)
1299 {
1300 	return false;
1301 }
1302 
1303 /*
1304  * Symbols that are embedded inside other instructions, because sometimes crazy
1305  * code exists. These are mostly ignored for validation purposes.
1306  */
1307 __weak bool arch_is_embedded_insn(struct symbol *sym)
1308 {
1309 	return false;
1310 }
1311 
1312 static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
1313 {
1314 	struct reloc *reloc;
1315 
1316 	if (insn->no_reloc)
1317 		return NULL;
1318 
1319 	if (!file)
1320 		return NULL;
1321 
1322 	reloc = find_reloc_by_dest_range(file->elf, insn->sec,
1323 					 insn->offset, insn->len);
1324 	if (!reloc) {
1325 		insn->no_reloc = 1;
1326 		return NULL;
1327 	}
1328 
1329 	return reloc;
1330 }
1331 
1332 static void remove_insn_ops(struct instruction *insn)
1333 {
1334 	struct stack_op *op, *next;
1335 
1336 	for (op = insn->stack_ops; op; op = next) {
1337 		next = op->next;
1338 		free(op);
1339 	}
1340 	insn->stack_ops = NULL;
1341 }
1342 
/*
 * Classify a call site and queue it on the relevant per-file list
 * (static call, retpoline, mcount, regular call), possibly rewriting
 * the instruction bytes in place for the noinstr/mcount hacks.
 *
 * @sibling: true when the "call" is really a tail-call JMP.
 *
 * Returns 0 on success, -1 if rewriting the instruction bytes fails.
 */
static int annotate_call_site(struct objtool_file *file,
			       struct instruction *insn, bool sibling)
{
	struct reloc *reloc = insn_reloc(file, insn);
	struct symbol *sym = insn_call_dest(insn);

	/*
	 * NOTE(review): when no call destination was resolved, this
	 * relies on a relocation being present; 'reloc' is dereferenced
	 * without a NULL check here -- confirm callers guarantee it.
	 */
	if (!sym)
		sym = reloc->sym;

	if (sym->static_call_tramp) {
		list_add_tail(&insn->call_node, &file->static_call_list);
		return 0;
	}

	if (sym->retpoline_thunk) {
		list_add_tail(&insn->call_node, &file->retpoline_call_list);
		return 0;
	}

	/*
	 * Many compilers cannot disable KCOV or sanitizer calls with a function
	 * attribute so they need a little help, NOP out any such calls from
	 * noinstr text.
	 */
	if (opts.hack_noinstr && insn->sec->noinstr && sym->profiling_func) {
		if (reloc)
			set_reloc_type(file->elf, reloc, R_NONE);

		/* Tail calls become a RET, regular calls become NOPs. */
		if (elf_write_insn(file->elf, insn->sec,
				   insn->offset, insn->len,
				   sibling ? arch_ret_insn(insn->len)
					   : arch_nop_insn(insn->len))) {
			return -1;
		}

		insn->type = sibling ? INSN_RETURN : INSN_NOP;

		if (sibling) {
			/*
			 * We've replaced the tail-call JMP insn by two new
			 * insn: RET; INT3, except we only have a single struct
			 * insn here. Mark it retpoline_safe to avoid the SLS
			 * warning, instead of adding another insn.
			 */
			insn->retpoline_safe = true;
		}

		return 0;
	}

	if (opts.mcount && sym->fentry) {
		if (sibling)
			WARN_INSN(insn, "tail call to __fentry__ !?!?");
		if (opts.mnop) {
			if (reloc)
				set_reloc_type(file->elf, reloc, R_NONE);

			if (elf_write_insn(file->elf, insn->sec,
					   insn->offset, insn->len,
					   arch_nop_insn(insn->len))) {
				return -1;
			}

			insn->type = INSN_NOP;
		}

		/* Recorded for __mcount_loc section generation. */
		list_add_tail(&insn->call_node, &file->mcount_loc_list);
		return 0;
	}

	if (insn->type == INSN_CALL && !insn->sec->init &&
	    !insn->_call_dest->embedded_insn)
		list_add_tail(&insn->call_node, &file->call_list);

	/* Calls to noreturn functions never come back. */
	if (!sibling && dead_end_function(file, sym))
		insn->dead_end = true;

	return 0;
}
1422 
1423 static int add_call_dest(struct objtool_file *file, struct instruction *insn,
1424 			  struct symbol *dest, bool sibling)
1425 {
1426 	insn->_call_dest = dest;
1427 	if (!dest)
1428 		return 0;
1429 
1430 	/*
1431 	 * Whatever stack impact regular CALLs have, should be undone
1432 	 * by the RETURN of the called function.
1433 	 *
1434 	 * Annotated intra-function calls retain the stack_ops but
1435 	 * are converted to JUMP, see read_intra_function_calls().
1436 	 */
1437 	remove_insn_ops(insn);
1438 
1439 	return annotate_call_site(file, insn, sibling);
1440 }
1441 
1442 static int add_retpoline_call(struct objtool_file *file, struct instruction *insn)
1443 {
1444 	/*
1445 	 * Retpoline calls/jumps are really dynamic calls/jumps in disguise,
1446 	 * so convert them accordingly.
1447 	 */
1448 	switch (insn->type) {
1449 	case INSN_CALL:
1450 		insn->type = INSN_CALL_DYNAMIC;
1451 		break;
1452 	case INSN_JUMP_UNCONDITIONAL:
1453 		insn->type = INSN_JUMP_DYNAMIC;
1454 		break;
1455 	case INSN_JUMP_CONDITIONAL:
1456 		insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;
1457 		break;
1458 	default:
1459 		return 0;
1460 	}
1461 
1462 	insn->retpoline_safe = true;
1463 
1464 	/*
1465 	 * Whatever stack impact regular CALLs have, should be undone
1466 	 * by the RETURN of the called function.
1467 	 *
1468 	 * Annotated intra-function calls retain the stack_ops but
1469 	 * are converted to JUMP, see read_intra_function_calls().
1470 	 */
1471 	remove_insn_ops(insn);
1472 
1473 	return annotate_call_site(file, insn, false);
1474 }
1475 
1476 static void add_return_call(struct objtool_file *file, struct instruction *insn, bool add)
1477 {
1478 	/*
1479 	 * Return thunk tail calls are really just returns in disguise,
1480 	 * so convert them accordingly.
1481 	 */
1482 	insn->type = INSN_RETURN;
1483 	insn->retpoline_safe = true;
1484 
1485 	if (add)
1486 		list_add_tail(&insn->call_node, &file->return_thunk_list);
1487 }
1488 
1489 static bool is_first_func_insn(struct objtool_file *file,
1490 			       struct instruction *insn)
1491 {
1492 	struct symbol *func = insn_func(insn);
1493 
1494 	if (!func)
1495 		return false;
1496 
1497 	if (insn->offset == func->offset)
1498 		return true;
1499 
1500 	/* Allow direct CALL/JMP past ENDBR */
1501 	if (opts.ibt) {
1502 		struct instruction *prev = prev_insn_same_sym(file, insn);
1503 
1504 		if (prev && prev->type == INSN_ENDBR &&
1505 		    insn->offset == func->offset + prev->len)
1506 			return true;
1507 	}
1508 
1509 	return false;
1510 }
1511 
1512 /*
1513  * Find the destination instructions for all jumps.
1514  */
1515 static int add_jump_destinations(struct objtool_file *file)
1516 {
1517 	struct instruction *insn;
1518 	struct reloc *reloc;
1519 
1520 	for_each_insn(file, insn) {
1521 		struct symbol *func = insn_func(insn);
1522 		struct instruction *dest_insn;
1523 		struct section *dest_sec;
1524 		struct symbol *dest_sym;
1525 		unsigned long dest_off;
1526 
1527 		if (!is_static_jump(insn))
1528 			continue;
1529 
1530 		if (insn->jump_dest) {
1531 			/*
1532 			 * handle_group_alt() may have previously set
1533 			 * 'jump_dest' for some alternatives.
1534 			 */
1535 			continue;
1536 		}
1537 
1538 		reloc = insn_reloc(file, insn);
1539 		if (!reloc) {
1540 			dest_sec = insn->sec;
1541 			dest_off = arch_jump_destination(insn);
1542 			dest_sym = dest_sec->sym;
1543 		} else {
1544 			dest_sym = reloc->sym;
1545 			if (is_undef_sym(dest_sym)) {
1546 				if (dest_sym->retpoline_thunk) {
1547 					if (add_retpoline_call(file, insn))
1548 						return -1;
1549 					continue;
1550 				}
1551 
1552 				if (dest_sym->return_thunk) {
1553 					add_return_call(file, insn, true);
1554 					continue;
1555 				}
1556 
1557 				/* External symbol */
1558 				if (func) {
1559 					/* External sibling call */
1560 					if (add_call_dest(file, insn, dest_sym, true))
1561 						return -1;
1562 					continue;
1563 				}
1564 
1565 				/* Non-func asm code jumping to external symbol */
1566 				continue;
1567 			}
1568 
1569 			dest_sec = dest_sym->sec;
1570 			dest_off = dest_sym->offset + arch_insn_adjusted_addend(insn, reloc);
1571 		}
1572 
1573 		dest_insn = find_insn(file, dest_sec, dest_off);
1574 		if (!dest_insn) {
1575 			struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off);
1576 
1577 			/*
1578 			 * retbleed_untrain_ret() jumps to
1579 			 * __x86_return_thunk(), but objtool can't find
1580 			 * the thunk's starting RET instruction,
1581 			 * because the RET is also in the middle of
1582 			 * another instruction.  Objtool only knows
1583 			 * about the outer instruction.
1584 			 */
1585 			if (sym && sym->embedded_insn) {
1586 				add_return_call(file, insn, false);
1587 				continue;
1588 			}
1589 
1590 			/*
1591 			 * GCOV/KCOV dead code can jump to the end of
1592 			 * the function/section.
1593 			 */
1594 			if (file->ignore_unreachables && func &&
1595 			    dest_sec == insn->sec &&
1596 			    dest_off == func->offset + func->len)
1597 				continue;
1598 
1599 			ERROR_INSN(insn, "can't find jump dest instruction at %s",
1600 				   offstr(dest_sec, dest_off));
1601 			return -1;
1602 		}
1603 
1604 		if (!dest_sym || is_sec_sym(dest_sym)) {
1605 			dest_sym = dest_insn->sym;
1606 			if (!dest_sym)
1607 				goto set_jump_dest;
1608 		}
1609 
1610 		if (dest_sym->retpoline_thunk && dest_insn->offset == dest_sym->offset) {
1611 			if (add_retpoline_call(file, insn))
1612 				return -1;
1613 			continue;
1614 		}
1615 
1616 		if (dest_sym->return_thunk && dest_insn->offset == dest_sym->offset) {
1617 			add_return_call(file, insn, true);
1618 			continue;
1619 		}
1620 
1621 		if (!insn->sym || insn->sym->pfunc == dest_sym->pfunc)
1622 			goto set_jump_dest;
1623 
1624 		/*
1625 		 * Internal cross-function jump.
1626 		 */
1627 
1628 		if (is_first_func_insn(file, dest_insn)) {
1629 			/* Internal sibling call */
1630 			if (add_call_dest(file, insn, dest_sym, true))
1631 				return -1;
1632 			continue;
1633 		}
1634 
1635 set_jump_dest:
1636 		insn->jump_dest = dest_insn;
1637 	}
1638 
1639 	return 0;
1640 }
1641 
/*
 * Resolve the symbol at @sec+@offset, preferring an STT_FUNC symbol
 * and falling back to any symbol covering that offset.
 */
static struct symbol *find_call_destination(struct section *sec, unsigned long offset)
{
	struct symbol *sym = find_func_by_offset(sec, offset);

	if (sym)
		return sym;

	return find_symbol_by_offset(sec, offset);
}
1652 
1653 /*
1654  * Find the destination instructions for all calls.
1655  */
1656 static int add_call_destinations(struct objtool_file *file)
1657 {
1658 	struct instruction *insn;
1659 	unsigned long dest_off;
1660 	struct symbol *dest;
1661 	struct reloc *reloc;
1662 
1663 	for_each_insn(file, insn) {
1664 		struct symbol *func = insn_func(insn);
1665 		if (insn->type != INSN_CALL)
1666 			continue;
1667 
1668 		reloc = insn_reloc(file, insn);
1669 		if (!reloc) {
1670 			dest_off = arch_jump_destination(insn);
1671 			dest = find_call_destination(insn->sec, dest_off);
1672 
1673 			if (add_call_dest(file, insn, dest, false))
1674 				return -1;
1675 
1676 			if (func && func->ignore)
1677 				continue;
1678 
1679 			if (!insn_call_dest(insn)) {
1680 				ERROR_INSN(insn, "unannotated intra-function call");
1681 				return -1;
1682 			}
1683 
1684 			if (func && !is_func_sym(insn_call_dest(insn))) {
1685 				ERROR_INSN(insn, "unsupported call to non-function");
1686 				return -1;
1687 			}
1688 
1689 		} else if (is_sec_sym(reloc->sym)) {
1690 			dest_off = arch_insn_adjusted_addend(insn, reloc);
1691 			dest = find_call_destination(reloc->sym->sec, dest_off);
1692 			if (!dest) {
1693 				ERROR_INSN(insn, "can't find call dest symbol at %s+0x%lx",
1694 					   reloc->sym->sec->name, dest_off);
1695 				return -1;
1696 			}
1697 
1698 			if (add_call_dest(file, insn, dest, false))
1699 				return -1;
1700 
1701 		} else if (reloc->sym->retpoline_thunk) {
1702 			if (add_retpoline_call(file, insn))
1703 				return -1;
1704 
1705 		} else {
1706 			if (add_call_dest(file, insn, reloc->sym, false))
1707 				return -1;
1708 		}
1709 	}
1710 
1711 	return 0;
1712 }
1713 
1714 /*
1715  * The .alternatives section requires some extra special care over and above
1716  * other special sections because alternatives are patched in place.
1717  */
1718 static int handle_group_alt(struct objtool_file *file,
1719 			    struct special_alt *special_alt,
1720 			    struct instruction *orig_insn,
1721 			    struct instruction **new_insn)
1722 {
1723 	struct instruction *last_new_insn = NULL, *insn, *nop = NULL;
1724 	struct alt_group *orig_alt_group, *new_alt_group;
1725 	unsigned long dest_off;
1726 
1727 	orig_alt_group = orig_insn->alt_group;
1728 	if (!orig_alt_group) {
1729 		struct instruction *last_orig_insn = NULL;
1730 
1731 		orig_alt_group = calloc(1, sizeof(*orig_alt_group));
1732 		if (!orig_alt_group) {
1733 			ERROR_GLIBC("calloc");
1734 			return -1;
1735 		}
1736 		orig_alt_group->cfi = calloc(special_alt->orig_len,
1737 					     sizeof(struct cfi_state *));
1738 		if (!orig_alt_group->cfi) {
1739 			ERROR_GLIBC("calloc");
1740 			return -1;
1741 		}
1742 
1743 		insn = orig_insn;
1744 		sec_for_each_insn_from(file, insn) {
1745 			if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
1746 				break;
1747 
1748 			insn->alt_group = orig_alt_group;
1749 			last_orig_insn = insn;
1750 		}
1751 		orig_alt_group->orig_group = NULL;
1752 		orig_alt_group->first_insn = orig_insn;
1753 		orig_alt_group->last_insn = last_orig_insn;
1754 		orig_alt_group->nop = NULL;
1755 		orig_alt_group->ignore = orig_insn->ignore_alts;
1756 		orig_alt_group->feature = 0;
1757 	} else {
1758 		if (orig_alt_group->last_insn->offset + orig_alt_group->last_insn->len -
1759 		    orig_alt_group->first_insn->offset != special_alt->orig_len) {
1760 			ERROR_INSN(orig_insn, "weirdly overlapping alternative! %ld != %d",
1761 				   orig_alt_group->last_insn->offset +
1762 				   orig_alt_group->last_insn->len -
1763 				   orig_alt_group->first_insn->offset,
1764 				   special_alt->orig_len);
1765 			return -1;
1766 		}
1767 	}
1768 
1769 	new_alt_group = calloc(1, sizeof(*new_alt_group));
1770 	if (!new_alt_group) {
1771 		ERROR_GLIBC("calloc");
1772 		return -1;
1773 	}
1774 
1775 	if (special_alt->new_len < special_alt->orig_len) {
1776 		/*
1777 		 * Insert a fake nop at the end to make the replacement
1778 		 * alt_group the same size as the original.  This is needed to
1779 		 * allow propagate_alt_cfi() to do its magic.  When the last
1780 		 * instruction affects the stack, the instruction after it (the
1781 		 * nop) will propagate the new state to the shared CFI array.
1782 		 */
1783 		nop = calloc(1, sizeof(*nop));
1784 		if (!nop) {
1785 			ERROR_GLIBC("calloc");
1786 			return -1;
1787 		}
1788 		memset(nop, 0, sizeof(*nop));
1789 
1790 		nop->sec = special_alt->new_sec;
1791 		nop->offset = special_alt->new_off + special_alt->new_len;
1792 		nop->len = special_alt->orig_len - special_alt->new_len;
1793 		nop->type = INSN_NOP;
1794 		nop->sym = orig_insn->sym;
1795 		nop->alt_group = new_alt_group;
1796 		nop->fake = 1;
1797 	}
1798 
1799 	if (!special_alt->new_len) {
1800 		*new_insn = nop;
1801 		goto end;
1802 	}
1803 
1804 	insn = *new_insn;
1805 	sec_for_each_insn_from(file, insn) {
1806 		struct reloc *alt_reloc;
1807 
1808 		if (insn->offset >= special_alt->new_off + special_alt->new_len)
1809 			break;
1810 
1811 		last_new_insn = insn;
1812 
1813 		insn->sym = orig_insn->sym;
1814 		insn->alt_group = new_alt_group;
1815 
1816 		/*
1817 		 * Since alternative replacement code is copy/pasted by the
1818 		 * kernel after applying relocations, generally such code can't
1819 		 * have relative-address relocation references to outside the
1820 		 * .altinstr_replacement section, unless the arch's
1821 		 * alternatives code can adjust the relative offsets
1822 		 * accordingly.
1823 		 */
1824 		alt_reloc = insn_reloc(file, insn);
1825 		if (alt_reloc && arch_pc_relative_reloc(alt_reloc) &&
1826 		    !arch_support_alt_relocation(special_alt, insn, alt_reloc)) {
1827 
1828 			ERROR_INSN(insn, "unsupported relocation in alternatives section");
1829 			return -1;
1830 		}
1831 
1832 		if (!is_static_jump(insn))
1833 			continue;
1834 
1835 		if (!insn->immediate)
1836 			continue;
1837 
1838 		dest_off = arch_jump_destination(insn);
1839 		if (dest_off == special_alt->new_off + special_alt->new_len) {
1840 			insn->jump_dest = next_insn_same_sec(file, orig_alt_group->last_insn);
1841 			if (!insn->jump_dest) {
1842 				ERROR_INSN(insn, "can't find alternative jump destination");
1843 				return -1;
1844 			}
1845 		}
1846 	}
1847 
1848 	if (!last_new_insn) {
1849 		ERROR_FUNC(special_alt->new_sec, special_alt->new_off,
1850 			   "can't find last new alternative instruction");
1851 		return -1;
1852 	}
1853 
1854 end:
1855 	new_alt_group->orig_group = orig_alt_group;
1856 	new_alt_group->first_insn = *new_insn;
1857 	new_alt_group->last_insn = last_new_insn;
1858 	new_alt_group->nop = nop;
1859 	new_alt_group->ignore = (*new_insn)->ignore_alts;
1860 	new_alt_group->cfi = orig_alt_group->cfi;
1861 	new_alt_group->feature = special_alt->feature;
1862 	return 0;
1863 }
1864 
1865 /*
1866  * A jump table entry can either convert a nop to a jump or a jump to a nop.
1867  * If the original instruction is a jump, make the alt entry an effective nop
1868  * by just skipping the original instruction.
1869  */
1870 static int handle_jump_alt(struct objtool_file *file,
1871 			   struct special_alt *special_alt,
1872 			   struct instruction *orig_insn,
1873 			   struct instruction **new_insn)
1874 {
1875 	if (orig_insn->type != INSN_JUMP_UNCONDITIONAL &&
1876 	    orig_insn->type != INSN_NOP) {
1877 
1878 		ERROR_INSN(orig_insn, "unsupported instruction at jump label");
1879 		return -1;
1880 	}
1881 
1882 	if (opts.hack_jump_label && special_alt->key_addend & 2) {
1883 		struct reloc *reloc = insn_reloc(file, orig_insn);
1884 
1885 		if (reloc)
1886 			set_reloc_type(file->elf, reloc, R_NONE);
1887 
1888 		if (elf_write_insn(file->elf, orig_insn->sec,
1889 				   orig_insn->offset, orig_insn->len,
1890 				   arch_nop_insn(orig_insn->len))) {
1891 			return -1;
1892 		}
1893 
1894 		orig_insn->type = INSN_NOP;
1895 	}
1896 
1897 	if (orig_insn->type == INSN_NOP) {
1898 		if (orig_insn->len == 2)
1899 			file->jl_nop_short++;
1900 		else
1901 			file->jl_nop_long++;
1902 
1903 		return 0;
1904 	}
1905 
1906 	if (orig_insn->len == 2)
1907 		file->jl_short++;
1908 	else
1909 		file->jl_long++;
1910 
1911 	*new_insn = next_insn_same_sec(file, orig_insn);
1912 	return 0;
1913 }
1914 
1915 /*
1916  * Read all the special sections which have alternate instructions which can be
1917  * patched in or redirected to at runtime.  Each instruction having alternate
1918  * instruction(s) has them added to its insn->alts list, which will be
1919  * traversed in validate_branch().
1920  */
1921 static int add_special_section_alts(struct objtool_file *file)
1922 {
1923 	struct list_head special_alts;
1924 	struct instruction *orig_insn, *new_insn;
1925 	struct special_alt *special_alt, *tmp;
1926 	enum alternative_type alt_type;
1927 	struct alternative *alt;
1928 	struct alternative *a;
1929 
1930 	if (special_get_alts(file->elf, &special_alts))
1931 		return -1;
1932 
1933 	list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {
1934 
1935 		orig_insn = find_insn(file, special_alt->orig_sec,
1936 				      special_alt->orig_off);
1937 		if (!orig_insn) {
1938 			ERROR_FUNC(special_alt->orig_sec, special_alt->orig_off,
1939 				   "special: can't find orig instruction");
1940 			return -1;
1941 		}
1942 
1943 		new_insn = NULL;
1944 		if (!special_alt->group || special_alt->new_len) {
1945 			new_insn = find_insn(file, special_alt->new_sec,
1946 					     special_alt->new_off);
1947 			if (!new_insn) {
1948 				ERROR_FUNC(special_alt->new_sec, special_alt->new_off,
1949 					   "special: can't find new instruction");
1950 				return -1;
1951 			}
1952 		}
1953 
1954 		if (special_alt->group) {
1955 			if (!special_alt->orig_len) {
1956 				ERROR_INSN(orig_insn, "empty alternative entry");
1957 				continue;
1958 			}
1959 
1960 			if (handle_group_alt(file, special_alt, orig_insn, &new_insn))
1961 				return -1;
1962 
1963 			alt_type = ALT_TYPE_INSTRUCTIONS;
1964 
1965 		} else if (special_alt->jump_or_nop) {
1966 			if (handle_jump_alt(file, special_alt, orig_insn, &new_insn))
1967 				return -1;
1968 
1969 			alt_type = ALT_TYPE_JUMP_TABLE;
1970 		} else {
1971 			alt_type = ALT_TYPE_EX_TABLE;
1972 		}
1973 
1974 		alt = calloc(1, sizeof(*alt));
1975 		if (!alt) {
1976 			ERROR_GLIBC("calloc");
1977 			return -1;
1978 		}
1979 
1980 		alt->insn = new_insn;
1981 		alt->type = alt_type;
1982 		alt->next = NULL;
1983 
1984 		/*
1985 		 * Store alternatives in the same order they have been
1986 		 * defined.
1987 		 */
1988 		if (!orig_insn->alts) {
1989 			orig_insn->alts = alt;
1990 		} else {
1991 			for (a = orig_insn->alts; a->next; a = a->next)
1992 				;
1993 			a->next = alt;
1994 		}
1995 
1996 		list_del(&special_alt->list);
1997 		free(special_alt);
1998 	}
1999 
2000 	if (opts.stats) {
2001 		printf("jl\\\tNOP\tJMP\n");
2002 		printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short);
2003 		printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long);
2004 	}
2005 
2006 	return 0;
2007 }
2008 
/*
 * Translate a jump-table entry relocation into the target's offset
 * within its section.  Weak default; arches may override.
 */
__weak unsigned long arch_jump_table_sym_offset(struct reloc *reloc, struct reloc *table)
{
	return reloc->sym->offset + reloc_addend(reloc);
}
2013 
/*
 * Walk the switch jump table previously associated with the dynamic
 * jump @insn and add every valid target as an alternative branch on
 * insn->alts.  Walking stops at the end of the table (size limit,
 * start of the next table, non-consecutive reloc offsets, or a target
 * outside the function).  Returns 0 on success, -1 on error.
 */
static int add_jump_table(struct objtool_file *file, struct instruction *insn)
{
	unsigned long table_size = insn_jump_table_size(insn);
	struct symbol *pfunc = insn_func(insn)->pfunc;
	struct reloc *table = insn_jump_table(insn);
	struct instruction *dest_insn;
	unsigned int prev_offset = 0;
	struct reloc *reloc = table;
	struct alternative *alt;
	unsigned long sym_offset;

	/*
	 * Each @reloc is a switch table relocation which points to the target
	 * instruction.
	 */
	for_each_reloc_from(table->sec, reloc) {

		/* Check for the end of the table: */
		if (table_size && reloc_offset(reloc) - reloc_offset(table) >= table_size)
			break;
		if (reloc != table && is_jump_table(reloc))
			break;

		/* Make sure the table entries are consecutive: */
		if (prev_offset && reloc_offset(reloc) != prev_offset + arch_reloc_size(reloc))
			break;

		sym_offset = arch_jump_table_sym_offset(reloc, table);

		/* Detect function pointers from contiguous objects: */
		if (reloc->sym->sec == pfunc->sec && sym_offset == pfunc->offset)
			break;

		/*
		 * Clang sometimes leaves dangling unused jump table entries
		 * which point to the end of the function.  Ignore them.
		 */
		if (reloc->sym->sec == pfunc->sec &&
		    sym_offset == pfunc->offset + pfunc->len)
			goto next;

		dest_insn = find_insn(file, reloc->sym->sec, sym_offset);
		if (!dest_insn)
			break;

		/* Make sure the destination is in the same function: */
		if (!insn_func(dest_insn) || insn_func(dest_insn)->pfunc != pfunc)
			break;

		alt = calloc(1, sizeof(*alt));
		if (!alt) {
			ERROR_GLIBC("calloc");
			return -1;
		}

		/* Prepend: order of jump-table alternatives doesn't matter. */
		alt->insn = dest_insn;
		alt->next = insn->alts;
		insn->alts = alt;
next:
		prev_offset = reloc_offset(reloc);
	}

	/* prev_offset stays 0 if not even one entry was accepted. */
	if (!prev_offset) {
		ERROR_INSN(insn, "can't find switch jump table");
		return -1;
	}

	return 0;
}
2083 
2084 /*
2085  * find_jump_table() - Given a dynamic jump, find the switch jump table
2086  * associated with it.
2087  */
2088 static void find_jump_table(struct objtool_file *file, struct symbol *func,
2089 			    struct instruction *insn)
2090 {
2091 	struct reloc *table_reloc;
2092 	struct instruction *dest_insn, *orig_insn = insn;
2093 	unsigned long table_size;
2094 	unsigned long sym_offset;
2095 
2096 	/*
2097 	 * Backward search using the @first_jump_src links, these help avoid
2098 	 * much of the 'in between' code. Which avoids us getting confused by
2099 	 * it.
2100 	 */
2101 	for (;
2102 	     insn && insn_func(insn) && insn_func(insn)->pfunc == func;
2103 	     insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) {
2104 
2105 		if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
2106 			break;
2107 
2108 		/* allow small jumps within the range */
2109 		if (insn->type == INSN_JUMP_UNCONDITIONAL &&
2110 		    insn->jump_dest &&
2111 		    (insn->jump_dest->offset <= insn->offset ||
2112 		     insn->jump_dest->offset > orig_insn->offset))
2113 			break;
2114 
2115 		table_reloc = arch_find_switch_table(file, insn, &table_size);
2116 		if (!table_reloc)
2117 			continue;
2118 
2119 		sym_offset = table_reloc->sym->offset + reloc_addend(table_reloc);
2120 
2121 		dest_insn = find_insn(file, table_reloc->sym->sec, sym_offset);
2122 		if (!dest_insn || !insn_func(dest_insn) || insn_func(dest_insn)->pfunc != func)
2123 			continue;
2124 
2125 		set_jump_table(table_reloc);
2126 		orig_insn->_jump_table = table_reloc;
2127 		orig_insn->_jump_table_size = table_size;
2128 
2129 		break;
2130 	}
2131 }
2132 
2133 /*
2134  * First pass: Mark the head of each jump table so that in the next pass,
2135  * we know when a given jump table ends and the next one starts.
2136  */
2137 static void mark_func_jump_tables(struct objtool_file *file,
2138 				    struct symbol *func)
2139 {
2140 	struct instruction *insn, *last = NULL;
2141 
2142 	func_for_each_insn(file, func, insn) {
2143 		if (!last)
2144 			last = insn;
2145 
2146 		/*
2147 		 * Store back-pointers for unconditional forward jumps such
2148 		 * that find_jump_table() can back-track using those and
2149 		 * avoid some potentially confusing code.
2150 		 */
2151 		if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
2152 		    insn->offset > last->offset &&
2153 		    insn->jump_dest->offset > insn->offset &&
2154 		    !insn->jump_dest->first_jump_src) {
2155 
2156 			insn->jump_dest->first_jump_src = insn;
2157 			last = insn->jump_dest;
2158 		}
2159 
2160 		if (insn->type != INSN_JUMP_DYNAMIC)
2161 			continue;
2162 
2163 		find_jump_table(file, func, insn);
2164 	}
2165 }
2166 
2167 static int add_func_jump_tables(struct objtool_file *file,
2168 				  struct symbol *func)
2169 {
2170 	struct instruction *insn;
2171 
2172 	func_for_each_insn(file, func, insn) {
2173 		if (!insn_jump_table(insn))
2174 			continue;
2175 
2176 		if (add_jump_table(file, insn))
2177 			return -1;
2178 	}
2179 
2180 	return 0;
2181 }
2182 
2183 /*
2184  * For some switch statements, gcc generates a jump table in the .rodata
2185  * section which contains a list of addresses within the function to jump to.
2186  * This finds these jump tables and adds them to the insn->alts lists.
2187  */
2188 static int add_jump_table_alts(struct objtool_file *file)
2189 {
2190 	struct symbol *func;
2191 
2192 	if (!file->rodata)
2193 		return 0;
2194 
2195 	for_each_sym(file->elf, func) {
2196 		if (!is_func_sym(func) || func->alias != func)
2197 			continue;
2198 
2199 		mark_func_jump_tables(file, func);
2200 		if (add_func_jump_tables(file, func))
2201 			return -1;
2202 	}
2203 
2204 	return 0;
2205 }
2206 
2207 static void set_func_state(struct cfi_state *state)
2208 {
2209 	state->cfa = initial_func_cfi.cfa;
2210 	memcpy(&state->regs, &initial_func_cfi.regs,
2211 	       CFI_NUM_REGS * sizeof(struct cfi_reg));
2212 	state->stack_size = initial_func_cfi.cfa.offset;
2213 	state->type = UNWIND_HINT_TYPE_CALL;
2214 }
2215 
/*
 * Parse the .discard.unwind_hints section and attach the corresponding CFI
 * state to each hinted instruction.  Returns 0 on success, -1 on error.
 */
static int read_unwind_hints(struct objtool_file *file)
{
	/* scratch CFI; re-seeded from insn->cfi (if set) for each hint below */
	struct cfi_state cfi = init_cfi;
	struct section *sec;
	struct unwind_hint *hint;
	struct instruction *insn;
	struct reloc *reloc;
	unsigned long offset;
	int i;

	sec = find_section_by_name(file->elf, ".discard.unwind_hints");
	if (!sec)
		return 0;

	if (!sec->rsec) {
		ERROR("missing .rela.discard.unwind_hints section");
		return -1;
	}

	/* section must be an exact array of struct unwind_hint */
	if (sec_size(sec) % sizeof(struct unwind_hint)) {
		ERROR("struct unwind_hint size mismatch");
		return -1;
	}

	file->hints = true;

	for (i = 0; i < sec_size(sec) / sizeof(struct unwind_hint); i++) {
		hint = (struct unwind_hint *)sec->data->d_buf + i;

		/* each hint entry must have a reloc pointing at the hinted insn */
		reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
		if (!reloc) {
			ERROR("can't find reloc for unwind_hints[%d]", i);
			return -1;
		}

		offset = reloc->sym->offset + reloc_addend(reloc);

		insn = find_insn(file, reloc->sym->sec, offset);
		if (!insn) {
			ERROR("can't find insn for unwind_hints[%d]", i);
			return -1;
		}

		insn->hint = true;

		/* UNDEFINED hints use a shared sentinel CFI, no hash lookup */
		if (hint->type == UNWIND_HINT_TYPE_UNDEFINED) {
			insn->cfi = &force_undefined_cfi;
			continue;
		}

		/* SAVE is a marker, not a real hint: clear the hint flag */
		if (hint->type == UNWIND_HINT_TYPE_SAVE) {
			insn->hint = false;
			insn->save = true;
			continue;
		}

		if (hint->type == UNWIND_HINT_TYPE_RESTORE) {
			insn->restore = true;
			continue;
		}

		if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) {
			struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset);

			/* a global entry point with IRET_REGS must start with ENDBR (IBT) */
			if (sym && is_global_sym(sym)) {
				if (opts.ibt && insn->type != INSN_ENDBR && !insn->noendbr) {
					ERROR_INSN(insn, "UNWIND_HINT_IRET_REGS without ENDBR");
					return -1;
				}
			}
		}

		if (hint->type == UNWIND_HINT_TYPE_FUNC) {
			insn->cfi = &func_cfi;
			continue;
		}

		/* start from the insn's existing CFI, if it already has one */
		if (insn->cfi)
			cfi = *(insn->cfi);

		if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) {
			ERROR_INSN(insn, "unsupported unwind_hint sp base reg %d", hint->sp_reg);
			return -1;
		}

		cfi.cfa.offset = bswap_if_needed(file->elf, hint->sp_offset);
		cfi.type = hint->type;
		cfi.signal = hint->signal;

		/* dedup via the CFI hash to keep memory usage down */
		insn->cfi = cfi_hash_find_or_add(&cfi);
	}

	return 0;
}
2310 
2311 static int read_annotate(struct objtool_file *file,
2312 			 int (*func)(struct objtool_file *file, int type, struct instruction *insn))
2313 {
2314 	struct section *sec;
2315 	struct instruction *insn;
2316 	struct reloc *reloc;
2317 	uint64_t offset;
2318 	int type;
2319 
2320 	sec = find_section_by_name(file->elf, ".discard.annotate_insn");
2321 	if (!sec)
2322 		return 0;
2323 
2324 	if (!sec->rsec)
2325 		return 0;
2326 
2327 	if (sec->sh.sh_entsize != 8) {
2328 		static bool warned = false;
2329 		if (!warned && opts.verbose) {
2330 			WARN("%s: dodgy linker, sh_entsize != 8", sec->name);
2331 			warned = true;
2332 		}
2333 		sec->sh.sh_entsize = 8;
2334 	}
2335 
2336 	if (sec_num_entries(sec) != sec_num_entries(sec->rsec)) {
2337 		ERROR("bad .discard.annotate_insn section: missing relocs");
2338 		return -1;
2339 	}
2340 
2341 	for_each_reloc(sec->rsec, reloc) {
2342 		type = annotype(file->elf, sec, reloc);
2343 		offset = reloc->sym->offset + reloc_addend(reloc);
2344 		insn = find_insn(file, reloc->sym->sec, offset);
2345 
2346 		if (!insn) {
2347 			ERROR("bad .discard.annotate_insn entry: %d of type %d", reloc_idx(reloc), type);
2348 			return -1;
2349 		}
2350 
2351 		if (func(file, type, insn))
2352 			return -1;
2353 	}
2354 
2355 	return 0;
2356 }
2357 
2358 static int __annotate_early(struct objtool_file *file, int type, struct instruction *insn)
2359 {
2360 	switch (type) {
2361 
2362 	/* Must be before add_special_section_alts() */
2363 	case ANNOTYPE_IGNORE_ALTS:
2364 		insn->ignore_alts = true;
2365 		break;
2366 
2367 	/*
2368 	 * Must be before read_unwind_hints() since that needs insn->noendbr.
2369 	 */
2370 	case ANNOTYPE_NOENDBR:
2371 		insn->noendbr = 1;
2372 		break;
2373 
2374 	default:
2375 		break;
2376 	}
2377 
2378 	return 0;
2379 }
2380 
2381 static int __annotate_ifc(struct objtool_file *file, int type, struct instruction *insn)
2382 {
2383 	unsigned long dest_off;
2384 
2385 	if (type != ANNOTYPE_INTRA_FUNCTION_CALL)
2386 		return 0;
2387 
2388 	if (insn->type != INSN_CALL) {
2389 		ERROR_INSN(insn, "intra_function_call not a direct call");
2390 		return -1;
2391 	}
2392 
2393 	/*
2394 	 * Treat intra-function CALLs as JMPs, but with a stack_op.
2395 	 * See add_call_destinations(), which strips stack_ops from
2396 	 * normal CALLs.
2397 	 */
2398 	insn->type = INSN_JUMP_UNCONDITIONAL;
2399 
2400 	dest_off = arch_jump_destination(insn);
2401 	insn->jump_dest = find_insn(file, insn->sec, dest_off);
2402 	if (!insn->jump_dest) {
2403 		ERROR_INSN(insn, "can't find call dest at %s+0x%lx",
2404 			   insn->sec->name, dest_off);
2405 		return -1;
2406 	}
2407 
2408 	return 0;
2409 }
2410 
2411 static int __annotate_late(struct objtool_file *file, int type, struct instruction *insn)
2412 {
2413 	struct symbol *sym;
2414 
2415 	switch (type) {
2416 	case ANNOTYPE_NOENDBR:
2417 		/* early */
2418 		break;
2419 
2420 	case ANNOTYPE_RETPOLINE_SAFE:
2421 		if (insn->type != INSN_JUMP_DYNAMIC &&
2422 		    insn->type != INSN_CALL_DYNAMIC &&
2423 		    insn->type != INSN_RETURN &&
2424 		    insn->type != INSN_NOP) {
2425 			ERROR_INSN(insn, "retpoline_safe hint not an indirect jump/call/ret/nop");
2426 			return -1;
2427 		}
2428 
2429 		insn->retpoline_safe = true;
2430 		break;
2431 
2432 	case ANNOTYPE_INSTR_BEGIN:
2433 		insn->instr++;
2434 		break;
2435 
2436 	case ANNOTYPE_INSTR_END:
2437 		insn->instr--;
2438 		break;
2439 
2440 	case ANNOTYPE_UNRET_BEGIN:
2441 		insn->unret = 1;
2442 		break;
2443 
2444 	case ANNOTYPE_IGNORE_ALTS:
2445 		/* early */
2446 		break;
2447 
2448 	case ANNOTYPE_INTRA_FUNCTION_CALL:
2449 		/* ifc */
2450 		break;
2451 
2452 	case ANNOTYPE_REACHABLE:
2453 		insn->dead_end = false;
2454 		break;
2455 
2456 	case ANNOTYPE_NOCFI:
2457 		sym = insn->sym;
2458 		if (!sym) {
2459 			ERROR_INSN(insn, "dodgy NOCFI annotation");
2460 			return -1;
2461 		}
2462 		insn->sym->nocfi = 1;
2463 		break;
2464 
2465 	default:
2466 		ERROR_INSN(insn, "Unknown annotation type: %d", type);
2467 		return -1;
2468 	}
2469 
2470 	return 0;
2471 }
2472 
2473 /*
2474  * Return true if name matches an instrumentation function, where calls to that
2475  * function from noinstr code can safely be removed, but compilers won't do so.
2476  */
2477 static bool is_profiling_func(const char *name)
2478 {
2479 	/*
2480 	 * Many compilers cannot disable KCOV with a function attribute.
2481 	 */
2482 	if (!strncmp(name, "__sanitizer_cov_", 16))
2483 		return true;
2484 
2485 	return false;
2486 }
2487 
2488 static int classify_symbols(struct objtool_file *file)
2489 {
2490 	struct symbol *func;
2491 	size_t len;
2492 
2493 	for_each_sym(file->elf, func) {
2494 		if (is_notype_sym(func) && strstarts(func->name, ".L"))
2495 			func->local_label = true;
2496 
2497 		if (!is_global_sym(func))
2498 			continue;
2499 
2500 		if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR,
2501 			     strlen(STATIC_CALL_TRAMP_PREFIX_STR)))
2502 			func->static_call_tramp = true;
2503 
2504 		if (arch_is_retpoline(func))
2505 			func->retpoline_thunk = true;
2506 
2507 		if (arch_is_rethunk(func))
2508 			func->return_thunk = true;
2509 
2510 		if (arch_is_embedded_insn(func))
2511 			func->embedded_insn = true;
2512 
2513 		if (arch_ftrace_match(func->name))
2514 			func->fentry = true;
2515 
2516 		if (is_profiling_func(func->name))
2517 			func->profiling_func = true;
2518 
2519 		len = strlen(func->name);
2520 		if (len > sym_name_max_len)
2521 			sym_name_max_len = len;
2522 	}
2523 
2524 	return 0;
2525 }
2526 
2527 static void mark_rodata(struct objtool_file *file)
2528 {
2529 	struct section *sec;
2530 	bool found = false;
2531 
2532 	/*
2533 	 * Search for the following rodata sections, each of which can
2534 	 * potentially contain jump tables:
2535 	 *
2536 	 * - .rodata: can contain GCC switch tables
2537 	 * - .rodata.<func>: same, if -fdata-sections is being used
2538 	 * - .data.rel.ro.c_jump_table: contains C annotated jump tables
2539 	 *
2540 	 * .rodata.str1.* sections are ignored; they don't contain jump tables.
2541 	 */
2542 	for_each_sec(file->elf, sec) {
2543 		if ((!strncmp(sec->name, ".rodata", 7) &&
2544 		     !strstr(sec->name, ".str1.")) ||
2545 		    !strncmp(sec->name, ".data.rel.ro", 12)) {
2546 			sec->rodata = true;
2547 			found = true;
2548 		}
2549 	}
2550 
2551 	file->rodata = found;
2552 }
2553 
2554 static void mark_holes(struct objtool_file *file)
2555 {
2556 	struct instruction *insn;
2557 	bool in_hole = false;
2558 
2559 	if (!opts.link)
2560 		return;
2561 
2562 	/*
2563 	 * Whole archive runs might encounter dead code from weak symbols.
2564 	 * This is where the linker will have dropped the weak symbol in
2565 	 * favour of a regular symbol, but leaves the code in place.
2566 	 */
2567 	for_each_insn(file, insn) {
2568 		if (insn->sym || !find_symbol_hole_containing(insn->sec, insn->offset)) {
2569 			in_hole = false;
2570 			continue;
2571 		}
2572 
2573 		/* Skip function padding and pfx code */
2574 		if (!in_hole && insn->type == INSN_NOP)
2575 			continue;
2576 
2577 		in_hole = true;
2578 		insn->hole = 1;
2579 
2580 		/*
2581 		 * If this hole jumps to a .cold function, mark it ignore.
2582 		 */
2583 		if (insn->jump_dest) {
2584 			struct symbol *dest_func = insn_func(insn->jump_dest);
2585 
2586 			if (dest_func && dest_func->cold)
2587 				dest_func->ignore = true;
2588 		}
2589 	}
2590 }
2591 
2592 static bool validate_branch_enabled(void)
2593 {
2594 	return opts.stackval ||
2595 	       opts.orc ||
2596 	       opts.uaccess ||
2597 	       opts.checksum;
2598 }
2599 
/*
 * Decode the object file's sections and build objtool's instruction model.
 *
 * The call sequence below is order-sensitive; see the inline comments for
 * the individual ordering constraints.  Returns 0 on success, -1 on error.
 */
static int decode_sections(struct objtool_file *file)
{
	file->klp = is_livepatch_module(file);

	mark_rodata(file);

	if (init_pv_ops(file))
		return -1;

	/*
	 * Must be before add_{jump_call}_destination.
	 */
	if (classify_symbols(file))
		return -1;

	if (decode_instructions(file))
		return -1;

	if (add_ignores(file))
		return -1;

	add_uaccess_safe(file);

	if (read_annotate(file, __annotate_early))
		return -1;

	/*
	 * Must be before add_jump_destinations(), which depends on 'func'
	 * being set for alternatives, to enable proper sibling call detection.
	 */
	if (validate_branch_enabled() || opts.noinstr || opts.hack_jump_label || opts.disas) {
		if (add_special_section_alts(file))
			return -1;
	}

	if (add_jump_destinations(file))
		return -1;

	/*
	 * Must be before add_call_destination(); it changes INSN_CALL to
	 * INSN_JUMP.
	 */
	if (read_annotate(file, __annotate_ifc))
		return -1;

	if (add_call_destinations(file))
		return -1;

	if (add_jump_table_alts(file))
		return -1;

	if (read_unwind_hints(file))
		return -1;

	/* Must be after add_jump_destinations() */
	mark_holes(file);

	/*
	 * Must be after add_call_destinations() such that it can override
	 * dead_end_function() marks.
	 */
	if (read_annotate(file, __annotate_late))
		return -1;

	return 0;
}
2666 
2667 static bool is_special_call(struct instruction *insn)
2668 {
2669 	if (insn->type == INSN_CALL) {
2670 		struct symbol *dest = insn_call_dest(insn);
2671 
2672 		if (!dest)
2673 			return false;
2674 
2675 		if (dest->fentry || dest->embedded_insn)
2676 			return true;
2677 	}
2678 
2679 	return false;
2680 }
2681 
2682 static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state)
2683 {
2684 	struct cfi_state *cfi = &state->cfi;
2685 	int i;
2686 
2687 	if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap)
2688 		return true;
2689 
2690 	if (cfi->cfa.offset != initial_func_cfi.cfa.offset)
2691 		return true;
2692 
2693 	if (cfi->stack_size != initial_func_cfi.cfa.offset)
2694 		return true;
2695 
2696 	for (i = 0; i < CFI_NUM_REGS; i++) {
2697 		if (cfi->regs[i].base != initial_func_cfi.regs[i].base ||
2698 		    cfi->regs[i].offset != initial_func_cfi.regs[i].offset)
2699 			return true;
2700 	}
2701 
2702 	return false;
2703 }
2704 
2705 static bool check_reg_frame_pos(const struct cfi_reg *reg,
2706 				int expected_offset)
2707 {
2708 	return reg->base == CFI_CFA &&
2709 	       reg->offset == expected_offset;
2710 }
2711 
2712 static bool has_valid_stack_frame(struct insn_state *state)
2713 {
2714 	struct cfi_state *cfi = &state->cfi;
2715 
2716 	if (cfi->cfa.base == CFI_BP &&
2717 	    check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) &&
2718 	    check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8))
2719 		return true;
2720 
2721 	if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP)
2722 		return true;
2723 
2724 	return false;
2725 }
2726 
2727 static int update_cfi_state_regs(struct instruction *insn,
2728 				  struct cfi_state *cfi,
2729 				  struct stack_op *op)
2730 {
2731 	struct cfi_reg *cfa = &cfi->cfa;
2732 
2733 	if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
2734 		return 0;
2735 
2736 	/* push */
2737 	if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
2738 		cfa->offset += 8;
2739 
2740 	/* pop */
2741 	if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
2742 		cfa->offset -= 8;
2743 
2744 	/* add immediate to sp */
2745 	if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD &&
2746 	    op->dest.reg == CFI_SP && op->src.reg == CFI_SP)
2747 		cfa->offset -= op->src.offset;
2748 
2749 	return 0;
2750 }
2751 
2752 static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset)
2753 {
2754 	if (arch_callee_saved_reg(reg) &&
2755 	    cfi->regs[reg].base == CFI_UNDEFINED) {
2756 		cfi->regs[reg].base = base;
2757 		cfi->regs[reg].offset = offset;
2758 	}
2759 }
2760 
/* Reset a register's CFI state back to its function-entry (initial) state. */
static void restore_reg(struct cfi_state *cfi, unsigned char reg)
{
	cfi->regs[reg].base = initial_func_cfi.regs[reg].base;
	cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset;
}
2766 
2767 /*
2768  * A note about DRAP stack alignment:
2769  *
2770  * GCC has the concept of a DRAP register, which is used to help keep track of
2771  * the stack pointer when aligning the stack.  r10 or r13 is used as the DRAP
2772  * register.  The typical DRAP pattern is:
2773  *
2774  *   4c 8d 54 24 08		lea    0x8(%rsp),%r10
2775  *   48 83 e4 c0		and    $0xffffffffffffffc0,%rsp
2776  *   41 ff 72 f8		pushq  -0x8(%r10)
2777  *   55				push   %rbp
2778  *   48 89 e5			mov    %rsp,%rbp
2779  *				(more pushes)
2780  *   41 52			push   %r10
2781  *				...
2782  *   41 5a			pop    %r10
2783  *				(more pops)
2784  *   5d				pop    %rbp
2785  *   49 8d 62 f8		lea    -0x8(%r10),%rsp
2786  *   c3				retq
2787  *
2788  * There are some variations in the epilogues, like:
2789  *
2790  *   5b				pop    %rbx
2791  *   41 5a			pop    %r10
2792  *   41 5c			pop    %r12
2793  *   41 5d			pop    %r13
2794  *   41 5e			pop    %r14
2795  *   c9				leaveq
2796  *   49 8d 62 f8		lea    -0x8(%r10),%rsp
2797  *   c3				retq
2798  *
2799  * and:
2800  *
2801  *   4c 8b 55 e8		mov    -0x18(%rbp),%r10
2802  *   48 8b 5d e0		mov    -0x20(%rbp),%rbx
2803  *   4c 8b 65 f0		mov    -0x10(%rbp),%r12
2804  *   4c 8b 6d f8		mov    -0x8(%rbp),%r13
2805  *   c9				leaveq
2806  *   49 8d 62 f8		lea    -0x8(%r10),%rsp
2807  *   c3				retq
2808  *
2809  * Sometimes r13 is used as the DRAP register, in which case it's saved and
2810  * restored beforehand:
2811  *
2812  *   41 55			push   %r13
2813  *   4c 8d 6c 24 10		lea    0x10(%rsp),%r13
2814  *   48 83 e4 f0		and    $0xfffffffffffffff0,%rsp
2815  *				...
2816  *   49 8d 65 f0		lea    -0x10(%r13),%rsp
2817  *   41 5d			pop    %r13
2818  *   c3				retq
2819  */
/*
 * Apply the effect of a single stack operation @op to the CFI state @cfi.
 *
 * Returns 0 on success, 1 on a (warned) recoverable inconsistency, and -1
 * on an unsupported stack modification.  See the DRAP commentary above for
 * the stack-realignment patterns handled here.
 */
static int update_cfi_state(struct instruction *insn,
			    struct instruction *next_insn,
			    struct cfi_state *cfi, struct stack_op *op)
{
	struct cfi_reg *cfa = &cfi->cfa;
	struct cfi_reg *regs = cfi->regs;

	/* ignore UNWIND_HINT_UNDEFINED regions */
	if (cfi->force_undefined)
		return 0;

	/* stack operations don't make sense with an undefined CFA */
	if (cfa->base == CFI_UNDEFINED) {
		if (insn_func(insn)) {
			WARN_INSN(insn, "undefined stack state");
			return 1;
		}
		return 0;
	}

	/* inside an UNWIND_HINT_REGS region only the CFA offset is tracked */
	if (cfi->type == UNWIND_HINT_TYPE_REGS ||
	    cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL)
		return update_cfi_state_regs(insn, cfi, op);

	switch (op->dest.type) {

	case OP_DEST_REG:
		switch (op->src.type) {

		case OP_SRC_REG:
			if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
			    cfa->base == CFI_SP &&
			    check_reg_frame_pos(&regs[CFI_BP], -cfa->offset)) {

				/* mov %rsp, %rbp */
				cfa->base = op->dest.reg;
				cfi->bp_scratch = false;
			}

			else if (op->src.reg == CFI_SP &&
				 op->dest.reg == CFI_BP && cfi->drap) {

				/* drap: mov %rsp, %rbp */
				regs[CFI_BP].base = CFI_BP;
				regs[CFI_BP].offset = -cfi->stack_size;
				cfi->bp_scratch = false;
			}

			else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {

				/*
				 * mov %rsp, %reg
				 *
				 * This is needed for the rare case where GCC
				 * does:
				 *
				 *   mov    %rsp, %rax
				 *   ...
				 *   mov    %rax, %rsp
				 */
				cfi->vals[op->dest.reg].base = CFI_CFA;
				cfi->vals[op->dest.reg].offset = -cfi->stack_size;
			}

			else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
				 (cfa->base == CFI_BP || cfa->base == cfi->drap_reg)) {

				/*
				 * mov %rbp, %rsp
				 *
				 * Restore the original stack pointer (Clang).
				 */
				cfi->stack_size = -cfi->regs[CFI_BP].offset;
			}

			else if (op->dest.reg == cfa->base) {

				/* mov %reg, %rsp */
				if (cfa->base == CFI_SP &&
				    cfi->vals[op->src.reg].base == CFI_CFA) {

					/*
					 * This is needed for the rare case
					 * where GCC does something dumb like:
					 *
					 *   lea    0x8(%rsp), %rcx
					 *   ...
					 *   mov    %rcx, %rsp
					 */
					cfa->offset = -cfi->vals[op->src.reg].offset;
					cfi->stack_size = cfa->offset;

				} else if (cfa->base == CFI_SP &&
					   cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
					   cfi->vals[op->src.reg].offset == cfa->offset) {

					/*
					 * Stack swizzle:
					 *
					 * 1: mov %rsp, (%[tos])
					 * 2: mov %[tos], %rsp
					 *    ...
					 * 3: pop %rsp
					 *
					 * Where:
					 *
					 * 1 - places a pointer to the previous
					 *     stack at the Top-of-Stack of the
					 *     new stack.
					 *
					 * 2 - switches to the new stack.
					 *
					 * 3 - pops the Top-of-Stack to restore
					 *     the original stack.
					 *
					 * Note: we set base to SP_INDIRECT
					 * here and preserve offset. Therefore
					 * when the unwinder reaches ToS it
					 * will dereference SP and then add the
					 * offset to find the next frame, IOW:
					 * (%rsp) + offset.
					 */
					cfa->base = CFI_SP_INDIRECT;

				} else {
					cfa->base = CFI_UNDEFINED;
					cfa->offset = 0;
				}
			}

			else if (op->dest.reg == CFI_SP &&
				 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
				 cfi->vals[op->src.reg].offset == cfa->offset) {

				/*
				 * The same stack swizzle case 2) as above. But
				 * because we can't change cfa->base, case 3)
				 * will become a regular POP. Pretend we're a
				 * PUSH so things don't go unbalanced.
				 */
				cfi->stack_size += 8;
			}


			break;

		case OP_SRC_ADD:
			if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) {

				/* add imm, %rsp */
				cfi->stack_size -= op->src.offset;
				if (cfa->base == CFI_SP)
					cfa->offset -= op->src.offset;
				break;
			}

			if (op->dest.reg == CFI_BP && op->src.reg == CFI_SP &&
			    insn->sym->frame_pointer) {
				/* addi.d fp,sp,imm on LoongArch */
				if (cfa->base == CFI_SP && cfa->offset == op->src.offset) {
					cfa->base = CFI_BP;
					cfa->offset = 0;
				}
				break;
			}

			if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {
				/* addi.d sp,fp,imm on LoongArch */
				if (cfa->base == CFI_BP && cfa->offset == 0) {
					if (insn->sym->frame_pointer) {
						cfa->base = CFI_SP;
						cfa->offset = -op->src.offset;
					}
				} else {
					/* lea disp(%rbp), %rsp */
					cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
				}
				break;
			}

			if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {

				/* drap: lea disp(%rsp), %drap */
				cfi->drap_reg = op->dest.reg;

				/*
				 * lea disp(%rsp), %reg
				 *
				 * This is needed for the rare case where GCC
				 * does something dumb like:
				 *
				 *   lea    0x8(%rsp), %rcx
				 *   ...
				 *   mov    %rcx, %rsp
				 */
				cfi->vals[op->dest.reg].base = CFI_CFA;
				cfi->vals[op->dest.reg].offset = \
					-cfi->stack_size + op->src.offset;

				break;
			}

			if (cfi->drap && op->dest.reg == CFI_SP &&
			    op->src.reg == cfi->drap_reg) {

				 /* drap: lea disp(%drap), %rsp */
				cfa->base = CFI_SP;
				cfa->offset = cfi->stack_size = -op->src.offset;
				cfi->drap_reg = CFI_UNDEFINED;
				cfi->drap = false;
				break;
			}

			if (op->dest.reg == cfi->cfa.base && !(next_insn && next_insn->hint)) {
				WARN_INSN(insn, "unsupported stack register modification");
				return -1;
			}

			break;

		case OP_SRC_AND:
			if (op->dest.reg != CFI_SP ||
			    (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
			    (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
				WARN_INSN(insn, "unsupported stack pointer realignment");
				return -1;
			}

			if (cfi->drap_reg != CFI_UNDEFINED) {
				/* drap: and imm, %rsp */
				cfa->base = cfi->drap_reg;
				cfa->offset = cfi->stack_size = 0;
				cfi->drap = true;
			}

			/*
			 * Older versions of GCC (4.8ish) realign the stack
			 * without DRAP, with a frame pointer.
			 */

			break;

		case OP_SRC_POP:
		case OP_SRC_POPF:
			if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) {

				/* pop %rsp; # restore from a stack swizzle */
				cfa->base = CFI_SP;
				break;
			}

			if (!cfi->drap && op->dest.reg == cfa->base) {

				/* pop %rbp */
				cfa->base = CFI_SP;
			}

			if (cfi->drap && cfa->base == CFI_BP_INDIRECT &&
			    op->dest.reg == cfi->drap_reg &&
			    cfi->drap_offset == -cfi->stack_size) {

				/* drap: pop %drap */
				cfa->base = cfi->drap_reg;
				cfa->offset = 0;
				cfi->drap_offset = -1;

			} else if (cfi->stack_size == -regs[op->dest.reg].offset) {

				/* pop %reg */
				restore_reg(cfi, op->dest.reg);
			}

			cfi->stack_size -= 8;
			if (cfa->base == CFI_SP)
				cfa->offset -= 8;

			break;

		case OP_SRC_REG_INDIRECT:
			if (!cfi->drap && op->dest.reg == cfa->base &&
			    op->dest.reg == CFI_BP) {

				/* mov disp(%rsp), %rbp */
				cfa->base = CFI_SP;
				cfa->offset = cfi->stack_size;
			}

			if (cfi->drap && op->src.reg == CFI_BP &&
			    op->src.offset == cfi->drap_offset) {

				/* drap: mov disp(%rbp), %drap */
				cfa->base = cfi->drap_reg;
				cfa->offset = 0;
				cfi->drap_offset = -1;
			}

			if (cfi->drap && op->src.reg == CFI_BP &&
			    op->src.offset == regs[op->dest.reg].offset) {

				/* drap: mov disp(%rbp), %reg */
				restore_reg(cfi, op->dest.reg);

			} else if (op->src.reg == cfa->base &&
			    op->src.offset == regs[op->dest.reg].offset + cfa->offset) {

				/* mov disp(%rbp), %reg */
				/* mov disp(%rsp), %reg */
				restore_reg(cfi, op->dest.reg);

			} else if (op->src.reg == CFI_SP &&
				   op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) {

				/* mov disp(%rsp), %reg */
				restore_reg(cfi, op->dest.reg);
			}

			break;

		default:
			WARN_INSN(insn, "unknown stack-related instruction");
			return -1;
		}

		break;

	case OP_DEST_PUSH:
	case OP_DEST_PUSHF:
		cfi->stack_size += 8;
		if (cfa->base == CFI_SP)
			cfa->offset += 8;

		if (op->src.type != OP_SRC_REG)
			break;

		if (cfi->drap) {
			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {

				/* drap: push %drap */
				cfa->base = CFI_BP_INDIRECT;
				cfa->offset = -cfi->stack_size;

				/* save drap so we know when to restore it */
				cfi->drap_offset = -cfi->stack_size;

			} else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) {

				/* drap: push %rbp */
				cfi->stack_size = 0;

			} else {

				/* drap: push %reg */
				save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size);
			}

		} else {

			/* push %reg */
			save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size);
		}

		/* detect when asm code uses rbp as a scratch register */
		if (opts.stackval && insn_func(insn) && op->src.reg == CFI_BP &&
		    cfa->base != CFI_BP)
			cfi->bp_scratch = true;
		break;

	case OP_DEST_REG_INDIRECT:

		if (cfi->drap) {
			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {

				/* drap: mov %drap, disp(%rbp) */
				cfa->base = CFI_BP_INDIRECT;
				cfa->offset = op->dest.offset;

				/* save drap offset so we know when to restore it */
				cfi->drap_offset = op->dest.offset;
			} else {

				/* drap: mov reg, disp(%rbp) */
				save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset);
			}

		} else if (op->dest.reg == cfa->base) {

			/* mov reg, disp(%rbp) */
			/* mov reg, disp(%rsp) */
			save_reg(cfi, op->src.reg, CFI_CFA,
				 op->dest.offset - cfi->cfa.offset);

		} else if (op->dest.reg == CFI_SP) {

			/* mov reg, disp(%rsp) */
			save_reg(cfi, op->src.reg, CFI_CFA,
				 op->dest.offset - cfi->stack_size);

		} else if (op->src.reg == CFI_SP && op->dest.offset == 0) {

			/* mov %rsp, (%reg); # setup a stack swizzle. */
			cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT;
			cfi->vals[op->dest.reg].offset = cfa->offset;
		}

		break;

	case OP_DEST_MEM:
		if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
			WARN_INSN(insn, "unknown stack-related memory operation");
			return -1;
		}

		/* pop mem */
		cfi->stack_size -= 8;
		if (cfa->base == CFI_SP)
			cfa->offset -= 8;

		break;

	default:
		WARN_INSN(insn, "unknown stack-related instruction");
		return -1;
	}

	return 0;
}
3246 
3247 /*
3248  * The stack layouts of alternatives instructions can sometimes diverge when
3249  * they have stack modifications.  That's fine as long as the potential stack
3250  * layouts don't conflict at any given potential instruction boundary.
3251  *
3252  * Flatten the CFIs of the different alternative code streams (both original
3253  * and replacement) into a single shared CFI array which can be used to detect
3254  * conflicts and nicely feed a linear array of ORC entries to the unwinder.
3255  */
3256 static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn)
3257 {
3258 	struct cfi_state **alt_cfi;
3259 	int group_off;
3260 
3261 	if (!insn->alt_group)
3262 		return 0;
3263 
3264 	if (!insn->cfi) {
3265 		WARN("CFI missing");
3266 		return -1;
3267 	}
3268 
3269 	alt_cfi = insn->alt_group->cfi;
3270 	group_off = insn->offset - insn->alt_group->first_insn->offset;
3271 
3272 	if (!alt_cfi[group_off]) {
3273 		alt_cfi[group_off] = insn->cfi;
3274 	} else {
3275 		if (cficmp(alt_cfi[group_off], insn->cfi)) {
3276 			struct alt_group *orig_group = insn->alt_group->orig_group ?: insn->alt_group;
3277 			struct instruction *orig = orig_group->first_insn;
3278 			WARN_INSN(orig, "stack layout conflict in alternatives: %s",
3279 				  offstr(insn->sec, insn->offset));
3280 			return -1;
3281 		}
3282 	}
3283 
3284 	return 0;
3285 }
3286 
/*
 * Apply all of @insn's stack operations to @state, and track the uaccess
 * enable/disable state across PUSHF/POPF pairs inside alternatives.
 *
 * Returns 0 on success, non-zero on error (already warned).
 */
static int noinline handle_insn_ops(struct instruction *insn,
				    struct instruction *next_insn,
				    struct insn_state *state)
{
	struct insn_state prev_state __maybe_unused = *state;
	struct stack_op *op;
	int ret = 0;

	for (op = insn->stack_ops; op; op = op->next) {

		ret = update_cfi_state(insn, next_insn, &state->cfi, op);
		if (ret)
			goto done;

		/* PUSHF/POPF uaccess tracking only matters inside alternatives */
		if (!opts.uaccess || !insn->alt_group)
			continue;

		if (op->dest.type == OP_DEST_PUSHF) {
			if (!state->uaccess_stack) {
				state->uaccess_stack = 1;
			} else if (state->uaccess_stack >> 31) {
				/* the 32-bit shift-register stack is full */
				WARN_INSN(insn, "PUSHF stack exhausted");
				ret = 1;
				goto done;
			}
			/* push the current uaccess bit onto the shift-register stack */
			state->uaccess_stack <<= 1;
			state->uaccess_stack  |= state->uaccess;
		}

		if (op->src.type == OP_SRC_POPF) {
			if (state->uaccess_stack) {
				/* pop the saved uaccess bit; 1 left means the stack is empty */
				state->uaccess = state->uaccess_stack & 1;
				state->uaccess_stack >>= 1;
				if (state->uaccess_stack == 1)
					state->uaccess_stack = 0;
			}
		}
	}

done:
	TRACE_INSN_STATE(insn, &prev_state, state);

	return ret;
}
3331 
3332 static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
3333 {
3334 	struct cfi_state *cfi1 = insn->cfi;
3335 	int i;
3336 
3337 	if (!cfi1) {
3338 		WARN("CFI missing");
3339 		return false;
3340 	}
3341 
3342 	if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) {
3343 
3344 		WARN_INSN(insn, "stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
3345 			  cfi1->cfa.base, cfi1->cfa.offset,
3346 			  cfi2->cfa.base, cfi2->cfa.offset);
3347 		return false;
3348 
3349 	}
3350 
3351 	if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
3352 		for (i = 0; i < CFI_NUM_REGS; i++) {
3353 
3354 			if (!memcmp(&cfi1->regs[i], &cfi2->regs[i], sizeof(struct cfi_reg)))
3355 				continue;
3356 
3357 			WARN_INSN(insn, "stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
3358 				  i, cfi1->regs[i].base, cfi1->regs[i].offset,
3359 				  i, cfi2->regs[i].base, cfi2->regs[i].offset);
3360 		}
3361 		return false;
3362 	}
3363 
3364 	if (cfi1->type != cfi2->type) {
3365 
3366 		WARN_INSN(insn, "stack state mismatch: type1=%d type2=%d",
3367 			  cfi1->type, cfi2->type);
3368 		return false;
3369 	}
3370 
3371 	if (cfi1->drap != cfi2->drap ||
3372 		   (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) ||
3373 		   (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) {
3374 
3375 		WARN_INSN(insn, "stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
3376 			  cfi1->drap, cfi1->drap_reg, cfi1->drap_offset,
3377 			  cfi2->drap, cfi2->drap_reg, cfi2->drap_offset);
3378 		return false;
3379 	}
3380 
3381 	return true;
3382 }
3383 
3384 static inline bool func_uaccess_safe(struct symbol *func)
3385 {
3386 	if (func)
3387 		return func->uaccess_safe;
3388 
3389 	return false;
3390 }
3391 
/*
 * Return a printable name for the destination of a call instruction,
 * for use in warning messages.
 */
static inline const char *call_dest_name(struct instruction *insn)
{
	/*
	 * Scratch buffer for the pv_ops case below.  NOTE(review): being
	 * static, the returned string is overwritten by the next pv_ops
	 * lookup -- fine for immediate WARN formatting.
	 */
	static char pvname[19];
	struct reloc *reloc;
	int idx;

	/* Direct call with a known destination symbol. */
	if (insn_call_dest(insn))
		return insn_call_dest(insn)->name;

	/* Indirect call through the paravirt ops table? */
	reloc = insn_reloc(NULL, insn);
	if (reloc && !strcmp(reloc->sym->name, "pv_ops")) {
		/* Relocation addend -> pv_ops[] slot index. */
		idx = (reloc_addend(reloc) / sizeof(void *));
		snprintf(pvname, sizeof(pvname), "pv_ops[%d]", idx);
		return pvname;
	}

	/* Unknown indirect destination. */
	return "{dynamic}";
}
3410 
/*
 * Check whether an indirect call through pv_ops[] can only reach
 * noinstr targets.  The per-slot result is cached in ->clean so each
 * slot is validated (and warned about) at most once.
 */
static bool pv_call_dest(struct objtool_file *file, struct instruction *insn)
{
	struct symbol *target;
	struct reloc *reloc;
	int idx;

	/* Only calls relocated against the pv_ops table are of interest. */
	reloc = insn_reloc(file, insn);
	if (!reloc || strcmp(reloc->sym->name, "pv_ops"))
		return false;

	/* Relocation addend -> pv_ops[] slot index. */
	idx = arch_insn_adjusted_addend(insn, reloc) / sizeof(void *);

	if (file->pv_ops[idx].clean)
		return true;

	/* Set before the scan; cleared again if any target is dirty. */
	file->pv_ops[idx].clean = true;

	/* A slot is clean only if every possible target is noinstr. */
	list_for_each_entry(target, &file->pv_ops[idx].targets, pv_target) {
		if (!target->sec->noinstr) {
			WARN("pv_ops[%d]: %s", idx, target->name);
			file->pv_ops[idx].clean = false;
		}
	}

	return file->pv_ops[idx].clean;
}
3437 
3438 static inline bool noinstr_call_dest(struct objtool_file *file,
3439 				     struct instruction *insn,
3440 				     struct symbol *func)
3441 {
3442 	/*
3443 	 * We can't deal with indirect function calls at present;
3444 	 * assume they're instrumented.
3445 	 */
3446 	if (!func) {
3447 		if (file->pv_ops)
3448 			return pv_call_dest(file, insn);
3449 
3450 		return false;
3451 	}
3452 
3453 	/*
3454 	 * If the symbol is from a noinstr section; we good.
3455 	 */
3456 	if (func->sec->noinstr)
3457 		return true;
3458 
3459 	/*
3460 	 * If the symbol is a static_call trampoline, we can't tell.
3461 	 */
3462 	if (func->static_call_tramp)
3463 		return true;
3464 
3465 	/*
3466 	 * The __ubsan_handle_*() calls are like WARN(), they only happen when
3467 	 * something 'BAD' happened. At the risk of taking the machine down,
3468 	 * let them proceed to get the message out.
3469 	 */
3470 	if (!strncmp(func->name, "__ubsan_handle_", 15))
3471 		return true;
3472 
3473 	return false;
3474 }
3475 
3476 static int validate_call(struct objtool_file *file,
3477 			 struct instruction *insn,
3478 			 struct insn_state *state)
3479 {
3480 	if (state->noinstr && state->instr <= 0 &&
3481 	    !noinstr_call_dest(file, insn, insn_call_dest(insn))) {
3482 		WARN_INSN(insn, "call to %s() leaves .noinstr.text section", call_dest_name(insn));
3483 		return 1;
3484 	}
3485 
3486 	if (state->uaccess && !func_uaccess_safe(insn_call_dest(insn))) {
3487 		WARN_INSN(insn, "call to %s() with UACCESS enabled", call_dest_name(insn));
3488 		return 1;
3489 	}
3490 
3491 	if (state->df) {
3492 		WARN_INSN(insn, "call to %s() with DF set", call_dest_name(insn));
3493 		return 1;
3494 	}
3495 
3496 	return 0;
3497 }
3498 
/*
 * A sibling (tail) call must leave the stack frame exactly as it was on
 * function entry, and then obeys the same rules as a normal call.
 */
static int validate_sibling_call(struct objtool_file *file,
				 struct instruction *insn,
				 struct insn_state *state)
{
	if (insn_func(insn) && has_modified_stack_frame(insn, state)) {
		WARN_INSN(insn, "sibling call from callable instruction with modified stack frame");
		return 1;
	}

	return validate_call(file, insn, state);
}
3510 
3511 static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state)
3512 {
3513 	if (state->noinstr && state->instr > 0) {
3514 		WARN_INSN(insn, "return with instrumentation enabled");
3515 		return 1;
3516 	}
3517 
3518 	if (state->uaccess && !func_uaccess_safe(func)) {
3519 		WARN_INSN(insn, "return with UACCESS enabled");
3520 		return 1;
3521 	}
3522 
3523 	if (!state->uaccess && func_uaccess_safe(func)) {
3524 		WARN_INSN(insn, "return with UACCESS disabled from a UACCESS-safe function");
3525 		return 1;
3526 	}
3527 
3528 	if (state->df) {
3529 		WARN_INSN(insn, "return with DF set");
3530 		return 1;
3531 	}
3532 
3533 	if (func && has_modified_stack_frame(insn, state)) {
3534 		WARN_INSN(insn, "return with modified stack frame");
3535 		return 1;
3536 	}
3537 
3538 	if (state->cfi.bp_scratch) {
3539 		WARN_INSN(insn, "BP used as a scratch register");
3540 		return 1;
3541 	}
3542 
3543 	return 0;
3544 }
3545 
/*
 * Return the next instruction the validation walk should visit after
 * @insn, accounting for in-place alternative patching.
 */
static struct instruction *next_insn_to_validate(struct objtool_file *file,
						 struct instruction *insn)
{
	struct alt_group *alt_group = insn->alt_group;

	/*
	 * Simulate the fact that alternatives are patched in-place.  When the
	 * end of a replacement alt_group is reached, redirect objtool flow to
	 * the end of the original alt_group.
	 *
	 * insn->alts->insn -> alt_group->first_insn
	 *		       ...
	 *		       alt_group->last_insn
	 *		       [alt_group->nop]      -> next(orig_group->last_insn)
	 */
	if (alt_group) {
		if (alt_group->nop) {
			/* ->nop implies ->orig_group */
			if (insn == alt_group->last_insn)
				return alt_group->nop;
			if (insn == alt_group->nop)
				goto next_orig;
		}
		if (insn == alt_group->last_insn && alt_group->orig_group)
			goto next_orig;
	}

	/* Not at an alt group boundary: plain straight-line successor. */
	return next_insn_same_sec(file, insn);

next_orig:
	return next_insn_same_sec(file, alt_group->orig_group->last_insn);
}
3578 
3579 static bool skip_alt_group(struct instruction *insn)
3580 {
3581 	struct instruction *alt_insn = insn->alts ? insn->alts->insn : NULL;
3582 
3583 	if (!insn->alt_group)
3584 		return false;
3585 
3586 	/* ANNOTATE_IGNORE_ALTERNATIVE */
3587 	if (insn->alt_group->ignore) {
3588 		TRACE_ALT(insn, "alt group ignored");
3589 		return true;
3590 	}
3591 
3592 	/*
3593 	 * For NOP patched with CLAC/STAC, only follow the latter to avoid
3594 	 * impossible code paths combining patched CLAC with unpatched STAC
3595 	 * or vice versa.
3596 	 *
3597 	 * ANNOTATE_IGNORE_ALTERNATIVE could have been used here, but Linus
3598 	 * requested not to do that to avoid hurting .s file readability
3599 	 * around CLAC/STAC alternative sites.
3600 	 */
3601 
3602 	if (!alt_insn)
3603 		return false;
3604 
3605 	/* Don't override ASM_{CLAC,STAC}_UNSAFE */
3606 	if (alt_insn->alt_group && alt_insn->alt_group->ignore)
3607 		return false;
3608 
3609 	return alt_insn->type == INSN_CLAC || alt_insn->type == INSN_STAC;
3610 }
3611 
3612 static int checksum_debug_init(struct objtool_file *file)
3613 {
3614 	char *dup, *s;
3615 
3616 	if (!opts.debug_checksum)
3617 		return 0;
3618 
3619 	dup = strdup(opts.debug_checksum);
3620 	if (!dup) {
3621 		ERROR_GLIBC("strdup");
3622 		return -1;
3623 	}
3624 
3625 	s = dup;
3626 	while (*s) {
3627 		struct symbol *func;
3628 		char *comma;
3629 
3630 		comma = strchr(s, ',');
3631 		if (comma)
3632 			*comma = '\0';
3633 
3634 		func = find_symbol_by_name(file->elf, s);
3635 		if (!func || !is_func_sym(func))
3636 			WARN("--debug-checksum: can't find '%s'", s);
3637 		else
3638 			func->debug_checksum = 1;
3639 
3640 		if (!comma)
3641 			break;
3642 
3643 		s = comma + 1;
3644 	}
3645 
3646 	free(dup);
3647 	return 0;
3648 }
3649 
/*
 * Fold one instruction into @func's checksum.  Besides the raw
 * instruction bytes, relocation targets are folded in symbolically
 * (demangled name + offset) rather than as raw addresses --
 * NOTE(review): presumably to keep the checksum independent of layout;
 * confirm against the checksum design docs.
 */
static void checksum_update_insn(struct objtool_file *file, struct symbol *func,
				 struct instruction *insn)
{
	struct reloc *reloc = insn_reloc(file, insn);
	unsigned long offset;
	struct symbol *sym;

	/* Fake instructions have no backing section bytes to hash. */
	if (insn->fake)
		return;

	/* The raw instruction bytes. */
	checksum_update(func, insn, insn->sec->data->d_buf + insn->offset, insn->len);

	if (!reloc) {
		struct symbol *call_dest = insn_call_dest(insn);

		/* No reloc: a known call destination is still folded in by name. */
		if (call_dest)
			checksum_update(func, insn, call_dest->demangled_name,
					strlen(call_dest->demangled_name));
		return;
	}

	sym = reloc->sym;
	offset = arch_insn_adjusted_addend(insn, reloc);

	/* References into string sections hash the string contents. */
	if (is_string_sec(sym->sec)) {
		char *str;

		str = sym->sec->data->d_buf + sym->offset + offset;
		checksum_update(func, insn, str, strlen(str));
		return;
	}

	/* Section symbol: resolve to the contained symbol, if any. */
	if (is_sec_sym(sym)) {
		sym = find_symbol_containing(reloc->sym->sec, offset);
		if (!sym)
			return;

		/* Make the offset relative to the resolved symbol. */
		offset -= sym->offset;
	}

	checksum_update(func, insn, sym->demangled_name, strlen(sym->demangled_name));
	checksum_update(func, insn, &offset, sizeof(offset));
}
3693 
3694 static int validate_branch(struct objtool_file *file, struct symbol *func,
3695 			   struct instruction *insn, struct insn_state state);
3696 static int do_validate_branch(struct objtool_file *file, struct symbol *func,
3697 			      struct instruction *insn, struct insn_state state);
3698 
/*
 * Validate a single instruction during the branch walk: track visit
 * state, record/propagate CFI, recurse into alternatives and branch
 * targets, and enforce the per-instruction-type rules (uaccess, DF,
 * frame pointer, ...).
 *
 * Returns non-zero on warning/error.  *@dead_end tells the caller
 * whether the straight-line walk should stop after this instruction.
 */
static int validate_insn(struct objtool_file *file, struct symbol *func,
			 struct instruction *insn, struct insn_state *statep,
			 struct instruction *prev_insn, struct instruction *next_insn,
			 bool *dead_end)
{
	char *alt_name __maybe_unused = NULL;
	struct alternative *alt;
	u8 visited;
	int ret;

	/*
	 * Any returns before the end of this function are effectively dead
	 * ends, i.e. validate_branch() has reached the end of the branch.
	 */
	*dead_end = true;

	/* One visited bit per uaccess state, so both flavors get walked. */
	visited = VISITED_BRANCH << statep->uaccess;
	if (insn->visited & VISITED_BRANCH_MASK) {
		/* Revisit: the CFI must agree with the previous visit. */
		if (!insn->hint && !insn_cfi_match(insn, &statep->cfi))
			return 1;

		if (insn->visited & visited) {
			TRACE_INSN(insn, "already visited");
			return 0;
		}
	} else {
		nr_insns_visited++;
	}

	if (statep->noinstr)
		statep->instr += insn->instr;

	if (insn->hint) {
		if (insn->restore) {
			struct instruction *save_insn, *i;

			/* Find the matching CFI save hint, scanning backwards. */
			i = insn;
			save_insn = NULL;

			sym_for_each_insn_continue_reverse(file, func, i) {
				if (i->save) {
					save_insn = i;
					break;
				}
			}

			if (!save_insn) {
				WARN_INSN(insn, "no corresponding CFI save for CFI restore");
				return 1;
			}

			if (!save_insn->visited) {
				/*
				 * If the restore hint insn is at the
				 * beginning of a basic block and was
				 * branched to from elsewhere, and the
				 * save insn hasn't been visited yet,
				 * defer following this branch for now.
				 * It will be seen later via the
				 * straight-line path.
				 */
				if (!prev_insn) {
					TRACE_INSN(insn, "defer restore");
					return 0;
				}

				WARN_INSN(insn, "objtool isn't smart enough to handle this CFI save/restore combo");
				return 1;
			}

			insn->cfi = save_insn->cfi;
			nr_cfi_reused++;
		}

		/* Hints override the computed state. */
		statep->cfi = *insn->cfi;
	} else {
		/* XXX track if we actually changed statep->cfi */

		/* Share the previous insn's CFI object when unchanged. */
		if (prev_insn && !cficmp(prev_insn->cfi, &statep->cfi)) {
			insn->cfi = prev_insn->cfi;
			nr_cfi_reused++;
		} else {
			insn->cfi = cfi_hash_find_or_add(&statep->cfi);
		}
	}

	insn->visited |= visited;

	if (propagate_alt_cfi(file, insn))
		return 1;

	/* Recurse into every alternative replacement first. */
	if (insn->alts) {
		for (alt = insn->alts; alt; alt = alt->next) {
			TRACE_ALT_BEGIN(insn, alt, alt_name);
			ret = validate_branch(file, func, alt->insn, *statep);
			TRACE_ALT_END(insn, alt, alt_name);
			if (ret) {
				BT_INSN(insn, "(alt)");
				return ret;
			}
		}
		TRACE_ALT_INFO_NOADDR(insn, "/ ", "DEFAULT");
	}

	if (skip_alt_group(insn))
		return 0;

	if (handle_insn_ops(insn, next_insn, statep))
		return 1;

	/* Per-instruction-type rules. */
	switch (insn->type) {

	case INSN_RETURN:
		TRACE_INSN(insn, "return");
		return validate_return(func, insn, statep);

	case INSN_CALL:
	case INSN_CALL_DYNAMIC:
		if (insn->type == INSN_CALL)
			TRACE_INSN(insn, "call");
		else
			TRACE_INSN(insn, "indirect call");

		ret = validate_call(file, insn, statep);
		if (ret)
			return ret;

		if (opts.stackval && func && !is_special_call(insn) &&
		    !has_valid_stack_frame(statep)) {
			WARN_INSN(insn, "call without frame pointer save/setup");
			return 1;
		}

		break;

	case INSN_JUMP_CONDITIONAL:
	case INSN_JUMP_UNCONDITIONAL:
		if (is_sibling_call(insn)) {
			TRACE_INSN(insn, "sibling call");
			ret = validate_sibling_call(file, insn, statep);
			if (ret)
				return ret;

		} else if (insn->jump_dest) {
			if (insn->type == INSN_JUMP_UNCONDITIONAL)
				TRACE_INSN(insn, "unconditional jump");
			else
				TRACE_INSN(insn, "jump taken");

			/* Follow the taken path with a copy of the state. */
			ret = validate_branch(file, func, insn->jump_dest, *statep);
			if (ret) {
				BT_INSN(insn, "(branch)");
				return ret;
			}
		}

		if (insn->type == INSN_JUMP_UNCONDITIONAL)
			return 0;

		TRACE_INSN(insn, "jump not taken");
		break;

	case INSN_JUMP_DYNAMIC:
	case INSN_JUMP_DYNAMIC_CONDITIONAL:
		TRACE_INSN(insn, "indirect jump");
		if (is_sibling_call(insn)) {
			ret = validate_sibling_call(file, insn, statep);
			if (ret)
				return ret;
		}

		if (insn->type == INSN_JUMP_DYNAMIC)
			return 0;

		break;

	case INSN_SYSCALL:
		TRACE_INSN(insn, "syscall");
		if (func && (!next_insn || !next_insn->hint)) {
			WARN_INSN(insn, "unsupported instruction in callable function");
			return 1;
		}

		break;

	case INSN_SYSRET:
		TRACE_INSN(insn, "sysret");
		if (func && (!next_insn || !next_insn->hint)) {
			WARN_INSN(insn, "unsupported instruction in callable function");
			return 1;
		}

		return 0;

	case INSN_STAC:
		TRACE_INSN(insn, "stac");
		if (!opts.uaccess)
			break;

		if (statep->uaccess) {
			WARN_INSN(insn, "recursive UACCESS enable");
			return 1;
		}

		statep->uaccess = true;
		break;

	case INSN_CLAC:
		TRACE_INSN(insn, "clac");
		if (!opts.uaccess)
			break;

		if (!statep->uaccess && func) {
			WARN_INSN(insn, "redundant UACCESS disable");
			return 1;
		}

		if (func_uaccess_safe(func) && !statep->uaccess_stack) {
			WARN_INSN(insn, "UACCESS-safe disables UACCESS");
			return 1;
		}

		statep->uaccess = false;
		break;

	case INSN_STD:
		TRACE_INSN(insn, "std");
		if (statep->df) {
			WARN_INSN(insn, "recursive STD");
			return 1;
		}

		statep->df = true;
		break;

	case INSN_CLD:
		TRACE_INSN(insn, "cld");
		if (!statep->df && func) {
			WARN_INSN(insn, "redundant CLD");
			return 1;
		}

		statep->df = false;
		break;

	default:
		break;
	}

	if (insn->dead_end)
		TRACE_INSN(insn, "dead end");

	*dead_end = insn->dead_end;
	return 0;
}
3954 
/*
 * Follow the branch starting at the given instruction, and recursively follow
 * any other branches (jumps).  Meanwhile, track the frame pointer state at
 * each instruction and validate all the rules described in
 * tools/objtool/Documentation/objtool.txt.
 */
static int do_validate_branch(struct objtool_file *file, struct symbol *func,
			      struct instruction *insn, struct insn_state state)
{
	struct instruction *next_insn, *prev_insn = NULL;
	bool dead_end;
	int ret;

	if (func && func->ignore)
		return 0;

	do {
		insn->trace = 0;
		next_insn = next_insn_to_validate(file, insn);

		if (opts.checksum && func && insn->sec)
			checksum_update_insn(file, func, insn);

		/* Walked past the end of @func into the next function? */
		if (func && insn_func(insn) && func != insn_func(insn)->pfunc) {
			/* Ignore KCFI type preambles, which always fall through */
			if (is_prefix_func(func))
				return 0;

			if (file->ignore_unreachables)
				return 0;

			WARN("%s() falls through to next function %s()",
			     func->name, insn_func(insn)->name);
			func->warned = 1;

			return 1;
		}

		ret = validate_insn(file, func, insn, &state, prev_insn, next_insn,
				    &dead_end);

		/* Emit a default trace line if validate_insn() didn't. */
		if (!insn->trace) {
			if (ret)
				TRACE_INSN(insn, "warning (%d)", ret);
			else
				TRACE_INSN(insn, NULL);
		}

		/* Falling off the end of the section: only ok with undefined CFA. */
		if (!dead_end && !next_insn) {
			if (state.cfi.cfa.base == CFI_UNDEFINED)
				return 0;
			if (file->ignore_unreachables)
				return 0;

			WARN("%s%sunexpected end of section %s",
			     func ? func->name : "", func ? "(): " : "",
			     insn->sec->name);
			return 1;
		}

		prev_insn = insn;
		insn = next_insn;

	} while (!dead_end);

	return ret;
}
4022 
/*
 * Wrapper around do_validate_branch() that brackets the recursion with
 * trace depth accounting.
 */
static int validate_branch(struct objtool_file *file, struct symbol *func,
			   struct instruction *insn, struct insn_state state)
{
	int ret;

	trace_depth_inc();
	ret = do_validate_branch(file, func, insn, state);
	trace_depth_dec();

	return ret;
}
4034 
4035 static int validate_unwind_hint(struct objtool_file *file,
4036 				  struct instruction *insn,
4037 				  struct insn_state *state)
4038 {
4039 	if (insn->hint && !insn->visited) {
4040 		struct symbol *func = insn_func(insn);
4041 		int ret;
4042 
4043 		if (opts.checksum)
4044 			checksum_init(func);
4045 
4046 		ret = validate_branch(file, func, insn, *state);
4047 		if (ret)
4048 			BT_INSN(insn, "<=== (hint)");
4049 		return ret;
4050 	}
4051 
4052 	return 0;
4053 }
4054 
4055 static int validate_unwind_hints(struct objtool_file *file, struct section *sec)
4056 {
4057 	struct instruction *insn;
4058 	struct insn_state state;
4059 	int warnings = 0;
4060 
4061 	if (!file->hints)
4062 		return 0;
4063 
4064 	init_insn_state(file, &state, sec);
4065 
4066 	if (sec) {
4067 		sec_for_each_insn(file, sec, insn)
4068 			warnings += validate_unwind_hint(file, insn, &state);
4069 	} else {
4070 		for_each_insn(file, insn)
4071 			warnings += validate_unwind_hint(file, insn, &state);
4072 	}
4073 
4074 	return warnings;
4075 }
4076 
/*
 * Validate rethunk entry constraint: must untrain RET before the first RET.
 *
 * Follow every branch (intra-function) and ensure VALIDATE_UNRET_END comes
 * before an actual RET instruction.
 */
static int validate_unret(struct objtool_file *file, struct instruction *insn)
{
	struct instruction *next, *dest;
	int ret;

	/* Iterative straight-line walk; branches recurse. */
	for (;;) {
		next = next_insn_to_validate(file, insn);

		/* Already covered by a previous walk. */
		if (insn->visited & VISITED_UNRET)
			return 0;

		insn->visited |= VISITED_UNRET;

		/* Every alternative replacement must also satisfy the rule. */
		if (insn->alts) {
			struct alternative *alt;
			for (alt = insn->alts; alt; alt = alt->next) {
				ret = validate_unret(file, alt->insn);
				if (ret) {
					BT_INSN(insn, "(alt)");
					return ret;
				}
			}
		}

		switch (insn->type) {

		/* Indirect transfers are unsafe before untraining. */
		case INSN_CALL_DYNAMIC:
		case INSN_JUMP_DYNAMIC:
		case INSN_JUMP_DYNAMIC_CONDITIONAL:
			WARN_INSN(insn, "early indirect call");
			return 1;

		case INSN_JUMP_UNCONDITIONAL:
		case INSN_JUMP_CONDITIONAL:
			if (!is_sibling_call(insn)) {
				if (!insn->jump_dest) {
					WARN_INSN(insn, "unresolved jump target after linking?!?");
					return 1;
				}
				/* Follow the taken path. */
				ret = validate_unret(file, insn->jump_dest);
				if (ret) {
					BT_INSN(insn, "(branch%s)",
						insn->type == INSN_JUMP_CONDITIONAL ? "-cond" : "");
					return ret;
				}

				if (insn->type == INSN_JUMP_UNCONDITIONAL)
					return 0;

				/* Conditional: also continue straight-line. */
				break;
			}

			/* fallthrough */
		case INSN_CALL:
			dest = find_insn(file, insn_call_dest(insn)->sec,
					 insn_call_dest(insn)->offset);
			if (!dest) {
				WARN("Unresolved function after linking!?: %s",
				     insn_call_dest(insn)->name);
				return 1;
			}

			ret = validate_unret(file, dest);
			if (ret) {
				BT_INSN(insn, "(call)");
				return ret;
			}
			/*
			 * If a call returns without error, it must have seen UNTRAIN_RET.
			 * Therefore any non-error return is a success.
			 */
			return 0;

		case INSN_RETURN:
			WARN_INSN(insn, "RET before UNTRAIN");
			return 1;

		case INSN_SYSCALL:
			break;

		case INSN_SYSRET:
			return 0;

		case INSN_NOP:
			/* NOTE(review): a retpoline-safe NOP terminates the walk. */
			if (insn->retpoline_safe)
				return 0;
			break;

		default:
			break;
		}

		if (insn->dead_end)
			return 0;

		/* Ran off the end of the section without resolution. */
		if (!next) {
			WARN_INSN(insn, "teh end!");
			return 1;
		}
		insn = next;
	}

	return 0;
}
4187 
4188 /*
4189  * Validate that all branches starting at VALIDATE_UNRET_BEGIN encounter
4190  * VALIDATE_UNRET_END before RET.
4191  */
4192 static int validate_unrets(struct objtool_file *file)
4193 {
4194 	struct instruction *insn;
4195 	int warnings = 0;
4196 
4197 	for_each_insn(file, insn) {
4198 		if (!insn->unret)
4199 			continue;
4200 
4201 		warnings += validate_unret(file, insn);
4202 	}
4203 
4204 	return warnings;
4205 }
4206 
4207 static int validate_retpoline(struct objtool_file *file)
4208 {
4209 	struct instruction *insn;
4210 	int warnings = 0;
4211 
4212 	for_each_insn(file, insn) {
4213 		if (insn->type != INSN_JUMP_DYNAMIC &&
4214 		    insn->type != INSN_CALL_DYNAMIC &&
4215 		    insn->type != INSN_RETURN)
4216 			continue;
4217 
4218 		if (insn->retpoline_safe)
4219 			continue;
4220 
4221 		if (insn->sec->init)
4222 			continue;
4223 
4224 		if (insn->type == INSN_RETURN) {
4225 			if (opts.rethunk) {
4226 				WARN_INSN(insn, "'naked' return found in MITIGATION_RETHUNK build");
4227 				warnings++;
4228 			}
4229 			continue;
4230 		}
4231 
4232 		WARN_INSN(insn, "indirect %s found in MITIGATION_RETPOLINE build",
4233 			  insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
4234 		warnings++;
4235 	}
4236 
4237 	if (!opts.cfi)
4238 		return warnings;
4239 
4240 	/*
4241 	 * kCFI call sites look like:
4242 	 *
4243 	 *     movl $(-0x12345678), %r10d
4244 	 *     addl -4(%r11), %r10d
4245 	 *     jz 1f
4246 	 *     ud2
4247 	 *  1: cs call __x86_indirect_thunk_r11
4248 	 *
4249 	 * Verify all indirect calls are kCFI adorned by checking for the
4250 	 * UD2. Notably, doing __nocfi calls to regular (cfi) functions is
4251 	 * broken.
4252 	 */
4253 	list_for_each_entry(insn, &file->retpoline_call_list, call_node) {
4254 		struct symbol *sym = insn->sym;
4255 
4256 		if (sym && (sym->type == STT_NOTYPE ||
4257 			    sym->type == STT_FUNC) && !sym->nocfi) {
4258 			struct instruction *prev =
4259 				prev_insn_same_sym(file, insn);
4260 
4261 			if (!prev || prev->type != INSN_BUG) {
4262 				WARN_INSN(insn, "no-cfi indirect call!");
4263 				warnings++;
4264 			}
4265 		}
4266 	}
4267 
4268 	return warnings;
4269 }
4270 
4271 static bool is_kasan_insn(struct instruction *insn)
4272 {
4273 	return (insn->type == INSN_CALL &&
4274 		!strcmp(insn_call_dest(insn)->name, "__asan_handle_no_return"));
4275 }
4276 
4277 static bool is_ubsan_insn(struct instruction *insn)
4278 {
4279 	return (insn->type == INSN_CALL &&
4280 		!strcmp(insn_call_dest(insn)->name,
4281 			"__ubsan_handle_builtin_unreachable"));
4282 }
4283 
/*
 * Decide whether an unvisited (unreachable) instruction should be exempt
 * from the "unreachable instruction" warning.
 */
static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn)
{
	struct symbol *func = insn_func(insn);
	struct instruction *prev_insn;
	int i;

	/* NOPs, traps, holes, and ignored functions are never interesting. */
	if (insn->type == INSN_NOP || insn->type == INSN_TRAP ||
	    insn->hole || (func && func->ignore))
		return true;

	/*
	 * Ignore alternative replacement instructions.  This can happen
	 * when a whitelisted function uses one of the ALTERNATIVE macros.
	 */
	if (!strcmp(insn->sec->name, ".altinstr_replacement") ||
	    !strcmp(insn->sec->name, ".altinstr_aux"))
		return true;

	if (!func)
		return false;

	if (func->static_call_tramp)
		return true;

	/*
	 * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
	 * __builtin_unreachable().  The BUG() macro has an unreachable() after
	 * the UD2, which causes GCC's undefined trap logic to emit another UD2
	 * (or occasionally a JMP to UD2).
	 *
	 * It may also insert a UD2 after calling a __noreturn function.
	 */
	prev_insn = prev_insn_same_sec(file, insn);
	if (prev_insn && prev_insn->dead_end &&
	    (insn->type == INSN_BUG ||
	     (insn->type == INSN_JUMP_UNCONDITIONAL &&
	      insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
		return true;

	/*
	 * Check if this (or a subsequent) instruction is related to
	 * CONFIG_UBSAN or CONFIG_KASAN.
	 *
	 * End the search at 5 instructions to avoid going into the weeds.
	 */
	for (i = 0; i < 5; i++) {

		if (is_kasan_insn(insn) || is_ubsan_insn(insn))
			return true;

		if (insn->type == INSN_JUMP_UNCONDITIONAL) {
			/* Only follow jumps that stay within this function. */
			if (insn->jump_dest &&
			    insn_func(insn->jump_dest) == func) {
				insn = insn->jump_dest;
				continue;
			}

			break;
		}

		/* Stop at the end of the function. */
		if (insn->offset + insn->len >= func->offset + func->len)
			break;

		insn = next_insn_same_sec(file, insn);
	}

	return false;
}
4352 
/*
 * For FineIBT or kCFI, a certain number of bytes preceding the function may be
 * NOPs.  Those NOPs may be rewritten at runtime and executed, so give them a
 * proper function name: __pfx_<func>.
 *
 * The NOPs may not exist for the following cases:
 *
 *   - compiler cloned functions (*.cold, *.part0, etc)
 *   - asm functions created with inline asm or without SYM_FUNC_START()
 *
 * Also, the function may already have a prefix from a previous objtool run
 * (livepatch extracted functions, or manually running objtool multiple times).
 *
 * So return 0 if the NOPs are missing or the function already has a prefix
 * symbol.
 */
static int create_prefix_symbol(struct objtool_file *file, struct symbol *func)
{
	struct instruction *insn, *prev;
	char name[SYM_NAME_LEN];
	struct cfi_state *cfi;

	/* Only plain function symbols are eligible. */
	if (!is_func_sym(func) || is_prefix_func(func) ||
	    func->cold || func->static_call_tramp)
		return 0;

	if ((strlen(func->name) + sizeof("__pfx_") > SYM_NAME_LEN)) {
		WARN("%s: symbol name too long, can't create __pfx_ symbol",
		      func->name);
		return 0;
	}

	if (snprintf_check(name, SYM_NAME_LEN, "__pfx_%s", func->name))
		return -1;

	/* Livepatch objects may already carry the prefix symbol. */
	if (file->klp) {
		struct symbol *pfx;

		pfx = find_symbol_by_offset(func->sec, func->offset - opts.prefix);
		if (pfx && is_prefix_func(pfx) && !strcmp(pfx->name, name))
			return 0;
	}

	insn = find_insn(file, func->sec, func->offset);
	if (!insn) {
		WARN("%s: can't find starting instruction", func->name);
		return -1;
	}

	/* Walk backwards over the padding NOPs in front of the function. */
	for (prev = prev_insn_same_sec(file, insn);
	     prev;
	     prev = prev_insn_same_sec(file, prev)) {
		u64 offset;

		/* Anything other than a NOP means there is no prefix here. */
		if (prev->type != INSN_NOP)
			return 0;

		offset = func->offset - prev->offset;

		/* Overshot the expected prefix size: no prefix. */
		if (offset > opts.prefix)
			return 0;

		/* Not far enough back yet; keep walking. */
		if (offset < opts.prefix)
			continue;

		/* Exactly opts.prefix bytes of NOPs: create the symbol. */
		if (!elf_create_symbol(file->elf, name, func->sec,
				       GELF_ST_BIND(func->sym.st_info),
				       GELF_ST_TYPE(func->sym.st_info),
				       prev->offset, opts.prefix))
			return -1;

		break;
	}

	if (!prev)
		return 0;

	if (!insn->cfi) {
		/*
		 * This can happen if stack validation isn't enabled or the
		 * function is annotated with STACK_FRAME_NON_STANDARD.
		 */
		return 0;
	}

	/* Propagate insn->cfi to the prefix code */
	cfi = cfi_hash_find_or_add(insn->cfi);
	for (; prev != insn; prev = next_insn_same_sec(file, prev))
		prev->cfi = cfi;

	return 0;
}
4445 
4446 static int create_prefix_symbols(struct objtool_file *file)
4447 {
4448 	struct section *sec;
4449 	struct symbol *func;
4450 
4451 	for_each_sec(file->elf, sec) {
4452 		if (!is_text_sec(sec))
4453 			continue;
4454 
4455 		sec_for_each_sym(sec, func) {
4456 			if (create_prefix_symbol(file, func))
4457 				return -1;
4458 		}
4459 	}
4460 
4461 	return 0;
4462 }
4463 
/*
 * Run branch validation starting at symbol @sym, if it is a canonical,
 * not-yet-visited function entry.  Returns non-zero on warnings.
 */
static int validate_symbol(struct objtool_file *file, struct section *sec,
			   struct symbol *sym, struct insn_state *state)
{
	struct instruction *insn;
	struct symbol *func;
	int ret;

	if (!sym->len) {
		WARN("%s() is missing an ELF size annotation", sym->name);
		return 1;
	}

	/* Aliases and sub-functions are validated via their parent. */
	if (sym->pfunc != sym || sym->alias != sym)
		return 0;

	insn = find_insn(file, sec, sym->offset);
	if (!insn || insn->visited)
		return 0;

	if (opts.uaccess)
		state->uaccess = sym->uaccess_safe;

	func = insn_func(insn);

	if (opts.checksum)
		checksum_init(func);

	/* --trace=<pattern>: enable instruction tracing for matching symbols. */
	if (opts.trace && !fnmatch(opts.trace, sym->name, 0)) {
		trace_enable();
		TRACE("%s: validation begin\n", sym->name);
	}

	ret = validate_branch(file, func, insn, *state);
	if (ret)
		BT_INSN(insn, "<=== (sym)");

	/*
	 * NOTE(review): trace_disable() is called unconditionally;
	 * presumably a no-op when tracing wasn't enabled above -- confirm.
	 */
	TRACE("%s: validation %s\n\n", sym->name, ret ? "failed" : "end");
	trace_disable();

	if (opts.checksum)
		checksum_finish(func);

	return ret;
}
4508 
4509 static int validate_section(struct objtool_file *file, struct section *sec)
4510 {
4511 	struct insn_state state;
4512 	struct symbol *func;
4513 	int warnings = 0;
4514 
4515 	sec_for_each_sym(sec, func) {
4516 		if (!is_func_sym(func))
4517 			continue;
4518 
4519 		init_insn_state(file, &state, sec);
4520 		set_func_state(&state.cfi);
4521 
4522 		warnings += validate_symbol(file, sec, func, &state);
4523 	}
4524 
4525 	return warnings;
4526 }
4527 
4528 static int validate_noinstr_sections(struct objtool_file *file)
4529 {
4530 	struct section *sec;
4531 	int warnings = 0;
4532 
4533 	sec = find_section_by_name(file->elf, ".noinstr.text");
4534 	if (sec) {
4535 		warnings += validate_section(file, sec);
4536 		warnings += validate_unwind_hints(file, sec);
4537 	}
4538 
4539 	sec = find_section_by_name(file->elf, ".entry.text");
4540 	if (sec) {
4541 		warnings += validate_section(file, sec);
4542 		warnings += validate_unwind_hints(file, sec);
4543 	}
4544 
4545 	sec = find_section_by_name(file->elf, ".cpuidle.text");
4546 	if (sec) {
4547 		warnings += validate_section(file, sec);
4548 		warnings += validate_unwind_hints(file, sec);
4549 	}
4550 
4551 	return warnings;
4552 }
4553 
4554 static int validate_functions(struct objtool_file *file)
4555 {
4556 	struct section *sec;
4557 	int warnings = 0;
4558 
4559 	for_each_sec(file->elf, sec) {
4560 		if (!is_text_sec(sec))
4561 			continue;
4562 
4563 		warnings += validate_section(file, sec);
4564 	}
4565 
4566 	return warnings;
4567 }
4568 
4569 static void mark_endbr_used(struct instruction *insn)
4570 {
4571 	if (!list_empty(&insn->call_node))
4572 		list_del_init(&insn->call_node);
4573 }
4574 
4575 static bool noendbr_range(struct objtool_file *file, struct instruction *insn)
4576 {
4577 	struct symbol *sym = find_symbol_containing(insn->sec, insn->offset-1);
4578 	struct instruction *first;
4579 
4580 	if (!sym)
4581 		return false;
4582 
4583 	first = find_insn(file, sym->sec, sym->offset);
4584 	if (!first)
4585 		return false;
4586 
4587 	if (first->type != INSN_ENDBR && !first->noendbr)
4588 		return false;
4589 
4590 	return insn->offset == sym->offset + sym->len;
4591 }
4592 
/*
 * Check a single code reference from @insn to @dest for IBT conformance.
 * Returns 0 if the target is acceptable, 1 (with a warning) otherwise.
 * Note the check order matters: a reachable ENDBR must be marked used so
 * it doesn't get sealed later.
 */
static int __validate_ibt_insn(struct objtool_file *file, struct instruction *insn,
			       struct instruction *dest)
{
	if (dest->type == INSN_ENDBR) {
		mark_endbr_used(dest);
		return 0;
	}

	if (insn_func(dest) && insn_func(insn) &&
	    insn_func(dest)->pfunc == insn_func(insn)->pfunc) {
		/*
		 * Anything from->to self is either _THIS_IP_ or
		 * IRET-to-self.
		 *
		 * There is no sane way to annotate _THIS_IP_ since the
		 * compiler treats the relocation as a constant and is
		 * happy to fold in offsets, skewing any annotation we
		 * do, leading to vast amounts of false-positives.
		 *
		 * There's also compiler generated _THIS_IP_ through
		 * KCOV and such which we have no hope of annotating.
		 *
		 * As such, blanket accept self-references without
		 * issue.
		 */
		return 0;
	}

	/*
	 * Accept anything ANNOTATE_NOENDBR.
	 */
	if (dest->noendbr)
		return 0;

	/*
	 * Accept if this is the instruction after a symbol
	 * that is (no)endbr -- typical code-range usage.
	 */
	if (noendbr_range(file, dest))
		return 0;

	WARN_INSN(insn, "relocation to !ENDBR: %s", offstr(dest->sec, dest->offset));
	return 1;
}
4637 
/*
 * Validate all function-pointer-load relocations within one instruction.
 * Returns the number of warnings emitted.
 */
static int validate_ibt_insn(struct objtool_file *file, struct instruction *insn)
{
	struct instruction *dest;
	struct reloc *reloc;
	unsigned long off;
	int warnings = 0;

	/*
	 * Looking for function pointer load relocations.  Ignore
	 * direct/indirect branches:
	 */
	switch (insn->type) {

	case INSN_CALL:
	case INSN_CALL_DYNAMIC:
	case INSN_JUMP_CONDITIONAL:
	case INSN_JUMP_UNCONDITIONAL:
	case INSN_JUMP_DYNAMIC:
	case INSN_JUMP_DYNAMIC_CONDITIONAL:
	case INSN_RETURN:
	case INSN_NOP:
		return 0;

	case INSN_LEA_RIP:
		if (!insn_reloc(file, insn)) {
			/* local function pointer reference without reloc */

			off = arch_jump_destination(insn);

			dest = find_insn(file, insn->sec, off);
			if (!dest) {
				WARN_INSN(insn, "corrupt function pointer reference");
				return 1;
			}

			return __validate_ibt_insn(file, insn, dest);
		}
		break;

	default:
		break;
	}

	/*
	 * An instruction can carry multiple relocations; walk them all,
	 * starting each search one byte past the previous reloc's offset,
	 * bounded by the end of the instruction.
	 */
	for (reloc = insn_reloc(file, insn);
	     reloc;
	     reloc = find_reloc_by_dest_range(file->elf, insn->sec,
					      reloc_offset(reloc) + 1,
					      (insn->offset + insn->len) - (reloc_offset(reloc) + 1))) {

		off = reloc->sym->offset + arch_insn_adjusted_addend(insn, reloc);

		/* Non-text targets (data references) are of no IBT concern. */
		dest = find_insn(file, reloc->sym->sec, off);
		if (!dest)
			continue;

		warnings += __validate_ibt_insn(file, insn, dest);
	}

	return warnings;
}
4698 
4699 static int validate_ibt_data_reloc(struct objtool_file *file,
4700 				   struct reloc *reloc)
4701 {
4702 	struct instruction *dest;
4703 
4704 	dest = find_insn(file, reloc->sym->sec,
4705 			 reloc->sym->offset + reloc_addend(reloc));
4706 	if (!dest)
4707 		return 0;
4708 
4709 	if (dest->type == INSN_ENDBR) {
4710 		mark_endbr_used(dest);
4711 		return 0;
4712 	}
4713 
4714 	if (dest->noendbr)
4715 		return 0;
4716 
4717 	WARN_FUNC(reloc->sec->base, reloc_offset(reloc),
4718 		  "data relocation to !ENDBR: %s", offstr(dest->sec, dest->offset));
4719 
4720 	return 1;
4721 }
4722 
4723 /*
4724  * Validate IBT rules and remove used ENDBR instructions from the seal list.
4725  * Unused ENDBR instructions will be annotated for sealing (i.e., replaced with
4726  * NOPs) later, in create_ibt_endbr_seal_sections().
4727  */
4728 static int validate_ibt(struct objtool_file *file)
4729 {
4730 	struct section *sec;
4731 	struct reloc *reloc;
4732 	struct instruction *insn;
4733 	int warnings = 0;
4734 
4735 	for_each_insn(file, insn)
4736 		warnings += validate_ibt_insn(file, insn);
4737 
4738 	for_each_sec(file->elf, sec) {
4739 
4740 		/* Already done by validate_ibt_insn() */
4741 		if (is_text_sec(sec))
4742 			continue;
4743 
4744 		if (!sec->rsec)
4745 			continue;
4746 
4747 		/*
4748 		 * These sections can reference text addresses, but not with
4749 		 * the intent to indirect branch to them.
4750 		 */
4751 		if ((!strncmp(sec->name, ".discard", 8) &&
4752 		     strcmp(sec->name, ".discard.ibt_endbr_noseal"))	||
4753 		    !strncmp(sec->name, ".debug", 6)			||
4754 		    !strcmp(sec->name, ".altinstructions")		||
4755 		    !strcmp(sec->name, ".ibt_endbr_seal")		||
4756 		    !strcmp(sec->name, ".kcfi_traps")			||
4757 		    !strcmp(sec->name, ".orc_unwind_ip")		||
4758 		    !strcmp(sec->name, ".retpoline_sites")		||
4759 		    !strcmp(sec->name, ".smp_locks")			||
4760 		    !strcmp(sec->name, ".static_call_sites")		||
4761 		    !strcmp(sec->name, "_error_injection_whitelist")	||
4762 		    !strcmp(sec->name, "_kprobe_blacklist")		||
4763 		    !strcmp(sec->name, "__bug_table")			||
4764 		    !strcmp(sec->name, "__ex_table")			||
4765 		    !strcmp(sec->name, "__jump_table")			||
4766 		    !strcmp(sec->name, ".init.klp_funcs")		||
4767 		    !strcmp(sec->name, "__mcount_loc")			||
4768 		    !strcmp(sec->name, ".llvm.call-graph-profile")	||
4769 		    !strcmp(sec->name, ".llvm_bb_addr_map")		||
4770 		    !strcmp(sec->name, "__tracepoints")			||
4771 		    !strcmp(sec->name, ".return_sites")			||
4772 		    !strcmp(sec->name, ".call_sites")			||
4773 		    !strcmp(sec->name, "__patchable_function_entries"))
4774 			continue;
4775 
4776 		for_each_reloc(sec->rsec, reloc)
4777 			warnings += validate_ibt_data_reloc(file, reloc);
4778 	}
4779 
4780 	return warnings;
4781 }
4782 
4783 static int validate_sls(struct objtool_file *file)
4784 {
4785 	struct instruction *insn, *next_insn;
4786 	int warnings = 0;
4787 
4788 	for_each_insn(file, insn) {
4789 		next_insn = next_insn_same_sec(file, insn);
4790 
4791 		if (insn->retpoline_safe)
4792 			continue;
4793 
4794 		switch (insn->type) {
4795 		case INSN_RETURN:
4796 			if (!next_insn || next_insn->type != INSN_TRAP) {
4797 				WARN_INSN(insn, "missing int3 after ret");
4798 				warnings++;
4799 			}
4800 
4801 			break;
4802 		case INSN_JUMP_DYNAMIC:
4803 			if (!next_insn || next_insn->type != INSN_TRAP) {
4804 				WARN_INSN(insn, "missing int3 after indirect jump");
4805 				warnings++;
4806 			}
4807 			break;
4808 		default:
4809 			break;
4810 		}
4811 	}
4812 
4813 	return warnings;
4814 }
4815 
4816 static int validate_reachable_instructions(struct objtool_file *file)
4817 {
4818 	struct instruction *insn, *prev_insn;
4819 	struct symbol *call_dest;
4820 	int warnings = 0;
4821 
4822 	if (file->ignore_unreachables)
4823 		return 0;
4824 
4825 	for_each_insn(file, insn) {
4826 		if (insn->visited || ignore_unreachable_insn(file, insn))
4827 			continue;
4828 
4829 		prev_insn = prev_insn_same_sec(file, insn);
4830 		if (prev_insn && prev_insn->dead_end) {
4831 			call_dest = insn_call_dest(prev_insn);
4832 			if (call_dest) {
4833 				WARN_INSN(insn, "%s() missing __noreturn in .c/.h or NORETURN() in noreturns.h",
4834 					  call_dest->name);
4835 				warnings++;
4836 				continue;
4837 			}
4838 		}
4839 
4840 		WARN_INSN(insn, "unreachable instruction");
4841 		warnings++;
4842 	}
4843 
4844 	return warnings;
4845 }
4846 
4847 __weak bool arch_absolute_reloc(struct elf *elf, struct reloc *reloc)
4848 {
4849 	unsigned int type = reloc_type(reloc);
4850 	size_t sz = elf_addr_size(elf);
4851 
4852 	return (sz == 8) ? (type == R_ABS64) : (type == R_ABS32);
4853 }
4854 
4855 static int check_abs_references(struct objtool_file *file)
4856 {
4857 	struct section *sec;
4858 	struct reloc *reloc;
4859 	int ret = 0;
4860 
4861 	for_each_sec(file->elf, sec) {
4862 		/* absolute references in non-loadable sections are fine */
4863 		if (!(sec->sh.sh_flags & SHF_ALLOC))
4864 			continue;
4865 
4866 		/* section must have an associated .rela section */
4867 		if (!sec->rsec)
4868 			continue;
4869 
4870 		/*
4871 		 * Special case for compiler generated metadata that is not
4872 		 * consumed until after boot.
4873 		 */
4874 		if (!strcmp(sec->name, "__patchable_function_entries"))
4875 			continue;
4876 
4877 		for_each_reloc(sec->rsec, reloc) {
4878 			if (arch_absolute_reloc(file->elf, reloc)) {
4879 				WARN("section %s has absolute relocation at offset 0x%llx",
4880 				     sec->name, (unsigned long long)reloc_offset(reloc));
4881 				ret++;
4882 			}
4883 		}
4884 	}
4885 	return ret;
4886 }
4887 
4888 struct insn_chunk {
4889 	void *addr;
4890 	struct insn_chunk *next;
4891 };
4892 
4893 /*
4894  * Reduce peak RSS usage by freeing insns memory before writing the ELF file,
4895  * which can trigger more allocations for .debug_* sections whose data hasn't
4896  * been read yet.
4897  */
4898 static void free_insns(struct objtool_file *file)
4899 {
4900 	struct instruction *insn;
4901 	struct insn_chunk *chunks = NULL, *chunk;
4902 
4903 	for_each_insn(file, insn) {
4904 		if (!insn->idx) {
4905 			chunk = malloc(sizeof(*chunk));
4906 			chunk->addr = insn;
4907 			chunk->next = chunks;
4908 			chunks = chunk;
4909 		}
4910 	}
4911 
4912 	for (chunk = chunks; chunk; chunk = chunk->next)
4913 		free(chunk->addr);
4914 }
4915 
4916 const char *objtool_disas_insn(struct instruction *insn)
4917 {
4918 	struct disas_context *dctx = objtool_disas_ctx;
4919 
4920 	if (!dctx)
4921 		return "";
4922 
4923 	disas_insn(dctx, insn);
4924 	return disas_result(dctx);
4925 }
4926 
/*
 * Main objtool entry point: decode the object file, run all enabled
 * validations, then create the requested annotation sections.
 *
 * Returns 0 on success; non-zero on error, or on warnings with --Werror.
 */
int check(struct objtool_file *file)
{
	struct disas_context *disas_ctx = NULL;
	int ret = 0, warnings = 0;

	/*
	 * Create a disassembly context if we might disassemble any
	 * instruction or function.
	 */
	if (opts.verbose || opts.backtrace || opts.trace || opts.disas) {
		disas_ctx = disas_context_create(file);
		if (!disas_ctx) {
			/* No disassembler available: degrade gracefully. */
			opts.disas = false;
			opts.trace = false;
		}
		objtool_disas_ctx = disas_ctx;
	}

	/* Set up the shared CFI states used throughout validation. */
	arch_initial_func_cfi_state(&initial_func_cfi);
	init_cfi_state(&init_cfi);
	init_cfi_state(&func_cfi);
	set_func_state(&func_cfi);
	init_cfi_state(&force_undefined_cfi);
	force_undefined_cfi.force_undefined = true;

	/* CFI hash sized relative to the symbol count of the object. */
	if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3))) {
		ret = -1;
		goto out;
	}

	cfi_hash_add(&init_cfi);
	cfi_hash_add(&func_cfi);

	ret = checksum_debug_init(file);
	if (ret)
		goto out;

	ret = decode_sections(file);
	if (ret)
		goto out;

	/* Nothing to validate or annotate in an object with no insns. */
	if (!nr_insns)
		goto out;

	/*
	 * Validation phase: these accumulate warnings rather than aborting,
	 * so all problems get reported in one run.
	 */
	if (opts.retpoline)
		warnings += validate_retpoline(file);

	if (validate_branch_enabled()) {
		int w = 0;

		w += validate_functions(file);
		w += validate_unwind_hints(file, NULL);
		/* Reachability is only meaningful if validation was clean. */
		if (!w)
			w += validate_reachable_instructions(file);

		warnings += w;

	} else if (opts.noinstr) {
		warnings += validate_noinstr_sections(file);
	}

	if (opts.unret) {
		/*
		 * Must be after validate_branch() and friends, it plays
		 * further games with insn->visited.
		 */
		warnings += validate_unrets(file);
	}

	if (opts.ibt)
		warnings += validate_ibt(file);

	if (opts.sls)
		warnings += validate_sls(file);

	/*
	 * Annotation phase: create the requested sections/symbols.  Any
	 * failure here is a hard error (ret), not a warning.
	 */
	if (opts.static_call) {
		ret = create_static_call_sections(file);
		if (ret)
			goto out;
	}

	if (opts.retpoline) {
		ret = create_retpoline_sites_sections(file);
		if (ret)
			goto out;
	}

	if (opts.cfi) {
		ret = create_cfi_sections(file);
		if (ret)
			goto out;
	}

	if (opts.rethunk) {
		ret = create_return_sites_sections(file);
		if (ret)
			goto out;

		if (opts.hack_skylake) {
			ret = create_direct_call_sections(file);
			if (ret)
				goto out;
		}
	}

	if (opts.mcount) {
		ret = create_mcount_loc_sections(file);
		if (ret)
			goto out;
	}

	if (opts.prefix) {
		ret = create_prefix_symbols(file);
		if (ret)
			goto out;
	}

	/* Must run after validate_ibt(), which prunes the seal list. */
	if (opts.ibt) {
		ret = create_ibt_endbr_seal_sections(file);
		if (ret)
			goto out;
	}

	if (opts.noabs)
		warnings += check_abs_references(file);

	if (opts.checksum) {
		ret = create_sym_checksum_section(file);
		if (ret)
			goto out;
	}

	if (opts.orc && nr_insns) {
		ret = orc_create(file);
		if (ret)
			goto out;
	}

	if (opts.stats) {
		printf("nr_insns_visited: %ld\n", nr_insns_visited);
		printf("nr_cfi: %ld\n", nr_cfi);
		printf("nr_cfi_reused: %ld\n", nr_cfi_reused);
		printf("nr_cfi_cache: %ld\n", nr_cfi_cache);
	}

out:
	/* With --Werror, any warning upgrades the exit status to failure. */
	if (ret || warnings) {
		if (opts.werror && warnings)
			ret = 1;

		if (opts.verbose) {
			if (opts.werror && warnings)
				WARN("%d warning(s) upgraded to errors", warnings);
			disas_warned_funcs(disas_ctx);
		}
	}

	if (opts.disas)
		disas_funcs(disas_ctx);

	if (disas_ctx) {
		disas_context_destroy(disas_ctx);
		objtool_disas_ctx = NULL;
	}

	/* Free insn memory before ELF write to reduce peak RSS. */
	free_insns(file);

	if (!ret && !warnings)
		return 0;

	/* On failure, optionally keep a backup of the original object. */
	if (opts.backup && make_backup())
		return 1;

	return ret;
}
5102