xref: /linux/tools/objtool/check.c (revision 2058f6d1660edc4a9bda9bee627792b352121b10)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
4  */
5 
6 #include <string.h>
7 #include <stdlib.h>
8 #include <inttypes.h>
9 #include <sys/mman.h>
10 
11 #include <objtool/builtin.h>
12 #include <objtool/cfi.h>
13 #include <objtool/arch.h>
14 #include <objtool/check.h>
15 #include <objtool/special.h>
16 #include <objtool/warn.h>
17 #include <objtool/checksum.h>
18 #include <objtool/util.h>
19 
20 #include <linux/objtool_types.h>
21 #include <linux/hashtable.h>
22 #include <linux/kernel.h>
23 #include <linux/static_call_types.h>
24 #include <linux/string.h>
25 
/* One entry in a linked list of alternative instruction sequences. */
struct alternative {
	struct alternative *next;
	struct instruction *insn;
};

/* CFI interning statistics, reported with --stats. */
static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache;

/* Architecture-provided CFI state at function entry. */
static struct cfi_init_state initial_func_cfi;
/* Shared template CFI states, interned into the cfi hash at startup. */
static struct cfi_state init_cfi;
static struct cfi_state func_cfi;
static struct cfi_state force_undefined_cfi;
37 
38 struct instruction *find_insn(struct objtool_file *file,
39 			      struct section *sec, unsigned long offset)
40 {
41 	struct instruction *insn;
42 
43 	hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) {
44 		if (insn->sec == sec && insn->offset == offset)
45 			return insn;
46 	}
47 
48 	return NULL;
49 }
50 
51 struct instruction *next_insn_same_sec(struct objtool_file *file,
52 				       struct instruction *insn)
53 {
54 	if (insn->idx == INSN_CHUNK_MAX)
55 		return find_insn(file, insn->sec, insn->offset + insn->len);
56 
57 	insn++;
58 	if (!insn->len)
59 		return NULL;
60 
61 	return insn;
62 }
63 
64 static struct instruction *next_insn_same_func(struct objtool_file *file,
65 					       struct instruction *insn)
66 {
67 	struct instruction *next = next_insn_same_sec(file, insn);
68 	struct symbol *func = insn_func(insn);
69 
70 	if (!func)
71 		return NULL;
72 
73 	if (next && insn_func(next) == func)
74 		return next;
75 
76 	/* Check if we're already in the subfunction: */
77 	if (func == func->cfunc)
78 		return NULL;
79 
80 	/* Move to the subfunction: */
81 	return find_insn(file, func->cfunc->sec, func->cfunc->offset);
82 }
83 
84 static struct instruction *prev_insn_same_sec(struct objtool_file *file,
85 					      struct instruction *insn)
86 {
87 	if (insn->idx == 0) {
88 		if (insn->prev_len)
89 			return find_insn(file, insn->sec, insn->offset - insn->prev_len);
90 		return NULL;
91 	}
92 
93 	return insn - 1;
94 }
95 
96 static struct instruction *prev_insn_same_sym(struct objtool_file *file,
97 					      struct instruction *insn)
98 {
99 	struct instruction *prev = prev_insn_same_sec(file, insn);
100 
101 	if (prev && insn_func(prev) == insn_func(insn))
102 		return prev;
103 
104 	return NULL;
105 }
106 
/* Iterate over every decoded instruction in every section of the file. */
#define for_each_insn(file, insn)					\
	for (struct section *__sec, *__fake = (struct section *)1;	\
	     __fake; __fake = NULL)					\
		for_each_sec(file->elf, __sec)				\
			sec_for_each_insn(file, __sec, insn)

/*
 * Iterate over a function's instructions, following the transition into
 * its cold subfunction (see next_insn_same_func()).
 */
#define func_for_each_insn(file, func, insn)				\
	for (insn = find_insn(file, func->sec, func->offset);		\
	     insn;							\
	     insn = next_insn_same_func(file, insn))

/* Iterate over a symbol's instructions, bounded by the symbol's size. */
#define sym_for_each_insn(file, sym, insn)				\
	for (insn = find_insn(file, sym->sec, sym->offset);		\
	     insn && insn->offset < sym->offset + sym->len;		\
	     insn = next_insn_same_sec(file, insn))

/* Walk backwards from (excluding) insn down to the symbol's start. */
#define sym_for_each_insn_continue_reverse(file, sym, insn)		\
	for (insn = prev_insn_same_sec(file, insn);			\
	     insn && insn->offset >= sym->offset;			\
	     insn = prev_insn_same_sec(file, insn))

/* Iterate from insn (inclusive) to the end of its section. */
#define sec_for_each_insn_from(file, insn)				\
	for (; insn; insn = next_insn_same_sec(file, insn))

/* Iterate from the instruction after insn to the end of its section. */
#define sec_for_each_insn_continue(file, insn)				\
	for (insn = next_insn_same_sec(file, insn); insn;		\
	     insn = next_insn_same_sec(file, insn))
134 
135 static inline struct symbol *insn_call_dest(struct instruction *insn)
136 {
137 	if (insn->type == INSN_JUMP_DYNAMIC ||
138 	    insn->type == INSN_CALL_DYNAMIC)
139 		return NULL;
140 
141 	return insn->_call_dest;
142 }
143 
144 static inline struct reloc *insn_jump_table(struct instruction *insn)
145 {
146 	if (insn->type == INSN_JUMP_DYNAMIC ||
147 	    insn->type == INSN_CALL_DYNAMIC)
148 		return insn->_jump_table;
149 
150 	return NULL;
151 }
152 
153 static inline unsigned long insn_jump_table_size(struct instruction *insn)
154 {
155 	if (insn->type == INSN_JUMP_DYNAMIC ||
156 	    insn->type == INSN_CALL_DYNAMIC)
157 		return insn->_jump_table_size;
158 
159 	return 0;
160 }
161 
162 static bool is_jump_table_jump(struct instruction *insn)
163 {
164 	struct alt_group *alt_group = insn->alt_group;
165 
166 	if (insn_jump_table(insn))
167 		return true;
168 
169 	/* Retpoline alternative for a jump table? */
170 	return alt_group && alt_group->orig_group &&
171 	       insn_jump_table(alt_group->orig_group->first_insn);
172 }
173 
174 static bool is_sibling_call(struct instruction *insn)
175 {
176 	/*
177 	 * Assume only STT_FUNC calls have jump-tables.
178 	 */
179 	if (insn_func(insn)) {
180 		/* An indirect jump is either a sibling call or a jump to a table. */
181 		if (insn->type == INSN_JUMP_DYNAMIC)
182 			return !is_jump_table_jump(insn);
183 	}
184 
185 	/* add_jump_destinations() sets insn_call_dest(insn) for sibling calls. */
186 	return (is_static_jump(insn) && insn_call_dest(insn));
187 }
188 
189 /*
190  * Checks if a function is a Rust "noreturn" one.
191  */
192 static bool is_rust_noreturn(const struct symbol *func)
193 {
194 	/*
195 	 * If it does not start with "_R", then it is not a Rust symbol.
196 	 */
197 	if (strncmp(func->name, "_R", 2))
198 		return false;
199 
200 	/*
201 	 * These are just heuristics -- we do not control the precise symbol
202 	 * name, due to the crate disambiguators (which depend on the compiler)
203 	 * as well as changes to the source code itself between versions (since
204 	 * these come from the Rust standard library).
205 	 */
206 	return str_ends_with(func->name, "_4core5sliceSp15copy_from_slice17len_mismatch_fail")		||
207 	       str_ends_with(func->name, "_4core6option13unwrap_failed")				||
208 	       str_ends_with(func->name, "_4core6result13unwrap_failed")				||
209 	       str_ends_with(func->name, "_4core9panicking5panic")					||
210 	       str_ends_with(func->name, "_4core9panicking9panic_fmt")					||
211 	       str_ends_with(func->name, "_4core9panicking14panic_explicit")				||
212 	       str_ends_with(func->name, "_4core9panicking14panic_nounwind")				||
213 	       str_ends_with(func->name, "_4core9panicking18panic_bounds_check")			||
214 	       str_ends_with(func->name, "_4core9panicking18panic_nounwind_fmt")			||
215 	       str_ends_with(func->name, "_4core9panicking19assert_failed_inner")			||
216 	       str_ends_with(func->name, "_4core9panicking30panic_null_pointer_dereference")		||
217 	       str_ends_with(func->name, "_4core9panicking36panic_misaligned_pointer_dereference")	||
218 	       str_ends_with(func->name, "_7___rustc17rust_begin_unwind")				||
219 	       strstr(func->name, "_4core9panicking13assert_failed")					||
220 	       strstr(func->name, "_4core9panicking11panic_const24panic_const_")			||
221 	       (strstr(func->name, "_4core5slice5index") &&
222 		strstr(func->name, "slice_") &&
223 		str_ends_with(func->name, "_fail"));
224 }
225 
226 /*
227  * This checks to see if the given function is a "noreturn" function.
228  *
229  * For global functions which are outside the scope of this object file, we
230  * have to keep a manual list of them.
231  *
232  * For local functions, we have to detect them manually by simply looking for
233  * the lack of a return instruction.
234  */
/*
 * Core of dead_end_function(): @recursion bounds the depth of sibling-call
 * chasing so that mutually tail-calling functions cannot loop forever.
 * Returns true only when we can prove the function never returns.
 */
static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
				int recursion)
{
	int i;
	struct instruction *insn;
	bool empty = true;

	/* Manual list of known global noreturn functions. */
#define NORETURN(func) __stringify(func),
	static const char * const global_noreturns[] = {
#include "noreturns.h"
	};
#undef NORETURN

	if (!func)
		return false;

	/* Global symbols: consult the manual noreturn lists. */
	if (!is_local_sym(func)) {
		if (is_rust_noreturn(func))
			return true;

		for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
			if (!strcmp(func->name, global_noreturns[i]))
				return true;
	}

	/* A weak symbol may be overridden by a returning implementation. */
	if (is_weak_sym(func))
		return false;

	if (!func->len)
		return false;

	insn = find_insn(file, func->sec, func->offset);
	if (!insn || !insn_func(insn))
		return false;

	/* Any return instruction means the function can return. */
	func_for_each_insn(file, func, insn) {
		empty = false;

		if (insn->type == INSN_RETURN)
			return false;
	}

	/* No instructions at all: nothing to prove. */
	if (empty)
		return false;

	/*
	 * A function can have a sibling call instead of a return.  In that
	 * case, the function's dead-end status depends on whether the target
	 * of the sibling call returns.
	 */
	func_for_each_insn(file, func, insn) {
		if (is_sibling_call(insn)) {
			struct instruction *dest = insn->jump_dest;

			if (!dest)
				/* sibling call to another file */
				return false;

			/* local sibling call */
			if (recursion == 5) {
				/*
				 * Infinite recursion: two functions have
				 * sibling calls to each other.  This is a very
				 * rare case.  It means they aren't dead ends.
				 */
				return false;
			}

			return __dead_end_function(file, insn_func(dest), recursion+1);
		}
	}

	return true;
}
309 
/* Public entry point: does @func never return to its caller? */
static bool dead_end_function(struct objtool_file *file, struct symbol *func)
{
	return __dead_end_function(file, func, 0);
}
314 
315 static void init_cfi_state(struct cfi_state *cfi)
316 {
317 	int i;
318 
319 	for (i = 0; i < CFI_NUM_REGS; i++) {
320 		cfi->regs[i].base = CFI_UNDEFINED;
321 		cfi->vals[i].base = CFI_UNDEFINED;
322 	}
323 	cfi->cfa.base = CFI_UNDEFINED;
324 	cfi->drap_reg = CFI_UNDEFINED;
325 	cfi->drap_offset = -1;
326 }
327 
328 static void init_insn_state(struct objtool_file *file, struct insn_state *state,
329 			    struct section *sec)
330 {
331 	memset(state, 0, sizeof(*state));
332 	init_cfi_state(&state->cfi);
333 
334 	if (opts.noinstr && sec)
335 		state->noinstr = sec->noinstr;
336 }
337 
338 static struct cfi_state *cfi_alloc(void)
339 {
340 	struct cfi_state *cfi = calloc(1, sizeof(struct cfi_state));
341 	if (!cfi) {
342 		ERROR_GLIBC("calloc");
343 		exit(1);
344 	}
345 	nr_cfi++;
346 	return cfi;
347 }
348 
/* Hash table used to deduplicate (intern) cfi_state objects. */
static int cfi_bits;
static struct hlist_head *cfi_hash;

/*
 * Compare two CFI states, ignoring the embedded hlist node.  Returns
 * nonzero when they differ (memcmp semantics).
 *
 * NOTE(review): this assumes 'hash' is the FIRST member of struct
 * cfi_state, so everything after it is covered by the comparison —
 * confirm against objtool/cfi.h if the struct layout changes.
 */
static inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2)
{
	return memcmp((void *)cfi1 + sizeof(cfi1->hash),
		      (void *)cfi2 + sizeof(cfi2->hash),
		      sizeof(struct cfi_state) - sizeof(struct hlist_node));
}

/* Hash a CFI state over the same byte range that cficmp() compares. */
static inline u32 cfi_key(struct cfi_state *cfi)
{
	return jhash((void *)cfi + sizeof(cfi->hash),
		     sizeof(*cfi) - sizeof(cfi->hash), 0);
}
364 
365 static struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi)
366 {
367 	struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
368 	struct cfi_state *obj;
369 
370 	hlist_for_each_entry(obj, head, hash) {
371 		if (!cficmp(cfi, obj)) {
372 			nr_cfi_cache++;
373 			return obj;
374 		}
375 	}
376 
377 	obj = cfi_alloc();
378 	*obj = *cfi;
379 	hlist_add_head(&obj->hash, head);
380 
381 	return obj;
382 }
383 
/* Unconditionally insert @cfi into the hash (no duplicate check). */
static void cfi_hash_add(struct cfi_state *cfi)
{
	struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];

	hlist_add_head(&cfi->hash, head);
}
390 
391 static void *cfi_hash_alloc(unsigned long size)
392 {
393 	cfi_bits = max(10, ilog2(size));
394 	cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits,
395 			PROT_READ|PROT_WRITE,
396 			MAP_PRIVATE|MAP_ANON, -1, 0);
397 	if (cfi_hash == (void *)-1L) {
398 		ERROR_GLIBC("mmap fail cfi_hash");
399 		cfi_hash = NULL;
400 	}  else if (opts.stats) {
401 		printf("cfi_bits: %d\n", cfi_bits);
402 	}
403 
404 	return cfi_hash;
405 }
406 
407 static unsigned long nr_insns;
408 static unsigned long nr_insns_visited;
409 
410 /*
411  * Call the arch-specific instruction decoder for all the instructions and add
412  * them to the global instruction list.
413  */
/*
 * Call the arch-specific instruction decoder for all the instructions and add
 * them to the global instruction list.
 *
 * Also classifies sections (text / noinstr / init) and binds each decoded
 * instruction to its enclosing symbol.  Returns 0 on success, -1 on error.
 */
static int decode_instructions(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;
	unsigned long offset;
	struct instruction *insn;

	for_each_sec(file->elf, sec) {
		struct instruction *insns = NULL;	/* current allocation chunk */
		u8 prev_len = 0;			/* length of previous insn, for backward walks */
		u8 idx = 0;				/* slot within the chunk */

		if (!is_text_sec(sec))
			continue;

		/* Alternative-replacement and discard sections aren't "real" text. */
		if (strcmp(sec->name, ".altinstr_replacement") &&
		    strcmp(sec->name, ".altinstr_aux") &&
		    strncmp(sec->name, ".discard.", 9))
			sec->text = true;

		if (!strcmp(sec->name, ".noinstr.text") ||
		    !strcmp(sec->name, ".entry.text") ||
		    !strcmp(sec->name, ".cpuidle.text") ||
		    !strncmp(sec->name, ".text..__x86.", 13))
			sec->noinstr = true;

		/*
		 * .init.text code is ran before userspace and thus doesn't
		 * strictly need retpolines, except for modules which are
		 * loaded late, they very much do need retpoline in their
		 * .init.text
		 */
		if (!strcmp(sec->name, ".init.text") && !opts.module)
			sec->init = true;

		for (offset = 0; offset < sec_size(sec); offset += insn->len) {
			/* Allocate instructions in chunks of INSN_CHUNK_SIZE. */
			if (!insns || idx == INSN_CHUNK_MAX) {
				insns = calloc(INSN_CHUNK_SIZE, sizeof(*insn));
				if (!insns) {
					ERROR_GLIBC("calloc");
					return -1;
				}
				idx = 0;
			} else {
				idx++;
			}
			insn = &insns[idx];
			insn->idx = idx;

			INIT_LIST_HEAD(&insn->call_node);
			insn->sec = sec;
			insn->offset = offset;
			insn->prev_len = prev_len;

			if (arch_decode_instruction(file, sec, offset, sec_size(sec) - offset, insn))
				return -1;

			prev_len = insn->len;

			/*
			 * By default, "ud2" is a dead end unless otherwise
			 * annotated, because GCC 7 inserts it for certain
			 * divide-by-zero cases.
			 */
			if (insn->type == INSN_BUG)
				insn->dead_end = true;

			hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset));
			nr_insns++;
		}

		/* Bind each instruction to its enclosing symbol. */
		sec_for_each_sym(sec, func) {
			if (!is_notype_sym(func) && !is_func_sym(func))
				continue;

			if (func->offset == sec_size(sec)) {
				/* Heuristic: likely an "end" symbol */
				if (is_notype_sym(func))
					continue;
				ERROR("%s(): STT_FUNC at end of section", func->name);
				return -1;
			}

			/* Skip aliases and embedded-insn symbols; the canonical one wins. */
			if (func->embedded_insn || func->alias != func)
				continue;

			if (!find_insn(file, sec, func->offset)) {
				ERROR("%s(): can't find starting instruction", func->name);
				return -1;
			}

			sym_for_each_insn(file, func, insn) {
				insn->sym = func;
				/* Track ENDBRs for IBT sealing (list_empty check avoids double-add). */
				if (is_func_sym(func) &&
				    insn->type == INSN_ENDBR &&
				    list_empty(&insn->call_node)) {
					if (insn->offset == func->offset) {
						list_add_tail(&insn->call_node, &file->endbr_list);
						file->nr_endbr++;
					} else {
						file->nr_endbr_int++;
					}
				}
			}
		}
	}

	if (opts.stats)
		printf("nr_insns: %lu\n", nr_insns);

	return 0;
}
526 
527 /*
528  * Read the pv_ops[] .data table to find the static initialized values.
529  */
/*
 * Read the pv_ops[] .data table to find the static initialized values.
 *
 * Walks every relocation inside the @symname table; each reloc identifies
 * one statically-initialized function pointer, which is registered via
 * objtool_pv_add() under its slot index.  Returns 0 on success.
 */
static int add_pv_ops(struct objtool_file *file, const char *symname)
{
	struct symbol *sym, *func;
	unsigned long off, end;
	struct reloc *reloc;
	int idx;

	sym = find_symbol_by_name(file->elf, symname);
	if (!sym)
		return 0;

	off = sym->offset;
	end = off + sym->len;
	for (;;) {
		reloc = find_reloc_by_dest_range(file->elf, sym->sec, off, end - off);
		if (!reloc)
			break;

		/* Slot index: byte offset into the table, in pointer units. */
		idx = (reloc_offset(reloc) - sym->offset) / sizeof(unsigned long);

		func = reloc->sym;
		/* Section-relative reloc: resolve the addend to a symbol. */
		if (is_sec_sym(func))
			func = find_symbol_by_offset(reloc->sym->sec,
						     reloc_addend(reloc));
		if (!func) {
			ERROR_FUNC(reloc->sym->sec, reloc_addend(reloc),
				   "can't find func at %s[%d]", symname, idx);
			return -1;
		}

		if (objtool_pv_add(file, idx, func))
			return -1;

		/* Continue the search just past this reloc. */
		off = reloc_offset(reloc) + 1;
		if (off > end)
			break;
	}

	return 0;
}
570 
571 /*
572  * Allocate and initialize file->pv_ops[].
573  */
574 static int init_pv_ops(struct objtool_file *file)
575 {
576 	static const char *pv_ops_tables[] = {
577 		"pv_ops",
578 		"xen_cpu_ops",
579 		"xen_irq_ops",
580 		"xen_mmu_ops",
581 		NULL,
582 	};
583 	const char *pv_ops;
584 	struct symbol *sym;
585 	int idx, nr;
586 
587 	if (!opts.noinstr)
588 		return 0;
589 
590 	file->pv_ops = NULL;
591 
592 	sym = find_symbol_by_name(file->elf, "pv_ops");
593 	if (!sym)
594 		return 0;
595 
596 	nr = sym->len / sizeof(unsigned long);
597 	file->pv_ops = calloc(nr, sizeof(struct pv_state));
598 	if (!file->pv_ops) {
599 		ERROR_GLIBC("calloc");
600 		return -1;
601 	}
602 
603 	for (idx = 0; idx < nr; idx++)
604 		INIT_LIST_HEAD(&file->pv_ops[idx].targets);
605 
606 	for (idx = 0; (pv_ops = pv_ops_tables[idx]); idx++) {
607 		if (add_pv_ops(file, pv_ops))
608 			return -1;
609 	}
610 
611 	return 0;
612 }
613 
614 static int create_static_call_sections(struct objtool_file *file)
615 {
616 	struct static_call_site *site;
617 	struct section *sec;
618 	struct instruction *insn;
619 	struct symbol *key_sym;
620 	char *key_name, *tmp;
621 	int idx;
622 
623 	sec = find_section_by_name(file->elf, ".static_call_sites");
624 	if (sec) {
625 		WARN("file already has .static_call_sites section, skipping");
626 		return 0;
627 	}
628 
629 	if (list_empty(&file->static_call_list))
630 		return 0;
631 
632 	idx = 0;
633 	list_for_each_entry(insn, &file->static_call_list, call_node)
634 		idx++;
635 
636 	sec = elf_create_section_pair(file->elf, ".static_call_sites",
637 				      sizeof(*site), idx, idx * 2);
638 	if (!sec)
639 		return -1;
640 
641 	/* Allow modules to modify the low bits of static_call_site::key */
642 	sec->sh.sh_flags |= SHF_WRITE;
643 
644 	idx = 0;
645 	list_for_each_entry(insn, &file->static_call_list, call_node) {
646 
647 		/* populate reloc for 'addr' */
648 		if (!elf_init_reloc_text_sym(file->elf, sec,
649 					     idx * sizeof(*site), idx * 2,
650 					     insn->sec, insn->offset))
651 			return -1;
652 
653 		/* find key symbol */
654 		key_name = strdup(insn_call_dest(insn)->name);
655 		if (!key_name) {
656 			ERROR_GLIBC("strdup");
657 			return -1;
658 		}
659 		if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR,
660 			    STATIC_CALL_TRAMP_PREFIX_LEN)) {
661 			ERROR("static_call: trampoline name malformed: %s", key_name);
662 			return -1;
663 		}
664 		tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN;
665 		memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN);
666 
667 		key_sym = find_symbol_by_name(file->elf, tmp);
668 		if (!key_sym) {
669 			if (!opts.module) {
670 				ERROR("static_call: can't find static_call_key symbol: %s", tmp);
671 				return -1;
672 			}
673 
674 			/*
675 			 * For modules(), the key might not be exported, which
676 			 * means the module can make static calls but isn't
677 			 * allowed to change them.
678 			 *
679 			 * In that case we temporarily set the key to be the
680 			 * trampoline address.  This is fixed up in
681 			 * static_call_add_module().
682 			 */
683 			key_sym = insn_call_dest(insn);
684 		}
685 
686 		/* populate reloc for 'key' */
687 		if (!elf_init_reloc_data_sym(file->elf, sec,
688 					     idx * sizeof(*site) + 4,
689 					     (idx * 2) + 1, key_sym,
690 					     is_sibling_call(insn) * STATIC_CALL_SITE_TAIL))
691 			return -1;
692 
693 		idx++;
694 	}
695 
696 	return 0;
697 }
698 
699 static int create_retpoline_sites_sections(struct objtool_file *file)
700 {
701 	struct instruction *insn;
702 	struct section *sec;
703 	int idx;
704 
705 	sec = find_section_by_name(file->elf, ".retpoline_sites");
706 	if (sec) {
707 		WARN("file already has .retpoline_sites, skipping");
708 		return 0;
709 	}
710 
711 	idx = 0;
712 	list_for_each_entry(insn, &file->retpoline_call_list, call_node)
713 		idx++;
714 
715 	if (!idx)
716 		return 0;
717 
718 	sec = elf_create_section_pair(file->elf, ".retpoline_sites",
719 				      sizeof(int), idx, idx);
720 	if (!sec)
721 		return -1;
722 
723 	idx = 0;
724 	list_for_each_entry(insn, &file->retpoline_call_list, call_node) {
725 
726 		if (!elf_init_reloc_text_sym(file->elf, sec,
727 					     idx * sizeof(int), idx,
728 					     insn->sec, insn->offset))
729 			return -1;
730 
731 		idx++;
732 	}
733 
734 	return 0;
735 }
736 
737 static int create_return_sites_sections(struct objtool_file *file)
738 {
739 	struct instruction *insn;
740 	struct section *sec;
741 	int idx;
742 
743 	sec = find_section_by_name(file->elf, ".return_sites");
744 	if (sec) {
745 		WARN("file already has .return_sites, skipping");
746 		return 0;
747 	}
748 
749 	idx = 0;
750 	list_for_each_entry(insn, &file->return_thunk_list, call_node)
751 		idx++;
752 
753 	if (!idx)
754 		return 0;
755 
756 	sec = elf_create_section_pair(file->elf, ".return_sites",
757 				      sizeof(int), idx, idx);
758 	if (!sec)
759 		return -1;
760 
761 	idx = 0;
762 	list_for_each_entry(insn, &file->return_thunk_list, call_node) {
763 
764 		if (!elf_init_reloc_text_sym(file->elf, sec,
765 					     idx * sizeof(int), idx,
766 					     insn->sec, insn->offset))
767 			return -1;
768 
769 		idx++;
770 	}
771 
772 	return 0;
773 }
774 
/*
 * Emit .ibt_endbr_seal: one text reloc per superfluous ENDBR instruction,
 * so the kernel can seal (overwrite) them at boot.  Also rejects the
 * deprecated magic init_module()/cleanup_module() names for modules.
 */
static int create_ibt_endbr_seal_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".ibt_endbr_seal");
	if (sec) {
		WARN("file already has .ibt_endbr_seal, skipping");
		return 0;
	}

	idx = 0;
	list_for_each_entry(insn, &file->endbr_list, call_node)
		idx++;

	if (opts.stats) {
		printf("ibt: ENDBR at function start: %d\n", file->nr_endbr);
		printf("ibt: ENDBR inside functions:  %d\n", file->nr_endbr_int);
		printf("ibt: superfluous ENDBR:       %d\n", idx);
	}

	if (!idx)
		return 0;

	sec = elf_create_section_pair(file->elf, ".ibt_endbr_seal",
				      sizeof(int), idx, idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->endbr_list, call_node) {

		/* Zero the raw entry; the reloc below supplies the value. */
		int *site = (int *)sec->data->d_buf + idx;
		struct symbol *sym = insn->sym;
		*site = 0;

		if (opts.module && sym && is_func_sym(sym) &&
		    insn->offset == sym->offset &&
		    (!strcmp(sym->name, "init_module") ||
		     !strcmp(sym->name, "cleanup_module"))) {
			ERROR("%s(): Magic init_module() function name is deprecated, use module_init(fn) instead",
			      sym->name);
			return -1;
		}

		if (!elf_init_reloc_text_sym(file->elf, sec,
					     idx * sizeof(int), idx,
					     insn->sec, insn->offset))
			return -1;

		idx++;
	}

	return 0;
}
831 
832 static int create_cfi_sections(struct objtool_file *file)
833 {
834 	struct section *sec;
835 	struct symbol *sym;
836 	int idx;
837 
838 	sec = find_section_by_name(file->elf, ".cfi_sites");
839 	if (sec) {
840 		WARN("file already has .cfi_sites section, skipping");
841 		return 0;
842 	}
843 
844 	idx = 0;
845 	for_each_sym(file->elf, sym) {
846 		if (!is_func_sym(sym))
847 			continue;
848 
849 		if (strncmp(sym->name, "__cfi_", 6))
850 			continue;
851 
852 		idx++;
853 	}
854 
855 	sec = elf_create_section_pair(file->elf, ".cfi_sites",
856 				      sizeof(unsigned int), idx, idx);
857 	if (!sec)
858 		return -1;
859 
860 	idx = 0;
861 	for_each_sym(file->elf, sym) {
862 		if (!is_func_sym(sym))
863 			continue;
864 
865 		if (strncmp(sym->name, "__cfi_", 6))
866 			continue;
867 
868 		if (!elf_init_reloc_text_sym(file->elf, sec,
869 					     idx * sizeof(unsigned int), idx,
870 					     sym->sec, sym->offset))
871 			return -1;
872 
873 		idx++;
874 	}
875 
876 	return 0;
877 }
878 
879 static int create_mcount_loc_sections(struct objtool_file *file)
880 {
881 	size_t addr_size = elf_addr_size(file->elf);
882 	struct instruction *insn;
883 	struct section *sec;
884 	int idx;
885 
886 	sec = find_section_by_name(file->elf, "__mcount_loc");
887 	if (sec) {
888 		WARN("file already has __mcount_loc section, skipping");
889 		return 0;
890 	}
891 
892 	if (list_empty(&file->mcount_loc_list))
893 		return 0;
894 
895 	idx = 0;
896 	list_for_each_entry(insn, &file->mcount_loc_list, call_node)
897 		idx++;
898 
899 	sec = elf_create_section_pair(file->elf, "__mcount_loc", addr_size,
900 				      idx, idx);
901 	if (!sec)
902 		return -1;
903 
904 	sec->sh.sh_addralign = addr_size;
905 
906 	idx = 0;
907 	list_for_each_entry(insn, &file->mcount_loc_list, call_node) {
908 
909 		struct reloc *reloc;
910 
911 		reloc = elf_init_reloc_text_sym(file->elf, sec, idx * addr_size, idx,
912 					       insn->sec, insn->offset);
913 		if (!reloc)
914 			return -1;
915 
916 		set_reloc_type(file->elf, reloc, addr_size == 8 ? R_ABS64 : R_ABS32);
917 
918 		idx++;
919 	}
920 
921 	return 0;
922 }
923 
924 static int create_direct_call_sections(struct objtool_file *file)
925 {
926 	struct instruction *insn;
927 	struct section *sec;
928 	int idx;
929 
930 	sec = find_section_by_name(file->elf, ".call_sites");
931 	if (sec) {
932 		WARN("file already has .call_sites section, skipping");
933 		return 0;
934 	}
935 
936 	if (list_empty(&file->call_list))
937 		return 0;
938 
939 	idx = 0;
940 	list_for_each_entry(insn, &file->call_list, call_node)
941 		idx++;
942 
943 	sec = elf_create_section_pair(file->elf, ".call_sites",
944 				      sizeof(unsigned int), idx, idx);
945 	if (!sec)
946 		return -1;
947 
948 	idx = 0;
949 	list_for_each_entry(insn, &file->call_list, call_node) {
950 
951 		if (!elf_init_reloc_text_sym(file->elf, sec,
952 					     idx * sizeof(unsigned int), idx,
953 					     insn->sec, insn->offset))
954 			return -1;
955 
956 		idx++;
957 	}
958 
959 	return 0;
960 }
961 
#ifdef BUILD_KLP
/*
 * Emit .discard.sym_checksum: one {addr, checksum} record per symbol with
 * a computed checksum, with a text reloc supplying the address.  Used for
 * livepatch (KLP) symbol matching.
 */
static int create_sym_checksum_section(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *sym;
	unsigned int idx = 0;
	struct sym_checksum *checksum;
	size_t entsize = sizeof(struct sym_checksum);

	sec = find_section_by_name(file->elf, ".discard.sym_checksum");
	if (sec) {
		if (!opts.dryrun)
			WARN("file already has .discard.sym_checksum section, skipping");

		return 0;
	}

	/* Count symbols that carry a checksum. */
	for_each_sym(file->elf, sym)
		if (sym->csum.checksum)
			idx++;

	if (!idx)
		return 0;

	sec = elf_create_section_pair(file->elf, ".discard.sym_checksum", entsize,
				      idx, idx);
	if (!sec)
		return -1;

	idx = 0;
	for_each_sym(file->elf, sym) {
		if (!sym->csum.checksum)
			continue;

		if (!elf_init_reloc(file->elf, sec->rsec, idx, idx * entsize,
				    sym, 0, R_TEXT64))
			return -1;

		checksum = (struct sym_checksum *)sec->data->d_buf + idx;
		checksum->addr = 0; /* reloc */
		checksum->checksum = sym->csum.checksum;

		mark_sec_changed(file->elf, sec, true);

		idx++;
	}

	return 0;
}
#else
/* Without BUILD_KLP there is nothing to emit; callers treat this as unsupported. */
static int create_sym_checksum_section(struct objtool_file *file) { return -EINVAL; }
#endif
1014 
1015 /*
1016  * Warnings shouldn't be reported for ignored functions.
1017  */
1018 static int add_ignores(struct objtool_file *file)
1019 {
1020 	struct section *rsec;
1021 	struct symbol *func;
1022 	struct reloc *reloc;
1023 
1024 	rsec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
1025 	if (!rsec)
1026 		return 0;
1027 
1028 	for_each_reloc(rsec, reloc) {
1029 		switch (reloc->sym->type) {
1030 		case STT_FUNC:
1031 			func = reloc->sym;
1032 			break;
1033 
1034 		case STT_SECTION:
1035 			func = find_func_by_offset(reloc->sym->sec, reloc_addend(reloc));
1036 			if (!func)
1037 				continue;
1038 			break;
1039 
1040 		default:
1041 			ERROR("unexpected relocation symbol type in %s: %d",
1042 			      rsec->name, reloc->sym->type);
1043 			return -1;
1044 		}
1045 
1046 		func->ignore = true;
1047 		if (func->cfunc)
1048 			func->cfunc->ignore = true;
1049 	}
1050 
1051 	return 0;
1052 }
1053 
1054 /*
1055  * This is a whitelist of functions that is allowed to be called with AC set.
1056  * The list is meant to be minimal and only contains compiler instrumentation
1057  * ABI and a few functions used to implement *_{to,from}_user() functions.
1058  *
1059  * These functions must not directly change AC, but may PUSHF/POPF.
1060  */
1061 static const char *uaccess_safe_builtin[] = {
1062 	/* KASAN */
1063 	"kasan_report",
1064 	"kasan_check_range",
1065 	/* KASAN out-of-line */
1066 	"__asan_loadN_noabort",
1067 	"__asan_load1_noabort",
1068 	"__asan_load2_noabort",
1069 	"__asan_load4_noabort",
1070 	"__asan_load8_noabort",
1071 	"__asan_load16_noabort",
1072 	"__asan_storeN_noabort",
1073 	"__asan_store1_noabort",
1074 	"__asan_store2_noabort",
1075 	"__asan_store4_noabort",
1076 	"__asan_store8_noabort",
1077 	"__asan_store16_noabort",
1078 	"__kasan_check_read",
1079 	"__kasan_check_write",
1080 	/* KASAN in-line */
1081 	"__asan_report_load_n_noabort",
1082 	"__asan_report_load1_noabort",
1083 	"__asan_report_load2_noabort",
1084 	"__asan_report_load4_noabort",
1085 	"__asan_report_load8_noabort",
1086 	"__asan_report_load16_noabort",
1087 	"__asan_report_store_n_noabort",
1088 	"__asan_report_store1_noabort",
1089 	"__asan_report_store2_noabort",
1090 	"__asan_report_store4_noabort",
1091 	"__asan_report_store8_noabort",
1092 	"__asan_report_store16_noabort",
1093 	/* KCSAN */
1094 	"__kcsan_check_access",
1095 	"__kcsan_mb",
1096 	"__kcsan_wmb",
1097 	"__kcsan_rmb",
1098 	"__kcsan_release",
1099 	"kcsan_found_watchpoint",
1100 	"kcsan_setup_watchpoint",
1101 	"kcsan_check_scoped_accesses",
1102 	"kcsan_disable_current",
1103 	"kcsan_enable_current_nowarn",
1104 	/* KCSAN/TSAN */
1105 	"__tsan_func_entry",
1106 	"__tsan_func_exit",
1107 	"__tsan_read_range",
1108 	"__tsan_write_range",
1109 	"__tsan_read1",
1110 	"__tsan_read2",
1111 	"__tsan_read4",
1112 	"__tsan_read8",
1113 	"__tsan_read16",
1114 	"__tsan_write1",
1115 	"__tsan_write2",
1116 	"__tsan_write4",
1117 	"__tsan_write8",
1118 	"__tsan_write16",
1119 	"__tsan_read_write1",
1120 	"__tsan_read_write2",
1121 	"__tsan_read_write4",
1122 	"__tsan_read_write8",
1123 	"__tsan_read_write16",
1124 	"__tsan_volatile_read1",
1125 	"__tsan_volatile_read2",
1126 	"__tsan_volatile_read4",
1127 	"__tsan_volatile_read8",
1128 	"__tsan_volatile_read16",
1129 	"__tsan_volatile_write1",
1130 	"__tsan_volatile_write2",
1131 	"__tsan_volatile_write4",
1132 	"__tsan_volatile_write8",
1133 	"__tsan_volatile_write16",
1134 	"__tsan_atomic8_load",
1135 	"__tsan_atomic16_load",
1136 	"__tsan_atomic32_load",
1137 	"__tsan_atomic64_load",
1138 	"__tsan_atomic8_store",
1139 	"__tsan_atomic16_store",
1140 	"__tsan_atomic32_store",
1141 	"__tsan_atomic64_store",
1142 	"__tsan_atomic8_exchange",
1143 	"__tsan_atomic16_exchange",
1144 	"__tsan_atomic32_exchange",
1145 	"__tsan_atomic64_exchange",
1146 	"__tsan_atomic8_fetch_add",
1147 	"__tsan_atomic16_fetch_add",
1148 	"__tsan_atomic32_fetch_add",
1149 	"__tsan_atomic64_fetch_add",
1150 	"__tsan_atomic8_fetch_sub",
1151 	"__tsan_atomic16_fetch_sub",
1152 	"__tsan_atomic32_fetch_sub",
1153 	"__tsan_atomic64_fetch_sub",
1154 	"__tsan_atomic8_fetch_and",
1155 	"__tsan_atomic16_fetch_and",
1156 	"__tsan_atomic32_fetch_and",
1157 	"__tsan_atomic64_fetch_and",
1158 	"__tsan_atomic8_fetch_or",
1159 	"__tsan_atomic16_fetch_or",
1160 	"__tsan_atomic32_fetch_or",
1161 	"__tsan_atomic64_fetch_or",
1162 	"__tsan_atomic8_fetch_xor",
1163 	"__tsan_atomic16_fetch_xor",
1164 	"__tsan_atomic32_fetch_xor",
1165 	"__tsan_atomic64_fetch_xor",
1166 	"__tsan_atomic8_fetch_nand",
1167 	"__tsan_atomic16_fetch_nand",
1168 	"__tsan_atomic32_fetch_nand",
1169 	"__tsan_atomic64_fetch_nand",
1170 	"__tsan_atomic8_compare_exchange_strong",
1171 	"__tsan_atomic16_compare_exchange_strong",
1172 	"__tsan_atomic32_compare_exchange_strong",
1173 	"__tsan_atomic64_compare_exchange_strong",
1174 	"__tsan_atomic8_compare_exchange_weak",
1175 	"__tsan_atomic16_compare_exchange_weak",
1176 	"__tsan_atomic32_compare_exchange_weak",
1177 	"__tsan_atomic64_compare_exchange_weak",
1178 	"__tsan_atomic8_compare_exchange_val",
1179 	"__tsan_atomic16_compare_exchange_val",
1180 	"__tsan_atomic32_compare_exchange_val",
1181 	"__tsan_atomic64_compare_exchange_val",
1182 	"__tsan_atomic_thread_fence",
1183 	"__tsan_atomic_signal_fence",
1184 	"__tsan_unaligned_read16",
1185 	"__tsan_unaligned_write16",
1186 	/* KCOV */
1187 	"write_comp_data",
1188 	"check_kcov_mode",
1189 	"__sanitizer_cov_trace_pc",
1190 	"__sanitizer_cov_trace_const_cmp1",
1191 	"__sanitizer_cov_trace_const_cmp2",
1192 	"__sanitizer_cov_trace_const_cmp4",
1193 	"__sanitizer_cov_trace_const_cmp8",
1194 	"__sanitizer_cov_trace_cmp1",
1195 	"__sanitizer_cov_trace_cmp2",
1196 	"__sanitizer_cov_trace_cmp4",
1197 	"__sanitizer_cov_trace_cmp8",
1198 	"__sanitizer_cov_trace_switch",
1199 	/* KMSAN */
1200 	"kmsan_copy_to_user",
1201 	"kmsan_disable_current",
1202 	"kmsan_enable_current",
1203 	"kmsan_report",
1204 	"kmsan_unpoison_entry_regs",
1205 	"kmsan_unpoison_memory",
1206 	"__msan_chain_origin",
1207 	"__msan_get_context_state",
1208 	"__msan_instrument_asm_store",
1209 	"__msan_metadata_ptr_for_load_1",
1210 	"__msan_metadata_ptr_for_load_2",
1211 	"__msan_metadata_ptr_for_load_4",
1212 	"__msan_metadata_ptr_for_load_8",
1213 	"__msan_metadata_ptr_for_load_n",
1214 	"__msan_metadata_ptr_for_store_1",
1215 	"__msan_metadata_ptr_for_store_2",
1216 	"__msan_metadata_ptr_for_store_4",
1217 	"__msan_metadata_ptr_for_store_8",
1218 	"__msan_metadata_ptr_for_store_n",
1219 	"__msan_poison_alloca",
1220 	"__msan_warning",
1221 	/* UBSAN */
1222 	"ubsan_type_mismatch_common",
1223 	"__ubsan_handle_type_mismatch",
1224 	"__ubsan_handle_type_mismatch_v1",
1225 	"__ubsan_handle_shift_out_of_bounds",
1226 	"__ubsan_handle_load_invalid_value",
1227 	/* KSTACK_ERASE */
1228 	"__sanitizer_cov_stack_depth",
1229 	/* TRACE_BRANCH_PROFILING */
1230 	"ftrace_likely_update",
1231 	/* STACKPROTECTOR */
1232 	"__stack_chk_fail",
1233 	/* misc */
1234 	"csum_partial_copy_generic",
1235 	"copy_mc_fragile",
1236 	"copy_mc_fragile_handle_tail",
1237 	"copy_mc_enhanced_fast_string",
1238 	"rep_stos_alternative",
1239 	"rep_movs_alternative",
1240 	"__copy_user_nocache",
1241 	NULL
1242 };
1243 
1244 static void add_uaccess_safe(struct objtool_file *file)
1245 {
1246 	struct symbol *func;
1247 	const char **name;
1248 
1249 	if (!opts.uaccess)
1250 		return;
1251 
1252 	for (name = uaccess_safe_builtin; *name; name++) {
1253 		func = find_symbol_by_name(file->elf, *name);
1254 		if (!func)
1255 			continue;
1256 
1257 		func->uaccess_safe = true;
1258 	}
1259 }
1260 
1261 /*
1262  * Symbols that replace INSN_CALL_DYNAMIC, every (tail) call to such a symbol
1263  * will be added to the .retpoline_sites section.
1264  */
1265 __weak bool arch_is_retpoline(struct symbol *sym)
1266 {
1267 	return false;
1268 }
1269 
1270 /*
1271  * Symbols that replace INSN_RETURN, every (tail) call to such a symbol
1272  * will be added to the .return_sites section.
1273  */
1274 __weak bool arch_is_rethunk(struct symbol *sym)
1275 {
1276 	return false;
1277 }
1278 
1279 /*
1280  * Symbols that are embedded inside other instructions, because sometimes crazy
1281  * code exists. These are mostly ignored for validation purposes.
1282  */
1283 __weak bool arch_is_embedded_insn(struct symbol *sym)
1284 {
1285 	return false;
1286 }
1287 
1288 static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
1289 {
1290 	struct reloc *reloc;
1291 
1292 	if (insn->no_reloc)
1293 		return NULL;
1294 
1295 	if (!file)
1296 		return NULL;
1297 
1298 	reloc = find_reloc_by_dest_range(file->elf, insn->sec,
1299 					 insn->offset, insn->len);
1300 	if (!reloc) {
1301 		insn->no_reloc = 1;
1302 		return NULL;
1303 	}
1304 
1305 	return reloc;
1306 }
1307 
1308 static void remove_insn_ops(struct instruction *insn)
1309 {
1310 	struct stack_op *op, *next;
1311 
1312 	for (op = insn->stack_ops; op; op = next) {
1313 		next = op->next;
1314 		free(op);
1315 	}
1316 	insn->stack_ops = NULL;
1317 }
1318 
/*
 * Classify a direct call site and route it onto the matching objtool_file
 * list (static call, retpoline call, mcount location, plain call).  Depending
 * on options, some calls are also patched out in place in the object file.
 *
 * Returns 0 on success, -1 on ELF write failure.
 */
static int annotate_call_site(struct objtool_file *file,
			       struct instruction *insn, bool sibling)
{
	struct reloc *reloc = insn_reloc(file, insn);
	struct symbol *sym = insn_call_dest(insn);

	/*
	 * NOTE(review): when there is no resolved call destination, the
	 * callee is taken from the relocation; this dereferences 'reloc'
	 * without a NULL check, so callers presumably guarantee a reloc
	 * exists in that case -- confirm before relying on it.
	 */
	if (!sym)
		sym = reloc->sym;

	if (sym->static_call_tramp) {
		list_add_tail(&insn->call_node, &file->static_call_list);
		return 0;
	}

	if (sym->retpoline_thunk) {
		list_add_tail(&insn->call_node, &file->retpoline_call_list);
		return 0;
	}

	/*
	 * Many compilers cannot disable KCOV or sanitizer calls with a function
	 * attribute so they need a little help, NOP out any such calls from
	 * noinstr text.
	 */
	if (opts.hack_noinstr && insn->sec->noinstr && sym->profiling_func) {
		/* Kill the relocation so the patched bytes stay patched. */
		if (reloc)
			set_reloc_type(file->elf, reloc, R_NONE);

		/* Tail calls become a RET; regular calls become a NOP. */
		if (elf_write_insn(file->elf, insn->sec,
				   insn->offset, insn->len,
				   sibling ? arch_ret_insn(insn->len)
					   : arch_nop_insn(insn->len))) {
			return -1;
		}

		insn->type = sibling ? INSN_RETURN : INSN_NOP;

		if (sibling) {
			/*
			 * We've replaced the tail-call JMP insn by two new
			 * insn: RET; INT3, except we only have a single struct
			 * insn here. Mark it retpoline_safe to avoid the SLS
			 * warning, instead of adding another insn.
			 */
			insn->retpoline_safe = true;
		}

		return 0;
	}

	if (opts.mcount && sym->fentry) {
		if (sibling)
			WARN_INSN(insn, "tail call to __fentry__ !?!?");
		if (opts.mnop) {
			/* NOP out the __fentry__ call; ftrace patches it at runtime. */
			if (reloc)
				set_reloc_type(file->elf, reloc, R_NONE);

			if (elf_write_insn(file->elf, insn->sec,
					   insn->offset, insn->len,
					   arch_nop_insn(insn->len))) {
				return -1;
			}

			insn->type = INSN_NOP;
		}

		/* Record the call site for the __mcount_loc section. */
		list_add_tail(&insn->call_node, &file->mcount_loc_list);
		return 0;
	}

	if (insn->type == INSN_CALL && !insn->sec->init &&
	    !insn->_call_dest->embedded_insn)
		list_add_tail(&insn->call_node, &file->call_list);

	/* Calls to noreturn functions don't fall through. */
	if (!sibling && dead_end_function(file, sym))
		insn->dead_end = true;

	return 0;
}
1398 
1399 static int add_call_dest(struct objtool_file *file, struct instruction *insn,
1400 			  struct symbol *dest, bool sibling)
1401 {
1402 	insn->_call_dest = dest;
1403 	if (!dest)
1404 		return 0;
1405 
1406 	/*
1407 	 * Whatever stack impact regular CALLs have, should be undone
1408 	 * by the RETURN of the called function.
1409 	 *
1410 	 * Annotated intra-function calls retain the stack_ops but
1411 	 * are converted to JUMP, see read_intra_function_calls().
1412 	 */
1413 	remove_insn_ops(insn);
1414 
1415 	return annotate_call_site(file, insn, sibling);
1416 }
1417 
1418 static int add_retpoline_call(struct objtool_file *file, struct instruction *insn)
1419 {
1420 	/*
1421 	 * Retpoline calls/jumps are really dynamic calls/jumps in disguise,
1422 	 * so convert them accordingly.
1423 	 */
1424 	switch (insn->type) {
1425 	case INSN_CALL:
1426 		insn->type = INSN_CALL_DYNAMIC;
1427 		break;
1428 	case INSN_JUMP_UNCONDITIONAL:
1429 		insn->type = INSN_JUMP_DYNAMIC;
1430 		break;
1431 	case INSN_JUMP_CONDITIONAL:
1432 		insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;
1433 		break;
1434 	default:
1435 		return 0;
1436 	}
1437 
1438 	insn->retpoline_safe = true;
1439 
1440 	/*
1441 	 * Whatever stack impact regular CALLs have, should be undone
1442 	 * by the RETURN of the called function.
1443 	 *
1444 	 * Annotated intra-function calls retain the stack_ops but
1445 	 * are converted to JUMP, see read_intra_function_calls().
1446 	 */
1447 	remove_insn_ops(insn);
1448 
1449 	return annotate_call_site(file, insn, false);
1450 }
1451 
1452 static void add_return_call(struct objtool_file *file, struct instruction *insn, bool add)
1453 {
1454 	/*
1455 	 * Return thunk tail calls are really just returns in disguise,
1456 	 * so convert them accordingly.
1457 	 */
1458 	insn->type = INSN_RETURN;
1459 	insn->retpoline_safe = true;
1460 
1461 	if (add)
1462 		list_add_tail(&insn->call_node, &file->return_thunk_list);
1463 }
1464 
1465 static bool is_first_func_insn(struct objtool_file *file,
1466 			       struct instruction *insn)
1467 {
1468 	struct symbol *func = insn_func(insn);
1469 
1470 	if (!func)
1471 		return false;
1472 
1473 	if (insn->offset == func->offset)
1474 		return true;
1475 
1476 	/* Allow direct CALL/JMP past ENDBR */
1477 	if (opts.ibt) {
1478 		struct instruction *prev = prev_insn_same_sym(file, insn);
1479 
1480 		if (prev && prev->type == INSN_ENDBR &&
1481 		    insn->offset == func->offset + prev->len)
1482 			return true;
1483 	}
1484 
1485 	return false;
1486 }
1487 
1488 /*
1489  * Find the destination instructions for all jumps.
1490  */
1491 static int add_jump_destinations(struct objtool_file *file)
1492 {
1493 	struct instruction *insn;
1494 	struct reloc *reloc;
1495 
1496 	for_each_insn(file, insn) {
1497 		struct symbol *func = insn_func(insn);
1498 		struct instruction *dest_insn;
1499 		struct section *dest_sec;
1500 		struct symbol *dest_sym;
1501 		unsigned long dest_off;
1502 
1503 		if (!is_static_jump(insn))
1504 			continue;
1505 
1506 		if (insn->jump_dest) {
1507 			/*
1508 			 * handle_group_alt() may have previously set
1509 			 * 'jump_dest' for some alternatives.
1510 			 */
1511 			continue;
1512 		}
1513 
1514 		reloc = insn_reloc(file, insn);
1515 		if (!reloc) {
1516 			dest_sec = insn->sec;
1517 			dest_off = arch_jump_destination(insn);
1518 			dest_sym = dest_sec->sym;
1519 		} else {
1520 			dest_sym = reloc->sym;
1521 			if (is_undef_sym(dest_sym)) {
1522 				if (dest_sym->retpoline_thunk) {
1523 					if (add_retpoline_call(file, insn))
1524 						return -1;
1525 					continue;
1526 				}
1527 
1528 				if (dest_sym->return_thunk) {
1529 					add_return_call(file, insn, true);
1530 					continue;
1531 				}
1532 
1533 				/* External symbol */
1534 				if (func) {
1535 					/* External sibling call */
1536 					if (add_call_dest(file, insn, dest_sym, true))
1537 						return -1;
1538 					continue;
1539 				}
1540 
1541 				/* Non-func asm code jumping to external symbol */
1542 				continue;
1543 			}
1544 
1545 			dest_sec = dest_sym->sec;
1546 			dest_off = dest_sym->offset + arch_insn_adjusted_addend(insn, reloc);
1547 		}
1548 
1549 		dest_insn = find_insn(file, dest_sec, dest_off);
1550 		if (!dest_insn) {
1551 			struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off);
1552 
1553 			/*
1554 			 * retbleed_untrain_ret() jumps to
1555 			 * __x86_return_thunk(), but objtool can't find
1556 			 * the thunk's starting RET instruction,
1557 			 * because the RET is also in the middle of
1558 			 * another instruction.  Objtool only knows
1559 			 * about the outer instruction.
1560 			 */
1561 			if (sym && sym->embedded_insn) {
1562 				add_return_call(file, insn, false);
1563 				continue;
1564 			}
1565 
1566 			/*
1567 			 * GCOV/KCOV dead code can jump to the end of
1568 			 * the function/section.
1569 			 */
1570 			if (file->ignore_unreachables && func &&
1571 			    dest_sec == insn->sec &&
1572 			    dest_off == func->offset + func->len)
1573 				continue;
1574 
1575 			ERROR_INSN(insn, "can't find jump dest instruction at %s",
1576 				   offstr(dest_sec, dest_off));
1577 			return -1;
1578 		}
1579 
1580 		if (!dest_sym || is_sec_sym(dest_sym)) {
1581 			dest_sym = dest_insn->sym;
1582 			if (!dest_sym)
1583 				goto set_jump_dest;
1584 		}
1585 
1586 		if (dest_sym->retpoline_thunk && dest_insn->offset == dest_sym->offset) {
1587 			if (add_retpoline_call(file, insn))
1588 				return -1;
1589 			continue;
1590 		}
1591 
1592 		if (dest_sym->return_thunk && dest_insn->offset == dest_sym->offset) {
1593 			add_return_call(file, insn, true);
1594 			continue;
1595 		}
1596 
1597 		if (!insn->sym || insn->sym == dest_insn->sym)
1598 			goto set_jump_dest;
1599 
1600 		/*
1601 		 * Internal cross-function jump.
1602 		 */
1603 
1604 		/*
1605 		 * For GCC 8+, create parent/child links for any cold
1606 		 * subfunctions.  This is _mostly_ redundant with a
1607 		 * similar initialization in read_symbols().
1608 		 *
1609 		 * If a function has aliases, we want the *first* such
1610 		 * function in the symbol table to be the subfunction's
1611 		 * parent.  In that case we overwrite the
1612 		 * initialization done in read_symbols().
1613 		 *
1614 		 * However this code can't completely replace the
1615 		 * read_symbols() code because this doesn't detect the
1616 		 * case where the parent function's only reference to a
1617 		 * subfunction is through a jump table.
1618 		 */
1619 		if (func && dest_sym->cold) {
1620 			func->cfunc = dest_sym;
1621 			dest_sym->pfunc = func;
1622 			goto set_jump_dest;
1623 		}
1624 
1625 		if (is_first_func_insn(file, dest_insn)) {
1626 			/* Internal sibling call */
1627 			if (add_call_dest(file, insn, dest_sym, true))
1628 				return -1;
1629 			continue;
1630 		}
1631 
1632 set_jump_dest:
1633 		insn->jump_dest = dest_insn;
1634 	}
1635 
1636 	return 0;
1637 }
1638 
/*
 * Look up the callee at a section offset: prefer a function symbol,
 * fall back to any symbol covering the offset.
 */
static struct symbol *find_call_destination(struct section *sec, unsigned long offset)
{
	struct symbol *call_dest = find_func_by_offset(sec, offset);

	return call_dest ?: find_symbol_by_offset(sec, offset);
}
1649 
1650 /*
1651  * Find the destination instructions for all calls.
1652  */
1653 static int add_call_destinations(struct objtool_file *file)
1654 {
1655 	struct instruction *insn;
1656 	unsigned long dest_off;
1657 	struct symbol *dest;
1658 	struct reloc *reloc;
1659 
1660 	for_each_insn(file, insn) {
1661 		struct symbol *func = insn_func(insn);
1662 		if (insn->type != INSN_CALL)
1663 			continue;
1664 
1665 		reloc = insn_reloc(file, insn);
1666 		if (!reloc) {
1667 			dest_off = arch_jump_destination(insn);
1668 			dest = find_call_destination(insn->sec, dest_off);
1669 
1670 			if (add_call_dest(file, insn, dest, false))
1671 				return -1;
1672 
1673 			if (func && func->ignore)
1674 				continue;
1675 
1676 			if (!insn_call_dest(insn)) {
1677 				ERROR_INSN(insn, "unannotated intra-function call");
1678 				return -1;
1679 			}
1680 
1681 			if (func && !is_func_sym(insn_call_dest(insn))) {
1682 				ERROR_INSN(insn, "unsupported call to non-function");
1683 				return -1;
1684 			}
1685 
1686 		} else if (is_sec_sym(reloc->sym)) {
1687 			dest_off = arch_insn_adjusted_addend(insn, reloc);
1688 			dest = find_call_destination(reloc->sym->sec, dest_off);
1689 			if (!dest) {
1690 				ERROR_INSN(insn, "can't find call dest symbol at %s+0x%lx",
1691 					   reloc->sym->sec->name, dest_off);
1692 				return -1;
1693 			}
1694 
1695 			if (add_call_dest(file, insn, dest, false))
1696 				return -1;
1697 
1698 		} else if (reloc->sym->retpoline_thunk) {
1699 			if (add_retpoline_call(file, insn))
1700 				return -1;
1701 
1702 		} else {
1703 			if (add_call_dest(file, insn, reloc->sym, false))
1704 				return -1;
1705 		}
1706 	}
1707 
1708 	return 0;
1709 }
1710 
1711 /*
1712  * The .alternatives section requires some extra special care over and above
1713  * other special sections because alternatives are patched in place.
1714  */
1715 static int handle_group_alt(struct objtool_file *file,
1716 			    struct special_alt *special_alt,
1717 			    struct instruction *orig_insn,
1718 			    struct instruction **new_insn)
1719 {
1720 	struct instruction *last_new_insn = NULL, *insn, *nop = NULL;
1721 	struct alt_group *orig_alt_group, *new_alt_group;
1722 	unsigned long dest_off;
1723 
1724 	orig_alt_group = orig_insn->alt_group;
1725 	if (!orig_alt_group) {
1726 		struct instruction *last_orig_insn = NULL;
1727 
1728 		orig_alt_group = calloc(1, sizeof(*orig_alt_group));
1729 		if (!orig_alt_group) {
1730 			ERROR_GLIBC("calloc");
1731 			return -1;
1732 		}
1733 		orig_alt_group->cfi = calloc(special_alt->orig_len,
1734 					     sizeof(struct cfi_state *));
1735 		if (!orig_alt_group->cfi) {
1736 			ERROR_GLIBC("calloc");
1737 			return -1;
1738 		}
1739 
1740 		insn = orig_insn;
1741 		sec_for_each_insn_from(file, insn) {
1742 			if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
1743 				break;
1744 
1745 			insn->alt_group = orig_alt_group;
1746 			last_orig_insn = insn;
1747 		}
1748 		orig_alt_group->orig_group = NULL;
1749 		orig_alt_group->first_insn = orig_insn;
1750 		orig_alt_group->last_insn = last_orig_insn;
1751 		orig_alt_group->nop = NULL;
1752 		orig_alt_group->ignore = orig_insn->ignore_alts;
1753 	} else {
1754 		if (orig_alt_group->last_insn->offset + orig_alt_group->last_insn->len -
1755 		    orig_alt_group->first_insn->offset != special_alt->orig_len) {
1756 			ERROR_INSN(orig_insn, "weirdly overlapping alternative! %ld != %d",
1757 				   orig_alt_group->last_insn->offset +
1758 				   orig_alt_group->last_insn->len -
1759 				   orig_alt_group->first_insn->offset,
1760 				   special_alt->orig_len);
1761 			return -1;
1762 		}
1763 	}
1764 
1765 	new_alt_group = calloc(1, sizeof(*new_alt_group));
1766 	if (!new_alt_group) {
1767 		ERROR_GLIBC("calloc");
1768 		return -1;
1769 	}
1770 
1771 	if (special_alt->new_len < special_alt->orig_len) {
1772 		/*
1773 		 * Insert a fake nop at the end to make the replacement
1774 		 * alt_group the same size as the original.  This is needed to
1775 		 * allow propagate_alt_cfi() to do its magic.  When the last
1776 		 * instruction affects the stack, the instruction after it (the
1777 		 * nop) will propagate the new state to the shared CFI array.
1778 		 */
1779 		nop = calloc(1, sizeof(*nop));
1780 		if (!nop) {
1781 			ERROR_GLIBC("calloc");
1782 			return -1;
1783 		}
1784 		memset(nop, 0, sizeof(*nop));
1785 
1786 		nop->sec = special_alt->new_sec;
1787 		nop->offset = special_alt->new_off + special_alt->new_len;
1788 		nop->len = special_alt->orig_len - special_alt->new_len;
1789 		nop->type = INSN_NOP;
1790 		nop->sym = orig_insn->sym;
1791 		nop->alt_group = new_alt_group;
1792 		nop->fake = 1;
1793 	}
1794 
1795 	if (!special_alt->new_len) {
1796 		*new_insn = nop;
1797 		goto end;
1798 	}
1799 
1800 	insn = *new_insn;
1801 	sec_for_each_insn_from(file, insn) {
1802 		struct reloc *alt_reloc;
1803 
1804 		if (insn->offset >= special_alt->new_off + special_alt->new_len)
1805 			break;
1806 
1807 		last_new_insn = insn;
1808 
1809 		insn->sym = orig_insn->sym;
1810 		insn->alt_group = new_alt_group;
1811 
1812 		/*
1813 		 * Since alternative replacement code is copy/pasted by the
1814 		 * kernel after applying relocations, generally such code can't
1815 		 * have relative-address relocation references to outside the
1816 		 * .altinstr_replacement section, unless the arch's
1817 		 * alternatives code can adjust the relative offsets
1818 		 * accordingly.
1819 		 */
1820 		alt_reloc = insn_reloc(file, insn);
1821 		if (alt_reloc && arch_pc_relative_reloc(alt_reloc) &&
1822 		    !arch_support_alt_relocation(special_alt, insn, alt_reloc)) {
1823 
1824 			ERROR_INSN(insn, "unsupported relocation in alternatives section");
1825 			return -1;
1826 		}
1827 
1828 		if (!is_static_jump(insn))
1829 			continue;
1830 
1831 		if (!insn->immediate)
1832 			continue;
1833 
1834 		dest_off = arch_jump_destination(insn);
1835 		if (dest_off == special_alt->new_off + special_alt->new_len) {
1836 			insn->jump_dest = next_insn_same_sec(file, orig_alt_group->last_insn);
1837 			if (!insn->jump_dest) {
1838 				ERROR_INSN(insn, "can't find alternative jump destination");
1839 				return -1;
1840 			}
1841 		}
1842 	}
1843 
1844 	if (!last_new_insn) {
1845 		ERROR_FUNC(special_alt->new_sec, special_alt->new_off,
1846 			   "can't find last new alternative instruction");
1847 		return -1;
1848 	}
1849 
1850 end:
1851 	new_alt_group->orig_group = orig_alt_group;
1852 	new_alt_group->first_insn = *new_insn;
1853 	new_alt_group->last_insn = last_new_insn;
1854 	new_alt_group->nop = nop;
1855 	new_alt_group->ignore = (*new_insn)->ignore_alts;
1856 	new_alt_group->cfi = orig_alt_group->cfi;
1857 	return 0;
1858 }
1859 
1860 /*
1861  * A jump table entry can either convert a nop to a jump or a jump to a nop.
1862  * If the original instruction is a jump, make the alt entry an effective nop
1863  * by just skipping the original instruction.
1864  */
1865 static int handle_jump_alt(struct objtool_file *file,
1866 			   struct special_alt *special_alt,
1867 			   struct instruction *orig_insn,
1868 			   struct instruction **new_insn)
1869 {
1870 	if (orig_insn->type != INSN_JUMP_UNCONDITIONAL &&
1871 	    orig_insn->type != INSN_NOP) {
1872 
1873 		ERROR_INSN(orig_insn, "unsupported instruction at jump label");
1874 		return -1;
1875 	}
1876 
1877 	if (opts.hack_jump_label && special_alt->key_addend & 2) {
1878 		struct reloc *reloc = insn_reloc(file, orig_insn);
1879 
1880 		if (reloc)
1881 			set_reloc_type(file->elf, reloc, R_NONE);
1882 
1883 		if (elf_write_insn(file->elf, orig_insn->sec,
1884 				   orig_insn->offset, orig_insn->len,
1885 				   arch_nop_insn(orig_insn->len))) {
1886 			return -1;
1887 		}
1888 
1889 		orig_insn->type = INSN_NOP;
1890 	}
1891 
1892 	if (orig_insn->type == INSN_NOP) {
1893 		if (orig_insn->len == 2)
1894 			file->jl_nop_short++;
1895 		else
1896 			file->jl_nop_long++;
1897 
1898 		return 0;
1899 	}
1900 
1901 	if (orig_insn->len == 2)
1902 		file->jl_short++;
1903 	else
1904 		file->jl_long++;
1905 
1906 	*new_insn = next_insn_same_sec(file, orig_insn);
1907 	return 0;
1908 }
1909 
1910 /*
1911  * Read all the special sections which have alternate instructions which can be
1912  * patched in or redirected to at runtime.  Each instruction having alternate
1913  * instruction(s) has them added to its insn->alts list, which will be
1914  * traversed in validate_branch().
1915  */
1916 static int add_special_section_alts(struct objtool_file *file)
1917 {
1918 	struct list_head special_alts;
1919 	struct instruction *orig_insn, *new_insn;
1920 	struct special_alt *special_alt, *tmp;
1921 	struct alternative *alt;
1922 
1923 	if (special_get_alts(file->elf, &special_alts))
1924 		return -1;
1925 
1926 	list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {
1927 
1928 		orig_insn = find_insn(file, special_alt->orig_sec,
1929 				      special_alt->orig_off);
1930 		if (!orig_insn) {
1931 			ERROR_FUNC(special_alt->orig_sec, special_alt->orig_off,
1932 				   "special: can't find orig instruction");
1933 			return -1;
1934 		}
1935 
1936 		new_insn = NULL;
1937 		if (!special_alt->group || special_alt->new_len) {
1938 			new_insn = find_insn(file, special_alt->new_sec,
1939 					     special_alt->new_off);
1940 			if (!new_insn) {
1941 				ERROR_FUNC(special_alt->new_sec, special_alt->new_off,
1942 					   "special: can't find new instruction");
1943 				return -1;
1944 			}
1945 		}
1946 
1947 		if (special_alt->group) {
1948 			if (!special_alt->orig_len) {
1949 				ERROR_INSN(orig_insn, "empty alternative entry");
1950 				continue;
1951 			}
1952 
1953 			if (handle_group_alt(file, special_alt, orig_insn, &new_insn))
1954 				return -1;
1955 
1956 		} else if (special_alt->jump_or_nop) {
1957 			if (handle_jump_alt(file, special_alt, orig_insn, &new_insn))
1958 				return -1;
1959 		}
1960 
1961 		alt = calloc(1, sizeof(*alt));
1962 		if (!alt) {
1963 			ERROR_GLIBC("calloc");
1964 			return -1;
1965 		}
1966 
1967 		alt->insn = new_insn;
1968 		alt->next = orig_insn->alts;
1969 		orig_insn->alts = alt;
1970 
1971 		list_del(&special_alt->list);
1972 		free(special_alt);
1973 	}
1974 
1975 	if (opts.stats) {
1976 		printf("jl\\\tNOP\tJMP\n");
1977 		printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short);
1978 		printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long);
1979 	}
1980 
1981 	return 0;
1982 }
1983 
/*
 * Resolve a jump table entry's reloc to a section offset.  Weak default:
 * symbol offset plus addend; arches with other encodings override it.
 */
__weak unsigned long arch_jump_table_sym_offset(struct reloc *reloc, struct reloc *table)
{
	return reloc->sym->offset + reloc_addend(reloc);
}
1988 
/*
 * Walk a dynamic jump's switch table and add each target instruction as an
 * alternative on the jump insn.  The walk stops at the table's known size,
 * at the start of the next table, or at the first entry that doesn't look
 * like a same-function target.  Returns 0 on success, -1 on error.
 */
static int add_jump_table(struct objtool_file *file, struct instruction *insn)
{
	unsigned long table_size = insn_jump_table_size(insn);
	struct symbol *pfunc = insn_func(insn)->pfunc;
	struct reloc *table = insn_jump_table(insn);
	struct instruction *dest_insn;
	unsigned int prev_offset = 0;
	struct reloc *reloc = table;
	struct alternative *alt;
	unsigned long sym_offset;

	/*
	 * Each @reloc is a switch table relocation which points to the target
	 * instruction.
	 */
	for_each_reloc_from(table->sec, reloc) {

		/* Check for the end of the table: */
		if (table_size && reloc_offset(reloc) - reloc_offset(table) >= table_size)
			break;
		if (reloc != table && is_jump_table(reloc))
			break;

		/* Make sure the table entries are consecutive: */
		if (prev_offset && reloc_offset(reloc) != prev_offset + arch_reloc_size(reloc))
			break;

		sym_offset = arch_jump_table_sym_offset(reloc, table);

		/* Detect function pointers from contiguous objects: */
		if (reloc->sym->sec == pfunc->sec && sym_offset == pfunc->offset)
			break;

		/*
		 * Clang sometimes leaves dangling unused jump table entries
		 * which point to the end of the function.  Ignore them.
		 */
		if (reloc->sym->sec == pfunc->sec &&
		    sym_offset == pfunc->offset + pfunc->len)
			goto next;

		dest_insn = find_insn(file, reloc->sym->sec, sym_offset);
		if (!dest_insn)
			break;

		/* Make sure the destination is in the same function: */
		if (!insn_func(dest_insn) || insn_func(dest_insn)->pfunc != pfunc)
			break;

		/* Prepend the destination onto the jump insn's alts list. */
		alt = calloc(1, sizeof(*alt));
		if (!alt) {
			ERROR_GLIBC("calloc");
			return -1;
		}

		alt->insn = dest_insn;
		alt->next = insn->alts;
		insn->alts = alt;
next:
		prev_offset = reloc_offset(reloc);
	}

	/* prev_offset == 0 means not even one valid entry was found. */
	if (!prev_offset) {
		ERROR_INSN(insn, "can't find switch jump table");
		return -1;
	}

	return 0;
}
2058 
2059 /*
2060  * find_jump_table() - Given a dynamic jump, find the switch jump table
2061  * associated with it.
2062  */
2063 static void find_jump_table(struct objtool_file *file, struct symbol *func,
2064 			    struct instruction *insn)
2065 {
2066 	struct reloc *table_reloc;
2067 	struct instruction *dest_insn, *orig_insn = insn;
2068 	unsigned long table_size;
2069 	unsigned long sym_offset;
2070 
2071 	/*
2072 	 * Backward search using the @first_jump_src links, these help avoid
2073 	 * much of the 'in between' code. Which avoids us getting confused by
2074 	 * it.
2075 	 */
2076 	for (;
2077 	     insn && insn_func(insn) && insn_func(insn)->pfunc == func;
2078 	     insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) {
2079 
2080 		if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
2081 			break;
2082 
2083 		/* allow small jumps within the range */
2084 		if (insn->type == INSN_JUMP_UNCONDITIONAL &&
2085 		    insn->jump_dest &&
2086 		    (insn->jump_dest->offset <= insn->offset ||
2087 		     insn->jump_dest->offset > orig_insn->offset))
2088 			break;
2089 
2090 		table_reloc = arch_find_switch_table(file, insn, &table_size);
2091 		if (!table_reloc)
2092 			continue;
2093 
2094 		sym_offset = table_reloc->sym->offset + reloc_addend(table_reloc);
2095 
2096 		dest_insn = find_insn(file, table_reloc->sym->sec, sym_offset);
2097 		if (!dest_insn || !insn_func(dest_insn) || insn_func(dest_insn)->pfunc != func)
2098 			continue;
2099 
2100 		set_jump_table(table_reloc);
2101 		orig_insn->_jump_table = table_reloc;
2102 		orig_insn->_jump_table_size = table_size;
2103 
2104 		break;
2105 	}
2106 }
2107 
2108 /*
2109  * First pass: Mark the head of each jump table so that in the next pass,
2110  * we know when a given jump table ends and the next one starts.
2111  */
2112 static void mark_func_jump_tables(struct objtool_file *file,
2113 				    struct symbol *func)
2114 {
2115 	struct instruction *insn, *last = NULL;
2116 
2117 	func_for_each_insn(file, func, insn) {
2118 		if (!last)
2119 			last = insn;
2120 
2121 		/*
2122 		 * Store back-pointers for unconditional forward jumps such
2123 		 * that find_jump_table() can back-track using those and
2124 		 * avoid some potentially confusing code.
2125 		 */
2126 		if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
2127 		    insn->offset > last->offset &&
2128 		    insn->jump_dest->offset > insn->offset &&
2129 		    !insn->jump_dest->first_jump_src) {
2130 
2131 			insn->jump_dest->first_jump_src = insn;
2132 			last = insn->jump_dest;
2133 		}
2134 
2135 		if (insn->type != INSN_JUMP_DYNAMIC)
2136 			continue;
2137 
2138 		find_jump_table(file, func, insn);
2139 	}
2140 }
2141 
2142 static int add_func_jump_tables(struct objtool_file *file,
2143 				  struct symbol *func)
2144 {
2145 	struct instruction *insn;
2146 
2147 	func_for_each_insn(file, func, insn) {
2148 		if (!insn_jump_table(insn))
2149 			continue;
2150 
2151 		if (add_jump_table(file, insn))
2152 			return -1;
2153 	}
2154 
2155 	return 0;
2156 }
2157 
2158 /*
2159  * For some switch statements, gcc generates a jump table in the .rodata
2160  * section which contains a list of addresses within the function to jump to.
2161  * This finds these jump tables and adds them to the insn->alts lists.
2162  */
2163 static int add_jump_table_alts(struct objtool_file *file)
2164 {
2165 	struct symbol *func;
2166 
2167 	if (!file->rodata)
2168 		return 0;
2169 
2170 	for_each_sym(file->elf, func) {
2171 		if (!is_func_sym(func))
2172 			continue;
2173 
2174 		mark_func_jump_tables(file, func);
2175 		if (add_func_jump_tables(file, func))
2176 			return -1;
2177 	}
2178 
2179 	return 0;
2180 }
2181 
2182 static void set_func_state(struct cfi_state *state)
2183 {
2184 	state->cfa = initial_func_cfi.cfa;
2185 	memcpy(&state->regs, &initial_func_cfi.regs,
2186 	       CFI_NUM_REGS * sizeof(struct cfi_reg));
2187 	state->stack_size = initial_func_cfi.cfa.offset;
2188 	state->type = UNWIND_HINT_TYPE_CALL;
2189 }
2190 
/*
 * Parse the .discard.unwind_hints section and attach the annotated CFI
 * state (or save/restore markers) to each hinted instruction.
 * Returns 0 on success (including when the section is absent), -1 on error.
 */
static int read_unwind_hints(struct objtool_file *file)
{
	struct cfi_state cfi = init_cfi;
	struct section *sec;
	struct unwind_hint *hint;
	struct instruction *insn;
	struct reloc *reloc;
	unsigned long offset;
	int i;

	sec = find_section_by_name(file->elf, ".discard.unwind_hints");
	if (!sec)
		return 0;

	if (!sec->rsec) {
		ERROR("missing .rela.discard.unwind_hints section");
		return -1;
	}

	/* The section must be a whole number of unwind_hint records. */
	if (sec_size(sec) % sizeof(struct unwind_hint)) {
		ERROR("struct unwind_hint size mismatch");
		return -1;
	}

	file->hints = true;

	for (i = 0; i < sec_size(sec) / sizeof(struct unwind_hint); i++) {
		hint = (struct unwind_hint *)sec->data->d_buf + i;

		/* Each hint record has a reloc pointing at the hinted insn. */
		reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
		if (!reloc) {
			ERROR("can't find reloc for unwind_hints[%d]", i);
			return -1;
		}

		offset = reloc->sym->offset + reloc_addend(reloc);

		insn = find_insn(file, reloc->sym->sec, offset);
		if (!insn) {
			ERROR("can't find insn for unwind_hints[%d]", i);
			return -1;
		}

		insn->hint = true;

		if (hint->type == UNWIND_HINT_TYPE_UNDEFINED) {
			insn->cfi = &force_undefined_cfi;
			continue;
		}

		if (hint->type == UNWIND_HINT_TYPE_SAVE) {
			insn->hint = false;
			insn->save = true;
			continue;
		}

		if (hint->type == UNWIND_HINT_TYPE_RESTORE) {
			insn->restore = true;
			continue;
		}

		if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) {
			struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset);

			/* Global entry points need an ENDBR when IBT is on. */
			if (sym && is_global_sym(sym)) {
				if (opts.ibt && insn->type != INSN_ENDBR && !insn->noendbr) {
					ERROR_INSN(insn, "UNWIND_HINT_IRET_REGS without ENDBR");
					return -1;
				}
			}
		}

		if (hint->type == UNWIND_HINT_TYPE_FUNC) {
			insn->cfi = &func_cfi;
			continue;
		}

		/* Start from the insn's existing CFI, if any, and apply the hint. */
		if (insn->cfi)
			cfi = *(insn->cfi);

		if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) {
			ERROR_INSN(insn, "unsupported unwind_hint sp base reg %d", hint->sp_reg);
			return -1;
		}

		/* Hint fields are stored in the target's endianness. */
		cfi.cfa.offset = bswap_if_needed(file->elf, hint->sp_offset);
		cfi.type = hint->type;
		cfi.signal = hint->signal;

		insn->cfi = cfi_hash_find_or_add(&cfi);
	}

	return 0;
}
2285 
/*
 * Walk the .discard.annotate_insn section and invoke @func on each
 * annotated instruction.
 *
 * Each 8-byte entry pairs a reloc (naming the instruction) with an
 * annotation type.  The section is walked several times with different
 * callbacks (__annotate_early/_ifc/_late) because different annotation
 * types must be applied at different stages of decoding.
 */
static int read_annotate(struct objtool_file *file,
			 int (*func)(struct objtool_file *file, int type, struct instruction *insn))
{
	struct section *sec;
	struct instruction *insn;
	struct reloc *reloc;
	uint64_t offset;
	int type;

	sec = find_section_by_name(file->elf, ".discard.annotate_insn");
	if (!sec)
		return 0;

	if (!sec->rsec)
		return 0;

	/*
	 * Some linkers emit a bogus sh_entsize; warn once (verbose only)
	 * and fix it up so sec_num_entries() computes the right count.
	 */
	if (sec->sh.sh_entsize != 8) {
		static bool warned = false;
		if (!warned && opts.verbose) {
			WARN("%s: dodgy linker, sh_entsize != 8", sec->name);
			warned = true;
		}
		sec->sh.sh_entsize = 8;
	}

	/* Every annotation entry must have a reloc naming its instruction. */
	if (sec_num_entries(sec) != sec_num_entries(sec->rsec)) {
		ERROR("bad .discard.annotate_insn section: missing relocs");
		return -1;
	}

	for_each_reloc(sec->rsec, reloc) {
		type = annotype(file->elf, sec, reloc);
		offset = reloc->sym->offset + reloc_addend(reloc);
		insn = find_insn(file, reloc->sym->sec, offset);

		if (!insn) {
			ERROR("bad .discard.annotate_insn entry: %d of type %d", reloc_idx(reloc), type);
			return -1;
		}

		if (func(file, type, insn))
			return -1;
	}

	return 0;
}
2332 
2333 static int __annotate_early(struct objtool_file *file, int type, struct instruction *insn)
2334 {
2335 	switch (type) {
2336 
2337 	/* Must be before add_special_section_alts() */
2338 	case ANNOTYPE_IGNORE_ALTS:
2339 		insn->ignore_alts = true;
2340 		break;
2341 
2342 	/*
2343 	 * Must be before read_unwind_hints() since that needs insn->noendbr.
2344 	 */
2345 	case ANNOTYPE_NOENDBR:
2346 		insn->noendbr = 1;
2347 		break;
2348 
2349 	default:
2350 		break;
2351 	}
2352 
2353 	return 0;
2354 }
2355 
2356 static int __annotate_ifc(struct objtool_file *file, int type, struct instruction *insn)
2357 {
2358 	unsigned long dest_off;
2359 
2360 	if (type != ANNOTYPE_INTRA_FUNCTION_CALL)
2361 		return 0;
2362 
2363 	if (insn->type != INSN_CALL) {
2364 		ERROR_INSN(insn, "intra_function_call not a direct call");
2365 		return -1;
2366 	}
2367 
2368 	/*
2369 	 * Treat intra-function CALLs as JMPs, but with a stack_op.
2370 	 * See add_call_destinations(), which strips stack_ops from
2371 	 * normal CALLs.
2372 	 */
2373 	insn->type = INSN_JUMP_UNCONDITIONAL;
2374 
2375 	dest_off = arch_jump_destination(insn);
2376 	insn->jump_dest = find_insn(file, insn->sec, dest_off);
2377 	if (!insn->jump_dest) {
2378 		ERROR_INSN(insn, "can't find call dest at %s+0x%lx",
2379 			   insn->sec->name, dest_off);
2380 		return -1;
2381 	}
2382 
2383 	return 0;
2384 }
2385 
2386 static int __annotate_late(struct objtool_file *file, int type, struct instruction *insn)
2387 {
2388 	struct symbol *sym;
2389 
2390 	switch (type) {
2391 	case ANNOTYPE_NOENDBR:
2392 		/* early */
2393 		break;
2394 
2395 	case ANNOTYPE_RETPOLINE_SAFE:
2396 		if (insn->type != INSN_JUMP_DYNAMIC &&
2397 		    insn->type != INSN_CALL_DYNAMIC &&
2398 		    insn->type != INSN_RETURN &&
2399 		    insn->type != INSN_NOP) {
2400 			ERROR_INSN(insn, "retpoline_safe hint not an indirect jump/call/ret/nop");
2401 			return -1;
2402 		}
2403 
2404 		insn->retpoline_safe = true;
2405 		break;
2406 
2407 	case ANNOTYPE_INSTR_BEGIN:
2408 		insn->instr++;
2409 		break;
2410 
2411 	case ANNOTYPE_INSTR_END:
2412 		insn->instr--;
2413 		break;
2414 
2415 	case ANNOTYPE_UNRET_BEGIN:
2416 		insn->unret = 1;
2417 		break;
2418 
2419 	case ANNOTYPE_IGNORE_ALTS:
2420 		/* early */
2421 		break;
2422 
2423 	case ANNOTYPE_INTRA_FUNCTION_CALL:
2424 		/* ifc */
2425 		break;
2426 
2427 	case ANNOTYPE_REACHABLE:
2428 		insn->dead_end = false;
2429 		break;
2430 
2431 	case ANNOTYPE_NOCFI:
2432 		sym = insn->sym;
2433 		if (!sym) {
2434 			ERROR_INSN(insn, "dodgy NOCFI annotation");
2435 			return -1;
2436 		}
2437 		insn->sym->nocfi = 1;
2438 		break;
2439 
2440 	default:
2441 		ERROR_INSN(insn, "Unknown annotation type: %d", type);
2442 		return -1;
2443 	}
2444 
2445 	return 0;
2446 }
2447 
2448 /*
2449  * Return true if name matches an instrumentation function, where calls to that
2450  * function from noinstr code can safely be removed, but compilers won't do so.
2451  */
2452 static bool is_profiling_func(const char *name)
2453 {
2454 	/*
2455 	 * Many compilers cannot disable KCOV with a function attribute.
2456 	 */
2457 	if (!strncmp(name, "__sanitizer_cov_", 16))
2458 		return true;
2459 
2460 	return false;
2461 }
2462 
2463 static int classify_symbols(struct objtool_file *file)
2464 {
2465 	struct symbol *func;
2466 
2467 	for_each_sym(file->elf, func) {
2468 		if (is_notype_sym(func) && strstarts(func->name, ".L"))
2469 			func->local_label = true;
2470 
2471 		if (!is_global_sym(func))
2472 			continue;
2473 
2474 		if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR,
2475 			     strlen(STATIC_CALL_TRAMP_PREFIX_STR)))
2476 			func->static_call_tramp = true;
2477 
2478 		if (arch_is_retpoline(func))
2479 			func->retpoline_thunk = true;
2480 
2481 		if (arch_is_rethunk(func))
2482 			func->return_thunk = true;
2483 
2484 		if (arch_is_embedded_insn(func))
2485 			func->embedded_insn = true;
2486 
2487 		if (arch_ftrace_match(func->name))
2488 			func->fentry = true;
2489 
2490 		if (is_profiling_func(func->name))
2491 			func->profiling_func = true;
2492 	}
2493 
2494 	return 0;
2495 }
2496 
2497 static void mark_rodata(struct objtool_file *file)
2498 {
2499 	struct section *sec;
2500 	bool found = false;
2501 
2502 	/*
2503 	 * Search for the following rodata sections, each of which can
2504 	 * potentially contain jump tables:
2505 	 *
2506 	 * - .rodata: can contain GCC switch tables
2507 	 * - .rodata.<func>: same, if -fdata-sections is being used
2508 	 * - .data.rel.ro.c_jump_table: contains C annotated jump tables
2509 	 *
2510 	 * .rodata.str1.* sections are ignored; they don't contain jump tables.
2511 	 */
2512 	for_each_sec(file->elf, sec) {
2513 		if ((!strncmp(sec->name, ".rodata", 7) &&
2514 		     !strstr(sec->name, ".str1.")) ||
2515 		    !strncmp(sec->name, ".data.rel.ro", 12)) {
2516 			sec->rodata = true;
2517 			found = true;
2518 		}
2519 	}
2520 
2521 	file->rodata = found;
2522 }
2523 
/*
 * Mark instructions that live in symbol "holes" (code not covered by any
 * symbol) so later passes can skip or special-case them.
 */
static void mark_holes(struct objtool_file *file)
{
	struct instruction *insn;
	bool in_hole = false;

	/* Symbol holes are only a concern for whole-archive (linked) runs. */
	if (!opts.link)
		return;

	/*
	 * Whole archive runs might encounter dead code from weak symbols.
	 * This is where the linker will have dropped the weak symbol in
	 * favour of a regular symbol, but leaves the code in place.
	 */
	for_each_insn(file, insn) {
		/* Covered by a symbol, or not inside a hole: reset state. */
		if (insn->sym || !find_symbol_hole_containing(insn->sec, insn->offset)) {
			in_hole = false;
			continue;
		}

		/* Skip function padding and pfx code */
		if (!in_hole && insn->type == INSN_NOP)
			continue;

		in_hole = true;
		insn->hole = 1;

		/*
		 * If this hole jumps to a .cold function, mark it ignore.
		 */
		if (insn->jump_dest) {
			struct symbol *dest_func = insn_func(insn->jump_dest);

			if (dest_func && dest_func->cold)
				dest_func->ignore = true;
		}
	}
}
2561 
2562 static bool validate_branch_enabled(void)
2563 {
2564 	return opts.stackval ||
2565 	       opts.orc ||
2566 	       opts.uaccess ||
2567 	       opts.checksum;
2568 }
2569 
/*
 * Decode all sections of the object file and build objtool's model of it.
 *
 * NOTE: the ordering of the passes below is load-bearing; see the inline
 * comments for the specific inter-pass dependencies.
 */
static int decode_sections(struct objtool_file *file)
{
	mark_rodata(file);

	if (init_pv_ops(file))
		return -1;

	/*
	 * Must be before add_{jump_call}_destination.
	 */
	if (classify_symbols(file))
		return -1;

	if (decode_instructions(file))
		return -1;

	if (add_ignores(file))
		return -1;

	add_uaccess_safe(file);

	if (read_annotate(file, __annotate_early))
		return -1;

	/*
	 * Must be before add_jump_destinations(), which depends on 'func'
	 * being set for alternatives, to enable proper sibling call detection.
	 */
	if (validate_branch_enabled() || opts.noinstr || opts.hack_jump_label) {
		if (add_special_section_alts(file))
			return -1;
	}

	if (add_jump_destinations(file))
		return -1;

	/*
	 * Must be before add_call_destination(); it changes INSN_CALL to
	 * INSN_JUMP.
	 */
	if (read_annotate(file, __annotate_ifc))
		return -1;

	if (add_call_destinations(file))
		return -1;

	if (add_jump_table_alts(file))
		return -1;

	if (read_unwind_hints(file))
		return -1;

	/* Must be after add_jump_destinations() */
	mark_holes(file);

	/*
	 * Must be after add_call_destinations() such that it can override
	 * dead_end_function() marks.
	 */
	if (read_annotate(file, __annotate_late))
		return -1;

	return 0;
}
2634 
2635 static bool is_special_call(struct instruction *insn)
2636 {
2637 	if (insn->type == INSN_CALL) {
2638 		struct symbol *dest = insn_call_dest(insn);
2639 
2640 		if (!dest)
2641 			return false;
2642 
2643 		if (dest->fentry || dest->embedded_insn)
2644 			return true;
2645 	}
2646 
2647 	return false;
2648 }
2649 
2650 static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state)
2651 {
2652 	struct cfi_state *cfi = &state->cfi;
2653 	int i;
2654 
2655 	if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap)
2656 		return true;
2657 
2658 	if (cfi->cfa.offset != initial_func_cfi.cfa.offset)
2659 		return true;
2660 
2661 	if (cfi->stack_size != initial_func_cfi.cfa.offset)
2662 		return true;
2663 
2664 	for (i = 0; i < CFI_NUM_REGS; i++) {
2665 		if (cfi->regs[i].base != initial_func_cfi.regs[i].base ||
2666 		    cfi->regs[i].offset != initial_func_cfi.regs[i].offset)
2667 			return true;
2668 	}
2669 
2670 	return false;
2671 }
2672 
2673 static bool check_reg_frame_pos(const struct cfi_reg *reg,
2674 				int expected_offset)
2675 {
2676 	return reg->base == CFI_CFA &&
2677 	       reg->offset == expected_offset;
2678 }
2679 
2680 static bool has_valid_stack_frame(struct insn_state *state)
2681 {
2682 	struct cfi_state *cfi = &state->cfi;
2683 
2684 	if (cfi->cfa.base == CFI_BP &&
2685 	    check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) &&
2686 	    check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8))
2687 		return true;
2688 
2689 	if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP)
2690 		return true;
2691 
2692 	return false;
2693 }
2694 
2695 static int update_cfi_state_regs(struct instruction *insn,
2696 				  struct cfi_state *cfi,
2697 				  struct stack_op *op)
2698 {
2699 	struct cfi_reg *cfa = &cfi->cfa;
2700 
2701 	if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
2702 		return 0;
2703 
2704 	/* push */
2705 	if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
2706 		cfa->offset += 8;
2707 
2708 	/* pop */
2709 	if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
2710 		cfa->offset -= 8;
2711 
2712 	/* add immediate to sp */
2713 	if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD &&
2714 	    op->dest.reg == CFI_SP && op->src.reg == CFI_SP)
2715 		cfa->offset -= op->src.offset;
2716 
2717 	return 0;
2718 }
2719 
2720 static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset)
2721 {
2722 	if (arch_callee_saved_reg(reg) &&
2723 	    cfi->regs[reg].base == CFI_UNDEFINED) {
2724 		cfi->regs[reg].base = base;
2725 		cfi->regs[reg].offset = offset;
2726 	}
2727 }
2728 
2729 static void restore_reg(struct cfi_state *cfi, unsigned char reg)
2730 {
2731 	cfi->regs[reg].base = initial_func_cfi.regs[reg].base;
2732 	cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset;
2733 }
2734 
2735 /*
2736  * A note about DRAP stack alignment:
2737  *
2738  * GCC has the concept of a DRAP register, which is used to help keep track of
2739  * the stack pointer when aligning the stack.  r10 or r13 is used as the DRAP
2740  * register.  The typical DRAP pattern is:
2741  *
2742  *   4c 8d 54 24 08		lea    0x8(%rsp),%r10
2743  *   48 83 e4 c0		and    $0xffffffffffffffc0,%rsp
2744  *   41 ff 72 f8		pushq  -0x8(%r10)
2745  *   55				push   %rbp
2746  *   48 89 e5			mov    %rsp,%rbp
2747  *				(more pushes)
2748  *   41 52			push   %r10
2749  *				...
2750  *   41 5a			pop    %r10
2751  *				(more pops)
2752  *   5d				pop    %rbp
2753  *   49 8d 62 f8		lea    -0x8(%r10),%rsp
2754  *   c3				retq
2755  *
2756  * There are some variations in the epilogues, like:
2757  *
2758  *   5b				pop    %rbx
2759  *   41 5a			pop    %r10
2760  *   41 5c			pop    %r12
2761  *   41 5d			pop    %r13
2762  *   41 5e			pop    %r14
2763  *   c9				leaveq
2764  *   49 8d 62 f8		lea    -0x8(%r10),%rsp
2765  *   c3				retq
2766  *
2767  * and:
2768  *
2769  *   4c 8b 55 e8		mov    -0x18(%rbp),%r10
2770  *   48 8b 5d e0		mov    -0x20(%rbp),%rbx
2771  *   4c 8b 65 f0		mov    -0x10(%rbp),%r12
2772  *   4c 8b 6d f8		mov    -0x8(%rbp),%r13
2773  *   c9				leaveq
2774  *   49 8d 62 f8		lea    -0x8(%r10),%rsp
2775  *   c3				retq
2776  *
2777  * Sometimes r13 is used as the DRAP register, in which case it's saved and
2778  * restored beforehand:
2779  *
2780  *   41 55			push   %r13
2781  *   4c 8d 6c 24 10		lea    0x10(%rsp),%r13
2782  *   48 83 e4 f0		and    $0xfffffffffffffff0,%rsp
2783  *				...
2784  *   49 8d 65 f0		lea    -0x10(%r13),%rsp
2785  *   41 5d			pop    %r13
2786  *   c3				retq
2787  */
/*
 * Apply a single decoded stack operation @op to the CFI state @cfi.
 *
 * This is the core of objtool's stack-frame tracking: it recognizes the
 * compiler's frame setup/teardown idioms (including the DRAP patterns
 * documented above) and keeps the CFA, callee-saved register locations
 * and stack size in sync with the instruction stream.
 *
 * Returns 0 on success, 1 for a recoverable problem (undefined stack
 * state inside a function), or -1 on a hard error.
 */
static int update_cfi_state(struct instruction *insn,
			    struct instruction *next_insn,
			    struct cfi_state *cfi, struct stack_op *op)
{
	struct cfi_reg *cfa = &cfi->cfa;
	struct cfi_reg *regs = cfi->regs;

	/* ignore UNWIND_HINT_UNDEFINED regions */
	if (cfi->force_undefined)
		return 0;

	/* stack operations don't make sense with an undefined CFA */
	if (cfa->base == CFI_UNDEFINED) {
		if (insn_func(insn)) {
			WARN_INSN(insn, "undefined stack state");
			return 1;
		}
		return 0;
	}

	/* In a pt_regs region, only the CFA offset needs tracking. */
	if (cfi->type == UNWIND_HINT_TYPE_REGS ||
	    cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL)
		return update_cfi_state_regs(insn, cfi, op);

	switch (op->dest.type) {

	case OP_DEST_REG:
		switch (op->src.type) {

		case OP_SRC_REG:
			if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
			    cfa->base == CFI_SP &&
			    check_reg_frame_pos(&regs[CFI_BP], -cfa->offset)) {

				/* mov %rsp, %rbp */
				cfa->base = op->dest.reg;
				cfi->bp_scratch = false;
			}

			else if (op->src.reg == CFI_SP &&
				 op->dest.reg == CFI_BP && cfi->drap) {

				/* drap: mov %rsp, %rbp */
				regs[CFI_BP].base = CFI_BP;
				regs[CFI_BP].offset = -cfi->stack_size;
				cfi->bp_scratch = false;
			}

			else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {

				/*
				 * mov %rsp, %reg
				 *
				 * This is needed for the rare case where GCC
				 * does:
				 *
				 *   mov    %rsp, %rax
				 *   ...
				 *   mov    %rax, %rsp
				 */
				cfi->vals[op->dest.reg].base = CFI_CFA;
				cfi->vals[op->dest.reg].offset = -cfi->stack_size;
			}

			else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
				 (cfa->base == CFI_BP || cfa->base == cfi->drap_reg)) {

				/*
				 * mov %rbp, %rsp
				 *
				 * Restore the original stack pointer (Clang).
				 */
				cfi->stack_size = -cfi->regs[CFI_BP].offset;
			}

			else if (op->dest.reg == cfa->base) {

				/* mov %reg, %rsp */
				if (cfa->base == CFI_SP &&
				    cfi->vals[op->src.reg].base == CFI_CFA) {

					/*
					 * This is needed for the rare case
					 * where GCC does something dumb like:
					 *
					 *   lea    0x8(%rsp), %rcx
					 *   ...
					 *   mov    %rcx, %rsp
					 */
					cfa->offset = -cfi->vals[op->src.reg].offset;
					cfi->stack_size = cfa->offset;

				} else if (cfa->base == CFI_SP &&
					   cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
					   cfi->vals[op->src.reg].offset == cfa->offset) {

					/*
					 * Stack swizzle:
					 *
					 * 1: mov %rsp, (%[tos])
					 * 2: mov %[tos], %rsp
					 *    ...
					 * 3: pop %rsp
					 *
					 * Where:
					 *
					 * 1 - places a pointer to the previous
					 *     stack at the Top-of-Stack of the
					 *     new stack.
					 *
					 * 2 - switches to the new stack.
					 *
					 * 3 - pops the Top-of-Stack to restore
					 *     the original stack.
					 *
					 * Note: we set base to SP_INDIRECT
					 * here and preserve offset. Therefore
					 * when the unwinder reaches ToS it
					 * will dereference SP and then add the
					 * offset to find the next frame, IOW:
					 * (%rsp) + offset.
					 */
					cfa->base = CFI_SP_INDIRECT;

				} else {
					cfa->base = CFI_UNDEFINED;
					cfa->offset = 0;
				}
			}

			else if (op->dest.reg == CFI_SP &&
				 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
				 cfi->vals[op->src.reg].offset == cfa->offset) {

				/*
				 * The same stack swizzle case 2) as above. But
				 * because we can't change cfa->base, case 3)
				 * will become a regular POP. Pretend we're a
				 * PUSH so things don't go unbalanced.
				 */
				cfi->stack_size += 8;
			}


			break;

		case OP_SRC_ADD:
			if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) {

				/* add imm, %rsp */
				cfi->stack_size -= op->src.offset;
				if (cfa->base == CFI_SP)
					cfa->offset -= op->src.offset;
				break;
			}

			if (op->dest.reg == CFI_BP && op->src.reg == CFI_SP &&
			    insn->sym->frame_pointer) {
				/* addi.d fp,sp,imm on LoongArch */
				if (cfa->base == CFI_SP && cfa->offset == op->src.offset) {
					cfa->base = CFI_BP;
					cfa->offset = 0;
				}
				break;
			}

			if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {
				/* addi.d sp,fp,imm on LoongArch */
				if (cfa->base == CFI_BP && cfa->offset == 0) {
					if (insn->sym->frame_pointer) {
						cfa->base = CFI_SP;
						cfa->offset = -op->src.offset;
					}
				} else {
					/* lea disp(%rbp), %rsp */
					cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
				}
				break;
			}

			if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {

				/* drap: lea disp(%rsp), %drap */
				cfi->drap_reg = op->dest.reg;

				/*
				 * lea disp(%rsp), %reg
				 *
				 * This is needed for the rare case where GCC
				 * does something dumb like:
				 *
				 *   lea    0x8(%rsp), %rcx
				 *   ...
				 *   mov    %rcx, %rsp
				 */
				cfi->vals[op->dest.reg].base = CFI_CFA;
				cfi->vals[op->dest.reg].offset = \
					-cfi->stack_size + op->src.offset;

				break;
			}

			if (cfi->drap && op->dest.reg == CFI_SP &&
			    op->src.reg == cfi->drap_reg) {

				 /* drap: lea disp(%drap), %rsp */
				cfa->base = CFI_SP;
				cfa->offset = cfi->stack_size = -op->src.offset;
				cfi->drap_reg = CFI_UNDEFINED;
				cfi->drap = false;
				break;
			}

			/*
			 * Any other modification of the CFA base register is
			 * unsupported, unless the very next instruction hints
			 * a fresh unwind state anyway.
			 */
			if (op->dest.reg == cfi->cfa.base && !(next_insn && next_insn->hint)) {
				WARN_INSN(insn, "unsupported stack register modification");
				return -1;
			}

			break;

		case OP_SRC_AND:
			if (op->dest.reg != CFI_SP ||
			    (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
			    (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
				WARN_INSN(insn, "unsupported stack pointer realignment");
				return -1;
			}

			if (cfi->drap_reg != CFI_UNDEFINED) {
				/* drap: and imm, %rsp */
				cfa->base = cfi->drap_reg;
				cfa->offset = cfi->stack_size = 0;
				cfi->drap = true;
			}

			/*
			 * Older versions of GCC (4.8ish) realign the stack
			 * without DRAP, with a frame pointer.
			 */

			break;

		case OP_SRC_POP:
		case OP_SRC_POPF:
			if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) {

				/* pop %rsp; # restore from a stack swizzle */
				cfa->base = CFI_SP;
				break;
			}

			if (!cfi->drap && op->dest.reg == cfa->base) {

				/* pop %rbp */
				cfa->base = CFI_SP;
			}

			if (cfi->drap && cfa->base == CFI_BP_INDIRECT &&
			    op->dest.reg == cfi->drap_reg &&
			    cfi->drap_offset == -cfi->stack_size) {

				/* drap: pop %drap */
				cfa->base = cfi->drap_reg;
				cfa->offset = 0;
				cfi->drap_offset = -1;

			} else if (cfi->stack_size == -regs[op->dest.reg].offset) {

				/* pop %reg */
				restore_reg(cfi, op->dest.reg);
			}

			cfi->stack_size -= 8;
			if (cfa->base == CFI_SP)
				cfa->offset -= 8;

			break;

		case OP_SRC_REG_INDIRECT:
			if (!cfi->drap && op->dest.reg == cfa->base &&
			    op->dest.reg == CFI_BP) {

				/* mov disp(%rsp), %rbp */
				cfa->base = CFI_SP;
				cfa->offset = cfi->stack_size;
			}

			if (cfi->drap && op->src.reg == CFI_BP &&
			    op->src.offset == cfi->drap_offset) {

				/* drap: mov disp(%rbp), %drap */
				cfa->base = cfi->drap_reg;
				cfa->offset = 0;
				cfi->drap_offset = -1;
			}

			if (cfi->drap && op->src.reg == CFI_BP &&
			    op->src.offset == regs[op->dest.reg].offset) {

				/* drap: mov disp(%rbp), %reg */
				restore_reg(cfi, op->dest.reg);

			} else if (op->src.reg == cfa->base &&
			    op->src.offset == regs[op->dest.reg].offset + cfa->offset) {

				/* mov disp(%rbp), %reg */
				/* mov disp(%rsp), %reg */
				restore_reg(cfi, op->dest.reg);

			} else if (op->src.reg == CFI_SP &&
				   op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) {

				/* mov disp(%rsp), %reg */
				restore_reg(cfi, op->dest.reg);
			}

			break;

		default:
			WARN_INSN(insn, "unknown stack-related instruction");
			return -1;
		}

		break;

	case OP_DEST_PUSH:
	case OP_DEST_PUSHF:
		cfi->stack_size += 8;
		if (cfa->base == CFI_SP)
			cfa->offset += 8;

		if (op->src.type != OP_SRC_REG)
			break;

		if (cfi->drap) {
			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {

				/* drap: push %drap */
				cfa->base = CFI_BP_INDIRECT;
				cfa->offset = -cfi->stack_size;

				/* save drap so we know when to restore it */
				cfi->drap_offset = -cfi->stack_size;

			} else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) {

				/* drap: push %rbp */
				cfi->stack_size = 0;

			} else {

				/* drap: push %reg */
				save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size);
			}

		} else {

			/* push %reg */
			save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size);
		}

		/* detect when asm code uses rbp as a scratch register */
		if (opts.stackval && insn_func(insn) && op->src.reg == CFI_BP &&
		    cfa->base != CFI_BP)
			cfi->bp_scratch = true;
		break;

	case OP_DEST_REG_INDIRECT:

		if (cfi->drap) {
			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {

				/* drap: mov %drap, disp(%rbp) */
				cfa->base = CFI_BP_INDIRECT;
				cfa->offset = op->dest.offset;

				/* save drap offset so we know when to restore it */
				cfi->drap_offset = op->dest.offset;
			} else {

				/* drap: mov reg, disp(%rbp) */
				save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset);
			}

		} else if (op->dest.reg == cfa->base) {

			/* mov reg, disp(%rbp) */
			/* mov reg, disp(%rsp) */
			save_reg(cfi, op->src.reg, CFI_CFA,
				 op->dest.offset - cfi->cfa.offset);

		} else if (op->dest.reg == CFI_SP) {

			/* mov reg, disp(%rsp) */
			save_reg(cfi, op->src.reg, CFI_CFA,
				 op->dest.offset - cfi->stack_size);

		} else if (op->src.reg == CFI_SP && op->dest.offset == 0) {

			/* mov %rsp, (%reg); # setup a stack swizzle. */
			cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT;
			cfi->vals[op->dest.reg].offset = cfa->offset;
		}

		break;

	case OP_DEST_MEM:
		if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
			WARN_INSN(insn, "unknown stack-related memory operation");
			return -1;
		}

		/* pop mem */
		cfi->stack_size -= 8;
		if (cfa->base == CFI_SP)
			cfa->offset -= 8;

		break;

	default:
		WARN_INSN(insn, "unknown stack-related instruction");
		return -1;
	}

	return 0;
}
3214 
3215 /*
3216  * The stack layouts of alternatives instructions can sometimes diverge when
3217  * they have stack modifications.  That's fine as long as the potential stack
3218  * layouts don't conflict at any given potential instruction boundary.
3219  *
3220  * Flatten the CFIs of the different alternative code streams (both original
3221  * and replacement) into a single shared CFI array which can be used to detect
3222  * conflicts and nicely feed a linear array of ORC entries to the unwinder.
3223  */
3224 static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn)
3225 {
3226 	struct cfi_state **alt_cfi;
3227 	int group_off;
3228 
3229 	if (!insn->alt_group)
3230 		return 0;
3231 
3232 	if (!insn->cfi) {
3233 		WARN("CFI missing");
3234 		return -1;
3235 	}
3236 
3237 	alt_cfi = insn->alt_group->cfi;
3238 	group_off = insn->offset - insn->alt_group->first_insn->offset;
3239 
3240 	if (!alt_cfi[group_off]) {
3241 		alt_cfi[group_off] = insn->cfi;
3242 	} else {
3243 		if (cficmp(alt_cfi[group_off], insn->cfi)) {
3244 			struct alt_group *orig_group = insn->alt_group->orig_group ?: insn->alt_group;
3245 			struct instruction *orig = orig_group->first_insn;
3246 			WARN_INSN(orig, "stack layout conflict in alternatives: %s",
3247 				  offstr(insn->sec, insn->offset));
3248 			return -1;
3249 		}
3250 	}
3251 
3252 	return 0;
3253 }
3254 
/*
 * Apply all of @insn's stack operations to the CFI state, and track the
 * uaccess (STAC/CLAC) flag across PUSHF/POPF within alternative groups.
 *
 * state->uaccess_stack is a bit-stack of saved uaccess flags: bit 0 holds
 * the most recently pushed value, and a lone set bit acts as the
 * "stack non-empty" sentinel.
 */
static int handle_insn_ops(struct instruction *insn,
			   struct instruction *next_insn,
			   struct insn_state *state)
{
	struct stack_op *op;
	int ret;

	for (op = insn->stack_ops; op; op = op->next) {

		ret = update_cfi_state(insn, next_insn, &state->cfi, op);
		if (ret)
			return ret;

		/* uaccess tracking is only needed inside alternatives. */
		if (!opts.uaccess || !insn->alt_group)
			continue;

		if (op->dest.type == OP_DEST_PUSHF) {
			if (!state->uaccess_stack) {
				/* First push: seed the sentinel bit. */
				state->uaccess_stack = 1;
			} else if (state->uaccess_stack >> 31) {
				WARN_INSN(insn, "PUSHF stack exhausted");
				return 1;
			}
			state->uaccess_stack <<= 1;
			state->uaccess_stack  |= state->uaccess;
		}

		if (op->src.type == OP_SRC_POPF) {
			if (state->uaccess_stack) {
				state->uaccess = state->uaccess_stack & 1;
				state->uaccess_stack >>= 1;
				/* Only the sentinel left: stack is empty. */
				if (state->uaccess_stack == 1)
					state->uaccess_stack = 0;
			}
		}
	}

	return 0;
}
3294 
3295 static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
3296 {
3297 	struct cfi_state *cfi1 = insn->cfi;
3298 	int i;
3299 
3300 	if (!cfi1) {
3301 		WARN("CFI missing");
3302 		return false;
3303 	}
3304 
3305 	if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) {
3306 
3307 		WARN_INSN(insn, "stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
3308 			  cfi1->cfa.base, cfi1->cfa.offset,
3309 			  cfi2->cfa.base, cfi2->cfa.offset);
3310 		return false;
3311 
3312 	}
3313 
3314 	if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
3315 		for (i = 0; i < CFI_NUM_REGS; i++) {
3316 
3317 			if (!memcmp(&cfi1->regs[i], &cfi2->regs[i], sizeof(struct cfi_reg)))
3318 				continue;
3319 
3320 			WARN_INSN(insn, "stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
3321 				  i, cfi1->regs[i].base, cfi1->regs[i].offset,
3322 				  i, cfi2->regs[i].base, cfi2->regs[i].offset);
3323 		}
3324 		return false;
3325 	}
3326 
3327 	if (cfi1->type != cfi2->type) {
3328 
3329 		WARN_INSN(insn, "stack state mismatch: type1=%d type2=%d",
3330 			  cfi1->type, cfi2->type);
3331 		return false;
3332 	}
3333 
3334 	if (cfi1->drap != cfi2->drap ||
3335 		   (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) ||
3336 		   (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) {
3337 
3338 		WARN_INSN(insn, "stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
3339 			  cfi1->drap, cfi1->drap_reg, cfi1->drap_offset,
3340 			  cfi2->drap, cfi2->drap_reg, cfi2->drap_offset);
3341 		return false;
3342 	}
3343 
3344 	return true;
3345 }
3346 
3347 static inline bool func_uaccess_safe(struct symbol *func)
3348 {
3349 	if (func)
3350 		return func->uaccess_safe;
3351 
3352 	return false;
3353 }
3354 
3355 static inline const char *call_dest_name(struct instruction *insn)
3356 {
3357 	static char pvname[19];
3358 	struct reloc *reloc;
3359 	int idx;
3360 
3361 	if (insn_call_dest(insn))
3362 		return insn_call_dest(insn)->name;
3363 
3364 	reloc = insn_reloc(NULL, insn);
3365 	if (reloc && !strcmp(reloc->sym->name, "pv_ops")) {
3366 		idx = (reloc_addend(reloc) / sizeof(void *));
3367 		snprintf(pvname, sizeof(pvname), "pv_ops[%d]", idx);
3368 		return pvname;
3369 	}
3370 
3371 	return "{dynamic}";
3372 }
3373 
3374 static bool pv_call_dest(struct objtool_file *file, struct instruction *insn)
3375 {
3376 	struct symbol *target;
3377 	struct reloc *reloc;
3378 	int idx;
3379 
3380 	reloc = insn_reloc(file, insn);
3381 	if (!reloc || strcmp(reloc->sym->name, "pv_ops"))
3382 		return false;
3383 
3384 	idx = arch_insn_adjusted_addend(insn, reloc) / sizeof(void *);
3385 
3386 	if (file->pv_ops[idx].clean)
3387 		return true;
3388 
3389 	file->pv_ops[idx].clean = true;
3390 
3391 	list_for_each_entry(target, &file->pv_ops[idx].targets, pv_target) {
3392 		if (!target->sec->noinstr) {
3393 			WARN("pv_ops[%d]: %s", idx, target->name);
3394 			file->pv_ops[idx].clean = false;
3395 		}
3396 	}
3397 
3398 	return file->pv_ops[idx].clean;
3399 }
3400 
3401 static inline bool noinstr_call_dest(struct objtool_file *file,
3402 				     struct instruction *insn,
3403 				     struct symbol *func)
3404 {
3405 	/*
3406 	 * We can't deal with indirect function calls at present;
3407 	 * assume they're instrumented.
3408 	 */
3409 	if (!func) {
3410 		if (file->pv_ops)
3411 			return pv_call_dest(file, insn);
3412 
3413 		return false;
3414 	}
3415 
3416 	/*
3417 	 * If the symbol is from a noinstr section; we good.
3418 	 */
3419 	if (func->sec->noinstr)
3420 		return true;
3421 
3422 	/*
3423 	 * If the symbol is a static_call trampoline, we can't tell.
3424 	 */
3425 	if (func->static_call_tramp)
3426 		return true;
3427 
3428 	/*
3429 	 * The __ubsan_handle_*() calls are like WARN(), they only happen when
3430 	 * something 'BAD' happened. At the risk of taking the machine down,
3431 	 * let them proceed to get the message out.
3432 	 */
3433 	if (!strncmp(func->name, "__ubsan_handle_", 15))
3434 		return true;
3435 
3436 	return false;
3437 }
3438 
3439 static int validate_call(struct objtool_file *file,
3440 			 struct instruction *insn,
3441 			 struct insn_state *state)
3442 {
3443 	if (state->noinstr && state->instr <= 0 &&
3444 	    !noinstr_call_dest(file, insn, insn_call_dest(insn))) {
3445 		WARN_INSN(insn, "call to %s() leaves .noinstr.text section", call_dest_name(insn));
3446 		return 1;
3447 	}
3448 
3449 	if (state->uaccess && !func_uaccess_safe(insn_call_dest(insn))) {
3450 		WARN_INSN(insn, "call to %s() with UACCESS enabled", call_dest_name(insn));
3451 		return 1;
3452 	}
3453 
3454 	if (state->df) {
3455 		WARN_INSN(insn, "call to %s() with DF set", call_dest_name(insn));
3456 		return 1;
3457 	}
3458 
3459 	return 0;
3460 }
3461 
static int validate_sibling_call(struct objtool_file *file,
				 struct instruction *insn,
				 struct insn_state *state)
{
	/* A tail call must not leave a modified stack frame behind. */
	if (insn_func(insn) && has_modified_stack_frame(insn, state)) {
		WARN_INSN(insn, "sibling call from callable instruction with modified stack frame");
		return 1;
	}

	/* Otherwise it is subject to the same rules as a regular call. */
	return validate_call(file, insn, state);
}
3473 
3474 static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state)
3475 {
3476 	if (state->noinstr && state->instr > 0) {
3477 		WARN_INSN(insn, "return with instrumentation enabled");
3478 		return 1;
3479 	}
3480 
3481 	if (state->uaccess && !func_uaccess_safe(func)) {
3482 		WARN_INSN(insn, "return with UACCESS enabled");
3483 		return 1;
3484 	}
3485 
3486 	if (!state->uaccess && func_uaccess_safe(func)) {
3487 		WARN_INSN(insn, "return with UACCESS disabled from a UACCESS-safe function");
3488 		return 1;
3489 	}
3490 
3491 	if (state->df) {
3492 		WARN_INSN(insn, "return with DF set");
3493 		return 1;
3494 	}
3495 
3496 	if (func && has_modified_stack_frame(insn, state)) {
3497 		WARN_INSN(insn, "return with modified stack frame");
3498 		return 1;
3499 	}
3500 
3501 	if (state->cfi.bp_scratch) {
3502 		WARN_INSN(insn, "BP used as a scratch register");
3503 		return 1;
3504 	}
3505 
3506 	return 0;
3507 }
3508 
3509 static struct instruction *next_insn_to_validate(struct objtool_file *file,
3510 						 struct instruction *insn)
3511 {
3512 	struct alt_group *alt_group = insn->alt_group;
3513 
3514 	/*
3515 	 * Simulate the fact that alternatives are patched in-place.  When the
3516 	 * end of a replacement alt_group is reached, redirect objtool flow to
3517 	 * the end of the original alt_group.
3518 	 *
3519 	 * insn->alts->insn -> alt_group->first_insn
3520 	 *		       ...
3521 	 *		       alt_group->last_insn
3522 	 *		       [alt_group->nop]      -> next(orig_group->last_insn)
3523 	 */
3524 	if (alt_group) {
3525 		if (alt_group->nop) {
3526 			/* ->nop implies ->orig_group */
3527 			if (insn == alt_group->last_insn)
3528 				return alt_group->nop;
3529 			if (insn == alt_group->nop)
3530 				goto next_orig;
3531 		}
3532 		if (insn == alt_group->last_insn && alt_group->orig_group)
3533 			goto next_orig;
3534 	}
3535 
3536 	return next_insn_same_sec(file, insn);
3537 
3538 next_orig:
3539 	return next_insn_same_sec(file, alt_group->orig_group->last_insn);
3540 }
3541 
3542 static bool skip_alt_group(struct instruction *insn)
3543 {
3544 	struct instruction *alt_insn = insn->alts ? insn->alts->insn : NULL;
3545 
3546 	/* ANNOTATE_IGNORE_ALTERNATIVE */
3547 	if (insn->alt_group && insn->alt_group->ignore)
3548 		return true;
3549 
3550 	/*
3551 	 * For NOP patched with CLAC/STAC, only follow the latter to avoid
3552 	 * impossible code paths combining patched CLAC with unpatched STAC
3553 	 * or vice versa.
3554 	 *
3555 	 * ANNOTATE_IGNORE_ALTERNATIVE could have been used here, but Linus
3556 	 * requested not to do that to avoid hurting .s file readability
3557 	 * around CLAC/STAC alternative sites.
3558 	 */
3559 
3560 	if (!alt_insn)
3561 		return false;
3562 
3563 	/* Don't override ASM_{CLAC,STAC}_UNSAFE */
3564 	if (alt_insn->alt_group && alt_insn->alt_group->ignore)
3565 		return false;
3566 
3567 	return alt_insn->type == INSN_CLAC || alt_insn->type == INSN_STAC;
3568 }
3569 
3570 static int checksum_debug_init(struct objtool_file *file)
3571 {
3572 	char *dup, *s;
3573 
3574 	if (!opts.debug_checksum)
3575 		return 0;
3576 
3577 	dup = strdup(opts.debug_checksum);
3578 	if (!dup) {
3579 		ERROR_GLIBC("strdup");
3580 		return -1;
3581 	}
3582 
3583 	s = dup;
3584 	while (*s) {
3585 		struct symbol *func;
3586 		char *comma;
3587 
3588 		comma = strchr(s, ',');
3589 		if (comma)
3590 			*comma = '\0';
3591 
3592 		func = find_symbol_by_name(file->elf, s);
3593 		if (!func || !is_func_sym(func))
3594 			WARN("--debug-checksum: can't find '%s'", s);
3595 		else
3596 			func->debug_checksum = 1;
3597 
3598 		if (!comma)
3599 			break;
3600 
3601 		s = comma + 1;
3602 	}
3603 
3604 	free(dup);
3605 	return 0;
3606 }
3607 
/*
 * Fold one instruction into 'func's checksum: the raw instruction bytes plus
 * a symbolic representation of its relocation target (symbol name + offset,
 * or the referenced string's contents), rather than raw reloc values.
 */
static void checksum_update_insn(struct objtool_file *file, struct symbol *func,
				 struct instruction *insn)
{
	struct reloc *reloc = insn_reloc(file, insn);
	unsigned long offset;
	struct symbol *sym;

	/* Fake instructions have no backing bytes in the section data. */
	if (insn->fake)
		return;

	/* The instruction's own bytes. */
	checksum_update(func, insn, insn->sec->data->d_buf + insn->offset, insn->len);

	if (!reloc) {
		struct symbol *call_dest = insn_call_dest(insn);

		/* No reloc: mix in the direct call destination's name, if any. */
		if (call_dest)
			checksum_update(func, insn, call_dest->demangled_name,
					strlen(call_dest->demangled_name));
		return;
	}

	sym = reloc->sym;
	offset = arch_insn_adjusted_addend(insn, reloc);

	/* String section reference: hash the string contents themselves. */
	if (is_string_sec(sym->sec)) {
		char *str;

		str = sym->sec->data->d_buf + sym->offset + offset;
		checksum_update(func, insn, str, strlen(str));
		return;
	}

	/* Section symbol: resolve to the named symbol containing the target. */
	if (is_sec_sym(sym)) {
		sym = find_symbol_containing(reloc->sym->sec, offset);
		if (!sym)
			return;

		/* Make the offset relative to the resolved symbol. */
		offset -= sym->offset;
	}

	/* Hash the symbolic target: name plus symbol-relative offset. */
	checksum_update(func, insn, sym->demangled_name, strlen(sym->demangled_name));
	checksum_update(func, insn, &offset, sizeof(offset));
}
3651 
3652 /*
3653  * Follow the branch starting at the given instruction, and recursively follow
3654  * any other branches (jumps).  Meanwhile, track the frame pointer state at
3655  * each instruction and validate all the rules described in
3656  * tools/objtool/Documentation/objtool.txt.
3657  */
3658 static int validate_branch(struct objtool_file *file, struct symbol *func,
3659 			   struct instruction *insn, struct insn_state state)
3660 {
3661 	struct alternative *alt;
3662 	struct instruction *next_insn, *prev_insn = NULL;
3663 	u8 visited;
3664 	int ret;
3665 
3666 	if (func && func->ignore)
3667 		return 0;
3668 
3669 	while (1) {
3670 		next_insn = next_insn_to_validate(file, insn);
3671 
3672 		if (opts.checksum && func && insn->sec)
3673 			checksum_update_insn(file, func, insn);
3674 
3675 		if (func && insn_func(insn) && func != insn_func(insn)->pfunc) {
3676 			/* Ignore KCFI type preambles, which always fall through */
3677 			if (is_prefix_func(func))
3678 				return 0;
3679 
3680 			if (file->ignore_unreachables)
3681 				return 0;
3682 
3683 			WARN("%s() falls through to next function %s()",
3684 			     func->name, insn_func(insn)->name);
3685 			func->warned = 1;
3686 
3687 			return 1;
3688 		}
3689 
3690 		visited = VISITED_BRANCH << state.uaccess;
3691 		if (insn->visited & VISITED_BRANCH_MASK) {
3692 			if (!insn->hint && !insn_cfi_match(insn, &state.cfi))
3693 				return 1;
3694 
3695 			if (insn->visited & visited)
3696 				return 0;
3697 		} else {
3698 			nr_insns_visited++;
3699 		}
3700 
3701 		if (state.noinstr)
3702 			state.instr += insn->instr;
3703 
3704 		if (insn->hint) {
3705 			if (insn->restore) {
3706 				struct instruction *save_insn, *i;
3707 
3708 				i = insn;
3709 				save_insn = NULL;
3710 
3711 				sym_for_each_insn_continue_reverse(file, func, i) {
3712 					if (i->save) {
3713 						save_insn = i;
3714 						break;
3715 					}
3716 				}
3717 
3718 				if (!save_insn) {
3719 					WARN_INSN(insn, "no corresponding CFI save for CFI restore");
3720 					return 1;
3721 				}
3722 
3723 				if (!save_insn->visited) {
3724 					/*
3725 					 * If the restore hint insn is at the
3726 					 * beginning of a basic block and was
3727 					 * branched to from elsewhere, and the
3728 					 * save insn hasn't been visited yet,
3729 					 * defer following this branch for now.
3730 					 * It will be seen later via the
3731 					 * straight-line path.
3732 					 */
3733 					if (!prev_insn)
3734 						return 0;
3735 
3736 					WARN_INSN(insn, "objtool isn't smart enough to handle this CFI save/restore combo");
3737 					return 1;
3738 				}
3739 
3740 				insn->cfi = save_insn->cfi;
3741 				nr_cfi_reused++;
3742 			}
3743 
3744 			state.cfi = *insn->cfi;
3745 		} else {
3746 			/* XXX track if we actually changed state.cfi */
3747 
3748 			if (prev_insn && !cficmp(prev_insn->cfi, &state.cfi)) {
3749 				insn->cfi = prev_insn->cfi;
3750 				nr_cfi_reused++;
3751 			} else {
3752 				insn->cfi = cfi_hash_find_or_add(&state.cfi);
3753 			}
3754 		}
3755 
3756 		insn->visited |= visited;
3757 
3758 		if (propagate_alt_cfi(file, insn))
3759 			return 1;
3760 
3761 		if (insn->alts) {
3762 			for (alt = insn->alts; alt; alt = alt->next) {
3763 				ret = validate_branch(file, func, alt->insn, state);
3764 				if (ret) {
3765 					BT_INSN(insn, "(alt)");
3766 					return ret;
3767 				}
3768 			}
3769 		}
3770 
3771 		if (skip_alt_group(insn))
3772 			return 0;
3773 
3774 		if (handle_insn_ops(insn, next_insn, &state))
3775 			return 1;
3776 
3777 		switch (insn->type) {
3778 
3779 		case INSN_RETURN:
3780 			return validate_return(func, insn, &state);
3781 
3782 		case INSN_CALL:
3783 		case INSN_CALL_DYNAMIC:
3784 			ret = validate_call(file, insn, &state);
3785 			if (ret)
3786 				return ret;
3787 
3788 			if (opts.stackval && func && !is_special_call(insn) &&
3789 			    !has_valid_stack_frame(&state)) {
3790 				WARN_INSN(insn, "call without frame pointer save/setup");
3791 				return 1;
3792 			}
3793 
3794 			break;
3795 
3796 		case INSN_JUMP_CONDITIONAL:
3797 		case INSN_JUMP_UNCONDITIONAL:
3798 			if (is_sibling_call(insn)) {
3799 				ret = validate_sibling_call(file, insn, &state);
3800 				if (ret)
3801 					return ret;
3802 
3803 			} else if (insn->jump_dest) {
3804 				ret = validate_branch(file, func,
3805 						      insn->jump_dest, state);
3806 				if (ret) {
3807 					BT_INSN(insn, "(branch)");
3808 					return ret;
3809 				}
3810 			}
3811 
3812 			if (insn->type == INSN_JUMP_UNCONDITIONAL)
3813 				return 0;
3814 
3815 			break;
3816 
3817 		case INSN_JUMP_DYNAMIC:
3818 		case INSN_JUMP_DYNAMIC_CONDITIONAL:
3819 			if (is_sibling_call(insn)) {
3820 				ret = validate_sibling_call(file, insn, &state);
3821 				if (ret)
3822 					return ret;
3823 			}
3824 
3825 			if (insn->type == INSN_JUMP_DYNAMIC)
3826 				return 0;
3827 
3828 			break;
3829 
3830 		case INSN_SYSCALL:
3831 			if (func && (!next_insn || !next_insn->hint)) {
3832 				WARN_INSN(insn, "unsupported instruction in callable function");
3833 				return 1;
3834 			}
3835 
3836 			break;
3837 
3838 		case INSN_SYSRET:
3839 			if (func && (!next_insn || !next_insn->hint)) {
3840 				WARN_INSN(insn, "unsupported instruction in callable function");
3841 				return 1;
3842 			}
3843 
3844 			return 0;
3845 
3846 		case INSN_STAC:
3847 			if (!opts.uaccess)
3848 				break;
3849 
3850 			if (state.uaccess) {
3851 				WARN_INSN(insn, "recursive UACCESS enable");
3852 				return 1;
3853 			}
3854 
3855 			state.uaccess = true;
3856 			break;
3857 
3858 		case INSN_CLAC:
3859 			if (!opts.uaccess)
3860 				break;
3861 
3862 			if (!state.uaccess && func) {
3863 				WARN_INSN(insn, "redundant UACCESS disable");
3864 				return 1;
3865 			}
3866 
3867 			if (func_uaccess_safe(func) && !state.uaccess_stack) {
3868 				WARN_INSN(insn, "UACCESS-safe disables UACCESS");
3869 				return 1;
3870 			}
3871 
3872 			state.uaccess = false;
3873 			break;
3874 
3875 		case INSN_STD:
3876 			if (state.df) {
3877 				WARN_INSN(insn, "recursive STD");
3878 				return 1;
3879 			}
3880 
3881 			state.df = true;
3882 			break;
3883 
3884 		case INSN_CLD:
3885 			if (!state.df && func) {
3886 				WARN_INSN(insn, "redundant CLD");
3887 				return 1;
3888 			}
3889 
3890 			state.df = false;
3891 			break;
3892 
3893 		default:
3894 			break;
3895 		}
3896 
3897 		if (insn->dead_end)
3898 			return 0;
3899 
3900 		if (!next_insn) {
3901 			if (state.cfi.cfa.base == CFI_UNDEFINED)
3902 				return 0;
3903 			if (file->ignore_unreachables)
3904 				return 0;
3905 
3906 			WARN("%s%sunexpected end of section %s",
3907 			     func ? func->name : "", func ? "(): " : "",
3908 			     insn->sec->name);
3909 			return 1;
3910 		}
3911 
3912 		prev_insn = insn;
3913 		insn = next_insn;
3914 	}
3915 
3916 	return 0;
3917 }
3918 
3919 static int validate_unwind_hint(struct objtool_file *file,
3920 				  struct instruction *insn,
3921 				  struct insn_state *state)
3922 {
3923 	if (insn->hint && !insn->visited) {
3924 		struct symbol *func = insn_func(insn);
3925 		int ret;
3926 
3927 		if (opts.checksum)
3928 			checksum_init(func);
3929 
3930 		ret = validate_branch(file, func, insn, *state);
3931 		if (ret)
3932 			BT_INSN(insn, "<=== (hint)");
3933 		return ret;
3934 	}
3935 
3936 	return 0;
3937 }
3938 
3939 static int validate_unwind_hints(struct objtool_file *file, struct section *sec)
3940 {
3941 	struct instruction *insn;
3942 	struct insn_state state;
3943 	int warnings = 0;
3944 
3945 	if (!file->hints)
3946 		return 0;
3947 
3948 	init_insn_state(file, &state, sec);
3949 
3950 	if (sec) {
3951 		sec_for_each_insn(file, sec, insn)
3952 			warnings += validate_unwind_hint(file, insn, &state);
3953 	} else {
3954 		for_each_insn(file, insn)
3955 			warnings += validate_unwind_hint(file, insn, &state);
3956 	}
3957 
3958 	return warnings;
3959 }
3960 
3961 /*
3962  * Validate rethunk entry constraint: must untrain RET before the first RET.
3963  *
3964  * Follow every branch (intra-function) and ensure VALIDATE_UNRET_END comes
3965  * before an actual RET instruction.
3966  */
3967 static int validate_unret(struct objtool_file *file, struct instruction *insn)
3968 {
3969 	struct instruction *next, *dest;
3970 	int ret;
3971 
3972 	for (;;) {
3973 		next = next_insn_to_validate(file, insn);
3974 
3975 		if (insn->visited & VISITED_UNRET)
3976 			return 0;
3977 
3978 		insn->visited |= VISITED_UNRET;
3979 
3980 		if (insn->alts) {
3981 			struct alternative *alt;
3982 			for (alt = insn->alts; alt; alt = alt->next) {
3983 				ret = validate_unret(file, alt->insn);
3984 				if (ret) {
3985 					BT_INSN(insn, "(alt)");
3986 					return ret;
3987 				}
3988 			}
3989 		}
3990 
3991 		switch (insn->type) {
3992 
3993 		case INSN_CALL_DYNAMIC:
3994 		case INSN_JUMP_DYNAMIC:
3995 		case INSN_JUMP_DYNAMIC_CONDITIONAL:
3996 			WARN_INSN(insn, "early indirect call");
3997 			return 1;
3998 
3999 		case INSN_JUMP_UNCONDITIONAL:
4000 		case INSN_JUMP_CONDITIONAL:
4001 			if (!is_sibling_call(insn)) {
4002 				if (!insn->jump_dest) {
4003 					WARN_INSN(insn, "unresolved jump target after linking?!?");
4004 					return 1;
4005 				}
4006 				ret = validate_unret(file, insn->jump_dest);
4007 				if (ret) {
4008 					BT_INSN(insn, "(branch%s)",
4009 						insn->type == INSN_JUMP_CONDITIONAL ? "-cond" : "");
4010 					return ret;
4011 				}
4012 
4013 				if (insn->type == INSN_JUMP_UNCONDITIONAL)
4014 					return 0;
4015 
4016 				break;
4017 			}
4018 
4019 			/* fallthrough */
4020 		case INSN_CALL:
4021 			dest = find_insn(file, insn_call_dest(insn)->sec,
4022 					 insn_call_dest(insn)->offset);
4023 			if (!dest) {
4024 				WARN("Unresolved function after linking!?: %s",
4025 				     insn_call_dest(insn)->name);
4026 				return 1;
4027 			}
4028 
4029 			ret = validate_unret(file, dest);
4030 			if (ret) {
4031 				BT_INSN(insn, "(call)");
4032 				return ret;
4033 			}
4034 			/*
4035 			 * If a call returns without error, it must have seen UNTRAIN_RET.
4036 			 * Therefore any non-error return is a success.
4037 			 */
4038 			return 0;
4039 
4040 		case INSN_RETURN:
4041 			WARN_INSN(insn, "RET before UNTRAIN");
4042 			return 1;
4043 
4044 		case INSN_SYSCALL:
4045 			break;
4046 
4047 		case INSN_SYSRET:
4048 			return 0;
4049 
4050 		case INSN_NOP:
4051 			if (insn->retpoline_safe)
4052 				return 0;
4053 			break;
4054 
4055 		default:
4056 			break;
4057 		}
4058 
4059 		if (insn->dead_end)
4060 			return 0;
4061 
4062 		if (!next) {
4063 			WARN_INSN(insn, "teh end!");
4064 			return 1;
4065 		}
4066 		insn = next;
4067 	}
4068 
4069 	return 0;
4070 }
4071 
4072 /*
4073  * Validate that all branches starting at VALIDATE_UNRET_BEGIN encounter
4074  * VALIDATE_UNRET_END before RET.
4075  */
4076 static int validate_unrets(struct objtool_file *file)
4077 {
4078 	struct instruction *insn;
4079 	int warnings = 0;
4080 
4081 	for_each_insn(file, insn) {
4082 		if (!insn->unret)
4083 			continue;
4084 
4085 		warnings += validate_unret(file, insn);
4086 	}
4087 
4088 	return warnings;
4089 }
4090 
4091 static int validate_retpoline(struct objtool_file *file)
4092 {
4093 	struct instruction *insn;
4094 	int warnings = 0;
4095 
4096 	for_each_insn(file, insn) {
4097 		if (insn->type != INSN_JUMP_DYNAMIC &&
4098 		    insn->type != INSN_CALL_DYNAMIC &&
4099 		    insn->type != INSN_RETURN)
4100 			continue;
4101 
4102 		if (insn->retpoline_safe)
4103 			continue;
4104 
4105 		if (insn->sec->init)
4106 			continue;
4107 
4108 		if (insn->type == INSN_RETURN) {
4109 			if (opts.rethunk) {
4110 				WARN_INSN(insn, "'naked' return found in MITIGATION_RETHUNK build");
4111 				warnings++;
4112 			}
4113 			continue;
4114 		}
4115 
4116 		WARN_INSN(insn, "indirect %s found in MITIGATION_RETPOLINE build",
4117 			  insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
4118 		warnings++;
4119 	}
4120 
4121 	if (!opts.cfi)
4122 		return warnings;
4123 
4124 	/*
4125 	 * kCFI call sites look like:
4126 	 *
4127 	 *     movl $(-0x12345678), %r10d
4128 	 *     addl -4(%r11), %r10d
4129 	 *     jz 1f
4130 	 *     ud2
4131 	 *  1: cs call __x86_indirect_thunk_r11
4132 	 *
4133 	 * Verify all indirect calls are kCFI adorned by checking for the
4134 	 * UD2. Notably, doing __nocfi calls to regular (cfi) functions is
4135 	 * broken.
4136 	 */
4137 	list_for_each_entry(insn, &file->retpoline_call_list, call_node) {
4138 		struct symbol *sym = insn->sym;
4139 
4140 		if (sym && (sym->type == STT_NOTYPE ||
4141 			    sym->type == STT_FUNC) && !sym->nocfi) {
4142 			struct instruction *prev =
4143 				prev_insn_same_sym(file, insn);
4144 
4145 			if (!prev || prev->type != INSN_BUG) {
4146 				WARN_INSN(insn, "no-cfi indirect call!");
4147 				warnings++;
4148 			}
4149 		}
4150 	}
4151 
4152 	return warnings;
4153 }
4154 
4155 static bool is_kasan_insn(struct instruction *insn)
4156 {
4157 	return (insn->type == INSN_CALL &&
4158 		!strcmp(insn_call_dest(insn)->name, "__asan_handle_no_return"));
4159 }
4160 
4161 static bool is_ubsan_insn(struct instruction *insn)
4162 {
4163 	return (insn->type == INSN_CALL &&
4164 		!strcmp(insn_call_dest(insn)->name,
4165 			"__ubsan_handle_builtin_unreachable"));
4166 }
4167 
/*
 * Decide whether an instruction that was never reached by validate_branch()
 * is legitimately unreachable (padding, instrumentation artifacts, etc.) and
 * should not trigger an "unreachable instruction" warning.
 */
static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn)
{
	struct symbol *func = insn_func(insn);
	struct instruction *prev_insn;
	int i;

	/* NOP/trap padding, section holes, and explicitly-ignored functions. */
	if (insn->type == INSN_NOP || insn->type == INSN_TRAP ||
	    insn->hole || (func && func->ignore))
		return true;

	/*
	 * Ignore alternative replacement instructions.  This can happen
	 * when a whitelisted function uses one of the ALTERNATIVE macros.
	 */
	if (!strcmp(insn->sec->name, ".altinstr_replacement") ||
	    !strcmp(insn->sec->name, ".altinstr_aux"))
		return true;

	/* Code outside any function doesn't get the heuristics below. */
	if (!func)
		return false;

	if (func->static_call_tramp)
		return true;

	/*
	 * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
	 * __builtin_unreachable().  The BUG() macro has an unreachable() after
	 * the UD2, which causes GCC's undefined trap logic to emit another UD2
	 * (or occasionally a JMP to UD2).
	 *
	 * It may also insert a UD2 after calling a __noreturn function.
	 */
	prev_insn = prev_insn_same_sec(file, insn);
	if (prev_insn && prev_insn->dead_end &&
	    (insn->type == INSN_BUG ||
	     (insn->type == INSN_JUMP_UNCONDITIONAL &&
	      insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
		return true;

	/*
	 * Check if this (or a subsequent) instruction is related to
	 * CONFIG_UBSAN or CONFIG_KASAN.
	 *
	 * End the search at 5 instructions to avoid going into the weeds.
	 */
	for (i = 0; i < 5; i++) {

		if (is_kasan_insn(insn) || is_ubsan_insn(insn))
			return true;

		if (insn->type == INSN_JUMP_UNCONDITIONAL) {
			/* Follow intra-function jumps only. */
			if (insn->jump_dest &&
			    insn_func(insn->jump_dest) == func) {
				insn = insn->jump_dest;
				continue;
			}

			break;
		}

		/* Don't walk past the end of the function. */
		if (insn->offset + insn->len >= func->offset + func->len)
			break;

		insn = next_insn_same_sec(file, insn);
	}

	return false;
}
4236 
4237 /*
4238  * For FineIBT or kCFI, a certain number of bytes preceding the function may be
4239  * NOPs.  Those NOPs may be rewritten at runtime and executed, so give them a
4240  * proper function name: __pfx_<func>.
4241  *
4242  * The NOPs may not exist for the following cases:
4243  *
4244  *   - compiler cloned functions (*.cold, *.part0, etc)
4245  *   - asm functions created with inline asm or without SYM_FUNC_START()
4246  *
4247  * So return 0 if the NOPs are missing or the function already has a prefix
4248  * symbol.
4249  */
4250 static int create_prefix_symbol(struct objtool_file *file, struct symbol *func)
4251 {
4252 	struct instruction *insn, *prev;
4253 	char name[SYM_NAME_LEN];
4254 	struct cfi_state *cfi;
4255 
4256 	if (!is_func_sym(func) || is_prefix_func(func) ||
4257 	    func->cold || func->static_call_tramp)
4258 		return 0;
4259 
4260 	if ((strlen(func->name) + sizeof("__pfx_") > SYM_NAME_LEN)) {
4261 		WARN("%s: symbol name too long, can't create __pfx_ symbol",
4262 		      func->name);
4263 		return 0;
4264 	}
4265 
4266 	if (snprintf_check(name, SYM_NAME_LEN, "__pfx_%s", func->name))
4267 		return -1;
4268 
4269 	insn = find_insn(file, func->sec, func->offset);
4270 	if (!insn) {
4271 		WARN("%s: can't find starting instruction", func->name);
4272 		return -1;
4273 	}
4274 
4275 	for (prev = prev_insn_same_sec(file, insn);
4276 	     prev;
4277 	     prev = prev_insn_same_sec(file, prev)) {
4278 		u64 offset;
4279 
4280 		if (prev->type != INSN_NOP)
4281 			return 0;
4282 
4283 		offset = func->offset - prev->offset;
4284 
4285 		if (offset > opts.prefix)
4286 			return 0;
4287 
4288 		if (offset < opts.prefix)
4289 			continue;
4290 
4291 		if (!elf_create_symbol(file->elf, name, func->sec,
4292 				       GELF_ST_BIND(func->sym.st_info),
4293 				       GELF_ST_TYPE(func->sym.st_info),
4294 				       prev->offset, opts.prefix))
4295 			return -1;
4296 
4297 		break;
4298 	}
4299 
4300 	if (!prev)
4301 		return 0;
4302 
4303 	if (!insn->cfi) {
4304 		/*
4305 		 * This can happen if stack validation isn't enabled or the
4306 		 * function is annotated with STACK_FRAME_NON_STANDARD.
4307 		 */
4308 		return 0;
4309 	}
4310 
4311 	/* Propagate insn->cfi to the prefix code */
4312 	cfi = cfi_hash_find_or_add(insn->cfi);
4313 	for (; prev != insn; prev = next_insn_same_sec(file, prev))
4314 		prev->cfi = cfi;
4315 
4316 	return 0;
4317 }
4318 
4319 static int create_prefix_symbols(struct objtool_file *file)
4320 {
4321 	struct section *sec;
4322 	struct symbol *func;
4323 
4324 	for_each_sec(file->elf, sec) {
4325 		if (!is_text_sec(sec))
4326 			continue;
4327 
4328 		sec_for_each_sym(sec, func) {
4329 			if (create_prefix_symbol(file, func))
4330 				return -1;
4331 		}
4332 	}
4333 
4334 	return 0;
4335 }
4336 
/*
 * Validate a single function symbol: walk all its branches from the entry
 * point, bracketed by checksum bookkeeping when --checksum is enabled.
 */
static int validate_symbol(struct objtool_file *file, struct section *sec,
			   struct symbol *sym, struct insn_state *state)
{
	struct instruction *insn;
	struct symbol *func;
	int ret;

	/* A zero-length symbol has no instructions to walk. */
	if (!sym->len) {
		WARN("%s() is missing an ELF size annotation", sym->name);
		return 1;
	}

	/* Only validate canonical symbols, not aliases or sub-functions. */
	if (sym->pfunc != sym || sym->alias != sym)
		return 0;

	insn = find_insn(file, sec, sym->offset);
	if (!insn || insn->visited)
		return 0;

	/* UACCESS-safe functions start with uaccess considered enabled. */
	if (opts.uaccess)
		state->uaccess = sym->uaccess_safe;

	func = insn_func(insn);

	if (opts.checksum)
		checksum_init(func);

	ret = validate_branch(file, func, insn, *state);
	if (ret)
		BT_INSN(insn, "<=== (sym)");

	if (opts.checksum)
		checksum_finish(func);

	return ret;
}
4373 
4374 static int validate_section(struct objtool_file *file, struct section *sec)
4375 {
4376 	struct insn_state state;
4377 	struct symbol *func;
4378 	int warnings = 0;
4379 
4380 	sec_for_each_sym(sec, func) {
4381 		if (!is_func_sym(func))
4382 			continue;
4383 
4384 		init_insn_state(file, &state, sec);
4385 		set_func_state(&state.cfi);
4386 
4387 		warnings += validate_symbol(file, sec, func, &state);
4388 	}
4389 
4390 	return warnings;
4391 }
4392 
4393 static int validate_noinstr_sections(struct objtool_file *file)
4394 {
4395 	struct section *sec;
4396 	int warnings = 0;
4397 
4398 	sec = find_section_by_name(file->elf, ".noinstr.text");
4399 	if (sec) {
4400 		warnings += validate_section(file, sec);
4401 		warnings += validate_unwind_hints(file, sec);
4402 	}
4403 
4404 	sec = find_section_by_name(file->elf, ".entry.text");
4405 	if (sec) {
4406 		warnings += validate_section(file, sec);
4407 		warnings += validate_unwind_hints(file, sec);
4408 	}
4409 
4410 	sec = find_section_by_name(file->elf, ".cpuidle.text");
4411 	if (sec) {
4412 		warnings += validate_section(file, sec);
4413 		warnings += validate_unwind_hints(file, sec);
4414 	}
4415 
4416 	return warnings;
4417 }
4418 
4419 static int validate_functions(struct objtool_file *file)
4420 {
4421 	struct section *sec;
4422 	int warnings = 0;
4423 
4424 	for_each_sec(file->elf, sec) {
4425 		if (!is_text_sec(sec))
4426 			continue;
4427 
4428 		warnings += validate_section(file, sec);
4429 	}
4430 
4431 	return warnings;
4432 }
4433 
4434 static void mark_endbr_used(struct instruction *insn)
4435 {
4436 	if (!list_empty(&insn->call_node))
4437 		list_del_init(&insn->call_node);
4438 }
4439 
4440 static bool noendbr_range(struct objtool_file *file, struct instruction *insn)
4441 {
4442 	struct symbol *sym = find_symbol_containing(insn->sec, insn->offset-1);
4443 	struct instruction *first;
4444 
4445 	if (!sym)
4446 		return false;
4447 
4448 	first = find_insn(file, sym->sec, sym->offset);
4449 	if (!first)
4450 		return false;
4451 
4452 	if (first->type != INSN_ENDBR && !first->noendbr)
4453 		return false;
4454 
4455 	return insn->offset == sym->offset + sym->len;
4456 }
4457 
4458 static int __validate_ibt_insn(struct objtool_file *file, struct instruction *insn,
4459 			       struct instruction *dest)
4460 {
4461 	if (dest->type == INSN_ENDBR) {
4462 		mark_endbr_used(dest);
4463 		return 0;
4464 	}
4465 
4466 	if (insn_func(dest) && insn_func(insn) &&
4467 	    insn_func(dest)->pfunc == insn_func(insn)->pfunc) {
4468 		/*
4469 		 * Anything from->to self is either _THIS_IP_ or
4470 		 * IRET-to-self.
4471 		 *
4472 		 * There is no sane way to annotate _THIS_IP_ since the
4473 		 * compiler treats the relocation as a constant and is
4474 		 * happy to fold in offsets, skewing any annotation we
4475 		 * do, leading to vast amounts of false-positives.
4476 		 *
4477 		 * There's also compiler generated _THIS_IP_ through
4478 		 * KCOV and such which we have no hope of annotating.
4479 		 *
4480 		 * As such, blanket accept self-references without
4481 		 * issue.
4482 		 */
4483 		return 0;
4484 	}
4485 
4486 	/*
4487 	 * Accept anything ANNOTATE_NOENDBR.
4488 	 */
4489 	if (dest->noendbr)
4490 		return 0;
4491 
4492 	/*
4493 	 * Accept if this is the instruction after a symbol
4494 	 * that is (no)endbr -- typical code-range usage.
4495 	 */
4496 	if (noendbr_range(file, dest))
4497 		return 0;
4498 
4499 	WARN_INSN(insn, "relocation to !ENDBR: %s", offstr(dest->sec, dest->offset));
4500 	return 1;
4501 }
4502 
/*
 * Check every code reference made by one instruction against the IBT rules
 * (via __validate_ibt_insn()).  Returns the number of warnings issued.
 */
static int validate_ibt_insn(struct objtool_file *file, struct instruction *insn)
{
	struct instruction *dest;
	struct reloc *reloc;
	unsigned long off;
	int warnings = 0;

	/*
	 * Looking for function pointer load relocations.  Ignore
	 * direct/indirect branches:
	 */
	switch (insn->type) {

	case INSN_CALL:
	case INSN_CALL_DYNAMIC:
	case INSN_JUMP_CONDITIONAL:
	case INSN_JUMP_UNCONDITIONAL:
	case INSN_JUMP_DYNAMIC:
	case INSN_JUMP_DYNAMIC_CONDITIONAL:
	case INSN_RETURN:
	case INSN_NOP:
		return 0;

	case INSN_LEA_RIP:
		if (!insn_reloc(file, insn)) {
			/* local function pointer reference without reloc */

			off = arch_jump_destination(insn);

			dest = find_insn(file, insn->sec, off);
			if (!dest) {
				WARN_INSN(insn, "corrupt function pointer reference");
				return 1;
			}

			return __validate_ibt_insn(file, insn, dest);
		}
		break;

	default:
		break;
	}

	/*
	 * An instruction may carry several relocations; each lookup resumes
	 * one byte past the previous reloc, within the insn's byte range.
	 */
	for (reloc = insn_reloc(file, insn);
	     reloc;
	     reloc = find_reloc_by_dest_range(file->elf, insn->sec,
					      reloc_offset(reloc) + 1,
					      (insn->offset + insn->len) - (reloc_offset(reloc) + 1))) {

		off = reloc->sym->offset + arch_insn_adjusted_addend(insn, reloc);

		/* Relocations into non-code targets are not our concern. */
		dest = find_insn(file, reloc->sym->sec, off);
		if (!dest)
			continue;

		warnings += __validate_ibt_insn(file, insn, dest);
	}

	return warnings;
}
4563 
4564 static int validate_ibt_data_reloc(struct objtool_file *file,
4565 				   struct reloc *reloc)
4566 {
4567 	struct instruction *dest;
4568 
4569 	dest = find_insn(file, reloc->sym->sec,
4570 			 reloc->sym->offset + reloc_addend(reloc));
4571 	if (!dest)
4572 		return 0;
4573 
4574 	if (dest->type == INSN_ENDBR) {
4575 		mark_endbr_used(dest);
4576 		return 0;
4577 	}
4578 
4579 	if (dest->noendbr)
4580 		return 0;
4581 
4582 	WARN_FUNC(reloc->sec->base, reloc_offset(reloc),
4583 		  "data relocation to !ENDBR: %s", offstr(dest->sec, dest->offset));
4584 
4585 	return 1;
4586 }
4587 
4588 /*
4589  * Validate IBT rules and remove used ENDBR instructions from the seal list.
4590  * Unused ENDBR instructions will be annotated for sealing (i.e., replaced with
4591  * NOPs) later, in create_ibt_endbr_seal_sections().
4592  */
static int validate_ibt(struct objtool_file *file)
{
	struct section *sec;
	struct reloc *reloc;
	struct instruction *insn;
	int warnings = 0;

	/* Pass 1: text-to-text references, one instruction at a time. */
	for_each_insn(file, insn)
		warnings += validate_ibt_insn(file, insn);

	/* Pass 2: data-to-text references via section relocations. */
	for_each_sec(file->elf, sec) {

		/* Already done by validate_ibt_insn() */
		if (is_text_sec(sec))
			continue;

		/* No relocations in this section, nothing to check. */
		if (!sec->rsec)
			continue;

		/*
		 * These sections can reference text addresses, but not with
		 * the intent to indirect branch to them.
		 *
		 * Note the mixed matching: ".discard" and ".debug" are
		 * prefix matches (strncmp), ".discard.ibt_endbr_noseal" is
		 * carved back out as an exact exception, and
		 * "__patchable_function_entries" is a substring match
		 * (strstr) because it may appear with section suffixes.
		 */
		if ((!strncmp(sec->name, ".discard", 8) &&
		     strcmp(sec->name, ".discard.ibt_endbr_noseal"))	||
		    !strncmp(sec->name, ".debug", 6)			||
		    !strcmp(sec->name, ".altinstructions")		||
		    !strcmp(sec->name, ".ibt_endbr_seal")		||
		    !strcmp(sec->name, ".orc_unwind_ip")		||
		    !strcmp(sec->name, ".retpoline_sites")		||
		    !strcmp(sec->name, ".smp_locks")			||
		    !strcmp(sec->name, ".static_call_sites")		||
		    !strcmp(sec->name, "_error_injection_whitelist")	||
		    !strcmp(sec->name, "_kprobe_blacklist")		||
		    !strcmp(sec->name, "__bug_table")			||
		    !strcmp(sec->name, "__ex_table")			||
		    !strcmp(sec->name, "__jump_table")			||
		    !strcmp(sec->name, "__mcount_loc")			||
		    !strcmp(sec->name, ".kcfi_traps")			||
		    !strcmp(sec->name, ".llvm.call-graph-profile")	||
		    !strcmp(sec->name, ".llvm_bb_addr_map")		||
		    !strcmp(sec->name, "__tracepoints")			||
		    strstr(sec->name, "__patchable_function_entries"))
			continue;

		for_each_reloc(sec->rsec, reloc)
			warnings += validate_ibt_data_reloc(file, reloc);
	}

	return warnings;
}
4644 
4645 static int validate_sls(struct objtool_file *file)
4646 {
4647 	struct instruction *insn, *next_insn;
4648 	int warnings = 0;
4649 
4650 	for_each_insn(file, insn) {
4651 		next_insn = next_insn_same_sec(file, insn);
4652 
4653 		if (insn->retpoline_safe)
4654 			continue;
4655 
4656 		switch (insn->type) {
4657 		case INSN_RETURN:
4658 			if (!next_insn || next_insn->type != INSN_TRAP) {
4659 				WARN_INSN(insn, "missing int3 after ret");
4660 				warnings++;
4661 			}
4662 
4663 			break;
4664 		case INSN_JUMP_DYNAMIC:
4665 			if (!next_insn || next_insn->type != INSN_TRAP) {
4666 				WARN_INSN(insn, "missing int3 after indirect jump");
4667 				warnings++;
4668 			}
4669 			break;
4670 		default:
4671 			break;
4672 		}
4673 	}
4674 
4675 	return warnings;
4676 }
4677 
4678 static int validate_reachable_instructions(struct objtool_file *file)
4679 {
4680 	struct instruction *insn, *prev_insn;
4681 	struct symbol *call_dest;
4682 	int warnings = 0;
4683 
4684 	if (file->ignore_unreachables)
4685 		return 0;
4686 
4687 	for_each_insn(file, insn) {
4688 		if (insn->visited || ignore_unreachable_insn(file, insn))
4689 			continue;
4690 
4691 		prev_insn = prev_insn_same_sec(file, insn);
4692 		if (prev_insn && prev_insn->dead_end) {
4693 			call_dest = insn_call_dest(prev_insn);
4694 			if (call_dest) {
4695 				WARN_INSN(insn, "%s() missing __noreturn in .c/.h or NORETURN() in noreturns.h",
4696 					  call_dest->name);
4697 				warnings++;
4698 				continue;
4699 			}
4700 		}
4701 
4702 		WARN_INSN(insn, "unreachable instruction");
4703 		warnings++;
4704 	}
4705 
4706 	return warnings;
4707 }
4708 
4709 /* 'funcs' is a space-separated list of function names */
4710 static void disas_funcs(const char *funcs)
4711 {
4712 	const char *objdump_str, *cross_compile;
4713 	int size, ret;
4714 	char *cmd;
4715 
4716 	cross_compile = getenv("CROSS_COMPILE");
4717 	if (!cross_compile)
4718 		cross_compile = "";
4719 
4720 	objdump_str = "%sobjdump -wdr %s | gawk -M -v _funcs='%s' '"
4721 			"BEGIN { split(_funcs, funcs); }"
4722 			"/^$/ { func_match = 0; }"
4723 			"/<.*>:/ { "
4724 				"f = gensub(/.*<(.*)>:/, \"\\\\1\", 1);"
4725 				"for (i in funcs) {"
4726 					"if (funcs[i] == f) {"
4727 						"func_match = 1;"
4728 						"base = strtonum(\"0x\" $1);"
4729 						"break;"
4730 					"}"
4731 				"}"
4732 			"}"
4733 			"{"
4734 				"if (func_match) {"
4735 					"addr = strtonum(\"0x\" $1);"
4736 					"printf(\"%%04x \", addr - base);"
4737 					"print;"
4738 				"}"
4739 			"}' 1>&2";
4740 
4741 	/* fake snprintf() to calculate the size */
4742 	size = snprintf(NULL, 0, objdump_str, cross_compile, objname, funcs) + 1;
4743 	if (size <= 0) {
4744 		WARN("objdump string size calculation failed");
4745 		return;
4746 	}
4747 
4748 	cmd = malloc(size);
4749 
4750 	/* real snprintf() */
4751 	snprintf(cmd, size, objdump_str, cross_compile, objname, funcs);
4752 	ret = system(cmd);
4753 	if (ret) {
4754 		WARN("disassembly failed: %d", ret);
4755 		return;
4756 	}
4757 }
4758 
4759 static void disas_warned_funcs(struct objtool_file *file)
4760 {
4761 	struct symbol *sym;
4762 	char *funcs = NULL, *tmp;
4763 
4764 	for_each_sym(file->elf, sym) {
4765 		if (sym->warned) {
4766 			if (!funcs) {
4767 				funcs = malloc(strlen(sym->name) + 1);
4768 				if (!funcs) {
4769 					ERROR_GLIBC("malloc");
4770 					return;
4771 				}
4772 				strcpy(funcs, sym->name);
4773 			} else {
4774 				tmp = malloc(strlen(funcs) + strlen(sym->name) + 2);
4775 				if (!tmp) {
4776 					ERROR_GLIBC("malloc");
4777 					return;
4778 				}
4779 				sprintf(tmp, "%s %s", funcs, sym->name);
4780 				free(funcs);
4781 				funcs = tmp;
4782 			}
4783 		}
4784 	}
4785 
4786 	if (funcs)
4787 		disas_funcs(funcs);
4788 }
4789 
4790 __weak bool arch_absolute_reloc(struct elf *elf, struct reloc *reloc)
4791 {
4792 	unsigned int type = reloc_type(reloc);
4793 	size_t sz = elf_addr_size(elf);
4794 
4795 	return (sz == 8) ? (type == R_ABS64) : (type == R_ABS32);
4796 }
4797 
4798 static int check_abs_references(struct objtool_file *file)
4799 {
4800 	struct section *sec;
4801 	struct reloc *reloc;
4802 	int ret = 0;
4803 
4804 	for_each_sec(file->elf, sec) {
4805 		/* absolute references in non-loadable sections are fine */
4806 		if (!(sec->sh.sh_flags & SHF_ALLOC))
4807 			continue;
4808 
4809 		/* section must have an associated .rela section */
4810 		if (!sec->rsec)
4811 			continue;
4812 
4813 		/*
4814 		 * Special case for compiler generated metadata that is not
4815 		 * consumed until after boot.
4816 		 */
4817 		if (!strcmp(sec->name, "__patchable_function_entries"))
4818 			continue;
4819 
4820 		for_each_reloc(sec->rsec, reloc) {
4821 			if (arch_absolute_reloc(file->elf, reloc)) {
4822 				WARN("section %s has absolute relocation at offset 0x%lx",
4823 				     sec->name, reloc_offset(reloc));
4824 				ret++;
4825 			}
4826 		}
4827 	}
4828 	return ret;
4829 }
4830 
/*
 * Bookkeeping node used by free_insns(): remembers the base address of one
 * contiguous allocation of decoded instructions so it can be freed as a unit.
 */
struct insn_chunk {
	void *addr;			/* start of an instruction chunk allocation */
	struct insn_chunk *next;	/* singly-linked list of chunks */
};
4835 
4836 /*
4837  * Reduce peak RSS usage by freeing insns memory before writing the ELF file,
4838  * which can trigger more allocations for .debug_* sections whose data hasn't
4839  * been read yet.
4840  */
4841 static void free_insns(struct objtool_file *file)
4842 {
4843 	struct instruction *insn;
4844 	struct insn_chunk *chunks = NULL, *chunk;
4845 
4846 	for_each_insn(file, insn) {
4847 		if (!insn->idx) {
4848 			chunk = malloc(sizeof(*chunk));
4849 			chunk->addr = insn;
4850 			chunk->next = chunks;
4851 			chunks = chunk;
4852 		}
4853 	}
4854 
4855 	for (chunk = chunks; chunk; chunk = chunk->next)
4856 		free(chunk->addr);
4857 }
4858 
/*
 * Main objtool entry point for a single object file: set up CFI state,
 * decode all sections, run the enabled validation passes, then emit the
 * requested annotation sections.  Pass ordering below is significant (e.g.
 * validate_unrets() must follow validate_branch() users).
 *
 * Returns 0 on success, non-zero on error or (with --werror) on warnings.
 */
int check(struct objtool_file *file)
{
	int ret = 0, warnings = 0;

	/* Baseline CFI states shared by all functions. */
	arch_initial_func_cfi_state(&initial_func_cfi);
	init_cfi_state(&init_cfi);
	init_cfi_state(&func_cfi);
	set_func_state(&func_cfi);
	init_cfi_state(&force_undefined_cfi);
	force_undefined_cfi.force_undefined = true;

	/* Size the CFI hash relative to the symbol count. */
	if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3))) {
		ret = -1;
		goto out;
	}

	cfi_hash_add(&init_cfi);
	cfi_hash_add(&func_cfi);

	ret = checksum_debug_init(file);
	if (ret)
		goto out;

	ret = decode_sections(file);
	if (ret)
		goto out;

	/* Nothing decoded: no validation or annotation to do. */
	if (!nr_insns)
		goto out;

	if (opts.retpoline)
		warnings += validate_retpoline(file);

	if (validate_branch_enabled()) {
		int w = 0;

		w += validate_functions(file);
		w += validate_unwind_hints(file, NULL);
		/* Reachability only makes sense if validation was clean. */
		if (!w)
			w += validate_reachable_instructions(file);

		warnings += w;

	} else if (opts.noinstr) {
		warnings += validate_noinstr_sections(file);
	}

	if (opts.unret) {
		/*
		 * Must be after validate_branch() and friends, it plays
		 * further games with insn->visited.
		 */
		warnings += validate_unrets(file);
	}

	if (opts.ibt)
		warnings += validate_ibt(file);

	if (opts.sls)
		warnings += validate_sls(file);

	/* Validation done; now generate the requested annotation sections. */
	if (opts.static_call) {
		ret = create_static_call_sections(file);
		if (ret)
			goto out;
	}

	if (opts.retpoline) {
		ret = create_retpoline_sites_sections(file);
		if (ret)
			goto out;
	}

	if (opts.cfi) {
		ret = create_cfi_sections(file);
		if (ret)
			goto out;
	}

	if (opts.rethunk) {
		ret = create_return_sites_sections(file);
		if (ret)
			goto out;

		if (opts.hack_skylake) {
			ret = create_direct_call_sections(file);
			if (ret)
				goto out;
		}
	}

	if (opts.mcount) {
		ret = create_mcount_loc_sections(file);
		if (ret)
			goto out;
	}

	if (opts.prefix) {
		ret = create_prefix_symbols(file);
		if (ret)
			goto out;
	}

	if (opts.ibt) {
		/* Must follow validate_ibt(), which marks used ENDBRs. */
		ret = create_ibt_endbr_seal_sections(file);
		if (ret)
			goto out;
	}

	if (opts.noabs)
		warnings += check_abs_references(file);

	if (opts.checksum) {
		ret = create_sym_checksum_section(file);
		if (ret)
			goto out;
	}

	if (opts.orc && nr_insns) {
		ret = orc_create(file);
		if (ret)
			goto out;
	}

	/* Drop insn memory before the ELF write to reduce peak RSS. */
	free_insns(file);

	if (opts.stats) {
		printf("nr_insns_visited: %ld\n", nr_insns_visited);
		printf("nr_cfi: %ld\n", nr_cfi);
		printf("nr_cfi_reused: %ld\n", nr_cfi_reused);
		printf("nr_cfi_cache: %ld\n", nr_cfi_cache);
	}

out:
	if (!ret && !warnings)
		return 0;

	if (opts.werror && warnings)
		ret = 1;

	if (opts.verbose) {
		if (opts.werror && warnings)
			WARN("%d warning(s) upgraded to errors", warnings);
		disas_warned_funcs(file);
	}

	if (opts.backup && make_backup())
		return 1;

	return ret;
}
5010