xref: /linux/tools/objtool/check.c (revision 5d859dff266f7e57664dc6bcf80ef2c66547c58a)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
4  */
5 
6 #define _GNU_SOURCE /* memmem() */
7 #include <string.h>
8 #include <stdlib.h>
9 #include <inttypes.h>
10 #include <sys/mman.h>
11 
12 #include <objtool/builtin.h>
13 #include <objtool/cfi.h>
14 #include <objtool/arch.h>
15 #include <objtool/disas.h>
16 #include <objtool/check.h>
17 #include <objtool/special.h>
18 #include <objtool/warn.h>
19 #include <objtool/checksum.h>
20 #include <objtool/util.h>
21 
22 #include <linux/objtool_types.h>
23 #include <linux/hashtable.h>
24 #include <linux/kernel.h>
25 #include <linux/static_call_types.h>
26 #include <linux/string.h>
27 
/*
 * Singly-linked list node tying one replacement instruction to the
 * alternative group being validated.
 */
struct alternative {
	struct alternative *next;	/* next alternative in the list */
	struct instruction *insn;	/* replacement instruction */
};
32 
/* CFI state allocation/reuse counters, reported with --stats. */
static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache;

/* Arch-provided register state at function entry (see arch decoder). */
static struct cfi_init_state initial_func_cfi;
/* Pre-built CFI states shared across instructions during validation. */
static struct cfi_state init_cfi;
static struct cfi_state func_cfi;
static struct cfi_state force_undefined_cfi;
39 
40 struct instruction *find_insn(struct objtool_file *file,
41 			      struct section *sec, unsigned long offset)
42 {
43 	struct instruction *insn;
44 
45 	hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) {
46 		if (insn->sec == sec && insn->offset == offset)
47 			return insn;
48 	}
49 
50 	return NULL;
51 }
52 
53 struct instruction *next_insn_same_sec(struct objtool_file *file,
54 				       struct instruction *insn)
55 {
56 	if (insn->idx == INSN_CHUNK_MAX)
57 		return find_insn(file, insn->sec, insn->offset + insn->len);
58 
59 	insn++;
60 	if (!insn->len)
61 		return NULL;
62 
63 	return insn;
64 }
65 
66 static struct instruction *next_insn_same_func(struct objtool_file *file,
67 					       struct instruction *insn)
68 {
69 	struct instruction *next = next_insn_same_sec(file, insn);
70 	struct symbol *func = insn_func(insn);
71 
72 	if (!func)
73 		return NULL;
74 
75 	if (next && insn_func(next) == func)
76 		return next;
77 
78 	/* Check if we're already in the subfunction: */
79 	if (func == func->cfunc)
80 		return NULL;
81 
82 	/* Move to the subfunction: */
83 	return find_insn(file, func->cfunc->sec, func->cfunc->offset);
84 }
85 
86 static struct instruction *prev_insn_same_sec(struct objtool_file *file,
87 					      struct instruction *insn)
88 {
89 	if (insn->idx == 0) {
90 		if (insn->prev_len)
91 			return find_insn(file, insn->sec, insn->offset - insn->prev_len);
92 		return NULL;
93 	}
94 
95 	return insn - 1;
96 }
97 
98 static struct instruction *prev_insn_same_sym(struct objtool_file *file,
99 					      struct instruction *insn)
100 {
101 	struct instruction *prev = prev_insn_same_sec(file, insn);
102 
103 	if (prev && insn_func(prev) == insn_func(insn))
104 		return prev;
105 
106 	return NULL;
107 }
108 
/*
 * Iterate over every decoded instruction in the file, section by section.
 * The one-shot '__fake' outer loop only exists to give '__sec' a scope.
 */
#define for_each_insn(file, insn)					\
	for (struct section *__sec, *__fake = (struct section *)1;	\
	     __fake; __fake = NULL)					\
		for_each_sec(file->elf, __sec)				\
			sec_for_each_insn(file, __sec, insn)

/* Iterate a function's instructions, following into its .cold subfunction. */
#define func_for_each_insn(file, func, insn)				\
	for (insn = find_insn(file, func->sec, func->offset);		\
	     insn;							\
	     insn = next_insn_same_func(file, insn))

/* Iterate the instructions covered by a symbol's [offset, offset+len) range. */
#define sym_for_each_insn(file, sym, insn)				\
	for (insn = find_insn(file, sym->sec, sym->offset);		\
	     insn && insn->offset < sym->offset + sym->len;		\
	     insn = next_insn_same_sec(file, insn))

/* Walk backwards from just before 'insn' down to the symbol's start. */
#define sym_for_each_insn_continue_reverse(file, sym, insn)		\
	for (insn = prev_insn_same_sec(file, insn);			\
	     insn && insn->offset >= sym->offset;			\
	     insn = prev_insn_same_sec(file, insn))

/* Iterate from 'insn' (inclusive) to the end of its section. */
#define sec_for_each_insn_from(file, insn)				\
	for (; insn; insn = next_insn_same_sec(file, insn))

/* Iterate from the instruction after 'insn' to the end of its section. */
#define sec_for_each_insn_continue(file, insn)				\
	for (insn = next_insn_same_sec(file, insn); insn;		\
	     insn = next_insn_same_sec(file, insn))
136 
137 static inline struct reloc *insn_jump_table(struct instruction *insn)
138 {
139 	if (insn->type == INSN_JUMP_DYNAMIC ||
140 	    insn->type == INSN_CALL_DYNAMIC)
141 		return insn->_jump_table;
142 
143 	return NULL;
144 }
145 
146 static inline unsigned long insn_jump_table_size(struct instruction *insn)
147 {
148 	if (insn->type == INSN_JUMP_DYNAMIC ||
149 	    insn->type == INSN_CALL_DYNAMIC)
150 		return insn->_jump_table_size;
151 
152 	return 0;
153 }
154 
155 static bool is_jump_table_jump(struct instruction *insn)
156 {
157 	struct alt_group *alt_group = insn->alt_group;
158 
159 	if (insn_jump_table(insn))
160 		return true;
161 
162 	/* Retpoline alternative for a jump table? */
163 	return alt_group && alt_group->orig_group &&
164 	       insn_jump_table(alt_group->orig_group->first_insn);
165 }
166 
167 static bool is_sibling_call(struct instruction *insn)
168 {
169 	/*
170 	 * Assume only STT_FUNC calls have jump-tables.
171 	 */
172 	if (insn_func(insn)) {
173 		/* An indirect jump is either a sibling call or a jump to a table. */
174 		if (insn->type == INSN_JUMP_DYNAMIC)
175 			return !is_jump_table_jump(insn);
176 	}
177 
178 	/* add_jump_destinations() sets insn_call_dest(insn) for sibling calls. */
179 	return (is_static_jump(insn) && insn_call_dest(insn));
180 }
181 
182 /*
183  * Checks if a function is a Rust "noreturn" one.
184  */
185 static bool is_rust_noreturn(const struct symbol *func)
186 {
187 	/*
188 	 * If it does not start with "_R", then it is not a Rust symbol.
189 	 */
190 	if (strncmp(func->name, "_R", 2))
191 		return false;
192 
193 	/*
194 	 * These are just heuristics -- we do not control the precise symbol
195 	 * name, due to the crate disambiguators (which depend on the compiler)
196 	 * as well as changes to the source code itself between versions (since
197 	 * these come from the Rust standard library).
198 	 */
199 	return str_ends_with(func->name, "_4core5sliceSp15copy_from_slice17len_mismatch_fail")		||
200 	       str_ends_with(func->name, "_4core6option13expect_failed")				||
201 	       str_ends_with(func->name, "_4core6option13unwrap_failed")				||
202 	       str_ends_with(func->name, "_4core6result13unwrap_failed")				||
203 	       str_ends_with(func->name, "_4core9panicking5panic")					||
204 	       str_ends_with(func->name, "_4core9panicking9panic_fmt")					||
205 	       str_ends_with(func->name, "_4core9panicking14panic_explicit")				||
206 	       str_ends_with(func->name, "_4core9panicking14panic_nounwind")				||
207 	       str_ends_with(func->name, "_4core9panicking18panic_bounds_check")			||
208 	       str_ends_with(func->name, "_4core9panicking18panic_nounwind_fmt")			||
209 	       str_ends_with(func->name, "_4core9panicking19assert_failed_inner")			||
210 	       str_ends_with(func->name, "_4core9panicking30panic_null_pointer_dereference")		||
211 	       str_ends_with(func->name, "_4core9panicking36panic_misaligned_pointer_dereference")	||
212 	       str_ends_with(func->name, "_7___rustc17rust_begin_unwind")				||
213 	       strstr(func->name, "_4core9panicking13assert_failed")					||
214 	       strstr(func->name, "_4core9panicking11panic_const24panic_const_")			||
215 	       (strstr(func->name, "_4core5slice5index") &&
216 		strstr(func->name, "slice_") &&
217 		str_ends_with(func->name, "_fail"));
218 }
219 
220 /*
221  * This checks to see if the given function is a "noreturn" function.
222  *
223  * For global functions which are outside the scope of this object file, we
224  * have to keep a manual list of them.
225  *
226  * For local functions, we have to detect them manually by simply looking for
227  * the lack of a return instruction.
228  */
static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
				int recursion)
{
	int i;
	struct instruction *insn;
	bool empty = true;

	/* Stringified list of known global noreturn functions. */
#define NORETURN(func) __stringify(func),
	static const char * const global_noreturns[] = {
#include "noreturns.h"
	};
#undef NORETURN

	if (!func)
		return false;

	/* Global symbols: consult the manual lists first. */
	if (!is_local_sym(func)) {
		if (is_rust_noreturn(func))
			return true;

		for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
			if (!strcmp(func->name, global_noreturns[i]))
				return true;
	}

	/* A weak symbol can be overridden by a returning implementation. */
	if (is_weak_sym(func))
		return false;

	if (!func->len)
		return false;

	insn = find_insn(file, func->sec, func->offset);
	if (!insn || !insn_func(insn))
		return false;

	/* Any RETURN instruction means the function can return. */
	func_for_each_insn(file, func, insn) {
		empty = false;

		if (insn->type == INSN_RETURN)
			return false;
	}

	if (empty)
		return false;

	/*
	 * A function can have a sibling call instead of a return.  In that
	 * case, the function's dead-end status depends on whether the target
	 * of the sibling call returns.
	 */
	func_for_each_insn(file, func, insn) {
		if (is_sibling_call(insn)) {
			struct instruction *dest = insn->jump_dest;

			if (!dest)
				/* sibling call to another file */
				return false;

			/* local sibling call */
			if (recursion == 5) {
				/*
				 * Infinite recursion: two functions have
				 * sibling calls to each other.  This is a very
				 * rare case.  It means they aren't dead ends.
				 */
				return false;
			}

			return __dead_end_function(file, insn_func(dest), recursion+1);
		}
	}

	/* Non-empty, no return, no sibling call: never returns. */
	return true;
}
303 
/* Entry point: start the sibling-call recursion of __dead_end_function() at depth 0. */
static bool dead_end_function(struct objtool_file *file, struct symbol *func)
{
	return __dead_end_function(file, func, 0);
}
308 
309 static void init_cfi_state(struct cfi_state *cfi)
310 {
311 	int i;
312 
313 	for (i = 0; i < CFI_NUM_REGS; i++) {
314 		cfi->regs[i].base = CFI_UNDEFINED;
315 		cfi->vals[i].base = CFI_UNDEFINED;
316 	}
317 	cfi->cfa.base = CFI_UNDEFINED;
318 	cfi->drap_reg = CFI_UNDEFINED;
319 	cfi->drap_offset = -1;
320 }
321 
322 static void init_insn_state(struct objtool_file *file, struct insn_state *state,
323 			    struct section *sec)
324 {
325 	memset(state, 0, sizeof(*state));
326 	init_cfi_state(&state->cfi);
327 
328 	if (opts.noinstr && sec)
329 		state->noinstr = sec->noinstr;
330 }
331 
332 static struct cfi_state *cfi_alloc(void)
333 {
334 	struct cfi_state *cfi = calloc(1, sizeof(struct cfi_state));
335 	if (!cfi) {
336 		ERROR_GLIBC("calloc");
337 		exit(1);
338 	}
339 	nr_cfi++;
340 	return cfi;
341 }
342 
/* Hash table interning identical cfi_state objects; 2^cfi_bits buckets. */
static int cfi_bits;
static struct hlist_head *cfi_hash;
345 
/*
 * Compare two cfi_states, skipping the embedded 'hash' list node at the
 * start of the struct (which differs per object but carries no CFI
 * information).  Returns non-zero (true) when the states differ.
 */
static inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2)
{
	return memcmp((void *)cfi1 + sizeof(cfi1->hash),
		      (void *)cfi2 + sizeof(cfi2->hash),
		      sizeof(struct cfi_state) - sizeof(struct hlist_node));
}

/* Hash everything after the 'hash' member, mirroring cficmp()'s view. */
static inline u32 cfi_key(struct cfi_state *cfi)
{
	return jhash((void *)cfi + sizeof(cfi->hash),
		     sizeof(*cfi) - sizeof(cfi->hash), 0);
}
358 
359 static struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi)
360 {
361 	struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
362 	struct cfi_state *obj;
363 
364 	hlist_for_each_entry(obj, head, hash) {
365 		if (!cficmp(cfi, obj)) {
366 			nr_cfi_cache++;
367 			return obj;
368 		}
369 	}
370 
371 	obj = cfi_alloc();
372 	*obj = *cfi;
373 	hlist_add_head(&obj->hash, head);
374 
375 	return obj;
376 }
377 
378 static void cfi_hash_add(struct cfi_state *cfi)
379 {
380 	struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
381 
382 	hlist_add_head(&cfi->hash, head);
383 }
384 
385 static void *cfi_hash_alloc(unsigned long size)
386 {
387 	cfi_bits = max(10, ilog2(size));
388 	cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits,
389 			PROT_READ|PROT_WRITE,
390 			MAP_PRIVATE|MAP_ANON, -1, 0);
391 	if (cfi_hash == (void *)-1L) {
392 		ERROR_GLIBC("mmap fail cfi_hash");
393 		cfi_hash = NULL;
394 	}  else if (opts.stats) {
395 		printf("cfi_bits: %d\n", cfi_bits);
396 	}
397 
398 	return cfi_hash;
399 }
400 
/* Total decoded instructions, and how many validation actually visited. */
static unsigned long nr_insns;
static unsigned long nr_insns_visited;
403 
404 /*
405  * Call the arch-specific instruction decoder for all the instructions and add
406  * them to the global instruction list.
407  */
static int decode_instructions(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;
	unsigned long offset;
	struct instruction *insn;

	for_each_sec(file->elf, sec) {
		struct instruction *insns = NULL;
		u8 prev_len = 0;
		u8 idx = 0;

		if (!is_text_sec(sec))
			continue;

		/* Alternative/discard sections aren't "real" text. */
		if (strcmp(sec->name, ".altinstr_replacement") &&
		    strcmp(sec->name, ".altinstr_aux") &&
		    strncmp(sec->name, ".discard.", 9))
			sec->text = true;

		if (!strcmp(sec->name, ".noinstr.text") ||
		    !strcmp(sec->name, ".entry.text") ||
		    !strcmp(sec->name, ".cpuidle.text") ||
		    !strncmp(sec->name, ".text..__x86.", 13))
			sec->noinstr = true;

		/*
		 * .init.text code is ran before userspace and thus doesn't
		 * strictly need retpolines, except for modules which are
		 * loaded late, they very much do need retpoline in their
		 * .init.text
		 */
		if (!strcmp(sec->name, ".init.text") && !opts.module)
			sec->init = true;

		/* Decode the section into fixed-size instruction chunks. */
		for (offset = 0; offset < sec_size(sec); offset += insn->len) {
			/* Start a new chunk when the current one is full. */
			if (!insns || idx == INSN_CHUNK_MAX) {
				insns = calloc(INSN_CHUNK_SIZE, sizeof(*insn));
				if (!insns) {
					ERROR_GLIBC("calloc");
					return -1;
				}
				idx = 0;
			} else {
				idx++;
			}
			insn = &insns[idx];
			insn->idx = idx;

			INIT_LIST_HEAD(&insn->call_node);
			insn->sec = sec;
			insn->offset = offset;
			insn->prev_len = prev_len;

			if (arch_decode_instruction(file, sec, offset, sec_size(sec) - offset, insn))
				return -1;

			prev_len = insn->len;

			/*
			 * By default, "ud2" is a dead end unless otherwise
			 * annotated, because GCC 7 inserts it for certain
			 * divide-by-zero cases.
			 */
			if (insn->type == INSN_BUG)
				insn->dead_end = true;

			hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset));
			nr_insns++;
		}

		/* Associate each instruction with its covering symbol. */
		sec_for_each_sym(sec, func) {
			if (!is_notype_sym(func) && !is_func_sym(func))
				continue;

			if (func->offset == sec_size(sec)) {
				/* Heuristic: likely an "end" symbol */
				if (is_notype_sym(func))
					continue;
				ERROR("%s(): STT_FUNC at end of section", func->name);
				return -1;
			}

			/* Skip embedded and aliased symbols; the canonical one wins. */
			if (func->embedded_insn || func->alias != func)
				continue;

			if (!find_insn(file, sec, func->offset)) {
				ERROR("%s(): can't find starting instruction", func->name);
				return -1;
			}

			sym_for_each_insn(file, func, insn) {
				insn->sym = func;
				/* Track ENDBR placement for IBT sealing stats. */
				if (is_func_sym(func) &&
				    insn->type == INSN_ENDBR &&
				    list_empty(&insn->call_node)) {
					if (insn->offset == func->offset) {
						list_add_tail(&insn->call_node, &file->endbr_list);
						file->nr_endbr++;
					} else {
						file->nr_endbr_int++;
					}
				}
			}
		}
	}

	if (opts.stats)
		printf("nr_insns: %lu\n", nr_insns);

	return 0;
}
520 
521 /*
522  * Read the pv_ops[] .data table to find the static initialized values.
523  */
/*
 * Record every statically-initialized function pointer in the named
 * pv_ops-style table by scanning its relocations.  Returns 0 on success
 * (including when the symbol doesn't exist), -1 on error.
 */
static int add_pv_ops(struct objtool_file *file, const char *symname)
{
	struct symbol *sym, *func;
	unsigned long off, end;
	struct reloc *reloc;
	int idx;

	sym = find_symbol_by_name(file->elf, symname);
	if (!sym)
		return 0;

	off = sym->offset;
	end = off + sym->len;
	for (;;) {
		reloc = find_reloc_by_dest_range(file->elf, sym->sec, off, end - off);
		if (!reloc)
			break;

		/* Slot index within the table, one pointer per entry. */
		idx = (reloc_offset(reloc) - sym->offset) / sizeof(unsigned long);

		/* Section-relative relocs need a symbol lookup by addend. */
		func = reloc->sym;
		if (is_sec_sym(func))
			func = find_symbol_by_offset(reloc->sym->sec,
						     reloc_addend(reloc));
		if (!func) {
			ERROR_FUNC(reloc->sym->sec, reloc_addend(reloc),
				   "can't find func at %s[%d]", symname, idx);
			return -1;
		}

		if (objtool_pv_add(file, idx, func))
			return -1;

		/* Resume the scan just past this reloc. */
		off = reloc_offset(reloc) + 1;
		if (off > end)
			break;
	}

	return 0;
}
564 
565 /*
566  * Allocate and initialize file->pv_ops[].
567  */
568 static int init_pv_ops(struct objtool_file *file)
569 {
570 	static const char *pv_ops_tables[] = {
571 		"pv_ops",
572 		"xen_cpu_ops",
573 		"xen_irq_ops",
574 		"xen_mmu_ops",
575 		NULL,
576 	};
577 	const char *pv_ops;
578 	struct symbol *sym;
579 	int idx, nr;
580 
581 	if (!opts.noinstr)
582 		return 0;
583 
584 	file->pv_ops = NULL;
585 
586 	sym = find_symbol_by_name(file->elf, "pv_ops");
587 	if (!sym)
588 		return 0;
589 
590 	nr = sym->len / sizeof(unsigned long);
591 	file->pv_ops = calloc(nr, sizeof(struct pv_state));
592 	if (!file->pv_ops) {
593 		ERROR_GLIBC("calloc");
594 		return -1;
595 	}
596 
597 	for (idx = 0; idx < nr; idx++)
598 		INIT_LIST_HEAD(&file->pv_ops[idx].targets);
599 
600 	for (idx = 0; (pv_ops = pv_ops_tables[idx]); idx++) {
601 		if (add_pv_ops(file, pv_ops))
602 			return -1;
603 	}
604 
605 	return 0;
606 }
607 
608 static bool is_livepatch_module(struct objtool_file *file)
609 {
610 	struct section *sec;
611 
612 	if (!opts.module)
613 		return false;
614 
615 	sec = find_section_by_name(file->elf, ".modinfo");
616 	if (!sec)
617 		return false;
618 
619 	return memmem(sec->data->d_buf, sec_size(sec), "\0livepatch=Y", 12);
620 }
621 
622 static int create_static_call_sections(struct objtool_file *file)
623 {
624 	struct static_call_site *site;
625 	struct section *sec;
626 	struct instruction *insn;
627 	struct symbol *key_sym;
628 	char *key_name, *tmp;
629 	int idx;
630 
631 	sec = find_section_by_name(file->elf, ".static_call_sites");
632 	if (sec) {
633 		/*
634 		 * Livepatch modules may have already extracted the static call
635 		 * site entries to take advantage of vmlinux static call
636 		 * privileges.
637 		 */
638 		if (!file->klp)
639 			WARN("file already has .static_call_sites section, skipping");
640 
641 		return 0;
642 	}
643 
644 	if (list_empty(&file->static_call_list))
645 		return 0;
646 
647 	idx = 0;
648 	list_for_each_entry(insn, &file->static_call_list, call_node)
649 		idx++;
650 
651 	sec = elf_create_section_pair(file->elf, ".static_call_sites",
652 				      sizeof(*site), idx, idx * 2);
653 	if (!sec)
654 		return -1;
655 
656 	/* Allow modules to modify the low bits of static_call_site::key */
657 	sec->sh.sh_flags |= SHF_WRITE;
658 
659 	idx = 0;
660 	list_for_each_entry(insn, &file->static_call_list, call_node) {
661 
662 		/* populate reloc for 'addr' */
663 		if (!elf_init_reloc_text_sym(file->elf, sec,
664 					     idx * sizeof(*site), idx * 2,
665 					     insn->sec, insn->offset))
666 			return -1;
667 
668 		/* find key symbol */
669 		key_name = strdup(insn_call_dest(insn)->name);
670 		if (!key_name) {
671 			ERROR_GLIBC("strdup");
672 			return -1;
673 		}
674 		if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR,
675 			    STATIC_CALL_TRAMP_PREFIX_LEN)) {
676 			ERROR("static_call: trampoline name malformed: %s", key_name);
677 			return -1;
678 		}
679 		tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN;
680 		memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN);
681 
682 		key_sym = find_symbol_by_name(file->elf, tmp);
683 		if (!key_sym) {
684 			if (!opts.module || file->klp) {
685 				ERROR("static_call: can't find static_call_key symbol: %s", tmp);
686 				return -1;
687 			}
688 
689 			/*
690 			 * For modules(), the key might not be exported, which
691 			 * means the module can make static calls but isn't
692 			 * allowed to change them.
693 			 *
694 			 * In that case we temporarily set the key to be the
695 			 * trampoline address.  This is fixed up in
696 			 * static_call_add_module().
697 			 */
698 			key_sym = insn_call_dest(insn);
699 		}
700 
701 		/* populate reloc for 'key' */
702 		if (!elf_init_reloc_data_sym(file->elf, sec,
703 					     idx * sizeof(*site) + 4,
704 					     (idx * 2) + 1, key_sym,
705 					     is_sibling_call(insn) * STATIC_CALL_SITE_TAIL))
706 			return -1;
707 
708 		idx++;
709 	}
710 
711 	return 0;
712 }
713 
714 static int create_retpoline_sites_sections(struct objtool_file *file)
715 {
716 	struct instruction *insn;
717 	struct section *sec;
718 	int idx;
719 
720 	sec = find_section_by_name(file->elf, ".retpoline_sites");
721 	if (sec) {
722 		WARN("file already has .retpoline_sites, skipping");
723 		return 0;
724 	}
725 
726 	idx = 0;
727 	list_for_each_entry(insn, &file->retpoline_call_list, call_node)
728 		idx++;
729 
730 	if (!idx)
731 		return 0;
732 
733 	sec = elf_create_section_pair(file->elf, ".retpoline_sites",
734 				      sizeof(int), idx, idx);
735 	if (!sec)
736 		return -1;
737 
738 	idx = 0;
739 	list_for_each_entry(insn, &file->retpoline_call_list, call_node) {
740 
741 		if (!elf_init_reloc_text_sym(file->elf, sec,
742 					     idx * sizeof(int), idx,
743 					     insn->sec, insn->offset))
744 			return -1;
745 
746 		idx++;
747 	}
748 
749 	return 0;
750 }
751 
752 static int create_return_sites_sections(struct objtool_file *file)
753 {
754 	struct instruction *insn;
755 	struct section *sec;
756 	int idx;
757 
758 	sec = find_section_by_name(file->elf, ".return_sites");
759 	if (sec) {
760 		WARN("file already has .return_sites, skipping");
761 		return 0;
762 	}
763 
764 	idx = 0;
765 	list_for_each_entry(insn, &file->return_thunk_list, call_node)
766 		idx++;
767 
768 	if (!idx)
769 		return 0;
770 
771 	sec = elf_create_section_pair(file->elf, ".return_sites",
772 				      sizeof(int), idx, idx);
773 	if (!sec)
774 		return -1;
775 
776 	idx = 0;
777 	list_for_each_entry(insn, &file->return_thunk_list, call_node) {
778 
779 		if (!elf_init_reloc_text_sym(file->elf, sec,
780 					     idx * sizeof(int), idx,
781 					     insn->sec, insn->offset))
782 			return -1;
783 
784 		idx++;
785 	}
786 
787 	return 0;
788 }
789 
/*
 * Emit .ibt_endbr_seal: one entry per superfluous ENDBR instruction that
 * the kernel can overwrite (seal) at boot.
 */
static int create_ibt_endbr_seal_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".ibt_endbr_seal");
	if (sec) {
		WARN("file already has .ibt_endbr_seal, skipping");
		return 0;
	}

	idx = 0;
	list_for_each_entry(insn, &file->endbr_list, call_node)
		idx++;

	if (opts.stats) {
		printf("ibt: ENDBR at function start: %d\n", file->nr_endbr);
		printf("ibt: ENDBR inside functions:  %d\n", file->nr_endbr_int);
		printf("ibt: superfluous ENDBR:       %d\n", idx);
	}

	if (!idx)
		return 0;

	sec = elf_create_section_pair(file->elf, ".ibt_endbr_seal",
				      sizeof(int), idx, idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->endbr_list, call_node) {

		/* Zero the slot; the reloc below fills in the real value. */
		int *site = (int *)sec->data->d_buf + idx;
		struct symbol *sym = insn->sym;
		*site = 0;

		/*
		 * An ENDBR at the start of init_module()/cleanup_module()
		 * means the module relies on the deprecated magic names.
		 */
		if (opts.module && sym && is_func_sym(sym) &&
		    insn->offset == sym->offset &&
		    (!strcmp(sym->name, "init_module") ||
		     !strcmp(sym->name, "cleanup_module"))) {
			ERROR("%s(): Magic init_module() function name is deprecated, use module_init(fn) instead",
			      sym->name);
			return -1;
		}

		if (!elf_init_reloc_text_sym(file->elf, sec,
					     idx * sizeof(int), idx,
					     insn->sec, insn->offset))
			return -1;

		idx++;
	}

	return 0;
}
846 
847 static int create_cfi_sections(struct objtool_file *file)
848 {
849 	struct section *sec;
850 	struct symbol *sym;
851 	int idx;
852 
853 	sec = find_section_by_name(file->elf, ".cfi_sites");
854 	if (sec) {
855 		WARN("file already has .cfi_sites section, skipping");
856 		return 0;
857 	}
858 
859 	idx = 0;
860 	for_each_sym(file->elf, sym) {
861 		if (!is_func_sym(sym))
862 			continue;
863 
864 		if (strncmp(sym->name, "__cfi_", 6))
865 			continue;
866 
867 		idx++;
868 	}
869 
870 	sec = elf_create_section_pair(file->elf, ".cfi_sites",
871 				      sizeof(unsigned int), idx, idx);
872 	if (!sec)
873 		return -1;
874 
875 	idx = 0;
876 	for_each_sym(file->elf, sym) {
877 		if (!is_func_sym(sym))
878 			continue;
879 
880 		if (strncmp(sym->name, "__cfi_", 6))
881 			continue;
882 
883 		if (!elf_init_reloc_text_sym(file->elf, sec,
884 					     idx * sizeof(unsigned int), idx,
885 					     sym->sec, sym->offset))
886 			return -1;
887 
888 		idx++;
889 	}
890 
891 	return 0;
892 }
893 
894 static int create_mcount_loc_sections(struct objtool_file *file)
895 {
896 	size_t addr_size = elf_addr_size(file->elf);
897 	struct instruction *insn;
898 	struct section *sec;
899 	int idx;
900 
901 	sec = find_section_by_name(file->elf, "__mcount_loc");
902 	if (sec) {
903 		/*
904 		 * Livepatch modules have already extracted their __mcount_loc
905 		 * entries to cover the !CONFIG_FTRACE_MCOUNT_USE_OBJTOOL case.
906 		 */
907 		if (!file->klp)
908 			WARN("file already has __mcount_loc section, skipping");
909 
910 		return 0;
911 	}
912 
913 	if (list_empty(&file->mcount_loc_list))
914 		return 0;
915 
916 	idx = 0;
917 	list_for_each_entry(insn, &file->mcount_loc_list, call_node)
918 		idx++;
919 
920 	sec = elf_create_section_pair(file->elf, "__mcount_loc", addr_size,
921 				      idx, idx);
922 	if (!sec)
923 		return -1;
924 
925 	sec->sh.sh_addralign = addr_size;
926 
927 	idx = 0;
928 	list_for_each_entry(insn, &file->mcount_loc_list, call_node) {
929 
930 		struct reloc *reloc;
931 
932 		reloc = elf_init_reloc_text_sym(file->elf, sec, idx * addr_size, idx,
933 					       insn->sec, insn->offset);
934 		if (!reloc)
935 			return -1;
936 
937 		set_reloc_type(file->elf, reloc, addr_size == 8 ? R_ABS64 : R_ABS32);
938 
939 		idx++;
940 	}
941 
942 	return 0;
943 }
944 
945 static int create_direct_call_sections(struct objtool_file *file)
946 {
947 	struct instruction *insn;
948 	struct section *sec;
949 	int idx;
950 
951 	sec = find_section_by_name(file->elf, ".call_sites");
952 	if (sec) {
953 		WARN("file already has .call_sites section, skipping");
954 		return 0;
955 	}
956 
957 	if (list_empty(&file->call_list))
958 		return 0;
959 
960 	idx = 0;
961 	list_for_each_entry(insn, &file->call_list, call_node)
962 		idx++;
963 
964 	sec = elf_create_section_pair(file->elf, ".call_sites",
965 				      sizeof(unsigned int), idx, idx);
966 	if (!sec)
967 		return -1;
968 
969 	idx = 0;
970 	list_for_each_entry(insn, &file->call_list, call_node) {
971 
972 		if (!elf_init_reloc_text_sym(file->elf, sec,
973 					     idx * sizeof(unsigned int), idx,
974 					     insn->sec, insn->offset))
975 			return -1;
976 
977 		idx++;
978 	}
979 
980 	return 0;
981 }
982 
#ifdef BUILD_KLP
/*
 * Emit .discard.sym_checksum: one {addr, checksum} entry per symbol that
 * has a computed checksum, used for livepatch symbol matching.
 */
static int create_sym_checksum_section(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *sym;
	unsigned int idx = 0;
	struct sym_checksum *checksum;
	size_t entsize = sizeof(struct sym_checksum);

	sec = find_section_by_name(file->elf, ".discard.sym_checksum");
	if (sec) {
		if (!opts.dryrun)
			WARN("file already has .discard.sym_checksum section, skipping");

		return 0;
	}

	/* Count symbols that actually carry a checksum. */
	for_each_sym(file->elf, sym)
		if (sym->csum.checksum)
			idx++;

	if (!idx)
		return 0;

	sec = elf_create_section_pair(file->elf, ".discard.sym_checksum", entsize,
				      idx, idx);
	if (!sec)
		return -1;

	idx = 0;
	for_each_sym(file->elf, sym) {
		if (!sym->csum.checksum)
			continue;

		if (!elf_init_reloc(file->elf, sec->rsec, idx, idx * entsize,
				    sym, 0, R_TEXT64))
			return -1;

		checksum = (struct sym_checksum *)sec->data->d_buf + idx;
		checksum->addr = 0; /* reloc */
		checksum->checksum = sym->csum.checksum;

		mark_sec_changed(file->elf, sec, true);

		idx++;
	}

	return 0;
}
#else
/* Without BUILD_KLP there is no checksum support; callers get -EINVAL. */
static int create_sym_checksum_section(struct objtool_file *file) { return -EINVAL; }
#endif
1035 
1036 /*
1037  * Warnings shouldn't be reported for ignored functions.
1038  */
1039 static int add_ignores(struct objtool_file *file)
1040 {
1041 	struct section *rsec;
1042 	struct symbol *func;
1043 	struct reloc *reloc;
1044 
1045 	rsec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
1046 	if (!rsec)
1047 		return 0;
1048 
1049 	for_each_reloc(rsec, reloc) {
1050 		switch (reloc->sym->type) {
1051 		case STT_FUNC:
1052 			func = reloc->sym;
1053 			break;
1054 
1055 		case STT_SECTION:
1056 			func = find_func_by_offset(reloc->sym->sec, reloc_addend(reloc));
1057 			if (!func)
1058 				continue;
1059 			break;
1060 
1061 		default:
1062 			ERROR("unexpected relocation symbol type in %s: %d",
1063 			      rsec->name, reloc->sym->type);
1064 			return -1;
1065 		}
1066 
1067 		func->ignore = true;
1068 		if (func->cfunc)
1069 			func->cfunc->ignore = true;
1070 	}
1071 
1072 	return 0;
1073 }
1074 
1075 /*
1076  * This is a whitelist of functions that is allowed to be called with AC set.
1077  * The list is meant to be minimal and only contains compiler instrumentation
1078  * ABI and a few functions used to implement *_{to,from}_user() functions.
1079  *
1080  * These functions must not directly change AC, but may PUSHF/POPF.
1081  */
/*
 * Symbols that add_uaccess_safe() looks up and flags ->uaccess_safe.
 * These are mostly compiler-instrumentation entry points (KASAN, KCSAN,
 * KCOV, KMSAN, UBSAN, ...) plus a few hand-written asm helpers.
 * NOTE(review): presumably marking them suppresses uaccess (STAC/CLAC)
 * warnings for calls into these functions — confirm against the uaccess
 * validation logic elsewhere in this file.  NULL-terminated.
 */
static const char *uaccess_safe_builtin[] = {
	/* KASAN */
	"kasan_report",
	"kasan_check_range",
	/* KASAN out-of-line */
	"__asan_loadN_noabort",
	"__asan_load1_noabort",
	"__asan_load2_noabort",
	"__asan_load4_noabort",
	"__asan_load8_noabort",
	"__asan_load16_noabort",
	"__asan_storeN_noabort",
	"__asan_store1_noabort",
	"__asan_store2_noabort",
	"__asan_store4_noabort",
	"__asan_store8_noabort",
	"__asan_store16_noabort",
	"__kasan_check_read",
	"__kasan_check_write",
	/* KASAN in-line */
	"__asan_report_load_n_noabort",
	"__asan_report_load1_noabort",
	"__asan_report_load2_noabort",
	"__asan_report_load4_noabort",
	"__asan_report_load8_noabort",
	"__asan_report_load16_noabort",
	"__asan_report_store_n_noabort",
	"__asan_report_store1_noabort",
	"__asan_report_store2_noabort",
	"__asan_report_store4_noabort",
	"__asan_report_store8_noabort",
	"__asan_report_store16_noabort",
	/* KCSAN */
	"__kcsan_check_access",
	"__kcsan_mb",
	"__kcsan_wmb",
	"__kcsan_rmb",
	"__kcsan_release",
	"kcsan_found_watchpoint",
	"kcsan_setup_watchpoint",
	"kcsan_check_scoped_accesses",
	"kcsan_disable_current",
	"kcsan_enable_current_nowarn",
	/* KCSAN/TSAN */
	"__tsan_func_entry",
	"__tsan_func_exit",
	"__tsan_read_range",
	"__tsan_write_range",
	"__tsan_read1",
	"__tsan_read2",
	"__tsan_read4",
	"__tsan_read8",
	"__tsan_read16",
	"__tsan_write1",
	"__tsan_write2",
	"__tsan_write4",
	"__tsan_write8",
	"__tsan_write16",
	"__tsan_read_write1",
	"__tsan_read_write2",
	"__tsan_read_write4",
	"__tsan_read_write8",
	"__tsan_read_write16",
	"__tsan_volatile_read1",
	"__tsan_volatile_read2",
	"__tsan_volatile_read4",
	"__tsan_volatile_read8",
	"__tsan_volatile_read16",
	"__tsan_volatile_write1",
	"__tsan_volatile_write2",
	"__tsan_volatile_write4",
	"__tsan_volatile_write8",
	"__tsan_volatile_write16",
	"__tsan_atomic8_load",
	"__tsan_atomic16_load",
	"__tsan_atomic32_load",
	"__tsan_atomic64_load",
	"__tsan_atomic8_store",
	"__tsan_atomic16_store",
	"__tsan_atomic32_store",
	"__tsan_atomic64_store",
	"__tsan_atomic8_exchange",
	"__tsan_atomic16_exchange",
	"__tsan_atomic32_exchange",
	"__tsan_atomic64_exchange",
	"__tsan_atomic8_fetch_add",
	"__tsan_atomic16_fetch_add",
	"__tsan_atomic32_fetch_add",
	"__tsan_atomic64_fetch_add",
	"__tsan_atomic8_fetch_sub",
	"__tsan_atomic16_fetch_sub",
	"__tsan_atomic32_fetch_sub",
	"__tsan_atomic64_fetch_sub",
	"__tsan_atomic8_fetch_and",
	"__tsan_atomic16_fetch_and",
	"__tsan_atomic32_fetch_and",
	"__tsan_atomic64_fetch_and",
	"__tsan_atomic8_fetch_or",
	"__tsan_atomic16_fetch_or",
	"__tsan_atomic32_fetch_or",
	"__tsan_atomic64_fetch_or",
	"__tsan_atomic8_fetch_xor",
	"__tsan_atomic16_fetch_xor",
	"__tsan_atomic32_fetch_xor",
	"__tsan_atomic64_fetch_xor",
	"__tsan_atomic8_fetch_nand",
	"__tsan_atomic16_fetch_nand",
	"__tsan_atomic32_fetch_nand",
	"__tsan_atomic64_fetch_nand",
	"__tsan_atomic8_compare_exchange_strong",
	"__tsan_atomic16_compare_exchange_strong",
	"__tsan_atomic32_compare_exchange_strong",
	"__tsan_atomic64_compare_exchange_strong",
	"__tsan_atomic8_compare_exchange_weak",
	"__tsan_atomic16_compare_exchange_weak",
	"__tsan_atomic32_compare_exchange_weak",
	"__tsan_atomic64_compare_exchange_weak",
	"__tsan_atomic8_compare_exchange_val",
	"__tsan_atomic16_compare_exchange_val",
	"__tsan_atomic32_compare_exchange_val",
	"__tsan_atomic64_compare_exchange_val",
	"__tsan_atomic_thread_fence",
	"__tsan_atomic_signal_fence",
	"__tsan_unaligned_read16",
	"__tsan_unaligned_write16",
	/* KCOV */
	"write_comp_data",
	"check_kcov_mode",
	"__sanitizer_cov_trace_pc",
	"__sanitizer_cov_trace_const_cmp1",
	"__sanitizer_cov_trace_const_cmp2",
	"__sanitizer_cov_trace_const_cmp4",
	"__sanitizer_cov_trace_const_cmp8",
	"__sanitizer_cov_trace_cmp1",
	"__sanitizer_cov_trace_cmp2",
	"__sanitizer_cov_trace_cmp4",
	"__sanitizer_cov_trace_cmp8",
	"__sanitizer_cov_trace_switch",
	/* KMSAN */
	"kmsan_copy_to_user",
	"kmsan_disable_current",
	"kmsan_enable_current",
	"kmsan_report",
	"kmsan_unpoison_entry_regs",
	"kmsan_unpoison_memory",
	"__msan_chain_origin",
	"__msan_get_context_state",
	"__msan_instrument_asm_store",
	"__msan_metadata_ptr_for_load_1",
	"__msan_metadata_ptr_for_load_2",
	"__msan_metadata_ptr_for_load_4",
	"__msan_metadata_ptr_for_load_8",
	"__msan_metadata_ptr_for_load_n",
	"__msan_metadata_ptr_for_store_1",
	"__msan_metadata_ptr_for_store_2",
	"__msan_metadata_ptr_for_store_4",
	"__msan_metadata_ptr_for_store_8",
	"__msan_metadata_ptr_for_store_n",
	"__msan_poison_alloca",
	"__msan_warning",
	/* UBSAN */
	"ubsan_type_mismatch_common",
	"__ubsan_handle_type_mismatch",
	"__ubsan_handle_type_mismatch_v1",
	"__ubsan_handle_shift_out_of_bounds",
	"__ubsan_handle_load_invalid_value",
	/* KSTACK_ERASE */
	"__sanitizer_cov_stack_depth",
	/* TRACE_BRANCH_PROFILING */
	"ftrace_likely_update",
	/* STACKPROTECTOR */
	"__stack_chk_fail",
	/* misc */
	"csum_partial_copy_generic",
	"copy_mc_fragile",
	"copy_mc_fragile_handle_tail",
	"copy_mc_enhanced_fast_string",
	"rep_stos_alternative",
	"rep_movs_alternative",
	"__copy_user_nocache",
	NULL
};
1264 
1265 static void add_uaccess_safe(struct objtool_file *file)
1266 {
1267 	struct symbol *func;
1268 	const char **name;
1269 
1270 	if (!opts.uaccess)
1271 		return;
1272 
1273 	for (name = uaccess_safe_builtin; *name; name++) {
1274 		func = find_symbol_by_name(file->elf, *name);
1275 		if (!func)
1276 			continue;
1277 
1278 		func->uaccess_safe = true;
1279 	}
1280 }
1281 
1282 /*
1283  * Symbols that replace INSN_CALL_DYNAMIC, every (tail) call to such a symbol
1284  * will be added to the .retpoline_sites section.
1285  */
1286 __weak bool arch_is_retpoline(struct symbol *sym)
1287 {
1288 	return false;
1289 }
1290 
1291 /*
1292  * Symbols that replace INSN_RETURN, every (tail) call to such a symbol
1293  * will be added to the .return_sites section.
1294  */
1295 __weak bool arch_is_rethunk(struct symbol *sym)
1296 {
1297 	return false;
1298 }
1299 
1300 /*
1301  * Symbols that are embedded inside other instructions, because sometimes crazy
1302  * code exists. These are mostly ignored for validation purposes.
1303  */
1304 __weak bool arch_is_embedded_insn(struct symbol *sym)
1305 {
1306 	return false;
1307 }
1308 
1309 static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
1310 {
1311 	struct reloc *reloc;
1312 
1313 	if (insn->no_reloc)
1314 		return NULL;
1315 
1316 	if (!file)
1317 		return NULL;
1318 
1319 	reloc = find_reloc_by_dest_range(file->elf, insn->sec,
1320 					 insn->offset, insn->len);
1321 	if (!reloc) {
1322 		insn->no_reloc = 1;
1323 		return NULL;
1324 	}
1325 
1326 	return reloc;
1327 }
1328 
1329 static void remove_insn_ops(struct instruction *insn)
1330 {
1331 	struct stack_op *op, *next;
1332 
1333 	for (op = insn->stack_ops; op; op = next) {
1334 		next = op->next;
1335 		free(op);
1336 	}
1337 	insn->stack_ops = NULL;
1338 }
1339 
/*
 * Classify a call site and record it on the relevant per-file list
 * (static call, retpoline, mcount, plain call), optionally rewriting the
 * instruction bytes in place for noinstr/mcount hacks.  @sibling marks a
 * tail call.  Returns 0 on success, -1 on ELF write failure.
 */
static int annotate_call_site(struct objtool_file *file,
			       struct instruction *insn, bool sibling)
{
	struct reloc *reloc = insn_reloc(file, insn);
	struct symbol *sym = insn_call_dest(insn);

	/*
	 * NOTE(review): assumes a reloc exists whenever the call dest wasn't
	 * resolved — confirm against the callers (add_call_dest() paths).
	 */
	if (!sym)
		sym = reloc->sym;

	if (sym->static_call_tramp) {
		list_add_tail(&insn->call_node, &file->static_call_list);
		return 0;
	}

	if (sym->retpoline_thunk) {
		list_add_tail(&insn->call_node, &file->retpoline_call_list);
		return 0;
	}

	/*
	 * Many compilers cannot disable KCOV or sanitizer calls with a function
	 * attribute so they need a little help, NOP out any such calls from
	 * noinstr text.
	 */
	if (opts.hack_noinstr && insn->sec->noinstr && sym->profiling_func) {
		/* Kill the reloc so the patched bytes stay as written. */
		if (reloc)
			set_reloc_type(file->elf, reloc, R_NONE);

		/* Tail calls become a RET, regular calls become a NOP. */
		if (elf_write_insn(file->elf, insn->sec,
				   insn->offset, insn->len,
				   sibling ? arch_ret_insn(insn->len)
					   : arch_nop_insn(insn->len))) {
			return -1;
		}

		insn->type = sibling ? INSN_RETURN : INSN_NOP;

		if (sibling) {
			/*
			 * We've replaced the tail-call JMP insn by two new
			 * insn: RET; INT3, except we only have a single struct
			 * insn here. Mark it retpoline_safe to avoid the SLS
			 * warning, instead of adding another insn.
			 */
			insn->retpoline_safe = true;
		}

		return 0;
	}

	if (opts.mcount && sym->fentry) {
		if (sibling)
			WARN_INSN(insn, "tail call to __fentry__ !?!?");
		if (opts.mnop) {
			/* NOP the __fentry__ call; ftrace patches it at runtime. */
			if (reloc)
				set_reloc_type(file->elf, reloc, R_NONE);

			if (elf_write_insn(file->elf, insn->sec,
					   insn->offset, insn->len,
					   arch_nop_insn(insn->len))) {
				return -1;
			}

			insn->type = INSN_NOP;
		}

		/* Record the site for .mcount_loc generation. */
		list_add_tail(&insn->call_node, &file->mcount_loc_list);
		return 0;
	}

	if (insn->type == INSN_CALL && !insn->sec->init &&
	    !insn->_call_dest->embedded_insn)
		list_add_tail(&insn->call_node, &file->call_list);

	/* Calls to noreturn functions never come back. */
	if (!sibling && dead_end_function(file, sym))
		insn->dead_end = true;

	return 0;
}
1419 
1420 static int add_call_dest(struct objtool_file *file, struct instruction *insn,
1421 			  struct symbol *dest, bool sibling)
1422 {
1423 	insn->_call_dest = dest;
1424 	if (!dest)
1425 		return 0;
1426 
1427 	/*
1428 	 * Whatever stack impact regular CALLs have, should be undone
1429 	 * by the RETURN of the called function.
1430 	 *
1431 	 * Annotated intra-function calls retain the stack_ops but
1432 	 * are converted to JUMP, see read_intra_function_calls().
1433 	 */
1434 	remove_insn_ops(insn);
1435 
1436 	return annotate_call_site(file, insn, sibling);
1437 }
1438 
1439 static int add_retpoline_call(struct objtool_file *file, struct instruction *insn)
1440 {
1441 	/*
1442 	 * Retpoline calls/jumps are really dynamic calls/jumps in disguise,
1443 	 * so convert them accordingly.
1444 	 */
1445 	switch (insn->type) {
1446 	case INSN_CALL:
1447 		insn->type = INSN_CALL_DYNAMIC;
1448 		break;
1449 	case INSN_JUMP_UNCONDITIONAL:
1450 		insn->type = INSN_JUMP_DYNAMIC;
1451 		break;
1452 	case INSN_JUMP_CONDITIONAL:
1453 		insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;
1454 		break;
1455 	default:
1456 		return 0;
1457 	}
1458 
1459 	insn->retpoline_safe = true;
1460 
1461 	/*
1462 	 * Whatever stack impact regular CALLs have, should be undone
1463 	 * by the RETURN of the called function.
1464 	 *
1465 	 * Annotated intra-function calls retain the stack_ops but
1466 	 * are converted to JUMP, see read_intra_function_calls().
1467 	 */
1468 	remove_insn_ops(insn);
1469 
1470 	return annotate_call_site(file, insn, false);
1471 }
1472 
1473 static void add_return_call(struct objtool_file *file, struct instruction *insn, bool add)
1474 {
1475 	/*
1476 	 * Return thunk tail calls are really just returns in disguise,
1477 	 * so convert them accordingly.
1478 	 */
1479 	insn->type = INSN_RETURN;
1480 	insn->retpoline_safe = true;
1481 
1482 	if (add)
1483 		list_add_tail(&insn->call_node, &file->return_thunk_list);
1484 }
1485 
1486 static bool is_first_func_insn(struct objtool_file *file,
1487 			       struct instruction *insn)
1488 {
1489 	struct symbol *func = insn_func(insn);
1490 
1491 	if (!func)
1492 		return false;
1493 
1494 	if (insn->offset == func->offset)
1495 		return true;
1496 
1497 	/* Allow direct CALL/JMP past ENDBR */
1498 	if (opts.ibt) {
1499 		struct instruction *prev = prev_insn_same_sym(file, insn);
1500 
1501 		if (prev && prev->type == INSN_ENDBR &&
1502 		    insn->offset == func->offset + prev->len)
1503 			return true;
1504 	}
1505 
1506 	return false;
1507 }
1508 
1509 /*
1510  * Find the destination instructions for all jumps.
1511  */
1512 static int add_jump_destinations(struct objtool_file *file)
1513 {
1514 	struct instruction *insn;
1515 	struct reloc *reloc;
1516 
1517 	for_each_insn(file, insn) {
1518 		struct symbol *func = insn_func(insn);
1519 		struct instruction *dest_insn;
1520 		struct section *dest_sec;
1521 		struct symbol *dest_sym;
1522 		unsigned long dest_off;
1523 
1524 		if (!is_static_jump(insn))
1525 			continue;
1526 
1527 		if (insn->jump_dest) {
1528 			/*
1529 			 * handle_group_alt() may have previously set
1530 			 * 'jump_dest' for some alternatives.
1531 			 */
1532 			continue;
1533 		}
1534 
1535 		reloc = insn_reloc(file, insn);
1536 		if (!reloc) {
1537 			dest_sec = insn->sec;
1538 			dest_off = arch_jump_destination(insn);
1539 			dest_sym = dest_sec->sym;
1540 		} else {
1541 			dest_sym = reloc->sym;
1542 			if (is_undef_sym(dest_sym)) {
1543 				if (dest_sym->retpoline_thunk) {
1544 					if (add_retpoline_call(file, insn))
1545 						return -1;
1546 					continue;
1547 				}
1548 
1549 				if (dest_sym->return_thunk) {
1550 					add_return_call(file, insn, true);
1551 					continue;
1552 				}
1553 
1554 				/* External symbol */
1555 				if (func) {
1556 					/* External sibling call */
1557 					if (add_call_dest(file, insn, dest_sym, true))
1558 						return -1;
1559 					continue;
1560 				}
1561 
1562 				/* Non-func asm code jumping to external symbol */
1563 				continue;
1564 			}
1565 
1566 			dest_sec = dest_sym->sec;
1567 			dest_off = dest_sym->offset + arch_insn_adjusted_addend(insn, reloc);
1568 		}
1569 
1570 		dest_insn = find_insn(file, dest_sec, dest_off);
1571 		if (!dest_insn) {
1572 			struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off);
1573 
1574 			/*
1575 			 * retbleed_untrain_ret() jumps to
1576 			 * __x86_return_thunk(), but objtool can't find
1577 			 * the thunk's starting RET instruction,
1578 			 * because the RET is also in the middle of
1579 			 * another instruction.  Objtool only knows
1580 			 * about the outer instruction.
1581 			 */
1582 			if (sym && sym->embedded_insn) {
1583 				add_return_call(file, insn, false);
1584 				continue;
1585 			}
1586 
1587 			/*
1588 			 * GCOV/KCOV dead code can jump to the end of
1589 			 * the function/section.
1590 			 */
1591 			if (file->ignore_unreachables && func &&
1592 			    dest_sec == insn->sec &&
1593 			    dest_off == func->offset + func->len)
1594 				continue;
1595 
1596 			ERROR_INSN(insn, "can't find jump dest instruction at %s",
1597 				   offstr(dest_sec, dest_off));
1598 			return -1;
1599 		}
1600 
1601 		if (!dest_sym || is_sec_sym(dest_sym)) {
1602 			dest_sym = dest_insn->sym;
1603 			if (!dest_sym)
1604 				goto set_jump_dest;
1605 		}
1606 
1607 		if (dest_sym->retpoline_thunk && dest_insn->offset == dest_sym->offset) {
1608 			if (add_retpoline_call(file, insn))
1609 				return -1;
1610 			continue;
1611 		}
1612 
1613 		if (dest_sym->return_thunk && dest_insn->offset == dest_sym->offset) {
1614 			add_return_call(file, insn, true);
1615 			continue;
1616 		}
1617 
1618 		if (!insn->sym || insn->sym->pfunc == dest_sym->pfunc)
1619 			goto set_jump_dest;
1620 
1621 		/*
1622 		 * Internal cross-function jump.
1623 		 */
1624 
1625 		if (is_first_func_insn(file, dest_insn)) {
1626 			/* Internal sibling call */
1627 			if (add_call_dest(file, insn, dest_sym, true))
1628 				return -1;
1629 			continue;
1630 		}
1631 
1632 set_jump_dest:
1633 		insn->jump_dest = dest_insn;
1634 	}
1635 
1636 	return 0;
1637 }
1638 
/*
 * Resolve a call target at @sec+@offset: prefer a function symbol, fall
 * back to any symbol at that offset.  Returns NULL when neither exists.
 */
static struct symbol *find_call_destination(struct section *sec, unsigned long offset)
{
	struct symbol *sym = find_func_by_offset(sec, offset);

	return sym ?: find_symbol_by_offset(sec, offset);
}
1649 
1650 /*
1651  * Find the destination instructions for all calls.
1652  */
1653 static int add_call_destinations(struct objtool_file *file)
1654 {
1655 	struct instruction *insn;
1656 	unsigned long dest_off;
1657 	struct symbol *dest;
1658 	struct reloc *reloc;
1659 
1660 	for_each_insn(file, insn) {
1661 		struct symbol *func = insn_func(insn);
1662 		if (insn->type != INSN_CALL)
1663 			continue;
1664 
1665 		reloc = insn_reloc(file, insn);
1666 		if (!reloc) {
1667 			dest_off = arch_jump_destination(insn);
1668 			dest = find_call_destination(insn->sec, dest_off);
1669 
1670 			if (add_call_dest(file, insn, dest, false))
1671 				return -1;
1672 
1673 			if (func && func->ignore)
1674 				continue;
1675 
1676 			if (!insn_call_dest(insn)) {
1677 				ERROR_INSN(insn, "unannotated intra-function call");
1678 				return -1;
1679 			}
1680 
1681 			if (func && !is_func_sym(insn_call_dest(insn))) {
1682 				ERROR_INSN(insn, "unsupported call to non-function");
1683 				return -1;
1684 			}
1685 
1686 		} else if (is_sec_sym(reloc->sym)) {
1687 			dest_off = arch_insn_adjusted_addend(insn, reloc);
1688 			dest = find_call_destination(reloc->sym->sec, dest_off);
1689 			if (!dest) {
1690 				ERROR_INSN(insn, "can't find call dest symbol at %s+0x%lx",
1691 					   reloc->sym->sec->name, dest_off);
1692 				return -1;
1693 			}
1694 
1695 			if (add_call_dest(file, insn, dest, false))
1696 				return -1;
1697 
1698 		} else if (reloc->sym->retpoline_thunk) {
1699 			if (add_retpoline_call(file, insn))
1700 				return -1;
1701 
1702 		} else {
1703 			if (add_call_dest(file, insn, reloc->sym, false))
1704 				return -1;
1705 		}
1706 	}
1707 
1708 	return 0;
1709 }
1710 
1711 /*
1712  * The .alternatives section requires some extra special care over and above
1713  * other special sections because alternatives are patched in place.
1714  */
1715 static int handle_group_alt(struct objtool_file *file,
1716 			    struct special_alt *special_alt,
1717 			    struct instruction *orig_insn,
1718 			    struct instruction **new_insn)
1719 {
1720 	struct instruction *last_new_insn = NULL, *insn, *nop = NULL;
1721 	struct alt_group *orig_alt_group, *new_alt_group;
1722 	unsigned long dest_off;
1723 
1724 	orig_alt_group = orig_insn->alt_group;
1725 	if (!orig_alt_group) {
1726 		struct instruction *last_orig_insn = NULL;
1727 
1728 		orig_alt_group = calloc(1, sizeof(*orig_alt_group));
1729 		if (!orig_alt_group) {
1730 			ERROR_GLIBC("calloc");
1731 			return -1;
1732 		}
1733 		orig_alt_group->cfi = calloc(special_alt->orig_len,
1734 					     sizeof(struct cfi_state *));
1735 		if (!orig_alt_group->cfi) {
1736 			ERROR_GLIBC("calloc");
1737 			return -1;
1738 		}
1739 
1740 		insn = orig_insn;
1741 		sec_for_each_insn_from(file, insn) {
1742 			if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
1743 				break;
1744 
1745 			insn->alt_group = orig_alt_group;
1746 			last_orig_insn = insn;
1747 		}
1748 		orig_alt_group->orig_group = NULL;
1749 		orig_alt_group->first_insn = orig_insn;
1750 		orig_alt_group->last_insn = last_orig_insn;
1751 		orig_alt_group->nop = NULL;
1752 		orig_alt_group->ignore = orig_insn->ignore_alts;
1753 	} else {
1754 		if (orig_alt_group->last_insn->offset + orig_alt_group->last_insn->len -
1755 		    orig_alt_group->first_insn->offset != special_alt->orig_len) {
1756 			ERROR_INSN(orig_insn, "weirdly overlapping alternative! %ld != %d",
1757 				   orig_alt_group->last_insn->offset +
1758 				   orig_alt_group->last_insn->len -
1759 				   orig_alt_group->first_insn->offset,
1760 				   special_alt->orig_len);
1761 			return -1;
1762 		}
1763 	}
1764 
1765 	new_alt_group = calloc(1, sizeof(*new_alt_group));
1766 	if (!new_alt_group) {
1767 		ERROR_GLIBC("calloc");
1768 		return -1;
1769 	}
1770 
1771 	if (special_alt->new_len < special_alt->orig_len) {
1772 		/*
1773 		 * Insert a fake nop at the end to make the replacement
1774 		 * alt_group the same size as the original.  This is needed to
1775 		 * allow propagate_alt_cfi() to do its magic.  When the last
1776 		 * instruction affects the stack, the instruction after it (the
1777 		 * nop) will propagate the new state to the shared CFI array.
1778 		 */
1779 		nop = calloc(1, sizeof(*nop));
1780 		if (!nop) {
1781 			ERROR_GLIBC("calloc");
1782 			return -1;
1783 		}
1784 		memset(nop, 0, sizeof(*nop));
1785 
1786 		nop->sec = special_alt->new_sec;
1787 		nop->offset = special_alt->new_off + special_alt->new_len;
1788 		nop->len = special_alt->orig_len - special_alt->new_len;
1789 		nop->type = INSN_NOP;
1790 		nop->sym = orig_insn->sym;
1791 		nop->alt_group = new_alt_group;
1792 		nop->fake = 1;
1793 	}
1794 
1795 	if (!special_alt->new_len) {
1796 		*new_insn = nop;
1797 		goto end;
1798 	}
1799 
1800 	insn = *new_insn;
1801 	sec_for_each_insn_from(file, insn) {
1802 		struct reloc *alt_reloc;
1803 
1804 		if (insn->offset >= special_alt->new_off + special_alt->new_len)
1805 			break;
1806 
1807 		last_new_insn = insn;
1808 
1809 		insn->sym = orig_insn->sym;
1810 		insn->alt_group = new_alt_group;
1811 
1812 		/*
1813 		 * Since alternative replacement code is copy/pasted by the
1814 		 * kernel after applying relocations, generally such code can't
1815 		 * have relative-address relocation references to outside the
1816 		 * .altinstr_replacement section, unless the arch's
1817 		 * alternatives code can adjust the relative offsets
1818 		 * accordingly.
1819 		 */
1820 		alt_reloc = insn_reloc(file, insn);
1821 		if (alt_reloc && arch_pc_relative_reloc(alt_reloc) &&
1822 		    !arch_support_alt_relocation(special_alt, insn, alt_reloc)) {
1823 
1824 			ERROR_INSN(insn, "unsupported relocation in alternatives section");
1825 			return -1;
1826 		}
1827 
1828 		if (!is_static_jump(insn))
1829 			continue;
1830 
1831 		if (!insn->immediate)
1832 			continue;
1833 
1834 		dest_off = arch_jump_destination(insn);
1835 		if (dest_off == special_alt->new_off + special_alt->new_len) {
1836 			insn->jump_dest = next_insn_same_sec(file, orig_alt_group->last_insn);
1837 			if (!insn->jump_dest) {
1838 				ERROR_INSN(insn, "can't find alternative jump destination");
1839 				return -1;
1840 			}
1841 		}
1842 	}
1843 
1844 	if (!last_new_insn) {
1845 		ERROR_FUNC(special_alt->new_sec, special_alt->new_off,
1846 			   "can't find last new alternative instruction");
1847 		return -1;
1848 	}
1849 
1850 end:
1851 	new_alt_group->orig_group = orig_alt_group;
1852 	new_alt_group->first_insn = *new_insn;
1853 	new_alt_group->last_insn = last_new_insn;
1854 	new_alt_group->nop = nop;
1855 	new_alt_group->ignore = (*new_insn)->ignore_alts;
1856 	new_alt_group->cfi = orig_alt_group->cfi;
1857 	return 0;
1858 }
1859 
1860 /*
1861  * A jump table entry can either convert a nop to a jump or a jump to a nop.
1862  * If the original instruction is a jump, make the alt entry an effective nop
1863  * by just skipping the original instruction.
1864  */
1865 static int handle_jump_alt(struct objtool_file *file,
1866 			   struct special_alt *special_alt,
1867 			   struct instruction *orig_insn,
1868 			   struct instruction **new_insn)
1869 {
1870 	if (orig_insn->type != INSN_JUMP_UNCONDITIONAL &&
1871 	    orig_insn->type != INSN_NOP) {
1872 
1873 		ERROR_INSN(orig_insn, "unsupported instruction at jump label");
1874 		return -1;
1875 	}
1876 
1877 	if (opts.hack_jump_label && special_alt->key_addend & 2) {
1878 		struct reloc *reloc = insn_reloc(file, orig_insn);
1879 
1880 		if (reloc)
1881 			set_reloc_type(file->elf, reloc, R_NONE);
1882 
1883 		if (elf_write_insn(file->elf, orig_insn->sec,
1884 				   orig_insn->offset, orig_insn->len,
1885 				   arch_nop_insn(orig_insn->len))) {
1886 			return -1;
1887 		}
1888 
1889 		orig_insn->type = INSN_NOP;
1890 	}
1891 
1892 	if (orig_insn->type == INSN_NOP) {
1893 		if (orig_insn->len == 2)
1894 			file->jl_nop_short++;
1895 		else
1896 			file->jl_nop_long++;
1897 
1898 		return 0;
1899 	}
1900 
1901 	if (orig_insn->len == 2)
1902 		file->jl_short++;
1903 	else
1904 		file->jl_long++;
1905 
1906 	*new_insn = next_insn_same_sec(file, orig_insn);
1907 	return 0;
1908 }
1909 
1910 /*
1911  * Read all the special sections which have alternate instructions which can be
1912  * patched in or redirected to at runtime.  Each instruction having alternate
1913  * instruction(s) has them added to its insn->alts list, which will be
1914  * traversed in validate_branch().
1915  */
1916 static int add_special_section_alts(struct objtool_file *file)
1917 {
1918 	struct list_head special_alts;
1919 	struct instruction *orig_insn, *new_insn;
1920 	struct special_alt *special_alt, *tmp;
1921 	struct alternative *alt;
1922 
1923 	if (special_get_alts(file->elf, &special_alts))
1924 		return -1;
1925 
1926 	list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {
1927 
1928 		orig_insn = find_insn(file, special_alt->orig_sec,
1929 				      special_alt->orig_off);
1930 		if (!orig_insn) {
1931 			ERROR_FUNC(special_alt->orig_sec, special_alt->orig_off,
1932 				   "special: can't find orig instruction");
1933 			return -1;
1934 		}
1935 
1936 		new_insn = NULL;
1937 		if (!special_alt->group || special_alt->new_len) {
1938 			new_insn = find_insn(file, special_alt->new_sec,
1939 					     special_alt->new_off);
1940 			if (!new_insn) {
1941 				ERROR_FUNC(special_alt->new_sec, special_alt->new_off,
1942 					   "special: can't find new instruction");
1943 				return -1;
1944 			}
1945 		}
1946 
1947 		if (special_alt->group) {
1948 			if (!special_alt->orig_len) {
1949 				ERROR_INSN(orig_insn, "empty alternative entry");
1950 				continue;
1951 			}
1952 
1953 			if (handle_group_alt(file, special_alt, orig_insn, &new_insn))
1954 				return -1;
1955 
1956 		} else if (special_alt->jump_or_nop) {
1957 			if (handle_jump_alt(file, special_alt, orig_insn, &new_insn))
1958 				return -1;
1959 		}
1960 
1961 		alt = calloc(1, sizeof(*alt));
1962 		if (!alt) {
1963 			ERROR_GLIBC("calloc");
1964 			return -1;
1965 		}
1966 
1967 		alt->insn = new_insn;
1968 		alt->next = orig_insn->alts;
1969 		orig_insn->alts = alt;
1970 
1971 		list_del(&special_alt->list);
1972 		free(special_alt);
1973 	}
1974 
1975 	if (opts.stats) {
1976 		printf("jl\\\tNOP\tJMP\n");
1977 		printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short);
1978 		printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long);
1979 	}
1980 
1981 	return 0;
1982 }
1983 
/*
 * Translate a jump-table entry reloc into an offset within its symbol's
 * section.  Default: symbol offset plus addend; arch code may override.
 */
__weak unsigned long arch_jump_table_sym_offset(struct reloc *reloc, struct reloc *table)
{
	return reloc->sym->offset + reloc_addend(reloc);
}
1988 
/*
 * Walk the switch jump table attached to @insn and add one alternative per
 * table entry so validate_branch() follows every case target.  The walk
 * stops at the table's size limit, the start of the next table, a gap in
 * the entries, or a target outside the function.  Returns 0 on success,
 * -1 if no valid entry was found or on allocation failure.
 */
static int add_jump_table(struct objtool_file *file, struct instruction *insn)
{
	unsigned long table_size = insn_jump_table_size(insn);
	struct symbol *pfunc = insn_func(insn)->pfunc;
	struct reloc *table = insn_jump_table(insn);
	struct instruction *dest_insn;
	unsigned int prev_offset = 0;
	struct reloc *reloc = table;
	struct alternative *alt;
	unsigned long sym_offset;

	/*
	 * Each @reloc is a switch table relocation which points to the target
	 * instruction.
	 */
	for_each_reloc_from(table->sec, reloc) {

		/* Check for the end of the table: */
		if (table_size && reloc_offset(reloc) - reloc_offset(table) >= table_size)
			break;
		if (reloc != table && is_jump_table(reloc))
			break;

		/* Make sure the table entries are consecutive: */
		if (prev_offset && reloc_offset(reloc) != prev_offset + arch_reloc_size(reloc))
			break;

		sym_offset = arch_jump_table_sym_offset(reloc, table);

		/* Detect function pointers from contiguous objects: */
		if (reloc->sym->sec == pfunc->sec && sym_offset == pfunc->offset)
			break;

		/*
		 * Clang sometimes leaves dangling unused jump table entries
		 * which point to the end of the function.  Ignore them.
		 */
		if (reloc->sym->sec == pfunc->sec &&
		    sym_offset == pfunc->offset + pfunc->len)
			goto next;

		dest_insn = find_insn(file, reloc->sym->sec, sym_offset);
		if (!dest_insn)
			break;

		/* Make sure the destination is in the same function: */
		if (!insn_func(dest_insn) || insn_func(dest_insn)->pfunc != pfunc)
			break;

		/* Record the case target as an alternative branch. */
		alt = calloc(1, sizeof(*alt));
		if (!alt) {
			ERROR_GLIBC("calloc");
			return -1;
		}

		alt->insn = dest_insn;
		alt->next = insn->alts;
		insn->alts = alt;
next:
		prev_offset = reloc_offset(reloc);
	}

	/* prev_offset == 0 means not even one entry was accepted. */
	if (!prev_offset) {
		ERROR_INSN(insn, "can't find switch jump table");
		return -1;
	}

	return 0;
}
2058 
2059 /*
2060  * find_jump_table() - Given a dynamic jump, find the switch jump table
2061  * associated with it.
2062  */
2063 static void find_jump_table(struct objtool_file *file, struct symbol *func,
2064 			    struct instruction *insn)
2065 {
2066 	struct reloc *table_reloc;
2067 	struct instruction *dest_insn, *orig_insn = insn;
2068 	unsigned long table_size;
2069 	unsigned long sym_offset;
2070 
2071 	/*
2072 	 * Backward search using the @first_jump_src links, these help avoid
2073 	 * much of the 'in between' code. Which avoids us getting confused by
2074 	 * it.
2075 	 */
2076 	for (;
2077 	     insn && insn_func(insn) && insn_func(insn)->pfunc == func;
2078 	     insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) {
2079 
2080 		if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
2081 			break;
2082 
2083 		/* allow small jumps within the range */
2084 		if (insn->type == INSN_JUMP_UNCONDITIONAL &&
2085 		    insn->jump_dest &&
2086 		    (insn->jump_dest->offset <= insn->offset ||
2087 		     insn->jump_dest->offset > orig_insn->offset))
2088 			break;
2089 
2090 		table_reloc = arch_find_switch_table(file, insn, &table_size);
2091 		if (!table_reloc)
2092 			continue;
2093 
2094 		sym_offset = table_reloc->sym->offset + reloc_addend(table_reloc);
2095 
2096 		dest_insn = find_insn(file, table_reloc->sym->sec, sym_offset);
2097 		if (!dest_insn || !insn_func(dest_insn) || insn_func(dest_insn)->pfunc != func)
2098 			continue;
2099 
2100 		set_jump_table(table_reloc);
2101 		orig_insn->_jump_table = table_reloc;
2102 		orig_insn->_jump_table_size = table_size;
2103 
2104 		break;
2105 	}
2106 }
2107 
2108 /*
2109  * First pass: Mark the head of each jump table so that in the next pass,
2110  * we know when a given jump table ends and the next one starts.
2111  */
2112 static void mark_func_jump_tables(struct objtool_file *file,
2113 				    struct symbol *func)
2114 {
2115 	struct instruction *insn, *last = NULL;
2116 
2117 	func_for_each_insn(file, func, insn) {
2118 		if (!last)
2119 			last = insn;
2120 
2121 		/*
2122 		 * Store back-pointers for unconditional forward jumps such
2123 		 * that find_jump_table() can back-track using those and
2124 		 * avoid some potentially confusing code.
2125 		 */
2126 		if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
2127 		    insn->offset > last->offset &&
2128 		    insn->jump_dest->offset > insn->offset &&
2129 		    !insn->jump_dest->first_jump_src) {
2130 
2131 			insn->jump_dest->first_jump_src = insn;
2132 			last = insn->jump_dest;
2133 		}
2134 
2135 		if (insn->type != INSN_JUMP_DYNAMIC)
2136 			continue;
2137 
2138 		find_jump_table(file, func, insn);
2139 	}
2140 }
2141 
2142 static int add_func_jump_tables(struct objtool_file *file,
2143 				  struct symbol *func)
2144 {
2145 	struct instruction *insn;
2146 
2147 	func_for_each_insn(file, func, insn) {
2148 		if (!insn_jump_table(insn))
2149 			continue;
2150 
2151 		if (add_jump_table(file, insn))
2152 			return -1;
2153 	}
2154 
2155 	return 0;
2156 }
2157 
2158 /*
2159  * For some switch statements, gcc generates a jump table in the .rodata
2160  * section which contains a list of addresses within the function to jump to.
2161  * This finds these jump tables and adds them to the insn->alts lists.
2162  */
2163 static int add_jump_table_alts(struct objtool_file *file)
2164 {
2165 	struct symbol *func;
2166 
2167 	if (!file->rodata)
2168 		return 0;
2169 
2170 	for_each_sym(file->elf, func) {
2171 		if (!is_func_sym(func) || func->alias != func)
2172 			continue;
2173 
2174 		mark_func_jump_tables(file, func);
2175 		if (add_func_jump_tables(file, func))
2176 			return -1;
2177 	}
2178 
2179 	return 0;
2180 }
2181 
2182 static void set_func_state(struct cfi_state *state)
2183 {
2184 	state->cfa = initial_func_cfi.cfa;
2185 	memcpy(&state->regs, &initial_func_cfi.regs,
2186 	       CFI_NUM_REGS * sizeof(struct cfi_reg));
2187 	state->stack_size = initial_func_cfi.cfa.offset;
2188 	state->type = UNWIND_HINT_TYPE_CALL;
2189 }
2190 
/*
 * Parse the .discard.unwind_hints section and attach the hinted CFI state
 * to each referenced instruction.  Special hint types (UNDEFINED, SAVE,
 * RESTORE, FUNC) are handled out-of-band; everything else builds a CFI
 * state and interns it via cfi_hash_find_or_add().  Returns 0 on success,
 * -1 on malformed input.
 */
static int read_unwind_hints(struct objtool_file *file)
{
	struct cfi_state cfi = init_cfi;
	struct section *sec;
	struct unwind_hint *hint;
	struct instruction *insn;
	struct reloc *reloc;
	unsigned long offset;
	int i;

	sec = find_section_by_name(file->elf, ".discard.unwind_hints");
	if (!sec)
		return 0;

	if (!sec->rsec) {
		ERROR("missing .rela.discard.unwind_hints section");
		return -1;
	}

	/* The section must be a whole number of unwind_hint records. */
	if (sec_size(sec) % sizeof(struct unwind_hint)) {
		ERROR("struct unwind_hint size mismatch");
		return -1;
	}

	file->hints = true;

	for (i = 0; i < sec_size(sec) / sizeof(struct unwind_hint); i++) {
		hint = (struct unwind_hint *)sec->data->d_buf + i;

		/* Each hint record carries a reloc locating its instruction. */
		reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
		if (!reloc) {
			ERROR("can't find reloc for unwind_hints[%d]", i);
			return -1;
		}

		offset = reloc->sym->offset + reloc_addend(reloc);

		insn = find_insn(file, reloc->sym->sec, offset);
		if (!insn) {
			ERROR("can't find insn for unwind_hints[%d]", i);
			return -1;
		}

		insn->hint = true;

		if (hint->type == UNWIND_HINT_TYPE_UNDEFINED) {
			insn->cfi = &force_undefined_cfi;
			continue;
		}

		if (hint->type == UNWIND_HINT_TYPE_SAVE) {
			insn->hint = false;
			insn->save = true;
			continue;
		}

		if (hint->type == UNWIND_HINT_TYPE_RESTORE) {
			insn->restore = true;
			continue;
		}

		if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) {
			struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset);

			/* Global entry points need ENDBR under IBT. */
			if (sym && is_global_sym(sym)) {
				if (opts.ibt && insn->type != INSN_ENDBR && !insn->noendbr) {
					ERROR_INSN(insn, "UNWIND_HINT_IRET_REGS without ENDBR");
					return -1;
				}
			}
		}

		if (hint->type == UNWIND_HINT_TYPE_FUNC) {
			insn->cfi = &func_cfi;
			continue;
		}

		/* Start from the insn's existing CFI if it already has one. */
		if (insn->cfi)
			cfi = *(insn->cfi);

		if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) {
			ERROR_INSN(insn, "unsupported unwind_hint sp base reg %d", hint->sp_reg);
			return -1;
		}

		cfi.cfa.offset = bswap_if_needed(file->elf, hint->sp_offset);
		cfi.type = hint->type;
		cfi.signal = hint->signal;

		insn->cfi = cfi_hash_find_or_add(&cfi);
	}

	return 0;
}
2285 
/*
 * Walk the .discard.annotate_insn section and invoke @func for each
 * (instruction, annotation type) pair it describes.  Each entry's reloc
 * points at the annotated instruction.
 */
static int read_annotate(struct objtool_file *file,
			 int (*func)(struct objtool_file *file, int type, struct instruction *insn))
{
	struct section *sec;
	struct instruction *insn;
	struct reloc *reloc;
	uint64_t offset;
	int type;

	sec = find_section_by_name(file->elf, ".discard.annotate_insn");
	if (!sec)
		return 0;

	if (!sec->rsec)
		return 0;

	/* Some linkers emit a bogus entry size; force the expected one. */
	if (sec->sh.sh_entsize != 8) {
		static bool warned = false;
		if (!warned && opts.verbose) {
			WARN("%s: dodgy linker, sh_entsize != 8", sec->name);
			warned = true;
		}
		sec->sh.sh_entsize = 8;
	}

	if (sec_num_entries(sec) != sec_num_entries(sec->rsec)) {
		ERROR("bad .discard.annotate_insn section: missing relocs");
		return -1;
	}

	for_each_reloc(sec->rsec, reloc) {
		type = annotype(file->elf, sec, reloc);
		offset = reloc->sym->offset + reloc_addend(reloc);
		insn = find_insn(file, reloc->sym->sec, offset);

		if (!insn) {
			ERROR("bad .discard.annotate_insn entry: %d of type %d", reloc_idx(reloc), type);
			return -1;
		}

		if (func(file, type, insn))
			return -1;
	}

	return 0;
}
2332 
2333 static int __annotate_early(struct objtool_file *file, int type, struct instruction *insn)
2334 {
2335 	switch (type) {
2336 
2337 	/* Must be before add_special_section_alts() */
2338 	case ANNOTYPE_IGNORE_ALTS:
2339 		insn->ignore_alts = true;
2340 		break;
2341 
2342 	/*
2343 	 * Must be before read_unwind_hints() since that needs insn->noendbr.
2344 	 */
2345 	case ANNOTYPE_NOENDBR:
2346 		insn->noendbr = 1;
2347 		break;
2348 
2349 	default:
2350 		break;
2351 	}
2352 
2353 	return 0;
2354 }
2355 
/*
 * Convert an intra-function call annotation into an unconditional jump
 * with a resolved destination, while keeping its stack_op so the push of
 * the return address is still accounted for.
 */
static int __annotate_ifc(struct objtool_file *file, int type, struct instruction *insn)
{
	unsigned long dest_off;

	if (type != ANNOTYPE_INTRA_FUNCTION_CALL)
		return 0;

	if (insn->type != INSN_CALL) {
		ERROR_INSN(insn, "intra_function_call not a direct call");
		return -1;
	}

	/*
	 * Treat intra-function CALLs as JMPs, but with a stack_op.
	 * See add_call_destinations(), which strips stack_ops from
	 * normal CALLs.
	 */
	insn->type = INSN_JUMP_UNCONDITIONAL;

	dest_off = arch_jump_destination(insn);
	insn->jump_dest = find_insn(file, insn->sec, dest_off);
	if (!insn->jump_dest) {
		ERROR_INSN(insn, "can't find call dest at %s+0x%lx",
			   insn->sec->name, dest_off);
		return -1;
	}

	return 0;
}
2385 
/*
 * Apply the annotation types that depend on state set up by earlier
 * passes (decoded instruction types, call/jump destinations, dead-end
 * marks).  Unknown types are an error.
 */
static int __annotate_late(struct objtool_file *file, int type, struct instruction *insn)
{
	struct symbol *sym;

	switch (type) {
	case ANNOTYPE_NOENDBR:
		/* early */
		break;

	case ANNOTYPE_RETPOLINE_SAFE:
		/* Only meaningful on indirect transfers (or NOPs from alts). */
		if (insn->type != INSN_JUMP_DYNAMIC &&
		    insn->type != INSN_CALL_DYNAMIC &&
		    insn->type != INSN_RETURN &&
		    insn->type != INSN_NOP) {
			ERROR_INSN(insn, "retpoline_safe hint not an indirect jump/call/ret/nop");
			return -1;
		}

		insn->retpoline_safe = true;
		break;

	case ANNOTYPE_INSTR_BEGIN:
		insn->instr++;
		break;

	case ANNOTYPE_INSTR_END:
		insn->instr--;
		break;

	case ANNOTYPE_UNRET_BEGIN:
		insn->unret = 1;
		break;

	case ANNOTYPE_IGNORE_ALTS:
		/* early */
		break;

	case ANNOTYPE_INTRA_FUNCTION_CALL:
		/* ifc */
		break;

	case ANNOTYPE_REACHABLE:
		/* Overrides a dead_end_function() mark set earlier. */
		insn->dead_end = false;
		break;

	case ANNOTYPE_NOCFI:
		sym = insn->sym;
		if (!sym) {
			ERROR_INSN(insn, "dodgy NOCFI annotation");
			return -1;
		}
		insn->sym->nocfi = 1;
		break;

	default:
		ERROR_INSN(insn, "Unknown annotation type: %d", type);
		return -1;
	}

	return 0;
}
2447 
2448 /*
2449  * Return true if name matches an instrumentation function, where calls to that
2450  * function from noinstr code can safely be removed, but compilers won't do so.
2451  */
2452 static bool is_profiling_func(const char *name)
2453 {
2454 	/*
2455 	 * Many compilers cannot disable KCOV with a function attribute.
2456 	 */
2457 	if (!strncmp(name, "__sanitizer_cov_", 16))
2458 		return true;
2459 
2460 	return false;
2461 }
2462 
2463 static int classify_symbols(struct objtool_file *file)
2464 {
2465 	struct symbol *func;
2466 
2467 	for_each_sym(file->elf, func) {
2468 		if (is_notype_sym(func) && strstarts(func->name, ".L"))
2469 			func->local_label = true;
2470 
2471 		if (!is_global_sym(func))
2472 			continue;
2473 
2474 		if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR,
2475 			     strlen(STATIC_CALL_TRAMP_PREFIX_STR)))
2476 			func->static_call_tramp = true;
2477 
2478 		if (arch_is_retpoline(func))
2479 			func->retpoline_thunk = true;
2480 
2481 		if (arch_is_rethunk(func))
2482 			func->return_thunk = true;
2483 
2484 		if (arch_is_embedded_insn(func))
2485 			func->embedded_insn = true;
2486 
2487 		if (arch_ftrace_match(func->name))
2488 			func->fentry = true;
2489 
2490 		if (is_profiling_func(func->name))
2491 			func->profiling_func = true;
2492 	}
2493 
2494 	return 0;
2495 }
2496 
2497 static void mark_rodata(struct objtool_file *file)
2498 {
2499 	struct section *sec;
2500 	bool found = false;
2501 
2502 	/*
2503 	 * Search for the following rodata sections, each of which can
2504 	 * potentially contain jump tables:
2505 	 *
2506 	 * - .rodata: can contain GCC switch tables
2507 	 * - .rodata.<func>: same, if -fdata-sections is being used
2508 	 * - .data.rel.ro.c_jump_table: contains C annotated jump tables
2509 	 *
2510 	 * .rodata.str1.* sections are ignored; they don't contain jump tables.
2511 	 */
2512 	for_each_sec(file->elf, sec) {
2513 		if ((!strncmp(sec->name, ".rodata", 7) &&
2514 		     !strstr(sec->name, ".str1.")) ||
2515 		    !strncmp(sec->name, ".data.rel.ro", 12)) {
2516 			sec->rodata = true;
2517 			found = true;
2518 		}
2519 	}
2520 
2521 	file->rodata = found;
2522 }
2523 
/*
 * Mark instructions that live in symbol-less holes so later validation
 * doesn't warn about code the linker kept but no longer references.
 * Only relevant for whole-archive (linked) runs.
 */
static void mark_holes(struct objtool_file *file)
{
	struct instruction *insn;
	bool in_hole = false;

	if (!opts.link)
		return;

	/*
	 * Whole archive runs might encounter dead code from weak symbols.
	 * This is where the linker will have dropped the weak symbol in
	 * favour of a regular symbol, but leaves the code in place.
	 */
	for_each_insn(file, insn) {
		if (insn->sym || !find_symbol_hole_containing(insn->sec, insn->offset)) {
			in_hole = false;
			continue;
		}

		/* Skip function padding and pfx code */
		if (!in_hole && insn->type == INSN_NOP)
			continue;

		in_hole = true;
		insn->hole = 1;

		/*
		 * If this hole jumps to a .cold function, mark it ignore.
		 */
		if (insn->jump_dest) {
			struct symbol *dest_func = insn_func(insn->jump_dest);

			if (dest_func && dest_func->cold)
				dest_func->ignore = true;
		}
	}
}
2561 
2562 static bool validate_branch_enabled(void)
2563 {
2564 	return opts.stackval ||
2565 	       opts.orc ||
2566 	       opts.uaccess ||
2567 	       opts.checksum;
2568 }
2569 
/*
 * Run all the decode passes in dependency order.  Several passes rely on
 * state set up by earlier ones; the ordering constraints are called out
 * inline below.
 */
static int decode_sections(struct objtool_file *file)
{
	file->klp = is_livepatch_module(file);

	mark_rodata(file);

	if (init_pv_ops(file))
		return -1;

	/*
	 * Must be before add_{jump_call}_destination.
	 */
	if (classify_symbols(file))
		return -1;

	if (decode_instructions(file))
		return -1;

	if (add_ignores(file))
		return -1;

	add_uaccess_safe(file);

	if (read_annotate(file, __annotate_early))
		return -1;

	/*
	 * Must be before add_jump_destinations(), which depends on 'func'
	 * being set for alternatives, to enable proper sibling call detection.
	 */
	if (validate_branch_enabled() || opts.noinstr || opts.hack_jump_label) {
		if (add_special_section_alts(file))
			return -1;
	}

	if (add_jump_destinations(file))
		return -1;

	/*
	 * Must be before add_call_destination(); it changes INSN_CALL to
	 * INSN_JUMP.
	 */
	if (read_annotate(file, __annotate_ifc))
		return -1;

	if (add_call_destinations(file))
		return -1;

	if (add_jump_table_alts(file))
		return -1;

	if (read_unwind_hints(file))
		return -1;

	/* Must be after add_jump_destinations() */
	mark_holes(file);

	/*
	 * Must be after add_call_destinations() such that it can override
	 * dead_end_function() marks.
	 */
	if (read_annotate(file, __annotate_late))
		return -1;

	return 0;
}
2636 
2637 static bool is_special_call(struct instruction *insn)
2638 {
2639 	if (insn->type == INSN_CALL) {
2640 		struct symbol *dest = insn_call_dest(insn);
2641 
2642 		if (!dest)
2643 			return false;
2644 
2645 		if (dest->fentry || dest->embedded_insn)
2646 			return true;
2647 	}
2648 
2649 	return false;
2650 }
2651 
2652 static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state)
2653 {
2654 	struct cfi_state *cfi = &state->cfi;
2655 	int i;
2656 
2657 	if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap)
2658 		return true;
2659 
2660 	if (cfi->cfa.offset != initial_func_cfi.cfa.offset)
2661 		return true;
2662 
2663 	if (cfi->stack_size != initial_func_cfi.cfa.offset)
2664 		return true;
2665 
2666 	for (i = 0; i < CFI_NUM_REGS; i++) {
2667 		if (cfi->regs[i].base != initial_func_cfi.regs[i].base ||
2668 		    cfi->regs[i].offset != initial_func_cfi.regs[i].offset)
2669 			return true;
2670 	}
2671 
2672 	return false;
2673 }
2674 
2675 static bool check_reg_frame_pos(const struct cfi_reg *reg,
2676 				int expected_offset)
2677 {
2678 	return reg->base == CFI_CFA &&
2679 	       reg->offset == expected_offset;
2680 }
2681 
2682 static bool has_valid_stack_frame(struct insn_state *state)
2683 {
2684 	struct cfi_state *cfi = &state->cfi;
2685 
2686 	if (cfi->cfa.base == CFI_BP &&
2687 	    check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) &&
2688 	    check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8))
2689 		return true;
2690 
2691 	if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP)
2692 		return true;
2693 
2694 	return false;
2695 }
2696 
/*
 * Simplified CFI tracking for UNWIND_HINT_TYPE_REGS(_PARTIAL) regions:
 * only the CFA offset is adjusted for pushes, pops and SP adjustments;
 * the conditions below are checked independently, not as alternatives.
 */
static int update_cfi_state_regs(struct instruction *insn,
				  struct cfi_state *cfi,
				  struct stack_op *op)
{
	struct cfi_reg *cfa = &cfi->cfa;

	if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
		return 0;

	/* push */
	if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
		cfa->offset += 8;

	/* pop */
	if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
		cfa->offset -= 8;

	/* add immediate to sp */
	if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD &&
	    op->dest.reg == CFI_SP && op->src.reg == CFI_SP)
		cfa->offset -= op->src.offset;

	return 0;
}
2721 
2722 static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset)
2723 {
2724 	if (arch_callee_saved_reg(reg) &&
2725 	    cfi->regs[reg].base == CFI_UNDEFINED) {
2726 		cfi->regs[reg].base = base;
2727 		cfi->regs[reg].offset = offset;
2728 	}
2729 }
2730 
2731 static void restore_reg(struct cfi_state *cfi, unsigned char reg)
2732 {
2733 	cfi->regs[reg].base = initial_func_cfi.regs[reg].base;
2734 	cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset;
2735 }
2736 
2737 /*
2738  * A note about DRAP stack alignment:
2739  *
2740  * GCC has the concept of a DRAP register, which is used to help keep track of
2741  * the stack pointer when aligning the stack.  r10 or r13 is used as the DRAP
2742  * register.  The typical DRAP pattern is:
2743  *
2744  *   4c 8d 54 24 08		lea    0x8(%rsp),%r10
2745  *   48 83 e4 c0		and    $0xffffffffffffffc0,%rsp
2746  *   41 ff 72 f8		pushq  -0x8(%r10)
2747  *   55				push   %rbp
2748  *   48 89 e5			mov    %rsp,%rbp
2749  *				(more pushes)
2750  *   41 52			push   %r10
2751  *				...
2752  *   41 5a			pop    %r10
2753  *				(more pops)
2754  *   5d				pop    %rbp
2755  *   49 8d 62 f8		lea    -0x8(%r10),%rsp
2756  *   c3				retq
2757  *
2758  * There are some variations in the epilogues, like:
2759  *
2760  *   5b				pop    %rbx
2761  *   41 5a			pop    %r10
2762  *   41 5c			pop    %r12
2763  *   41 5d			pop    %r13
2764  *   41 5e			pop    %r14
2765  *   c9				leaveq
2766  *   49 8d 62 f8		lea    -0x8(%r10),%rsp
2767  *   c3				retq
2768  *
2769  * and:
2770  *
2771  *   4c 8b 55 e8		mov    -0x18(%rbp),%r10
2772  *   48 8b 5d e0		mov    -0x20(%rbp),%rbx
2773  *   4c 8b 65 f0		mov    -0x10(%rbp),%r12
2774  *   4c 8b 6d f8		mov    -0x8(%rbp),%r13
2775  *   c9				leaveq
2776  *   49 8d 62 f8		lea    -0x8(%r10),%rsp
2777  *   c3				retq
2778  *
2779  * Sometimes r13 is used as the DRAP register, in which case it's saved and
2780  * restored beforehand:
2781  *
2782  *   41 55			push   %r13
2783  *   4c 8d 6c 24 10		lea    0x10(%rsp),%r13
2784  *   48 83 e4 f0		and    $0xfffffffffffffff0,%rsp
2785  *				...
2786  *   49 8d 65 f0		lea    -0x10(%r13),%rsp
2787  *   41 5d			pop    %r13
2788  *   c3				retq
2789  */
/*
 * Apply a single stack operation @op from @insn to the CFI state @cfi.
 * Returns 0 on success, 1 for a recoverable problem (state becomes
 * untrackable), -1 on a fatal error.
 */
static int update_cfi_state(struct instruction *insn,
			    struct instruction *next_insn,
			    struct cfi_state *cfi, struct stack_op *op)
{
	struct cfi_reg *cfa = &cfi->cfa;
	struct cfi_reg *regs = cfi->regs;

	/* ignore UNWIND_HINT_UNDEFINED regions */
	if (cfi->force_undefined)
		return 0;

	/* stack operations don't make sense with an undefined CFA */
	if (cfa->base == CFI_UNDEFINED) {
		if (insn_func(insn)) {
			WARN_INSN(insn, "undefined stack state");
			return 1;
		}
		return 0;
	}

	if (cfi->type == UNWIND_HINT_TYPE_REGS ||
	    cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL)
		return update_cfi_state_regs(insn, cfi, op);

	switch (op->dest.type) {

	case OP_DEST_REG:
		switch (op->src.type) {

		case OP_SRC_REG:
			if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
			    cfa->base == CFI_SP &&
			    check_reg_frame_pos(&regs[CFI_BP], -cfa->offset)) {

				/* mov %rsp, %rbp */
				cfa->base = op->dest.reg;
				cfi->bp_scratch = false;
			}

			else if (op->src.reg == CFI_SP &&
				 op->dest.reg == CFI_BP && cfi->drap) {

				/* drap: mov %rsp, %rbp */
				regs[CFI_BP].base = CFI_BP;
				regs[CFI_BP].offset = -cfi->stack_size;
				cfi->bp_scratch = false;
			}

			else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {

				/*
				 * mov %rsp, %reg
				 *
				 * This is needed for the rare case where GCC
				 * does:
				 *
				 *   mov    %rsp, %rax
				 *   ...
				 *   mov    %rax, %rsp
				 */
				cfi->vals[op->dest.reg].base = CFI_CFA;
				cfi->vals[op->dest.reg].offset = -cfi->stack_size;
			}

			else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
				 (cfa->base == CFI_BP || cfa->base == cfi->drap_reg)) {

				/*
				 * mov %rbp, %rsp
				 *
				 * Restore the original stack pointer (Clang).
				 */
				cfi->stack_size = -cfi->regs[CFI_BP].offset;
			}

			else if (op->dest.reg == cfa->base) {

				/* mov %reg, %rsp */
				if (cfa->base == CFI_SP &&
				    cfi->vals[op->src.reg].base == CFI_CFA) {

					/*
					 * This is needed for the rare case
					 * where GCC does something dumb like:
					 *
					 *   lea    0x8(%rsp), %rcx
					 *   ...
					 *   mov    %rcx, %rsp
					 */
					cfa->offset = -cfi->vals[op->src.reg].offset;
					cfi->stack_size = cfa->offset;

				} else if (cfa->base == CFI_SP &&
					   cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
					   cfi->vals[op->src.reg].offset == cfa->offset) {

					/*
					 * Stack swizzle:
					 *
					 * 1: mov %rsp, (%[tos])
					 * 2: mov %[tos], %rsp
					 *    ...
					 * 3: pop %rsp
					 *
					 * Where:
					 *
					 * 1 - places a pointer to the previous
					 *     stack at the Top-of-Stack of the
					 *     new stack.
					 *
					 * 2 - switches to the new stack.
					 *
					 * 3 - pops the Top-of-Stack to restore
					 *     the original stack.
					 *
					 * Note: we set base to SP_INDIRECT
					 * here and preserve offset. Therefore
					 * when the unwinder reaches ToS it
					 * will dereference SP and then add the
					 * offset to find the next frame, IOW:
					 * (%rsp) + offset.
					 */
					cfa->base = CFI_SP_INDIRECT;

				} else {
					cfa->base = CFI_UNDEFINED;
					cfa->offset = 0;
				}
			}

			else if (op->dest.reg == CFI_SP &&
				 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
				 cfi->vals[op->src.reg].offset == cfa->offset) {

				/*
				 * The same stack swizzle case 2) as above. But
				 * because we can't change cfa->base, case 3)
				 * will become a regular POP. Pretend we're a
				 * PUSH so things don't go unbalanced.
				 */
				cfi->stack_size += 8;
			}


			break;

		case OP_SRC_ADD:
			if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) {

				/* add imm, %rsp */
				cfi->stack_size -= op->src.offset;
				if (cfa->base == CFI_SP)
					cfa->offset -= op->src.offset;
				break;
			}

			if (op->dest.reg == CFI_BP && op->src.reg == CFI_SP &&
			    insn->sym->frame_pointer) {
				/* addi.d fp,sp,imm on LoongArch */
				if (cfa->base == CFI_SP && cfa->offset == op->src.offset) {
					cfa->base = CFI_BP;
					cfa->offset = 0;
				}
				break;
			}

			if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {
				/* addi.d sp,fp,imm on LoongArch */
				if (cfa->base == CFI_BP && cfa->offset == 0) {
					if (insn->sym->frame_pointer) {
						cfa->base = CFI_SP;
						cfa->offset = -op->src.offset;
					}
				} else {
					/* lea disp(%rbp), %rsp */
					cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
				}
				break;
			}

			if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {

				/* drap: lea disp(%rsp), %drap */
				cfi->drap_reg = op->dest.reg;

				/*
				 * lea disp(%rsp), %reg
				 *
				 * This is needed for the rare case where GCC
				 * does something dumb like:
				 *
				 *   lea    0x8(%rsp), %rcx
				 *   ...
				 *   mov    %rcx, %rsp
				 */
				cfi->vals[op->dest.reg].base = CFI_CFA;
				cfi->vals[op->dest.reg].offset = \
					-cfi->stack_size + op->src.offset;

				break;
			}

			if (cfi->drap && op->dest.reg == CFI_SP &&
			    op->src.reg == cfi->drap_reg) {

				 /* drap: lea disp(%drap), %rsp */
				cfa->base = CFI_SP;
				cfa->offset = cfi->stack_size = -op->src.offset;
				cfi->drap_reg = CFI_UNDEFINED;
				cfi->drap = false;
				break;
			}

			if (op->dest.reg == cfi->cfa.base && !(next_insn && next_insn->hint)) {
				WARN_INSN(insn, "unsupported stack register modification");
				return -1;
			}

			break;

		case OP_SRC_AND:
			if (op->dest.reg != CFI_SP ||
			    (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
			    (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
				WARN_INSN(insn, "unsupported stack pointer realignment");
				return -1;
			}

			if (cfi->drap_reg != CFI_UNDEFINED) {
				/* drap: and imm, %rsp */
				cfa->base = cfi->drap_reg;
				cfa->offset = cfi->stack_size = 0;
				cfi->drap = true;
			}

			/*
			 * Older versions of GCC (4.8ish) realign the stack
			 * without DRAP, with a frame pointer.
			 */

			break;

		case OP_SRC_POP:
		case OP_SRC_POPF:
			if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) {

				/* pop %rsp; # restore from a stack swizzle */
				cfa->base = CFI_SP;
				break;
			}

			if (!cfi->drap && op->dest.reg == cfa->base) {

				/* pop %rbp */
				cfa->base = CFI_SP;
			}

			if (cfi->drap && cfa->base == CFI_BP_INDIRECT &&
			    op->dest.reg == cfi->drap_reg &&
			    cfi->drap_offset == -cfi->stack_size) {

				/* drap: pop %drap */
				cfa->base = cfi->drap_reg;
				cfa->offset = 0;
				cfi->drap_offset = -1;

			} else if (cfi->stack_size == -regs[op->dest.reg].offset) {

				/* pop %reg */
				restore_reg(cfi, op->dest.reg);
			}

			cfi->stack_size -= 8;
			if (cfa->base == CFI_SP)
				cfa->offset -= 8;

			break;

		case OP_SRC_REG_INDIRECT:
			if (!cfi->drap && op->dest.reg == cfa->base &&
			    op->dest.reg == CFI_BP) {

				/* mov disp(%rsp), %rbp */
				cfa->base = CFI_SP;
				cfa->offset = cfi->stack_size;
			}

			if (cfi->drap && op->src.reg == CFI_BP &&
			    op->src.offset == cfi->drap_offset) {

				/* drap: mov disp(%rbp), %drap */
				cfa->base = cfi->drap_reg;
				cfa->offset = 0;
				cfi->drap_offset = -1;
			}

			if (cfi->drap && op->src.reg == CFI_BP &&
			    op->src.offset == regs[op->dest.reg].offset) {

				/* drap: mov disp(%rbp), %reg */
				restore_reg(cfi, op->dest.reg);

			} else if (op->src.reg == cfa->base &&
			    op->src.offset == regs[op->dest.reg].offset + cfa->offset) {

				/* mov disp(%rbp), %reg */
				/* mov disp(%rsp), %reg */
				restore_reg(cfi, op->dest.reg);

			} else if (op->src.reg == CFI_SP &&
				   op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) {

				/* mov disp(%rsp), %reg */
				restore_reg(cfi, op->dest.reg);
			}

			break;

		default:
			WARN_INSN(insn, "unknown stack-related instruction");
			return -1;
		}

		break;

	case OP_DEST_PUSH:
	case OP_DEST_PUSHF:
		/* push: the stack grows by one slot; CFA follows if SP-based */
		cfi->stack_size += 8;
		if (cfa->base == CFI_SP)
			cfa->offset += 8;

		if (op->src.type != OP_SRC_REG)
			break;

		if (cfi->drap) {
			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {

				/* drap: push %drap */
				cfa->base = CFI_BP_INDIRECT;
				cfa->offset = -cfi->stack_size;

				/* save drap so we know when to restore it */
				cfi->drap_offset = -cfi->stack_size;

			} else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) {

				/* drap: push %rbp */
				cfi->stack_size = 0;

			} else {

				/* drap: push %reg */
				save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size);
			}

		} else {

			/* push %reg */
			save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size);
		}

		/* detect when asm code uses rbp as a scratch register */
		if (opts.stackval && insn_func(insn) && op->src.reg == CFI_BP &&
		    cfa->base != CFI_BP)
			cfi->bp_scratch = true;
		break;

	case OP_DEST_REG_INDIRECT:

		if (cfi->drap) {
			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {

				/* drap: mov %drap, disp(%rbp) */
				cfa->base = CFI_BP_INDIRECT;
				cfa->offset = op->dest.offset;

				/* save drap offset so we know when to restore it */
				cfi->drap_offset = op->dest.offset;
			} else {

				/* drap: mov reg, disp(%rbp) */
				save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset);
			}

		} else if (op->dest.reg == cfa->base) {

			/* mov reg, disp(%rbp) */
			/* mov reg, disp(%rsp) */
			save_reg(cfi, op->src.reg, CFI_CFA,
				 op->dest.offset - cfi->cfa.offset);

		} else if (op->dest.reg == CFI_SP) {

			/* mov reg, disp(%rsp) */
			save_reg(cfi, op->src.reg, CFI_CFA,
				 op->dest.offset - cfi->stack_size);

		} else if (op->src.reg == CFI_SP && op->dest.offset == 0) {

			/* mov %rsp, (%reg); # setup a stack swizzle. */
			cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT;
			cfi->vals[op->dest.reg].offset = cfa->offset;
		}

		break;

	case OP_DEST_MEM:
		if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
			WARN_INSN(insn, "unknown stack-related memory operation");
			return -1;
		}

		/* pop mem */
		cfi->stack_size -= 8;
		if (cfa->base == CFI_SP)
			cfa->offset -= 8;

		break;

	default:
		WARN_INSN(insn, "unknown stack-related instruction");
		return -1;
	}

	return 0;
}
3216 
3217 /*
3218  * The stack layouts of alternatives instructions can sometimes diverge when
3219  * they have stack modifications.  That's fine as long as the potential stack
3220  * layouts don't conflict at any given potential instruction boundary.
3221  *
3222  * Flatten the CFIs of the different alternative code streams (both original
3223  * and replacement) into a single shared CFI array which can be used to detect
3224  * conflicts and nicely feed a linear array of ORC entries to the unwinder.
3225  */
3226 static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn)
3227 {
3228 	struct cfi_state **alt_cfi;
3229 	int group_off;
3230 
3231 	if (!insn->alt_group)
3232 		return 0;
3233 
3234 	if (!insn->cfi) {
3235 		WARN("CFI missing");
3236 		return -1;
3237 	}
3238 
3239 	alt_cfi = insn->alt_group->cfi;
3240 	group_off = insn->offset - insn->alt_group->first_insn->offset;
3241 
3242 	if (!alt_cfi[group_off]) {
3243 		alt_cfi[group_off] = insn->cfi;
3244 	} else {
3245 		if (cficmp(alt_cfi[group_off], insn->cfi)) {
3246 			struct alt_group *orig_group = insn->alt_group->orig_group ?: insn->alt_group;
3247 			struct instruction *orig = orig_group->first_insn;
3248 			WARN_INSN(orig, "stack layout conflict in alternatives: %s",
3249 				  offstr(insn->sec, insn->offset));
3250 			return -1;
3251 		}
3252 	}
3253 
3254 	return 0;
3255 }
3256 
/*
 * Apply all of @insn's stack operations to the CFI state, and track the
 * saved uaccess state across PUSHF/POPF pairs inside alternatives.
 */
static int handle_insn_ops(struct instruction *insn,
			   struct instruction *next_insn,
			   struct insn_state *state)
{
	struct stack_op *op;
	int ret;

	for (op = insn->stack_ops; op; op = op->next) {

		ret = update_cfi_state(insn, next_insn, &state->cfi, op);
		if (ret)
			return ret;

		if (!opts.uaccess || !insn->alt_group)
			continue;

		/* PUSHF: save the current uaccess bit on a small bit-stack. */
		if (op->dest.type == OP_DEST_PUSHF) {
			if (!state->uaccess_stack) {
				state->uaccess_stack = 1;
			} else if (state->uaccess_stack >> 31) {
				WARN_INSN(insn, "PUSHF stack exhausted");
				return 1;
			}
			state->uaccess_stack <<= 1;
			state->uaccess_stack  |= state->uaccess;
		}

		/* POPF: restore the most recently pushed uaccess bit. */
		if (op->src.type == OP_SRC_POPF) {
			if (state->uaccess_stack) {
				state->uaccess = state->uaccess_stack & 1;
				state->uaccess_stack >>= 1;
				if (state->uaccess_stack == 1)
					state->uaccess_stack = 0;
			}
		}
	}

	return 0;
}
3296 
/*
 * Compare the instruction's recorded CFI state with @cfi2, warning about
 * the first category of mismatch found (CFA, register rules, type, DRAP).
 * Returns true if the states match.
 */
static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
{
	struct cfi_state *cfi1 = insn->cfi;
	int i;

	if (!cfi1) {
		WARN("CFI missing");
		return false;
	}

	if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) {

		WARN_INSN(insn, "stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
			  cfi1->cfa.base, cfi1->cfa.offset,
			  cfi2->cfa.base, cfi2->cfa.offset);
		return false;

	}

	if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
		/* Report each individual register that differs. */
		for (i = 0; i < CFI_NUM_REGS; i++) {

			if (!memcmp(&cfi1->regs[i], &cfi2->regs[i], sizeof(struct cfi_reg)))
				continue;

			WARN_INSN(insn, "stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
				  i, cfi1->regs[i].base, cfi1->regs[i].offset,
				  i, cfi2->regs[i].base, cfi2->regs[i].offset);
		}
		return false;
	}

	if (cfi1->type != cfi2->type) {

		WARN_INSN(insn, "stack state mismatch: type1=%d type2=%d",
			  cfi1->type, cfi2->type);
		return false;
	}

	if (cfi1->drap != cfi2->drap ||
		   (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) ||
		   (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) {

		WARN_INSN(insn, "stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
			  cfi1->drap, cfi1->drap_reg, cfi1->drap_offset,
			  cfi2->drap, cfi2->drap_reg, cfi2->drap_offset);
		return false;
	}

	return true;
}
3348 
3349 static inline bool func_uaccess_safe(struct symbol *func)
3350 {
3351 	if (func)
3352 		return func->uaccess_safe;
3353 
3354 	return false;
3355 }
3356 
/*
 * Best-effort name of a call's destination, for diagnostics.  For pv_ops
 * calls the result points into a static buffer and is only valid until
 * the next call.
 */
static inline const char *call_dest_name(struct instruction *insn)
{
	static char pvname[19];
	struct reloc *reloc;
	int idx;

	if (insn_call_dest(insn))
		return insn_call_dest(insn)->name;

	/* No resolved destination; try to identify a pv_ops slot. */
	reloc = insn_reloc(NULL, insn);
	if (reloc && !strcmp(reloc->sym->name, "pv_ops")) {
		idx = (reloc_addend(reloc) / sizeof(void *));
		snprintf(pvname, sizeof(pvname), "pv_ops[%d]", idx);
		return pvname;
	}

	return "{dynamic}";
}
3375 
3376 static bool pv_call_dest(struct objtool_file *file, struct instruction *insn)
3377 {
3378 	struct symbol *target;
3379 	struct reloc *reloc;
3380 	int idx;
3381 
3382 	reloc = insn_reloc(file, insn);
3383 	if (!reloc || strcmp(reloc->sym->name, "pv_ops"))
3384 		return false;
3385 
3386 	idx = arch_insn_adjusted_addend(insn, reloc) / sizeof(void *);
3387 
3388 	if (file->pv_ops[idx].clean)
3389 		return true;
3390 
3391 	file->pv_ops[idx].clean = true;
3392 
3393 	list_for_each_entry(target, &file->pv_ops[idx].targets, pv_target) {
3394 		if (!target->sec->noinstr) {
3395 			WARN("pv_ops[%d]: %s", idx, target->name);
3396 			file->pv_ops[idx].clean = false;
3397 		}
3398 	}
3399 
3400 	return file->pv_ops[idx].clean;
3401 }
3402 
3403 static inline bool noinstr_call_dest(struct objtool_file *file,
3404 				     struct instruction *insn,
3405 				     struct symbol *func)
3406 {
3407 	/*
3408 	 * We can't deal with indirect function calls at present;
3409 	 * assume they're instrumented.
3410 	 */
3411 	if (!func) {
3412 		if (file->pv_ops)
3413 			return pv_call_dest(file, insn);
3414 
3415 		return false;
3416 	}
3417 
3418 	/*
3419 	 * If the symbol is from a noinstr section; we good.
3420 	 */
3421 	if (func->sec->noinstr)
3422 		return true;
3423 
3424 	/*
3425 	 * If the symbol is a static_call trampoline, we can't tell.
3426 	 */
3427 	if (func->static_call_tramp)
3428 		return true;
3429 
3430 	/*
3431 	 * The __ubsan_handle_*() calls are like WARN(), they only happen when
3432 	 * something 'BAD' happened. At the risk of taking the machine down,
3433 	 * let them proceed to get the message out.
3434 	 */
3435 	if (!strncmp(func->name, "__ubsan_handle_", 15))
3436 		return true;
3437 
3438 	return false;
3439 }
3440 
3441 static int validate_call(struct objtool_file *file,
3442 			 struct instruction *insn,
3443 			 struct insn_state *state)
3444 {
3445 	if (state->noinstr && state->instr <= 0 &&
3446 	    !noinstr_call_dest(file, insn, insn_call_dest(insn))) {
3447 		WARN_INSN(insn, "call to %s() leaves .noinstr.text section", call_dest_name(insn));
3448 		return 1;
3449 	}
3450 
3451 	if (state->uaccess && !func_uaccess_safe(insn_call_dest(insn))) {
3452 		WARN_INSN(insn, "call to %s() with UACCESS enabled", call_dest_name(insn));
3453 		return 1;
3454 	}
3455 
3456 	if (state->df) {
3457 		WARN_INSN(insn, "call to %s() with DF set", call_dest_name(insn));
3458 		return 1;
3459 	}
3460 
3461 	return 0;
3462 }
3463 
/*
 * A sibling (tail) call must leave the stack frame unmodified and then
 * obeys all the rules of a regular call.
 */
static int validate_sibling_call(struct objtool_file *file,
				 struct instruction *insn,
				 struct insn_state *state)
{
	if (insn_func(insn) && has_modified_stack_frame(insn, state)) {
		WARN_INSN(insn, "sibling call from callable instruction with modified stack frame");
		return 1;
	}

	return validate_call(file, insn, state);
}
3475 
3476 static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state)
3477 {
3478 	if (state->noinstr && state->instr > 0) {
3479 		WARN_INSN(insn, "return with instrumentation enabled");
3480 		return 1;
3481 	}
3482 
3483 	if (state->uaccess && !func_uaccess_safe(func)) {
3484 		WARN_INSN(insn, "return with UACCESS enabled");
3485 		return 1;
3486 	}
3487 
3488 	if (!state->uaccess && func_uaccess_safe(func)) {
3489 		WARN_INSN(insn, "return with UACCESS disabled from a UACCESS-safe function");
3490 		return 1;
3491 	}
3492 
3493 	if (state->df) {
3494 		WARN_INSN(insn, "return with DF set");
3495 		return 1;
3496 	}
3497 
3498 	if (func && has_modified_stack_frame(insn, state)) {
3499 		WARN_INSN(insn, "return with modified stack frame");
3500 		return 1;
3501 	}
3502 
3503 	if (state->cfi.bp_scratch) {
3504 		WARN_INSN(insn, "BP used as a scratch register");
3505 		return 1;
3506 	}
3507 
3508 	return 0;
3509 }
3510 
/*
 * Return the next instruction the validation walk should visit after @insn,
 * accounting for alternatives being patched in-place.
 */
static struct instruction *next_insn_to_validate(struct objtool_file *file,
						 struct instruction *insn)
{
	struct alt_group *alt_group = insn->alt_group;

	/*
	 * Simulate the fact that alternatives are patched in-place.  When the
	 * end of a replacement alt_group is reached, redirect objtool flow to
	 * the end of the original alt_group.
	 *
	 * insn->alts->insn -> alt_group->first_insn
	 *		       ...
	 *		       alt_group->last_insn
	 *		       [alt_group->nop]      -> next(orig_group->last_insn)
	 */
	if (alt_group) {
		if (alt_group->nop) {
			/* ->nop implies ->orig_group */
			if (insn == alt_group->last_insn)
				return alt_group->nop;
			if (insn == alt_group->nop)
				goto next_orig;
		}
		/* End of a replacement group: rejoin the original stream. */
		if (insn == alt_group->last_insn && alt_group->orig_group)
			goto next_orig;
	}

	/* Plain straight-line flow. */
	return next_insn_same_sec(file, insn);

next_orig:
	return next_insn_same_sec(file, alt_group->orig_group->last_insn);
}
3543 
3544 static bool skip_alt_group(struct instruction *insn)
3545 {
3546 	struct instruction *alt_insn = insn->alts ? insn->alts->insn : NULL;
3547 
3548 	if (!insn->alt_group)
3549 		return false;
3550 
3551 	/* ANNOTATE_IGNORE_ALTERNATIVE */
3552 	if (insn->alt_group->ignore)
3553 		return true;
3554 
3555 	/*
3556 	 * For NOP patched with CLAC/STAC, only follow the latter to avoid
3557 	 * impossible code paths combining patched CLAC with unpatched STAC
3558 	 * or vice versa.
3559 	 *
3560 	 * ANNOTATE_IGNORE_ALTERNATIVE could have been used here, but Linus
3561 	 * requested not to do that to avoid hurting .s file readability
3562 	 * around CLAC/STAC alternative sites.
3563 	 */
3564 
3565 	if (!alt_insn)
3566 		return false;
3567 
3568 	/* Don't override ASM_{CLAC,STAC}_UNSAFE */
3569 	if (alt_insn->alt_group && alt_insn->alt_group->ignore)
3570 		return false;
3571 
3572 	return alt_insn->type == INSN_CLAC || alt_insn->type == INSN_STAC;
3573 }
3574 
3575 static int checksum_debug_init(struct objtool_file *file)
3576 {
3577 	char *dup, *s;
3578 
3579 	if (!opts.debug_checksum)
3580 		return 0;
3581 
3582 	dup = strdup(opts.debug_checksum);
3583 	if (!dup) {
3584 		ERROR_GLIBC("strdup");
3585 		return -1;
3586 	}
3587 
3588 	s = dup;
3589 	while (*s) {
3590 		struct symbol *func;
3591 		char *comma;
3592 
3593 		comma = strchr(s, ',');
3594 		if (comma)
3595 			*comma = '\0';
3596 
3597 		func = find_symbol_by_name(file->elf, s);
3598 		if (!func || !is_func_sym(func))
3599 			WARN("--debug-checksum: can't find '%s'", s);
3600 		else
3601 			func->debug_checksum = 1;
3602 
3603 		if (!comma)
3604 			break;
3605 
3606 		s = comma + 1;
3607 	}
3608 
3609 	free(dup);
3610 	return 0;
3611 }
3612 
/*
 * Fold one instruction into @func's checksum: the raw instruction bytes,
 * plus a stable representation of whatever the instruction references
 * (call target name, string literal contents, or symbol name + offset).
 * The order of checksum_update() calls is part of the checksum format.
 */
static void checksum_update_insn(struct objtool_file *file, struct symbol *func,
				 struct instruction *insn)
{
	struct reloc *reloc = insn_reloc(file, insn);
	unsigned long offset;
	struct symbol *sym;

	/* Synthetic instructions have no bytes to hash. */
	if (insn->fake)
		return;

	/* Raw instruction bytes. */
	checksum_update(func, insn, insn->sec->data->d_buf + insn->offset, insn->len);

	if (!reloc) {
		/* Direct call without reloc: hash the destination's name. */
		struct symbol *call_dest = insn_call_dest(insn);

		if (call_dest)
			checksum_update(func, insn, call_dest->demangled_name,
					strlen(call_dest->demangled_name));
		return;
	}

	sym = reloc->sym;
	offset = arch_insn_adjusted_addend(insn, reloc);

	if (is_string_sec(sym->sec)) {
		/* Hash the referenced string's contents, not its address. */
		char *str;

		str = sym->sec->data->d_buf + sym->offset + offset;
		checksum_update(func, insn, str, strlen(str));
		return;
	}

	if (is_sec_sym(sym)) {
		/* Section symbol: resolve to the containing named symbol. */
		sym = find_symbol_containing(reloc->sym->sec, offset);
		if (!sym)
			return;

		offset -= sym->offset;
	}

	/* Symbol name plus the residual offset into it. */
	checksum_update(func, insn, sym->demangled_name, strlen(sym->demangled_name));
	checksum_update(func, insn, &offset, sizeof(offset));
}
3656 
/*
 * Follow the branch starting at the given instruction, and recursively follow
 * any other branches (jumps).  Meanwhile, track the frame pointer state at
 * each instruction and validate all the rules described in
 * tools/objtool/Documentation/objtool.txt.
 *
 * @func may be NULL for code outside any function (e.g. unwind-hint entry
 * points).  @state is passed by value: each branch gets its own copy, so
 * divergent paths cannot corrupt each other's state.  Returns non-zero on
 * the first validation failure.
 */
static int validate_branch(struct objtool_file *file, struct symbol *func,
			   struct instruction *insn, struct insn_state state)
{
	struct alternative *alt;
	struct instruction *next_insn, *prev_insn = NULL;
	u8 visited;
	int ret;

	if (func && func->ignore)
		return 0;

	while (1) {
		next_insn = next_insn_to_validate(file, insn);

		if (opts.checksum && func && insn->sec)
			checksum_update_insn(file, func, insn);

		/* Detect running off the end of one function into the next. */
		if (func && insn_func(insn) && func != insn_func(insn)->pfunc) {
			/* Ignore KCFI type preambles, which always fall through */
			if (is_prefix_func(func))
				return 0;

			if (file->ignore_unreachables)
				return 0;

			WARN("%s() falls through to next function %s()",
			     func->name, insn_func(insn)->name);
			func->warned = 1;

			return 1;
		}

		/*
		 * The uaccess flag is folded into the visited mask so each
		 * (instruction, AC-state) combination is walked at most once.
		 */
		visited = VISITED_BRANCH << state.uaccess;
		if (insn->visited & VISITED_BRANCH_MASK) {
			/* Converging paths must agree on the CFI state. */
			if (!insn->hint && !insn_cfi_match(insn, &state.cfi))
				return 1;

			if (insn->visited & visited)
				return 0;
		} else {
			nr_insns_visited++;
		}

		if (state.noinstr)
			state.instr += insn->instr;

		if (insn->hint) {
			/* UNWIND_HINT_RESTORE: reuse the matching save's CFI. */
			if (insn->restore) {
				struct instruction *save_insn, *i;

				i = insn;
				save_insn = NULL;

				sym_for_each_insn_continue_reverse(file, func, i) {
					if (i->save) {
						save_insn = i;
						break;
					}
				}

				if (!save_insn) {
					WARN_INSN(insn, "no corresponding CFI save for CFI restore");
					return 1;
				}

				if (!save_insn->visited) {
					/*
					 * If the restore hint insn is at the
					 * beginning of a basic block and was
					 * branched to from elsewhere, and the
					 * save insn hasn't been visited yet,
					 * defer following this branch for now.
					 * It will be seen later via the
					 * straight-line path.
					 */
					if (!prev_insn)
						return 0;

					WARN_INSN(insn, "objtool isn't smart enough to handle this CFI save/restore combo");
					return 1;
				}

				insn->cfi = save_insn->cfi;
				nr_cfi_reused++;
			}

			/* Hints override the computed state. */
			state.cfi = *insn->cfi;
		} else {
			/* XXX track if we actually changed state.cfi */

			/* Share the previous insn's CFI object when unchanged. */
			if (prev_insn && !cficmp(prev_insn->cfi, &state.cfi)) {
				insn->cfi = prev_insn->cfi;
				nr_cfi_reused++;
			} else {
				insn->cfi = cfi_hash_find_or_add(&state.cfi);
			}
		}

		insn->visited |= visited;

		if (propagate_alt_cfi(file, insn))
			return 1;

		/* Recurse into every alternative replacement path. */
		if (insn->alts) {
			for (alt = insn->alts; alt; alt = alt->next) {
				ret = validate_branch(file, func, alt->insn, state);
				if (ret) {
					BT_INSN(insn, "(alt)");
					return ret;
				}
			}
		}

		if (skip_alt_group(insn))
			return 0;

		if (handle_insn_ops(insn, next_insn, &state))
			return 1;

		switch (insn->type) {

		case INSN_RETURN:
			return validate_return(func, insn, &state);

		case INSN_CALL:
		case INSN_CALL_DYNAMIC:
			ret = validate_call(file, insn, &state);
			if (ret)
				return ret;

			if (opts.stackval && func && !is_special_call(insn) &&
			    !has_valid_stack_frame(&state)) {
				WARN_INSN(insn, "call without frame pointer save/setup");
				return 1;
			}

			break;

		case INSN_JUMP_CONDITIONAL:
		case INSN_JUMP_UNCONDITIONAL:
			if (is_sibling_call(insn)) {
				ret = validate_sibling_call(file, insn, &state);
				if (ret)
					return ret;

			} else if (insn->jump_dest) {
				/* Follow the taken branch with a state copy. */
				ret = validate_branch(file, func,
						      insn->jump_dest, state);
				if (ret) {
					BT_INSN(insn, "(branch)");
					return ret;
				}
			}

			if (insn->type == INSN_JUMP_UNCONDITIONAL)
				return 0;

			break;

		case INSN_JUMP_DYNAMIC:
		case INSN_JUMP_DYNAMIC_CONDITIONAL:
			if (is_sibling_call(insn)) {
				ret = validate_sibling_call(file, insn, &state);
				if (ret)
					return ret;
			}

			if (insn->type == INSN_JUMP_DYNAMIC)
				return 0;

			break;

		case INSN_SYSCALL:
			/* Only allowed in functions when followed by a hint. */
			if (func && (!next_insn || !next_insn->hint)) {
				WARN_INSN(insn, "unsupported instruction in callable function");
				return 1;
			}

			break;

		case INSN_SYSRET:
			if (func && (!next_insn || !next_insn->hint)) {
				WARN_INSN(insn, "unsupported instruction in callable function");
				return 1;
			}

			return 0;

		case INSN_STAC:
			if (!opts.uaccess)
				break;

			if (state.uaccess) {
				WARN_INSN(insn, "recursive UACCESS enable");
				return 1;
			}

			state.uaccess = true;
			break;

		case INSN_CLAC:
			if (!opts.uaccess)
				break;

			if (!state.uaccess && func) {
				WARN_INSN(insn, "redundant UACCESS disable");
				return 1;
			}

			if (func_uaccess_safe(func) && !state.uaccess_stack) {
				WARN_INSN(insn, "UACCESS-safe disables UACCESS");
				return 1;
			}

			state.uaccess = false;
			break;

		case INSN_STD:
			if (state.df) {
				WARN_INSN(insn, "recursive STD");
				return 1;
			}

			state.df = true;
			break;

		case INSN_CLD:
			if (!state.df && func) {
				WARN_INSN(insn, "redundant CLD");
				return 1;
			}

			state.df = false;
			break;

		default:
			break;
		}

		if (insn->dead_end)
			return 0;

		if (!next_insn) {
			/* Undefined CFA means a deliberately unreachable tail. */
			if (state.cfi.cfa.base == CFI_UNDEFINED)
				return 0;
			if (file->ignore_unreachables)
				return 0;

			WARN("%s%sunexpected end of section %s",
			     func ? func->name : "", func ? "(): " : "",
			     insn->sec->name);
			return 1;
		}

		prev_insn = insn;
		insn = next_insn;
	}

	return 0;
}
3923 
/*
 * If @insn carries an unwind hint and hasn't been reached by any prior
 * branch walk, start a fresh validation walk from it.
 */
static int validate_unwind_hint(struct objtool_file *file,
				  struct instruction *insn,
				  struct insn_state *state)
{
	if (insn->hint && !insn->visited) {
		struct symbol *func = insn_func(insn);
		int ret;

		/*
		 * NOTE(review): unlike validate_symbol(), there is no
		 * matching checksum_finish() after this walk — confirm
		 * that is intentional for hint-rooted walks.
		 */
		if (opts.checksum)
			checksum_init(func);

		ret = validate_branch(file, func, insn, *state);
		if (ret)
			BT_INSN(insn, "<=== (hint)");
		return ret;
	}

	return 0;
}
3943 
3944 static int validate_unwind_hints(struct objtool_file *file, struct section *sec)
3945 {
3946 	struct instruction *insn;
3947 	struct insn_state state;
3948 	int warnings = 0;
3949 
3950 	if (!file->hints)
3951 		return 0;
3952 
3953 	init_insn_state(file, &state, sec);
3954 
3955 	if (sec) {
3956 		sec_for_each_insn(file, sec, insn)
3957 			warnings += validate_unwind_hint(file, insn, &state);
3958 	} else {
3959 		for_each_insn(file, insn)
3960 			warnings += validate_unwind_hint(file, insn, &state);
3961 	}
3962 
3963 	return warnings;
3964 }
3965 
/*
 * Validate rethunk entry constraint: must untrain RET before the first RET.
 *
 * Follow every branch (intra-function) and ensure VALIDATE_UNRET_END comes
 * before an actual RET instruction.
 *
 * Returns non-zero if any path from @insn can reach a RET (or other
 * disallowed instruction) before the untrain marker.
 */
static int validate_unret(struct objtool_file *file, struct instruction *insn)
{
	struct instruction *next, *dest;
	int ret;

	for (;;) {
		next = next_insn_to_validate(file, insn);

		/* Each instruction only needs to be checked once. */
		if (insn->visited & VISITED_UNRET)
			return 0;

		insn->visited |= VISITED_UNRET;

		/* Every alternative replacement path must also be clean. */
		if (insn->alts) {
			struct alternative *alt;
			for (alt = insn->alts; alt; alt = alt->next) {
				ret = validate_unret(file, alt->insn);
				if (ret) {
					BT_INSN(insn, "(alt)");
					return ret;
				}
			}
		}

		switch (insn->type) {

		case INSN_CALL_DYNAMIC:
		case INSN_JUMP_DYNAMIC:
		case INSN_JUMP_DYNAMIC_CONDITIONAL:
			WARN_INSN(insn, "early indirect call");
			return 1;

		case INSN_JUMP_UNCONDITIONAL:
		case INSN_JUMP_CONDITIONAL:
			if (!is_sibling_call(insn)) {
				if (!insn->jump_dest) {
					WARN_INSN(insn, "unresolved jump target after linking?!?");
					return 1;
				}
				ret = validate_unret(file, insn->jump_dest);
				if (ret) {
					BT_INSN(insn, "(branch%s)",
						insn->type == INSN_JUMP_CONDITIONAL ? "-cond" : "");
					return ret;
				}

				if (insn->type == INSN_JUMP_UNCONDITIONAL)
					return 0;

				break;
			}

			/* fallthrough */
		case INSN_CALL:
			/* Sibling calls and calls: follow into the callee. */
			dest = find_insn(file, insn_call_dest(insn)->sec,
					 insn_call_dest(insn)->offset);
			if (!dest) {
				WARN("Unresolved function after linking!?: %s",
				     insn_call_dest(insn)->name);
				return 1;
			}

			ret = validate_unret(file, dest);
			if (ret) {
				BT_INSN(insn, "(call)");
				return ret;
			}
			/*
			 * If a call returns without error, it must have seen UNTRAIN_RET.
			 * Therefore any non-error return is a success.
			 */
			return 0;

		case INSN_RETURN:
			WARN_INSN(insn, "RET before UNTRAIN");
			return 1;

		case INSN_SYSCALL:
			break;

		case INSN_SYSRET:
			return 0;

		case INSN_NOP:
			/* A retpoline_safe NOP marks VALIDATE_UNRET_END. */
			if (insn->retpoline_safe)
				return 0;
			break;

		default:
			break;
		}

		if (insn->dead_end)
			return 0;

		if (!next) {
			WARN_INSN(insn, "teh end!");
			return 1;
		}
		insn = next;
	}

	return 0;
}
4076 
4077 /*
4078  * Validate that all branches starting at VALIDATE_UNRET_BEGIN encounter
4079  * VALIDATE_UNRET_END before RET.
4080  */
4081 static int validate_unrets(struct objtool_file *file)
4082 {
4083 	struct instruction *insn;
4084 	int warnings = 0;
4085 
4086 	for_each_insn(file, insn) {
4087 		if (!insn->unret)
4088 			continue;
4089 
4090 		warnings += validate_unret(file, insn);
4091 	}
4092 
4093 	return warnings;
4094 }
4095 
/*
 * Two-phase retpoline validation:
 *  1) every indirect jump/call and every return must be annotated
 *     retpoline-safe (or live in an .init section);
 *  2) with --cfi, every retpoline call site must be preceded by the kCFI
 *     check sequence (detected via the UD2 before the thunk call).
 * Returns the warning count.
 */
static int validate_retpoline(struct objtool_file *file)
{
	struct instruction *insn;
	int warnings = 0;

	for_each_insn(file, insn) {
		if (insn->type != INSN_JUMP_DYNAMIC &&
		    insn->type != INSN_CALL_DYNAMIC &&
		    insn->type != INSN_RETURN)
			continue;

		if (insn->retpoline_safe)
			continue;

		/* .init code is discarded before userspace can attack it. */
		if (insn->sec->init)
			continue;

		if (insn->type == INSN_RETURN) {
			if (opts.rethunk) {
				WARN_INSN(insn, "'naked' return found in MITIGATION_RETHUNK build");
				warnings++;
			}
			continue;
		}

		WARN_INSN(insn, "indirect %s found in MITIGATION_RETPOLINE build",
			  insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
		warnings++;
	}

	if (!opts.cfi)
		return warnings;

	/*
	 * kCFI call sites look like:
	 *
	 *     movl $(-0x12345678), %r10d
	 *     addl -4(%r11), %r10d
	 *     jz 1f
	 *     ud2
	 *  1: cs call __x86_indirect_thunk_r11
	 *
	 * Verify all indirect calls are kCFI adorned by checking for the
	 * UD2. Notably, doing __nocfi calls to regular (cfi) functions is
	 * broken.
	 */
	list_for_each_entry(insn, &file->retpoline_call_list, call_node) {
		struct symbol *sym = insn->sym;

		if (sym && (sym->type == STT_NOTYPE ||
			    sym->type == STT_FUNC) && !sym->nocfi) {
			/* The UD2 shows up as an INSN_BUG right before. */
			struct instruction *prev =
				prev_insn_same_sym(file, insn);

			if (!prev || prev->type != INSN_BUG) {
				WARN_INSN(insn, "no-cfi indirect call!");
				warnings++;
			}
		}
	}

	return warnings;
}
4159 
4160 static bool is_kasan_insn(struct instruction *insn)
4161 {
4162 	return (insn->type == INSN_CALL &&
4163 		!strcmp(insn_call_dest(insn)->name, "__asan_handle_no_return"));
4164 }
4165 
4166 static bool is_ubsan_insn(struct instruction *insn)
4167 {
4168 	return (insn->type == INSN_CALL &&
4169 		!strcmp(insn_call_dest(insn)->name,
4170 			"__ubsan_handle_builtin_unreachable"));
4171 }
4172 
/*
 * Decide whether an unvisited instruction is benign and should not trigger
 * an "unreachable instruction" warning.  This is a stack of heuristics for
 * compiler- and sanitizer-generated dead code.
 */
static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn)
{
	struct symbol *func = insn_func(insn);
	struct instruction *prev_insn;
	int i;

	/* Padding, traps, holes, and explicitly ignored functions are fine. */
	if (insn->type == INSN_NOP || insn->type == INSN_TRAP ||
	    insn->hole || (func && func->ignore))
		return true;

	/*
	 * Ignore alternative replacement instructions.  This can happen
	 * when a whitelisted function uses one of the ALTERNATIVE macros.
	 */
	if (!strcmp(insn->sec->name, ".altinstr_replacement") ||
	    !strcmp(insn->sec->name, ".altinstr_aux"))
		return true;

	if (!func)
		return false;

	if (func->static_call_tramp)
		return true;

	/*
	 * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
	 * __builtin_unreachable().  The BUG() macro has an unreachable() after
	 * the UD2, which causes GCC's undefined trap logic to emit another UD2
	 * (or occasionally a JMP to UD2).
	 *
	 * It may also insert a UD2 after calling a __noreturn function.
	 */
	prev_insn = prev_insn_same_sec(file, insn);
	if (prev_insn && prev_insn->dead_end &&
	    (insn->type == INSN_BUG ||
	     (insn->type == INSN_JUMP_UNCONDITIONAL &&
	      insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
		return true;

	/*
	 * Check if this (or a subsequent) instruction is related to
	 * CONFIG_UBSAN or CONFIG_KASAN.
	 *
	 * End the search at 5 instructions to avoid going into the weeds.
	 */
	for (i = 0; i < 5; i++) {

		if (is_kasan_insn(insn) || is_ubsan_insn(insn))
			return true;

		if (insn->type == INSN_JUMP_UNCONDITIONAL) {
			/* Follow intra-function jumps only. */
			if (insn->jump_dest &&
			    insn_func(insn->jump_dest) == func) {
				insn = insn->jump_dest;
				continue;
			}

			break;
		}

		/* Stop at the end of the function. */
		if (insn->offset + insn->len >= func->offset + func->len)
			break;

		insn = next_insn_same_sec(file, insn);
	}

	return false;
}
4241 
/*
 * For FineIBT or kCFI, a certain number of bytes preceding the function may be
 * NOPs.  Those NOPs may be rewritten at runtime and executed, so give them a
 * proper function name: __pfx_<func>.
 *
 * The NOPs may not exist for the following cases:
 *
 *   - compiler cloned functions (*.cold, *.part0, etc)
 *   - asm functions created with inline asm or without SYM_FUNC_START()
 *
 * Also, the function may already have a prefix from a previous objtool run
 * (livepatch extracted functions, or manually running objtool multiple times).
 *
 * So return 0 if the NOPs are missing or the function already has a prefix
 * symbol.
 */
static int create_prefix_symbol(struct objtool_file *file, struct symbol *func)
{
	struct instruction *insn, *prev;
	char name[SYM_NAME_LEN];
	struct cfi_state *cfi;

	/* Skip non-functions, existing prefixes, clones, and trampolines. */
	if (!is_func_sym(func) || is_prefix_func(func) ||
	    func->cold || func->static_call_tramp)
		return 0;

	if ((strlen(func->name) + sizeof("__pfx_") > SYM_NAME_LEN)) {
		WARN("%s: symbol name too long, can't create __pfx_ symbol",
		      func->name);
		return 0;
	}

	if (snprintf_check(name, SYM_NAME_LEN, "__pfx_%s", func->name))
		return -1;

	if (file->klp) {
		/* Livepatch object: a prefix may already exist; don't duplicate. */
		struct symbol *pfx;

		pfx = find_symbol_by_offset(func->sec, func->offset - opts.prefix);
		if (pfx && is_prefix_func(pfx) && !strcmp(pfx->name, name))
			return 0;
	}

	insn = find_insn(file, func->sec, func->offset);
	if (!insn) {
		WARN("%s: can't find starting instruction", func->name);
		return -1;
	}

	/*
	 * Walk backwards over the preceding NOPs until exactly opts.prefix
	 * bytes have been covered; bail out if anything else is found first.
	 */
	for (prev = prev_insn_same_sec(file, insn);
	     prev;
	     prev = prev_insn_same_sec(file, prev)) {
		u64 offset;

		if (prev->type != INSN_NOP)
			return 0;

		offset = func->offset - prev->offset;

		if (offset > opts.prefix)
			return 0;

		if (offset < opts.prefix)
			continue;

		if (!elf_create_symbol(file->elf, name, func->sec,
				       GELF_ST_BIND(func->sym.st_info),
				       GELF_ST_TYPE(func->sym.st_info),
				       prev->offset, opts.prefix))
			return -1;

		break;
	}

	if (!prev)
		return 0;

	if (!insn->cfi) {
		/*
		 * This can happen if stack validation isn't enabled or the
		 * function is annotated with STACK_FRAME_NON_STANDARD.
		 */
		return 0;
	}

	/* Propagate insn->cfi to the prefix code */
	cfi = cfi_hash_find_or_add(insn->cfi);
	for (; prev != insn; prev = next_insn_same_sec(file, prev))
		prev->cfi = cfi;

	return 0;
}
4334 
4335 static int create_prefix_symbols(struct objtool_file *file)
4336 {
4337 	struct section *sec;
4338 	struct symbol *func;
4339 
4340 	for_each_sec(file->elf, sec) {
4341 		if (!is_text_sec(sec))
4342 			continue;
4343 
4344 		sec_for_each_sym(sec, func) {
4345 			if (create_prefix_symbol(file, func))
4346 				return -1;
4347 		}
4348 	}
4349 
4350 	return 0;
4351 }
4352 
/*
 * Validate one function symbol: start a branch walk at its first
 * instruction, bracketed by checksum init/finish when --checksum is on.
 * Returns the warning count (0 or the walk's result).
 */
static int validate_symbol(struct objtool_file *file, struct section *sec,
			   struct symbol *sym, struct insn_state *state)
{
	struct instruction *insn;
	struct symbol *func;
	int ret;

	if (!sym->len) {
		WARN("%s() is missing an ELF size annotation", sym->name);
		return 1;
	}

	/* Only walk the canonical parent/alias symbol; skip duplicates. */
	if (sym->pfunc != sym || sym->alias != sym)
		return 0;

	insn = find_insn(file, sec, sym->offset);
	if (!insn || insn->visited)
		return 0;

	if (opts.uaccess)
		state->uaccess = sym->uaccess_safe;

	func = insn_func(insn);

	if (opts.checksum)
		checksum_init(func);

	ret = validate_branch(file, func, insn, *state);
	if (ret)
		BT_INSN(insn, "<=== (sym)");

	if (opts.checksum)
		checksum_finish(func);

	return ret;
}
4389 
4390 static int validate_section(struct objtool_file *file, struct section *sec)
4391 {
4392 	struct insn_state state;
4393 	struct symbol *func;
4394 	int warnings = 0;
4395 
4396 	sec_for_each_sym(sec, func) {
4397 		if (!is_func_sym(func))
4398 			continue;
4399 
4400 		init_insn_state(file, &state, sec);
4401 		set_func_state(&state.cfi);
4402 
4403 		warnings += validate_symbol(file, sec, func, &state);
4404 	}
4405 
4406 	return warnings;
4407 }
4408 
4409 static int validate_noinstr_sections(struct objtool_file *file)
4410 {
4411 	struct section *sec;
4412 	int warnings = 0;
4413 
4414 	sec = find_section_by_name(file->elf, ".noinstr.text");
4415 	if (sec) {
4416 		warnings += validate_section(file, sec);
4417 		warnings += validate_unwind_hints(file, sec);
4418 	}
4419 
4420 	sec = find_section_by_name(file->elf, ".entry.text");
4421 	if (sec) {
4422 		warnings += validate_section(file, sec);
4423 		warnings += validate_unwind_hints(file, sec);
4424 	}
4425 
4426 	sec = find_section_by_name(file->elf, ".cpuidle.text");
4427 	if (sec) {
4428 		warnings += validate_section(file, sec);
4429 		warnings += validate_unwind_hints(file, sec);
4430 	}
4431 
4432 	return warnings;
4433 }
4434 
4435 static int validate_functions(struct objtool_file *file)
4436 {
4437 	struct section *sec;
4438 	int warnings = 0;
4439 
4440 	for_each_sec(file->elf, sec) {
4441 		if (!is_text_sec(sec))
4442 			continue;
4443 
4444 		warnings += validate_section(file, sec);
4445 	}
4446 
4447 	return warnings;
4448 }
4449 
4450 static void mark_endbr_used(struct instruction *insn)
4451 {
4452 	if (!list_empty(&insn->call_node))
4453 		list_del_init(&insn->call_node);
4454 }
4455 
4456 static bool noendbr_range(struct objtool_file *file, struct instruction *insn)
4457 {
4458 	struct symbol *sym = find_symbol_containing(insn->sec, insn->offset-1);
4459 	struct instruction *first;
4460 
4461 	if (!sym)
4462 		return false;
4463 
4464 	first = find_insn(file, sym->sec, sym->offset);
4465 	if (!first)
4466 		return false;
4467 
4468 	if (first->type != INSN_ENDBR && !first->noendbr)
4469 		return false;
4470 
4471 	return insn->offset == sym->offset + sym->len;
4472 }
4473 
/*
 * Check one code reference from @insn to @dest against the IBT rules:
 * the destination must be an ENDBR, a self-reference, annotated noendbr,
 * or the tail of a (no)endbr symbol range.  Returns 1 (and warns) on a
 * violation, 0 otherwise.  A matched ENDBR is removed from the seal list.
 */
static int __validate_ibt_insn(struct objtool_file *file, struct instruction *insn,
			       struct instruction *dest)
{
	if (dest->type == INSN_ENDBR) {
		mark_endbr_used(dest);
		return 0;
	}

	if (insn_func(dest) && insn_func(insn) &&
	    insn_func(dest)->pfunc == insn_func(insn)->pfunc) {
		/*
		 * Anything from->to self is either _THIS_IP_ or
		 * IRET-to-self.
		 *
		 * There is no sane way to annotate _THIS_IP_ since the
		 * compiler treats the relocation as a constant and is
		 * happy to fold in offsets, skewing any annotation we
		 * do, leading to vast amounts of false-positives.
		 *
		 * There's also compiler generated _THIS_IP_ through
		 * KCOV and such which we have no hope of annotating.
		 *
		 * As such, blanket accept self-references without
		 * issue.
		 */
		return 0;
	}

	/*
	 * Accept anything ANNOTATE_NOENDBR.
	 */
	if (dest->noendbr)
		return 0;

	/*
	 * Accept if this is the instruction after a symbol
	 * that is (no)endbr -- typical code-range usage.
	 */
	if (noendbr_range(file, dest))
		return 0;

	WARN_INSN(insn, "relocation to !ENDBR: %s", offstr(dest->sec, dest->offset));
	return 1;
}
4518 
/*
 * Scan @insn for function-pointer-load relocations and validate each
 * referenced destination against the IBT rules.  Direct and indirect
 * branches are handled by the branch walk and are skipped here.
 * Returns the warning count.
 */
static int validate_ibt_insn(struct objtool_file *file, struct instruction *insn)
{
	struct instruction *dest;
	struct reloc *reloc;
	unsigned long off;
	int warnings = 0;

	/*
	 * Looking for function pointer load relocations.  Ignore
	 * direct/indirect branches:
	 */
	switch (insn->type) {

	case INSN_CALL:
	case INSN_CALL_DYNAMIC:
	case INSN_JUMP_CONDITIONAL:
	case INSN_JUMP_UNCONDITIONAL:
	case INSN_JUMP_DYNAMIC:
	case INSN_JUMP_DYNAMIC_CONDITIONAL:
	case INSN_RETURN:
	case INSN_NOP:
		return 0;

	case INSN_LEA_RIP:
		if (!insn_reloc(file, insn)) {
			/* local function pointer reference without reloc */

			off = arch_jump_destination(insn);

			dest = find_insn(file, insn->sec, off);
			if (!dest) {
				WARN_INSN(insn, "corrupt function pointer reference");
				return 1;
			}

			return __validate_ibt_insn(file, insn, dest);
		}
		break;

	default:
		break;
	}

	/*
	 * An instruction can carry multiple relocations; iterate them all
	 * within the instruction's byte range.
	 */
	for (reloc = insn_reloc(file, insn);
	     reloc;
	     reloc = find_reloc_by_dest_range(file->elf, insn->sec,
					      reloc_offset(reloc) + 1,
					      (insn->offset + insn->len) - (reloc_offset(reloc) + 1))) {

		off = reloc->sym->offset + arch_insn_adjusted_addend(insn, reloc);

		/* Relocations not landing on an instruction are not code refs. */
		dest = find_insn(file, reloc->sym->sec, off);
		if (!dest)
			continue;

		warnings += __validate_ibt_insn(file, insn, dest);
	}

	return warnings;
}
4579 
4580 static int validate_ibt_data_reloc(struct objtool_file *file,
4581 				   struct reloc *reloc)
4582 {
4583 	struct instruction *dest;
4584 
4585 	dest = find_insn(file, reloc->sym->sec,
4586 			 reloc->sym->offset + reloc_addend(reloc));
4587 	if (!dest)
4588 		return 0;
4589 
4590 	if (dest->type == INSN_ENDBR) {
4591 		mark_endbr_used(dest);
4592 		return 0;
4593 	}
4594 
4595 	if (dest->noendbr)
4596 		return 0;
4597 
4598 	WARN_FUNC(reloc->sec->base, reloc_offset(reloc),
4599 		  "data relocation to !ENDBR: %s", offstr(dest->sec, dest->offset));
4600 
4601 	return 1;
4602 }
4603 
4604 /*
4605  * Validate IBT rules and remove used ENDBR instructions from the seal list.
4606  * Unused ENDBR instructions will be annotated for sealing (i.e., replaced with
4607  * NOPs) later, in create_ibt_endbr_seal_sections().
4608  */
4609 static int validate_ibt(struct objtool_file *file)
4610 {
4611 	struct section *sec;
4612 	struct reloc *reloc;
4613 	struct instruction *insn;
4614 	int warnings = 0;
4615 
4616 	for_each_insn(file, insn)
4617 		warnings += validate_ibt_insn(file, insn);
4618 
4619 	for_each_sec(file->elf, sec) {
4620 
4621 		/* Already done by validate_ibt_insn() */
4622 		if (is_text_sec(sec))
4623 			continue;
4624 
4625 		if (!sec->rsec)
4626 			continue;
4627 
4628 		/*
4629 		 * These sections can reference text addresses, but not with
4630 		 * the intent to indirect branch to them.
4631 		 */
4632 		if ((!strncmp(sec->name, ".discard", 8) &&
4633 		     strcmp(sec->name, ".discard.ibt_endbr_noseal"))	||
4634 		    !strncmp(sec->name, ".debug", 6)			||
4635 		    !strcmp(sec->name, ".altinstructions")		||
4636 		    !strcmp(sec->name, ".ibt_endbr_seal")		||
4637 		    !strcmp(sec->name, ".kcfi_traps")			||
4638 		    !strcmp(sec->name, ".orc_unwind_ip")		||
4639 		    !strcmp(sec->name, ".retpoline_sites")		||
4640 		    !strcmp(sec->name, ".smp_locks")			||
4641 		    !strcmp(sec->name, ".static_call_sites")		||
4642 		    !strcmp(sec->name, "_error_injection_whitelist")	||
4643 		    !strcmp(sec->name, "_kprobe_blacklist")		||
4644 		    !strcmp(sec->name, "__bug_table")			||
4645 		    !strcmp(sec->name, "__ex_table")			||
4646 		    !strcmp(sec->name, "__jump_table")			||
4647 		    !strcmp(sec->name, "__klp_funcs")			||
4648 		    !strcmp(sec->name, "__mcount_loc")			||
4649 		    !strcmp(sec->name, ".llvm.call-graph-profile")	||
4650 		    !strcmp(sec->name, ".llvm_bb_addr_map")		||
4651 		    !strcmp(sec->name, "__tracepoints")			||
4652 		    !strcmp(sec->name, "__patchable_function_entries"))
4653 			continue;
4654 
4655 		for_each_reloc(sec->rsec, reloc)
4656 			warnings += validate_ibt_data_reloc(file, reloc);
4657 	}
4658 
4659 	return warnings;
4660 }
4661 
4662 static int validate_sls(struct objtool_file *file)
4663 {
4664 	struct instruction *insn, *next_insn;
4665 	int warnings = 0;
4666 
4667 	for_each_insn(file, insn) {
4668 		next_insn = next_insn_same_sec(file, insn);
4669 
4670 		if (insn->retpoline_safe)
4671 			continue;
4672 
4673 		switch (insn->type) {
4674 		case INSN_RETURN:
4675 			if (!next_insn || next_insn->type != INSN_TRAP) {
4676 				WARN_INSN(insn, "missing int3 after ret");
4677 				warnings++;
4678 			}
4679 
4680 			break;
4681 		case INSN_JUMP_DYNAMIC:
4682 			if (!next_insn || next_insn->type != INSN_TRAP) {
4683 				WARN_INSN(insn, "missing int3 after indirect jump");
4684 				warnings++;
4685 			}
4686 			break;
4687 		default:
4688 			break;
4689 		}
4690 	}
4691 
4692 	return warnings;
4693 }
4694 
4695 static int validate_reachable_instructions(struct objtool_file *file)
4696 {
4697 	struct instruction *insn, *prev_insn;
4698 	struct symbol *call_dest;
4699 	int warnings = 0;
4700 
4701 	if (file->ignore_unreachables)
4702 		return 0;
4703 
4704 	for_each_insn(file, insn) {
4705 		if (insn->visited || ignore_unreachable_insn(file, insn))
4706 			continue;
4707 
4708 		prev_insn = prev_insn_same_sec(file, insn);
4709 		if (prev_insn && prev_insn->dead_end) {
4710 			call_dest = insn_call_dest(prev_insn);
4711 			if (call_dest) {
4712 				WARN_INSN(insn, "%s() missing __noreturn in .c/.h or NORETURN() in noreturns.h",
4713 					  call_dest->name);
4714 				warnings++;
4715 				continue;
4716 			}
4717 		}
4718 
4719 		WARN_INSN(insn, "unreachable instruction");
4720 		warnings++;
4721 	}
4722 
4723 	return warnings;
4724 }
4725 
4726 __weak bool arch_absolute_reloc(struct elf *elf, struct reloc *reloc)
4727 {
4728 	unsigned int type = reloc_type(reloc);
4729 	size_t sz = elf_addr_size(elf);
4730 
4731 	return (sz == 8) ? (type == R_ABS64) : (type == R_ABS32);
4732 }
4733 
4734 static int check_abs_references(struct objtool_file *file)
4735 {
4736 	struct section *sec;
4737 	struct reloc *reloc;
4738 	int ret = 0;
4739 
4740 	for_each_sec(file->elf, sec) {
4741 		/* absolute references in non-loadable sections are fine */
4742 		if (!(sec->sh.sh_flags & SHF_ALLOC))
4743 			continue;
4744 
4745 		/* section must have an associated .rela section */
4746 		if (!sec->rsec)
4747 			continue;
4748 
4749 		/*
4750 		 * Special case for compiler generated metadata that is not
4751 		 * consumed until after boot.
4752 		 */
4753 		if (!strcmp(sec->name, "__patchable_function_entries"))
4754 			continue;
4755 
4756 		for_each_reloc(sec->rsec, reloc) {
4757 			if (arch_absolute_reloc(file->elf, reloc)) {
4758 				WARN("section %s has absolute relocation at offset 0x%llx",
4759 				     sec->name, (unsigned long long)reloc_offset(reloc));
4760 				ret++;
4761 			}
4762 		}
4763 	}
4764 	return ret;
4765 }
4766 
/*
 * Node in a singly-linked list recording the base address of each chunk of
 * 'struct instruction' allocations, so free_insns() can release them.
 */
struct insn_chunk {
	void *addr;			/* chunk base: the insn with idx == 0 */
	struct insn_chunk *next;
};
4771 
4772 /*
4773  * Reduce peak RSS usage by freeing insns memory before writing the ELF file,
4774  * which can trigger more allocations for .debug_* sections whose data hasn't
4775  * been read yet.
4776  */
4777 static void free_insns(struct objtool_file *file)
4778 {
4779 	struct instruction *insn;
4780 	struct insn_chunk *chunks = NULL, *chunk;
4781 
4782 	for_each_insn(file, insn) {
4783 		if (!insn->idx) {
4784 			chunk = malloc(sizeof(*chunk));
4785 			chunk->addr = insn;
4786 			chunk->next = chunks;
4787 			chunks = chunk;
4788 		}
4789 	}
4790 
4791 	for (chunk = chunks; chunk; chunk = chunk->next)
4792 		free(chunk->addr);
4793 }
4794 
/*
 * objtool's main pass driver: decode the object file, run the enabled
 * validation passes (which accumulate warnings), generate the requested
 * annotation/ORC sections (where any failure is a hard error), and report
 * the result.  Returns 0 on success, non-zero on error -- or on warnings
 * when --werror is set.
 */
int check(struct objtool_file *file)
{
	struct disas_context *disas_ctx;
	int ret = 0, warnings = 0;

	/* Initialize the CFI state templates used during branch validation. */
	arch_initial_func_cfi_state(&initial_func_cfi);
	init_cfi_state(&init_cfi);
	init_cfi_state(&func_cfi);
	set_func_state(&func_cfi);
	init_cfi_state(&force_undefined_cfi);
	force_undefined_cfi.force_undefined = true;

	/* Scale the CFI hash table with the symbol table size. */
	if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3))) {
		ret = -1;
		goto out;
	}

	cfi_hash_add(&init_cfi);
	cfi_hash_add(&func_cfi);

	ret = checksum_debug_init(file);
	if (ret)
		goto out;

	ret = decode_sections(file);
	if (ret)
		goto out;

	/* Nothing to validate or annotate without instructions. */
	if (!nr_insns)
		goto out;

	/* Validation passes: these count warnings rather than failing hard. */
	if (opts.retpoline)
		warnings += validate_retpoline(file);

	if (validate_branch_enabled()) {
		int w = 0;

		w += validate_functions(file);
		w += validate_unwind_hints(file, NULL);
		/* Reachability results are only meaningful on a clean run. */
		if (!w)
			w += validate_reachable_instructions(file);

		warnings += w;

	} else if (opts.noinstr) {
		warnings += validate_noinstr_sections(file);
	}

	if (opts.unret) {
		/*
		 * Must be after validate_branch() and friends, it plays
		 * further games with insn->visited.
		 */
		warnings += validate_unrets(file);
	}

	if (opts.ibt)
		warnings += validate_ibt(file);

	if (opts.sls)
		warnings += validate_sls(file);

	/* Section generation passes: any failure below aborts processing. */
	if (opts.static_call) {
		ret = create_static_call_sections(file);
		if (ret)
			goto out;
	}

	if (opts.retpoline) {
		ret = create_retpoline_sites_sections(file);
		if (ret)
			goto out;
	}

	if (opts.cfi) {
		ret = create_cfi_sections(file);
		if (ret)
			goto out;
	}

	if (opts.rethunk) {
		ret = create_return_sites_sections(file);
		if (ret)
			goto out;

		if (opts.hack_skylake) {
			ret = create_direct_call_sections(file);
			if (ret)
				goto out;
		}
	}

	if (opts.mcount) {
		ret = create_mcount_loc_sections(file);
		if (ret)
			goto out;
	}

	if (opts.prefix) {
		ret = create_prefix_symbols(file);
		if (ret)
			goto out;
	}

	/* Must run after validate_ibt(), which marks used ENDBRs. */
	if (opts.ibt) {
		ret = create_ibt_endbr_seal_sections(file);
		if (ret)
			goto out;
	}

	if (opts.noabs)
		warnings += check_abs_references(file);

	if (opts.checksum) {
		ret = create_sym_checksum_section(file);
		if (ret)
			goto out;
	}

	if (opts.orc && nr_insns) {
		ret = orc_create(file);
		if (ret)
			goto out;
	}

	if (opts.stats) {
		printf("nr_insns_visited: %ld\n", nr_insns_visited);
		printf("nr_cfi: %ld\n", nr_cfi);
		printf("nr_cfi_reused: %ld\n", nr_cfi_reused);
		printf("nr_cfi_cache: %ld\n", nr_cfi_cache);
	}

out:
	/* Fast path: clean run -- free instruction memory and return success. */
	if (!ret && !warnings) {
		free_insns(file);
		return 0;
	}

	if (opts.werror && warnings)
		ret = 1;

	if (opts.verbose) {
		if (opts.werror && warnings)
			WARN("%d warning(s) upgraded to errors", warnings);
		/* Disassemble the functions that triggered warnings, if possible. */
		disas_ctx = disas_context_create(file);
		if (disas_ctx) {
			disas_warned_funcs(disas_ctx);
			disas_context_destroy(disas_ctx);
		}
	}

	free_insns(file);

	if (opts.backup && make_backup())
		return 1;

	return ret;
}
4953