xref: /linux/tools/objtool/check.c (revision dea622e183d34e6a4f90acfee9abb605885432bf)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
4  */
5 
6 #define _GNU_SOURCE /* memmem() */
7 #include <fnmatch.h>
8 #include <string.h>
9 #include <stdlib.h>
10 #include <inttypes.h>
11 #include <sys/mman.h>
12 
13 #include <objtool/builtin.h>
14 #include <objtool/cfi.h>
15 #include <objtool/arch.h>
16 #include <objtool/disas.h>
17 #include <objtool/check.h>
18 #include <objtool/special.h>
19 #include <objtool/trace.h>
20 #include <objtool/warn.h>
21 #include <objtool/checksum.h>
22 #include <objtool/util.h>
23 
24 #include <linux/objtool_types.h>
25 #include <linux/hashtable.h>
26 #include <linux/kernel.h>
27 #include <linux/static_call_types.h>
28 #include <linux/string.h>
29 
30 static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache;
31 
32 static struct cfi_init_state initial_func_cfi;
33 static struct cfi_state init_cfi;
34 static struct cfi_state func_cfi;
35 static struct cfi_state force_undefined_cfi;
36 
37 struct disas_context *objtool_disas_ctx;
38 
39 size_t sym_name_max_len;
40 
find_insn(struct objtool_file * file,struct section * sec,unsigned long offset)41 struct instruction *find_insn(struct objtool_file *file,
42 			      struct section *sec, unsigned long offset)
43 {
44 	struct instruction *insn;
45 
46 	hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) {
47 		if (insn->sec == sec && insn->offset == offset)
48 			return insn;
49 	}
50 
51 	return NULL;
52 }
53 
/*
 * Return the next instruction in the same section, or NULL at the end.
 *
 * Instructions are allocated in fixed-size chunk arrays; within a chunk the
 * next instruction is the adjacent slot.  At the last slot of a chunk
 * (idx == INSN_CHUNK_MAX), fall back to a hash lookup at the next offset to
 * hop into the following chunk.
 */
struct instruction *next_insn_same_sec(struct objtool_file *file,
				       struct instruction *insn)
{
	if (insn->idx == INSN_CHUNK_MAX)
		return find_insn(file, insn->sec, insn->offset + insn->len);

	insn++;
	/* A zero-length slot was never decoded: end of the section's insns. */
	if (!insn->len)
		return NULL;

	return insn;
}
66 
/*
 * Return the next instruction belonging to the same function as @insn,
 * following the jump from a parent function into its subfunction (cfunc),
 * or NULL when the function (and its subfunction, if any) is exhausted.
 */
static struct instruction *next_insn_same_func(struct objtool_file *file,
					       struct instruction *insn)
{
	struct instruction *next = next_insn_same_sec(file, insn);
	struct symbol *func = insn_func(insn);

	if (!func)
		return NULL;

	if (next && insn_func(next) == func)
		return next;

	/* Check if we're already in the subfunction: */
	if (func == func->cfunc)
		return NULL;

	/* Move to the subfunction: */
	return find_insn(file, func->cfunc->sec, func->cfunc->offset);
}
86 
prev_insn_same_sec(struct objtool_file * file,struct instruction * insn)87 static struct instruction *prev_insn_same_sec(struct objtool_file *file,
88 					      struct instruction *insn)
89 {
90 	if (insn->idx == 0) {
91 		if (insn->prev_len)
92 			return find_insn(file, insn->sec, insn->offset - insn->prev_len);
93 		return NULL;
94 	}
95 
96 	return insn - 1;
97 }
98 
prev_insn_same_sym(struct objtool_file * file,struct instruction * insn)99 static struct instruction *prev_insn_same_sym(struct objtool_file *file,
100 					      struct instruction *insn)
101 {
102 	struct instruction *prev = prev_insn_same_sec(file, insn);
103 
104 	if (prev && insn_func(prev) == insn_func(insn))
105 		return prev;
106 
107 	return NULL;
108 }
109 
/* Iterate over every decoded instruction in every section of the file. */
#define for_each_insn(file, insn)					\
	for (struct section *__sec, *__fake = (struct section *)1;	\
	     __fake; __fake = NULL)					\
		for_each_sec(file->elf, __sec)				\
			sec_for_each_insn(file, __sec, insn)

/* Iterate over a function's instructions, following into its subfunction. */
#define func_for_each_insn(file, func, insn)				\
	for (insn = find_insn(file, func->sec, func->offset);		\
	     insn;							\
	     insn = next_insn_same_func(file, insn))

/* Iterate over the instructions in a symbol's [offset, offset+len) range. */
#define sym_for_each_insn(file, sym, insn)				\
	for (insn = find_insn(file, sym->sec, sym->offset);		\
	     insn && insn->offset < sym->offset + sym->len;		\
	     insn = next_insn_same_sec(file, insn))

/* Walk backwards from (not including) insn to the symbol's first insn. */
#define sym_for_each_insn_continue_reverse(file, sym, insn)		\
	for (insn = prev_insn_same_sec(file, insn);			\
	     insn && insn->offset >= sym->offset;			\
	     insn = prev_insn_same_sec(file, insn))

/* Iterate from insn (inclusive) to the end of its section. */
#define sec_for_each_insn_from(file, insn)				\
	for (; insn; insn = next_insn_same_sec(file, insn))

/* Iterate from the instruction after insn to the end of its section. */
#define sec_for_each_insn_continue(file, insn)				\
	for (insn = next_insn_same_sec(file, insn); insn;		\
	     insn = next_insn_same_sec(file, insn))
137 
insn_jump_table(struct instruction * insn)138 static inline struct reloc *insn_jump_table(struct instruction *insn)
139 {
140 	if (insn->type == INSN_JUMP_DYNAMIC ||
141 	    insn->type == INSN_CALL_DYNAMIC)
142 		return insn->_jump_table;
143 
144 	return NULL;
145 }
146 
insn_jump_table_size(struct instruction * insn)147 static inline unsigned long insn_jump_table_size(struct instruction *insn)
148 {
149 	if (insn->type == INSN_JUMP_DYNAMIC ||
150 	    insn->type == INSN_CALL_DYNAMIC)
151 		return insn->_jump_table_size;
152 
153 	return 0;
154 }
155 
is_jump_table_jump(struct instruction * insn)156 static bool is_jump_table_jump(struct instruction *insn)
157 {
158 	struct alt_group *alt_group = insn->alt_group;
159 
160 	if (insn_jump_table(insn))
161 		return true;
162 
163 	/* Retpoline alternative for a jump table? */
164 	return alt_group && alt_group->orig_group &&
165 	       insn_jump_table(alt_group->orig_group->first_insn);
166 }
167 
/*
 * Does this instruction tail-call (sibling-call) another function?
 */
static bool is_sibling_call(struct instruction *insn)
{
	/*
	 * Assume only STT_FUNC calls have jump-tables.
	 */
	if (insn_func(insn)) {
		/* An indirect jump is either a sibling call or a jump to a table. */
		if (insn->type == INSN_JUMP_DYNAMIC)
			return !is_jump_table_jump(insn);
	}

	/* add_jump_destinations() sets insn_call_dest(insn) for sibling calls. */
	return (is_static_jump(insn) && insn_call_dest(insn));
}
182 
183 /*
184  * Checks if a function is a Rust "noreturn" one.
185  */
is_rust_noreturn(const struct symbol * func)186 static bool is_rust_noreturn(const struct symbol *func)
187 {
188 	/*
189 	 * If it does not start with "_R", then it is not a Rust symbol.
190 	 */
191 	if (strncmp(func->name, "_R", 2))
192 		return false;
193 
194 	/*
195 	 * These are just heuristics -- we do not control the precise symbol
196 	 * name, due to the crate disambiguators (which depend on the compiler)
197 	 * as well as changes to the source code itself between versions (since
198 	 * these come from the Rust standard library).
199 	 */
200 	return str_ends_with(func->name, "_4core3num20from_str_radix_panic")				||
201 	       str_ends_with(func->name, "_4core3num22from_ascii_radix_panic")				||
202 	       str_ends_with(func->name, "_4core5sliceSp15copy_from_slice17len_mismatch_fail")		||
203 	       str_ends_with(func->name, "_4core6option13expect_failed")				||
204 	       str_ends_with(func->name, "_4core6option13unwrap_failed")				||
205 	       str_ends_with(func->name, "_4core6result13unwrap_failed")				||
206 	       str_ends_with(func->name, "_4core9panicking5panic")					||
207 	       str_ends_with(func->name, "_4core9panicking9panic_fmt")					||
208 	       str_ends_with(func->name, "_4core9panicking14panic_explicit")				||
209 	       str_ends_with(func->name, "_4core9panicking14panic_nounwind")				||
210 	       str_ends_with(func->name, "_4core9panicking18panic_bounds_check")			||
211 	       str_ends_with(func->name, "_4core9panicking18panic_nounwind_fmt")			||
212 	       str_ends_with(func->name, "_4core9panicking19assert_failed_inner")			||
213 	       str_ends_with(func->name, "_4core9panicking30panic_null_pointer_dereference")		||
214 	       str_ends_with(func->name, "_4core9panicking36panic_misaligned_pointer_dereference")	||
215 	       str_ends_with(func->name, "_7___rustc17rust_begin_unwind")				||
216 	       strstr(func->name, "_4core9panicking13assert_failed")					||
217 	       strstr(func->name, "_4core9panicking11panic_const24panic_const_")			||
218 	       (strstr(func->name, "_4core5slice5index") &&
219 		strstr(func->name, "slice_") &&
220 		str_ends_with(func->name, "_fail"));
221 }
222 
223 /*
224  * This checks to see if the given function is a "noreturn" function.
225  *
226  * For global functions which are outside the scope of this object file, we
227  * have to keep a manual list of them.
228  *
229  * For local functions, we have to detect them manually by simply looking for
230  * the lack of a return instruction.
231  */
__dead_end_function(struct objtool_file * file,struct symbol * func,int recursion)232 static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
233 				int recursion)
234 {
235 	int i;
236 	struct instruction *insn;
237 	bool empty = true;
238 
239 #define NORETURN(func) __stringify(func),
240 	static const char * const global_noreturns[] = {
241 #include "noreturns.h"
242 	};
243 #undef NORETURN
244 
245 	if (!func)
246 		return false;
247 
248 	if (!is_local_sym(func)) {
249 		if (is_rust_noreturn(func))
250 			return true;
251 
252 		for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
253 			if (!strcmp(func->name, global_noreturns[i]))
254 				return true;
255 	}
256 
257 	if (is_weak_sym(func))
258 		return false;
259 
260 	if (!func->len)
261 		return false;
262 
263 	insn = find_insn(file, func->sec, func->offset);
264 	if (!insn || !insn_func(insn))
265 		return false;
266 
267 	func_for_each_insn(file, func, insn) {
268 		empty = false;
269 
270 		if (insn->type == INSN_RETURN)
271 			return false;
272 	}
273 
274 	if (empty)
275 		return false;
276 
277 	/*
278 	 * A function can have a sibling call instead of a return.  In that
279 	 * case, the function's dead-end status depends on whether the target
280 	 * of the sibling call returns.
281 	 */
282 	func_for_each_insn(file, func, insn) {
283 		if (is_sibling_call(insn)) {
284 			struct instruction *dest = insn->jump_dest;
285 
286 			if (!dest)
287 				/* sibling call to another file */
288 				return false;
289 
290 			/* local sibling call */
291 			if (recursion == 5) {
292 				/*
293 				 * Infinite recursion: two functions have
294 				 * sibling calls to each other.  This is a very
295 				 * rare case.  It means they aren't dead ends.
296 				 */
297 				return false;
298 			}
299 
300 			return __dead_end_function(file, insn_func(dest), recursion+1);
301 		}
302 	}
303 
304 	return true;
305 }
306 
/* Public entry point for the noreturn check; recursion depth starts at 0. */
static bool dead_end_function(struct objtool_file *file, struct symbol *func)
{
	return __dead_end_function(file, func, 0);
}
311 
init_cfi_state(struct cfi_state * cfi)312 static void init_cfi_state(struct cfi_state *cfi)
313 {
314 	int i;
315 
316 	for (i = 0; i < CFI_NUM_REGS; i++) {
317 		cfi->regs[i].base = CFI_UNDEFINED;
318 		cfi->vals[i].base = CFI_UNDEFINED;
319 	}
320 	cfi->cfa.base = CFI_UNDEFINED;
321 	cfi->drap_reg = CFI_UNDEFINED;
322 	cfi->drap_offset = -1;
323 }
324 
/*
 * Initialize the per-instruction validation state: zero all flags (the
 * memset also zeroes padding, which matters because the embedded cfi state
 * is later compared with memcmp in cficmp()), then mark the CFI state
 * undefined.  The noinstr flag is inherited from the section only when
 * noinstr validation was requested.
 */
static void init_insn_state(struct objtool_file *file, struct insn_state *state,
			    struct section *sec)
{
	memset(state, 0, sizeof(*state));
	init_cfi_state(&state->cfi);

	if (opts.noinstr && sec)
		state->noinstr = sec->noinstr;
}
334 
cfi_alloc(void)335 static struct cfi_state *cfi_alloc(void)
336 {
337 	struct cfi_state *cfi = calloc(1, sizeof(struct cfi_state));
338 	if (!cfi) {
339 		ERROR_GLIBC("calloc");
340 		exit(1);
341 	}
342 	nr_cfi++;
343 	return cfi;
344 }
345 
346 static int cfi_bits;
347 static struct hlist_head *cfi_hash;
348 
/*
 * Compare two CFI states, skipping the embedded 'hash' list node at the
 * start of the struct (see cfi_hash_add(), which links through &cfi->hash).
 * Returns true when the states DIFFER (raw memcmp semantics), so callers
 * test !cficmp() for equality.  The compare is a byte-wise memcmp, which is
 * why allocation/initialization paths zero the whole struct.
 */
static inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2)
{
	return memcmp((void *)cfi1 + sizeof(cfi1->hash),
		      (void *)cfi2 + sizeof(cfi2->hash),
		      sizeof(struct cfi_state) - sizeof(struct hlist_node));
}
355 
/*
 * Hash a CFI state over everything after the embedded 'hash' node, so that
 * two states which compare equal under cficmp() yield the same key.
 */
static inline u32 cfi_key(struct cfi_state *cfi)
{
	return jhash((void *)cfi + sizeof(cfi->hash),
		     sizeof(*cfi) - sizeof(cfi->hash), 0);
}
361 
cfi_hash_find_or_add(struct cfi_state * cfi)362 static struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi)
363 {
364 	struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
365 	struct cfi_state *obj;
366 
367 	hlist_for_each_entry(obj, head, hash) {
368 		if (!cficmp(cfi, obj)) {
369 			nr_cfi_cache++;
370 			return obj;
371 		}
372 	}
373 
374 	obj = cfi_alloc();
375 	*obj = *cfi;
376 	hlist_add_head(&obj->hash, head);
377 
378 	return obj;
379 }
380 
/*
 * Insert @cfi into the global CFI hash without checking for duplicates
 * (contrast with cfi_hash_find_or_add()).
 */
static void cfi_hash_add(struct cfi_state *cfi)
{
	struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];

	hlist_add_head(&cfi->hash, head);
}
387 
cfi_hash_alloc(unsigned long size)388 static void *cfi_hash_alloc(unsigned long size)
389 {
390 	cfi_bits = max(10, ilog2(size));
391 	cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits,
392 			PROT_READ|PROT_WRITE,
393 			MAP_PRIVATE|MAP_ANON, -1, 0);
394 	if (cfi_hash == (void *)-1L) {
395 		ERROR_GLIBC("mmap fail cfi_hash");
396 		cfi_hash = NULL;
397 	}  else if (opts.stats) {
398 		printf("cfi_bits: %d\n", cfi_bits);
399 	}
400 
401 	return cfi_hash;
402 }
403 
404 static unsigned long nr_insns;
405 static unsigned long nr_insns_visited;
406 
407 /*
408  * Call the arch-specific instruction decoder for all the instructions and add
409  * them to the global instruction list.
410  */
decode_instructions(struct objtool_file * file)411 static int decode_instructions(struct objtool_file *file)
412 {
413 	struct section *sec;
414 	struct symbol *func;
415 	unsigned long offset;
416 	struct instruction *insn;
417 
418 	for_each_sec(file->elf, sec) {
419 		struct instruction *insns = NULL;
420 		u8 prev_len = 0;
421 		u8 idx = 0;
422 
423 		if (!is_text_sec(sec))
424 			continue;
425 
426 		if (strcmp(sec->name, ".altinstr_replacement") &&
427 		    strcmp(sec->name, ".altinstr_aux") &&
428 		    strncmp(sec->name, ".discard.", 9))
429 			sec->text = true;
430 
431 		if (!strcmp(sec->name, ".noinstr.text") ||
432 		    !strcmp(sec->name, ".entry.text") ||
433 		    !strcmp(sec->name, ".cpuidle.text") ||
434 		    !strncmp(sec->name, ".text..__x86.", 13))
435 			sec->noinstr = true;
436 
437 		/*
438 		 * .init.text code is ran before userspace and thus doesn't
439 		 * strictly need retpolines, except for modules which are
440 		 * loaded late, they very much do need retpoline in their
441 		 * .init.text
442 		 */
443 		if (!strcmp(sec->name, ".init.text") && !opts.module)
444 			sec->init = true;
445 
446 		for (offset = 0; offset < sec_size(sec); offset += insn->len) {
447 			if (!insns || idx == INSN_CHUNK_MAX) {
448 				insns = calloc(INSN_CHUNK_SIZE, sizeof(*insn));
449 				if (!insns) {
450 					ERROR_GLIBC("calloc");
451 					return -1;
452 				}
453 				idx = 0;
454 			} else {
455 				idx++;
456 			}
457 			insn = &insns[idx];
458 			insn->idx = idx;
459 
460 			INIT_LIST_HEAD(&insn->call_node);
461 			insn->sec = sec;
462 			insn->offset = offset;
463 			insn->prev_len = prev_len;
464 
465 			if (arch_decode_instruction(file, sec, offset, sec_size(sec) - offset, insn))
466 				return -1;
467 
468 			prev_len = insn->len;
469 
470 			/*
471 			 * By default, "ud2" is a dead end unless otherwise
472 			 * annotated, because GCC 7 inserts it for certain
473 			 * divide-by-zero cases.
474 			 */
475 			if (insn->type == INSN_BUG)
476 				insn->dead_end = true;
477 
478 			hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset));
479 			nr_insns++;
480 		}
481 
482 		sec_for_each_sym(sec, func) {
483 			if (!is_notype_sym(func) && !is_func_sym(func))
484 				continue;
485 
486 			if (func->offset == sec_size(sec)) {
487 				/* Heuristic: likely an "end" symbol */
488 				if (is_notype_sym(func))
489 					continue;
490 				ERROR("%s(): STT_FUNC at end of section", func->name);
491 				return -1;
492 			}
493 
494 			if (func->embedded_insn || func->alias != func)
495 				continue;
496 
497 			if (!find_insn(file, sec, func->offset)) {
498 				ERROR("%s(): can't find starting instruction", func->name);
499 				return -1;
500 			}
501 
502 			sym_for_each_insn(file, func, insn) {
503 				insn->sym = func;
504 				if (is_func_sym(func) &&
505 				    insn->type == INSN_ENDBR &&
506 				    list_empty(&insn->call_node)) {
507 					if (insn->offset == func->offset) {
508 						list_add_tail(&insn->call_node, &file->endbr_list);
509 						file->nr_endbr++;
510 					} else {
511 						file->nr_endbr_int++;
512 					}
513 				}
514 			}
515 		}
516 	}
517 
518 	if (opts.stats)
519 		printf("nr_insns: %lu\n", nr_insns);
520 
521 	return 0;
522 }
523 
524 /*
525  * Known pv_ops*[] arrays.
526  */
527 static struct {
528 	const char *name;
529 	int idx_off;
530 } pv_ops_tables[] = {
531 	{ .name = "pv_ops", },
532 	{ .name = "pv_ops_lock", },
533 	{ .name = NULL, .idx_off = -1 }
534 };
535 
536 /*
537  * Get index offset for a pv_ops* array.
538  */
pv_ops_idx_off(const char * symname)539 int pv_ops_idx_off(const char *symname)
540 {
541 	int idx;
542 
543 	for (idx = 0; pv_ops_tables[idx].name; idx++) {
544 		if (!strcmp(symname, pv_ops_tables[idx].name))
545 			break;
546 	}
547 
548 	return pv_ops_tables[idx].idx_off;
549 }
550 
551 /*
552  * Read a pv_ops*[] .data table to find the static initialized values.
553  */
add_pv_ops(struct objtool_file * file,int pv_ops_idx)554 static int add_pv_ops(struct objtool_file *file, int pv_ops_idx)
555 {
556 	struct symbol *sym, *func;
557 	unsigned long off, end;
558 	struct reloc *reloc;
559 	int idx, idx_off;
560 	const char *symname;
561 
562 	symname = pv_ops_tables[pv_ops_idx].name;
563 	sym = find_symbol_by_name(file->elf, symname);
564 	if (!sym) {
565 		ERROR("Unknown pv_ops array %s", symname);
566 		return -1;
567 	}
568 
569 	off = sym->offset;
570 	end = off + sym->len;
571 	idx_off = pv_ops_tables[pv_ops_idx].idx_off;
572 	if (idx_off < 0) {
573 		ERROR("pv_ops array %s has unknown index offset", symname);
574 		return -1;
575 	}
576 
577 	for (;;) {
578 		reloc = find_reloc_by_dest_range(file->elf, sym->sec, off, end - off);
579 		if (!reloc)
580 			break;
581 
582 		idx = (reloc_offset(reloc) - sym->offset) / sizeof(unsigned long);
583 
584 		func = reloc->sym;
585 		if (is_sec_sym(func))
586 			func = find_symbol_by_offset(reloc->sym->sec,
587 						     reloc_addend(reloc));
588 		if (!func) {
589 			ERROR_FUNC(reloc->sym->sec, reloc_addend(reloc),
590 				   "can't find func at %s[%d]", symname, idx);
591 			return -1;
592 		}
593 
594 		if (objtool_pv_add(file, idx + idx_off, func))
595 			return -1;
596 
597 		off = reloc_offset(reloc) + 1;
598 		if (off > end)
599 			break;
600 	}
601 
602 	return 0;
603 }
604 
605 /*
606  * Allocate and initialize file->pv_ops[].
607  */
init_pv_ops(struct objtool_file * file)608 static int init_pv_ops(struct objtool_file *file)
609 {
610 	struct symbol *sym;
611 	int idx, nr;
612 
613 	if (!opts.noinstr)
614 		return 0;
615 
616 	file->pv_ops = NULL;
617 
618 	nr = 0;
619 	for (idx = 0; pv_ops_tables[idx].name; idx++) {
620 		sym = find_symbol_by_name(file->elf, pv_ops_tables[idx].name);
621 		if (!sym) {
622 			pv_ops_tables[idx].idx_off = -1;
623 			continue;
624 		}
625 		pv_ops_tables[idx].idx_off = nr;
626 		nr += sym->len / sizeof(unsigned long);
627 	}
628 
629 	if (nr == 0)
630 		return 0;
631 
632 	file->pv_ops = calloc(nr, sizeof(struct pv_state));
633 	if (!file->pv_ops) {
634 		ERROR_GLIBC("calloc");
635 		return -1;
636 	}
637 
638 	for (idx = 0; idx < nr; idx++)
639 		INIT_LIST_HEAD(&file->pv_ops[idx].targets);
640 
641 	for (idx = 0; pv_ops_tables[idx].name; idx++) {
642 		if (pv_ops_tables[idx].idx_off < 0)
643 			continue;
644 		if (add_pv_ops(file, idx))
645 			return -1;
646 	}
647 
648 	return 0;
649 }
650 
is_livepatch_module(struct objtool_file * file)651 static bool is_livepatch_module(struct objtool_file *file)
652 {
653 	struct section *sec;
654 
655 	if (!opts.module)
656 		return false;
657 
658 	sec = find_section_by_name(file->elf, ".modinfo");
659 	if (!sec)
660 		return false;
661 
662 	return memmem(sec->data->d_buf, sec_size(sec), "\0livepatch=Y", 12);
663 }
664 
/*
 * Emit the .static_call_sites section: one static_call_site entry per call
 * collected on file->static_call_list, with relocs for the call address and
 * its static_call_key.
 */
static int create_static_call_sections(struct objtool_file *file)
{
	struct static_call_site *site;
	struct section *sec;
	struct instruction *insn;
	struct symbol *key_sym;
	char *key_name, *tmp;
	int idx;

	sec = find_section_by_name(file->elf, ".static_call_sites");
	if (sec) {
		/*
		 * Livepatch modules may have already extracted the static call
		 * site entries to take advantage of vmlinux static call
		 * privileges.
		 */
		if (!file->klp)
			WARN("file already has .static_call_sites section, skipping");

		return 0;
	}

	if (list_empty(&file->static_call_list))
		return 0;

	/* Count the sites to size the new section. */
	idx = 0;
	list_for_each_entry(insn, &file->static_call_list, call_node)
		idx++;

	/* Two relocs per site: 'addr' and 'key'. */
	sec = elf_create_section_pair(file->elf, ".static_call_sites",
				      sizeof(*site), idx, idx * 2);
	if (!sec)
		return -1;

	/* Allow modules to modify the low bits of static_call_site::key */
	sec->sh.sh_flags |= SHF_WRITE;

	idx = 0;
	list_for_each_entry(insn, &file->static_call_list, call_node) {

		/* populate reloc for 'addr' */
		if (!elf_init_reloc_text_sym(file->elf, sec,
					     idx * sizeof(*site), idx * 2,
					     insn->sec, insn->offset))
			return -1;

		/*
		 * Derive the key symbol name by rewriting the trampoline
		 * prefix in a copy of the trampoline's name.
		 * NOTE(review): key_name is never freed; tmp points into it
		 * and objtool is a short-lived tool, so it is left to exit().
		 */
		key_name = strdup(insn_call_dest(insn)->name);
		if (!key_name) {
			ERROR_GLIBC("strdup");
			return -1;
		}
		if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR,
			    STATIC_CALL_TRAMP_PREFIX_LEN)) {
			ERROR("static_call: trampoline name malformed: %s", key_name);
			return -1;
		}
		tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN;
		memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN);

		key_sym = find_symbol_by_name(file->elf, tmp);
		if (!key_sym) {
			if (!opts.module) {
				ERROR("static_call: can't find static_call_key symbol: %s", tmp);
				return -1;
			}

			/*
			 * For modules(), the key might not be exported, which
			 * means the module can make static calls but isn't
			 * allowed to change them.
			 *
			 * In that case we temporarily set the key to be the
			 * trampoline address.  This is fixed up in
			 * static_call_add_module().
			 */
			key_sym = insn_call_dest(insn);
		}

		/* populate reloc for 'key' */
		if (!elf_init_reloc_data_sym(file->elf, sec,
					     idx * sizeof(*site) + 4,
					     (idx * 2) + 1, key_sym,
					     is_sibling_call(insn) * STATIC_CALL_SITE_TAIL))
			return -1;

		idx++;
	}

	return 0;
}
756 
create_retpoline_sites_sections(struct objtool_file * file)757 static int create_retpoline_sites_sections(struct objtool_file *file)
758 {
759 	struct instruction *insn;
760 	struct section *sec;
761 	int idx;
762 
763 	sec = find_section_by_name(file->elf, ".retpoline_sites");
764 	if (sec) {
765 		WARN("file already has .retpoline_sites, skipping");
766 		return 0;
767 	}
768 
769 	idx = 0;
770 	list_for_each_entry(insn, &file->retpoline_call_list, call_node)
771 		idx++;
772 
773 	if (!idx)
774 		return 0;
775 
776 	sec = elf_create_section_pair(file->elf, ".retpoline_sites",
777 				      sizeof(int), idx, idx);
778 	if (!sec)
779 		return -1;
780 
781 	idx = 0;
782 	list_for_each_entry(insn, &file->retpoline_call_list, call_node) {
783 
784 		if (!elf_init_reloc_text_sym(file->elf, sec,
785 					     idx * sizeof(int), idx,
786 					     insn->sec, insn->offset))
787 			return -1;
788 
789 		idx++;
790 	}
791 
792 	return 0;
793 }
794 
create_return_sites_sections(struct objtool_file * file)795 static int create_return_sites_sections(struct objtool_file *file)
796 {
797 	struct instruction *insn;
798 	struct section *sec;
799 	int idx;
800 
801 	sec = find_section_by_name(file->elf, ".return_sites");
802 	if (sec) {
803 		WARN("file already has .return_sites, skipping");
804 		return 0;
805 	}
806 
807 	idx = 0;
808 	list_for_each_entry(insn, &file->return_thunk_list, call_node)
809 		idx++;
810 
811 	if (!idx)
812 		return 0;
813 
814 	sec = elf_create_section_pair(file->elf, ".return_sites",
815 				      sizeof(int), idx, idx);
816 	if (!sec)
817 		return -1;
818 
819 	idx = 0;
820 	list_for_each_entry(insn, &file->return_thunk_list, call_node) {
821 
822 		if (!elf_init_reloc_text_sym(file->elf, sec,
823 					     idx * sizeof(int), idx,
824 					     insn->sec, insn->offset))
825 			return -1;
826 
827 		idx++;
828 	}
829 
830 	return 0;
831 }
832 
/*
 * Emit the .ibt_endbr_seal section listing superfluous function-entry ENDBR
 * instructions (collected on file->endbr_list in decode_instructions()).
 */
static int create_ibt_endbr_seal_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".ibt_endbr_seal");
	if (sec) {
		WARN("file already has .ibt_endbr_seal, skipping");
		return 0;
	}

	idx = 0;
	list_for_each_entry(insn, &file->endbr_list, call_node)
		idx++;

	if (opts.stats) {
		printf("ibt: ENDBR at function start: %d\n", file->nr_endbr);
		printf("ibt: ENDBR inside functions:  %d\n", file->nr_endbr_int);
		printf("ibt: superfluous ENDBR:       %d\n", idx);
	}

	if (!idx)
		return 0;

	sec = elf_create_section_pair(file->elf, ".ibt_endbr_seal",
				      sizeof(int), idx, idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->endbr_list, call_node) {

		int *site = (int *)sec->data->d_buf + idx;
		struct symbol *sym = insn->sym;
		*site = 0;	/* actual value supplied by the reloc below */

		/* Reject the deprecated magic init_module()/cleanup_module() names. */
		if (opts.module && sym && is_func_sym(sym) &&
		    insn->offset == sym->offset &&
		    (!strcmp(sym->name, "init_module") ||
		     !strcmp(sym->name, "cleanup_module"))) {
			ERROR("%s(): Magic init_module() function name is deprecated, use module_init(fn) instead",
			      sym->name);
			return -1;
		}

		if (!elf_init_reloc_text_sym(file->elf, sec,
					     idx * sizeof(int), idx,
					     insn->sec, insn->offset))
			return -1;

		idx++;
	}

	return 0;
}
889 
create_cfi_sections(struct objtool_file * file)890 static int create_cfi_sections(struct objtool_file *file)
891 {
892 	struct section *sec;
893 	struct symbol *sym;
894 	int idx;
895 
896 	sec = find_section_by_name(file->elf, ".cfi_sites");
897 	if (sec) {
898 		WARN("file already has .cfi_sites section, skipping");
899 		return 0;
900 	}
901 
902 	idx = 0;
903 	for_each_sym(file->elf, sym) {
904 		if (!is_func_sym(sym))
905 			continue;
906 
907 		if (strncmp(sym->name, "__cfi_", 6))
908 			continue;
909 
910 		idx++;
911 	}
912 
913 	sec = elf_create_section_pair(file->elf, ".cfi_sites",
914 				      sizeof(unsigned int), idx, idx);
915 	if (!sec)
916 		return -1;
917 
918 	idx = 0;
919 	for_each_sym(file->elf, sym) {
920 		if (!is_func_sym(sym))
921 			continue;
922 
923 		if (strncmp(sym->name, "__cfi_", 6))
924 			continue;
925 
926 		if (!elf_init_reloc_text_sym(file->elf, sec,
927 					     idx * sizeof(unsigned int), idx,
928 					     sym->sec, sym->offset))
929 			return -1;
930 
931 		idx++;
932 	}
933 
934 	return 0;
935 }
936 
/*
 * Emit the __mcount_loc section: one address-sized absolute reloc per
 * mcount call site collected on file->mcount_loc_list.
 */
static int create_mcount_loc_sections(struct objtool_file *file)
{
	size_t addr_size = elf_addr_size(file->elf);
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, "__mcount_loc");
	if (sec) {
		/*
		 * Livepatch modules have already extracted their __mcount_loc
		 * entries to cover the !CONFIG_FTRACE_MCOUNT_USE_OBJTOOL case.
		 */
		if (!file->klp)
			WARN("file already has __mcount_loc section, skipping");

		return 0;
	}

	if (list_empty(&file->mcount_loc_list))
		return 0;

	/* Count the sites to size the new section. */
	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, call_node)
		idx++;

	sec = elf_create_section_pair(file->elf, "__mcount_loc", addr_size,
				      idx, idx);
	if (!sec)
		return -1;

	sec->sh.sh_addralign = addr_size;

	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, call_node) {

		struct reloc *reloc;

		reloc = elf_init_reloc_text_sym(file->elf, sec, idx * addr_size, idx,
					       insn->sec, insn->offset);
		if (!reloc)
			return -1;

		/* Absolute reloc, sized to the ELF class (32- or 64-bit). */
		set_reloc_type(file->elf, reloc, addr_size == 8 ? R_ABS64 : R_ABS32);

		idx++;
	}

	return 0;
}
987 
create_direct_call_sections(struct objtool_file * file)988 static int create_direct_call_sections(struct objtool_file *file)
989 {
990 	struct instruction *insn;
991 	struct section *sec;
992 	int idx;
993 
994 	sec = find_section_by_name(file->elf, ".call_sites");
995 	if (sec) {
996 		WARN("file already has .call_sites section, skipping");
997 		return 0;
998 	}
999 
1000 	if (list_empty(&file->call_list))
1001 		return 0;
1002 
1003 	idx = 0;
1004 	list_for_each_entry(insn, &file->call_list, call_node)
1005 		idx++;
1006 
1007 	sec = elf_create_section_pair(file->elf, ".call_sites",
1008 				      sizeof(unsigned int), idx, idx);
1009 	if (!sec)
1010 		return -1;
1011 
1012 	idx = 0;
1013 	list_for_each_entry(insn, &file->call_list, call_node) {
1014 
1015 		if (!elf_init_reloc_text_sym(file->elf, sec,
1016 					     idx * sizeof(unsigned int), idx,
1017 					     insn->sec, insn->offset))
1018 			return -1;
1019 
1020 		idx++;
1021 	}
1022 
1023 	return 0;
1024 }
1025 
#ifdef BUILD_KLP
/*
 * Emit the .discard.sym_checksum section: a (reloc'd address, checksum)
 * pair for every symbol with a computed checksum, used for livepatch
 * tooling.  Only built when BUILD_KLP is enabled.
 */
static int create_sym_checksum_section(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *sym;
	unsigned int idx = 0;
	struct sym_checksum *checksum;
	size_t entsize = sizeof(struct sym_checksum);

	sec = find_section_by_name(file->elf, ".discard.sym_checksum");
	if (sec) {
		if (!opts.dryrun)
			WARN("file already has .discard.sym_checksum section, skipping");

		return 0;
	}

	/* Count symbols carrying a checksum to size the section. */
	for_each_sym(file->elf, sym)
		if (sym->csum.checksum)
			idx++;

	if (!idx)
		return 0;

	sec = elf_create_section_pair(file->elf, ".discard.sym_checksum", entsize,
				      idx, idx);
	if (!sec)
		return -1;

	idx = 0;
	for_each_sym(file->elf, sym) {
		if (!sym->csum.checksum)
			continue;

		if (!elf_init_reloc(file->elf, sec->rsec, idx, idx * entsize,
				    sym, 0, R_TEXT64))
			return -1;

		checksum = (struct sym_checksum *)sec->data->d_buf + idx;
		checksum->addr = 0; /* reloc */
		checksum->checksum = sym->csum.checksum;

		mark_sec_changed(file->elf, sec, true);

		idx++;
	}

	return 0;
}
#else
/* Without BUILD_KLP there is nothing to emit; callers get -EINVAL. */
static int create_sym_checksum_section(struct objtool_file *file) { return -EINVAL; }
#endif
1078 
1079 /*
1080  * Warnings shouldn't be reported for ignored functions.
1081  */
add_ignores(struct objtool_file * file)1082 static int add_ignores(struct objtool_file *file)
1083 {
1084 	struct section *rsec;
1085 	struct symbol *func;
1086 	struct reloc *reloc;
1087 
1088 	rsec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
1089 	if (!rsec)
1090 		return 0;
1091 
1092 	for_each_reloc(rsec, reloc) {
1093 		switch (reloc->sym->type) {
1094 		case STT_FUNC:
1095 			func = reloc->sym;
1096 			break;
1097 
1098 		case STT_SECTION:
1099 			func = find_func_by_offset(reloc->sym->sec, reloc_addend(reloc));
1100 			if (!func)
1101 				continue;
1102 			break;
1103 
1104 		default:
1105 			ERROR("unexpected relocation symbol type in %s: %d",
1106 			      rsec->name, reloc->sym->type);
1107 			return -1;
1108 		}
1109 
1110 		func->ignore = true;
1111 		if (func->cfunc)
1112 			func->cfunc->ignore = true;
1113 	}
1114 
1115 	return 0;
1116 }
1117 
/*
 * This is a whitelist of functions that is allowed to be called with AC set.
 * The list is meant to be minimal and only contains compiler instrumentation
 * ABI and a few functions used to implement *_{to,from}_user() functions.
 *
 * These functions must not directly change AC, but may PUSHF/POPF.
 */
static const char *uaccess_safe_builtin[] = {
	/* KASAN */
	"kasan_report",
	"kasan_check_range",
	/* KASAN out-of-line */
	"__asan_loadN_noabort",
	"__asan_load1_noabort",
	"__asan_load2_noabort",
	"__asan_load4_noabort",
	"__asan_load8_noabort",
	"__asan_load16_noabort",
	"__asan_storeN_noabort",
	"__asan_store1_noabort",
	"__asan_store2_noabort",
	"__asan_store4_noabort",
	"__asan_store8_noabort",
	"__asan_store16_noabort",
	"__kasan_check_read",
	"__kasan_check_write",
	/* KASAN in-line */
	"__asan_report_load_n_noabort",
	"__asan_report_load1_noabort",
	"__asan_report_load2_noabort",
	"__asan_report_load4_noabort",
	"__asan_report_load8_noabort",
	"__asan_report_load16_noabort",
	"__asan_report_store_n_noabort",
	"__asan_report_store1_noabort",
	"__asan_report_store2_noabort",
	"__asan_report_store4_noabort",
	"__asan_report_store8_noabort",
	"__asan_report_store16_noabort",
	/* KCSAN */
	"__kcsan_check_access",
	"__kcsan_mb",
	"__kcsan_wmb",
	"__kcsan_rmb",
	"__kcsan_release",
	"kcsan_found_watchpoint",
	"kcsan_setup_watchpoint",
	"kcsan_check_scoped_accesses",
	"kcsan_disable_current",
	"kcsan_enable_current_nowarn",
	/* KCSAN/TSAN */
	"__tsan_func_entry",
	"__tsan_func_exit",
	"__tsan_read_range",
	"__tsan_write_range",
	"__tsan_read1",
	"__tsan_read2",
	"__tsan_read4",
	"__tsan_read8",
	"__tsan_read16",
	"__tsan_write1",
	"__tsan_write2",
	"__tsan_write4",
	"__tsan_write8",
	"__tsan_write16",
	"__tsan_read_write1",
	"__tsan_read_write2",
	"__tsan_read_write4",
	"__tsan_read_write8",
	"__tsan_read_write16",
	"__tsan_volatile_read1",
	"__tsan_volatile_read2",
	"__tsan_volatile_read4",
	"__tsan_volatile_read8",
	"__tsan_volatile_read16",
	"__tsan_volatile_write1",
	"__tsan_volatile_write2",
	"__tsan_volatile_write4",
	"__tsan_volatile_write8",
	"__tsan_volatile_write16",
	"__tsan_atomic8_load",
	"__tsan_atomic16_load",
	"__tsan_atomic32_load",
	"__tsan_atomic64_load",
	"__tsan_atomic8_store",
	"__tsan_atomic16_store",
	"__tsan_atomic32_store",
	"__tsan_atomic64_store",
	"__tsan_atomic8_exchange",
	"__tsan_atomic16_exchange",
	"__tsan_atomic32_exchange",
	"__tsan_atomic64_exchange",
	"__tsan_atomic8_fetch_add",
	"__tsan_atomic16_fetch_add",
	"__tsan_atomic32_fetch_add",
	"__tsan_atomic64_fetch_add",
	"__tsan_atomic8_fetch_sub",
	"__tsan_atomic16_fetch_sub",
	"__tsan_atomic32_fetch_sub",
	"__tsan_atomic64_fetch_sub",
	"__tsan_atomic8_fetch_and",
	"__tsan_atomic16_fetch_and",
	"__tsan_atomic32_fetch_and",
	"__tsan_atomic64_fetch_and",
	"__tsan_atomic8_fetch_or",
	"__tsan_atomic16_fetch_or",
	"__tsan_atomic32_fetch_or",
	"__tsan_atomic64_fetch_or",
	"__tsan_atomic8_fetch_xor",
	"__tsan_atomic16_fetch_xor",
	"__tsan_atomic32_fetch_xor",
	"__tsan_atomic64_fetch_xor",
	"__tsan_atomic8_fetch_nand",
	"__tsan_atomic16_fetch_nand",
	"__tsan_atomic32_fetch_nand",
	"__tsan_atomic64_fetch_nand",
	"__tsan_atomic8_compare_exchange_strong",
	"__tsan_atomic16_compare_exchange_strong",
	"__tsan_atomic32_compare_exchange_strong",
	"__tsan_atomic64_compare_exchange_strong",
	"__tsan_atomic8_compare_exchange_weak",
	"__tsan_atomic16_compare_exchange_weak",
	"__tsan_atomic32_compare_exchange_weak",
	"__tsan_atomic64_compare_exchange_weak",
	"__tsan_atomic8_compare_exchange_val",
	"__tsan_atomic16_compare_exchange_val",
	"__tsan_atomic32_compare_exchange_val",
	"__tsan_atomic64_compare_exchange_val",
	"__tsan_atomic_thread_fence",
	"__tsan_atomic_signal_fence",
	"__tsan_unaligned_read16",
	"__tsan_unaligned_write16",
	/* KCOV */
	"write_comp_data",
	"check_kcov_mode",
	"__sanitizer_cov_trace_pc",
	"__sanitizer_cov_trace_const_cmp1",
	"__sanitizer_cov_trace_const_cmp2",
	"__sanitizer_cov_trace_const_cmp4",
	"__sanitizer_cov_trace_const_cmp8",
	"__sanitizer_cov_trace_cmp1",
	"__sanitizer_cov_trace_cmp2",
	"__sanitizer_cov_trace_cmp4",
	"__sanitizer_cov_trace_cmp8",
	"__sanitizer_cov_trace_switch",
	/* KMSAN */
	"kmsan_copy_to_user",
	"kmsan_disable_current",
	"kmsan_enable_current",
	"kmsan_report",
	"kmsan_unpoison_entry_regs",
	"kmsan_unpoison_memory",
	"__msan_chain_origin",
	"__msan_get_context_state",
	"__msan_instrument_asm_store",
	"__msan_metadata_ptr_for_load_1",
	"__msan_metadata_ptr_for_load_2",
	"__msan_metadata_ptr_for_load_4",
	"__msan_metadata_ptr_for_load_8",
	"__msan_metadata_ptr_for_load_n",
	"__msan_metadata_ptr_for_store_1",
	"__msan_metadata_ptr_for_store_2",
	"__msan_metadata_ptr_for_store_4",
	"__msan_metadata_ptr_for_store_8",
	"__msan_metadata_ptr_for_store_n",
	"__msan_poison_alloca",
	"__msan_warning",
	/* UBSAN */
	"ubsan_type_mismatch_common",
	"__ubsan_handle_type_mismatch",
	"__ubsan_handle_type_mismatch_v1",
	"__ubsan_handle_shift_out_of_bounds",
	"__ubsan_handle_load_invalid_value",
	/* KSTACK_ERASE */
	"__sanitizer_cov_stack_depth",
	/* TRACE_BRANCH_PROFILING */
	"ftrace_likely_update",
	/* STACKPROTECTOR */
	"__stack_chk_fail",
	/* misc */
	"csum_partial_copy_generic",
	"copy_mc_fragile",
	"copy_mc_fragile_handle_tail",
	"copy_mc_enhanced_fast_string",
	"rep_stos_alternative",
	"rep_movs_alternative",
	"__copy_user_nocache",
	NULL	/* sentinel -- the list must stay NULL-terminated */
};
1307 
add_uaccess_safe(struct objtool_file * file)1308 static void add_uaccess_safe(struct objtool_file *file)
1309 {
1310 	struct symbol *func;
1311 	const char **name;
1312 
1313 	if (!opts.uaccess)
1314 		return;
1315 
1316 	for (name = uaccess_safe_builtin; *name; name++) {
1317 		func = find_symbol_by_name(file->elf, *name);
1318 		if (!func)
1319 			continue;
1320 
1321 		func->uaccess_safe = true;
1322 	}
1323 }
1324 
/*
 * Symbols that replace INSN_CALL_DYNAMIC, every (tail) call to such a symbol
 * will be added to the .retpoline_sites section.
 */
__weak bool arch_is_retpoline(struct symbol *sym)
{
	/* Default: no retpoline symbols; arch code may override this weak stub. */
	return false;
}
1333 
/*
 * Symbols that replace INSN_RETURN, every (tail) call to such a symbol
 * will be added to the .return_sites section.
 */
__weak bool arch_is_rethunk(struct symbol *sym)
{
	/* Default: no return thunks; arch code may override this weak stub. */
	return false;
}
1342 
/*
 * Symbols that are embedded inside other instructions, because sometimes crazy
 * code exists. These are mostly ignored for validation purposes.
 */
__weak bool arch_is_embedded_insn(struct symbol *sym)
{
	/* Default: no embedded-instruction symbols; arch code may override. */
	return false;
}
1351 
insn_reloc(struct objtool_file * file,struct instruction * insn)1352 static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
1353 {
1354 	struct reloc *reloc;
1355 
1356 	if (insn->no_reloc)
1357 		return NULL;
1358 
1359 	if (!file)
1360 		return NULL;
1361 
1362 	reloc = find_reloc_by_dest_range(file->elf, insn->sec,
1363 					 insn->offset, insn->len);
1364 	if (!reloc) {
1365 		insn->no_reloc = 1;
1366 		return NULL;
1367 	}
1368 
1369 	return reloc;
1370 }
1371 
remove_insn_ops(struct instruction * insn)1372 static void remove_insn_ops(struct instruction *insn)
1373 {
1374 	struct stack_op *op, *next;
1375 
1376 	for (op = insn->stack_ops; op; op = next) {
1377 		next = op->next;
1378 		free(op);
1379 	}
1380 	insn->stack_ops = NULL;
1381 }
1382 
annotate_call_site(struct objtool_file * file,struct instruction * insn,bool sibling)1383 static int annotate_call_site(struct objtool_file *file,
1384 			       struct instruction *insn, bool sibling)
1385 {
1386 	struct reloc *reloc = insn_reloc(file, insn);
1387 	struct symbol *sym = insn_call_dest(insn);
1388 
1389 	if (!sym)
1390 		sym = reloc->sym;
1391 
1392 	if (sym->static_call_tramp) {
1393 		list_add_tail(&insn->call_node, &file->static_call_list);
1394 		return 0;
1395 	}
1396 
1397 	if (sym->retpoline_thunk) {
1398 		list_add_tail(&insn->call_node, &file->retpoline_call_list);
1399 		return 0;
1400 	}
1401 
1402 	/*
1403 	 * Many compilers cannot disable KCOV or sanitizer calls with a function
1404 	 * attribute so they need a little help, NOP out any such calls from
1405 	 * noinstr text.
1406 	 */
1407 	if (opts.hack_noinstr && insn->sec->noinstr && sym->profiling_func) {
1408 		if (reloc)
1409 			set_reloc_type(file->elf, reloc, R_NONE);
1410 
1411 		if (elf_write_insn(file->elf, insn->sec,
1412 				   insn->offset, insn->len,
1413 				   sibling ? arch_ret_insn(insn->len)
1414 					   : arch_nop_insn(insn->len))) {
1415 			return -1;
1416 		}
1417 
1418 		insn->type = sibling ? INSN_RETURN : INSN_NOP;
1419 
1420 		if (sibling) {
1421 			/*
1422 			 * We've replaced the tail-call JMP insn by two new
1423 			 * insn: RET; INT3, except we only have a single struct
1424 			 * insn here. Mark it retpoline_safe to avoid the SLS
1425 			 * warning, instead of adding another insn.
1426 			 */
1427 			insn->retpoline_safe = true;
1428 		}
1429 
1430 		return 0;
1431 	}
1432 
1433 	if (opts.mcount && sym->fentry) {
1434 		if (sibling)
1435 			WARN_INSN(insn, "tail call to __fentry__ !?!?");
1436 		if (opts.mnop) {
1437 			if (reloc)
1438 				set_reloc_type(file->elf, reloc, R_NONE);
1439 
1440 			if (elf_write_insn(file->elf, insn->sec,
1441 					   insn->offset, insn->len,
1442 					   arch_nop_insn(insn->len))) {
1443 				return -1;
1444 			}
1445 
1446 			insn->type = INSN_NOP;
1447 		}
1448 
1449 		list_add_tail(&insn->call_node, &file->mcount_loc_list);
1450 		return 0;
1451 	}
1452 
1453 	if (insn->type == INSN_CALL && !insn->sec->init &&
1454 	    !insn->_call_dest->embedded_insn)
1455 		list_add_tail(&insn->call_node, &file->call_list);
1456 
1457 	if (!sibling && dead_end_function(file, sym))
1458 		insn->dead_end = true;
1459 
1460 	return 0;
1461 }
1462 
add_call_dest(struct objtool_file * file,struct instruction * insn,struct symbol * dest,bool sibling)1463 static int add_call_dest(struct objtool_file *file, struct instruction *insn,
1464 			  struct symbol *dest, bool sibling)
1465 {
1466 	insn->_call_dest = dest;
1467 	if (!dest)
1468 		return 0;
1469 
1470 	/*
1471 	 * Whatever stack impact regular CALLs have, should be undone
1472 	 * by the RETURN of the called function.
1473 	 *
1474 	 * Annotated intra-function calls retain the stack_ops but
1475 	 * are converted to JUMP, see read_intra_function_calls().
1476 	 */
1477 	remove_insn_ops(insn);
1478 
1479 	return annotate_call_site(file, insn, sibling);
1480 }
1481 
add_retpoline_call(struct objtool_file * file,struct instruction * insn)1482 static int add_retpoline_call(struct objtool_file *file, struct instruction *insn)
1483 {
1484 	/*
1485 	 * Retpoline calls/jumps are really dynamic calls/jumps in disguise,
1486 	 * so convert them accordingly.
1487 	 */
1488 	switch (insn->type) {
1489 	case INSN_CALL:
1490 		insn->type = INSN_CALL_DYNAMIC;
1491 		break;
1492 	case INSN_JUMP_UNCONDITIONAL:
1493 		insn->type = INSN_JUMP_DYNAMIC;
1494 		break;
1495 	case INSN_JUMP_CONDITIONAL:
1496 		insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;
1497 		break;
1498 	default:
1499 		return 0;
1500 	}
1501 
1502 	insn->retpoline_safe = true;
1503 
1504 	/*
1505 	 * Whatever stack impact regular CALLs have, should be undone
1506 	 * by the RETURN of the called function.
1507 	 *
1508 	 * Annotated intra-function calls retain the stack_ops but
1509 	 * are converted to JUMP, see read_intra_function_calls().
1510 	 */
1511 	remove_insn_ops(insn);
1512 
1513 	return annotate_call_site(file, insn, false);
1514 }
1515 
add_return_call(struct objtool_file * file,struct instruction * insn,bool add)1516 static void add_return_call(struct objtool_file *file, struct instruction *insn, bool add)
1517 {
1518 	/*
1519 	 * Return thunk tail calls are really just returns in disguise,
1520 	 * so convert them accordingly.
1521 	 */
1522 	insn->type = INSN_RETURN;
1523 	insn->retpoline_safe = true;
1524 
1525 	if (add)
1526 		list_add_tail(&insn->call_node, &file->return_thunk_list);
1527 }
1528 
is_first_func_insn(struct objtool_file * file,struct instruction * insn)1529 static bool is_first_func_insn(struct objtool_file *file,
1530 			       struct instruction *insn)
1531 {
1532 	struct symbol *func = insn_func(insn);
1533 
1534 	if (!func)
1535 		return false;
1536 
1537 	if (insn->offset == func->offset)
1538 		return true;
1539 
1540 	/* Allow direct CALL/JMP past ENDBR */
1541 	if (opts.ibt) {
1542 		struct instruction *prev = prev_insn_same_sym(file, insn);
1543 
1544 		if (prev && prev->type == INSN_ENDBR &&
1545 		    insn->offset == func->offset + prev->len)
1546 			return true;
1547 	}
1548 
1549 	return false;
1550 }
1551 
/*
 * Find the destination instructions for all jumps.
 *
 * For each static jump, resolve where it lands: a plain intra-function
 * target (jump_dest), a retpoline/return-thunk site, or a sibling call to
 * another function.  Returns 0 on success, -1 on an unresolvable jump.
 */
static int add_jump_destinations(struct objtool_file *file)
{
	struct instruction *insn;
	struct reloc *reloc;

	for_each_insn(file, insn) {
		struct symbol *func = insn_func(insn);
		struct instruction *dest_insn;
		struct section *dest_sec;
		struct symbol *dest_sym;
		unsigned long dest_off;

		if (!is_static_jump(insn))
			continue;

		if (insn->jump_dest) {
			/*
			 * handle_group_alt() may have previously set
			 * 'jump_dest' for some alternatives.
			 */
			continue;
		}

		reloc = insn_reloc(file, insn);
		if (!reloc) {
			/* Relative jump within the section: decode the target. */
			dest_sec = insn->sec;
			dest_off = arch_jump_destination(insn);
			dest_sym = dest_sec->sym;
		} else {
			dest_sym = reloc->sym;
			if (is_undef_sym(dest_sym)) {
				/* Target lives in another object file. */
				if (dest_sym->retpoline_thunk) {
					if (add_retpoline_call(file, insn))
						return -1;
					continue;
				}

				if (dest_sym->return_thunk) {
					add_return_call(file, insn, true);
					continue;
				}

				/* External symbol */
				if (func) {
					/* External sibling call */
					if (add_call_dest(file, insn, dest_sym, true))
						return -1;
					continue;
				}

				/* Non-func asm code jumping to external symbol */
				continue;
			}

			dest_sec = dest_sym->sec;
			dest_off = dest_sym->offset + arch_insn_adjusted_addend(insn, reloc);
		}

		dest_insn = find_insn(file, dest_sec, dest_off);
		if (!dest_insn) {
			struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off);

			/*
			 * retbleed_untrain_ret() jumps to
			 * __x86_return_thunk(), but objtool can't find
			 * the thunk's starting RET instruction,
			 * because the RET is also in the middle of
			 * another instruction.  Objtool only knows
			 * about the outer instruction.
			 */
			if (sym && sym->embedded_insn) {
				add_return_call(file, insn, false);
				continue;
			}

			/*
			 * GCOV/KCOV dead code can jump to the end of
			 * the function/section.
			 */
			if (file->ignore_unreachables && func &&
			    dest_sec == insn->sec &&
			    dest_off == func->offset + func->len)
				continue;

			ERROR_INSN(insn, "can't find jump dest instruction at %s",
				   offstr(dest_sec, dest_off));
			return -1;
		}

		/* Prefer the destination instruction's own symbol, if any. */
		if (!dest_sym || is_sec_sym(dest_sym)) {
			dest_sym = dest_insn->sym;
			if (!dest_sym)
				goto set_jump_dest;
		}

		if (dest_sym->retpoline_thunk && dest_insn->offset == dest_sym->offset) {
			if (add_retpoline_call(file, insn))
				return -1;
			continue;
		}

		if (dest_sym->return_thunk && dest_insn->offset == dest_sym->offset) {
			add_return_call(file, insn, true);
			continue;
		}

		/* Same parent function: an ordinary intra-function jump. */
		if (!insn->sym || insn->sym->pfunc == dest_sym->pfunc)
			goto set_jump_dest;

		/*
		 * Internal cross-function jump.
		 */

		if (is_first_func_insn(file, dest_insn)) {
			/* Internal sibling call */
			if (add_call_dest(file, insn, dest_sym, true))
				return -1;
			continue;
		}

set_jump_dest:
		insn->jump_dest = dest_insn;
	}

	return 0;
}
1681 
find_call_destination(struct section * sec,unsigned long offset)1682 static struct symbol *find_call_destination(struct section *sec, unsigned long offset)
1683 {
1684 	struct symbol *call_dest;
1685 
1686 	call_dest = find_func_by_offset(sec, offset);
1687 	if (!call_dest)
1688 		call_dest = find_symbol_by_offset(sec, offset);
1689 
1690 	return call_dest;
1691 }
1692 
/*
 * Find the destination instructions for all calls.
 *
 * Handles four flavors of call target: no relocation (direct call decoded
 * from the instruction), a section-relative relocation, a retpoline thunk,
 * and a plain symbol relocation.  Returns 0 on success, -1 on error.
 */
static int add_call_destinations(struct objtool_file *file)
{
	struct instruction *insn;
	unsigned long dest_off;
	struct symbol *dest;
	struct reloc *reloc;

	for_each_insn(file, insn) {
		struct symbol *func = insn_func(insn);
		if (insn->type != INSN_CALL)
			continue;

		reloc = insn_reloc(file, insn);
		if (!reloc) {
			/* Direct call: destination is encoded in the bytes. */
			dest_off = arch_jump_destination(insn);
			dest = find_call_destination(insn->sec, dest_off);

			if (add_call_dest(file, insn, dest, false))
				return -1;

			/* Don't warn about calls inside ignored functions. */
			if (func && func->ignore)
				continue;

			if (!insn_call_dest(insn)) {
				ERROR_INSN(insn, "unannotated intra-function call");
				return -1;
			}

			if (func && !is_func_sym(insn_call_dest(insn))) {
				ERROR_INSN(insn, "unsupported call to non-function");
				return -1;
			}

		} else if (is_sec_sym(reloc->sym)) {
			/* Section-relative reloc: resolve through the addend. */
			dest_off = arch_insn_adjusted_addend(insn, reloc);
			dest = find_call_destination(reloc->sym->sec, dest_off);
			if (!dest) {
				ERROR_INSN(insn, "can't find call dest symbol at %s+0x%lx",
					   reloc->sym->sec->name, dest_off);
				return -1;
			}

			if (add_call_dest(file, insn, dest, false))
				return -1;

		} else if (reloc->sym->retpoline_thunk) {
			if (add_retpoline_call(file, insn))
				return -1;

		} else {
			if (add_call_dest(file, insn, reloc->sym, false))
				return -1;
		}
	}

	return 0;
}
1753 
1754 /*
1755  * The .alternatives section requires some extra special care over and above
1756  * other special sections because alternatives are patched in place.
1757  */
handle_group_alt(struct objtool_file * file,struct special_alt * special_alt,struct instruction * orig_insn,struct instruction ** new_insn)1758 static int handle_group_alt(struct objtool_file *file,
1759 			    struct special_alt *special_alt,
1760 			    struct instruction *orig_insn,
1761 			    struct instruction **new_insn)
1762 {
1763 	struct instruction *last_new_insn = NULL, *insn, *nop = NULL;
1764 	struct alt_group *orig_alt_group, *new_alt_group;
1765 	unsigned long dest_off;
1766 
1767 	orig_alt_group = orig_insn->alt_group;
1768 	if (!orig_alt_group) {
1769 		struct instruction *last_orig_insn = NULL;
1770 
1771 		orig_alt_group = calloc(1, sizeof(*orig_alt_group));
1772 		if (!orig_alt_group) {
1773 			ERROR_GLIBC("calloc");
1774 			return -1;
1775 		}
1776 		orig_alt_group->cfi = calloc(special_alt->orig_len,
1777 					     sizeof(struct cfi_state *));
1778 		if (!orig_alt_group->cfi) {
1779 			ERROR_GLIBC("calloc");
1780 			return -1;
1781 		}
1782 
1783 		insn = orig_insn;
1784 		sec_for_each_insn_from(file, insn) {
1785 			if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
1786 				break;
1787 
1788 			insn->alt_group = orig_alt_group;
1789 			last_orig_insn = insn;
1790 		}
1791 		orig_alt_group->orig_group = NULL;
1792 		orig_alt_group->first_insn = orig_insn;
1793 		orig_alt_group->last_insn = last_orig_insn;
1794 		orig_alt_group->nop = NULL;
1795 		orig_alt_group->ignore = orig_insn->ignore_alts;
1796 		orig_alt_group->feature = 0;
1797 	} else {
1798 		if (orig_alt_group->last_insn->offset + orig_alt_group->last_insn->len -
1799 		    orig_alt_group->first_insn->offset != special_alt->orig_len) {
1800 			ERROR_INSN(orig_insn, "weirdly overlapping alternative! %ld != %d",
1801 				   orig_alt_group->last_insn->offset +
1802 				   orig_alt_group->last_insn->len -
1803 				   orig_alt_group->first_insn->offset,
1804 				   special_alt->orig_len);
1805 			return -1;
1806 		}
1807 	}
1808 
1809 	new_alt_group = calloc(1, sizeof(*new_alt_group));
1810 	if (!new_alt_group) {
1811 		ERROR_GLIBC("calloc");
1812 		return -1;
1813 	}
1814 
1815 	if (special_alt->new_len < special_alt->orig_len) {
1816 		/*
1817 		 * Insert a fake nop at the end to make the replacement
1818 		 * alt_group the same size as the original.  This is needed to
1819 		 * allow propagate_alt_cfi() to do its magic.  When the last
1820 		 * instruction affects the stack, the instruction after it (the
1821 		 * nop) will propagate the new state to the shared CFI array.
1822 		 */
1823 		nop = calloc(1, sizeof(*nop));
1824 		if (!nop) {
1825 			ERROR_GLIBC("calloc");
1826 			return -1;
1827 		}
1828 		memset(nop, 0, sizeof(*nop));
1829 
1830 		nop->sec = special_alt->new_sec;
1831 		nop->offset = special_alt->new_off + special_alt->new_len;
1832 		nop->len = special_alt->orig_len - special_alt->new_len;
1833 		nop->type = INSN_NOP;
1834 		nop->sym = orig_insn->sym;
1835 		nop->alt_group = new_alt_group;
1836 		nop->fake = 1;
1837 	}
1838 
1839 	if (!special_alt->new_len) {
1840 		*new_insn = nop;
1841 		goto end;
1842 	}
1843 
1844 	insn = *new_insn;
1845 	sec_for_each_insn_from(file, insn) {
1846 		struct reloc *alt_reloc;
1847 
1848 		if (insn->offset >= special_alt->new_off + special_alt->new_len)
1849 			break;
1850 
1851 		last_new_insn = insn;
1852 
1853 		insn->sym = orig_insn->sym;
1854 		insn->alt_group = new_alt_group;
1855 
1856 		/*
1857 		 * Since alternative replacement code is copy/pasted by the
1858 		 * kernel after applying relocations, generally such code can't
1859 		 * have relative-address relocation references to outside the
1860 		 * .altinstr_replacement section, unless the arch's
1861 		 * alternatives code can adjust the relative offsets
1862 		 * accordingly.
1863 		 */
1864 		alt_reloc = insn_reloc(file, insn);
1865 		if (alt_reloc && arch_pc_relative_reloc(alt_reloc) &&
1866 		    !arch_support_alt_relocation(special_alt, insn, alt_reloc)) {
1867 
1868 			ERROR_INSN(insn, "unsupported relocation in alternatives section");
1869 			return -1;
1870 		}
1871 
1872 		if (!is_static_jump(insn))
1873 			continue;
1874 
1875 		if (!insn->immediate)
1876 			continue;
1877 
1878 		dest_off = arch_jump_destination(insn);
1879 		if (dest_off == special_alt->new_off + special_alt->new_len) {
1880 			insn->jump_dest = next_insn_same_sec(file, orig_alt_group->last_insn);
1881 			if (!insn->jump_dest) {
1882 				ERROR_INSN(insn, "can't find alternative jump destination");
1883 				return -1;
1884 			}
1885 		}
1886 	}
1887 
1888 	if (!last_new_insn) {
1889 		ERROR_FUNC(special_alt->new_sec, special_alt->new_off,
1890 			   "can't find last new alternative instruction");
1891 		return -1;
1892 	}
1893 
1894 end:
1895 	new_alt_group->orig_group = orig_alt_group;
1896 	new_alt_group->first_insn = *new_insn;
1897 	new_alt_group->last_insn = last_new_insn;
1898 	new_alt_group->nop = nop;
1899 	new_alt_group->ignore = (*new_insn)->ignore_alts;
1900 	new_alt_group->cfi = orig_alt_group->cfi;
1901 	new_alt_group->feature = special_alt->feature;
1902 	return 0;
1903 }
1904 
1905 /*
1906  * A jump table entry can either convert a nop to a jump or a jump to a nop.
1907  * If the original instruction is a jump, make the alt entry an effective nop
1908  * by just skipping the original instruction.
1909  */
handle_jump_alt(struct objtool_file * file,struct special_alt * special_alt,struct instruction * orig_insn,struct instruction ** new_insn)1910 static int handle_jump_alt(struct objtool_file *file,
1911 			   struct special_alt *special_alt,
1912 			   struct instruction *orig_insn,
1913 			   struct instruction **new_insn)
1914 {
1915 	if (orig_insn->type != INSN_JUMP_UNCONDITIONAL &&
1916 	    orig_insn->type != INSN_NOP) {
1917 
1918 		ERROR_INSN(orig_insn, "unsupported instruction at jump label");
1919 		return -1;
1920 	}
1921 
1922 	if (opts.hack_jump_label && special_alt->key_addend & 2) {
1923 		struct reloc *reloc = insn_reloc(file, orig_insn);
1924 
1925 		if (reloc)
1926 			set_reloc_type(file->elf, reloc, R_NONE);
1927 
1928 		if (elf_write_insn(file->elf, orig_insn->sec,
1929 				   orig_insn->offset, orig_insn->len,
1930 				   arch_nop_insn(orig_insn->len))) {
1931 			return -1;
1932 		}
1933 
1934 		orig_insn->type = INSN_NOP;
1935 	}
1936 
1937 	if (orig_insn->type == INSN_NOP) {
1938 		if (orig_insn->len == 2)
1939 			file->jl_nop_short++;
1940 		else
1941 			file->jl_nop_long++;
1942 
1943 		return 0;
1944 	}
1945 
1946 	if (orig_insn->len == 2)
1947 		file->jl_short++;
1948 	else
1949 		file->jl_long++;
1950 
1951 	*new_insn = next_insn_same_sec(file, orig_insn);
1952 	return 0;
1953 }
1954 
/*
 * Read all the special sections which have alternate instructions which can be
 * patched in or redirected to at runtime.  Each instruction having alternate
 * instruction(s) has them added to its insn->alts list, which will be
 * traversed in validate_branch().
 */
static int add_special_section_alts(struct objtool_file *file)
{
	struct list_head special_alts;
	struct instruction *orig_insn, *new_insn;
	struct special_alt *special_alt, *tmp;
	enum alternative_type alt_type;
	struct alternative *alt;
	struct alternative *a;

	if (special_get_alts(file->elf, &special_alts))
		return -1;

	list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {

		orig_insn = find_insn(file, special_alt->orig_sec,
				      special_alt->orig_off);
		if (!orig_insn) {
			ERROR_FUNC(special_alt->orig_sec, special_alt->orig_off,
				   "special: can't find orig instruction");
			return -1;
		}

		/* Zero-length group entries have no replacement instruction. */
		new_insn = NULL;
		if (!special_alt->group || special_alt->new_len) {
			new_insn = find_insn(file, special_alt->new_sec,
					     special_alt->new_off);
			if (!new_insn) {
				ERROR_FUNC(special_alt->new_sec, special_alt->new_off,
					   "special: can't find new instruction");
				return -1;
			}
		}

		if (special_alt->group) {
			if (!special_alt->orig_len) {
				/*
				 * NOTE(review): this continue skips the
				 * list_del()/free() below, leaking the entry;
				 * harmless for a short-lived tool, but worth
				 * confirming it is intentional.
				 */
				ERROR_INSN(orig_insn, "empty alternative entry");
				continue;
			}

			if (handle_group_alt(file, special_alt, orig_insn, &new_insn))
				return -1;

			alt_type = ALT_TYPE_INSTRUCTIONS;

		} else if (special_alt->jump_or_nop) {
			if (handle_jump_alt(file, special_alt, orig_insn, &new_insn))
				return -1;

			alt_type = ALT_TYPE_JUMP_TABLE;
		} else {
			alt_type = ALT_TYPE_EX_TABLE;
		}

		alt = calloc(1, sizeof(*alt));
		if (!alt) {
			ERROR_GLIBC("calloc");
			return -1;
		}

		alt->insn = new_insn;
		alt->type = alt_type;
		alt->next = NULL;

		/*
		 * Store alternatives in the same order they have been
		 * defined.
		 */
		if (!orig_insn->alts) {
			orig_insn->alts = alt;
		} else {
			for (a = orig_insn->alts; a->next; a = a->next)
				;
			a->next = alt;
		}

		list_del(&special_alt->list);
		free(special_alt);
	}

	if (opts.stats) {
		printf("jl\\\tNOP\tJMP\n");
		printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short);
		printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long);
	}

	return 0;
}
2048 
arch_jump_table_sym_offset(struct reloc * reloc,struct reloc * table)2049 __weak unsigned long arch_jump_table_sym_offset(struct reloc *reloc, struct reloc *table)
2050 {
2051 	return reloc->sym->offset + reloc_addend(reloc);
2052 }
2053 
add_jump_table(struct objtool_file * file,struct instruction * insn)2054 static int add_jump_table(struct objtool_file *file, struct instruction *insn)
2055 {
2056 	unsigned long table_size = insn_jump_table_size(insn);
2057 	struct symbol *pfunc = insn_func(insn)->pfunc;
2058 	struct reloc *table = insn_jump_table(insn);
2059 	struct instruction *dest_insn;
2060 	unsigned int prev_offset = 0;
2061 	struct reloc *reloc = table;
2062 	struct alternative *alt;
2063 	unsigned long sym_offset;
2064 
2065 	/*
2066 	 * Each @reloc is a switch table relocation which points to the target
2067 	 * instruction.
2068 	 */
2069 	for_each_reloc_from(table->sec, reloc) {
2070 
2071 		/* Check for the end of the table: */
2072 		if (table_size && reloc_offset(reloc) - reloc_offset(table) >= table_size)
2073 			break;
2074 		if (reloc != table && is_jump_table(reloc))
2075 			break;
2076 
2077 		/* Make sure the table entries are consecutive: */
2078 		if (prev_offset && reloc_offset(reloc) != prev_offset + arch_reloc_size(reloc))
2079 			break;
2080 
2081 		sym_offset = arch_jump_table_sym_offset(reloc, table);
2082 
2083 		/* Detect function pointers from contiguous objects: */
2084 		if (reloc->sym->sec == pfunc->sec && sym_offset == pfunc->offset)
2085 			break;
2086 
2087 		/*
2088 		 * Clang sometimes leaves dangling unused jump table entries
2089 		 * which point to the end of the function.  Ignore them.
2090 		 */
2091 		if (reloc->sym->sec == pfunc->sec &&
2092 		    sym_offset == pfunc->offset + pfunc->len)
2093 			goto next;
2094 
2095 		dest_insn = find_insn(file, reloc->sym->sec, sym_offset);
2096 		if (!dest_insn)
2097 			break;
2098 
2099 		/* Make sure the destination is in the same function: */
2100 		if (!insn_func(dest_insn) || insn_func(dest_insn)->pfunc != pfunc)
2101 			break;
2102 
2103 		alt = calloc(1, sizeof(*alt));
2104 		if (!alt) {
2105 			ERROR_GLIBC("calloc");
2106 			return -1;
2107 		}
2108 
2109 		alt->insn = dest_insn;
2110 		alt->next = insn->alts;
2111 		insn->alts = alt;
2112 next:
2113 		prev_offset = reloc_offset(reloc);
2114 	}
2115 
2116 	if (!prev_offset) {
2117 		ERROR_INSN(insn, "can't find switch jump table");
2118 		return -1;
2119 	}
2120 
2121 	return 0;
2122 }
2123 
2124 /*
2125  * find_jump_table() - Given a dynamic jump, find the switch jump table
2126  * associated with it.
2127  */
find_jump_table(struct objtool_file * file,struct symbol * func,struct instruction * insn)2128 static void find_jump_table(struct objtool_file *file, struct symbol *func,
2129 			    struct instruction *insn)
2130 {
2131 	struct reloc *table_reloc;
2132 	struct instruction *dest_insn, *orig_insn = insn;
2133 	unsigned long table_size;
2134 	unsigned long sym_offset;
2135 
2136 	/*
2137 	 * Backward search using the @first_jump_src links, these help avoid
2138 	 * much of the 'in between' code. Which avoids us getting confused by
2139 	 * it.
2140 	 */
2141 	for (;
2142 	     insn && insn_func(insn) && insn_func(insn)->pfunc == func;
2143 	     insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) {
2144 
2145 		if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
2146 			break;
2147 
2148 		/* allow small jumps within the range */
2149 		if (insn->type == INSN_JUMP_UNCONDITIONAL &&
2150 		    insn->jump_dest &&
2151 		    (insn->jump_dest->offset <= insn->offset ||
2152 		     insn->jump_dest->offset > orig_insn->offset))
2153 			break;
2154 
2155 		table_reloc = arch_find_switch_table(file, insn, &table_size);
2156 		if (!table_reloc)
2157 			continue;
2158 
2159 		sym_offset = table_reloc->sym->offset + reloc_addend(table_reloc);
2160 
2161 		dest_insn = find_insn(file, table_reloc->sym->sec, sym_offset);
2162 		if (!dest_insn || !insn_func(dest_insn) || insn_func(dest_insn)->pfunc != func)
2163 			continue;
2164 
2165 		set_jump_table(table_reloc);
2166 		orig_insn->_jump_table = table_reloc;
2167 		orig_insn->_jump_table_size = table_size;
2168 
2169 		break;
2170 	}
2171 }
2172 
2173 /*
2174  * First pass: Mark the head of each jump table so that in the next pass,
2175  * we know when a given jump table ends and the next one starts.
2176  */
mark_func_jump_tables(struct objtool_file * file,struct symbol * func)2177 static void mark_func_jump_tables(struct objtool_file *file,
2178 				    struct symbol *func)
2179 {
2180 	struct instruction *insn, *last = NULL;
2181 
2182 	func_for_each_insn(file, func, insn) {
2183 		if (!last)
2184 			last = insn;
2185 
2186 		/*
2187 		 * Store back-pointers for forward jumps such
2188 		 * that find_jump_table() can back-track using those and
2189 		 * avoid some potentially confusing code.
2190 		 */
2191 		if (insn->jump_dest &&
2192 		    insn->jump_dest->offset > insn->offset &&
2193 		    !insn->jump_dest->first_jump_src) {
2194 
2195 			insn->jump_dest->first_jump_src = insn;
2196 			last = insn->jump_dest;
2197 		}
2198 
2199 		if (insn->type != INSN_JUMP_DYNAMIC)
2200 			continue;
2201 
2202 		find_jump_table(file, func, insn);
2203 	}
2204 }
2205 
add_func_jump_tables(struct objtool_file * file,struct symbol * func)2206 static int add_func_jump_tables(struct objtool_file *file,
2207 				  struct symbol *func)
2208 {
2209 	struct instruction *insn;
2210 
2211 	func_for_each_insn(file, func, insn) {
2212 		if (!insn_jump_table(insn))
2213 			continue;
2214 
2215 		if (add_jump_table(file, insn))
2216 			return -1;
2217 	}
2218 
2219 	return 0;
2220 }
2221 
2222 /*
2223  * For some switch statements, gcc generates a jump table in the .rodata
2224  * section which contains a list of addresses within the function to jump to.
2225  * This finds these jump tables and adds them to the insn->alts lists.
2226  */
add_jump_table_alts(struct objtool_file * file)2227 static int add_jump_table_alts(struct objtool_file *file)
2228 {
2229 	struct symbol *func;
2230 
2231 	if (!file->rodata)
2232 		return 0;
2233 
2234 	for_each_sym(file->elf, func) {
2235 		if (!is_func_sym(func) || func->alias != func)
2236 			continue;
2237 
2238 		mark_func_jump_tables(file, func);
2239 		if (add_func_jump_tables(file, func))
2240 			return -1;
2241 	}
2242 
2243 	return 0;
2244 }
2245 
set_func_state(struct cfi_state * state)2246 static void set_func_state(struct cfi_state *state)
2247 {
2248 	state->cfa = initial_func_cfi.cfa;
2249 	memcpy(&state->regs, &initial_func_cfi.regs,
2250 	       CFI_NUM_REGS * sizeof(struct cfi_reg));
2251 	state->stack_size = initial_func_cfi.cfa.offset;
2252 	state->type = UNWIND_HINT_TYPE_CALL;
2253 }
2254 
/*
 * read_unwind_hints() - apply UNWIND_HINT annotations from the
 * .discard.unwind_hints section to their instructions.
 *
 * Each entry is a struct unwind_hint whose location (via the section's
 * relocations) identifies the annotated instruction.  Depending on the hint
 * type this either sets a flag on the instruction (save/restore), pins a
 * canned CFI state (undefined/func), or builds a CFI state from the hint's
 * sp_reg/sp_offset and interns it via cfi_hash_find_or_add().
 *
 * Returns 0 on success, -1 on malformed input.
 */
static int read_unwind_hints(struct objtool_file *file)
{
	struct cfi_state cfi = init_cfi;
	struct section *sec;
	struct unwind_hint *hint;
	struct instruction *insn;
	struct reloc *reloc;
	unsigned long offset;
	int i;

	sec = find_section_by_name(file->elf, ".discard.unwind_hints");
	if (!sec)
		return 0;

	if (!sec->rsec) {
		ERROR("missing .rela.discard.unwind_hints section");
		return -1;
	}

	/* The section must be a whole number of hint entries: */
	if (sec_size(sec) % sizeof(struct unwind_hint)) {
		ERROR("struct unwind_hint size mismatch");
		return -1;
	}

	file->hints = true;

	for (i = 0; i < sec_size(sec) / sizeof(struct unwind_hint); i++) {
		hint = (struct unwind_hint *)sec->data->d_buf + i;

		reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
		if (!reloc) {
			ERROR("can't find reloc for unwind_hints[%d]", i);
			return -1;
		}

		offset = reloc->sym->offset + reloc_addend(reloc);

		insn = find_insn(file, reloc->sym->sec, offset);
		if (!insn) {
			ERROR("can't find insn for unwind_hints[%d]", i);
			return -1;
		}

		insn->hint = true;

		if (hint->type == UNWIND_HINT_TYPE_UNDEFINED) {
			insn->cfi = &force_undefined_cfi;
			continue;
		}

		/* SAVE is a marker, not a real hint — clear insn->hint: */
		if (hint->type == UNWIND_HINT_TYPE_SAVE) {
			insn->hint = false;
			insn->save = true;
			continue;
		}

		if (hint->type == UNWIND_HINT_TYPE_RESTORE) {
			insn->restore = true;
			continue;
		}

		if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) {
			struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset);

			if (sym && is_global_sym(sym)) {
				/* With IBT, a hinted global entry must start with ENDBR: */
				if (opts.ibt && insn->type != INSN_ENDBR && !insn->noendbr) {
					ERROR_INSN(insn, "UNWIND_HINT_IRET_REGS without ENDBR");
					return -1;
				}
			}
		}

		if (hint->type == UNWIND_HINT_TYPE_FUNC) {
			insn->cfi = &func_cfi;
			continue;
		}

		/* Start from the insn's current CFI state, if it has one: */
		if (insn->cfi)
			cfi = *(insn->cfi);

		if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) {
			ERROR_INSN(insn, "unsupported unwind_hint sp base reg %d", hint->sp_reg);
			return -1;
		}

		cfi.cfa.offset = bswap_if_needed(file->elf, hint->sp_offset);
		cfi.type = hint->type;
		cfi.signal = hint->signal;

		insn->cfi = cfi_hash_find_or_add(&cfi);
	}

	return 0;
}
2349 
/*
 * read_annotate() - walk the .discard.annotate_insn section and invoke
 * @func for each (annotation type, instruction) pair found.
 *
 * Returns 0 on success (or if the section is absent), -1 on error.
 */
static int read_annotate(struct objtool_file *file,
			 int (*func)(struct objtool_file *file, int type, struct instruction *insn))
{
	struct section *sec;
	struct instruction *insn;
	struct reloc *reloc;
	uint64_t offset;
	int type;

	sec = find_section_by_name(file->elf, ".discard.annotate_insn");
	if (!sec)
		return 0;

	/* No relocations means nothing to annotate: */
	if (!sec->rsec)
		return 0;

	/* Fix up a bogus entry size in place; warn once in verbose mode: */
	if (sec->sh.sh_entsize != 8) {
		static bool warned = false;
		if (!warned && opts.verbose) {
			WARN("%s: dodgy linker, sh_entsize != 8", sec->name);
			warned = true;
		}
		sec->sh.sh_entsize = 8;
	}

	/* Every annotation entry must have a matching reloc: */
	if (sec_num_entries(sec) != sec_num_entries(sec->rsec)) {
		ERROR("bad .discard.annotate_insn section: missing relocs");
		return -1;
	}

	for_each_reloc(sec->rsec, reloc) {
		type = annotype(file->elf, sec, reloc);
		offset = reloc->sym->offset + reloc_addend(reloc);
		insn = find_insn(file, reloc->sym->sec, offset);

		if (!insn) {
			ERROR("bad .discard.annotate_insn entry: %d of type %d", reloc_idx(reloc), type);
			return -1;
		}

		if (func(file, type, insn))
			return -1;
	}

	return 0;
}
2396 
__annotate_early(struct objtool_file * file,int type,struct instruction * insn)2397 static int __annotate_early(struct objtool_file *file, int type, struct instruction *insn)
2398 {
2399 	switch (type) {
2400 
2401 	/* Must be before add_special_section_alts() */
2402 	case ANNOTYPE_IGNORE_ALTS:
2403 		insn->ignore_alts = true;
2404 		break;
2405 
2406 	/*
2407 	 * Must be before read_unwind_hints() since that needs insn->noendbr.
2408 	 */
2409 	case ANNOTYPE_NOENDBR:
2410 		insn->noendbr = 1;
2411 		break;
2412 
2413 	default:
2414 		break;
2415 	}
2416 
2417 	return 0;
2418 }
2419 
__annotate_ifc(struct objtool_file * file,int type,struct instruction * insn)2420 static int __annotate_ifc(struct objtool_file *file, int type, struct instruction *insn)
2421 {
2422 	unsigned long dest_off;
2423 
2424 	if (type != ANNOTYPE_INTRA_FUNCTION_CALL)
2425 		return 0;
2426 
2427 	if (insn->type != INSN_CALL) {
2428 		ERROR_INSN(insn, "intra_function_call not a direct call");
2429 		return -1;
2430 	}
2431 
2432 	/*
2433 	 * Treat intra-function CALLs as JMPs, but with a stack_op.
2434 	 * See add_call_destinations(), which strips stack_ops from
2435 	 * normal CALLs.
2436 	 */
2437 	insn->type = INSN_JUMP_UNCONDITIONAL;
2438 
2439 	dest_off = arch_jump_destination(insn);
2440 	insn->jump_dest = find_insn(file, insn->sec, dest_off);
2441 	if (!insn->jump_dest) {
2442 		ERROR_INSN(insn, "can't find call dest at %s+0x%lx",
2443 			   insn->sec->name, dest_off);
2444 		return -1;
2445 	}
2446 
2447 	return 0;
2448 }
2449 
__annotate_late(struct objtool_file * file,int type,struct instruction * insn)2450 static int __annotate_late(struct objtool_file *file, int type, struct instruction *insn)
2451 {
2452 	struct symbol *sym;
2453 
2454 	switch (type) {
2455 	case ANNOTYPE_NOENDBR:
2456 		/* early */
2457 		break;
2458 
2459 	case ANNOTYPE_RETPOLINE_SAFE:
2460 		if (insn->type != INSN_JUMP_DYNAMIC &&
2461 		    insn->type != INSN_CALL_DYNAMIC &&
2462 		    insn->type != INSN_RETURN &&
2463 		    insn->type != INSN_NOP) {
2464 			ERROR_INSN(insn, "retpoline_safe hint not an indirect jump/call/ret/nop");
2465 			return -1;
2466 		}
2467 
2468 		insn->retpoline_safe = true;
2469 		break;
2470 
2471 	case ANNOTYPE_INSTR_BEGIN:
2472 		insn->instr++;
2473 		break;
2474 
2475 	case ANNOTYPE_INSTR_END:
2476 		insn->instr--;
2477 		break;
2478 
2479 	case ANNOTYPE_UNRET_BEGIN:
2480 		insn->unret = 1;
2481 		break;
2482 
2483 	case ANNOTYPE_IGNORE_ALTS:
2484 		/* early */
2485 		break;
2486 
2487 	case ANNOTYPE_INTRA_FUNCTION_CALL:
2488 		/* ifc */
2489 		break;
2490 
2491 	case ANNOTYPE_REACHABLE:
2492 		insn->dead_end = false;
2493 		break;
2494 
2495 	case ANNOTYPE_NOCFI:
2496 		sym = insn->sym;
2497 		if (!sym) {
2498 			ERROR_INSN(insn, "dodgy NOCFI annotation");
2499 			return -1;
2500 		}
2501 		insn->sym->nocfi = 1;
2502 		break;
2503 
2504 	default:
2505 		ERROR_INSN(insn, "Unknown annotation type: %d", type);
2506 		return -1;
2507 	}
2508 
2509 	return 0;
2510 }
2511 
2512 /*
2513  * Return true if name matches an instrumentation function, where calls to that
2514  * function from noinstr code can safely be removed, but compilers won't do so.
2515  */
is_profiling_func(const char * name)2516 static bool is_profiling_func(const char *name)
2517 {
2518 	/*
2519 	 * Many compilers cannot disable KCOV with a function attribute.
2520 	 */
2521 	if (!strncmp(name, "__sanitizer_cov_", 16))
2522 		return true;
2523 
2524 	return false;
2525 }
2526 
/*
 * classify_symbols() - set classification flags on symbols which later
 * passes treat specially: local labels, static call trampolines,
 * retpoline/return thunks, embedded-insn symbols, ftrace entries and
 * profiling helpers.  Also records the longest symbol name seen in
 * sym_name_max_len.
 */
static int classify_symbols(struct objtool_file *file)
{
	struct symbol *func;
	size_t len;

	for_each_sym(file->elf, func) {
		/* Compiler-generated local labels start with ".L": */
		if (is_notype_sym(func) && strstarts(func->name, ".L"))
			func->local_label = true;

		/* The remaining classifications apply to globals only: */
		if (!is_global_sym(func))
			continue;

		if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR,
			     strlen(STATIC_CALL_TRAMP_PREFIX_STR)))
			func->static_call_tramp = true;

		if (arch_is_retpoline(func))
			func->retpoline_thunk = true;

		if (arch_is_rethunk(func))
			func->return_thunk = true;

		if (arch_is_embedded_insn(func))
			func->embedded_insn = true;

		if (arch_ftrace_match(func->name))
			func->fentry = true;

		if (is_profiling_func(func->name))
			func->profiling_func = true;

		len = strlen(func->name);
		if (len > sym_name_max_len)
			sym_name_max_len = len;
	}

	return 0;
}
2565 
mark_rodata(struct objtool_file * file)2566 static void mark_rodata(struct objtool_file *file)
2567 {
2568 	struct section *sec;
2569 	bool found = false;
2570 
2571 	/*
2572 	 * Search for the following rodata sections, each of which can
2573 	 * potentially contain jump tables:
2574 	 *
2575 	 * - .rodata: can contain GCC switch tables
2576 	 * - .rodata.<func>: same, if -fdata-sections is being used
2577 	 * - .data.rel.ro.c_jump_table: contains C annotated jump tables
2578 	 *
2579 	 * .rodata.str1.* sections are ignored; they don't contain jump tables.
2580 	 */
2581 	for_each_sec(file->elf, sec) {
2582 		if ((!strncmp(sec->name, ".rodata", 7) &&
2583 		     !strstr(sec->name, ".str1.")) ||
2584 		    !strncmp(sec->name, ".data.rel.ro", 12)) {
2585 			sec->rodata = true;
2586 			found = true;
2587 		}
2588 	}
2589 
2590 	file->rodata = found;
2591 }
2592 
/*
 * mark_holes() - mark instructions which don't belong to any symbol as
 * "holes".  Only relevant for linked (whole archive) runs.
 */
static void mark_holes(struct objtool_file *file)
{
	struct instruction *insn;
	bool in_hole = false;

	if (!opts.link)
		return;

	/*
	 * Whole archive runs might encounter dead code from weak symbols.
	 * This is where the linker will have dropped the weak symbol in
	 * favour of a regular symbol, but leaves the code in place.
	 */
	for_each_insn(file, insn) {
		/* Insns owned by a symbol, or outside a symbol hole, end the hole: */
		if (insn->sym || !find_symbol_hole_containing(insn->sec, insn->offset)) {
			in_hole = false;
			continue;
		}

		/* Skip function padding and pfx code */
		if (!in_hole && insn->type == INSN_NOP)
			continue;

		in_hole = true;
		insn->hole = 1;

		/*
		 * If this hole jumps to a .cold function, mark it ignore.
		 */
		if (insn->jump_dest) {
			struct symbol *dest_func = insn_func(insn->jump_dest);

			if (dest_func && dest_func->cold)
				dest_func->ignore = true;
		}
	}
}
2630 
validate_branch_enabled(void)2631 static bool validate_branch_enabled(void)
2632 {
2633 	return opts.stackval ||
2634 	       opts.orc ||
2635 	       opts.uaccess ||
2636 	       opts.checksum;
2637 }
2638 
/*
 * decode_sections() - run all per-object decode passes in dependency order.
 * The ordering constraints between passes are documented inline; reordering
 * these calls can break later passes.
 *
 * Returns 0 on success, -1 on error.
 */
static int decode_sections(struct objtool_file *file)
{
	file->klp = is_livepatch_module(file);

	mark_rodata(file);

	if (init_pv_ops(file))
		return -1;

	/*
	 * Must be before add_{jump_call}_destination.
	 */
	if (classify_symbols(file))
		return -1;

	if (decode_instructions(file))
		return -1;

	if (add_ignores(file))
		return -1;

	add_uaccess_safe(file);

	if (read_annotate(file, __annotate_early))
		return -1;

	/*
	 * Must be before add_jump_destinations(), which depends on 'func'
	 * being set for alternatives, to enable proper sibling call detection.
	 */
	if (validate_branch_enabled() || opts.noinstr || opts.hack_jump_label || opts.disas) {
		if (add_special_section_alts(file))
			return -1;
	}

	if (add_jump_destinations(file))
		return -1;

	/*
	 * Must be before add_call_destination(); it changes INSN_CALL to
	 * INSN_JUMP.
	 */
	if (read_annotate(file, __annotate_ifc))
		return -1;

	if (add_call_destinations(file))
		return -1;

	if (add_jump_table_alts(file))
		return -1;

	if (read_unwind_hints(file))
		return -1;

	/* Must be after add_jump_destinations() */
	mark_holes(file);

	/*
	 * Must be after add_call_destinations() such that it can override
	 * dead_end_function() marks.
	 */
	if (read_annotate(file, __annotate_late))
		return -1;

	return 0;
}
2705 
is_special_call(struct instruction * insn)2706 static bool is_special_call(struct instruction *insn)
2707 {
2708 	if (insn->type == INSN_CALL) {
2709 		struct symbol *dest = insn_call_dest(insn);
2710 
2711 		if (!dest)
2712 			return false;
2713 
2714 		if (dest->fentry || dest->embedded_insn)
2715 			return true;
2716 	}
2717 
2718 	return false;
2719 }
2720 
has_modified_stack_frame(struct instruction * insn,struct insn_state * state)2721 static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state)
2722 {
2723 	struct cfi_state *cfi = &state->cfi;
2724 	int i;
2725 
2726 	if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap)
2727 		return true;
2728 
2729 	if (cfi->cfa.offset != initial_func_cfi.cfa.offset)
2730 		return true;
2731 
2732 	if (cfi->stack_size != initial_func_cfi.cfa.offset)
2733 		return true;
2734 
2735 	for (i = 0; i < CFI_NUM_REGS; i++) {
2736 		if (cfi->regs[i].base != initial_func_cfi.regs[i].base ||
2737 		    cfi->regs[i].offset != initial_func_cfi.regs[i].offset)
2738 			return true;
2739 	}
2740 
2741 	return false;
2742 }
2743 
check_reg_frame_pos(const struct cfi_reg * reg,int expected_offset)2744 static bool check_reg_frame_pos(const struct cfi_reg *reg,
2745 				int expected_offset)
2746 {
2747 	return reg->base == CFI_CFA &&
2748 	       reg->offset == expected_offset;
2749 }
2750 
has_valid_stack_frame(struct insn_state * state)2751 static bool has_valid_stack_frame(struct insn_state *state)
2752 {
2753 	struct cfi_state *cfi = &state->cfi;
2754 
2755 	if (cfi->cfa.base == CFI_BP &&
2756 	    check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) &&
2757 	    check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8))
2758 		return true;
2759 
2760 	if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP)
2761 		return true;
2762 
2763 	return false;
2764 }
2765 
update_cfi_state_regs(struct instruction * insn,struct cfi_state * cfi,struct stack_op * op)2766 static int update_cfi_state_regs(struct instruction *insn,
2767 				  struct cfi_state *cfi,
2768 				  struct stack_op *op)
2769 {
2770 	struct cfi_reg *cfa = &cfi->cfa;
2771 
2772 	if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
2773 		return 0;
2774 
2775 	/* push */
2776 	if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
2777 		cfa->offset += 8;
2778 
2779 	/* pop */
2780 	if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
2781 		cfa->offset -= 8;
2782 
2783 	/* add immediate to sp */
2784 	if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD &&
2785 	    op->dest.reg == CFI_SP && op->src.reg == CFI_SP)
2786 		cfa->offset -= op->src.offset;
2787 
2788 	return 0;
2789 }
2790 
save_reg(struct cfi_state * cfi,unsigned char reg,int base,int offset)2791 static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset)
2792 {
2793 	if (arch_callee_saved_reg(reg) &&
2794 	    cfi->regs[reg].base == CFI_UNDEFINED) {
2795 		cfi->regs[reg].base = base;
2796 		cfi->regs[reg].offset = offset;
2797 	}
2798 }
2799 
restore_reg(struct cfi_state * cfi,unsigned char reg)2800 static void restore_reg(struct cfi_state *cfi, unsigned char reg)
2801 {
2802 	cfi->regs[reg].base = initial_func_cfi.regs[reg].base;
2803 	cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset;
2804 }
2805 
2806 /*
2807  * A note about DRAP stack alignment:
2808  *
2809  * GCC has the concept of a DRAP register, which is used to help keep track of
2810  * the stack pointer when aligning the stack.  r10 or r13 is used as the DRAP
2811  * register.  The typical DRAP pattern is:
2812  *
2813  *   4c 8d 54 24 08		lea    0x8(%rsp),%r10
2814  *   48 83 e4 c0		and    $0xffffffffffffffc0,%rsp
2815  *   41 ff 72 f8		pushq  -0x8(%r10)
2816  *   55				push   %rbp
2817  *   48 89 e5			mov    %rsp,%rbp
2818  *				(more pushes)
2819  *   41 52			push   %r10
2820  *				...
2821  *   41 5a			pop    %r10
2822  *				(more pops)
2823  *   5d				pop    %rbp
2824  *   49 8d 62 f8		lea    -0x8(%r10),%rsp
2825  *   c3				retq
2826  *
2827  * There are some variations in the epilogues, like:
2828  *
2829  *   5b				pop    %rbx
2830  *   41 5a			pop    %r10
2831  *   41 5c			pop    %r12
2832  *   41 5d			pop    %r13
2833  *   41 5e			pop    %r14
2834  *   c9				leaveq
2835  *   49 8d 62 f8		lea    -0x8(%r10),%rsp
2836  *   c3				retq
2837  *
2838  * and:
2839  *
2840  *   4c 8b 55 e8		mov    -0x18(%rbp),%r10
2841  *   48 8b 5d e0		mov    -0x20(%rbp),%rbx
2842  *   4c 8b 65 f0		mov    -0x10(%rbp),%r12
2843  *   4c 8b 6d f8		mov    -0x8(%rbp),%r13
2844  *   c9				leaveq
2845  *   49 8d 62 f8		lea    -0x8(%r10),%rsp
2846  *   c3				retq
2847  *
2848  * Sometimes r13 is used as the DRAP register, in which case it's saved and
2849  * restored beforehand:
2850  *
2851  *   41 55			push   %r13
2852  *   4c 8d 6c 24 10		lea    0x10(%rsp),%r13
2853  *   48 83 e4 f0		and    $0xfffffffffffffff0,%rsp
2854  *				...
2855  *   49 8d 65 f0		lea    -0x10(%r13),%rsp
2856  *   41 5d			pop    %r13
2857  *   c3				retq
2858  */
update_cfi_state(struct instruction * insn,struct instruction * next_insn,struct cfi_state * cfi,struct stack_op * op)2859 static int update_cfi_state(struct instruction *insn,
2860 			    struct instruction *next_insn,
2861 			    struct cfi_state *cfi, struct stack_op *op)
2862 {
2863 	struct cfi_reg *cfa = &cfi->cfa;
2864 	struct cfi_reg *regs = cfi->regs;
2865 
2866 	/* ignore UNWIND_HINT_UNDEFINED regions */
2867 	if (cfi->force_undefined)
2868 		return 0;
2869 
2870 	/* stack operations don't make sense with an undefined CFA */
2871 	if (cfa->base == CFI_UNDEFINED) {
2872 		if (insn_func(insn)) {
2873 			WARN_INSN(insn, "undefined stack state");
2874 			return 1;
2875 		}
2876 		return 0;
2877 	}
2878 
2879 	if (cfi->type == UNWIND_HINT_TYPE_REGS ||
2880 	    cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL)
2881 		return update_cfi_state_regs(insn, cfi, op);
2882 
2883 	switch (op->dest.type) {
2884 
2885 	case OP_DEST_REG:
2886 		switch (op->src.type) {
2887 
2888 		case OP_SRC_REG:
2889 			if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
2890 			    cfa->base == CFI_SP &&
2891 			    check_reg_frame_pos(&regs[CFI_BP], -cfa->offset)) {
2892 
2893 				/* mov %rsp, %rbp */
2894 				cfa->base = op->dest.reg;
2895 				cfi->bp_scratch = false;
2896 			}
2897 
2898 			else if (op->src.reg == CFI_SP &&
2899 				 op->dest.reg == CFI_BP && cfi->drap) {
2900 
2901 				/* drap: mov %rsp, %rbp */
2902 				regs[CFI_BP].base = CFI_BP;
2903 				regs[CFI_BP].offset = -cfi->stack_size;
2904 				cfi->bp_scratch = false;
2905 			}
2906 
2907 			else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
2908 
2909 				/*
2910 				 * mov %rsp, %reg
2911 				 *
2912 				 * This is needed for the rare case where GCC
2913 				 * does:
2914 				 *
2915 				 *   mov    %rsp, %rax
2916 				 *   ...
2917 				 *   mov    %rax, %rsp
2918 				 */
2919 				cfi->vals[op->dest.reg].base = CFI_CFA;
2920 				cfi->vals[op->dest.reg].offset = -cfi->stack_size;
2921 			}
2922 
2923 			else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
2924 				 (cfa->base == CFI_BP || cfa->base == cfi->drap_reg)) {
2925 
2926 				/*
2927 				 * mov %rbp, %rsp
2928 				 *
2929 				 * Restore the original stack pointer (Clang).
2930 				 */
2931 				cfi->stack_size = -cfi->regs[CFI_BP].offset;
2932 			}
2933 
2934 			else if (op->dest.reg == cfa->base) {
2935 
2936 				/* mov %reg, %rsp */
2937 				if (cfa->base == CFI_SP &&
2938 				    cfi->vals[op->src.reg].base == CFI_CFA) {
2939 
2940 					/*
2941 					 * This is needed for the rare case
2942 					 * where GCC does something dumb like:
2943 					 *
2944 					 *   lea    0x8(%rsp), %rcx
2945 					 *   ...
2946 					 *   mov    %rcx, %rsp
2947 					 */
2948 					cfa->offset = -cfi->vals[op->src.reg].offset;
2949 					cfi->stack_size = cfa->offset;
2950 
2951 				} else if (cfa->base == CFI_SP &&
2952 					   cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
2953 					   cfi->vals[op->src.reg].offset == cfa->offset) {
2954 
2955 					/*
2956 					 * Stack swizzle:
2957 					 *
2958 					 * 1: mov %rsp, (%[tos])
2959 					 * 2: mov %[tos], %rsp
2960 					 *    ...
2961 					 * 3: pop %rsp
2962 					 *
2963 					 * Where:
2964 					 *
2965 					 * 1 - places a pointer to the previous
2966 					 *     stack at the Top-of-Stack of the
2967 					 *     new stack.
2968 					 *
2969 					 * 2 - switches to the new stack.
2970 					 *
2971 					 * 3 - pops the Top-of-Stack to restore
2972 					 *     the original stack.
2973 					 *
2974 					 * Note: we set base to SP_INDIRECT
2975 					 * here and preserve offset. Therefore
2976 					 * when the unwinder reaches ToS it
2977 					 * will dereference SP and then add the
2978 					 * offset to find the next frame, IOW:
2979 					 * (%rsp) + offset.
2980 					 */
2981 					cfa->base = CFI_SP_INDIRECT;
2982 
2983 				} else {
2984 					cfa->base = CFI_UNDEFINED;
2985 					cfa->offset = 0;
2986 				}
2987 			}
2988 
2989 			else if (op->dest.reg == CFI_SP &&
2990 				 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
2991 				 cfi->vals[op->src.reg].offset == cfa->offset) {
2992 
2993 				/*
2994 				 * The same stack swizzle case 2) as above. But
2995 				 * because we can't change cfa->base, case 3)
2996 				 * will become a regular POP. Pretend we're a
2997 				 * PUSH so things don't go unbalanced.
2998 				 */
2999 				cfi->stack_size += 8;
3000 			}
3001 
3002 			else if (cfi->vals[op->src.reg].base == CFI_CFA) {
3003 				/*
3004 				 * Clang RSP musical chairs:
3005 				 *
3006 				 *   mov %rsp, %rdx [handled above]
3007 				 *   ...
3008 				 *   mov %rdx, %rbx [handled here]
3009 				 *   ...
3010 				 *   mov %rbx, %rsp [handled above]
3011 				 */
3012 				cfi->vals[op->dest.reg].base = CFI_CFA;
3013 				cfi->vals[op->dest.reg].offset = cfi->vals[op->src.reg].offset;
3014 			}
3015 
3016 
3017 			break;
3018 
3019 		case OP_SRC_ADD:
3020 			if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) {
3021 
3022 				/* add imm, %rsp */
3023 				cfi->stack_size -= op->src.offset;
3024 				if (cfa->base == CFI_SP)
3025 					cfa->offset -= op->src.offset;
3026 				break;
3027 			}
3028 
3029 			if (op->dest.reg == CFI_BP && op->src.reg == CFI_SP &&
3030 			    insn->sym->frame_pointer) {
3031 				/* addi.d fp,sp,imm on LoongArch */
3032 				if (cfa->base == CFI_SP && cfa->offset == op->src.offset) {
3033 					cfa->base = CFI_BP;
3034 					cfa->offset = 0;
3035 				}
3036 				break;
3037 			}
3038 
3039 			if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {
3040 				/* addi.d sp,fp,imm on LoongArch */
3041 				if (cfa->base == CFI_BP && cfa->offset == 0) {
3042 					if (insn->sym->frame_pointer) {
3043 						cfa->base = CFI_SP;
3044 						cfa->offset = -op->src.offset;
3045 					}
3046 				} else {
3047 					/* lea disp(%rbp), %rsp */
3048 					cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
3049 				}
3050 				break;
3051 			}
3052 
3053 			if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
3054 
3055 				/* drap: lea disp(%rsp), %drap */
3056 				cfi->drap_reg = op->dest.reg;
3057 
3058 				/*
3059 				 * lea disp(%rsp), %reg
3060 				 *
3061 				 * This is needed for the rare case where GCC
3062 				 * does something dumb like:
3063 				 *
3064 				 *   lea    0x8(%rsp), %rcx
3065 				 *   ...
3066 				 *   mov    %rcx, %rsp
3067 				 */
3068 				cfi->vals[op->dest.reg].base = CFI_CFA;
3069 				cfi->vals[op->dest.reg].offset = \
3070 					-cfi->stack_size + op->src.offset;
3071 
3072 				break;
3073 			}
3074 
3075 			if (cfi->drap && op->dest.reg == CFI_SP &&
3076 			    op->src.reg == cfi->drap_reg) {
3077 
3078 				 /* drap: lea disp(%drap), %rsp */
3079 				cfa->base = CFI_SP;
3080 				cfa->offset = cfi->stack_size = -op->src.offset;
3081 				cfi->drap_reg = CFI_UNDEFINED;
3082 				cfi->drap = false;
3083 				break;
3084 			}
3085 
3086 			if (op->dest.reg == cfi->cfa.base && !(next_insn && next_insn->hint)) {
3087 				WARN_INSN(insn, "unsupported stack register modification");
3088 				return -1;
3089 			}
3090 
3091 			break;
3092 
3093 		case OP_SRC_AND:
3094 			if (op->dest.reg != CFI_SP ||
3095 			    (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
3096 			    (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
3097 				WARN_INSN(insn, "unsupported stack pointer realignment");
3098 				return -1;
3099 			}
3100 
3101 			if (cfi->drap_reg != CFI_UNDEFINED) {
3102 				/* drap: and imm, %rsp */
3103 				cfa->base = cfi->drap_reg;
3104 				cfa->offset = cfi->stack_size = 0;
3105 				cfi->drap = true;
3106 			}
3107 
3108 			/*
3109 			 * Older versions of GCC (4.8ish) realign the stack
3110 			 * without DRAP, with a frame pointer.
3111 			 */
3112 
3113 			break;
3114 
3115 		case OP_SRC_POP:
3116 		case OP_SRC_POPF:
3117 			if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) {
3118 
3119 				/* pop %rsp; # restore from a stack swizzle */
3120 				cfa->base = CFI_SP;
3121 				break;
3122 			}
3123 
3124 			if (!cfi->drap && op->dest.reg == cfa->base) {
3125 
3126 				/* pop %rbp */
3127 				cfa->base = CFI_SP;
3128 			}
3129 
3130 			if (cfi->drap && cfa->base == CFI_BP_INDIRECT &&
3131 			    op->dest.reg == cfi->drap_reg &&
3132 			    cfi->drap_offset == -cfi->stack_size) {
3133 
3134 				/* drap: pop %drap */
3135 				cfa->base = cfi->drap_reg;
3136 				cfa->offset = 0;
3137 				cfi->drap_offset = -1;
3138 
3139 			} else if (cfi->stack_size == -regs[op->dest.reg].offset) {
3140 
3141 				/* pop %reg */
3142 				restore_reg(cfi, op->dest.reg);
3143 			}
3144 
3145 			cfi->stack_size -= 8;
3146 			if (cfa->base == CFI_SP)
3147 				cfa->offset -= 8;
3148 
3149 			break;
3150 
3151 		case OP_SRC_REG_INDIRECT:
3152 			if (!cfi->drap && op->dest.reg == cfa->base &&
3153 			    op->dest.reg == CFI_BP) {
3154 
3155 				/* mov disp(%rsp), %rbp */
3156 				cfa->base = CFI_SP;
3157 				cfa->offset = cfi->stack_size;
3158 			}
3159 
3160 			if (cfi->drap && op->src.reg == CFI_BP &&
3161 			    op->src.offset == cfi->drap_offset) {
3162 
3163 				/* drap: mov disp(%rbp), %drap */
3164 				cfa->base = cfi->drap_reg;
3165 				cfa->offset = 0;
3166 				cfi->drap_offset = -1;
3167 			}
3168 
3169 			if (cfi->drap && op->src.reg == CFI_BP &&
3170 			    op->src.offset == regs[op->dest.reg].offset) {
3171 
3172 				/* drap: mov disp(%rbp), %reg */
3173 				restore_reg(cfi, op->dest.reg);
3174 
3175 			} else if (op->src.reg == cfa->base &&
3176 			    op->src.offset == regs[op->dest.reg].offset + cfa->offset) {
3177 
3178 				/* mov disp(%rbp), %reg */
3179 				/* mov disp(%rsp), %reg */
3180 				restore_reg(cfi, op->dest.reg);
3181 
3182 			} else if (op->src.reg == CFI_SP &&
3183 				   op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) {
3184 
3185 				/* mov disp(%rsp), %reg */
3186 				restore_reg(cfi, op->dest.reg);
3187 			}
3188 
3189 			break;
3190 
3191 		default:
3192 			WARN_INSN(insn, "unknown stack-related instruction");
3193 			return -1;
3194 		}
3195 
3196 		break;
3197 
3198 	case OP_DEST_PUSH:
3199 	case OP_DEST_PUSHF:
3200 		cfi->stack_size += 8;
3201 		if (cfa->base == CFI_SP)
3202 			cfa->offset += 8;
3203 
3204 		if (op->src.type != OP_SRC_REG)
3205 			break;
3206 
3207 		if (cfi->drap) {
3208 			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {
3209 
3210 				/* drap: push %drap */
3211 				cfa->base = CFI_BP_INDIRECT;
3212 				cfa->offset = -cfi->stack_size;
3213 
3214 				/* save drap so we know when to restore it */
3215 				cfi->drap_offset = -cfi->stack_size;
3216 
3217 			} else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) {
3218 
3219 				/* drap: push %rbp */
3220 				cfi->stack_size = 0;
3221 
3222 			} else {
3223 
3224 				/* drap: push %reg */
3225 				save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size);
3226 			}
3227 
3228 		} else {
3229 
3230 			/* push %reg */
3231 			save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size);
3232 		}
3233 
3234 		/* detect when asm code uses rbp as a scratch register */
3235 		if (opts.stackval && insn_func(insn) && op->src.reg == CFI_BP &&
3236 		    cfa->base != CFI_BP)
3237 			cfi->bp_scratch = true;
3238 		break;
3239 
3240 	case OP_DEST_REG_INDIRECT:
3241 
3242 		if (cfi->drap) {
3243 			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {
3244 
3245 				/* drap: mov %drap, disp(%rbp) */
3246 				cfa->base = CFI_BP_INDIRECT;
3247 				cfa->offset = op->dest.offset;
3248 
3249 				/* save drap offset so we know when to restore it */
3250 				cfi->drap_offset = op->dest.offset;
3251 			} else {
3252 
3253 				/* drap: mov reg, disp(%rbp) */
3254 				save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset);
3255 			}
3256 
3257 		} else if (op->dest.reg == cfa->base) {
3258 
3259 			/* mov reg, disp(%rbp) */
3260 			/* mov reg, disp(%rsp) */
3261 			save_reg(cfi, op->src.reg, CFI_CFA,
3262 				 op->dest.offset - cfi->cfa.offset);
3263 
3264 		} else if (op->dest.reg == CFI_SP) {
3265 
3266 			/* mov reg, disp(%rsp) */
3267 			save_reg(cfi, op->src.reg, CFI_CFA,
3268 				 op->dest.offset - cfi->stack_size);
3269 
3270 		} else if (op->src.reg == CFI_SP && op->dest.offset == 0) {
3271 
3272 			/* mov %rsp, (%reg); # setup a stack swizzle. */
3273 			cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT;
3274 			cfi->vals[op->dest.reg].offset = cfa->offset;
3275 		}
3276 
3277 		break;
3278 
3279 	case OP_DEST_MEM:
3280 		if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
3281 			WARN_INSN(insn, "unknown stack-related memory operation");
3282 			return -1;
3283 		}
3284 
3285 		/* pop mem */
3286 		cfi->stack_size -= 8;
3287 		if (cfa->base == CFI_SP)
3288 			cfa->offset -= 8;
3289 
3290 		break;
3291 
3292 	default:
3293 		WARN_INSN(insn, "unknown stack-related instruction");
3294 		return -1;
3295 	}
3296 
3297 	return 0;
3298 }
3299 
3300 /*
3301  * The stack layouts of alternatives instructions can sometimes diverge when
3302  * they have stack modifications.  That's fine as long as the potential stack
3303  * layouts don't conflict at any given potential instruction boundary.
3304  *
3305  * Flatten the CFIs of the different alternative code streams (both original
3306  * and replacement) into a single shared CFI array which can be used to detect
3307  * conflicts and nicely feed a linear array of ORC entries to the unwinder.
3308  */
static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn)
{
	struct cfi_state **alt_cfi;
	int group_off;

	/* Only instructions inside an alternatives group participate. */
	if (!insn->alt_group)
		return 0;

	if (!insn->cfi) {
		WARN("CFI missing");
		return -1;
	}

	/*
	 * Index into the group's shared CFI array by the instruction's byte
	 * offset from the start of the group.
	 */
	alt_cfi = insn->alt_group->cfi;
	group_off = insn->offset - insn->alt_group->first_insn->offset;

	if (!alt_cfi[group_off]) {
		/* First code stream to reach this offset: record its CFI. */
		alt_cfi[group_off] = insn->cfi;
	} else {
		/*
		 * Another stream already claimed this offset; all streams
		 * must agree on the stack state at every shared boundary.
		 */
		if (cficmp(alt_cfi[group_off], insn->cfi)) {
			/* Report against the original group's location. */
			struct alt_group *orig_group = insn->alt_group->orig_group ?: insn->alt_group;
			struct instruction *orig = orig_group->first_insn;
			WARN_INSN(orig, "stack layout conflict in alternatives: %s",
				  offstr(insn->sec, insn->offset));
			return -1;
		}
	}

	return 0;
}
3339 
/*
 * Apply all of an instruction's stack operations to the branch state, and
 * maintain the PUSHF/POPF uaccess shadow stack used to track whether AC was
 * set when flags were pushed.  Returns 0 on success, nonzero on warning.
 */
static int noinline handle_insn_ops(struct instruction *insn,
				    struct instruction *next_insn,
				    struct insn_state *state)
{
	struct insn_state prev_state __maybe_unused = *state;
	struct stack_op *op;
	int ret = 0;

	for (op = insn->stack_ops; op; op = op->next) {

		ret = update_cfi_state(insn, next_insn, &state->cfi, op);
		if (ret)
			goto done;

		/* uaccess tracking only matters inside alternatives. */
		if (!opts.uaccess || !insn->alt_group)
			continue;

		if (op->dest.type == OP_DEST_PUSHF) {
			/*
			 * uaccess_stack is a bit-stack: bit 0 is the most
			 * recently pushed uaccess state, and the value 1
			 * (just the sentinel bit) means "empty".
			 */
			if (!state->uaccess_stack) {
				state->uaccess_stack = 1;
			} else if (state->uaccess_stack >> 31) {
				/* sentinel would be shifted out: 31 entries max */
				WARN_INSN(insn, "PUSHF stack exhausted");
				ret = 1;
				goto done;
			}
			state->uaccess_stack <<= 1;
			state->uaccess_stack  |= state->uaccess;
		}

		if (op->src.type == OP_SRC_POPF) {
			if (state->uaccess_stack) {
				/* Restore the uaccess state saved at PUSHF time. */
				state->uaccess = state->uaccess_stack & 1;
				state->uaccess_stack >>= 1;
				/* Only the sentinel left: the stack is empty again. */
				if (state->uaccess_stack == 1)
					state->uaccess_stack = 0;
			}
		}
	}

done:
	TRACE_INSN_STATE(insn, &prev_state, state);

	return ret;
}
3384 
/*
 * Compare the CFI recorded on a previously-visited instruction against the
 * CFI of the current branch state.  Warns about the first category of
 * mismatch found (CFA, saved regs, type, DRAP) and returns false on any
 * disagreement.
 */
static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
{
	struct cfi_state *cfi1 = insn->cfi;
	int i;

	if (!cfi1) {
		WARN("CFI missing");
		return false;
	}

	if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) {

		WARN_INSN(insn, "stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
			  cfi1->cfa.base, cfi1->cfa.offset,
			  cfi2->cfa.base, cfi2->cfa.offset);
		return false;

	}

	if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
		/* Report every differing register, then fail once. */
		for (i = 0; i < CFI_NUM_REGS; i++) {

			if (!memcmp(&cfi1->regs[i], &cfi2->regs[i], sizeof(struct cfi_reg)))
				continue;

			WARN_INSN(insn, "stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
				  i, cfi1->regs[i].base, cfi1->regs[i].offset,
				  i, cfi2->regs[i].base, cfi2->regs[i].offset);
		}
		return false;
	}

	if (cfi1->type != cfi2->type) {

		WARN_INSN(insn, "stack state mismatch: type1=%d type2=%d",
			  cfi1->type, cfi2->type);
		return false;
	}

	/* drap_reg/drap_offset only carry meaning while drap is set. */
	if (cfi1->drap != cfi2->drap ||
		   (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) ||
		   (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) {

		WARN_INSN(insn, "stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
			  cfi1->drap, cfi1->drap_reg, cfi1->drap_offset,
			  cfi2->drap, cfi2->drap_reg, cfi2->drap_offset);
		return false;
	}

	return true;
}
3436 
func_uaccess_safe(struct symbol * func)3437 static inline bool func_uaccess_safe(struct symbol *func)
3438 {
3439 	if (func)
3440 		return func->uaccess_safe;
3441 
3442 	return false;
3443 }
3444 
call_dest_name(struct instruction * insn)3445 static inline const char *call_dest_name(struct instruction *insn)
3446 {
3447 	static char pvname[19];
3448 	struct reloc *reloc;
3449 	int idx;
3450 
3451 	if (insn_call_dest(insn))
3452 		return insn_call_dest(insn)->name;
3453 
3454 	reloc = insn_reloc(NULL, insn);
3455 	if (reloc && !strcmp(reloc->sym->name, "pv_ops")) {
3456 		idx = (reloc_addend(reloc) / sizeof(void *));
3457 		snprintf(pvname, sizeof(pvname), "pv_ops[%d]", idx);
3458 		return pvname;
3459 	}
3460 
3461 	return "{dynamic}";
3462 }
3463 
/*
 * For an indirect call through pv_ops[], check that every registered target
 * for that slot lives in a noinstr section.  The result is cached via the
 * slot's 'clean' flag: warnings for a dirty slot are emitted only once.
 */
static bool pv_call_dest(struct objtool_file *file, struct instruction *insn)
{
	struct symbol *target;
	struct reloc *reloc;
	int idx;

	reloc = insn_reloc(file, insn);
	if (!reloc || strcmp(reloc->sym->name, "pv_ops"))
		return false;

	/* Addend encodes the pv_ops[] slot being called. */
	idx = arch_insn_adjusted_addend(insn, reloc) / sizeof(void *);

	/* Already vetted this slot on an earlier call. */
	if (file->pv_ops[idx].clean)
		return true;

	/* Assume clean, then warn and clear for each offending target. */
	file->pv_ops[idx].clean = true;

	list_for_each_entry(target, &file->pv_ops[idx].targets, pv_target) {
		if (!target->sec->noinstr) {
			WARN("pv_ops[%d]: %s", idx, target->name);
			file->pv_ops[idx].clean = false;
		}
	}

	return file->pv_ops[idx].clean;
}
3490 
noinstr_call_dest(struct objtool_file * file,struct instruction * insn,struct symbol * func)3491 static inline bool noinstr_call_dest(struct objtool_file *file,
3492 				     struct instruction *insn,
3493 				     struct symbol *func)
3494 {
3495 	/*
3496 	 * We can't deal with indirect function calls at present;
3497 	 * assume they're instrumented.
3498 	 */
3499 	if (!func) {
3500 		if (file->pv_ops)
3501 			return pv_call_dest(file, insn);
3502 
3503 		return false;
3504 	}
3505 
3506 	/*
3507 	 * If the symbol is from a noinstr section; we good.
3508 	 */
3509 	if (func->sec->noinstr)
3510 		return true;
3511 
3512 	/*
3513 	 * If the symbol is a static_call trampoline, we can't tell.
3514 	 */
3515 	if (func->static_call_tramp)
3516 		return true;
3517 
3518 	/*
3519 	 * The __ubsan_handle_*() calls are like WARN(), they only happen when
3520 	 * something 'BAD' happened. At the risk of taking the machine down,
3521 	 * let them proceed to get the message out.
3522 	 */
3523 	if (!strncmp(func->name, "__ubsan_handle_", 15))
3524 		return true;
3525 
3526 	return false;
3527 }
3528 
validate_call(struct objtool_file * file,struct instruction * insn,struct insn_state * state)3529 static int validate_call(struct objtool_file *file,
3530 			 struct instruction *insn,
3531 			 struct insn_state *state)
3532 {
3533 	if (state->noinstr && state->instr <= 0 &&
3534 	    !noinstr_call_dest(file, insn, insn_call_dest(insn))) {
3535 		WARN_INSN(insn, "call to %s() leaves .noinstr.text section", call_dest_name(insn));
3536 		return 1;
3537 	}
3538 
3539 	if (state->uaccess && !func_uaccess_safe(insn_call_dest(insn))) {
3540 		WARN_INSN(insn, "call to %s() with UACCESS enabled", call_dest_name(insn));
3541 		return 1;
3542 	}
3543 
3544 	if (state->df) {
3545 		WARN_INSN(insn, "call to %s() with DF set", call_dest_name(insn));
3546 		return 1;
3547 	}
3548 
3549 	return 0;
3550 }
3551 
/*
 * A sibling call (tail call) is checked like a regular call, with the added
 * requirement that a callable function's stack frame must be back to its
 * original state at the point of the jump.
 */
static int validate_sibling_call(struct objtool_file *file,
				 struct instruction *insn,
				 struct insn_state *state)
{
	if (insn_func(insn) && has_modified_stack_frame(insn, state)) {
		WARN_INSN(insn, "sibling call from callable instruction with modified stack frame");
		return 1;
	}

	return validate_call(file, insn, state);
}
3563 
/*
 * Validate the branch state at a return instruction: instrumentation,
 * uaccess (AC), DF, stack frame and BP usage must all be back to a sane
 * state before returning to the caller.  Returns nonzero on warning.
 */
static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state)
{
	/* noinstr code must not return with instrumentation live. */
	if (state->noinstr && state->instr > 0) {
		WARN_INSN(insn, "return with instrumentation enabled");
		return 1;
	}

	/* AC may only still be set when returning from a uaccess-safe func. */
	if (state->uaccess && !func_uaccess_safe(func)) {
		WARN_INSN(insn, "return with UACCESS enabled");
		return 1;
	}

	/* Conversely, a uaccess-safe function must return with AC set. */
	if (!state->uaccess && func_uaccess_safe(func)) {
		WARN_INSN(insn, "return with UACCESS disabled from a UACCESS-safe function");
		return 1;
	}

	if (state->df) {
		WARN_INSN(insn, "return with DF set");
		return 1;
	}

	/* Within a function, the frame must be fully unwound at return. */
	if (func && has_modified_stack_frame(insn, state)) {
		WARN_INSN(insn, "return with modified stack frame");
		return 1;
	}

	if (state->cfi.bp_scratch) {
		WARN_INSN(insn, "BP used as a scratch register");
		return 1;
	}

	return 0;
}
3598 
next_insn_to_validate(struct objtool_file * file,struct instruction * insn)3599 static struct instruction *next_insn_to_validate(struct objtool_file *file,
3600 						 struct instruction *insn)
3601 {
3602 	struct alt_group *alt_group = insn->alt_group;
3603 
3604 	/*
3605 	 * Simulate the fact that alternatives are patched in-place.  When the
3606 	 * end of a replacement alt_group is reached, redirect objtool flow to
3607 	 * the end of the original alt_group.
3608 	 *
3609 	 * insn->alts->insn -> alt_group->first_insn
3610 	 *		       ...
3611 	 *		       alt_group->last_insn
3612 	 *		       [alt_group->nop]      -> next(orig_group->last_insn)
3613 	 */
3614 	if (alt_group) {
3615 		if (alt_group->nop) {
3616 			/* ->nop implies ->orig_group */
3617 			if (insn == alt_group->last_insn)
3618 				return alt_group->nop;
3619 			if (insn == alt_group->nop)
3620 				goto next_orig;
3621 		}
3622 		if (insn == alt_group->last_insn && alt_group->orig_group)
3623 			goto next_orig;
3624 	}
3625 
3626 	return next_insn_same_sec(file, insn);
3627 
3628 next_orig:
3629 	return next_insn_same_sec(file, alt_group->orig_group->last_insn);
3630 }
3631 
skip_alt_group(struct instruction * insn)3632 static bool skip_alt_group(struct instruction *insn)
3633 {
3634 	struct instruction *alt_insn = insn->alts ? insn->alts->insn : NULL;
3635 
3636 	if (!insn->alt_group)
3637 		return false;
3638 
3639 	/* ANNOTATE_IGNORE_ALTERNATIVE */
3640 	if (insn->alt_group->ignore) {
3641 		TRACE_ALT(insn, "alt group ignored");
3642 		return true;
3643 	}
3644 
3645 	/*
3646 	 * For NOP patched with CLAC/STAC, only follow the latter to avoid
3647 	 * impossible code paths combining patched CLAC with unpatched STAC
3648 	 * or vice versa.
3649 	 *
3650 	 * ANNOTATE_IGNORE_ALTERNATIVE could have been used here, but Linus
3651 	 * requested not to do that to avoid hurting .s file readability
3652 	 * around CLAC/STAC alternative sites.
3653 	 */
3654 
3655 	if (!alt_insn)
3656 		return false;
3657 
3658 	/* Don't override ASM_{CLAC,STAC}_UNSAFE */
3659 	if (alt_insn->alt_group && alt_insn->alt_group->ignore)
3660 		return false;
3661 
3662 	return alt_insn->type == INSN_CLAC || alt_insn->type == INSN_STAC;
3663 }
3664 
checksum_debug_init(struct objtool_file * file)3665 static int checksum_debug_init(struct objtool_file *file)
3666 {
3667 	char *dup, *s;
3668 
3669 	if (!opts.debug_checksum)
3670 		return 0;
3671 
3672 	dup = strdup(opts.debug_checksum);
3673 	if (!dup) {
3674 		ERROR_GLIBC("strdup");
3675 		return -1;
3676 	}
3677 
3678 	s = dup;
3679 	while (*s) {
3680 		struct symbol *func;
3681 		char *comma;
3682 
3683 		comma = strchr(s, ',');
3684 		if (comma)
3685 			*comma = '\0';
3686 
3687 		func = find_symbol_by_name(file->elf, s);
3688 		if (!func || !is_func_sym(func))
3689 			WARN("--debug-checksum: can't find '%s'", s);
3690 		else
3691 			func->debug_checksum = 1;
3692 
3693 		if (!comma)
3694 			break;
3695 
3696 		s = comma + 1;
3697 	}
3698 
3699 	free(dup);
3700 	return 0;
3701 }
3702 
/*
 * Fold one instruction into @func's checksum.  Besides the raw instruction
 * bytes, relocation targets are mixed in symbolically (by name and offset,
 * or by string contents) so the checksum is stable across layout changes.
 */
static void checksum_update_insn(struct objtool_file *file, struct symbol *func,
				 struct instruction *insn)
{
	struct reloc *reloc = insn_reloc(file, insn);
	unsigned long offset;
	struct symbol *sym;

	/* Synthetic instructions have no bytes to hash. */
	if (insn->fake)
		return;

	/* Raw instruction bytes. */
	checksum_update(func, insn, insn->sec->data->d_buf + insn->offset, insn->len);

	if (!reloc) {
		/* No reloc: mix in a direct call destination by name, if any. */
		struct symbol *call_dest = insn_call_dest(insn);

		if (call_dest)
			checksum_update(func, insn, call_dest->demangled_name,
					strlen(call_dest->demangled_name));
		return;
	}

	sym = reloc->sym;
	offset = arch_insn_adjusted_addend(insn, reloc);

	/* References into string sections hash the string contents. */
	if (is_string_sec(sym->sec)) {
		char *str;

		str = sym->sec->data->d_buf + sym->offset + offset;
		checksum_update(func, insn, str, strlen(str));
		return;
	}

	/* Resolve section symbols to the contained symbol + its offset. */
	if (is_sec_sym(sym)) {
		sym = find_symbol_containing(reloc->sym->sec, offset);
		if (!sym)
			return;

		offset -= sym->offset;
	}

	checksum_update(func, insn, sym->demangled_name, strlen(sym->demangled_name));
	checksum_update(func, insn, &offset, sizeof(offset));
}
3746 
3747 static int validate_branch(struct objtool_file *file, struct symbol *func,
3748 			   struct instruction *insn, struct insn_state state);
3749 static int do_validate_branch(struct objtool_file *file, struct symbol *func,
3750 			      struct instruction *insn, struct insn_state *state);
3751 
/*
 * Validate a single instruction within a branch walk: manage visit
 * tracking, CFI propagation (hints, save/restore, alternatives), stack
 * ops, and per-instruction-type rules (calls, jumps, uaccess, DF).
 *
 * Returns nonzero on a warning.  *dead_end tells the caller whether the
 * walk ends here (set true for all early returns, and finally set from
 * insn->dead_end for the fallthrough case).
 */
static int validate_insn(struct objtool_file *file, struct symbol *func,
			 struct instruction *insn, struct insn_state *statep,
			 struct instruction *prev_insn, struct instruction *next_insn,
			 bool *dead_end)
{
	char *alt_name __maybe_unused = NULL;
	struct alternative *alt;
	u8 visited;
	int ret;

	/*
	 * Any returns before the end of this function are effectively dead
	 * ends, i.e. validate_branch() has reached the end of the branch.
	 */
	*dead_end = true;

	/* Separate visit bits per uaccess state: each must be walked once. */
	visited = VISITED_BRANCH << statep->uaccess;
	if (insn->visited & VISITED_BRANCH_MASK) {
		/* Revisit: the stack state must agree with the first visit. */
		if (!insn->hint && !insn_cfi_match(insn, &statep->cfi))
			return 1;

		if (insn->visited & visited) {
			TRACE_INSN(insn, "already visited");
			return 0;
		}
	} else {
		nr_insns_visited++;
	}

	if (statep->noinstr)
		statep->instr += insn->instr;

	if (insn->hint) {
		if (insn->restore) {
			struct instruction *save_insn, *i;

			/* Find the matching CFI save hint, scanning backwards. */
			i = insn;
			save_insn = NULL;

			sym_for_each_insn_continue_reverse(file, func, i) {
				if (i->save) {
					save_insn = i;
					break;
				}
			}

			if (!save_insn) {
				WARN_INSN(insn, "no corresponding CFI save for CFI restore");
				return 1;
			}

			if (!save_insn->visited) {
				/*
				 * If the restore hint insn is at the
				 * beginning of a basic block and was
				 * branched to from elsewhere, and the
				 * save insn hasn't been visited yet,
				 * defer following this branch for now.
				 * It will be seen later via the
				 * straight-line path.
				 */
				if (!prev_insn) {
					TRACE_INSN(insn, "defer restore");
					return 0;
				}

				WARN_INSN(insn, "objtool isn't smart enough to handle this CFI save/restore combo");
				return 1;
			}

			insn->cfi = save_insn->cfi;
			nr_cfi_reused++;
		}

		/* A hint overrides the propagated state. */
		statep->cfi = *insn->cfi;
	} else {
		/* XXX track if we actually changed statep->cfi */

		/* Share the previous insn's CFI object when unchanged. */
		if (prev_insn && !cficmp(prev_insn->cfi, &statep->cfi)) {
			insn->cfi = prev_insn->cfi;
			nr_cfi_reused++;
		} else {
			insn->cfi = cfi_hash_find_or_add(&statep->cfi);
		}
	}

	insn->visited |= visited;

	if (propagate_alt_cfi(file, insn))
		return 1;

	/* Recurse into each alternative replacement stream. */
	if (insn->alts) {
		for (alt = insn->alts; alt; alt = alt->next) {
			TRACE_ALT_BEGIN(insn, alt, alt_name);
			ret = validate_branch(file, func, alt->insn, *statep);
			TRACE_ALT_END(insn, alt, alt_name);
			if (ret) {
				BT_INSN(insn, "(alt)");
				return ret;
			}
		}
		TRACE_ALT_INFO_NOADDR(insn, "/ ", "DEFAULT");
	}

	if (skip_alt_group(insn))
		return 0;

	if (handle_insn_ops(insn, next_insn, statep))
		return 1;

	switch (insn->type) {

	case INSN_RETURN:
		TRACE_INSN(insn, "return");
		return validate_return(func, insn, statep);

	case INSN_CALL:
	case INSN_CALL_DYNAMIC:
		if (insn->type == INSN_CALL)
			TRACE_INSN(insn, "call");
		else
			TRACE_INSN(insn, "indirect call");

		ret = validate_call(file, insn, statep);
		if (ret)
			return ret;

		if (opts.stackval && func && !is_special_call(insn) &&
		    !has_valid_stack_frame(statep)) {
			WARN_INSN(insn, "call without frame pointer save/setup");
			return 1;
		}

		break;

	case INSN_JUMP_CONDITIONAL:
	case INSN_JUMP_UNCONDITIONAL:
		if (is_sibling_call(insn)) {
			TRACE_INSN(insn, "sibling call");
			ret = validate_sibling_call(file, insn, statep);
			if (ret)
				return ret;

		} else if (insn->jump_dest) {
			if (insn->type == INSN_JUMP_UNCONDITIONAL)
				TRACE_INSN(insn, "unconditional jump");
			else
				TRACE_INSN(insn, "jump taken");

			/* Follow the taken path recursively. */
			ret = validate_branch(file, func, insn->jump_dest, *statep);
			if (ret) {
				BT_INSN(insn, "(branch)");
				return ret;
			}
		}

		if (insn->type == INSN_JUMP_UNCONDITIONAL)
			return 0;

		/* Conditional: fall through to the not-taken path. */
		TRACE_INSN(insn, "jump not taken");
		break;

	case INSN_JUMP_DYNAMIC:
	case INSN_JUMP_DYNAMIC_CONDITIONAL:
		TRACE_INSN(insn, "indirect jump");
		if (is_sibling_call(insn)) {
			ret = validate_sibling_call(file, insn, statep);
			if (ret)
				return ret;
		}

		if (insn->type == INSN_JUMP_DYNAMIC)
			return 0;

		break;

	case INSN_SYSCALL:
		TRACE_INSN(insn, "syscall");
		if (func && (!next_insn || !next_insn->hint)) {
			WARN_INSN(insn, "unsupported instruction in callable function");
			return 1;
		}

		break;

	case INSN_SYSRET:
		TRACE_INSN(insn, "sysret");
		if (func && (!next_insn || !next_insn->hint)) {
			WARN_INSN(insn, "unsupported instruction in callable function");
			return 1;
		}

		return 0;

	case INSN_STAC:
		TRACE_INSN(insn, "stac");
		if (!opts.uaccess)
			break;

		if (statep->uaccess) {
			WARN_INSN(insn, "recursive UACCESS enable");
			return 1;
		}

		statep->uaccess = true;
		break;

	case INSN_CLAC:
		TRACE_INSN(insn, "clac");
		if (!opts.uaccess)
			break;

		if (!statep->uaccess && func) {
			WARN_INSN(insn, "redundant UACCESS disable");
			return 1;
		}

		if (func_uaccess_safe(func) && !statep->uaccess_stack) {
			WARN_INSN(insn, "UACCESS-safe disables UACCESS");
			return 1;
		}

		statep->uaccess = false;
		break;

	case INSN_STD:
		TRACE_INSN(insn, "std");
		if (statep->df) {
			WARN_INSN(insn, "recursive STD");
			return 1;
		}

		statep->df = true;
		break;

	case INSN_CLD:
		TRACE_INSN(insn, "cld");
		if (!statep->df && func) {
			WARN_INSN(insn, "redundant CLD");
			return 1;
		}

		statep->df = false;
		break;

	default:
		break;
	}

	if (insn->dead_end)
		TRACE_INSN(insn, "dead end");

	*dead_end = insn->dead_end;
	return 0;
}
4007 
4008 /*
4009  * Follow the branch starting at the given instruction, and recursively follow
4010  * any other branches (jumps).  Meanwhile, track the frame pointer state at
4011  * each instruction and validate all the rules described in
4012  * tools/objtool/Documentation/objtool.txt.
4013  */
static int do_validate_branch(struct objtool_file *file, struct symbol *func,
			      struct instruction *insn, struct insn_state *state)
{
	struct instruction *next_insn, *prev_insn = NULL;
	bool dead_end;
	int ret;

	if (func && func->ignore)
		return 0;

	/* Walk the straight-line path; validate_insn() recurses for branches. */
	do {
		insn->trace = 0;
		next_insn = next_insn_to_validate(file, insn);

		if (opts.checksum && func && insn->sec)
			checksum_update_insn(file, func, insn);

		/* Detect falling off the end of one function into another. */
		if (func && insn_func(insn) && func != insn_func(insn)->pfunc) {
			/* Ignore KCFI type preambles, which always fall through */
			if (is_prefix_func(func))
				return 0;

			if (file->ignore_unreachables)
				return 0;

			WARN("%s() falls through to next function %s()",
			     func->name, insn_func(insn)->name);
			func->warned = 1;

			return 1;
		}

		ret = validate_insn(file, func, insn, state, prev_insn, next_insn,
				    &dead_end);

		/* Emit a trace entry unless validate_insn() already did. */
		if (!insn->trace) {
			if (ret)
				TRACE_INSN(insn, "warning (%d)", ret);
			else
				TRACE_INSN(insn, NULL);
		}

		/* Running past the section end is only OK on dead ends. */
		if (!dead_end && !next_insn) {
			if (state->cfi.cfa.base == CFI_UNDEFINED)
				return 0;
			if (file->ignore_unreachables)
				return 0;

			WARN("%s%sunexpected end of section %s",
			     func ? func->name : "", func ? "(): " : "",
			     insn->sec->name);
			return 1;
		}

		prev_insn = insn;
		insn = next_insn;

	} while (!dead_end);

	return ret;
}
4075 
/*
 * Wrapper around do_validate_branch() that brackets the walk with trace
 * depth accounting.  Note @state is taken by value, so each branch gets
 * its own copy to mutate.
 */
static int validate_branch(struct objtool_file *file, struct symbol *func,
			   struct instruction *insn, struct insn_state state)
{
	int ret;

	trace_depth_inc();
	ret = do_validate_branch(file, func, insn, &state);
	trace_depth_dec();

	return ret;
}
4087 
validate_unwind_hint(struct objtool_file * file,struct instruction * insn,struct insn_state * state)4088 static int validate_unwind_hint(struct objtool_file *file,
4089 				  struct instruction *insn,
4090 				  struct insn_state *state)
4091 {
4092 	if (insn->hint && !insn->visited) {
4093 		struct symbol *func = insn_func(insn);
4094 		int ret;
4095 
4096 		if (opts.checksum)
4097 			checksum_init(func);
4098 
4099 		ret = validate_branch(file, func, insn, *state);
4100 		if (ret)
4101 			BT_INSN(insn, "<=== (hint)");
4102 		return ret;
4103 	}
4104 
4105 	return 0;
4106 }
4107 
/*
 * Validate all unwind-hinted entry points, either within a single section
 * (@sec non-NULL) or across the whole file.  Returns the warning count.
 */
static int validate_unwind_hints(struct objtool_file *file, struct section *sec)
{
	struct instruction *insn;
	struct insn_state state;
	int warnings = 0;

	if (!file->hints)
		return 0;

	init_insn_state(file, &state, sec);

	if (sec) {
		sec_for_each_insn(file, sec, insn)
			warnings += validate_unwind_hint(file, insn, &state);
	} else {
		for_each_insn(file, insn)
			warnings += validate_unwind_hint(file, insn, &state);
	}

	return warnings;
}
4129 
4130 /*
4131  * Validate rethunk entry constraint: must untrain RET before the first RET.
4132  *
4133  * Follow every branch (intra-function) and ensure VALIDATE_UNRET_END comes
4134  * before an actual RET instruction.
4135  */
static int validate_unret(struct objtool_file *file, struct instruction *insn)
{
	struct instruction *next, *dest;
	int ret;

	/* Iterative straight-line walk; branches/calls recurse. */
	for (;;) {
		next = next_insn_to_validate(file, insn);

		if (insn->visited & VISITED_UNRET)
			return 0;

		insn->visited |= VISITED_UNRET;

		/* Every alternative stream must satisfy the constraint too. */
		if (insn->alts) {
			struct alternative *alt;
			for (alt = insn->alts; alt; alt = alt->next) {
				ret = validate_unret(file, alt->insn);
				if (ret) {
					BT_INSN(insn, "(alt)");
					return ret;
				}
			}
		}

		switch (insn->type) {

		case INSN_CALL_DYNAMIC:
		case INSN_JUMP_DYNAMIC:
		case INSN_JUMP_DYNAMIC_CONDITIONAL:
			/* Indirect transfers can't be proven untrained. */
			WARN_INSN(insn, "early indirect call");
			return 1;

		case INSN_JUMP_UNCONDITIONAL:
		case INSN_JUMP_CONDITIONAL:
			if (!is_sibling_call(insn)) {
				if (!insn->jump_dest) {
					WARN_INSN(insn, "unresolved jump target after linking?!?");
					return 1;
				}
				ret = validate_unret(file, insn->jump_dest);
				if (ret) {
					BT_INSN(insn, "(branch%s)",
						insn->type == INSN_JUMP_CONDITIONAL ? "-cond" : "");
					return ret;
				}

				if (insn->type == INSN_JUMP_UNCONDITIONAL)
					return 0;

				/* Conditional: also check the not-taken path. */
				break;
			}

			/* Sibling calls are validated like calls. */
			/* fallthrough */
		case INSN_CALL:
			dest = find_insn(file, insn_call_dest(insn)->sec,
					 insn_call_dest(insn)->offset);
			if (!dest) {
				WARN("Unresolved function after linking!?: %s",
				     insn_call_dest(insn)->name);
				return 1;
			}

			ret = validate_unret(file, dest);
			if (ret) {
				BT_INSN(insn, "(call)");
				return ret;
			}
			/*
			 * If a call returns without error, it must have seen UNTRAIN_RET.
			 * Therefore any non-error return is a success.
			 */
			return 0;

		case INSN_RETURN:
			WARN_INSN(insn, "RET before UNTRAIN");
			return 1;

		case INSN_SYSCALL:
			break;

		case INSN_SYSRET:
			return 0;

		case INSN_NOP:
			/* A retpoline-safe NOP marks the UNTRAIN point. */
			if (insn->retpoline_safe)
				return 0;
			break;

		default:
			break;
		}

		if (insn->dead_end)
			return 0;

		if (!next) {
			WARN_INSN(insn, "teh end!");
			return 1;
		}
		insn = next;
	}

	return 0;
}
4240 
4241 /*
4242  * Validate that all branches starting at VALIDATE_UNRET_BEGIN encounter
4243  * VALIDATE_UNRET_END before RET.
4244  */
validate_unrets(struct objtool_file * file)4245 static int validate_unrets(struct objtool_file *file)
4246 {
4247 	struct instruction *insn;
4248 	int warnings = 0;
4249 
4250 	for_each_insn(file, insn) {
4251 		if (!insn->unret)
4252 			continue;
4253 
4254 		warnings += validate_unret(file, insn);
4255 	}
4256 
4257 	return warnings;
4258 }
4259 
/*
 * Check retpoline/rethunk hygiene: no naked indirect jumps/calls in a
 * RETPOLINE build, no naked RETs in a RETHUNK build, and (with --cfi)
 * every indirect call must be preceded by a kCFI check.  Returns the
 * warning count.
 */
static int validate_retpoline(struct objtool_file *file)
{
	struct instruction *insn;
	int warnings = 0;

	for_each_insn(file, insn) {
		if (insn->type != INSN_JUMP_DYNAMIC &&
		    insn->type != INSN_CALL_DYNAMIC &&
		    insn->type != INSN_RETURN)
			continue;

		/* Explicitly annotated as safe. */
		if (insn->retpoline_safe)
			continue;

		/* Init code runs before the mitigations matter. */
		if (insn->sec->init)
			continue;

		if (insn->type == INSN_RETURN) {
			if (opts.rethunk) {
				WARN_INSN(insn, "'naked' return found in MITIGATION_RETHUNK build");
				warnings++;
			}
			continue;
		}

		WARN_INSN(insn, "indirect %s found in MITIGATION_RETPOLINE build",
			  insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
		warnings++;
	}

	if (!opts.cfi)
		return warnings;

	/*
	 * kCFI call sites look like:
	 *
	 *     movl $(-0x12345678), %r10d
	 *     addl -4(%r11), %r10d
	 *     jz 1f
	 *     ud2
	 *  1: cs call __x86_indirect_thunk_r11
	 *
	 * Verify all indirect calls are kCFI adorned by checking for the
	 * UD2. Notably, doing __nocfi calls to regular (cfi) functions is
	 * broken.
	 */
	list_for_each_entry(insn, &file->retpoline_call_list, call_node) {
		struct symbol *sym = insn->sym;

		if (sym && (sym->type == STT_NOTYPE ||
			    sym->type == STT_FUNC) && !sym->nocfi) {
			struct instruction *prev =
				prev_insn_same_sym(file, insn);

			/* The UD2 right before the call is the kCFI marker. */
			if (!prev || prev->type != INSN_BUG) {
				WARN_INSN(insn, "no-cfi indirect call!");
				warnings++;
			}
		}
	}

	return warnings;
}
4323 
is_kasan_insn(struct instruction * insn)4324 static bool is_kasan_insn(struct instruction *insn)
4325 {
4326 	return (insn->type == INSN_CALL &&
4327 		!strcmp(insn_call_dest(insn)->name, "__asan_handle_no_return"));
4328 }
4329 
is_ubsan_insn(struct instruction * insn)4330 static bool is_ubsan_insn(struct instruction *insn)
4331 {
4332 	return (insn->type == INSN_CALL &&
4333 		!strcmp(insn_call_dest(insn)->name,
4334 			"__ubsan_handle_builtin_unreachable"));
4335 }
4336 
/*
 * Decide whether an unvisited (statically unreachable) instruction can be
 * safely ignored by validate_reachable_instructions(), to avoid false
 * "unreachable instruction" warnings for compiler- and tooling-generated
 * padding/trap code.
 */
static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn)
{
	struct symbol *func = insn_func(insn);
	struct instruction *prev_insn;
	int i;

	/* NOP/trap filler, data holes, and explicitly ignored functions are fine. */
	if (insn->type == INSN_NOP || insn->type == INSN_TRAP ||
	    insn->hole || (func && func->ignore))
		return true;

	/*
	 * Ignore alternative replacement instructions.  This can happen
	 * when a whitelisted function uses one of the ALTERNATIVE macros.
	 */
	if (!strcmp(insn->sec->name, ".altinstr_replacement") ||
	    !strcmp(insn->sec->name, ".altinstr_aux"))
		return true;

	if (!func)
		return false;

	if (func->static_call_tramp)
		return true;

	/*
	 * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
	 * __builtin_unreachable().  The BUG() macro has an unreachable() after
	 * the UD2, which causes GCC's undefined trap logic to emit another UD2
	 * (or occasionally a JMP to UD2).
	 *
	 * It may also insert a UD2 after calling a __noreturn function.
	 */
	prev_insn = prev_insn_same_sec(file, insn);
	if (prev_insn && prev_insn->dead_end &&
	    (insn->type == INSN_BUG ||
	     (insn->type == INSN_JUMP_UNCONDITIONAL &&
	      insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
		return true;

	/*
	 * Check if this (or a subsequent) instruction is related to
	 * CONFIG_UBSAN or CONFIG_KASAN.
	 *
	 * End the search at 5 instructions to avoid going into the weeds.
	 */
	for (i = 0; i < 5; i++) {

		if (is_kasan_insn(insn) || is_ubsan_insn(insn))
			return true;

		if (insn->type == INSN_JUMP_UNCONDITIONAL) {
			/* Only follow jumps that stay within the same function. */
			if (insn->jump_dest &&
			    insn_func(insn->jump_dest) == func) {
				insn = insn->jump_dest;
				continue;
			}

			break;
		}

		/* Don't walk past the end of the function. */
		if (insn->offset + insn->len >= func->offset + func->len)
			break;

		insn = next_insn_same_sec(file, insn);
	}

	return false;
}
4405 
/*
 * For FineIBT or kCFI, a certain number of bytes preceding the function may be
 * NOPs.  Those NOPs may be rewritten at runtime and executed, so give them a
 * proper function name: __pfx_<func>.
 *
 * The NOPs may not exist for the following cases:
 *
 *   - compiler cloned functions (*.cold, *.part0, etc)
 *   - asm functions created with inline asm or without SYM_FUNC_START()
 *
 * Also, the function may already have a prefix from a previous objtool run
 * (livepatch extracted functions, or manually running objtool multiple times).
 *
 * So return 0 if the NOPs are missing or the function already has a prefix
 * symbol.
 */
static int create_prefix_symbol(struct objtool_file *file, struct symbol *func)
{
	struct instruction *insn, *prev;
	char name[SYM_NAME_LEN];
	struct cfi_state *cfi;

	/* Only plain (non-cold, non-tramp) function symbols get a prefix. */
	if (!is_func_sym(func) || is_prefix_func(func) ||
	    func->cold || func->static_call_tramp)
		return 0;

	if ((strlen(func->name) + sizeof("__pfx_") > SYM_NAME_LEN)) {
		WARN("%s: symbol name too long, can't create __pfx_ symbol",
		      func->name);
		return 0;
	}

	if (snprintf_check(name, SYM_NAME_LEN, "__pfx_%s", func->name))
		return -1;

	if (file->klp) {
		struct symbol *pfx;

		/* Livepatch objects may already carry a prefix symbol; don't duplicate it. */
		pfx = find_symbol_by_offset(func->sec, func->offset - opts.prefix);
		if (pfx && is_prefix_func(pfx) && !strcmp(pfx->name, name))
			return 0;
	}

	insn = find_insn(file, func->sec, func->offset);
	if (!insn) {
		WARN("%s: can't find starting instruction", func->name);
		return -1;
	}

	/*
	 * Walk backwards from the function entry: there must be exactly
	 * opts.prefix bytes of contiguous NOPs for the prefix to exist.
	 */
	for (prev = prev_insn_same_sec(file, insn);
	     prev;
	     prev = prev_insn_same_sec(file, prev)) {
		u64 offset;

		if (prev->type != INSN_NOP)
			return 0;

		offset = func->offset - prev->offset;

		/* Walked past the expected prefix size: NOPs missing. */
		if (offset > opts.prefix)
			return 0;

		if (offset < opts.prefix)
			continue;

		if (!elf_create_symbol(file->elf, name, func->sec,
				       GELF_ST_BIND(func->sym.st_info),
				       GELF_ST_TYPE(func->sym.st_info),
				       prev->offset, opts.prefix))
			return -1;

		break;
	}

	if (!prev)
		return 0;

	if (!insn->cfi) {
		/*
		 * This can happen if stack validation isn't enabled or the
		 * function is annotated with STACK_FRAME_NON_STANDARD.
		 */
		return 0;
	}

	/* Propagate insn->cfi to the prefix code */
	cfi = cfi_hash_find_or_add(insn->cfi);
	for (; prev != insn; prev = next_insn_same_sec(file, prev))
		prev->cfi = cfi;

	return 0;
}
4498 
create_prefix_symbols(struct objtool_file * file)4499 static int create_prefix_symbols(struct objtool_file *file)
4500 {
4501 	struct section *sec;
4502 	struct symbol *func;
4503 
4504 	for_each_sec(file->elf, sec) {
4505 		if (!is_text_sec(sec))
4506 			continue;
4507 
4508 		sec_for_each_sym(sec, func) {
4509 			if (create_prefix_symbol(file, func))
4510 				return -1;
4511 		}
4512 	}
4513 
4514 	return 0;
4515 }
4516 
/*
 * Run branch validation for a single symbol.  Returns non-zero on warnings.
 * Also drives per-function checksumming and (pattern-matched) tracing around
 * the validate_branch() walk.
 */
static int validate_symbol(struct objtool_file *file, struct section *sec,
			   struct symbol *sym, struct insn_state *state)
{
	struct instruction *insn;
	struct symbol *func;
	int ret;

	if (!sym->len) {
		WARN("%s() is missing an ELF size annotation", sym->name);
		return 1;
	}

	/* Skip aliases/child symbols; only validate the canonical symbol. */
	if (sym->pfunc != sym || sym->alias != sym)
		return 0;

	/* Already validated via an earlier path, or no decoded insn here. */
	insn = find_insn(file, sec, sym->offset);
	if (!insn || insn->visited)
		return 0;

	if (opts.uaccess)
		state->uaccess = sym->uaccess_safe;

	func = insn_func(insn);

	if (opts.checksum)
		checksum_init(func);

	/* Enable tracing only for symbols matching the --trace glob. */
	if (opts.trace && !fnmatch(opts.trace, sym->name, 0)) {
		trace_enable();
		TRACE("%s: validation begin\n", sym->name);
	}

	ret = validate_branch(file, func, insn, *state);
	if (ret)
		BT_INSN(insn, "<=== (sym)");

	TRACE("%s: validation %s\n\n", sym->name, ret ? "failed" : "end");
	trace_disable();

	if (opts.checksum)
		checksum_finish(func);

	return ret;
}
4561 
validate_section(struct objtool_file * file,struct section * sec)4562 static int validate_section(struct objtool_file *file, struct section *sec)
4563 {
4564 	struct insn_state state;
4565 	struct symbol *func;
4566 	int warnings = 0;
4567 
4568 	sec_for_each_sym(sec, func) {
4569 		if (!is_func_sym(func))
4570 			continue;
4571 
4572 		init_insn_state(file, &state, sec);
4573 		set_func_state(&state.cfi);
4574 
4575 		warnings += validate_symbol(file, sec, func, &state);
4576 	}
4577 
4578 	return warnings;
4579 }
4580 
validate_noinstr_sections(struct objtool_file * file)4581 static int validate_noinstr_sections(struct objtool_file *file)
4582 {
4583 	struct section *sec;
4584 	int warnings = 0;
4585 
4586 	sec = find_section_by_name(file->elf, ".noinstr.text");
4587 	if (sec) {
4588 		warnings += validate_section(file, sec);
4589 		warnings += validate_unwind_hints(file, sec);
4590 	}
4591 
4592 	sec = find_section_by_name(file->elf, ".entry.text");
4593 	if (sec) {
4594 		warnings += validate_section(file, sec);
4595 		warnings += validate_unwind_hints(file, sec);
4596 	}
4597 
4598 	sec = find_section_by_name(file->elf, ".cpuidle.text");
4599 	if (sec) {
4600 		warnings += validate_section(file, sec);
4601 		warnings += validate_unwind_hints(file, sec);
4602 	}
4603 
4604 	return warnings;
4605 }
4606 
validate_functions(struct objtool_file * file)4607 static int validate_functions(struct objtool_file *file)
4608 {
4609 	struct section *sec;
4610 	int warnings = 0;
4611 
4612 	for_each_sec(file->elf, sec) {
4613 		if (!is_text_sec(sec))
4614 			continue;
4615 
4616 		warnings += validate_section(file, sec);
4617 	}
4618 
4619 	return warnings;
4620 }
4621 
mark_endbr_used(struct instruction * insn)4622 static void mark_endbr_used(struct instruction *insn)
4623 {
4624 	if (!list_empty(&insn->call_node))
4625 		list_del_init(&insn->call_node);
4626 }
4627 
/*
 * Returns true if @insn is the first byte past the end of a symbol whose
 * first instruction is ENDBR or ANNOTATE_NOENDBR -- the typical way asm code
 * references the end of a code range.
 */
static bool noendbr_range(struct objtool_file *file, struct instruction *insn)
{
	/* offset-1 so a reference to one-past-the-end still finds the symbol. */
	struct symbol *sym = find_symbol_containing(insn->sec, insn->offset-1);
	struct instruction *first;

	if (!sym)
		return false;

	first = find_insn(file, sym->sec, sym->offset);
	if (!first)
		return false;

	if (first->type != INSN_ENDBR && !first->noendbr)
		return false;

	/* Only the exact end-of-symbol address qualifies. */
	return insn->offset == sym->offset + sym->len;
}
4645 
/*
 * Check a single code reference from @insn to @dest against the IBT rules.
 * Returns 1 (and warns) if @dest is a branch target that lacks ENDBR and has
 * no accepted exemption, 0 otherwise.
 */
static int __validate_ibt_insn(struct objtool_file *file, struct instruction *insn,
			       struct instruction *dest)
{
	if (dest->type == INSN_ENDBR) {
		/* Valid target; keep its ENDBR off the seal list. */
		mark_endbr_used(dest);
		return 0;
	}

	if (insn_func(dest) && insn_func(insn) &&
	    insn_func(dest)->pfunc == insn_func(insn)->pfunc) {
		/*
		 * Anything from->to self is either _THIS_IP_ or
		 * IRET-to-self.
		 *
		 * There is no sane way to annotate _THIS_IP_ since the
		 * compiler treats the relocation as a constant and is
		 * happy to fold in offsets, skewing any annotation we
		 * do, leading to vast amounts of false-positives.
		 *
		 * There's also compiler generated _THIS_IP_ through
		 * KCOV and such which we have no hope of annotating.
		 *
		 * As such, blanket accept self-references without
		 * issue.
		 */
		return 0;
	}

	/*
	 * Accept anything ANNOTATE_NOENDBR.
	 */
	if (dest->noendbr)
		return 0;

	/*
	 * Accept if this is the instruction after a symbol
	 * that is (no)endbr -- typical code-range usage.
	 */
	if (noendbr_range(file, dest))
		return 0;

	WARN_INSN(insn, "relocation to !ENDBR: %s", offstr(dest->sec, dest->offset));
	return 1;
}
4690 
/*
 * Validate all code references originating from @insn's relocations (and
 * reloc-less RIP-relative LEAs) against the IBT rules.  Returns the number
 * of warnings emitted.
 */
static int validate_ibt_insn(struct objtool_file *file, struct instruction *insn)
{
	struct instruction *dest;
	struct reloc *reloc;
	unsigned long off;
	int warnings = 0;

	/*
	 * Looking for function pointer load relocations.  Ignore
	 * direct/indirect branches:
	 */
	switch (insn->type) {

	case INSN_CALL:
	case INSN_CALL_DYNAMIC:
	case INSN_JUMP_CONDITIONAL:
	case INSN_JUMP_UNCONDITIONAL:
	case INSN_JUMP_DYNAMIC:
	case INSN_JUMP_DYNAMIC_CONDITIONAL:
	case INSN_RETURN:
	case INSN_NOP:
		return 0;

	case INSN_LEA_RIP:
		if (!insn_reloc(file, insn)) {
			/* local function pointer reference without reloc */

			off = arch_jump_destination(insn);

			dest = find_insn(file, insn->sec, off);
			if (!dest) {
				WARN_INSN(insn, "corrupt function pointer reference");
				return 1;
			}

			return __validate_ibt_insn(file, insn, dest);
		}
		break;

	default:
		break;
	}

	/* Walk every relocation within this instruction's byte range. */
	for (reloc = insn_reloc(file, insn);
	     reloc;
	     reloc = find_reloc_by_dest_range(file->elf, insn->sec,
					      reloc_offset(reloc) + 1,
					      (insn->offset + insn->len) - (reloc_offset(reloc) + 1))) {

		off = reloc->sym->offset + arch_insn_adjusted_addend(insn, reloc);

		/* Not a code reference (e.g. data symbol); nothing to check. */
		dest = find_insn(file, reloc->sym->sec, off);
		if (!dest)
			continue;

		warnings += __validate_ibt_insn(file, insn, dest);
	}

	return warnings;
}
4751 
validate_ibt_data_reloc(struct objtool_file * file,struct reloc * reloc)4752 static int validate_ibt_data_reloc(struct objtool_file *file,
4753 				   struct reloc *reloc)
4754 {
4755 	struct instruction *dest;
4756 
4757 	dest = find_insn(file, reloc->sym->sec,
4758 			 reloc->sym->offset + reloc_addend(reloc));
4759 	if (!dest)
4760 		return 0;
4761 
4762 	if (dest->type == INSN_ENDBR) {
4763 		mark_endbr_used(dest);
4764 		return 0;
4765 	}
4766 
4767 	if (dest->noendbr)
4768 		return 0;
4769 
4770 	WARN_FUNC(reloc->sec->base, reloc_offset(reloc),
4771 		  "data relocation to !ENDBR: %s", offstr(dest->sec, dest->offset));
4772 
4773 	return 1;
4774 }
4775 
/*
 * Validate IBT rules and remove used ENDBR instructions from the seal list.
 * Unused ENDBR instructions will be annotated for sealing (i.e., replaced with
 * NOPs) later, in create_ibt_endbr_seal_sections().
 */
static int validate_ibt(struct objtool_file *file)
{
	struct section *sec;
	struct reloc *reloc;
	struct instruction *insn;
	int warnings = 0;

	/* First pass: references originating from code. */
	for_each_insn(file, insn)
		warnings += validate_ibt_insn(file, insn);

	/* Second pass: references originating from data sections. */
	for_each_sec(file->elf, sec) {

		/* Already done by validate_ibt_insn() */
		if (is_text_sec(sec))
			continue;

		if (!sec->rsec)
			continue;

		/*
		 * These sections can reference text addresses, but not with
		 * the intent to indirect branch to them.
		 */
		if ((!strncmp(sec->name, ".discard", 8) &&
		     strcmp(sec->name, ".discard.ibt_endbr_noseal"))	||
		    !strncmp(sec->name, ".debug", 6)			||
		    !strcmp(sec->name, ".altinstructions")		||
		    !strcmp(sec->name, ".ibt_endbr_seal")		||
		    !strcmp(sec->name, ".kcfi_traps")			||
		    !strcmp(sec->name, ".orc_unwind_ip")		||
		    !strcmp(sec->name, ".retpoline_sites")		||
		    !strcmp(sec->name, ".smp_locks")			||
		    !strcmp(sec->name, ".static_call_sites")		||
		    !strcmp(sec->name, "_error_injection_whitelist")	||
		    !strcmp(sec->name, "_kprobe_blacklist")		||
		    !strcmp(sec->name, "__bug_table")			||
		    !strcmp(sec->name, "__ex_table")			||
		    !strcmp(sec->name, "__jump_table")			||
		    !strcmp(sec->name, ".init.klp_funcs")		||
		    !strcmp(sec->name, "__mcount_loc")			||
		    !strcmp(sec->name, ".llvm.call-graph-profile")	||
		    !strcmp(sec->name, ".llvm_bb_addr_map")		||
		    !strcmp(sec->name, "__tracepoints")			||
		    !strcmp(sec->name, ".return_sites")			||
		    !strcmp(sec->name, ".call_sites")			||
		    !strcmp(sec->name, "__patchable_function_entries"))
			continue;

		for_each_reloc(sec->rsec, reloc)
			warnings += validate_ibt_data_reloc(file, reloc);
	}

	return warnings;
}
4835 
validate_sls(struct objtool_file * file)4836 static int validate_sls(struct objtool_file *file)
4837 {
4838 	struct instruction *insn, *next_insn;
4839 	int warnings = 0;
4840 
4841 	for_each_insn(file, insn) {
4842 		next_insn = next_insn_same_sec(file, insn);
4843 
4844 		if (insn->retpoline_safe)
4845 			continue;
4846 
4847 		switch (insn->type) {
4848 		case INSN_RETURN:
4849 			if (!next_insn || next_insn->type != INSN_TRAP) {
4850 				WARN_INSN(insn, "missing int3 after ret");
4851 				warnings++;
4852 			}
4853 
4854 			break;
4855 		case INSN_JUMP_DYNAMIC:
4856 			if (!next_insn || next_insn->type != INSN_TRAP) {
4857 				WARN_INSN(insn, "missing int3 after indirect jump");
4858 				warnings++;
4859 			}
4860 			break;
4861 		default:
4862 			break;
4863 		}
4864 	}
4865 
4866 	return warnings;
4867 }
4868 
/*
 * Warn about any instruction that was never visited by branch validation and
 * isn't covered by an ignore heuristic.  A dead-end call immediately before
 * an unreachable insn usually means a __noreturn annotation is missing, so
 * emit a more helpful message for that case.  Returns the warning count.
 */
static int validate_reachable_instructions(struct objtool_file *file)
{
	struct instruction *insn, *prev_insn;
	struct symbol *call_dest;
	int warnings = 0;

	if (file->ignore_unreachables)
		return 0;

	for_each_insn(file, insn) {
		if (insn->visited || ignore_unreachable_insn(file, insn))
			continue;

		prev_insn = prev_insn_same_sec(file, insn);
		if (prev_insn && prev_insn->dead_end) {
			call_dest = insn_call_dest(prev_insn);
			if (call_dest) {
				/* Likely a missing __noreturn on the called function. */
				WARN_INSN(insn, "%s() missing __noreturn in .c/.h or NORETURN() in noreturns.h",
					  call_dest->name);
				warnings++;
				continue;
			}
		}

		WARN_INSN(insn, "unreachable instruction");
		warnings++;
	}

	return warnings;
}
4899 
arch_absolute_reloc(struct elf * elf,struct reloc * reloc)4900 __weak bool arch_absolute_reloc(struct elf *elf, struct reloc *reloc)
4901 {
4902 	unsigned int type = reloc_type(reloc);
4903 	size_t sz = elf_addr_size(elf);
4904 
4905 	return (sz == 8) ? (type == R_ABS64) : (type == R_ABS32);
4906 }
4907 
check_abs_references(struct objtool_file * file)4908 static int check_abs_references(struct objtool_file *file)
4909 {
4910 	struct section *sec;
4911 	struct reloc *reloc;
4912 	int ret = 0;
4913 
4914 	for_each_sec(file->elf, sec) {
4915 		/* absolute references in non-loadable sections are fine */
4916 		if (!(sec->sh.sh_flags & SHF_ALLOC))
4917 			continue;
4918 
4919 		/* section must have an associated .rela section */
4920 		if (!sec->rsec)
4921 			continue;
4922 
4923 		/*
4924 		 * Special case for compiler generated metadata that is not
4925 		 * consumed until after boot.
4926 		 */
4927 		if (!strcmp(sec->name, "__patchable_function_entries"))
4928 			continue;
4929 
4930 		for_each_reloc(sec->rsec, reloc) {
4931 			if (arch_absolute_reloc(file->elf, reloc)) {
4932 				WARN("section %s has absolute relocation at offset 0x%llx",
4933 				     sec->name, (unsigned long long)reloc_offset(reloc));
4934 				ret++;
4935 			}
4936 		}
4937 	}
4938 	return ret;
4939 }
4940 
/*
 * Node in a singly-linked list of instruction-chunk base addresses, built by
 * free_insns() so each chunk allocation can be freed after iteration.
 */
struct insn_chunk {
	void *addr;			/* base address of an instruction chunk */
	struct insn_chunk *next;	/* next node, or NULL at list end */
};
4945 
4946 /*
4947  * Reduce peak RSS usage by freeing insns memory before writing the ELF file,
4948  * which can trigger more allocations for .debug_* sections whose data hasn't
4949  * been read yet.
4950  */
free_insns(struct objtool_file * file)4951 static void free_insns(struct objtool_file *file)
4952 {
4953 	struct instruction *insn;
4954 	struct insn_chunk *chunks = NULL, *chunk;
4955 
4956 	for_each_insn(file, insn) {
4957 		if (!insn->idx) {
4958 			chunk = malloc(sizeof(*chunk));
4959 			chunk->addr = insn;
4960 			chunk->next = chunks;
4961 			chunks = chunk;
4962 		}
4963 	}
4964 
4965 	for (chunk = chunks; chunk; chunk = chunk->next)
4966 		free(chunk->addr);
4967 }
4968 
objtool_disas_insn(struct instruction * insn)4969 const char *objtool_disas_insn(struct instruction *insn)
4970 {
4971 	struct disas_context *dctx = objtool_disas_ctx;
4972 
4973 	if (!dctx)
4974 		return "";
4975 
4976 	disas_insn(dctx, insn);
4977 	return disas_result(dctx);
4978 }
4979 
/*
 * Top-level driver: decode the object file, run the enabled validation
 * passes (accumulating 'warnings'), then generate the requested annotation
 * sections.  Hard errors set 'ret' and jump to cleanup.  Pass ordering is
 * significant -- e.g. validate_unrets() must follow branch validation, and
 * section generation must follow all validation.
 */
int check(struct objtool_file *file)
{
	struct disas_context *disas_ctx = NULL;
	int ret = 0, warnings = 0;

	/*
	 * Create a disassembly context if we might disassemble any
	 * instruction or function.
	 */
	if (opts.verbose || opts.backtrace || opts.trace || opts.disas) {
		disas_ctx = disas_context_create(file);
		if (!disas_ctx) {
			/* No disassembler available: degrade gracefully. */
			opts.disas = false;
			opts.trace = false;
		}
		objtool_disas_ctx = disas_ctx;
	}

	/* Set up the shared CFI states used throughout validation. */
	arch_initial_func_cfi_state(&initial_func_cfi);
	init_cfi_state(&init_cfi);
	init_cfi_state(&func_cfi);
	set_func_state(&func_cfi);
	init_cfi_state(&force_undefined_cfi);
	force_undefined_cfi.force_undefined = true;

	if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3))) {
		ret = -1;
		goto out;
	}

	cfi_hash_add(&init_cfi);
	cfi_hash_add(&func_cfi);

	ret = checksum_debug_init(file);
	if (ret)
		goto out;

	ret = decode_sections(file);
	if (ret)
		goto out;

	/* Nothing to validate. */
	if (!nr_insns)
		goto out;

	if (opts.retpoline)
		warnings += validate_retpoline(file);

	if (validate_branch_enabled()) {
		int w = 0;

		w += validate_functions(file);
		w += validate_unwind_hints(file, NULL);
		/* Reachability is only meaningful if validation was clean. */
		if (!w)
			w += validate_reachable_instructions(file);

		warnings += w;

	} else if (opts.noinstr) {
		warnings += validate_noinstr_sections(file);
	}

	if (opts.unret) {
		/*
		 * Must be after validate_branch() and friends, it plays
		 * further games with insn->visited.
		 */
		warnings += validate_unrets(file);
	}

	if (opts.ibt)
		warnings += validate_ibt(file);

	if (opts.sls)
		warnings += validate_sls(file);

	/* Annotation section generation; any failure is a hard error. */
	if (opts.static_call) {
		ret = create_static_call_sections(file);
		if (ret)
			goto out;
	}

	if (opts.retpoline) {
		ret = create_retpoline_sites_sections(file);
		if (ret)
			goto out;
	}

	if (opts.cfi) {
		ret = create_cfi_sections(file);
		if (ret)
			goto out;
	}

	if (opts.rethunk) {
		ret = create_return_sites_sections(file);
		if (ret)
			goto out;

		if (opts.hack_skylake) {
			ret = create_direct_call_sections(file);
			if (ret)
				goto out;
		}
	}

	if (opts.mcount) {
		ret = create_mcount_loc_sections(file);
		if (ret)
			goto out;
	}

	if (opts.prefix) {
		ret = create_prefix_symbols(file);
		if (ret)
			goto out;
	}

	if (opts.ibt) {
		ret = create_ibt_endbr_seal_sections(file);
		if (ret)
			goto out;
	}

	if (opts.noabs)
		warnings += check_abs_references(file);

	if (opts.checksum) {
		ret = create_sym_checksum_section(file);
		if (ret)
			goto out;
	}

	if (opts.orc && nr_insns) {
		ret = orc_create(file);
		if (ret)
			goto out;
	}

	if (opts.stats) {
		printf("nr_insns_visited: %ld\n", nr_insns_visited);
		printf("nr_cfi: %ld\n", nr_cfi);
		printf("nr_cfi_reused: %ld\n", nr_cfi_reused);
		printf("nr_cfi_cache: %ld\n", nr_cfi_cache);
	}

out:
	if (ret || warnings) {
		/* --Werror: warnings become a non-zero exit status. */
		if (opts.werror && warnings)
			ret = 1;

		if (opts.verbose) {
			if (opts.werror && warnings)
				WARN("%d warning(s) upgraded to errors", warnings);
			disas_warned_funcs(disas_ctx);
		}
	}

	if (opts.disas)
		disas_funcs(disas_ctx);

	if (disas_ctx) {
		disas_context_destroy(disas_ctx);
		objtool_disas_ctx = NULL;
	}

	free_insns(file);

	if (!ret && !warnings)
		return 0;

	/* On failure, optionally back up the original object for debugging. */
	if (opts.backup && make_backup())
		return 1;

	return ret;
}
5155