xref: /linux/arch/x86/kernel/alternative.c (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
1 // SPDX-License-Identifier: GPL-2.0-only
2 #define pr_fmt(fmt) "SMP alternatives: " fmt
3 
4 #include <linux/module.h>
5 #include <linux/sched.h>
6 #include <linux/perf_event.h>
7 #include <linux/mutex.h>
8 #include <linux/list.h>
9 #include <linux/stringify.h>
10 #include <linux/highmem.h>
11 #include <linux/mm.h>
12 #include <linux/vmalloc.h>
13 #include <linux/memory.h>
14 #include <linux/stop_machine.h>
15 #include <linux/slab.h>
16 #include <linux/kdebug.h>
17 #include <linux/kprobes.h>
18 #include <linux/mmu_context.h>
19 #include <linux/bsearch.h>
20 #include <linux/sync_core.h>
21 #include <asm/text-patching.h>
22 #include <asm/alternative.h>
23 #include <asm/sections.h>
24 #include <asm/mce.h>
25 #include <asm/nmi.h>
26 #include <asm/cacheflush.h>
27 #include <asm/tlbflush.h>
28 #include <asm/insn.h>
29 #include <asm/io.h>
30 #include <asm/fixmap.h>
31 #include <asm/paravirt.h>
32 #include <asm/asm-prototypes.h>
33 #include <asm/cfi.h>
34 
35 int __read_mostly alternatives_patched;
36 
37 EXPORT_SYMBOL_GPL(alternatives_patched);
38 
39 #define MAX_PATCH_LEN (255-1)
40 
41 #define DA_ALL		(~0)
42 #define DA_ALT		0x01
43 #define DA_RET		0x02
44 #define DA_RETPOLINE	0x04
45 #define DA_ENDBR	0x08
46 #define DA_SMP		0x10
47 
48 static unsigned int debug_alternative;
49 
50 static int __init debug_alt(char *str)
51 {
52 	if (str && *str == '=')
53 		str++;
54 
55 	if (!str || kstrtouint(str, 0, &debug_alternative))
56 		debug_alternative = DA_ALL;
57 
58 	return 1;
59 }
60 __setup("debug-alternative", debug_alt);
61 
62 static int noreplace_smp;
63 
64 static int __init setup_noreplace_smp(char *str)
65 {
66 	noreplace_smp = 1;
67 	return 1;
68 }
69 __setup("noreplace-smp", setup_noreplace_smp);
70 
71 #define DPRINTK(type, fmt, args...)					\
72 do {									\
73 	if (debug_alternative & DA_##type)				\
74 		printk(KERN_DEBUG pr_fmt(fmt) "\n", ##args);		\
75 } while (0)
76 
77 #define DUMP_BYTES(type, buf, len, fmt, args...)			\
78 do {									\
79 	if (unlikely(debug_alternative & DA_##type)) {			\
80 		int j;							\
81 									\
82 		if (!(len))						\
83 			break;						\
84 									\
85 		printk(KERN_DEBUG pr_fmt(fmt), ##args);			\
86 		for (j = 0; j < (len) - 1; j++)				\
87 			printk(KERN_CONT "%02hhx ", buf[j]);		\
88 		printk(KERN_CONT "%02hhx\n", buf[j]);			\
89 	}								\
90 } while (0)
91 
92 static const unsigned char x86nops[] =
93 {
94 	BYTES_NOP1,
95 	BYTES_NOP2,
96 	BYTES_NOP3,
97 	BYTES_NOP4,
98 	BYTES_NOP5,
99 	BYTES_NOP6,
100 	BYTES_NOP7,
101 	BYTES_NOP8,
102 #ifdef CONFIG_64BIT
103 	BYTES_NOP9,
104 	BYTES_NOP10,
105 	BYTES_NOP11,
106 #endif
107 };
108 
109 const unsigned char * const x86_nops[ASM_NOP_MAX+1] =
110 {
111 	NULL,
112 	x86nops,
113 	x86nops + 1,
114 	x86nops + 1 + 2,
115 	x86nops + 1 + 2 + 3,
116 	x86nops + 1 + 2 + 3 + 4,
117 	x86nops + 1 + 2 + 3 + 4 + 5,
118 	x86nops + 1 + 2 + 3 + 4 + 5 + 6,
119 	x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
120 #ifdef CONFIG_64BIT
121 	x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
122 	x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9,
123 	x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10,
124 #endif
125 };
126 
127 /*
128  * Nomenclature for variable names to simplify and clarify this code and ease
129  * any potential staring at it:
130  *
131  * @instr: source address of the original instructions in the kernel text as
132  * generated by the compiler.
133  *
134  * @buf: temporary buffer on which the patching operates. This buffer is
135  * eventually text-poked into the kernel image.
136  *
137  * @replacement/@repl: pointer to the opcodes which are replacing @instr, located
138  * in the .altinstr_replacement section.
139  */
140 
141 /*
142  * Fill the buffer with a single effective instruction of size @len.
143  *
144  * In order not to issue an ORC stack depth tracking CFI entry (Call Frame Info)
145  * for every single-byte NOP, try to generate the maximally available NOP of
146  * size <= ASM_NOP_MAX such that only a single CFI entry is generated (vs one for
147  * each single-byte NOP). If @len to fill out is > ASM_NOP_MAX, pad with INT3 and
148  * *jump* over instead of executing long and daft NOPs.
149  */
150 static void add_nop(u8 *buf, unsigned int len)
151 {
152 	u8 *target = buf + len;
153 
154 	if (!len)
155 		return;
156 
157 	if (len <= ASM_NOP_MAX) {
158 		memcpy(buf, x86_nops[len], len);
159 		return;
160 	}
161 
162 	if (len < 128) {
163 		__text_gen_insn(buf, JMP8_INSN_OPCODE, buf, target, JMP8_INSN_SIZE);
164 		buf += JMP8_INSN_SIZE;
165 	} else {
166 		__text_gen_insn(buf, JMP32_INSN_OPCODE, buf, target, JMP32_INSN_SIZE);
167 		buf += JMP32_INSN_SIZE;
168 	}
169 
170 	for (;buf < target; buf++)
171 		*buf = INT3_INSN_OPCODE;
172 }
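/*
 * Illustrative output of add_nop() (assuming 64-bit, ASM_NOP_MAX == 11):
 *
 *   add_nop(buf, 5)   -> x86_nops[5], a single 5-byte NOP
 *   add_nop(buf, 40)  -> eb 26 (JMP8 +38) followed by 38 bytes of cc (INT3)
 *   add_nop(buf, 200) -> e9 c3 00 00 00 (JMP32 +195) followed by 195 INT3s
 *
 * The INT3 bytes are never executed; the JMP skips straight to @target.
 */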
173 
174 extern s32 __retpoline_sites[], __retpoline_sites_end[];
175 extern s32 __return_sites[], __return_sites_end[];
176 extern s32 __cfi_sites[], __cfi_sites_end[];
177 extern s32 __ibt_endbr_seal[], __ibt_endbr_seal_end[];
178 extern s32 __smp_locks[], __smp_locks_end[];
179 void text_poke_early(void *addr, const void *opcode, size_t len);
180 
181 /*
182  * Matches NOP and NOPL, not any of the other possible NOPs.
183  */
184 static bool insn_is_nop(struct insn *insn)
185 {
186 	/* Anything NOP, but no REP NOP */
187 	if (insn->opcode.bytes[0] == 0x90 &&
188 	    (!insn->prefixes.nbytes || insn->prefixes.bytes[0] != 0xF3))
189 		return true;
190 
191 	/* NOPL */
192 	if (insn->opcode.bytes[0] == 0x0F && insn->opcode.bytes[1] == 0x1F)
193 		return true;
194 
195 	/* TODO: more nops */
196 
197 	return false;
198 }
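/*
 * Byte patterns involved (for reference): 90 is NOP, f3 90 is REP NOP
 * (PAUSE) and is deliberately rejected, 0f 1f /0 is the multi-byte NOPL
 * family.
 */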
199 
200 /*
201  * Find the offset of the first non-NOP instruction starting at @offset
202  * but no further than @len.
203  */
204 static int skip_nops(u8 *buf, int offset, int len)
205 {
206 	struct insn insn;
207 
208 	for (; offset < len; offset += insn.length) {
209 		if (insn_decode_kernel(&insn, &buf[offset]))
210 			break;
211 
212 		if (!insn_is_nop(&insn))
213 			break;
214 	}
215 
216 	return offset;
217 }
218 
219 /*
220  * "noinline" to cause control flow change and thus invalidate I$ and
221  * cause refetch after modification.
222  */
223 static void noinline optimize_nops(const u8 * const instr, u8 *buf, size_t len)
224 {
225 	for (int next, i = 0; i < len; i = next) {
226 		struct insn insn;
227 
228 		if (insn_decode_kernel(&insn, &buf[i]))
229 			return;
230 
231 		next = i + insn.length;
232 
233 		if (insn_is_nop(&insn)) {
234 			int nop = i;
235 
236 			/* Has the NOP already been optimized? */
237 			if (i + insn.length == len)
238 				return;
239 
240 			next = skip_nops(buf, next, len);
241 
242 			add_nop(buf + nop, next - nop);
243 			DUMP_BYTES(ALT, buf, len, "%px: [%d:%d) optimized NOPs: ", instr, nop, next);
244 		}
245 	}
246 }
247 
248 /*
249  * In this context, "source" is where the instructions are placed in the
250  * section .altinstr_replacement, for example during kernel build by the
251  * toolchain.
252  * "Destination" is where the instructions are being patched in by this
253  * machinery.
254  *
255  * The source offset is:
256  *
257  *   src_imm = target - src_next_ip                  (1)
258  *
259  * and the target offset is:
260  *
261  *   dst_imm = target - dst_next_ip                  (2)
262  *
263  * so rework (1) as an expression for target like:
264  *
265  *   target = src_imm + src_next_ip                  (1a)
266  *
267  * and substitute in (2) to get:
268  *
269  *   dst_imm = (src_imm + src_next_ip) - dst_next_ip (3)
270  *
271  * Now, since the instruction stream is 'identical' at src and dst (it
272  * is being copied after all) it can be stated that:
273  *
274  *   src_next_ip = src + ip_offset
275  *   dst_next_ip = dst + ip_offset                   (4)
276  *
277  * Substitute (4) in (3) and observe ip_offset being cancelled out to
278  * obtain:
279  *
280  *   dst_imm = src_imm + (src + ip_offset) - (dst + ip_offset)
281  *           = src_imm + src - dst + ip_offset - ip_offset
282  *           = src_imm + src - dst                   (5)
283  *
284  * IOW, only the relative displacement of the code block matters.
285  */
286 
287 #define apply_reloc_n(n_, p_, d_)				\
288 	do {							\
289 		s32 v = *(s##n_ *)(p_);				\
290 		v += (d_);					\
291 		BUG_ON((v >> 31) != (v >> (n_-1)));		\
292 		*(s##n_ *)(p_) = (s##n_)v;			\
293 	} while (0)
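/*
 * The BUG_ON() above is a signed-fit check: (v >> 31) == (v >> (n_-1)) holds
 * iff v survives truncation to an n_-bit signed immediate, e.g. for n_ == 8
 * the adjusted displacement must stay within [-128, 127].
 */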
294 
295 
296 static __always_inline
297 void apply_reloc(int n, void *ptr, uintptr_t diff)
298 {
299 	switch (n) {
300 	case 1: apply_reloc_n(8, ptr, diff); break;
301 	case 2: apply_reloc_n(16, ptr, diff); break;
302 	case 4: apply_reloc_n(32, ptr, diff); break;
303 	default: BUG();
304 	}
305 }
306 
307 static __always_inline
308 bool need_reloc(unsigned long offset, u8 *src, size_t src_len)
309 {
310 	u8 *target = src + offset;
311 	/*
312 	 * If the target is inside the patched block, it's relative to the
313 	 * block itself and does not need relocation.
314 	 */
315 	return (target < src || target > src + src_len);
316 }
317 
318 static void __apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl, size_t repl_len)
319 {
320 	for (int next, i = 0; i < instrlen; i = next) {
321 		struct insn insn;
322 
323 		if (WARN_ON_ONCE(insn_decode_kernel(&insn, &buf[i])))
324 			return;
325 
326 		next = i + insn.length;
327 
328 		switch (insn.opcode.bytes[0]) {
329 		case 0x0f:
330 			if (insn.opcode.bytes[1] < 0x80 ||
331 			    insn.opcode.bytes[1] > 0x8f)
332 				break;
333 
334 			fallthrough;	/* Jcc.d32 */
335 		case 0x70 ... 0x7f:	/* Jcc.d8 */
336 		case JMP8_INSN_OPCODE:
337 		case JMP32_INSN_OPCODE:
338 		case CALL_INSN_OPCODE:
339 			if (need_reloc(next + insn.immediate.value, repl, repl_len)) {
340 				apply_reloc(insn.immediate.nbytes,
341 					    buf + i + insn_offset_immediate(&insn),
342 					    repl - instr);
343 			}
344 
345 			/*
346 			 * Where possible, convert JMP.d32 into JMP.d8.
347 			 */
348 			if (insn.opcode.bytes[0] == JMP32_INSN_OPCODE) {
349 				s32 imm = insn.immediate.value;
350 				imm += repl - instr;
351 				imm += JMP32_INSN_SIZE - JMP8_INSN_SIZE;
352 				if ((imm >> 31) == (imm >> 7)) {
353 					buf[i+0] = JMP8_INSN_OPCODE;
354 					buf[i+1] = (s8)imm;
355 
356 					memset(&buf[i+2], INT3_INSN_OPCODE, insn.length - 2);
357 				}
358 			}
359 			break;
360 		}
361 
362 		if (insn_rip_relative(&insn)) {
363 			if (need_reloc(next + insn.displacement.value, repl, repl_len)) {
364 				apply_reloc(insn.displacement.nbytes,
365 					    buf + i + insn_offset_displacement(&insn),
366 					    repl - instr);
367 			}
368 		}
369 	}
370 }
371 
372 void apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl, size_t repl_len)
373 {
374 	__apply_relocation(buf, instr, instrlen, repl, repl_len);
375 	optimize_nops(instr, buf, instrlen);
376 }
377 
378 /* Low-level backend functions usable from alternative code replacements. */
379 DEFINE_ASM_FUNC(nop_func, "", .entry.text);
380 EXPORT_SYMBOL_GPL(nop_func);
381 
382 noinstr void BUG_func(void)
383 {
384 	BUG();
385 }
386 EXPORT_SYMBOL(BUG_func);
387 
388 #define CALL_RIP_REL_OPCODE	0xff
389 #define CALL_RIP_REL_MODRM	0x15
390 
391 /*
392  * Rewrite the "call BUG_func" replacement to point to the target of the
393  * indirect pv_ops call "call *disp(%ip)".
394  */
395 static int alt_replace_call(u8 *instr, u8 *insn_buff, struct alt_instr *a)
396 {
397 	void *target, *bug = &BUG_func;
398 	s32 disp;
399 
400 	if (a->replacementlen != 5 || insn_buff[0] != CALL_INSN_OPCODE) {
401 		pr_err("ALT_FLAG_DIRECT_CALL set for a non-call replacement instruction\n");
402 		BUG();
403 	}
404 
405 	if (a->instrlen != 6 ||
406 	    instr[0] != CALL_RIP_REL_OPCODE ||
407 	    instr[1] != CALL_RIP_REL_MODRM) {
408 		pr_err("ALT_FLAG_DIRECT_CALL set for unrecognized indirect call\n");
409 		BUG();
410 	}
411 
412 	/* Skip CALL_RIP_REL_OPCODE and CALL_RIP_REL_MODRM */
413 	disp = *(s32 *)(instr + 2);
414 #ifdef CONFIG_X86_64
415 	/* ff 15 00 00 00 00   call   *0x0(%rip) */
416 	/* target address is stored at "next instruction + disp". */
417 	target = *(void **)(instr + a->instrlen + disp);
418 #else
419 	/* ff 15 00 00 00 00   call   *0x0 */
420 	/* target address is stored at disp. */
421 	target = *(void **)disp;
422 #endif
423 	if (!target)
424 		target = bug;
425 
426 	/* (BUG_func - .) + (target - BUG_func) := target - . */
427 	*(s32 *)(insn_buff + 1) += target - bug;
428 
429 	if (target == &nop_func)
430 		return 0;
431 
432 	return 5;
433 }
434 
435 static inline u8 * instr_va(struct alt_instr *i)
436 {
437 	return (u8 *)&i->instr_offset + i->instr_offset;
438 }
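/*
 * Note: alt_instr stores self-relative s32 offsets rather than absolute
 * pointers, which keeps .altinstructions small and free of load-time
 * relocations; instr_va() folds such an offset back into a virtual address.
 */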
439 
440 /*
441  * Replace instructions with better alternatives for this CPU type. This runs
442  * before SMP is initialized to avoid SMP problems with self-modifying code.
443  * This implies that asymmetric systems where APs have fewer capabilities than
444  * the boot processor are not handled. Tough. Make sure you disable such
445  * features by hand.
446  *
447  * Marked "noinline" to cause control flow change and thus insn cache
448  * to refetch changed I$ lines.
449  */
450 void __init_or_module noinline apply_alternatives(struct alt_instr *start,
451 						  struct alt_instr *end)
452 {
453 	u8 insn_buff[MAX_PATCH_LEN];
454 	u8 *instr, *replacement;
455 	struct alt_instr *a, *b;
456 
457 	DPRINTK(ALT, "alt table %px, -> %px", start, end);
458 
459 	/*
460 	 * In the case CONFIG_X86_5LEVEL=y, KASAN_SHADOW_START is defined using
461 	 * cpu_feature_enabled(X86_FEATURE_LA57) and is therefore patched here.
462 	 * During the process, KASAN becomes confused seeing partial LA57
463 	 * conversion and triggers a false-positive out-of-bound report.
464 	 *
465 	 * Disable KASAN until the patching is complete.
466 	 */
467 	kasan_disable_current();
468 
469 	/*
470 	 * The scan order should be from start to end. A later scanned
471 	 * alternative code can overwrite previously scanned alternative code.
472 	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
473 	 * patch code.
474 	 *
475 	 * So be careful if you want to change the scan order to any other
476 	 * order.
477 	 */
478 	for (a = start; a < end; a++) {
479 		int insn_buff_sz = 0;
480 
481 		/*
482 		 * In case of nested ALTERNATIVE()s the outer alternative might
483 		 * add more padding. To ensure consistent patching find the max
484 		 * padding for all alt_instr entries for this site (nested
485 		 * alternatives result in consecutive entries).
486 		 */
487 		for (b = a+1; b < end && instr_va(b) == instr_va(a); b++) {
488 			u8 len = max(a->instrlen, b->instrlen);
489 			a->instrlen = b->instrlen = len;
490 		}
491 
492 		instr = instr_va(a);
493 		replacement = (u8 *)&a->repl_offset + a->repl_offset;
494 		BUG_ON(a->instrlen > sizeof(insn_buff));
495 		BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
496 
497 		/*
498 		 * Patch if either:
499 		 * - feature is present
500 		 * - feature not present but ALT_FLAG_NOT is set to mean,
501 		 *   patch if feature is *NOT* present.
502 		 */
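		/*
		 * The test below is the complement of that: it holds exactly
		 * when neither condition applies, in which case the original
		 * instruction is kept and only its NOP padding is optimized.
		 */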
503 		if (!boot_cpu_has(a->cpuid) == !(a->flags & ALT_FLAG_NOT)) {
504 			memcpy(insn_buff, instr, a->instrlen);
505 			optimize_nops(instr, insn_buff, a->instrlen);
506 			text_poke_early(instr, insn_buff, a->instrlen);
507 			continue;
508 		}
509 
510 		DPRINTK(ALT, "feat: %d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d) flags: 0x%x",
511 			a->cpuid >> 5,
512 			a->cpuid & 0x1f,
513 			instr, instr, a->instrlen,
514 			replacement, a->replacementlen, a->flags);
515 
516 		memcpy(insn_buff, replacement, a->replacementlen);
517 		insn_buff_sz = a->replacementlen;
518 
519 		if (a->flags & ALT_FLAG_DIRECT_CALL) {
520 			insn_buff_sz = alt_replace_call(instr, insn_buff, a);
521 			if (insn_buff_sz < 0)
522 				continue;
523 		}
524 
525 		for (; insn_buff_sz < a->instrlen; insn_buff_sz++)
526 			insn_buff[insn_buff_sz] = 0x90;
527 
528 		apply_relocation(insn_buff, instr, a->instrlen, replacement, a->replacementlen);
529 
530 		DUMP_BYTES(ALT, instr, a->instrlen, "%px:   old_insn: ", instr);
531 		DUMP_BYTES(ALT, replacement, a->replacementlen, "%px:   rpl_insn: ", replacement);
532 		DUMP_BYTES(ALT, insn_buff, insn_buff_sz, "%px: final_insn: ", instr);
533 
534 		text_poke_early(instr, insn_buff, insn_buff_sz);
535 	}
536 
537 	kasan_enable_current();
538 }
539 
540 static inline bool is_jcc32(struct insn *insn)
541 {
542 	/* Jcc.d32 second opcode byte is in the range: 0x80-0x8f */
543 	return insn->opcode.bytes[0] == 0x0f && (insn->opcode.bytes[1] & 0xf0) == 0x80;
544 }
545 
546 #if defined(CONFIG_MITIGATION_RETPOLINE) && defined(CONFIG_OBJTOOL)
547 
548 /*
549  * CALL/JMP *%\reg
550  */
551 static int emit_indirect(int op, int reg, u8 *bytes)
552 {
553 	int i = 0;
554 	u8 modrm;
555 
556 	switch (op) {
557 	case CALL_INSN_OPCODE:
558 		modrm = 0x10; /* Reg = 2; CALL r/m */
559 		break;
560 
561 	case JMP32_INSN_OPCODE:
562 		modrm = 0x20; /* Reg = 4; JMP r/m */
563 		break;
564 
565 	default:
566 		WARN_ON_ONCE(1);
567 		return -1;
568 	}
569 
570 	if (reg >= 8) {
571 		bytes[i++] = 0x41; /* REX.B prefix */
572 		reg -= 8;
573 	}
574 
575 	modrm |= 0xc0; /* Mod = 3 */
576 	modrm += reg;
577 
578 	bytes[i++] = 0xff; /* opcode */
579 	bytes[i++] = modrm;
580 
581 	return i;
582 }
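/*
 * Encoding examples (illustrative): emit_indirect(CALL_INSN_OPCODE, 11, buf)
 * yields 41 ff d3 (call *%r11); emit_indirect(JMP32_INSN_OPCODE, 0, buf)
 * yields ff e0 (jmp *%rax).
 */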
583 
584 static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8 *bytes)
585 {
586 	u8 op = insn->opcode.bytes[0];
587 	int i = 0;
588 
589 	/*
590 	 * Clang does 'weird' Jcc __x86_indirect_thunk_r11 conditional
591 	 * tail-calls. Deal with them.
592 	 */
593 	if (is_jcc32(insn)) {
594 		bytes[i++] = op;
595 		op = insn->opcode.bytes[1];
596 		goto clang_jcc;
597 	}
598 
599 	if (insn->length == 6)
600 		bytes[i++] = 0x2e; /* CS-prefix */
601 
602 	switch (op) {
603 	case CALL_INSN_OPCODE:
604 		__text_gen_insn(bytes+i, op, addr+i,
605 				__x86_indirect_call_thunk_array[reg],
606 				CALL_INSN_SIZE);
607 		i += CALL_INSN_SIZE;
608 		break;
609 
610 	case JMP32_INSN_OPCODE:
611 clang_jcc:
612 		__text_gen_insn(bytes+i, op, addr+i,
613 				__x86_indirect_jump_thunk_array[reg],
614 				JMP32_INSN_SIZE);
615 		i += JMP32_INSN_SIZE;
616 		break;
617 
618 	default:
619 		WARN(1, "%pS %px %*ph\n", addr, addr, 6, addr);
620 		return -1;
621 	}
622 
623 	WARN_ON_ONCE(i != insn->length);
624 
625 	return i;
626 }
627 
628 /*
629  * Rewrite the compiler generated retpoline thunk calls.
630  *
631  * For spectre_v2=off (!X86_FEATURE_RETPOLINE), rewrite them into immediate
632  * indirect instructions, avoiding the extra indirection.
633  *
634  * For example, convert:
635  *
636  *   CALL __x86_indirect_thunk_\reg
637  *
638  * into:
639  *
640  *   CALL *%\reg
641  *
642  * It also tries to inline spectre_v2=retpoline,lfence when size permits.
643  */
644 static int patch_retpoline(void *addr, struct insn *insn, u8 *bytes)
645 {
646 	retpoline_thunk_t *target;
647 	int reg, ret, i = 0;
648 	u8 op, cc;
649 
650 	target = addr + insn->length + insn->immediate.value;
651 	reg = target - __x86_indirect_thunk_array;
652 
653 	if (WARN_ON_ONCE(reg & ~0xf))
654 		return -1;
655 
656 	/* If anyone ever does: CALL/JMP *%rsp, we're in deep trouble. */
657 	BUG_ON(reg == 4);
658 
659 	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE) &&
660 	    !cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
661 		if (cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
662 			return emit_call_track_retpoline(addr, insn, reg, bytes);
663 
664 		return -1;
665 	}
666 
667 	op = insn->opcode.bytes[0];
668 
669 	/*
670 	 * Convert:
671 	 *
672 	 *   Jcc.d32 __x86_indirect_thunk_\reg
673 	 *
674 	 * into:
675 	 *
676 	 *   Jncc.d8 1f
677 	 *   [ LFENCE ]
678 	 *   JMP *%\reg
679 	 *   [ NOP ]
680 	 * 1:
681 	 */
682 	if (is_jcc32(insn)) {
683 		cc = insn->opcode.bytes[1] & 0xf;
684 		cc ^= 1; /* invert condition */
685 
686 		bytes[i++] = 0x70 + cc;        /* Jcc.d8 */
687 		bytes[i++] = insn->length - 2; /* sizeof(Jcc.d8) == 2 */
688 
689 		/* Continue as if: JMP.d32 __x86_indirect_thunk_\reg */
690 		op = JMP32_INSN_OPCODE;
691 	}
692 
693 	/*
694 	 * For RETPOLINE_LFENCE: prepend the indirect CALL/JMP with an LFENCE.
695 	 */
696 	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
697 		bytes[i++] = 0x0f;
698 		bytes[i++] = 0xae;
699 		bytes[i++] = 0xe8; /* LFENCE */
700 	}
701 
702 	ret = emit_indirect(op, reg, bytes + i);
703 	if (ret < 0)
704 		return ret;
705 	i += ret;
706 
707 	/*
708 	 * The compiler is supposed to EMIT an INT3 after every unconditional
709 	 * JMP instruction due to AMD BTC. However, if the compiler is too old
710 	 * or MITIGATION_SLS isn't enabled, we still need an INT3 after
711 	 * indirect JMPs even on Intel.
712 	 */
713 	if (op == JMP32_INSN_OPCODE && i < insn->length)
714 		bytes[i++] = INT3_INSN_OPCODE;
715 
716 	for (; i < insn->length;)
717 		bytes[i++] = BYTES_NOP1;
718 
719 	return i;
720 }
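/*
 * Example (illustrative): a 5-byte "e8 xx xx xx xx" CALL to
 * __x86_indirect_thunk_rax becomes "ff d0 90 90 90" (call *%rax plus NOP
 * padding). The LFENCE variant only fits when the original site is large
 * enough; otherwise the caller's length check leaves the site untouched.
 */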
721 
722 /*
723  * Generated by 'objtool --retpoline'.
724  */
725 void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
726 {
727 	s32 *s;
728 
729 	for (s = start; s < end; s++) {
730 		void *addr = (void *)s + *s;
731 		struct insn insn;
732 		int len, ret;
733 		u8 bytes[16];
734 		u8 op1, op2;
735 
736 		ret = insn_decode_kernel(&insn, addr);
737 		if (WARN_ON_ONCE(ret < 0))
738 			continue;
739 
740 		op1 = insn.opcode.bytes[0];
741 		op2 = insn.opcode.bytes[1];
742 
743 		switch (op1) {
744 		case CALL_INSN_OPCODE:
745 		case JMP32_INSN_OPCODE:
746 			break;
747 
748 		case 0x0f: /* escape */
749 			if (op2 >= 0x80 && op2 <= 0x8f)
750 				break;
751 			fallthrough;
752 		default:
753 			WARN_ON_ONCE(1);
754 			continue;
755 		}
756 
757 		DPRINTK(RETPOLINE, "retpoline at: %pS (%px) len: %d to: %pS",
758 			addr, addr, insn.length,
759 			addr + insn.length + insn.immediate.value);
760 
761 		len = patch_retpoline(addr, &insn, bytes);
762 		if (len == insn.length) {
763 			optimize_nops(addr, bytes, len);
764 			DUMP_BYTES(RETPOLINE, ((u8*)addr),  len, "%px: orig: ", addr);
765 			DUMP_BYTES(RETPOLINE, ((u8*)bytes), len, "%px: repl: ", addr);
766 			text_poke_early(addr, bytes, len);
767 		}
768 	}
769 }
770 
771 #ifdef CONFIG_MITIGATION_RETHUNK
772 
773 /*
774  * Rewrite the compiler generated return thunk tail-calls.
775  *
776  * For example, convert:
777  *
778  *   JMP __x86_return_thunk
779  *
780  * into:
781  *
782  *   RET
783  */
784 static int patch_return(void *addr, struct insn *insn, u8 *bytes)
785 {
786 	int i = 0;
787 
788 	/* Patch the custom return thunks... */
789 	if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
790 		i = JMP32_INSN_SIZE;
791 		__text_gen_insn(bytes, JMP32_INSN_OPCODE, addr, x86_return_thunk, i);
792 	} else {
793 		/* ... or patch them out if not needed. */
794 		bytes[i++] = RET_INSN_OPCODE;
795 	}
796 
797 	for (; i < insn->length;)
798 		bytes[i++] = INT3_INSN_OPCODE;
799 	return i;
800 }
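/*
 * Example (illustrative): with X86_FEATURE_RETHUNK disabled, a 5-byte
 * "jmp __x86_return_thunk" is rewritten to "c3 cc cc cc cc" (RET followed
 * by INT3 padding).
 */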
801 
802 void __init_or_module noinline apply_returns(s32 *start, s32 *end)
803 {
804 	s32 *s;
805 
806 	if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
807 		static_call_force_reinit();
808 
809 	for (s = start; s < end; s++) {
810 		void *dest = NULL, *addr = (void *)s + *s;
811 		struct insn insn;
812 		int len, ret;
813 		u8 bytes[16];
814 		u8 op;
815 
816 		ret = insn_decode_kernel(&insn, addr);
817 		if (WARN_ON_ONCE(ret < 0))
818 			continue;
819 
820 		op = insn.opcode.bytes[0];
821 		if (op == JMP32_INSN_OPCODE)
822 			dest = addr + insn.length + insn.immediate.value;
823 
824 		if (__static_call_fixup(addr, op, dest) ||
825 		    WARN_ONCE(dest != &__x86_return_thunk,
826 			      "missing return thunk: %pS-%pS: %*ph",
827 			      addr, dest, 5, addr))
828 			continue;
829 
830 		DPRINTK(RET, "return thunk at: %pS (%px) len: %d to: %pS",
831 			addr, addr, insn.length,
832 			addr + insn.length + insn.immediate.value);
833 
834 		len = patch_return(addr, &insn, bytes);
835 		if (len == insn.length) {
836 			DUMP_BYTES(RET, ((u8*)addr),  len, "%px: orig: ", addr);
837 			DUMP_BYTES(RET, ((u8*)bytes), len, "%px: repl: ", addr);
838 			text_poke_early(addr, bytes, len);
839 		}
840 	}
841 }
842 #else
843 void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }
844 #endif /* CONFIG_MITIGATION_RETHUNK */
845 
846 #else /* !CONFIG_MITIGATION_RETPOLINE || !CONFIG_OBJTOOL */
847 
848 void __init_or_module noinline apply_retpolines(s32 *start, s32 *end) { }
849 void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }
850 
851 #endif /* CONFIG_MITIGATION_RETPOLINE && CONFIG_OBJTOOL */
852 
853 #ifdef CONFIG_X86_KERNEL_IBT
854 
855 static void poison_cfi(void *addr);
856 
857 static void __init_or_module poison_endbr(void *addr, bool warn)
858 {
859 	u32 endbr, poison = gen_endbr_poison();
860 
861 	if (WARN_ON_ONCE(get_kernel_nofault(endbr, addr)))
862 		return;
863 
864 	if (!is_endbr(endbr)) {
865 		WARN_ON_ONCE(warn);
866 		return;
867 	}
868 
869 	DPRINTK(ENDBR, "ENDBR at: %pS (%px)", addr, addr);
870 
871 	/*
872 	 * When we have IBT, the lack of ENDBR will trigger #CP
873 	 */
874 	DUMP_BYTES(ENDBR, ((u8*)addr), 4, "%px: orig: ", addr);
875 	DUMP_BYTES(ENDBR, ((u8*)&poison), 4, "%px: repl: ", addr);
876 	text_poke_early(addr, &poison, 4);
877 }
878 
879 /*
880  * Generated by: objtool --ibt
881  *
882  * Seal the functions for indirect calls by clobbering the ENDBR instructions
883  * and the kCFI hash value.
884  */
885 void __init_or_module noinline apply_seal_endbr(s32 *start, s32 *end)
886 {
887 	s32 *s;
888 
889 	for (s = start; s < end; s++) {
890 		void *addr = (void *)s + *s;
891 
892 		poison_endbr(addr, true);
893 		if (IS_ENABLED(CONFIG_FINEIBT))
894 			poison_cfi(addr - 16);
895 	}
896 }
897 
898 #else
899 
900 void __init_or_module apply_seal_endbr(s32 *start, s32 *end) { }
901 
902 #endif /* CONFIG_X86_KERNEL_IBT */
903 
904 #ifdef CONFIG_CFI_AUTO_DEFAULT
905 #define __CFI_DEFAULT	CFI_AUTO
906 #elif defined(CONFIG_CFI_CLANG)
907 #define __CFI_DEFAULT	CFI_KCFI
908 #else
909 #define __CFI_DEFAULT	CFI_OFF
910 #endif
911 
912 enum cfi_mode cfi_mode __ro_after_init = __CFI_DEFAULT;
913 
914 #ifdef CONFIG_CFI_CLANG
915 struct bpf_insn;
916 
917 /* Must match bpf_func_t / DEFINE_BPF_PROG_RUN() */
918 extern unsigned int __bpf_prog_runX(const void *ctx,
919 				    const struct bpf_insn *insn);
920 
921 /*
922  * Force a reference to the external symbol so the compiler generates
923  * __kcfi_typid.
924  */
925 __ADDRESSABLE(__bpf_prog_runX);
926 
927 /* u32 __ro_after_init cfi_bpf_hash = __kcfi_typeid___bpf_prog_runX; */
928 asm (
929 "	.pushsection	.data..ro_after_init,\"aw\",@progbits	\n"
930 "	.type	cfi_bpf_hash,@object				\n"
931 "	.globl	cfi_bpf_hash					\n"
932 "	.p2align	2, 0x0					\n"
933 "cfi_bpf_hash:							\n"
934 "	.long	__kcfi_typeid___bpf_prog_runX			\n"
935 "	.size	cfi_bpf_hash, 4					\n"
936 "	.popsection						\n"
937 );
938 
939 /* Must match bpf_callback_t */
940 extern u64 __bpf_callback_fn(u64, u64, u64, u64, u64);
941 
942 __ADDRESSABLE(__bpf_callback_fn);
943 
944 /* u32 __ro_after_init cfi_bpf_subprog_hash = __kcfi_typeid___bpf_callback_fn; */
945 asm (
946 "	.pushsection	.data..ro_after_init,\"aw\",@progbits	\n"
947 "	.type	cfi_bpf_subprog_hash,@object			\n"
948 "	.globl	cfi_bpf_subprog_hash				\n"
949 "	.p2align	2, 0x0					\n"
950 "cfi_bpf_subprog_hash:						\n"
951 "	.long	__kcfi_typeid___bpf_callback_fn			\n"
952 "	.size	cfi_bpf_subprog_hash, 4				\n"
953 "	.popsection						\n"
954 );
955 
956 u32 cfi_get_func_hash(void *func)
957 {
958 	u32 hash;
959 
960 	func -= cfi_get_offset();
961 	switch (cfi_mode) {
962 	case CFI_FINEIBT:
963 		func += 7;
964 		break;
965 	case CFI_KCFI:
966 		func += 1;
967 		break;
968 	default:
969 		return 0;
970 	}
971 
972 	if (get_kernel_nofault(hash, func))
973 		return 0;
974 
975 	return hash;
976 }
977 #endif
978 
979 #ifdef CONFIG_FINEIBT
980 
981 static bool cfi_rand __ro_after_init = true;
982 static u32  cfi_seed __ro_after_init;
983 
984 /*
985  * Re-hash the CFI hash with a boot-time seed while making sure the result is
986  * not a valid ENDBR instruction.
987  */
988 static u32 cfi_rehash(u32 hash)
989 {
990 	hash ^= cfi_seed;
991 	while (unlikely(is_endbr(hash) || is_endbr(-hash))) {
992 		bool lsb = hash & 1;
993 		hash >>= 1;
994 		if (lsb)
995 			hash ^= 0x80200003;
996 	}
997 	return hash;
998 }
999 
1000 static __init int cfi_parse_cmdline(char *str)
1001 {
1002 	if (!str)
1003 		return -EINVAL;
1004 
1005 	while (str) {
1006 		char *next = strchr(str, ',');
1007 		if (next) {
1008 			*next = 0;
1009 			next++;
1010 		}
1011 
1012 		if (!strcmp(str, "auto")) {
1013 			cfi_mode = CFI_AUTO;
1014 		} else if (!strcmp(str, "off")) {
1015 			cfi_mode = CFI_OFF;
1016 			cfi_rand = false;
1017 		} else if (!strcmp(str, "kcfi")) {
1018 			cfi_mode = CFI_KCFI;
1019 		} else if (!strcmp(str, "fineibt")) {
1020 			cfi_mode = CFI_FINEIBT;
1021 		} else if (!strcmp(str, "norand")) {
1022 			cfi_rand = false;
1023 		} else {
1024 			pr_err("Ignoring unknown cfi option (%s).", str);
1025 		}
1026 
1027 		str = next;
1028 	}
1029 
1030 	return 0;
1031 }
1032 early_param("cfi", cfi_parse_cmdline);
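/*
 * Example (illustrative): booting with "cfi=fineibt,norand" selects FineIBT
 * while keeping the compile-time hashes; "cfi=off" disables both CFI and the
 * hash randomization.
 */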
1033 
1034 /*
1035  * kCFI						FineIBT
1036  *
1037  * __cfi_\func:					__cfi_\func:
1038  *	movl   $0x12345678,%eax		// 5	     endbr64			// 4
1039  *	nop					     subl   $0x12345678,%r10d   // 7
1040  *	nop					     jz     1f			// 2
1041  *	nop					     ud2			// 2
1042  *	nop					1:   nop			// 1
1043  *	nop
1044  *	nop
1045  *	nop
1046  *	nop
1047  *	nop
1048  *	nop
1049  *	nop
1050  *
1051  *
1052  * caller:					caller:
1053  *	movl	$(-0x12345678),%r10d	 // 6	     movl   $0x12345678,%r10d	// 6
1054  *	addl	$-15(%r11),%r10d	 // 4	     sub    $16,%r11		// 4
1055  *	je	1f			 // 2	     nop4			// 4
1056  *	ud2				 // 2
1057  * 1:	call	__x86_indirect_thunk_r11 // 5	     call   *%r11; nop2;	// 5
1058  *
1059  */
1060 
1061 asm(	".pushsection .rodata			\n"
1062 	"fineibt_preamble_start:		\n"
1063 	"	endbr64				\n"
1064 	"	subl	$0x12345678, %r10d	\n"
1065 	"	je	fineibt_preamble_end	\n"
1066 	"	ud2				\n"
1067 	"	nop				\n"
1068 	"fineibt_preamble_end:			\n"
1069 	".popsection\n"
1070 );
1071 
1072 extern u8 fineibt_preamble_start[];
1073 extern u8 fineibt_preamble_end[];
1074 
1075 #define fineibt_preamble_size (fineibt_preamble_end - fineibt_preamble_start)
1076 #define fineibt_preamble_hash 7
1077 
1078 asm(	".pushsection .rodata			\n"
1079 	"fineibt_caller_start:			\n"
1080 	"	movl	$0x12345678, %r10d	\n"
1081 	"	sub	$16, %r11		\n"
1082 	ASM_NOP4
1083 	"fineibt_caller_end:			\n"
1084 	".popsection				\n"
1085 );
1086 
1087 extern u8 fineibt_caller_start[];
1088 extern u8 fineibt_caller_end[];
1089 
1090 #define fineibt_caller_size (fineibt_caller_end - fineibt_caller_start)
1091 #define fineibt_caller_hash 2
1092 
1093 #define fineibt_caller_jmp (fineibt_caller_size - 2)
1094 
1095 static u32 decode_preamble_hash(void *addr)
1096 {
1097 	u8 *p = addr;
1098 
1099 	/* b8 78 56 34 12          mov    $0x12345678,%eax */
1100 	if (p[0] == 0xb8)
1101 		return *(u32 *)(addr + 1);
1102 
1103 	return 0; /* invalid hash value */
1104 }
1105 
1106 static u32 decode_caller_hash(void *addr)
1107 {
1108 	u8 *p = addr;
1109 
1110 	/* 41 ba 78 56 34 12       mov    $0x12345678,%r10d */
1111 	if (p[0] == 0x41 && p[1] == 0xba)
1112 		return -*(u32 *)(addr + 2);
1113 
1114 	/* eb 0c 78 56 34 12	   jmp.d8  +12 */
1115 	if (p[0] == JMP8_INSN_OPCODE && p[1] == fineibt_caller_jmp)
1116 		return -*(u32 *)(addr + 2);
1117 
1118 	return 0; /* invalid hash value */
1119 }
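/*
 * Note the negation above: kCFI callers load -hash into %r10d so the check
 * reduces to an addl against the preamble's +hash and a compare with zero;
 * returning -(-hash) recovers the positive hash used by the rest of this
 * code.
 */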
1120 
1121 /* .retpoline_sites */
1122 static int cfi_disable_callers(s32 *start, s32 *end)
1123 {
1124 	/*
1125 	 * Disable kCFI by patching in a JMP.d8; this leaves the hash immediate
1126 	 * intact for later usage. Also see decode_caller_hash() and
1127 	 * cfi_rewrite_callers().
1128 	 */
1129 	const u8 jmp[] = { JMP8_INSN_OPCODE, fineibt_caller_jmp };
1130 	s32 *s;
1131 
1132 	for (s = start; s < end; s++) {
1133 		void *addr = (void *)s + *s;
1134 		u32 hash;
1135 
1136 		addr -= fineibt_caller_size;
1137 		hash = decode_caller_hash(addr);
1138 		if (!hash) /* nocfi callers */
1139 			continue;
1140 
1141 		text_poke_early(addr, jmp, 2);
1142 	}
1143 
1144 	return 0;
1145 }
1146 
1147 static int cfi_enable_callers(s32 *start, s32 *end)
1148 {
1149 	/*
1150 	 * Re-enable kCFI, undo what cfi_disable_callers() did.
1151 	 */
1152 	const u8 mov[] = { 0x41, 0xba };
1153 	s32 *s;
1154 
1155 	for (s = start; s < end; s++) {
1156 		void *addr = (void *)s + *s;
1157 		u32 hash;
1158 
1159 		addr -= fineibt_caller_size;
1160 		hash = decode_caller_hash(addr);
1161 		if (!hash) /* nocfi callers */
1162 			continue;
1163 
1164 		text_poke_early(addr, mov, 2);
1165 	}
1166 
1167 	return 0;
1168 }
1169 
1170 /* .cfi_sites */
1171 static int cfi_rand_preamble(s32 *start, s32 *end)
1172 {
1173 	s32 *s;
1174 
1175 	for (s = start; s < end; s++) {
1176 		void *addr = (void *)s + *s;
1177 		u32 hash;
1178 
1179 		hash = decode_preamble_hash(addr);
1180 		if (WARN(!hash, "no CFI hash found at: %pS %px %*ph\n",
1181 			 addr, addr, 5, addr))
1182 			return -EINVAL;
1183 
1184 		hash = cfi_rehash(hash);
1185 		text_poke_early(addr + 1, &hash, 4);
1186 	}
1187 
1188 	return 0;
1189 }
1190 
1191 static int cfi_rewrite_preamble(s32 *start, s32 *end)
1192 {
1193 	s32 *s;
1194 
1195 	for (s = start; s < end; s++) {
1196 		void *addr = (void *)s + *s;
1197 		u32 hash;
1198 
1199 		hash = decode_preamble_hash(addr);
1200 		if (WARN(!hash, "no CFI hash found at: %pS %px %*ph\n",
1201 			 addr, addr, 5, addr))
1202 			return -EINVAL;
1203 
1204 		text_poke_early(addr, fineibt_preamble_start, fineibt_preamble_size);
1205 		WARN_ON(*(u32 *)(addr + fineibt_preamble_hash) != 0x12345678);
1206 		text_poke_early(addr + fineibt_preamble_hash, &hash, 4);
1207 	}
1208 
1209 	return 0;
1210 }
1211 
1212 static void cfi_rewrite_endbr(s32 *start, s32 *end)
1213 {
1214 	s32 *s;
1215 
1216 	for (s = start; s < end; s++) {
1217 		void *addr = (void *)s + *s;
1218 
1219 		poison_endbr(addr+16, false);
1220 	}
1221 }
1222 
1223 /* .retpoline_sites */
1224 static int cfi_rand_callers(s32 *start, s32 *end)
1225 {
1226 	s32 *s;
1227 
1228 	for (s = start; s < end; s++) {
1229 		void *addr = (void *)s + *s;
1230 		u32 hash;
1231 
1232 		addr -= fineibt_caller_size;
1233 		hash = decode_caller_hash(addr);
1234 		if (hash) {
1235 			hash = -cfi_rehash(hash);
1236 			text_poke_early(addr + 2, &hash, 4);
1237 		}
1238 	}
1239 
1240 	return 0;
1241 }
1242 
1243 static int cfi_rewrite_callers(s32 *start, s32 *end)
1244 {
1245 	s32 *s;
1246 
1247 	for (s = start; s < end; s++) {
1248 		void *addr = (void *)s + *s;
1249 		u32 hash;
1250 
1251 		addr -= fineibt_caller_size;
1252 		hash = decode_caller_hash(addr);
1253 		if (hash) {
1254 			text_poke_early(addr, fineibt_caller_start, fineibt_caller_size);
1255 			WARN_ON(*(u32 *)(addr + fineibt_caller_hash) != 0x12345678);
1256 			text_poke_early(addr + fineibt_caller_hash, &hash, 4);
1257 		}
1258 		/* rely on apply_retpolines() */
1259 	}
1260 
1261 	return 0;
1262 }
1263 
1264 static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
1265 			    s32 *start_cfi, s32 *end_cfi, bool builtin)
1266 {
1267 	int ret;
1268 
1269 	if (WARN_ONCE(fineibt_preamble_size != 16,
1270 		      "FineIBT preamble wrong size: %ld", fineibt_preamble_size))
1271 		return;
1272 
1273 	if (cfi_mode == CFI_AUTO) {
1274 		cfi_mode = CFI_KCFI;
1275 		if (HAS_KERNEL_IBT && cpu_feature_enabled(X86_FEATURE_IBT))
1276 			cfi_mode = CFI_FINEIBT;
1277 	}
1278 
1279 	/*
1280 	 * Rewrite the callers to not use the __cfi_ stubs, such that we might
1281 	 * rewrite them. This disables all CFI. If this succeeds but any of the
1282 	 * later stages fails, we're without CFI.
1283 	 */
1284 	ret = cfi_disable_callers(start_retpoline, end_retpoline);
1285 	if (ret)
1286 		goto err;
1287 
1288 	if (cfi_rand) {
1289 		if (builtin) {
1290 			cfi_seed = get_random_u32();
1291 			cfi_bpf_hash = cfi_rehash(cfi_bpf_hash);
1292 			cfi_bpf_subprog_hash = cfi_rehash(cfi_bpf_subprog_hash);
1293 		}
1294 
1295 		ret = cfi_rand_preamble(start_cfi, end_cfi);
1296 		if (ret)
1297 			goto err;
1298 
1299 		ret = cfi_rand_callers(start_retpoline, end_retpoline);
1300 		if (ret)
1301 			goto err;
1302 	}
1303 
1304 	switch (cfi_mode) {
1305 	case CFI_OFF:
1306 		if (builtin)
1307 			pr_info("Disabling CFI\n");
1308 		return;
1309 
1310 	case CFI_KCFI:
1311 		ret = cfi_enable_callers(start_retpoline, end_retpoline);
1312 		if (ret)
1313 			goto err;
1314 
1315 		if (builtin)
1316 			pr_info("Using kCFI\n");
1317 		return;
1318 
1319 	case CFI_FINEIBT:
1320 		/* place the FineIBT preamble at func()-16 */
1321 		ret = cfi_rewrite_preamble(start_cfi, end_cfi);
1322 		if (ret)
1323 			goto err;
1324 
1325 		/* rewrite the callers to target func()-16 */
1326 		ret = cfi_rewrite_callers(start_retpoline, end_retpoline);
1327 		if (ret)
1328 			goto err;
1329 
1330 		/* now that nobody targets func()+0, remove ENDBR there */
1331 		cfi_rewrite_endbr(start_cfi, end_cfi);
1332 
1333 		if (builtin)
1334 			pr_info("Using FineIBT CFI\n");
1335 		return;
1336 
1337 	default:
1338 		break;
1339 	}
1340 
1341 err:
1342 	pr_err("Something went horribly wrong trying to rewrite the CFI implementation.\n");
1343 }
1344 
1345 static inline void poison_hash(void *addr)
1346 {
1347 	*(u32 *)addr = 0;
1348 }
1349 
1350 static void poison_cfi(void *addr)
1351 {
1352 	switch (cfi_mode) {
1353 	case CFI_FINEIBT:
1354 		/*
1355 		 * __cfi_\func:
1356 		 *	osp nopl (%rax)
1357 		 *	subl	$0, %r10d
1358 		 *	jz	1f
1359 		 *	ud2
1360 		 * 1:	nop
1361 		 */
1362 		poison_endbr(addr, false);
1363 		poison_hash(addr + fineibt_preamble_hash);
1364 		break;
1365 
1366 	case CFI_KCFI:
1367 		/*
1368 		 * __cfi_\func:
1369 		 *	movl	$0, %eax
1370 		 *	.skip	11, 0x90
1371 		 */
1372 		poison_hash(addr + 1);
1373 		break;
1374 
1375 	default:
1376 		break;
1377 	}
1378 }
1379 
1380 #else
1381 
1382 static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
1383 			    s32 *start_cfi, s32 *end_cfi, bool builtin)
1384 {
1385 }
1386 
1387 #ifdef CONFIG_X86_KERNEL_IBT
1388 static void poison_cfi(void *addr) { }
1389 #endif
1390 
1391 #endif
1392 
1393 void apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
1394 		   s32 *start_cfi, s32 *end_cfi)
1395 {
1396 	return __apply_fineibt(start_retpoline, end_retpoline,
1397 			       start_cfi, end_cfi,
1398 			       /* .builtin = */ false);
1399 }
1400 
1401 #ifdef CONFIG_SMP
1402 static void alternatives_smp_lock(const s32 *start, const s32 *end,
1403 				  u8 *text, u8 *text_end)
1404 {
1405 	const s32 *poff;
1406 
1407 	for (poff = start; poff < end; poff++) {
1408 		u8 *ptr = (u8 *)poff + *poff;
1409 
1410 		if (!*poff || ptr < text || ptr >= text_end)
1411 			continue;
1412 		/* turn DS segment override prefix into lock prefix */
1413 		if (*ptr == 0x3e)
1414 			text_poke(ptr, ((unsigned char []){0xf0}), 1);
1415 	}
1416 }
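/*
 * Byte-level view (illustrative): "3e 0f b1 17" (ds-prefixed cmpxchg
 * %edx,(%rdi)) becomes "f0 0f b1 17" (lock cmpxchg) by rewriting only the
 * prefix byte, which is why the patch is a single atomically-written byte.
 */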
1417 
1418 static void alternatives_smp_unlock(const s32 *start, const s32 *end,
1419 				    u8 *text, u8 *text_end)
1420 {
1421 	const s32 *poff;
1422 
1423 	for (poff = start; poff < end; poff++) {
1424 		u8 *ptr = (u8 *)poff + *poff;
1425 
1426 		if (!*poff || ptr < text || ptr >= text_end)
1427 			continue;
1428 		/* turn lock prefix into DS segment override prefix */
1429 		if (*ptr == 0xf0)
1430 			text_poke(ptr, ((unsigned char []){0x3E}), 1);
1431 	}
1432 }
1433 
1434 struct smp_alt_module {
1435 	/* what is this ??? */
1436 	struct module	*mod;
1437 	char		*name;
1438 
1439 	/* ptrs to lock prefixes */
1440 	const s32	*locks;
1441 	const s32	*locks_end;
1442 
1443 	/* .text segment, needed to avoid patching init code ;) */
1444 	u8		*text;
1445 	u8		*text_end;
1446 
1447 	struct list_head next;
1448 };
1449 static LIST_HEAD(smp_alt_modules);
1450 static bool uniproc_patched = false;	/* protected by text_mutex */
1451 
1452 void __init_or_module alternatives_smp_module_add(struct module *mod,
1453 						  char *name,
1454 						  void *locks, void *locks_end,
1455 						  void *text,  void *text_end)
1456 {
1457 	struct smp_alt_module *smp;
1458 
1459 	mutex_lock(&text_mutex);
1460 	if (!uniproc_patched)
1461 		goto unlock;
1462 
1463 	if (num_possible_cpus() == 1)
1464 		/* Don't bother remembering, we'll never have to undo it. */
1465 		goto smp_unlock;
1466 
1467 	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
1468 	if (NULL == smp)
1469 		/* we'll run the (safe but slow) SMP code then ... */
1470 		goto unlock;
1471 
1472 	smp->mod	= mod;
1473 	smp->name	= name;
1474 	smp->locks	= locks;
1475 	smp->locks_end	= locks_end;
1476 	smp->text	= text;
1477 	smp->text_end	= text_end;
1478 	DPRINTK(SMP, "locks %p -> %p, text %p -> %p, name %s\n",
1479 		smp->locks, smp->locks_end,
1480 		smp->text, smp->text_end, smp->name);
1481 
1482 	list_add_tail(&smp->next, &smp_alt_modules);
1483 smp_unlock:
1484 	alternatives_smp_unlock(locks, locks_end, text, text_end);
1485 unlock:
1486 	mutex_unlock(&text_mutex);
1487 }
1488 
1489 void __init_or_module alternatives_smp_module_del(struct module *mod)
1490 {
1491 	struct smp_alt_module *item;
1492 
1493 	mutex_lock(&text_mutex);
1494 	list_for_each_entry(item, &smp_alt_modules, next) {
1495 		if (mod != item->mod)
1496 			continue;
1497 		list_del(&item->next);
1498 		kfree(item);
1499 		break;
1500 	}
1501 	mutex_unlock(&text_mutex);
1502 }
1503 
1504 void alternatives_enable_smp(void)
1505 {
1506 	struct smp_alt_module *mod;
1507 
1508 	/* Why bother if there are no other CPUs? */
1509 	BUG_ON(num_possible_cpus() == 1);
1510 
1511 	mutex_lock(&text_mutex);
1512 
1513 	if (uniproc_patched) {
1514 		pr_info("switching to SMP code\n");
1515 		BUG_ON(num_online_cpus() != 1);
1516 		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
1517 		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
1518 		list_for_each_entry(mod, &smp_alt_modules, next)
1519 			alternatives_smp_lock(mod->locks, mod->locks_end,
1520 					      mod->text, mod->text_end);
1521 		uniproc_patched = false;
1522 	}
1523 	mutex_unlock(&text_mutex);
1524 }
1525 
1526 /*
1527  * Return 1 if the address range is reserved for SMP-alternatives.
1528  * Must hold text_mutex.
1529  */
1530 int alternatives_text_reserved(void *start, void *end)
1531 {
1532 	struct smp_alt_module *mod;
1533 	const s32 *poff;
1534 	u8 *text_start = start;
1535 	u8 *text_end = end;
1536 
1537 	lockdep_assert_held(&text_mutex);
1538 
1539 	list_for_each_entry(mod, &smp_alt_modules, next) {
1540 		if (mod->text > text_end || mod->text_end < text_start)
1541 			continue;
1542 		for (poff = mod->locks; poff < mod->locks_end; poff++) {
1543 			const u8 *ptr = (const u8 *)poff + *poff;
1544 
1545 			if (text_start <= ptr && text_end > ptr)
1546 				return 1;
1547 		}
1548 	}
1549 
1550 	return 0;
1551 }
1552 #endif /* CONFIG_SMP */
1553 
1554 /*
1555  * Self-test for the INT3 based CALL emulation code.
1556  *
1557  * This exercises int3_emulate_call() to make sure INT3 pt_regs are set up
1558  * properly and that there is a stack gap between the INT3 frame and the
1559  * previous context. Without this gap doing a virtual PUSH on the interrupted
1560  * stack would corrupt the INT3 IRET frame.
1561  *
1562  * See entry_{32,64}.S for more details.
1563  */
1564 
1565 /*
1566  * We define the int3_magic() function in assembly to control the calling
1567  * convention such that we can 'call' it from assembly.
1568  */
1569 
1570 extern void int3_magic(unsigned int *ptr); /* defined in asm */
1571 
1572 asm (
1573 "	.pushsection	.init.text, \"ax\", @progbits\n"
1574 "	.type		int3_magic, @function\n"
1575 "int3_magic:\n"
1576 	ANNOTATE_NOENDBR
1577 "	movl	$1, (%" _ASM_ARG1 ")\n"
1578 	ASM_RET
1579 "	.size		int3_magic, .-int3_magic\n"
1580 "	.popsection\n"
1581 );
1582 
1583 extern void int3_selftest_ip(void); /* defined in asm below */
1584 
1585 static int __init
1586 int3_exception_notify(struct notifier_block *self, unsigned long val, void *data)
1587 {
1588 	unsigned long selftest = (unsigned long)&int3_selftest_ip;
1589 	struct die_args *args = data;
1590 	struct pt_regs *regs = args->regs;
1591 
1592 	OPTIMIZER_HIDE_VAR(selftest);
1593 
1594 	if (!regs || user_mode(regs))
1595 		return NOTIFY_DONE;
1596 
1597 	if (val != DIE_INT3)
1598 		return NOTIFY_DONE;
1599 
1600 	if (regs->ip - INT3_INSN_SIZE != selftest)
1601 		return NOTIFY_DONE;
1602 
1603 	int3_emulate_call(regs, (unsigned long)&int3_magic);
1604 	return NOTIFY_STOP;
1605 }
1606 
1607 /* Must be noinline to ensure uniqueness of int3_selftest_ip. */
1608 static noinline void __init int3_selftest(void)
1609 {
1610 	static __initdata struct notifier_block int3_exception_nb = {
1611 		.notifier_call	= int3_exception_notify,
1612 		.priority	= INT_MAX-1, /* last */
1613 	};
1614 	unsigned int val = 0;
1615 
1616 	BUG_ON(register_die_notifier(&int3_exception_nb));
1617 
1618 	/*
1619 	 * Basically: int3_magic(&val); but really complicated :-)
1620 	 *
1621 	 * INT3 padded with NOP to CALL_INSN_SIZE. The int3_exception_nb
1622 	 * notifier above will emulate CALL for us.
1623 	 */
1624 	asm volatile ("int3_selftest_ip:\n\t"
1625 		      ANNOTATE_NOENDBR
1626 		      "    int3; nop; nop; nop; nop\n\t"
1627 		      : ASM_CALL_CONSTRAINT
1628 		      : __ASM_SEL_RAW(a, D) (&val)
1629 		      : "memory");
1630 
1631 	BUG_ON(val != 1);
1632 
1633 	unregister_die_notifier(&int3_exception_nb);
1634 }
1635 
1636 static __initdata int __alt_reloc_selftest_addr;
1637 
1638 extern void __init __alt_reloc_selftest(void *arg);
1639 __visible noinline void __init __alt_reloc_selftest(void *arg)
1640 {
1641 	WARN_ON(arg != &__alt_reloc_selftest_addr);
1642 }
1643 
1644 static noinline void __init alt_reloc_selftest(void)
1645 {
1646 	/*
1647 	 * Tests apply_relocation().
1648 	 *
1649 	 * This has a relative immediate (CALL) in a place other than the first
1650 	 * instruction and additionally on x86_64 we get a RIP-relative LEA:
1651 	 *
1652 	 *   lea    0x0(%rip),%rdi  # 5d0: R_X86_64_PC32    .init.data+0x5566c
1653 	 *   call   +0              # 5d5: R_X86_64_PLT32   __alt_reloc_selftest-0x4
1654 	 *
1655 	 * Getting this wrong will either crash and burn or tickle the WARN
1656 	 * above.
1657 	 */
1658 	asm_inline volatile (
1659 		ALTERNATIVE("", "lea %[mem], %%" _ASM_ARG1 "; call __alt_reloc_selftest;", X86_FEATURE_ALWAYS)
1660 		: ASM_CALL_CONSTRAINT
1661 		: [mem] "m" (__alt_reloc_selftest_addr)
1662 		: _ASM_ARG1
1663 	);
1664 }
1665 
1666 void __init alternative_instructions(void)
1667 {
1668 	int3_selftest();
1669 
1670 	/*
1671 	 * The patching is not fully atomic, so try to avoid local
1672 	 * interruptions that might execute the code being patched.
1673 	 * Other CPUs are not running.
1674 	 */
1675 	stop_nmi();
1676 
1677 	/*
1678 	 * Don't stop machine check exceptions while patching.
1679 	 * MCEs only happen when something got corrupted and in this
1680 	 * case we must do something about the corruption.
1681 	 * Ignoring it is worse than an unlikely patching race.
1682 	 * Also machine checks tend to be broadcast and if one CPU
1683 	 * goes into machine check the others follow quickly, so we don't
1684 	 * expect a machine check to cause undue problems during to code
1685 	 * expect a machine check to cause undue problems during code
1686 	 */
1687 
1688 	/*
1689 	 * Make sure to set (artificial) features depending on used paravirt
1690 	 * functions which can later influence alternative patching.
1691 	 */
1692 	paravirt_set_cap();
1693 
1694 	__apply_fineibt(__retpoline_sites, __retpoline_sites_end,
1695 			__cfi_sites, __cfi_sites_end, true);
1696 
1697 	/*
1698 	 * Rewrite the retpolines, must be done before alternatives since
1699 	 * those can rewrite the retpoline thunks.
1700 	 */
1701 	apply_retpolines(__retpoline_sites, __retpoline_sites_end);
1702 	apply_returns(__return_sites, __return_sites_end);
1703 
1704 	apply_alternatives(__alt_instructions, __alt_instructions_end);
1705 
1706 	/*
1707 	 * Now all calls are established. Apply the call thunks if
1708 	 * required.
1709 	 */
1710 	callthunks_patch_builtin_calls();
1711 
1712 	/*
1713 	 * Seal all functions that do not have their address taken.
1714 	 */
1715 	apply_seal_endbr(__ibt_endbr_seal, __ibt_endbr_seal_end);
1716 
1717 #ifdef CONFIG_SMP
1718 	/* Patch to UP if other cpus not imminent. */
1719 	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
1720 		uniproc_patched = true;
1721 		alternatives_smp_module_add(NULL, "core kernel",
1722 					    __smp_locks, __smp_locks_end,
1723 					    _text, _etext);
1724 	}
1725 
1726 	if (!uniproc_patched || num_possible_cpus() == 1) {
1727 		free_init_pages("SMP alternatives",
1728 				(unsigned long)__smp_locks,
1729 				(unsigned long)__smp_locks_end);
1730 	}
1731 #endif
1732 
1733 	restart_nmi();
1734 	alternatives_patched = 1;
1735 
1736 	alt_reloc_selftest();
1737 }
1738 
1739 /**
1740  * text_poke_early - Update instructions on a live kernel at boot time
1741  * @addr: address to modify
1742  * @opcode: source of the copy
1743  * @len: length to copy
1744  *
1745  * When you use this code to patch more than one byte of an instruction
1746  * you need to make sure that other CPUs cannot execute this code in parallel.
1747  * Also no thread must be currently preempted in the middle of these
1748  * instructions. And on the local CPU you need to be protected against NMI or
1749  * MCE handlers seeing an inconsistent instruction while you patch.
1750  */
1751 void __init_or_module text_poke_early(void *addr, const void *opcode,
1752 				      size_t len)
1753 {
1754 	unsigned long flags;
1755 
1756 	if (boot_cpu_has(X86_FEATURE_NX) &&
1757 	    is_module_text_address((unsigned long)addr)) {
1758 		/*
1759 		 * Modules text is marked initially as non-executable, so the
1760 		 * code cannot be running and speculative code-fetches are
1761 		 * prevented. Just change the code.
1762 		 */
1763 		memcpy(addr, opcode, len);
1764 	} else {
1765 		local_irq_save(flags);
1766 		memcpy(addr, opcode, len);
1767 		sync_core();
1768 		local_irq_restore(flags);
1769 
1770 		/*
1771 		 * Could also do a CLFLUSH here to speed up CPU recovery; but
1772 		 * that causes hangs on some VIA CPUs.
1773 		 */
1774 	}
1775 }
1776 
1777 typedef struct {
1778 	struct mm_struct *mm;
1779 } temp_mm_state_t;
1780 
1781 /*
1782  * Using a temporary mm allows setting temporary mappings that are not
1783  * accessible by other CPUs. Such mappings are needed to perform sensitive
1784  * memory writes that override the kernel memory protections (e.g., W^X),
1785  * without exposing the temporary page-table mappings that are required for
1786  * these write operations to other CPUs. Using a temporary mm also avoids
1787  * TLB shootdowns when the mapping is torn down.
1788  *
1789  * Context: The temporary mm needs to be used exclusively by a single core. To
1790  *          harden security IRQs must be disabled while the temporary mm is
1791  *          loaded, thereby preventing interrupt handler bugs from overriding
1792  *          the kernel memory protection.
1793  */
1794 static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
1795 {
1796 	temp_mm_state_t temp_state;
1797 
1798 	lockdep_assert_irqs_disabled();
1799 
1800 	/*
1801 	 * Make sure not to be in TLB lazy mode, as otherwise we'll end up
1802 	 * with a stale address space WITHOUT being in lazy mode after
1803 	 * restoring the previous mm.
1804 	 */
1805 	if (this_cpu_read(cpu_tlbstate_shared.is_lazy))
1806 		leave_mm();
1807 
1808 	temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
1809 	switch_mm_irqs_off(NULL, mm, current);
1810 
1811 	/*
1812 	 * If breakpoints are enabled, disable them while the temporary mm is
1813 	 * used. Userspace might set up watchpoints on addresses that are used
1814 	 * in the temporary mm, which would lead to wrong signals being sent or
1815 	 * crashes.
1816 	 *
1817 	 * Note that breakpoints are not disabled selectively, which also causes
1818 	 * kernel breakpoints (e.g., perf's) to be disabled. This might be
1819 	 * undesirable, but still seems reasonable as the code that runs in the
1820 	 * temporary mm should be short.
1821 	 */
1822 	if (hw_breakpoint_active())
1823 		hw_breakpoint_disable();
1824 
1825 	return temp_state;
1826 }
1827 
1828 static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
1829 {
1830 	lockdep_assert_irqs_disabled();
1831 	switch_mm_irqs_off(NULL, prev_state.mm, current);
1832 
1833 	/*
1834 	 * Restore the breakpoints if they were disabled before the temporary mm
1835 	 * was loaded.
1836 	 */
1837 	if (hw_breakpoint_active())
1838 		hw_breakpoint_restore();
1839 }
1840 
1841 __ro_after_init struct mm_struct *poking_mm;
1842 __ro_after_init unsigned long poking_addr;
1843 
1844 static void text_poke_memcpy(void *dst, const void *src, size_t len)
1845 {
1846 	memcpy(dst, src, len);
1847 }
1848 
1849 static void text_poke_memset(void *dst, const void *src, size_t len)
1850 {
1851 	int c = *(const int *)src;
1852 
1853 	memset(dst, c, len);
1854 }
1855 
1856 typedef void text_poke_f(void *dst, const void *src, size_t len);
1857 
1858 static void *__text_poke(text_poke_f func, void *addr, const void *src, size_t len)
1859 {
1860 	bool cross_page_boundary = offset_in_page(addr) + len > PAGE_SIZE;
1861 	struct page *pages[2] = {NULL};
1862 	temp_mm_state_t prev;
1863 	unsigned long flags;
1864 	pte_t pte, *ptep;
1865 	spinlock_t *ptl;
1866 	pgprot_t pgprot;
1867 
1868 	/*
1869 	 * While the boot memory allocator is running we cannot use struct pages as
1870 	 * they are not yet initialized. There is no way to recover.
1871 	 */
1872 	BUG_ON(!after_bootmem);
1873 
1874 	if (!core_kernel_text((unsigned long)addr)) {
1875 		pages[0] = vmalloc_to_page(addr);
1876 		if (cross_page_boundary)
1877 			pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
1878 	} else {
1879 		pages[0] = virt_to_page(addr);
1880 		WARN_ON(!PageReserved(pages[0]));
1881 		if (cross_page_boundary)
1882 			pages[1] = virt_to_page(addr + PAGE_SIZE);
1883 	}
1884 	/*
1885 	 * If something went wrong, crash and burn since recovery paths are not
1886 	 * implemented.
1887 	 */
1888 	BUG_ON(!pages[0] || (cross_page_boundary && !pages[1]));
1889 
1890 	/*
1891 	 * Map the page without the global bit, as TLB flushing is done with
1892 	 * flush_tlb_mm_range(), which is intended for non-global PTEs.
1893 	 */
1894 	pgprot = __pgprot(pgprot_val(PAGE_KERNEL) & ~_PAGE_GLOBAL);
1895 
1896 	/*
1897 	 * The lock is not really needed, but it allows us to avoid open-coding.
1898 	 */
1899 	ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
1900 
1901 	/*
1902 	 * This must not fail; preallocated in poking_init().
1903 	 */
1904 	VM_BUG_ON(!ptep);
1905 
1906 	local_irq_save(flags);
1907 
1908 	pte = mk_pte(pages[0], pgprot);
1909 	set_pte_at(poking_mm, poking_addr, ptep, pte);
1910 
1911 	if (cross_page_boundary) {
1912 		pte = mk_pte(pages[1], pgprot);
1913 		set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
1914 	}
1915 
1916 	/*
1917 	 * Loading the temporary mm behaves as a compiler barrier, which
1918 	 * guarantees that the PTE will be set at the time memcpy() is done.
1919 	 */
1920 	prev = use_temporary_mm(poking_mm);
1921 
1922 	kasan_disable_current();
1923 	func((u8 *)poking_addr + offset_in_page(addr), src, len);
1924 	kasan_enable_current();
1925 
1926 	/*
1927 	 * Ensure that the PTE is only cleared after the memcpy() stores have
1928 	 * been issued, by using a compiler barrier.
1929 	 */
1930 	barrier();
1931 
1932 	pte_clear(poking_mm, poking_addr, ptep);
1933 	if (cross_page_boundary)
1934 		pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1);
1935 
1936 	/*
1937 	 * Loading the previous page-table hierarchy requires a serializing
1938 	 * instruction that already allows the core to see the updated version.
1939 	 * Xen-PV is assumed to serialize execution in a similar manner.
1940 	 */
1941 	unuse_temporary_mm(prev);
1942 
1943 	/*
1944 	 * Flushing the TLB might involve IPIs, which would require IRQs to be
1945 	 * enabled, but not for an mm that is no longer used anywhere, as here.
1946 	 */
1947 	flush_tlb_mm_range(poking_mm, poking_addr, poking_addr +
1948 			   (cross_page_boundary ? 2 : 1) * PAGE_SIZE,
1949 			   PAGE_SHIFT, false);
1950 
1951 	if (func == text_poke_memcpy) {
1952 		/*
1953 		 * If the text does not match what we just wrote then something is
1954 		 * fundamentally screwy; there's nothing we can really do about that.
1955 		 */
1956 		BUG_ON(memcmp(addr, src, len));
1957 	}
1958 
1959 	local_irq_restore(flags);
1960 	pte_unmap_unlock(ptep, ptl);
1961 	return addr;
1962 }
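/*
 * Editor's worked example for the cross_page_boundary logic above:
 * poking len == 4 bytes at an address with page offset 0xffe gives
 * offset_in_page(addr) + len == 0x1002 > PAGE_SIZE (0x1000), so both
 * pages are mapped at poking_addr and poking_addr + PAGE_SIZE and the
 * single func() call at poking_addr + 0xffe writes two bytes into each.
 */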
1963 
1964 /**
1965  * text_poke - Update instructions on a live kernel
1966  * @addr: address to modify
1967  * @opcode: source of the copy
1968  * @len: length to copy
1969  *
1970  * Only atomic text poke/set should be allowed when not doing early patching.
1971  * This means the size must be writable atomically and the address must be aligned
1972  * in a way that permits an atomic write. It also makes sure we fit on a single
1973  * page.
1974  *
1975  * Note that the caller must ensure that if the modified code is part of a
1976  * module, the module is not removed while it is being poked. This can be achieved
1977  * by registering a module notifier, and ordering module removal and patching
1978  * through a mutex.
1979  */
1980 void *text_poke(void *addr, const void *opcode, size_t len)
1981 {
1982 	lockdep_assert_held(&text_mutex);
1983 
1984 	return __text_poke(text_poke_memcpy, addr, opcode, len);
1985 }
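/*
 * Editor's usage sketch (example_patch_byte() is a hypothetical name):
 * text_poke() asserts that text_mutex is held, so a typical caller
 * looks like this. A one-byte write is always atomic, satisfying the
 * constraints described above.
 */
static void example_patch_byte(void *addr)
{
	u8 nop = 0x90;				/* BYTES_NOP1 */

	mutex_lock(&text_mutex);
	text_poke(addr, &nop, 1);
	mutex_unlock(&text_mutex);
}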
1986 
1987 /**
1988  * text_poke_kgdb - Update instructions on a live kernel by kgdb
1989  * @addr: address to modify
1990  * @opcode: source of the copy
1991  * @len: length to copy
1992  *
1993  * Only atomic text poke/set should be allowed when not doing early patching.
1994  * It means the size must be writable atomically and the address must be aligned
1995  * in a way that permits an atomic write. It also makes sure we fit on a single
1996  * page.
1997  *
1998  * Context: should only be used by kgdb, which ensures no other core is running,
1999  *	    even though it does not hold the text_mutex.
2000  */
2001 void *text_poke_kgdb(void *addr, const void *opcode, size_t len)
2002 {
2003 	return __text_poke(text_poke_memcpy, addr, opcode, len);
2004 }
2005 
2006 void *text_poke_copy_locked(void *addr, const void *opcode, size_t len,
2007 			    bool core_ok)
2008 {
2009 	unsigned long start = (unsigned long)addr;
2010 	size_t patched = 0;
2011 
2012 	if (WARN_ON_ONCE(!core_ok && core_kernel_text(start)))
2013 		return NULL;
2014 
2015 	while (patched < len) {
2016 		unsigned long ptr = start + patched;
2017 		size_t s;
2018 
2019 		s = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(ptr), len - patched);
2020 
2021 		__text_poke(text_poke_memcpy, (void *)ptr, opcode + patched, s);
2022 		patched += s;
2023 	}
2024 	return addr;
2025 }
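/*
 * Editor's worked example for the chunking above: with PAGE_SIZE 4096,
 * copying len = 10000 bytes starting at page offset 0x800 proceeds as
 *   s = 2*4096 - 0x800 = 6144, then
 *   s = 2*4096 - 0     = 8192, capped to len - patched = 3856,
 * so every __text_poke() call stays within the two pages it can map.
 */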
2026 
2027 /**
2028  * text_poke_copy - Copy instructions into (an unused part of) RX memory
2029  * @addr: address to modify
2030  * @opcode: source of the copy
2031  * @len: length to copy, could be more than 2x PAGE_SIZE
2032  *
2033  * Not safe against concurrent execution; useful for JITs to dump
2034  * new code blocks into unused regions of RX memory. Can be used in
2035  * conjunction with synchronize_rcu_tasks() to wait for existing
2036  * execution to quiesce after having made sure no existing functions
2037  * pointers are live.
2038  */
2039 void *text_poke_copy(void *addr, const void *opcode, size_t len)
2040 {
2041 	mutex_lock(&text_mutex);
2042 	addr = text_poke_copy_locked(addr, opcode, len, false);
2043 	mutex_unlock(&text_mutex);
2044 	return addr;
2045 }
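/*
 * Editor's usage sketch (example_jit_replace() and the flow are
 * hypothetical): a JIT dumps a new image into an unused RX region,
 * repoints callers, and only then lets the old image quiesce.
 */
static void example_jit_replace(void *new_rx, const void *image, size_t len)
{
	text_poke_copy(new_rx, image, len);	/* nothing executes new_rx yet */
	/* ... switch all live function pointers over to new_rx ... */
	synchronize_rcu_tasks();		/* old code now quiesced */
	/* ... the old region may be freed or poisoned, see text_poke_set() ... */
}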
2046 
2047 /**
2048  * text_poke_set - memset into (an unused part of) RX memory
2049  * @addr: address to modify
2050  * @c: the byte to fill the area with
2051  * @len: length to copy, could be more than 2x PAGE_SIZE
2052  *
2053  * This is useful for overwriting unused regions of RX memory with illegal
2054  * instructions.
2055  */
2056 void *text_poke_set(void *addr, int c, size_t len)
2057 {
2058 	unsigned long start = (unsigned long)addr;
2059 	size_t patched = 0;
2060 
2061 	if (WARN_ON_ONCE(core_kernel_text(start)))
2062 		return NULL;
2063 
2064 	mutex_lock(&text_mutex);
2065 	while (patched < len) {
2066 		unsigned long ptr = start + patched;
2067 		size_t s;
2068 
2069 		s = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(ptr), len - patched);
2070 
2071 		__text_poke(text_poke_memset, (void *)ptr, (void *)&c, s);
2072 		patched += s;
2073 	}
2074 	mutex_unlock(&text_mutex);
2075 	return addr;
2076 }
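/*
 * Editor's usage sketch (example_poison_region() is a hypothetical
 * name): fill a retired RX region with INT3 so that a stale jump into
 * it traps instead of executing leftover bytes.
 */
static void example_poison_region(void *rx_region, size_t len)
{
	text_poke_set(rx_region, 0xcc, len);	/* 0xcc == INT3_INSN_OPCODE */
}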
2077 
2078 static void do_sync_core(void *info)
2079 {
2080 	sync_core();
2081 }
2082 
2083 void text_poke_sync(void)
2084 {
2085 	on_each_cpu(do_sync_core, NULL, 1);
2086 }
2087 
2088 /*
2089  * NOTE: crazy scheme to allow patching Jcc.d32 without increasing the size of
2090  * this thing. When len == 6 everything is prefixed with 0x0f and we map
2091  * opcode to Jcc.d8, using len to distinguish.
2092  */
2093 struct text_poke_loc {
2094 	/* addr := _stext + rel_addr */
2095 	s32 rel_addr;
2096 	s32 disp;
2097 	u8 len;
2098 	u8 opcode;
2099 	const u8 text[POKE_MAX_OPCODE_SIZE];
2100 	/* see text_poke_bp_batch() */
2101 	u8 old;
2102 };
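/*
 * Editor's worked example for the len == 6 scheme: the Jcc.d32
 * encoding of "jnz", 0f 85 <rel32>, is stored with the 0x0f prefix
 * stripped and @opcode mapped to the Jcc.d8 form (0x85 - 0x10 == 0x75,
 * "jnz rel8"), while @len stays 6 so that text_poke_loc_init() and
 * text_poke_bp_batch() below know to re-prepend the 0x0f byte.
 */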
2103 
2104 struct bp_patching_desc {
2105 	struct text_poke_loc *vec;
2106 	int nr_entries;
2107 	atomic_t refs;
2108 };
2109 
2110 static struct bp_patching_desc bp_desc;
2111 
2112 static __always_inline
2113 struct bp_patching_desc *try_get_desc(void)
2114 {
2115 	struct bp_patching_desc *desc = &bp_desc;
2116 
2117 	if (!raw_atomic_inc_not_zero(&desc->refs))
2118 		return NULL;
2119 
2120 	return desc;
2121 }
2122 
2123 static __always_inline void put_desc(void)
2124 {
2125 	struct bp_patching_desc *desc = &bp_desc;
2126 
2127 	smp_mb__before_atomic();
2128 	raw_atomic_dec(&desc->refs);
2129 }
2130 
2131 static __always_inline void *text_poke_addr(struct text_poke_loc *tp)
2132 {
2133 	return _stext + tp->rel_addr;
2134 }
2135 
2136 static __always_inline int patch_cmp(const void *key, const void *elt)
2137 {
2138 	struct text_poke_loc *tp = (struct text_poke_loc *) elt;
2139 
2140 	if (key < text_poke_addr(tp))
2141 		return -1;
2142 	if (key > text_poke_addr(tp))
2143 		return 1;
2144 	return 0;
2145 }
2146 
2147 noinstr int poke_int3_handler(struct pt_regs *regs)
2148 {
2149 	struct bp_patching_desc *desc;
2150 	struct text_poke_loc *tp;
2151 	int ret = 0;
2152 	void *ip;
2153 
2154 	if (user_mode(regs))
2155 		return 0;
2156 
2157 	/*
2158 	 * Having observed our INT3 instruction, we now must observe
2159 	 * bp_desc with non-zero refcount:
2160 	 *
2161 	 *	bp_desc.refs = 1		INT3
2162 	 *	WMB				RMB
2163 	 *	write INT3			if (bp_desc.refs != 0)
2164 	 */
2165 	smp_rmb();
2166 
2167 	desc = try_get_desc();
2168 	if (!desc)
2169 		return 0;
2170 
2171 	/*
2172 	 * Discount the INT3. See text_poke_bp_batch().
2173 	 */
2174 	ip = (void *) regs->ip - INT3_INSN_SIZE;
2175 
2176 	/*
2177 	 * Skip the binary search if there is a single member in the vector.
2178 	 */
2179 	if (unlikely(desc->nr_entries > 1)) {
2180 		tp = __inline_bsearch(ip, desc->vec, desc->nr_entries,
2181 				      sizeof(struct text_poke_loc),
2182 				      patch_cmp);
2183 		if (!tp)
2184 			goto out_put;
2185 	} else {
2186 		tp = desc->vec;
2187 		if (text_poke_addr(tp) != ip)
2188 			goto out_put;
2189 	}
2190 
2191 	ip += tp->len;
2192 
2193 	switch (tp->opcode) {
2194 	case INT3_INSN_OPCODE:
2195 		/*
2196 		 * Someone poked an explicit INT3, they'll want to handle it,
2197 		 * do not consume.
2198 		 */
2199 		goto out_put;
2200 
2201 	case RET_INSN_OPCODE:
2202 		int3_emulate_ret(regs);
2203 		break;
2204 
2205 	case CALL_INSN_OPCODE:
2206 		int3_emulate_call(regs, (long)ip + tp->disp);
2207 		break;
2208 
2209 	case JMP32_INSN_OPCODE:
2210 	case JMP8_INSN_OPCODE:
2211 		int3_emulate_jmp(regs, (long)ip + tp->disp);
2212 		break;
2213 
2214 	case 0x70 ... 0x7f: /* Jcc */
2215 		int3_emulate_jcc(regs, tp->opcode & 0xf, (long)ip, tp->disp);
2216 		break;
2217 
2218 	default:
2219 		BUG();
2220 	}
2221 
2222 	ret = 1;
2223 
2224 out_put:
2225 	put_desc();
2226 	return ret;
2227 }
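/*
 * Editor's worked example for the emulation above: patching a 5-byte
 * CALL at address A means the INT3 traps with regs->ip == A + 1, so
 * ip = regs->ip - INT3_INSN_SIZE == A and ip += tp->len == A + 5.
 * int3_emulate_call() then pushes A + 5 as the return address and
 * jumps to ip + tp->disp, exactly as the finished CALL rel32 would.
 */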
2228 
2229 #define TP_VEC_MAX (PAGE_SIZE / sizeof(struct text_poke_loc))
2230 static struct text_poke_loc tp_vec[TP_VEC_MAX];
2231 static int tp_vec_nr;
2232 
2233 /**
2234  * text_poke_bp_batch() -- update instructions on live kernel on SMP
2235  * @tp:			vector of instructions to patch
2236  * @nr_entries:		number of entries in the vector
2237  *
2238  * Modify multi-byte instructions by using an int3 breakpoint on SMP.
2239  * We completely avoid stop_machine() here, and achieve the
2240  * synchronization using an int3 breakpoint.
2241  *
2242  * The way it is done:
2243  *	- For each entry in the vector:
2244 		- add an int3 trap to the address that will be patched
2245  *	- sync cores
2246  *	- For each entry in the vector:
2247  *		- update all but the first byte of the patched range
2248  *	- sync cores
2249  *	- For each entry in the vector:
2250 		- replace the first byte (int3) with the first byte of
2251 		  the replacement opcode
2252  *	- sync cores
2253  */
2254 static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
2255 {
2256 	unsigned char int3 = INT3_INSN_OPCODE;
2257 	unsigned int i;
2258 	int do_sync;
2259 
2260 	lockdep_assert_held(&text_mutex);
2261 
2262 	bp_desc.vec = tp;
2263 	bp_desc.nr_entries = nr_entries;
2264 
2265 	/*
2266 	 * Corresponds to the implicit memory barrier in try_get_desc() to
2267 	 * ensure reading a non-zero refcount provides up-to-date bp_desc data.
2268 	 */
2269 	atomic_set_release(&bp_desc.refs, 1);
2270 
2271 	/*
2272 	 * Function tracing can enable thousands of places that need to be
2273 	 * updated. This can take quite some time, and with full kernel debugging
2274 	 * enabled, this could cause the softlockup watchdog to trigger.
2275 	 * This function is called once for every 256 entries added to be patched.
2276 	 * Call cond_resched() here to make sure that other tasks can get scheduled
2277 	 * while processing all the functions being patched.
2278 	 */
2279 	cond_resched();
2280 
2281 	/*
2282 	 * The corresponding read barrier is in the int3 notifier, making sure
2283 	 * nr_entries and the handler are correctly ordered wrt. patching.
2284 	 */
2285 	smp_wmb();
2286 
2287 	/*
2288 	 * First step: add an int3 trap to the address that will be patched.
2289 	 */
2290 	for (i = 0; i < nr_entries; i++) {
2291 		tp[i].old = *(u8 *)text_poke_addr(&tp[i]);
2292 		text_poke(text_poke_addr(&tp[i]), &int3, INT3_INSN_SIZE);
2293 	}
2294 
2295 	text_poke_sync();
2296 
2297 	/*
2298 	 * Second step: update all but the first byte of the patched range.
2299 	 */
2300 	for (do_sync = 0, i = 0; i < nr_entries; i++) {
2301 		u8 old[POKE_MAX_OPCODE_SIZE+1] = { tp[i].old, };
2302 		u8 _new[POKE_MAX_OPCODE_SIZE+1];
2303 		const u8 *new = tp[i].text;
2304 		int len = tp[i].len;
2305 
2306 		if (len - INT3_INSN_SIZE > 0) {
2307 			memcpy(old + INT3_INSN_SIZE,
2308 			       text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
2309 			       len - INT3_INSN_SIZE);
2310 
2311 			if (len == 6) {
2312 				_new[0] = 0x0f;
2313 				memcpy(_new + 1, new, 5);
2314 				new = _new;
2315 			}
2316 
2317 			text_poke(text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
2318 				  new + INT3_INSN_SIZE,
2319 				  len - INT3_INSN_SIZE);
2320 
2321 			do_sync++;
2322 		}
2323 
2324 		/*
2325 		 * Emit a perf event to record the text poke, primarily to
2326 		 * support Intel PT decoding which must walk the executable code
2327 		 * to reconstruct the trace. The flow up to here is:
2328 		 *   - write INT3 byte
2329 		 *   - IPI-SYNC
2330 		 *   - write instruction tail
2331 		 * At this point the actual control flow will be through the
2332 		 * INT3 and handler and not hit the old or new instruction.
2333 		 * Intel PT outputs FUP/TIP packets for the INT3, so the flow
2334 		 * can still be decoded. Subsequently:
2335 		 *   - emit RECORD_TEXT_POKE with the new instruction
2336 		 *   - IPI-SYNC
2337 		 *   - write first byte
2338 		 *   - IPI-SYNC
2339 		 * So before the text poke event timestamp, the decoder will see
2340 		 * either the old instruction flow or FUP/TIP of INT3. After the
2341 		 * text poke event timestamp, the decoder will see either the
2342 		 * new instruction flow or FUP/TIP of INT3. Thus decoders can
2343 		 * use the timestamp as the point at which to modify the
2344 		 * executable code.
2345 		 * The old instruction is recorded so that the event can be
2346 		 * processed forwards or backwards.
2347 		 */
2348 		perf_event_text_poke(text_poke_addr(&tp[i]), old, len, new, len);
2349 	}
2350 
2351 	if (do_sync) {
2352 		/*
2353 		 * According to Intel, this core syncing is very likely
2354 		 * not necessary and we'd be safe even without it. But
2355 		 * better safe than sorry (plus there's not only Intel).
2356 		 */
2357 		text_poke_sync();
2358 	}
2359 
2360 	/*
2361 	 * Third step: replace the first byte (int3) with the first byte of
2362 	 * the replacement opcode.
2363 	 */
2364 	for (do_sync = 0, i = 0; i < nr_entries; i++) {
2365 		u8 byte = tp[i].text[0];
2366 
2367 		if (tp[i].len == 6)
2368 			byte = 0x0f;
2369 
2370 		if (byte == INT3_INSN_OPCODE)
2371 			continue;
2372 
2373 		text_poke(text_poke_addr(&tp[i]), &byte, INT3_INSN_SIZE);
2374 		do_sync++;
2375 	}
2376 
2377 	if (do_sync)
2378 		text_poke_sync();
2379 
2380 	/*
2381 	 * Remove and wait for refs to be zero.
2382 	 */
2383 	if (!atomic_dec_and_test(&bp_desc.refs))
2384 		atomic_cond_read_acquire(&bp_desc.refs, !VAL);
2385 }
2386 
2387 static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
2388 			       const void *opcode, size_t len, const void *emulate)
2389 {
2390 	struct insn insn;
2391 	int ret, i = 0;
2392 
2393 	if (len == 6)
2394 		i = 1;
2395 	memcpy((void *)tp->text, opcode+i, len-i);
2396 	if (!emulate)
2397 		emulate = opcode;
2398 
2399 	ret = insn_decode_kernel(&insn, emulate);
2400 	BUG_ON(ret < 0);
2401 
2402 	tp->rel_addr = addr - (void *)_stext;
2403 	tp->len = len;
2404 	tp->opcode = insn.opcode.bytes[0];
2405 
2406 	if (is_jcc32(&insn)) {
2407 		/*
2408 		 * Map Jcc.d32 onto Jcc.d8 and use len to distinguish.
2409 		 */
2410 		tp->opcode = insn.opcode.bytes[1] - 0x10;
2411 	}
2412 
2413 	switch (tp->opcode) {
2414 	case RET_INSN_OPCODE:
2415 	case JMP32_INSN_OPCODE:
2416 	case JMP8_INSN_OPCODE:
2417 		/*
2418 		 * Control flow instructions without implied execution of the
2419 		 * next instruction can be padded with INT3.
2420 		 */
2421 		for (i = insn.length; i < len; i++)
2422 			BUG_ON(tp->text[i] != INT3_INSN_OPCODE);
2423 		break;
2424 
2425 	default:
2426 		BUG_ON(len != insn.length);
2427 	}
2428 
2429 	switch (tp->opcode) {
2430 	case INT3_INSN_OPCODE:
2431 	case RET_INSN_OPCODE:
2432 		break;
2433 
2434 	case CALL_INSN_OPCODE:
2435 	case JMP32_INSN_OPCODE:
2436 	case JMP8_INSN_OPCODE:
2437 	case 0x70 ... 0x7f: /* Jcc */
2438 		tp->disp = insn.immediate.value;
2439 		break;
2440 
2441 	default: /* assume NOP */
2442 		switch (len) {
2443 		case 2: /* NOP2 -- emulate as JMP8+0 */
2444 			BUG_ON(memcmp(emulate, x86_nops[len], len));
2445 			tp->opcode = JMP8_INSN_OPCODE;
2446 			tp->disp = 0;
2447 			break;
2448 
2449 		case 5: /* NOP5 -- emulate as JMP32+0 */
2450 			BUG_ON(memcmp(emulate, x86_nops[len], len));
2451 			tp->opcode = JMP32_INSN_OPCODE;
2452 			tp->disp = 0;
2453 			break;
2454 
2455 		default: /* unknown instruction */
2456 			BUG();
2457 		}
2458 		break;
2459 	}
2460 }
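/*
 * Editor's worked example for the default case above: queueing a
 * 5-byte NOP (0f 1f 44 00 00 on 64-bit) yields tp->opcode ==
 * JMP32_INSN_OPCODE and tp->disp == 0, i.e. the INT3 handler emulates
 * it as "jmp +0" to the next instruction, equivalent to the NOP.
 */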
2461 
2462 /*
2463  * We hard rely on the tp_vec being ordered; ensure this is so by flushing
2464  * early if needed.
2465  */
2466 static bool tp_order_fail(void *addr)
2467 {
2468 	struct text_poke_loc *tp;
2469 
2470 	if (!tp_vec_nr)
2471 		return false;
2472 
2473 	if (!addr) /* force */
2474 		return true;
2475 
2476 	tp = &tp_vec[tp_vec_nr - 1];
2477 	if ((unsigned long)text_poke_addr(tp) > (unsigned long)addr)
2478 		return true;
2479 
2480 	return false;
2481 }
2482 
2483 static void text_poke_flush(void *addr)
2484 {
2485 	if (tp_vec_nr == TP_VEC_MAX || tp_order_fail(addr)) {
2486 		text_poke_bp_batch(tp_vec, tp_vec_nr);
2487 		tp_vec_nr = 0;
2488 	}
2489 }
2490 
2491 void text_poke_finish(void)
2492 {
2493 	text_poke_flush(NULL);
2494 }
2495 
2496 void __ref text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate)
2497 {
2498 	struct text_poke_loc *tp;
2499 
2500 	text_poke_flush(addr);
2501 
2502 	tp = &tp_vec[tp_vec_nr++];
2503 	text_poke_loc_init(tp, addr, opcode, len, emulate);
2504 }
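/*
 * Editor's usage sketch (example_patch_sites() is a hypothetical
 * name): batching amortizes the text_poke_sync() IPI rounds over up to
 * TP_VEC_MAX sites. Queue in ascending address order (tp_order_fail()
 * above forces a flush otherwise) and flush once at the end.
 */
static void example_patch_sites(void **addrs, const void *insn, size_t len, int nr)
{
	int i;

	mutex_lock(&text_mutex);
	for (i = 0; i < nr; i++)		/* addrs[] sorted ascending */
		text_poke_queue(addrs[i], insn, len, NULL);
	text_poke_finish();			/* flush the remaining entries */
	mutex_unlock(&text_mutex);
}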
2505 
2506 /**
2507  * text_poke_bp() -- update instructions on live kernel on SMP
2508  * @addr:	address to patch
2509  * @opcode:	opcode of new instruction
2510  * @len:	length to copy
2511  * @emulate:	instruction to be emulated
2512  *
2513  * Update a single instruction using a vector on the stack, avoiding
2514  * dynamically allocated memory. This function should be used when it is
2515  * not possible to allocate memory.
2516  */
2517 void __ref text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate)
2518 {
2519 	struct text_poke_loc tp;
2520 
2521 	text_poke_loc_init(&tp, addr, opcode, len, emulate);
2522 	text_poke_bp_batch(&tp, 1);
2523 }
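/*
 * Editor's usage sketch (example_enable_callsite() and the bytes are
 * illustrative): turning a 5-byte site into a CALL without allocating
 * memory, as a static-call style user might.
 */
static void example_enable_callsite(void *site, void *target)
{
	u8 insn[5] = { CALL_INSN_OPCODE, };	/* e8 <rel32> */
	s32 disp = (long)target - ((long)site + 5);

	memcpy(insn + 1, &disp, sizeof(disp));
	mutex_lock(&text_mutex);
	text_poke_bp(site, insn, sizeof(insn), NULL);
	mutex_unlock(&text_mutex);
}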
2524