xref: /linux/arch/x86/kernel/alternative.c (revision 5469f160e6bf38b84eb237055868286e629b8d44)
1 // SPDX-License-Identifier: GPL-2.0-only
2 #define pr_fmt(fmt) "SMP alternatives: " fmt
3 
4 #include <linux/module.h>
5 #include <linux/sched.h>
6 #include <linux/perf_event.h>
7 #include <linux/mutex.h>
8 #include <linux/list.h>
9 #include <linux/stringify.h>
10 #include <linux/highmem.h>
11 #include <linux/mm.h>
12 #include <linux/vmalloc.h>
13 #include <linux/memory.h>
14 #include <linux/stop_machine.h>
15 #include <linux/slab.h>
16 #include <linux/kdebug.h>
17 #include <linux/kprobes.h>
18 #include <linux/mmu_context.h>
19 #include <linux/bsearch.h>
20 #include <linux/sync_core.h>
21 #include <asm/text-patching.h>
22 #include <asm/alternative.h>
23 #include <asm/sections.h>
24 #include <asm/mce.h>
25 #include <asm/nmi.h>
26 #include <asm/cacheflush.h>
27 #include <asm/tlbflush.h>
28 #include <asm/insn.h>
29 #include <asm/io.h>
30 #include <asm/fixmap.h>
31 #include <asm/paravirt.h>
32 
33 int __read_mostly alternatives_patched;
34 
35 EXPORT_SYMBOL_GPL(alternatives_patched);
36 
37 #define MAX_PATCH_LEN (255-1)
38 
39 static int __initdata_or_module debug_alternative;
40 
41 static int __init debug_alt(char *str)
42 {
43 	debug_alternative = 1;
44 	return 1;
45 }
46 __setup("debug-alternative", debug_alt);
47 
48 static int noreplace_smp;
49 
50 static int __init setup_noreplace_smp(char *str)
51 {
52 	noreplace_smp = 1;
53 	return 1;
54 }
55 __setup("noreplace-smp", setup_noreplace_smp);
56 
57 #define DPRINTK(fmt, args...)						\
58 do {									\
59 	if (debug_alternative)						\
60 		printk(KERN_DEBUG pr_fmt(fmt) "\n", ##args);		\
61 } while (0)
62 
63 #define DUMP_BYTES(buf, len, fmt, args...)				\
64 do {									\
65 	if (unlikely(debug_alternative)) {				\
66 		int j;							\
67 									\
68 		if (!(len))						\
69 			break;						\
70 									\
71 		printk(KERN_DEBUG pr_fmt(fmt), ##args);			\
72 		for (j = 0; j < (len) - 1; j++)				\
73 			printk(KERN_CONT "%02hhx ", buf[j]);		\
74 		printk(KERN_CONT "%02hhx\n", buf[j]);			\
75 	}								\
76 } while (0)
77 
78 /*
79  * Each GENERIC_NOPX is X bytes long and is defined as an array of bytes
80  * that correspond to that nop. To get from one nop to the next, add to
81  * the array an offset equal to the sum of the sizes of all nops
82  * preceding the one we are after.
83  *
84  * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
85  * nice symmetry of sizes of the previous nops.
86  */
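/*
 * Illustration of the indexing scheme (derived from the tables below, not an
 * ABI): each *_nops[] pointer table is indexed by the desired NOP length, e.g.
 *
 *	k8_nops[3]		== k8nops + 1 + 2	(the three-byte NOP)
 *	k8_nops[ASM_NOP_MAX+1]	== the trailing 5-byte atomic NOP (NOP_ATOMIC5)
 *
 * i.e. every entry skips the cumulative size of all shorter NOPs packed into
 * the flat byte array, so ideal_nops[len] can be memcpy()'d directly.
 */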
87 #if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
88 static const unsigned char intelnops[] =
89 {
90 	GENERIC_NOP1,
91 	GENERIC_NOP2,
92 	GENERIC_NOP3,
93 	GENERIC_NOP4,
94 	GENERIC_NOP5,
95 	GENERIC_NOP6,
96 	GENERIC_NOP7,
97 	GENERIC_NOP8,
98 	GENERIC_NOP5_ATOMIC
99 };
100 static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
101 {
102 	NULL,
103 	intelnops,
104 	intelnops + 1,
105 	intelnops + 1 + 2,
106 	intelnops + 1 + 2 + 3,
107 	intelnops + 1 + 2 + 3 + 4,
108 	intelnops + 1 + 2 + 3 + 4 + 5,
109 	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
110 	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
111 	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
112 };
113 #endif
114 
115 #ifdef K8_NOP1
116 static const unsigned char k8nops[] =
117 {
118 	K8_NOP1,
119 	K8_NOP2,
120 	K8_NOP3,
121 	K8_NOP4,
122 	K8_NOP5,
123 	K8_NOP6,
124 	K8_NOP7,
125 	K8_NOP8,
126 	K8_NOP5_ATOMIC
127 };
128 static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
129 {
130 	NULL,
131 	k8nops,
132 	k8nops + 1,
133 	k8nops + 1 + 2,
134 	k8nops + 1 + 2 + 3,
135 	k8nops + 1 + 2 + 3 + 4,
136 	k8nops + 1 + 2 + 3 + 4 + 5,
137 	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
138 	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
139 	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
140 };
141 #endif
142 
143 #if defined(K7_NOP1) && !defined(CONFIG_X86_64)
144 static const unsigned char k7nops[] =
145 {
146 	K7_NOP1,
147 	K7_NOP2,
148 	K7_NOP3,
149 	K7_NOP4,
150 	K7_NOP5,
151 	K7_NOP6,
152 	K7_NOP7,
153 	K7_NOP8,
154 	K7_NOP5_ATOMIC
155 };
156 static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
157 {
158 	NULL,
159 	k7nops,
160 	k7nops + 1,
161 	k7nops + 1 + 2,
162 	k7nops + 1 + 2 + 3,
163 	k7nops + 1 + 2 + 3 + 4,
164 	k7nops + 1 + 2 + 3 + 4 + 5,
165 	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
166 	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
167 	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
168 };
169 #endif
170 
171 #ifdef P6_NOP1
172 static const unsigned char p6nops[] =
173 {
174 	P6_NOP1,
175 	P6_NOP2,
176 	P6_NOP3,
177 	P6_NOP4,
178 	P6_NOP5,
179 	P6_NOP6,
180 	P6_NOP7,
181 	P6_NOP8,
182 	P6_NOP5_ATOMIC
183 };
184 static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
185 {
186 	NULL,
187 	p6nops,
188 	p6nops + 1,
189 	p6nops + 1 + 2,
190 	p6nops + 1 + 2 + 3,
191 	p6nops + 1 + 2 + 3 + 4,
192 	p6nops + 1 + 2 + 3 + 4 + 5,
193 	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
194 	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
195 	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
196 };
197 #endif
198 
199 /* Initialize these to a safe default */
200 #ifdef CONFIG_X86_64
201 const unsigned char * const *ideal_nops = p6_nops;
202 #else
203 const unsigned char * const *ideal_nops = intel_nops;
204 #endif
205 
206 void __init arch_init_ideal_nops(void)
207 {
208 	switch (boot_cpu_data.x86_vendor) {
209 	case X86_VENDOR_INTEL:
210 		/*
211 		 * Due to a decoder implementation quirk, some
212 		 * specific Intel CPUs actually perform better with
213 		 * the "k8_nops" than with the SDM-recommended NOPs.
214 		 */
215 		if (boot_cpu_data.x86 == 6 &&
216 		    boot_cpu_data.x86_model >= 0x0f &&
217 		    boot_cpu_data.x86_model != 0x1c &&
218 		    boot_cpu_data.x86_model != 0x26 &&
219 		    boot_cpu_data.x86_model != 0x27 &&
220 		    boot_cpu_data.x86_model < 0x30) {
221 			ideal_nops = k8_nops;
222 		} else if (boot_cpu_has(X86_FEATURE_NOPL)) {
223 			   ideal_nops = p6_nops;
224 		} else {
225 #ifdef CONFIG_X86_64
226 			ideal_nops = k8_nops;
227 #else
228 			ideal_nops = intel_nops;
229 #endif
230 		}
231 		break;
232 
233 	case X86_VENDOR_HYGON:
234 		ideal_nops = p6_nops;
235 		return;
236 
237 	case X86_VENDOR_AMD:
238 		if (boot_cpu_data.x86 > 0xf) {
239 			ideal_nops = p6_nops;
240 			return;
241 		}
242 
243 		fallthrough;
244 
245 	default:
246 #ifdef CONFIG_X86_64
247 		ideal_nops = k8_nops;
248 #else
249 		if (boot_cpu_has(X86_FEATURE_K8))
250 			ideal_nops = k8_nops;
251 		else if (boot_cpu_has(X86_FEATURE_K7))
252 			ideal_nops = k7_nops;
253 		else
254 			ideal_nops = intel_nops;
255 #endif
256 	}
257 }
258 
259 /* Use this to add nops to a buffer, then text_poke the whole buffer. */
260 static void __init_or_module add_nops(void *insns, unsigned int len)
261 {
262 	while (len > 0) {
263 		unsigned int noplen = len;
264 		if (noplen > ASM_NOP_MAX)
265 			noplen = ASM_NOP_MAX;
266 		memcpy(insns, ideal_nops[noplen], noplen);
267 		insns += noplen;
268 		len -= noplen;
269 	}
270 }
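/*
 * Example (illustrative): with ASM_NOP_MAX == 8, padding an 11-byte hole
 *
 *	add_nops(buf, 11);
 *
 * emits one 8-byte NOP followed by one 3-byte NOP, i.e. the pad is chunked
 * greedily into the largest available NOPs.
 */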
271 
272 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
273 extern s32 __smp_locks[], __smp_locks_end[];
274 void text_poke_early(void *addr, const void *opcode, size_t len);
275 
276 /*
277  * Are we looking at a near JMP with a 1-byte (0xeb) or 4-byte (0xe9) displacement?
278  */
279 static inline bool is_jmp(const u8 opcode)
280 {
281 	return opcode == 0xeb || opcode == 0xe9;
282 }
283 
284 static void __init_or_module
285 recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insn_buff)
286 {
287 	u8 *next_rip, *tgt_rip;
288 	s32 n_dspl, o_dspl;
289 	int repl_len;
290 
291 	if (a->replacementlen != 5)
292 		return;
293 
294 	o_dspl = *(s32 *)(insn_buff + 1);
295 
296 	/* next_rip of the replacement JMP */
297 	next_rip = repl_insn + a->replacementlen;
298 	/* target rip of the replacement JMP */
299 	tgt_rip  = next_rip + o_dspl;
300 	n_dspl = tgt_rip - orig_insn;
301 
302 	DPRINTK("target RIP: %px, new_displ: 0x%x", tgt_rip, n_dspl);
303 
304 	if (tgt_rip - orig_insn >= 0) {
305 		if (n_dspl - 2 <= 127)
306 			goto two_byte_jmp;
307 		else
308 			goto five_byte_jmp;
309 	/* negative offset */
310 	} else {
311 		if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
312 			goto two_byte_jmp;
313 		else
314 			goto five_byte_jmp;
315 	}
316 
317 two_byte_jmp:
318 	n_dspl -= 2;
319 
320 	insn_buff[0] = 0xeb;
321 	insn_buff[1] = (s8)n_dspl;
322 	add_nops(insn_buff + 2, 3);
323 
324 	repl_len = 2;
325 	goto done;
326 
327 five_byte_jmp:
328 	n_dspl -= 5;
329 
330 	insn_buff[0] = 0xe9;
331 	*(s32 *)&insn_buff[1] = n_dspl;
332 
333 	repl_len = 5;
334 
335 done:
336 
337 	DPRINTK("final displ: 0x%08x, JMP 0x%lx",
338 		n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
339 }
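/*
 * Worked example of the logic above (numbers are illustrative): if the
 * 5-byte replacement JMP ends up targeting an address 0x40 bytes past
 * orig_insn, then n_dspl == 0x40 and n_dspl - 2 == 0x3e fits in a signed
 * byte, so a two-byte 0xeb JMP is emitted and the remaining three bytes of
 * the patch slot are filled with NOPs via add_nops().
 */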
340 
341 /*
342  * "noinline" to cause control flow change and thus invalidate I$ and
343  * cause refetch after modification.
344  */
345 static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
346 {
347 	unsigned long flags;
348 	int i;
349 
350 	for (i = 0; i < a->padlen; i++) {
351 		if (instr[i] != 0x90)
352 			return;
353 	}
354 
355 	local_irq_save(flags);
356 	add_nops(instr + (a->instrlen - a->padlen), a->padlen);
357 	local_irq_restore(flags);
358 
359 	DUMP_BYTES(instr, a->instrlen, "%px: [%d:%d) optimized NOPs: ",
360 		   instr, a->instrlen - a->padlen, a->padlen);
361 }
362 
363 /*
364  * Replace instructions with better alternatives for this CPU type. This runs
365  * before SMP is initialized to avoid SMP problems with self modifying code.
366  * This implies that asymmetric systems where APs have fewer capabilities than
367  * the boot processor are not handled. Tough. Make sure you disable such
368  * features by hand.
369  *
370  * Marked "noinline" to cause control flow change and thus insn cache
371  * to refetch changed I$ lines.
372  */
373 void __init_or_module noinline apply_alternatives(struct alt_instr *start,
374 						  struct alt_instr *end)
375 {
376 	struct alt_instr *a;
377 	u8 *instr, *replacement;
378 	u8 insn_buff[MAX_PATCH_LEN];
379 
380 	DPRINTK("alt table %px, -> %px", start, end);
381 	/*
382 	 * The scan order should be from start to end. A later scanned
383 	 * alternative code can overwrite previously scanned alternative code.
384 	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
385 	 * patch code.
386 	 *
387 	 * So be careful if you want to change the scan order to any other
388 	 * order.
389 	 */
390 	for (a = start; a < end; a++) {
391 		int insn_buff_sz = 0;
392 		/* Mask away "NOT" flag bit for feature to test. */
393 		u16 feature = a->cpuid & ~ALTINSTR_FLAG_INV;
394 
395 		instr = (u8 *)&a->instr_offset + a->instr_offset;
396 		replacement = (u8 *)&a->repl_offset + a->repl_offset;
397 		BUG_ON(a->instrlen > sizeof(insn_buff));
398 		BUG_ON(feature >= (NCAPINTS + NBUGINTS) * 32);
399 
400 		/*
401 		 * Patch if either:
402 		 * - feature is present
403 		 * - feature is not present but ALTINSTR_FLAG_INV is set, meaning:
404 		 *   patch if the feature is *NOT* present.
405 		 */
406 		if (!boot_cpu_has(feature) == !(a->cpuid & ALTINSTR_FLAG_INV)) {
407 			if (a->padlen > 1)
408 				optimize_nops(a, instr);
409 
410 			continue;
411 		}
412 
413 		DPRINTK("feat: %s%d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d), pad: %d",
414 			(a->cpuid & ALTINSTR_FLAG_INV) ? "!" : "",
415 			feature >> 5,
416 			feature & 0x1f,
417 			instr, instr, a->instrlen,
418 			replacement, a->replacementlen, a->padlen);
419 
420 		DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
421 		DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);
422 
423 		memcpy(insn_buff, replacement, a->replacementlen);
424 		insn_buff_sz = a->replacementlen;
425 
426 		/*
427 		 * 0xe8 is a relative CALL; fix the offset.
428 		 *
429 		 * Instruction length is checked before the opcode to avoid
430 		 * accessing uninitialized bytes for zero-length replacements.
431 		 */
432 		if (a->replacementlen == 5 && *insn_buff == 0xe8) {
433 			*(s32 *)(insn_buff + 1) += replacement - instr;
434 			DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
435 				*(s32 *)(insn_buff + 1),
436 				(unsigned long)instr + *(s32 *)(insn_buff + 1) + 5);
437 		}
438 
439 		if (a->replacementlen && is_jmp(replacement[0]))
440 			recompute_jump(a, instr, replacement, insn_buff);
441 
442 		if (a->instrlen > a->replacementlen) {
443 			add_nops(insn_buff + a->replacementlen,
444 				 a->instrlen - a->replacementlen);
445 			insn_buff_sz += a->instrlen - a->replacementlen;
446 		}
447 		DUMP_BYTES(insn_buff, insn_buff_sz, "%px: final_insn: ", instr);
448 
449 		text_poke_early(instr, insn_buff, insn_buff_sz);
450 	}
451 }
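/*
 * Sketch of a typical patch site (hypothetical handler names; the real macro
 * lives in <asm/alternative.h>): code written as
 *
 *	asm volatile (ALTERNATIVE("call generic_impl",
 *				  "call fancy_impl", X86_FEATURE_XMM2));
 *
 * places the original bytes in .text and a struct alt_instr entry in
 * .altinstructions; apply_alternatives() above then copies the replacement
 * over the original (fixing up relative CALL/JMP displacements) when the
 * feature is present, or optimizes the padding NOPs when it is not.
 */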
452 
453 #ifdef CONFIG_SMP
454 static void alternatives_smp_lock(const s32 *start, const s32 *end,
455 				  u8 *text, u8 *text_end)
456 {
457 	const s32 *poff;
458 
459 	for (poff = start; poff < end; poff++) {
460 		u8 *ptr = (u8 *)poff + *poff;
461 
462 		if (!*poff || ptr < text || ptr >= text_end)
463 			continue;
464 		/* turn DS segment override prefix into lock prefix */
465 		if (*ptr == 0x3e)
466 			text_poke(ptr, ((unsigned char []){0xf0}), 1);
467 	}
468 }
469 
470 static void alternatives_smp_unlock(const s32 *start, const s32 *end,
471 				    u8 *text, u8 *text_end)
472 {
473 	const s32 *poff;
474 
475 	for (poff = start; poff < end; poff++) {
476 		u8 *ptr = (u8 *)poff + *poff;
477 
478 		if (!*poff || ptr < text || ptr >= text_end)
479 			continue;
480 		/* turn lock prefix into DS segment override prefix */
481 		if (*ptr == 0xf0)
482 			text_poke(ptr, ((unsigned char []){0x3E}), 1);
483 	}
484 }
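/*
 * The (start, end) offset arrays walked above come from the .smp_locks
 * section: the LOCK_PREFIX assembly macro records, for every LOCK prefix it
 * emits, a 32-bit self-relative pointer to that prefix byte, roughly
 * (simplified sketch):
 *
 *	1:	lock; <insn>
 *		.pushsection .smp_locks, "a"
 *		.long	1b - .
 *		.popsection
 *
 * so "(u8 *)poff + *poff" recovers the absolute address of the prefix byte,
 * which is then flipped between LOCK (0xf0) and a harmless DS override (0x3e).
 */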
485 
486 struct smp_alt_module {
487 	/* owning module; NULL for core kernel text */
488 	struct module	*mod;
489 	char		*name;
490 
491 	/* ptrs to lock prefixes */
492 	const s32	*locks;
493 	const s32	*locks_end;
494 
495 	/* .text segment, needed to avoid patching init code ;) */
496 	u8		*text;
497 	u8		*text_end;
498 
499 	struct list_head next;
500 };
501 static LIST_HEAD(smp_alt_modules);
502 static bool uniproc_patched = false;	/* protected by text_mutex */
503 
504 void __init_or_module alternatives_smp_module_add(struct module *mod,
505 						  char *name,
506 						  void *locks, void *locks_end,
507 						  void *text,  void *text_end)
508 {
509 	struct smp_alt_module *smp;
510 
511 	mutex_lock(&text_mutex);
512 	if (!uniproc_patched)
513 		goto unlock;
514 
515 	if (num_possible_cpus() == 1)
516 		/* Don't bother remembering, we'll never have to undo it. */
517 		goto smp_unlock;
518 
519 	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
520 	if (NULL == smp)
521 		/* we'll run the (safe but slow) SMP code then ... */
522 		goto unlock;
523 
524 	smp->mod	= mod;
525 	smp->name	= name;
526 	smp->locks	= locks;
527 	smp->locks_end	= locks_end;
528 	smp->text	= text;
529 	smp->text_end	= text_end;
530 	DPRINTK("locks %p -> %p, text %p -> %p, name %s",
531 		smp->locks, smp->locks_end,
532 		smp->text, smp->text_end, smp->name);
533 
534 	list_add_tail(&smp->next, &smp_alt_modules);
535 smp_unlock:
536 	alternatives_smp_unlock(locks, locks_end, text, text_end);
537 unlock:
538 	mutex_unlock(&text_mutex);
539 }
540 
541 void __init_or_module alternatives_smp_module_del(struct module *mod)
542 {
543 	struct smp_alt_module *item;
544 
545 	mutex_lock(&text_mutex);
546 	list_for_each_entry(item, &smp_alt_modules, next) {
547 		if (mod != item->mod)
548 			continue;
549 		list_del(&item->next);
550 		kfree(item);
551 		break;
552 	}
553 	mutex_unlock(&text_mutex);
554 }
555 
556 void alternatives_enable_smp(void)
557 {
558 	struct smp_alt_module *mod;
559 
560 	/* Why bother if there are no other CPUs? */
561 	BUG_ON(num_possible_cpus() == 1);
562 
563 	mutex_lock(&text_mutex);
564 
565 	if (uniproc_patched) {
566 		pr_info("switching to SMP code\n");
567 		BUG_ON(num_online_cpus() != 1);
568 		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
569 		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
570 		list_for_each_entry(mod, &smp_alt_modules, next)
571 			alternatives_smp_lock(mod->locks, mod->locks_end,
572 					      mod->text, mod->text_end);
573 		uniproc_patched = false;
574 	}
575 	mutex_unlock(&text_mutex);
576 }
577 
578 /*
579  * Return 1 if the address range is reserved for SMP-alternatives.
580  * Must hold text_mutex.
581  */
582 int alternatives_text_reserved(void *start, void *end)
583 {
584 	struct smp_alt_module *mod;
585 	const s32 *poff;
586 	u8 *text_start = start;
587 	u8 *text_end = end;
588 
589 	lockdep_assert_held(&text_mutex);
590 
591 	list_for_each_entry(mod, &smp_alt_modules, next) {
592 		if (mod->text > text_end || mod->text_end < text_start)
593 			continue;
594 		for (poff = mod->locks; poff < mod->locks_end; poff++) {
595 			const u8 *ptr = (const u8 *)poff + *poff;
596 
597 			if (text_start <= ptr && text_end > ptr)
598 				return 1;
599 		}
600 	}
601 
602 	return 0;
603 }
604 #endif /* CONFIG_SMP */
605 
606 #ifdef CONFIG_PARAVIRT
607 void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
608 				     struct paravirt_patch_site *end)
609 {
610 	struct paravirt_patch_site *p;
611 	char insn_buff[MAX_PATCH_LEN];
612 
613 	for (p = start; p < end; p++) {
614 		unsigned int used;
615 
616 		BUG_ON(p->len > MAX_PATCH_LEN);
617 		/* prep the buffer with the original instructions */
618 		memcpy(insn_buff, p->instr, p->len);
619 		used = paravirt_patch(p->type, insn_buff, (unsigned long)p->instr, p->len);
620 
621 		BUG_ON(used > p->len);
622 
623 		/* Pad the rest with nops */
624 		add_nops(insn_buff + used, p->len - used);
625 		text_poke_early(p->instr, insn_buff, p->len);
626 	}
627 }
628 extern struct paravirt_patch_site __start_parainstructions[],
629 	__stop_parainstructions[];
630 #endif	/* CONFIG_PARAVIRT */
631 
632 /*
633  * Self-test for the INT3 based CALL emulation code.
634  *
635  * This exercises int3_emulate_call() to make sure INT3 pt_regs are set up
636  * properly and that there is a stack gap between the INT3 frame and the
637  * previous context. Without this gap doing a virtual PUSH on the interrupted
638  * stack would corrupt the INT3 IRET frame.
639  *
640  * See entry_{32,64}.S for more details.
641  */
642 
643 /*
644  * We define the int3_magic() function in assembly to control the calling
645  * convention such that we can 'call' it from assembly.
646  */
647 
648 extern void int3_magic(unsigned int *ptr); /* defined in asm */
649 
650 asm (
651 "	.pushsection	.init.text, \"ax\", @progbits\n"
652 "	.type		int3_magic, @function\n"
653 "int3_magic:\n"
654 "	movl	$1, (%" _ASM_ARG1 ")\n"
655 "	ret\n"
656 "	.size		int3_magic, .-int3_magic\n"
657 "	.popsection\n"
658 );
659 
660 extern __initdata unsigned long int3_selftest_ip; /* defined in asm below */
661 
662 static int __init
663 int3_exception_notify(struct notifier_block *self, unsigned long val, void *data)
664 {
665 	struct die_args *args = data;
666 	struct pt_regs *regs = args->regs;
667 
668 	if (!regs || user_mode(regs))
669 		return NOTIFY_DONE;
670 
671 	if (val != DIE_INT3)
672 		return NOTIFY_DONE;
673 
674 	if (regs->ip - INT3_INSN_SIZE != int3_selftest_ip)
675 		return NOTIFY_DONE;
676 
677 	int3_emulate_call(regs, (unsigned long)&int3_magic);
678 	return NOTIFY_STOP;
679 }
680 
681 static void __init int3_selftest(void)
682 {
683 	static __initdata struct notifier_block int3_exception_nb = {
684 		.notifier_call	= int3_exception_notify,
685 		.priority	= INT_MAX-1, /* last */
686 	};
687 	unsigned int val = 0;
688 
689 	BUG_ON(register_die_notifier(&int3_exception_nb));
690 
691 	/*
692 	 * Basically: int3_magic(&val); but really complicated :-)
693 	 *
694 	 * Stick the address of the INT3 instruction into int3_selftest_ip,
695 	 * then trigger the INT3, padded with NOPs to match a CALL instruction
696 	 * length.
697 	 */
698 	asm volatile ("1: int3; nop; nop; nop; nop\n\t"
699 		      ".pushsection .init.data,\"aw\"\n\t"
700 		      ".align " __ASM_SEL(4, 8) "\n\t"
701 		      ".type int3_selftest_ip, @object\n\t"
702 		      ".size int3_selftest_ip, " __ASM_SEL(4, 8) "\n\t"
703 		      "int3_selftest_ip:\n\t"
704 		      __ASM_SEL(.long, .quad) " 1b\n\t"
705 		      ".popsection\n\t"
706 		      : ASM_CALL_CONSTRAINT
707 		      : __ASM_SEL_RAW(a, D) (&val)
708 		      : "memory");
709 
710 	BUG_ON(val != 1);
711 
712 	unregister_die_notifier(&int3_exception_nb);
713 }
714 
715 void __init alternative_instructions(void)
716 {
717 	int3_selftest();
718 
719 	/*
720 	 * The patching is not fully atomic, so try to avoid local
721 	 * interrupts that might execute the code being patched.
722 	 * Other CPUs are not running.
723 	 */
724 	stop_nmi();
725 
726 	/*
727 	 * Don't stop machine check exceptions while patching.
728 	 * MCEs only happen when something got corrupted and in this
729 	 * case we must do something about the corruption.
730 	 * Ignoring it is worse than an unlikely patching race.
731 	 * Also machine checks tend to be broadcast and if one CPU
732 	 * goes into machine check the others follow quickly, so we don't
733 	 * expect a machine check to cause undue problems during code
734 	 * patching.
735 	 */
736 
737 	/*
738 	 * Paravirt patching and alternative patching can be combined to
739 	 * replace a function call with a short direct code sequence (e.g.
740 	 * by setting a constant return value instead of doing that in an
741 	 * external function).
742 	 * In order to make this work the following sequence is required:
743 	 * 1. set (artificial) features depending on used paravirt
744 	 *    functions which can later influence alternative patching
745 	 * 2. apply paravirt patching (generally replacing an indirect
746 	 *    function call with a direct one)
747 	 * 3. apply alternative patching (e.g. replacing a direct function
748 	 *    call with a custom code sequence)
749 	 * Doing paravirt patching after alternative patching would clobber
750 	 * the optimization of the custom code with a function call again.
751 	 */
752 	paravirt_set_cap();
753 
754 	/*
755 	 * First patch paravirt functions, such that we overwrite the indirect
756 	 * call with the direct call.
757 	 */
758 	apply_paravirt(__parainstructions, __parainstructions_end);
759 
760 	/*
761 	 * Then patch alternatives, such that those paravirt calls that are in
762 	 * alternatives can be overwritten by their immediate fragments.
763 	 */
764 	apply_alternatives(__alt_instructions, __alt_instructions_end);
765 
766 #ifdef CONFIG_SMP
767 	/* Patch to UP if other cpus not imminent. */
768 	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
769 		uniproc_patched = true;
770 		alternatives_smp_module_add(NULL, "core kernel",
771 					    __smp_locks, __smp_locks_end,
772 					    _text, _etext);
773 	}
774 
775 	if (!uniproc_patched || num_possible_cpus() == 1) {
776 		free_init_pages("SMP alternatives",
777 				(unsigned long)__smp_locks,
778 				(unsigned long)__smp_locks_end);
779 	}
780 #endif
781 
782 	restart_nmi();
783 	alternatives_patched = 1;
784 }
785 
786 /**
787  * text_poke_early - Update instructions on a live kernel at boot time
788  * @addr: address to modify
789  * @opcode: source of the copy
790  * @len: length to copy
791  *
792  * When you use this code to patch more than one byte of an instruction
793  * you need to make sure that other CPUs cannot execute this code in parallel.
794  * Also, no thread may currently be preempted in the middle of these
795  * instructions. And on the local CPU you need to be protected against NMI or
796  * MCE handlers seeing an inconsistent instruction while you patch.
797  */
798 void __init_or_module text_poke_early(void *addr, const void *opcode,
799 				      size_t len)
800 {
801 	unsigned long flags;
802 
803 	if (boot_cpu_has(X86_FEATURE_NX) &&
804 	    is_module_text_address((unsigned long)addr)) {
805 		/*
806 		 * Modules text is marked initially as non-executable, so the
807 		 * code cannot be running and speculative code-fetches are
808 		 * prevented. Just change the code.
809 		 */
810 		memcpy(addr, opcode, len);
811 	} else {
812 		local_irq_save(flags);
813 		memcpy(addr, opcode, len);
814 		local_irq_restore(flags);
815 		sync_core();
816 
817 		/*
818 		 * Could also do a CLFLUSH here to speed up CPU recovery; but
819 		 * that causes hangs on some VIA CPUs.
820 		 */
821 	}
822 }
823 
824 typedef struct {
825 	struct mm_struct *mm;
826 } temp_mm_state_t;
827 
828 /*
829  * Using a temporary mm allows setting temporary mappings that are not accessible
830  * by other CPUs. Such mappings are needed to perform sensitive memory writes
831  * that override the kernel memory protections (e.g., W^X), without exposing the
832  * temporary page-table mappings that are required for these write operations to
833  * other CPUs. Using a temporary mm also avoids TLB shootdowns when the
834  * mapping is torn down.
835  *
836  * Context: The temporary mm needs to be used exclusively by a single core. To
837  *          harden security IRQs must be disabled while the temporary mm is
838  *          loaded, thereby preventing interrupt handler bugs from overriding
839  *          the kernel memory protection.
840  */
841 static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
842 {
843 	temp_mm_state_t temp_state;
844 
845 	lockdep_assert_irqs_disabled();
846 
847 	/*
848 	 * Make sure not to be in TLB lazy mode, as otherwise we'll end up
849 	 * with a stale address space WITHOUT being in lazy mode after
850 	 * restoring the previous mm.
851 	 */
852 	if (this_cpu_read(cpu_tlbstate.is_lazy))
853 		leave_mm(smp_processor_id());
854 
855 	temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
856 	switch_mm_irqs_off(NULL, mm, current);
857 
858 	/*
859 	 * If breakpoints are enabled, disable them while the temporary mm is
860 	 * used. Userspace might set up watchpoints on addresses that are used
861 	 * in the temporary mm, which would lead to wrong signals being sent or
862 	 * crashes.
863 	 *
864 	 * Note that breakpoints are not disabled selectively, which also causes
865 	 * kernel breakpoints (e.g., perf's) to be disabled. This might be
866 	 * undesirable, but still seems reasonable as the code that runs in the
867 	 * temporary mm should be short.
868 	 */
869 	if (hw_breakpoint_active())
870 		hw_breakpoint_disable();
871 
872 	return temp_state;
873 }
874 
875 static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
876 {
877 	lockdep_assert_irqs_disabled();
878 	switch_mm_irqs_off(NULL, prev_state.mm, current);
879 
880 	/*
881 	 * Restore the breakpoints if they were disabled before the temporary mm
882 	 * was loaded.
883 	 */
884 	if (hw_breakpoint_active())
885 		hw_breakpoint_restore();
886 }
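/*
 * Usage pattern (as __text_poke() below does it; shown only as an
 * illustration):
 *
 *	local_irq_save(flags);
 *	prev = use_temporary_mm(poking_mm);
 *	... write through the temporary mapping ...
 *	unuse_temporary_mm(prev);
 *	local_irq_restore(flags);
 *
 * IRQs must stay disabled for the whole window so nothing else can run on
 * this CPU while the writable mapping is visible.
 */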
887 
888 __ro_after_init struct mm_struct *poking_mm;
889 __ro_after_init unsigned long poking_addr;
890 
891 static void *__text_poke(void *addr, const void *opcode, size_t len)
892 {
893 	bool cross_page_boundary = offset_in_page(addr) + len > PAGE_SIZE;
894 	struct page *pages[2] = {NULL};
895 	temp_mm_state_t prev;
896 	unsigned long flags;
897 	pte_t pte, *ptep;
898 	spinlock_t *ptl;
899 	pgprot_t pgprot;
900 
901 	/*
902 	 * While boot memory allocator is running we cannot use struct pages as
903 	 * they are not yet initialized. There is no way to recover.
904 	 */
905 	BUG_ON(!after_bootmem);
906 
907 	if (!core_kernel_text((unsigned long)addr)) {
908 		pages[0] = vmalloc_to_page(addr);
909 		if (cross_page_boundary)
910 			pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
911 	} else {
912 		pages[0] = virt_to_page(addr);
913 		WARN_ON(!PageReserved(pages[0]));
914 		if (cross_page_boundary)
915 			pages[1] = virt_to_page(addr + PAGE_SIZE);
916 	}
917 	/*
918 	 * If something went wrong, crash and burn since recovery paths are not
919 	 * implemented.
920 	 */
921 	BUG_ON(!pages[0] || (cross_page_boundary && !pages[1]));
922 
923 	/*
924 	 * Map the page without the global bit, as TLB flushing is done with
925 	 * flush_tlb_mm_range(), which is intended for non-global PTEs.
926 	 */
927 	pgprot = __pgprot(pgprot_val(PAGE_KERNEL) & ~_PAGE_GLOBAL);
928 
929 	/*
930 	 * The lock is not really needed, but it lets us avoid open-coding.
931 	 */
932 	ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
933 
934 	/*
935 	 * This must not fail; preallocated in poking_init().
936 	 */
937 	VM_BUG_ON(!ptep);
938 
939 	local_irq_save(flags);
940 
941 	pte = mk_pte(pages[0], pgprot);
942 	set_pte_at(poking_mm, poking_addr, ptep, pte);
943 
944 	if (cross_page_boundary) {
945 		pte = mk_pte(pages[1], pgprot);
946 		set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
947 	}
948 
949 	/*
950 	 * Loading the temporary mm behaves as a compiler barrier, which
951 	 * guarantees that the PTE will be set at the time memcpy() is done.
952 	 */
953 	prev = use_temporary_mm(poking_mm);
954 
955 	kasan_disable_current();
956 	memcpy((u8 *)poking_addr + offset_in_page(addr), opcode, len);
957 	kasan_enable_current();
958 
959 	/*
960 	 * Ensure that the PTE is only cleared after the instructions of memcpy
961 	 * were issued by using a compiler barrier.
962 	 */
963 	barrier();
964 
965 	pte_clear(poking_mm, poking_addr, ptep);
966 	if (cross_page_boundary)
967 		pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1);
968 
969 	/*
970 	 * Loading the previous page-table hierarchy requires a serializing
971 	 * instruction that already allows the core to see the updated version.
972 	 * Xen-PV is assumed to serialize execution in a similar manner.
973 	 */
974 	unuse_temporary_mm(prev);
975 
976 	/*
977 	 * Flushing the TLB might involve IPIs, which would require enabled
978 	 * IRQs, but not if the mm is not used, as is the case at this point.
979 	 */
980 	flush_tlb_mm_range(poking_mm, poking_addr, poking_addr +
981 			   (cross_page_boundary ? 2 : 1) * PAGE_SIZE,
982 			   PAGE_SHIFT, false);
983 
984 	/*
985 	 * If the text does not match what we just wrote then something is
986 	 * fundamentally screwy; there's nothing we can really do about that.
987 	 */
988 	BUG_ON(memcmp(addr, opcode, len));
989 
990 	local_irq_restore(flags);
991 	pte_unmap_unlock(ptep, ptl);
992 	return addr;
993 }
994 
995 /**
996  * text_poke - Update instructions on a live kernel
997  * @addr: address to modify
998  * @opcode: source of the copy
999  * @len: length to copy
1000  *
1001  * Only atomic text poke/set should be allowed when not doing early patching.
1002  * It means the size must be writable atomically and the address must be aligned
1003  * in a way that permits an atomic write. It also makes sure we fit on a single
1004  * page.
1005  *
1006  * Note that the caller must ensure that if the modified code is part of a
1007  * module, the module is not removed during poking. This can be achieved
1008  * by registering a module notifier, and ordering module removal and patching
1009  * through a mutex.
1010  */
1011 void *text_poke(void *addr, const void *opcode, size_t len)
1012 {
1013 	lockdep_assert_held(&text_mutex);
1014 
1015 	return __text_poke(addr, opcode, len);
1016 }
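/*
 * Example call pattern (illustrative; text_poke() is used e.g. by
 * alternatives_smp_lock() above, with text_mutex held by its caller):
 *
 *	mutex_lock(&text_mutex);
 *	text_poke(ptr, ((unsigned char []){0xf0}), 1);
 *	mutex_unlock(&text_mutex);
 *
 * The lockdep assertion enforces that text_mutex is held by the caller.
 */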
1017 
1018 /**
1019  * text_poke_kgdb - Update instructions on a live kernel by kgdb
1020  * @addr: address to modify
1021  * @opcode: source of the copy
1022  * @len: length to copy
1023  *
1024  * Only atomic text poke/set should be allowed when not doing early patching.
1025  * It means the size must be writable atomically and the address must be aligned
1026  * in a way that permits an atomic write. It also makes sure we fit on a single
1027  * page.
1028  *
1029  * Context: should only be used by kgdb, which ensures no other core is running,
1030  *	    despite the fact it does not hold the text_mutex.
1031  */
1032 void *text_poke_kgdb(void *addr, const void *opcode, size_t len)
1033 {
1034 	return __text_poke(addr, opcode, len);
1035 }
1036 
1037 static void do_sync_core(void *info)
1038 {
1039 	sync_core();
1040 }
1041 
1042 void text_poke_sync(void)
1043 {
1044 	on_each_cpu(do_sync_core, NULL, 1);
1045 }
1046 
1047 struct text_poke_loc {
1048 	s32 rel_addr; /* addr := _stext + rel_addr */
1049 	s32 rel32;
1050 	u8 opcode;
1051 	const u8 text[POKE_MAX_OPCODE_SIZE];
1052 	u8 old;
1053 };
1054 
1055 struct bp_patching_desc {
1056 	struct text_poke_loc *vec;
1057 	int nr_entries;
1058 	atomic_t refs;
1059 };
1060 
1061 static struct bp_patching_desc *bp_desc;
1062 
1063 static __always_inline
1064 struct bp_patching_desc *try_get_desc(struct bp_patching_desc **descp)
1065 {
1066 	struct bp_patching_desc *desc = __READ_ONCE(*descp); /* rcu_dereference */
1067 
1068 	if (!desc || !arch_atomic_inc_not_zero(&desc->refs))
1069 		return NULL;
1070 
1071 	return desc;
1072 }
1073 
1074 static __always_inline void put_desc(struct bp_patching_desc *desc)
1075 {
1076 	smp_mb__before_atomic();
1077 	arch_atomic_dec(&desc->refs);
1078 }
1079 
1080 static __always_inline void *text_poke_addr(struct text_poke_loc *tp)
1081 {
1082 	return _stext + tp->rel_addr;
1083 }
1084 
1085 static __always_inline int patch_cmp(const void *key, const void *elt)
1086 {
1087 	struct text_poke_loc *tp = (struct text_poke_loc *) elt;
1088 
1089 	if (key < text_poke_addr(tp))
1090 		return -1;
1091 	if (key > text_poke_addr(tp))
1092 		return 1;
1093 	return 0;
1094 }
1095 
1096 noinstr int poke_int3_handler(struct pt_regs *regs)
1097 {
1098 	struct bp_patching_desc *desc;
1099 	struct text_poke_loc *tp;
1100 	int len, ret = 0;
1101 	void *ip;
1102 
1103 	if (user_mode(regs))
1104 		return 0;
1105 
1106 	/*
1107 	 * Having observed our INT3 instruction, we now must observe
1108 	 * bp_desc:
1109 	 *
1110 	 *	bp_desc = desc			INT3
1111 	 *	WMB				RMB
1112 	 *	write INT3			if (desc)
1113 	 */
1114 	smp_rmb();
1115 
1116 	desc = try_get_desc(&bp_desc);
1117 	if (!desc)
1118 		return 0;
1119 
1120 	/*
1121 	 * Discount the INT3. See text_poke_bp_batch().
1122 	 */
1123 	ip = (void *) regs->ip - INT3_INSN_SIZE;
1124 
1125 	/*
1126 	 * Skip the binary search if there is a single member in the vector.
1127 	 */
1128 	if (unlikely(desc->nr_entries > 1)) {
1129 		tp = __inline_bsearch(ip, desc->vec, desc->nr_entries,
1130 				      sizeof(struct text_poke_loc),
1131 				      patch_cmp);
1132 		if (!tp)
1133 			goto out_put;
1134 	} else {
1135 		tp = desc->vec;
1136 		if (text_poke_addr(tp) != ip)
1137 			goto out_put;
1138 	}
1139 
1140 	len = text_opcode_size(tp->opcode);
1141 	ip += len;
1142 
1143 	switch (tp->opcode) {
1144 	case INT3_INSN_OPCODE:
1145 		/*
1146 		 * Someone poked an explicit INT3, they'll want to handle it,
1147 		 * do not consume.
1148 		 */
1149 		goto out_put;
1150 
1151 	case RET_INSN_OPCODE:
1152 		int3_emulate_ret(regs);
1153 		break;
1154 
1155 	case CALL_INSN_OPCODE:
1156 		int3_emulate_call(regs, (long)ip + tp->rel32);
1157 		break;
1158 
1159 	case JMP32_INSN_OPCODE:
1160 	case JMP8_INSN_OPCODE:
1161 		int3_emulate_jmp(regs, (long)ip + tp->rel32);
1162 		break;
1163 
1164 	default:
1165 		BUG();
1166 	}
1167 
1168 	ret = 1;
1169 
1170 out_put:
1171 	put_desc(desc);
1172 	return ret;
1173 }
1174 
1175 #define TP_VEC_MAX (PAGE_SIZE / sizeof(struct text_poke_loc))
1176 static struct text_poke_loc tp_vec[TP_VEC_MAX];
1177 static int tp_vec_nr;
1178 
1179 /**
1180  * text_poke_bp_batch() -- update instructions on live kernel on SMP
1181  * @tp:			vector of instructions to patch
1182  * @nr_entries:		number of entries in the vector
1183  *
1184  * Modify multi-byte instructions by using an int3 breakpoint on SMP.
1185  * We completely avoid stop_machine() here, and achieve the
1186  * synchronization using an int3 breakpoint.
1187  *
1188  * The way it is done:
1189  *	- For each entry in the vector:
1190  *		- add an int3 trap to the address that will be patched
1191  *	- sync cores
1192  *	- For each entry in the vector:
1193  *		- update all but the first byte of the patched range
1194  *	- sync cores
1195  *	- For each entry in the vector:
1196  *		- replace the first byte (int3) by the first byte of
1197  *		  replacing opcode
1198  *	- sync cores
1199  */
1200 static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
1201 {
1202 	struct bp_patching_desc desc = {
1203 		.vec = tp,
1204 		.nr_entries = nr_entries,
1205 		.refs = ATOMIC_INIT(1),
1206 	};
1207 	unsigned char int3 = INT3_INSN_OPCODE;
1208 	unsigned int i;
1209 	int do_sync;
1210 
1211 	lockdep_assert_held(&text_mutex);
1212 
1213 	smp_store_release(&bp_desc, &desc); /* rcu_assign_pointer */
1214 
1215 	/*
1216 	 * Corresponding read barrier in int3 notifier for making sure the
1217 	 * nr_entries and handler are correctly ordered wrt. patching.
1218 	 */
1219 	smp_wmb();
1220 
1221 	/*
1222 	 * First step: add an int3 trap to the address that will be patched.
1223 	 */
1224 	for (i = 0; i < nr_entries; i++) {
1225 		tp[i].old = *(u8 *)text_poke_addr(&tp[i]);
1226 		text_poke(text_poke_addr(&tp[i]), &int3, INT3_INSN_SIZE);
1227 	}
1228 
1229 	text_poke_sync();
1230 
1231 	/*
1232 	 * Second step: update all but the first byte of the patched range.
1233 	 */
1234 	for (do_sync = 0, i = 0; i < nr_entries; i++) {
1235 		u8 old[POKE_MAX_OPCODE_SIZE] = { tp[i].old, };
1236 		int len = text_opcode_size(tp[i].opcode);
1237 
1238 		if (len - INT3_INSN_SIZE > 0) {
1239 			memcpy(old + INT3_INSN_SIZE,
1240 			       text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
1241 			       len - INT3_INSN_SIZE);
1242 			text_poke(text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
1243 				  (const char *)tp[i].text + INT3_INSN_SIZE,
1244 				  len - INT3_INSN_SIZE);
1245 			do_sync++;
1246 		}
1247 
1248 		/*
1249 		 * Emit a perf event to record the text poke, primarily to
1250 		 * support Intel PT decoding which must walk the executable code
1251 		 * to reconstruct the trace. The flow up to here is:
1252 		 *   - write INT3 byte
1253 		 *   - IPI-SYNC
1254 		 *   - write instruction tail
1255 		 * At this point the actual control flow will be through the
1256 		 * INT3 and handler and not hit the old or new instruction.
1257 		 * Intel PT outputs FUP/TIP packets for the INT3, so the flow
1258 		 * can still be decoded. Subsequently:
1259 		 *   - emit RECORD_TEXT_POKE with the new instruction
1260 		 *   - IPI-SYNC
1261 		 *   - write first byte
1262 		 *   - IPI-SYNC
1263 		 * So before the text poke event timestamp, the decoder will see
1264 		 * either the old instruction flow or FUP/TIP of INT3. After the
1265 		 * text poke event timestamp, the decoder will see either the
1266 		 * new instruction flow or FUP/TIP of INT3. Thus decoders can
1267 		 * use the timestamp as the point at which to modify the
1268 		 * executable code.
1269 		 * The old instruction is recorded so that the event can be
1270 		 * processed forwards or backwards.
1271 		 */
1272 		perf_event_text_poke(text_poke_addr(&tp[i]), old, len,
1273 				     tp[i].text, len);
1274 	}
1275 
1276 	if (do_sync) {
1277 		/*
1278 		 * According to Intel, this core syncing is very likely
1279 		 * not necessary and we'd be safe even without it. But
1280 		 * better safe than sorry (plus there's not only Intel).
1281 		 */
1282 		text_poke_sync();
1283 	}
1284 
1285 	/*
1286 	 * Third step: replace the first byte (int3) by the first byte of
1287 	 * replacing opcode.
1288 	 */
1289 	for (do_sync = 0, i = 0; i < nr_entries; i++) {
1290 		if (tp[i].text[0] == INT3_INSN_OPCODE)
1291 			continue;
1292 
1293 		text_poke(text_poke_addr(&tp[i]), tp[i].text, INT3_INSN_SIZE);
1294 		do_sync++;
1295 	}
1296 
1297 	if (do_sync)
1298 		text_poke_sync();
1299 
1300 	/*
1301 	 * Remove the descriptor and, instead of synchronize_rcu(), wait for
1302 	 * readers via a very primitive refcount-based completion.
1303 	 */
1304 	WRITE_ONCE(bp_desc, NULL); /* RCU_INIT_POINTER */
1305 	if (!atomic_dec_and_test(&desc.refs))
1306 		atomic_cond_read_acquire(&desc.refs, !VAL);
1307 }
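/*
 * Example timeline for patching a 5-byte CALL (illustrative):
 *
 *	1) byte 0 becomes INT3 (0xcc)			text_poke_sync()
 *	2) bytes 1-4 receive the new rel32		text_poke_sync()
 *	3) byte 0 receives the new opcode (0xe8)	text_poke_sync()
 *
 * A CPU that executes the site anywhere in this window hits the INT3 and
 * poke_int3_handler() emulates the new instruction from tp->opcode/rel32.
 */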
1308 
1309 static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
1310 			       const void *opcode, size_t len, const void *emulate)
1311 {
1312 	struct insn insn;
1313 
1314 	memcpy((void *)tp->text, opcode, len);
1315 	if (!emulate)
1316 		emulate = opcode;
1317 
1318 	kernel_insn_init(&insn, emulate, MAX_INSN_SIZE);
1319 	insn_get_length(&insn);
1320 
1321 	BUG_ON(!insn_complete(&insn));
1322 	BUG_ON(len != insn.length);
1323 
1324 	tp->rel_addr = addr - (void *)_stext;
1325 	tp->opcode = insn.opcode.bytes[0];
1326 
1327 	switch (tp->opcode) {
1328 	case INT3_INSN_OPCODE:
1329 	case RET_INSN_OPCODE:
1330 		break;
1331 
1332 	case CALL_INSN_OPCODE:
1333 	case JMP32_INSN_OPCODE:
1334 	case JMP8_INSN_OPCODE:
1335 		tp->rel32 = insn.immediate.value;
1336 		break;
1337 
1338 	default: /* assume NOP */
1339 		switch (len) {
1340 		case 2: /* NOP2 -- emulate as JMP8+0 */
1341 			BUG_ON(memcmp(emulate, ideal_nops[len], len));
1342 			tp->opcode = JMP8_INSN_OPCODE;
1343 			tp->rel32 = 0;
1344 			break;
1345 
1346 		case 5: /* NOP5 -- emulate as JMP32+0 */
1347 			BUG_ON(memcmp(emulate, ideal_nops[NOP_ATOMIC5], len));
1348 			tp->opcode = JMP32_INSN_OPCODE;
1349 			tp->rel32 = 0;
1350 			break;
1351 
1352 		default: /* unknown instruction */
1353 			BUG();
1354 		}
1355 		break;
1356 	}
1357 }
1358 
1359 /*
1360  * We hard rely on the tp_vec being ordered; ensure this is so by flushing
1361  * early if needed.
1362  */
1363 static bool tp_order_fail(void *addr)
1364 {
1365 	struct text_poke_loc *tp;
1366 
1367 	if (!tp_vec_nr)
1368 		return false;
1369 
1370 	if (!addr) /* force */
1371 		return true;
1372 
1373 	tp = &tp_vec[tp_vec_nr - 1];
1374 	if ((unsigned long)text_poke_addr(tp) > (unsigned long)addr)
1375 		return true;
1376 
1377 	return false;
1378 }
1379 
1380 static void text_poke_flush(void *addr)
1381 {
1382 	if (tp_vec_nr == TP_VEC_MAX || tp_order_fail(addr)) {
1383 		text_poke_bp_batch(tp_vec, tp_vec_nr);
1384 		tp_vec_nr = 0;
1385 	}
1386 }
1387 
1388 void text_poke_finish(void)
1389 {
1390 	text_poke_flush(NULL);
1391 }
1392 
1393 void __ref text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate)
1394 {
1395 	struct text_poke_loc *tp;
1396 
1397 	if (unlikely(system_state == SYSTEM_BOOTING)) {
1398 		text_poke_early(addr, opcode, len);
1399 		return;
1400 	}
1401 
1402 	text_poke_flush(addr);
1403 
1404 	tp = &tp_vec[tp_vec_nr++];
1405 	text_poke_loc_init(tp, addr, opcode, len, emulate);
1406 }
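/*
 * Batched usage sketch (the jump label code, for instance, uses this pattern;
 * addr/insn/len below are placeholders):
 *
 *	mutex_lock(&text_mutex);
 *	text_poke_queue(addr1, insn1, len1, NULL);
 *	text_poke_queue(addr2, insn2, len2, NULL);
 *	text_poke_finish();
 *	mutex_unlock(&text_mutex);
 *
 * text_poke_queue() flushes automatically when tp_vec fills up or when an
 * out-of-order address would break the sorted vector.
 */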
1407 
1408 /**
1409  * text_poke_bp() -- update instructions on live kernel on SMP
1410  * @addr:	address to patch
1411  * @opcode:	opcode of new instruction
1412  * @len:	length to copy
1413  * @emulate:	instruction to be emulated
1414  *
1415  * Update a single instruction using an on-stack vector, avoiding
1416  * dynamically allocated memory. This function should be used when it is
1417  * not possible to allocate memory.
1418  */
1419 void __ref text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate)
1420 {
1421 	struct text_poke_loc tp;
1422 
1423 	if (unlikely(system_state == SYSTEM_BOOTING)) {
1424 		text_poke_early(addr, opcode, len);
1425 		return;
1426 	}
1427 
1428 	text_poke_loc_init(&tp, addr, opcode, len, emulate);
1429 	text_poke_bp_batch(&tp, 1);
1430 }
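/*
 * Single-site usage sketch (illustrative; @emulate == NULL means "emulate the
 * bytes in @opcode themselves"):
 *
 *	u8 insn[5] = { CALL_INSN_OPCODE, };	// rel32 filled in by the caller
 *
 *	mutex_lock(&text_mutex);
 *	text_poke_bp(addr, insn, sizeof(insn), NULL);
 *	mutex_unlock(&text_mutex);
 */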
1431