xref: /linux/arch/powerpc/kernel/kvm.c (revision ec63e2a4897075e427c121d863bd89c44578094f)
/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/kvm_para.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/pagemap.h>

#include <asm/reg.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/epapr_hcalls.h>

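/*
 * The KVM magic page is a page of shared guest<->host state that the host
 * maps at guest effective address -4096, i.e. at the very top of the
 * address space.  That way a single load or store with RA=0 and a negative
 * 16-bit displacement can reach any field in it.  magic_var() turns a field
 * of struct kvm_vcpu_arch_shared into such a displacement, e.g.
 * magic_var(msr) is KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, msr).
 */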
#define KVM_MAGIC_PAGE		(-4096L)
#define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)

#define KVM_INST_LWZ		0x80000000
#define KVM_INST_STW		0x90000000
#define KVM_INST_LD		0xe8000000
#define KVM_INST_STD		0xf8000000
#define KVM_INST_NOP		0x60000000
#define KVM_INST_B		0x48000000
#define KVM_INST_B_MASK		0x03fffffc
#define KVM_INST_B_MAX		0x01fffffc
#define KVM_INST_LI		0x38000000

#define KVM_MASK_RT		0x03e00000
#define KVM_RT_30		0x03c00000
#define KVM_MASK_RB		0x0000f800
#define KVM_INST_MFMSR		0x7c0000a6

#define SPR_FROM		0
#define SPR_TO			0x100

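/*
 * mfspr/mtspr encode the SPR number with its two 5-bit halves swapped, and
 * differ only in bit 0x100 of the extended opcode.  KVM_INST_SPR() rebuilds
 * that split encoding and picks mfspr (SPR_FROM) or mtspr (SPR_TO).  As a
 * worked example, KVM_INST_MFSPR(SPRN_SRR0) with SPRN_SRR0 == 26 comes out
 * as 0x7c1a02a6, i.e. "mfspr r0, SRR0" before the RT field is OR'ed in.
 */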
#define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
				    (((sprn) & 0x1f) << 16) | \
				    (((sprn) & 0x3e0) << 6) | \
				    (moveto))

#define KVM_INST_MFSPR(sprn)	KVM_INST_SPR(sprn, SPR_FROM)
#define KVM_INST_MTSPR(sprn)	KVM_INST_SPR(sprn, SPR_TO)

#define KVM_INST_TLBSYNC	0x7c00046c
#define KVM_INST_MTMSRD_L0	0x7c000164
#define KVM_INST_MTMSRD_L1	0x7c010164
#define KVM_INST_MTMSR		0x7c000124

#define KVM_INST_WRTEE		0x7c000106
#define KVM_INST_WRTEEI_0	0x7c000146
#define KVM_INST_WRTEEI_1	0x7c008146

#define KVM_INST_MTSRIN		0x7c0001e4

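/*
 * kvm_tmp is a 1 MiB scratch buffer in .bss that holds the emulation
 * trampolines the patcher copies out of the template code.  It is handed
 * out linearly by kvm_alloc(), and whatever is left over after patching
 * is released back to the page allocator by kvm_free_tmp().
 */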
static bool kvm_patching_worked = true;
char kvm_tmp[1024 * 1024];
static int kvm_tmp_index;

static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
{
	*inst = new_inst;
	flush_icache_range((ulong)inst, (ulong)inst + 4);
}

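/*
 * The helpers below rewrite a single instruction so that it loads from or
 * stores to a field of the magic page instead of touching the real SPR.
 * The ll/ld/std variants pick the access width matching the kernel's word
 * size; on 32-bit (big-endian) the +4 in the ld/std cases addresses the low
 * word of the 64-bit shared-page field.
 */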
static void kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000fffc));
#endif
}

static void kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff));
}

static void kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc));
}

static void kvm_patch_ins_nop(u32 *inst)
{
	kvm_patch_ins(inst, KVM_INST_NOP);
}

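/*
 * Replace an instruction with an unconditional relative branch.  The offset
 * must fit the signed 26-bit, word-aligned displacement of a "b"
 * instruction, which is why callers check against KVM_INST_B_MAX before
 * asking for the patch.
 */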
static void kvm_patch_ins_b(u32 *inst, int addr)
{
#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
	/* On relocatable kernels, interrupt handlers and our code
	   can be in different regions, so we don't patch them */

	if ((ulong)inst < (ulong)&__end_interrupts)
		return;
#endif

	kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK));
}

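/*
 * Trivial bump allocator over kvm_tmp: hand out the next len bytes, or give
 * up on patching entirely once the buffer is exhausted.
 */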
static u32 *kvm_alloc(int len)
{
	u32 *p;

	if ((kvm_tmp_index + len) > ARRAY_SIZE(kvm_tmp)) {
		printk(KERN_ERR "KVM: No more space (%d + %d)\n",
				kvm_tmp_index, len);
		kvm_patching_worked = false;
		return NULL;
	}

	p = (void*)&kvm_tmp[kvm_tmp_index];
	kvm_tmp_index += len;

	return p;
}

extern u32 kvm_emulate_mtmsrd_branch_offs;
extern u32 kvm_emulate_mtmsrd_reg_offs;
extern u32 kvm_emulate_mtmsrd_orig_ins_offs;
extern u32 kvm_emulate_mtmsrd_len;
extern u32 kvm_emulate_mtmsrd[];

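/*
 * kvm_patch_ins_mtmsrd() and the mtmsr/wrtee/wrteei/mtsrin variants below
 * all follow the same pattern: copy the matching assembly template from
 * kvm_emulate_*[] into kvm_tmp, fix up the template's final branch so it
 * returns to the instruction after the patch site, copy the original
 * instruction into the template's orig_ins slot (presumably for the path
 * where the real instruction still has to run), and finally replace the
 * patch site with a branch into the trampoline.  When the register involved
 * is r30 or r31, the value is instead loaded from the magic page's
 * scratch2/scratch1 slots, where the templates are expected to have stashed
 * the caller's values of those registers.
 */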
static void kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsrd_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsrd_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4);
	p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK;
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsrd_reg_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsrd_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsrd_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

extern u32 kvm_emulate_mtmsr_branch_offs;
extern u32 kvm_emulate_mtmsr_reg1_offs;
extern u32 kvm_emulate_mtmsr_reg2_offs;
extern u32 kvm_emulate_mtmsr_orig_ins_offs;
extern u32 kvm_emulate_mtmsr_len;
extern u32 kvm_emulate_mtmsr[];

static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsr_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsr_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4);
	p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK;

	/* Make clobbered registers work too */
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch2), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch1), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsr_reg1_offs] |= rt;
		p[kvm_emulate_mtmsr_reg2_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsr_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#ifdef CONFIG_BOOKE

extern u32 kvm_emulate_wrtee_branch_offs;
extern u32 kvm_emulate_wrtee_reg_offs;
extern u32 kvm_emulate_wrtee_orig_ins_offs;
extern u32 kvm_emulate_wrtee_len;
extern u32 kvm_emulate_wrtee[];

static void kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrtee_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrtee_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_wrtee, kvm_emulate_wrtee_len * 4);
	p[kvm_emulate_wrtee_branch_offs] |= distance_end & KVM_INST_B_MASK;

	if (imm_one) {
		p[kvm_emulate_wrtee_reg_offs] =
			KVM_INST_LI | __PPC_RT(R30) | MSR_EE;
	} else {
		/* Make clobbered registers work too */
		switch (get_rt(rt)) {
		case 30:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch2), KVM_RT_30);
			break;
		case 31:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch1), KVM_RT_30);
			break;
		default:
			p[kvm_emulate_wrtee_reg_offs] |= rt;
			break;
		}
	}

	p[kvm_emulate_wrtee_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrtee_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

extern u32 kvm_emulate_wrteei_0_branch_offs;
extern u32 kvm_emulate_wrteei_0_len;
extern u32 kvm_emulate_wrteei_0[];

static void kvm_patch_ins_wrteei_0(u32 *inst)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrteei_0_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_0_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	memcpy(p, kvm_emulate_wrteei_0, kvm_emulate_wrteei_0_len * 4);
	p[kvm_emulate_wrteei_0_branch_offs] |= distance_end & KVM_INST_B_MASK;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_0_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#endif

#ifdef CONFIG_PPC_BOOK3S_32

extern u32 kvm_emulate_mtsrin_branch_offs;
extern u32 kvm_emulate_mtsrin_reg1_offs;
extern u32 kvm_emulate_mtsrin_reg2_offs;
extern u32 kvm_emulate_mtsrin_orig_ins_offs;
extern u32 kvm_emulate_mtsrin_len;
extern u32 kvm_emulate_mtsrin[];

static void kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtsrin_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtsrin_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtsrin, kvm_emulate_mtsrin_len * 4);
	p[kvm_emulate_mtsrin_branch_offs] |= distance_end & KVM_INST_B_MASK;
	p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10);
	p[kvm_emulate_mtsrin_reg2_offs] |= rt;
	p[kvm_emulate_mtsrin_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtsrin_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#endif

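/*
 * Ask the host, via the KVM_HC_PPC_MAP_MAGIC_PAGE ePAPR hypercall, to map
 * the magic page at the top of this CPU's effective address space.  The
 * first output word reports which optional paravirt features the host
 * supports (e.g. the MAS0..SPRG7 mirror); it is written through *data so
 * the caller can collect it from on_each_cpu().
 */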
static void kvm_map_magic_page(void *data)
{
	u32 *features = data;

	ulong in[8] = {0};
	ulong out[8];

	in[0] = KVM_MAGIC_PAGE;
	in[1] = KVM_MAGIC_PAGE | MAGIC_PAGE_FLAG_NOT_MAPPED_NX;

	epapr_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));

	*features = out[0];
}

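/*
 * Look at a single kernel instruction and, if it is one of the privileged
 * instructions we know how to paravirtualize, patch it in place:
 *  - mfmsr/mfspr become loads from the magic page,
 *  - mtspr becomes a store to the magic page,
 *  - tlbsync becomes a nop,
 *  - mtmsr/mtmsrd/wrtee/wrteei/mtsrin branch into a trampoline built from
 *    the templates above.
 * The RT (and, where needed, RB) field is masked off first so the switch
 * can match on the opcode alone, and is then OR'ed back into the
 * replacement instruction.
 */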
static void kvm_check_ins(u32 *inst, u32 features)
{
	u32 _inst = *inst;
	u32 inst_no_rt = _inst & ~KVM_MASK_RT;
	u32 inst_rt = _inst & KVM_MASK_RT;

	switch (inst_no_rt) {
	/* Loads */
	case KVM_INST_MFMSR:
		kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG0):
		kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG1):
		kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG2):
		kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG3):
		kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR0):
		kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR1):
		kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_DEAR):
#else
	case KVM_INST_MFSPR(SPRN_DAR):
#endif
		kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_DSISR):
		kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
		break;

#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MFSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */

	case KVM_INST_MFSPR(SPRN_SPRG4):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG4R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG5):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG5R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG6):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG6R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG7):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG7R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(esr), inst_rt);
		break;
#endif

	case KVM_INST_MFSPR(SPRN_PIR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(pir), inst_rt);
		break;


	/* Stores */
	case KVM_INST_MTSPR(SPRN_SPRG0):
		kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG1):
		kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG2):
		kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG3):
		kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR0):
		kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR1):
		kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_DEAR):
#else
	case KVM_INST_MTSPR(SPRN_DAR):
#endif
		kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_DSISR):
		kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
		break;
#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MTSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */

	case KVM_INST_MTSPR(SPRN_SPRG4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG5):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(esr), inst_rt);
		break;
#endif

	/* Nops */
	case KVM_INST_TLBSYNC:
		kvm_patch_ins_nop(inst);
		break;

	/* Rewrites */
	case KVM_INST_MTMSRD_L1:
		kvm_patch_ins_mtmsrd(inst, inst_rt);
		break;
	case KVM_INST_MTMSR:
	case KVM_INST_MTMSRD_L0:
		kvm_patch_ins_mtmsr(inst, inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEE:
		kvm_patch_ins_wrtee(inst, inst_rt, 0);
		break;
#endif
	}

	switch (inst_no_rt & ~KVM_MASK_RB) {
#ifdef CONFIG_PPC_BOOK3S_32
	case KVM_INST_MTSRIN:
		if (features & KVM_MAGIC_FEAT_SR) {
			u32 inst_rb = _inst & KVM_MASK_RB;
			kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
		}
		break;
#endif
	}

	switch (_inst) {
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEEI_0:
		kvm_patch_ins_wrteei_0(inst);
		break;

	case KVM_INST_WRTEEI_1:
		kvm_patch_ins_wrtee(inst, 0, 1);
		break;
#endif
	}
}

extern u32 kvm_template_start[];
extern u32 kvm_template_end[];

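/*
 * Main entry point of the patcher: map the magic page on every CPU, verify
 * that it is actually readable, then walk every instruction between _stext
 * and _etext and rewrite the ones kvm_check_ins() recognises.  The template
 * code itself is skipped so we don't patch our own trampolines.
 */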
static void kvm_use_magic_page(void)
{
	u32 *p;
	u32 *start, *end;
	u32 features;

	/* Tell the host to map the magic page to -4096 on all CPUs */
	on_each_cpu(kvm_map_magic_page, &features, 1);

	/* Quick self-test to see if the mapping works */
	if (fault_in_pages_readable((const char *)KVM_MAGIC_PAGE, sizeof(u32))) {
		kvm_patching_worked = false;
		return;
	}

	/* Now loop through all code and find instructions */
	start = (void*)_stext;
	end = (void*)_etext;

	/*
	 * Being interrupted in the middle of patching would
	 * be bad for SPRG4-7, which KVM can't keep in sync
	 * with emulated accesses because reads don't trap.
	 */
	local_irq_disable();

	for (p = start; p < end; p++) {
		/* Avoid patching the template code */
		if (p >= kvm_template_start && p < kvm_template_end) {
			p = kvm_template_end - 1;
			continue;
		}
		kvm_check_ins(p, features);
	}

	local_irq_enable();

	printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
			 kvm_patching_worked ? "worked" : "failed");
}

static __init void kvm_free_tmp(void)
{
	/*
	 * Inform kmemleak about the hole in the .bss section since the
	 * corresponding pages will be unmapped with DEBUG_PAGEALLOC=y.
	 */
	kmemleak_free_part(&kvm_tmp[kvm_tmp_index],
			   ARRAY_SIZE(kvm_tmp) - kvm_tmp_index);
	free_reserved_area(&kvm_tmp[kvm_tmp_index],
			   &kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL);
}

static int __init kvm_guest_init(void)
{
	if (!kvm_para_available())
		goto free_tmp;

	if (!epapr_paravirt_enabled)
		goto free_tmp;

	if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
		kvm_use_magic_page();

#ifdef CONFIG_PPC_BOOK3S_64
	/* Enable napping */
	powersave_nap = 1;
#endif

free_tmp:
	kvm_free_tmp();

	return 0;
}

postcore_initcall(kvm_guest_init);
749