xref: /linux/arch/mips/kvm/tlb.c (revision 93d90ad708b8da6efc0e487b66111aa9db7f70c7)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling; this file is part of the Linux host kernel so that
 * the TLB handlers run from KSEG0.
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>

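/*
 * r4kcache.h is included with CONFIG_MIPS_MT temporarily undefined,
 * presumably so that the plain, non-MT variants of the cache operations
 * are picked up for these handlers.
 */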
#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#define KVM_GUEST_PC_TLB    0
#define KVM_GUEST_SP_TLB    1

#define PRIx64 "llx"

atomic_t kvm_mips_instance;
EXPORT_SYMBOL(kvm_mips_instance);

/* These function pointers are initialized once the KVM module is loaded */
pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);

void (*kvm_mips_release_pfn_clean)(pfn_t pfn);
EXPORT_SYMBOL(kvm_mips_release_pfn_clean);

bool (*kvm_mips_is_error_pfn)(pfn_t pfn);
EXPORT_SYMBOL(kvm_mips_is_error_pfn);

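/*
 * These helpers index per-CPU ASID state via smp_processor_id(), so the
 * caller must prevent migration; the users in this file are documented as
 * running with interrupts disabled.
 */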
uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
}

uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
}

inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.commpage_tlb;
}

/* Structure defining a TLB entry data set. */

void kvm_mips_dump_host_tlbs(void)
{
	unsigned long old_entryhi;
	unsigned long old_pagemask;
	struct kvm_mips_tlb tlb;
	unsigned long flags;
	int i;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	kvm_info("HOST TLBs:\n");
	kvm_info("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);

	for (i = 0; i < current_cpu_data.tlbsize; i++) {
		write_c0_index(i);
		mtc0_tlbw_hazard();

		tlb_read();
		tlbw_use_hazard();

		tlb.tlb_hi = read_c0_entryhi();
		tlb.tlb_lo0 = read_c0_entrylo0();
		tlb.tlb_lo1 = read_c0_entrylo1();
		tlb.tlb_mask = read_c0_pagemask();

		kvm_info("TLB%c%3d Hi 0x%08lx ",
			 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
			 i, tlb.tlb_hi);
		kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
			 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo0 >> 3) & 7);
		kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
			 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}
	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	local_irq_restore(flags);
}
EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);

void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb tlb;
	int i;

	kvm_info("Guest TLBs:\n");
	kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		tlb = vcpu->arch.guest_tlb[i];
		kvm_info("TLB%c%3d Hi 0x%08lx ",
			 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
			 i, tlb.tlb_hi);
		kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
			 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo0 >> 3) & 7);
		kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
			 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}
}
EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);

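/*
 * Lazily map a guest page: on first use, look up the backing host pfn for
 * @gfn under SRCU and cache it in kvm->arch.guest_pmap, so later faults on
 * the same frame avoid another gfn_to_pfn lookup.
 */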
static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
	int srcu_idx, err = 0;
	pfn_t pfn;

	if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
		return 0;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	pfn = kvm_mips_gfn_to_pfn(kvm, gfn);

	if (kvm_mips_is_error_pfn(pfn)) {
		kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
		err = -EFAULT;
		goto out;
	}

	kvm->arch.guest_pmap[gfn] = pfn;
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return err;
}

/* Translate guest KSEG0 addresses to Host PA */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
						    unsigned long gva)
{
	gfn_t gfn;
	uint32_t offset = gva & ~PAGE_MASK;
	struct kvm *kvm = vcpu->kvm;

	if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
		kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
			__builtin_return_address(0), gva);
		return KVM_INVALID_PAGE;
	}

	gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);

	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
			gva);
		return KVM_INVALID_PAGE;
	}

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return KVM_INVALID_ADDR;

	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}
EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);

/*
 * XXXKYMA: Must be called with interrupts disabled.
 * Pass flush_dcache_mask == 0 if no dcache flush is required.
 */
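/*
 * Probes for an existing host TLB entry matching @entryhi so it can be
 * updated in place; if none is found, the entry is written to a random slot.
 */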
int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
			    unsigned long entrylo0, unsigned long entrylo1,
			    int flush_dcache_mask)
{
	unsigned long flags;
	unsigned long old_entryhi;
	int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	write_c0_entryhi(entryhi);
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx >= current_cpu_data.tlbsize) {
		kvm_err("%s: Invalid Index: %d\n", __func__, idx);
		kvm_mips_dump_host_tlbs();
		write_c0_entryhi(old_entryhi);
		mtc0_tlbw_hazard();
		local_irq_restore(flags);
		return -1;
	}

	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();

	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, idx, read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Flush D-cache */
	if (flush_dcache_mask) {
		if (entrylo0 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page((entryhi & VPN2_MASK) &
					      ~flush_dcache_mask);
		}
		if (entrylo1 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page(((entryhi & VPN2_MASK) &
					       ~flush_dcache_mask) |
					      (0x1 << PAGE_SHIFT));
		}
	}

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);
	return 0;
}

/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
				    struct kvm_vcpu *vcpu)
{
	gfn_t gfn;
	pfn_t pfn0, pfn1;
	unsigned long vaddr = 0;
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	int even;
	struct kvm *kvm = vcpu->kvm;
	const int flush_dcache_mask = 0;

	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}

	gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
			gfn, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}
	even = !(gfn & 0x1);
	vaddr = badvaddr & (PAGE_MASK << 1);

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return -1;

	if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
		return -1;

	if (even) {
		pfn0 = kvm->arch.guest_pmap[gfn];
		pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
	} else {
		pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
		pfn1 = kvm->arch.guest_pmap[gfn];
	}

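	/*
	 * Bits below the EntryLo PFN: (0x3 << 3) sets cache attribute 3
	 * (typically cacheable, noncoherent), (1 << 2) sets D (dirty) and
	 * (0x1 << 1) sets V (valid); G is left clear since the entries are
	 * tagged with the guest kernel ASID.
	 */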
	entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);

	return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				       flush_dcache_mask);
}
EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);

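/*
 * Map the commpage into the guest's address space. Note that despite its
 * name, kvm_mips_get_commpage_asid() returns the TLB index reserved for the
 * commpage (kvm->arch.commpage_tlb); it is written to c0_index for the
 * indexed TLB write below.
 */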
int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
				       struct kvm_vcpu *vcpu)
{
	pfn_t pfn0, pfn1;
	unsigned long flags, old_entryhi = 0, vaddr = 0;
	unsigned long entrylo0 = 0, entrylo1 = 0;

	pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
	pfn1 = 0;
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = 0;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	vaddr = badvaddr & (PAGE_MASK << 1);
	write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
	mtc0_tlbw_hazard();
	write_c0_entrylo0(entrylo0);
	mtc0_tlbw_hazard();
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	write_c0_index(kvm_mips_get_commpage_asid(vcpu));
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);

	return 0;
}
EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);

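/*
 * Service a fault in a guest TLB-mapped segment: map both halves of the
 * guest TLB entry pair to host pfns, carry over the guest's V and D bits,
 * and tag the host entry with the guest kernel or user ASID as appropriate.
 * The resulting host physical addresses are returned through @hpa0/@hpa1
 * when the caller wants them.
 */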
int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
					 struct kvm_mips_tlb *tlb,
					 unsigned long *hpa0,
					 unsigned long *hpa1)
{
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	struct kvm *kvm = vcpu->kvm;
	pfn_t pfn0, pfn1;

	if ((tlb->tlb_hi & VPN2_MASK) == 0) {
		pfn0 = 0;
		pfn1 = 0;
	} else {
		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
					   >> PAGE_SHIFT) < 0)
			return -1;

		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
					   >> PAGE_SHIFT) < 0)
			return -1;

		pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
					    >> PAGE_SHIFT];
		pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
					    >> PAGE_SHIFT];
	}

	if (hpa0)
		*hpa0 = pfn0 << PAGE_SHIFT;

	if (hpa1)
		*hpa1 = pfn1 << PAGE_SHIFT;

	/* Get attributes from the Guest TLB */
	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
					       kvm_mips_get_kernel_asid(vcpu) :
					       kvm_mips_get_user_asid(vcpu));
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);

	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
		  tlb->tlb_lo0, tlb->tlb_lo1);

	return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				       tlb->tlb_mask);
}
EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);

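/* Linear scan of the guest's software-managed TLB for a VPN2/ASID match. */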
int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
	int i;
	int index = -1;
	struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
		    TLB_HI_ASID_HIT(tlb[i], entryhi)) {
			index = i;
			break;
		}
	}

	if (index >= 0)
		kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
			  __func__, entryhi, index,
			  tlb[index].tlb_lo0, tlb[index].tlb_lo1);

	return index;
}
EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);

int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
{
	unsigned long old_entryhi, flags;
	int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	if (KVM_GUEST_KERNEL_MODE(vcpu))
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_kernel_asid(vcpu));
	else
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_user_asid(vcpu));

	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);

	return idx;
}
EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);

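/*
 * Invalidate any host TLB entry mapping @va. Note the probe is done with the
 * guest *user* ASID only; see the XXXKYMA comment further down about guest
 * user/kernel ASIDs no longer being shared.
 */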
int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
	int idx;
	unsigned long flags, old_entryhi;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	BUG_ON(idx >= current_cpu_data.tlbsize);

	if (idx >= 0) {
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();

		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();

		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	if (idx >= 0)
		kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
			  (va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu), idx);

	return 0;
}
EXPORT_SYMBOL(kvm_mips_host_tlb_inv);

/*
 * XXXKYMA: Fix this now that guest USER and KERNEL modes no longer share the
 * same ASID.
 */
int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
{
	unsigned long flags, old_entryhi;

	BUG_ON(index >= current_cpu_data.tlbsize);

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	write_c0_entryhi(UNIQUE_ENTRYHI(index));
	mtc0_tlbw_hazard();

	write_c0_index(index);
	mtc0_tlbw_hazard();

	write_c0_entrylo0(0);
	mtc0_tlbw_hazard();

	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();

	tlb_write_indexed();
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	return 0;
}

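/*
 * Flush the host TLB. When @skip_kseg0 is set, each entry is read back first
 * and entries whose EntryHi lies in guest KSEG0 are preserved, so the
 * guest's unmapped-segment mappings survive the flush.
 */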
void kvm_mips_flush_host_tlb(int skip_kseg0)
{
	unsigned long flags;
	unsigned long old_entryhi, entryhi;
	unsigned long old_pagemask;
	int entry = 0;
	int maxentry = current_cpu_data.tlbsize;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	/* Blast 'em all away. */
	for (entry = 0; entry < maxentry; entry++) {
		write_c0_index(entry);
		mtc0_tlbw_hazard();

		if (skip_kseg0) {
			tlb_read();
			tlbw_use_hazard();

			entryhi = read_c0_entryhi();

			/* Don't blow away guest kernel entries */
			if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
				continue;
		}

		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		mtc0_tlbw_hazard();
		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	tlbw_use_hazard();

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL(kvm_mips_flush_host_tlb);

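/*
 * KVM counterpart of the host's get_new_mmu_context(): the same ASID-cycling
 * logic, except that the start of a new ASID cycle flushes the TLB via
 * kvm_local_flush_tlb_all().
 */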
void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
			     struct kvm_vcpu *vcpu)
{
	unsigned long asid = asid_cache(cpu);

	asid += ASID_INC;
	if (!(asid & ASID_MASK)) {
		if (cpu_has_vtag_icache)
			flush_icache_all();

		kvm_local_flush_tlb_all();      /* start new asid cycle */

		if (!asid)      /* fix version if needed */
			asid = ASID_FIRST_VERSION;
	}

	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}

void kvm_local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry = 0;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL(kvm_local_flush_tlb_all);

/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu:	Virtual CPU.
 *
 * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
 * if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
	if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
		hrtimer_restart(&vcpu->arch.comparecount_timer);
}

/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long flags;
	int newasid = 0;

	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

	/* Allocate new kernel and user ASIDs if needed */

	local_irq_save(flags);

	if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
	    ASID_VERSION_MASK) {
		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
		vcpu->arch.guest_kernel_asid[cpu] =
			vcpu->arch.guest_kernel_mm.context.asid[cpu];
		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
		vcpu->arch.guest_user_asid[cpu] =
			vcpu->arch.guest_user_mm.context.asid[cpu];
		newasid++;

		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
			  cpu_context(cpu, current->mm));
		kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
			  cpu, vcpu->arch.guest_kernel_asid[cpu]);
		kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
			  vcpu->arch.guest_user_asid[cpu]);
	}

	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
		/*
		 * Migrate the timer interrupt to the current CPU so that it
		 * always interrupts the guest and synchronously triggers a
		 * guest timer interrupt.
		 */
		kvm_mips_migrate_count(vcpu);
	}

	if (!newasid) {
		/*
		 * If we preempted while the guest was executing, then reload
		 * the pre-empted ASID
		 */
		if (current->flags & PF_VCPU) {
			write_c0_entryhi(vcpu->arch.preempt_entryhi &
					 ASID_MASK);
			ehb();
		}
	} else {
		/* New ASIDs were allocated for the VM */

		/*
		 * Were we in guest context? If so, then the pre-empted ASID is
		 * no longer valid and we need to set it to what it should be
		 * based on the mode of the Guest (Kernel/User).
		 */
		if (current->flags & PF_VCPU) {
			if (KVM_GUEST_KERNEL_MODE(vcpu))
				write_c0_entryhi(vcpu->arch.guest_kernel_asid[cpu] &
						 ASID_MASK);
			else
				write_c0_entryhi(vcpu->arch.guest_user_asid[cpu] &
						 ASID_MASK);
			ehb();
		}
	}

	local_irq_restore(flags);
}
EXPORT_SYMBOL(kvm_arch_vcpu_load);

/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	uint32_t cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();

	vcpu->arch.preempt_entryhi = read_c0_entryhi();
	vcpu->arch.last_sched_cpu = cpu;

	if ((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
	    ASID_VERSION_MASK) {
		kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
			  cpu_context(cpu, current->mm));
		drop_mmu_context(current->mm, cpu);
	}
	write_c0_entryhi(cpu_asid(cpu, current->mm));
	ehb();

	local_irq_restore(flags);
}
EXPORT_SYMBOL(kvm_arch_vcpu_put);

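/*
 * Fetch an instruction from guest context for emulation. Three cases: for
 * TLB-mapped guest addresses, read through the host TLB, faulting the
 * mapping in from the guest TLB if necessary; for guest KSEG0, translate to
 * a host physical address and read through CKSEG0; anything else is rejected
 * with KVM_INVALID_INST.
 */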
uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long paddr, flags, vpn2, asid;
	uint32_t inst;
	int index;

	if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
	    KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
		local_irq_save(flags);
		index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
		if (index >= 0) {
			inst = *opc;
		} else {
			vpn2 = (unsigned long) opc & VPN2_MASK;
			asid = kvm_read_c0_guest_entryhi(cop0) & ASID_MASK;
			index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
			if (index < 0) {
				kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
					__func__, opc, vcpu, read_c0_entryhi());
				kvm_mips_dump_host_tlbs();
				local_irq_restore(flags);
				return KVM_INVALID_INST;
			}
			kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
					&vcpu->arch.guest_tlb[index],
					NULL, NULL);
			inst = *opc;
		}
		local_irq_restore(flags);
	} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG0) {
		paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
						(unsigned long) opc);
		inst = *(uint32_t *) CKSEG0ADDR(paddr);
	} else {
		kvm_err("%s: illegal address: %p\n", __func__, opc);
		return KVM_INVALID_INST;
	}

	return inst;
}
EXPORT_SYMBOL(kvm_get_inst);
810