/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#ifndef __MIPS_KVM_HOST_H__
#define __MIPS_KVM_HOST_H__

#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/threads.h>
#include <linux/spinlock.h>

#include <asm/asm.h>
#include <asm/inst.h>
#include <asm/mipsregs.h>

#include <kvm/iodev.h>

/* MIPS KVM register ids */
#define MIPS_CP0_32(_R, _S)					\
	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))

#define MIPS_CP0_64(_R, _S)					\
	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))

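/*
 * For example, KVM_REG_MIPS_CP0_STATUS below is MIPS_CP0_32(12, 0): the
 * 32-bit one_reg id for CP0 register 12, select 0 (Status), i.e.
 *
 *	KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * 12 + 0)
 */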
#define KVM_REG_MIPS_CP0_INDEX		MIPS_CP0_32(0, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO0	MIPS_CP0_64(2, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO1	MIPS_CP0_64(3, 0)
#define KVM_REG_MIPS_CP0_CONTEXT	MIPS_CP0_64(4, 0)
#define KVM_REG_MIPS_CP0_CONTEXTCONFIG	MIPS_CP0_32(4, 1)
#define KVM_REG_MIPS_CP0_USERLOCAL	MIPS_CP0_64(4, 2)
#define KVM_REG_MIPS_CP0_XCONTEXTCONFIG	MIPS_CP0_64(4, 3)
#define KVM_REG_MIPS_CP0_PAGEMASK	MIPS_CP0_32(5, 0)
#define KVM_REG_MIPS_CP0_PAGEGRAIN	MIPS_CP0_32(5, 1)
#define KVM_REG_MIPS_CP0_SEGCTL0	MIPS_CP0_64(5, 2)
#define KVM_REG_MIPS_CP0_SEGCTL1	MIPS_CP0_64(5, 3)
#define KVM_REG_MIPS_CP0_SEGCTL2	MIPS_CP0_64(5, 4)
#define KVM_REG_MIPS_CP0_PWBASE		MIPS_CP0_64(5, 5)
#define KVM_REG_MIPS_CP0_PWFIELD	MIPS_CP0_64(5, 6)
#define KVM_REG_MIPS_CP0_PWSIZE		MIPS_CP0_64(5, 7)
#define KVM_REG_MIPS_CP0_WIRED		MIPS_CP0_32(6, 0)
#define KVM_REG_MIPS_CP0_PWCTL		MIPS_CP0_32(6, 6)
#define KVM_REG_MIPS_CP0_HWRENA		MIPS_CP0_32(7, 0)
#define KVM_REG_MIPS_CP0_BADVADDR	MIPS_CP0_64(8, 0)
#define KVM_REG_MIPS_CP0_BADINSTR	MIPS_CP0_32(8, 1)
#define KVM_REG_MIPS_CP0_BADINSTRP	MIPS_CP0_32(8, 2)
#define KVM_REG_MIPS_CP0_COUNT		MIPS_CP0_32(9, 0)
#define KVM_REG_MIPS_CP0_ENTRYHI	MIPS_CP0_64(10, 0)
#define KVM_REG_MIPS_CP0_COMPARE	MIPS_CP0_32(11, 0)
#define KVM_REG_MIPS_CP0_STATUS		MIPS_CP0_32(12, 0)
#define KVM_REG_MIPS_CP0_INTCTL		MIPS_CP0_32(12, 1)
#define KVM_REG_MIPS_CP0_CAUSE		MIPS_CP0_32(13, 0)
#define KVM_REG_MIPS_CP0_EPC		MIPS_CP0_64(14, 0)
#define KVM_REG_MIPS_CP0_PRID		MIPS_CP0_32(15, 0)
#define KVM_REG_MIPS_CP0_EBASE		MIPS_CP0_64(15, 1)
#define KVM_REG_MIPS_CP0_CONFIG		MIPS_CP0_32(16, 0)
#define KVM_REG_MIPS_CP0_CONFIG1	MIPS_CP0_32(16, 1)
#define KVM_REG_MIPS_CP0_CONFIG2	MIPS_CP0_32(16, 2)
#define KVM_REG_MIPS_CP0_CONFIG3	MIPS_CP0_32(16, 3)
#define KVM_REG_MIPS_CP0_CONFIG4	MIPS_CP0_32(16, 4)
#define KVM_REG_MIPS_CP0_CONFIG5	MIPS_CP0_32(16, 5)
#define KVM_REG_MIPS_CP0_CONFIG6	MIPS_CP0_32(16, 6)
#define KVM_REG_MIPS_CP0_CONFIG7	MIPS_CP0_32(16, 7)
#define KVM_REG_MIPS_CP0_MAARI		MIPS_CP0_64(17, 2)
#define KVM_REG_MIPS_CP0_XCONTEXT	MIPS_CP0_64(20, 0)
#define KVM_REG_MIPS_CP0_DIAG		MIPS_CP0_32(22, 0)
#define KVM_REG_MIPS_CP0_ERROREPC	MIPS_CP0_64(30, 0)
#define KVM_REG_MIPS_CP0_KSCRATCH1	MIPS_CP0_64(31, 2)
#define KVM_REG_MIPS_CP0_KSCRATCH2	MIPS_CP0_64(31, 3)
#define KVM_REG_MIPS_CP0_KSCRATCH3	MIPS_CP0_64(31, 4)
#define KVM_REG_MIPS_CP0_KSCRATCH4	MIPS_CP0_64(31, 5)
#define KVM_REG_MIPS_CP0_KSCRATCH5	MIPS_CP0_64(31, 6)
#define KVM_REG_MIPS_CP0_KSCRATCH6	MIPS_CP0_64(31, 7)


#define KVM_MAX_VCPUS		16

#define KVM_HALT_POLL_NS_DEFAULT 500000

extern unsigned long GUESTID_MASK;
extern unsigned long GUESTID_FIRST_VERSION;
extern unsigned long GUESTID_VERSION_MASK;

#define KVM_INVALID_ADDR		0xdeadbeef

/*
 * EVA has overlapping user & kernel address spaces, so user VAs may be >
 * PAGE_OFFSET. For this reason we can't use the default KVM_HVA_ERR_BAD of
 * PAGE_OFFSET.
 */

#define KVM_HVA_ERR_BAD			(-1UL)
#define KVM_HVA_ERR_RO_BAD		(-2UL)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return IS_ERR_VALUE(addr);
}

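/*
 * Illustrative use (a sketch, not part of this header): callers validate a
 * host virtual address before using it, e.g.
 *
 *	unsigned long hva = gfn_to_hva(kvm, gfn);
 *	if (kvm_is_error_hva(hva))
 *		return -EFAULT;
 */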
struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 wait_exits;
	u64 cache_exits;
	u64 signal_exits;
	u64 int_exits;
	u64 cop_unusable_exits;
	u64 tlbmod_exits;
	u64 tlbmiss_ld_exits;
	u64 tlbmiss_st_exits;
	u64 addrerr_st_exits;
	u64 addrerr_ld_exits;
	u64 syscall_exits;
	u64 resvd_inst_exits;
	u64 break_inst_exits;
	u64 trap_inst_exits;
	u64 msa_fpe_exits;
	u64 fpe_exits;
	u64 msa_disabled_exits;
	u64 flush_dcache_exits;
	u64 vz_gpsi_exits;
	u64 vz_gsfc_exits;
	u64 vz_hc_exits;
	u64 vz_grr_exits;
	u64 vz_gva_exits;
	u64 vz_ghfc_exits;
	u64 vz_gpa_exits;
	u64 vz_resvd_exits;
#ifdef CONFIG_CPU_LOONGSON64
	u64 vz_cpucfg_exits;
#endif
};

struct kvm_arch_memory_slot {
};

#ifdef CONFIG_CPU_LOONGSON64
struct ipi_state {
	uint32_t status;
	uint32_t en;
	uint32_t set;
	uint32_t clear;
	uint64_t buf[4];
};

struct loongson_kvm_ipi;

struct ipi_io_device {
	int node_id;
	struct loongson_kvm_ipi *ipi;
	struct kvm_io_device device;
};

struct loongson_kvm_ipi {
	spinlock_t lock;
	struct kvm *kvm;
	struct ipi_state ipistate[16];
	struct ipi_io_device dev_ipi[4];
};
#endif

struct kvm_arch {
	/* Guest physical mm */
	struct mm_struct gpa_mm;
	/* Mask of CPUs needing GPA ASID flush */
	cpumask_t asid_flush_mask;
#ifdef CONFIG_CPU_LOONGSON64
	struct loongson_kvm_ipi ipi;
#endif
};

#define N_MIPS_COPROC_REGS	32
#define N_MIPS_COPROC_SEL	8

struct mips_coproc {
	unsigned long reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
	unsigned long stat[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
#endif
};

/*
 * Coprocessor 0 register names
 */
#define MIPS_CP0_TLB_INDEX	0
#define MIPS_CP0_TLB_RANDOM	1
#define MIPS_CP0_TLB_LOW	2
#define MIPS_CP0_TLB_LO0	2
#define MIPS_CP0_TLB_LO1	3
#define MIPS_CP0_TLB_CONTEXT	4
#define MIPS_CP0_TLB_PG_MASK	5
#define MIPS_CP0_TLB_WIRED	6
#define MIPS_CP0_HWRENA		7
#define MIPS_CP0_BAD_VADDR	8
#define MIPS_CP0_COUNT		9
#define MIPS_CP0_TLB_HI		10
#define MIPS_CP0_COMPARE	11
#define MIPS_CP0_STATUS		12
#define MIPS_CP0_CAUSE		13
#define MIPS_CP0_EXC_PC		14
#define MIPS_CP0_PRID		15
#define MIPS_CP0_CONFIG		16
#define MIPS_CP0_LLADDR		17
#define MIPS_CP0_WATCH_LO	18
#define MIPS_CP0_WATCH_HI	19
#define MIPS_CP0_TLB_XCONTEXT	20
#define MIPS_CP0_DIAG		22
#define MIPS_CP0_ECC		26
#define MIPS_CP0_CACHE_ERR	27
#define MIPS_CP0_TAG_LO		28
#define MIPS_CP0_TAG_HI		29
#define MIPS_CP0_ERROR_PC	30
#define MIPS_CP0_DEBUG		23
#define MIPS_CP0_DEPC		24
#define MIPS_CP0_PERFCNT	25
#define MIPS_CP0_ERRCTL		26
#define MIPS_CP0_DATA_LO	28
#define MIPS_CP0_DATA_HI	29
#define MIPS_CP0_DESAVE		31

#define MIPS_CP0_CONFIG_SEL	0
#define MIPS_CP0_CONFIG1_SEL	1
#define MIPS_CP0_CONFIG2_SEL	2
#define MIPS_CP0_CONFIG3_SEL	3
#define MIPS_CP0_CONFIG4_SEL	4
#define MIPS_CP0_CONFIG5_SEL	5

#define MIPS_CP0_GUESTCTL2	10
#define MIPS_CP0_GUESTCTL2_SEL	5
#define MIPS_CP0_GTOFFSET	12
#define MIPS_CP0_GTOFFSET_SEL	7

/* Resume Flags */
#define RESUME_FLAG_DR		(1<<0)	/* Reload guest nonvolatile state? */
#define RESUME_FLAG_HOST	(1<<1)	/* Resume host? */

#define RESUME_GUEST		0
#define RESUME_GUEST_DR		RESUME_FLAG_DR
#define RESUME_HOST		RESUME_FLAG_HOST

enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_WAIT,		/* WAIT instruction */
	EMULATE_PRIV_FAIL,
	EMULATE_EXCEPT,		/* A guest exception has been generated */
	EMULATE_HYPERCALL,	/* HYPCALL instruction */
};

#if defined(CONFIG_64BIT)
#define VPN2_MASK		GENMASK(cpu_vmbits - 1, 13)
#else
#define VPN2_MASK		0xffffe000
#endif
#define KVM_ENTRYHI_ASID	cpu_asid_mask(&boot_cpu_data)
#define TLB_IS_GLOBAL(x)	((x).tlb_lo[0] & (x).tlb_lo[1] & ENTRYLO_G)
#define TLB_VPN2(x)		((x).tlb_hi & VPN2_MASK)
#define TLB_ASID(x)		((x).tlb_hi & KVM_ENTRYHI_ASID)
#define TLB_LO_IDX(x, va)	(((va) >> PAGE_SHIFT) & 1)
#define TLB_IS_VALID(x, va)	((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_V)
#define TLB_IS_DIRTY(x, va)	((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_D)
#define TLB_HI_VPN2_HIT(x, y)	((TLB_VPN2(x) & ~(x).tlb_mask) ==	\
				 ((y) & VPN2_MASK & ~(x).tlb_mask))
#define TLB_HI_ASID_HIT(x, y)	(TLB_IS_GLOBAL(x) ||			\
				 TLB_ASID(x) == ((y) & KVM_ENTRYHI_ASID))

struct kvm_mips_tlb {
	long tlb_mask;
	long tlb_hi;
	long tlb_lo[2];
};

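/*
 * Illustrative sketch (not part of this header): a software walk of a guest
 * TLB array matches an entry against an EntryHi value roughly as follows,
 * where "tlb" points at a struct kvm_mips_tlb and "entryhi" holds
 * VPN2 | ASID:
 *
 *	if (TLB_HI_VPN2_HIT(*tlb, entryhi) &&
 *	    TLB_HI_ASID_HIT(*tlb, entryhi)) {
 *		// hit: check TLB_IS_VALID(*tlb, va) before using the mapping
 *	}
 */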
#define KVM_MIPS_AUX_FPU	0x1
#define KVM_MIPS_AUX_MSA	0x2

struct kvm_vcpu_arch {
	void *guest_ebase;
	int (*vcpu_run)(struct kvm_vcpu *vcpu);

	/* Host registers preserved across guest mode execution */
	unsigned long host_stack;
	unsigned long host_gp;
	unsigned long host_pgd;
	unsigned long host_entryhi;

	/* Host CP0 registers used when handling exits from guest */
	unsigned long host_cp0_badvaddr;
	unsigned long host_cp0_epc;
	u32 host_cp0_cause;
	u32 host_cp0_guestctl0;
	u32 host_cp0_badinstr;
	u32 host_cp0_badinstrp;

	/* GPRS */
	unsigned long gprs[32];
	unsigned long hi;
	unsigned long lo;
	unsigned long pc;

	/* FPU State */
	struct mips_fpu_struct fpu;
	/* Which auxiliary state is loaded (KVM_MIPS_AUX_*) */
	unsigned int aux_inuse;

	/* COP0 State */
	struct mips_coproc cop0;

	/* Resume PC after MMIO completion */
	unsigned long io_pc;
	/* GPR used as IO source/target */
	u32 io_gpr;

	struct hrtimer comparecount_timer;
	/* Count timer control KVM register */
	u32 count_ctl;
	/* Count bias from the raw time */
	u32 count_bias;
	/* Frequency of timer in Hz */
	u32 count_hz;
	/* Dynamic nanosecond bias (multiple of count_period) to avoid overflow */
	s64 count_dyn_bias;
	/* Resume time */
	ktime_t count_resume;
	/* Period of timer tick in ns */
	u64 count_period;

	/* Bitmask of exceptions that are pending */
	unsigned long pending_exceptions;

	/* Bitmask of pending exceptions to be cleared */
	unsigned long pending_exceptions_clr;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* The VCPU's VZ GuestID differs on each host CPU in an SMP system */
	u32 vzguestid[NR_CPUS];

	/* wired guest TLB entries */
	struct kvm_mips_tlb *wired_tlb;
	unsigned int wired_tlb_limit;
	unsigned int wired_tlb_used;

	/* emulated guest MAAR registers */
	unsigned long maar[6];

	/* Last CPU the VCPU state was loaded on */
	int last_sched_cpu;
	/* Last CPU the VCPU actually executed guest code on */
	int last_exec_cpu;

	/* WAIT executed */
	int wait;

	u8 fpu_enabled;
	u8 msa_enabled;
};

static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
						unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
		"	"__stringify(LONG_LL)	" %0, %1	\n"
		"	or	%0, %2				\n"
		"	"__stringify(LONG_SC)	" %0, %1	\n"
		"	.set	pop				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (val));
	} while (unlikely(!temp));
}

static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg,
						  unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
		"	"__stringify(LONG_LL)	" %0, %1	\n"
		"	and	%0, %2				\n"
		"	"__stringify(LONG_SC)	" %0, %1	\n"
		"	.set	pop				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (~val));
	} while (unlikely(!temp));
}

static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
						   unsigned long change,
						   unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
		"	"__stringify(LONG_LL)	" %0, %1	\n"
		"	and	%0, %2				\n"
		"	or	%0, %3				\n"
		"	"__stringify(LONG_SC)	" %0, %1	\n"
		"	.set	pop				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (~change), "r" (val & change));
	} while (unlikely(!temp));
}

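/*
 * Each helper above retries a load-linked/store-conditional (ll/sc)
 * sequence until the store succeeds, making the update atomic. The
 * resulting semantics are equivalent to (a plain-C sketch, not a
 * replacement for the helpers):
 *
 *	*reg |= val;					set
 *	*reg &= ~val;					clear
 *	*reg = (*reg & ~change) | (val & change);	change
 */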
/* Guest register types, used in accessor build below */
#define __KVMT32	u32
#define __KVMTl	unsigned long

/*
 * __BUILD_KVM_$ops_SAVED(): kvm_$op_sw_gc0_$reg()
 * These operate on the saved guest C0 state in RAM.
 */

/* Generate saved context simple accessors */
#define __BUILD_KVM_RW_SAVED(name, type, _reg, sel)			\
static inline __KVMT##type kvm_read_sw_gc0_##name(struct mips_coproc *cop0) \
{									\
	return cop0->reg[(_reg)][(sel)];				\
}									\
static inline void kvm_write_sw_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	cop0->reg[(_reg)][(sel)] = val;					\
}

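/*
 * For instance, __BUILD_KVM_RW_SAVED(index, 32, MIPS_CP0_TLB_INDEX, 0)
 * (instantiated via __BUILD_KVM_RW_HW further below) expands to:
 *
 *	static inline u32 kvm_read_sw_gc0_index(struct mips_coproc *cop0)
 *	{
 *		return cop0->reg[MIPS_CP0_TLB_INDEX][0];
 *	}
 *	static inline void kvm_write_sw_gc0_index(struct mips_coproc *cop0,
 *						  u32 val)
 *	{
 *		cop0->reg[MIPS_CP0_TLB_INDEX][0] = val;
 *	}
 */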
/* Generate saved context bitwise modifiers */
#define __BUILD_KVM_SET_SAVED(name, type, _reg, sel)			\
static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0,	\
					 __KVMT##type val)		\
{									\
	cop0->reg[(_reg)][(sel)] |= val;				\
}									\
static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	cop0->reg[(_reg)][(sel)] &= ~val;				\
}									\
static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0,	\
					    __KVMT##type mask,		\
					    __KVMT##type val)		\
{									\
	unsigned long _mask = mask;					\
	cop0->reg[(_reg)][(sel)] &= ~_mask;				\
	cop0->reg[(_reg)][(sel)] |= val & _mask;			\
}

/* Generate saved context atomic bitwise modifiers */
#define __BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel)			\
static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0,	\
					 __KVMT##type val)		\
{									\
	_kvm_atomic_set_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val);	\
}									\
static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	_kvm_atomic_clear_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val);	\
}									\
static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0,	\
					    __KVMT##type mask,		\
					    __KVMT##type val)		\
{									\
	_kvm_atomic_change_c0_guest_reg(&cop0->reg[(_reg)][(sel)], mask, \
					val);				\
}

/*
 * __BUILD_KVM_$ops_VZ(): kvm_$op_vz_gc0_$reg()
 * These operate on the VZ guest C0 context in hardware.
 */

/* Generate VZ guest context simple accessors */
#define __BUILD_KVM_RW_VZ(name, type, _reg, sel)			\
static inline __KVMT##type kvm_read_vz_gc0_##name(struct mips_coproc *cop0) \
{									\
	return read_gc0_##name();					\
}									\
static inline void kvm_write_vz_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	write_gc0_##name(val);						\
}

/* Generate VZ guest context bitwise modifiers */
#define __BUILD_KVM_SET_VZ(name, type, _reg, sel)			\
static inline void kvm_set_vz_gc0_##name(struct mips_coproc *cop0,	\
					 __KVMT##type val)		\
{									\
	set_gc0_##name(val);						\
}									\
static inline void kvm_clear_vz_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	clear_gc0_##name(val);						\
}									\
static inline void kvm_change_vz_gc0_##name(struct mips_coproc *cop0,	\
					    __KVMT##type mask,		\
					    __KVMT##type val)		\
{									\
	change_gc0_##name(mask, val);					\
}

/* Generate VZ guest context save/restore to/from saved context */
#define __BUILD_KVM_SAVE_VZ(name, _reg, sel)			\
static inline void kvm_restore_gc0_##name(struct mips_coproc *cop0)	\
{									\
	write_gc0_##name(cop0->reg[(_reg)][(sel)]);			\
}									\
static inline void kvm_save_gc0_##name(struct mips_coproc *cop0)	\
{									\
	cop0->reg[(_reg)][(sel)] = read_gc0_##name();			\
}

/*
 * __BUILD_KVM_$ops_WRAP(): kvm_$op_$name1() -> kvm_$op_$name2()
 * These wrap a set of operations to provide them with a different name.
 */

/* Generate simple accessor wrapper */
#define __BUILD_KVM_RW_WRAP(name1, name2, type)				\
static inline __KVMT##type kvm_read_##name1(struct mips_coproc *cop0)	\
{									\
	return kvm_read_##name2(cop0);					\
}									\
static inline void kvm_write_##name1(struct mips_coproc *cop0,		\
				     __KVMT##type val)			\
{									\
	kvm_write_##name2(cop0, val);					\
}

/* Generate bitwise modifier wrapper */
#define __BUILD_KVM_SET_WRAP(name1, name2, type)			\
static inline void kvm_set_##name1(struct mips_coproc *cop0,		\
				   __KVMT##type val)			\
{									\
	kvm_set_##name2(cop0, val);					\
}									\
static inline void kvm_clear_##name1(struct mips_coproc *cop0,		\
				     __KVMT##type val)			\
{									\
	kvm_clear_##name2(cop0, val);					\
}									\
static inline void kvm_change_##name1(struct mips_coproc *cop0,		\
				      __KVMT##type mask,		\
				      __KVMT##type val)			\
{									\
	kvm_change_##name2(cop0, mask, val);				\
}

/*
 * __BUILD_KVM_$ops_SW(): kvm_$op_c0_guest_$reg() -> kvm_$op_sw_gc0_$reg()
 * These generate accessors operating on the saved context in RAM, and wrap them
 * with the common guest C0 accessors (for use by common emulation code).
 */

#define __BUILD_KVM_RW_SW(name, type, _reg, sel)			\
	__BUILD_KVM_RW_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_RW_WRAP(c0_guest_##name, sw_gc0_##name, type)

#define __BUILD_KVM_SET_SW(name, type, _reg, sel)			\
	__BUILD_KVM_SET_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)

#define __BUILD_KVM_ATOMIC_SW(name, type, _reg, sel)			\
	__BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)

/*
 * VZ (hardware assisted virtualisation)
 * These macros use the active guest state in VZ mode (hardware registers).
 */

/*
 * __BUILD_KVM_$ops_HW(): kvm_$op_c0_guest_$reg() -> kvm_$op_vz_gc0_$reg()
 * These generate accessors operating on the VZ guest context in hardware, and
 * wrap them with the common guest C0 accessors (for use by common emulation
 * code).
 *
 * Accessors operating on the saved context in RAM are also generated to allow
 * convenient explicit saving and restoring of the state.
 */

#define __BUILD_KVM_RW_HW(name, type, _reg, sel)			\
	__BUILD_KVM_RW_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_RW_VZ(name, type, _reg, sel)			\
	__BUILD_KVM_RW_WRAP(c0_guest_##name, vz_gc0_##name, type)	\
	__BUILD_KVM_SAVE_VZ(name, _reg, sel)

#define __BUILD_KVM_SET_HW(name, type, _reg, sel)			\
	__BUILD_KVM_SET_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_SET_VZ(name, type, _reg, sel)			\
	__BUILD_KVM_SET_WRAP(c0_guest_##name, vz_gc0_##name, type)

/*
 * We can't do atomic modifications of COP0 state if hardware can modify it.
 * Races must be handled explicitly.
 */
#define __BUILD_KVM_ATOMIC_HW	__BUILD_KVM_SET_HW

/*
 * Define accessors for CP0 registers that are accessible to the guest. These
 * are primarily used by common emulation code, which may need to access the
 * registers differently depending on the implementation.
 *
 *    fns_hw/sw    name     type    reg num         select
 */
__BUILD_KVM_RW_HW(index,          32, MIPS_CP0_TLB_INDEX,    0)
__BUILD_KVM_RW_HW(entrylo0,       l,  MIPS_CP0_TLB_LO0,      0)
__BUILD_KVM_RW_HW(entrylo1,       l,  MIPS_CP0_TLB_LO1,      0)
__BUILD_KVM_RW_HW(context,        l,  MIPS_CP0_TLB_CONTEXT,  0)
__BUILD_KVM_RW_HW(contextconfig,  32, MIPS_CP0_TLB_CONTEXT,  1)
__BUILD_KVM_RW_HW(userlocal,      l,  MIPS_CP0_TLB_CONTEXT,  2)
__BUILD_KVM_RW_HW(xcontextconfig, l,  MIPS_CP0_TLB_CONTEXT,  3)
__BUILD_KVM_RW_HW(pagemask,       l,  MIPS_CP0_TLB_PG_MASK,  0)
__BUILD_KVM_RW_HW(pagegrain,      32, MIPS_CP0_TLB_PG_MASK,  1)
__BUILD_KVM_RW_HW(segctl0,        l,  MIPS_CP0_TLB_PG_MASK,  2)
__BUILD_KVM_RW_HW(segctl1,        l,  MIPS_CP0_TLB_PG_MASK,  3)
__BUILD_KVM_RW_HW(segctl2,        l,  MIPS_CP0_TLB_PG_MASK,  4)
__BUILD_KVM_RW_HW(pwbase,         l,  MIPS_CP0_TLB_PG_MASK,  5)
__BUILD_KVM_RW_HW(pwfield,        l,  MIPS_CP0_TLB_PG_MASK,  6)
__BUILD_KVM_RW_HW(pwsize,         l,  MIPS_CP0_TLB_PG_MASK,  7)
__BUILD_KVM_RW_HW(wired,          32, MIPS_CP0_TLB_WIRED,    0)
__BUILD_KVM_RW_HW(pwctl,          32, MIPS_CP0_TLB_WIRED,    6)
__BUILD_KVM_RW_HW(hwrena,         32, MIPS_CP0_HWRENA,       0)
__BUILD_KVM_RW_HW(badvaddr,       l,  MIPS_CP0_BAD_VADDR,    0)
__BUILD_KVM_RW_HW(badinstr,       32, MIPS_CP0_BAD_VADDR,    1)
__BUILD_KVM_RW_HW(badinstrp,      32, MIPS_CP0_BAD_VADDR,    2)
__BUILD_KVM_RW_SW(count,          32, MIPS_CP0_COUNT,        0)
__BUILD_KVM_RW_HW(entryhi,        l,  MIPS_CP0_TLB_HI,       0)
__BUILD_KVM_RW_HW(compare,        32, MIPS_CP0_COMPARE,      0)
__BUILD_KVM_RW_HW(status,         32, MIPS_CP0_STATUS,       0)
__BUILD_KVM_RW_HW(intctl,         32, MIPS_CP0_STATUS,       1)
__BUILD_KVM_RW_HW(cause,          32, MIPS_CP0_CAUSE,        0)
__BUILD_KVM_RW_HW(epc,            l,  MIPS_CP0_EXC_PC,       0)
__BUILD_KVM_RW_SW(prid,           32, MIPS_CP0_PRID,         0)
__BUILD_KVM_RW_HW(ebase,          l,  MIPS_CP0_PRID,         1)
__BUILD_KVM_RW_HW(config,         32, MIPS_CP0_CONFIG,       0)
__BUILD_KVM_RW_HW(config1,        32, MIPS_CP0_CONFIG,       1)
__BUILD_KVM_RW_HW(config2,        32, MIPS_CP0_CONFIG,       2)
__BUILD_KVM_RW_HW(config3,        32, MIPS_CP0_CONFIG,       3)
__BUILD_KVM_RW_HW(config4,        32, MIPS_CP0_CONFIG,       4)
__BUILD_KVM_RW_HW(config5,        32, MIPS_CP0_CONFIG,       5)
__BUILD_KVM_RW_HW(config6,        32, MIPS_CP0_CONFIG,       6)
__BUILD_KVM_RW_HW(config7,        32, MIPS_CP0_CONFIG,       7)
__BUILD_KVM_RW_SW(maari,          l,  MIPS_CP0_LLADDR,       2)
__BUILD_KVM_RW_HW(xcontext,       l,  MIPS_CP0_TLB_XCONTEXT, 0)
__BUILD_KVM_RW_HW(errorepc,       l,  MIPS_CP0_ERROR_PC,     0)
__BUILD_KVM_RW_HW(kscratch1,      l,  MIPS_CP0_DESAVE,       2)
__BUILD_KVM_RW_HW(kscratch2,      l,  MIPS_CP0_DESAVE,       3)
__BUILD_KVM_RW_HW(kscratch3,      l,  MIPS_CP0_DESAVE,       4)
__BUILD_KVM_RW_HW(kscratch4,      l,  MIPS_CP0_DESAVE,       5)
__BUILD_KVM_RW_HW(kscratch5,      l,  MIPS_CP0_DESAVE,       6)
__BUILD_KVM_RW_HW(kscratch6,      l,  MIPS_CP0_DESAVE,       7)

/* Bitwise operations (on HW state) */
__BUILD_KVM_SET_HW(status,        32, MIPS_CP0_STATUS,       0)
/* Cause can be modified asynchronously from hardirq hrtimer callback */
__BUILD_KVM_ATOMIC_HW(cause,      32, MIPS_CP0_CAUSE,        0)
__BUILD_KVM_SET_HW(ebase,         l,  MIPS_CP0_PRID,         1)

/* Bitwise operations (on saved state) */
__BUILD_KVM_SET_SAVED(config,     32, MIPS_CP0_CONFIG,       0)
__BUILD_KVM_SET_SAVED(config1,    32, MIPS_CP0_CONFIG,       1)
__BUILD_KVM_SET_SAVED(config2,    32, MIPS_CP0_CONFIG,       2)
__BUILD_KVM_SET_SAVED(config3,    32, MIPS_CP0_CONFIG,       3)
__BUILD_KVM_SET_SAVED(config4,    32, MIPS_CP0_CONFIG,       4)
__BUILD_KVM_SET_SAVED(config5,    32, MIPS_CP0_CONFIG,       5)

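/*
 * Illustrative use (a sketch, not part of this header): common emulation
 * code accesses guest CP0 state through these wrappers, e.g.
 *
 *	u32 config1 = kvm_read_c0_guest_config1(&vcpu->arch.cop0);
 *	kvm_set_c0_guest_status(&vcpu->arch.cop0, ST0_CU1);
 *
 * Each call resolves to either the VZ hardware guest context or the saved
 * context in RAM, depending on whether the accessor was built with
 * __BUILD_KVM_*_HW or __BUILD_KVM_*_SW above.
 */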
/* Helpers */

static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
{
	return (!__builtin_constant_p(raw_cpu_has_fpu) || raw_cpu_has_fpu) &&
		vcpu->fpu_enabled;
}

static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu)
{
	return kvm_mips_guest_can_have_fpu(vcpu) &&
		kvm_read_c0_guest_config1(&vcpu->cop0) & MIPS_CONF1_FP;
}

static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu)
{
	return (!__builtin_constant_p(cpu_has_msa) || cpu_has_msa) &&
		vcpu->msa_enabled;
}

static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch *vcpu)
{
	return kvm_mips_guest_can_have_msa(vcpu) &&
		kvm_read_c0_guest_config3(&vcpu->cop0) & MIPS_CONF3_MSA;
}

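/*
 * Note the two levels above: kvm_mips_guest_can_have_*() checks host
 * support and the userspace enable flag, while kvm_mips_guest_has_*()
 * additionally requires the corresponding guest Config bit (Config1.FP or
 * Config3.MSA), i.e. the unit must also appear in the guest's visible
 * configuration.
 */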
struct kvm_mips_callbacks {
	int (*handle_cop_unusable)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_mod)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_ld_miss)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_st_miss)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_st)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_ld)(struct kvm_vcpu *vcpu);
	int (*handle_syscall)(struct kvm_vcpu *vcpu);
	int (*handle_res_inst)(struct kvm_vcpu *vcpu);
	int (*handle_break)(struct kvm_vcpu *vcpu);
	int (*handle_trap)(struct kvm_vcpu *vcpu);
	int (*handle_msa_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
	int (*handle_guest_exit)(struct kvm_vcpu *vcpu);
	int (*enable_virtualization_cpu)(void);
	void (*disable_virtualization_cpu)(void);
	int (*check_extension)(struct kvm *kvm, long ext);
	int (*vcpu_init)(struct kvm_vcpu *vcpu);
	void (*vcpu_uninit)(struct kvm_vcpu *vcpu);
	int (*vcpu_setup)(struct kvm_vcpu *vcpu);
	void (*prepare_flush_shadow)(struct kvm *kvm);
	gpa_t (*gva_to_gpa)(gva_t gva);
	void (*queue_timer_int)(struct kvm_vcpu *vcpu);
	void (*dequeue_timer_int)(struct kvm_vcpu *vcpu);
	void (*queue_io_int)(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq);
	void (*dequeue_io_int)(struct kvm_vcpu *vcpu,
			       struct kvm_mips_interrupt *irq);
	int (*irq_deliver)(struct kvm_vcpu *vcpu, unsigned int priority,
			   u32 cause);
	int (*irq_clear)(struct kvm_vcpu *vcpu, unsigned int priority,
			 u32 cause);
	unsigned long (*num_regs)(struct kvm_vcpu *vcpu);
	int (*copy_reg_indices)(struct kvm_vcpu *vcpu, u64 __user *indices);
	int (*get_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 *v);
	int (*set_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 v);
	int (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	int (*vcpu_put)(struct kvm_vcpu *vcpu, int cpu);
	int (*vcpu_run)(struct kvm_vcpu *vcpu);
	void (*vcpu_reenter)(struct kvm_vcpu *vcpu);
};
extern const struct kvm_mips_callbacks * const kvm_mips_callbacks;
int kvm_mips_emulation_init(void);

/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);

extern int kvm_mips_handle_exit(struct kvm_vcpu *vcpu);

/* Building of entry/exception code */
int kvm_mips_entry_setup(void);
void *kvm_mips_build_vcpu_run(void *addr);
void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler);
void *kvm_mips_build_exception(void *addr, void *handler);
void *kvm_mips_build_exit(void *addr);

/* FPU/MSA context management */
void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_fpu(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_fcsr(struct kvm_vcpu_arch *vcpu);
void __kvm_save_msa(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msa(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msa_upper(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msacsr(struct kvm_vcpu_arch *vcpu);
void kvm_own_fpu(struct kvm_vcpu *vcpu);
void kvm_own_msa(struct kvm_vcpu *vcpu);
void kvm_drop_fpu(struct kvm_vcpu *vcpu);
void kvm_lose_fpu(struct kvm_vcpu *vcpu);

/* TLB handling */
int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr,
				      struct kvm_vcpu *vcpu, bool write_fault);

int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
			    unsigned long *gpa);
void kvm_vz_local_flush_roottlb_all_guests(void);
void kvm_vz_local_flush_guesttlb_all(void);
void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count);
void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count);
#ifdef CONFIG_CPU_LOONGSON64
void kvm_loongson_clear_guest_vtlb(void);
void kvm_loongson_clear_guest_ftlb(void);
#endif

/* MMU handling */

bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
pgd_t *kvm_pgd_alloc(void);
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

/* Emulation */
enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause);
int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);

/**
 * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to ifetch fault.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	Whether the TLBL exception was likely due to an instruction
 *		fetch fault rather than a data load fault.
 */
static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *vcpu)
{
	unsigned long badvaddr = vcpu->host_cp0_badvaddr;
	unsigned long epc = msk_isa16_mode(vcpu->pc);
	u32 cause = vcpu->host_cp0_cause;

	if (epc == badvaddr)
		return true;

	/*
	 * Branches may be 32-bit or 16-bit instructions.
	 * This isn't exact, but we don't really support MIPS16 or microMIPS yet
	 * in KVM anyway.
	 */
	if ((cause & CAUSEF_BD) && badvaddr - epc <= 4)
		return true;

	return false;
}

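/*
 * Worked example (illustrative): for a branch at 0x1000 whose delay slot is
 * at 0x1004, an instruction fetch fault on the delay slot reports
 * epc == 0x1000 with CAUSEF_BD set and badvaddr == 0x1004, so
 * badvaddr - epc == 4 and the fault is classified as an ifetch fault.
 */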
extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu);

u32 kvm_mips_read_count(struct kvm_vcpu *vcpu);
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count);
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack);
void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz);
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz);
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu);
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu);

/* fairly internal functions requiring some care to use */
int kvm_mips_count_disabled(struct kvm_vcpu *vcpu);
ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count);
int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
			     u32 count, int min_drift);

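/*
 * Illustrative pairing (a sketch, not a definitive recipe): callers that
 * need to sample and adjust the guest Count timer atomically freeze the
 * hrtimer, update state, then restore it:
 *
 *	u32 count;
 *	ktime_t now = kvm_mips_freeze_hrtimer(vcpu, &count);
 *	// ...recalculate timer state based on count...
 *	kvm_mips_restore_hrtimer(vcpu, now, count, min_drift);
 *
 * where min_drift is taken (an assumption from the parameter name) to be
 * the amount of Count drift tolerated without correction.
 */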
void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu);
void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu);

enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
					     u32 cause,
					     struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
					    u32 cause,
					    struct kvm_vcpu *vcpu);

/* COP0 */
enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu);

/* Hypercalls (hypcall.c) */

enum emulation_result kvm_mips_emul_hypcall(struct kvm_vcpu *vcpu,
					    union mips_instruction inst);
int kvm_mips_handle_hypcall(struct kvm_vcpu *vcpu);

/* Misc */
extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
extern int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq);

static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
					 struct kvm_memory_slot *slot) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}

#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS

#endif /* __MIPS_KVM_HOST_H__ */