xref: /linux/arch/mips/kvm/vz.c (revision 3efc57369a0ce8f76bf0804f7e673982384e4ac9)
1  /*
2   * This file is subject to the terms and conditions of the GNU General Public
3   * License.  See the file "COPYING" in the main directory of this archive
4   * for more details.
5   *
6   * KVM/MIPS: Support for hardware virtualization extensions
7   *
8   * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
9   * Authors: Yann Le Du <ledu@kymasys.com>
10   */
11  
12  #include <linux/errno.h>
13  #include <linux/err.h>
14  #include <linux/module.h>
15  #include <linux/preempt.h>
16  #include <linux/vmalloc.h>
17  #include <asm/cacheflush.h>
18  #include <asm/cacheops.h>
19  #include <asm/cmpxchg.h>
20  #include <asm/fpu.h>
21  #include <asm/hazards.h>
22  #include <asm/inst.h>
23  #include <asm/mmu_context.h>
24  #include <asm/r4kcache.h>
25  #include <asm/time.h>
26  #include <asm/tlb.h>
27  #include <asm/tlbex.h>
28  
29  #include <linux/kvm_host.h>
30  
31  #include "interrupt.h"
32  #ifdef CONFIG_CPU_LOONGSON64
33  #include "loongson_regs.h"
34  #endif
35  
36  #include "trace.h"
37  
38  /* Pointers to last VCPU loaded on each physical CPU */
39  static struct kvm_vcpu *last_vcpu[NR_CPUS];
40  /* Pointers to last VCPU executed on each physical CPU */
41  static struct kvm_vcpu *last_exec_vcpu[NR_CPUS];
42  
43  /*
44   * Number of guest VTLB entries to use, so we can catch inconsistency between
45   * CPUs.
46   */
47  static unsigned int kvm_vz_guest_vtlb_size;
48  
49  static inline long kvm_vz_read_gc0_ebase(void)
50  {
51  	if (sizeof(long) == 8 && cpu_has_ebase_wg)
52  		return read_gc0_ebase_64();
53  	else
54  		return read_gc0_ebase();
55  }
56  
57  static inline void kvm_vz_write_gc0_ebase(long v)
58  {
59  	/*
60  	 * First write with WG=1 to write upper bits, then write again in case
61  	 * WG should be left at 0.
62  	 * write_gc0_ebase_64() is no longer UNDEFINED since R6.
63  	 */
64  	if (sizeof(long) == 8 &&
65  	    (cpu_has_mips64r6 || cpu_has_ebase_wg)) {
66  		write_gc0_ebase_64(v | MIPS_EBASE_WG);
67  		write_gc0_ebase_64(v);
68  	} else {
69  		write_gc0_ebase(v | MIPS_EBASE_WG);
70  		write_gc0_ebase(v);
71  	}
72  }
73  
74  /*
75   * These Config bits may be writable by the guest:
76   * Config:	[K23, KU] (!TLB), K0
77   * Config1:	(none)
78   * Config2:	[TU, SU] (impl)
79   * Config3:	ISAOnExc
80   * Config4:	FTLBPageSize
81   * Config5:	K, CV, MSAEn, UFE, FRE, SBRI, UFR
82   */
83  
84  static inline unsigned int kvm_vz_config_guest_wrmask(struct kvm_vcpu *vcpu)
85  {
86  	return CONF_CM_CMASK;
87  }
88  
89  static inline unsigned int kvm_vz_config1_guest_wrmask(struct kvm_vcpu *vcpu)
90  {
91  	return 0;
92  }
93  
94  static inline unsigned int kvm_vz_config2_guest_wrmask(struct kvm_vcpu *vcpu)
95  {
96  	return 0;
97  }
98  
99  static inline unsigned int kvm_vz_config3_guest_wrmask(struct kvm_vcpu *vcpu)
100  {
101  	return MIPS_CONF3_ISA_OE;
102  }
103  
104  static inline unsigned int kvm_vz_config4_guest_wrmask(struct kvm_vcpu *vcpu)
105  {
106  	/* no need to be exact */
107  	return MIPS_CONF4_VFTLBPAGESIZE;
108  }
109  
110  static inline unsigned int kvm_vz_config5_guest_wrmask(struct kvm_vcpu *vcpu)
111  {
112  	unsigned int mask = MIPS_CONF5_K | MIPS_CONF5_CV | MIPS_CONF5_SBRI;
113  
114  	/* Permit MSAEn changes if MSA supported and enabled */
115  	if (kvm_mips_guest_has_msa(&vcpu->arch))
116  		mask |= MIPS_CONF5_MSAEN;
117  
118  	/*
119  	 * Permit guest FPU mode changes if FPU is enabled and the relevant
120  	 * feature exists according to FIR register.
121  	 */
122  	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
123  		if (cpu_has_ufr)
124  			mask |= MIPS_CONF5_UFR;
125  		if (cpu_has_fre)
126  			mask |= MIPS_CONF5_FRE | MIPS_CONF5_UFE;
127  	}
128  
129  	return mask;
130  }
131  
132  static inline unsigned int kvm_vz_config6_guest_wrmask(struct kvm_vcpu *vcpu)
133  {
134  	return LOONGSON_CONF6_INTIMER | LOONGSON_CONF6_EXTIMER;
135  }
136  
137  /*
138   * VZ optionally allows these additional Config bits to be written by root:
139   * Config:	M, [MT]
140   * Config1:	M, [MMUSize-1, C2, MD, PC, WR, CA], FP
141   * Config2:	M
142   * Config3:	M, MSAP, [BPG], ULRI, [DSP2P, DSPP], CTXTC, [ITL, LPA, VEIC,
143   *		VInt, SP, CDMM, MT, SM, TL]
144   * Config4:	M, [VTLBSizeExt, MMUSizeExt]
145   * Config5:	MRP
146   */
147  
148  static inline unsigned int kvm_vz_config_user_wrmask(struct kvm_vcpu *vcpu)
149  {
150  	return kvm_vz_config_guest_wrmask(vcpu) | MIPS_CONF_M;
151  }
152  
153  static inline unsigned int kvm_vz_config1_user_wrmask(struct kvm_vcpu *vcpu)
154  {
155  	unsigned int mask = kvm_vz_config1_guest_wrmask(vcpu) | MIPS_CONF_M;
156  
157  	/* Permit FPU to be present if FPU is supported */
158  	if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
159  		mask |= MIPS_CONF1_FP;
160  
161  	return mask;
162  }
163  
164  static inline unsigned int kvm_vz_config2_user_wrmask(struct kvm_vcpu *vcpu)
165  {
166  	return kvm_vz_config2_guest_wrmask(vcpu) | MIPS_CONF_M;
167  }
168  
169  static inline unsigned int kvm_vz_config3_user_wrmask(struct kvm_vcpu *vcpu)
170  {
171  	unsigned int mask = kvm_vz_config3_guest_wrmask(vcpu) | MIPS_CONF_M |
172  		MIPS_CONF3_ULRI | MIPS_CONF3_CTXTC;
173  
174  	/* Permit MSA to be present if MSA is supported */
175  	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
176  		mask |= MIPS_CONF3_MSA;
177  
178  	return mask;
179  }
180  
181  static inline unsigned int kvm_vz_config4_user_wrmask(struct kvm_vcpu *vcpu)
182  {
183  	return kvm_vz_config4_guest_wrmask(vcpu) | MIPS_CONF_M;
184  }
185  
186  static inline unsigned int kvm_vz_config5_user_wrmask(struct kvm_vcpu *vcpu)
187  {
188  	return kvm_vz_config5_guest_wrmask(vcpu) | MIPS_CONF5_MRP;
189  }
190  
191  static inline unsigned int kvm_vz_config6_user_wrmask(struct kvm_vcpu *vcpu)
192  {
193  	return kvm_vz_config6_guest_wrmask(vcpu) |
194  		LOONGSON_CONF6_SFBEN | LOONGSON_CONF6_FTLBDIS;
195  }
196  
197  static gpa_t kvm_vz_gva_to_gpa_cb(gva_t gva)
198  {
199  	/* VZ guest has already converted gva to gpa */
200  	return gva;
201  }
202  
203  static void kvm_vz_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
204  {
205  	set_bit(priority, &vcpu->arch.pending_exceptions);
206  	clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
207  }
208  
209  static void kvm_vz_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
210  {
211  	clear_bit(priority, &vcpu->arch.pending_exceptions);
212  	set_bit(priority, &vcpu->arch.pending_exceptions_clr);
213  }
214  
215  static void kvm_vz_queue_timer_int_cb(struct kvm_vcpu *vcpu)
216  {
217  	/*
218  	 * timer expiry is asynchronous to vcpu execution therefore defer guest
219  	 * cp0 accesses
220  	 */
221  	kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
222  }
223  
224  static void kvm_vz_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
225  {
226  	/*
227  	 * timer expiry is asynchronous to vcpu execution therefore defer guest
228  	 * cp0 accesses
229  	 */
230  	kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
231  }
232  
233  static void kvm_vz_queue_io_int_cb(struct kvm_vcpu *vcpu,
234  				   struct kvm_mips_interrupt *irq)
235  {
236  	int intr = (int)irq->irq;
237  
238  	/*
239  	 * interrupts are asynchronous to vcpu execution therefore defer guest
240  	 * cp0 accesses
241  	 */
242  	kvm_vz_queue_irq(vcpu, kvm_irq_to_priority(intr));
243  }
244  
245  static void kvm_vz_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
246  				     struct kvm_mips_interrupt *irq)
247  {
248  	int intr = (int)irq->irq;
249  
250  	/*
251  	 * interrupts are asynchronous to vcpu execution therefore defer guest
252  	 * cp0 accesses
253  	 */
254  	kvm_vz_dequeue_irq(vcpu, kvm_irq_to_priority(-intr));
255  }
256  
257  static int kvm_vz_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
258  				 u32 cause)
259  {
260  	u32 irq = (priority < MIPS_EXC_MAX) ?
261  		kvm_priority_to_irq[priority] : 0;
262  
263  	switch (priority) {
264  	case MIPS_EXC_INT_TIMER:
265  		set_gc0_cause(C_TI);
266  		break;
267  
268  	case MIPS_EXC_INT_IO_1:
269  	case MIPS_EXC_INT_IO_2:
270  	case MIPS_EXC_INT_IPI_1:
271  	case MIPS_EXC_INT_IPI_2:
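		/*
		 * Deliver external interrupts via the GuestCtl2 virtual
		 * interrupt (VIP) register when available, otherwise set the
		 * IP bit directly in Guest.Cause.
		 */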
272  		if (cpu_has_guestctl2)
273  			set_c0_guestctl2(irq);
274  		else
275  			set_gc0_cause(irq);
276  		break;
277  
278  	default:
279  		break;
280  	}
281  
282  	clear_bit(priority, &vcpu->arch.pending_exceptions);
283  	return 1;
284  }
285  
286  static int kvm_vz_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
287  			       u32 cause)
288  {
289  	u32 irq = (priority < MIPS_EXC_MAX) ?
290  		kvm_priority_to_irq[priority] : 0;
291  
292  	switch (priority) {
293  	case MIPS_EXC_INT_TIMER:
294  		/*
295  		 * Explicitly clear irq associated with Cause.IP[IPTI]
296  		 * if GuestCtl2 virtual interrupt register not
297  		 * supported or if not using GuestCtl2 Hardware Clear.
298  		 */
299  		if (cpu_has_guestctl2) {
300  			if (!(read_c0_guestctl2() & (irq << 14)))
301  				clear_c0_guestctl2(irq);
302  		} else {
303  			clear_gc0_cause(irq);
304  		}
305  		break;
306  
307  	case MIPS_EXC_INT_IO_1:
308  	case MIPS_EXC_INT_IO_2:
309  	case MIPS_EXC_INT_IPI_1:
310  	case MIPS_EXC_INT_IPI_2:
311  		/* Clear GuestCtl2.VIP irq if not using Hardware Clear */
312  		if (cpu_has_guestctl2) {
313  			if (!(read_c0_guestctl2() & (irq << 14)))
314  				clear_c0_guestctl2(irq);
315  		} else {
316  			clear_gc0_cause(irq);
317  		}
318  		break;
319  
320  	default:
321  		break;
322  	}
323  
324  	clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
325  	return 1;
326  }
327  
328  /*
329   * VZ guest timer handling.
330   */
331  
332  /**
333   * kvm_vz_should_use_htimer() - Find whether to use the VZ hard guest timer.
334   * @vcpu:	Virtual CPU.
335   *
336   * Returns:	true if the VZ GTOffset & real guest CP0_Count should be used
337   *		instead of software emulation of guest timer.
338   *		false otherwise.
339   */
340  static bool kvm_vz_should_use_htimer(struct kvm_vcpu *vcpu)
341  {
342  	if (kvm_mips_count_disabled(vcpu))
343  		return false;
344  
345  	/* Chosen frequency must match real frequency */
346  	if (mips_hpt_frequency != vcpu->arch.count_hz)
347  		return false;
348  
349  	/* We don't support a CP0_GTOffset with fewer bits than CP0_Count */
350  	if (current_cpu_data.gtoffset_mask != 0xffffffff)
351  		return false;
352  
353  	return true;
354  }
355  
356  /**
357   * _kvm_vz_restore_stimer() - Restore soft timer state.
358   * @vcpu:	Virtual CPU.
359   * @compare:	CP0_Compare register value, restored by caller.
360   * @cause:	CP0_Cause register to restore.
361   *
362   * Restore VZ state relating to the soft timer. The hard timer can be enabled
363   * later.
364   */
365  static void _kvm_vz_restore_stimer(struct kvm_vcpu *vcpu, u32 compare,
366  				   u32 cause)
367  {
368  	/*
369  	 * Avoid spurious counter interrupts by setting Guest CP0_Count to just
370  	 * after Guest CP0_Compare.
371  	 */
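	/* The guest reads CP0_Count as root CP0_Count plus CP0_GTOffset */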
372  	write_c0_gtoffset(compare - read_c0_count());
373  
374  	back_to_back_c0_hazard();
375  	write_gc0_cause(cause);
376  }
377  
378  /**
379   * _kvm_vz_restore_htimer() - Restore hard timer state.
380   * @vcpu:	Virtual CPU.
381   * @compare:	CP0_Compare register value, restored by caller.
382   * @cause:	CP0_Cause register to restore.
383   *
384   * Restore hard timer Guest.Count & Guest.Cause taking care to preserve the
385   * value of Guest.CP0_Cause.TI while restoring Guest.CP0_Cause.
386   */
387  static void _kvm_vz_restore_htimer(struct kvm_vcpu *vcpu,
388  				   u32 compare, u32 cause)
389  {
390  	u32 start_count, after_count;
391  	unsigned long flags;
392  
393  	/*
394  	 * Freeze the soft-timer and sync the guest CP0_Count with it. We do
395  	 * this with interrupts disabled to avoid latency.
396  	 */
397  	local_irq_save(flags);
398  	kvm_mips_freeze_hrtimer(vcpu, &start_count);
399  	write_c0_gtoffset(start_count - read_c0_count());
400  	local_irq_restore(flags);
401  
402  	/* restore guest CP0_Cause, as TI may already be set */
403  	back_to_back_c0_hazard();
404  	write_gc0_cause(cause);
405  
406  	/*
407  	 * The above sequence isn't atomic and would result in lost timer
408  	 * interrupts if we're not careful. Detect if a timer interrupt is due
409  	 * and assert it.
410  	 */
411  	back_to_back_c0_hazard();
412  	after_count = read_gc0_count();
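	/*
	 * Unsigned arithmetic: true iff Compare lies in (start_count,
	 * after_count] modulo 2^32, i.e. Count stepped past Compare while the
	 * state was being restored.
	 */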
413  	if (after_count - start_count > compare - start_count - 1)
414  		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
415  }
416  
417  /**
418   * kvm_vz_restore_timer() - Restore timer state.
419   * @vcpu:	Virtual CPU.
420   *
421   * Restore soft timer state from saved context.
422   */
423  static void kvm_vz_restore_timer(struct kvm_vcpu *vcpu)
424  {
425  	struct mips_coproc *cop0 = &vcpu->arch.cop0;
426  	u32 cause, compare;
427  
428  	compare = kvm_read_sw_gc0_compare(cop0);
429  	cause = kvm_read_sw_gc0_cause(cop0);
430  
431  	write_gc0_compare(compare);
432  	_kvm_vz_restore_stimer(vcpu, compare, cause);
433  }
434  
435  /**
436   * kvm_vz_acquire_htimer() - Switch to hard timer state.
437   * @vcpu:	Virtual CPU.
438   *
439   * Restore hard timer state on top of existing soft timer state if possible.
440   *
441   * Since hard timer won't remain active over preemption, preemption should be
442   * disabled by the caller.
443   */
444  void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu)
445  {
446  	u32 gctl0;
447  
448  	gctl0 = read_c0_guestctl0();
449  	if (!(gctl0 & MIPS_GCTL0_GT) && kvm_vz_should_use_htimer(vcpu)) {
450  		/* enable guest access to hard timer */
451  		write_c0_guestctl0(gctl0 | MIPS_GCTL0_GT);
452  
453  		_kvm_vz_restore_htimer(vcpu, read_gc0_compare(),
454  				       read_gc0_cause());
455  	}
456  }
457  
458  /**
459   * _kvm_vz_save_htimer() - Switch to software emulation of guest timer.
460   * @vcpu:	Virtual CPU.
461   * @out_compare: Pointer to write compare value to.
462   * @out_cause:	Pointer to write cause value to.
463   *
464   * Save VZ guest timer state and switch to software emulation of guest CP0
465   * timer. The hard timer must already be in use, so preemption should be
466   * disabled.
467   */
468  static void _kvm_vz_save_htimer(struct kvm_vcpu *vcpu,
469  				u32 *out_compare, u32 *out_cause)
470  {
471  	u32 cause, compare, before_count, end_count;
472  	ktime_t before_time;
473  
474  	compare = read_gc0_compare();
475  	*out_compare = compare;
476  
477  	before_time = ktime_get();
478  
479  	/*
480  	 * Record the CP0_Count *prior* to saving CP0_Cause, so we have a point
481  	 * in time up to which no pending timer interrupt can have been missed.
482  	 */
483  	before_count = read_gc0_count();
484  	back_to_back_c0_hazard();
485  	cause = read_gc0_cause();
486  	*out_cause = cause;
487  
488  	/*
489  	 * Record a final CP0_Count which we will transfer to the soft-timer.
490  	 * This is recorded *after* saving CP0_Cause, so we don't get any timer
491  	 * interrupts from just after the final CP0_Count point.
492  	 */
493  	back_to_back_c0_hazard();
494  	end_count = read_gc0_count();
495  
496  	/*
497  	 * The above sequence isn't atomic, so we could miss a timer interrupt
498  	 * between reading CP0_Cause and end_count. Detect and record any timer
499  	 * interrupt due between before_count and end_count.
500  	 */
501  	if (end_count - before_count > compare - before_count - 1)
502  		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
503  
504  	/*
505  	 * Restore soft-timer, ignoring a small amount of negative drift due to
506  	 * delay between freeze_hrtimer and setting CP0_GTOffset.
507  	 */
508  	kvm_mips_restore_hrtimer(vcpu, before_time, end_count, -0x10000);
509  }
510  
511  /**
512   * kvm_vz_save_timer() - Save guest timer state.
513   * @vcpu:	Virtual CPU.
514   *
515   * Save VZ guest timer state and switch to soft guest timer if hard timer was in
516   * use.
517   */
518  static void kvm_vz_save_timer(struct kvm_vcpu *vcpu)
519  {
520  	struct mips_coproc *cop0 = &vcpu->arch.cop0;
521  	u32 gctl0, compare, cause;
522  
523  	gctl0 = read_c0_guestctl0();
524  	if (gctl0 & MIPS_GCTL0_GT) {
525  		/* disable guest use of hard timer */
526  		write_c0_guestctl0(gctl0 & ~MIPS_GCTL0_GT);
527  
528  		/* save hard timer state */
529  		_kvm_vz_save_htimer(vcpu, &compare, &cause);
530  	} else {
531  		compare = read_gc0_compare();
532  		cause = read_gc0_cause();
533  	}
534  
535  	/* save timer-related state to VCPU context */
536  	kvm_write_sw_gc0_cause(cop0, cause);
537  	kvm_write_sw_gc0_compare(cop0, compare);
538  }
539  
540  /**
541   * kvm_vz_lose_htimer() - Ensure hard guest timer is not in use.
542   * @vcpu:	Virtual CPU.
543   *
544   * Transfers the state of the hard guest timer to the soft guest timer, leaving
545   * guest state intact so it can continue to be used with the soft timer.
546   */
547  void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu)
548  {
549  	u32 gctl0, compare, cause;
550  
551  	preempt_disable();
552  	gctl0 = read_c0_guestctl0();
553  	if (gctl0 & MIPS_GCTL0_GT) {
554  		/* disable guest use of timer */
555  		write_c0_guestctl0(gctl0 & ~MIPS_GCTL0_GT);
556  
557  		/* switch to soft timer */
558  		_kvm_vz_save_htimer(vcpu, &compare, &cause);
559  
560  		/* leave soft timer in usable state */
561  		_kvm_vz_restore_stimer(vcpu, compare, cause);
562  	}
563  	preempt_enable();
564  }
565  
566  /**
567   * is_eva_access() - Find whether an instruction is an EVA memory accessor.
568   * @inst:	32-bit instruction encoding.
569   *
570   * Finds whether @inst encodes an EVA memory access instruction, which would
571   * indicate that emulation of it should access the user mode address space
572   * instead of the kernel mode address space. This matters for MUSUK segments
573   * which are TLB mapped for user mode but unmapped for kernel mode.
574   *
575   * Returns:	Whether @inst encodes an EVA accessor instruction.
576   */
577  static bool is_eva_access(union mips_instruction inst)
578  {
579  	if (inst.spec3_format.opcode != spec3_op)
580  		return false;
581  
582  	switch (inst.spec3_format.func) {
583  	case lwle_op:
584  	case lwre_op:
585  	case cachee_op:
586  	case sbe_op:
587  	case she_op:
588  	case sce_op:
589  	case swe_op:
590  	case swle_op:
591  	case swre_op:
592  	case prefe_op:
593  	case lbue_op:
594  	case lhue_op:
595  	case lbe_op:
596  	case lhe_op:
597  	case lle_op:
598  	case lwe_op:
599  		return true;
600  	default:
601  		return false;
602  	}
603  }
604  
605  /**
606   * is_eva_am_mapped() - Find whether an access mode is mapped.
607   * @vcpu:	KVM VCPU state.
608   * @am:		3-bit encoded access mode.
609   * @eu:		Segment becomes unmapped and uncached when Status.ERL=1.
610   *
611   * Decode @am to find whether it encodes a mapped segment for the current VCPU
612   * state. Where necessary @eu and the actual instruction causing the fault are
613   * taken into account to make the decision.
614   *
615   * Returns:	Whether the VCPU faulted on a TLB mapped address.
616   */
617  static bool is_eva_am_mapped(struct kvm_vcpu *vcpu, unsigned int am, bool eu)
618  {
619  	u32 am_lookup;
620  	int err;
621  
622  	/*
623  	 * Interpret access control mode. We assume address errors will already
624  	 * have been caught by the guest, leaving us with:
625  	 *      AM      UM  SM  KM  31..24 23..16
626  	 * UK    0 000          Unm   0      0
627  	 * MK    1 001          TLB   1
628  	 * MSK   2 010      TLB TLB   1
629  	 * MUSK  3 011  TLB TLB TLB   1
630  	 * MUSUK 4 100  TLB TLB Unm   0      1
631  	 * USK   5 101      Unm Unm   0      0
632  	 * -     6 110                0      0
633  	 * UUSK  7 111  Unm Unm Unm   0      0
634  	 *
635  	 * We shift a magic value by AM across the sign bit to find if always
636  	 * TLB mapped, and if not shift by 8 again to find if it depends on KM.
637  	 */
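	/*
	 * 0x70080000 encodes the two columns of the table above: bit (31 - AM)
	 * set means always TLB mapped, bit (23 - AM) set means TLB mapped
	 * unless in kernel mode.
	 */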
638  	am_lookup = 0x70080000 << am;
639  	if ((s32)am_lookup < 0) {
640  		/*
641  		 * MK, MSK, MUSK
642  		 * Always TLB mapped, unless SegCtl.EU && ERL
643  		 */
644  		if (!eu || !(read_gc0_status() & ST0_ERL))
645  			return true;
646  	} else {
647  		am_lookup <<= 8;
648  		if ((s32)am_lookup < 0) {
649  			union mips_instruction inst;
650  			unsigned int status;
651  			u32 *opc;
652  
653  			/*
654  			 * MUSUK
655  			 * TLB mapped if not in kernel mode
656  			 */
657  			status = read_gc0_status();
658  			if (!(status & (ST0_EXL | ST0_ERL)) &&
659  			    (status & ST0_KSU))
660  				return true;
661  			/*
662  			 * EVA access instructions in kernel
663  			 * mode access user address space.
664  			 */
665  			opc = (u32 *)vcpu->arch.pc;
666  			if (vcpu->arch.host_cp0_cause & CAUSEF_BD)
667  				opc += 1;
668  			err = kvm_get_badinstr(opc, vcpu, &inst.word);
669  			if (!err && is_eva_access(inst))
670  				return true;
671  		}
672  	}
673  
674  	return false;
675  }
676  
677  /**
678   * kvm_vz_gva_to_gpa() - Convert valid GVA to GPA.
679   * @vcpu:	KVM VCPU state.
680   * @gva:	Guest virtual address to convert.
681   * @gpa:	Output guest physical address.
682   *
683   * Convert a guest virtual address (GVA) which is valid according to the guest
684   * context, to a guest physical address (GPA).
685   *
686   * Returns:	0 on success.
687   *		-errno on failure.
688   */
689  static int kvm_vz_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
690  			     unsigned long *gpa)
691  {
692  	u32 gva32 = gva;
693  	unsigned long segctl;
694  
695  	if ((long)gva == (s32)gva32) {
696  		/* Handle canonical 32-bit virtual address */
697  		if (cpu_guest_has_segments) {
698  			unsigned long mask, pa;
699  
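			/*
			 * Each SegCtl register packs two segment configurations:
			 * the even-numbered CFG in bits 15:0 and the odd one in
			 * bits 31:16 (SegCtl0 = CFG1:CFG0, SegCtl1 = CFG3:CFG2,
			 * SegCtl2 = CFG5:CFG4).
			 */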
700  			switch (gva32 >> 29) {
701  			case 0:
702  			case 1: /* CFG5 (1GB) */
703  				segctl = read_gc0_segctl2() >> 16;
704  				mask = (unsigned long)0xfc0000000ull;
705  				break;
706  			case 2:
707  			case 3: /* CFG4 (1GB) */
708  				segctl = read_gc0_segctl2();
709  				mask = (unsigned long)0xfc0000000ull;
710  				break;
711  			case 4: /* CFG3 (512MB) */
712  				segctl = read_gc0_segctl1() >> 16;
713  				mask = (unsigned long)0xfe0000000ull;
714  				break;
715  			case 5: /* CFG2 (512MB) */
716  				segctl = read_gc0_segctl1();
717  				mask = (unsigned long)0xfe0000000ull;
718  				break;
719  			case 6: /* CFG1 (512MB) */
720  				segctl = read_gc0_segctl0() >> 16;
721  				mask = (unsigned long)0xfe0000000ull;
722  				break;
723  			case 7: /* CFG0 (512MB) */
724  				segctl = read_gc0_segctl0();
725  				mask = (unsigned long)0xfe0000000ull;
726  				break;
727  			default:
728  				/*
729  				 * GCC 4.9 isn't smart enough to figure out that
730  				 * segctl and mask are always initialised.
731  				 */
732  				unreachable();
733  			}
734  
735  			if (is_eva_am_mapped(vcpu, (segctl >> 4) & 0x7,
736  					     segctl & 0x0008))
737  				goto tlb_mapped;
738  
739  			/* Unmapped, find guest physical address */
740  			pa = (segctl << 20) & mask;
741  			pa |= gva32 & ~mask;
742  			*gpa = pa;
743  			return 0;
744  		} else if ((s32)gva32 < (s32)0xc0000000) {
745  			/* legacy unmapped KSeg0 or KSeg1 */
746  			*gpa = gva32 & 0x1fffffff;
747  			return 0;
748  		}
749  #ifdef CONFIG_64BIT
750  	} else if ((gva & 0xc000000000000000) == 0x8000000000000000) {
751  		/* XKPHYS */
752  		if (cpu_guest_has_segments) {
753  			/*
754  			 * Each of the 8 regions can be overridden by SegCtl2.XR
755  			 * to use SegCtl1.XAM.
756  			 */
757  			segctl = read_gc0_segctl2();
758  			if (segctl & (1ull << (56 + ((gva >> 59) & 0x7)))) {
759  				segctl = read_gc0_segctl1();
760  				if (is_eva_am_mapped(vcpu, (segctl >> 59) & 0x7,
761  						     0))
762  					goto tlb_mapped;
763  			}
764  
765  		}
766  		/*
767  		 * Traditionally fully unmapped.
768  		 * Bits 61:59 specify the CCA, which we can just mask off here.
769  		 * Bits 58:PABITS should be zero, but we shouldn't have got here
770  		 * if it wasn't.
771  		 */
772  		*gpa = gva & 0x07ffffffffffffff;
773  		return 0;
774  #endif
775  	}
776  
777  tlb_mapped:
778  	return kvm_vz_guest_tlb_lookup(vcpu, gva, gpa);
779  }
780  
781  /**
782   * kvm_vz_badvaddr_to_gpa() - Convert GVA BadVAddr from root exception to GPA.
783   * @vcpu:	KVM VCPU state.
784   * @badvaddr:	Root BadVAddr.
785   * @gpa:	Output guest physical address.
786   *
787   * VZ implementations are permitted to report guest virtual addresses (GVA) in
788   * BadVAddr on a root exception during guest execution, instead of the more
789   * convenient guest physical addresses (GPA). When we get a GVA, this function
790   * converts it to a GPA, taking into account guest segmentation and guest TLB
791   * state.
792   *
793   * Returns:	0 on success.
794   *		-errno on failure.
795   */
796  static int kvm_vz_badvaddr_to_gpa(struct kvm_vcpu *vcpu, unsigned long badvaddr,
797  				  unsigned long *gpa)
798  {
799  	unsigned int gexccode = (vcpu->arch.host_cp0_guestctl0 &
800  				 MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;
801  
802  	/* If BadVAddr is GPA, then all is well in the world */
803  	if (likely(gexccode == MIPS_GCTL0_GEXC_GPA)) {
804  		*gpa = badvaddr;
805  		return 0;
806  	}
807  
808  	/* Otherwise we'd expect it to be GVA ... */
809  	if (WARN(gexccode != MIPS_GCTL0_GEXC_GVA,
810  		 "Unexpected gexccode %#x\n", gexccode))
811  		return -EINVAL;
812  
813  	/* ... and we need to perform the GVA->GPA translation in software */
814  	return kvm_vz_gva_to_gpa(vcpu, badvaddr, gpa);
815  }
816  
817  static int kvm_trap_vz_no_handler(struct kvm_vcpu *vcpu)
818  {
819  	u32 *opc = (u32 *) vcpu->arch.pc;
820  	u32 cause = vcpu->arch.host_cp0_cause;
821  	u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
822  	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
823  	u32 inst = 0;
824  
825  	/*
826  	 *  Fetch the instruction.
827  	 */
828  	if (cause & CAUSEF_BD)
829  		opc += 1;
830  	kvm_get_badinstr(opc, vcpu, &inst);
831  
832  	kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
833  		exccode, opc, inst, badvaddr,
834  		read_gc0_status());
835  	kvm_arch_vcpu_dump_regs(vcpu);
836  	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
837  	return RESUME_HOST;
838  }
839  
840  static unsigned long mips_process_maar(unsigned int op, unsigned long val)
841  {
842  	/* Mask off unused bits */
843  	unsigned long mask = 0xfffff000 | MIPS_MAAR_S | MIPS_MAAR_VL;
844  
845  	if (read_gc0_pagegrain() & PG_ELPA)
846  		mask |= 0x00ffffff00000000ull;
847  	if (cpu_guest_has_mvh)
848  		mask |= MIPS_MAAR_VH;
849  
850  	/* Set or clear VH */
851  	if (op == mtc_op) {
852  		/* clear VH */
853  		val &= ~MIPS_MAAR_VH;
854  	} else if (op == dmtc_op) {
855  		/* set VH to match VL */
856  		val &= ~MIPS_MAAR_VH;
857  		if (val & MIPS_MAAR_VL)
858  			val |= MIPS_MAAR_VH;
859  	}
860  
861  	return val & mask;
862  }
863  
864  static void kvm_write_maari(struct kvm_vcpu *vcpu, unsigned long val)
865  {
866  	struct mips_coproc *cop0 = &vcpu->arch.cop0;
867  
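	/*
	 * Clamp out-of-range index writes: an all-ones index selects the last
	 * implemented MAAR pair, any other out-of-range value is ignored.
	 */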
868  	val &= MIPS_MAARI_INDEX;
869  	if (val == MIPS_MAARI_INDEX)
870  		kvm_write_sw_gc0_maari(cop0, ARRAY_SIZE(vcpu->arch.maar) - 1);
871  	else if (val < ARRAY_SIZE(vcpu->arch.maar))
872  		kvm_write_sw_gc0_maari(cop0, val);
873  }
874  
875  static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
876  					      u32 *opc, u32 cause,
877  					      struct kvm_vcpu *vcpu)
878  {
879  	struct mips_coproc *cop0 = &vcpu->arch.cop0;
880  	enum emulation_result er = EMULATE_DONE;
881  	u32 rt, rd, sel;
882  	unsigned long curr_pc;
883  	unsigned long val;
884  
885  	/*
886  	 * Update PC and hold onto current PC in case there is
887  	 * an error and we want to rollback the PC
888  	 */
889  	curr_pc = vcpu->arch.pc;
890  	er = update_pc(vcpu, cause);
891  	if (er == EMULATE_FAIL)
892  		return er;
893  
894  	if (inst.co_format.co) {
895  		switch (inst.co_format.func) {
896  		case wait_op:
897  			er = kvm_mips_emul_wait(vcpu);
898  			break;
899  		default:
900  			er = EMULATE_FAIL;
901  		}
902  	} else {
903  		rt = inst.c0r_format.rt;
904  		rd = inst.c0r_format.rd;
905  		sel = inst.c0r_format.sel;
906  
907  		switch (inst.c0r_format.rs) {
908  		case dmfc_op:
909  		case mfc_op:
910  #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
911  			cop0->stat[rd][sel]++;
912  #endif
913  			if (rd == MIPS_CP0_COUNT &&
914  			    sel == 0) {			/* Count */
915  				val = kvm_mips_read_count(vcpu);
916  			} else if (rd == MIPS_CP0_COMPARE &&
917  				   sel == 0) {		/* Compare */
918  				val = read_gc0_compare();
919  			} else if (rd == MIPS_CP0_LLADDR &&
920  				   sel == 0) {		/* LLAddr */
921  				if (cpu_guest_has_rw_llb)
922  					val = read_gc0_lladdr() &
923  						MIPS_LLADDR_LLB;
924  				else
925  					val = 0;
926  			} else if (rd == MIPS_CP0_LLADDR &&
927  				   sel == 1 &&		/* MAAR */
928  				   cpu_guest_has_maar &&
929  				   !cpu_guest_has_dyn_maar) {
930  				/* MAARI must be in range */
931  				BUG_ON(kvm_read_sw_gc0_maari(cop0) >=
932  						ARRAY_SIZE(vcpu->arch.maar));
933  				val = vcpu->arch.maar[
934  					kvm_read_sw_gc0_maari(cop0)];
935  			} else if ((rd == MIPS_CP0_PRID &&
936  				    (sel == 0 ||	/* PRid */
937  				     sel == 2 ||	/* CDMMBase */
938  				     sel == 3)) ||	/* CMGCRBase */
939  				   (rd == MIPS_CP0_STATUS &&
940  				    (sel == 2 ||	/* SRSCtl */
941  				     sel == 3)) ||	/* SRSMap */
942  				   (rd == MIPS_CP0_CONFIG &&
943  				    (sel == 6 ||	/* Config6 */
944  				     sel == 7)) ||	/* Config7 */
945  				   (rd == MIPS_CP0_LLADDR &&
946  				    (sel == 2) &&	/* MAARI */
947  				    cpu_guest_has_maar &&
948  				    !cpu_guest_has_dyn_maar) ||
949  				   (rd == MIPS_CP0_ERRCTL &&
950  				    (sel == 0))) {	/* ErrCtl */
951  				val = cop0->reg[rd][sel];
952  #ifdef CONFIG_CPU_LOONGSON64
953  			} else if (rd == MIPS_CP0_DIAG &&
954  				   (sel == 0)) {	/* Diag */
955  				val = cop0->reg[rd][sel];
956  #endif
957  			} else {
958  				val = 0;
959  				er = EMULATE_FAIL;
960  			}
961  
962  			if (er != EMULATE_FAIL) {
963  				/* Sign extend */
964  				if (inst.c0r_format.rs == mfc_op)
965  					val = (int)val;
966  				vcpu->arch.gprs[rt] = val;
967  			}
968  
969  			trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mfc_op) ?
970  					KVM_TRACE_MFC0 : KVM_TRACE_DMFC0,
971  				      KVM_TRACE_COP0(rd, sel), val);
972  			break;
973  
974  		case dmtc_op:
975  		case mtc_op:
976  #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
977  			cop0->stat[rd][sel]++;
978  #endif
979  			val = vcpu->arch.gprs[rt];
980  			trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mtc_op) ?
981  					KVM_TRACE_MTC0 : KVM_TRACE_DMTC0,
982  				      KVM_TRACE_COP0(rd, sel), val);
983  
984  			if (rd == MIPS_CP0_COUNT &&
985  			    sel == 0) {			/* Count */
986  				kvm_vz_lose_htimer(vcpu);
987  				kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
988  			} else if (rd == MIPS_CP0_COMPARE &&
989  				   sel == 0) {		/* Compare */
990  				kvm_mips_write_compare(vcpu,
991  						       vcpu->arch.gprs[rt],
992  						       true);
993  			} else if (rd == MIPS_CP0_LLADDR &&
994  				   sel == 0) {		/* LLAddr */
995  				/*
996  				 * P5600 generates GPSI on guest MTC0 LLAddr.
997  				 * Only allow the guest to clear LLB.
998  				 */
999  				if (cpu_guest_has_rw_llb &&
1000  				    !(val & MIPS_LLADDR_LLB))
1001  					write_gc0_lladdr(0);
1002  			} else if (rd == MIPS_CP0_LLADDR &&
1003  				   sel == 1 &&		/* MAAR */
1004  				   cpu_guest_has_maar &&
1005  				   !cpu_guest_has_dyn_maar) {
1006  				val = mips_process_maar(inst.c0r_format.rs,
1007  							val);
1008  
1009  				/* MAARI must be in range */
1010  				BUG_ON(kvm_read_sw_gc0_maari(cop0) >=
1011  						ARRAY_SIZE(vcpu->arch.maar));
1012  				vcpu->arch.maar[kvm_read_sw_gc0_maari(cop0)] =
1013  									val;
1014  			} else if (rd == MIPS_CP0_LLADDR &&
1015  				   (sel == 2) &&	/* MAARI */
1016  				   cpu_guest_has_maar &&
1017  				   !cpu_guest_has_dyn_maar) {
1018  				kvm_write_maari(vcpu, val);
1019  			} else if (rd == MIPS_CP0_CONFIG &&
1020  				   (sel == 6)) {
1021  				cop0->reg[rd][sel] = (int)val;
1022  			} else if (rd == MIPS_CP0_ERRCTL &&
1023  				   (sel == 0)) {	/* ErrCtl */
1024  				/* ignore the written value */
1025  #ifdef CONFIG_CPU_LOONGSON64
1026  			} else if (rd == MIPS_CP0_DIAG &&
1027  				   (sel == 0)) {	/* Diag */
1028  				unsigned long flags;
1029  
1030  				local_irq_save(flags);
1031  				if (val & LOONGSON_DIAG_BTB) {
1032  					/* Flush BTB */
1033  					set_c0_diag(LOONGSON_DIAG_BTB);
1034  				}
1035  				if (val & LOONGSON_DIAG_ITLB) {
1036  					/* Flush ITLB */
1037  					set_c0_diag(LOONGSON_DIAG_ITLB);
1038  				}
1039  				if (val & LOONGSON_DIAG_DTLB) {
1040  					/* Flush DTLB */
1041  					set_c0_diag(LOONGSON_DIAG_DTLB);
1042  				}
1043  				if (val & LOONGSON_DIAG_VTLB) {
1044  					/* Flush VTLB */
1045  					kvm_loongson_clear_guest_vtlb();
1046  				}
1047  				if (val & LOONGSON_DIAG_FTLB) {
1048  					/* Flush FTLB */
1049  					kvm_loongson_clear_guest_ftlb();
1050  				}
1051  				local_irq_restore(flags);
1052  #endif
1053  			} else {
1054  				er = EMULATE_FAIL;
1055  			}
1056  			break;
1057  
1058  		default:
1059  			er = EMULATE_FAIL;
1060  			break;
1061  		}
1062  	}
1063  	/* Rollback PC only if emulation was unsuccessful */
1064  	if (er == EMULATE_FAIL) {
1065  		kvm_err("[%#lx]%s: unsupported cop0 instruction 0x%08x\n",
1066  			curr_pc, __func__, inst.word);
1067  
1068  		vcpu->arch.pc = curr_pc;
1069  	}
1070  
1071  	return er;
1072  }
1073  
1074  static enum emulation_result kvm_vz_gpsi_cache(union mips_instruction inst,
1075  					       u32 *opc, u32 cause,
1076  					       struct kvm_vcpu *vcpu)
1077  {
1078  	enum emulation_result er = EMULATE_DONE;
1079  	u32 cache, op_inst, op, base;
1080  	s16 offset;
1081  	struct kvm_vcpu_arch *arch = &vcpu->arch;
1082  	unsigned long va, curr_pc;
1083  
1084  	/*
1085  	 * Update PC and hold onto current PC in case there is
1086  	 * an error and we want to rollback the PC
1087  	 */
1088  	curr_pc = vcpu->arch.pc;
1089  	er = update_pc(vcpu, cause);
1090  	if (er == EMULATE_FAIL)
1091  		return er;
1092  
1093  	base = inst.i_format.rs;
1094  	op_inst = inst.i_format.rt;
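	/* MIPS r6 re-encodes CACHE under SPECIAL3 with a 9-bit signed offset */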
1095  	if (cpu_has_mips_r6)
1096  		offset = inst.spec3_format.simmediate;
1097  	else
1098  		offset = inst.i_format.simmediate;
1099  	cache = op_inst & CacheOp_Cache;
1100  	op = op_inst & CacheOp_Op;
1101  
1102  	va = arch->gprs[base] + offset;
1103  
1104  	kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1105  		  cache, op, base, arch->gprs[base], offset);
1106  
1107  	/* Secondary or tertiary cache ops ignored */
1108  	if (cache != Cache_I && cache != Cache_D)
1109  		return EMULATE_DONE;
1110  
1111  	switch (op_inst) {
1112  	case Index_Invalidate_I:
1113  		flush_icache_line_indexed(va);
1114  		return EMULATE_DONE;
1115  	case Index_Writeback_Inv_D:
1116  		flush_dcache_line_indexed(va);
1117  		return EMULATE_DONE;
1118  	case Hit_Invalidate_I:
1119  	case Hit_Invalidate_D:
1120  	case Hit_Writeback_Inv_D:
1121  		if (boot_cpu_type() == CPU_CAVIUM_OCTEON3) {
1122  			/* We can just flush entire icache */
1123  			local_flush_icache_range(0, 0);
1124  			return EMULATE_DONE;
1125  		}
1126  
1127  		/* So far, other platforms support guest hit cache ops */
1128  		break;
1129  	default:
1130  		break;
1131  	}
1132  
1133  	kvm_err("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1134  		curr_pc, vcpu->arch.gprs[31], cache, op, base, arch->gprs[base],
1135  		offset);
1136  	/* Rollback PC */
1137  	vcpu->arch.pc = curr_pc;
1138  
1139  	return EMULATE_FAIL;
1140  }
1141  
1142  #ifdef CONFIG_CPU_LOONGSON64
1143  static enum emulation_result kvm_vz_gpsi_lwc2(union mips_instruction inst,
1144  					      u32 *opc, u32 cause,
1145  					      struct kvm_vcpu *vcpu)
1146  {
1147  	unsigned int rs, rd;
1148  	unsigned int hostcfg;
1149  	unsigned long curr_pc;
1150  	enum emulation_result er = EMULATE_DONE;
1151  
1152  	/*
1153  	 * Update PC and hold onto current PC in case there is
1154  	 * an error and we want to rollback the PC
1155  	 */
1156  	curr_pc = vcpu->arch.pc;
1157  	er = update_pc(vcpu, cause);
1158  	if (er == EMULATE_FAIL)
1159  		return er;
1160  
1161  	rs = inst.loongson3_lscsr_format.rs;
1162  	rd = inst.loongson3_lscsr_format.rd;
1163  	switch (inst.loongson3_lscsr_format.fr) {
1164  	case 0x8:  /* Read CPUCFG */
1165  		++vcpu->stat.vz_cpucfg_exits;
1166  		hostcfg = read_cpucfg(vcpu->arch.gprs[rs]);
1167  
1168  		switch (vcpu->arch.gprs[rs]) {
1169  		case LOONGSON_CFG0:
1170  			vcpu->arch.gprs[rd] = 0x14c000;
1171  			break;
1172  		case LOONGSON_CFG1:
1173  			hostcfg &= (LOONGSON_CFG1_FP | LOONGSON_CFG1_MMI |
1174  				    LOONGSON_CFG1_MSA1 | LOONGSON_CFG1_MSA2 |
1175  				    LOONGSON_CFG1_SFBP);
1176  			vcpu->arch.gprs[rd] = hostcfg;
1177  			break;
1178  		case LOONGSON_CFG2:
1179  			hostcfg &= (LOONGSON_CFG2_LEXT1 | LOONGSON_CFG2_LEXT2 |
1180  				    LOONGSON_CFG2_LEXT3 | LOONGSON_CFG2_LSPW);
1181  			vcpu->arch.gprs[rd] = hostcfg;
1182  			break;
1183  		case LOONGSON_CFG3:
1184  			vcpu->arch.gprs[rd] = hostcfg;
1185  			break;
1186  		default:
1187  			/* Don't export any other advanced features to guest */
1188  			vcpu->arch.gprs[rd] = 0;
1189  			break;
1190  		}
1191  		break;
1192  
1193  	default:
1194  		kvm_err("lwc2 emulate not impl %d rs %lx @%lx\n",
1195  			inst.loongson3_lscsr_format.fr, vcpu->arch.gprs[rs], curr_pc);
1196  		er = EMULATE_FAIL;
1197  		break;
1198  	}
1199  
1200  	/* Rollback PC only if emulation was unsuccessful */
1201  	if (er == EMULATE_FAIL) {
1202  		kvm_err("[%#lx]%s: unsupported lwc2 instruction 0x%08x 0x%08x\n",
1203  			curr_pc, __func__, inst.word, inst.loongson3_lscsr_format.fr);
1204  
1205  		vcpu->arch.pc = curr_pc;
1206  	}
1207  
1208  	return er;
1209  }
1210  #endif
1211  
1212  static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
1213  						     struct kvm_vcpu *vcpu)
1214  {
1215  	enum emulation_result er = EMULATE_DONE;
1216  	struct kvm_vcpu_arch *arch = &vcpu->arch;
1217  	union mips_instruction inst;
1218  	int rd, rt, sel;
1219  	int err;
1220  
1221  	/*
1222  	 *  Fetch the instruction.
1223  	 */
1224  	if (cause & CAUSEF_BD)
1225  		opc += 1;
1226  	err = kvm_get_badinstr(opc, vcpu, &inst.word);
1227  	if (err)
1228  		return EMULATE_FAIL;
1229  
1230  	switch (inst.r_format.opcode) {
1231  	case cop0_op:
1232  		er = kvm_vz_gpsi_cop0(inst, opc, cause, vcpu);
1233  		break;
1234  #ifndef CONFIG_CPU_MIPSR6
1235  	case cache_op:
1236  		trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
1237  		er = kvm_vz_gpsi_cache(inst, opc, cause, vcpu);
1238  		break;
1239  #endif
1240  #ifdef CONFIG_CPU_LOONGSON64
1241  	case lwc2_op:
1242  		er = kvm_vz_gpsi_lwc2(inst, opc, cause, vcpu);
1243  		break;
1244  #endif
1245  	case spec3_op:
1246  		switch (inst.spec3_format.func) {
1247  #ifdef CONFIG_CPU_MIPSR6
1248  		case cache6_op:
1249  			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
1250  			er = kvm_vz_gpsi_cache(inst, opc, cause, vcpu);
1251  			break;
1252  #endif
1253  		case rdhwr_op:
1254  			if (inst.r_format.rs || (inst.r_format.re >> 3))
1255  				goto unknown;
1256  
1257  			rd = inst.r_format.rd;
1258  			rt = inst.r_format.rt;
1259  			sel = inst.r_format.re & 0x7;
1260  
1261  			switch (rd) {
1262  			case MIPS_HWR_CC:	/* Read count register */
1263  				arch->gprs[rt] =
1264  					(long)(int)kvm_mips_read_count(vcpu);
1265  				break;
1266  			default:
1267  				trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
1268  					      KVM_TRACE_HWR(rd, sel), 0);
1269  				goto unknown;
1270  			}
1271  
1272  			trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
1273  				      KVM_TRACE_HWR(rd, sel), arch->gprs[rt]);
1274  
1275  			er = update_pc(vcpu, cause);
1276  			break;
1277  		default:
1278  			goto unknown;
1279  		}
1280  		break;
1281  unknown:
1282  
1283  	default:
1284  		kvm_err("GPSI exception not supported (%p/%#x)\n",
1285  				opc, inst.word);
1286  		kvm_arch_vcpu_dump_regs(vcpu);
1287  		er = EMULATE_FAIL;
1288  		break;
1289  	}
1290  
1291  	return er;
1292  }
1293  
1294  static enum emulation_result kvm_trap_vz_handle_gsfc(u32 cause, u32 *opc,
1295  						     struct kvm_vcpu *vcpu)
1296  {
1297  	enum emulation_result er = EMULATE_DONE;
1298  	struct kvm_vcpu_arch *arch = &vcpu->arch;
1299  	union mips_instruction inst;
1300  	int err;
1301  
1302  	/*
1303  	 *  Fetch the instruction.
1304  	 */
1305  	if (cause & CAUSEF_BD)
1306  		opc += 1;
1307  	err = kvm_get_badinstr(opc, vcpu, &inst.word);
1308  	if (err)
1309  		return EMULATE_FAIL;
1310  
1311  	/* complete MTC0 on behalf of guest and advance EPC */
1312  	if (inst.c0r_format.opcode == cop0_op &&
1313  	    inst.c0r_format.rs == mtc_op &&
1314  	    inst.c0r_format.z == 0) {
1315  		int rt = inst.c0r_format.rt;
1316  		int rd = inst.c0r_format.rd;
1317  		int sel = inst.c0r_format.sel;
1318  		unsigned int val = arch->gprs[rt];
1319  		unsigned int old_val, change;
1320  
1321  		trace_kvm_hwr(vcpu, KVM_TRACE_MTC0, KVM_TRACE_COP0(rd, sel),
1322  			      val);
1323  
1324  		if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
1325  			/* FR bit should read as zero if no FPU */
1326  			if (!kvm_mips_guest_has_fpu(&vcpu->arch))
1327  				val &= ~(ST0_CU1 | ST0_FR);
1328  
1329  			/*
1330  			 * Also don't allow FR to be set if host doesn't support
1331  			 * it.
1332  			 */
1333  			if (!(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
1334  				val &= ~ST0_FR;
1335  
1336  			old_val = read_gc0_status();
1337  			change = val ^ old_val;
1338  
1339  			if (change & ST0_FR) {
1340  				/*
1341  				 * FPU and Vector register state is made
1342  				 * UNPREDICTABLE by a change of FR, so don't
1343  				 * even bother saving it.
1344  				 */
1345  				kvm_drop_fpu(vcpu);
1346  			}
1347  
1348  			/*
1349  			 * If MSA state is already live, it is undefined how it
1350  			 * interacts with FR=0 FPU state, and we don't want to
1351  			 * hit reserved instruction exceptions trying to save
1352  			 * the MSA state later when CU=1 && FR=1, so play it
1353  			 * safe and save it first.
1354  			 */
1355  			if (change & ST0_CU1 && !(val & ST0_FR) &&
1356  			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
1357  				kvm_lose_fpu(vcpu);
1358  
1359  			write_gc0_status(val);
1360  		} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
1361  			u32 old_cause = read_gc0_cause();
1362  			u32 change = old_cause ^ val;
1363  
1364  			/* DC bit enabling/disabling timer? */
1365  			if (change & CAUSEF_DC) {
1366  				if (val & CAUSEF_DC) {
1367  					kvm_vz_lose_htimer(vcpu);
1368  					kvm_mips_count_disable_cause(vcpu);
1369  				} else {
1370  					kvm_mips_count_enable_cause(vcpu);
1371  				}
1372  			}
1373  
1374  			/* Only certain bits are RW to the guest */
1375  			change &= (CAUSEF_DC | CAUSEF_IV | CAUSEF_WP |
1376  				   CAUSEF_IP0 | CAUSEF_IP1);
1377  
1378  			/* WP can only be cleared */
1379  			change &= ~CAUSEF_WP | old_cause;
1380  
1381  			write_gc0_cause(old_cause ^ change);
1382  		} else if ((rd == MIPS_CP0_STATUS) && (sel == 1)) { /* IntCtl */
1383  			write_gc0_intctl(val);
1384  		} else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
1385  			old_val = read_gc0_config5();
1386  			change = val ^ old_val;
1387  			/* Handle changes in FPU/MSA modes */
1388  			preempt_disable();
1389  
1390  			/*
1391  			 * Propagate FRE changes immediately if the FPU
1392  			 * context is already loaded.
1393  			 */
1394  			if (change & MIPS_CONF5_FRE &&
1395  			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
1396  				change_c0_config5(MIPS_CONF5_FRE, val);
1397  
1398  			preempt_enable();
1399  
1400  			val = old_val ^
1401  				(change & kvm_vz_config5_guest_wrmask(vcpu));
1402  			write_gc0_config5(val);
1403  		} else {
1404  			kvm_err("Handle GSFC, unsupported field change @ %p: %#x\n",
1405  			    opc, inst.word);
1406  			er = EMULATE_FAIL;
1407  		}
1408  
1409  		if (er != EMULATE_FAIL)
1410  			er = update_pc(vcpu, cause);
1411  	} else {
1412  		kvm_err("Handle GSFC, unrecognized instruction @ %p: %#x\n",
1413  			opc, inst.word);
1414  		er = EMULATE_FAIL;
1415  	}
1416  
1417  	return er;
1418  }
1419  
1420  static enum emulation_result kvm_trap_vz_handle_ghfc(u32 cause, u32 *opc,
1421  						     struct kvm_vcpu *vcpu)
1422  {
1423  	/*
1424  	 * Presumably this is due to MC (guest mode change), so let's trace some
1425  	 * relevant info.
1426  	 */
1427  	trace_kvm_guest_mode_change(vcpu);
1428  
1429  	return EMULATE_DONE;
1430  }
1431  
1432  static enum emulation_result kvm_trap_vz_handle_hc(u32 cause, u32 *opc,
1433  						   struct kvm_vcpu *vcpu)
1434  {
1435  	enum emulation_result er;
1436  	union mips_instruction inst;
1437  	unsigned long curr_pc;
1438  	int err;
1439  
1440  	if (cause & CAUSEF_BD)
1441  		opc += 1;
1442  	err = kvm_get_badinstr(opc, vcpu, &inst.word);
1443  	if (err)
1444  		return EMULATE_FAIL;
1445  
1446  	/*
1447  	 * Update PC and hold onto current PC in case there is
1448  	 * an error and we want to rollback the PC
1449  	 */
1450  	curr_pc = vcpu->arch.pc;
1451  	er = update_pc(vcpu, cause);
1452  	if (er == EMULATE_FAIL)
1453  		return er;
1454  
1455  	er = kvm_mips_emul_hypcall(vcpu, inst);
1456  	if (er == EMULATE_FAIL)
1457  		vcpu->arch.pc = curr_pc;
1458  
1459  	return er;
1460  }
1461  
1462  static enum emulation_result kvm_trap_vz_no_handler_guest_exit(u32 gexccode,
1463  							u32 cause,
1464  							u32 *opc,
1465  							struct kvm_vcpu *vcpu)
1466  {
1467  	u32 inst;
1468  
1469  	/*
1470  	 *  Fetch the instruction.
1471  	 */
1472  	if (cause & CAUSEF_BD)
1473  		opc += 1;
1474  	kvm_get_badinstr(opc, vcpu, &inst);
1475  
1476  	kvm_err("Guest Exception Code: %d not yet handled @ PC: %p, inst: 0x%08x  Status: %#x\n",
1477  		gexccode, opc, inst, read_gc0_status());
1478  
1479  	return EMULATE_FAIL;
1480  }
1481  
1482  static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
1483  {
1484  	u32 *opc = (u32 *) vcpu->arch.pc;
1485  	u32 cause = vcpu->arch.host_cp0_cause;
1486  	enum emulation_result er = EMULATE_DONE;
1487  	u32 gexccode = (vcpu->arch.host_cp0_guestctl0 &
1488  			MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;
1489  	int ret = RESUME_GUEST;
1490  
1491  	trace_kvm_exit(vcpu, KVM_TRACE_EXIT_GEXCCODE_BASE + gexccode);
1492  	switch (gexccode) {
1493  	case MIPS_GCTL0_GEXC_GPSI:
1494  		++vcpu->stat.vz_gpsi_exits;
1495  		er = kvm_trap_vz_handle_gpsi(cause, opc, vcpu);
1496  		break;
1497  	case MIPS_GCTL0_GEXC_GSFC:
1498  		++vcpu->stat.vz_gsfc_exits;
1499  		er = kvm_trap_vz_handle_gsfc(cause, opc, vcpu);
1500  		break;
1501  	case MIPS_GCTL0_GEXC_HC:
1502  		++vcpu->stat.vz_hc_exits;
1503  		er = kvm_trap_vz_handle_hc(cause, opc, vcpu);
1504  		break;
1505  	case MIPS_GCTL0_GEXC_GRR:
1506  		++vcpu->stat.vz_grr_exits;
1507  		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
1508  						       vcpu);
1509  		break;
1510  	case MIPS_GCTL0_GEXC_GVA:
1511  		++vcpu->stat.vz_gva_exits;
1512  		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
1513  						       vcpu);
1514  		break;
1515  	case MIPS_GCTL0_GEXC_GHFC:
1516  		++vcpu->stat.vz_ghfc_exits;
1517  		er = kvm_trap_vz_handle_ghfc(cause, opc, vcpu);
1518  		break;
1519  	case MIPS_GCTL0_GEXC_GPA:
1520  		++vcpu->stat.vz_gpa_exits;
1521  		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
1522  						       vcpu);
1523  		break;
1524  	default:
1525  		++vcpu->stat.vz_resvd_exits;
1526  		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
1527  						       vcpu);
1528  		break;
1529  
1530  	}
1531  
1532  	if (er == EMULATE_DONE) {
1533  		ret = RESUME_GUEST;
1534  	} else if (er == EMULATE_HYPERCALL) {
1535  		ret = kvm_mips_handle_hypcall(vcpu);
1536  	} else {
1537  		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1538  		ret = RESUME_HOST;
1539  	}
1540  	return ret;
1541  }
1542  
1543  /**
1544   * kvm_trap_vz_handle_cop_unusable() - Guest used unusable coprocessor.
1545   * @vcpu:	Virtual CPU context.
1546   *
1547   * Handle when the guest attempts to use a coprocessor which hasn't been allowed
1548   * by the root context.
1549   *
1550   * Return: value indicating whether to resume the host or the guest
1551   * 	   (RESUME_HOST or RESUME_GUEST)
1552   */
1553  static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
1554  {
1555  	u32 cause = vcpu->arch.host_cp0_cause;
1556  	enum emulation_result er = EMULATE_FAIL;
1557  	int ret = RESUME_GUEST;
1558  
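	/* Cause.CE == 1: the faulting coprocessor is CP1, the FPU */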
1559  	if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
1560  		/*
1561  		 * If guest FPU not present, the FPU operation should have been
1562  		 * treated as a reserved instruction!
1563  		 * If FPU already in use, we shouldn't get this at all.
1564  		 */
1565  		if (WARN_ON(!kvm_mips_guest_has_fpu(&vcpu->arch) ||
1566  			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
1567  			preempt_enable();
1568  			return EMULATE_FAIL;
1569  		}
1570  
1571  		kvm_own_fpu(vcpu);
1572  		er = EMULATE_DONE;
1573  	}
1574  	/* other coprocessors not handled */
1575  
1576  	switch (er) {
1577  	case EMULATE_DONE:
1578  		ret = RESUME_GUEST;
1579  		break;
1580  
1581  	case EMULATE_FAIL:
1582  		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1583  		ret = RESUME_HOST;
1584  		break;
1585  
1586  	default:
1587  		BUG();
1588  	}
1589  	return ret;
1590  }
1591  
1592  /**
1593   * kvm_trap_vz_handle_msa_disabled() - Guest used MSA while disabled in root.
1594   * @vcpu:	Virtual CPU context.
1595   *
1596   * Handle when the guest attempts to use MSA when it is disabled in the root
1597   * context.
1598   *
1599   * Return: value indicating whether to resume the host or the guest
1600   * 	   (RESUME_HOST or RESUME_GUEST)
1601   */
1602  static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu)
1603  {
1604  	/*
1605  	 * If MSA not present or not exposed to guest or FR=0, the MSA operation
1606  	 * should have been treated as a reserved instruction!
1607  	 * Same if CU1=1, FR=0.
1608  	 * If MSA already in use, we shouldn't get this at all.
1609  	 */
1610  	if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
1611  	    (read_gc0_status() & (ST0_CU1 | ST0_FR)) == ST0_CU1 ||
1612  	    !(read_gc0_config5() & MIPS_CONF5_MSAEN) ||
1613  	    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
1614  		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1615  		return RESUME_HOST;
1616  	}
1617  
1618  	kvm_own_msa(vcpu);
1619  
1620  	return RESUME_GUEST;
1621  }
1622  
1623  static int kvm_trap_vz_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
1624  {
1625  	struct kvm_run *run = vcpu->run;
1626  	u32 *opc = (u32 *) vcpu->arch.pc;
1627  	u32 cause = vcpu->arch.host_cp0_cause;
1628  	ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
1629  	union mips_instruction inst;
1630  	enum emulation_result er = EMULATE_DONE;
1631  	int err, ret = RESUME_GUEST;
1632  
1633  	if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, false)) {
1634  		/* A code fetch fault doesn't count as an MMIO */
1635  		if (kvm_is_ifetch_fault(&vcpu->arch)) {
1636  			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1637  			return RESUME_HOST;
1638  		}
1639  
1640  		/* Fetch the instruction */
1641  		if (cause & CAUSEF_BD)
1642  			opc += 1;
1643  		err = kvm_get_badinstr(opc, vcpu, &inst.word);
1644  		if (err) {
1645  			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1646  			return RESUME_HOST;
1647  		}
1648  
1649  		/* Treat as MMIO */
1650  		er = kvm_mips_emulate_load(inst, cause, vcpu);
1651  		if (er == EMULATE_FAIL) {
1652  			kvm_err("Guest Emulate Load from MMIO space failed: PC: %p, BadVaddr: %#lx\n",
1653  				opc, badvaddr);
1654  			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1655  		}
1656  	}
1657  
1658  	if (er == EMULATE_DONE) {
1659  		ret = RESUME_GUEST;
1660  	} else if (er == EMULATE_DO_MMIO) {
1661  		run->exit_reason = KVM_EXIT_MMIO;
1662  		ret = RESUME_HOST;
1663  	} else {
1664  		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1665  		ret = RESUME_HOST;
1666  	}
1667  	return ret;
1668  }
1669  
1670  static int kvm_trap_vz_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
1671  {
1672  	struct kvm_run *run = vcpu->run;
1673  	u32 *opc = (u32 *) vcpu->arch.pc;
1674  	u32 cause = vcpu->arch.host_cp0_cause;
1675  	ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
1676  	union mips_instruction inst;
1677  	enum emulation_result er = EMULATE_DONE;
1678  	int err;
1679  	int ret = RESUME_GUEST;
1680  
1681  	/* Just try the access again if we couldn't do the translation */
1682  	if (kvm_vz_badvaddr_to_gpa(vcpu, badvaddr, &badvaddr))
1683  		return RESUME_GUEST;
1684  	vcpu->arch.host_cp0_badvaddr = badvaddr;
1685  
1686  	if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, true)) {
1687  		/* Fetch the instruction */
1688  		if (cause & CAUSEF_BD)
1689  			opc += 1;
1690  		err = kvm_get_badinstr(opc, vcpu, &inst.word);
1691  		if (err) {
1692  			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1693  			return RESUME_HOST;
1694  		}
1695  
1696  		/* Treat as MMIO */
1697  		er = kvm_mips_emulate_store(inst, cause, vcpu);
1698  		if (er == EMULATE_FAIL) {
1699  			kvm_err("Guest Emulate Store to MMIO space failed: PC: %p, BadVaddr: %#lx\n",
1700  				opc, badvaddr);
1701  			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1702  		}
1703  	}
1704  
1705  	if (er == EMULATE_DONE) {
1706  		ret = RESUME_GUEST;
1707  	} else if (er == EMULATE_DO_MMIO) {
1708  		run->exit_reason = KVM_EXIT_MMIO;
1709  		ret = RESUME_HOST;
1710  	} else {
1711  		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1712  		ret = RESUME_HOST;
1713  	}
1714  	return ret;
1715  }
1716  
1717  static u64 kvm_vz_get_one_regs[] = {
1718  	KVM_REG_MIPS_CP0_INDEX,
1719  	KVM_REG_MIPS_CP0_ENTRYLO0,
1720  	KVM_REG_MIPS_CP0_ENTRYLO1,
1721  	KVM_REG_MIPS_CP0_CONTEXT,
1722  	KVM_REG_MIPS_CP0_PAGEMASK,
1723  	KVM_REG_MIPS_CP0_PAGEGRAIN,
1724  	KVM_REG_MIPS_CP0_WIRED,
1725  	KVM_REG_MIPS_CP0_HWRENA,
1726  	KVM_REG_MIPS_CP0_BADVADDR,
1727  	KVM_REG_MIPS_CP0_COUNT,
1728  	KVM_REG_MIPS_CP0_ENTRYHI,
1729  	KVM_REG_MIPS_CP0_COMPARE,
1730  	KVM_REG_MIPS_CP0_STATUS,
1731  	KVM_REG_MIPS_CP0_INTCTL,
1732  	KVM_REG_MIPS_CP0_CAUSE,
1733  	KVM_REG_MIPS_CP0_EPC,
1734  	KVM_REG_MIPS_CP0_PRID,
1735  	KVM_REG_MIPS_CP0_EBASE,
1736  	KVM_REG_MIPS_CP0_CONFIG,
1737  	KVM_REG_MIPS_CP0_CONFIG1,
1738  	KVM_REG_MIPS_CP0_CONFIG2,
1739  	KVM_REG_MIPS_CP0_CONFIG3,
1740  	KVM_REG_MIPS_CP0_CONFIG4,
1741  	KVM_REG_MIPS_CP0_CONFIG5,
1742  	KVM_REG_MIPS_CP0_CONFIG6,
1743  #ifdef CONFIG_64BIT
1744  	KVM_REG_MIPS_CP0_XCONTEXT,
1745  #endif
1746  	KVM_REG_MIPS_CP0_ERROREPC,
1747  
1748  	KVM_REG_MIPS_COUNT_CTL,
1749  	KVM_REG_MIPS_COUNT_RESUME,
1750  	KVM_REG_MIPS_COUNT_HZ,
1751  };
1752  
1753  static u64 kvm_vz_get_one_regs_contextconfig[] = {
1754  	KVM_REG_MIPS_CP0_CONTEXTCONFIG,
1755  #ifdef CONFIG_64BIT
1756  	KVM_REG_MIPS_CP0_XCONTEXTCONFIG,
1757  #endif
1758  };
1759  
1760  static u64 kvm_vz_get_one_regs_segments[] = {
1761  	KVM_REG_MIPS_CP0_SEGCTL0,
1762  	KVM_REG_MIPS_CP0_SEGCTL1,
1763  	KVM_REG_MIPS_CP0_SEGCTL2,
1764  };
1765  
1766  static u64 kvm_vz_get_one_regs_htw[] = {
1767  	KVM_REG_MIPS_CP0_PWBASE,
1768  	KVM_REG_MIPS_CP0_PWFIELD,
1769  	KVM_REG_MIPS_CP0_PWSIZE,
1770  	KVM_REG_MIPS_CP0_PWCTL,
1771  };
1772  
1773  static u64 kvm_vz_get_one_regs_kscratch[] = {
1774  	KVM_REG_MIPS_CP0_KSCRATCH1,
1775  	KVM_REG_MIPS_CP0_KSCRATCH2,
1776  	KVM_REG_MIPS_CP0_KSCRATCH3,
1777  	KVM_REG_MIPS_CP0_KSCRATCH4,
1778  	KVM_REG_MIPS_CP0_KSCRATCH5,
1779  	KVM_REG_MIPS_CP0_KSCRATCH6,
1780  };
1781  
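/*
 * kvm_vz_num_regs() and kvm_vz_copy_reg_indices() must stay in sync: the
 * KVM_GET_REG_LIST ioctl uses the former to size the index list and the
 * latter to fill it, so every conditional below needs a matching branch in
 * the copy routine.
 */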
1782  static unsigned long kvm_vz_num_regs(struct kvm_vcpu *vcpu)
1783  {
1784  	unsigned long ret;
1785  
1786  	ret = ARRAY_SIZE(kvm_vz_get_one_regs);
1787  	if (cpu_guest_has_userlocal)
1788  		++ret;
1789  	if (cpu_guest_has_badinstr)
1790  		++ret;
1791  	if (cpu_guest_has_badinstrp)
1792  		++ret;
1793  	if (cpu_guest_has_contextconfig)
1794  		ret += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
1795  	if (cpu_guest_has_segments)
1796  		ret += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
1797  	if (cpu_guest_has_htw || cpu_guest_has_ldpte)
1798  		ret += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
1799  	if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar)
1800  		ret += 1 + ARRAY_SIZE(vcpu->arch.maar);
1801  	ret += __arch_hweight8(cpu_data[0].guest.kscratch_mask);
1802  
1803  	return ret;
1804  }
1805  
1806  static int kvm_vz_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
1807  {
1808  	u64 index;
1809  	unsigned int i;
1810  
1811  	if (copy_to_user(indices, kvm_vz_get_one_regs,
1812  			 sizeof(kvm_vz_get_one_regs)))
1813  		return -EFAULT;
1814  	indices += ARRAY_SIZE(kvm_vz_get_one_regs);
1815  
1816  	if (cpu_guest_has_userlocal) {
1817  		index = KVM_REG_MIPS_CP0_USERLOCAL;
1818  		if (copy_to_user(indices, &index, sizeof(index)))
1819  			return -EFAULT;
1820  		++indices;
1821  	}
1822  	if (cpu_guest_has_badinstr) {
1823  		index = KVM_REG_MIPS_CP0_BADINSTR;
1824  		if (copy_to_user(indices, &index, sizeof(index)))
1825  			return -EFAULT;
1826  		++indices;
1827  	}
1828  	if (cpu_guest_has_badinstrp) {
1829  		index = KVM_REG_MIPS_CP0_BADINSTRP;
1830  		if (copy_to_user(indices, &index, sizeof(index)))
1831  			return -EFAULT;
1832  		++indices;
1833  	}
1834  	if (cpu_guest_has_contextconfig) {
1835  		if (copy_to_user(indices, kvm_vz_get_one_regs_contextconfig,
1836  				 sizeof(kvm_vz_get_one_regs_contextconfig)))
1837  			return -EFAULT;
1838  		indices += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
1839  	}
1840  	if (cpu_guest_has_segments) {
1841  		if (copy_to_user(indices, kvm_vz_get_one_regs_segments,
1842  				 sizeof(kvm_vz_get_one_regs_segments)))
1843  			return -EFAULT;
1844  		indices += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
1845  	}
1846  	if (cpu_guest_has_htw || cpu_guest_has_ldpte) {
1847  		if (copy_to_user(indices, kvm_vz_get_one_regs_htw,
1848  				 sizeof(kvm_vz_get_one_regs_htw)))
1849  			return -EFAULT;
1850  		indices += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
1851  	}
1852  	if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar) {
1853  		for (i = 0; i < ARRAY_SIZE(vcpu->arch.maar); ++i) {
1854  			index = KVM_REG_MIPS_CP0_MAAR(i);
1855  			if (copy_to_user(indices, &index, sizeof(index)))
1856  				return -EFAULT;
1857  			++indices;
1858  		}
1859  
1860  		index = KVM_REG_MIPS_CP0_MAARI;
1861  		if (copy_to_user(indices, &index, sizeof(index)))
1862  			return -EFAULT;
1863  		++indices;
1864  	}
1865  	for (i = 0; i < 6; ++i) {
1866  		if (!cpu_guest_has_kscr(i + 2))
1867  			continue;
1868  
1869  		if (copy_to_user(indices, &kvm_vz_get_one_regs_kscratch[i],
1870  				 sizeof(kvm_vz_get_one_regs_kscratch[i])))
1871  			return -EFAULT;
1872  		++indices;
1873  	}
1874  
1875  	return 0;
1876  }
1877  
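/*
 * The ONE_REG API always presents EntryLo in its 64-bit layout, with the
 * RI/XI bits at the top of the doubleword.  On a 32-bit kernel the hardware
 * keeps RI/XI at the top of the 32-bit register instead, so the two helpers
 * below shuffle those bits between positions 31:30 and 63:62 when
 * converting; on 64-bit kernels both layouts match and the value passes
 * through unchanged.
 */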
1878  static inline s64 entrylo_kvm_to_user(unsigned long v)
1879  {
1880  	s64 mask, ret = v;
1881  
1882  	if (BITS_PER_LONG == 32) {
1883  		/*
1884  		 * KVM API exposes 64-bit version of the register, so move the
1885  		 * RI/XI bits up into place.
1886  		 */
1887  		mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
1888  		ret &= ~mask;
1889  		ret |= ((s64)v & mask) << 32;
1890  	}
1891  	return ret;
1892  }
1893  
1894  static inline unsigned long entrylo_user_to_kvm(s64 v)
1895  {
1896  	unsigned long mask, ret = v;
1897  
1898  	if (BITS_PER_LONG == 32) {
1899  		/*
1900  		 * KVM API exposes 64-bit version of the register, so move the
1901  		 * RI/XI bits down into place.
1902  		 */
1903  		mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
1904  		ret &= ~mask;
1905  		ret |= (v >> 32) & mask;
1906  	}
1907  	return ret;
1908  }
1909  
1910  static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
1911  			      const struct kvm_one_reg *reg,
1912  			      s64 *v)
1913  {
1914  	struct mips_coproc *cop0 = &vcpu->arch.cop0;
1915  	unsigned int idx;
1916  
1917  	switch (reg->id) {
1918  	case KVM_REG_MIPS_CP0_INDEX:
1919  		*v = (long)read_gc0_index();
1920  		break;
1921  	case KVM_REG_MIPS_CP0_ENTRYLO0:
1922  		*v = entrylo_kvm_to_user(read_gc0_entrylo0());
1923  		break;
1924  	case KVM_REG_MIPS_CP0_ENTRYLO1:
1925  		*v = entrylo_kvm_to_user(read_gc0_entrylo1());
1926  		break;
1927  	case KVM_REG_MIPS_CP0_CONTEXT:
1928  		*v = (long)read_gc0_context();
1929  		break;
1930  	case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
1931  		if (!cpu_guest_has_contextconfig)
1932  			return -EINVAL;
1933  		*v = read_gc0_contextconfig();
1934  		break;
1935  	case KVM_REG_MIPS_CP0_USERLOCAL:
1936  		if (!cpu_guest_has_userlocal)
1937  			return -EINVAL;
1938  		*v = read_gc0_userlocal();
1939  		break;
1940  #ifdef CONFIG_64BIT
1941  	case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
1942  		if (!cpu_guest_has_contextconfig)
1943  			return -EINVAL;
1944  		*v = read_gc0_xcontextconfig();
1945  		break;
1946  #endif
1947  	case KVM_REG_MIPS_CP0_PAGEMASK:
1948  		*v = (long)read_gc0_pagemask();
1949  		break;
1950  	case KVM_REG_MIPS_CP0_PAGEGRAIN:
1951  		*v = (long)read_gc0_pagegrain();
1952  		break;
1953  	case KVM_REG_MIPS_CP0_SEGCTL0:
1954  		if (!cpu_guest_has_segments)
1955  			return -EINVAL;
1956  		*v = read_gc0_segctl0();
1957  		break;
1958  	case KVM_REG_MIPS_CP0_SEGCTL1:
1959  		if (!cpu_guest_has_segments)
1960  			return -EINVAL;
1961  		*v = read_gc0_segctl1();
1962  		break;
1963  	case KVM_REG_MIPS_CP0_SEGCTL2:
1964  		if (!cpu_guest_has_segments)
1965  			return -EINVAL;
1966  		*v = read_gc0_segctl2();
1967  		break;
1968  	case KVM_REG_MIPS_CP0_PWBASE:
1969  		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
1970  			return -EINVAL;
1971  		*v = read_gc0_pwbase();
1972  		break;
1973  	case KVM_REG_MIPS_CP0_PWFIELD:
1974  		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
1975  			return -EINVAL;
1976  		*v = read_gc0_pwfield();
1977  		break;
1978  	case KVM_REG_MIPS_CP0_PWSIZE:
1979  		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
1980  			return -EINVAL;
1981  		*v = read_gc0_pwsize();
1982  		break;
1983  	case KVM_REG_MIPS_CP0_WIRED:
1984  		*v = (long)read_gc0_wired();
1985  		break;
1986  	case KVM_REG_MIPS_CP0_PWCTL:
1987  		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
1988  			return -EINVAL;
1989  		*v = read_gc0_pwctl();
1990  		break;
1991  	case KVM_REG_MIPS_CP0_HWRENA:
1992  		*v = (long)read_gc0_hwrena();
1993  		break;
1994  	case KVM_REG_MIPS_CP0_BADVADDR:
1995  		*v = (long)read_gc0_badvaddr();
1996  		break;
1997  	case KVM_REG_MIPS_CP0_BADINSTR:
1998  		if (!cpu_guest_has_badinstr)
1999  			return -EINVAL;
2000  		*v = read_gc0_badinstr();
2001  		break;
2002  	case KVM_REG_MIPS_CP0_BADINSTRP:
2003  		if (!cpu_guest_has_badinstrp)
2004  			return -EINVAL;
2005  		*v = read_gc0_badinstrp();
2006  		break;
2007  	case KVM_REG_MIPS_CP0_COUNT:
2008  		*v = kvm_mips_read_count(vcpu);
2009  		break;
2010  	case KVM_REG_MIPS_CP0_ENTRYHI:
2011  		*v = (long)read_gc0_entryhi();
2012  		break;
2013  	case KVM_REG_MIPS_CP0_COMPARE:
2014  		*v = (long)read_gc0_compare();
2015  		break;
2016  	case KVM_REG_MIPS_CP0_STATUS:
2017  		*v = (long)read_gc0_status();
2018  		break;
2019  	case KVM_REG_MIPS_CP0_INTCTL:
2020  		*v = read_gc0_intctl();
2021  		break;
2022  	case KVM_REG_MIPS_CP0_CAUSE:
2023  		*v = (long)read_gc0_cause();
2024  		break;
2025  	case KVM_REG_MIPS_CP0_EPC:
2026  		*v = (long)read_gc0_epc();
2027  		break;
2028  	case KVM_REG_MIPS_CP0_PRID:
2029  		switch (boot_cpu_type()) {
2030  		case CPU_CAVIUM_OCTEON3:
2031  			/* Octeon III has a read-only guest.PRid */
2032  			*v = read_gc0_prid();
2033  			break;
2034  		default:
2035  			*v = (long)kvm_read_c0_guest_prid(cop0);
2036  			break;
2037  		}
2038  		break;
2039  	case KVM_REG_MIPS_CP0_EBASE:
2040  		*v = kvm_vz_read_gc0_ebase();
2041  		break;
2042  	case KVM_REG_MIPS_CP0_CONFIG:
2043  		*v = read_gc0_config();
2044  		break;
2045  	case KVM_REG_MIPS_CP0_CONFIG1:
2046  		if (!cpu_guest_has_conf1)
2047  			return -EINVAL;
2048  		*v = read_gc0_config1();
2049  		break;
2050  	case KVM_REG_MIPS_CP0_CONFIG2:
2051  		if (!cpu_guest_has_conf2)
2052  			return -EINVAL;
2053  		*v = read_gc0_config2();
2054  		break;
2055  	case KVM_REG_MIPS_CP0_CONFIG3:
2056  		if (!cpu_guest_has_conf3)
2057  			return -EINVAL;
2058  		*v = read_gc0_config3();
2059  		break;
2060  	case KVM_REG_MIPS_CP0_CONFIG4:
2061  		if (!cpu_guest_has_conf4)
2062  			return -EINVAL;
2063  		*v = read_gc0_config4();
2064  		break;
2065  	case KVM_REG_MIPS_CP0_CONFIG5:
2066  		if (!cpu_guest_has_conf5)
2067  			return -EINVAL;
2068  		*v = read_gc0_config5();
2069  		break;
2070  	case KVM_REG_MIPS_CP0_CONFIG6:
2071  		*v = kvm_read_sw_gc0_config6(cop0);
2072  		break;
2073  	case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
2074  		if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
2075  			return -EINVAL;
2076  		idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0);
2077  		if (idx >= ARRAY_SIZE(vcpu->arch.maar))
2078  			return -EINVAL;
2079  		*v = vcpu->arch.maar[idx];
2080  		break;
2081  	case KVM_REG_MIPS_CP0_MAARI:
2082  		if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
2083  			return -EINVAL;
2084  		*v = kvm_read_sw_gc0_maari(&vcpu->arch.cop0);
2085  		break;
2086  #ifdef CONFIG_64BIT
2087  	case KVM_REG_MIPS_CP0_XCONTEXT:
2088  		*v = read_gc0_xcontext();
2089  		break;
2090  #endif
2091  	case KVM_REG_MIPS_CP0_ERROREPC:
2092  		*v = (long)read_gc0_errorepc();
2093  		break;
2094  	case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
2095  		idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
2096  		if (!cpu_guest_has_kscr(idx))
2097  			return -EINVAL;
2098  		switch (idx) {
2099  		case 2:
2100  			*v = (long)read_gc0_kscratch1();
2101  			break;
2102  		case 3:
2103  			*v = (long)read_gc0_kscratch2();
2104  			break;
2105  		case 4:
2106  			*v = (long)read_gc0_kscratch3();
2107  			break;
2108  		case 5:
2109  			*v = (long)read_gc0_kscratch4();
2110  			break;
2111  		case 6:
2112  			*v = (long)read_gc0_kscratch5();
2113  			break;
2114  		case 7:
2115  			*v = (long)read_gc0_kscratch6();
2116  			break;
2117  		}
2118  		break;
2119  	case KVM_REG_MIPS_COUNT_CTL:
2120  		*v = vcpu->arch.count_ctl;
2121  		break;
2122  	case KVM_REG_MIPS_COUNT_RESUME:
2123  		*v = ktime_to_ns(vcpu->arch.count_resume);
2124  		break;
2125  	case KVM_REG_MIPS_COUNT_HZ:
2126  		*v = vcpu->arch.count_hz;
2127  		break;
2128  	default:
2129  		return -EINVAL;
2130  	}
2131  	return 0;
2132  }
2133  
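/*
 * Writes to the Config registers below are filtered through the per-register
 * user write masks.  The pattern used is:
 *
 *	change = (cur ^ v) & kvm_vz_configN_user_wrmask(vcpu);
 *	v = cur ^ change;
 *
 * i.e. only the bit flips that userspace is permitted to make are applied;
 * attempts to modify read-only bits are silently dropped rather than failed.
 */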
2134  static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
2135  			      const struct kvm_one_reg *reg,
2136  			      s64 v)
2137  {
2138  	struct mips_coproc *cop0 = &vcpu->arch.cop0;
2139  	unsigned int idx;
2140  	int ret = 0;
2141  	unsigned int cur, change;
2142  
2143  	switch (reg->id) {
2144  	case KVM_REG_MIPS_CP0_INDEX:
2145  		write_gc0_index(v);
2146  		break;
2147  	case KVM_REG_MIPS_CP0_ENTRYLO0:
2148  		write_gc0_entrylo0(entrylo_user_to_kvm(v));
2149  		break;
2150  	case KVM_REG_MIPS_CP0_ENTRYLO1:
2151  		write_gc0_entrylo1(entrylo_user_to_kvm(v));
2152  		break;
2153  	case KVM_REG_MIPS_CP0_CONTEXT:
2154  		write_gc0_context(v);
2155  		break;
2156  	case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
2157  		if (!cpu_guest_has_contextconfig)
2158  			return -EINVAL;
2159  		write_gc0_contextconfig(v);
2160  		break;
2161  	case KVM_REG_MIPS_CP0_USERLOCAL:
2162  		if (!cpu_guest_has_userlocal)
2163  			return -EINVAL;
2164  		write_gc0_userlocal(v);
2165  		break;
2166  #ifdef CONFIG_64BIT
2167  	case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
2168  		if (!cpu_guest_has_contextconfig)
2169  			return -EINVAL;
2170  		write_gc0_xcontextconfig(v);
2171  		break;
2172  #endif
2173  	case KVM_REG_MIPS_CP0_PAGEMASK:
2174  		write_gc0_pagemask(v);
2175  		break;
2176  	case KVM_REG_MIPS_CP0_PAGEGRAIN:
2177  		write_gc0_pagegrain(v);
2178  		break;
2179  	case KVM_REG_MIPS_CP0_SEGCTL0:
2180  		if (!cpu_guest_has_segments)
2181  			return -EINVAL;
2182  		write_gc0_segctl0(v);
2183  		break;
2184  	case KVM_REG_MIPS_CP0_SEGCTL1:
2185  		if (!cpu_guest_has_segments)
2186  			return -EINVAL;
2187  		write_gc0_segctl1(v);
2188  		break;
2189  	case KVM_REG_MIPS_CP0_SEGCTL2:
2190  		if (!cpu_guest_has_segments)
2191  			return -EINVAL;
2192  		write_gc0_segctl2(v);
2193  		break;
2194  	case KVM_REG_MIPS_CP0_PWBASE:
2195  		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
2196  			return -EINVAL;
2197  		write_gc0_pwbase(v);
2198  		break;
2199  	case KVM_REG_MIPS_CP0_PWFIELD:
2200  		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
2201  			return -EINVAL;
2202  		write_gc0_pwfield(v);
2203  		break;
2204  	case KVM_REG_MIPS_CP0_PWSIZE:
2205  		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
2206  			return -EINVAL;
2207  		write_gc0_pwsize(v);
2208  		break;
2209  	case KVM_REG_MIPS_CP0_WIRED:
2210  		change_gc0_wired(MIPSR6_WIRED_WIRED, v);
2211  		break;
2212  	case KVM_REG_MIPS_CP0_PWCTL:
2213  		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
2214  			return -EINVAL;
2215  		write_gc0_pwctl(v);
2216  		break;
2217  	case KVM_REG_MIPS_CP0_HWRENA:
2218  		write_gc0_hwrena(v);
2219  		break;
2220  	case KVM_REG_MIPS_CP0_BADVADDR:
2221  		write_gc0_badvaddr(v);
2222  		break;
2223  	case KVM_REG_MIPS_CP0_BADINSTR:
2224  		if (!cpu_guest_has_badinstr)
2225  			return -EINVAL;
2226  		write_gc0_badinstr(v);
2227  		break;
2228  	case KVM_REG_MIPS_CP0_BADINSTRP:
2229  		if (!cpu_guest_has_badinstrp)
2230  			return -EINVAL;
2231  		write_gc0_badinstrp(v);
2232  		break;
2233  	case KVM_REG_MIPS_CP0_COUNT:
2234  		kvm_mips_write_count(vcpu, v);
2235  		break;
2236  	case KVM_REG_MIPS_CP0_ENTRYHI:
2237  		write_gc0_entryhi(v);
2238  		break;
2239  	case KVM_REG_MIPS_CP0_COMPARE:
2240  		kvm_mips_write_compare(vcpu, v, false);
2241  		break;
2242  	case KVM_REG_MIPS_CP0_STATUS:
2243  		write_gc0_status(v);
2244  		break;
2245  	case KVM_REG_MIPS_CP0_INTCTL:
2246  		write_gc0_intctl(v);
2247  		break;
2248  	case KVM_REG_MIPS_CP0_CAUSE:
2249  		/*
2250  		 * If the timer is stopped or started (DC bit) it must look
2251  		 * atomic with changes to the timer interrupt pending bit (TI).
2252  		 * A timer interrupt should not happen in between.
2253  		 */
2254  		if ((read_gc0_cause() ^ v) & CAUSEF_DC) {
2255  			if (v & CAUSEF_DC) {
2256  				/* disable timer first */
2257  				kvm_mips_count_disable_cause(vcpu);
2258  				change_gc0_cause((u32)~CAUSEF_DC, v);
2259  			} else {
2260  				/* enable timer last */
2261  				change_gc0_cause((u32)~CAUSEF_DC, v);
2262  				kvm_mips_count_enable_cause(vcpu);
2263  			}
2264  		} else {
2265  			write_gc0_cause(v);
2266  		}
2267  		break;
2268  	case KVM_REG_MIPS_CP0_EPC:
2269  		write_gc0_epc(v);
2270  		break;
2271  	case KVM_REG_MIPS_CP0_PRID:
2272  		switch (boot_cpu_type()) {
2273  		case CPU_CAVIUM_OCTEON3:
2274  		/* Octeon III has a guest.PRid, but it's read-only */
2275  			break;
2276  		default:
2277  			kvm_write_c0_guest_prid(cop0, v);
2278  			break;
2279  		}
2280  		break;
2281  	case KVM_REG_MIPS_CP0_EBASE:
2282  		kvm_vz_write_gc0_ebase(v);
2283  		break;
2284  	case KVM_REG_MIPS_CP0_CONFIG:
2285  		cur = read_gc0_config();
2286  		change = (cur ^ v) & kvm_vz_config_user_wrmask(vcpu);
2287  		if (change) {
2288  			v = cur ^ change;
2289  			write_gc0_config(v);
2290  		}
2291  		break;
2292  	case KVM_REG_MIPS_CP0_CONFIG1:
2293  		if (!cpu_guest_has_conf1)
2294  			break;
2295  		cur = read_gc0_config1();
2296  		change = (cur ^ v) & kvm_vz_config1_user_wrmask(vcpu);
2297  		if (change) {
2298  			v = cur ^ change;
2299  			write_gc0_config1(v);
2300  		}
2301  		break;
2302  	case KVM_REG_MIPS_CP0_CONFIG2:
2303  		if (!cpu_guest_has_conf2)
2304  			break;
2305  		cur = read_gc0_config2();
2306  		change = (cur ^ v) & kvm_vz_config2_user_wrmask(vcpu);
2307  		if (change) {
2308  			v = cur ^ change;
2309  			write_gc0_config2(v);
2310  		}
2311  		break;
2312  	case KVM_REG_MIPS_CP0_CONFIG3:
2313  		if (!cpu_guest_has_conf3)
2314  			break;
2315  		cur = read_gc0_config3();
2316  		change = (cur ^ v) & kvm_vz_config3_user_wrmask(vcpu);
2317  		if (change) {
2318  			v = cur ^ change;
2319  			write_gc0_config3(v);
2320  		}
2321  		break;
2322  	case KVM_REG_MIPS_CP0_CONFIG4:
2323  		if (!cpu_guest_has_conf4)
2324  			break;
2325  		cur = read_gc0_config4();
2326  		change = (cur ^ v) & kvm_vz_config4_user_wrmask(vcpu);
2327  		if (change) {
2328  			v = cur ^ change;
2329  			write_gc0_config4(v);
2330  		}
2331  		break;
2332  	case KVM_REG_MIPS_CP0_CONFIG5:
2333  		if (!cpu_guest_has_conf5)
2334  			break;
2335  		cur = read_gc0_config5();
2336  		change = (cur ^ v) & kvm_vz_config5_user_wrmask(vcpu);
2337  		if (change) {
2338  			v = cur ^ change;
2339  			write_gc0_config5(v);
2340  		}
2341  		break;
2342  	case KVM_REG_MIPS_CP0_CONFIG6:
2343  		cur = kvm_read_sw_gc0_config6(cop0);
2344  		change = (cur ^ v) & kvm_vz_config6_user_wrmask(vcpu);
2345  		if (change) {
2346  			v = cur ^ change;
2347  			kvm_write_sw_gc0_config6(cop0, (int)v);
2348  		}
2349  		break;
2350  	case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
2351  		if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
2352  			return -EINVAL;
2353  		idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0);
2354  		if (idx >= ARRAY_SIZE(vcpu->arch.maar))
2355  			return -EINVAL;
2356  		vcpu->arch.maar[idx] = mips_process_maar(dmtc_op, v);
2357  		break;
2358  	case KVM_REG_MIPS_CP0_MAARI:
2359  		if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
2360  			return -EINVAL;
2361  		kvm_write_maari(vcpu, v);
2362  		break;
2363  #ifdef CONFIG_64BIT
2364  	case KVM_REG_MIPS_CP0_XCONTEXT:
2365  		write_gc0_xcontext(v);
2366  		break;
2367  #endif
2368  	case KVM_REG_MIPS_CP0_ERROREPC:
2369  		write_gc0_errorepc(v);
2370  		break;
2371  	case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
2372  		idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
2373  		if (!cpu_guest_has_kscr(idx))
2374  			return -EINVAL;
2375  		switch (idx) {
2376  		case 2:
2377  			write_gc0_kscratch1(v);
2378  			break;
2379  		case 3:
2380  			write_gc0_kscratch2(v);
2381  			break;
2382  		case 4:
2383  			write_gc0_kscratch3(v);
2384  			break;
2385  		case 5:
2386  			write_gc0_kscratch4(v);
2387  			break;
2388  		case 6:
2389  			write_gc0_kscratch5(v);
2390  			break;
2391  		case 7:
2392  			write_gc0_kscratch6(v);
2393  			break;
2394  		}
2395  		break;
2396  	case KVM_REG_MIPS_COUNT_CTL:
2397  		ret = kvm_mips_set_count_ctl(vcpu, v);
2398  		break;
2399  	case KVM_REG_MIPS_COUNT_RESUME:
2400  		ret = kvm_mips_set_count_resume(vcpu, v);
2401  		break;
2402  	case KVM_REG_MIPS_COUNT_HZ:
2403  		ret = kvm_mips_set_count_hz(vcpu, v);
2404  		break;
2405  	default:
2406  		return -EINVAL;
2407  	}
2408  	return ret;
2409  }
2410  
2411  #define guestid_cache(cpu)	(cpu_data[cpu].guestid_cache)
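/*
 * GuestID allocation works like ASID allocation: guestid_cache holds the most
 * recently allocated ID, with the bits above GUESTID_MASK acting as a
 * generation ("version") number.  For example, with an 8-bit GuestID field,
 * IDs 0x101..0x1ff belong to one generation; once they run out the guest and
 * root TLBs are flushed of guest entries and allocation continues at 0x201
 * (any ID whose GuestID field is 0 is skipped, as 0 is reserved for root).
 */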
2412  static void kvm_vz_get_new_guestid(unsigned long cpu, struct kvm_vcpu *vcpu)
2413  {
2414  	unsigned long guestid = guestid_cache(cpu);
2415  
2416  	if (!(++guestid & GUESTID_MASK)) {
2417  		if (cpu_has_vtag_icache)
2418  			flush_icache_all();
2419  
2420  		if (!guestid)		/* fix version if needed */
2421  			guestid = GUESTID_FIRST_VERSION;
2422  
2423  		++guestid;		/* guestid 0 reserved for root */
2424  
2425  		/* start new guestid cycle */
2426  		kvm_vz_local_flush_roottlb_all_guests();
2427  		kvm_vz_local_flush_guesttlb_all();
2428  	}
2429  
2430  	guestid_cache(cpu) = guestid;
2431  }
2432  
2433  /* Returns 1 if the guest TLB may be clobbered */
2434  static int kvm_vz_check_requests(struct kvm_vcpu *vcpu, int cpu)
2435  {
2436  	int ret = 0;
2437  	int i;
2438  
2439  	if (!kvm_request_pending(vcpu))
2440  		return 0;
2441  
2442  	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
2443  		if (cpu_has_guestid) {
2444  			/* Drop all GuestIDs for this VCPU */
2445  			for_each_possible_cpu(i)
2446  				vcpu->arch.vzguestid[i] = 0;
2447  			/* This will clobber guest TLB contents too */
2448  			ret = 1;
2449  		}
2450  		/*
2451  		 * For Root ASID Dealias (RAD) we don't do anything here, but we
2452  		 * still need the request to ensure we recheck asid_flush_mask.
2453  		 * We can still return 0 as only the root TLB will be affected
2454  		 * by a root ASID flush.
2455  		 */
2456  	}
2457  
2458  	return ret;
2459  }
2460  
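/*
 * Wired guest TLB entries are owned by the guest but live in shared hardware,
 * so they are copied out to vcpu->arch.wired_tlb when the VCPU stops running
 * on a CPU and copied back in by kvm_vz_vcpu_load_wired() before it runs
 * again.  Entries the guest has un-wired since the last save are invalidated
 * with unique EntryHi values so they cannot match any guest address.
 */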
2461  static void kvm_vz_vcpu_save_wired(struct kvm_vcpu *vcpu)
2462  {
2463  	unsigned int wired = read_gc0_wired();
2464  	struct kvm_mips_tlb *tlbs;
2465  	int i;
2466  
2467  	/* Expand the wired TLB array if necessary */
2468  	wired &= MIPSR6_WIRED_WIRED;
2469  	if (wired > vcpu->arch.wired_tlb_limit) {
2470  		tlbs = krealloc(vcpu->arch.wired_tlb, wired *
2471  				sizeof(*vcpu->arch.wired_tlb), GFP_ATOMIC);
2472  		if (WARN_ON(!tlbs)) {
2473  			/* Save whatever we can */
2474  			wired = vcpu->arch.wired_tlb_limit;
2475  		} else {
2476  			vcpu->arch.wired_tlb = tlbs;
2477  			vcpu->arch.wired_tlb_limit = wired;
2478  		}
2479  	}
2480  
2481  	if (wired)
2482  		/* Save wired entries from the guest TLB */
2483  		kvm_vz_save_guesttlb(vcpu->arch.wired_tlb, 0, wired);
2484  	/* Invalidate any dropped entries since last time */
2485  	for (i = wired; i < vcpu->arch.wired_tlb_used; ++i) {
2486  		vcpu->arch.wired_tlb[i].tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
2487  		vcpu->arch.wired_tlb[i].tlb_lo[0] = 0;
2488  		vcpu->arch.wired_tlb[i].tlb_lo[1] = 0;
2489  		vcpu->arch.wired_tlb[i].tlb_mask = 0;
2490  	}
2491  	vcpu->arch.wired_tlb_used = wired;
2492  }
2493  
2494  static void kvm_vz_vcpu_load_wired(struct kvm_vcpu *vcpu)
2495  {
2496  	/* Load wired entries into the guest TLB */
2497  	if (vcpu->arch.wired_tlb)
2498  		kvm_vz_load_guesttlb(vcpu->arch.wired_tlb, 0,
2499  				     vcpu->arch.wired_tlb_used);
2500  }
2501  
2502  static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu)
2503  {
2504  	struct kvm *kvm = vcpu->kvm;
2505  	struct mm_struct *gpa_mm = &kvm->arch.gpa_mm;
2506  	bool migrated;
2507  
2508  	/*
2509  	 * Are we entering guest context on a different CPU to last time?
2510  	 * If so, the VCPU's guest TLB state on this CPU may be stale.
2511  	 */
2512  	migrated = (vcpu->arch.last_exec_cpu != cpu);
2513  	vcpu->arch.last_exec_cpu = cpu;
2514  
2515  	/*
2516  	 * A vcpu's GuestID is set in GuestCtl1.ID when the vcpu is loaded and
2517  	 * remains set until another vcpu is loaded in.  As a rule GuestRID
2518  	 * remains zeroed when in root context unless the kernel is busy
2519  	 * manipulating guest tlb entries.
2520  	 */
2521  	if (cpu_has_guestid) {
2522  		/*
2523  		 * Check if our GuestID is of an older version and thus invalid.
2524  		 *
2525  		 * We also discard the stored GuestID if we've executed on
2526  		 * another CPU, as the guest mappings may have changed without
2527  		 * hypervisor knowledge.
2528  		 */
2529  		if (migrated ||
2530  		    (vcpu->arch.vzguestid[cpu] ^ guestid_cache(cpu)) &
2531  					GUESTID_VERSION_MASK) {
2532  			kvm_vz_get_new_guestid(cpu, vcpu);
2533  			vcpu->arch.vzguestid[cpu] = guestid_cache(cpu);
2534  			trace_kvm_guestid_change(vcpu,
2535  						 vcpu->arch.vzguestid[cpu]);
2536  		}
2537  
2538  		/* Restore GuestID */
2539  		change_c0_guestctl1(GUESTID_MASK, vcpu->arch.vzguestid[cpu]);
2540  	} else {
2541  		/*
2542  		 * The Guest TLB only stores a single guest's TLB state, so
2543  		 * flush it if another VCPU has executed on this CPU.
2544  		 *
2545  		 * We also flush if we've executed on another CPU, as the guest
2546  		 * mappings may have changed without hypervisor knowledge.
2547  		 */
2548  		if (migrated || last_exec_vcpu[cpu] != vcpu)
2549  			kvm_vz_local_flush_guesttlb_all();
2550  		last_exec_vcpu[cpu] = vcpu;
2551  
2552  		/*
2553  		 * Root ASID dealiases guest GPA mappings in the root TLB.
2554  		 * Allocate new root ASID if needed.
2555  		 */
2556  		if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask))
2557  			get_new_mmu_context(gpa_mm);
2558  		else
2559  			check_mmu_context(gpa_mm);
2560  	}
2561  }
2562  
2563  static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2564  {
2565  	struct mips_coproc *cop0 = &vcpu->arch.cop0;
2566  	bool migrated, all;
2567  
2568  	/*
2569  	 * Have we migrated to a different CPU?
2570  	 * If so, any old guest TLB state may be stale.
2571  	 */
2572  	migrated = (vcpu->arch.last_sched_cpu != cpu);
2573  
2574  	/*
2575  	 * Was this the last VCPU to run on this CPU?
2576  	 * If not, any old guest state from this VCPU will have been clobbered.
2577  	 */
2578  	all = migrated || (last_vcpu[cpu] != vcpu);
2579  	last_vcpu[cpu] = vcpu;
2580  
2581  	/*
2582  	 * Restore CP0_Wired unconditionally as we clear it after use, and
2583  	 * restore wired guest TLB entries (while in guest context).
2584  	 */
2585  	kvm_restore_gc0_wired(cop0);
2586  	if (current->flags & PF_VCPU) {
2587  		tlbw_use_hazard();
2588  		kvm_vz_vcpu_load_tlb(vcpu, cpu);
2589  		kvm_vz_vcpu_load_wired(vcpu);
2590  	}
2591  
2592  	/*
2593  	 * Restore timer state regardless, as e.g. Cause.TI can change over time
2594  	 * if left unmaintained.
2595  	 */
2596  	kvm_vz_restore_timer(vcpu);
2597  
2598  	/* Set MC bit if we want to trace guest mode changes */
2599  	if (kvm_trace_guest_mode_change)
2600  		set_c0_guestctl0(MIPS_GCTL0_MC);
2601  	else
2602  		clear_c0_guestctl0(MIPS_GCTL0_MC);
2603  
2604  	/* Don't bother restoring registers multiple times unless necessary */
2605  	if (!all)
2606  		return 0;
2607  
2608  	/*
2609  	 * Restore config registers first, as some implementations restrict
2610  	 * writes to other registers when the corresponding feature bits aren't
2611  	 * set. For example Status.CU1 cannot be set unless Config1.FP is set.
2612  	 */
2613  	kvm_restore_gc0_config(cop0);
2614  	if (cpu_guest_has_conf1)
2615  		kvm_restore_gc0_config1(cop0);
2616  	if (cpu_guest_has_conf2)
2617  		kvm_restore_gc0_config2(cop0);
2618  	if (cpu_guest_has_conf3)
2619  		kvm_restore_gc0_config3(cop0);
2620  	if (cpu_guest_has_conf4)
2621  		kvm_restore_gc0_config4(cop0);
2622  	if (cpu_guest_has_conf5)
2623  		kvm_restore_gc0_config5(cop0);
2624  	if (cpu_guest_has_conf6)
2625  		kvm_restore_gc0_config6(cop0);
2626  	if (cpu_guest_has_conf7)
2627  		kvm_restore_gc0_config7(cop0);
2628  
2629  	kvm_restore_gc0_index(cop0);
2630  	kvm_restore_gc0_entrylo0(cop0);
2631  	kvm_restore_gc0_entrylo1(cop0);
2632  	kvm_restore_gc0_context(cop0);
2633  	if (cpu_guest_has_contextconfig)
2634  		kvm_restore_gc0_contextconfig(cop0);
2635  #ifdef CONFIG_64BIT
2636  	kvm_restore_gc0_xcontext(cop0);
2637  	if (cpu_guest_has_contextconfig)
2638  		kvm_restore_gc0_xcontextconfig(cop0);
2639  #endif
2640  	kvm_restore_gc0_pagemask(cop0);
2641  	kvm_restore_gc0_pagegrain(cop0);
2642  	kvm_restore_gc0_hwrena(cop0);
2643  	kvm_restore_gc0_badvaddr(cop0);
2644  	kvm_restore_gc0_entryhi(cop0);
2645  	kvm_restore_gc0_status(cop0);
2646  	kvm_restore_gc0_intctl(cop0);
2647  	kvm_restore_gc0_epc(cop0);
2648  	kvm_vz_write_gc0_ebase(kvm_read_sw_gc0_ebase(cop0));
2649  	if (cpu_guest_has_userlocal)
2650  		kvm_restore_gc0_userlocal(cop0);
2651  
2652  	kvm_restore_gc0_errorepc(cop0);
2653  
2654  	/* restore KScratch registers if enabled in guest */
2655  	if (cpu_guest_has_conf4) {
2656  		if (cpu_guest_has_kscr(2))
2657  			kvm_restore_gc0_kscratch1(cop0);
2658  		if (cpu_guest_has_kscr(3))
2659  			kvm_restore_gc0_kscratch2(cop0);
2660  		if (cpu_guest_has_kscr(4))
2661  			kvm_restore_gc0_kscratch3(cop0);
2662  		if (cpu_guest_has_kscr(5))
2663  			kvm_restore_gc0_kscratch4(cop0);
2664  		if (cpu_guest_has_kscr(6))
2665  			kvm_restore_gc0_kscratch5(cop0);
2666  		if (cpu_guest_has_kscr(7))
2667  			kvm_restore_gc0_kscratch6(cop0);
2668  	}
2669  
2670  	if (cpu_guest_has_badinstr)
2671  		kvm_restore_gc0_badinstr(cop0);
2672  	if (cpu_guest_has_badinstrp)
2673  		kvm_restore_gc0_badinstrp(cop0);
2674  
2675  	if (cpu_guest_has_segments) {
2676  		kvm_restore_gc0_segctl0(cop0);
2677  		kvm_restore_gc0_segctl1(cop0);
2678  		kvm_restore_gc0_segctl2(cop0);
2679  	}
2680  
2681  	/* restore HTW registers */
2682  	if (cpu_guest_has_htw || cpu_guest_has_ldpte) {
2683  		kvm_restore_gc0_pwbase(cop0);
2684  		kvm_restore_gc0_pwfield(cop0);
2685  		kvm_restore_gc0_pwsize(cop0);
2686  		kvm_restore_gc0_pwctl(cop0);
2687  	}
2688  
2689  	/* restore Root.GuestCtl2 from unused Guest guestctl2 register */
2690  	if (cpu_has_guestctl2)
2691  		write_c0_guestctl2(
2692  			cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL]);
2693  
2694  	/*
2695  	 * We should clear linked load bit to break interrupted atomics. This
2696  	 * prevents a SC on the next VCPU from succeeding by matching a LL on
2697  	 * the previous VCPU.
2698  	 */
2699  	if (vcpu->kvm->created_vcpus > 1)
2700  		write_gc0_lladdr(0);
2701  
2702  	return 0;
2703  }
2704  
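/*
 * Counterpart of kvm_vz_vcpu_load() above: copy the live guest CP0 context
 * out of the hardware guest registers into the soft copies in vcpu->arch.cop0
 * so another VCPU (or the host) may reuse the hardware guest context, and
 * stash Root.GuestCtl2 in the otherwise unused guest GuestCtl2 slot.
 */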
2705  static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
2706  {
2707  	struct mips_coproc *cop0 = &vcpu->arch.cop0;
2708  
2709  	if (current->flags & PF_VCPU)
2710  		kvm_vz_vcpu_save_wired(vcpu);
2711  
2712  	kvm_lose_fpu(vcpu);
2713  
2714  	kvm_save_gc0_index(cop0);
2715  	kvm_save_gc0_entrylo0(cop0);
2716  	kvm_save_gc0_entrylo1(cop0);
2717  	kvm_save_gc0_context(cop0);
2718  	if (cpu_guest_has_contextconfig)
2719  		kvm_save_gc0_contextconfig(cop0);
2720  #ifdef CONFIG_64BIT
2721  	kvm_save_gc0_xcontext(cop0);
2722  	if (cpu_guest_has_contextconfig)
2723  		kvm_save_gc0_xcontextconfig(cop0);
2724  #endif
2725  	kvm_save_gc0_pagemask(cop0);
2726  	kvm_save_gc0_pagegrain(cop0);
2727  	kvm_save_gc0_wired(cop0);
2728  	/* allow wired TLB entries to be overwritten */
2729  	clear_gc0_wired(MIPSR6_WIRED_WIRED);
2730  	kvm_save_gc0_hwrena(cop0);
2731  	kvm_save_gc0_badvaddr(cop0);
2732  	kvm_save_gc0_entryhi(cop0);
2733  	kvm_save_gc0_status(cop0);
2734  	kvm_save_gc0_intctl(cop0);
2735  	kvm_save_gc0_epc(cop0);
2736  	kvm_write_sw_gc0_ebase(cop0, kvm_vz_read_gc0_ebase());
2737  	if (cpu_guest_has_userlocal)
2738  		kvm_save_gc0_userlocal(cop0);
2739  
2740  	/* only save implemented config registers */
2741  	kvm_save_gc0_config(cop0);
2742  	if (cpu_guest_has_conf1)
2743  		kvm_save_gc0_config1(cop0);
2744  	if (cpu_guest_has_conf2)
2745  		kvm_save_gc0_config2(cop0);
2746  	if (cpu_guest_has_conf3)
2747  		kvm_save_gc0_config3(cop0);
2748  	if (cpu_guest_has_conf4)
2749  		kvm_save_gc0_config4(cop0);
2750  	if (cpu_guest_has_conf5)
2751  		kvm_save_gc0_config5(cop0);
2752  	if (cpu_guest_has_conf6)
2753  		kvm_save_gc0_config6(cop0);
2754  	if (cpu_guest_has_conf7)
2755  		kvm_save_gc0_config7(cop0);
2756  
2757  	kvm_save_gc0_errorepc(cop0);
2758  
2759  	/* save KScratch registers if enabled in guest */
2760  	if (cpu_guest_has_conf4) {
2761  		if (cpu_guest_has_kscr(2))
2762  			kvm_save_gc0_kscratch1(cop0);
2763  		if (cpu_guest_has_kscr(3))
2764  			kvm_save_gc0_kscratch2(cop0);
2765  		if (cpu_guest_has_kscr(4))
2766  			kvm_save_gc0_kscratch3(cop0);
2767  		if (cpu_guest_has_kscr(5))
2768  			kvm_save_gc0_kscratch4(cop0);
2769  		if (cpu_guest_has_kscr(6))
2770  			kvm_save_gc0_kscratch5(cop0);
2771  		if (cpu_guest_has_kscr(7))
2772  			kvm_save_gc0_kscratch6(cop0);
2773  	}
2774  
2775  	if (cpu_guest_has_badinstr)
2776  		kvm_save_gc0_badinstr(cop0);
2777  	if (cpu_guest_has_badinstrp)
2778  		kvm_save_gc0_badinstrp(cop0);
2779  
2780  	if (cpu_guest_has_segments) {
2781  		kvm_save_gc0_segctl0(cop0);
2782  		kvm_save_gc0_segctl1(cop0);
2783  		kvm_save_gc0_segctl2(cop0);
2784  	}
2785  
2786  	/* save HTW registers if enabled in guest */
2787  	if (cpu_guest_has_ldpte || (cpu_guest_has_htw &&
2788  	    kvm_read_sw_gc0_config3(cop0) & MIPS_CONF3_PW)) {
2789  		kvm_save_gc0_pwbase(cop0);
2790  		kvm_save_gc0_pwfield(cop0);
2791  		kvm_save_gc0_pwsize(cop0);
2792  		kvm_save_gc0_pwctl(cop0);
2793  	}
2794  
2795  	kvm_vz_save_timer(vcpu);
2796  
2797  	/* save Root.GuestCtl2 in unused Guest guestctl2 register */
2798  	if (cpu_has_guestctl2)
2799  		cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] =
2800  			read_c0_guestctl2();
2801  
2802  	return 0;
2803  }
2804  
2805  /**
2806   * kvm_vz_resize_guest_vtlb() - Attempt to resize guest VTLB.
2807   * @size:	Number of guest VTLB entries (0 < @size <= root VTLB entries).
2808   *
2809   * Attempt to resize the guest VTLB by writing guest Config registers. This is
2810   * necessary for cores with a shared root/guest TLB to avoid overlap with wired
2811   * entries in the root VTLB.
2812   *
2813   * Returns:	The resulting guest VTLB size.
2814   */
2815  static unsigned int kvm_vz_resize_guest_vtlb(unsigned int size)
2816  {
2817  	unsigned int config4 = 0, ret = 0, limit;
2818  
2819  	/* Write MMUSize - 1 into guest Config registers */
2820  	if (cpu_guest_has_conf1)
2821  		change_gc0_config1(MIPS_CONF1_TLBS,
2822  				   (size - 1) << MIPS_CONF1_TLBS_SHIFT);
2823  	if (cpu_guest_has_conf4) {
2824  		config4 = read_gc0_config4();
2825  		if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
2826  		    MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT) {
2827  			config4 &= ~MIPS_CONF4_VTLBSIZEEXT;
2828  			config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
2829  				MIPS_CONF4_VTLBSIZEEXT_SHIFT;
2830  		} else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
2831  			   MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT) {
2832  			config4 &= ~MIPS_CONF4_MMUSIZEEXT;
2833  			config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
2834  				MIPS_CONF4_MMUSIZEEXT_SHIFT;
2835  		}
2836  		write_gc0_config4(config4);
2837  	}
2838  
2839  	/*
2840  	 * Set Guest.Wired.Limit = 0 (no limit up to Guest.MMUSize-1), unless it
2841  	 * would exceed Root.Wired.Limit (clearing Guest.Wired.Wired so write
2842  	 * not dropped)
2843  	 */
2844  	if (cpu_has_mips_r6) {
2845  		limit = (read_c0_wired() & MIPSR6_WIRED_LIMIT) >>
2846  						MIPSR6_WIRED_LIMIT_SHIFT;
2847  		if (size - 1 <= limit)
2848  			limit = 0;
2849  		write_gc0_wired(limit << MIPSR6_WIRED_LIMIT_SHIFT);
2850  	}
2851  
2852  	/* Read back MMUSize - 1 */
2853  	back_to_back_c0_hazard();
2854  	if (cpu_guest_has_conf1)
2855  		ret = (read_gc0_config1() & MIPS_CONF1_TLBS) >>
2856  						MIPS_CONF1_TLBS_SHIFT;
2857  	if (config4) {
2858  		if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
2859  		    MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT)
2860  			ret |= ((config4 & MIPS_CONF4_VTLBSIZEEXT) >>
2861  				MIPS_CONF4_VTLBSIZEEXT_SHIFT) <<
2862  				MIPS_CONF1_TLBS_SIZE;
2863  		else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
2864  			 MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT)
2865  			ret |= ((config4 & MIPS_CONF4_MMUSIZEEXT) >>
2866  				MIPS_CONF4_MMUSIZEEXT_SHIFT) <<
2867  				MIPS_CONF1_TLBS_SIZE;
2868  	}
2869  	return ret + 1;
2870  }
2871  
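/*
 * Per-CPU hardware virtualization setup.  On Octeon III the root/guest MMU
 * split is configured via CvmVMConfig; on cores with a shared root/guest
 * VTLB the guest view is shrunk with kvm_vz_resize_guest_vtlb() to leave
 * room for root wired entries plus two spare root entries (e.g. a 64-entry
 * shared VTLB with 6 root wired entries would leave 64 - 6 - 2 = 56 guest
 * entries).  GuestCtl0 is then programmed to hand CP0, MMU, cache and Config
 * control to the guest.
 */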
2872  static int kvm_vz_enable_virtualization_cpu(void)
2873  {
2874  	unsigned int mmu_size, guest_mmu_size, ftlb_size;
2875  	u64 guest_cvmctl, cvmvmconfig;
2876  
2877  	switch (current_cpu_type()) {
2878  	case CPU_CAVIUM_OCTEON3:
2879  		/* Set up guest timer/perfcount IRQ lines */
2880  		guest_cvmctl = read_gc0_cvmctl();
2881  		guest_cvmctl &= ~CVMCTL_IPTI;
2882  		guest_cvmctl |= 7ull << CVMCTL_IPTI_SHIFT;
2883  		guest_cvmctl &= ~CVMCTL_IPPCI;
2884  		guest_cvmctl |= 6ull << CVMCTL_IPPCI_SHIFT;
2885  		write_gc0_cvmctl(guest_cvmctl);
2886  
2887  		cvmvmconfig = read_c0_cvmvmconfig();
2888  		/* No I/O hole translation. */
2889  		cvmvmconfig |= CVMVMCONF_DGHT;
2890  		/* Halve the root MMU size */
2891  		mmu_size = ((cvmvmconfig & CVMVMCONF_MMUSIZEM1)
2892  			    >> CVMVMCONF_MMUSIZEM1_S) + 1;
2893  		guest_mmu_size = mmu_size / 2;
2894  		mmu_size -= guest_mmu_size;
2895  		cvmvmconfig &= ~CVMVMCONF_RMMUSIZEM1;
2896  		cvmvmconfig |= mmu_size - 1;
2897  		write_c0_cvmvmconfig(cvmvmconfig);
2898  
2899  		/* Update our records */
2900  		current_cpu_data.tlbsize = mmu_size;
2901  		current_cpu_data.tlbsizevtlb = mmu_size;
2902  		current_cpu_data.guest.tlbsize = guest_mmu_size;
2903  
2904  		/* Flush moved entries in new (guest) context */
2905  		kvm_vz_local_flush_guesttlb_all();
2906  		break;
2907  	default:
2908  		/*
2909  		 * ImgTec cores tend to use a shared root/guest TLB. To avoid
2910  		 * overlap of root wired and guest entries, the guest TLB may
2911  		 * need resizing.
2912  		 */
2913  		mmu_size = current_cpu_data.tlbsizevtlb;
2914  		ftlb_size = current_cpu_data.tlbsize - mmu_size;
2915  
2916  		/* Try switching to maximum guest VTLB size for flush */
2917  		guest_mmu_size = kvm_vz_resize_guest_vtlb(mmu_size);
2918  		current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;
2919  		kvm_vz_local_flush_guesttlb_all();
2920  
2921  		/*
2922  		 * Reduce to make space for root wired entries and at least 2
2923  		 * root non-wired entries. This does assume that long-term wired
2924  		 * entries won't be added later.
2925  		 */
2926  		guest_mmu_size = mmu_size - num_wired_entries() - 2;
2927  		guest_mmu_size = kvm_vz_resize_guest_vtlb(guest_mmu_size);
2928  		current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;
2929  
2930  		/*
2931  		 * Write the VTLB size, but if another CPU has already written,
2932  		 * check it matches or we won't provide a consistent view to the
2933  		 * guest. If this ever happens it suggests an asymmetric number
2934  		 * of wired entries.
2935  		 */
2936  		if (cmpxchg(&kvm_vz_guest_vtlb_size, 0, guest_mmu_size) &&
2937  		    WARN(guest_mmu_size != kvm_vz_guest_vtlb_size,
2938  			 "Available guest VTLB size mismatch"))
2939  			return -EINVAL;
2940  		break;
2941  	}
2942  
2943  	/*
2944  	 * Enable virtualization features granting guest direct control of
2945  	 * certain features:
2946  	 * CP0=1:	Guest coprocessor 0 context.
2947  	 * AT=Guest:	Guest MMU.
2948  	 * CG=1:	Hit (virtual address) CACHE operations (optional).
2949  	 * CF=1:	Guest Config registers.
2950  	 * CGI=1:	Indexed flush CACHE operations (optional).
2951  	 */
2952  	write_c0_guestctl0(MIPS_GCTL0_CP0 |
2953  			   (MIPS_GCTL0_AT_GUEST << MIPS_GCTL0_AT_SHIFT) |
2954  			   MIPS_GCTL0_CG | MIPS_GCTL0_CF);
2955  	if (cpu_has_guestctl0ext) {
2956  		if (current_cpu_type() != CPU_LOONGSON64)
2957  			set_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
2958  		else
2959  			clear_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
2960  	}
2961  
2962  	if (cpu_has_guestid) {
2963  		write_c0_guestctl1(0);
2964  		kvm_vz_local_flush_roottlb_all_guests();
2965  
2966  		GUESTID_MASK = current_cpu_data.guestid_mask;
2967  		GUESTID_FIRST_VERSION = GUESTID_MASK + 1;
2968  		GUESTID_VERSION_MASK = ~GUESTID_MASK;
2969  
2970  		current_cpu_data.guestid_cache = GUESTID_FIRST_VERSION;
2971  	}
2972  
2973  	/* clear any pending injected virtual guest interrupts */
2974  	if (cpu_has_guestctl2)
2975  		clear_c0_guestctl2(0x3f << 10);
2976  
2977  #ifdef CONFIG_CPU_LOONGSON64
2978  	/* Control guest CCA attribute */
2979  	if (cpu_has_csr())
2980  		csr_writel(csr_readl(0xffffffec) | 0x1, 0xffffffec);
2981  #endif
2982  
2983  	return 0;
2984  }
2985  
2986  static void kvm_vz_disable_virtualization_cpu(void)
2987  {
2988  	u64 cvmvmconfig;
2989  	unsigned int mmu_size;
2990  
2991  	/* Flush any remaining guest TLB entries */
2992  	kvm_vz_local_flush_guesttlb_all();
2993  
2994  	switch (current_cpu_type()) {
2995  	case CPU_CAVIUM_OCTEON3:
2996  		/*
2997  		 * Allocate whole TLB for root. Existing guest TLB entries will
2998  		 * change ownership to the root TLB. We should be safe though as
2999  		 * they've already been flushed above while in guest TLB.
3000  		 */
3001  		cvmvmconfig = read_c0_cvmvmconfig();
3002  		mmu_size = ((cvmvmconfig & CVMVMCONF_MMUSIZEM1)
3003  			    >> CVMVMCONF_MMUSIZEM1_S) + 1;
3004  		cvmvmconfig &= ~CVMVMCONF_RMMUSIZEM1;
3005  		cvmvmconfig |= mmu_size - 1;
3006  		write_c0_cvmvmconfig(cvmvmconfig);
3007  
3008  		/* Update our records */
3009  		current_cpu_data.tlbsize = mmu_size;
3010  		current_cpu_data.tlbsizevtlb = mmu_size;
3011  		current_cpu_data.guest.tlbsize = 0;
3012  
3013  		/* Flush moved entries in new (root) context */
3014  		local_flush_tlb_all();
3015  		break;
3016  	}
3017  
3018  	if (cpu_has_guestid) {
3019  		write_c0_guestctl1(0);
3020  		kvm_vz_local_flush_roottlb_all_guests();
3021  	}
3022  }
3023  
3024  static int kvm_vz_check_extension(struct kvm *kvm, long ext)
3025  {
3026  	int r;
3027  
3028  	switch (ext) {
3029  	case KVM_CAP_MIPS_VZ:
3030  		/* we wouldn't be here unless cpu_has_vz */
3031  		r = 1;
3032  		break;
3033  #ifdef CONFIG_64BIT
3034  	case KVM_CAP_MIPS_64BIT:
3035  		/* We support 64-bit registers/operations and addresses */
3036  		r = 2;
3037  		break;
3038  #endif
3039  	case KVM_CAP_IOEVENTFD:
3040  		r = 1;
3041  		break;
3042  	default:
3043  		r = 0;
3044  		break;
3045  	}
3046  
3047  	return r;
3048  }
3049  
3050  static int kvm_vz_vcpu_init(struct kvm_vcpu *vcpu)
3051  {
3052  	int i;
3053  
3054  	for_each_possible_cpu(i)
3055  		vcpu->arch.vzguestid[i] = 0;
3056  
3057  	return 0;
3058  }
3059  
3060  static void kvm_vz_vcpu_uninit(struct kvm_vcpu *vcpu)
3061  {
3062  	int cpu;
3063  
3064  	/*
3065  	 * If the VCPU is freed and reused as another VCPU, we don't want the
3066  	 * matching pointer wrongly hanging around in last_vcpu[] or
3067  	 * last_exec_vcpu[].
3068  	 */
3069  	for_each_possible_cpu(cpu) {
3070  		if (last_vcpu[cpu] == vcpu)
3071  			last_vcpu[cpu] = NULL;
3072  		if (last_exec_vcpu[cpu] == vcpu)
3073  			last_exec_vcpu[cpu] = NULL;
3074  	}
3075  }
3076  
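/*
 * One-time VCPU state initialisation.  Note that most of the architectural
 * reset state is written to the soft CP0 copies (kvm_write_sw_gc0_*) rather
 * than to the hardware guest context; kvm_vz_vcpu_load() transfers it to the
 * hardware registers when the VCPU is first scheduled in.
 */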
3077  static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
3078  {
3079  	struct mips_coproc *cop0 = &vcpu->arch.cop0;
3080  	unsigned long count_hz = 100*1000*1000; /* default to 100 MHz */
3081  
3082  	/*
3083  	 * Start off the timer at the same frequency as the host timer, but the
3084  	 * soft timer doesn't handle frequencies greater than 1GHz yet.
3085  	 */
3086  	if (mips_hpt_frequency && mips_hpt_frequency <= NSEC_PER_SEC)
3087  		count_hz = mips_hpt_frequency;
3088  	kvm_mips_init_count(vcpu, count_hz);
3089  
3090  	/*
3091  	 * Initialize guest register state to valid architectural reset state.
3092  	 */
3093  
3094  	/* PageGrain */
3095  	if (cpu_has_mips_r5 || cpu_has_mips_r6)
3096  		kvm_write_sw_gc0_pagegrain(cop0, PG_RIE | PG_XIE | PG_IEC);
3097  	/* Wired */
3098  	if (cpu_has_mips_r6)
3099  		kvm_write_sw_gc0_wired(cop0,
3100  				       read_gc0_wired() & MIPSR6_WIRED_LIMIT);
3101  	/* Status */
3102  	kvm_write_sw_gc0_status(cop0, ST0_BEV | ST0_ERL);
3103  	if (cpu_has_mips_r5 || cpu_has_mips_r6)
3104  		kvm_change_sw_gc0_status(cop0, ST0_FR, read_gc0_status());
3105  	/* IntCtl */
3106  	kvm_write_sw_gc0_intctl(cop0, read_gc0_intctl() &
3107  				(INTCTLF_IPFDC | INTCTLF_IPPCI | INTCTLF_IPTI));
3108  	/* PRId */
3109  	kvm_write_sw_gc0_prid(cop0, boot_cpu_data.processor_id);
3110  	/* EBase */
3111  	kvm_write_sw_gc0_ebase(cop0, (s32)0x80000000 | vcpu->vcpu_id);
3112  	/* Config */
3113  	kvm_save_gc0_config(cop0);
3114  	/* architecturally writable (e.g. from guest) */
3115  	kvm_change_sw_gc0_config(cop0, CONF_CM_CMASK,
3116  				 _page_cachable_default >> _CACHE_SHIFT);
3117  	/* architecturally read only, but maybe writable from root */
3118  	kvm_change_sw_gc0_config(cop0, MIPS_CONF_MT, read_c0_config());
3119  	if (cpu_guest_has_conf1) {
3120  		kvm_set_sw_gc0_config(cop0, MIPS_CONF_M);
3121  		/* Config1 */
3122  		kvm_save_gc0_config1(cop0);
3123  		/* architecturally read only, but maybe writable from root */
3124  		kvm_clear_sw_gc0_config1(cop0, MIPS_CONF1_C2	|
3125  					       MIPS_CONF1_MD	|
3126  					       MIPS_CONF1_PC	|
3127  					       MIPS_CONF1_WR	|
3128  					       MIPS_CONF1_CA	|
3129  					       MIPS_CONF1_FP);
3130  	}
3131  	if (cpu_guest_has_conf2) {
3132  		kvm_set_sw_gc0_config1(cop0, MIPS_CONF_M);
3133  		/* Config2 */
3134  		kvm_save_gc0_config2(cop0);
3135  	}
3136  	if (cpu_guest_has_conf3) {
3137  		kvm_set_sw_gc0_config2(cop0, MIPS_CONF_M);
3138  		/* Config3 */
3139  		kvm_save_gc0_config3(cop0);
3140  		/* architecturally writable (e.g. from guest) */
3141  		kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_ISA_OE);
3142  		/* architecturally read only, but maybe writable from root */
3143  		kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_MSA	|
3144  					       MIPS_CONF3_BPG	|
3145  					       MIPS_CONF3_ULRI	|
3146  					       MIPS_CONF3_DSP	|
3147  					       MIPS_CONF3_CTXTC	|
3148  					       MIPS_CONF3_ITL	|
3149  					       MIPS_CONF3_LPA	|
3150  					       MIPS_CONF3_VEIC	|
3151  					       MIPS_CONF3_VINT	|
3152  					       MIPS_CONF3_SP	|
3153  					       MIPS_CONF3_CDMM	|
3154  					       MIPS_CONF3_MT	|
3155  					       MIPS_CONF3_SM	|
3156  					       MIPS_CONF3_TL);
3157  	}
3158  	if (cpu_guest_has_conf4) {
3159  		kvm_set_sw_gc0_config3(cop0, MIPS_CONF_M);
3160  		/* Config4 */
3161  		kvm_save_gc0_config4(cop0);
3162  	}
3163  	if (cpu_guest_has_conf5) {
3164  		kvm_set_sw_gc0_config4(cop0, MIPS_CONF_M);
3165  		/* Config5 */
3166  		kvm_save_gc0_config5(cop0);
3167  		/* architecturally writable (e.g. from guest) */
3168  		kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_K	|
3169  					       MIPS_CONF5_CV	|
3170  					       MIPS_CONF5_MSAEN	|
3171  					       MIPS_CONF5_UFE	|
3172  					       MIPS_CONF5_FRE	|
3173  					       MIPS_CONF5_SBRI	|
3174  					       MIPS_CONF5_UFR);
3175  		/* architecturally read only, but maybe writable from root */
3176  		kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_MRP);
3177  	}
3178  
3179  	if (cpu_guest_has_contextconfig) {
3180  		/* ContextConfig */
3181  		kvm_write_sw_gc0_contextconfig(cop0, 0x007ffff0);
3182  #ifdef CONFIG_64BIT
3183  		/* XContextConfig */
3184  		/* bits SEGBITS-13+3:4 set */
3185  		kvm_write_sw_gc0_xcontextconfig(cop0,
3186  					((1ull << (cpu_vmbits - 13)) - 1) << 4);
3187  #endif
3188  	}
3189  
3190  	/* Implementation dependent, use the legacy layout */
3191  	if (cpu_guest_has_segments) {
3192  		/* SegCtl0, SegCtl1, SegCtl2 */
3193  		kvm_write_sw_gc0_segctl0(cop0, 0x00200010);
3194  		kvm_write_sw_gc0_segctl1(cop0, 0x00000002 |
3195  				(_page_cachable_default >> _CACHE_SHIFT) <<
3196  						(16 + MIPS_SEGCFG_C_SHIFT));
3197  		kvm_write_sw_gc0_segctl2(cop0, 0x00380438);
3198  	}
3199  
3200  	/* reset HTW registers */
3201  	if (cpu_guest_has_htw && (cpu_has_mips_r5 || cpu_has_mips_r6)) {
3202  		/* PWField */
3203  		kvm_write_sw_gc0_pwfield(cop0, 0x0c30c302);
3204  		/* PWSize */
3205  		kvm_write_sw_gc0_pwsize(cop0, 1 << MIPS_PWSIZE_PTW_SHIFT);
3206  	}
3207  
3208  	/* start with no pending virtual guest interrupts */
3209  	if (cpu_has_guestctl2)
3210  		cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] = 0;
3211  
3212  	/* Put PC at reset vector */
3213  	vcpu->arch.pc = CKSEG1ADDR(0x1fc00000);
3214  
3215  	return 0;
3216  }
3217  
3218  static void kvm_vz_prepare_flush_shadow(struct kvm *kvm)
3219  {
3220  	if (!cpu_has_guestid) {
3221  		/*
3222  		 * For each CPU there is a single GPA ASID used by all VCPUs in
3223  		 * the VM, so it doesn't make sense for the VCPUs to handle
3224  		 * invalidation of these ASIDs individually.
3225  		 *
3226  		 * Instead mark all CPUs as needing ASID invalidation in
3227  		 * asid_flush_mask, and kvm_flush_remote_tlbs(kvm) will
3228  		 * kick any running VCPUs so they check asid_flush_mask.
3229  		 */
3230  		cpumask_setall(&kvm->arch.asid_flush_mask);
3231  	}
3232  }
3233  
3234  static void kvm_vz_vcpu_reenter(struct kvm_vcpu *vcpu)
3235  {
3236  	int cpu = smp_processor_id();
3237  	int preserve_guest_tlb;
3238  
3239  	preserve_guest_tlb = kvm_vz_check_requests(vcpu, cpu);
3240  
3241  	if (preserve_guest_tlb)
3242  		kvm_vz_vcpu_save_wired(vcpu);
3243  
3244  	kvm_vz_vcpu_load_tlb(vcpu, cpu);
3245  
3246  	if (preserve_guest_tlb)
3247  		kvm_vz_vcpu_load_wired(vcpu);
3248  }
3249  
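/*
 * Main run entry point: acquire the hard timer, deliver any pending guest
 * interrupts, make sure this CPU's guest TLB context is valid, load the wired
 * guest TLB entries, then drop into the low-level guest entry code.  Wired
 * entries are saved again on the way out since the guest may have modified
 * them.
 */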
3250  static int kvm_vz_vcpu_run(struct kvm_vcpu *vcpu)
3251  {
3252  	int cpu = smp_processor_id();
3253  	int r;
3254  
3255  	kvm_vz_acquire_htimer(vcpu);
3256  	/* Check if we have any exceptions/interrupts pending */
3257  	kvm_mips_deliver_interrupts(vcpu, read_gc0_cause());
3258  
3259  	kvm_vz_check_requests(vcpu, cpu);
3260  	kvm_vz_vcpu_load_tlb(vcpu, cpu);
3261  	kvm_vz_vcpu_load_wired(vcpu);
3262  
3263  	r = vcpu->arch.vcpu_run(vcpu);
3264  
3265  	kvm_vz_vcpu_save_wired(vcpu);
3266  
3267  	return r;
3268  }
3269  
3270  static struct kvm_mips_callbacks kvm_vz_callbacks = {
3271  	.handle_cop_unusable = kvm_trap_vz_handle_cop_unusable,
3272  	.handle_tlb_mod = kvm_trap_vz_handle_tlb_st_miss,
3273  	.handle_tlb_ld_miss = kvm_trap_vz_handle_tlb_ld_miss,
3274  	.handle_tlb_st_miss = kvm_trap_vz_handle_tlb_st_miss,
3275  	.handle_addr_err_st = kvm_trap_vz_no_handler,
3276  	.handle_addr_err_ld = kvm_trap_vz_no_handler,
3277  	.handle_syscall = kvm_trap_vz_no_handler,
3278  	.handle_res_inst = kvm_trap_vz_no_handler,
3279  	.handle_break = kvm_trap_vz_no_handler,
3280  	.handle_msa_disabled = kvm_trap_vz_handle_msa_disabled,
3281  	.handle_guest_exit = kvm_trap_vz_handle_guest_exit,
3282  
3283  	.enable_virtualization_cpu = kvm_vz_enable_virtualization_cpu,
3284  	.disable_virtualization_cpu = kvm_vz_disable_virtualization_cpu,
3285  	.check_extension = kvm_vz_check_extension,
3286  	.vcpu_init = kvm_vz_vcpu_init,
3287  	.vcpu_uninit = kvm_vz_vcpu_uninit,
3288  	.vcpu_setup = kvm_vz_vcpu_setup,
3289  	.prepare_flush_shadow = kvm_vz_prepare_flush_shadow,
3290  	.gva_to_gpa = kvm_vz_gva_to_gpa_cb,
3291  	.queue_timer_int = kvm_vz_queue_timer_int_cb,
3292  	.dequeue_timer_int = kvm_vz_dequeue_timer_int_cb,
3293  	.queue_io_int = kvm_vz_queue_io_int_cb,
3294  	.dequeue_io_int = kvm_vz_dequeue_io_int_cb,
3295  	.irq_deliver = kvm_vz_irq_deliver_cb,
3296  	.irq_clear = kvm_vz_irq_clear_cb,
3297  	.num_regs = kvm_vz_num_regs,
3298  	.copy_reg_indices = kvm_vz_copy_reg_indices,
3299  	.get_one_reg = kvm_vz_get_one_reg,
3300  	.set_one_reg = kvm_vz_set_one_reg,
3301  	.vcpu_load = kvm_vz_vcpu_load,
3302  	.vcpu_put = kvm_vz_vcpu_put,
3303  	.vcpu_run = kvm_vz_vcpu_run,
3304  	.vcpu_reenter = kvm_vz_vcpu_reenter,
3305  };
3306  
3307  /* FIXME: Get rid of the callbacks now that trap-and-emulate is gone. */
3308  const struct kvm_mips_callbacks * const kvm_mips_callbacks = &kvm_vz_callbacks;
3309  
3310  int kvm_mips_emulation_init(void)
3311  {
3312  	if (!cpu_has_vz)
3313  		return -ENODEV;
3314  
3315  	/*
3316  	 * VZ requires at least 2 KScratch registers, so it should have been
3317  	 * possible to allocate pgd_reg.
3318  	 */
3319  	if (WARN(pgd_reg == -1,
3320  		 "pgd_reg not allocated even though cpu_has_vz\n"))
3321  		return -ENODEV;
3322  
3323  	pr_info("Starting KVM with MIPS VZ extensions\n");
3324  	return 0;
3325  }
3326