xref: /linux/arch/loongarch/kvm/vcpu.c (revision b15dfdacd99dc0014413c71bc1157fc4e895ce68)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
4  */
5 
6 #include <linux/kvm_host.h>
7 #include <asm/fpu.h>
8 #include <asm/lbt.h>
9 #include <asm/loongarch.h>
10 #include <asm/setup.h>
11 #include <asm/time.h>
12 #include <asm/timex.h>
13 
14 #define CREATE_TRACE_POINTS
15 #include "trace.h"
16 
17 const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
18 	KVM_GENERIC_VCPU_STATS(),
19 	STATS_DESC_COUNTER(VCPU, int_exits),
20 	STATS_DESC_COUNTER(VCPU, idle_exits),
21 	STATS_DESC_COUNTER(VCPU, cpucfg_exits),
22 	STATS_DESC_COUNTER(VCPU, signal_exits),
23 	STATS_DESC_COUNTER(VCPU, hypercall_exits),
24 	STATS_DESC_COUNTER(VCPU, ipi_read_exits),
25 	STATS_DESC_COUNTER(VCPU, ipi_write_exits),
26 	STATS_DESC_COUNTER(VCPU, eiointc_read_exits),
27 	STATS_DESC_COUNTER(VCPU, eiointc_write_exits),
28 	STATS_DESC_COUNTER(VCPU, pch_pic_read_exits),
29 	STATS_DESC_COUNTER(VCPU, pch_pic_write_exits)
30 };
31 
32 const struct kvm_stats_header kvm_vcpu_stats_header = {
33 	.name_size = KVM_STATS_NAME_SIZE,
34 	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
35 	.id_offset = sizeof(struct kvm_stats_header),
36 	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
37 	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
38 		       sizeof(kvm_vcpu_stats_desc),
39 };
40 
41 static inline void kvm_save_host_pmu(struct kvm_vcpu *vcpu)
42 {
43 	struct kvm_context *context;
44 
45 	context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
46 	context->perf_cntr[0] = read_csr_perfcntr0();
47 	context->perf_cntr[1] = read_csr_perfcntr1();
48 	context->perf_cntr[2] = read_csr_perfcntr2();
49 	context->perf_cntr[3] = read_csr_perfcntr3();
50 	context->perf_ctrl[0] = write_csr_perfctrl0(0);
51 	context->perf_ctrl[1] = write_csr_perfctrl1(0);
52 	context->perf_ctrl[2] = write_csr_perfctrl2(0);
53 	context->perf_ctrl[3] = write_csr_perfctrl3(0);
54 }
55 
56 static inline void kvm_restore_host_pmu(struct kvm_vcpu *vcpu)
57 {
58 	struct kvm_context *context;
59 
60 	context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
61 	write_csr_perfcntr0(context->perf_cntr[0]);
62 	write_csr_perfcntr1(context->perf_cntr[1]);
63 	write_csr_perfcntr2(context->perf_cntr[2]);
64 	write_csr_perfcntr3(context->perf_cntr[3]);
65 	write_csr_perfctrl0(context->perf_ctrl[0]);
66 	write_csr_perfctrl1(context->perf_ctrl[1]);
67 	write_csr_perfctrl2(context->perf_ctrl[2]);
68 	write_csr_perfctrl3(context->perf_ctrl[3]);
69 }
70 
72 static inline void kvm_save_guest_pmu(struct kvm_vcpu *vcpu)
73 {
74 	struct loongarch_csrs *csr = vcpu->arch.csr;
75 
76 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
77 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
78 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
79 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
80 	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
81 	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
82 	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
83 	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
84 }
85 
86 static inline void kvm_restore_guest_pmu(struct kvm_vcpu *vcpu)
87 {
88 	struct loongarch_csrs *csr = vcpu->arch.csr;
89 
90 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
91 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
92 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
93 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
94 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
95 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
96 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
97 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
98 }
99 
100 static int kvm_own_pmu(struct kvm_vcpu *vcpu)
101 {
102 	unsigned long val;
103 
104 	if (!kvm_guest_has_pmu(&vcpu->arch))
105 		return -EINVAL;
106 
107 	kvm_save_host_pmu(vcpu);
108 
109 	/* Pass counters PM0..PM(num) through to the guest via GCFG.GPERF */
110 	val = read_csr_gcfg() & ~CSR_GCFG_GPERF;
111 	val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT;
112 	write_csr_gcfg(val);
113 
114 	kvm_restore_guest_pmu(vcpu);
115 
116 	return 0;
117 }
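
/*
 * Worked example (illustrative): with kvm_get_pmu_num() == 3, i.e. four
 * hardware counters, kvm_own_pmu() sets GCFG.GPERF to 4 above, which passes
 * PERFCTRL0..3 and PERFCNTR0..3 straight through to the guest.
 */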
118 
119 static void kvm_lose_pmu(struct kvm_vcpu *vcpu)
120 {
121 	unsigned long val;
122 	struct loongarch_csrs *csr = vcpu->arch.csr;
123 
124 	if (!(vcpu->arch.aux_inuse & KVM_LARCH_PMU))
125 		return;
126 
127 	kvm_save_guest_pmu(vcpu);
128 
129 	/* Disable pmu access from guest */
130 	write_csr_gcfg(read_csr_gcfg() & ~CSR_GCFG_GPERF);
131 
132 	/*
133 	 * Clear KVM_LARCH_PMU if the guest is not using the PMU CSRs when
134 	 * exiting the guest, so that the PMU context need not be handled
135 	 * on the next trap into the guest.
136 	 *
137 	 * Otherwise set the KVM_REQ_PMU request bit to restore the guest
138 	 * PMU context before the next entry into the guest.
139 	 */
140 	val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
141 	val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
142 	val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
143 	val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
144 	if (!(val & KVM_PMU_EVENT_ENABLED))
145 		vcpu->arch.aux_inuse &= ~KVM_LARCH_PMU;
146 	else
147 		kvm_make_request(KVM_REQ_PMU, vcpu);
148 
149 	kvm_restore_host_pmu(vcpu);
150 }
151 
152 static void kvm_check_pmu(struct kvm_vcpu *vcpu)
153 {
154 	if (kvm_check_request(KVM_REQ_PMU, vcpu)) {
155 		kvm_own_pmu(vcpu);
156 		vcpu->arch.aux_inuse |= KVM_LARCH_PMU;
157 	}
158 }
159 
160 static void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
161 {
162 	u32 version;
163 	u64 steal;
164 	gpa_t gpa;
165 	struct kvm_memslots *slots;
166 	struct kvm_steal_time __user *st;
167 	struct gfn_to_hva_cache *ghc;
168 
169 	ghc = &vcpu->arch.st.cache;
170 	gpa = vcpu->arch.st.guest_addr;
171 	if (!(gpa & KVM_STEAL_PHYS_VALID))
172 		return;
173 
174 	gpa &= KVM_STEAL_PHYS_MASK;
175 	slots = kvm_memslots(vcpu->kvm);
176 	if (slots->generation != ghc->generation || gpa != ghc->gpa) {
177 		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) {
178 			ghc->gpa = INVALID_GPA;
179 			return;
180 		}
181 	}
182 
183 	st = (struct kvm_steal_time __user *)ghc->hva;
184 	unsafe_get_user(version, &st->version, out);
185 	if (version & 1)
186 		version += 1; /* first-time write: make randomly-initialized version even */
187 
188 	version += 1;
189 	unsafe_put_user(version, &st->version, out);
190 	smp_wmb();
191 
192 	unsafe_get_user(steal, &st->steal, out);
193 	steal += current->sched_info.run_delay - vcpu->arch.st.last_steal;
194 	vcpu->arch.st.last_steal = current->sched_info.run_delay;
195 	unsafe_put_user(steal, &st->steal, out);
196 
197 	smp_wmb();
198 	version += 1;
199 	unsafe_put_user(version, &st->version, out);
200 out:
201 	mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
202 }
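
/*
 * For illustration only (not part of this file): a guest is expected to
 * read its steal time with the seqcount-style even/odd version protocol
 * used above, retrying while an update is in flight. A minimal sketch,
 * assuming st points at the shared struct kvm_steal_time:
 *
 *	do {
 *		version = READ_ONCE(st->version);
 *		smp_rmb();
 *		steal = READ_ONCE(st->steal);
 *		smp_rmb();
 *	} while ((version & 1) || version != READ_ONCE(st->version));
 */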
203 
204 /*
205  * kvm_check_requests - check and handle pending vCPU requests
206  *
207  * Return: RESUME_GUEST if we should enter the guest
208  *         RESUME_HOST  if we should exit to userspace
209  */
210 static int kvm_check_requests(struct kvm_vcpu *vcpu)
211 {
212 	if (!kvm_request_pending(vcpu))
213 		return RESUME_GUEST;
214 
215 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
216 		vcpu->arch.vpid = 0;  /* Drop vpid for this vCPU */
217 
218 	if (kvm_dirty_ring_check_request(vcpu))
219 		return RESUME_HOST;
220 
221 	if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
222 		kvm_update_stolen_time(vcpu);
223 
224 	return RESUME_GUEST;
225 }
226 
227 static void kvm_late_check_requests(struct kvm_vcpu *vcpu)
228 {
229 	lockdep_assert_irqs_disabled();
230 	if (kvm_check_request(KVM_REQ_TLB_FLUSH_GPA, vcpu))
231 		if (vcpu->arch.flush_gpa != INVALID_GPA) {
232 			kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa);
233 			vcpu->arch.flush_gpa = INVALID_GPA;
234 		}
235 }
236 
237 /*
238  * Check and handle pending signals and vCPU requests etc.
239  * Runs with IRQs enabled and preemption enabled.
240  *
241  * Return: RESUME_GUEST if we should enter the guest
242  *         RESUME_HOST  if we should exit to userspace
243  *         < 0 if we should exit to userspace, where the return value
244  *         indicates an error
245  */
246 static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
247 {
248 	int idx, ret;
249 
250 	/*
251 	 * Check conditions before entering the guest
252 	 */
253 	ret = kvm_xfer_to_guest_mode_handle_work(vcpu);
254 	if (ret < 0)
255 		return ret;
256 
257 	idx = srcu_read_lock(&vcpu->kvm->srcu);
258 	ret = kvm_check_requests(vcpu);
259 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
260 
261 	return ret;
262 }
263 
264 /*
265  * Called with irq enabled
266  *
267  * Return: RESUME_GUEST if we should enter the guest, and irq disabled
268  *         Others if we should exit to userspace
269  */
270 static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
271 {
272 	int ret;
273 
274 	do {
275 		ret = kvm_enter_guest_check(vcpu);
276 		if (ret != RESUME_GUEST)
277 			break;
278 
279 		/*
280 		 * Handle the vCPU timer and interrupts, then check pending
281 		 * requests and the VPID before the vCPU enters the guest
282 		 */
283 		local_irq_disable();
284 		kvm_deliver_intr(vcpu);
285 		kvm_deliver_exception(vcpu);
286 		/* Make sure the vcpu mode has been written */
287 		smp_store_mb(vcpu->mode, IN_GUEST_MODE);
288 		kvm_check_vpid(vcpu);
289 		kvm_check_pmu(vcpu);
290 
291 		/*
292 		 * Must be called after kvm_check_vpid(), since that function
293 		 * updates CSR.GSTAT, which kvm_flush_tlb_gpa() relies on, and
294 		 * may also clear the pending KVM_REQ_TLB_FLUSH_GPA bit
295 		 */
296 		kvm_late_check_requests(vcpu);
297 		vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY);
298 		/* Clear KVM_LARCH_SWCSR_LATEST as CSRs will change while in guest mode */
299 		vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
300 
301 		if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) {
302 			if (vcpu->arch.aux_inuse & KVM_LARCH_PMU) {
303 				kvm_lose_pmu(vcpu);
304 				kvm_make_request(KVM_REQ_PMU, vcpu);
305 			}
306 			/* Make sure the vcpu mode has been written */
307 			smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE);
308 			local_irq_enable();
309 			ret = -EAGAIN;
310 		}
311 	} while (ret != RESUME_GUEST);
312 
313 	return ret;
314 }
315 
316 /*
317  * Return 1 (RESUME_GUEST) to resume the guest, "<= 0" to resume the host.
318  */
319 static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
320 {
321 	int ret = RESUME_GUEST;
322 	unsigned long estat = vcpu->arch.host_estat;
323 	u32 intr = estat & CSR_ESTAT_IS;
324 	u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
325 
326 	vcpu->mode = OUTSIDE_GUEST_MODE;
327 
328 	/* Set a default exit reason */
329 	run->exit_reason = KVM_EXIT_UNKNOWN;
330 
331 	kvm_lose_pmu(vcpu);
332 
333 	guest_timing_exit_irqoff();
334 	guest_state_exit_irqoff();
335 	local_irq_enable();
336 
337 	trace_kvm_exit(vcpu, ecode);
338 	if (ecode) {
339 		ret = kvm_handle_fault(vcpu, ecode);
340 	} else {
341 		WARN(!intr, "vm exiting with suspicious irq\n");
342 		++vcpu->stat.int_exits;
343 	}
344 
345 	if (ret == RESUME_GUEST)
346 		ret = kvm_pre_enter_guest(vcpu);
347 
348 	if (ret != RESUME_GUEST) {
349 		local_irq_disable();
350 		return ret;
351 	}
352 
353 	guest_timing_enter_irqoff();
354 	guest_state_enter_irqoff();
355 	trace_kvm_reenter(vcpu);
356 
357 	return RESUME_GUEST;
358 }
359 
360 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
361 {
362 	return !!(vcpu->arch.irq_pending) &&
363 		vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE;
364 }
365 
366 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
367 {
368 	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
369 }
370 
371 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
372 {
373 	unsigned long val;
374 
375 	preempt_disable();
376 	val = gcsr_read(LOONGARCH_CSR_CRMD);
377 	preempt_enable();
378 
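	/* CSR_PRMD_PPLV covers the same bits ([1:0]) as the CRMD PLV field */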
379 	return (val & CSR_PRMD_PPLV) == PLV_KERN;
380 }
381 
382 #ifdef CONFIG_GUEST_PERF_EVENTS
383 unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
384 {
385 	return vcpu->arch.pc;
386 }
387 
388 /*
389  * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
390  * arrived in guest context.  For LoongArch64, if PMU is not passthrough to VM,
391  * any event that arrives while a vCPU is loaded is considered to be "in guest".
392  */
393 bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
394 {
395 	return (vcpu && !(vcpu->arch.aux_inuse & KVM_LARCH_PMU));
396 }
397 #endif
398 
399 bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
400 {
401 	return false;
402 }
403 
404 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
405 {
406 	return VM_FAULT_SIGBUS;
407 }
408 
409 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
410 				  struct kvm_translation *tr)
411 {
412 	return -EINVAL;
413 }
414 
415 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
416 {
417 	int ret;
418 
419 	/* Protect against timer sync and vcpu_load()/vcpu_put() */
420 	preempt_disable();
421 	ret = kvm_pending_timer(vcpu) ||
422 		kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT) & (1 << INT_TI);
423 	preempt_enable();
424 
425 	return ret;
426 }
427 
428 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
429 {
430 	int i;
431 
432 	kvm_debug("vCPU Register Dump:\n");
433 	kvm_debug("\tPC = 0x%08lx\n", vcpu->arch.pc);
434 	kvm_debug("\tExceptions: %08lx\n", vcpu->arch.irq_pending);
435 
436 	for (i = 0; i < 32; i += 4) {
437 		kvm_debug("\tGPR%02d: %08lx %08lx %08lx %08lx\n", i,
438 		       vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1],
439 		       vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
440 	}
441 
442 	kvm_debug("\tCRMD: 0x%08lx, ESTAT: 0x%08lx\n",
443 		  kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD),
444 		  kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT));
445 
446 	kvm_debug("\tERA: 0x%08lx\n", kvm_read_hw_gcsr(LOONGARCH_CSR_ERA));
447 
448 	return 0;
449 }
450 
451 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
452 				struct kvm_mp_state *mp_state)
453 {
454 	*mp_state = vcpu->arch.mp_state;
455 
456 	return 0;
457 }
458 
459 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
460 				struct kvm_mp_state *mp_state)
461 {
462 	int ret = 0;
463 
464 	switch (mp_state->mp_state) {
465 	case KVM_MP_STATE_RUNNABLE:
466 		vcpu->arch.mp_state = *mp_state;
467 		break;
468 	default:
469 		ret = -EINVAL;
470 	}
471 
472 	return ret;
473 }
474 
475 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
476 					struct kvm_guest_debug *dbg)
477 {
478 	if (dbg->control & ~KVM_GUESTDBG_VALID_MASK)
479 		return -EINVAL;
480 
481 	if (dbg->control & KVM_GUESTDBG_ENABLE)
482 		vcpu->guest_debug = dbg->control;
483 	else
484 		vcpu->guest_debug = 0;
485 
486 	return 0;
487 }
488 
489 static inline int kvm_set_cpuid(struct kvm_vcpu *vcpu, u64 val)
490 {
491 	int cpuid;
492 	struct kvm_phyid_map *map;
493 	struct loongarch_csrs *csr = vcpu->arch.csr;
494 
495 	if (val >= KVM_MAX_PHYID)
496 		return -EINVAL;
497 
498 	map = vcpu->kvm->arch.phyid_map;
499 	cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);
500 
501 	spin_lock(&vcpu->kvm->arch.phyid_map_lock);
502 	if ((cpuid < KVM_MAX_PHYID) && map->phys_map[cpuid].enabled) {
503 		/* Discard duplicated CPUID set operation */
504 		if (cpuid == val) {
505 			spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
506 			return 0;
507 		}
508 
509 		/*
510 		 * CPUID has already been set; forbid changing it to a
511 		 * different value at runtime
512 		 */
513 		spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
514 		return -EINVAL;
515 	}
516 
517 	if (map->phys_map[val].enabled) {
518 		/* Discard duplicated CPUID set operation */
519 		if (vcpu == map->phys_map[val].vcpu) {
520 			spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
521 			return 0;
522 		}
523 
524 		/*
525 		 * The new CPUID is already taken by another vcpu; forbid
526 		 * sharing the same CPUID between different vcpus
527 		 */
528 		spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
529 		return -EINVAL;
530 	}
531 
532 	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, val);
533 	map->phys_map[val].enabled	= true;
534 	map->phys_map[val].vcpu		= vcpu;
535 	spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
536 
537 	return 0;
538 }
539 
540 static inline void kvm_drop_cpuid(struct kvm_vcpu *vcpu)
541 {
542 	int cpuid;
543 	struct kvm_phyid_map *map;
544 	struct loongarch_csrs *csr = vcpu->arch.csr;
545 
546 	map = vcpu->kvm->arch.phyid_map;
547 	cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);
548 
549 	if (cpuid >= KVM_MAX_PHYID)
550 		return;
551 
552 	spin_lock(&vcpu->kvm->arch.phyid_map_lock);
553 	if (map->phys_map[cpuid].enabled) {
554 		map->phys_map[cpuid].vcpu = NULL;
555 		map->phys_map[cpuid].enabled = false;
556 		kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);
557 	}
558 	spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
559 }
560 
561 struct kvm_vcpu *kvm_get_vcpu_by_cpuid(struct kvm *kvm, int cpuid)
562 {
563 	struct kvm_phyid_map *map;
564 
565 	if (cpuid >= KVM_MAX_PHYID)
566 		return NULL;
567 
568 	map = kvm->arch.phyid_map;
569 	if (!map->phys_map[cpuid].enabled)
570 		return NULL;
571 
572 	return map->phys_map[cpuid].vcpu;
573 }
574 
575 static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val)
576 {
577 	unsigned long gintc;
578 	struct loongarch_csrs *csr = vcpu->arch.csr;
579 
580 	if (get_gcsr_flag(id) & INVALID_GCSR)
581 		return -EINVAL;
582 
583 	if (id == LOONGARCH_CSR_ESTAT) {
584 		preempt_disable();
585 		vcpu_load(vcpu);
586 		/*
587 		 * Sync pending interrupts into ESTAT so that they are
588 		 * preserved across the VM migration stage
589 		 */
590 		kvm_deliver_intr(vcpu);
591 		vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
592 		vcpu_put(vcpu);
593 		preempt_enable();
594 
595 		/* ESTAT IP0~IP7 are read from GINTC */
596 		gintc = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC) & 0xff;
597 		*val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT) | (gintc << 2);
598 		return 0;
599 	}
600 
601 	/*
602 	 * Return the software CSR state, which is consistent with the
603 	 * hardware state for a synchronous ioctl
604 	 */
605 	*val = kvm_read_sw_gcsr(csr, id);
606 
607 	return 0;
608 }
609 
610 static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
611 {
612 	int ret = 0, gintc;
613 	struct loongarch_csrs *csr = vcpu->arch.csr;
614 
615 	if (get_gcsr_flag(id) & INVALID_GCSR)
616 		return -EINVAL;
617 
618 	if (id == LOONGARCH_CSR_CPUID)
619 		return kvm_set_cpuid(vcpu, val);
620 
621 	if (id == LOONGARCH_CSR_ESTAT) {
622 		/* ESTAT IP0~IP7 are injected through GINTC */
623 		gintc = (val >> 2) & 0xff;
624 		kvm_set_sw_gcsr(csr, LOONGARCH_CSR_GINTC, gintc);
625 
626 		gintc = val & ~(0xffUL << 2);
627 		kvm_set_sw_gcsr(csr, LOONGARCH_CSR_ESTAT, gintc);
628 
629 		return ret;
630 	}
631 
632 	kvm_write_sw_gcsr(csr, id, val);
633 
634 	/*
635 	 * After modifying a PMU CSR of the vcpu, set KVM_REQ_PMU if the
636 	 * PMU CSRs show that an event counter is enabled.
637 	 */
638 	if (id >= LOONGARCH_CSR_PERFCTRL0 && id <= LOONGARCH_CSR_PERFCNTR3) {
639 		unsigned long val;
640 
641 		val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0) |
642 		      kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1) |
643 		      kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2) |
644 		      kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
645 
646 		if (val & KVM_PMU_EVENT_ENABLED)
647 			kvm_make_request(KVM_REQ_PMU, vcpu);
648 	}
649 
650 	return ret;
651 }
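
/*
 * Worked example (illustrative): setting ESTAT to 0x84 (IP0 and IP5
 * pending) stores (0x84 >> 2) & 0xff = 0x21 into the GINTC software state
 * and 0x84 & ~0x3fc = 0 into ESTAT; _kvm_getcsr() reverses the split and
 * returns 0 | (0x21 << 2) = 0x84 again.
 */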
652 
653 static int _kvm_get_cpucfg_mask(int id, u64 *v)
654 {
655 	if (id < 0 || id >= KVM_MAX_CPUCFG_REGS)
656 		return -EINVAL;
657 
658 	switch (id) {
659 	case LOONGARCH_CPUCFG0:
660 		*v = GENMASK(31, 0);
661 		return 0;
662 	case LOONGARCH_CPUCFG1:
663 		/* CPUCFG1_MSGINT is not supported by KVM */
664 		*v = GENMASK(25, 0);
665 		return 0;
666 	case LOONGARCH_CPUCFG2:
667 		/* CPUCFG2 features unconditionally supported by KVM */
668 		*v = CPUCFG2_FP     | CPUCFG2_FPSP  | CPUCFG2_FPDP     |
669 		     CPUCFG2_FPVERS | CPUCFG2_LLFTP | CPUCFG2_LLFTPREV |
670 		     CPUCFG2_LSPW | CPUCFG2_LAM;
671 		/*
672 		 * For the ISA extensions listed below, if one is supported
673 		 * by the host, then it is also supported by KVM.
674 		 */
675 		if (cpu_has_lsx)
676 			*v |= CPUCFG2_LSX;
677 		if (cpu_has_lasx)
678 			*v |= CPUCFG2_LASX;
679 		if (cpu_has_lbt_x86)
680 			*v |= CPUCFG2_X86BT;
681 		if (cpu_has_lbt_arm)
682 			*v |= CPUCFG2_ARMBT;
683 		if (cpu_has_lbt_mips)
684 			*v |= CPUCFG2_MIPSBT;
685 		if (cpu_has_ptw)
686 			*v |= CPUCFG2_PTW;
687 
688 		return 0;
689 	case LOONGARCH_CPUCFG3:
690 		*v = GENMASK(16, 0);
691 		return 0;
692 	case LOONGARCH_CPUCFG4:
693 	case LOONGARCH_CPUCFG5:
694 		*v = GENMASK(31, 0);
695 		return 0;
696 	case LOONGARCH_CPUCFG6:
697 		if (cpu_has_pmp)
698 			*v = GENMASK(14, 0);
699 		else
700 			*v = 0;
701 		return 0;
702 	case LOONGARCH_CPUCFG16:
703 		*v = GENMASK(16, 0);
704 		return 0;
705 	case LOONGARCH_CPUCFG17 ... LOONGARCH_CPUCFG20:
706 		*v = GENMASK(30, 0);
707 		return 0;
708 	default:
709 		/*
710 		 * CPUCFG bits should be zero if reserved by HW or not
711 		 * supported by KVM.
712 		 */
713 		*v = 0;
714 		return 0;
715 	}
716 }
717 
718 static int kvm_check_cpucfg(int id, u64 val)
719 {
720 	int ret;
721 	u64 mask = 0;
722 
723 	ret = _kvm_get_cpucfg_mask(id, &mask);
724 	if (ret)
725 		return ret;
726 
727 	if (val & ~mask)
728 		/* Unsupported features and/or the higher 32 bits should not be set */
729 		return -EINVAL;
730 
731 	switch (id) {
732 	case LOONGARCH_CPUCFG2:
733 		if (!(val & CPUCFG2_LLFTP))
734 			/* Guests must have a constant timer */
735 			return -EINVAL;
736 		if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP)))
737 			/* Single and double precision floating point must both be set when FP is enabled */
738 			return -EINVAL;
739 		if ((val & CPUCFG2_LSX) && !(val & CPUCFG2_FP))
740 			/* LSX architecturally implies FP but val does not satisfy that */
741 			return -EINVAL;
742 		if ((val & CPUCFG2_LASX) && !(val & CPUCFG2_LSX))
743 			/* LASX architecturally implies LSX and FP but val does not satisfy that */
744 			return -EINVAL;
745 		return 0;
746 	case LOONGARCH_CPUCFG6:
747 		if (val & CPUCFG6_PMP) {
748 			u32 host = read_cpucfg(LOONGARCH_CPUCFG6);
749 			if ((val & CPUCFG6_PMBITS) != (host & CPUCFG6_PMBITS))
750 				return -EINVAL;
751 			if ((val & CPUCFG6_PMNUM) > (host & CPUCFG6_PMNUM))
752 				return -EINVAL;
753 			if ((val & CPUCFG6_UPM) && !(host & CPUCFG6_UPM))
754 				return -EINVAL;
755 		}
756 		return 0;
757 	default:
758 		/*
759 		 * Values for the other CPUCFG IDs are not validated beyond
760 		 * the mask check above.
761 		 */
762 		return 0;
763 	}
764 }
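
/*
 * Example of the CPUCFG2 rules above (illustrative, on a host with LSX
 * and LASX): CPUCFG2_FP | CPUCFG2_FPSP | CPUCFG2_FPDP | CPUCFG2_LLFTP |
 * CPUCFG2_LSX | CPUCFG2_LASX is accepted, while CPUCFG2_LASX without
 * CPUCFG2_LSX, or CPUCFG2_FP without both CPUCFG2_FPSP and CPUCFG2_FPDP,
 * is rejected with -EINVAL.
 */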
765 
766 static int kvm_get_one_reg(struct kvm_vcpu *vcpu,
767 		const struct kvm_one_reg *reg, u64 *v)
768 {
769 	int id, ret = 0;
770 	u64 type = reg->id & KVM_REG_LOONGARCH_MASK;
771 
772 	switch (type) {
773 	case KVM_REG_LOONGARCH_CSR:
774 		id = KVM_GET_IOC_CSR_IDX(reg->id);
775 		ret = _kvm_getcsr(vcpu, id, v);
776 		break;
777 	case KVM_REG_LOONGARCH_CPUCFG:
778 		id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
779 		if (id >= 0 && id < KVM_MAX_CPUCFG_REGS)
780 			*v = vcpu->arch.cpucfg[id];
781 		else
782 			ret = -EINVAL;
783 		break;
784 	case KVM_REG_LOONGARCH_LBT:
785 		if (!kvm_guest_has_lbt(&vcpu->arch))
786 			return -ENXIO;
787 
788 		switch (reg->id) {
789 		case KVM_REG_LOONGARCH_LBT_SCR0:
790 			*v = vcpu->arch.lbt.scr0;
791 			break;
792 		case KVM_REG_LOONGARCH_LBT_SCR1:
793 			*v = vcpu->arch.lbt.scr1;
794 			break;
795 		case KVM_REG_LOONGARCH_LBT_SCR2:
796 			*v = vcpu->arch.lbt.scr2;
797 			break;
798 		case KVM_REG_LOONGARCH_LBT_SCR3:
799 			*v = vcpu->arch.lbt.scr3;
800 			break;
801 		case KVM_REG_LOONGARCH_LBT_EFLAGS:
802 			*v = vcpu->arch.lbt.eflags;
803 			break;
804 		case KVM_REG_LOONGARCH_LBT_FTOP:
805 			*v = vcpu->arch.fpu.ftop;
806 			break;
807 		default:
808 			ret = -EINVAL;
809 			break;
810 		}
811 		break;
812 	case KVM_REG_LOONGARCH_KVM:
813 		switch (reg->id) {
814 		case KVM_REG_LOONGARCH_COUNTER:
815 			*v = get_cycles() + vcpu->kvm->arch.time_offset;
816 			break;
817 		case KVM_REG_LOONGARCH_DEBUG_INST:
818 			*v = INSN_HVCL | KVM_HCALL_SWDBG;
819 			break;
820 		default:
821 			ret = -EINVAL;
822 			break;
823 		}
824 		break;
825 	default:
826 		ret = -EINVAL;
827 		break;
828 	}
829 
830 	return ret;
831 }
832 
833 static int kvm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
834 {
835 	int ret = 0;
836 	u64 v, size = reg->id & KVM_REG_SIZE_MASK;
837 
838 	switch (size) {
839 	case KVM_REG_SIZE_U64:
840 		ret = kvm_get_one_reg(vcpu, reg, &v);
841 		if (ret)
842 			return ret;
843 		ret = put_user(v, (u64 __user *)(long)reg->addr);
844 		break;
845 	default:
846 		ret = -EINVAL;
847 		break;
848 	}
849 
850 	return ret;
851 }
852 
853 static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
854 			const struct kvm_one_reg *reg, u64 v)
855 {
856 	int id, ret = 0;
857 	u64 type = reg->id & KVM_REG_LOONGARCH_MASK;
858 
859 	switch (type) {
860 	case KVM_REG_LOONGARCH_CSR:
861 		id = KVM_GET_IOC_CSR_IDX(reg->id);
862 		ret = _kvm_setcsr(vcpu, id, v);
863 		break;
864 	case KVM_REG_LOONGARCH_CPUCFG:
865 		id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
866 		ret = kvm_check_cpucfg(id, v);
867 		if (ret)
868 			break;
869 		vcpu->arch.cpucfg[id] = (u32)v;
870 		if (id == LOONGARCH_CPUCFG6)
871 			vcpu->arch.max_pmu_csrid =
872 				LOONGARCH_CSR_PERFCTRL0 + 2 * kvm_get_pmu_num(&vcpu->arch) + 1;
873 		break;
874 	case KVM_REG_LOONGARCH_LBT:
875 		if (!kvm_guest_has_lbt(&vcpu->arch))
876 			return -ENXIO;
877 
878 		switch (reg->id) {
879 		case KVM_REG_LOONGARCH_LBT_SCR0:
880 			vcpu->arch.lbt.scr0 = v;
881 			break;
882 		case KVM_REG_LOONGARCH_LBT_SCR1:
883 			vcpu->arch.lbt.scr1 = v;
884 			break;
885 		case KVM_REG_LOONGARCH_LBT_SCR2:
886 			vcpu->arch.lbt.scr2 = v;
887 			break;
888 		case KVM_REG_LOONGARCH_LBT_SCR3:
889 			vcpu->arch.lbt.scr3 = v;
890 			break;
891 		case KVM_REG_LOONGARCH_LBT_EFLAGS:
892 			vcpu->arch.lbt.eflags = v;
893 			break;
894 		case KVM_REG_LOONGARCH_LBT_FTOP:
895 			vcpu->arch.fpu.ftop = v;
896 			break;
897 		default:
898 			ret = -EINVAL;
899 			break;
900 		}
901 		break;
902 	case KVM_REG_LOONGARCH_KVM:
903 		switch (reg->id) {
904 		case KVM_REG_LOONGARCH_COUNTER:
905 			/*
906 			 * The counter offset is board-wide, not per-vCPU, so
907 			 * on an SMP system only vCPU 0 may set it
908 			 */
909 			if (vcpu->vcpu_id == 0)
910 				vcpu->kvm->arch.time_offset = (signed long)(v - get_cycles());
911 			break;
912 		case KVM_REG_LOONGARCH_VCPU_RESET:
913 			vcpu->arch.st.guest_addr = 0;
914 			memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
915 			memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
916 
917 			/*
918 			 * On vCPU reset, clear the ESTAT and GINTC registers
919 			 * here; other CSRs are cleared via _kvm_setcsr().
920 			 */
921 			kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_GINTC, 0);
922 			kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_ESTAT, 0);
923 			break;
924 		default:
925 			ret = -EINVAL;
926 			break;
927 		}
928 		break;
929 	default:
930 		ret = -EINVAL;
931 		break;
932 	}
933 
934 	return ret;
935 }
936 
937 static int kvm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
938 {
939 	int ret = 0;
940 	u64 v, size = reg->id & KVM_REG_SIZE_MASK;
941 
942 	switch (size) {
943 	case KVM_REG_SIZE_U64:
944 		ret = get_user(v, (u64 __user *)(long)reg->addr);
945 		if (ret)
946 			return ret;
947 		break;
948 	default:
949 		return -EINVAL;
950 	}
951 
952 	return kvm_set_one_reg(vcpu, reg, v);
953 }
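
/*
 * Illustrative userspace sketch (assumed usage, not code in this file):
 * a guest CSR is accessed through the ONE_REG interface by encoding the
 * CSR number into reg.id together with the register class and size,
 * e.g. via the KVM_IOC_CSRID() helper from the uapi header:
 *
 *	__u64 val = 0;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_IOC_CSRID(LOONGARCH_CSR_PRMD),
 *		.addr = (__u64)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
 */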
954 
955 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
956 {
957 	return -ENOIOCTLCMD;
958 }
959 
960 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
961 {
962 	return -ENOIOCTLCMD;
963 }
964 
965 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
966 {
967 	int i;
968 
969 	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
970 		regs->gpr[i] = vcpu->arch.gprs[i];
971 
972 	regs->pc = vcpu->arch.pc;
973 
974 	return 0;
975 }
976 
977 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
978 {
979 	int i;
980 
981 	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
982 		vcpu->arch.gprs[i] = regs->gpr[i];
983 
984 	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
985 	vcpu->arch.pc = regs->pc;
986 
987 	return 0;
988 }
989 
990 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
991 				     struct kvm_enable_cap *cap)
992 {
993 	/* FPU is enabled by default, will support LSX/LASX later. */
994 	return -EINVAL;
995 }
996 
997 static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu,
998 					 struct kvm_device_attr *attr)
999 {
1000 	switch (attr->attr) {
1001 	case LOONGARCH_CPUCFG2:
1002 	case LOONGARCH_CPUCFG6:
1003 		return 0;
1004 	case CPUCFG_KVM_FEATURE:
1005 		return 0;
1006 	default:
1007 		return -ENXIO;
1008 	}
1009 
1010 	return -ENXIO;
1011 }
1012 
1013 static int kvm_loongarch_pvtime_has_attr(struct kvm_vcpu *vcpu,
1014 					 struct kvm_device_attr *attr)
1015 {
1016 	if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
1017 			|| attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
1018 		return -ENXIO;
1019 
1020 	return 0;
1021 }
1022 
1023 static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
1024 				       struct kvm_device_attr *attr)
1025 {
1026 	int ret = -ENXIO;
1027 
1028 	switch (attr->group) {
1029 	case KVM_LOONGARCH_VCPU_CPUCFG:
1030 		ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr);
1031 		break;
1032 	case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
1033 		ret = kvm_loongarch_pvtime_has_attr(vcpu, attr);
1034 		break;
1035 	default:
1036 		break;
1037 	}
1038 
1039 	return ret;
1040 }
1041 
1042 static int kvm_loongarch_cpucfg_get_attr(struct kvm_vcpu *vcpu,
1043 					 struct kvm_device_attr *attr)
1044 {
1045 	int ret = 0;
1046 	uint64_t val;
1047 	uint64_t __user *uaddr = (uint64_t __user *)attr->addr;
1048 
1049 	switch (attr->attr) {
1050 	case 0 ... (KVM_MAX_CPUCFG_REGS - 1):
1051 		ret = _kvm_get_cpucfg_mask(attr->attr, &val);
1052 		if (ret)
1053 			return ret;
1054 		break;
1055 	case CPUCFG_KVM_FEATURE:
1056 		val = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK;
1057 		break;
1058 	default:
1059 		return -ENXIO;
1060 	}
1061 
1062 	put_user(val, uaddr);
1063 
1064 	return ret;
1065 }
1066 
1067 static int kvm_loongarch_pvtime_get_attr(struct kvm_vcpu *vcpu,
1068 					 struct kvm_device_attr *attr)
1069 {
1070 	u64 gpa;
1071 	u64 __user *user = (u64 __user *)attr->addr;
1072 
1073 	if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
1074 			|| attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
1075 		return -ENXIO;
1076 
1077 	gpa = vcpu->arch.st.guest_addr;
1078 	if (put_user(gpa, user))
1079 		return -EFAULT;
1080 
1081 	return 0;
1082 }
1083 
1084 static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu,
1085 				       struct kvm_device_attr *attr)
1086 {
1087 	int ret = -ENXIO;
1088 
1089 	switch (attr->group) {
1090 	case KVM_LOONGARCH_VCPU_CPUCFG:
1091 		ret = kvm_loongarch_cpucfg_get_attr(vcpu, attr);
1092 		break;
1093 	case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
1094 		ret = kvm_loongarch_pvtime_get_attr(vcpu, attr);
1095 		break;
1096 	default:
1097 		break;
1098 	}
1099 
1100 	return ret;
1101 }
1102 
1103 static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu,
1104 					 struct kvm_device_attr *attr)
1105 {
1106 	u64 val, valid;
1107 	u64 __user *user = (u64 __user *)attr->addr;
1108 	struct kvm *kvm = vcpu->kvm;
1109 
1110 	switch (attr->attr) {
1111 	case CPUCFG_KVM_FEATURE:
1112 		if (get_user(val, user))
1113 			return -EFAULT;
1114 
1115 		valid = LOONGARCH_PV_FEAT_MASK;
1116 		if (val & ~valid)
1117 			return -EINVAL;
1118 
1119 		/* All vCPUs must be set with the same PV features */
1120 		if ((kvm->arch.pv_features & LOONGARCH_PV_FEAT_UPDATED)
1121 				&& ((kvm->arch.pv_features & valid) != val))
1122 			return -EINVAL;
1123 		kvm->arch.pv_features = val | LOONGARCH_PV_FEAT_UPDATED;
1124 		return 0;
1125 	default:
1126 		return -ENXIO;
1127 	}
1128 }
1129 
1130 static int kvm_loongarch_pvtime_set_attr(struct kvm_vcpu *vcpu,
1131 					 struct kvm_device_attr *attr)
1132 {
1133 	int idx, ret = 0;
1134 	u64 gpa, __user *user = (u64 __user *)attr->addr;
1135 	struct kvm *kvm = vcpu->kvm;
1136 
1137 	if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
1138 			|| attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
1139 		return -ENXIO;
1140 
1141 	if (get_user(gpa, user))
1142 		return -EFAULT;
1143 
1144 	if (gpa & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
1145 		return -EINVAL;
1146 
1147 	if (!(gpa & KVM_STEAL_PHYS_VALID)) {
1148 		vcpu->arch.st.guest_addr = gpa;
1149 		return 0;
1150 	}
1151 
1152 	/* Check that the address is in a valid memslot */
1153 	idx = srcu_read_lock(&kvm->srcu);
1154 	if (kvm_is_error_hva(gfn_to_hva(kvm, gpa >> PAGE_SHIFT)))
1155 		ret = -EINVAL;
1156 	srcu_read_unlock(&kvm->srcu, idx);
1157 
1158 	if (!ret) {
1159 		vcpu->arch.st.guest_addr = gpa;
1160 		vcpu->arch.st.last_steal = current->sched_info.run_delay;
1161 		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
1162 	}
1163 
1164 	return ret;
1165 }
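
/*
 * Illustrative userspace sketch (assumed usage, not code in this file):
 * steal time accounting is enabled for a vCPU by registering a guest
 * physical address with the VALID bit set through the PVTIME attribute
 * group, typically when restoring state on migration:
 *
 *	__u64 gpa = st_gpa | KVM_STEAL_PHYS_VALID;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_LOONGARCH_VCPU_PVTIME_CTRL,
 *		.attr  = KVM_LOONGARCH_VCPU_PVTIME_GPA,
 *		.addr  = (__u64)&gpa,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
 */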
1166 
1167 static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu,
1168 				       struct kvm_device_attr *attr)
1169 {
1170 	int ret = -ENXIO;
1171 
1172 	switch (attr->group) {
1173 	case KVM_LOONGARCH_VCPU_CPUCFG:
1174 		ret = kvm_loongarch_cpucfg_set_attr(vcpu, attr);
1175 		break;
1176 	case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
1177 		ret = kvm_loongarch_pvtime_set_attr(vcpu, attr);
1178 		break;
1179 	default:
1180 		break;
1181 	}
1182 
1183 	return ret;
1184 }
1185 
1186 long kvm_arch_vcpu_ioctl(struct file *filp,
1187 			 unsigned int ioctl, unsigned long arg)
1188 {
1189 	long r;
1190 	struct kvm_device_attr attr;
1191 	void __user *argp = (void __user *)arg;
1192 	struct kvm_vcpu *vcpu = filp->private_data;
1193 
1194 	/*
1195 	 * Only the software CSR state should be modified here.
1196 	 *
1197 	 * If any hardware CSR register were modified directly, a
1198 	 * vcpu_load/vcpu_put pair would be needed, since the hardware CSRs
1199 	 * are owned by the loaded vcpu and other vcpus would have to reload them.
1200 	 *
1201 	 * When the software CSR state is modified, KVM_LARCH_HWCSR_USABLE is
1202 	 * cleared in vcpu->arch.aux_inuse, and vcpu_load checks that flag
1203 	 * and reloads the hardware CSR registers from the software state.
1204 	 */
1205 
1206 	switch (ioctl) {
1207 	case KVM_SET_ONE_REG:
1208 	case KVM_GET_ONE_REG: {
1209 		struct kvm_one_reg reg;
1210 
1211 		r = -EFAULT;
1212 		if (copy_from_user(&reg, argp, sizeof(reg)))
1213 			break;
1214 		if (ioctl == KVM_SET_ONE_REG) {
1215 			r = kvm_set_reg(vcpu, &reg);
1216 			vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
1217 		} else
1218 			r = kvm_get_reg(vcpu, &reg);
1219 		break;
1220 	}
1221 	case KVM_ENABLE_CAP: {
1222 		struct kvm_enable_cap cap;
1223 
1224 		r = -EFAULT;
1225 		if (copy_from_user(&cap, argp, sizeof(cap)))
1226 			break;
1227 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
1228 		break;
1229 	}
1230 	case KVM_HAS_DEVICE_ATTR: {
1231 		r = -EFAULT;
1232 		if (copy_from_user(&attr, argp, sizeof(attr)))
1233 			break;
1234 		r = kvm_loongarch_vcpu_has_attr(vcpu, &attr);
1235 		break;
1236 	}
1237 	case KVM_GET_DEVICE_ATTR: {
1238 		r = -EFAULT;
1239 		if (copy_from_user(&attr, argp, sizeof(attr)))
1240 			break;
1241 		r = kvm_loongarch_vcpu_get_attr(vcpu, &attr);
1242 		break;
1243 	}
1244 	case KVM_SET_DEVICE_ATTR: {
1245 		r = -EFAULT;
1246 		if (copy_from_user(&attr, argp, sizeof(attr)))
1247 			break;
1248 		r = kvm_loongarch_vcpu_set_attr(vcpu, &attr);
1249 		break;
1250 	}
1251 	default:
1252 		r = -ENOIOCTLCMD;
1253 		break;
1254 	}
1255 
1256 	return r;
1257 }
1258 
1259 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1260 {
1261 	int i = 0;
1262 
1263 	fpu->fcc = vcpu->arch.fpu.fcc;
1264 	fpu->fcsr = vcpu->arch.fpu.fcsr;
1265 	for (i = 0; i < NUM_FPU_REGS; i++)
1266 		memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i], FPU_REG_WIDTH / 64);
1267 
1268 	return 0;
1269 }
1270 
1271 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1272 {
1273 	int i = 0;
1274 
1275 	vcpu->arch.fpu.fcc = fpu->fcc;
1276 	vcpu->arch.fpu.fcsr = fpu->fcsr;
1277 	for (i = 0; i < NUM_FPU_REGS; i++)
1278 		memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i], FPU_REG_WIDTH / 64);
1279 
1280 	return 0;
1281 }
1282 
1283 #ifdef CONFIG_CPU_HAS_LBT
1284 int kvm_own_lbt(struct kvm_vcpu *vcpu)
1285 {
1286 	if (!kvm_guest_has_lbt(&vcpu->arch))
1287 		return -EINVAL;
1288 
1289 	preempt_disable();
1290 	if (!(vcpu->arch.aux_inuse & KVM_LARCH_LBT)) {
1291 		set_csr_euen(CSR_EUEN_LBTEN);
1292 		_restore_lbt(&vcpu->arch.lbt);
1293 		vcpu->arch.aux_inuse |= KVM_LARCH_LBT;
1294 	}
1295 	preempt_enable();
1296 
1297 	return 0;
1298 }
1299 
1300 static void kvm_lose_lbt(struct kvm_vcpu *vcpu)
1301 {
1302 	preempt_disable();
1303 	if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) {
1304 		_save_lbt(&vcpu->arch.lbt);
1305 		clear_csr_euen(CSR_EUEN_LBTEN);
1306 		vcpu->arch.aux_inuse &= ~KVM_LARCH_LBT;
1307 	}
1308 	preempt_enable();
1309 }
1310 
1311 static void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr)
1312 {
1313 	/*
1314 	 * If TM is enabled, saving/restoring the FPU top register would
1315 	 * raise an LBT exception, so enable LBT here in advance
1316 	 */
1317 	if (fcsr & FPU_CSR_TM)
1318 		kvm_own_lbt(vcpu);
1319 }
1320 
1321 static void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu)
1322 {
1323 	if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1324 		if (vcpu->arch.aux_inuse & KVM_LARCH_LBT)
1325 			return;
1326 		kvm_check_fcsr(vcpu, read_fcsr(LOONGARCH_FCSR0));
1327 	}
1328 }
1329 #else
1330 static inline void kvm_lose_lbt(struct kvm_vcpu *vcpu) { }
1331 static inline void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr) { }
1332 static inline void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu) { }
1333 #endif
1334 
1335 /* Enable FPU and restore context */
1336 void kvm_own_fpu(struct kvm_vcpu *vcpu)
1337 {
1338 	preempt_disable();
1339 
1340 	/*
1341 	 * Enable the FPU for the guest and
1342 	 * restore the saved guest FPU context
1343 	 */
1344 	kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1345 	set_csr_euen(CSR_EUEN_FPEN);
1346 
1347 	kvm_restore_fpu(&vcpu->arch.fpu);
1348 	vcpu->arch.aux_inuse |= KVM_LARCH_FPU;
1349 	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
1350 
1351 	preempt_enable();
1352 }
1353 
1354 #ifdef CONFIG_CPU_HAS_LSX
1355 /* Enable LSX and restore context */
1356 int kvm_own_lsx(struct kvm_vcpu *vcpu)
1357 {
1358 	if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch))
1359 		return -EINVAL;
1360 
1361 	preempt_disable();
1362 
1363 	/* Enable LSX for guest */
1364 	kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1365 	set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN);
1366 	switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1367 	case KVM_LARCH_FPU:
1368 		/*
1369 		 * Guest FPU state already loaded,
1370 		 * only restore upper LSX state
1371 		 */
1372 		_restore_lsx_upper(&vcpu->arch.fpu);
1373 		break;
1374 	default:
1375 		/*
1376 		 * Neither FPU nor LSX is active, restore the full LSX state
1377 		 */
1378 		kvm_restore_lsx(&vcpu->arch.fpu);
1379 		break;
1380 	}
1381 
1382 	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX);
1383 	vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU;
1384 	preempt_enable();
1385 
1386 	return 0;
1387 }
1388 #endif
1389 
1390 #ifdef CONFIG_CPU_HAS_LASX
1391 /* Enable LASX and restore context */
1392 int kvm_own_lasx(struct kvm_vcpu *vcpu)
1393 {
1394 	if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch) || !kvm_guest_has_lasx(&vcpu->arch))
1395 		return -EINVAL;
1396 
1397 	preempt_disable();
1398 
1399 	kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1400 	set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
1401 	switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) {
1402 	case KVM_LARCH_LSX:
1403 	case KVM_LARCH_LSX | KVM_LARCH_FPU:
1404 		/* Guest LSX state already loaded, only restore upper LASX state */
1405 		_restore_lasx_upper(&vcpu->arch.fpu);
1406 		break;
1407 	case KVM_LARCH_FPU:
1408 		/* Guest FP state already loaded, only restore upper LSX & LASX state */
1409 		_restore_lsx_upper(&vcpu->arch.fpu);
1410 		_restore_lasx_upper(&vcpu->arch.fpu);
1411 		break;
1412 	default:
1413 		/* Neither FPU nor LSX is active, restore the full LASX state */
1414 		kvm_restore_lasx(&vcpu->arch.fpu);
1415 		break;
1416 	}
1417 
1418 	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX);
1419 	vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU;
1420 	preempt_enable();
1421 
1422 	return 0;
1423 }
1424 #endif
1425 
1426 /* Save context and disable FPU */
1427 void kvm_lose_fpu(struct kvm_vcpu *vcpu)
1428 {
1429 	preempt_disable();
1430 
1431 	kvm_check_fcsr_alive(vcpu);
1432 	if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) {
1433 		kvm_save_lasx(&vcpu->arch.fpu);
1434 		vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX);
1435 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LASX);
1436 
1437 		/* Disable LASX & LSX & FPU */
1438 		clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
1439 	} else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) {
1440 		kvm_save_lsx(&vcpu->arch.fpu);
1441 		vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU);
1442 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX);
1443 
1444 		/* Disable LSX & FPU */
1445 		clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN);
1446 	} else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1447 		kvm_save_fpu(&vcpu->arch.fpu);
1448 		vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU;
1449 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);
1450 
1451 		/* Disable FPU */
1452 		clear_csr_euen(CSR_EUEN_FPEN);
1453 	}
1454 	kvm_lose_lbt(vcpu);
1455 
1456 	preempt_enable();
1457 }
1458 
1459 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
1460 {
1461 	int intr = (int)irq->irq;
1462 
1463 	if (intr > 0)
1464 		kvm_queue_irq(vcpu, intr);
1465 	else if (intr < 0)
1466 		kvm_dequeue_irq(vcpu, -intr);
1467 	else {
1468 		kvm_err("%s: invalid interrupt ioctl %d\n", __func__, irq->irq);
1469 		return -EINVAL;
1470 	}
1471 
1472 	kvm_vcpu_kick(vcpu);
1473 
1474 	return 0;
1475 }
1476 
1477 long kvm_arch_vcpu_async_ioctl(struct file *filp,
1478 			       unsigned int ioctl, unsigned long arg)
1479 {
1480 	void __user *argp = (void __user *)arg;
1481 	struct kvm_vcpu *vcpu = filp->private_data;
1482 
1483 	if (ioctl == KVM_INTERRUPT) {
1484 		struct kvm_interrupt irq;
1485 
1486 		if (copy_from_user(&irq, argp, sizeof(irq)))
1487 			return -EFAULT;
1488 
1489 		kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq);
1490 
1491 		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
1492 	}
1493 
1494 	return -ENOIOCTLCMD;
1495 }
1496 
1497 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
1498 {
1499 	return 0;
1500 }
1501 
1502 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
1503 {
1504 	unsigned long timer_hz;
1505 	struct loongarch_csrs *csr;
1506 
1507 	vcpu->arch.vpid = 0;
1508 	vcpu->arch.flush_gpa = INVALID_GPA;
1509 
1510 	hrtimer_setup(&vcpu->arch.swtimer, kvm_swtimer_wakeup, CLOCK_MONOTONIC,
1511 		      HRTIMER_MODE_ABS_PINNED_HARD);
1512 
1513 	/* Get the host physical address of the PGD for the KVM secondary mmu */
1514 	vcpu->arch.kvm_pgd = __pa(vcpu->kvm->arch.pgd);
1515 
1516 	/*
1517 	 * Get the PGD for the primary mmu; a virtual address is used since it
1518 	 * is dereferenced after being loaded from CSR_PGD in the TLB exception fast path.
1519 	 */
1520 	vcpu->arch.host_pgd = (unsigned long)vcpu->kvm->mm->pgd;
1521 
1522 	vcpu->arch.handle_exit = kvm_handle_exit;
1523 	vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry;
1524 	vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL);
1525 	if (!vcpu->arch.csr)
1526 		return -ENOMEM;
1527 
1528 	/*
1529 	 * All KVM exceptions share one exception entry, and the host <-> guest
1530 	 * switch also switches the ECFG.VS field, so keep the host ECFG.VS here.
1531 	 */
1532 	vcpu->arch.host_ecfg = (read_csr_ecfg() & CSR_ECFG_VS);
1533 
1534 	/* Init */
1535 	vcpu->arch.last_sched_cpu = -1;
1536 
1537 	/* Init ipi_state lock */
1538 	spin_lock_init(&vcpu->arch.ipi_state.lock);
1539 
1540 	/*
1541 	 * Initialize guest register state to valid architectural reset state.
1542 	 */
1543 	timer_hz = calc_const_freq();
1544 	kvm_init_timer(vcpu, timer_hz);
1545 
1546 	/* Start the guest in direct-address (DA) mode, the architectural reset state */
1547 	csr = vcpu->arch.csr;
1548 	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CRMD, CSR_CRMD_DA);
1549 
1550 	/* Set cpuid */
1551 	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id);
1552 	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);
1553 
1554 	/* Start with no pending virtual guest interrupts */
1555 	csr->csrs[LOONGARCH_CSR_GINTC] = 0;
1556 
1557 	return 0;
1558 }
1559 
1560 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
1561 {
1562 }
1563 
1564 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1565 {
1566 	int cpu;
1567 	struct kvm_context *context;
1568 
1569 	hrtimer_cancel(&vcpu->arch.swtimer);
1570 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
1571 	kvm_drop_cpuid(vcpu);
1572 	kfree(vcpu->arch.csr);
1573 
1574 	/*
1575 	 * If the vCPU is freed and reused as another vCPU, we don't want the
1576 	 * matching pointer wrongly hanging around in last_vcpu.
1577 	 */
1578 	for_each_possible_cpu(cpu) {
1579 		context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
1580 		if (context->last_vcpu == vcpu)
1581 			context->last_vcpu = NULL;
1582 	}
1583 }
1584 
1585 static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1586 {
1587 	bool migrated;
1588 	struct kvm_context *context;
1589 	struct loongarch_csrs *csr = vcpu->arch.csr;
1590 
1591 	/*
1592 	 * Have we migrated to a different CPU?
1593 	 * If so, any old guest TLB state may be stale.
1594 	 */
1595 	migrated = (vcpu->arch.last_sched_cpu != cpu);
1596 
1597 	/*
1598 	 * Was this the last vCPU to run on this CPU?
1599 	 * If not, any old guest state from this vCPU will have been clobbered.
1600 	 */
1601 	context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
1602 	if (migrated || (context->last_vcpu != vcpu))
1603 		vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
1604 	context->last_vcpu = vcpu;
1605 
1606 	/* Restore timer state regardless */
1607 	kvm_restore_timer(vcpu);
1608 	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
1609 
1610 	/* Don't bother restoring registers multiple times unless necessary */
1611 	if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE)
1612 		return 0;
1613 
1614 	write_csr_gcntc((ulong)vcpu->kvm->arch.time_offset);
1615 
1616 	/* Restore guest CSR registers */
1617 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
1618 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
1619 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
1620 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_MISC);
1621 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
1622 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ERA);
1623 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADV);
1624 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADI);
1625 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
1626 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
1627 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
1628 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
1629 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
1630 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ASID);
1631 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
1632 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
1633 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
1634 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
1635 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
1636 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
1637 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
1638 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS0);
1639 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS1);
1640 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS2);
1641 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS3);
1642 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS4);
1643 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS5);
1644 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS6);
1645 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS7);
1646 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TMID);
1647 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
1648 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
1649 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
1650 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
1651 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
1652 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
1653 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
1654 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
1655 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
1656 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
1657 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
1658 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
1659 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
1660 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
1661 
1662 	/* Restore Root.GINTC from unused Guest.GINTC register */
1663 	write_csr_gintc(csr->csrs[LOONGARCH_CSR_GINTC]);
1664 
1665 	/*
1666 	 * Clear the linked-load bit to break interrupted atomics. This
1667 	 * prevents an SC on the next vCPU from succeeding by matching an LL
1668 	 * from the previous vCPU.
1669 	 */
1670 	if (vcpu->kvm->created_vcpus > 1)
1671 		set_gcsr_llbctl(CSR_LLBCTL_WCLLB);
1672 
1673 	vcpu->arch.aux_inuse |= KVM_LARCH_HWCSR_USABLE;
1674 
1675 	return 0;
1676 }
1677 
1678 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1679 {
1680 	unsigned long flags;
1681 
1682 	local_irq_save(flags);
1683 	/* Restore guest state to registers */
1684 	_kvm_vcpu_load(vcpu, cpu);
1685 	local_irq_restore(flags);
1686 }
1687 
1688 static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
1689 {
1690 	struct loongarch_csrs *csr = vcpu->arch.csr;
1691 
1692 	kvm_lose_fpu(vcpu);
1693 
1694 	/*
1695 	 * Update the software CSR state from hardware if it is stale. Most
1696 	 * CSR registers are kept unchanged across a process context switch,
1697 	 * except for registers such as the remaining timer tick value and
1698 	 * the injected interrupt state.
1699 	 */
1700 	if (vcpu->arch.aux_inuse & KVM_LARCH_SWCSR_LATEST)
1701 		goto out;
1702 
1703 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
1704 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
1705 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
1706 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_MISC);
1707 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
1708 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ERA);
1709 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADV);
1710 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADI);
1711 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
1712 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
1713 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
1714 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
1715 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
1716 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ASID);
1717 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
1718 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
1719 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
1720 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
1721 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
1722 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
1723 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
1724 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG1);
1725 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG2);
1726 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG3);
1727 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS0);
1728 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS1);
1729 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS2);
1730 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS3);
1731 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS4);
1732 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS5);
1733 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS6);
1734 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS7);
1735 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TMID);
1736 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
1737 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
1738 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
1739 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
1740 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
1741 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
1742 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
1743 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
1744 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
1745 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
1746 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
1747 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
1748 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
1749 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
1750 
1751 	vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST;
1752 
1753 out:
1754 	kvm_save_timer(vcpu);
1755 	/* Save Root.GINTC into unused Guest.GINTC register */
1756 	csr->csrs[LOONGARCH_CSR_GINTC] = read_csr_gintc();
1757 
1758 	return 0;
1759 }
1760 
1761 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1762 {
1763 	int cpu;
1764 	unsigned long flags;
1765 
1766 	local_irq_save(flags);
1767 	cpu = smp_processor_id();
1768 	vcpu->arch.last_sched_cpu = cpu;
1769 
1770 	/* Save guest state in registers */
1771 	_kvm_vcpu_put(vcpu, cpu);
1772 	local_irq_restore(flags);
1773 }
1774 
1775 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
1776 {
1777 	int r = -EINTR;
1778 	struct kvm_run *run = vcpu->run;
1779 
1780 	if (vcpu->mmio_needed) {
1781 		if (!vcpu->mmio_is_write)
1782 			kvm_complete_mmio_read(vcpu, run);
1783 		vcpu->mmio_needed = 0;
1784 	}
1785 
1786 	switch (run->exit_reason) {
1787 	case KVM_EXIT_HYPERCALL:
1788 		kvm_complete_user_service(vcpu, run);
1789 		break;
1790 	case KVM_EXIT_LOONGARCH_IOCSR:
1791 		if (!run->iocsr_io.is_write)
1792 			kvm_complete_iocsr_read(vcpu, run);
1793 		break;
1794 	}
1795 
1796 	if (!vcpu->wants_to_run)
1797 		return r;
1798 
1799 	/* Clear exit_reason */
1800 	run->exit_reason = KVM_EXIT_UNKNOWN;
1801 	lose_fpu(1);
1802 	vcpu_load(vcpu);
1803 	kvm_sigset_activate(vcpu);
1804 	r = kvm_pre_enter_guest(vcpu);
1805 	if (r != RESUME_GUEST)
1806 		goto out;
1807 
1808 	guest_timing_enter_irqoff();
1809 	guest_state_enter_irqoff();
1810 	trace_kvm_enter(vcpu);
1811 	r = kvm_loongarch_ops->enter_guest(run, vcpu);
1812 
1813 	trace_kvm_out(vcpu);
1814 	/*
1815 	 * The guest exit has already been recorded in kvm_handle_exit();
1816 	 * its return value must not be RESUME_GUEST at this point
1817 	 */
1818 	local_irq_enable();
1819 out:
1820 	kvm_sigset_deactivate(vcpu);
1821 	vcpu_put(vcpu);
1822 
1823 	return r;
1824 }
1825