xref: /linux/arch/loongarch/kvm/vcpu.c (revision b66451723c45b791fd2824d1b8f62fe498989e23)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
4  */
5 
6 #include <linux/kvm_host.h>
7 #include <linux/entry-kvm.h>
8 #include <asm/fpu.h>
9 #include <asm/lbt.h>
10 #include <asm/loongarch.h>
11 #include <asm/setup.h>
12 #include <asm/time.h>
13 
14 #define CREATE_TRACE_POINTS
15 #include "trace.h"
16 
17 const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
18 	KVM_GENERIC_VCPU_STATS(),
19 	STATS_DESC_COUNTER(VCPU, int_exits),
20 	STATS_DESC_COUNTER(VCPU, idle_exits),
21 	STATS_DESC_COUNTER(VCPU, cpucfg_exits),
22 	STATS_DESC_COUNTER(VCPU, signal_exits),
23 	STATS_DESC_COUNTER(VCPU, hypercall_exits),
24 	STATS_DESC_COUNTER(VCPU, ipi_read_exits),
25 	STATS_DESC_COUNTER(VCPU, ipi_write_exits),
26 	STATS_DESC_COUNTER(VCPU, eiointc_read_exits),
27 	STATS_DESC_COUNTER(VCPU, eiointc_write_exits),
28 	STATS_DESC_COUNTER(VCPU, pch_pic_read_exits),
29 	STATS_DESC_COUNTER(VCPU, pch_pic_write_exits)
30 };
31 
32 const struct kvm_stats_header kvm_vcpu_stats_header = {
33 	.name_size = KVM_STATS_NAME_SIZE,
34 	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
35 	.id_offset = sizeof(struct kvm_stats_header),
36 	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
37 	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
38 		       sizeof(kvm_vcpu_stats_desc),
39 };
40 
41 static inline void kvm_save_host_pmu(struct kvm_vcpu *vcpu)
42 {
43 	struct kvm_context *context;
44 
45 	context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
46 	context->perf_cntr[0] = read_csr_perfcntr0();
47 	context->perf_cntr[1] = read_csr_perfcntr1();
48 	context->perf_cntr[2] = read_csr_perfcntr2();
49 	context->perf_cntr[3] = read_csr_perfcntr3();
50 	context->perf_ctrl[0] = write_csr_perfctrl0(0);
51 	context->perf_ctrl[1] = write_csr_perfctrl1(0);
52 	context->perf_ctrl[2] = write_csr_perfctrl2(0);
53 	context->perf_ctrl[3] = write_csr_perfctrl3(0);
54 }
55 
56 static inline void kvm_restore_host_pmu(struct kvm_vcpu *vcpu)
57 {
58 	struct kvm_context *context;
59 
60 	context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
61 	write_csr_perfcntr0(context->perf_cntr[0]);
62 	write_csr_perfcntr1(context->perf_cntr[1]);
63 	write_csr_perfcntr2(context->perf_cntr[2]);
64 	write_csr_perfcntr3(context->perf_cntr[3]);
65 	write_csr_perfctrl0(context->perf_ctrl[0]);
66 	write_csr_perfctrl1(context->perf_ctrl[1]);
67 	write_csr_perfctrl2(context->perf_ctrl[2]);
68 	write_csr_perfctrl3(context->perf_ctrl[3]);
69 }
70 
71 
72 static inline void kvm_save_guest_pmu(struct kvm_vcpu *vcpu)
73 {
74 	struct loongarch_csrs *csr = vcpu->arch.csr;
75 
76 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
77 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
78 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
79 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
80 	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
81 	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
82 	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
83 	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
84 }
85 
86 static inline void kvm_restore_guest_pmu(struct kvm_vcpu *vcpu)
87 {
88 	struct loongarch_csrs *csr = vcpu->arch.csr;
89 
90 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
91 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
92 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
93 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
94 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
95 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
96 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
97 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
98 }
99 
100 static int kvm_own_pmu(struct kvm_vcpu *vcpu)
101 {
102 	unsigned long val;
103 
104 	if (!kvm_guest_has_pmu(&vcpu->arch))
105 		return -EINVAL;
106 
107 	kvm_save_host_pmu(vcpu);
108 
109 	/* Pass performance counters PM0-PM(num) through to the guest */
110 	val = read_csr_gcfg() & ~CSR_GCFG_GPERF;
111 	val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT;
112 	write_csr_gcfg(val);
113 
114 	kvm_restore_guest_pmu(vcpu);
115 
116 	return 0;
117 }
118 
119 static void kvm_lose_pmu(struct kvm_vcpu *vcpu)
120 {
121 	unsigned long val;
122 	struct loongarch_csrs *csr = vcpu->arch.csr;
123 
124 	if (!(vcpu->arch.aux_inuse & KVM_LARCH_PMU))
125 		return;
126 
127 	kvm_save_guest_pmu(vcpu);
128 
129 	/* Disable pmu access from guest */
130 	write_csr_gcfg(read_csr_gcfg() & ~CSR_GCFG_GPERF);
131 
132 	/*
133 	 * Clear KVM_LARCH_PMU if the guest is not using the PMU CSRs when
134 	 * exiting the guest, so that PMU CSR context does not need to be
135 	 * handled on the next trap into the guest.
136 	 */
137 	val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
138 	val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
139 	val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
140 	val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
141 	if (!(val & KVM_PMU_EVENT_ENABLED))
142 		vcpu->arch.aux_inuse &= ~KVM_LARCH_PMU;
143 
144 	kvm_restore_host_pmu(vcpu);
145 }
146 
147 static void kvm_restore_pmu(struct kvm_vcpu *vcpu)
148 {
149 	if ((vcpu->arch.aux_inuse & KVM_LARCH_PMU))
150 		kvm_make_request(KVM_REQ_PMU, vcpu);
151 }
152 
153 static void kvm_check_pmu(struct kvm_vcpu *vcpu)
154 {
155 	if (kvm_check_request(KVM_REQ_PMU, vcpu)) {
156 		kvm_own_pmu(vcpu);
157 		vcpu->arch.aux_inuse |= KVM_LARCH_PMU;
158 	}
159 }
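/*
 * Summary of the PMU handling above: guest PMU context is switched lazily.
 * Once the guest's PMU CSRs are in use, KVM_REQ_PMU is raised; on the next
 * guest entry kvm_check_pmu() calls kvm_own_pmu(), which saves the host
 * counters, grants counter access through CSR.GCFG.GPERF and restores the
 * guest counters.  On every exit kvm_lose_pmu() saves the guest counters,
 * revokes counter access and, if no guest counter is still enabled, clears
 * KVM_LARCH_PMU so later entries can skip the PMU context switch.
 */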
160 
161 static void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
162 {
163 	u32 version;
164 	u64 steal;
165 	gpa_t gpa;
166 	struct kvm_memslots *slots;
167 	struct kvm_steal_time __user *st;
168 	struct gfn_to_hva_cache *ghc;
169 
170 	ghc = &vcpu->arch.st.cache;
171 	gpa = vcpu->arch.st.guest_addr;
172 	if (!(gpa & KVM_STEAL_PHYS_VALID))
173 		return;
174 
175 	gpa &= KVM_STEAL_PHYS_MASK;
176 	slots = kvm_memslots(vcpu->kvm);
177 	if (slots->generation != ghc->generation || gpa != ghc->gpa) {
178 		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) {
179 			ghc->gpa = INVALID_GPA;
180 			return;
181 		}
182 	}
183 
184 	st = (struct kvm_steal_time __user *)ghc->hva;
185 	unsafe_get_user(version, &st->version, out);
186 	if (version & 1)
187 		version += 1; /* first time write, random junk */
188 
189 	version += 1;
190 	unsafe_put_user(version, &st->version, out);
191 	smp_wmb();
192 
193 	unsafe_get_user(steal, &st->steal, out);
194 	steal += current->sched_info.run_delay - vcpu->arch.st.last_steal;
195 	vcpu->arch.st.last_steal = current->sched_info.run_delay;
196 	unsafe_put_user(steal, &st->steal, out);
197 
198 	smp_wmb();
199 	version += 1;
200 	unsafe_put_user(version, &st->version, out);
201 out:
202 	mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
203 }
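/*
 * The version field above acts as a simple seqcount: it is odd while the
 * record is being updated and even when it is stable.  As an illustration
 * only (this is not code used by KVM itself), a guest could read the record
 * roughly as follows, retrying whenever it races with an update:
 *
 *	do {
 *		version = READ_ONCE(st->version);
 *		smp_rmb();
 *		steal = READ_ONCE(st->steal);
 *		smp_rmb();
 *	} while ((version & 1) || version != READ_ONCE(st->version));
 */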
204 
205 /*
206  * kvm_check_requests - check and handle pending vCPU requests
207  *
208  * Return: RESUME_GUEST if we should enter the guest
209  *         RESUME_HOST  if we should exit to userspace
210  */
211 static int kvm_check_requests(struct kvm_vcpu *vcpu)
212 {
213 	if (!kvm_request_pending(vcpu))
214 		return RESUME_GUEST;
215 
216 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
217 		vcpu->arch.vpid = 0;  /* Drop vpid for this vCPU */
218 
219 	if (kvm_dirty_ring_check_request(vcpu))
220 		return RESUME_HOST;
221 
222 	if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
223 		kvm_update_stolen_time(vcpu);
224 
225 	return RESUME_GUEST;
226 }
227 
228 static void kvm_late_check_requests(struct kvm_vcpu *vcpu)
229 {
230 	lockdep_assert_irqs_disabled();
231 	if (kvm_check_request(KVM_REQ_TLB_FLUSH_GPA, vcpu))
232 		if (vcpu->arch.flush_gpa != INVALID_GPA) {
233 			kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa);
234 			vcpu->arch.flush_gpa = INVALID_GPA;
235 		}
236 }
237 
238 /*
239  * Check and handle pending signals and vCPU requests, etc.
240  * Runs with IRQs and preemption enabled.
241  *
242  * Return: RESUME_GUEST if we should enter the guest
243  *         RESUME_HOST  if we should exit to userspace
244  *         < 0 if we should exit to userspace, where the return value
245  *         indicates an error
246  */
247 static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
248 {
249 	int idx, ret;
250 
251 	/*
252 	 * Check conditions before entering the guest
253 	 */
254 	ret = xfer_to_guest_mode_handle_work(vcpu);
255 	if (ret < 0)
256 		return ret;
257 
258 	idx = srcu_read_lock(&vcpu->kvm->srcu);
259 	ret = kvm_check_requests(vcpu);
260 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
261 
262 	return ret;
263 }
264 
265 /*
266  * Called with IRQs enabled
267  *
268  * Return: RESUME_GUEST if we should enter the guest, with IRQs disabled
269  *         anything else if we should exit to userspace
270  */
271 static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
272 {
273 	int ret;
274 
275 	do {
276 		ret = kvm_enter_guest_check(vcpu);
277 		if (ret != RESUME_GUEST)
278 			break;
279 
280 		/*
281 		 * Handle the vCPU timer and interrupts, check requests and
282 		 * check the VMID before the vCPU enters the guest
283 		 */
284 		local_irq_disable();
285 		kvm_deliver_intr(vcpu);
286 		kvm_deliver_exception(vcpu);
287 		/* Make sure the vcpu mode has been written */
288 		smp_store_mb(vcpu->mode, IN_GUEST_MODE);
289 		kvm_check_vpid(vcpu);
290 		kvm_check_pmu(vcpu);
291 
292 		/*
293 		 * Called after kvm_check_vpid(), since that function updates
294 		 * CSR.GSTAT, which is used by kvm_flush_tlb_gpa(), and it may
295 		 * also clear the pending KVM_REQ_TLB_FLUSH_GPA bit
296 		 */
297 		kvm_late_check_requests(vcpu);
298 		vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY);
299 		/* Clear KVM_LARCH_SWCSR_LATEST as CSRs will change when entering the guest */
300 		vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
301 
302 		if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) {
303 			kvm_lose_pmu(vcpu);
304 			/* make sure the vcpu mode has been written */
305 			smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE);
306 			local_irq_enable();
307 			ret = -EAGAIN;
308 		}
309 	} while (ret != RESUME_GUEST);
310 
311 	return ret;
312 }
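/*
 * Sequence performed by kvm_pre_enter_guest() above: deferred work and vCPU
 * requests are handled with IRQs enabled; then, with IRQs disabled, pending
 * interrupts and exceptions are delivered, vcpu->mode is published as
 * IN_GUEST_MODE, the VPID and PMU state are validated and the late (IRQs-off)
 * request check runs.  If new work or requests became pending in the meantime,
 * IRQs are re-enabled and the whole sequence is retried.
 */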
313 
314 /*
315  * Return 1 to resume the guest and "<= 0" to resume the host.
316  */
317 static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
318 {
319 	int ret = RESUME_GUEST;
320 	unsigned long estat = vcpu->arch.host_estat;
321 	u32 intr = estat & CSR_ESTAT_IS;
322 	u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
323 
324 	vcpu->mode = OUTSIDE_GUEST_MODE;
325 
326 	/* Set a default exit reason */
327 	run->exit_reason = KVM_EXIT_UNKNOWN;
328 
329 	kvm_lose_pmu(vcpu);
330 
331 	guest_timing_exit_irqoff();
332 	guest_state_exit_irqoff();
333 	local_irq_enable();
334 
335 	trace_kvm_exit(vcpu, ecode);
336 	if (ecode) {
337 		ret = kvm_handle_fault(vcpu, ecode);
338 	} else {
339 		WARN(!intr, "vm exiting with suspicious irq\n");
340 		++vcpu->stat.int_exits;
341 	}
342 
343 	if (ret == RESUME_GUEST)
344 		ret = kvm_pre_enter_guest(vcpu);
345 
346 	if (ret != RESUME_GUEST) {
347 		local_irq_disable();
348 		return ret;
349 	}
350 
351 	guest_timing_enter_irqoff();
352 	guest_state_enter_irqoff();
353 	trace_kvm_reenter(vcpu);
354 
355 	return RESUME_GUEST;
356 }
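/*
 * kvm_handle_exit() is installed as vcpu->arch.handle_exit in
 * kvm_arch_vcpu_create() below and is entered from the low-level guest exit
 * path with IRQs still disabled.  It either prepares a direct re-entry into
 * the guest (returning RESUME_GUEST with IRQs disabled again) or hands the
 * exit back to kvm_arch_vcpu_ioctl_run() so it can return to userspace.
 */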
357 
358 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
359 {
360 	return !!(vcpu->arch.irq_pending) &&
361 		vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE;
362 }
363 
364 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
365 {
366 	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
367 }
368 
369 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
370 {
371 	unsigned long val;
372 
373 	preempt_disable();
374 	val = gcsr_read(LOONGARCH_CSR_CRMD);
375 	preempt_enable();
376 
377 	return (val & CSR_PRMD_PPLV) == PLV_KERN;
378 }
379 
380 #ifdef CONFIG_GUEST_PERF_EVENTS
381 unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
382 {
383 	return vcpu->arch.pc;
384 }
385 
386 /*
387  * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
388  * arrived in guest context.  For LoongArch64, if the PMU is not passed through
389  * to the VM, any event that arrives while a vCPU is loaded is considered "in guest".
390  */
391 bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
392 {
393 	return (vcpu && !(vcpu->arch.aux_inuse & KVM_LARCH_PMU));
394 }
395 #endif
396 
397 bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
398 {
399 	return false;
400 }
401 
402 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
403 {
404 	return VM_FAULT_SIGBUS;
405 }
406 
407 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
408 				  struct kvm_translation *tr)
409 {
410 	return -EINVAL;
411 }
412 
413 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
414 {
415 	int ret;
416 
417 	/* Protect from TOD sync and vcpu_load/put() */
418 	preempt_disable();
419 	ret = kvm_pending_timer(vcpu) ||
420 		kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT) & (1 << INT_TI);
421 	preempt_enable();
422 
423 	return ret;
424 }
425 
426 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
427 {
428 	int i;
429 
430 	kvm_debug("vCPU Register Dump:\n");
431 	kvm_debug("\tPC = 0x%08lx\n", vcpu->arch.pc);
432 	kvm_debug("\tExceptions: %08lx\n", vcpu->arch.irq_pending);
433 
434 	for (i = 0; i < 32; i += 4) {
435 		kvm_debug("\tGPR%02d: %08lx %08lx %08lx %08lx\n", i,
436 		       vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1],
437 		       vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
438 	}
439 
440 	kvm_debug("\tCRMD: 0x%08lx, ESTAT: 0x%08lx\n",
441 		  kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD),
442 		  kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT));
443 
444 	kvm_debug("\tERA: 0x%08lx\n", kvm_read_hw_gcsr(LOONGARCH_CSR_ERA));
445 
446 	return 0;
447 }
448 
449 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
450 				struct kvm_mp_state *mp_state)
451 {
452 	*mp_state = vcpu->arch.mp_state;
453 
454 	return 0;
455 }
456 
457 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
458 				struct kvm_mp_state *mp_state)
459 {
460 	int ret = 0;
461 
462 	switch (mp_state->mp_state) {
463 	case KVM_MP_STATE_RUNNABLE:
464 		vcpu->arch.mp_state = *mp_state;
465 		break;
466 	default:
467 		ret = -EINVAL;
468 	}
469 
470 	return ret;
471 }
472 
473 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
474 					struct kvm_guest_debug *dbg)
475 {
476 	if (dbg->control & ~KVM_GUESTDBG_VALID_MASK)
477 		return -EINVAL;
478 
479 	if (dbg->control & KVM_GUESTDBG_ENABLE)
480 		vcpu->guest_debug = dbg->control;
481 	else
482 		vcpu->guest_debug = 0;
483 
484 	return 0;
485 }
486 
487 static inline int kvm_set_cpuid(struct kvm_vcpu *vcpu, u64 val)
488 {
489 	int cpuid;
490 	struct kvm_phyid_map *map;
491 	struct loongarch_csrs *csr = vcpu->arch.csr;
492 
493 	if (val >= KVM_MAX_PHYID)
494 		return -EINVAL;
495 
496 	map = vcpu->kvm->arch.phyid_map;
497 	cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);
498 
499 	spin_lock(&vcpu->kvm->arch.phyid_map_lock);
500 	if ((cpuid < KVM_MAX_PHYID) && map->phys_map[cpuid].enabled) {
501 		/* Discard duplicated CPUID set operation */
502 		if (cpuid == val) {
503 			spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
504 			return 0;
505 		}
506 
507 		/*
508 		 * The CPUID has already been set;
509 		 * forbid changing to a different CPUID at runtime
510 		 */
511 		spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
512 		return -EINVAL;
513 	}
514 
515 	if (map->phys_map[val].enabled) {
516 		/* Discard duplicated CPUID set operation */
517 		if (vcpu == map->phys_map[val].vcpu) {
518 			spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
519 			return 0;
520 		}
521 
522 		/*
523 		 * The new CPUID is already in use by another vcpu;
524 		 * forbid sharing the same CPUID between different vcpus
525 		 */
526 		spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
527 		return -EINVAL;
528 	}
529 
530 	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, val);
531 	map->phys_map[val].enabled	= true;
532 	map->phys_map[val].vcpu		= vcpu;
533 	spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
534 
535 	return 0;
536 }
537 
538 static inline void kvm_drop_cpuid(struct kvm_vcpu *vcpu)
539 {
540 	int cpuid;
541 	struct kvm_phyid_map *map;
542 	struct loongarch_csrs *csr = vcpu->arch.csr;
543 
544 	map = vcpu->kvm->arch.phyid_map;
545 	cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);
546 
547 	if (cpuid >= KVM_MAX_PHYID)
548 		return;
549 
550 	spin_lock(&vcpu->kvm->arch.phyid_map_lock);
551 	if (map->phys_map[cpuid].enabled) {
552 		map->phys_map[cpuid].vcpu = NULL;
553 		map->phys_map[cpuid].enabled = false;
554 		kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);
555 	}
556 	spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
557 }
558 
559 struct kvm_vcpu *kvm_get_vcpu_by_cpuid(struct kvm *kvm, int cpuid)
560 {
561 	struct kvm_phyid_map *map;
562 
563 	if (cpuid >= KVM_MAX_PHYID)
564 		return NULL;
565 
566 	map = kvm->arch.phyid_map;
567 	if (!map->phys_map[cpuid].enabled)
568 		return NULL;
569 
570 	return map->phys_map[cpuid].vcpu;
571 }
572 
573 static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val)
574 {
575 	unsigned long gintc;
576 	struct loongarch_csrs *csr = vcpu->arch.csr;
577 
578 	if (get_gcsr_flag(id) & INVALID_GCSR)
579 		return -EINVAL;
580 
581 	if (id == LOONGARCH_CSR_ESTAT) {
582 		preempt_disable();
583 		vcpu_load(vcpu);
584 		/*
585 		 * Sync pending interrupts into ESTAT so that interrupt state
586 		 * is preserved during the VM migration stage
587 		 */
588 		kvm_deliver_intr(vcpu);
589 		vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
590 		vcpu_put(vcpu);
591 		preempt_enable();
592 
593 		/* ESTAT IP0~IP7 are taken from GINTC */
594 		gintc = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC) & 0xff;
595 		*val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT) | (gintc << 2);
596 		return 0;
597 	}
598 
599 	/*
600 	 * Return the software CSR state, since software state is kept
601 	 * consistent with the hardware state for synchronous ioctls
602 	 */
603 	*val = kvm_read_sw_gcsr(csr, id);
604 
605 	return 0;
606 }
607 
608 static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
609 {
610 	int ret = 0, gintc;
611 	struct loongarch_csrs *csr = vcpu->arch.csr;
612 
613 	if (get_gcsr_flag(id) & INVALID_GCSR)
614 		return -EINVAL;
615 
616 	if (id == LOONGARCH_CSR_CPUID)
617 		return kvm_set_cpuid(vcpu, val);
618 
619 	if (id == LOONGARCH_CSR_ESTAT) {
620 		/* ESTAT IP0~IP7 are injected through GINTC */
621 		gintc = (val >> 2) & 0xff;
622 		kvm_set_sw_gcsr(csr, LOONGARCH_CSR_GINTC, gintc);
623 
624 		gintc = val & ~(0xffUL << 2);
625 		kvm_set_sw_gcsr(csr, LOONGARCH_CSR_ESTAT, gintc);
626 
627 		return ret;
628 	}
629 
630 	kvm_write_sw_gcsr(csr, id, val);
631 
632 	/*
633 	 * After modifying the vcpu's PMU CSR values, request KVM_REQ_PMU
634 	 * if any of the PMU counters is now enabled.
635 	 */
636 	if (id >= LOONGARCH_CSR_PERFCTRL0 && id <= LOONGARCH_CSR_PERFCNTR3) {
637 		unsigned long val;
638 
639 		val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0) |
640 		      kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1) |
641 		      kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2) |
642 		      kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
643 
644 		if (val & KVM_PMU_EVENT_ENABLED)
645 			kvm_make_request(KVM_REQ_PMU, vcpu);
646 	}
647 
648 	return ret;
649 }
650 
651 static int _kvm_get_cpucfg_mask(int id, u64 *v)
652 {
653 	if (id < 0 || id >= KVM_MAX_CPUCFG_REGS)
654 		return -EINVAL;
655 
656 	switch (id) {
657 	case LOONGARCH_CPUCFG0:
658 		*v = GENMASK(31, 0);
659 		return 0;
660 	case LOONGARCH_CPUCFG1:
661 		/* CPUCFG1_MSGINT is not supported by KVM */
662 		*v = GENMASK(25, 0);
663 		return 0;
664 	case LOONGARCH_CPUCFG2:
665 		/* CPUCFG2 features unconditionally supported by KVM */
666 		*v = CPUCFG2_FP     | CPUCFG2_FPSP  | CPUCFG2_FPDP     |
667 		     CPUCFG2_FPVERS | CPUCFG2_LLFTP | CPUCFG2_LLFTPREV |
668 		     CPUCFG2_LSPW | CPUCFG2_LAM;
669 		/*
670 		 * For the ISA extensions listed below, if one is supported
671 		 * by the host, then it is also supported by KVM.
672 		 */
673 		if (cpu_has_lsx)
674 			*v |= CPUCFG2_LSX;
675 		if (cpu_has_lasx)
676 			*v |= CPUCFG2_LASX;
677 		if (cpu_has_lbt_x86)
678 			*v |= CPUCFG2_X86BT;
679 		if (cpu_has_lbt_arm)
680 			*v |= CPUCFG2_ARMBT;
681 		if (cpu_has_lbt_mips)
682 			*v |= CPUCFG2_MIPSBT;
683 		if (cpu_has_ptw)
684 			*v |= CPUCFG2_PTW;
685 
686 		return 0;
687 	case LOONGARCH_CPUCFG3:
688 		*v = GENMASK(16, 0);
689 		return 0;
690 	case LOONGARCH_CPUCFG4:
691 	case LOONGARCH_CPUCFG5:
692 		*v = GENMASK(31, 0);
693 		return 0;
694 	case LOONGARCH_CPUCFG6:
695 		if (cpu_has_pmp)
696 			*v = GENMASK(14, 0);
697 		else
698 			*v = 0;
699 		return 0;
700 	case LOONGARCH_CPUCFG16:
701 		*v = GENMASK(16, 0);
702 		return 0;
703 	case LOONGARCH_CPUCFG17 ... LOONGARCH_CPUCFG20:
704 		*v = GENMASK(30, 0);
705 		return 0;
706 	default:
707 		/*
708 		 * CPUCFG bits should be zero if reserved by HW or not
709 		 * supported by KVM.
710 		 */
711 		*v = 0;
712 		return 0;
713 	}
714 }
715 
716 static int kvm_check_cpucfg(int id, u64 val)
717 {
718 	int ret;
719 	u64 mask = 0;
720 
721 	ret = _kvm_get_cpucfg_mask(id, &mask);
722 	if (ret)
723 		return ret;
724 
725 	if (val & ~mask)
726 		/* Unsupported features and/or the higher 32 bits should not be set */
727 		return -EINVAL;
728 
729 	switch (id) {
730 	case LOONGARCH_CPUCFG2:
731 		if (!(val & CPUCFG2_LLFTP))
732 			/* Guests must have a constant timer */
733 			return -EINVAL;
734 		if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP)))
735 			/* Single and double precision floating point must both be set when FP is enabled */
736 			return -EINVAL;
737 		if ((val & CPUCFG2_LSX) && !(val & CPUCFG2_FP))
738 			/* LSX architecturally implies FP but val does not satisfy that */
739 			return -EINVAL;
740 		if ((val & CPUCFG2_LASX) && !(val & CPUCFG2_LSX))
741 			/* LASX architecturally implies LSX and FP but val does not satisfy that */
742 			return -EINVAL;
743 		return 0;
744 	case LOONGARCH_CPUCFG6:
745 		if (val & CPUCFG6_PMP) {
746 			u32 host = read_cpucfg(LOONGARCH_CPUCFG6);
747 			if ((val & CPUCFG6_PMBITS) != (host & CPUCFG6_PMBITS))
748 				return -EINVAL;
749 			if ((val & CPUCFG6_PMNUM) > (host & CPUCFG6_PMNUM))
750 				return -EINVAL;
751 			if ((val & CPUCFG6_UPM) && !(host & CPUCFG6_UPM))
752 				return -EINVAL;
753 		}
754 		return 0;
755 	default:
756 		/*
757 		 * Values for the other CPUCFG IDs are not being further validated
758 		 * besides the mask check above.
759 		 */
760 		return 0;
761 	}
762 }
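/*
 * For example, judging only by the checks above: a CPUCFG2 value of
 * CPUCFG2_FP | CPUCFG2_FPSP | CPUCFG2_FPDP | CPUCFG2_LLFTP is accepted,
 * CPUCFG2_FP | CPUCFG2_LLFTP alone is rejected (FPSP/FPDP missing), and any
 * value with CPUCFG2_LASX set but CPUCFG2_LSX clear is rejected as well.
 */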
763 
764 static int kvm_get_one_reg(struct kvm_vcpu *vcpu,
765 		const struct kvm_one_reg *reg, u64 *v)
766 {
767 	int id, ret = 0;
768 	u64 type = reg->id & KVM_REG_LOONGARCH_MASK;
769 
770 	switch (type) {
771 	case KVM_REG_LOONGARCH_CSR:
772 		id = KVM_GET_IOC_CSR_IDX(reg->id);
773 		ret = _kvm_getcsr(vcpu, id, v);
774 		break;
775 	case KVM_REG_LOONGARCH_CPUCFG:
776 		id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
777 		if (id >= 0 && id < KVM_MAX_CPUCFG_REGS)
778 			*v = vcpu->arch.cpucfg[id];
779 		else
780 			ret = -EINVAL;
781 		break;
782 	case KVM_REG_LOONGARCH_LBT:
783 		if (!kvm_guest_has_lbt(&vcpu->arch))
784 			return -ENXIO;
785 
786 		switch (reg->id) {
787 		case KVM_REG_LOONGARCH_LBT_SCR0:
788 			*v = vcpu->arch.lbt.scr0;
789 			break;
790 		case KVM_REG_LOONGARCH_LBT_SCR1:
791 			*v = vcpu->arch.lbt.scr1;
792 			break;
793 		case KVM_REG_LOONGARCH_LBT_SCR2:
794 			*v = vcpu->arch.lbt.scr2;
795 			break;
796 		case KVM_REG_LOONGARCH_LBT_SCR3:
797 			*v = vcpu->arch.lbt.scr3;
798 			break;
799 		case KVM_REG_LOONGARCH_LBT_EFLAGS:
800 			*v = vcpu->arch.lbt.eflags;
801 			break;
802 		case KVM_REG_LOONGARCH_LBT_FTOP:
803 			*v = vcpu->arch.fpu.ftop;
804 			break;
805 		default:
806 			ret = -EINVAL;
807 			break;
808 		}
809 		break;
810 	case KVM_REG_LOONGARCH_KVM:
811 		switch (reg->id) {
812 		case KVM_REG_LOONGARCH_COUNTER:
813 			*v = drdtime() + vcpu->kvm->arch.time_offset;
814 			break;
815 		case KVM_REG_LOONGARCH_DEBUG_INST:
816 			*v = INSN_HVCL | KVM_HCALL_SWDBG;
817 			break;
818 		default:
819 			ret = -EINVAL;
820 			break;
821 		}
822 		break;
823 	default:
824 		ret = -EINVAL;
825 		break;
826 	}
827 
828 	return ret;
829 }
830 
831 static int kvm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
832 {
833 	int ret = 0;
834 	u64 v, size = reg->id & KVM_REG_SIZE_MASK;
835 
836 	switch (size) {
837 	case KVM_REG_SIZE_U64:
838 		ret = kvm_get_one_reg(vcpu, reg, &v);
839 		if (ret)
840 			return ret;
841 		ret = put_user(v, (u64 __user *)(long)reg->addr);
842 		break;
843 	default:
844 		ret = -EINVAL;
845 		break;
846 	}
847 
848 	return ret;
849 }
850 
851 static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
852 			const struct kvm_one_reg *reg, u64 v)
853 {
854 	int id, ret = 0;
855 	u64 type = reg->id & KVM_REG_LOONGARCH_MASK;
856 
857 	switch (type) {
858 	case KVM_REG_LOONGARCH_CSR:
859 		id = KVM_GET_IOC_CSR_IDX(reg->id);
860 		ret = _kvm_setcsr(vcpu, id, v);
861 		break;
862 	case KVM_REG_LOONGARCH_CPUCFG:
863 		id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
864 		ret = kvm_check_cpucfg(id, v);
865 		if (ret)
866 			break;
867 		vcpu->arch.cpucfg[id] = (u32)v;
868 		if (id == LOONGARCH_CPUCFG6)
869 			vcpu->arch.max_pmu_csrid =
870 				LOONGARCH_CSR_PERFCTRL0 + 2 * kvm_get_pmu_num(&vcpu->arch) + 1;
871 		break;
872 	case KVM_REG_LOONGARCH_LBT:
873 		if (!kvm_guest_has_lbt(&vcpu->arch))
874 			return -ENXIO;
875 
876 		switch (reg->id) {
877 		case KVM_REG_LOONGARCH_LBT_SCR0:
878 			vcpu->arch.lbt.scr0 = v;
879 			break;
880 		case KVM_REG_LOONGARCH_LBT_SCR1:
881 			vcpu->arch.lbt.scr1 = v;
882 			break;
883 		case KVM_REG_LOONGARCH_LBT_SCR2:
884 			vcpu->arch.lbt.scr2 = v;
885 			break;
886 		case KVM_REG_LOONGARCH_LBT_SCR3:
887 			vcpu->arch.lbt.scr3 = v;
888 			break;
889 		case KVM_REG_LOONGARCH_LBT_EFLAGS:
890 			vcpu->arch.lbt.eflags = v;
891 			break;
892 		case KVM_REG_LOONGARCH_LBT_FTOP:
893 			vcpu->arch.fpu.ftop = v;
894 			break;
895 		default:
896 			ret = -EINVAL;
897 			break;
898 		}
899 		break;
900 	case KVM_REG_LOONGARCH_KVM:
901 		switch (reg->id) {
902 		case KVM_REG_LOONGARCH_COUNTER:
903 			/*
904 			 * The guest timer offset belongs to the board (VM), not to a vcpu;
905 			 * on an SMP system it is only set once, by the first vcpu
906 			 */
907 			if (vcpu->vcpu_id == 0)
908 				vcpu->kvm->arch.time_offset = (signed long)(v - drdtime());
909 			break;
910 		case KVM_REG_LOONGARCH_VCPU_RESET:
911 			vcpu->arch.st.guest_addr = 0;
912 			memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
913 			memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
914 
915 			/*
916 			 * On vCPU reset, clear the ESTAT and GINTC registers here;
917 			 * other CSR registers are cleared via _kvm_setcsr().
918 			 */
919 			kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_GINTC, 0);
920 			kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_ESTAT, 0);
921 			break;
922 		default:
923 			ret = -EINVAL;
924 			break;
925 		}
926 		break;
927 	default:
928 		ret = -EINVAL;
929 		break;
930 	}
931 
932 	return ret;
933 }
934 
935 static int kvm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
936 {
937 	int ret = 0;
938 	u64 v, size = reg->id & KVM_REG_SIZE_MASK;
939 
940 	switch (size) {
941 	case KVM_REG_SIZE_U64:
942 		ret = get_user(v, (u64 __user *)(long)reg->addr);
943 		if (ret)
944 			return ret;
945 		break;
946 	default:
947 		return -EINVAL;
948 	}
949 
950 	return kvm_set_one_reg(vcpu, reg, v);
951 }
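/*
 * Userspace reaches the register accessors above through the KVM_GET_ONE_REG
 * and KVM_SET_ONE_REG vCPU ioctls.  A rough sketch of reading the guest
 * counter (error handling omitted; this assumes KVM_REG_LOONGARCH_COUNTER
 * already carries the KVM_REG_SIZE_U64 encoding checked in kvm_get_reg()):
 *
 *	struct kvm_one_reg reg;
 *	__u64 counter;
 *
 *	reg.id   = KVM_REG_LOONGARCH_COUNTER;
 *	reg.addr = (__u64)(unsigned long)&counter;
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */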
952 
953 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
954 {
955 	return -ENOIOCTLCMD;
956 }
957 
958 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
959 {
960 	return -ENOIOCTLCMD;
961 }
962 
963 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
964 {
965 	int i;
966 
967 	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
968 		regs->gpr[i] = vcpu->arch.gprs[i];
969 
970 	regs->pc = vcpu->arch.pc;
971 
972 	return 0;
973 }
974 
975 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
976 {
977 	int i;
978 
979 	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
980 		vcpu->arch.gprs[i] = regs->gpr[i];
981 
982 	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
983 	vcpu->arch.pc = regs->pc;
984 
985 	return 0;
986 }
987 
988 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
989 				     struct kvm_enable_cap *cap)
990 {
991 	/* FPU is enabled by default, will support LSX/LASX later. */
992 	return -EINVAL;
993 }
994 
995 static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu,
996 					 struct kvm_device_attr *attr)
997 {
998 	switch (attr->attr) {
999 	case LOONGARCH_CPUCFG2:
1000 	case LOONGARCH_CPUCFG6:
1001 		return 0;
1002 	case CPUCFG_KVM_FEATURE:
1003 		return 0;
1004 	default:
1005 		return -ENXIO;
1006 	}
1007 
1008 	return -ENXIO;
1009 }
1010 
1011 static int kvm_loongarch_pvtime_has_attr(struct kvm_vcpu *vcpu,
1012 					 struct kvm_device_attr *attr)
1013 {
1014 	if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
1015 			|| attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
1016 		return -ENXIO;
1017 
1018 	return 0;
1019 }
1020 
1021 static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
1022 				       struct kvm_device_attr *attr)
1023 {
1024 	int ret = -ENXIO;
1025 
1026 	switch (attr->group) {
1027 	case KVM_LOONGARCH_VCPU_CPUCFG:
1028 		ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr);
1029 		break;
1030 	case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
1031 		ret = kvm_loongarch_pvtime_has_attr(vcpu, attr);
1032 		break;
1033 	default:
1034 		break;
1035 	}
1036 
1037 	return ret;
1038 }
1039 
1040 static int kvm_loongarch_cpucfg_get_attr(struct kvm_vcpu *vcpu,
1041 					 struct kvm_device_attr *attr)
1042 {
1043 	int ret = 0;
1044 	uint64_t val;
1045 	uint64_t __user *uaddr = (uint64_t __user *)attr->addr;
1046 
1047 	switch (attr->attr) {
1048 	case 0 ... (KVM_MAX_CPUCFG_REGS - 1):
1049 		ret = _kvm_get_cpucfg_mask(attr->attr, &val);
1050 		if (ret)
1051 			return ret;
1052 		break;
1053 	case CPUCFG_KVM_FEATURE:
1054 		val = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK;
1055 		break;
1056 	default:
1057 		return -ENXIO;
1058 	}
1059 
1060 	put_user(val, uaddr);
1061 
1062 	return ret;
1063 }
1064 
1065 static int kvm_loongarch_pvtime_get_attr(struct kvm_vcpu *vcpu,
1066 					 struct kvm_device_attr *attr)
1067 {
1068 	u64 gpa;
1069 	u64 __user *user = (u64 __user *)attr->addr;
1070 
1071 	if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
1072 			|| attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
1073 		return -ENXIO;
1074 
1075 	gpa = vcpu->arch.st.guest_addr;
1076 	if (put_user(gpa, user))
1077 		return -EFAULT;
1078 
1079 	return 0;
1080 }
1081 
1082 static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu,
1083 				       struct kvm_device_attr *attr)
1084 {
1085 	int ret = -ENXIO;
1086 
1087 	switch (attr->group) {
1088 	case KVM_LOONGARCH_VCPU_CPUCFG:
1089 		ret = kvm_loongarch_cpucfg_get_attr(vcpu, attr);
1090 		break;
1091 	case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
1092 		ret = kvm_loongarch_pvtime_get_attr(vcpu, attr);
1093 		break;
1094 	default:
1095 		break;
1096 	}
1097 
1098 	return ret;
1099 }
1100 
1101 static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu,
1102 					 struct kvm_device_attr *attr)
1103 {
1104 	u64 val, valid;
1105 	u64 __user *user = (u64 __user *)attr->addr;
1106 	struct kvm *kvm = vcpu->kvm;
1107 
1108 	switch (attr->attr) {
1109 	case CPUCFG_KVM_FEATURE:
1110 		if (get_user(val, user))
1111 			return -EFAULT;
1112 
1113 		valid = LOONGARCH_PV_FEAT_MASK;
1114 		if (val & ~valid)
1115 			return -EINVAL;
1116 
1117 		/* All vCPUs need to set the same PV features */
1118 		if ((kvm->arch.pv_features & LOONGARCH_PV_FEAT_UPDATED)
1119 				&& ((kvm->arch.pv_features & valid) != val))
1120 			return -EINVAL;
1121 		kvm->arch.pv_features = val | LOONGARCH_PV_FEAT_UPDATED;
1122 		return 0;
1123 	default:
1124 		return -ENXIO;
1125 	}
1126 }
1127 
1128 static int kvm_loongarch_pvtime_set_attr(struct kvm_vcpu *vcpu,
1129 					 struct kvm_device_attr *attr)
1130 {
1131 	int idx, ret = 0;
1132 	u64 gpa, __user *user = (u64 __user *)attr->addr;
1133 	struct kvm *kvm = vcpu->kvm;
1134 
1135 	if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
1136 			|| attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
1137 		return -ENXIO;
1138 
1139 	if (get_user(gpa, user))
1140 		return -EFAULT;
1141 
1142 	if (gpa & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
1143 		return -EINVAL;
1144 
1145 	if (!(gpa & KVM_STEAL_PHYS_VALID)) {
1146 		vcpu->arch.st.guest_addr = gpa;
1147 		return 0;
1148 	}
1149 
1150 	/* Check the address is in a valid memslot */
1151 	idx = srcu_read_lock(&kvm->srcu);
1152 	if (kvm_is_error_hva(gfn_to_hva(kvm, gpa >> PAGE_SHIFT)))
1153 		ret = -EINVAL;
1154 	srcu_read_unlock(&kvm->srcu, idx);
1155 
1156 	if (!ret) {
1157 		vcpu->arch.st.guest_addr = gpa;
1158 		vcpu->arch.st.last_steal = current->sched_info.run_delay;
1159 		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
1160 	}
1161 
1162 	return ret;
1163 }
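/*
 * A minimal sketch of how a VMM could enable steal-time accounting through
 * the handler above (STEAL_TIME_GPA is a placeholder for a guest physical
 * address chosen by the VMM; error handling omitted):
 *
 *	__u64 gpa = STEAL_TIME_GPA | KVM_STEAL_PHYS_VALID;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_LOONGARCH_VCPU_PVTIME_CTRL,
 *		.attr  = KVM_LOONGARCH_VCPU_PVTIME_GPA,
 *		.addr  = (__u64)(unsigned long)&gpa,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
 */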
1164 
1165 static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu,
1166 				       struct kvm_device_attr *attr)
1167 {
1168 	int ret = -ENXIO;
1169 
1170 	switch (attr->group) {
1171 	case KVM_LOONGARCH_VCPU_CPUCFG:
1172 		ret = kvm_loongarch_cpucfg_set_attr(vcpu, attr);
1173 		break;
1174 	case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
1175 		ret = kvm_loongarch_pvtime_set_attr(vcpu, attr);
1176 		break;
1177 	default:
1178 		break;
1179 	}
1180 
1181 	return ret;
1182 }
1183 
1184 long kvm_arch_vcpu_ioctl(struct file *filp,
1185 			 unsigned int ioctl, unsigned long arg)
1186 {
1187 	long r;
1188 	struct kvm_device_attr attr;
1189 	void __user *argp = (void __user *)arg;
1190 	struct kvm_vcpu *vcpu = filp->private_data;
1191 
1192 	/*
1193 	 * Only the software CSR state should be modified here
1194 	 *
1195 	 * If any hardware CSR register were modified, a vcpu_load/vcpu_put
1196 	 * pair would be needed: the CSR registers are owned by this vcpu, so
1197 	 * after switching to other vcpus, those vcpus would need to reload them.
1198 	 *
1199 	 * When the software CSR state is modified, bit KVM_LARCH_HWCSR_USABLE
1200 	 * is cleared in vcpu->arch.aux_inuse, and vcpu_load checks the
1201 	 * aux_inuse flag and reloads the CSR registers from software.
1202 	 */
1203 
1204 	switch (ioctl) {
1205 	case KVM_SET_ONE_REG:
1206 	case KVM_GET_ONE_REG: {
1207 		struct kvm_one_reg reg;
1208 
1209 		r = -EFAULT;
1210 		if (copy_from_user(&reg, argp, sizeof(reg)))
1211 			break;
1212 		if (ioctl == KVM_SET_ONE_REG) {
1213 			r = kvm_set_reg(vcpu, &reg);
1214 			vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
1215 		} else
1216 			r = kvm_get_reg(vcpu, &reg);
1217 		break;
1218 	}
1219 	case KVM_ENABLE_CAP: {
1220 		struct kvm_enable_cap cap;
1221 
1222 		r = -EFAULT;
1223 		if (copy_from_user(&cap, argp, sizeof(cap)))
1224 			break;
1225 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
1226 		break;
1227 	}
1228 	case KVM_HAS_DEVICE_ATTR: {
1229 		r = -EFAULT;
1230 		if (copy_from_user(&attr, argp, sizeof(attr)))
1231 			break;
1232 		r = kvm_loongarch_vcpu_has_attr(vcpu, &attr);
1233 		break;
1234 	}
1235 	case KVM_GET_DEVICE_ATTR: {
1236 		r = -EFAULT;
1237 		if (copy_from_user(&attr, argp, sizeof(attr)))
1238 			break;
1239 		r = kvm_loongarch_vcpu_get_attr(vcpu, &attr);
1240 		break;
1241 	}
1242 	case KVM_SET_DEVICE_ATTR: {
1243 		r = -EFAULT;
1244 		if (copy_from_user(&attr, argp, sizeof(attr)))
1245 			break;
1246 		r = kvm_loongarch_vcpu_set_attr(vcpu, &attr);
1247 		break;
1248 	}
1249 	default:
1250 		r = -ENOIOCTLCMD;
1251 		break;
1252 	}
1253 
1254 	return r;
1255 }
1256 
1257 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1258 {
1259 	int i = 0;
1260 
1261 	fpu->fcc = vcpu->arch.fpu.fcc;
1262 	fpu->fcsr = vcpu->arch.fpu.fcsr;
1263 	for (i = 0; i < NUM_FPU_REGS; i++)
1264 		memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i], FPU_REG_WIDTH / 64);
1265 
1266 	return 0;
1267 }
1268 
1269 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1270 {
1271 	int i = 0;
1272 
1273 	vcpu->arch.fpu.fcc = fpu->fcc;
1274 	vcpu->arch.fpu.fcsr = fpu->fcsr;
1275 	for (i = 0; i < NUM_FPU_REGS; i++)
1276 		memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i], FPU_REG_WIDTH / 64);
1277 
1278 	return 0;
1279 }
1280 
1281 #ifdef CONFIG_CPU_HAS_LBT
1282 int kvm_own_lbt(struct kvm_vcpu *vcpu)
1283 {
1284 	if (!kvm_guest_has_lbt(&vcpu->arch))
1285 		return -EINVAL;
1286 
1287 	preempt_disable();
1288 	if (!(vcpu->arch.aux_inuse & KVM_LARCH_LBT)) {
1289 		set_csr_euen(CSR_EUEN_LBTEN);
1290 		_restore_lbt(&vcpu->arch.lbt);
1291 		vcpu->arch.aux_inuse |= KVM_LARCH_LBT;
1292 	}
1293 	preempt_enable();
1294 
1295 	return 0;
1296 }
1297 
1298 static void kvm_lose_lbt(struct kvm_vcpu *vcpu)
1299 {
1300 	preempt_disable();
1301 	if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) {
1302 		_save_lbt(&vcpu->arch.lbt);
1303 		clear_csr_euen(CSR_EUEN_LBTEN);
1304 		vcpu->arch.aux_inuse &= ~KVM_LARCH_LBT;
1305 	}
1306 	preempt_enable();
1307 }
1308 
1309 static void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr)
1310 {
1311 	/*
1312 	 * If TM mode is enabled, saving/restoring the TOP register will
1313 	 * trigger an LBT exception, so enable LBT here in advance
1314 	 */
1315 	if (fcsr & FPU_CSR_TM)
1316 		kvm_own_lbt(vcpu);
1317 }
1318 
1319 static void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu)
1320 {
1321 	if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1322 		if (vcpu->arch.aux_inuse & KVM_LARCH_LBT)
1323 			return;
1324 		kvm_check_fcsr(vcpu, read_fcsr(LOONGARCH_FCSR0));
1325 	}
1326 }
1327 #else
1328 static inline void kvm_lose_lbt(struct kvm_vcpu *vcpu) { }
1329 static inline void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr) { }
1330 static inline void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu) { }
1331 #endif
1332 
1333 /* Enable FPU and restore context */
1334 void kvm_own_fpu(struct kvm_vcpu *vcpu)
1335 {
1336 	preempt_disable();
1337 
1338 	/*
1339 	 * Enable the FPU for the guest, checking FCSR first so that LBT is
1340 	 * enabled in advance if the guest uses TM mode
1341 	 */
1342 	kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1343 	set_csr_euen(CSR_EUEN_FPEN);
1344 
1345 	kvm_restore_fpu(&vcpu->arch.fpu);
1346 	vcpu->arch.aux_inuse |= KVM_LARCH_FPU;
1347 	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
1348 
1349 	preempt_enable();
1350 }
1351 
1352 #ifdef CONFIG_CPU_HAS_LSX
1353 /* Enable LSX and restore context */
1354 int kvm_own_lsx(struct kvm_vcpu *vcpu)
1355 {
1356 	if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch))
1357 		return -EINVAL;
1358 
1359 	preempt_disable();
1360 
1361 	/* Enable LSX for guest */
1362 	kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1363 	set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN);
1364 	switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1365 	case KVM_LARCH_FPU:
1366 		/*
1367 		 * Guest FPU state already loaded,
1368 		 * only restore upper LSX state
1369 		 */
1370 		_restore_lsx_upper(&vcpu->arch.fpu);
1371 		break;
1372 	default:
1373 		/*
1374 		 * Neither FP nor LSX is already active, restore full LSX state
1375 		 */
1376 		kvm_restore_lsx(&vcpu->arch.fpu);
1377 		break;
1378 	}
1379 
1380 	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX);
1381 	vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU;
1382 	preempt_enable();
1383 
1384 	return 0;
1385 }
1386 #endif
1387 
1388 #ifdef CONFIG_CPU_HAS_LASX
1389 /* Enable LASX and restore context */
1390 int kvm_own_lasx(struct kvm_vcpu *vcpu)
1391 {
1392 	if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch) || !kvm_guest_has_lasx(&vcpu->arch))
1393 		return -EINVAL;
1394 
1395 	preempt_disable();
1396 
1397 	kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1398 	set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
1399 	switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) {
1400 	case KVM_LARCH_LSX:
1401 	case KVM_LARCH_LSX | KVM_LARCH_FPU:
1402 		/* Guest LSX state already loaded, only restore upper LASX state */
1403 		_restore_lasx_upper(&vcpu->arch.fpu);
1404 		break;
1405 	case KVM_LARCH_FPU:
1406 		/* Guest FP state already loaded, only restore upper LSX & LASX state */
1407 		_restore_lsx_upper(&vcpu->arch.fpu);
1408 		_restore_lasx_upper(&vcpu->arch.fpu);
1409 		break;
1410 	default:
1411 		/* Neither FP nor LSX is already active, restore full LASX state */
1412 		kvm_restore_lasx(&vcpu->arch.fpu);
1413 		break;
1414 	}
1415 
1416 	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX);
1417 	vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU;
1418 	preempt_enable();
1419 
1420 	return 0;
1421 }
1422 #endif
1423 
1424 /* Save context and disable FPU */
1425 void kvm_lose_fpu(struct kvm_vcpu *vcpu)
1426 {
1427 	preempt_disable();
1428 
1429 	kvm_check_fcsr_alive(vcpu);
1430 	if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) {
1431 		kvm_save_lasx(&vcpu->arch.fpu);
1432 		vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX);
1433 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LASX);
1434 
1435 		/* Disable LASX & LSX & FPU */
1436 		clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
1437 	} else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) {
1438 		kvm_save_lsx(&vcpu->arch.fpu);
1439 		vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU);
1440 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX);
1441 
1442 		/* Disable LSX & FPU */
1443 		clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN);
1444 	} else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1445 		kvm_save_fpu(&vcpu->arch.fpu);
1446 		vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU;
1447 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);
1448 
1449 		/* Disable FPU */
1450 		clear_csr_euen(CSR_EUEN_FPEN);
1451 	}
1452 	kvm_lose_lbt(vcpu);
1453 
1454 	preempt_enable();
1455 }
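/*
 * Summary of the lazy FPU/SIMD handling above: the KVM_LARCH_FPU, KVM_LARCH_LSX
 * and KVM_LARCH_LASX bits in vcpu->arch.aux_inuse track which guest register
 * sets are currently live in hardware.  The kvm_own_*() helpers enable the
 * corresponding CSR.EUEN bits and restore only the state that is not already
 * loaded, while kvm_lose_fpu() saves the widest live state, clears the EUEN
 * enables and drops the aux_inuse bits before the host may touch the FPU.
 */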
1456 
1457 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
1458 {
1459 	int intr = (int)irq->irq;
1460 
1461 	if (intr > 0)
1462 		kvm_queue_irq(vcpu, intr);
1463 	else if (intr < 0)
1464 		kvm_dequeue_irq(vcpu, -intr);
1465 	else {
1466 		kvm_err("%s: invalid interrupt ioctl %d\n", __func__, irq->irq);
1467 		return -EINVAL;
1468 	}
1469 
1470 	kvm_vcpu_kick(vcpu);
1471 
1472 	return 0;
1473 }
1474 
1475 long kvm_arch_vcpu_async_ioctl(struct file *filp,
1476 			       unsigned int ioctl, unsigned long arg)
1477 {
1478 	void __user *argp = (void __user *)arg;
1479 	struct kvm_vcpu *vcpu = filp->private_data;
1480 
1481 	if (ioctl == KVM_INTERRUPT) {
1482 		struct kvm_interrupt irq;
1483 
1484 		if (copy_from_user(&irq, argp, sizeof(irq)))
1485 			return -EFAULT;
1486 
1487 		kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq);
1488 
1489 		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
1490 	}
1491 
1492 	return -ENOIOCTLCMD;
1493 }
1494 
1495 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
1496 {
1497 	return 0;
1498 }
1499 
1500 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
1501 {
1502 	unsigned long timer_hz;
1503 	struct loongarch_csrs *csr;
1504 
1505 	vcpu->arch.vpid = 0;
1506 	vcpu->arch.flush_gpa = INVALID_GPA;
1507 
1508 	hrtimer_setup(&vcpu->arch.swtimer, kvm_swtimer_wakeup, CLOCK_MONOTONIC,
1509 		      HRTIMER_MODE_ABS_PINNED_HARD);
1510 
1511 	/* Get GPA (=HVA) of PGD for kvm hypervisor */
1512 	vcpu->arch.kvm_pgd = __pa(vcpu->kvm->arch.pgd);
1513 
1514 	/*
1515 	 * Get the PGD for the primary MMU. The virtual address is used since
1516 	 * memory is accessed after loading from CSR_PGD in the TLB exception fast path.
1517 	 */
1518 	vcpu->arch.host_pgd = (unsigned long)vcpu->kvm->mm->pgd;
1519 
1520 	vcpu->arch.handle_exit = kvm_handle_exit;
1521 	vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry;
1522 	vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL);
1523 	if (!vcpu->arch.csr)
1524 		return -ENOMEM;
1525 
1526 	/*
1527 	 * All kvm exceptions share one exception entry, and the host <-> guest
1528 	 * switch also switches the ECFG.VS field, so keep the host ECFG.VS value here.
1529 	 */
1530 	vcpu->arch.host_ecfg = (read_csr_ecfg() & CSR_ECFG_VS);
1531 
1532 	/* Init */
1533 	vcpu->arch.last_sched_cpu = -1;
1534 
1535 	/* Init ipi_state lock */
1536 	spin_lock_init(&vcpu->arch.ipi_state.lock);
1537 
1538 	/*
1539 	 * Initialize guest register state to valid architectural reset state.
1540 	 */
1541 	timer_hz = calc_const_freq();
1542 	kvm_init_timer(vcpu, timer_hz);
1543 
1544 	/* Start the guest in direct address translation (DA) mode */
1545 	csr = vcpu->arch.csr;
1546 	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CRMD, CSR_CRMD_DA);
1547 
1548 	/* Set cpuid */
1549 	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id);
1550 	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);
1551 
1552 	/* Start with no pending virtual guest interrupts */
1553 	csr->csrs[LOONGARCH_CSR_GINTC] = 0;
1554 
1555 	return 0;
1556 }
1557 
1558 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
1559 {
1560 }
1561 
1562 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1563 {
1564 	int cpu;
1565 	struct kvm_context *context;
1566 
1567 	hrtimer_cancel(&vcpu->arch.swtimer);
1568 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
1569 	kvm_drop_cpuid(vcpu);
1570 	kfree(vcpu->arch.csr);
1571 
1572 	/*
1573 	 * If the vCPU is freed and reused as another vCPU, we don't want the
1574 	 * matching pointer wrongly hanging around in last_vcpu.
1575 	 */
1576 	for_each_possible_cpu(cpu) {
1577 		context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
1578 		if (context->last_vcpu == vcpu)
1579 			context->last_vcpu = NULL;
1580 	}
1581 }
1582 
1583 static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1584 {
1585 	bool migrated;
1586 	struct kvm_context *context;
1587 	struct loongarch_csrs *csr = vcpu->arch.csr;
1588 
1589 	/*
1590 	 * Have we migrated to a different CPU?
1591 	 * If so, any old guest TLB state may be stale.
1592 	 */
1593 	migrated = (vcpu->arch.last_sched_cpu != cpu);
1594 
1595 	/*
1596 	 * Was this the last vCPU to run on this CPU?
1597 	 * If not, any old guest state from this vCPU will have been clobbered.
1598 	 */
1599 	context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
1600 	if (migrated || (context->last_vcpu != vcpu))
1601 		vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
1602 	context->last_vcpu = vcpu;
1603 
1604 	/* Restore timer state regardless */
1605 	kvm_restore_timer(vcpu);
1606 	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
1607 
1608 	/* Restore hardware PMU CSRs */
1609 	kvm_restore_pmu(vcpu);
1610 
1611 	/* Don't bother restoring registers multiple times unless necessary */
1612 	if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE)
1613 		return 0;
1614 
1615 	write_csr_gcntc((ulong)vcpu->kvm->arch.time_offset);
1616 
1617 	/* Restore guest CSR registers */
1618 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
1619 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
1620 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
1621 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_MISC);
1622 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
1623 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ERA);
1624 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADV);
1625 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADI);
1626 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
1627 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
1628 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
1629 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
1630 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
1631 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ASID);
1632 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
1633 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
1634 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
1635 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
1636 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
1637 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
1638 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
1639 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS0);
1640 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS1);
1641 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS2);
1642 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS3);
1643 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS4);
1644 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS5);
1645 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS6);
1646 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS7);
1647 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TMID);
1648 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
1649 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
1650 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
1651 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
1652 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
1653 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
1654 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
1655 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
1656 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
1657 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
1658 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
1659 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
1660 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
1661 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
1662 
1663 	/* Restore Root.GINTC from unused Guest.GINTC register */
1664 	write_csr_gintc(csr->csrs[LOONGARCH_CSR_GINTC]);
1665 
1666 	/*
1667 	 * We should clear the linked-load bit to break interrupted atomics.
1668 	 * This prevents an SC on the next vCPU from succeeding by matching an
1669 	 * LL on the previous vCPU.
1670 	 */
1671 	if (vcpu->kvm->created_vcpus > 1)
1672 		set_gcsr_llbctl(CSR_LLBCTL_WCLLB);
1673 
1674 	vcpu->arch.aux_inuse |= KVM_LARCH_HWCSR_USABLE;
1675 
1676 	return 0;
1677 }
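/*
 * The KVM_LARCH_HWCSR_USABLE bit used above means the hardware guest CSRs
 * already hold this vCPU's state, so the full CSR restore is skipped when the
 * vCPU is re-loaded on the same physical CPU without another vCPU having run
 * there in between.  Migration to a different CPU, or another vCPU running on
 * this one, clears the bit and forces the full restore.
 */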
1678 
1679 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1680 {
1681 	unsigned long flags;
1682 
1683 	local_irq_save(flags);
1684 	/* Restore guest state to registers */
1685 	_kvm_vcpu_load(vcpu, cpu);
1686 	local_irq_restore(flags);
1687 }
1688 
1689 static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
1690 {
1691 	struct loongarch_csrs *csr = vcpu->arch.csr;
1692 
1693 	kvm_lose_fpu(vcpu);
1694 
1695 	/*
1696 	 * Update the software CSR state from hardware if it is stale.
1697 	 * Most CSR registers are kept unchanged during a process context
1698 	 * switch, except for CSR registers such as the remaining timer tick
1699 	 * value and the injected interrupt state.
1700 	 */
1701 	if (vcpu->arch.aux_inuse & KVM_LARCH_SWCSR_LATEST)
1702 		goto out;
1703 
1704 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
1705 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
1706 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
1707 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_MISC);
1708 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
1709 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ERA);
1710 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADV);
1711 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADI);
1712 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
1713 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
1714 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
1715 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
1716 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
1717 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ASID);
1718 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
1719 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
1720 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
1721 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
1722 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
1723 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
1724 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
1725 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG1);
1726 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG2);
1727 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG3);
1728 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS0);
1729 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS1);
1730 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS2);
1731 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS3);
1732 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS4);
1733 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS5);
1734 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS6);
1735 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS7);
1736 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TMID);
1737 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
1738 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
1739 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
1740 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
1741 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
1742 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
1743 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
1744 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
1745 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
1746 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
1747 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
1748 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
1749 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
1750 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
1751 
1752 	vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST;
1753 
1754 out:
1755 	kvm_save_timer(vcpu);
1756 	/* Save Root.GINTC into unused Guest.GINTC register */
1757 	csr->csrs[LOONGARCH_CSR_GINTC] = read_csr_gintc();
1758 
1759 	return 0;
1760 }
1761 
1762 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1763 {
1764 	int cpu;
1765 	unsigned long flags;
1766 
1767 	local_irq_save(flags);
1768 	cpu = smp_processor_id();
1769 	vcpu->arch.last_sched_cpu = cpu;
1770 
1771 	/* Save guest state in registers */
1772 	_kvm_vcpu_put(vcpu, cpu);
1773 	local_irq_restore(flags);
1774 }
1775 
1776 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
1777 {
1778 	int r = -EINTR;
1779 	struct kvm_run *run = vcpu->run;
1780 
1781 	if (vcpu->mmio_needed) {
1782 		if (!vcpu->mmio_is_write)
1783 			kvm_complete_mmio_read(vcpu, run);
1784 		vcpu->mmio_needed = 0;
1785 	}
1786 
1787 	switch (run->exit_reason) {
1788 	case KVM_EXIT_HYPERCALL:
1789 		kvm_complete_user_service(vcpu, run);
1790 		break;
1791 	case KVM_EXIT_LOONGARCH_IOCSR:
1792 		if (!run->iocsr_io.is_write)
1793 			kvm_complete_iocsr_read(vcpu, run);
1794 		break;
1795 	}
1796 
1797 	if (!vcpu->wants_to_run)
1798 		return r;
1799 
1800 	/* Clear exit_reason */
1801 	run->exit_reason = KVM_EXIT_UNKNOWN;
1802 	lose_fpu(1);
1803 	vcpu_load(vcpu);
1804 	kvm_sigset_activate(vcpu);
1805 	r = kvm_pre_enter_guest(vcpu);
1806 	if (r != RESUME_GUEST)
1807 		goto out;
1808 
1809 	guest_timing_enter_irqoff();
1810 	guest_state_enter_irqoff();
1811 	trace_kvm_enter(vcpu);
1812 	r = kvm_loongarch_ops->enter_guest(run, vcpu);
1813 
1814 	trace_kvm_out(vcpu);
1815 	/*
1816 	 * The guest exit was already recorded in kvm_handle_exit(), and its
1817 	 * return value must not be RESUME_GUEST here
1818 	 */
1819 	local_irq_enable();
1820 out:
1821 	kvm_sigset_deactivate(vcpu);
1822 	vcpu_put(vcpu);
1823 
1824 	return r;
1825 }
1826