xref: /linux/arch/loongarch/kvm/vcpu.c (revision 40ccd6aa3e2e05be93394e3cd560c718dedfcc77)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
4  */
5 
6 #include <linux/kvm_host.h>
7 #include <linux/entry-kvm.h>
8 #include <asm/fpu.h>
9 #include <asm/loongarch.h>
10 #include <asm/setup.h>
11 #include <asm/time.h>
12 
13 #define CREATE_TRACE_POINTS
14 #include "trace.h"
15 
16 const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
17 	KVM_GENERIC_VCPU_STATS(),
18 	STATS_DESC_COUNTER(VCPU, int_exits),
19 	STATS_DESC_COUNTER(VCPU, idle_exits),
20 	STATS_DESC_COUNTER(VCPU, cpucfg_exits),
21 	STATS_DESC_COUNTER(VCPU, signal_exits),
22 	STATS_DESC_COUNTER(VCPU, hypercall_exits)
23 };
24 
25 const struct kvm_stats_header kvm_vcpu_stats_header = {
26 	.name_size = KVM_STATS_NAME_SIZE,
27 	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
28 	.id_offset = sizeof(struct kvm_stats_header),
29 	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
30 	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
31 		       sizeof(kvm_vcpu_stats_desc),
32 };
33 
34 /*
35  * kvm_check_requests - check and handle pending vCPU requests
36  *
37  * Return: RESUME_GUEST if we should enter the guest
38  *         RESUME_HOST  if we should exit to userspace
39  */
40 static int kvm_check_requests(struct kvm_vcpu *vcpu)
41 {
42 	if (!kvm_request_pending(vcpu))
43 		return RESUME_GUEST;
44 
45 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
46 		vcpu->arch.vpid = 0;  /* Drop vpid for this vCPU */
47 
48 	if (kvm_dirty_ring_check_request(vcpu))
49 		return RESUME_HOST;
50 
51 	return RESUME_GUEST;
52 }
53 
54 /*
55  * Check and handle pending signals and vCPU requests, etc.
56  * Runs with irqs enabled and preemption enabled
57  *
58  * Return: RESUME_GUEST if we should enter the guest
59  *         RESUME_HOST  if we should exit to userspace
60  *         < 0 if we should exit to userspace, where the return value
61  *         indicates an error
62  */
63 static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
64 {
65 	int ret;
66 
67 	/*
68 	 * Check conditions before entering the guest
69 	 */
70 	ret = xfer_to_guest_mode_handle_work(vcpu);
71 	if (ret < 0)
72 		return ret;
73 
74 	ret = kvm_check_requests(vcpu);
75 
76 	return ret;
77 }
78 
79 /*
80  * Called with irq enabled
81  *
82  * Return: RESUME_GUEST if we should enter the guest, and irq disabled
83  *         Others if we should exit to userspace
84  */
85 static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
86 {
87 	int ret;
88 
89 	do {
90 		ret = kvm_enter_guest_check(vcpu);
91 		if (ret != RESUME_GUEST)
92 			break;
93 
94 		/*
95 		 * Handle vcpu timer, deliver interrupts, check requests and
96 		 * check the vpid before the vCPU enters the guest
97 		 */
98 		local_irq_disable();
99 		kvm_deliver_intr(vcpu);
100 		kvm_deliver_exception(vcpu);
101 		/* Make sure the vcpu mode has been written */
102 		smp_store_mb(vcpu->mode, IN_GUEST_MODE);
103 		kvm_check_vpid(vcpu);
104 		vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY);
105 		/* Clear KVM_LARCH_SWCSR_LATEST as CSRs will change when entering the guest */
106 		vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
107 
108 		if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) {
109 			/* Make sure the vcpu mode has been written */
110 			smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE);
111 			local_irq_enable();
112 			ret = -EAGAIN;
113 		}
114 	} while (ret != RESUME_GUEST);
115 
116 	return ret;
117 }
118 
119 /*
120  * Return RESUME_GUEST (1) to resume the guest, or "<= 0" to resume the host.
121  */
122 static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
123 {
124 	int ret = RESUME_GUEST;
125 	unsigned long estat = vcpu->arch.host_estat;
126 	u32 intr = estat & 0x1fff; /* Ignore NMI */
127 	u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
128 
129 	vcpu->mode = OUTSIDE_GUEST_MODE;
130 
131 	/* Set a default exit reason */
132 	run->exit_reason = KVM_EXIT_UNKNOWN;
133 
134 	guest_timing_exit_irqoff();
135 	guest_state_exit_irqoff();
136 	local_irq_enable();
137 
138 	trace_kvm_exit(vcpu, ecode);
139 	if (ecode) {
140 		ret = kvm_handle_fault(vcpu, ecode);
141 	} else {
142 		WARN(!intr, "vm exiting with suspicious irq\n");
143 		++vcpu->stat.int_exits;
144 	}
145 
146 	if (ret == RESUME_GUEST)
147 		ret = kvm_pre_enter_guest(vcpu);
148 
149 	if (ret != RESUME_GUEST) {
150 		local_irq_disable();
151 		return ret;
152 	}
153 
154 	guest_timing_enter_irqoff();
155 	guest_state_enter_irqoff();
156 	trace_kvm_reenter(vcpu);
157 
158 	return RESUME_GUEST;
159 }
160 
161 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
162 {
163 	return !!(vcpu->arch.irq_pending) &&
164 		vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE;
165 }
166 
167 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
168 {
169 	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
170 }
171 
172 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
173 {
174 	return false;
175 }
176 
177 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
178 {
179 	return VM_FAULT_SIGBUS;
180 }
181 
182 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
183 				  struct kvm_translation *tr)
184 {
185 	return -EINVAL;
186 }
187 
188 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
189 {
190 	int ret;
191 
192 	/* Protect from TOD sync and vcpu_load/put() */
193 	preempt_disable();
194 	ret = kvm_pending_timer(vcpu) ||
195 		kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT) & (1 << INT_TI);
196 	preempt_enable();
197 
198 	return ret;
199 }
200 
201 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
202 {
203 	int i;
204 
205 	kvm_debug("vCPU Register Dump:\n");
206 	kvm_debug("\tPC = 0x%08lx\n", vcpu->arch.pc);
207 	kvm_debug("\tExceptions: %08lx\n", vcpu->arch.irq_pending);
208 
209 	for (i = 0; i < 32; i += 4) {
210 		kvm_debug("\tGPR%02d: %08lx %08lx %08lx %08lx\n", i,
211 		       vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1],
212 		       vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
213 	}
214 
215 	kvm_debug("\tCRMD: 0x%08lx, ESTAT: 0x%08lx\n",
216 		  kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD),
217 		  kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT));
218 
219 	kvm_debug("\tERA: 0x%08lx\n", kvm_read_hw_gcsr(LOONGARCH_CSR_ERA));
220 
221 	return 0;
222 }
223 
224 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
225 				struct kvm_mp_state *mp_state)
226 {
227 	*mp_state = vcpu->arch.mp_state;
228 
229 	return 0;
230 }
231 
232 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
233 				struct kvm_mp_state *mp_state)
234 {
235 	int ret = 0;
236 
237 	switch (mp_state->mp_state) {
238 	case KVM_MP_STATE_RUNNABLE:
239 		vcpu->arch.mp_state = *mp_state;
240 		break;
241 	default:
242 		ret = -EINVAL;
243 	}
244 
245 	return ret;
246 }
247 
248 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
249 					struct kvm_guest_debug *dbg)
250 {
251 	if (dbg->control & ~KVM_GUESTDBG_VALID_MASK)
252 		return -EINVAL;
253 
254 	if (dbg->control & KVM_GUESTDBG_ENABLE)
255 		vcpu->guest_debug = dbg->control;
256 	else
257 		vcpu->guest_debug = 0;
258 
259 	return 0;
260 }
261 
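/*
 * Bind the guest CSR.CPUID (physical CPU id) value to this vCPU in the
 * per-VM phyid_map. The mapping must stay unique: setting an id that is
 * already owned by another vCPU, or changing an already-set id, fails
 * with -EINVAL. Re-setting the same value is accepted as a no-op.
 */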
262 static inline int kvm_set_cpuid(struct kvm_vcpu *vcpu, u64 val)
263 {
264 	int cpuid;
265 	struct kvm_phyid_map *map;
266 	struct loongarch_csrs *csr = vcpu->arch.csr;
267 
268 	if (val >= KVM_MAX_PHYID)
269 		return -EINVAL;
270 
271 	map = vcpu->kvm->arch.phyid_map;
272 	cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);
273 
274 	spin_lock(&vcpu->kvm->arch.phyid_map_lock);
275 	if ((cpuid < KVM_MAX_PHYID) && map->phys_map[cpuid].enabled) {
276 		/* Discard duplicated CPUID set operation */
277 		if (cpuid == val) {
278 			spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
279 			return 0;
280 		}
281 
282 		/*
283 		 * CPUID was already set earlier,
284 		 * forbid changing to a different CPUID at runtime
285 		 */
286 		spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
287 		return -EINVAL;
288 	}
289 
290 	if (map->phys_map[val].enabled) {
291 		/* Discard duplicated CPUID set operation */
292 		if (vcpu == map->phys_map[val].vcpu) {
293 			spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
294 			return 0;
295 		}
296 
297 		/*
298 		 * The new CPUID is already bound to another vcpu,
299 		 * forbid sharing the same CPUID between different vcpus
300 		 */
301 		spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
302 		return -EINVAL;
303 	}
304 
305 	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, val);
306 	map->phys_map[val].enabled	= true;
307 	map->phys_map[val].vcpu		= vcpu;
308 	spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
309 
310 	return 0;
311 }
312 
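/*
 * Release this vCPU's slot in the per-VM phyid_map and reset the guest
 * CSR.CPUID to KVM_MAX_PHYID (i.e. "not set"). Called on vCPU destroy.
 */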
313 static inline void kvm_drop_cpuid(struct kvm_vcpu *vcpu)
314 {
315 	int cpuid;
316 	struct kvm_phyid_map *map;
317 	struct loongarch_csrs *csr = vcpu->arch.csr;
318 
319 	map = vcpu->kvm->arch.phyid_map;
320 	cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);
321 
322 	if (cpuid >= KVM_MAX_PHYID)
323 		return;
324 
325 	spin_lock(&vcpu->kvm->arch.phyid_map_lock);
326 	if (map->phys_map[cpuid].enabled) {
327 		map->phys_map[cpuid].vcpu = NULL;
328 		map->phys_map[cpuid].enabled = false;
329 		kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);
330 	}
331 	spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
332 }
333 
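/*
 * Look up the vCPU currently bound to @cpuid via the phyid_map, or
 * return NULL if the id is out of range or not enabled.
 */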
334 struct kvm_vcpu *kvm_get_vcpu_by_cpuid(struct kvm *kvm, int cpuid)
335 {
336 	struct kvm_phyid_map *map;
337 
338 	if (cpuid >= KVM_MAX_PHYID)
339 		return NULL;
340 
341 	map = kvm->arch.phyid_map;
342 	if (!map->phys_map[cpuid].enabled)
343 		return NULL;
344 
345 	return map->phys_map[cpuid].vcpu;
346 }
347 
348 static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val)
349 {
350 	unsigned long gintc;
351 	struct loongarch_csrs *csr = vcpu->arch.csr;
352 
353 	if (get_gcsr_flag(id) & INVALID_GCSR)
354 		return -EINVAL;
355 
356 	if (id == LOONGARCH_CSR_ESTAT) {
357 		/* ESTAT IP0~IP7 are read from GINTC */
358 		gintc = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC) & 0xff;
359 		*val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT) | (gintc << 2);
360 		return 0;
361 	}
362 
363 	/*
364 	 * Get the software CSR state, since the software state is kept
365 	 * consistent with the hardware state for synchronous ioctls
366 	 */
367 	*val = kvm_read_sw_gcsr(csr, id);
368 
369 	return 0;
370 }
371 
372 static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
373 {
374 	int ret = 0, gintc;
375 	struct loongarch_csrs *csr = vcpu->arch.csr;
376 
377 	if (get_gcsr_flag(id) & INVALID_GCSR)
378 		return -EINVAL;
379 
380 	if (id == LOONGARCH_CSR_CPUID)
381 		return kvm_set_cpuid(vcpu, val);
382 
383 	if (id == LOONGARCH_CSR_ESTAT) {
384 		/* ESTAT IP0~IP7 are injected through GINTC */
385 		gintc = (val >> 2) & 0xff;
386 		kvm_set_sw_gcsr(csr, LOONGARCH_CSR_GINTC, gintc);
387 
388 		gintc = val & ~(0xffUL << 2);
389 		kvm_set_sw_gcsr(csr, LOONGARCH_CSR_ESTAT, gintc);
390 
391 		return ret;
392 	}
393 
394 	kvm_write_sw_gcsr(csr, id, val);
395 
396 	return ret;
397 }
398 
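/*
 * Return in @v the mask of CPUCFG bits (for CPUCFG word @id) that
 * userspace is allowed to set, i.e. the features KVM can virtualize on
 * this host. Reserved or unsupported words report a zero mask.
 */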
399 static int _kvm_get_cpucfg_mask(int id, u64 *v)
400 {
401 	if (id < 0 || id >= KVM_MAX_CPUCFG_REGS)
402 		return -EINVAL;
403 
404 	switch (id) {
405 	case LOONGARCH_CPUCFG0:
406 		*v = GENMASK(31, 0);
407 		return 0;
408 	case LOONGARCH_CPUCFG1:
409 		/* CPUCFG1_MSGINT is not supported by KVM */
410 		*v = GENMASK(25, 0);
411 		return 0;
412 	case LOONGARCH_CPUCFG2:
413 		/* CPUCFG2 features unconditionally supported by KVM */
414 		*v = CPUCFG2_FP     | CPUCFG2_FPSP  | CPUCFG2_FPDP     |
415 		     CPUCFG2_FPVERS | CPUCFG2_LLFTP | CPUCFG2_LLFTPREV |
416 		     CPUCFG2_LSPW | CPUCFG2_LAM;
417 		/*
418 		 * For the ISA extensions listed below, if one is supported
419 		 * by the host, then it is also supported by KVM.
420 		 */
421 		if (cpu_has_lsx)
422 			*v |= CPUCFG2_LSX;
423 		if (cpu_has_lasx)
424 			*v |= CPUCFG2_LASX;
425 
426 		return 0;
427 	case LOONGARCH_CPUCFG3:
428 		*v = GENMASK(16, 0);
429 		return 0;
430 	case LOONGARCH_CPUCFG4:
431 	case LOONGARCH_CPUCFG5:
432 		*v = GENMASK(31, 0);
433 		return 0;
434 	case LOONGARCH_CPUCFG16:
435 		*v = GENMASK(16, 0);
436 		return 0;
437 	case LOONGARCH_CPUCFG17 ... LOONGARCH_CPUCFG20:
438 		*v = GENMASK(30, 0);
439 		return 0;
440 	default:
441 		/*
442 		 * CPUCFG bits should be zero if reserved by HW or not
443 		 * supported by KVM.
444 		 */
445 		*v = 0;
446 		return 0;
447 	}
448 }
449 
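/*
 * Validate a userspace-supplied CPUCFG value: it must fit within the
 * writable mask above, and for CPUCFG2 it must also respect the
 * architectural dependencies (constant timer required, FPSP/FPDP with
 * FP, FP with LSX, LSX with LASX).
 */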
450 static int kvm_check_cpucfg(int id, u64 val)
451 {
452 	int ret;
453 	u64 mask = 0;
454 
455 	ret = _kvm_get_cpucfg_mask(id, &mask);
456 	if (ret)
457 		return ret;
458 
459 	if (val & ~mask)
460 		/* Unsupported features and/or the higher 32 bits should not be set */
461 		return -EINVAL;
462 
463 	switch (id) {
464 	case LOONGARCH_CPUCFG2:
465 		if (!(val & CPUCFG2_LLFTP))
466 			/* Guests must have a constant timer */
467 			return -EINVAL;
468 		if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP)))
469 			/* Single- and double-precision floating point must both be set when FP is enabled */
470 			return -EINVAL;
471 		if ((val & CPUCFG2_LSX) && !(val & CPUCFG2_FP))
472 			/* LSX architecturally implies FP but val does not satisfy that */
473 			return -EINVAL;
474 		if ((val & CPUCFG2_LASX) && !(val & CPUCFG2_LSX))
475 			/* LASX architecturally implies LSX and FP but val does not satisfy that */
476 			return -EINVAL;
477 		return 0;
478 	default:
479 		/*
480 		 * Values for the other CPUCFG IDs are not being further validated
481 		 * besides the mask check above.
482 		 */
483 		return 0;
484 	}
485 }
486 
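/*
 * ONE_REG accessors: the register class is encoded in the
 * KVM_REG_LOONGARCH_MASK bits of reg->id, the index in the low bits
 * (see KVM_GET_IOC_CSR_IDX()/KVM_GET_IOC_CPUCFG_IDX()), and the access
 * size must be KVM_REG_SIZE_U64. A rough userspace sketch (illustrative
 * only; "vcpu_fd" and "csr_idx" are placeholders, and the exact id
 * encoding is defined by the LoongArch KVM uapi headers):
 *
 *	__u64 val;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | csr_idx,
 *		.addr = (__u64)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */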
487 static int kvm_get_one_reg(struct kvm_vcpu *vcpu,
488 		const struct kvm_one_reg *reg, u64 *v)
489 {
490 	int id, ret = 0;
491 	u64 type = reg->id & KVM_REG_LOONGARCH_MASK;
492 
493 	switch (type) {
494 	case KVM_REG_LOONGARCH_CSR:
495 		id = KVM_GET_IOC_CSR_IDX(reg->id);
496 		ret = _kvm_getcsr(vcpu, id, v);
497 		break;
498 	case KVM_REG_LOONGARCH_CPUCFG:
499 		id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
500 		if (id >= 0 && id < KVM_MAX_CPUCFG_REGS)
501 			*v = vcpu->arch.cpucfg[id];
502 		else
503 			ret = -EINVAL;
504 		break;
505 	case KVM_REG_LOONGARCH_KVM:
506 		switch (reg->id) {
507 		case KVM_REG_LOONGARCH_COUNTER:
508 			*v = drdtime() + vcpu->kvm->arch.time_offset;
509 			break;
510 		case KVM_REG_LOONGARCH_DEBUG_INST:
511 			*v = INSN_HVCL | KVM_HCALL_SWDBG;
512 			break;
513 		default:
514 			ret = -EINVAL;
515 			break;
516 		}
517 		break;
518 	default:
519 		ret = -EINVAL;
520 		break;
521 	}
522 
523 	return ret;
524 }
525 
526 static int kvm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
527 {
528 	int ret = 0;
529 	u64 v, size = reg->id & KVM_REG_SIZE_MASK;
530 
531 	switch (size) {
532 	case KVM_REG_SIZE_U64:
533 		ret = kvm_get_one_reg(vcpu, reg, &v);
534 		if (ret)
535 			return ret;
536 		ret = put_user(v, (u64 __user *)(long)reg->addr);
537 		break;
538 	default:
539 		ret = -EINVAL;
540 		break;
541 	}
542 
543 	return ret;
544 }
545 
546 static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
547 			const struct kvm_one_reg *reg, u64 v)
548 {
549 	int id, ret = 0;
550 	u64 type = reg->id & KVM_REG_LOONGARCH_MASK;
551 
552 	switch (type) {
553 	case KVM_REG_LOONGARCH_CSR:
554 		id = KVM_GET_IOC_CSR_IDX(reg->id);
555 		ret = _kvm_setcsr(vcpu, id, v);
556 		break;
557 	case KVM_REG_LOONGARCH_CPUCFG:
558 		id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
559 		ret = kvm_check_cpucfg(id, v);
560 		if (ret)
561 			break;
562 		vcpu->arch.cpucfg[id] = (u32)v;
563 		break;
564 	case KVM_REG_LOONGARCH_KVM:
565 		switch (reg->id) {
566 		case KVM_REG_LOONGARCH_COUNTER:
567 			/*
568 			 * The guest counter offset is per board (VM), not per vcpu;
569 			 * only set it once, via vCPU 0, on an SMP system
570 			 */
571 			if (vcpu->vcpu_id == 0)
572 				vcpu->kvm->arch.time_offset = (signed long)(v - drdtime());
573 			break;
574 		case KVM_REG_LOONGARCH_VCPU_RESET:
575 			kvm_reset_timer(vcpu);
576 			memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
577 			memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
578 			break;
579 		default:
580 			ret = -EINVAL;
581 			break;
582 		}
583 		break;
584 	default:
585 		ret = -EINVAL;
586 		break;
587 	}
588 
589 	return ret;
590 }
591 
592 static int kvm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
593 {
594 	int ret = 0;
595 	u64 v, size = reg->id & KVM_REG_SIZE_MASK;
596 
597 	switch (size) {
598 	case KVM_REG_SIZE_U64:
599 		ret = get_user(v, (u64 __user *)(long)reg->addr);
600 		if (ret)
601 			return ret;
602 		break;
603 	default:
604 		return -EINVAL;
605 	}
606 
607 	return kvm_set_one_reg(vcpu, reg, v);
608 }
609 
610 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
611 {
612 	return -ENOIOCTLCMD;
613 }
614 
615 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
616 {
617 	return -ENOIOCTLCMD;
618 }
619 
620 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
621 {
622 	int i;
623 
624 	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
625 		regs->gpr[i] = vcpu->arch.gprs[i];
626 
627 	regs->pc = vcpu->arch.pc;
628 
629 	return 0;
630 }
631 
632 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
633 {
634 	int i;
635 
636 	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
637 		vcpu->arch.gprs[i] = regs->gpr[i];
638 
639 	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
640 	vcpu->arch.pc = regs->pc;
641 
642 	return 0;
643 }
644 
645 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
646 				     struct kvm_enable_cap *cap)
647 {
648 	/* FPU is enabled by default; LSX/LASX will be supported later. */
649 	return -EINVAL;
650 }
651 
652 static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu,
653 					 struct kvm_device_attr *attr)
654 {
655 	switch (attr->attr) {
656 	case LOONGARCH_CPUCFG2:
657 		return 0;
658 	default:
659 		return -ENXIO;
660 	}
661 
662 	return -ENXIO;
663 }
664 
665 static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
666 				       struct kvm_device_attr *attr)
667 {
668 	int ret = -ENXIO;
669 
670 	switch (attr->group) {
671 	case KVM_LOONGARCH_VCPU_CPUCFG:
672 		ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr);
673 		break;
674 	default:
675 		break;
676 	}
677 
678 	return ret;
679 }
680 
681 static int kvm_loongarch_get_cpucfg_attr(struct kvm_vcpu *vcpu,
682 					 struct kvm_device_attr *attr)
683 {
684 	int ret = 0;
685 	uint64_t val;
686 	uint64_t __user *uaddr = (uint64_t __user *)attr->addr;
687 
688 	ret = _kvm_get_cpucfg_mask(attr->attr, &val);
689 	if (ret)
690 		return ret;
691 
692 	ret = put_user(val, uaddr);
693 
694 	return ret;
695 }
696 
697 static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu,
698 				       struct kvm_device_attr *attr)
699 {
700 	int ret = -ENXIO;
701 
702 	switch (attr->group) {
703 	case KVM_LOONGARCH_VCPU_CPUCFG:
704 		ret = kvm_loongarch_get_cpucfg_attr(vcpu, attr);
705 		break;
706 	default:
707 		break;
708 	}
709 
710 	return ret;
711 }
712 
713 static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu,
714 					 struct kvm_device_attr *attr)
715 {
716 	return -ENXIO;
717 }
718 
719 static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu,
720 				       struct kvm_device_attr *attr)
721 {
722 	int ret = -ENXIO;
723 
724 	switch (attr->group) {
725 	case KVM_LOONGARCH_VCPU_CPUCFG:
726 		ret = kvm_loongarch_cpucfg_set_attr(vcpu, attr);
727 		break;
728 	default:
729 		break;
730 	}
731 
732 	return ret;
733 }
734 
735 long kvm_arch_vcpu_ioctl(struct file *filp,
736 			 unsigned int ioctl, unsigned long arg)
737 {
738 	long r;
739 	struct kvm_device_attr attr;
740 	void __user *argp = (void __user *)arg;
741 	struct kvm_vcpu *vcpu = filp->private_data;
742 
743 	/*
744 	 * Only the software CSR state should be modified here
745 	 *
746 	 * If any hardware CSR register is modified, a vcpu_load/vcpu_put pair
747 	 * should be used. Since the CSR registers are owned by the current vcpu,
748 	 * switching to another vcpu requires the CSR registers to be reloaded.
749 	 *
750 	 * If the software CSR state is modified, the KVM_LARCH_HWCSR_USABLE bit
751 	 * should be cleared in vcpu->arch.aux_inuse, so that vcpu_load will check
752 	 * the aux_inuse flag and reload the CSR registers from software.
753 	 */
754 
755 	switch (ioctl) {
756 	case KVM_SET_ONE_REG:
757 	case KVM_GET_ONE_REG: {
758 		struct kvm_one_reg reg;
759 
760 		r = -EFAULT;
761 		if (copy_from_user(&reg, argp, sizeof(reg)))
762 			break;
763 		if (ioctl == KVM_SET_ONE_REG) {
764 			r = kvm_set_reg(vcpu, &reg);
765 			vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
766 		} else
767 			r = kvm_get_reg(vcpu, &reg);
768 		break;
769 	}
770 	case KVM_ENABLE_CAP: {
771 		struct kvm_enable_cap cap;
772 
773 		r = -EFAULT;
774 		if (copy_from_user(&cap, argp, sizeof(cap)))
775 			break;
776 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
777 		break;
778 	}
779 	case KVM_HAS_DEVICE_ATTR: {
780 		r = -EFAULT;
781 		if (copy_from_user(&attr, argp, sizeof(attr)))
782 			break;
783 		r = kvm_loongarch_vcpu_has_attr(vcpu, &attr);
784 		break;
785 	}
786 	case KVM_GET_DEVICE_ATTR: {
787 		r = -EFAULT;
788 		if (copy_from_user(&attr, argp, sizeof(attr)))
789 			break;
790 		r = kvm_loongarch_vcpu_get_attr(vcpu, &attr);
791 		break;
792 	}
793 	case KVM_SET_DEVICE_ATTR: {
794 		r = -EFAULT;
795 		if (copy_from_user(&attr, argp, sizeof(attr)))
796 			break;
797 		r = kvm_loongarch_vcpu_set_attr(vcpu, &attr);
798 		break;
799 	}
800 	default:
801 		r = -ENOIOCTLCMD;
802 		break;
803 	}
804 
805 	return r;
806 }
807 
808 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
809 {
810 	int i = 0;
811 
812 	fpu->fcc = vcpu->arch.fpu.fcc;
813 	fpu->fcsr = vcpu->arch.fpu.fcsr;
814 	for (i = 0; i < NUM_FPU_REGS; i++)
815 		memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i], FPU_REG_WIDTH / 64);
816 
817 	return 0;
818 }
819 
820 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
821 {
822 	int i = 0;
823 
824 	vcpu->arch.fpu.fcc = fpu->fcc;
825 	vcpu->arch.fpu.fcsr = fpu->fcsr;
826 	for (i = 0; i < NUM_FPU_REGS; i++)
827 		memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i], FPU_REG_WIDTH / 64);
828 
829 	return 0;
830 }
831 
832 /* Enable FPU and restore context */
833 void kvm_own_fpu(struct kvm_vcpu *vcpu)
834 {
835 	preempt_disable();
836 
837 	/* Enable FPU */
838 	set_csr_euen(CSR_EUEN_FPEN);
839 
840 	kvm_restore_fpu(&vcpu->arch.fpu);
841 	vcpu->arch.aux_inuse |= KVM_LARCH_FPU;
842 	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
843 
844 	preempt_enable();
845 }
846 
847 #ifdef CONFIG_CPU_HAS_LSX
848 /* Enable LSX and restore context */
849 int kvm_own_lsx(struct kvm_vcpu *vcpu)
850 {
851 	if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch))
852 		return -EINVAL;
853 
854 	preempt_disable();
855 
856 	/* Enable LSX for guest */
857 	set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN);
858 	switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
859 	case KVM_LARCH_FPU:
860 		/*
861 		 * Guest FPU state already loaded,
862 		 * only restore upper LSX state
863 		 */
864 		_restore_lsx_upper(&vcpu->arch.fpu);
865 		break;
866 	default:
867 		/* Neither FP nor LSX already active,
868 		 * restore full LSX state
869 		 */
870 		kvm_restore_lsx(&vcpu->arch.fpu);
871 		break;
872 	}
873 
874 	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX);
875 	vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU;
876 	preempt_enable();
877 
878 	return 0;
879 }
880 #endif
881 
882 #ifdef CONFIG_CPU_HAS_LASX
883 /* Enable LASX and restore context */
884 int kvm_own_lasx(struct kvm_vcpu *vcpu)
885 {
886 	if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch) || !kvm_guest_has_lasx(&vcpu->arch))
887 		return -EINVAL;
888 
889 	preempt_disable();
890 
891 	set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
892 	switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) {
893 	case KVM_LARCH_LSX:
894 	case KVM_LARCH_LSX | KVM_LARCH_FPU:
895 		/* Guest LSX state already loaded, only restore upper LASX state */
896 		_restore_lasx_upper(&vcpu->arch.fpu);
897 		break;
898 	case KVM_LARCH_FPU:
899 		/* Guest FP state already loaded, only restore upper LSX & LASX state */
900 		_restore_lsx_upper(&vcpu->arch.fpu);
901 		_restore_lasx_upper(&vcpu->arch.fpu);
902 		break;
903 		/* Neither FP nor LSX already active, restore full LASX state */
904 		/* Neither FP or LSX already active, restore full LASX state */
905 		kvm_restore_lasx(&vcpu->arch.fpu);
906 		break;
907 	}
908 
909 	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX);
910 	vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU;
911 	preempt_enable();
912 
913 	return 0;
914 }
915 #endif
916 
917 /* Save context and disable FPU */
918 void kvm_lose_fpu(struct kvm_vcpu *vcpu)
919 {
920 	preempt_disable();
921 
922 	if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) {
923 		kvm_save_lasx(&vcpu->arch.fpu);
924 		vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX);
925 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LASX);
926 
927 		/* Disable LASX & LSX & FPU */
928 		clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
929 	} else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) {
930 		kvm_save_lsx(&vcpu->arch.fpu);
931 		vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU);
932 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX);
933 
934 		/* Disable LSX & FPU */
935 		clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN);
936 	} else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
937 		kvm_save_fpu(&vcpu->arch.fpu);
938 		vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU;
939 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);
940 
941 		/* Disable FPU */
942 		clear_csr_euen(CSR_EUEN_FPEN);
943 	}
944 
945 	preempt_enable();
946 }
947 
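/*
 * KVM_INTERRUPT handler: a positive irq number queues (asserts) the
 * interrupt, a negative one dequeues (deasserts) it, and 0 is invalid.
 * The vCPU is kicked afterwards so a sleeping or running vCPU notices
 * the change.
 */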
948 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
949 {
950 	int intr = (int)irq->irq;
951 
952 	if (intr > 0)
953 		kvm_queue_irq(vcpu, intr);
954 	else if (intr < 0)
955 		kvm_dequeue_irq(vcpu, -intr);
956 	else {
957 		kvm_err("%s: invalid interrupt ioctl %d\n", __func__, irq->irq);
958 		return -EINVAL;
959 	}
960 
961 	kvm_vcpu_kick(vcpu);
962 
963 	return 0;
964 }
965 
966 long kvm_arch_vcpu_async_ioctl(struct file *filp,
967 			       unsigned int ioctl, unsigned long arg)
968 {
969 	void __user *argp = (void __user *)arg;
970 	struct kvm_vcpu *vcpu = filp->private_data;
971 
972 	if (ioctl == KVM_INTERRUPT) {
973 		struct kvm_interrupt irq;
974 
975 		if (copy_from_user(&irq, argp, sizeof(irq)))
976 			return -EFAULT;
977 
978 		kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq);
979 
980 		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
981 	}
982 
983 	return -ENOIOCTLCMD;
984 }
985 
986 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
987 {
988 	return 0;
989 }
990 
991 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
992 {
993 	unsigned long timer_hz;
994 	struct loongarch_csrs *csr;
995 
996 	vcpu->arch.vpid = 0;
997 
998 	hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
999 	vcpu->arch.swtimer.function = kvm_swtimer_wakeup;
1000 
1001 	vcpu->arch.handle_exit = kvm_handle_exit;
1002 	vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry;
1003 	vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL);
1004 	if (!vcpu->arch.csr)
1005 		return -ENOMEM;
1006 
1007 	/*
1008 	 * All KVM exceptions share one exception entry, and the host <-> guest
1009 	 * switch also switches the ECFG.VS field, so keep the host ECFG.VS info here.
1010 	 */
1011 	vcpu->arch.host_ecfg = (read_csr_ecfg() & CSR_ECFG_VS);
1012 
1013 	/* Init */
1014 	vcpu->arch.last_sched_cpu = -1;
1015 
1016 	/*
1017 	 * Initialize guest register state to valid architectural reset state.
1018 	 */
1019 	timer_hz = calc_const_freq();
1020 	kvm_init_timer(vcpu, timer_hz);
1021 
1022 	/* Set initial (direct address translation) mode for the guest */
1023 	csr = vcpu->arch.csr;
1024 	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CRMD, CSR_CRMD_DA);
1025 
1026 	/* Set cpuid */
1027 	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id);
1028 	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);
1029 
1030 	/* Start with no pending virtual guest interrupts */
1031 	csr->csrs[LOONGARCH_CSR_GINTC] = 0;
1032 
1033 	return 0;
1034 }
1035 
1036 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
1037 {
1038 }
1039 
1040 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1041 {
1042 	int cpu;
1043 	struct kvm_context *context;
1044 
1045 	hrtimer_cancel(&vcpu->arch.swtimer);
1046 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
1047 	kvm_drop_cpuid(vcpu);
1048 	kfree(vcpu->arch.csr);
1049 
1050 	/*
1051 	 * If the vCPU is freed and reused as another vCPU, we don't want the
1052 	 * matching pointer wrongly hanging around in last_vcpu.
1053 	 */
1054 	for_each_possible_cpu(cpu) {
1055 		context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
1056 		if (context->last_vcpu == vcpu)
1057 			context->last_vcpu = NULL;
1058 	}
1059 }
1060 
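/*
 * Load this vCPU's state onto the current physical CPU. Called from
 * kvm_arch_vcpu_load() with interrupts disabled; only reloads the guest
 * CSR registers when the hardware copy may be stale (migration or
 * another vCPU ran here in between).
 */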
1061 static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1062 {
1063 	bool migrated;
1064 	struct kvm_context *context;
1065 	struct loongarch_csrs *csr = vcpu->arch.csr;
1066 
1067 	/*
1068 	 * Have we migrated to a different CPU?
1069 	 * If so, any old guest TLB state may be stale.
1070 	 */
1071 	migrated = (vcpu->arch.last_sched_cpu != cpu);
1072 
1073 	/*
1074 	 * Was this the last vCPU to run on this CPU?
1075 	 * If not, any old guest state from this vCPU will have been clobbered.
1076 	 */
1077 	context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
1078 	if (migrated || (context->last_vcpu != vcpu))
1079 		vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
1080 	context->last_vcpu = vcpu;
1081 
1082 	/* Restore timer state regardless */
1083 	kvm_restore_timer(vcpu);
1084 
1085 	/* Control guest page CCA attribute */
1086 	change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT);
1087 
1088 	/* Don't bother restoring registers multiple times unless necessary */
1089 	if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE)
1090 		return 0;
1091 
1092 	write_csr_gcntc((ulong)vcpu->kvm->arch.time_offset);
1093 
1094 	/* Restore guest CSR registers */
1095 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
1096 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
1097 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
1098 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_MISC);
1099 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
1100 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ERA);
1101 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADV);
1102 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADI);
1103 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
1104 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
1105 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
1106 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
1107 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
1108 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ASID);
1109 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
1110 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
1111 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
1112 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
1113 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
1114 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
1115 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
1116 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS0);
1117 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS1);
1118 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS2);
1119 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS3);
1120 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS4);
1121 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS5);
1122 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS6);
1123 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS7);
1124 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TMID);
1125 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
1126 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
1127 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
1128 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
1129 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
1130 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
1131 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
1132 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
1133 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
1134 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
1135 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
1136 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
1137 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
1138 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
1139 
1140 	/* Restore Root.GINTC from unused Guest.GINTC register */
1141 	write_csr_gintc(csr->csrs[LOONGARCH_CSR_GINTC]);
1142 
1143 	/*
1144 	 * We should clear the linked load bit to break interrupted atomics. This
1145 	 * prevents an SC on the next vCPU from succeeding by matching an LL on
1146 	 * the previous vCPU.
1147 	 */
1148 	if (vcpu->kvm->created_vcpus > 1)
1149 		set_gcsr_llbctl(CSR_LLBCTL_WCLLB);
1150 
1151 	vcpu->arch.aux_inuse |= KVM_LARCH_HWCSR_USABLE;
1152 
1153 	return 0;
1154 }
1155 
1156 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1157 {
1158 	unsigned long flags;
1159 
1160 	local_irq_save(flags);
1161 	/* Restore guest state to registers */
1162 	_kvm_vcpu_load(vcpu, cpu);
1163 	local_irq_restore(flags);
1164 }
1165 
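/*
 * Save this vCPU's state away from the current physical CPU. Called
 * from kvm_arch_vcpu_put() with interrupts disabled; skips the bulk CSR
 * save when the software copy is already up to date, but always saves
 * the timer state and Root.GINTC.
 */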
1166 static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
1167 {
1168 	struct loongarch_csrs *csr = vcpu->arch.csr;
1169 
1170 	kvm_lose_fpu(vcpu);
1171 
1172 	/*
1173 	 * Update the software CSR state from hardware if it is stale. Most CSR
1174 	 * registers are kept unchanged during a process context switch, except
1175 	 * for registers like the remaining timer tick value and the injected
1176 	 * interrupt state.
1177 	 */
1178 	if (vcpu->arch.aux_inuse & KVM_LARCH_SWCSR_LATEST)
1179 		goto out;
1180 
1181 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
1182 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
1183 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
1184 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_MISC);
1185 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
1186 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ERA);
1187 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADV);
1188 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADI);
1189 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
1190 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
1191 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
1192 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
1193 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
1194 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ASID);
1195 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
1196 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
1197 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
1198 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
1199 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
1200 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
1201 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
1202 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG1);
1203 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG2);
1204 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG3);
1205 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS0);
1206 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS1);
1207 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS2);
1208 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS3);
1209 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS4);
1210 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS5);
1211 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS6);
1212 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS7);
1213 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TMID);
1214 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
1215 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
1216 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
1217 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
1218 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
1219 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
1220 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
1221 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
1222 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
1223 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
1224 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
1225 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
1226 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
1227 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
1228 
1229 	vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST;
1230 
1231 out:
1232 	kvm_save_timer(vcpu);
1233 	/* Save Root.GINTC into unused Guest.GINTC register */
1234 	csr->csrs[LOONGARCH_CSR_GINTC] = read_csr_gintc();
1235 
1236 	return 0;
1237 }
1238 
1239 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1240 {
1241 	int cpu;
1242 	unsigned long flags;
1243 
1244 	local_irq_save(flags);
1245 	cpu = smp_processor_id();
1246 	vcpu->arch.last_sched_cpu = cpu;
1247 
1248 	/* Save guest state in registers */
1249 	_kvm_vcpu_put(vcpu, cpu);
1250 	local_irq_restore(flags);
1251 }
1252 
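/*
 * Main KVM_RUN entry: complete any pending MMIO/IOCSR read from the
 * previous exit, then run the guest via kvm_loongarch_ops->enter_guest()
 * until an exit that must be handled in userspace (or a signal) occurs.
 */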
1253 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
1254 {
1255 	int r = -EINTR;
1256 	struct kvm_run *run = vcpu->run;
1257 
1258 	if (vcpu->mmio_needed) {
1259 		if (!vcpu->mmio_is_write)
1260 			kvm_complete_mmio_read(vcpu, run);
1261 		vcpu->mmio_needed = 0;
1262 	}
1263 
1264 	if (run->exit_reason == KVM_EXIT_LOONGARCH_IOCSR) {
1265 		if (!run->iocsr_io.is_write)
1266 			kvm_complete_iocsr_read(vcpu, run);
1267 	}
1268 
1269 	if (run->immediate_exit)
1270 		return r;
1271 
1272 	/* Clear exit_reason */
1273 	run->exit_reason = KVM_EXIT_UNKNOWN;
1274 	lose_fpu(1);
1275 	vcpu_load(vcpu);
1276 	kvm_sigset_activate(vcpu);
1277 	r = kvm_pre_enter_guest(vcpu);
1278 	if (r != RESUME_GUEST)
1279 		goto out;
1280 
1281 	guest_timing_enter_irqoff();
1282 	guest_state_enter_irqoff();
1283 	trace_kvm_enter(vcpu);
1284 	r = kvm_loongarch_ops->enter_guest(run, vcpu);
1285 
1286 	trace_kvm_out(vcpu);
1287 	/*
1288 	 * Guest exit is already recorded in kvm_handle_exit();
1289 	 * the return value must not be RESUME_GUEST
1290 	 */
1291 	local_irq_enable();
1292 out:
1293 	kvm_sigset_deactivate(vcpu);
1294 	vcpu_put(vcpu);
1295 
1296 	return r;
1297 }
1298