xref: /linux/arch/loongarch/kvm/vcpu.c (revision e33bda7ee50c3c20d80f5ca6dc5ca2cd37863518)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
4  */
5 
6 #include <linux/kvm_host.h>
7 #include <linux/entry-kvm.h>
8 #include <asm/fpu.h>
9 #include <asm/loongarch.h>
10 #include <asm/setup.h>
11 #include <asm/time.h>
12 
13 #define CREATE_TRACE_POINTS
14 #include "trace.h"
15 
16 const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
17 	KVM_GENERIC_VCPU_STATS(),
18 	STATS_DESC_COUNTER(VCPU, int_exits),
19 	STATS_DESC_COUNTER(VCPU, idle_exits),
20 	STATS_DESC_COUNTER(VCPU, cpucfg_exits),
21 	STATS_DESC_COUNTER(VCPU, signal_exits),
22 	STATS_DESC_COUNTER(VCPU, hypercall_exits)
23 };
24 
25 const struct kvm_stats_header kvm_vcpu_stats_header = {
26 	.name_size = KVM_STATS_NAME_SIZE,
27 	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
28 	.id_offset = sizeof(struct kvm_stats_header),
29 	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
30 	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
31 		       sizeof(kvm_vcpu_stats_desc),
32 };
33 
34 /*
35  * kvm_check_requests - check and handle pending vCPU requests
36  *
37  * Return: RESUME_GUEST if we should enter the guest
38  *         RESUME_HOST  if we should exit to userspace
39  */
40 static int kvm_check_requests(struct kvm_vcpu *vcpu)
41 {
42 	if (!kvm_request_pending(vcpu))
43 		return RESUME_GUEST;
44 
45 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
46 		vcpu->arch.vpid = 0;  /* Drop vpid for this vCPU */
47 
48 	if (kvm_dirty_ring_check_request(vcpu))
49 		return RESUME_HOST;
50 
51 	return RESUME_GUEST;
52 }
53 
54 /*
55  * Check and handle pending signals, vCPU requests, etc.
56  * Called with irqs and preemption enabled.
57  *
58  * Return: RESUME_GUEST if we should enter the guest
59  *         RESUME_HOST  if we should exit to userspace
60  *         < 0 if we should exit to userspace, where the return value
61  *         indicates an error
62  */
63 static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
64 {
65 	int ret;
66 
67 	/*
68 	 * Check conditions before entering the guest
69 	 */
70 	ret = xfer_to_guest_mode_handle_work(vcpu);
71 	if (ret < 0)
72 		return ret;
73 
74 	ret = kvm_check_requests(vcpu);
75 
76 	return ret;
77 }
78 
79 /*
80  * Called with irq enabled
81  *
82  * Return: RESUME_GUEST if we should enter the guest, and irq disabled
83  *         Others if we should exit to userspace
84  */
85 static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
86 {
87 	int ret;
88 
89 	do {
90 		ret = kvm_enter_guest_check(vcpu);
91 		if (ret != RESUME_GUEST)
92 			break;
93 
94 		/*
95 		 * Deliver pending interrupts (including the timer) and
96 		 * exceptions, and check the VPID before entering the guest
97 		 */
98 		local_irq_disable();
99 		kvm_deliver_intr(vcpu);
100 		kvm_deliver_exception(vcpu);
101 		/* Make sure the vcpu mode has been written */
102 		smp_store_mb(vcpu->mode, IN_GUEST_MODE);
103 		kvm_check_vpid(vcpu);
104 		vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY);
105 		/* Clear KVM_LARCH_SWCSR_LATEST as CSRs will change when entering the guest */
106 		vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
107 
108 		if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) {
109 			/* make sure the vcpu mode has been written */
110 			smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE);
111 			local_irq_enable();
112 			ret = -EAGAIN;
113 		}
114 	} while (ret != RESUME_GUEST);
115 
116 	return ret;
117 }
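
/*
 * Sketch of the entry sequence driven by the loop above (descriptive note,
 * not from the upstream file): pending work and requests are handled with
 * irqs enabled, then irqs are disabled, interrupts/exceptions are delivered
 * and the VPID is checked; if new work or requests arrived in the meantime,
 * the loop re-enables irqs and retries, so RESUME_GUEST is only ever
 * returned with irqs disabled.
 */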
118 
119 /*
120  * Return RESUME_GUEST (1) to resume the guest and <= 0 to resume the host.
121  */
122 static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
123 {
124 	int ret = RESUME_GUEST;
125 	unsigned long estat = vcpu->arch.host_estat;
126 	u32 intr = estat & 0x1fff; /* Ignore NMI */
127 	u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
128 
129 	vcpu->mode = OUTSIDE_GUEST_MODE;
130 
131 	/* Set a default exit reason */
132 	run->exit_reason = KVM_EXIT_UNKNOWN;
133 
134 	guest_timing_exit_irqoff();
135 	guest_state_exit_irqoff();
136 	local_irq_enable();
137 
138 	trace_kvm_exit(vcpu, ecode);
139 	if (ecode) {
140 		ret = kvm_handle_fault(vcpu, ecode);
141 	} else {
142 		WARN(!intr, "vm exiting with suspicious irq\n");
143 		++vcpu->stat.int_exits;
144 	}
145 
146 	if (ret == RESUME_GUEST)
147 		ret = kvm_pre_enter_guest(vcpu);
148 
149 	if (ret != RESUME_GUEST) {
150 		local_irq_disable();
151 		return ret;
152 	}
153 
154 	guest_timing_enter_irqoff();
155 	guest_state_enter_irqoff();
156 	trace_kvm_reenter(vcpu);
157 
158 	return RESUME_GUEST;
159 }
160 
161 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
162 {
163 	return !!(vcpu->arch.irq_pending) &&
164 		vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE;
165 }
166 
167 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
168 {
169 	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
170 }
171 
172 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
173 {
174 	return false;
175 }
176 
177 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
178 {
179 	return VM_FAULT_SIGBUS;
180 }
181 
182 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
183 				  struct kvm_translation *tr)
184 {
185 	return -EINVAL;
186 }
187 
188 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
189 {
190 	int ret;
191 
192 	/* Protect from TOD sync and vcpu_load/put() */
193 	preempt_disable();
194 	ret = kvm_pending_timer(vcpu) ||
195 		kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT) & (1 << INT_TI);
196 	preempt_enable();
197 
198 	return ret;
199 }
200 
201 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
202 {
203 	int i;
204 
205 	kvm_debug("vCPU Register Dump:\n");
206 	kvm_debug("\tPC = 0x%08lx\n", vcpu->arch.pc);
207 	kvm_debug("\tExceptions: %08lx\n", vcpu->arch.irq_pending);
208 
209 	for (i = 0; i < 32; i += 4) {
210 		kvm_debug("\tGPR%02d: %08lx %08lx %08lx %08lx\n", i,
211 		       vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1],
212 		       vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
213 	}
214 
215 	kvm_debug("\tCRMD: 0x%08lx, ESTAT: 0x%08lx\n",
216 		  kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD),
217 		  kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT));
218 
219 	kvm_debug("\tERA: 0x%08lx\n", kvm_read_hw_gcsr(LOONGARCH_CSR_ERA));
220 
221 	return 0;
222 }
223 
224 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
225 				struct kvm_mp_state *mp_state)
226 {
227 	*mp_state = vcpu->arch.mp_state;
228 
229 	return 0;
230 }
231 
232 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
233 				struct kvm_mp_state *mp_state)
234 {
235 	int ret = 0;
236 
237 	switch (mp_state->mp_state) {
238 	case KVM_MP_STATE_RUNNABLE:
239 		vcpu->arch.mp_state = *mp_state;
240 		break;
241 	default:
242 		ret = -EINVAL;
243 	}
244 
245 	return ret;
246 }
247 
248 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
249 					struct kvm_guest_debug *dbg)
250 {
251 	return -EINVAL;
252 }
253 
254 static inline int kvm_set_cpuid(struct kvm_vcpu *vcpu, u64 val)
255 {
256 	int cpuid;
257 	struct kvm_phyid_map *map;
258 	struct loongarch_csrs *csr = vcpu->arch.csr;
259 
260 	if (val >= KVM_MAX_PHYID)
261 		return -EINVAL;
262 
263 	map = vcpu->kvm->arch.phyid_map;
264 	cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);
265 
266 	spin_lock(&vcpu->kvm->arch.phyid_map_lock);
267 	if ((cpuid < KVM_MAX_PHYID) && map->phys_map[cpuid].enabled) {
268 		/* Discard duplicated CPUID set operation */
269 		if (cpuid == val) {
270 			spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
271 			return 0;
272 		}
273 
274 		/*
275 		 * The CPUID has already been set.
276 		 * Forbid changing it to a different value at runtime.
277 		 */
278 		spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
279 		return -EINVAL;
280 	}
281 
282 	if (map->phys_map[val].enabled) {
283 		/* Discard duplicated CPUID set operation */
284 		if (vcpu == map->phys_map[val].vcpu) {
285 			spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
286 			return 0;
287 		}
288 
289 		/*
290 		 * The new CPUID is already in use by another vCPU.
291 		 * Forbid sharing the same CPUID between different vCPUs.
292 		 */
293 		spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
294 		return -EINVAL;
295 	}
296 
297 	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, val);
298 	map->phys_map[val].enabled	= true;
299 	map->phys_map[val].vcpu		= vcpu;
300 	spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
301 
302 	return 0;
303 }
304 
305 static inline void kvm_drop_cpuid(struct kvm_vcpu *vcpu)
306 {
307 	int cpuid;
308 	struct kvm_phyid_map *map;
309 	struct loongarch_csrs *csr = vcpu->arch.csr;
310 
311 	map = vcpu->kvm->arch.phyid_map;
312 	cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);
313 
314 	if (cpuid >= KVM_MAX_PHYID)
315 		return;
316 
317 	spin_lock(&vcpu->kvm->arch.phyid_map_lock);
318 	if (map->phys_map[cpuid].enabled) {
319 		map->phys_map[cpuid].vcpu = NULL;
320 		map->phys_map[cpuid].enabled = false;
321 		kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);
322 	}
323 	spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
324 }
325 
326 struct kvm_vcpu *kvm_get_vcpu_by_cpuid(struct kvm *kvm, int cpuid)
327 {
328 	struct kvm_phyid_map *map;
329 
330 	if (cpuid >= KVM_MAX_PHYID)
331 		return NULL;
332 
333 	map = kvm->arch.phyid_map;
334 	if (!map->phys_map[cpuid].enabled)
335 		return NULL;
336 
337 	return map->phys_map[cpuid].vcpu;
338 }
339 
340 static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val)
341 {
342 	unsigned long gintc;
343 	struct loongarch_csrs *csr = vcpu->arch.csr;
344 
345 	if (get_gcsr_flag(id) & INVALID_GCSR)
346 		return -EINVAL;
347 
348 	if (id == LOONGARCH_CSR_ESTAT) {
349 		/* ESTAT IP0~IP7 are read from GINTC */
350 		gintc = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC) & 0xff;
351 		*val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT) | (gintc << 2);
352 		return 0;
353 	}
354 
355 	/*
356 	 * Get the software CSR state, since the software state is kept
357 	 * consistent with the hardware state for synchronous ioctls
358 	 */
359 	*val = kvm_read_sw_gcsr(csr, id);
360 
361 	return 0;
362 }
363 
364 static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
365 {
366 	int ret = 0, gintc;
367 	struct loongarch_csrs *csr = vcpu->arch.csr;
368 
369 	if (get_gcsr_flag(id) & INVALID_GCSR)
370 		return -EINVAL;
371 
372 	if (id == LOONGARCH_CSR_CPUID)
373 		return kvm_set_cpuid(vcpu, val);
374 
375 	if (id == LOONGARCH_CSR_ESTAT) {
376 		/* ESTAT IP0~IP7 are injected through GINTC */
377 		gintc = (val >> 2) & 0xff;
378 		kvm_set_sw_gcsr(csr, LOONGARCH_CSR_GINTC, gintc);
379 
380 		gintc = val & ~(0xffUL << 2);
381 		kvm_set_sw_gcsr(csr, LOONGARCH_CSR_ESTAT, gintc);
382 
383 		return ret;
384 	}
385 
386 	kvm_write_sw_gcsr(csr, id, val);
387 
388 	return ret;
389 }
390 
391 static int _kvm_get_cpucfg_mask(int id, u64 *v)
392 {
393 	if (id < 0 || id >= KVM_MAX_CPUCFG_REGS)
394 		return -EINVAL;
395 
396 	switch (id) {
397 	case LOONGARCH_CPUCFG0:
398 		*v = GENMASK(31, 0);
399 		return 0;
400 	case LOONGARCH_CPUCFG1:
401 		/* CPUCFG1_MSGINT is not supported by KVM */
402 		*v = GENMASK(25, 0);
403 		return 0;
404 	case LOONGARCH_CPUCFG2:
405 		/* CPUCFG2 features unconditionally supported by KVM */
406 		*v = CPUCFG2_FP     | CPUCFG2_FPSP  | CPUCFG2_FPDP     |
407 		     CPUCFG2_FPVERS | CPUCFG2_LLFTP | CPUCFG2_LLFTPREV |
408 		     CPUCFG2_LSPW | CPUCFG2_LAM;
409 		/*
410 		 * For the ISA extensions listed below, if one is supported
411 		 * by the host, then it is also supported by KVM.
412 		 */
413 		if (cpu_has_lsx)
414 			*v |= CPUCFG2_LSX;
415 		if (cpu_has_lasx)
416 			*v |= CPUCFG2_LASX;
417 
418 		return 0;
419 	case LOONGARCH_CPUCFG3:
420 		*v = GENMASK(16, 0);
421 		return 0;
422 	case LOONGARCH_CPUCFG4:
423 	case LOONGARCH_CPUCFG5:
424 		*v = GENMASK(31, 0);
425 		return 0;
426 	case LOONGARCH_CPUCFG16:
427 		*v = GENMASK(16, 0);
428 		return 0;
429 	case LOONGARCH_CPUCFG17 ... LOONGARCH_CPUCFG20:
430 		*v = GENMASK(30, 0);
431 		return 0;
432 	default:
433 		/*
434 		 * CPUCFG bits should be zero if reserved by HW or not
435 		 * supported by KVM.
436 		 */
437 		*v = 0;
438 		return 0;
439 	}
440 }
441 
442 static int kvm_check_cpucfg(int id, u64 val)
443 {
444 	int ret;
445 	u64 mask = 0;
446 
447 	ret = _kvm_get_cpucfg_mask(id, &mask);
448 	if (ret)
449 		return ret;
450 
451 	if (val & ~mask)
452 		/* Unsupported features and/or the higher 32 bits should not be set */
453 		return -EINVAL;
454 
455 	switch (id) {
456 	case LOONGARCH_CPUCFG2:
457 		if (!(val & CPUCFG2_LLFTP))
458 			/* Guests must have a constant timer */
459 			return -EINVAL;
460 		if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP)))
461 			/* Single- and double-precision FP must both be set when FP is enabled */
462 			return -EINVAL;
463 		if ((val & CPUCFG2_LSX) && !(val & CPUCFG2_FP))
464 			/* LSX architecturally implies FP but val does not satisfy that */
465 			return -EINVAL;
466 		if ((val & CPUCFG2_LASX) && !(val & CPUCFG2_LSX))
467 			/* LASX architecturally implies LSX and FP but val does not satisfy that */
468 			return -EINVAL;
469 		return 0;
470 	default:
471 		/*
472 		 * Values for the other CPUCFG IDs are not further validated
473 		 * beyond the mask check above.
474 		 */
475 		return 0;
476 	}
477 }
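
/*
 * Example (illustrative only, not from the upstream file): a guest CPUCFG2
 * value of
 *
 *	CPUCFG2_FP | CPUCFG2_FPSP | CPUCFG2_FPDP | CPUCFG2_FPVERS |
 *	CPUCFG2_LLFTP | CPUCFG2_LLFTPREV | CPUCFG2_LSPW | CPUCFG2_LAM
 *
 * passes kvm_check_cpucfg(): every bit is inside the KVM-supported mask,
 * the constant timer (CPUCFG2_LLFTP) is present, and single/double precision
 * FP are both set alongside CPUCFG2_FP.  Setting CPUCFG2_LASX without
 * CPUCFG2_LSX would be rejected with -EINVAL.
 */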
478 
479 static int kvm_get_one_reg(struct kvm_vcpu *vcpu,
480 		const struct kvm_one_reg *reg, u64 *v)
481 {
482 	int id, ret = 0;
483 	u64 type = reg->id & KVM_REG_LOONGARCH_MASK;
484 
485 	switch (type) {
486 	case KVM_REG_LOONGARCH_CSR:
487 		id = KVM_GET_IOC_CSR_IDX(reg->id);
488 		ret = _kvm_getcsr(vcpu, id, v);
489 		break;
490 	case KVM_REG_LOONGARCH_CPUCFG:
491 		id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
492 		if (id >= 0 && id < KVM_MAX_CPUCFG_REGS)
493 			*v = vcpu->arch.cpucfg[id];
494 		else
495 			ret = -EINVAL;
496 		break;
497 	case KVM_REG_LOONGARCH_KVM:
498 		switch (reg->id) {
499 		case KVM_REG_LOONGARCH_COUNTER:
500 			*v = drdtime() + vcpu->kvm->arch.time_offset;
501 			break;
502 		default:
503 			ret = -EINVAL;
504 			break;
505 		}
506 		break;
507 	default:
508 		ret = -EINVAL;
509 		break;
510 	}
511 
512 	return ret;
513 }
514 
515 static int kvm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
516 {
517 	int ret = 0;
518 	u64 v, size = reg->id & KVM_REG_SIZE_MASK;
519 
520 	switch (size) {
521 	case KVM_REG_SIZE_U64:
522 		ret = kvm_get_one_reg(vcpu, reg, &v);
523 		if (ret)
524 			return ret;
525 		ret = put_user(v, (u64 __user *)(long)reg->addr);
526 		break;
527 	default:
528 		ret = -EINVAL;
529 		break;
530 	}
531 
532 	return ret;
533 }
534 
535 static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
536 			const struct kvm_one_reg *reg, u64 v)
537 {
538 	int id, ret = 0;
539 	u64 type = reg->id & KVM_REG_LOONGARCH_MASK;
540 
541 	switch (type) {
542 	case KVM_REG_LOONGARCH_CSR:
543 		id = KVM_GET_IOC_CSR_IDX(reg->id);
544 		ret = _kvm_setcsr(vcpu, id, v);
545 		break;
546 	case KVM_REG_LOONGARCH_CPUCFG:
547 		id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
548 		ret = kvm_check_cpucfg(id, v);
549 		if (ret)
550 			break;
551 		vcpu->arch.cpucfg[id] = (u32)v;
552 		break;
553 	case KVM_REG_LOONGARCH_KVM:
554 		switch (reg->id) {
555 		case KVM_REG_LOONGARCH_COUNTER:
556 			/*
557 			 * The guest counter offset is board-wide, not per-vCPU;
558 			 * only set it once (via vCPU 0) on an SMP system
559 			 */
560 			if (vcpu->vcpu_id == 0)
561 				vcpu->kvm->arch.time_offset = (signed long)(v - drdtime());
562 			break;
563 		case KVM_REG_LOONGARCH_VCPU_RESET:
564 			kvm_reset_timer(vcpu);
565 			memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
566 			memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
567 			break;
568 		default:
569 			ret = -EINVAL;
570 			break;
571 		}
572 		break;
573 	default:
574 		ret = -EINVAL;
575 		break;
576 	}
577 
578 	return ret;
579 }
580 
581 static int kvm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
582 {
583 	int ret = 0;
584 	u64 v, size = reg->id & KVM_REG_SIZE_MASK;
585 
586 	switch (size) {
587 	case KVM_REG_SIZE_U64:
588 		ret = get_user(v, (u64 __user *)(long)reg->addr);
589 		if (ret)
590 			return ret;
591 		break;
592 	default:
593 		return -EINVAL;
594 	}
595 
596 	return kvm_set_one_reg(vcpu, reg, v);
597 }
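
/*
 * Illustrative userspace usage of the ONE_REG interface handled above
 * (a sketch, not part of this file; error handling mostly omitted).
 * Reading the guest's free-running counter through the uapi constant
 * KVM_REG_LOONGARCH_COUNTER, which must already encode the U64 size class
 * for the exact match in kvm_get_one_reg() to succeed; vcpu_fd is the file
 * descriptor returned by KVM_CREATE_VCPU:
 *
 *	struct kvm_one_reg reg;
 *	__u64 counter;
 *
 *	reg.id   = KVM_REG_LOONGARCH_COUNTER;
 *	reg.addr = (__u64)(unsigned long)&counter;
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
 *		perror("KVM_GET_ONE_REG");
 */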
598 
599 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
600 {
601 	return -ENOIOCTLCMD;
602 }
603 
604 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
605 {
606 	return -ENOIOCTLCMD;
607 }
608 
609 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
610 {
611 	int i;
612 
613 	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
614 		regs->gpr[i] = vcpu->arch.gprs[i];
615 
616 	regs->pc = vcpu->arch.pc;
617 
618 	return 0;
619 }
620 
621 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
622 {
623 	int i;
624 
625 	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
626 		vcpu->arch.gprs[i] = regs->gpr[i];
627 
628 	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
629 	vcpu->arch.pc = regs->pc;
630 
631 	return 0;
632 }
633 
634 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
635 				     struct kvm_enable_cap *cap)
636 {
637 	/* FPU is enabled by default; LSX/LASX will be supported later. */
638 	return -EINVAL;
639 }
640 
641 static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu,
642 					 struct kvm_device_attr *attr)
643 {
644 	switch (attr->attr) {
645 	case 2:		/* CPUCFG2 */
646 		return 0;
647 	default:
648 		return -ENXIO;
649 	}
650 
651 	return -ENXIO;
652 }
653 
654 static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
655 				       struct kvm_device_attr *attr)
656 {
657 	int ret = -ENXIO;
658 
659 	switch (attr->group) {
660 	case KVM_LOONGARCH_VCPU_CPUCFG:
661 		ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr);
662 		break;
663 	default:
664 		break;
665 	}
666 
667 	return ret;
668 }
669 
670 static int kvm_loongarch_get_cpucfg_attr(struct kvm_vcpu *vcpu,
671 					 struct kvm_device_attr *attr)
672 {
673 	int ret = 0;
674 	uint64_t val;
675 	uint64_t __user *uaddr = (uint64_t __user *)attr->addr;
676 
677 	ret = _kvm_get_cpucfg_mask(attr->attr, &val);
678 	if (ret)
679 		return ret;
680 
681 	ret = put_user(val, uaddr);
682 
683 	return ret;
684 }
685 
686 static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu,
687 				       struct kvm_device_attr *attr)
688 {
689 	int ret = -ENXIO;
690 
691 	switch (attr->group) {
692 	case KVM_LOONGARCH_VCPU_CPUCFG:
693 		ret = kvm_loongarch_get_cpucfg_attr(vcpu, attr);
694 		break;
695 	default:
696 		break;
697 	}
698 
699 	return ret;
700 }
701 
702 static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu,
703 					 struct kvm_device_attr *attr)
704 {
705 	return -ENXIO;
706 }
707 
708 static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu,
709 				       struct kvm_device_attr *attr)
710 {
711 	int ret = -ENXIO;
712 
713 	switch (attr->group) {
714 	case KVM_LOONGARCH_VCPU_CPUCFG:
715 		ret = kvm_loongarch_cpucfg_set_attr(vcpu, attr);
716 		break;
717 	default:
718 		break;
719 	}
720 
721 	return ret;
722 }
723 
724 long kvm_arch_vcpu_ioctl(struct file *filp,
725 			 unsigned int ioctl, unsigned long arg)
726 {
727 	long r;
728 	struct kvm_device_attr attr;
729 	void __user *argp = (void __user *)arg;
730 	struct kvm_vcpu *vcpu = filp->private_data;
731 
732 	/*
733 	 * Only the software CSR state should be modified here.
734 	 *
735 	 * If any hardware CSR register were modified directly, a
736 	 * vcpu_load()/vcpu_put() pair would be needed: the hardware CSR
737 	 * registers belong to the currently loaded vCPU, so other vCPUs
738 	 * would need to reload them when switched in.
739 	 *
740 	 * When the software CSR state is modified, KVM_LARCH_HWCSR_USABLE is
741 	 * cleared in aux_inuse and vcpu_load() reloads the CSRs from software.
742 	 */
743 
744 	switch (ioctl) {
745 	case KVM_SET_ONE_REG:
746 	case KVM_GET_ONE_REG: {
747 		struct kvm_one_reg reg;
748 
749 		r = -EFAULT;
750 		if (copy_from_user(&reg, argp, sizeof(reg)))
751 			break;
752 		if (ioctl == KVM_SET_ONE_REG) {
753 			r = kvm_set_reg(vcpu, &reg);
754 			vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
755 		} else
756 			r = kvm_get_reg(vcpu, &reg);
757 		break;
758 	}
759 	case KVM_ENABLE_CAP: {
760 		struct kvm_enable_cap cap;
761 
762 		r = -EFAULT;
763 		if (copy_from_user(&cap, argp, sizeof(cap)))
764 			break;
765 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
766 		break;
767 	}
768 	case KVM_HAS_DEVICE_ATTR: {
769 		r = -EFAULT;
770 		if (copy_from_user(&attr, argp, sizeof(attr)))
771 			break;
772 		r = kvm_loongarch_vcpu_has_attr(vcpu, &attr);
773 		break;
774 	}
775 	case KVM_GET_DEVICE_ATTR: {
776 		r = -EFAULT;
777 		if (copy_from_user(&attr, argp, sizeof(attr)))
778 			break;
779 		r = kvm_loongarch_vcpu_get_attr(vcpu, &attr);
780 		break;
781 	}
782 	case KVM_SET_DEVICE_ATTR: {
783 		r = -EFAULT;
784 		if (copy_from_user(&attr, argp, sizeof(attr)))
785 			break;
786 		r = kvm_loongarch_vcpu_set_attr(vcpu, &attr);
787 		break;
788 	}
789 	default:
790 		r = -ENOIOCTLCMD;
791 		break;
792 	}
793 
794 	return r;
795 }
796 
797 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
798 {
799 	int i = 0;
800 
801 	fpu->fcc = vcpu->arch.fpu.fcc;
802 	fpu->fcsr = vcpu->arch.fpu.fcsr;
803 	for (i = 0; i < NUM_FPU_REGS; i++)
804 		memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i], FPU_REG_WIDTH / 64);
805 
806 	return 0;
807 }
808 
809 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
810 {
811 	int i = 0;
812 
813 	vcpu->arch.fpu.fcc = fpu->fcc;
814 	vcpu->arch.fpu.fcsr = fpu->fcsr;
815 	for (i = 0; i < NUM_FPU_REGS; i++)
816 		memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i], FPU_REG_WIDTH / 64);
817 
818 	return 0;
819 }
820 
821 /* Enable FPU and restore context */
822 void kvm_own_fpu(struct kvm_vcpu *vcpu)
823 {
824 	preempt_disable();
825 
826 	/* Enable FPU */
827 	set_csr_euen(CSR_EUEN_FPEN);
828 
829 	kvm_restore_fpu(&vcpu->arch.fpu);
830 	vcpu->arch.aux_inuse |= KVM_LARCH_FPU;
831 	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
832 
833 	preempt_enable();
834 }
835 
836 #ifdef CONFIG_CPU_HAS_LSX
837 /* Enable LSX and restore context */
838 int kvm_own_lsx(struct kvm_vcpu *vcpu)
839 {
840 	if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch))
841 		return -EINVAL;
842 
843 	preempt_disable();
844 
845 	/* Enable LSX for guest */
846 	set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN);
847 	switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
848 	case KVM_LARCH_FPU:
849 		/*
850 		 * Guest FPU state already loaded,
851 		 * only restore upper LSX state
852 		 */
853 		_restore_lsx_upper(&vcpu->arch.fpu);
854 		break;
855 	default:
856 		/* Neither FP nor LSX already active,
857 		 * restore full LSX state
858 		 */
859 		kvm_restore_lsx(&vcpu->arch.fpu);
860 		break;
861 	}
862 
863 	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX);
864 	vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU;
865 	preempt_enable();
866 
867 	return 0;
868 }
869 #endif
870 
871 #ifdef CONFIG_CPU_HAS_LASX
872 /* Enable LASX and restore context */
873 int kvm_own_lasx(struct kvm_vcpu *vcpu)
874 {
875 	if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch) || !kvm_guest_has_lasx(&vcpu->arch))
876 		return -EINVAL;
877 
878 	preempt_disable();
879 
880 	set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
881 	switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) {
882 	case KVM_LARCH_LSX:
883 	case KVM_LARCH_LSX | KVM_LARCH_FPU:
884 		/* Guest LSX state already loaded, only restore upper LASX state */
885 		_restore_lasx_upper(&vcpu->arch.fpu);
886 		break;
887 	case KVM_LARCH_FPU:
888 		/* Guest FP state already loaded, only restore upper LSX & LASX state */
889 		_restore_lsx_upper(&vcpu->arch.fpu);
890 		_restore_lasx_upper(&vcpu->arch.fpu);
891 		break;
892 	default:
893 		/* Neither FP nor LSX already active, restore full LASX state */
894 		kvm_restore_lasx(&vcpu->arch.fpu);
895 		break;
896 	}
897 
898 	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX);
899 	vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU;
900 	preempt_enable();
901 
902 	return 0;
903 }
904 #endif
905 
906 /* Save context and disable FPU */
907 void kvm_lose_fpu(struct kvm_vcpu *vcpu)
908 {
909 	preempt_disable();
910 
911 	if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) {
912 		kvm_save_lasx(&vcpu->arch.fpu);
913 		vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX);
914 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LASX);
915 
916 		/* Disable LASX & LSX & FPU */
917 		clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
918 	} else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) {
919 		kvm_save_lsx(&vcpu->arch.fpu);
920 		vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU);
921 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX);
922 
923 		/* Disable LSX & FPU */
924 		clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN);
925 	} else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
926 		kvm_save_fpu(&vcpu->arch.fpu);
927 		vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU;
928 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);
929 
930 		/* Disable FPU */
931 		clear_csr_euen(CSR_EUEN_FPEN);
932 	}
933 
934 	preempt_enable();
935 }
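
/*
 * Summary of the lazy FPU/SIMD handling above (descriptive note): the
 * kvm_own_fpu()/kvm_own_lsx()/kvm_own_lasx() helpers enable the unit in
 * CSR.EUEN, restore the widest context that is not already live, and set the
 * matching KVM_LARCH_* bits in vcpu->arch.aux_inuse; kvm_lose_fpu() saves the
 * widest live context, clears those bits and disables the units again.
 */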
936 
937 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
938 {
939 	int intr = (int)irq->irq;
940 
941 	if (intr > 0)
942 		kvm_queue_irq(vcpu, intr);
943 	else if (intr < 0)
944 		kvm_dequeue_irq(vcpu, -intr);
945 	else {
946 		kvm_err("%s: invalid interrupt ioctl %d\n", __func__, irq->irq);
947 		return -EINVAL;
948 	}
949 
950 	kvm_vcpu_kick(vcpu);
951 
952 	return 0;
953 }
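
/*
 * Convention implemented above (descriptive note): userspace issues
 * KVM_INTERRUPT with a positive irq number to queue that guest interrupt
 * and with its negation to dequeue it (e.g. irq = 3 queues line 3 and
 * irq = -3 dequeues it); irq = 0 is rejected with -EINVAL.
 */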
954 
955 long kvm_arch_vcpu_async_ioctl(struct file *filp,
956 			       unsigned int ioctl, unsigned long arg)
957 {
958 	void __user *argp = (void __user *)arg;
959 	struct kvm_vcpu *vcpu = filp->private_data;
960 
961 	if (ioctl == KVM_INTERRUPT) {
962 		struct kvm_interrupt irq;
963 
964 		if (copy_from_user(&irq, argp, sizeof(irq)))
965 			return -EFAULT;
966 
967 		kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq);
968 
969 		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
970 	}
971 
972 	return -ENOIOCTLCMD;
973 }
974 
975 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
976 {
977 	return 0;
978 }
979 
980 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
981 {
982 	unsigned long timer_hz;
983 	struct loongarch_csrs *csr;
984 
985 	vcpu->arch.vpid = 0;
986 
987 	hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
988 	vcpu->arch.swtimer.function = kvm_swtimer_wakeup;
989 
990 	vcpu->arch.handle_exit = kvm_handle_exit;
991 	vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry;
992 	vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL);
993 	if (!vcpu->arch.csr)
994 		return -ENOMEM;
995 
996 	/*
997 	 * All KVM exceptions share one exception entry, and the host <-> guest
998 	 * switch also switches the ECFG.VS field; keep the host ECFG.VS value here.
999 	 */
1000 	vcpu->arch.host_ecfg = (read_csr_ecfg() & CSR_ECFG_VS);
1001 
1002 	/* Init */
1003 	vcpu->arch.last_sched_cpu = -1;
1004 
1005 	/*
1006 	 * Initialize guest register state to valid architectural reset state.
1007 	 */
1008 	timer_hz = calc_const_freq();
1009 	kvm_init_timer(vcpu, timer_hz);
1010 
1011 	/* Set the initial guest mode: direct address (DA) translation */
1012 	csr = vcpu->arch.csr;
1013 	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CRMD, CSR_CRMD_DA);
1014 
1015 	/* Set cpuid */
1016 	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id);
1017 	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);
1018 
1019 	/* Start with no pending virtual guest interrupts */
1020 	csr->csrs[LOONGARCH_CSR_GINTC] = 0;
1021 
1022 	return 0;
1023 }
1024 
1025 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
1026 {
1027 }
1028 
1029 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1030 {
1031 	int cpu;
1032 	struct kvm_context *context;
1033 
1034 	hrtimer_cancel(&vcpu->arch.swtimer);
1035 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
1036 	kvm_drop_cpuid(vcpu);
1037 	kfree(vcpu->arch.csr);
1038 
1039 	/*
1040 	 * If the vCPU is freed and reused as another vCPU, we don't want the
1041 	 * matching pointer wrongly hanging around in last_vcpu.
1042 	 */
1043 	for_each_possible_cpu(cpu) {
1044 		context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
1045 		if (context->last_vcpu == vcpu)
1046 			context->last_vcpu = NULL;
1047 	}
1048 }
1049 
1050 static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1051 {
1052 	bool migrated;
1053 	struct kvm_context *context;
1054 	struct loongarch_csrs *csr = vcpu->arch.csr;
1055 
1056 	/*
1057 	 * Have we migrated to a different CPU?
1058 	 * If so, any old guest TLB state may be stale.
1059 	 */
1060 	migrated = (vcpu->arch.last_sched_cpu != cpu);
1061 
1062 	/*
1063 	 * Was this the last vCPU to run on this CPU?
1064 	 * If not, any old guest state from this vCPU will have been clobbered.
1065 	 */
1066 	context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
1067 	if (migrated || (context->last_vcpu != vcpu))
1068 		vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
1069 	context->last_vcpu = vcpu;
1070 
1071 	/* Restore timer state regardless */
1072 	kvm_restore_timer(vcpu);
1073 
1074 	/* Let root mode control the guest page CCA (memory access type) attribute */
1075 	change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT);
1076 
1077 	/* Don't bother restoring registers multiple times unless necessary */
1078 	if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE)
1079 		return 0;
1080 
1081 	write_csr_gcntc((ulong)vcpu->kvm->arch.time_offset);
1082 
1083 	/* Restore guest CSR registers */
1084 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
1085 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
1086 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
1087 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_MISC);
1088 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
1089 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ERA);
1090 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADV);
1091 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADI);
1092 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
1093 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
1094 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
1095 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
1096 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
1097 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ASID);
1098 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
1099 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
1100 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
1101 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
1102 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
1103 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
1104 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
1105 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS0);
1106 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS1);
1107 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS2);
1108 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS3);
1109 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS4);
1110 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS5);
1111 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS6);
1112 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS7);
1113 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TMID);
1114 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
1115 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
1116 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
1117 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
1118 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
1119 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
1120 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
1121 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
1122 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
1123 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
1124 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
1125 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
1126 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
1127 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
1128 
1129 	/* Restore Root.GINTC from unused Guest.GINTC register */
1130 	write_csr_gintc(csr->csrs[LOONGARCH_CSR_GINTC]);
1131 
1132 	/*
1133 	 * Clear the linked-load bit to break interrupted atomics. This
1134 	 * prevents an SC on the next vCPU from succeeding by matching an LL
1135 	 * performed by the previous vCPU.
1136 	 */
1137 	if (vcpu->kvm->created_vcpus > 1)
1138 		set_gcsr_llbctl(CSR_LLBCTL_WCLLB);
1139 
1140 	vcpu->arch.aux_inuse |= KVM_LARCH_HWCSR_USABLE;
1141 
1142 	return 0;
1143 }
1144 
1145 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1146 {
1147 	unsigned long flags;
1148 
1149 	local_irq_save(flags);
1150 	/* Restore guest state to hardware registers */
1151 	_kvm_vcpu_load(vcpu, cpu);
1152 	local_irq_restore(flags);
1153 }
1154 
1155 static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
1156 {
1157 	struct loongarch_csrs *csr = vcpu->arch.csr;
1158 
1159 	kvm_lose_fpu(vcpu);
1160 
1161 	/*
1162 	 * Update the software CSR state from hardware if it is stale.
1163 	 * Most CSR registers are kept unchanged across a process context
1164 	 * switch, except for registers such as the remaining timer tick
1165 	 * value and the injected interrupt state.
1166 	 */
1167 	if (vcpu->arch.aux_inuse & KVM_LARCH_SWCSR_LATEST)
1168 		goto out;
1169 
1170 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
1171 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
1172 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
1173 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_MISC);
1174 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
1175 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ERA);
1176 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADV);
1177 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADI);
1178 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
1179 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
1180 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
1181 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
1182 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
1183 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ASID);
1184 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
1185 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
1186 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
1187 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
1188 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
1189 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
1190 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
1191 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG1);
1192 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG2);
1193 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG3);
1194 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS0);
1195 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS1);
1196 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS2);
1197 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS3);
1198 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS4);
1199 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS5);
1200 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS6);
1201 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS7);
1202 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TMID);
1203 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
1204 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
1205 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
1206 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
1207 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
1208 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
1209 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
1210 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
1211 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
1212 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
1213 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
1214 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
1215 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
1216 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
1217 
1218 	vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST;
1219 
1220 out:
1221 	kvm_save_timer(vcpu);
1222 	/* Save Root.GINTC into unused Guest.GINTC register */
1223 	csr->csrs[LOONGARCH_CSR_GINTC] = read_csr_gintc();
1224 
1225 	return 0;
1226 }
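
/*
 * Note on the two aux_inuse flags used above (descriptive summary):
 * KVM_LARCH_HWCSR_USABLE means the hardware GCSRs currently hold this vCPU's
 * state, so _kvm_vcpu_load() may skip the restore; KVM_LARCH_SWCSR_LATEST
 * means the software copy is already up to date, so _kvm_vcpu_put() may skip
 * the save.  Entering the guest clears SWCSR_LATEST, and a KVM_SET_ONE_REG
 * ioctl clears HWCSR_USABLE.
 */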
1227 
1228 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1229 {
1230 	int cpu;
1231 	unsigned long flags;
1232 
1233 	local_irq_save(flags);
1234 	cpu = smp_processor_id();
1235 	vcpu->arch.last_sched_cpu = cpu;
1236 
1237 	/* Save guest state from hardware registers */
1238 	_kvm_vcpu_put(vcpu, cpu);
1239 	local_irq_restore(flags);
1240 }
1241 
1242 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
1243 {
1244 	int r = -EINTR;
1245 	struct kvm_run *run = vcpu->run;
1246 
1247 	if (vcpu->mmio_needed) {
1248 		if (!vcpu->mmio_is_write)
1249 			kvm_complete_mmio_read(vcpu, run);
1250 		vcpu->mmio_needed = 0;
1251 	}
1252 
1253 	if (run->exit_reason == KVM_EXIT_LOONGARCH_IOCSR) {
1254 		if (!run->iocsr_io.is_write)
1255 			kvm_complete_iocsr_read(vcpu, run);
1256 	}
1257 
1258 	if (run->immediate_exit)
1259 		return r;
1260 
1261 	/* Clear exit_reason */
1262 	run->exit_reason = KVM_EXIT_UNKNOWN;
1263 	lose_fpu(1);
1264 	vcpu_load(vcpu);
1265 	kvm_sigset_activate(vcpu);
1266 	r = kvm_pre_enter_guest(vcpu);
1267 	if (r != RESUME_GUEST)
1268 		goto out;
1269 
1270 	guest_timing_enter_irqoff();
1271 	guest_state_enter_irqoff();
1272 	trace_kvm_enter(vcpu);
1273 	r = kvm_loongarch_ops->enter_guest(run, vcpu);
1274 
1275 	trace_kvm_out(vcpu);
1276 	/*
1277 	 * Guest exit is already recorded in kvm_handle_exit();
1278 	 * the return value must not be RESUME_GUEST
1279 	 */
1280 	local_irq_enable();
1281 out:
1282 	kvm_sigset_deactivate(vcpu);
1283 	vcpu_put(vcpu);
1284 
1285 	return r;
1286 }
1287