xref: /linux/arch/loongarch/kvm/vcpu.c (revision e927c520e1ba6b6a0b2022adf5ba455ee1553fc1)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
4  */
5 
6 #include <linux/kvm_host.h>
7 #include <asm/fpu.h>
8 #include <asm/lbt.h>
9 #include <asm/loongarch.h>
10 #include <asm/setup.h>
11 #include <asm/time.h>
12 
13 #define CREATE_TRACE_POINTS
14 #include "trace.h"
15 
16 const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
17 	KVM_GENERIC_VCPU_STATS(),
18 	STATS_DESC_COUNTER(VCPU, int_exits),
19 	STATS_DESC_COUNTER(VCPU, idle_exits),
20 	STATS_DESC_COUNTER(VCPU, cpucfg_exits),
21 	STATS_DESC_COUNTER(VCPU, signal_exits),
22 	STATS_DESC_COUNTER(VCPU, hypercall_exits),
23 	STATS_DESC_COUNTER(VCPU, ipi_read_exits),
24 	STATS_DESC_COUNTER(VCPU, ipi_write_exits),
25 	STATS_DESC_COUNTER(VCPU, eiointc_read_exits),
26 	STATS_DESC_COUNTER(VCPU, eiointc_write_exits),
27 	STATS_DESC_COUNTER(VCPU, pch_pic_read_exits),
28 	STATS_DESC_COUNTER(VCPU, pch_pic_write_exits)
29 };
30 
31 const struct kvm_stats_header kvm_vcpu_stats_header = {
32 	.name_size = KVM_STATS_NAME_SIZE,
33 	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
34 	.id_offset = sizeof(struct kvm_stats_header),
35 	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
36 	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
37 		       sizeof(kvm_vcpu_stats_desc),
38 };
39 
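/*
 * The four helpers below swap raw PMU CSR state between the host (saved in
 * the per-CPU struct kvm_context) and the guest (saved in vcpu->arch.csr).
 * The perfctrl registers are zeroed / read-and-cleared on save so that
 * counting stops while the other context's counters are loaded.
 */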
40 static inline void kvm_save_host_pmu(struct kvm_vcpu *vcpu)
41 {
42 	struct kvm_context *context;
43 
44 	context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
45 	context->perf_cntr[0] = read_csr_perfcntr0();
46 	context->perf_cntr[1] = read_csr_perfcntr1();
47 	context->perf_cntr[2] = read_csr_perfcntr2();
48 	context->perf_cntr[3] = read_csr_perfcntr3();
49 	context->perf_ctrl[0] = write_csr_perfctrl0(0);
50 	context->perf_ctrl[1] = write_csr_perfctrl1(0);
51 	context->perf_ctrl[2] = write_csr_perfctrl2(0);
52 	context->perf_ctrl[3] = write_csr_perfctrl3(0);
53 }
54 
55 static inline void kvm_restore_host_pmu(struct kvm_vcpu *vcpu)
56 {
57 	struct kvm_context *context;
58 
59 	context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
60 	write_csr_perfcntr0(context->perf_cntr[0]);
61 	write_csr_perfcntr1(context->perf_cntr[1]);
62 	write_csr_perfcntr2(context->perf_cntr[2]);
63 	write_csr_perfcntr3(context->perf_cntr[3]);
64 	write_csr_perfctrl0(context->perf_ctrl[0]);
65 	write_csr_perfctrl1(context->perf_ctrl[1]);
66 	write_csr_perfctrl2(context->perf_ctrl[2]);
67 	write_csr_perfctrl3(context->perf_ctrl[3]);
68 }
69 
70 
71 static inline void kvm_save_guest_pmu(struct kvm_vcpu *vcpu)
72 {
73 	struct loongarch_csrs *csr = vcpu->arch.csr;
74 
75 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
76 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
77 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
78 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
79 	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
80 	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
81 	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
82 	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
83 }
84 
85 static inline void kvm_restore_guest_pmu(struct kvm_vcpu *vcpu)
86 {
87 	struct loongarch_csrs *csr = vcpu->arch.csr;
88 
89 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
90 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
91 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
92 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
93 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
94 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
95 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
96 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
97 }
98 
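/*
 * Give the vCPU direct access to the hardware PMU: save the host PMU state,
 * program the counter-passthrough field GCFG.GPERF with the number of
 * counters the guest may use, then load the guest's PMU CSR state.
 */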
99 static int kvm_own_pmu(struct kvm_vcpu *vcpu)
100 {
101 	unsigned long val;
102 
103 	if (!kvm_guest_has_pmu(&vcpu->arch))
104 		return -EINVAL;
105 
106 	kvm_save_host_pmu(vcpu);
107 
108 	/* Pass PMU counters PM0-PM(num) through to the guest */
109 	val = read_csr_gcfg() & ~CSR_GCFG_GPERF;
110 	val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT;
111 	write_csr_gcfg(val);
112 
113 	kvm_restore_guest_pmu(vcpu);
114 
115 	return 0;
116 }
117 
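/*
 * Reclaim the PMU from the guest: save the guest PMU CSRs, revoke guest
 * access via GCFG.GPERF and reload the host PMU state.
 */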
118 static void kvm_lose_pmu(struct kvm_vcpu *vcpu)
119 {
120 	unsigned long val;
121 	struct loongarch_csrs *csr = vcpu->arch.csr;
122 
123 	if (!(vcpu->arch.aux_inuse & KVM_LARCH_PMU))
124 		return;
125 
126 	kvm_save_guest_pmu(vcpu);
127 
128 	/* Disable pmu access from guest */
129 	write_csr_gcfg(read_csr_gcfg() & ~CSR_GCFG_GPERF);
130 
131 	/*
132 	 * Clear KVM_LARCH_PMU if the guest is not using the PMU CSRs when
133 	 * exiting the guest, so that on the next trap into the guest there
134 	 * is no need to deal with the PMU CSR context.
135 	 *
136 	 * Otherwise set the KVM_REQ_PMU request bit to restore the guest PMU
137 	 * state before entering the guest VM.
138 	 */
139 	val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
140 	val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
141 	val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
142 	val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
143 	if (!(val & KVM_PMU_EVENT_ENABLED))
144 		vcpu->arch.aux_inuse &= ~KVM_LARCH_PMU;
145 	else
146 		kvm_make_request(KVM_REQ_PMU, vcpu);
147 
148 	kvm_restore_host_pmu(vcpu);
149 }
150 
151 static void kvm_check_pmu(struct kvm_vcpu *vcpu)
152 {
153 	if (kvm_check_request(KVM_REQ_PMU, vcpu)) {
154 		kvm_own_pmu(vcpu);
155 		vcpu->arch.aux_inuse |= KVM_LARCH_PMU;
156 	}
157 }
158 
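/*
 * Publish accumulated steal time to the guest's registered steal-time page.
 * The version field works like a seqcount: it is bumped to an odd value
 * before the update and to an even value afterwards, so the guest can detect
 * and retry a torn read.
 */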
159 static void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
160 {
161 	u32 version;
162 	u64 steal;
163 	gpa_t gpa;
164 	struct kvm_memslots *slots;
165 	struct kvm_steal_time __user *st;
166 	struct gfn_to_hva_cache *ghc;
167 
168 	ghc = &vcpu->arch.st.cache;
169 	gpa = vcpu->arch.st.guest_addr;
170 	if (!(gpa & KVM_STEAL_PHYS_VALID))
171 		return;
172 
173 	gpa &= KVM_STEAL_PHYS_MASK;
174 	slots = kvm_memslots(vcpu->kvm);
175 	if (slots->generation != ghc->generation || gpa != ghc->gpa) {
176 		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) {
177 			ghc->gpa = INVALID_GPA;
178 			return;
179 		}
180 	}
181 
182 	st = (struct kvm_steal_time __user *)ghc->hva;
183 	unsafe_get_user(version, &st->version, out);
184 	if (version & 1)
185 		version += 1; /* first time write, random junk */
186 
187 	version += 1;
188 	unsafe_put_user(version, &st->version, out);
189 	smp_wmb();
190 
191 	unsafe_get_user(steal, &st->steal, out);
192 	steal += current->sched_info.run_delay - vcpu->arch.st.last_steal;
193 	vcpu->arch.st.last_steal = current->sched_info.run_delay;
194 	unsafe_put_user(steal, &st->steal, out);
195 
196 	smp_wmb();
197 	version += 1;
198 	unsafe_put_user(version, &st->version, out);
199 out:
200 	mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
201 }
202 
203 /*
204  * kvm_check_requests - check and handle pending vCPU requests
205  *
206  * Return: RESUME_GUEST if we should enter the guest
207  *         RESUME_HOST  if we should exit to userspace
208  */
209 static int kvm_check_requests(struct kvm_vcpu *vcpu)
210 {
211 	if (!kvm_request_pending(vcpu))
212 		return RESUME_GUEST;
213 
214 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
215 		vcpu->arch.vpid = 0;  /* Drop vpid for this vCPU */
216 
217 	if (kvm_dirty_ring_check_request(vcpu))
218 		return RESUME_HOST;
219 
220 	if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
221 		kvm_update_stolen_time(vcpu);
222 
223 	return RESUME_GUEST;
224 }
225 
226 static void kvm_late_check_requests(struct kvm_vcpu *vcpu)
227 {
228 	lockdep_assert_irqs_disabled();
229 	if (kvm_check_request(KVM_REQ_TLB_FLUSH_GPA, vcpu))
230 		if (vcpu->arch.flush_gpa != INVALID_GPA) {
231 			kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa);
232 			vcpu->arch.flush_gpa = INVALID_GPA;
233 		}
234 }
235 
236 /*
237  * Check and handle pending signal and vCPU requests etc
238  * Run with irq enabled and preempt enabled
239  *
240  * Return: RESUME_GUEST if we should enter the guest
241  *         RESUME_HOST  if we should exit to userspace
242  *         < 0 if we should exit to userspace, where the return value
243  *         indicates an error
244  */
245 static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
246 {
247 	int idx, ret;
248 
249 	/*
250 	 * Check conditions before entering the guest
251 	 */
252 	ret = kvm_xfer_to_guest_mode_handle_work(vcpu);
253 	if (ret < 0)
254 		return ret;
255 
256 	idx = srcu_read_lock(&vcpu->kvm->srcu);
257 	ret = kvm_check_requests(vcpu);
258 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
259 
260 	return ret;
261 }
262 
263 /*
264  * Called with irq enabled
265  *
266  * Return: RESUME_GUEST if we should enter the guest, and irq disabled
267  *         Others if we should exit to userspace
268  */
269 static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
270 {
271 	int ret;
272 
273 	do {
274 		ret = kvm_enter_guest_check(vcpu);
275 		if (ret != RESUME_GUEST)
276 			break;
277 
278 		/*
279 		 * Handle vcpu timer, interrupts, check requests and
280 		 * check vmid before vcpu enter guest
281 		 */
282 		local_irq_disable();
283 		kvm_deliver_intr(vcpu);
284 		kvm_deliver_exception(vcpu);
285 		/* Make sure the vcpu mode has been written */
286 		smp_store_mb(vcpu->mode, IN_GUEST_MODE);
287 		kvm_check_vpid(vcpu);
288 		kvm_check_pmu(vcpu);
289 
290 		/*
291 		 * Must be called after kvm_check_vpid(), since that updates
292 		 * CSR.GSTAT, which is used by kvm_flush_tlb_gpa(), and it may
293 		 * also clear the pending KVM_REQ_TLB_FLUSH_GPA bit
294 		 */
295 		kvm_late_check_requests(vcpu);
296 		vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY);
297 		/* Clear KVM_LARCH_SWCSR_LATEST as CSRs will change when entering the guest */
298 		vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
299 
300 		if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) {
301 			if (vcpu->arch.aux_inuse & KVM_LARCH_PMU) {
302 				kvm_lose_pmu(vcpu);
303 				kvm_make_request(KVM_REQ_PMU, vcpu);
304 			}
305 			/* make sure the vcpu mode has been written */
306 			smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE);
307 			local_irq_enable();
308 			ret = -EAGAIN;
309 		}
310 	} while (ret != RESUME_GUEST);
311 
312 	return ret;
313 }
314 
315 /*
316  * Return 1 for resume guest and "<= 0" for resume host.
317  */
318 static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
319 {
320 	int ret = RESUME_GUEST;
321 	unsigned long estat = vcpu->arch.host_estat;
322 	u32 intr = estat & CSR_ESTAT_IS;
323 	u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
324 
325 	vcpu->mode = OUTSIDE_GUEST_MODE;
326 
327 	/* Set a default exit reason */
328 	run->exit_reason = KVM_EXIT_UNKNOWN;
329 
330 	kvm_lose_pmu(vcpu);
331 
332 	guest_timing_exit_irqoff();
333 	guest_state_exit_irqoff();
334 	local_irq_enable();
335 
336 	trace_kvm_exit(vcpu, ecode);
337 	if (ecode) {
338 		ret = kvm_handle_fault(vcpu, ecode);
339 	} else {
340 		WARN(!intr, "vm exiting with suspicious irq\n");
341 		++vcpu->stat.int_exits;
342 	}
343 
344 	if (ret == RESUME_GUEST)
345 		ret = kvm_pre_enter_guest(vcpu);
346 
347 	if (ret != RESUME_GUEST) {
348 		local_irq_disable();
349 		return ret;
350 	}
351 
352 	guest_timing_enter_irqoff();
353 	guest_state_enter_irqoff();
354 	trace_kvm_reenter(vcpu);
355 
356 	return RESUME_GUEST;
357 }
358 
359 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
360 {
361 	return !!(vcpu->arch.irq_pending) &&
362 		vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE;
363 }
364 
365 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
366 {
367 	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
368 }
369 
370 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
371 {
372 	unsigned long val;
373 
374 	preempt_disable();
375 	val = gcsr_read(LOONGARCH_CSR_CRMD);
376 	preempt_enable();
377 
378 	return (val & CSR_PRMD_PPLV) == PLV_KERN;
379 }
380 
381 #ifdef CONFIG_GUEST_PERF_EVENTS
382 unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
383 {
384 	return vcpu->arch.pc;
385 }
386 
387 /*
388  * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
389  * arrived in guest context.  For LoongArch64, if PMU is not passthrough to VM,
390  * any event that arrives while a vCPU is loaded is considered to be "in guest".
391  */
392 bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
393 {
394 	return (vcpu && !(vcpu->arch.aux_inuse & KVM_LARCH_PMU));
395 }
396 #endif
397 
398 bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
399 {
400 	return false;
401 }
402 
403 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
404 {
405 	return VM_FAULT_SIGBUS;
406 }
407 
408 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
409 				  struct kvm_translation *tr)
410 {
411 	return -EINVAL;
412 }
413 
414 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
415 {
416 	int ret;
417 
418 	/* Protect from TOD sync and vcpu_load/put() */
419 	preempt_disable();
420 	ret = kvm_pending_timer(vcpu) ||
421 		kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT) & (1 << INT_TI);
422 	preempt_enable();
423 
424 	return ret;
425 }
426 
427 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
428 {
429 	int i;
430 
431 	kvm_debug("vCPU Register Dump:\n");
432 	kvm_debug("\tPC = 0x%08lx\n", vcpu->arch.pc);
433 	kvm_debug("\tExceptions: %08lx\n", vcpu->arch.irq_pending);
434 
435 	for (i = 0; i < 32; i += 4) {
436 		kvm_debug("\tGPR%02d: %08lx %08lx %08lx %08lx\n", i,
437 		       vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1],
438 		       vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
439 	}
440 
441 	kvm_debug("\tCRMD: 0x%08lx, ESTAT: 0x%08lx\n",
442 		  kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD),
443 		  kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT));
444 
445 	kvm_debug("\tERA: 0x%08lx\n", kvm_read_hw_gcsr(LOONGARCH_CSR_ERA));
446 
447 	return 0;
448 }
449 
450 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
451 				struct kvm_mp_state *mp_state)
452 {
453 	*mp_state = vcpu->arch.mp_state;
454 
455 	return 0;
456 }
457 
458 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
459 				struct kvm_mp_state *mp_state)
460 {
461 	int ret = 0;
462 
463 	switch (mp_state->mp_state) {
464 	case KVM_MP_STATE_RUNNABLE:
465 		vcpu->arch.mp_state = *mp_state;
466 		break;
467 	default:
468 		ret = -EINVAL;
469 	}
470 
471 	return ret;
472 }
473 
474 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
475 					struct kvm_guest_debug *dbg)
476 {
477 	if (dbg->control & ~KVM_GUESTDBG_VALID_MASK)
478 		return -EINVAL;
479 
480 	if (dbg->control & KVM_GUESTDBG_ENABLE)
481 		vcpu->guest_debug = dbg->control;
482 	else
483 		vcpu->guest_debug = 0;
484 
485 	return 0;
486 }
487 
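/*
 * Bind a guest physical CPUID to this vCPU. The mapping is one-to-one and
 * write-once: a vCPU may not change its CPUID after it has been set, and two
 * vCPUs may not share the same CPUID. Re-setting the same value is a no-op.
 */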
488 static inline int kvm_set_cpuid(struct kvm_vcpu *vcpu, u64 val)
489 {
490 	int cpuid;
491 	struct kvm_phyid_map *map;
492 	struct loongarch_csrs *csr = vcpu->arch.csr;
493 
494 	if (val >= KVM_MAX_PHYID)
495 		return -EINVAL;
496 
497 	map = vcpu->kvm->arch.phyid_map;
498 	cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);
499 
500 	spin_lock(&vcpu->kvm->arch.phyid_map_lock);
501 	if ((cpuid < KVM_MAX_PHYID) && map->phys_map[cpuid].enabled) {
502 		/* Discard duplicated CPUID set operation */
503 		if (cpuid == val) {
504 			spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
505 			return 0;
506 		}
507 
508 		/*
509 		 * CPUID is already set before
510 		 * Forbid changing to a different CPUID at runtime
511 		 */
512 		spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
513 		return -EINVAL;
514 	}
515 
516 	if (map->phys_map[val].enabled) {
517 		/* Discard duplicated CPUID set operation */
518 		if (vcpu == map->phys_map[val].vcpu) {
519 			spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
520 			return 0;
521 		}
522 
523 		/*
524 		 * New CPUID is already set with other vcpu
525 		 * Forbid sharing the same CPUID between different vcpus
526 		 */
527 		spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
528 		return -EINVAL;
529 	}
530 
531 	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, val);
532 	map->phys_map[val].enabled	= true;
533 	map->phys_map[val].vcpu		= vcpu;
534 	spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
535 
536 	return 0;
537 }
538 
539 static inline void kvm_drop_cpuid(struct kvm_vcpu *vcpu)
540 {
541 	int cpuid;
542 	struct kvm_phyid_map *map;
543 	struct loongarch_csrs *csr = vcpu->arch.csr;
544 
545 	map = vcpu->kvm->arch.phyid_map;
546 	cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);
547 
548 	if (cpuid >= KVM_MAX_PHYID)
549 		return;
550 
551 	spin_lock(&vcpu->kvm->arch.phyid_map_lock);
552 	if (map->phys_map[cpuid].enabled) {
553 		map->phys_map[cpuid].vcpu = NULL;
554 		map->phys_map[cpuid].enabled = false;
555 		kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);
556 	}
557 	spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
558 }
559 
560 struct kvm_vcpu *kvm_get_vcpu_by_cpuid(struct kvm *kvm, int cpuid)
561 {
562 	struct kvm_phyid_map *map;
563 
564 	if (cpuid >= KVM_MAX_PHYID)
565 		return NULL;
566 
567 	map = kvm->arch.phyid_map;
568 	if (!map->phys_map[cpuid].enabled)
569 		return NULL;
570 
571 	return map->phys_map[cpuid].vcpu;
572 }
573 
574 static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val)
575 {
576 	unsigned long gintc;
577 	struct loongarch_csrs *csr = vcpu->arch.csr;
578 
579 	if (get_gcsr_flag(id) & INVALID_GCSR)
580 		return -EINVAL;
581 
582 	if (id == LOONGARCH_CSR_ESTAT) {
583 		preempt_disable();
584 		vcpu_load(vcpu);
585 		/*
586 		 * Sync pending interrupts into ESTAT so that the interrupt
587 		 * state is preserved across the VM migration stage
588 		 */
589 		kvm_deliver_intr(vcpu);
590 		vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
591 		vcpu_put(vcpu);
592 		preempt_enable();
593 
594 		/* ESTAT IP0~IP7 get from GINTC */
595 		gintc = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC) & 0xff;
596 		*val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT) | (gintc << 2);
597 		return 0;
598 	}
599 
600 	/*
601 	 * Get software CSR state since software state is consistent
602 	 * with hardware for synchronous ioctl
603 	 */
604 	*val = kvm_read_sw_gcsr(csr, id);
605 
606 	return 0;
607 }
608 
609 static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
610 {
611 	int ret = 0, gintc;
612 	struct loongarch_csrs *csr = vcpu->arch.csr;
613 
614 	if (get_gcsr_flag(id) & INVALID_GCSR)
615 		return -EINVAL;
616 
617 	if (id == LOONGARCH_CSR_CPUID)
618 		return kvm_set_cpuid(vcpu, val);
619 
620 	if (id == LOONGARCH_CSR_ESTAT) {
621 		/* ESTAT IP0~IP7 inject through GINTC */
622 		gintc = (val >> 2) & 0xff;
623 		kvm_set_sw_gcsr(csr, LOONGARCH_CSR_GINTC, gintc);
624 
625 		gintc = val & ~(0xffUL << 2);
626 		kvm_set_sw_gcsr(csr, LOONGARCH_CSR_ESTAT, gintc);
627 
628 		return ret;
629 	}
630 
631 	kvm_write_sw_gcsr(csr, id, val);
632 
633 	/*
634 	 * After modifying a PMU CSR value of the vcpu, set KVM_REQ_PMU
635 	 * if any of the PMU CSRs are in use.
636 	 */
637 	if (id >= LOONGARCH_CSR_PERFCTRL0 && id <= LOONGARCH_CSR_PERFCNTR3) {
638 		unsigned long val;
639 
640 		val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0) |
641 		      kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1) |
642 		      kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2) |
643 		      kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
644 
645 		if (val & KVM_PMU_EVENT_ENABLED)
646 			kvm_make_request(KVM_REQ_PMU, vcpu);
647 	}
648 
649 	return ret;
650 }
651 
652 static int _kvm_get_cpucfg_mask(int id, u64 *v)
653 {
654 	if (id < 0 || id >= KVM_MAX_CPUCFG_REGS)
655 		return -EINVAL;
656 
657 	switch (id) {
658 	case LOONGARCH_CPUCFG0:
659 		*v = GENMASK(31, 0);
660 		return 0;
661 	case LOONGARCH_CPUCFG1:
662 		/* CPUCFG1_MSGINT is not supported by KVM */
663 		*v = GENMASK(25, 0);
664 		return 0;
665 	case LOONGARCH_CPUCFG2:
666 		/* CPUCFG2 features unconditionally supported by KVM */
667 		*v = CPUCFG2_FP     | CPUCFG2_FPSP  | CPUCFG2_FPDP     |
668 		     CPUCFG2_FPVERS | CPUCFG2_LLFTP | CPUCFG2_LLFTPREV |
669 		     CPUCFG2_LSPW | CPUCFG2_LAM;
670 		/*
671 		 * For the ISA extensions listed below, if one is supported
672 		 * by the host, then it is also supported by KVM.
673 		 */
674 		if (cpu_has_lsx)
675 			*v |= CPUCFG2_LSX;
676 		if (cpu_has_lasx)
677 			*v |= CPUCFG2_LASX;
678 		if (cpu_has_lbt_x86)
679 			*v |= CPUCFG2_X86BT;
680 		if (cpu_has_lbt_arm)
681 			*v |= CPUCFG2_ARMBT;
682 		if (cpu_has_lbt_mips)
683 			*v |= CPUCFG2_MIPSBT;
684 		if (cpu_has_ptw)
685 			*v |= CPUCFG2_PTW;
686 
687 		return 0;
688 	case LOONGARCH_CPUCFG3:
689 		*v = GENMASK(16, 0);
690 		return 0;
691 	case LOONGARCH_CPUCFG4:
692 	case LOONGARCH_CPUCFG5:
693 		*v = GENMASK(31, 0);
694 		return 0;
695 	case LOONGARCH_CPUCFG6:
696 		if (cpu_has_pmp)
697 			*v = GENMASK(14, 0);
698 		else
699 			*v = 0;
700 		return 0;
701 	case LOONGARCH_CPUCFG16:
702 		*v = GENMASK(16, 0);
703 		return 0;
704 	case LOONGARCH_CPUCFG17 ... LOONGARCH_CPUCFG20:
705 		*v = GENMASK(30, 0);
706 		return 0;
707 	default:
708 		/*
709 		 * CPUCFG bits should be zero if reserved by HW or not
710 		 * supported by KVM.
711 		 */
712 		*v = 0;
713 		return 0;
714 	}
715 }
716 
717 static int kvm_check_cpucfg(int id, u64 val)
718 {
719 	int ret;
720 	u64 mask = 0;
721 
722 	ret = _kvm_get_cpucfg_mask(id, &mask);
723 	if (ret)
724 		return ret;
725 
726 	if (val & ~mask)
727 		/* Unsupported features and/or the higher 32 bits should not be set */
728 		return -EINVAL;
729 
730 	switch (id) {
731 	case LOONGARCH_CPUCFG2:
732 		if (!(val & CPUCFG2_LLFTP))
733 			/* Guests must have a constant timer */
734 			return -EINVAL;
735 		if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP)))
736 			/* Single- and double-precision floating point must both be set when FP is enabled */
737 			return -EINVAL;
738 		if ((val & CPUCFG2_LSX) && !(val & CPUCFG2_FP))
739 			/* LSX architecturally implies FP but val does not satisfy that */
740 			return -EINVAL;
741 		if ((val & CPUCFG2_LASX) && !(val & CPUCFG2_LSX))
742 			/* LASX architecturally implies LSX and FP but val does not satisfy that */
743 			return -EINVAL;
744 		return 0;
745 	case LOONGARCH_CPUCFG6:
746 		if (val & CPUCFG6_PMP) {
747 			u32 host = read_cpucfg(LOONGARCH_CPUCFG6);
748 			if ((val & CPUCFG6_PMBITS) != (host & CPUCFG6_PMBITS))
749 				return -EINVAL;
750 			if ((val & CPUCFG6_PMNUM) > (host & CPUCFG6_PMNUM))
751 				return -EINVAL;
752 			if ((val & CPUCFG6_UPM) && !(host & CPUCFG6_UPM))
753 				return -EINVAL;
754 		}
755 		return 0;
756 	default:
757 		/*
758 		 * Values for the other CPUCFG IDs are not being further validated
759 		 * besides the mask check above.
760 		 */
761 		return 0;
762 	}
763 }
764 
765 static int kvm_get_one_reg(struct kvm_vcpu *vcpu,
766 		const struct kvm_one_reg *reg, u64 *v)
767 {
768 	int id, ret = 0;
769 	u64 type = reg->id & KVM_REG_LOONGARCH_MASK;
770 
771 	switch (type) {
772 	case KVM_REG_LOONGARCH_CSR:
773 		id = KVM_GET_IOC_CSR_IDX(reg->id);
774 		ret = _kvm_getcsr(vcpu, id, v);
775 		break;
776 	case KVM_REG_LOONGARCH_CPUCFG:
777 		id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
778 		if (id >= 0 && id < KVM_MAX_CPUCFG_REGS)
779 			*v = vcpu->arch.cpucfg[id];
780 		else
781 			ret = -EINVAL;
782 		break;
783 	case KVM_REG_LOONGARCH_LBT:
784 		if (!kvm_guest_has_lbt(&vcpu->arch))
785 			return -ENXIO;
786 
787 		switch (reg->id) {
788 		case KVM_REG_LOONGARCH_LBT_SCR0:
789 			*v = vcpu->arch.lbt.scr0;
790 			break;
791 		case KVM_REG_LOONGARCH_LBT_SCR1:
792 			*v = vcpu->arch.lbt.scr1;
793 			break;
794 		case KVM_REG_LOONGARCH_LBT_SCR2:
795 			*v = vcpu->arch.lbt.scr2;
796 			break;
797 		case KVM_REG_LOONGARCH_LBT_SCR3:
798 			*v = vcpu->arch.lbt.scr3;
799 			break;
800 		case KVM_REG_LOONGARCH_LBT_EFLAGS:
801 			*v = vcpu->arch.lbt.eflags;
802 			break;
803 		case KVM_REG_LOONGARCH_LBT_FTOP:
804 			*v = vcpu->arch.fpu.ftop;
805 			break;
806 		default:
807 			ret = -EINVAL;
808 			break;
809 		}
810 		break;
811 	case KVM_REG_LOONGARCH_KVM:
812 		switch (reg->id) {
813 		case KVM_REG_LOONGARCH_COUNTER:
814 			*v = drdtime() + vcpu->kvm->arch.time_offset;
815 			break;
816 		case KVM_REG_LOONGARCH_DEBUG_INST:
817 			*v = INSN_HVCL | KVM_HCALL_SWDBG;
818 			break;
819 		default:
820 			ret = -EINVAL;
821 			break;
822 		}
823 		break;
824 	default:
825 		ret = -EINVAL;
826 		break;
827 	}
828 
829 	return ret;
830 }
831 
832 static int kvm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
833 {
834 	int ret = 0;
835 	u64 v, size = reg->id & KVM_REG_SIZE_MASK;
836 
837 	switch (size) {
838 	case KVM_REG_SIZE_U64:
839 		ret = kvm_get_one_reg(vcpu, reg, &v);
840 		if (ret)
841 			return ret;
842 		ret = put_user(v, (u64 __user *)(long)reg->addr);
843 		break;
844 	default:
845 		ret = -EINVAL;
846 		break;
847 	}
848 
849 	return ret;
850 }
851 
852 static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
853 			const struct kvm_one_reg *reg, u64 v)
854 {
855 	int id, ret = 0;
856 	u64 type = reg->id & KVM_REG_LOONGARCH_MASK;
857 
858 	switch (type) {
859 	case KVM_REG_LOONGARCH_CSR:
860 		id = KVM_GET_IOC_CSR_IDX(reg->id);
861 		ret = _kvm_setcsr(vcpu, id, v);
862 		break;
863 	case KVM_REG_LOONGARCH_CPUCFG:
864 		id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
865 		ret = kvm_check_cpucfg(id, v);
866 		if (ret)
867 			break;
868 		vcpu->arch.cpucfg[id] = (u32)v;
869 		if (id == LOONGARCH_CPUCFG6)
870 			vcpu->arch.max_pmu_csrid =
871 				LOONGARCH_CSR_PERFCTRL0 + 2 * kvm_get_pmu_num(&vcpu->arch) + 1;
872 		break;
873 	case KVM_REG_LOONGARCH_LBT:
874 		if (!kvm_guest_has_lbt(&vcpu->arch))
875 			return -ENXIO;
876 
877 		switch (reg->id) {
878 		case KVM_REG_LOONGARCH_LBT_SCR0:
879 			vcpu->arch.lbt.scr0 = v;
880 			break;
881 		case KVM_REG_LOONGARCH_LBT_SCR1:
882 			vcpu->arch.lbt.scr1 = v;
883 			break;
884 		case KVM_REG_LOONGARCH_LBT_SCR2:
885 			vcpu->arch.lbt.scr2 = v;
886 			break;
887 		case KVM_REG_LOONGARCH_LBT_SCR3:
888 			vcpu->arch.lbt.scr3 = v;
889 			break;
890 		case KVM_REG_LOONGARCH_LBT_EFLAGS:
891 			vcpu->arch.lbt.eflags = v;
892 			break;
893 		case KVM_REG_LOONGARCH_LBT_FTOP:
894 			vcpu->arch.fpu.ftop = v;
895 			break;
896 		default:
897 			ret = -EINVAL;
898 			break;
899 		}
900 		break;
901 	case KVM_REG_LOONGARCH_KVM:
902 		switch (reg->id) {
903 		case KVM_REG_LOONGARCH_COUNTER:
904 			/*
905 			 * The guest counter offset is per board, not per vcpu,
906 			 * so only set it once (on the first vcpu) for an SMP system
907 			 */
908 			if (vcpu->vcpu_id == 0)
909 				vcpu->kvm->arch.time_offset = (signed long)(v - drdtime());
910 			break;
911 		case KVM_REG_LOONGARCH_VCPU_RESET:
912 			vcpu->arch.st.guest_addr = 0;
913 			memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
914 			memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
915 
916 			/*
917 			 * On vCPU reset, clear the ESTAT and GINTC registers here;
918 			 * the other CSR registers are cleared via _kvm_setcsr().
919 			 */
920 			kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_GINTC, 0);
921 			kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_ESTAT, 0);
922 			break;
923 		default:
924 			ret = -EINVAL;
925 			break;
926 		}
927 		break;
928 	default:
929 		ret = -EINVAL;
930 		break;
931 	}
932 
933 	return ret;
934 }
935 
936 static int kvm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
937 {
938 	int ret = 0;
939 	u64 v, size = reg->id & KVM_REG_SIZE_MASK;
940 
941 	switch (size) {
942 	case KVM_REG_SIZE_U64:
943 		ret = get_user(v, (u64 __user *)(long)reg->addr);
944 		if (ret)
945 			return ret;
946 		break;
947 	default:
948 		return -EINVAL;
949 	}
950 
951 	return kvm_set_one_reg(vcpu, reg, v);
952 }
953 
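/*
 * Illustrative userspace sketch for the ONE_REG accessors above (an
 * assumption for documentation purposes, not code from this file): the
 * register id combines the size, the LoongArch register class and the
 * register index, and the value travels through the user pointer in
 * kvm_one_reg.addr, roughly:
 *
 *	__u64 val;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | <csr index>,
 *		.addr = (__u64)(unsigned long)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */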
954 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
955 {
956 	return -ENOIOCTLCMD;
957 }
958 
959 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
960 {
961 	return -ENOIOCTLCMD;
962 }
963 
964 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
965 {
966 	int i;
967 
968 	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
969 		regs->gpr[i] = vcpu->arch.gprs[i];
970 
971 	regs->pc = vcpu->arch.pc;
972 
973 	return 0;
974 }
975 
976 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
977 {
978 	int i;
979 
980 	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
981 		vcpu->arch.gprs[i] = regs->gpr[i];
982 
983 	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
984 	vcpu->arch.pc = regs->pc;
985 
986 	return 0;
987 }
988 
989 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
990 				     struct kvm_enable_cap *cap)
991 {
992 	/* FPU is enabled by default, will support LSX/LASX later. */
993 	return -EINVAL;
994 }
995 
996 static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu,
997 					 struct kvm_device_attr *attr)
998 {
999 	switch (attr->attr) {
1000 	case LOONGARCH_CPUCFG2:
1001 	case LOONGARCH_CPUCFG6:
1002 		return 0;
1003 	case CPUCFG_KVM_FEATURE:
1004 		return 0;
1005 	default:
1006 		return -ENXIO;
1007 	}
1008 
1009 	return -ENXIO;
1010 }
1011 
1012 static int kvm_loongarch_pvtime_has_attr(struct kvm_vcpu *vcpu,
1013 					 struct kvm_device_attr *attr)
1014 {
1015 	if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
1016 			|| attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
1017 		return -ENXIO;
1018 
1019 	return 0;
1020 }
1021 
1022 static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
1023 				       struct kvm_device_attr *attr)
1024 {
1025 	int ret = -ENXIO;
1026 
1027 	switch (attr->group) {
1028 	case KVM_LOONGARCH_VCPU_CPUCFG:
1029 		ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr);
1030 		break;
1031 	case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
1032 		ret = kvm_loongarch_pvtime_has_attr(vcpu, attr);
1033 		break;
1034 	default:
1035 		break;
1036 	}
1037 
1038 	return ret;
1039 }
1040 
1041 static int kvm_loongarch_cpucfg_get_attr(struct kvm_vcpu *vcpu,
1042 					 struct kvm_device_attr *attr)
1043 {
1044 	int ret = 0;
1045 	uint64_t val;
1046 	uint64_t __user *uaddr = (uint64_t __user *)attr->addr;
1047 
1048 	switch (attr->attr) {
1049 	case 0 ... (KVM_MAX_CPUCFG_REGS - 1):
1050 		ret = _kvm_get_cpucfg_mask(attr->attr, &val);
1051 		if (ret)
1052 			return ret;
1053 		break;
1054 	case CPUCFG_KVM_FEATURE:
1055 		val = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK;
1056 		break;
1057 	default:
1058 		return -ENXIO;
1059 	}
1060 
1061 	put_user(val, uaddr);
1062 
1063 	return ret;
1064 }
1065 
1066 static int kvm_loongarch_pvtime_get_attr(struct kvm_vcpu *vcpu,
1067 					 struct kvm_device_attr *attr)
1068 {
1069 	u64 gpa;
1070 	u64 __user *user = (u64 __user *)attr->addr;
1071 
1072 	if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
1073 			|| attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
1074 		return -ENXIO;
1075 
1076 	gpa = vcpu->arch.st.guest_addr;
1077 	if (put_user(gpa, user))
1078 		return -EFAULT;
1079 
1080 	return 0;
1081 }
1082 
1083 static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu,
1084 				       struct kvm_device_attr *attr)
1085 {
1086 	int ret = -ENXIO;
1087 
1088 	switch (attr->group) {
1089 	case KVM_LOONGARCH_VCPU_CPUCFG:
1090 		ret = kvm_loongarch_cpucfg_get_attr(vcpu, attr);
1091 		break;
1092 	case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
1093 		ret = kvm_loongarch_pvtime_get_attr(vcpu, attr);
1094 		break;
1095 	default:
1096 		break;
1097 	}
1098 
1099 	return ret;
1100 }
1101 
1102 static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu,
1103 					 struct kvm_device_attr *attr)
1104 {
1105 	u64 val, valid;
1106 	u64 __user *user = (u64 __user *)attr->addr;
1107 	struct kvm *kvm = vcpu->kvm;
1108 
1109 	switch (attr->attr) {
1110 	case CPUCFG_KVM_FEATURE:
1111 		if (get_user(val, user))
1112 			return -EFAULT;
1113 
1114 		valid = LOONGARCH_PV_FEAT_MASK;
1115 		if (val & ~valid)
1116 			return -EINVAL;
1117 
1118 		/* All vCPUs must be configured with the same PV features */
1119 		if ((kvm->arch.pv_features & LOONGARCH_PV_FEAT_UPDATED)
1120 				&& ((kvm->arch.pv_features & valid) != val))
1121 			return -EINVAL;
1122 		kvm->arch.pv_features = val | LOONGARCH_PV_FEAT_UPDATED;
1123 		return 0;
1124 	default:
1125 		return -ENXIO;
1126 	}
1127 }
1128 
1129 static int kvm_loongarch_pvtime_set_attr(struct kvm_vcpu *vcpu,
1130 					 struct kvm_device_attr *attr)
1131 {
1132 	int idx, ret = 0;
1133 	u64 gpa, __user *user = (u64 __user *)attr->addr;
1134 	struct kvm *kvm = vcpu->kvm;
1135 
1136 	if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
1137 			|| attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
1138 		return -ENXIO;
1139 
1140 	if (get_user(gpa, user))
1141 		return -EFAULT;
1142 
1143 	if (gpa & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
1144 		return -EINVAL;
1145 
1146 	if (!(gpa & KVM_STEAL_PHYS_VALID)) {
1147 		vcpu->arch.st.guest_addr = gpa;
1148 		return 0;
1149 	}
1150 
1151 	/* Check the address is in a valid memslot */
1152 	idx = srcu_read_lock(&kvm->srcu);
1153 	if (kvm_is_error_hva(gfn_to_hva(kvm, gpa >> PAGE_SHIFT)))
1154 		ret = -EINVAL;
1155 	srcu_read_unlock(&kvm->srcu, idx);
1156 
1157 	if (!ret) {
1158 		vcpu->arch.st.guest_addr = gpa;
1159 		vcpu->arch.st.last_steal = current->sched_info.run_delay;
1160 		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
1161 	}
1162 
1163 	return ret;
1164 }
1165 
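/*
 * Illustrative userspace sketch (an assumption, not code from this file):
 * registering the steal-time page goes through the vCPU device-attr ioctls
 * using the group/attr constants handled above, roughly:
 *
 *	__u64 gpa = st_page_gpa | KVM_STEAL_PHYS_VALID;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_LOONGARCH_VCPU_PVTIME_CTRL,
 *		.attr  = KVM_LOONGARCH_VCPU_PVTIME_GPA,
 *		.addr  = (__u64)(unsigned long)&gpa,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
 */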
1166 static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu,
1167 				       struct kvm_device_attr *attr)
1168 {
1169 	int ret = -ENXIO;
1170 
1171 	switch (attr->group) {
1172 	case KVM_LOONGARCH_VCPU_CPUCFG:
1173 		ret = kvm_loongarch_cpucfg_set_attr(vcpu, attr);
1174 		break;
1175 	case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
1176 		ret = kvm_loongarch_pvtime_set_attr(vcpu, attr);
1177 		break;
1178 	default:
1179 		break;
1180 	}
1181 
1182 	return ret;
1183 }
1184 
1185 long kvm_arch_vcpu_ioctl(struct file *filp,
1186 			 unsigned int ioctl, unsigned long arg)
1187 {
1188 	long r;
1189 	struct kvm_device_attr attr;
1190 	void __user *argp = (void __user *)arg;
1191 	struct kvm_vcpu *vcpu = filp->private_data;
1192 
1193 	/*
1194 	 * Only software CSR should be modified
1195 	 *
1196 	 * If any hardware CSR register were modified, a vcpu_load/vcpu_put
1197 	 * pair would be needed: the hardware CSR registers are owned by the
1198 	 * currently loaded vcpu, so other vcpus would have to reload them.
1199 	 *
1200 	 * When only the software CSR state is modified, the KVM_LARCH_HWCSR_USABLE
1201 	 * bit is cleared in vcpu->arch.aux_inuse, and vcpu_load will check the
1202 	 * aux_inuse flag and reload the CSR registers from the software state.
1203 	 */
1204 
1205 	switch (ioctl) {
1206 	case KVM_SET_ONE_REG:
1207 	case KVM_GET_ONE_REG: {
1208 		struct kvm_one_reg reg;
1209 
1210 		r = -EFAULT;
1211 		if (copy_from_user(&reg, argp, sizeof(reg)))
1212 			break;
1213 		if (ioctl == KVM_SET_ONE_REG) {
1214 			r = kvm_set_reg(vcpu, &reg);
1215 			vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
1216 		} else
1217 			r = kvm_get_reg(vcpu, &reg);
1218 		break;
1219 	}
1220 	case KVM_ENABLE_CAP: {
1221 		struct kvm_enable_cap cap;
1222 
1223 		r = -EFAULT;
1224 		if (copy_from_user(&cap, argp, sizeof(cap)))
1225 			break;
1226 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
1227 		break;
1228 	}
1229 	case KVM_HAS_DEVICE_ATTR: {
1230 		r = -EFAULT;
1231 		if (copy_from_user(&attr, argp, sizeof(attr)))
1232 			break;
1233 		r = kvm_loongarch_vcpu_has_attr(vcpu, &attr);
1234 		break;
1235 	}
1236 	case KVM_GET_DEVICE_ATTR: {
1237 		r = -EFAULT;
1238 		if (copy_from_user(&attr, argp, sizeof(attr)))
1239 			break;
1240 		r = kvm_loongarch_vcpu_get_attr(vcpu, &attr);
1241 		break;
1242 	}
1243 	case KVM_SET_DEVICE_ATTR: {
1244 		r = -EFAULT;
1245 		if (copy_from_user(&attr, argp, sizeof(attr)))
1246 			break;
1247 		r = kvm_loongarch_vcpu_set_attr(vcpu, &attr);
1248 		break;
1249 	}
1250 	default:
1251 		r = -ENOIOCTLCMD;
1252 		break;
1253 	}
1254 
1255 	return r;
1256 }
1257 
1258 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1259 {
1260 	int i = 0;
1261 
1262 	fpu->fcc = vcpu->arch.fpu.fcc;
1263 	fpu->fcsr = vcpu->arch.fpu.fcsr;
1264 	for (i = 0; i < NUM_FPU_REGS; i++)
1265 		memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i], FPU_REG_WIDTH / 64);
1266 
1267 	return 0;
1268 }
1269 
1270 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1271 {
1272 	int i = 0;
1273 
1274 	vcpu->arch.fpu.fcc = fpu->fcc;
1275 	vcpu->arch.fpu.fcsr = fpu->fcsr;
1276 	for (i = 0; i < NUM_FPU_REGS; i++)
1277 		memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i], FPU_REG_WIDTH / 64);
1278 
1279 	return 0;
1280 }
1281 
1282 #ifdef CONFIG_CPU_HAS_LBT
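/*
 * LBT (LoongArch Binary Translation) scratch state is handled lazily:
 * EUEN.LBTEN is set and the LBT registers restored only when the guest first
 * needs them, and they are saved and disabled again in kvm_lose_lbt().
 */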
1283 int kvm_own_lbt(struct kvm_vcpu *vcpu)
1284 {
1285 	if (!kvm_guest_has_lbt(&vcpu->arch))
1286 		return -EINVAL;
1287 
1288 	preempt_disable();
1289 	if (!(vcpu->arch.aux_inuse & KVM_LARCH_LBT)) {
1290 		set_csr_euen(CSR_EUEN_LBTEN);
1291 		_restore_lbt(&vcpu->arch.lbt);
1292 		vcpu->arch.aux_inuse |= KVM_LARCH_LBT;
1293 	}
1294 	preempt_enable();
1295 
1296 	return 0;
1297 }
1298 
1299 static void kvm_lose_lbt(struct kvm_vcpu *vcpu)
1300 {
1301 	preempt_disable();
1302 	if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) {
1303 		_save_lbt(&vcpu->arch.lbt);
1304 		clear_csr_euen(CSR_EUEN_LBTEN);
1305 		vcpu->arch.aux_inuse &= ~KVM_LARCH_LBT;
1306 	}
1307 	preempt_enable();
1308 }
1309 
1310 static void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr)
1311 {
1312 	/*
1313 	 * If TM is enabled, saving/restoring the TOP register will
1314 	 * cause an LBT exception, so enable LBT here in advance
1315 	 */
1316 	if (fcsr & FPU_CSR_TM)
1317 		kvm_own_lbt(vcpu);
1318 }
1319 
1320 static void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu)
1321 {
1322 	if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1323 		if (vcpu->arch.aux_inuse & KVM_LARCH_LBT)
1324 			return;
1325 		kvm_check_fcsr(vcpu, read_fcsr(LOONGARCH_FCSR0));
1326 	}
1327 }
1328 #else
1329 static inline void kvm_lose_lbt(struct kvm_vcpu *vcpu) { }
1330 static inline void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr) { }
1331 static inline void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu) { }
1332 #endif
1333 
1334 /* Enable FPU and restore context */
1335 void kvm_own_fpu(struct kvm_vcpu *vcpu)
1336 {
1337 	preempt_disable();
1338 
1339 	/*
1340 	 * Enable FPU for guest
1341 	 * Set FR and FRE according to guest context
1342 	 */
1343 	kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1344 	set_csr_euen(CSR_EUEN_FPEN);
1345 
1346 	kvm_restore_fpu(&vcpu->arch.fpu);
1347 	vcpu->arch.aux_inuse |= KVM_LARCH_FPU;
1348 	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
1349 
1350 	preempt_enable();
1351 }
1352 
1353 #ifdef CONFIG_CPU_HAS_LSX
1354 /* Enable LSX and restore context */
1355 int kvm_own_lsx(struct kvm_vcpu *vcpu)
1356 {
1357 	if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch))
1358 		return -EINVAL;
1359 
1360 	preempt_disable();
1361 
1362 	/* Enable LSX for guest */
1363 	kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1364 	set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN);
1365 	switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1366 	case KVM_LARCH_FPU:
1367 		/*
1368 		 * Guest FPU state already loaded,
1369 		 * only restore upper LSX state
1370 		 */
1371 		_restore_lsx_upper(&vcpu->arch.fpu);
1372 		break;
1373 	default:
1374 		/* Neither FP nor LSX is already active,
1375 		 * restore full LSX state
1376 		 */
1377 		kvm_restore_lsx(&vcpu->arch.fpu);
1378 		break;
1379 	}
1380 
1381 	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX);
1382 	vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU;
1383 	preempt_enable();
1384 
1385 	return 0;
1386 }
1387 #endif
1388 
1389 #ifdef CONFIG_CPU_HAS_LASX
1390 /* Enable LASX and restore context */
1391 int kvm_own_lasx(struct kvm_vcpu *vcpu)
1392 {
1393 	if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch) || !kvm_guest_has_lasx(&vcpu->arch))
1394 		return -EINVAL;
1395 
1396 	preempt_disable();
1397 
1398 	kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1399 	set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
1400 	switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) {
1401 	case KVM_LARCH_LSX:
1402 	case KVM_LARCH_LSX | KVM_LARCH_FPU:
1403 		/* Guest LSX state already loaded, only restore upper LASX state */
1404 		_restore_lasx_upper(&vcpu->arch.fpu);
1405 		break;
1406 	case KVM_LARCH_FPU:
1407 		/* Guest FP state already loaded, only restore upper LSX & LASX state */
1408 		_restore_lsx_upper(&vcpu->arch.fpu);
1409 		_restore_lasx_upper(&vcpu->arch.fpu);
1410 		break;
1411 	default:
1412 		/* Neither FP nor LSX is already active, restore full LASX state */
1413 		kvm_restore_lasx(&vcpu->arch.fpu);
1414 		break;
1415 	}
1416 
1417 	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX);
1418 	vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU;
1419 	preempt_enable();
1420 
1421 	return 0;
1422 }
1423 #endif
1424 
1425 /* Save context and disable FPU */
1426 void kvm_lose_fpu(struct kvm_vcpu *vcpu)
1427 {
1428 	preempt_disable();
1429 
1430 	kvm_check_fcsr_alive(vcpu);
1431 	if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) {
1432 		kvm_save_lasx(&vcpu->arch.fpu);
1433 		vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX);
1434 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LASX);
1435 
1436 		/* Disable LASX & LSX & FPU */
1437 		clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
1438 	} else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) {
1439 		kvm_save_lsx(&vcpu->arch.fpu);
1440 		vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU);
1441 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX);
1442 
1443 		/* Disable LSX & FPU */
1444 		clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN);
1445 	} else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1446 		kvm_save_fpu(&vcpu->arch.fpu);
1447 		vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU;
1448 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);
1449 
1450 		/* Disable FPU */
1451 		clear_csr_euen(CSR_EUEN_FPEN);
1452 	}
1453 	kvm_lose_lbt(vcpu);
1454 
1455 	preempt_enable();
1456 }
1457 
1458 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
1459 {
1460 	int intr = (int)irq->irq;
1461 
1462 	if (intr > 0)
1463 		kvm_queue_irq(vcpu, intr);
1464 	else if (intr < 0)
1465 		kvm_dequeue_irq(vcpu, -intr);
1466 	else {
1467 		kvm_err("%s: invalid interrupt ioctl %d\n", __func__, irq->irq);
1468 		return -EINVAL;
1469 	}
1470 
1471 	kvm_vcpu_kick(vcpu);
1472 
1473 	return 0;
1474 }
1475 
1476 long kvm_arch_vcpu_async_ioctl(struct file *filp,
1477 			       unsigned int ioctl, unsigned long arg)
1478 {
1479 	void __user *argp = (void __user *)arg;
1480 	struct kvm_vcpu *vcpu = filp->private_data;
1481 
1482 	if (ioctl == KVM_INTERRUPT) {
1483 		struct kvm_interrupt irq;
1484 
1485 		if (copy_from_user(&irq, argp, sizeof(irq)))
1486 			return -EFAULT;
1487 
1488 		kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq);
1489 
1490 		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
1491 	}
1492 
1493 	return -ENOIOCTLCMD;
1494 }
1495 
1496 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
1497 {
1498 	return 0;
1499 }
1500 
1501 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
1502 {
1503 	unsigned long timer_hz;
1504 	struct loongarch_csrs *csr;
1505 
1506 	vcpu->arch.vpid = 0;
1507 	vcpu->arch.flush_gpa = INVALID_GPA;
1508 
1509 	hrtimer_setup(&vcpu->arch.swtimer, kvm_swtimer_wakeup, CLOCK_MONOTONIC,
1510 		      HRTIMER_MODE_ABS_PINNED_HARD);
1511 
1512 	/* Get GPA (=HVA) of PGD for kvm hypervisor */
1513 	vcpu->arch.kvm_pgd = __pa(vcpu->kvm->arch.pgd);
1514 
1515 	/*
1516 	 * Get the PGD for the primary mmu; a virtual address is used since it
1517 	 * is accessed after being loaded from CSR_PGD in the tlb exception fast path.
1518 	 */
1519 	vcpu->arch.host_pgd = (unsigned long)vcpu->kvm->mm->pgd;
1520 
1521 	vcpu->arch.handle_exit = kvm_handle_exit;
1522 	vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry;
1523 	vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL);
1524 	if (!vcpu->arch.csr)
1525 		return -ENOMEM;
1526 
1527 	/*
1528 	 * All kvm exceptions share one exception entry, and the host <-> guest
1529 	 * switch also switches the ECFG.VS field, so keep the host ECFG.VS info here.
1530 	 */
1531 	vcpu->arch.host_ecfg = (read_csr_ecfg() & CSR_ECFG_VS);
1532 
1533 	/* Init */
1534 	vcpu->arch.last_sched_cpu = -1;
1535 
1536 	/* Init ipi_state lock */
1537 	spin_lock_init(&vcpu->arch.ipi_state.lock);
1538 
1539 	/*
1540 	 * Initialize guest register state to valid architectural reset state.
1541 	 */
1542 	timer_hz = calc_const_freq();
1543 	kvm_init_timer(vcpu, timer_hz);
1544 
1545 	/* Set the initial mode (direct address translation) for the guest */
1546 	csr = vcpu->arch.csr;
1547 	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CRMD, CSR_CRMD_DA);
1548 
1549 	/* Set cpuid */
1550 	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id);
1551 	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);
1552 
1553 	/* Start with no pending virtual guest interrupts */
1554 	csr->csrs[LOONGARCH_CSR_GINTC] = 0;
1555 
1556 	return 0;
1557 }
1558 
1559 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
1560 {
1561 }
1562 
1563 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1564 {
1565 	int cpu;
1566 	struct kvm_context *context;
1567 
1568 	hrtimer_cancel(&vcpu->arch.swtimer);
1569 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
1570 	kvm_drop_cpuid(vcpu);
1571 	kfree(vcpu->arch.csr);
1572 
1573 	/*
1574 	 * If the vCPU is freed and reused as another vCPU, we don't want the
1575 	 * matching pointer wrongly hanging around in last_vcpu.
1576 	 */
1577 	for_each_possible_cpu(cpu) {
1578 		context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
1579 		if (context->last_vcpu == vcpu)
1580 			context->last_vcpu = NULL;
1581 	}
1582 }
1583 
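/*
 * Restore per-CPU guest state on vcpu_load. The hardware guest CSRs are
 * reloaded from the software copy only when KVM_LARCH_HWCSR_USABLE is clear,
 * i.e. after migrating to another CPU or after another vCPU ran here.
 */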
1584 static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1585 {
1586 	bool migrated;
1587 	struct kvm_context *context;
1588 	struct loongarch_csrs *csr = vcpu->arch.csr;
1589 
1590 	/*
1591 	 * Have we migrated to a different CPU?
1592 	 * If so, any old guest TLB state may be stale.
1593 	 */
1594 	migrated = (vcpu->arch.last_sched_cpu != cpu);
1595 
1596 	/*
1597 	 * Was this the last vCPU to run on this CPU?
1598 	 * If not, any old guest state from this vCPU will have been clobbered.
1599 	 */
1600 	context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
1601 	if (migrated || (context->last_vcpu != vcpu))
1602 		vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
1603 	context->last_vcpu = vcpu;
1604 
1605 	/* Restore timer state regardless */
1606 	kvm_restore_timer(vcpu);
1607 	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
1608 
1609 	/* Don't bother restoring registers multiple times unless necessary */
1610 	if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE)
1611 		return 0;
1612 
1613 	write_csr_gcntc((ulong)vcpu->kvm->arch.time_offset);
1614 
1615 	/* Restore guest CSR registers */
1616 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
1617 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
1618 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
1619 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_MISC);
1620 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
1621 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ERA);
1622 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADV);
1623 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADI);
1624 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
1625 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
1626 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
1627 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
1628 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
1629 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ASID);
1630 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
1631 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
1632 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
1633 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
1634 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
1635 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
1636 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
1637 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS0);
1638 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS1);
1639 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS2);
1640 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS3);
1641 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS4);
1642 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS5);
1643 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS6);
1644 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS7);
1645 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TMID);
1646 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
1647 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
1648 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
1649 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
1650 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
1651 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
1652 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
1653 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
1654 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
1655 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
1656 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
1657 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
1658 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
1659 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
1660 
1661 	/* Restore Root.GINTC from unused Guest.GINTC register */
1662 	write_csr_gintc(csr->csrs[LOONGARCH_CSR_GINTC]);
1663 
1664 	/*
1665 	 * We should clear linked load bit to break interrupted atomics. This
1666 	 * prevents a SC on the next vCPU from succeeding by matching a LL on
1667 	 * the previous vCPU.
1668 	 */
1669 	if (vcpu->kvm->created_vcpus > 1)
1670 		set_gcsr_llbctl(CSR_LLBCTL_WCLLB);
1671 
1672 	vcpu->arch.aux_inuse |= KVM_LARCH_HWCSR_USABLE;
1673 
1674 	return 0;
1675 }
1676 
1677 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1678 {
1679 	unsigned long flags;
1680 
1681 	local_irq_save(flags);
1682 	/* Restore guest state to registers */
1683 	_kvm_vcpu_load(vcpu, cpu);
1684 	local_irq_restore(flags);
1685 }
1686 
1687 static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
1688 {
1689 	struct loongarch_csrs *csr = vcpu->arch.csr;
1690 
1691 	kvm_lose_fpu(vcpu);
1692 
1693 	/*
1694 	 * Update CSR state from hardware if the software CSR state is stale.
1695 	 * Most CSR registers are kept unchanged across a process context
1696 	 * switch, except for registers such as the remaining timer tick value
1697 	 * and the injected interrupt state.
1698 	 */
1699 	if (vcpu->arch.aux_inuse & KVM_LARCH_SWCSR_LATEST)
1700 		goto out;
1701 
1702 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
1703 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
1704 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
1705 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_MISC);
1706 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
1707 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ERA);
1708 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADV);
1709 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADI);
1710 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
1711 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
1712 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
1713 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
1714 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
1715 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ASID);
1716 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
1717 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
1718 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
1719 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
1720 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
1721 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
1722 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
1723 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG1);
1724 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG2);
1725 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG3);
1726 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS0);
1727 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS1);
1728 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS2);
1729 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS3);
1730 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS4);
1731 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS5);
1732 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS6);
1733 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS7);
1734 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TMID);
1735 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
1736 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
1737 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
1738 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
1739 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
1740 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
1741 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
1742 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
1743 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
1744 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
1745 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
1746 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
1747 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
1748 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
1749 
1750 	vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST;
1751 
1752 out:
1753 	kvm_save_timer(vcpu);
1754 	/* Save Root.GINTC into unused Guest.GINTC register */
1755 	csr->csrs[LOONGARCH_CSR_GINTC] = read_csr_gintc();
1756 
1757 	return 0;
1758 }
1759 
1760 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1761 {
1762 	int cpu;
1763 	unsigned long flags;
1764 
1765 	local_irq_save(flags);
1766 	cpu = smp_processor_id();
1767 	vcpu->arch.last_sched_cpu = cpu;
1768 
1769 	/* Save guest state in registers */
1770 	_kvm_vcpu_put(vcpu, cpu);
1771 	local_irq_restore(flags);
1772 }
1773 
1774 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
1775 {
1776 	int r = -EINTR;
1777 	struct kvm_run *run = vcpu->run;
1778 
1779 	if (vcpu->mmio_needed) {
1780 		if (!vcpu->mmio_is_write)
1781 			kvm_complete_mmio_read(vcpu, run);
1782 		vcpu->mmio_needed = 0;
1783 	}
1784 
1785 	switch (run->exit_reason) {
1786 	case KVM_EXIT_HYPERCALL:
1787 		kvm_complete_user_service(vcpu, run);
1788 		break;
1789 	case KVM_EXIT_LOONGARCH_IOCSR:
1790 		if (!run->iocsr_io.is_write)
1791 			kvm_complete_iocsr_read(vcpu, run);
1792 		break;
1793 	}
1794 
1795 	if (!vcpu->wants_to_run)
1796 		return r;
1797 
1798 	/* Clear exit_reason */
1799 	run->exit_reason = KVM_EXIT_UNKNOWN;
1800 	lose_fpu(1);
1801 	vcpu_load(vcpu);
1802 	kvm_sigset_activate(vcpu);
1803 	r = kvm_pre_enter_guest(vcpu);
1804 	if (r != RESUME_GUEST)
1805 		goto out;
1806 
1807 	guest_timing_enter_irqoff();
1808 	guest_state_enter_irqoff();
1809 	trace_kvm_enter(vcpu);
1810 	r = kvm_loongarch_ops->enter_guest(run, vcpu);
1811 
1812 	trace_kvm_out(vcpu);
1813 	/*
1814 	 * The guest exit has already been recorded in kvm_handle_exit(),
1815 	 * and its return value must not be RESUME_GUEST here
1816 	 */
1817 	local_irq_enable();
1818 out:
1819 	kvm_sigset_deactivate(vcpu);
1820 	vcpu_put(vcpu);
1821 
1822 	return r;
1823 }
1824