1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
4  */
5 
6 #include <linux/kvm_host.h>
7 #include <asm/fpu.h>
8 #include <asm/lbt.h>
9 #include <asm/loongarch.h>
10 #include <asm/setup.h>
11 #include <asm/time.h>
12 #include <asm/timex.h>
13 
14 #define CREATE_TRACE_POINTS
15 #include "trace.h"
16 
17 const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
18 	KVM_GENERIC_VCPU_STATS(),
19 	STATS_DESC_COUNTER(VCPU, int_exits),
20 	STATS_DESC_COUNTER(VCPU, idle_exits),
21 	STATS_DESC_COUNTER(VCPU, cpucfg_exits),
22 	STATS_DESC_COUNTER(VCPU, signal_exits),
23 	STATS_DESC_COUNTER(VCPU, hypercall_exits),
24 	STATS_DESC_COUNTER(VCPU, ipi_read_exits),
25 	STATS_DESC_COUNTER(VCPU, ipi_write_exits),
26 	STATS_DESC_COUNTER(VCPU, eiointc_read_exits),
27 	STATS_DESC_COUNTER(VCPU, eiointc_write_exits),
28 	STATS_DESC_COUNTER(VCPU, pch_pic_read_exits),
29 	STATS_DESC_COUNTER(VCPU, pch_pic_write_exits)
30 };
31 
32 const struct kvm_stats_header kvm_vcpu_stats_header = {
33 	.name_size = KVM_STATS_NAME_SIZE,
34 	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
35 	.id_offset = sizeof(struct kvm_stats_header),
36 	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
37 	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
38 		       sizeof(kvm_vcpu_stats_desc),
39 };
40 
41 static inline void kvm_save_host_pmu(struct kvm_vcpu *vcpu)
42 {
43 	struct kvm_context *context;
44 
45 	context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
46 	context->perf_cntr[0] = read_csr_perfcntr0();
47 	context->perf_cntr[1] = read_csr_perfcntr1();
48 	context->perf_cntr[2] = read_csr_perfcntr2();
49 	context->perf_cntr[3] = read_csr_perfcntr3();
50 	context->perf_ctrl[0] = write_csr_perfctrl0(0);
51 	context->perf_ctrl[1] = write_csr_perfctrl1(0);
52 	context->perf_ctrl[2] = write_csr_perfctrl2(0);
53 	context->perf_ctrl[3] = write_csr_perfctrl3(0);
54 }
55 
56 static inline void kvm_restore_host_pmu(struct kvm_vcpu *vcpu)
57 {
58 	struct kvm_context *context;
59 
60 	context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
61 	write_csr_perfcntr0(context->perf_cntr[0]);
62 	write_csr_perfcntr1(context->perf_cntr[1]);
63 	write_csr_perfcntr2(context->perf_cntr[2]);
64 	write_csr_perfcntr3(context->perf_cntr[3]);
65 	write_csr_perfctrl0(context->perf_ctrl[0]);
66 	write_csr_perfctrl1(context->perf_ctrl[1]);
67 	write_csr_perfctrl2(context->perf_ctrl[2]);
68 	write_csr_perfctrl3(context->perf_ctrl[3]);
69 }
70 
71 
72 static inline void kvm_save_guest_pmu(struct kvm_vcpu *vcpu)
73 {
74 	struct loongarch_csrs *csr = vcpu->arch.csr;
75 
76 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
77 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
78 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
79 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
80 	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
81 	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
82 	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
83 	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
84 }
85 
86 static inline void kvm_restore_guest_pmu(struct kvm_vcpu *vcpu)
87 {
88 	struct loongarch_csrs *csr = vcpu->arch.csr;
89 
90 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
91 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
92 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
93 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
94 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
95 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
96 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
97 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
98 }
99 
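/*
 * Hand the hardware PMU to the guest: save and stop the host perf
 * counters, program GCFG.GPERF with the number of counters passed
 * through, then restore the guest PMU CSR context.
 */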
100 static int kvm_own_pmu(struct kvm_vcpu *vcpu)
101 {
102 	unsigned long val;
103 
104 	if (!kvm_guest_has_pmu(&vcpu->arch))
105 		return -EINVAL;
106 
107 	kvm_save_host_pmu(vcpu);
108 
109 	/* Set PM0-PM(num) to guest */
110 	val = read_csr_gcfg() & ~CSR_GCFG_GPERF;
111 	val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT;
112 	write_csr_gcfg(val);
113 
114 	kvm_restore_guest_pmu(vcpu);
115 
116 	return 0;
117 }
118 
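/*
 * Give the hardware PMU back to the host: save the guest PMU CSRs,
 * disable guest PMU access in GCFG, and restore the host perf counter
 * state saved by kvm_own_pmu().
 */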
119 static void kvm_lose_pmu(struct kvm_vcpu *vcpu)
120 {
121 	unsigned long val;
122 	struct loongarch_csrs *csr = vcpu->arch.csr;
123 
124 	if (!(vcpu->arch.aux_inuse & KVM_LARCH_PMU))
125 		return;
126 
127 	kvm_save_guest_pmu(vcpu);
128 
129 	/* Disable pmu access from guest */
130 	write_csr_gcfg(read_csr_gcfg() & ~CSR_GCFG_GPERF);
131 
132 	/*
133 	 * Clear KVM_LARCH_PMU if the guest is not using the PMU CSRs on
134 	 * exit, so that no PMU CSR context needs to be handled the next
135 	 * time we trap into the guest.
136 	 *
137 	 * Otherwise set the request bit KVM_REQ_PMU so that the guest PMU
138 	 * state is restored before re-entering the guest.
139 	 */
140 	val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
141 	val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
142 	val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
143 	val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
144 	if (!(val & KVM_PMU_EVENT_ENABLED))
145 		vcpu->arch.aux_inuse &= ~KVM_LARCH_PMU;
146 	else
147 		kvm_make_request(KVM_REQ_PMU, vcpu);
148 
149 	kvm_restore_host_pmu(vcpu);
150 }
151 
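/* Pass the PMU through to the guest if a KVM_REQ_PMU request is pending */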
152 static void kvm_check_pmu(struct kvm_vcpu *vcpu)
153 {
154 	if (kvm_check_request(KVM_REQ_PMU, vcpu)) {
155 		kvm_own_pmu(vcpu);
156 		vcpu->arch.aux_inuse |= KVM_LARCH_PMU;
157 	}
158 }
159 
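/*
 * Publish the vCPU's accumulated steal time to the guest's registered
 * kvm_steal_time structure. The version field is made odd before the
 * update and even again afterwards, so the guest can detect a torn read.
 */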
160 static void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
161 {
162 	u32 version;
163 	u64 steal;
164 	gpa_t gpa;
165 	struct kvm_memslots *slots;
166 	struct kvm_steal_time __user *st;
167 	struct gfn_to_hva_cache *ghc;
168 
169 	ghc = &vcpu->arch.st.cache;
170 	gpa = vcpu->arch.st.guest_addr;
171 	if (!(gpa & KVM_STEAL_PHYS_VALID))
172 		return;
173 
174 	gpa &= KVM_STEAL_PHYS_MASK;
175 	slots = kvm_memslots(vcpu->kvm);
176 	if (slots->generation != ghc->generation || gpa != ghc->gpa) {
177 		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) {
178 			ghc->gpa = INVALID_GPA;
179 			return;
180 		}
181 	}
182 
183 	st = (struct kvm_steal_time __user *)ghc->hva;
184 	unsafe_get_user(version, &st->version, out);
185 	if (version & 1)
186 		version += 1; /* first time write, random junk */
187 
188 	version += 1;
189 	unsafe_put_user(version, &st->version, out);
190 	smp_wmb();
191 
192 	unsafe_get_user(steal, &st->steal, out);
193 	steal += current->sched_info.run_delay - vcpu->arch.st.last_steal;
194 	vcpu->arch.st.last_steal = current->sched_info.run_delay;
195 	unsafe_put_user(steal, &st->steal, out);
196 
197 	smp_wmb();
198 	version += 1;
199 	unsafe_put_user(version, &st->version, out);
200 out:
201 	mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
202 }
203 
204 /*
205  * kvm_check_requests - check and handle pending vCPU requests
206  *
207  * Return: RESUME_GUEST if we should enter the guest
208  *         RESUME_HOST  if we should exit to userspace
209  */
210 static int kvm_check_requests(struct kvm_vcpu *vcpu)
211 {
212 	if (!kvm_request_pending(vcpu))
213 		return RESUME_GUEST;
214 
215 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
216 		vcpu->arch.vpid = 0;  /* Drop vpid for this vCPU */
217 
218 	if (kvm_dirty_ring_check_request(vcpu))
219 		return RESUME_HOST;
220 
221 	if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
222 		kvm_update_stolen_time(vcpu);
223 
224 	return RESUME_GUEST;
225 }
226 
227 static void kvm_late_check_requests(struct kvm_vcpu *vcpu)
228 {
229 	lockdep_assert_irqs_disabled();
230 	if (kvm_check_request(KVM_REQ_TLB_FLUSH_GPA, vcpu))
231 		if (vcpu->arch.flush_gpa != INVALID_GPA) {
232 			kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa);
233 			vcpu->arch.flush_gpa = INVALID_GPA;
234 		}
235 }
236 
237 /*
238  * Check and handle pending signals and vCPU requests, etc.
239  * Runs with irqs and preemption enabled.
240  *
241  * Return: RESUME_GUEST if we should enter the guest
242  *         RESUME_HOST  if we should exit to userspace
243  *         < 0 if we should exit to userspace, where the return value
244  *         indicates an error
245  */
246 static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
247 {
248 	int idx, ret;
249 
250 	/*
251 	 * Check conditions before entering the guest
252 	 */
253 	ret = kvm_xfer_to_guest_mode_handle_work(vcpu);
254 	if (ret < 0)
255 		return ret;
256 
257 	idx = srcu_read_lock(&vcpu->kvm->srcu);
258 	ret = kvm_check_requests(vcpu);
259 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
260 
261 	return ret;
262 }
263 
264 /*
265  * Called with irq enabled
266  *
267  * Return: RESUME_GUEST if we should enter the guest, and irq disabled
268  *         Others if we should exit to userspace
269  */
270 static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
271 {
272 	int ret;
273 
274 	do {
275 		ret = kvm_enter_guest_check(vcpu);
276 		if (ret != RESUME_GUEST)
277 			break;
278 
279 		/*
280 		 * Handle the vCPU timer and interrupts, check requests and
281 		 * the vmid before the vCPU enters the guest
282 		 */
283 		local_irq_disable();
284 		kvm_deliver_intr(vcpu);
285 		kvm_deliver_exception(vcpu);
286 		/* Make sure the vcpu mode has been written */
287 		smp_store_mb(vcpu->mode, IN_GUEST_MODE);
288 		kvm_check_vpid(vcpu);
289 		kvm_check_pmu(vcpu);
290 
291 		/*
292 		 * Must be called after kvm_check_vpid(), since that updates
293 		 * CSR.GSTAT which is used by kvm_flush_tlb_gpa(), and it may
294 		 * also clear the pending KVM_REQ_TLB_FLUSH_GPA bit
295 		 */
296 		kvm_late_check_requests(vcpu);
297 		vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY);
298 		/* Clear KVM_LARCH_SWCSR_LATEST as CSRs will change when entering the guest */
299 		vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
300 
301 		if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) {
302 			if (vcpu->arch.aux_inuse & KVM_LARCH_PMU) {
303 				kvm_lose_pmu(vcpu);
304 				kvm_make_request(KVM_REQ_PMU, vcpu);
305 			}
306 			/* make sure the vcpu mode has been written */
307 			smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE);
308 			local_irq_enable();
309 			ret = -EAGAIN;
310 		}
311 	} while (ret != RESUME_GUEST);
312 
313 	return ret;
314 }
315 
316 /*
317  * Return 1 to resume the guest and <= 0 to resume the host.
318  */
319 static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
320 {
321 	int ret = RESUME_GUEST;
322 	unsigned long estat = vcpu->arch.host_estat;
323 	u32 intr = estat & CSR_ESTAT_IS;
324 	u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
325 
326 	vcpu->mode = OUTSIDE_GUEST_MODE;
327 
328 	/* Set a default exit reason */
329 	run->exit_reason = KVM_EXIT_UNKNOWN;
330 
331 	kvm_lose_pmu(vcpu);
332 
333 	guest_timing_exit_irqoff();
334 	guest_state_exit_irqoff();
335 	local_irq_enable();
336 
337 	trace_kvm_exit(vcpu, ecode);
338 	if (ecode) {
339 		ret = kvm_handle_fault(vcpu, ecode);
340 	} else {
341 		WARN(!intr, "vm exiting with suspicious irq\n");
342 		++vcpu->stat.int_exits;
343 	}
344 
345 	if (ret == RESUME_GUEST)
346 		ret = kvm_pre_enter_guest(vcpu);
347 
348 	if (ret != RESUME_GUEST) {
349 		local_irq_disable();
350 		return ret;
351 	}
352 
353 	guest_timing_enter_irqoff();
354 	guest_state_enter_irqoff();
355 	trace_kvm_reenter(vcpu);
356 
357 	return RESUME_GUEST;
358 }
359 
360 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
361 {
362 	return !!(vcpu->arch.irq_pending) &&
363 		vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE;
364 }
365 
366 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
367 {
368 	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
369 }
370 
371 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
372 {
373 	unsigned long val;
374 
375 	preempt_disable();
376 	val = gcsr_read(LOONGARCH_CSR_CRMD);
377 	preempt_enable();
378 
379 	return (val & CSR_PRMD_PPLV) == PLV_KERN;
380 }
381 
382 #ifdef CONFIG_GUEST_PERF_EVENTS
383 unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
384 {
385 	return vcpu->arch.pc;
386 }
387 
388 /*
389  * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
390  * arrived in guest context.  For LoongArch64, if PMU is not passthrough to VM,
391  * any event that arrives while a vCPU is loaded is considered to be "in guest".
392  */
393 bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
394 {
395 	return (vcpu && !(vcpu->arch.aux_inuse & KVM_LARCH_PMU));
396 }
397 #endif
398 
399 bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
400 {
401 	return false;
402 }
403 
404 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
405 {
406 	return VM_FAULT_SIGBUS;
407 }
408 
409 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
410 				  struct kvm_translation *tr)
411 {
412 	return -EINVAL;
413 }
414 
415 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
416 {
417 	int ret;
418 
419 	/* Protect from TOD sync and vcpu_load/put() */
420 	preempt_disable();
421 	ret = kvm_pending_timer(vcpu) ||
422 		kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT) & (1 << INT_TI);
423 	preempt_enable();
424 
425 	return ret;
426 }
427 
428 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
429 {
430 	int i;
431 
432 	kvm_debug("vCPU Register Dump:\n");
433 	kvm_debug("\tPC = 0x%08lx\n", vcpu->arch.pc);
434 	kvm_debug("\tExceptions: %08lx\n", vcpu->arch.irq_pending);
435 
436 	for (i = 0; i < 32; i += 4) {
437 		kvm_debug("\tGPR%02d: %08lx %08lx %08lx %08lx\n", i,
438 		       vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1],
439 		       vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
440 	}
441 
442 	kvm_debug("\tCRMD: 0x%08lx, ESTAT: 0x%08lx\n",
443 		  kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD),
444 		  kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT));
445 
446 	kvm_debug("\tERA: 0x%08lx\n", kvm_read_hw_gcsr(LOONGARCH_CSR_ERA));
447 
448 	return 0;
449 }
450 
451 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
452 				struct kvm_mp_state *mp_state)
453 {
454 	*mp_state = vcpu->arch.mp_state;
455 
456 	return 0;
457 }
458 
459 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
460 				struct kvm_mp_state *mp_state)
461 {
462 	int ret = 0;
463 
464 	switch (mp_state->mp_state) {
465 	case KVM_MP_STATE_RUNNABLE:
466 		vcpu->arch.mp_state = *mp_state;
467 		break;
468 	default:
469 		ret = -EINVAL;
470 	}
471 
472 	return ret;
473 }
474 
475 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
476 					struct kvm_guest_debug *dbg)
477 {
478 	if (dbg->control & ~KVM_GUESTDBG_VALID_MASK)
479 		return -EINVAL;
480 
481 	if (dbg->control & KVM_GUESTDBG_ENABLE)
482 		vcpu->guest_debug = dbg->control;
483 	else
484 		vcpu->guest_debug = 0;
485 
486 	return 0;
487 }
488 
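/*
 * Record the physical CPUID of a vCPU in the VM-wide phyid_map.
 * Changing an already-set CPUID or sharing one CPUID between vCPUs is
 * rejected; setting the same value again is a no-op.
 */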
489 static inline int kvm_set_cpuid(struct kvm_vcpu *vcpu, u64 val)
490 {
491 	int cpuid;
492 	struct kvm_phyid_map *map;
493 	struct loongarch_csrs *csr = vcpu->arch.csr;
494 
495 	if (val >= KVM_MAX_PHYID)
496 		return -EINVAL;
497 
498 	map = vcpu->kvm->arch.phyid_map;
499 	cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);
500 
501 	spin_lock(&vcpu->kvm->arch.phyid_map_lock);
502 	if ((cpuid < KVM_MAX_PHYID) && map->phys_map[cpuid].enabled) {
503 		/* Discard duplicated CPUID set operation */
504 		if (cpuid == val) {
505 			spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
506 			return 0;
507 		}
508 
509 		/*
510 		 * The CPUID has already been set;
511 		 * forbid changing it to a different value at runtime
512 		 */
513 		spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
514 		return -EINVAL;
515 	}
516 
517 	if (map->phys_map[val].enabled) {
518 		/* Discard duplicated CPUID set operation */
519 		if (vcpu == map->phys_map[val].vcpu) {
520 			spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
521 			return 0;
522 		}
523 
524 		/*
525 		 * The new CPUID is already in use by another vCPU;
526 		 * forbid sharing the same CPUID between different vCPUs
527 		 */
528 		spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
529 		return -EINVAL;
530 	}
531 
532 	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, val);
533 	map->phys_map[val].enabled	= true;
534 	map->phys_map[val].vcpu		= vcpu;
535 	spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
536 
537 	return 0;
538 }
539 
540 static inline void kvm_drop_cpuid(struct kvm_vcpu *vcpu)
541 {
542 	int cpuid;
543 	struct kvm_phyid_map *map;
544 	struct loongarch_csrs *csr = vcpu->arch.csr;
545 
546 	map = vcpu->kvm->arch.phyid_map;
547 	cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);
548 
549 	if (cpuid >= KVM_MAX_PHYID)
550 		return;
551 
552 	spin_lock(&vcpu->kvm->arch.phyid_map_lock);
553 	if (map->phys_map[cpuid].enabled) {
554 		map->phys_map[cpuid].vcpu = NULL;
555 		map->phys_map[cpuid].enabled = false;
556 		kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);
557 	}
558 	spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
559 }
560 
561 struct kvm_vcpu *kvm_get_vcpu_by_cpuid(struct kvm *kvm, int cpuid)
562 {
563 	struct kvm_phyid_map *map;
564 
565 	if (cpuid >= KVM_MAX_PHYID)
566 		return NULL;
567 
568 	map = kvm->arch.phyid_map;
569 	if (!map->phys_map[cpuid].enabled)
570 		return NULL;
571 
572 	return map->phys_map[cpuid].vcpu;
573 }
574 
575 static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val)
576 {
577 	unsigned long gintc;
578 	struct loongarch_csrs *csr = vcpu->arch.csr;
579 
580 	if (get_gcsr_flag(id) & INVALID_GCSR)
581 		return -EINVAL;
582 
583 	if (id == LOONGARCH_CSR_ESTAT) {
584 		preempt_disable();
585 		vcpu_load(vcpu);
586 		/*
587 		 * Sync pending interrupts into ESTAT so that they are
588 		 * preserved across the VM migration stage
589 		 */
590 		kvm_deliver_intr(vcpu);
591 		vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
592 		vcpu_put(vcpu);
593 		preempt_enable();
594 
595 		/* ESTAT IP0~IP7 get from GINTC */
596 		gintc = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC) & 0xff;
597 		*val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT) | (gintc << 2);
598 		return 0;
599 	}
600 
601 	/*
602 	 * Get software CSR state since software state is consistent
603 	 * with hardware for synchronous ioctl
604 	 */
605 	*val = kvm_read_sw_gcsr(csr, id);
606 
607 	return 0;
608 }
609 
610 static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
611 {
612 	int ret = 0, gintc;
613 	struct loongarch_csrs *csr = vcpu->arch.csr;
614 
615 	if (get_gcsr_flag(id) & INVALID_GCSR)
616 		return -EINVAL;
617 
618 	if (id == LOONGARCH_CSR_CPUID)
619 		return kvm_set_cpuid(vcpu, val);
620 
621 	if (id == LOONGARCH_CSR_ESTAT) {
622 		/* ESTAT IP0~IP7 inject through GINTC */
623 		gintc = (val >> 2) & 0xff;
624 		kvm_set_sw_gcsr(csr, LOONGARCH_CSR_GINTC, gintc);
625 
626 		gintc = val & ~(0xffUL << 2);
627 		kvm_set_sw_gcsr(csr, LOONGARCH_CSR_ESTAT, gintc);
628 
629 		return ret;
630 	}
631 
632 	kvm_write_sw_gcsr(csr, id, val);
633 
634 	/*
635 	 * After modifying a PMU CSR value of the vCPU, set KVM_REQ_PMU
636 	 * if any of the PMU CSRs indicates that the PMU is in use.
637 	 */
638 	if (id >= LOONGARCH_CSR_PERFCTRL0 && id <= LOONGARCH_CSR_PERFCNTR3) {
639 		unsigned long val;
640 
641 		val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0) |
642 		      kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1) |
643 		      kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2) |
644 		      kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
645 
646 		if (val & KVM_PMU_EVENT_ENABLED)
647 			kvm_make_request(KVM_REQ_PMU, vcpu);
648 	}
649 
650 	return ret;
651 }
652 
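/* Return the mask of CPUCFG bits that userspace is allowed to set for @id */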
653 static int _kvm_get_cpucfg_mask(int id, u64 *v)
654 {
655 	if (id < 0 || id >= KVM_MAX_CPUCFG_REGS)
656 		return -EINVAL;
657 
658 	switch (id) {
659 	case LOONGARCH_CPUCFG0:
660 		*v = GENMASK(31, 0);
661 		return 0;
662 	case LOONGARCH_CPUCFG1:
663 		*v = GENMASK(26, 0);
664 		return 0;
665 	case LOONGARCH_CPUCFG2:
666 		/* CPUCFG2 features unconditionally supported by KVM */
667 		*v = CPUCFG2_FP     | CPUCFG2_FPSP  | CPUCFG2_FPDP     |
668 		     CPUCFG2_FPVERS | CPUCFG2_LLFTP | CPUCFG2_LLFTPREV |
669 		     CPUCFG2_LSPW | CPUCFG2_LAM;
670 		/*
671 		 * For the ISA extensions listed below, if one is supported
672 		 * by the host, then it is also supported by KVM.
673 		 */
674 		if (cpu_has_lsx)
675 			*v |= CPUCFG2_LSX;
676 		if (cpu_has_lasx)
677 			*v |= CPUCFG2_LASX;
678 		if (cpu_has_lbt_x86)
679 			*v |= CPUCFG2_X86BT;
680 		if (cpu_has_lbt_arm)
681 			*v |= CPUCFG2_ARMBT;
682 		if (cpu_has_lbt_mips)
683 			*v |= CPUCFG2_MIPSBT;
684 		if (cpu_has_ptw)
685 			*v |= CPUCFG2_PTW;
686 
687 		return 0;
688 	case LOONGARCH_CPUCFG3:
689 		*v = GENMASK(16, 0);
690 		return 0;
691 	case LOONGARCH_CPUCFG4:
692 	case LOONGARCH_CPUCFG5:
693 		*v = GENMASK(31, 0);
694 		return 0;
695 	case LOONGARCH_CPUCFG6:
696 		if (cpu_has_pmp)
697 			*v = GENMASK(14, 0);
698 		else
699 			*v = 0;
700 		return 0;
701 	case LOONGARCH_CPUCFG16:
702 		*v = GENMASK(16, 0);
703 		return 0;
704 	case LOONGARCH_CPUCFG17 ... LOONGARCH_CPUCFG20:
705 		*v = GENMASK(30, 0);
706 		return 0;
707 	default:
708 		/*
709 		 * CPUCFG bits should be zero if reserved by HW or not
710 		 * supported by KVM.
711 		 */
712 		*v = 0;
713 		return 0;
714 	}
715 }
716 
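/*
 * Validate a userspace-supplied CPUCFG value: it must stay within the
 * writable mask for the given index and respect cross-field constraints
 * such as the FP/LSX/LASX dependencies and the host PMU capabilities.
 */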
717 static int kvm_check_cpucfg(int id, u64 val)
718 {
719 	int ret;
720 	u64 mask = 0;
721 
722 	ret = _kvm_get_cpucfg_mask(id, &mask);
723 	if (ret)
724 		return ret;
725 
726 	if (val & ~mask)
727 		/* Unsupported features and/or the higher 32 bits should not be set */
728 		return -EINVAL;
729 
730 	switch (id) {
731 	case LOONGARCH_CPUCFG1:
732 		if ((val & CPUCFG1_MSGINT) && !cpu_has_msgint)
733 			return -EINVAL;
734 		return 0;
735 	case LOONGARCH_CPUCFG2:
736 		if (!(val & CPUCFG2_LLFTP))
737 			/* Guests must have a constant timer */
738 			return -EINVAL;
739 		if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP)))
740 			/* Single and double float point must both be set when FP is enabled */
741 			return -EINVAL;
742 		if ((val & CPUCFG2_LSX) && !(val & CPUCFG2_FP))
743 			/* LSX architecturally implies FP but val does not satisfy that */
744 			return -EINVAL;
745 		if ((val & CPUCFG2_LASX) && !(val & CPUCFG2_LSX))
746 			/* LASX architecturally implies LSX and FP but val does not satisfy that */
747 			return -EINVAL;
748 		return 0;
749 	case LOONGARCH_CPUCFG6:
750 		if (val & CPUCFG6_PMP) {
751 			u32 host = read_cpucfg(LOONGARCH_CPUCFG6);
752 			if ((val & CPUCFG6_PMBITS) != (host & CPUCFG6_PMBITS))
753 				return -EINVAL;
754 			if ((val & CPUCFG6_PMNUM) > (host & CPUCFG6_PMNUM))
755 				return -EINVAL;
756 			if ((val & CPUCFG6_UPM) && !(host & CPUCFG6_UPM))
757 				return -EINVAL;
758 		}
759 		return 0;
760 	default:
761 		/*
762 		 * Values for the other CPUCFG IDs are not being further validated
763 		 * besides the mask check above.
764 		 */
765 		return 0;
766 	}
767 }
768 
769 static int kvm_get_one_reg(struct kvm_vcpu *vcpu,
770 		const struct kvm_one_reg *reg, u64 *v)
771 {
772 	int id, ret = 0;
773 	u64 type = reg->id & KVM_REG_LOONGARCH_MASK;
774 
775 	switch (type) {
776 	case KVM_REG_LOONGARCH_CSR:
777 		id = KVM_GET_IOC_CSR_IDX(reg->id);
778 		ret = _kvm_getcsr(vcpu, id, v);
779 		break;
780 	case KVM_REG_LOONGARCH_CPUCFG:
781 		id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
782 		if (id >= 0 && id < KVM_MAX_CPUCFG_REGS)
783 			*v = vcpu->arch.cpucfg[id];
784 		else
785 			ret = -EINVAL;
786 		break;
787 	case KVM_REG_LOONGARCH_LBT:
788 		if (!kvm_guest_has_lbt(&vcpu->arch))
789 			return -ENXIO;
790 
791 		switch (reg->id) {
792 		case KVM_REG_LOONGARCH_LBT_SCR0:
793 			*v = vcpu->arch.lbt.scr0;
794 			break;
795 		case KVM_REG_LOONGARCH_LBT_SCR1:
796 			*v = vcpu->arch.lbt.scr1;
797 			break;
798 		case KVM_REG_LOONGARCH_LBT_SCR2:
799 			*v = vcpu->arch.lbt.scr2;
800 			break;
801 		case KVM_REG_LOONGARCH_LBT_SCR3:
802 			*v = vcpu->arch.lbt.scr3;
803 			break;
804 		case KVM_REG_LOONGARCH_LBT_EFLAGS:
805 			*v = vcpu->arch.lbt.eflags;
806 			break;
807 		case KVM_REG_LOONGARCH_LBT_FTOP:
808 			*v = vcpu->arch.fpu.ftop;
809 			break;
810 		default:
811 			ret = -EINVAL;
812 			break;
813 		}
814 		break;
815 	case KVM_REG_LOONGARCH_KVM:
816 		switch (reg->id) {
817 		case KVM_REG_LOONGARCH_COUNTER:
818 			*v = get_cycles() + vcpu->kvm->arch.time_offset;
819 			break;
820 		case KVM_REG_LOONGARCH_DEBUG_INST:
821 			*v = INSN_HVCL | KVM_HCALL_SWDBG;
822 			break;
823 		default:
824 			ret = -EINVAL;
825 			break;
826 		}
827 		break;
828 	default:
829 		ret = -EINVAL;
830 		break;
831 	}
832 
833 	return ret;
834 }
835 
836 static int kvm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
837 {
838 	int ret = 0;
839 	u64 v, size = reg->id & KVM_REG_SIZE_MASK;
840 
841 	switch (size) {
842 	case KVM_REG_SIZE_U64:
843 		ret = kvm_get_one_reg(vcpu, reg, &v);
844 		if (ret)
845 			return ret;
846 		ret = put_user(v, (u64 __user *)(long)reg->addr);
847 		break;
848 	default:
849 		ret = -EINVAL;
850 		break;
851 	}
852 
853 	return ret;
854 }
855 
856 static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
857 			const struct kvm_one_reg *reg, u64 v)
858 {
859 	int id, ret = 0;
860 	u64 type = reg->id & KVM_REG_LOONGARCH_MASK;
861 
862 	switch (type) {
863 	case KVM_REG_LOONGARCH_CSR:
864 		id = KVM_GET_IOC_CSR_IDX(reg->id);
865 		ret = _kvm_setcsr(vcpu, id, v);
866 		break;
867 	case KVM_REG_LOONGARCH_CPUCFG:
868 		id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
869 		ret = kvm_check_cpucfg(id, v);
870 		if (ret)
871 			break;
872 		vcpu->arch.cpucfg[id] = (u32)v;
873 		if (id == LOONGARCH_CPUCFG6)
874 			vcpu->arch.max_pmu_csrid =
875 				LOONGARCH_CSR_PERFCTRL0 + 2 * kvm_get_pmu_num(&vcpu->arch) + 1;
876 		break;
877 	case KVM_REG_LOONGARCH_LBT:
878 		if (!kvm_guest_has_lbt(&vcpu->arch))
879 			return -ENXIO;
880 
881 		switch (reg->id) {
882 		case KVM_REG_LOONGARCH_LBT_SCR0:
883 			vcpu->arch.lbt.scr0 = v;
884 			break;
885 		case KVM_REG_LOONGARCH_LBT_SCR1:
886 			vcpu->arch.lbt.scr1 = v;
887 			break;
888 		case KVM_REG_LOONGARCH_LBT_SCR2:
889 			vcpu->arch.lbt.scr2 = v;
890 			break;
891 		case KVM_REG_LOONGARCH_LBT_SCR3:
892 			vcpu->arch.lbt.scr3 = v;
893 			break;
894 		case KVM_REG_LOONGARCH_LBT_EFLAGS:
895 			vcpu->arch.lbt.eflags = v;
896 			break;
897 		case KVM_REG_LOONGARCH_LBT_FTOP:
898 			vcpu->arch.fpu.ftop = v;
899 			break;
900 		default:
901 			ret = -EINVAL;
902 			break;
903 		}
904 		break;
905 	case KVM_REG_LOONGARCH_KVM:
906 		switch (reg->id) {
907 		case KVM_REG_LOONGARCH_COUNTER:
908 			/*
909 			 * The counter offset is per board, not per vCPU, so on
910 			 * SMP systems only set it once, from vCPU 0
911 			 */
912 			if (vcpu->vcpu_id == 0)
913 				vcpu->kvm->arch.time_offset = (signed long)(v - get_cycles());
914 			break;
915 		case KVM_REG_LOONGARCH_VCPU_RESET:
916 			vcpu->arch.st.guest_addr = 0;
917 			memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
918 			memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
919 
920 			/*
921 			 * On vCPU reset, clear the ESTAT and GINTC registers here;
922 			 * the other CSR registers are cleared via _kvm_setcsr().
923 			 */
924 			kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_GINTC, 0);
925 			kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_ESTAT, 0);
926 			break;
927 		default:
928 			ret = -EINVAL;
929 			break;
930 		}
931 		break;
932 	default:
933 		ret = -EINVAL;
934 		break;
935 	}
936 
937 	return ret;
938 }
939 
940 static int kvm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
941 {
942 	int ret = 0;
943 	u64 v, size = reg->id & KVM_REG_SIZE_MASK;
944 
945 	switch (size) {
946 	case KVM_REG_SIZE_U64:
947 		ret = get_user(v, (u64 __user *)(long)reg->addr);
948 		if (ret)
949 			return ret;
950 		break;
951 	default:
952 		return -EINVAL;
953 	}
954 
955 	return kvm_set_one_reg(vcpu, reg, v);
956 }
957 
958 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
959 {
960 	return -ENOIOCTLCMD;
961 }
962 
963 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
964 {
965 	return -ENOIOCTLCMD;
966 }
967 
968 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
969 {
970 	int i;
971 
972 	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
973 		regs->gpr[i] = vcpu->arch.gprs[i];
974 
975 	regs->pc = vcpu->arch.pc;
976 
977 	return 0;
978 }
979 
980 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
981 {
982 	int i;
983 
984 	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
985 		vcpu->arch.gprs[i] = regs->gpr[i];
986 
987 	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
988 	vcpu->arch.pc = regs->pc;
989 
990 	return 0;
991 }
992 
993 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
994 				     struct kvm_enable_cap *cap)
995 {
996 	/* FPU is enabled by default, will support LSX/LASX later. */
997 	return -EINVAL;
998 }
999 
1000 static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu,
1001 					 struct kvm_device_attr *attr)
1002 {
1003 	switch (attr->attr) {
1004 	case LOONGARCH_CPUCFG2:
1005 	case LOONGARCH_CPUCFG6:
1006 		return 0;
1007 	case CPUCFG_KVM_FEATURE:
1008 		return 0;
1009 	default:
1010 		return -ENXIO;
1011 	}
1012 
1013 	return -ENXIO;
1014 }
1015 
1016 static int kvm_loongarch_pvtime_has_attr(struct kvm_vcpu *vcpu,
1017 					 struct kvm_device_attr *attr)
1018 {
1019 	if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
1020 			|| attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
1021 		return -ENXIO;
1022 
1023 	return 0;
1024 }
1025 
1026 static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
1027 				       struct kvm_device_attr *attr)
1028 {
1029 	int ret = -ENXIO;
1030 
1031 	switch (attr->group) {
1032 	case KVM_LOONGARCH_VCPU_CPUCFG:
1033 		ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr);
1034 		break;
1035 	case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
1036 		ret = kvm_loongarch_pvtime_has_attr(vcpu, attr);
1037 		break;
1038 	default:
1039 		break;
1040 	}
1041 
1042 	return ret;
1043 }
1044 
1045 static int kvm_loongarch_cpucfg_get_attr(struct kvm_vcpu *vcpu,
1046 					 struct kvm_device_attr *attr)
1047 {
1048 	int ret = 0;
1049 	uint64_t val;
1050 	uint64_t __user *uaddr = (uint64_t __user *)attr->addr;
1051 
1052 	switch (attr->attr) {
1053 	case 0 ... (KVM_MAX_CPUCFG_REGS - 1):
1054 		ret = _kvm_get_cpucfg_mask(attr->attr, &val);
1055 		if (ret)
1056 			return ret;
1057 		break;
1058 	case CPUCFG_KVM_FEATURE:
1059 		val = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK;
1060 		break;
1061 	default:
1062 		return -ENXIO;
1063 	}
1064 
1065 	put_user(val, uaddr);
1066 
1067 	return ret;
1068 }
1069 
1070 static int kvm_loongarch_pvtime_get_attr(struct kvm_vcpu *vcpu,
1071 					 struct kvm_device_attr *attr)
1072 {
1073 	u64 gpa;
1074 	u64 __user *user = (u64 __user *)attr->addr;
1075 
1076 	if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
1077 			|| attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
1078 		return -ENXIO;
1079 
1080 	gpa = vcpu->arch.st.guest_addr;
1081 	if (put_user(gpa, user))
1082 		return -EFAULT;
1083 
1084 	return 0;
1085 }
1086 
1087 static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu,
1088 				       struct kvm_device_attr *attr)
1089 {
1090 	int ret = -ENXIO;
1091 
1092 	switch (attr->group) {
1093 	case KVM_LOONGARCH_VCPU_CPUCFG:
1094 		ret = kvm_loongarch_cpucfg_get_attr(vcpu, attr);
1095 		break;
1096 	case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
1097 		ret = kvm_loongarch_pvtime_get_attr(vcpu, attr);
1098 		break;
1099 	default:
1100 		break;
1101 	}
1102 
1103 	return ret;
1104 }
1105 
1106 static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu,
1107 					 struct kvm_device_attr *attr)
1108 {
1109 	u64 val, valid;
1110 	u64 __user *user = (u64 __user *)attr->addr;
1111 	struct kvm *kvm = vcpu->kvm;
1112 
1113 	switch (attr->attr) {
1114 	case CPUCFG_KVM_FEATURE:
1115 		if (get_user(val, user))
1116 			return -EFAULT;
1117 
1118 		valid = LOONGARCH_PV_FEAT_MASK;
1119 		if (val & ~valid)
1120 			return -EINVAL;
1121 
1122 		/* All vCPUs must be set to the same PV features */
1123 		if ((kvm->arch.pv_features & LOONGARCH_PV_FEAT_UPDATED)
1124 				&& ((kvm->arch.pv_features & valid) != val))
1125 			return -EINVAL;
1126 		kvm->arch.pv_features = val | LOONGARCH_PV_FEAT_UPDATED;
1127 		return 0;
1128 	default:
1129 		return -ENXIO;
1130 	}
1131 }
1132 
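/*
 * Set the steal-time GPA from userspace. A GPA with the valid bit set
 * must point into an existing memslot; on success a steal-time update
 * is requested for the vCPU.
 */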
1133 static int kvm_loongarch_pvtime_set_attr(struct kvm_vcpu *vcpu,
1134 					 struct kvm_device_attr *attr)
1135 {
1136 	int idx, ret = 0;
1137 	u64 gpa, __user *user = (u64 __user *)attr->addr;
1138 	struct kvm *kvm = vcpu->kvm;
1139 
1140 	if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
1141 			|| attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
1142 		return -ENXIO;
1143 
1144 	if (get_user(gpa, user))
1145 		return -EFAULT;
1146 
1147 	if (gpa & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
1148 		return -EINVAL;
1149 
1150 	if (!(gpa & KVM_STEAL_PHYS_VALID)) {
1151 		vcpu->arch.st.guest_addr = gpa;
1152 		return 0;
1153 	}
1154 
1155 	/* Check the address is in a valid memslot */
1156 	idx = srcu_read_lock(&kvm->srcu);
1157 	if (kvm_is_error_hva(gfn_to_hva(kvm, gpa >> PAGE_SHIFT)))
1158 		ret = -EINVAL;
1159 	srcu_read_unlock(&kvm->srcu, idx);
1160 
1161 	if (!ret) {
1162 		vcpu->arch.st.guest_addr = gpa;
1163 		vcpu->arch.st.last_steal = current->sched_info.run_delay;
1164 		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
1165 	}
1166 
1167 	return ret;
1168 }
1169 
1170 static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu,
1171 				       struct kvm_device_attr *attr)
1172 {
1173 	int ret = -ENXIO;
1174 
1175 	switch (attr->group) {
1176 	case KVM_LOONGARCH_VCPU_CPUCFG:
1177 		ret = kvm_loongarch_cpucfg_set_attr(vcpu, attr);
1178 		break;
1179 	case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
1180 		ret = kvm_loongarch_pvtime_set_attr(vcpu, attr);
1181 		break;
1182 	default:
1183 		break;
1184 	}
1185 
1186 	return ret;
1187 }
1188 
1189 long kvm_arch_vcpu_ioctl(struct file *filp,
1190 			 unsigned int ioctl, unsigned long arg)
1191 {
1192 	long r;
1193 	struct kvm_device_attr attr;
1194 	void __user *argp = (void __user *)arg;
1195 	struct kvm_vcpu *vcpu = filp->private_data;
1196 
1197 	/*
1198 	 * Only the software CSR state should be modified here.
1199 	 *
1200 	 * If any hardware CSR register is modified, a vcpu_load/vcpu_put pair
1201 	 * should be used: the hardware CSR registers are owned by the current
1202 	 * vCPU, and switching to another vCPU requires reloading them.
1203 	 *
1204 	 * If the software CSR state is modified, the KVM_LARCH_HWCSR_USABLE
1205 	 * bit should be cleared in vcpu->arch.aux_inuse so that vcpu_load
1206 	 * will reload the CSR registers from the software state.
1207 	 */
1208 
1209 	switch (ioctl) {
1210 	case KVM_SET_ONE_REG:
1211 	case KVM_GET_ONE_REG: {
1212 		struct kvm_one_reg reg;
1213 
1214 		r = -EFAULT;
1215 		if (copy_from_user(&reg, argp, sizeof(reg)))
1216 			break;
1217 		if (ioctl == KVM_SET_ONE_REG) {
1218 			r = kvm_set_reg(vcpu, &reg);
1219 			vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
1220 		} else
1221 			r = kvm_get_reg(vcpu, &reg);
1222 		break;
1223 	}
1224 	case KVM_ENABLE_CAP: {
1225 		struct kvm_enable_cap cap;
1226 
1227 		r = -EFAULT;
1228 		if (copy_from_user(&cap, argp, sizeof(cap)))
1229 			break;
1230 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
1231 		break;
1232 	}
1233 	case KVM_HAS_DEVICE_ATTR: {
1234 		r = -EFAULT;
1235 		if (copy_from_user(&attr, argp, sizeof(attr)))
1236 			break;
1237 		r = kvm_loongarch_vcpu_has_attr(vcpu, &attr);
1238 		break;
1239 	}
1240 	case KVM_GET_DEVICE_ATTR: {
1241 		r = -EFAULT;
1242 		if (copy_from_user(&attr, argp, sizeof(attr)))
1243 			break;
1244 		r = kvm_loongarch_vcpu_get_attr(vcpu, &attr);
1245 		break;
1246 	}
1247 	case KVM_SET_DEVICE_ATTR: {
1248 		r = -EFAULT;
1249 		if (copy_from_user(&attr, argp, sizeof(attr)))
1250 			break;
1251 		r = kvm_loongarch_vcpu_set_attr(vcpu, &attr);
1252 		break;
1253 	}
1254 	default:
1255 		r = -ENOIOCTLCMD;
1256 		break;
1257 	}
1258 
1259 	return r;
1260 }
1261 
1262 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1263 {
1264 	int i = 0;
1265 
1266 	fpu->fcc = vcpu->arch.fpu.fcc;
1267 	fpu->fcsr = vcpu->arch.fpu.fcsr;
1268 	for (i = 0; i < NUM_FPU_REGS; i++)
1269 		memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i], FPU_REG_WIDTH / 64);
1270 
1271 	return 0;
1272 }
1273 
1274 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1275 {
1276 	int i = 0;
1277 
1278 	vcpu->arch.fpu.fcc = fpu->fcc;
1279 	vcpu->arch.fpu.fcsr = fpu->fcsr;
1280 	for (i = 0; i < NUM_FPU_REGS; i++)
1281 		memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i], FPU_REG_WIDTH / 64);
1282 
1283 	return 0;
1284 }
1285 
1286 #ifdef CONFIG_CPU_HAS_LBT
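/* Enable LBT for the guest and restore its scratch register/eflags context */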
1287 int kvm_own_lbt(struct kvm_vcpu *vcpu)
1288 {
1289 	if (!kvm_guest_has_lbt(&vcpu->arch))
1290 		return -EINVAL;
1291 
1292 	preempt_disable();
1293 	if (!(vcpu->arch.aux_inuse & KVM_LARCH_LBT)) {
1294 		set_csr_euen(CSR_EUEN_LBTEN);
1295 		_restore_lbt(&vcpu->arch.lbt);
1296 		vcpu->arch.aux_inuse |= KVM_LARCH_LBT;
1297 	}
1298 	preempt_enable();
1299 
1300 	return 0;
1301 }
1302 
1303 static void kvm_lose_lbt(struct kvm_vcpu *vcpu)
1304 {
1305 	preempt_disable();
1306 	if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) {
1307 		_save_lbt(&vcpu->arch.lbt);
1308 		clear_csr_euen(CSR_EUEN_LBTEN);
1309 		vcpu->arch.aux_inuse &= ~KVM_LARCH_LBT;
1310 	}
1311 	preempt_enable();
1312 }
1313 
1314 static void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr)
1315 {
1316 	/*
1317 	 * If TM mode is enabled, saving/restoring the FPU top register
1318 	 * raises an LBT exception, so enable LBT here in advance
1319 	 */
1320 	if (fcsr & FPU_CSR_TM)
1321 		kvm_own_lbt(vcpu);
1322 }
1323 
1324 static void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu)
1325 {
1326 	if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1327 		if (vcpu->arch.aux_inuse & KVM_LARCH_LBT)
1328 			return;
1329 		kvm_check_fcsr(vcpu, read_fcsr(LOONGARCH_FCSR0));
1330 	}
1331 }
1332 #else
1333 static inline void kvm_lose_lbt(struct kvm_vcpu *vcpu) { }
1334 static inline void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr) { }
1335 static inline void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu) { }
1336 #endif
1337 
1338 /* Enable FPU and restore context */
1339 void kvm_own_fpu(struct kvm_vcpu *vcpu)
1340 {
1341 	preempt_disable();
1342 
1343 	/*
1344 	 * Enable FPU for guest
1345 	 * Set FR and FRE according to guest context
1346 	 */
1347 	kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1348 	set_csr_euen(CSR_EUEN_FPEN);
1349 
1350 	kvm_restore_fpu(&vcpu->arch.fpu);
1351 	vcpu->arch.aux_inuse |= KVM_LARCH_FPU;
1352 	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
1353 
1354 	preempt_enable();
1355 }
1356 
1357 #ifdef CONFIG_CPU_HAS_LSX
1358 /* Enable LSX and restore context */
1359 int kvm_own_lsx(struct kvm_vcpu *vcpu)
1360 {
1361 	if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch))
1362 		return -EINVAL;
1363 
1364 	preempt_disable();
1365 
1366 	/* Enable LSX for guest */
1367 	kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1368 	set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN);
1369 	switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1370 	case KVM_LARCH_FPU:
1371 		/*
1372 		 * Guest FPU state already loaded,
1373 		 * only restore upper LSX state
1374 		 */
1375 		_restore_lsx_upper(&vcpu->arch.fpu);
1376 		break;
1377 	default:
1378 		/* Neither FP nor LSX is active,
1379 		 * restore the full LSX state
1380 		 */
1381 		kvm_restore_lsx(&vcpu->arch.fpu);
1382 		break;
1383 	}
1384 
1385 	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX);
1386 	vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU;
1387 	preempt_enable();
1388 
1389 	return 0;
1390 }
1391 #endif
1392 
1393 #ifdef CONFIG_CPU_HAS_LASX
1394 /* Enable LASX and restore context */
1395 int kvm_own_lasx(struct kvm_vcpu *vcpu)
1396 {
1397 	if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch) || !kvm_guest_has_lasx(&vcpu->arch))
1398 		return -EINVAL;
1399 
1400 	preempt_disable();
1401 
1402 	kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1403 	set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
1404 	switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) {
1405 	case KVM_LARCH_LSX:
1406 	case KVM_LARCH_LSX | KVM_LARCH_FPU:
1407 		/* Guest LSX state already loaded, only restore upper LASX state */
1408 		_restore_lasx_upper(&vcpu->arch.fpu);
1409 		break;
1410 	case KVM_LARCH_FPU:
1411 		/* Guest FP state already loaded, only restore upper LSX & LASX state */
1412 		_restore_lsx_upper(&vcpu->arch.fpu);
1413 		_restore_lasx_upper(&vcpu->arch.fpu);
1414 		break;
1415 	default:
1416 		/* Neither FP nor LSX is active, restore full LASX state */
1417 		kvm_restore_lasx(&vcpu->arch.fpu);
1418 		break;
1419 	}
1420 
1421 	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX);
1422 	vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU;
1423 	preempt_enable();
1424 
1425 	return 0;
1426 }
1427 #endif
1428 
1429 /* Save context and disable FPU */
1430 void kvm_lose_fpu(struct kvm_vcpu *vcpu)
1431 {
1432 	preempt_disable();
1433 
1434 	kvm_check_fcsr_alive(vcpu);
1435 	if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) {
1436 		kvm_save_lasx(&vcpu->arch.fpu);
1437 		vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX);
1438 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LASX);
1439 
1440 		/* Disable LASX & LSX & FPU */
1441 		clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
1442 	} else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) {
1443 		kvm_save_lsx(&vcpu->arch.fpu);
1444 		vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU);
1445 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX);
1446 
1447 		/* Disable LSX & FPU */
1448 		clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN);
1449 	} else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1450 		kvm_save_fpu(&vcpu->arch.fpu);
1451 		vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU;
1452 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);
1453 
1454 		/* Disable FPU */
1455 		clear_csr_euen(CSR_EUEN_FPEN);
1456 	}
1457 	kvm_lose_lbt(vcpu);
1458 
1459 	preempt_enable();
1460 }
1461 
1462 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
1463 {
1464 	int intr = (int)irq->irq;
1465 
1466 	if (intr > 0)
1467 		kvm_queue_irq(vcpu, intr);
1468 	else if (intr < 0)
1469 		kvm_dequeue_irq(vcpu, -intr);
1470 	else {
1471 		kvm_err("%s: invalid interrupt ioctl %d\n", __func__, irq->irq);
1472 		return -EINVAL;
1473 	}
1474 
1475 	kvm_vcpu_kick(vcpu);
1476 
1477 	return 0;
1478 }
1479 
1480 long kvm_arch_vcpu_unlocked_ioctl(struct file *filp, unsigned int ioctl,
1481 				  unsigned long arg)
1482 {
1483 	void __user *argp = (void __user *)arg;
1484 	struct kvm_vcpu *vcpu = filp->private_data;
1485 
1486 	if (ioctl == KVM_INTERRUPT) {
1487 		struct kvm_interrupt irq;
1488 
1489 		if (copy_from_user(&irq, argp, sizeof(irq)))
1490 			return -EFAULT;
1491 
1492 		kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq);
1493 
1494 		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
1495 	}
1496 
1497 	return -ENOIOCTLCMD;
1498 }
1499 
1500 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
1501 {
1502 	return 0;
1503 }
1504 
1505 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
1506 {
1507 	unsigned long timer_hz;
1508 	struct loongarch_csrs *csr;
1509 
1510 	vcpu->arch.vpid = 0;
1511 	vcpu->arch.flush_gpa = INVALID_GPA;
1512 
1513 	hrtimer_setup(&vcpu->arch.swtimer, kvm_swtimer_wakeup, CLOCK_MONOTONIC,
1514 		      HRTIMER_MODE_ABS_PINNED_HARD);
1515 
1516 	/* Get GPA (=HVA) of PGD for kvm hypervisor */
1517 	vcpu->arch.kvm_pgd = __pa(vcpu->kvm->arch.pgd);
1518 
1519 	/*
1520 	 * Get the PGD for the primary MMU. A virtual address is used since
1521 	 * memory is accessed after loading from CSR_PGD in the TLB exception fast path.
1522 	 */
1523 	vcpu->arch.host_pgd = (unsigned long)vcpu->kvm->mm->pgd;
1524 
1525 	vcpu->arch.handle_exit = kvm_handle_exit;
1526 	vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry;
1527 	vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL);
1528 	if (!vcpu->arch.csr)
1529 		return -ENOMEM;
1530 
1531 	/*
1532 	 * All KVM exceptions share one exception entry, and the host <-> guest
1533 	 * switch also switches the ECFG.VS field, so keep the host value here.
1534 	 */
1535 	vcpu->arch.host_ecfg = (read_csr_ecfg() & CSR_ECFG_VS);
1536 
1537 	/* Init */
1538 	vcpu->arch.last_sched_cpu = -1;
1539 
1540 	/* Init ipi_state lock */
1541 	spin_lock_init(&vcpu->arch.ipi_state.lock);
1542 
1543 	/*
1544 	 * Initialize guest register state to valid architectural reset state.
1545 	 */
1546 	timer_hz = calc_const_freq();
1547 	kvm_init_timer(vcpu, timer_hz);
1548 
1549 	/* Set Initialize mode for guest */
1550 	/* Set initial mode (direct address mode) for the guest */
1551 	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CRMD, CSR_CRMD_DA);
1552 
1553 	/* Set cpuid */
1554 	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id);
1555 	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);
1556 
1557 	/* Start with no pending virtual guest interrupts */
1558 	csr->csrs[LOONGARCH_CSR_GINTC] = 0;
1559 
1560 	return 0;
1561 }
1562 
1563 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
1564 {
1565 }
1566 
1567 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1568 {
1569 	int cpu;
1570 	struct kvm_context *context;
1571 
1572 	hrtimer_cancel(&vcpu->arch.swtimer);
1573 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
1574 	kvm_drop_cpuid(vcpu);
1575 	kfree(vcpu->arch.csr);
1576 
1577 	/*
1578 	 * If the vCPU is freed and reused as another vCPU, we don't want the
1579 	 * matching pointer wrongly hanging around in last_vcpu.
1580 	 */
1581 	for_each_possible_cpu(cpu) {
1582 		context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
1583 		if (context->last_vcpu == vcpu)
1584 			context->last_vcpu = NULL;
1585 	}
1586 }
1587 
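/*
 * Restore guest context on vcpu_load, called with interrupts disabled.
 * The full guest CSR context is reloaded into hardware only when it may
 * have been clobbered, i.e. after migrating to another CPU or when a
 * different vCPU ran last on this CPU.
 */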
1588 static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1589 {
1590 	bool migrated;
1591 	struct kvm_context *context;
1592 	struct loongarch_csrs *csr = vcpu->arch.csr;
1593 
1594 	/*
1595 	 * Have we migrated to a different CPU?
1596 	 * If so, any old guest TLB state may be stale.
1597 	 */
1598 	migrated = (vcpu->arch.last_sched_cpu != cpu);
1599 
1600 	/*
1601 	 * Was this the last vCPU to run on this CPU?
1602 	 * If not, any old guest state from this vCPU will have been clobbered.
1603 	 */
1604 	context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
1605 	if (migrated || (context->last_vcpu != vcpu))
1606 		vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
1607 	context->last_vcpu = vcpu;
1608 
1609 	/* Restore timer state regardless */
1610 	kvm_restore_timer(vcpu);
1611 	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
1612 
1613 	/* Don't bother restoring registers multiple times unless necessary */
1614 	if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE)
1615 		return 0;
1616 
1617 	write_csr_gcntc((ulong)vcpu->kvm->arch.time_offset);
1618 
1619 	/* Restore guest CSR registers */
1620 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
1621 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
1622 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
1623 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_MISC);
1624 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
1625 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ERA);
1626 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADV);
1627 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADI);
1628 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
1629 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
1630 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
1631 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
1632 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
1633 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ASID);
1634 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
1635 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
1636 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
1637 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
1638 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
1639 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
1640 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
1641 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS0);
1642 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS1);
1643 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS2);
1644 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS3);
1645 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS4);
1646 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS5);
1647 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS6);
1648 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS7);
1649 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TMID);
1650 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
1651 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
1652 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
1653 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
1654 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
1655 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
1656 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
1657 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
1658 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
1659 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
1660 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
1661 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
1662 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
1663 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
1664 	if (cpu_has_msgint) {
1665 		kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ISR0);
1666 		kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ISR1);
1667 		kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ISR2);
1668 		kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ISR3);
1669 	}
1670 
1671 	/* Restore Root.GINTC from unused Guest.GINTC register */
1672 	write_csr_gintc(csr->csrs[LOONGARCH_CSR_GINTC]);
1673 
1674 	/*
1675 	 * We should clear linked load bit to break interrupted atomics. This
1676 	 * prevents a SC on the next vCPU from succeeding by matching a LL on
1677 	 * the previous vCPU.
1678 	 */
1679 	if (vcpu->kvm->created_vcpus > 1)
1680 		set_gcsr_llbctl(CSR_LLBCTL_WCLLB);
1681 
1682 	vcpu->arch.aux_inuse |= KVM_LARCH_HWCSR_USABLE;
1683 
1684 	return 0;
1685 }
1686 
1687 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1688 {
1689 	unsigned long flags;
1690 
1691 	local_irq_save(flags);
1692 	/* Restore guest state to registers */
1693 	_kvm_vcpu_load(vcpu, cpu);
1694 	local_irq_restore(flags);
1695 }
1696 
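/*
 * Save guest context on vcpu_put, called with interrupts disabled.
 * Any live FPU/LSX/LASX state is saved via kvm_lose_fpu(); the hardware
 * CSR state is copied back to the software copy only if the latter is
 * stale, and Root.GINTC is stashed in the unused Guest.GINTC slot.
 */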
1697 static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
1698 {
1699 	struct loongarch_csrs *csr = vcpu->arch.csr;
1700 
1701 	kvm_lose_fpu(vcpu);
1702 
1703 	/*
1704 	 * Update the software CSR state from hardware if it is stale.
1705 	 * Most CSR registers are unchanged across a process context switch,
1706 	 * except for registers such as the remaining timer tick value and
1707 	 * the injected interrupt state.
1708 	 */
1709 	if (vcpu->arch.aux_inuse & KVM_LARCH_SWCSR_LATEST)
1710 		goto out;
1711 
1712 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
1713 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
1714 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
1715 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_MISC);
1716 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
1717 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ERA);
1718 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADV);
1719 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADI);
1720 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
1721 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
1722 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
1723 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
1724 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
1725 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ASID);
1726 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
1727 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
1728 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
1729 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
1730 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
1731 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
1732 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
1733 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG1);
1734 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG2);
1735 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG3);
1736 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS0);
1737 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS1);
1738 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS2);
1739 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS3);
1740 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS4);
1741 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS5);
1742 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS6);
1743 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS7);
1744 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TMID);
1745 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
1746 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
1747 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
1748 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
1749 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
1750 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
1751 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
1752 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
1753 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
1754 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
1755 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
1756 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
1757 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
1758 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
1759 	if (cpu_has_msgint) {
1760 		kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ISR0);
1761 		kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ISR1);
1762 		kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ISR2);
1763 		kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ISR3);
1764 	}
1765 
1766 	vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST;
1767 
1768 out:
1769 	kvm_save_timer(vcpu);
1770 	/* Save Root.GINTC into unused Guest.GINTC register */
1771 	csr->csrs[LOONGARCH_CSR_GINTC] = read_csr_gintc();
1772 
1773 	return 0;
1774 }
1775 
1776 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1777 {
1778 	int cpu;
1779 	unsigned long flags;
1780 
1781 	local_irq_save(flags);
1782 	cpu = smp_processor_id();
1783 	vcpu->arch.last_sched_cpu = cpu;
1784 
1785 	/* Save guest state in registers */
1786 	_kvm_vcpu_put(vcpu, cpu);
1787 	local_irq_restore(flags);
1788 }
1789 
1790 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
1791 {
1792 	int r = -EINTR;
1793 	struct kvm_run *run = vcpu->run;
1794 
1795 	if (vcpu->mmio_needed) {
1796 		if (!vcpu->mmio_is_write)
1797 			kvm_complete_mmio_read(vcpu, run);
1798 		vcpu->mmio_needed = 0;
1799 	}
1800 
1801 	switch (run->exit_reason) {
1802 	case KVM_EXIT_HYPERCALL:
1803 		kvm_complete_user_service(vcpu, run);
1804 		break;
1805 	case KVM_EXIT_LOONGARCH_IOCSR:
1806 		if (!run->iocsr_io.is_write)
1807 			kvm_complete_iocsr_read(vcpu, run);
1808 		break;
1809 	}
1810 
1811 	if (!vcpu->wants_to_run)
1812 		return r;
1813 
1814 	/* Clear exit_reason */
1815 	run->exit_reason = KVM_EXIT_UNKNOWN;
1816 	lose_fpu(1);
1817 	vcpu_load(vcpu);
1818 	kvm_sigset_activate(vcpu);
1819 	r = kvm_pre_enter_guest(vcpu);
1820 	if (r != RESUME_GUEST)
1821 		goto out;
1822 
1823 	guest_timing_enter_irqoff();
1824 	guest_state_enter_irqoff();
1825 	trace_kvm_enter(vcpu);
1826 	r = kvm_loongarch_ops->enter_guest(run, vcpu);
1827 
1828 	trace_kvm_out(vcpu);
1829 	/*
1830 	 * Guest exit is already recorded in kvm_handle_exit(), and its
1831 	 * return value must not be RESUME_GUEST here.
1832 	 */
1833 	local_irq_enable();
1834 out:
1835 	kvm_sigset_deactivate(vcpu);
1836 	vcpu_put(vcpu);
1837 
1838 	return r;
1839 }
1840