xref: /linux/arch/loongarch/kvm/vcpu.c (revision 221533629550e920580ab428f13ffebf54063b95)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
4  */
5 
6 #include <linux/kvm_host.h>
7 #include <asm/fpu.h>
8 #include <asm/lbt.h>
9 #include <asm/loongarch.h>
10 #include <asm/setup.h>
11 #include <asm/time.h>
12 
13 #define CREATE_TRACE_POINTS
14 #include "trace.h"
15 
16 const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
17 	KVM_GENERIC_VCPU_STATS(),
18 	STATS_DESC_COUNTER(VCPU, int_exits),
19 	STATS_DESC_COUNTER(VCPU, idle_exits),
20 	STATS_DESC_COUNTER(VCPU, cpucfg_exits),
21 	STATS_DESC_COUNTER(VCPU, signal_exits),
22 	STATS_DESC_COUNTER(VCPU, hypercall_exits),
23 	STATS_DESC_COUNTER(VCPU, ipi_read_exits),
24 	STATS_DESC_COUNTER(VCPU, ipi_write_exits),
25 	STATS_DESC_COUNTER(VCPU, eiointc_read_exits),
26 	STATS_DESC_COUNTER(VCPU, eiointc_write_exits),
27 	STATS_DESC_COUNTER(VCPU, pch_pic_read_exits),
28 	STATS_DESC_COUNTER(VCPU, pch_pic_write_exits)
29 };
30 
31 const struct kvm_stats_header kvm_vcpu_stats_header = {
32 	.name_size = KVM_STATS_NAME_SIZE,
33 	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
34 	.id_offset = sizeof(struct kvm_stats_header),
35 	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
36 	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
37 		       sizeof(kvm_vcpu_stats_desc),
38 };
39 
40 static inline void kvm_save_host_pmu(struct kvm_vcpu *vcpu)
41 {
42 	struct kvm_context *context;
43 
44 	context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
45 	context->perf_cntr[0] = read_csr_perfcntr0();
46 	context->perf_cntr[1] = read_csr_perfcntr1();
47 	context->perf_cntr[2] = read_csr_perfcntr2();
48 	context->perf_cntr[3] = read_csr_perfcntr3();
49 	context->perf_ctrl[0] = write_csr_perfctrl0(0);
50 	context->perf_ctrl[1] = write_csr_perfctrl1(0);
51 	context->perf_ctrl[2] = write_csr_perfctrl2(0);
52 	context->perf_ctrl[3] = write_csr_perfctrl3(0);
53 }
54 
55 static inline void kvm_restore_host_pmu(struct kvm_vcpu *vcpu)
56 {
57 	struct kvm_context *context;
58 
59 	context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
60 	write_csr_perfcntr0(context->perf_cntr[0]);
61 	write_csr_perfcntr1(context->perf_cntr[1]);
62 	write_csr_perfcntr2(context->perf_cntr[2]);
63 	write_csr_perfcntr3(context->perf_cntr[3]);
64 	write_csr_perfctrl0(context->perf_ctrl[0]);
65 	write_csr_perfctrl1(context->perf_ctrl[1]);
66 	write_csr_perfctrl2(context->perf_ctrl[2]);
67 	write_csr_perfctrl3(context->perf_ctrl[3]);
68 }
69 
70 
71 static inline void kvm_save_guest_pmu(struct kvm_vcpu *vcpu)
72 {
73 	struct loongarch_csrs *csr = vcpu->arch.csr;
74 
75 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
76 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
77 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
78 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
79 	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
80 	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
81 	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
82 	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
83 }
84 
85 static inline void kvm_restore_guest_pmu(struct kvm_vcpu *vcpu)
86 {
87 	struct loongarch_csrs *csr = vcpu->arch.csr;
88 
89 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
90 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
91 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
92 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
93 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
94 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
95 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
96 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
97 }
98 
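/*
 * Pass the hardware PMU to the guest: save the host counter and control
 * CSRs, program GCFG.GPERF with the number of counters exposed to the
 * guest, then restore the guest PMU CSR context.
 */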
99 static int kvm_own_pmu(struct kvm_vcpu *vcpu)
100 {
101 	unsigned long val;
102 
103 	if (!kvm_guest_has_pmu(&vcpu->arch))
104 		return -EINVAL;
105 
106 	kvm_save_host_pmu(vcpu);
107 
108 	/* Set PM0-PM(num) to guest */
109 	val = read_csr_gcfg() & ~CSR_GCFG_GPERF;
110 	val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT;
111 	write_csr_gcfg(val);
112 
113 	kvm_restore_guest_pmu(vcpu);
114 
115 	return 0;
116 }
117 
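/*
 * Inverse of kvm_own_pmu(): save the guest PMU CSR context, revoke guest
 * access by clearing GCFG.GPERF, and restore the host PMU state. The
 * KVM_LARCH_PMU flag is dropped when no guest counter is still enabled,
 * so the next guest entry can skip the PMU context switch.
 */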
118 static void kvm_lose_pmu(struct kvm_vcpu *vcpu)
119 {
120 	unsigned long val;
121 	struct loongarch_csrs *csr = vcpu->arch.csr;
122 
123 	if (!(vcpu->arch.aux_inuse & KVM_LARCH_PMU))
124 		return;
125 
126 	kvm_save_guest_pmu(vcpu);
127 
128 	/* Disable pmu access from guest */
129 	write_csr_gcfg(read_csr_gcfg() & ~CSR_GCFG_GPERF);
130 
131 	/*
132 	 * Clear KVM_LARCH_PMU if the guest is not using the PMU CSRs when
133 	 * exiting the guest, so that the next trap into the guest does not
134 	 * need to deal with the PMU CSR context.
135 	 */
136 	val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
137 	val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
138 	val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
139 	val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
140 	if (!(val & KVM_PMU_EVENT_ENABLED))
141 		vcpu->arch.aux_inuse &= ~KVM_LARCH_PMU;
142 
143 	kvm_restore_host_pmu(vcpu);
144 }
145 
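/*
 * PMU ownership is re-established lazily: kvm_restore_pmu() only raises a
 * KVM_REQ_PMU request on vCPU load, and kvm_check_pmu() performs the real
 * context switch right before guest entry, with irqs disabled.
 */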
146 static void kvm_restore_pmu(struct kvm_vcpu *vcpu)
147 {
148 	if ((vcpu->arch.aux_inuse & KVM_LARCH_PMU))
149 		kvm_make_request(KVM_REQ_PMU, vcpu);
150 }
151 
152 static void kvm_check_pmu(struct kvm_vcpu *vcpu)
153 {
154 	if (kvm_check_request(KVM_REQ_PMU, vcpu)) {
155 		kvm_own_pmu(vcpu);
156 		vcpu->arch.aux_inuse |= KVM_LARCH_PMU;
157 	}
158 }
159 
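/*
 * Publish the accumulated steal time into the guest's kvm_steal_time area
 * using an odd/even version protocol: the version is made odd before the
 * update and even again afterwards, so a guest reader can detect a torn
 * update. A guest-side reader would follow the usual seqcount-style
 * pattern (illustrative sketch only, not code from this file):
 *
 *	do {
 *		version = READ_ONCE(st->version);
 *		smp_rmb();
 *		steal = READ_ONCE(st->steal);
 *		smp_rmb();
 *	} while ((version & 1) || version != READ_ONCE(st->version));
 */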
160 static void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
161 {
162 	u32 version;
163 	u64 steal;
164 	gpa_t gpa;
165 	struct kvm_memslots *slots;
166 	struct kvm_steal_time __user *st;
167 	struct gfn_to_hva_cache *ghc;
168 
169 	ghc = &vcpu->arch.st.cache;
170 	gpa = vcpu->arch.st.guest_addr;
171 	if (!(gpa & KVM_STEAL_PHYS_VALID))
172 		return;
173 
174 	gpa &= KVM_STEAL_PHYS_MASK;
175 	slots = kvm_memslots(vcpu->kvm);
176 	if (slots->generation != ghc->generation || gpa != ghc->gpa) {
177 		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) {
178 			ghc->gpa = INVALID_GPA;
179 			return;
180 		}
181 	}
182 
183 	st = (struct kvm_steal_time __user *)ghc->hva;
184 	unsafe_get_user(version, &st->version, out);
185 	if (version & 1)
186 		version += 1; /* first time write, random junk */
187 
188 	version += 1;
189 	unsafe_put_user(version, &st->version, out);
190 	smp_wmb();
191 
192 	unsafe_get_user(steal, &st->steal, out);
193 	steal += current->sched_info.run_delay - vcpu->arch.st.last_steal;
194 	vcpu->arch.st.last_steal = current->sched_info.run_delay;
195 	unsafe_put_user(steal, &st->steal, out);
196 
197 	smp_wmb();
198 	version += 1;
199 	unsafe_put_user(version, &st->version, out);
200 out:
201 	mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
202 }
203 
204 /*
205  * kvm_check_requests - check and handle pending vCPU requests
206  *
207  * Return: RESUME_GUEST if we should enter the guest
208  *         RESUME_HOST  if we should exit to userspace
209  */
210 static int kvm_check_requests(struct kvm_vcpu *vcpu)
211 {
212 	if (!kvm_request_pending(vcpu))
213 		return RESUME_GUEST;
214 
215 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
216 		vcpu->arch.vpid = 0;  /* Drop vpid for this vCPU */
217 
218 	if (kvm_dirty_ring_check_request(vcpu))
219 		return RESUME_HOST;
220 
221 	if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
222 		kvm_update_stolen_time(vcpu);
223 
224 	return RESUME_GUEST;
225 }
226 
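/*
 * Late request check, called with irqs disabled right before guest entry.
 * It handles KVM_REQ_TLB_FLUSH_GPA, which must be processed after
 * kvm_check_vpid() has set up CSR.GSTAT for kvm_flush_tlb_gpa().
 */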
227 static void kvm_late_check_requests(struct kvm_vcpu *vcpu)
228 {
229 	lockdep_assert_irqs_disabled();
230 	if (kvm_check_request(KVM_REQ_TLB_FLUSH_GPA, vcpu))
231 		if (vcpu->arch.flush_gpa != INVALID_GPA) {
232 			kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa);
233 			vcpu->arch.flush_gpa = INVALID_GPA;
234 		}
235 }
236 
237 /*
238  * Check and handle pending signals and vCPU requests, etc.
239  * Runs with irqs enabled and preemption enabled
240  *
241  * Return: RESUME_GUEST if we should enter the guest
242  *         RESUME_HOST  if we should exit to userspace
243  *         < 0 if we should exit to userspace, where the return value
244  *         indicates an error
245  */
246 static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
247 {
248 	int idx, ret;
249 
250 	/*
251 	 * Check conditions before entering the guest
252 	 */
253 	ret = kvm_xfer_to_guest_mode_handle_work(vcpu);
254 	if (ret < 0)
255 		return ret;
256 
257 	idx = srcu_read_lock(&vcpu->kvm->srcu);
258 	ret = kvm_check_requests(vcpu);
259 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
260 
261 	return ret;
262 }
263 
264 /*
265  * Called with irq enabled
266  *
267  * Return: RESUME_GUEST if we should enter the guest, and irq disabled
268  *         Others if we should exit to userspace
269  */
270 static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
271 {
272 	int ret;
273 
274 	do {
275 		ret = kvm_enter_guest_check(vcpu);
276 		if (ret != RESUME_GUEST)
277 			break;
278 
279 		/*
280 		 * Handle the vCPU timer and interrupts, check requests and
281 		 * check the vmid before the vCPU enters the guest
282 		 */
283 		local_irq_disable();
284 		kvm_deliver_intr(vcpu);
285 		kvm_deliver_exception(vcpu);
286 		/* Make sure the vcpu mode has been written */
287 		smp_store_mb(vcpu->mode, IN_GUEST_MODE);
288 		kvm_check_vpid(vcpu);
289 		kvm_check_pmu(vcpu);
290 
291 		/*
292 		 * Called after kvm_check_vpid(), since that function updates
293 		 * CSR.GSTAT, which is used by kvm_flush_tlb_gpa(), and it may
294 		 * also clear the pending KVM_REQ_TLB_FLUSH_GPA bit
295 		 */
296 		kvm_late_check_requests(vcpu);
297 		vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY);
298 		/* Clear KVM_LARCH_SWCSR_LATEST as CSRs will change when entering the guest */
299 		vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
300 
301 		if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) {
302 			kvm_lose_pmu(vcpu);
303 			/* make sure the vcpu mode has been written */
304 			smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE);
305 			local_irq_enable();
306 			ret = -EAGAIN;
307 		}
308 	} while (ret != RESUME_GUEST);
309 
310 	return ret;
311 }
312 
313 /*
314  * Return 1 for resume guest and "<= 0" for resume host.
315  */
316 static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
317 {
318 	int ret = RESUME_GUEST;
319 	unsigned long estat = vcpu->arch.host_estat;
320 	u32 intr = estat & CSR_ESTAT_IS;
321 	u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
322 
323 	vcpu->mode = OUTSIDE_GUEST_MODE;
324 
325 	/* Set a default exit reason */
326 	run->exit_reason = KVM_EXIT_UNKNOWN;
327 
328 	kvm_lose_pmu(vcpu);
329 
330 	guest_timing_exit_irqoff();
331 	guest_state_exit_irqoff();
332 	local_irq_enable();
333 
334 	trace_kvm_exit(vcpu, ecode);
335 	if (ecode) {
336 		ret = kvm_handle_fault(vcpu, ecode);
337 	} else {
338 		WARN(!intr, "vm exiting with suspicious irq\n");
339 		++vcpu->stat.int_exits;
340 	}
341 
342 	if (ret == RESUME_GUEST)
343 		ret = kvm_pre_enter_guest(vcpu);
344 
345 	if (ret != RESUME_GUEST) {
346 		local_irq_disable();
347 		return ret;
348 	}
349 
350 	guest_timing_enter_irqoff();
351 	guest_state_enter_irqoff();
352 	trace_kvm_reenter(vcpu);
353 
354 	return RESUME_GUEST;
355 }
356 
357 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
358 {
359 	return !!(vcpu->arch.irq_pending) &&
360 		vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE;
361 }
362 
363 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
364 {
365 	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
366 }
367 
368 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
369 {
370 	unsigned long val;
371 
372 	preempt_disable();
373 	val = gcsr_read(LOONGARCH_CSR_CRMD);
374 	preempt_enable();
375 
376 	return (val & CSR_PRMD_PPLV) == PLV_KERN;
377 }
378 
379 #ifdef CONFIG_GUEST_PERF_EVENTS
380 unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
381 {
382 	return vcpu->arch.pc;
383 }
384 
385 /*
386  * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
387  * arrived in guest context.  For LoongArch64, if PMU is not passthrough to VM,
388  * any event that arrives while a vCPU is loaded is considered to be "in guest".
389  */
390 bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
391 {
392 	return (vcpu && !(vcpu->arch.aux_inuse & KVM_LARCH_PMU));
393 }
394 #endif
395 
396 bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
397 {
398 	return false;
399 }
400 
401 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
402 {
403 	return VM_FAULT_SIGBUS;
404 }
405 
406 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
407 				  struct kvm_translation *tr)
408 {
409 	return -EINVAL;
410 }
411 
412 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
413 {
414 	int ret;
415 
416 	/* Protect from TOD sync and vcpu_load/put() */
417 	preempt_disable();
418 	ret = kvm_pending_timer(vcpu) ||
419 		kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT) & (1 << INT_TI);
420 	preempt_enable();
421 
422 	return ret;
423 }
424 
425 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
426 {
427 	int i;
428 
429 	kvm_debug("vCPU Register Dump:\n");
430 	kvm_debug("\tPC = 0x%08lx\n", vcpu->arch.pc);
431 	kvm_debug("\tExceptions: %08lx\n", vcpu->arch.irq_pending);
432 
433 	for (i = 0; i < 32; i += 4) {
434 		kvm_debug("\tGPR%02d: %08lx %08lx %08lx %08lx\n", i,
435 		       vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1],
436 		       vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
437 	}
438 
439 	kvm_debug("\tCRMD: 0x%08lx, ESTAT: 0x%08lx\n",
440 		  kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD),
441 		  kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT));
442 
443 	kvm_debug("\tERA: 0x%08lx\n", kvm_read_hw_gcsr(LOONGARCH_CSR_ERA));
444 
445 	return 0;
446 }
447 
448 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
449 				struct kvm_mp_state *mp_state)
450 {
451 	*mp_state = vcpu->arch.mp_state;
452 
453 	return 0;
454 }
455 
456 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
457 				struct kvm_mp_state *mp_state)
458 {
459 	int ret = 0;
460 
461 	switch (mp_state->mp_state) {
462 	case KVM_MP_STATE_RUNNABLE:
463 		vcpu->arch.mp_state = *mp_state;
464 		break;
465 	default:
466 		ret = -EINVAL;
467 	}
468 
469 	return ret;
470 }
471 
472 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
473 					struct kvm_guest_debug *dbg)
474 {
475 	if (dbg->control & ~KVM_GUESTDBG_VALID_MASK)
476 		return -EINVAL;
477 
478 	if (dbg->control & KVM_GUESTDBG_ENABLE)
479 		vcpu->guest_debug = dbg->control;
480 	else
481 		vcpu->guest_debug = 0;
482 
483 	return 0;
484 }
485 
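/*
 * Bind the vCPU to a guest physical CPUID in the VM-wide phyid map. A
 * CPUID can be set only once per vCPU and cannot be shared between vCPUs;
 * rewriting the same value is silently accepted.
 */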
486 static inline int kvm_set_cpuid(struct kvm_vcpu *vcpu, u64 val)
487 {
488 	int cpuid;
489 	struct kvm_phyid_map *map;
490 	struct loongarch_csrs *csr = vcpu->arch.csr;
491 
492 	if (val >= KVM_MAX_PHYID)
493 		return -EINVAL;
494 
495 	map = vcpu->kvm->arch.phyid_map;
496 	cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);
497 
498 	spin_lock(&vcpu->kvm->arch.phyid_map_lock);
499 	if ((cpuid < KVM_MAX_PHYID) && map->phys_map[cpuid].enabled) {
500 		/* Discard duplicated CPUID set operation */
501 		if (cpuid == val) {
502 			spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
503 			return 0;
504 		}
505 
506 		/*
507 		 * The CPUID has already been set; forbid changing it
508 		 * to a different value at runtime
509 		 */
510 		spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
511 		return -EINVAL;
512 	}
513 
514 	if (map->phys_map[val].enabled) {
515 		/* Discard duplicated CPUID set operation */
516 		if (vcpu == map->phys_map[val].vcpu) {
517 			spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
518 			return 0;
519 		}
520 
521 		/*
522 		 * The new CPUID is already in use by another vCPU;
523 		 * forbid sharing the same CPUID between different vCPUs
524 		 */
525 		spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
526 		return -EINVAL;
527 	}
528 
529 	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, val);
530 	map->phys_map[val].enabled	= true;
531 	map->phys_map[val].vcpu		= vcpu;
532 	spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
533 
534 	return 0;
535 }
536 
537 static inline void kvm_drop_cpuid(struct kvm_vcpu *vcpu)
538 {
539 	int cpuid;
540 	struct kvm_phyid_map *map;
541 	struct loongarch_csrs *csr = vcpu->arch.csr;
542 
543 	map = vcpu->kvm->arch.phyid_map;
544 	cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);
545 
546 	if (cpuid >= KVM_MAX_PHYID)
547 		return;
548 
549 	spin_lock(&vcpu->kvm->arch.phyid_map_lock);
550 	if (map->phys_map[cpuid].enabled) {
551 		map->phys_map[cpuid].vcpu = NULL;
552 		map->phys_map[cpuid].enabled = false;
553 		kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);
554 	}
555 	spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
556 }
557 
558 struct kvm_vcpu *kvm_get_vcpu_by_cpuid(struct kvm *kvm, int cpuid)
559 {
560 	struct kvm_phyid_map *map;
561 
562 	if (cpuid >= KVM_MAX_PHYID)
563 		return NULL;
564 
565 	map = kvm->arch.phyid_map;
566 	if (!map->phys_map[cpuid].enabled)
567 		return NULL;
568 
569 	return map->phys_map[cpuid].vcpu;
570 }
571 
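/*
 * CSR accessors backing the KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls. Only
 * the software CSR copy is touched; ESTAT and CPUID need special handling,
 * since interrupt state is carried in GINTC and a CPUID write must update
 * the phyid map.
 */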
572 static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val)
573 {
574 	unsigned long gintc;
575 	struct loongarch_csrs *csr = vcpu->arch.csr;
576 
577 	if (get_gcsr_flag(id) & INVALID_GCSR)
578 		return -EINVAL;
579 
580 	if (id == LOONGARCH_CSR_ESTAT) {
581 		preempt_disable();
582 		vcpu_load(vcpu);
583 		/*
584 		 * Sync pending interrupts into ESTAT so that the interrupt
585 		 * state is preserved across the VM migration stage
586 		 */
587 		kvm_deliver_intr(vcpu);
588 		vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
589 		vcpu_put(vcpu);
590 		preempt_enable();
591 
592 		/* ESTAT IP0~IP7 are taken from GINTC */
593 		gintc = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC) & 0xff;
594 		*val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT) | (gintc << 2);
595 		return 0;
596 	}
597 
598 	/*
599 	 * Get software CSR state since software state is consistent
600 	 * with hardware for synchronous ioctl
601 	 */
602 	*val = kvm_read_sw_gcsr(csr, id);
603 
604 	return 0;
605 }
606 
607 static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
608 {
609 	int ret = 0, gintc;
610 	struct loongarch_csrs *csr = vcpu->arch.csr;
611 
612 	if (get_gcsr_flag(id) & INVALID_GCSR)
613 		return -EINVAL;
614 
615 	if (id == LOONGARCH_CSR_CPUID)
616 		return kvm_set_cpuid(vcpu, val);
617 
618 	if (id == LOONGARCH_CSR_ESTAT) {
619 		/* ESTAT IP0~IP7 are injected through GINTC */
620 		gintc = (val >> 2) & 0xff;
621 		kvm_set_sw_gcsr(csr, LOONGARCH_CSR_GINTC, gintc);
622 
623 		gintc = val & ~(0xffUL << 2);
624 		kvm_set_sw_gcsr(csr, LOONGARCH_CSR_ESTAT, gintc);
625 
626 		return ret;
627 	}
628 
629 	kvm_write_sw_gcsr(csr, id, val);
630 
631 	/*
632 	 * After modifying the vCPU's PMU CSR values, set KVM_REQ_PMU
633 	 * if any of the PMU CSRs are in use.
634 	 */
635 	if (id >= LOONGARCH_CSR_PERFCTRL0 && id <= LOONGARCH_CSR_PERFCNTR3) {
636 		unsigned long val;
637 
638 		val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0) |
639 		      kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1) |
640 		      kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2) |
641 		      kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
642 
643 		if (val & KVM_PMU_EVENT_ENABLED)
644 			kvm_make_request(KVM_REQ_PMU, vcpu);
645 	}
646 
647 	return ret;
648 }
649 
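/*
 * Return, for a given CPUCFG index, the mask of bits that userspace is
 * allowed to set; bits reserved by hardware or not supported by KVM read
 * back as zero.
 */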
650 static int _kvm_get_cpucfg_mask(int id, u64 *v)
651 {
652 	if (id < 0 || id >= KVM_MAX_CPUCFG_REGS)
653 		return -EINVAL;
654 
655 	switch (id) {
656 	case LOONGARCH_CPUCFG0:
657 		*v = GENMASK(31, 0);
658 		return 0;
659 	case LOONGARCH_CPUCFG1:
660 		/* CPUCFG1_MSGINT is not supported by KVM */
661 		*v = GENMASK(25, 0);
662 		return 0;
663 	case LOONGARCH_CPUCFG2:
664 		/* CPUCFG2 features unconditionally supported by KVM */
665 		*v = CPUCFG2_FP     | CPUCFG2_FPSP  | CPUCFG2_FPDP     |
666 		     CPUCFG2_FPVERS | CPUCFG2_LLFTP | CPUCFG2_LLFTPREV |
667 		     CPUCFG2_LSPW | CPUCFG2_LAM;
668 		/*
669 		 * For the ISA extensions listed below, if one is supported
670 		 * by the host, then it is also supported by KVM.
671 		 */
672 		if (cpu_has_lsx)
673 			*v |= CPUCFG2_LSX;
674 		if (cpu_has_lasx)
675 			*v |= CPUCFG2_LASX;
676 		if (cpu_has_lbt_x86)
677 			*v |= CPUCFG2_X86BT;
678 		if (cpu_has_lbt_arm)
679 			*v |= CPUCFG2_ARMBT;
680 		if (cpu_has_lbt_mips)
681 			*v |= CPUCFG2_MIPSBT;
682 		if (cpu_has_ptw)
683 			*v |= CPUCFG2_PTW;
684 
685 		return 0;
686 	case LOONGARCH_CPUCFG3:
687 		*v = GENMASK(16, 0);
688 		return 0;
689 	case LOONGARCH_CPUCFG4:
690 	case LOONGARCH_CPUCFG5:
691 		*v = GENMASK(31, 0);
692 		return 0;
693 	case LOONGARCH_CPUCFG6:
694 		if (cpu_has_pmp)
695 			*v = GENMASK(14, 0);
696 		else
697 			*v = 0;
698 		return 0;
699 	case LOONGARCH_CPUCFG16:
700 		*v = GENMASK(16, 0);
701 		return 0;
702 	case LOONGARCH_CPUCFG17 ... LOONGARCH_CPUCFG20:
703 		*v = GENMASK(30, 0);
704 		return 0;
705 	default:
706 		/*
707 		 * CPUCFG bits should be zero if reserved by HW or not
708 		 * supported by KVM.
709 		 */
710 		*v = 0;
711 		return 0;
712 	}
713 }
714 
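/*
 * Validate a userspace-supplied CPUCFG value: it must fit within the
 * writable mask and respect the architectural dependencies (a constant
 * timer is mandatory, LSX implies FP, LASX implies LSX), and the PMU
 * properties may not exceed what the host reports.
 */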
715 static int kvm_check_cpucfg(int id, u64 val)
716 {
717 	int ret;
718 	u64 mask = 0;
719 
720 	ret = _kvm_get_cpucfg_mask(id, &mask);
721 	if (ret)
722 		return ret;
723 
724 	if (val & ~mask)
725 		/* Unsupported features and/or the higher 32 bits should not be set */
726 		return -EINVAL;
727 
728 	switch (id) {
729 	case LOONGARCH_CPUCFG2:
730 		if (!(val & CPUCFG2_LLFTP))
731 			/* Guests must have a constant timer */
732 			return -EINVAL;
733 		if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP)))
734 			/* Single and double float point must both be set when FP is enabled */
735 			return -EINVAL;
736 		if ((val & CPUCFG2_LSX) && !(val & CPUCFG2_FP))
737 			/* LSX architecturally implies FP but val does not satisfy that */
738 			return -EINVAL;
739 		if ((val & CPUCFG2_LASX) && !(val & CPUCFG2_LSX))
740 			/* LASX architecturally implies LSX and FP but val does not satisfy that */
741 			return -EINVAL;
742 		return 0;
743 	case LOONGARCH_CPUCFG6:
744 		if (val & CPUCFG6_PMP) {
745 			u32 host = read_cpucfg(LOONGARCH_CPUCFG6);
746 			if ((val & CPUCFG6_PMBITS) != (host & CPUCFG6_PMBITS))
747 				return -EINVAL;
748 			if ((val & CPUCFG6_PMNUM) > (host & CPUCFG6_PMNUM))
749 				return -EINVAL;
750 			if ((val & CPUCFG6_UPM) && !(host & CPUCFG6_UPM))
751 				return -EINVAL;
752 		}
753 		return 0;
754 	default:
755 		/*
756 		 * Values for the other CPUCFG IDs are not being further validated
757 		 * besides the mask check above.
758 		 */
759 		return 0;
760 	}
761 }
762 
763 static int kvm_get_one_reg(struct kvm_vcpu *vcpu,
764 		const struct kvm_one_reg *reg, u64 *v)
765 {
766 	int id, ret = 0;
767 	u64 type = reg->id & KVM_REG_LOONGARCH_MASK;
768 
769 	switch (type) {
770 	case KVM_REG_LOONGARCH_CSR:
771 		id = KVM_GET_IOC_CSR_IDX(reg->id);
772 		ret = _kvm_getcsr(vcpu, id, v);
773 		break;
774 	case KVM_REG_LOONGARCH_CPUCFG:
775 		id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
776 		if (id >= 0 && id < KVM_MAX_CPUCFG_REGS)
777 			*v = vcpu->arch.cpucfg[id];
778 		else
779 			ret = -EINVAL;
780 		break;
781 	case KVM_REG_LOONGARCH_LBT:
782 		if (!kvm_guest_has_lbt(&vcpu->arch))
783 			return -ENXIO;
784 
785 		switch (reg->id) {
786 		case KVM_REG_LOONGARCH_LBT_SCR0:
787 			*v = vcpu->arch.lbt.scr0;
788 			break;
789 		case KVM_REG_LOONGARCH_LBT_SCR1:
790 			*v = vcpu->arch.lbt.scr1;
791 			break;
792 		case KVM_REG_LOONGARCH_LBT_SCR2:
793 			*v = vcpu->arch.lbt.scr2;
794 			break;
795 		case KVM_REG_LOONGARCH_LBT_SCR3:
796 			*v = vcpu->arch.lbt.scr3;
797 			break;
798 		case KVM_REG_LOONGARCH_LBT_EFLAGS:
799 			*v = vcpu->arch.lbt.eflags;
800 			break;
801 		case KVM_REG_LOONGARCH_LBT_FTOP:
802 			*v = vcpu->arch.fpu.ftop;
803 			break;
804 		default:
805 			ret = -EINVAL;
806 			break;
807 		}
808 		break;
809 	case KVM_REG_LOONGARCH_KVM:
810 		switch (reg->id) {
811 		case KVM_REG_LOONGARCH_COUNTER:
812 			*v = drdtime() + vcpu->kvm->arch.time_offset;
813 			break;
814 		case KVM_REG_LOONGARCH_DEBUG_INST:
815 			*v = INSN_HVCL | KVM_HCALL_SWDBG;
816 			break;
817 		default:
818 			ret = -EINVAL;
819 			break;
820 		}
821 		break;
822 	default:
823 		ret = -EINVAL;
824 		break;
825 	}
826 
827 	return ret;
828 }
829 
830 static int kvm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
831 {
832 	int ret = 0;
833 	u64 v, size = reg->id & KVM_REG_SIZE_MASK;
834 
835 	switch (size) {
836 	case KVM_REG_SIZE_U64:
837 		ret = kvm_get_one_reg(vcpu, reg, &v);
838 		if (ret)
839 			return ret;
840 		ret = put_user(v, (u64 __user *)(long)reg->addr);
841 		break;
842 	default:
843 		ret = -EINVAL;
844 		break;
845 	}
846 
847 	return ret;
848 }
849 
850 static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
851 			const struct kvm_one_reg *reg, u64 v)
852 {
853 	int id, ret = 0;
854 	u64 type = reg->id & KVM_REG_LOONGARCH_MASK;
855 
856 	switch (type) {
857 	case KVM_REG_LOONGARCH_CSR:
858 		id = KVM_GET_IOC_CSR_IDX(reg->id);
859 		ret = _kvm_setcsr(vcpu, id, v);
860 		break;
861 	case KVM_REG_LOONGARCH_CPUCFG:
862 		id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
863 		ret = kvm_check_cpucfg(id, v);
864 		if (ret)
865 			break;
866 		vcpu->arch.cpucfg[id] = (u32)v;
867 		if (id == LOONGARCH_CPUCFG6)
868 			vcpu->arch.max_pmu_csrid =
869 				LOONGARCH_CSR_PERFCTRL0 + 2 * kvm_get_pmu_num(&vcpu->arch) + 1;
870 		break;
871 	case KVM_REG_LOONGARCH_LBT:
872 		if (!kvm_guest_has_lbt(&vcpu->arch))
873 			return -ENXIO;
874 
875 		switch (reg->id) {
876 		case KVM_REG_LOONGARCH_LBT_SCR0:
877 			vcpu->arch.lbt.scr0 = v;
878 			break;
879 		case KVM_REG_LOONGARCH_LBT_SCR1:
880 			vcpu->arch.lbt.scr1 = v;
881 			break;
882 		case KVM_REG_LOONGARCH_LBT_SCR2:
883 			vcpu->arch.lbt.scr2 = v;
884 			break;
885 		case KVM_REG_LOONGARCH_LBT_SCR3:
886 			vcpu->arch.lbt.scr3 = v;
887 			break;
888 		case KVM_REG_LOONGARCH_LBT_EFLAGS:
889 			vcpu->arch.lbt.eflags = v;
890 			break;
891 		case KVM_REG_LOONGARCH_LBT_FTOP:
892 			vcpu->arch.fpu.ftop = v;
893 			break;
894 		default:
895 			ret = -EINVAL;
896 			break;
897 		}
898 		break;
899 	case KVM_REG_LOONGARCH_KVM:
900 		switch (reg->id) {
901 		case KVM_REG_LOONGARCH_COUNTER:
902 			/*
903 			 * gftoffset is board-wide rather than per vCPU, so on an
904 			 * SMP system only vCPU 0 sets it
905 			 */
906 			if (vcpu->vcpu_id == 0)
907 				vcpu->kvm->arch.time_offset = (signed long)(v - drdtime());
908 			break;
909 		case KVM_REG_LOONGARCH_VCPU_RESET:
910 			vcpu->arch.st.guest_addr = 0;
911 			memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
912 			memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
913 
914 			/*
915 			 * On vCPU reset, clear the ESTAT and GINTC registers here;
916 			 * the other CSR registers are cleared via _kvm_setcsr().
917 			 */
918 			kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_GINTC, 0);
919 			kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_ESTAT, 0);
920 			break;
921 		default:
922 			ret = -EINVAL;
923 			break;
924 		}
925 		break;
926 	default:
927 		ret = -EINVAL;
928 		break;
929 	}
930 
931 	return ret;
932 }
933 
934 static int kvm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
935 {
936 	int ret = 0;
937 	u64 v, size = reg->id & KVM_REG_SIZE_MASK;
938 
939 	switch (size) {
940 	case KVM_REG_SIZE_U64:
941 		ret = get_user(v, (u64 __user *)(long)reg->addr);
942 		if (ret)
943 			return ret;
944 		break;
945 	default:
946 		return -EINVAL;
947 	}
948 
949 	return kvm_set_one_reg(vcpu, reg, v);
950 }
951 
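/*
 * Userspace reaches these registers through the generic KVM_GET_ONE_REG/
 * KVM_SET_ONE_REG vCPU ioctls with a struct kvm_one_reg. An illustrative
 * VMM-side sketch, assuming the usual LoongArch register-id encoding of
 * KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | <csr index>:
 *
 *	__u64 val = 0;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | LOONGARCH_CSR_ERA,
 *		.addr = (__u64)(unsigned long)&val,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
 *		err(1, "KVM_GET_ONE_REG");
 */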
952 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
953 {
954 	return -ENOIOCTLCMD;
955 }
956 
957 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
958 {
959 	return -ENOIOCTLCMD;
960 }
961 
962 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
963 {
964 	int i;
965 
966 	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
967 		regs->gpr[i] = vcpu->arch.gprs[i];
968 
969 	regs->pc = vcpu->arch.pc;
970 
971 	return 0;
972 }
973 
974 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
975 {
976 	int i;
977 
978 	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
979 		vcpu->arch.gprs[i] = regs->gpr[i];
980 
981 	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
982 	vcpu->arch.pc = regs->pc;
983 
984 	return 0;
985 }
986 
987 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
988 				     struct kvm_enable_cap *cap)
989 {
990 	/* FPU is enabled by default, will support LSX/LASX later. */
991 	return -EINVAL;
992 }
993 
994 static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu,
995 					 struct kvm_device_attr *attr)
996 {
997 	switch (attr->attr) {
998 	case LOONGARCH_CPUCFG2:
999 	case LOONGARCH_CPUCFG6:
1000 		return 0;
1001 	case CPUCFG_KVM_FEATURE:
1002 		return 0;
1003 	default:
1004 		return -ENXIO;
1005 	}
1006 
1007 	return -ENXIO;
1008 }
1009 
1010 static int kvm_loongarch_pvtime_has_attr(struct kvm_vcpu *vcpu,
1011 					 struct kvm_device_attr *attr)
1012 {
1013 	if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
1014 			|| attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
1015 		return -ENXIO;
1016 
1017 	return 0;
1018 }
1019 
1020 static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
1021 				       struct kvm_device_attr *attr)
1022 {
1023 	int ret = -ENXIO;
1024 
1025 	switch (attr->group) {
1026 	case KVM_LOONGARCH_VCPU_CPUCFG:
1027 		ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr);
1028 		break;
1029 	case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
1030 		ret = kvm_loongarch_pvtime_has_attr(vcpu, attr);
1031 		break;
1032 	default:
1033 		break;
1034 	}
1035 
1036 	return ret;
1037 }
1038 
1039 static int kvm_loongarch_cpucfg_get_attr(struct kvm_vcpu *vcpu,
1040 					 struct kvm_device_attr *attr)
1041 {
1042 	int ret = 0;
1043 	uint64_t val;
1044 	uint64_t __user *uaddr = (uint64_t __user *)attr->addr;
1045 
1046 	switch (attr->attr) {
1047 	case 0 ... (KVM_MAX_CPUCFG_REGS - 1):
1048 		ret = _kvm_get_cpucfg_mask(attr->attr, &val);
1049 		if (ret)
1050 			return ret;
1051 		break;
1052 	case CPUCFG_KVM_FEATURE:
1053 		val = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK;
1054 		break;
1055 	default:
1056 		return -ENXIO;
1057 	}
1058 
1059 	put_user(val, uaddr);
1060 
1061 	return ret;
1062 }
1063 
1064 static int kvm_loongarch_pvtime_get_attr(struct kvm_vcpu *vcpu,
1065 					 struct kvm_device_attr *attr)
1066 {
1067 	u64 gpa;
1068 	u64 __user *user = (u64 __user *)attr->addr;
1069 
1070 	if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
1071 			|| attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
1072 		return -ENXIO;
1073 
1074 	gpa = vcpu->arch.st.guest_addr;
1075 	if (put_user(gpa, user))
1076 		return -EFAULT;
1077 
1078 	return 0;
1079 }
1080 
1081 static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu,
1082 				       struct kvm_device_attr *attr)
1083 {
1084 	int ret = -ENXIO;
1085 
1086 	switch (attr->group) {
1087 	case KVM_LOONGARCH_VCPU_CPUCFG:
1088 		ret = kvm_loongarch_cpucfg_get_attr(vcpu, attr);
1089 		break;
1090 	case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
1091 		ret = kvm_loongarch_pvtime_get_attr(vcpu, attr);
1092 		break;
1093 	default:
1094 		break;
1095 	}
1096 
1097 	return ret;
1098 }
1099 
1100 static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu,
1101 					 struct kvm_device_attr *attr)
1102 {
1103 	u64 val, valid;
1104 	u64 __user *user = (u64 __user *)attr->addr;
1105 	struct kvm *kvm = vcpu->kvm;
1106 
1107 	switch (attr->attr) {
1108 	case CPUCFG_KVM_FEATURE:
1109 		if (get_user(val, user))
1110 			return -EFAULT;
1111 
1112 		valid = LOONGARCH_PV_FEAT_MASK;
1113 		if (val & ~valid)
1114 			return -EINVAL;
1115 
1116 		/* All vCPUs must be set to the same PV features */
1117 		if ((kvm->arch.pv_features & LOONGARCH_PV_FEAT_UPDATED)
1118 				&& ((kvm->arch.pv_features & valid) != val))
1119 			return -EINVAL;
1120 		kvm->arch.pv_features = val | LOONGARCH_PV_FEAT_UPDATED;
1121 		return 0;
1122 	default:
1123 		return -ENXIO;
1124 	}
1125 }
1126 
1127 static int kvm_loongarch_pvtime_set_attr(struct kvm_vcpu *vcpu,
1128 					 struct kvm_device_attr *attr)
1129 {
1130 	int idx, ret = 0;
1131 	u64 gpa, __user *user = (u64 __user *)attr->addr;
1132 	struct kvm *kvm = vcpu->kvm;
1133 
1134 	if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
1135 			|| attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
1136 		return -ENXIO;
1137 
1138 	if (get_user(gpa, user))
1139 		return -EFAULT;
1140 
1141 	if (gpa & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
1142 		return -EINVAL;
1143 
1144 	if (!(gpa & KVM_STEAL_PHYS_VALID)) {
1145 		vcpu->arch.st.guest_addr = gpa;
1146 		return 0;
1147 	}
1148 
1149 	/* Check the address is in a valid memslot */
1150 	idx = srcu_read_lock(&kvm->srcu);
1151 	if (kvm_is_error_hva(gfn_to_hva(kvm, gpa >> PAGE_SHIFT)))
1152 		ret = -EINVAL;
1153 	srcu_read_unlock(&kvm->srcu, idx);
1154 
1155 	if (!ret) {
1156 		vcpu->arch.st.guest_addr = gpa;
1157 		vcpu->arch.st.last_steal = current->sched_info.run_delay;
1158 		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
1159 	}
1160 
1161 	return ret;
1162 }
1163 
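/*
 * The steal-time base address is configured per vCPU via KVM_SET_DEVICE_ATTR.
 * An illustrative VMM-side sketch (the GPA value is hypothetical; it must
 * point into a registered memslot and carry KVM_STEAL_PHYS_VALID to enable
 * the feature):
 *
 *	__u64 gpa = guest_pa | KVM_STEAL_PHYS_VALID;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_LOONGARCH_VCPU_PVTIME_CTRL,
 *		.attr  = KVM_LOONGARCH_VCPU_PVTIME_GPA,
 *		.addr  = (__u64)(unsigned long)&gpa,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr) < 0)
 *		err(1, "KVM_SET_DEVICE_ATTR");
 */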
1164 static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu,
1165 				       struct kvm_device_attr *attr)
1166 {
1167 	int ret = -ENXIO;
1168 
1169 	switch (attr->group) {
1170 	case KVM_LOONGARCH_VCPU_CPUCFG:
1171 		ret = kvm_loongarch_cpucfg_set_attr(vcpu, attr);
1172 		break;
1173 	case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
1174 		ret = kvm_loongarch_pvtime_set_attr(vcpu, attr);
1175 		break;
1176 	default:
1177 		break;
1178 	}
1179 
1180 	return ret;
1181 }
1182 
1183 long kvm_arch_vcpu_ioctl(struct file *filp,
1184 			 unsigned int ioctl, unsigned long arg)
1185 {
1186 	long r;
1187 	struct kvm_device_attr attr;
1188 	void __user *argp = (void __user *)arg;
1189 	struct kvm_vcpu *vcpu = filp->private_data;
1190 
1191 	/*
1192 	 * Only software CSR should be modified
1193 	 *
1194 	 * If any hardware CSR register is modified, a vcpu_load/vcpu_put pair
1195 	 * should be used, since the CSR registers are owned by this vCPU and
1196 	 * other vCPUs would need to reload them after a switch.
1197 	 *
1198 	 * If a software CSR is modified, the KVM_LARCH_HWCSR_USABLE bit should
1199 	 * be cleared in vcpu->arch.aux_inuse, so that vcpu_load checks the
1200 	 * aux_inuse flag and reloads the CSR registers from the software state.
1201 	 */
1202 
1203 	switch (ioctl) {
1204 	case KVM_SET_ONE_REG:
1205 	case KVM_GET_ONE_REG: {
1206 		struct kvm_one_reg reg;
1207 
1208 		r = -EFAULT;
1209 		if (copy_from_user(&reg, argp, sizeof(reg)))
1210 			break;
1211 		if (ioctl == KVM_SET_ONE_REG) {
1212 			r = kvm_set_reg(vcpu, &reg);
1213 			vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
1214 		} else
1215 			r = kvm_get_reg(vcpu, &reg);
1216 		break;
1217 	}
1218 	case KVM_ENABLE_CAP: {
1219 		struct kvm_enable_cap cap;
1220 
1221 		r = -EFAULT;
1222 		if (copy_from_user(&cap, argp, sizeof(cap)))
1223 			break;
1224 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
1225 		break;
1226 	}
1227 	case KVM_HAS_DEVICE_ATTR: {
1228 		r = -EFAULT;
1229 		if (copy_from_user(&attr, argp, sizeof(attr)))
1230 			break;
1231 		r = kvm_loongarch_vcpu_has_attr(vcpu, &attr);
1232 		break;
1233 	}
1234 	case KVM_GET_DEVICE_ATTR: {
1235 		r = -EFAULT;
1236 		if (copy_from_user(&attr, argp, sizeof(attr)))
1237 			break;
1238 		r = kvm_loongarch_vcpu_get_attr(vcpu, &attr);
1239 		break;
1240 	}
1241 	case KVM_SET_DEVICE_ATTR: {
1242 		r = -EFAULT;
1243 		if (copy_from_user(&attr, argp, sizeof(attr)))
1244 			break;
1245 		r = kvm_loongarch_vcpu_set_attr(vcpu, &attr);
1246 		break;
1247 	}
1248 	default:
1249 		r = -ENOIOCTLCMD;
1250 		break;
1251 	}
1252 
1253 	return r;
1254 }
1255 
1256 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1257 {
1258 	int i = 0;
1259 
1260 	fpu->fcc = vcpu->arch.fpu.fcc;
1261 	fpu->fcsr = vcpu->arch.fpu.fcsr;
1262 	for (i = 0; i < NUM_FPU_REGS; i++)
1263 		memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i], FPU_REG_WIDTH / 64);
1264 
1265 	return 0;
1266 }
1267 
1268 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1269 {
1270 	int i = 0;
1271 
1272 	vcpu->arch.fpu.fcc = fpu->fcc;
1273 	vcpu->arch.fpu.fcsr = fpu->fcsr;
1274 	for (i = 0; i < NUM_FPU_REGS; i++)
1275 		memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i], FPU_REG_WIDTH / 64);
1276 
1277 	return 0;
1278 }
1279 
1280 #ifdef CONFIG_CPU_HAS_LBT
1281 int kvm_own_lbt(struct kvm_vcpu *vcpu)
1282 {
1283 	if (!kvm_guest_has_lbt(&vcpu->arch))
1284 		return -EINVAL;
1285 
1286 	preempt_disable();
1287 	if (!(vcpu->arch.aux_inuse & KVM_LARCH_LBT)) {
1288 		set_csr_euen(CSR_EUEN_LBTEN);
1289 		_restore_lbt(&vcpu->arch.lbt);
1290 		vcpu->arch.aux_inuse |= KVM_LARCH_LBT;
1291 	}
1292 	preempt_enable();
1293 
1294 	return 0;
1295 }
1296 
1297 static void kvm_lose_lbt(struct kvm_vcpu *vcpu)
1298 {
1299 	preempt_disable();
1300 	if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) {
1301 		_save_lbt(&vcpu->arch.lbt);
1302 		clear_csr_euen(CSR_EUEN_LBTEN);
1303 		vcpu->arch.aux_inuse &= ~KVM_LARCH_LBT;
1304 	}
1305 	preempt_enable();
1306 }
1307 
1308 static void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr)
1309 {
1310 	/*
1311 	 * If TM is enabled, saving/restoring the top register will
1312 	 * cause an LBT exception, so enable LBT in advance here
1313 	 */
1314 	if (fcsr & FPU_CSR_TM)
1315 		kvm_own_lbt(vcpu);
1316 }
1317 
1318 static void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu)
1319 {
1320 	if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1321 		if (vcpu->arch.aux_inuse & KVM_LARCH_LBT)
1322 			return;
1323 		kvm_check_fcsr(vcpu, read_fcsr(LOONGARCH_FCSR0));
1324 	}
1325 }
1326 #else
1327 static inline void kvm_lose_lbt(struct kvm_vcpu *vcpu) { }
1328 static inline void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr) { }
1329 static inline void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu) { }
1330 #endif
1331 
1332 /* Enable FPU and restore context */
1333 void kvm_own_fpu(struct kvm_vcpu *vcpu)
1334 {
1335 	preempt_disable();
1336 
1337 	/*
1338 	 * Enable FPU for guest
1339 	 * Set FR and FRE according to guest context
1340 	 */
1341 	kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1342 	set_csr_euen(CSR_EUEN_FPEN);
1343 
1344 	kvm_restore_fpu(&vcpu->arch.fpu);
1345 	vcpu->arch.aux_inuse |= KVM_LARCH_FPU;
1346 	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
1347 
1348 	preempt_enable();
1349 }
1350 
1351 #ifdef CONFIG_CPU_HAS_LSX
1352 /* Enable LSX and restore context */
1353 int kvm_own_lsx(struct kvm_vcpu *vcpu)
1354 {
1355 	if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch))
1356 		return -EINVAL;
1357 
1358 	preempt_disable();
1359 
1360 	/* Enable LSX for guest */
1361 	kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1362 	set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN);
1363 	switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1364 	case KVM_LARCH_FPU:
1365 		/*
1366 		 * Guest FPU state already loaded,
1367 		 * only restore upper LSX state
1368 		 */
1369 		_restore_lsx_upper(&vcpu->arch.fpu);
1370 		break;
1371 	default:
1372 		/* Neither FP nor LSX already active,
1373 		 * restore full LSX state
1374 		 */
1375 		kvm_restore_lsx(&vcpu->arch.fpu);
1376 		break;
1377 	}
1378 
1379 	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX);
1380 	vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU;
1381 	preempt_enable();
1382 
1383 	return 0;
1384 }
1385 #endif
1386 
1387 #ifdef CONFIG_CPU_HAS_LASX
1388 /* Enable LASX and restore context */
1389 int kvm_own_lasx(struct kvm_vcpu *vcpu)
1390 {
1391 	if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch) || !kvm_guest_has_lasx(&vcpu->arch))
1392 		return -EINVAL;
1393 
1394 	preempt_disable();
1395 
1396 	kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1397 	set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
1398 	switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) {
1399 	case KVM_LARCH_LSX:
1400 	case KVM_LARCH_LSX | KVM_LARCH_FPU:
1401 		/* Guest LSX state already loaded, only restore upper LASX state */
1402 		_restore_lasx_upper(&vcpu->arch.fpu);
1403 		break;
1404 	case KVM_LARCH_FPU:
1405 		/* Guest FP state already loaded, only restore upper LSX & LASX state */
1406 		_restore_lsx_upper(&vcpu->arch.fpu);
1407 		_restore_lasx_upper(&vcpu->arch.fpu);
1408 		break;
1409 	default:
1410 		/* Neither FP nor LSX already active, restore full LASX state */
1411 		kvm_restore_lasx(&vcpu->arch.fpu);
1412 		break;
1413 	}
1414 
1415 	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX);
1416 	vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU;
1417 	preempt_enable();
1418 
1419 	return 0;
1420 }
1421 #endif
1422 
1423 /* Save context and disable FPU */
1424 void kvm_lose_fpu(struct kvm_vcpu *vcpu)
1425 {
1426 	preempt_disable();
1427 
1428 	kvm_check_fcsr_alive(vcpu);
1429 	if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) {
1430 		kvm_save_lasx(&vcpu->arch.fpu);
1431 		vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX);
1432 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LASX);
1433 
1434 		/* Disable LASX & LSX & FPU */
1435 		clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
1436 	} else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) {
1437 		kvm_save_lsx(&vcpu->arch.fpu);
1438 		vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU);
1439 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX);
1440 
1441 		/* Disable LSX & FPU */
1442 		clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN);
1443 	} else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1444 		kvm_save_fpu(&vcpu->arch.fpu);
1445 		vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU;
1446 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);
1447 
1448 		/* Disable FPU */
1449 		clear_csr_euen(CSR_EUEN_FPEN);
1450 	}
1451 	kvm_lose_lbt(vcpu);
1452 
1453 	preempt_enable();
1454 }
1455 
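/*
 * KVM_INTERRUPT uses the sign of irq->irq to select the operation: a
 * positive value queues that interrupt for injection, a negative value
 * dequeues the corresponding interrupt, and zero is rejected.
 */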
1456 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
1457 {
1458 	int intr = (int)irq->irq;
1459 
1460 	if (intr > 0)
1461 		kvm_queue_irq(vcpu, intr);
1462 	else if (intr < 0)
1463 		kvm_dequeue_irq(vcpu, -intr);
1464 	else {
1465 		kvm_err("%s: invalid interrupt ioctl %d\n", __func__, irq->irq);
1466 		return -EINVAL;
1467 	}
1468 
1469 	kvm_vcpu_kick(vcpu);
1470 
1471 	return 0;
1472 }
1473 
1474 long kvm_arch_vcpu_async_ioctl(struct file *filp,
1475 			       unsigned int ioctl, unsigned long arg)
1476 {
1477 	void __user *argp = (void __user *)arg;
1478 	struct kvm_vcpu *vcpu = filp->private_data;
1479 
1480 	if (ioctl == KVM_INTERRUPT) {
1481 		struct kvm_interrupt irq;
1482 
1483 		if (copy_from_user(&irq, argp, sizeof(irq)))
1484 			return -EFAULT;
1485 
1486 		kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq);
1487 
1488 		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
1489 	}
1490 
1491 	return -ENOIOCTLCMD;
1492 }
1493 
1494 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
1495 {
1496 	return 0;
1497 }
1498 
1499 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
1500 {
1501 	unsigned long timer_hz;
1502 	struct loongarch_csrs *csr;
1503 
1504 	vcpu->arch.vpid = 0;
1505 	vcpu->arch.flush_gpa = INVALID_GPA;
1506 
1507 	hrtimer_setup(&vcpu->arch.swtimer, kvm_swtimer_wakeup, CLOCK_MONOTONIC,
1508 		      HRTIMER_MODE_ABS_PINNED_HARD);
1509 
1510 	/* Get GPA (=HVA) of PGD for kvm hypervisor */
1511 	vcpu->arch.kvm_pgd = __pa(vcpu->kvm->arch.pgd);
1512 
1513 	/*
1514 	 * Get the PGD for the primary MMU. The virtual address is used since
1515 	 * there are memory accesses after loading from CSR_PGD in the TLB
1516 	 * exception fast path.
1516 	 */
1517 	vcpu->arch.host_pgd = (unsigned long)vcpu->kvm->mm->pgd;
1518 
1519 	vcpu->arch.handle_exit = kvm_handle_exit;
1520 	vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry;
1521 	vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL);
1522 	if (!vcpu->arch.csr)
1523 		return -ENOMEM;
1524 
1525 	/*
1526 	 * All kvm exceptions share one exception entry, and the host <-> guest
1527 	 * switch also switches the ECFG.VS field, so keep the host ECFG.VS here.
1528 	 */
1529 	vcpu->arch.host_ecfg = (read_csr_ecfg() & CSR_ECFG_VS);
1530 
1531 	/* Init */
1532 	vcpu->arch.last_sched_cpu = -1;
1533 
1534 	/* Init ipi_state lock */
1535 	spin_lock_init(&vcpu->arch.ipi_state.lock);
1536 
1537 	/*
1538 	 * Initialize guest register state to valid architectural reset state.
1539 	 */
1540 	timer_hz = calc_const_freq();
1541 	kvm_init_timer(vcpu, timer_hz);
1542 
1543 	/* Set Initialize mode for guest */
1544 	csr = vcpu->arch.csr;
1545 	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CRMD, CSR_CRMD_DA);
1546 
1547 	/* Set cpuid */
1548 	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id);
1549 	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);
1550 
1551 	/* Start with no pending virtual guest interrupts */
1552 	csr->csrs[LOONGARCH_CSR_GINTC] = 0;
1553 
1554 	return 0;
1555 }
1556 
1557 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
1558 {
1559 }
1560 
1561 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1562 {
1563 	int cpu;
1564 	struct kvm_context *context;
1565 
1566 	hrtimer_cancel(&vcpu->arch.swtimer);
1567 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
1568 	kvm_drop_cpuid(vcpu);
1569 	kfree(vcpu->arch.csr);
1570 
1571 	/*
1572 	 * If the vCPU is freed and reused as another vCPU, we don't want the
1573 	 * matching pointer wrongly hanging around in last_vcpu.
1574 	 */
1575 	for_each_possible_cpu(cpu) {
1576 		context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
1577 		if (context->last_vcpu == vcpu)
1578 			context->last_vcpu = NULL;
1579 	}
1580 }
1581 
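/*
 * Restore per-CPU guest state when the vCPU is scheduled in. If the vCPU
 * migrated to another physical CPU, or was not the last vCPU to run here,
 * the hardware CSR contents are stale and the full guest CSR set is
 * reloaded from the software copy.
 */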
1582 static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1583 {
1584 	bool migrated;
1585 	struct kvm_context *context;
1586 	struct loongarch_csrs *csr = vcpu->arch.csr;
1587 
1588 	/*
1589 	 * Have we migrated to a different CPU?
1590 	 * If so, any old guest TLB state may be stale.
1591 	 */
1592 	migrated = (vcpu->arch.last_sched_cpu != cpu);
1593 
1594 	/*
1595 	 * Was this the last vCPU to run on this CPU?
1596 	 * If not, any old guest state from this vCPU will have been clobbered.
1597 	 */
1598 	context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
1599 	if (migrated || (context->last_vcpu != vcpu))
1600 		vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
1601 	context->last_vcpu = vcpu;
1602 
1603 	/* Restore timer state regardless */
1604 	kvm_restore_timer(vcpu);
1605 	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
1606 
1607 	/* Restore hardware PMU CSRs */
1608 	kvm_restore_pmu(vcpu);
1609 
1610 	/* Don't bother restoring registers multiple times unless necessary */
1611 	if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE)
1612 		return 0;
1613 
1614 	write_csr_gcntc((ulong)vcpu->kvm->arch.time_offset);
1615 
1616 	/* Restore guest CSR registers */
1617 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
1618 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
1619 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
1620 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_MISC);
1621 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
1622 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ERA);
1623 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADV);
1624 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADI);
1625 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
1626 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
1627 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
1628 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
1629 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
1630 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ASID);
1631 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
1632 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
1633 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
1634 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
1635 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
1636 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
1637 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
1638 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS0);
1639 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS1);
1640 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS2);
1641 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS3);
1642 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS4);
1643 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS5);
1644 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS6);
1645 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS7);
1646 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TMID);
1647 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
1648 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
1649 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
1650 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
1651 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
1652 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
1653 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
1654 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
1655 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
1656 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
1657 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
1658 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
1659 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
1660 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
1661 
1662 	/* Restore Root.GINTC from unused Guest.GINTC register */
1663 	write_csr_gintc(csr->csrs[LOONGARCH_CSR_GINTC]);
1664 
1665 	/*
1666 	 * We should clear linked load bit to break interrupted atomics. This
1667 	 * prevents a SC on the next vCPU from succeeding by matching a LL on
1668 	 * the previous vCPU.
1669 	 */
1670 	if (vcpu->kvm->created_vcpus > 1)
1671 		set_gcsr_llbctl(CSR_LLBCTL_WCLLB);
1672 
1673 	vcpu->arch.aux_inuse |= KVM_LARCH_HWCSR_USABLE;
1674 
1675 	return 0;
1676 }
1677 
1678 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1679 {
1680 	unsigned long flags;
1681 
1682 	local_irq_save(flags);
1683 	/* Restore guest state to registers */
1684 	_kvm_vcpu_load(vcpu, cpu);
1685 	local_irq_restore(flags);
1686 }
1687 
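/*
 * Save per-CPU guest state when the vCPU is scheduled out. The hardware
 * CSRs are copied back to the software state only when the software copy
 * is stale; the timer is then saved and Root.GINTC is stashed in the
 * unused Guest.GINTC slot.
 */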
1688 static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
1689 {
1690 	struct loongarch_csrs *csr = vcpu->arch.csr;
1691 
1692 	kvm_lose_fpu(vcpu);
1693 
1694 	/*
1695 	 * Update the software CSR state from hardware if it is stale. Most
1696 	 * CSR registers are kept unchanged across a process context switch,
1697 	 * except for registers such as the remaining timer tick value and
1698 	 * the injected interrupt state.
1699 	 */
1700 	if (vcpu->arch.aux_inuse & KVM_LARCH_SWCSR_LATEST)
1701 		goto out;
1702 
1703 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
1704 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
1705 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
1706 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_MISC);
1707 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
1708 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ERA);
1709 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADV);
1710 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADI);
1711 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
1712 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
1713 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
1714 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
1715 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
1716 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ASID);
1717 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
1718 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
1719 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
1720 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
1721 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
1722 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
1723 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
1724 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG1);
1725 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG2);
1726 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG3);
1727 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS0);
1728 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS1);
1729 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS2);
1730 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS3);
1731 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS4);
1732 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS5);
1733 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS6);
1734 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS7);
1735 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TMID);
1736 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
1737 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
1738 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
1739 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
1740 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
1741 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
1742 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
1743 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
1744 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
1745 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
1746 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
1747 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
1748 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
1749 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
1750 
1751 	vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST;
1752 
1753 out:
1754 	kvm_save_timer(vcpu);
1755 	/* Save Root.GINTC into unused Guest.GINTC register */
1756 	csr->csrs[LOONGARCH_CSR_GINTC] = read_csr_gintc();
1757 
1758 	return 0;
1759 }
1760 
1761 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1762 {
1763 	int cpu;
1764 	unsigned long flags;
1765 
1766 	local_irq_save(flags);
1767 	cpu = smp_processor_id();
1768 	vcpu->arch.last_sched_cpu = cpu;
1769 
1770 	/* Save guest state in registers */
1771 	_kvm_vcpu_put(vcpu, cpu);
1772 	local_irq_restore(flags);
1773 }
1774 
1775 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
1776 {
1777 	int r = -EINTR;
1778 	struct kvm_run *run = vcpu->run;
1779 
1780 	if (vcpu->mmio_needed) {
1781 		if (!vcpu->mmio_is_write)
1782 			kvm_complete_mmio_read(vcpu, run);
1783 		vcpu->mmio_needed = 0;
1784 	}
1785 
1786 	switch (run->exit_reason) {
1787 	case KVM_EXIT_HYPERCALL:
1788 		kvm_complete_user_service(vcpu, run);
1789 		break;
1790 	case KVM_EXIT_LOONGARCH_IOCSR:
1791 		if (!run->iocsr_io.is_write)
1792 			kvm_complete_iocsr_read(vcpu, run);
1793 		break;
1794 	}
1795 
1796 	if (!vcpu->wants_to_run)
1797 		return r;
1798 
1799 	/* Clear exit_reason */
1800 	run->exit_reason = KVM_EXIT_UNKNOWN;
1801 	lose_fpu(1);
1802 	vcpu_load(vcpu);
1803 	kvm_sigset_activate(vcpu);
1804 	r = kvm_pre_enter_guest(vcpu);
1805 	if (r != RESUME_GUEST)
1806 		goto out;
1807 
1808 	guest_timing_enter_irqoff();
1809 	guest_state_enter_irqoff();
1810 	trace_kvm_enter(vcpu);
1811 	r = kvm_loongarch_ops->enter_guest(run, vcpu);
1812 
1813 	trace_kvm_out(vcpu);
1814 	/*
1815 	 * Guest exit is already recorded in kvm_handle_exit(), and its
1816 	 * return value must not be RESUME_GUEST
1817 	 */
1818 	local_irq_enable();
1819 out:
1820 	kvm_sigset_deactivate(vcpu);
1821 	vcpu_put(vcpu);
1822 
1823 	return r;
1824 }
1825