1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
4 */
5
6 #include <linux/kvm_host.h>
7 #include <asm/fpu.h>
8 #include <asm/lbt.h>
9 #include <asm/loongarch.h>
10 #include <asm/setup.h>
11 #include <asm/time.h>
12 #include <asm/timex.h>
13
14 #define CREATE_TRACE_POINTS
15 #include "trace.h"
16
17 const struct kvm_stats_desc kvm_vcpu_stats_desc[] = {
18 KVM_GENERIC_VCPU_STATS(),
19 STATS_DESC_COUNTER(VCPU, int_exits),
20 STATS_DESC_COUNTER(VCPU, idle_exits),
21 STATS_DESC_COUNTER(VCPU, cpucfg_exits),
22 STATS_DESC_COUNTER(VCPU, signal_exits),
23 STATS_DESC_COUNTER(VCPU, hypercall_exits),
24 STATS_DESC_COUNTER(VCPU, ipi_read_exits),
25 STATS_DESC_COUNTER(VCPU, ipi_write_exits),
26 STATS_DESC_COUNTER(VCPU, eiointc_read_exits),
27 STATS_DESC_COUNTER(VCPU, eiointc_write_exits),
28 STATS_DESC_COUNTER(VCPU, pch_pic_read_exits),
29 STATS_DESC_COUNTER(VCPU, pch_pic_write_exits)
30 };
31
32 const struct kvm_stats_header kvm_vcpu_stats_header = {
33 .name_size = KVM_STATS_NAME_SIZE,
34 .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
35 .id_offset = sizeof(struct kvm_stats_header),
36 .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
37 .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
38 sizeof(kvm_vcpu_stats_desc),
39 };
40
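/*
 * PMU passthrough helpers: while the guest owns the hardware PMU, the host
 * counter and control values are stashed in the per-CPU kvm_context, and the
 * control registers are cleared so that host and guest events cannot mix.
 */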
41 static inline void kvm_save_host_pmu(struct kvm_vcpu *vcpu)
42 {
43 struct kvm_context *context;
44
45 context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
46 context->perf_cntr[0] = read_csr_perfcntr0();
47 context->perf_cntr[1] = read_csr_perfcntr1();
48 context->perf_cntr[2] = read_csr_perfcntr2();
49 context->perf_cntr[3] = read_csr_perfcntr3();
50 context->perf_ctrl[0] = write_csr_perfctrl0(0);
51 context->perf_ctrl[1] = write_csr_perfctrl1(0);
52 context->perf_ctrl[2] = write_csr_perfctrl2(0);
53 context->perf_ctrl[3] = write_csr_perfctrl3(0);
54 }
55
56 static inline void kvm_restore_host_pmu(struct kvm_vcpu *vcpu)
57 {
58 struct kvm_context *context;
59
60 context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
61 write_csr_perfcntr0(context->perf_cntr[0]);
62 write_csr_perfcntr1(context->perf_cntr[1]);
63 write_csr_perfcntr2(context->perf_cntr[2]);
64 write_csr_perfcntr3(context->perf_cntr[3]);
65 write_csr_perfctrl0(context->perf_ctrl[0]);
66 write_csr_perfctrl1(context->perf_ctrl[1]);
67 write_csr_perfctrl2(context->perf_ctrl[2]);
68 write_csr_perfctrl3(context->perf_ctrl[3]);
69 }
70
71
72 static inline void kvm_save_guest_pmu(struct kvm_vcpu *vcpu)
73 {
74 struct loongarch_csrs *csr = vcpu->arch.csr;
75
76 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
77 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
78 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
79 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
80 kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
81 kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
82 kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
83 kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
84 }
85
86 static inline void kvm_restore_guest_pmu(struct kvm_vcpu *vcpu)
87 {
88 struct loongarch_csrs *csr = vcpu->arch.csr;
89
90 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
91 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
92 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
93 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
94 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
95 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
96 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
97 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
98 }
99
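/* Grant the guest direct access to the hardware PMU via the GCFG.GPERF field */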
100 static int kvm_own_pmu(struct kvm_vcpu *vcpu)
101 {
102 unsigned long val;
103
104 if (!kvm_guest_has_pmu(&vcpu->arch))
105 return -EINVAL;
106
107 kvm_save_host_pmu(vcpu);
108
109 /* Set PM0-PM(num) to guest */
110 val = read_csr_gcfg() & ~CSR_GCFG_GPERF;
111 val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT;
112 write_csr_gcfg(val);
113
114 kvm_restore_guest_pmu(vcpu);
115
116 return 0;
117 }
118
119 static void kvm_lose_pmu(struct kvm_vcpu *vcpu)
120 {
121 unsigned long val;
122 struct loongarch_csrs *csr = vcpu->arch.csr;
123
124 if (!(vcpu->arch.aux_inuse & KVM_LARCH_PMU))
125 return;
126
127 kvm_save_guest_pmu(vcpu);
128
129 /* Disable pmu access from guest */
130 write_csr_gcfg(read_csr_gcfg() & ~CSR_GCFG_GPERF);
131
132 /*
133 * Clear KVM_LARCH_PMU if the guest is not using the PMU CSRs when
134 * exiting the guest, so that there is no PMU CSR context to deal
135 * with on the next trap into the guest.
136 *
137 * Otherwise set the request bit KVM_REQ_PMU so that the guest PMU
138 * state is restored before re-entering the guest.
139 */
140 val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
141 val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
142 val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
143 val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
144 if (!(val & KVM_PMU_EVENT_ENABLED))
145 vcpu->arch.aux_inuse &= ~KVM_LARCH_PMU;
146 else
147 kvm_make_request(KVM_REQ_PMU, vcpu);
148
149 kvm_restore_host_pmu(vcpu);
150 }
151
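/*
 * Called with interrupts disabled on the guest-entry path: hand the hardware
 * PMU to the guest only when a KVM_REQ_PMU request is pending.
 */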
152 static void kvm_check_pmu(struct kvm_vcpu *vcpu)
153 {
154 if (kvm_check_request(KVM_REQ_PMU, vcpu)) {
155 kvm_own_pmu(vcpu);
156 vcpu->arch.aux_inuse |= KVM_LARCH_PMU;
157 }
158 }
159
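/*
 * Publish the accumulated steal time to the guest's kvm_steal_time area.
 * The version field acts like a sequence counter: it is odd while the record
 * is being updated and even otherwise, so the guest can detect a torn read.
 */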
160 static void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
161 {
162 u32 version;
163 u64 steal;
164 gpa_t gpa;
165 struct kvm_memslots *slots;
166 struct kvm_steal_time __user *st;
167 struct gfn_to_hva_cache *ghc;
168
169 ghc = &vcpu->arch.st.cache;
170 gpa = vcpu->arch.st.guest_addr;
171 if (!(gpa & KVM_STEAL_PHYS_VALID))
172 return;
173
174 gpa &= KVM_STEAL_PHYS_MASK;
175 slots = kvm_memslots(vcpu->kvm);
176 if (slots->generation != ghc->generation || gpa != ghc->gpa) {
177 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) {
178 ghc->gpa = INVALID_GPA;
179 return;
180 }
181 }
182
183 st = (struct kvm_steal_time __user *)ghc->hva;
184 if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_PREEMPT)) {
185 unsafe_put_user(0, &st->preempted, out);
186 vcpu->arch.st.preempted = 0;
187 }
188
189 unsafe_get_user(version, &st->version, out);
190 if (version & 1)
191 version += 1; /* first time write, random junk */
192
193 version += 1;
194 unsafe_put_user(version, &st->version, out);
195 smp_wmb();
196
197 unsafe_get_user(steal, &st->steal, out);
198 steal += current->sched_info.run_delay - vcpu->arch.st.last_steal;
199 vcpu->arch.st.last_steal = current->sched_info.run_delay;
200 unsafe_put_user(steal, &st->steal, out);
201
202 smp_wmb();
203 version += 1;
204 unsafe_put_user(version, &st->version, out);
205 out:
206 mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
207 }
208
209 /*
210 * kvm_check_requests - check and handle pending vCPU requests
211 *
212 * Return: RESUME_GUEST if we should enter the guest
213 * RESUME_HOST if we should exit to userspace
214 */
215 static int kvm_check_requests(struct kvm_vcpu *vcpu)
216 {
217 if (!kvm_request_pending(vcpu))
218 return RESUME_GUEST;
219
220 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
221 vcpu->arch.vpid = 0; /* Drop vpid for this vCPU */
222
223 if (kvm_dirty_ring_check_request(vcpu))
224 return RESUME_HOST;
225
226 if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
227 kvm_update_stolen_time(vcpu);
228
229 return RESUME_GUEST;
230 }
231
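/*
 * Requests that must be handled with interrupts disabled, immediately before
 * entering the guest.
 */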
232 static void kvm_late_check_requests(struct kvm_vcpu *vcpu)
233 {
234 lockdep_assert_irqs_disabled();
235 if (kvm_check_request(KVM_REQ_TLB_FLUSH_GPA, vcpu))
236 if (vcpu->arch.flush_gpa != INVALID_GPA) {
237 kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa);
238 vcpu->arch.flush_gpa = INVALID_GPA;
239 }
240
241 if (kvm_check_request(KVM_REQ_AUX_LOAD, vcpu)) {
242 switch (vcpu->arch.aux_ldtype) {
243 case KVM_LARCH_FPU:
244 kvm_own_fpu(vcpu);
245 break;
246 case KVM_LARCH_LSX:
247 kvm_own_lsx(vcpu);
248 break;
249 case KVM_LARCH_LASX:
250 kvm_own_lasx(vcpu);
251 break;
252 case KVM_LARCH_LBT:
253 kvm_own_lbt(vcpu);
254 break;
255 default:
256 break;
257 }
258
259 vcpu->arch.aux_ldtype = 0;
260 }
261 }
262
263 /*
264 * Check and handle pending signals and vCPU requests etc.
265 * Runs with interrupts and preemption enabled.
266 *
267 * Return: RESUME_GUEST if we should enter the guest
268 * RESUME_HOST if we should exit to userspace
269 * < 0 if we should exit to userspace, where the return value
270 * indicates an error
271 */
272 static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
273 {
274 int idx, ret;
275
276 /*
277 * Check conditions before entering the guest
278 */
279 ret = kvm_xfer_to_guest_mode_handle_work(vcpu);
280 if (ret < 0)
281 return ret;
282
283 idx = srcu_read_lock(&vcpu->kvm->srcu);
284 ret = kvm_check_requests(vcpu);
285 srcu_read_unlock(&vcpu->kvm->srcu, idx);
286
287 return ret;
288 }
289
290 /*
291 * Called with interrupts enabled
292 *
293 * Return: RESUME_GUEST if we should enter the guest, with interrupts disabled
294 * Anything else if we should exit to userspace
295 */
296 static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
297 {
298 int ret;
299
300 do {
301 ret = kvm_enter_guest_check(vcpu);
302 if (ret != RESUME_GUEST)
303 break;
304
305 /*
306 * Handle the vcpu timer and pending interrupts, check requests and
307 * check the vmid before the vcpu enters the guest
308 */
309 local_irq_disable();
310 kvm_deliver_intr(vcpu);
311 kvm_deliver_exception(vcpu);
312 /* Make sure the vcpu mode has been written */
313 smp_store_mb(vcpu->mode, IN_GUEST_MODE);
314 kvm_check_vpid(vcpu);
315 kvm_check_pmu(vcpu);
316
317 /*
318 * Must be called after kvm_check_vpid(), since that function updates
319 * CSR.GSTAT which is used by kvm_flush_tlb_gpa(), and it may also
320 * clear the pending KVM_REQ_TLB_FLUSH_GPA bit
321 */
322 kvm_late_check_requests(vcpu);
323 vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY);
324 /* Clear KVM_LARCH_SWCSR_LATEST as CSR will change when enter guest */
325 vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
326
327 if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) {
328 if (vcpu->arch.aux_inuse & KVM_LARCH_PMU) {
329 kvm_lose_pmu(vcpu);
330 kvm_make_request(KVM_REQ_PMU, vcpu);
331 }
332 /* make sure the vcpu mode has been written */
333 smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE);
334 local_irq_enable();
335 ret = -EAGAIN;
336 }
337 } while (ret != RESUME_GUEST);
338
339 return ret;
340 }
341
342 /*
343 * Return 1 for resume guest and "<= 0" for resume host.
344 */
345 static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
346 {
347 int ret = RESUME_GUEST;
348 unsigned long estat = vcpu->arch.host_estat;
349 u32 intr = estat & CSR_ESTAT_IS;
350 u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
351
352 vcpu->mode = OUTSIDE_GUEST_MODE;
353
354 /* Set a default exit reason */
355 run->exit_reason = KVM_EXIT_UNKNOWN;
356
357 kvm_lose_pmu(vcpu);
358
359 guest_timing_exit_irqoff();
360 guest_state_exit_irqoff();
361 local_irq_enable();
362
363 trace_kvm_exit(vcpu, ecode);
364 if (ecode) {
365 ret = kvm_handle_fault(vcpu, ecode);
366 } else {
367 WARN(!intr, "vm exiting with suspicious irq\n");
368 ++vcpu->stat.int_exits;
369 }
370
371 if (ret == RESUME_GUEST)
372 ret = kvm_pre_enter_guest(vcpu);
373
374 if (ret != RESUME_GUEST) {
375 local_irq_disable();
376 return ret;
377 }
378
379 guest_timing_enter_irqoff();
380 guest_state_enter_irqoff();
381 trace_kvm_reenter(vcpu);
382
383 return RESUME_GUEST;
384 }
385
386 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
387 {
388 return !!(vcpu->arch.irq_pending) &&
389 vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE;
390 }
391
392 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
393 {
394 return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
395 }
396
397 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
398 {
399 unsigned long val;
400
401 preempt_disable();
402 val = gcsr_read(LOONGARCH_CSR_CRMD);
403 preempt_enable();
404
405 return (val & CSR_PRMD_PPLV) == PLV_KERN;
406 }
407
408 #ifdef CONFIG_GUEST_PERF_EVENTS
409 unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
410 {
411 return vcpu->arch.pc;
412 }
413
414 /*
415 * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
416 * arrived in guest context. For LoongArch64, if PMU is not passthrough to VM,
417 * any event that arrives while a vCPU is loaded is considered to be "in guest".
418 */
419 bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
420 {
421 return (vcpu && !(vcpu->arch.aux_inuse & KVM_LARCH_PMU));
422 }
423 #endif
424
425 bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
426 {
427 return false;
428 }
429
430 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
431 {
432 return VM_FAULT_SIGBUS;
433 }
434
435 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
436 struct kvm_translation *tr)
437 {
438 return -EINVAL;
439 }
440
441 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
442 {
443 int ret;
444
445 /* Protect from TOD sync and vcpu_load/put() */
446 preempt_disable();
447 ret = kvm_pending_timer(vcpu) ||
448 kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT) & (1 << INT_TI);
449 preempt_enable();
450
451 return ret;
452 }
453
454 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
455 {
456 int i;
457
458 kvm_debug("vCPU Register Dump:\n");
459 kvm_debug("\tPC = 0x%08lx\n", vcpu->arch.pc);
460 kvm_debug("\tExceptions: %08lx\n", vcpu->arch.irq_pending);
461
462 for (i = 0; i < 32; i += 4) {
463 kvm_debug("\tGPR%02d: %08lx %08lx %08lx %08lx\n", i,
464 vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1],
465 vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
466 }
467
468 kvm_debug("\tCRMD: 0x%08lx, ESTAT: 0x%08lx\n",
469 kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD),
470 kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT));
471
472 kvm_debug("\tERA: 0x%08lx\n", kvm_read_hw_gcsr(LOONGARCH_CSR_ERA));
473
474 return 0;
475 }
476
477 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
478 struct kvm_mp_state *mp_state)
479 {
480 *mp_state = vcpu->arch.mp_state;
481
482 return 0;
483 }
484
485 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
486 struct kvm_mp_state *mp_state)
487 {
488 int ret = 0;
489
490 switch (mp_state->mp_state) {
491 case KVM_MP_STATE_RUNNABLE:
492 vcpu->arch.mp_state = *mp_state;
493 break;
494 default:
495 ret = -EINVAL;
496 }
497
498 return ret;
499 }
500
501 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
502 struct kvm_guest_debug *dbg)
503 {
504 if (dbg->control & ~KVM_GUESTDBG_VALID_MASK)
505 return -EINVAL;
506
507 if (dbg->control & KVM_GUESTDBG_ENABLE)
508 vcpu->guest_debug = dbg->control;
509 else
510 vcpu->guest_debug = 0;
511
512 return 0;
513 }
514
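/*
 * Bind a guest physical CPUID to this vcpu in the per-VM phyid_map.
 * A vcpu's CPUID can only be set once, and two vcpus cannot share one CPUID.
 */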
515 static inline int kvm_set_cpuid(struct kvm_vcpu *vcpu, u64 val)
516 {
517 int cpuid;
518 struct kvm_phyid_map *map;
519 struct loongarch_csrs *csr = vcpu->arch.csr;
520
521 if (val >= KVM_MAX_PHYID)
522 return -EINVAL;
523
524 map = vcpu->kvm->arch.phyid_map;
525 cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);
526
527 spin_lock(&vcpu->kvm->arch.phyid_map_lock);
528 if ((cpuid < KVM_MAX_PHYID) && map->phys_map[cpuid].enabled) {
529 /* Discard duplicated CPUID set operation */
530 if (cpuid == val) {
531 spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
532 return 0;
533 }
534
535 /*
536 * The CPUID has already been set;
537 * forbid changing it to a different value at runtime
538 */
539 spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
540 return -EINVAL;
541 }
542
543 if (map->phys_map[val].enabled) {
544 /* Discard duplicated CPUID set operation */
545 if (vcpu == map->phys_map[val].vcpu) {
546 spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
547 return 0;
548 }
549
550 /*
551 * The new CPUID is already taken by another vcpu;
552 * forbid sharing the same CPUID between different vcpus
553 */
554 spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
555 return -EINVAL;
556 }
557
558 kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, val);
559 map->phys_map[val].enabled = true;
560 map->phys_map[val].vcpu = vcpu;
561 spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
562
563 return 0;
564 }
565
566 static inline void kvm_drop_cpuid(struct kvm_vcpu *vcpu)
567 {
568 int cpuid;
569 struct kvm_phyid_map *map;
570 struct loongarch_csrs *csr = vcpu->arch.csr;
571
572 map = vcpu->kvm->arch.phyid_map;
573 cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);
574
575 if (cpuid >= KVM_MAX_PHYID)
576 return;
577
578 spin_lock(&vcpu->kvm->arch.phyid_map_lock);
579 if (map->phys_map[cpuid].enabled) {
580 map->phys_map[cpuid].vcpu = NULL;
581 map->phys_map[cpuid].enabled = false;
582 kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);
583 }
584 spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
585 }
586
587 struct kvm_vcpu *kvm_get_vcpu_by_cpuid(struct kvm *kvm, int cpuid)
588 {
589 struct kvm_phyid_map *map;
590
591 if (cpuid < 0)
592 return NULL;
593
594 if (cpuid >= KVM_MAX_PHYID)
595 return NULL;
596
597 map = kvm->arch.phyid_map;
598 if (!map->phys_map[cpuid].enabled)
599 return NULL;
600
601 return map->phys_map[cpuid].vcpu;
602 }
603
604 static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val)
605 {
606 unsigned long gintc;
607 struct loongarch_csrs *csr = vcpu->arch.csr;
608
609 if (get_gcsr_flag(id) & INVALID_GCSR)
610 return -EINVAL;
611
612 if (id == LOONGARCH_CSR_ESTAT) {
613 preempt_disable();
614 vcpu_load(vcpu);
615 /*
616 * Sync pending interrupts into ESTAT so that the interrupt state
617 * is preserved during the VM migration stage
618 */
619 kvm_deliver_intr(vcpu);
620 vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
621 vcpu_put(vcpu);
622 preempt_enable();
623
624 /* ESTAT IP0~IP7 get from GINTC */
625 gintc = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC) & 0xff;
626 *val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT) | (gintc << 2);
627 return 0;
628 }
629
630 /*
631 * Get software CSR state since software state is consistent
632 * with hardware for synchronous ioctl
633 */
634 *val = kvm_read_sw_gcsr(csr, id);
635
636 return 0;
637 }
638
639 static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
640 {
641 int ret = 0, gintc;
642 struct loongarch_csrs *csr = vcpu->arch.csr;
643
644 if (get_gcsr_flag(id) & INVALID_GCSR)
645 return -EINVAL;
646
647 if (id == LOONGARCH_CSR_CPUID)
648 return kvm_set_cpuid(vcpu, val);
649
650 if (id == LOONGARCH_CSR_ESTAT) {
651 /* ESTAT IP0~IP7 inject through GINTC */
652 gintc = (val >> 2) & 0xff;
653 kvm_set_sw_gcsr(csr, LOONGARCH_CSR_GINTC, gintc);
654
655 gintc = val & ~(0xffUL << 2);
656 kvm_set_sw_gcsr(csr, LOONGARCH_CSR_ESTAT, gintc);
657
658 return ret;
659 }
660
661 kvm_write_sw_gcsr(csr, id, val);
662
663 /*
664 * After modifying a PMU CSR value of the vcpu, set KVM_REQ_PMU
665 * if the PMU CSRs are in use.
666 */
667 if (id >= LOONGARCH_CSR_PERFCTRL0 && id <= LOONGARCH_CSR_PERFCNTR3) {
668 unsigned long val;
669
670 val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0) |
671 kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1) |
672 kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2) |
673 kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
674
675 if (val & KVM_PMU_EVENT_ENABLED)
676 kvm_make_request(KVM_REQ_PMU, vcpu);
677 }
678
679 return ret;
680 }
681
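/*
 * Return in @v the mask of CPUCFG bits that userspace may set for the given
 * CPUCFG index; bits reserved by hardware or unsupported by KVM read as zero.
 */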
682 static int _kvm_get_cpucfg_mask(int id, u64 *v)
683 {
684 unsigned int config;
685
686 if (id < 0 || id >= KVM_MAX_CPUCFG_REGS)
687 return -EINVAL;
688
689 switch (id) {
690 case LOONGARCH_CPUCFG0:
691 *v = GENMASK(31, 0);
692 return 0;
693 case LOONGARCH_CPUCFG1:
694 *v = GENMASK(26, 0);
695 return 0;
696 case LOONGARCH_CPUCFG2:
697 /* CPUCFG2 features unconditionally supported by KVM */
698 *v = CPUCFG2_FP | CPUCFG2_FPSP | CPUCFG2_FPDP |
699 CPUCFG2_FPVERS | CPUCFG2_LLFTP | CPUCFG2_LLFTPREV |
700 CPUCFG2_LSPW | CPUCFG2_LAM;
701 /*
702 * For the ISA extensions listed below, if one is supported
703 * by the host, then it is also supported by KVM.
704 */
705 if (cpu_has_lsx)
706 *v |= CPUCFG2_LSX;
707 if (cpu_has_lasx)
708 *v |= CPUCFG2_LASX;
709 if (cpu_has_lbt_x86)
710 *v |= CPUCFG2_X86BT;
711 if (cpu_has_lbt_arm)
712 *v |= CPUCFG2_ARMBT;
713 if (cpu_has_lbt_mips)
714 *v |= CPUCFG2_MIPSBT;
715 if (cpu_has_ptw)
716 *v |= CPUCFG2_PTW;
717
718 config = read_cpucfg(LOONGARCH_CPUCFG2);
719 *v |= config & (CPUCFG2_FRECIPE | CPUCFG2_DIV32 | CPUCFG2_LAM_BH);
720 *v |= config & (CPUCFG2_LAMCAS | CPUCFG2_LLACQ_SCREL | CPUCFG2_SCQ);
721 return 0;
722 case LOONGARCH_CPUCFG3:
723 *v = GENMASK(23, 0);
724
725 /* The VM does not support the memory ordering and SFB settings */
726 config = read_cpucfg(LOONGARCH_CPUCFG3);
727 *v &= config & ~(CPUCFG3_SFB);
728 *v &= config & ~(CPUCFG3_ALDORDER_CAP | CPUCFG3_ASTORDER_CAP | CPUCFG3_SLDORDER_CAP);
729 return 0;
730 case LOONGARCH_CPUCFG4:
731 case LOONGARCH_CPUCFG5:
732 *v = GENMASK(31, 0);
733 return 0;
734 case LOONGARCH_CPUCFG6:
735 if (cpu_has_pmp)
736 *v = GENMASK(14, 0);
737 else
738 *v = 0;
739 return 0;
740 case LOONGARCH_CPUCFG16:
741 *v = GENMASK(16, 0);
742 return 0;
743 case LOONGARCH_CPUCFG17 ... LOONGARCH_CPUCFG20:
744 *v = GENMASK(30, 0);
745 return 0;
746 default:
747 /*
748 * CPUCFG bits should be zero if reserved by HW or not
749 * supported by KVM.
750 */
751 *v = 0;
752 return 0;
753 }
754 }
755
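/*
 * Validate a userspace-supplied CPUCFG value against the writable mask and
 * the architectural dependencies (e.g. LASX requires LSX, and LSX requires FP).
 */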
756 static int kvm_check_cpucfg(int id, u64 val)
757 {
758 int ret;
759 u32 host;
760 u64 mask = 0;
761
762 ret = _kvm_get_cpucfg_mask(id, &mask);
763 if (ret)
764 return ret;
765
766 if (val & ~mask)
767 /* Unsupported features and/or the higher 32 bits should not be set */
768 return -EINVAL;
769
770 switch (id) {
771 case LOONGARCH_CPUCFG1:
772 if ((val & CPUCFG1_MSGINT) && !cpu_has_msgint)
773 return -EINVAL;
774 return 0;
775 case LOONGARCH_CPUCFG2:
776 if (!(val & CPUCFG2_LLFTP))
777 /* Guests must have a constant timer */
778 return -EINVAL;
779 if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP)))
780 /* Single and double precision floating point must both be set when FP is enabled */
781 return -EINVAL;
782 if ((val & CPUCFG2_LSX) && !(val & CPUCFG2_FP))
783 /* LSX architecturally implies FP but val does not satisfy that */
784 return -EINVAL;
785 if ((val & CPUCFG2_LASX) && !(val & CPUCFG2_LSX))
786 /* LASX architecturally implies LSX and FP but val does not satisfy that */
787 return -EINVAL;
788 return 0;
789 case LOONGARCH_CPUCFG3:
790 host = read_cpucfg(LOONGARCH_CPUCFG3);
791 if ((val & CPUCFG3_RVAMAX) > (host & CPUCFG3_RVAMAX))
792 return -EINVAL;
793 if ((val & CPUCFG3_SPW_LVL) > (host & CPUCFG3_SPW_LVL))
794 return -EINVAL;
795 return 0;
796 case LOONGARCH_CPUCFG6:
797 if (val & CPUCFG6_PMP) {
798 host = read_cpucfg(LOONGARCH_CPUCFG6);
799 if ((val & CPUCFG6_PMBITS) != (host & CPUCFG6_PMBITS))
800 return -EINVAL;
801 if ((val & CPUCFG6_PMNUM) > (host & CPUCFG6_PMNUM))
802 return -EINVAL;
803 if ((val & CPUCFG6_UPM) && !(host & CPUCFG6_UPM))
804 return -EINVAL;
805 }
806 return 0;
807 default:
808 /*
809 * Values for the other CPUCFG IDs are not being further validated
810 * besides the mask check above.
811 */
812 return 0;
813 }
814 }
815
816 static int kvm_get_one_reg(struct kvm_vcpu *vcpu,
817 const struct kvm_one_reg *reg, u64 *v)
818 {
819 int id, ret = 0;
820 u64 type = reg->id & KVM_REG_LOONGARCH_MASK;
821
822 switch (type) {
823 case KVM_REG_LOONGARCH_CSR:
824 id = KVM_GET_IOC_CSR_IDX(reg->id);
825 ret = _kvm_getcsr(vcpu, id, v);
826 break;
827 case KVM_REG_LOONGARCH_CPUCFG:
828 id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
829 if (id >= 0 && id < KVM_MAX_CPUCFG_REGS)
830 *v = vcpu->arch.cpucfg[id];
831 else
832 ret = -EINVAL;
833 break;
834 case KVM_REG_LOONGARCH_LBT:
835 if (!kvm_guest_has_lbt(&vcpu->arch))
836 return -ENXIO;
837
838 switch (reg->id) {
839 case KVM_REG_LOONGARCH_LBT_SCR0:
840 *v = vcpu->arch.lbt.scr0;
841 break;
842 case KVM_REG_LOONGARCH_LBT_SCR1:
843 *v = vcpu->arch.lbt.scr1;
844 break;
845 case KVM_REG_LOONGARCH_LBT_SCR2:
846 *v = vcpu->arch.lbt.scr2;
847 break;
848 case KVM_REG_LOONGARCH_LBT_SCR3:
849 *v = vcpu->arch.lbt.scr3;
850 break;
851 case KVM_REG_LOONGARCH_LBT_EFLAGS:
852 *v = vcpu->arch.lbt.eflags;
853 break;
854 case KVM_REG_LOONGARCH_LBT_FTOP:
855 *v = vcpu->arch.fpu.ftop;
856 break;
857 default:
858 ret = -EINVAL;
859 break;
860 }
861 break;
862 case KVM_REG_LOONGARCH_KVM:
863 switch (reg->id) {
864 case KVM_REG_LOONGARCH_COUNTER:
865 *v = get_cycles() + vcpu->kvm->arch.time_offset;
866 break;
867 case KVM_REG_LOONGARCH_DEBUG_INST:
868 *v = INSN_HVCL | KVM_HCALL_SWDBG;
869 break;
870 default:
871 ret = -EINVAL;
872 break;
873 }
874 break;
875 default:
876 ret = -EINVAL;
877 break;
878 }
879
880 return ret;
881 }
882
883 static int kvm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
884 {
885 int ret = 0;
886 u64 v, size = reg->id & KVM_REG_SIZE_MASK;
887
888 switch (size) {
889 case KVM_REG_SIZE_U64:
890 ret = kvm_get_one_reg(vcpu, reg, &v);
891 if (ret)
892 return ret;
893 ret = put_user(v, (u64 __user *)(long)reg->addr);
894 break;
895 default:
896 ret = -EINVAL;
897 break;
898 }
899
900 return ret;
901 }
902
903 static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
904 const struct kvm_one_reg *reg, u64 v)
905 {
906 int id, ret = 0;
907 u64 type = reg->id & KVM_REG_LOONGARCH_MASK;
908
909 switch (type) {
910 case KVM_REG_LOONGARCH_CSR:
911 id = KVM_GET_IOC_CSR_IDX(reg->id);
912 ret = _kvm_setcsr(vcpu, id, v);
913 break;
914 case KVM_REG_LOONGARCH_CPUCFG:
915 id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
916 ret = kvm_check_cpucfg(id, v);
917 if (ret)
918 break;
919 vcpu->arch.cpucfg[id] = (u32)v;
920 if (id == LOONGARCH_CPUCFG6)
921 vcpu->arch.max_pmu_csrid =
922 LOONGARCH_CSR_PERFCTRL0 + 2 * kvm_get_pmu_num(&vcpu->arch) + 1;
923 break;
924 case KVM_REG_LOONGARCH_LBT:
925 if (!kvm_guest_has_lbt(&vcpu->arch))
926 return -ENXIO;
927
928 switch (reg->id) {
929 case KVM_REG_LOONGARCH_LBT_SCR0:
930 vcpu->arch.lbt.scr0 = v;
931 break;
932 case KVM_REG_LOONGARCH_LBT_SCR1:
933 vcpu->arch.lbt.scr1 = v;
934 break;
935 case KVM_REG_LOONGARCH_LBT_SCR2:
936 vcpu->arch.lbt.scr2 = v;
937 break;
938 case KVM_REG_LOONGARCH_LBT_SCR3:
939 vcpu->arch.lbt.scr3 = v;
940 break;
941 case KVM_REG_LOONGARCH_LBT_EFLAGS:
942 vcpu->arch.lbt.eflags = v;
943 break;
944 case KVM_REG_LOONGARCH_LBT_FTOP:
945 vcpu->arch.fpu.ftop = v;
946 break;
947 default:
948 ret = -EINVAL;
949 break;
950 }
951 break;
952 case KVM_REG_LOONGARCH_KVM:
953 switch (reg->id) {
954 case KVM_REG_LOONGARCH_COUNTER:
955 /*
956 * The counter offset is relative to the board, not the vcpu;
957 * only set it the first time (by vcpu 0) on an SMP system
958 */
959 if (vcpu->vcpu_id == 0)
960 vcpu->kvm->arch.time_offset = (signed long)(v - get_cycles());
961 break;
962 case KVM_REG_LOONGARCH_VCPU_RESET:
963 vcpu->arch.st.guest_addr = 0;
964 memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
965 memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
966
967 /*
968 * On vCPU reset, clear the ESTAT and GINTC registers here.
969 * The other CSR registers are cleared via _kvm_setcsr().
970 */
971 kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_GINTC, 0);
972 kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_ESTAT, 0);
973 break;
974 default:
975 ret = -EINVAL;
976 break;
977 }
978 break;
979 default:
980 ret = -EINVAL;
981 break;
982 }
983
984 return ret;
985 }
986
987 static int kvm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
988 {
989 int ret = 0;
990 u64 v, size = reg->id & KVM_REG_SIZE_MASK;
991
992 switch (size) {
993 case KVM_REG_SIZE_U64:
994 ret = get_user(v, (u64 __user *)(long)reg->addr);
995 if (ret)
996 return ret;
997 break;
998 default:
999 return -EINVAL;
1000 }
1001
1002 return kvm_set_one_reg(vcpu, reg, v);
1003 }
1004
1005 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
1006 {
1007 return -ENOIOCTLCMD;
1008 }
1009
1010 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
1011 {
1012 return -ENOIOCTLCMD;
1013 }
1014
1015 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1016 {
1017 int i;
1018
1019 for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
1020 regs->gpr[i] = vcpu->arch.gprs[i];
1021
1022 regs->pc = vcpu->arch.pc;
1023
1024 return 0;
1025 }
1026
1027 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1028 {
1029 int i;
1030
1031 for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
1032 vcpu->arch.gprs[i] = regs->gpr[i];
1033
1034 vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
1035 vcpu->arch.pc = regs->pc;
1036
1037 return 0;
1038 }
1039
1040 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1041 struct kvm_enable_cap *cap)
1042 {
1043 /* FPU is enabled by default, will support LSX/LASX later. */
1044 return -EINVAL;
1045 }
1046
1047 static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu,
1048 struct kvm_device_attr *attr)
1049 {
1050 switch (attr->attr) {
1051 case LOONGARCH_CPUCFG2:
1052 case LOONGARCH_CPUCFG6:
1053 return 0;
1054 case CPUCFG_KVM_FEATURE:
1055 return 0;
1056 default:
1057 return -ENXIO;
1058 }
1059
1060 return -ENXIO;
1061 }
1062
1063 static int kvm_loongarch_pvtime_has_attr(struct kvm_vcpu *vcpu,
1064 struct kvm_device_attr *attr)
1065 {
1066 if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
1067 || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
1068 return -ENXIO;
1069
1070 return 0;
1071 }
1072
1073 static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
1074 struct kvm_device_attr *attr)
1075 {
1076 int ret = -ENXIO;
1077
1078 switch (attr->group) {
1079 case KVM_LOONGARCH_VCPU_CPUCFG:
1080 ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr);
1081 break;
1082 case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
1083 ret = kvm_loongarch_pvtime_has_attr(vcpu, attr);
1084 break;
1085 default:
1086 break;
1087 }
1088
1089 return ret;
1090 }
1091
1092 static int kvm_loongarch_cpucfg_get_attr(struct kvm_vcpu *vcpu,
1093 struct kvm_device_attr *attr)
1094 {
1095 int ret = 0;
1096 uint64_t val;
1097 uint64_t __user *uaddr = (uint64_t __user *)attr->addr;
1098
1099 switch (attr->attr) {
1100 case 0 ... (KVM_MAX_CPUCFG_REGS - 1):
1101 ret = _kvm_get_cpucfg_mask(attr->attr, &val);
1102 if (ret)
1103 return ret;
1104 break;
1105 case CPUCFG_KVM_FEATURE:
1106 val = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK;
1107 break;
1108 default:
1109 return -ENXIO;
1110 }
1111
1112 put_user(val, uaddr);
1113
1114 return ret;
1115 }
1116
1117 static int kvm_loongarch_pvtime_get_attr(struct kvm_vcpu *vcpu,
1118 struct kvm_device_attr *attr)
1119 {
1120 u64 gpa;
1121 u64 __user *user = (u64 __user *)attr->addr;
1122
1123 if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
1124 || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
1125 return -ENXIO;
1126
1127 gpa = vcpu->arch.st.guest_addr;
1128 if (put_user(gpa, user))
1129 return -EFAULT;
1130
1131 return 0;
1132 }
1133
1134 static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu,
1135 struct kvm_device_attr *attr)
1136 {
1137 int ret = -ENXIO;
1138
1139 switch (attr->group) {
1140 case KVM_LOONGARCH_VCPU_CPUCFG:
1141 ret = kvm_loongarch_cpucfg_get_attr(vcpu, attr);
1142 break;
1143 case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
1144 ret = kvm_loongarch_pvtime_get_attr(vcpu, attr);
1145 break;
1146 default:
1147 break;
1148 }
1149
1150 return ret;
1151 }
1152
1153 static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu,
1154 struct kvm_device_attr *attr)
1155 {
1156 u64 val, valid;
1157 u64 __user *user = (u64 __user *)attr->addr;
1158 struct kvm *kvm = vcpu->kvm;
1159
1160 switch (attr->attr) {
1161 case CPUCFG_KVM_FEATURE:
1162 if (get_user(val, user))
1163 return -EFAULT;
1164
1165 valid = LOONGARCH_PV_FEAT_MASK;
1166 if (val & ~valid)
1167 return -EINVAL;
1168
1169 /* All vCPUs need to set the same PV features */
1170 if ((kvm->arch.pv_features & LOONGARCH_PV_FEAT_UPDATED)
1171 && ((kvm->arch.pv_features & valid) != val))
1172 return -EINVAL;
1173 kvm->arch.pv_features = val | LOONGARCH_PV_FEAT_UPDATED;
1174 return 0;
1175 default:
1176 return -ENXIO;
1177 }
1178 }
1179
1180 static int kvm_loongarch_pvtime_set_attr(struct kvm_vcpu *vcpu,
1181 struct kvm_device_attr *attr)
1182 {
1183 int idx, ret = 0;
1184 u64 gpa, __user *user = (u64 __user *)attr->addr;
1185 struct kvm *kvm = vcpu->kvm;
1186
1187 if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
1188 || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
1189 return -ENXIO;
1190
1191 if (get_user(gpa, user))
1192 return -EFAULT;
1193
1194 if (gpa & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
1195 return -EINVAL;
1196
1197 if (!(gpa & KVM_STEAL_PHYS_VALID)) {
1198 vcpu->arch.st.guest_addr = gpa;
1199 return 0;
1200 }
1201
1202 /* Check the address is in a valid memslot */
1203 idx = srcu_read_lock(&kvm->srcu);
1204 if (kvm_is_error_hva(gfn_to_hva(kvm, gpa >> PAGE_SHIFT)))
1205 ret = -EINVAL;
1206 srcu_read_unlock(&kvm->srcu, idx);
1207
1208 if (!ret) {
1209 vcpu->arch.st.guest_addr = gpa;
1210 vcpu->arch.st.last_steal = current->sched_info.run_delay;
1211 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
1212 }
1213
1214 return ret;
1215 }
1216
1217 static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu,
1218 struct kvm_device_attr *attr)
1219 {
1220 int ret = -ENXIO;
1221
1222 switch (attr->group) {
1223 case KVM_LOONGARCH_VCPU_CPUCFG:
1224 ret = kvm_loongarch_cpucfg_set_attr(vcpu, attr);
1225 break;
1226 case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
1227 ret = kvm_loongarch_pvtime_set_attr(vcpu, attr);
1228 break;
1229 default:
1230 break;
1231 }
1232
1233 return ret;
1234 }
1235
1236 long kvm_arch_vcpu_ioctl(struct file *filp,
1237 unsigned int ioctl, unsigned long arg)
1238 {
1239 long r;
1240 struct kvm_device_attr attr;
1241 void __user *argp = (void __user *)arg;
1242 struct kvm_vcpu *vcpu = filp->private_data;
1243
1244 /*
1245 * Only the software CSR state should be modified here.
1246 *
1247 * If any hardware CSR register is modified, a vcpu_load/vcpu_put pair
1248 * should be used: the CSR registers are owned by this vcpu, so after
1249 * switching to another vcpu, that vcpu needs to reload its CSR registers.
1250 *
1251 * If the software CSR state is modified, the KVM_LARCH_HWCSR_USABLE bit
1252 * should be cleared in vcpu->arch.aux_inuse, so that vcpu_load checks
1253 * the aux_inuse flag and reloads the CSR registers from software.
1254 */
1255
1256 switch (ioctl) {
1257 case KVM_SET_ONE_REG:
1258 case KVM_GET_ONE_REG: {
1259 struct kvm_one_reg reg;
1260
1261 r = -EFAULT;
1262 if (copy_from_user(&reg, argp, sizeof(reg)))
1263 break;
1264 if (ioctl == KVM_SET_ONE_REG) {
1265 r = kvm_set_reg(vcpu, &reg);
1266 vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
1267 } else
1268 r = kvm_get_reg(vcpu, &reg);
1269 break;
1270 }
1271 case KVM_ENABLE_CAP: {
1272 struct kvm_enable_cap cap;
1273
1274 r = -EFAULT;
1275 if (copy_from_user(&cap, argp, sizeof(cap)))
1276 break;
1277 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
1278 break;
1279 }
1280 case KVM_HAS_DEVICE_ATTR: {
1281 r = -EFAULT;
1282 if (copy_from_user(&attr, argp, sizeof(attr)))
1283 break;
1284 r = kvm_loongarch_vcpu_has_attr(vcpu, &attr);
1285 break;
1286 }
1287 case KVM_GET_DEVICE_ATTR: {
1288 r = -EFAULT;
1289 if (copy_from_user(&attr, argp, sizeof(attr)))
1290 break;
1291 r = kvm_loongarch_vcpu_get_attr(vcpu, &attr);
1292 break;
1293 }
1294 case KVM_SET_DEVICE_ATTR: {
1295 r = -EFAULT;
1296 if (copy_from_user(&attr, argp, sizeof(attr)))
1297 break;
1298 r = kvm_loongarch_vcpu_set_attr(vcpu, &attr);
1299 break;
1300 }
1301 default:
1302 r = -ENOIOCTLCMD;
1303 break;
1304 }
1305
1306 return r;
1307 }
1308
1309 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1310 {
1311 int i = 0;
1312
1313 fpu->fcc = vcpu->arch.fpu.fcc;
1314 fpu->fcsr = vcpu->arch.fpu.fcsr;
1315 for (i = 0; i < NUM_FPU_REGS; i++)
1316 memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i], FPU_REG_WIDTH / 64);
1317
1318 return 0;
1319 }
1320
1321 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1322 {
1323 int i = 0;
1324
1325 vcpu->arch.fpu.fcc = fpu->fcc;
1326 vcpu->arch.fpu.fcsr = fpu->fcsr;
1327 for (i = 0; i < NUM_FPU_REGS; i++)
1328 memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i], FPU_REG_WIDTH / 64);
1329
1330 return 0;
1331 }
1332
1333 #ifdef CONFIG_CPU_HAS_LBT
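/* Enable LBT for the guest and restore its LBT context */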
1334 int kvm_own_lbt(struct kvm_vcpu *vcpu)
1335 {
1336 if (!(vcpu->arch.aux_inuse & KVM_LARCH_LBT)) {
1337 set_csr_euen(CSR_EUEN_LBTEN);
1338 _restore_lbt(&vcpu->arch.lbt);
1339 vcpu->arch.aux_inuse |= KVM_LARCH_LBT;
1340 }
1341
1342 return 0;
1343 }
1344
1345 static void kvm_lose_lbt(struct kvm_vcpu *vcpu)
1346 {
1347 preempt_disable();
1348 if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) {
1349 _save_lbt(&vcpu->arch.lbt);
1350 clear_csr_euen(CSR_EUEN_LBTEN);
1351 vcpu->arch.aux_inuse &= ~KVM_LARCH_LBT;
1352 }
1353 preempt_enable();
1354 }
1355
1356 static void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr)
1357 {
1358 /*
1359 * If TM is enabled, saving/restoring the FTOP register will
1360 * trigger an LBT exception, so enable LBT here in advance
1361 */
1362 if (fcsr & FPU_CSR_TM)
1363 kvm_own_lbt(vcpu);
1364 }
1365
1366 static void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu)
1367 {
1368 if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1369 if (vcpu->arch.aux_inuse & KVM_LARCH_LBT)
1370 return;
1371 kvm_check_fcsr(vcpu, read_fcsr(LOONGARCH_FCSR0));
1372 }
1373 }
1374 #else
1375 static inline void kvm_lose_lbt(struct kvm_vcpu *vcpu) { }
1376 static inline void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr) { }
1377 static inline void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu) { }
1378 #endif
1379
1380 /* Enable FPU and restore context */
1381 void kvm_own_fpu(struct kvm_vcpu *vcpu)
1382 {
1383 /*
1384 * Enable FPU for guest
1385 * Set FR and FRE according to guest context
1386 */
1387 kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1388 set_csr_euen(CSR_EUEN_FPEN);
1389
1390 kvm_restore_fpu(&vcpu->arch.fpu);
1391 vcpu->arch.aux_inuse |= KVM_LARCH_FPU;
1392 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
1393 }
1394
1395 #ifdef CONFIG_CPU_HAS_LSX
1396 /* Enable LSX and restore context */
1397 int kvm_own_lsx(struct kvm_vcpu *vcpu)
1398 {
1399 /* Enable LSX for guest */
1400 kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1401 set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN);
1402 switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1403 case KVM_LARCH_FPU:
1404 /*
1405 * Guest FPU state already loaded,
1406 * only restore upper LSX state
1407 */
1408 _restore_lsx_upper(&vcpu->arch.fpu);
1409 break;
1410 default:
1411 /* Neither FP nor LSX already active,
1412 * restore full LSX state
1413 */
1414 kvm_restore_lsx(&vcpu->arch.fpu);
1415 break;
1416 }
1417
1418 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX);
1419 vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU;
1420
1421 return 0;
1422 }
1423 #endif
1424
1425 #ifdef CONFIG_CPU_HAS_LASX
1426 /* Enable LASX and restore context */
1427 int kvm_own_lasx(struct kvm_vcpu *vcpu)
1428 {
1429 kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1430 set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
1431 switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) {
1432 case KVM_LARCH_LSX:
1433 case KVM_LARCH_LSX | KVM_LARCH_FPU:
1434 /* Guest LSX state already loaded, only restore upper LASX state */
1435 _restore_lasx_upper(&vcpu->arch.fpu);
1436 break;
1437 case KVM_LARCH_FPU:
1438 /* Guest FP state already loaded, only restore upper LSX & LASX state */
1439 _restore_lsx_upper(&vcpu->arch.fpu);
1440 _restore_lasx_upper(&vcpu->arch.fpu);
1441 break;
1442 default:
1443 /* Neither FP nor LSX already active, restore full LASX state */
1444 kvm_restore_lasx(&vcpu->arch.fpu);
1445 break;
1446 }
1447
1448 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX);
1449 vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU;
1450
1451 return 0;
1452 }
1453 #endif
1454
1455 /* Save context and disable FPU */
1456 void kvm_lose_fpu(struct kvm_vcpu *vcpu)
1457 {
1458 preempt_disable();
1459
1460 kvm_check_fcsr_alive(vcpu);
1461 if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) {
1462 kvm_save_lasx(&vcpu->arch.fpu);
1463 vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX);
1464 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LASX);
1465
1466 /* Disable LASX & LSX & FPU */
1467 clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
1468 } else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) {
1469 kvm_save_lsx(&vcpu->arch.fpu);
1470 vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU);
1471 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX);
1472
1473 /* Disable LSX & FPU */
1474 clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN);
1475 } else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1476 kvm_save_fpu(&vcpu->arch.fpu);
1477 vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU;
1478 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);
1479
1480 /* Disable FPU */
1481 clear_csr_euen(CSR_EUEN_FPEN);
1482 }
1483 kvm_lose_lbt(vcpu);
1484
1485 preempt_enable();
1486 }
1487
1488 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
1489 {
1490 int intr = (int)irq->irq;
1491
1492 if (intr > 0)
1493 kvm_queue_irq(vcpu, intr);
1494 else if (intr < 0)
1495 kvm_dequeue_irq(vcpu, -intr);
1496 else {
1497 kvm_err("%s: invalid interrupt ioctl %d\n", __func__, irq->irq);
1498 return -EINVAL;
1499 }
1500
1501 kvm_vcpu_kick(vcpu);
1502
1503 return 0;
1504 }
1505
1506 long kvm_arch_vcpu_unlocked_ioctl(struct file *filp, unsigned int ioctl,
1507 unsigned long arg)
1508 {
1509 void __user *argp = (void __user *)arg;
1510 struct kvm_vcpu *vcpu = filp->private_data;
1511
1512 if (ioctl == KVM_INTERRUPT) {
1513 struct kvm_interrupt irq;
1514
1515 if (copy_from_user(&irq, argp, sizeof(irq)))
1516 return -EFAULT;
1517
1518 kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq);
1519
1520 return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
1521 }
1522
1523 return -ENOIOCTLCMD;
1524 }
1525
1526 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
1527 {
1528 return 0;
1529 }
1530
1531 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
1532 {
1533 unsigned long timer_hz;
1534 struct loongarch_csrs *csr;
1535
1536 vcpu->arch.vpid = 0;
1537 vcpu->arch.flush_gpa = INVALID_GPA;
1538
1539 hrtimer_setup(&vcpu->arch.swtimer, kvm_swtimer_wakeup, CLOCK_MONOTONIC,
1540 HRTIMER_MODE_ABS_PINNED_HARD);
1541
1542 /* Get GPA (=HVA) of PGD for kvm hypervisor */
1543 vcpu->arch.kvm_pgd = __pa(vcpu->kvm->arch.pgd);
1544
1545 /*
1546 * Get PGD for primary mmu, virtual address is used since there is
1547 * memory access after loading from CSR_PGD in tlb exception fast path.
1548 */
1549 vcpu->arch.host_pgd = (unsigned long)vcpu->kvm->mm->pgd;
1550
1551 vcpu->arch.handle_exit = kvm_handle_exit;
1552 vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry;
1553 vcpu->arch.csr = kzalloc_obj(struct loongarch_csrs);
1554 if (!vcpu->arch.csr)
1555 return -ENOMEM;
1556
1557 /*
1558 * All kvm exceptions share one exception entry, and the host <-> guest
1559 * switch also switches the ECFG.VS field, so keep the host ECFG.VS info here.
1560 */
1561 vcpu->arch.host_ecfg = (read_csr_ecfg() & CSR_ECFG_VS);
1562
1563 /* Init */
1564 vcpu->arch.last_sched_cpu = -1;
1565
1566 /* Init ipi_state lock */
1567 spin_lock_init(&vcpu->arch.ipi_state.lock);
1568
1569 /*
1570 * Initialize guest register state to valid architectural reset state.
1571 */
1572 timer_hz = calc_const_freq();
1573 kvm_init_timer(vcpu, timer_hz);
1574
1575 /* Set Initialize mode for guest */
1576 csr = vcpu->arch.csr;
1577 kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CRMD, CSR_CRMD_DA);
1578
1579 /* Set cpuid */
1580 kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id);
1581 kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);
1582
1583 /* Start with no pending virtual guest interrupts */
1584 csr->csrs[LOONGARCH_CSR_GINTC] = 0;
1585
1586 return 0;
1587 }
1588
1589 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
1590 {
1591 }
1592
1593 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1594 {
1595 int cpu;
1596 struct kvm_context *context;
1597
1598 hrtimer_cancel(&vcpu->arch.swtimer);
1599 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
1600 kvm_drop_cpuid(vcpu);
1601 kfree(vcpu->arch.csr);
1602
1603 /*
1604 * If the vCPU is freed and reused as another vCPU, we don't want the
1605 * matching pointer wrongly hanging around in last_vcpu.
1606 */
1607 for_each_possible_cpu(cpu) {
1608 context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
1609 if (context->last_vcpu == vcpu)
1610 context->last_vcpu = NULL;
1611 }
1612 }
1613
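/*
 * Restore guest timer and CSR state on this CPU. The hardware CSRs are only
 * reloaded when the vcpu has migrated or was not the last vcpu to run here,
 * which is tracked with the KVM_LARCH_HWCSR_USABLE flag.
 */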
1614 static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1615 {
1616 bool migrated;
1617 struct kvm_context *context;
1618 struct loongarch_csrs *csr = vcpu->arch.csr;
1619
1620 /*
1621 * Have we migrated to a different CPU?
1622 * If so, any old guest TLB state may be stale.
1623 */
1624 migrated = (vcpu->arch.last_sched_cpu != cpu);
1625
1626 /*
1627 * Was this the last vCPU to run on this CPU?
1628 * If not, any old guest state from this vCPU will have been clobbered.
1629 */
1630 context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
1631 if (migrated || (context->last_vcpu != vcpu))
1632 vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
1633 context->last_vcpu = vcpu;
1634
1635 /* Restore timer state regardless */
1636 kvm_restore_timer(vcpu);
1637 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
1638
1639 /* Don't bother restoring registers multiple times unless necessary */
1640 if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE)
1641 return 0;
1642
1643 write_csr_gcntc((ulong)vcpu->kvm->arch.time_offset);
1644
1645 /* Restore guest CSR registers */
1646 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
1647 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
1648 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
1649 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_MISC);
1650 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
1651 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ERA);
1652 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADV);
1653 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADI);
1654 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
1655 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
1656 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
1657 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
1658 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
1659 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ASID);
1660 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
1661 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
1662 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
1663 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
1664 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
1665 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
1666 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
1667 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS0);
1668 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS1);
1669 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS2);
1670 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS3);
1671 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS4);
1672 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS5);
1673 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS6);
1674 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS7);
1675 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TMID);
1676 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
1677 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
1678 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
1679 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
1680 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
1681 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
1682 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
1683 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
1684 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
1685 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
1686 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
1687 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
1688 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
1689 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
1690
1691 if (kvm_guest_has_msgint(&vcpu->arch)) {
1692 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_IPR);
1693 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ISR0);
1694 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ISR1);
1695 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ISR2);
1696 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ISR3);
1697 }
1698
1699 /* Restore Root.GINTC from unused Guest.GINTC register */
1700 write_csr_gintc(csr->csrs[LOONGARCH_CSR_GINTC]);
1701
1702 /*
1703 * We should clear linked load bit to break interrupted atomics. This
1704 * prevents a SC on the next vCPU from succeeding by matching a LL on
1705 * the previous vCPU.
1706 */
1707 if (vcpu->kvm->created_vcpus > 1)
1708 set_gcsr_llbctl(CSR_LLBCTL_WCLLB);
1709
1710 vcpu->arch.aux_inuse |= KVM_LARCH_HWCSR_USABLE;
1711
1712 return 0;
1713 }
1714
1715 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1716 {
1717 unsigned long flags;
1718
1719 local_irq_save(flags);
1720 /* Restore guest state to registers */
1721 _kvm_vcpu_load(vcpu, cpu);
1722 local_irq_restore(flags);
1723 }
1724
1725 static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
1726 {
1727 struct loongarch_csrs *csr = vcpu->arch.csr;
1728
1729 kvm_lose_fpu(vcpu);
1730
1731 /*
1732 * Update the software CSR state from hardware if it is stale.
1733 * Most CSR registers are kept unchanged across a process context
1734 * switch, except for registers such as the remaining timer tick
1735 * value and the injected interrupt state.
1736 */
1737 if (vcpu->arch.aux_inuse & KVM_LARCH_SWCSR_LATEST)
1738 goto out;
1739
1740 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
1741 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
1742 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
1743 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_MISC);
1744 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
1745 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ERA);
1746 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADV);
1747 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADI);
1748 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
1749 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
1750 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
1751 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
1752 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
1753 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ASID);
1754 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
1755 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
1756 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
1757 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
1758 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
1759 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
1760 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
1761 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG1);
1762 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG2);
1763 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG3);
1764 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS0);
1765 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS1);
1766 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS2);
1767 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS3);
1768 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS4);
1769 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS5);
1770 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS6);
1771 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS7);
1772 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TMID);
1773 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
1774 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
1775 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
1776 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
1777 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
1778 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
1779 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
1780 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
1781 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
1782 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
1783 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
1784 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
1785 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
1786 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
1787
1788 if (kvm_guest_has_msgint(&vcpu->arch)) {
1789 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_IPR);
1790 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ISR0);
1791 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ISR1);
1792 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ISR2);
1793 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ISR3);
1794 }
1795
1796 vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST;
1797
1798 out:
1799 kvm_save_timer(vcpu);
1800 /* Save Root.GINTC into unused Guest.GINTC register */
1801 csr->csrs[LOONGARCH_CSR_GINTC] = read_csr_gintc();
1802
1803 return 0;
1804 }
1805
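/*
 * Mark this vcpu as preempted in its published steal-time area, so the guest
 * can observe KVM_VCPU_PREEMPTED while the vcpu is scheduled out.
 */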
1806 static void kvm_vcpu_set_pv_preempted(struct kvm_vcpu *vcpu)
1807 {
1808 gpa_t gpa;
1809 struct gfn_to_hva_cache *ghc;
1810 struct kvm_memslots *slots;
1811 struct kvm_steal_time __user *st;
1812
1813 gpa = vcpu->arch.st.guest_addr;
1814 if (!(gpa & KVM_STEAL_PHYS_VALID))
1815 return;
1816
1817 /* The vCPU may be preempted many times */
1818 if (vcpu->arch.st.preempted)
1819 return;
1820
1821 /* This happens on process exit */
1822 if (unlikely(current->mm != vcpu->kvm->mm))
1823 return;
1824
1825 gpa &= KVM_STEAL_PHYS_MASK;
1826 ghc = &vcpu->arch.st.cache;
1827 slots = kvm_memslots(vcpu->kvm);
1828 if (slots->generation != ghc->generation || gpa != ghc->gpa) {
1829 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) {
1830 ghc->gpa = INVALID_GPA;
1831 return;
1832 }
1833 }
1834
1835 st = (struct kvm_steal_time __user *)ghc->hva;
1836 unsafe_put_user(KVM_VCPU_PREEMPTED, &st->preempted, out);
1837 vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
1838 out:
1839 mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
1840 }
1841
1842 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1843 {
1844 int cpu, idx;
1845 unsigned long flags;
1846
1847 if (vcpu->preempted && kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_PREEMPT)) {
1848 /*
1849 * Take the srcu lock as memslots will be accessed to check
1850 * the gfn cache generation against the memslots generation.
1851 */
1852 idx = srcu_read_lock(&vcpu->kvm->srcu);
1853 kvm_vcpu_set_pv_preempted(vcpu);
1854 srcu_read_unlock(&vcpu->kvm->srcu, idx);
1855 }
1856
1857 local_irq_save(flags);
1858 cpu = smp_processor_id();
1859 vcpu->arch.last_sched_cpu = cpu;
1860
1861 /* Save guest state in registers */
1862 _kvm_vcpu_put(vcpu, cpu);
1863 local_irq_restore(flags);
1864 }
1865
1866 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
1867 {
1868 int r = -EINTR;
1869 struct kvm_run *run = vcpu->run;
1870
1871 if (vcpu->mmio_needed) {
1872 if (!vcpu->mmio_is_write)
1873 kvm_complete_mmio_read(vcpu, run);
1874 vcpu->mmio_needed = 0;
1875 }
1876
1877 switch (run->exit_reason) {
1878 case KVM_EXIT_HYPERCALL:
1879 kvm_complete_user_service(vcpu, run);
1880 break;
1881 case KVM_EXIT_LOONGARCH_IOCSR:
1882 if (!run->iocsr_io.is_write)
1883 kvm_complete_iocsr_read(vcpu, run);
1884 break;
1885 }
1886
1887 if (!vcpu->wants_to_run)
1888 return r;
1889
1890 /* Clear exit_reason */
1891 run->exit_reason = KVM_EXIT_UNKNOWN;
1892 lose_fpu(1);
1893 vcpu_load(vcpu);
1894 kvm_sigset_activate(vcpu);
1895 r = kvm_pre_enter_guest(vcpu);
1896 if (r != RESUME_GUEST)
1897 goto out;
1898
1899 guest_timing_enter_irqoff();
1900 guest_state_enter_irqoff();
1901 trace_kvm_enter(vcpu);
1902 r = kvm_loongarch_ops->enter_guest(run, vcpu);
1903
1904 trace_kvm_out(vcpu);
1905 /*
1906 * Guest exit is already recorded in kvm_handle_exit();
1907 * the return value here must not be RESUME_GUEST
1908 */
1909 local_irq_enable();
1910 out:
1911 kvm_sigset_deactivate(vcpu);
1912 vcpu_put(vcpu);
1913
1914 return r;
1915 }
1916