1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
4 */
5
6 #include <linux/kvm_host.h>
7 #include <linux/entry-kvm.h>
8 #include <asm/fpu.h>
9 #include <asm/lbt.h>
10 #include <asm/loongarch.h>
11 #include <asm/setup.h>
12 #include <asm/time.h>
13
14 #define CREATE_TRACE_POINTS
15 #include "trace.h"
16
17 const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
18 KVM_GENERIC_VCPU_STATS(),
19 STATS_DESC_COUNTER(VCPU, int_exits),
20 STATS_DESC_COUNTER(VCPU, idle_exits),
21 STATS_DESC_COUNTER(VCPU, cpucfg_exits),
22 STATS_DESC_COUNTER(VCPU, signal_exits),
23 STATS_DESC_COUNTER(VCPU, hypercall_exits)
24 };
25
26 const struct kvm_stats_header kvm_vcpu_stats_header = {
27 .name_size = KVM_STATS_NAME_SIZE,
28 .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
29 .id_offset = sizeof(struct kvm_stats_header),
30 .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
31 .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
32 sizeof(kvm_vcpu_stats_desc),
33 };
34
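/*
 * Host/guest PMU context switching helpers: the host counter and control
 * register values are parked in the per-CPU kvm_context (vcpu->kvm->arch.vmcs),
 * while the guest's PERFCNTR/PERFCTRL values live in the vcpu's CSR
 * software state.
 */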
35 static inline void kvm_save_host_pmu(struct kvm_vcpu *vcpu)
36 {
37 struct kvm_context *context;
38
39 context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
40 context->perf_cntr[0] = read_csr_perfcntr0();
41 context->perf_cntr[1] = read_csr_perfcntr1();
42 context->perf_cntr[2] = read_csr_perfcntr2();
43 context->perf_cntr[3] = read_csr_perfcntr3();
44 context->perf_ctrl[0] = write_csr_perfctrl0(0);
45 context->perf_ctrl[1] = write_csr_perfctrl1(0);
46 context->perf_ctrl[2] = write_csr_perfctrl2(0);
47 context->perf_ctrl[3] = write_csr_perfctrl3(0);
48 }
49
50 static inline void kvm_restore_host_pmu(struct kvm_vcpu *vcpu)
51 {
52 struct kvm_context *context;
53
54 context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
55 write_csr_perfcntr0(context->perf_cntr[0]);
56 write_csr_perfcntr1(context->perf_cntr[1]);
57 write_csr_perfcntr2(context->perf_cntr[2]);
58 write_csr_perfcntr3(context->perf_cntr[3]);
59 write_csr_perfctrl0(context->perf_ctrl[0]);
60 write_csr_perfctrl1(context->perf_ctrl[1]);
61 write_csr_perfctrl2(context->perf_ctrl[2]);
62 write_csr_perfctrl3(context->perf_ctrl[3]);
63 }
64
65
66 static inline void kvm_save_guest_pmu(struct kvm_vcpu *vcpu)
67 {
68 struct loongarch_csrs *csr = vcpu->arch.csr;
69
70 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
71 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
72 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
73 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
74 kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
75 kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
76 kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
77 kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
78 }
79
80 static inline void kvm_restore_guest_pmu(struct kvm_vcpu *vcpu)
81 {
82 struct loongarch_csrs *csr = vcpu->arch.csr;
83
84 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
85 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
86 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
87 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
88 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
89 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
90 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
91 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
92 }
93
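/*
 * Give the hardware PMU to the guest: stash the host PMU state, expose the
 * guest counters through GCFG.GPERF and load the guest PMU CSRs.
 */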
94 static int kvm_own_pmu(struct kvm_vcpu *vcpu)
95 {
96 unsigned long val;
97
98 if (!kvm_guest_has_pmu(&vcpu->arch))
99 return -EINVAL;
100
101 kvm_save_host_pmu(vcpu);
102
103 /* Set PM0-PM(num) to guest */
104 val = read_csr_gcfg() & ~CSR_GCFG_GPERF;
105 val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT;
106 write_csr_gcfg(val);
107
108 kvm_restore_guest_pmu(vcpu);
109
110 return 0;
111 }
112
113 static void kvm_lose_pmu(struct kvm_vcpu *vcpu)
114 {
115 unsigned long val;
116 struct loongarch_csrs *csr = vcpu->arch.csr;
117
118 if (!(vcpu->arch.aux_inuse & KVM_LARCH_PMU))
119 return;
120
121 kvm_save_guest_pmu(vcpu);
122
123 /* Disable pmu access from guest */
124 write_csr_gcfg(read_csr_gcfg() & ~CSR_GCFG_GPERF);
125
126 /*
127 * Clear KVM_LARCH_PMU if the guest is not using PMU CSRs when
128 * exiting the guest, so that the PMU CSR context does not need
129 * to be handled the next time we trap into the guest.
130 */
131 val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
132 val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
133 val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
134 val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
135 if (!(val & KVM_PMU_EVENT_ENABLED))
136 vcpu->arch.aux_inuse &= ~KVM_LARCH_PMU;
137
138 kvm_restore_host_pmu(vcpu);
139 }
140
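/*
 * kvm_restore_pmu() defers the actual CSR reload to kvm_check_pmu(), which
 * runs with interrupts disabled right before guest entry and processes the
 * KVM_REQ_PMU request raised here.
 */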
141 static void kvm_restore_pmu(struct kvm_vcpu *vcpu)
142 {
143 if ((vcpu->arch.aux_inuse & KVM_LARCH_PMU))
144 kvm_make_request(KVM_REQ_PMU, vcpu);
145 }
146
147 static void kvm_check_pmu(struct kvm_vcpu *vcpu)
148 {
149 if (kvm_check_request(KVM_REQ_PMU, vcpu)) {
150 kvm_own_pmu(vcpu);
151 vcpu->arch.aux_inuse |= KVM_LARCH_PMU;
152 }
153 }
154
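/*
 * Publish accumulated steal time to the guest's kvm_steal_time area. The
 * version field follows a seqcount-like protocol: it is bumped to an odd
 * value before the update and to an even value afterwards, so the guest can
 * detect a torn read.
 */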
155 static void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
156 {
157 u32 version;
158 u64 steal;
159 gpa_t gpa;
160 struct kvm_memslots *slots;
161 struct kvm_steal_time __user *st;
162 struct gfn_to_hva_cache *ghc;
163
164 ghc = &vcpu->arch.st.cache;
165 gpa = vcpu->arch.st.guest_addr;
166 if (!(gpa & KVM_STEAL_PHYS_VALID))
167 return;
168
169 gpa &= KVM_STEAL_PHYS_MASK;
170 slots = kvm_memslots(vcpu->kvm);
171 if (slots->generation != ghc->generation || gpa != ghc->gpa) {
172 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) {
173 ghc->gpa = INVALID_GPA;
174 return;
175 }
176 }
177
178 st = (struct kvm_steal_time __user *)ghc->hva;
179 unsafe_get_user(version, &st->version, out);
180 if (version & 1)
181 version += 1; /* first time write, random junk */
182
183 version += 1;
184 unsafe_put_user(version, &st->version, out);
185 smp_wmb();
186
187 unsafe_get_user(steal, &st->steal, out);
188 steal += current->sched_info.run_delay - vcpu->arch.st.last_steal;
189 vcpu->arch.st.last_steal = current->sched_info.run_delay;
190 unsafe_put_user(steal, &st->steal, out);
191
192 smp_wmb();
193 version += 1;
194 unsafe_put_user(version, &st->version, out);
195 out:
196 mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
197 }
198
199 /*
200 * kvm_check_requests - check and handle pending vCPU requests
201 *
202 * Return: RESUME_GUEST if we should enter the guest
203 * RESUME_HOST if we should exit to userspace
204 */
205 static int kvm_check_requests(struct kvm_vcpu *vcpu)
206 {
207 if (!kvm_request_pending(vcpu))
208 return RESUME_GUEST;
209
210 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
211 vcpu->arch.vpid = 0; /* Drop vpid for this vCPU */
212
213 if (kvm_dirty_ring_check_request(vcpu))
214 return RESUME_HOST;
215
216 if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
217 kvm_update_stolen_time(vcpu);
218
219 return RESUME_GUEST;
220 }
221
222 static void kvm_late_check_requests(struct kvm_vcpu *vcpu)
223 {
224 lockdep_assert_irqs_disabled();
225 if (kvm_check_request(KVM_REQ_TLB_FLUSH_GPA, vcpu))
226 if (vcpu->arch.flush_gpa != INVALID_GPA) {
227 kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa);
228 vcpu->arch.flush_gpa = INVALID_GPA;
229 }
230 }
231
232 /*
233 * Check and handle pending signals and vCPU requests, etc.
234 * Runs with irqs and preemption enabled
235 *
236 * Return: RESUME_GUEST if we should enter the guest
237 * RESUME_HOST if we should exit to userspace
238 * < 0 if we should exit to userspace, where the return value
239 * indicates an error
240 */
241 static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
242 {
243 int idx, ret;
244
245 /*
246 * Check conditions before entering the guest
247 */
248 ret = xfer_to_guest_mode_handle_work(vcpu);
249 if (ret < 0)
250 return ret;
251
252 idx = srcu_read_lock(&vcpu->kvm->srcu);
253 ret = kvm_check_requests(vcpu);
254 srcu_read_unlock(&vcpu->kvm->srcu, idx);
255
256 return ret;
257 }
258
259 /*
260 * Called with irq enabled
261 *
262 * Return: RESUME_GUEST if we should enter the guest, and irq disabled
263 * Others if we should exit to userspace
264 */
265 static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
266 {
267 int ret;
268
269 do {
270 ret = kvm_enter_guest_check(vcpu);
271 if (ret != RESUME_GUEST)
272 break;
273
274 /*
275 * Handle vcpu timer, interrupts, check requests and
276 * check vmid before the vcpu enters the guest
277 */
278 local_irq_disable();
279 kvm_deliver_intr(vcpu);
280 kvm_deliver_exception(vcpu);
281 /* Make sure the vcpu mode has been written */
282 smp_store_mb(vcpu->mode, IN_GUEST_MODE);
283 kvm_check_vpid(vcpu);
284 kvm_check_pmu(vcpu);
285
286 /*
287 * Called after kvm_check_vpid(), since that function updates
288 * CSR.GSTAT which is used by kvm_flush_tlb_gpa(), and it may
289 * also clear the KVM_REQ_TLB_FLUSH_GPA pending bit
290 */
291 kvm_late_check_requests(vcpu);
292 vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY);
293 /* Clear KVM_LARCH_SWCSR_LATEST as CSR will change when enter guest */
294 vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
295
296 if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) {
297 /* make sure the vcpu mode has been written */
298 smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE);
299 local_irq_enable();
300 ret = -EAGAIN;
301 }
302 } while (ret != RESUME_GUEST);
303
304 return ret;
305 }
306
307 /*
308 * Return 1 for resume guest and "<= 0" for resume host.
309 */
310 static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
311 {
312 int ret = RESUME_GUEST;
313 unsigned long estat = vcpu->arch.host_estat;
314 u32 intr = estat & 0x1fff; /* Ignore NMI */
315 u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
316
317 vcpu->mode = OUTSIDE_GUEST_MODE;
318
319 /* Set a default exit reason */
320 run->exit_reason = KVM_EXIT_UNKNOWN;
321
322 kvm_lose_pmu(vcpu);
323
324 guest_timing_exit_irqoff();
325 guest_state_exit_irqoff();
326 local_irq_enable();
327
328 trace_kvm_exit(vcpu, ecode);
329 if (ecode) {
330 ret = kvm_handle_fault(vcpu, ecode);
331 } else {
332 WARN(!intr, "vm exiting with suspicious irq\n");
333 ++vcpu->stat.int_exits;
334 }
335
336 if (ret == RESUME_GUEST)
337 ret = kvm_pre_enter_guest(vcpu);
338
339 if (ret != RESUME_GUEST) {
340 local_irq_disable();
341 return ret;
342 }
343
344 guest_timing_enter_irqoff();
345 guest_state_enter_irqoff();
346 trace_kvm_reenter(vcpu);
347
348 return RESUME_GUEST;
349 }
350
351 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
352 {
353 return !!(vcpu->arch.irq_pending) &&
354 vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE;
355 }
356
357 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
358 {
359 return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
360 }
361
362 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
363 {
364 return false;
365 }
366
367 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
368 {
369 return VM_FAULT_SIGBUS;
370 }
371
372 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
373 struct kvm_translation *tr)
374 {
375 return -EINVAL;
376 }
377
378 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
379 {
380 int ret;
381
382 /* Protect from TOD sync and vcpu_load/put() */
383 preempt_disable();
384 ret = kvm_pending_timer(vcpu) ||
385 kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT) & (1 << INT_TI);
386 preempt_enable();
387
388 return ret;
389 }
390
391 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
392 {
393 int i;
394
395 kvm_debug("vCPU Register Dump:\n");
396 kvm_debug("\tPC = 0x%08lx\n", vcpu->arch.pc);
397 kvm_debug("\tExceptions: %08lx\n", vcpu->arch.irq_pending);
398
399 for (i = 0; i < 32; i += 4) {
400 kvm_debug("\tGPR%02d: %08lx %08lx %08lx %08lx\n", i,
401 vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1],
402 vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
403 }
404
405 kvm_debug("\tCRMD: 0x%08lx, ESTAT: 0x%08lx\n",
406 kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD),
407 kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT));
408
409 kvm_debug("\tERA: 0x%08lx\n", kvm_read_hw_gcsr(LOONGARCH_CSR_ERA));
410
411 return 0;
412 }
413
414 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
415 struct kvm_mp_state *mp_state)
416 {
417 *mp_state = vcpu->arch.mp_state;
418
419 return 0;
420 }
421
422 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
423 struct kvm_mp_state *mp_state)
424 {
425 int ret = 0;
426
427 switch (mp_state->mp_state) {
428 case KVM_MP_STATE_RUNNABLE:
429 vcpu->arch.mp_state = *mp_state;
430 break;
431 default:
432 ret = -EINVAL;
433 }
434
435 return ret;
436 }
437
438 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
439 struct kvm_guest_debug *dbg)
440 {
441 if (dbg->control & ~KVM_GUESTDBG_VALID_MASK)
442 return -EINVAL;
443
444 if (dbg->control & KVM_GUESTDBG_ENABLE)
445 vcpu->guest_debug = dbg->control;
446 else
447 vcpu->guest_debug = 0;
448
449 return 0;
450 }
451
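/*
 * Bind this vcpu to a guest physical CPUID. The mapping is recorded in the
 * VM-wide phyid_map so that kvm_get_vcpu_by_cpuid() can resolve a CPUID back
 * to its vcpu; changing or sharing an already-claimed CPUID is rejected.
 */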
452 static inline int kvm_set_cpuid(struct kvm_vcpu *vcpu, u64 val)
453 {
454 int cpuid;
455 struct kvm_phyid_map *map;
456 struct loongarch_csrs *csr = vcpu->arch.csr;
457
458 if (val >= KVM_MAX_PHYID)
459 return -EINVAL;
460
461 map = vcpu->kvm->arch.phyid_map;
462 cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);
463
464 spin_lock(&vcpu->kvm->arch.phyid_map_lock);
465 if ((cpuid < KVM_MAX_PHYID) && map->phys_map[cpuid].enabled) {
466 /* Discard duplicated CPUID set operation */
467 if (cpuid == val) {
468 spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
469 return 0;
470 }
471
472 /*
473 * CPUID was already set before;
474 * forbid changing to a different CPUID at runtime
475 */
476 spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
477 return -EINVAL;
478 }
479
480 if (map->phys_map[val].enabled) {
481 /* Discard duplicated CPUID set operation */
482 if (vcpu == map->phys_map[val].vcpu) {
483 spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
484 return 0;
485 }
486
487 /*
488 * The new CPUID is already in use by another vcpu;
489 * forbid sharing the same CPUID between different vcpus
490 */
491 spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
492 return -EINVAL;
493 }
494
495 kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, val);
496 map->phys_map[val].enabled = true;
497 map->phys_map[val].vcpu = vcpu;
498 spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
499
500 return 0;
501 }
502
503 static inline void kvm_drop_cpuid(struct kvm_vcpu *vcpu)
504 {
505 int cpuid;
506 struct kvm_phyid_map *map;
507 struct loongarch_csrs *csr = vcpu->arch.csr;
508
509 map = vcpu->kvm->arch.phyid_map;
510 cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);
511
512 if (cpuid >= KVM_MAX_PHYID)
513 return;
514
515 spin_lock(&vcpu->kvm->arch.phyid_map_lock);
516 if (map->phys_map[cpuid].enabled) {
517 map->phys_map[cpuid].vcpu = NULL;
518 map->phys_map[cpuid].enabled = false;
519 kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);
520 }
521 spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
522 }
523
524 struct kvm_vcpu *kvm_get_vcpu_by_cpuid(struct kvm *kvm, int cpuid)
525 {
526 struct kvm_phyid_map *map;
527
528 if (cpuid >= KVM_MAX_PHYID)
529 return NULL;
530
531 map = kvm->arch.phyid_map;
532 if (!map->phys_map[cpuid].enabled)
533 return NULL;
534
535 return map->phys_map[cpuid].vcpu;
536 }
537
538 static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val)
539 {
540 unsigned long gintc;
541 struct loongarch_csrs *csr = vcpu->arch.csr;
542
543 if (get_gcsr_flag(id) & INVALID_GCSR)
544 return -EINVAL;
545
546 if (id == LOONGARCH_CSR_ESTAT) {
547 preempt_disable();
548 vcpu_load(vcpu);
549 /*
550 * Sync pending interrupts into ESTAT so that the interrupts
551 * are preserved across the VM migration stage
552 */
553 kvm_deliver_intr(vcpu);
554 vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
555 vcpu_put(vcpu);
556 preempt_enable();
557
558 /* ESTAT IP0~IP7 are taken from GINTC */
559 gintc = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC) & 0xff;
560 *val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT) | (gintc << 2);
561 return 0;
562 }
563
564 /*
565 * Get software CSR state since software state is consistent
566 * with hardware for synchronous ioctl
567 */
568 *val = kvm_read_sw_gcsr(csr, id);
569
570 return 0;
571 }
572
573 static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
574 {
575 int ret = 0, gintc;
576 struct loongarch_csrs *csr = vcpu->arch.csr;
577
578 if (get_gcsr_flag(id) & INVALID_GCSR)
579 return -EINVAL;
580
581 if (id == LOONGARCH_CSR_CPUID)
582 return kvm_set_cpuid(vcpu, val);
583
584 if (id == LOONGARCH_CSR_ESTAT) {
585 /* ESTAT IP0~IP7 are injected through GINTC */
586 gintc = (val >> 2) & 0xff;
587 kvm_set_sw_gcsr(csr, LOONGARCH_CSR_GINTC, gintc);
588
589 gintc = val & ~(0xffUL << 2);
590 kvm_set_sw_gcsr(csr, LOONGARCH_CSR_ESTAT, gintc);
591
592 return ret;
593 }
594
595 kvm_write_sw_gcsr(csr, id, val);
596
597 /*
598 * After modifying a PMU CSR value of the vcpu, set KVM_REQ_PMU
599 * if the PMU CSRs are in use.
600 */
601 if (id >= LOONGARCH_CSR_PERFCTRL0 && id <= LOONGARCH_CSR_PERFCNTR3) {
602 unsigned long val;
603
604 val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0) |
605 kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1) |
606 kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2) |
607 kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
608
609 if (val & KVM_PMU_EVENT_ENABLED)
610 kvm_make_request(KVM_REQ_PMU, vcpu);
611 }
612
613 return ret;
614 }
615
616 static int _kvm_get_cpucfg_mask(int id, u64 *v)
617 {
618 if (id < 0 || id >= KVM_MAX_CPUCFG_REGS)
619 return -EINVAL;
620
621 switch (id) {
622 case LOONGARCH_CPUCFG0:
623 *v = GENMASK(31, 0);
624 return 0;
625 case LOONGARCH_CPUCFG1:
626 /* CPUCFG1_MSGINT is not supported by KVM */
627 *v = GENMASK(25, 0);
628 return 0;
629 case LOONGARCH_CPUCFG2:
630 /* CPUCFG2 features unconditionally supported by KVM */
631 *v = CPUCFG2_FP | CPUCFG2_FPSP | CPUCFG2_FPDP |
632 CPUCFG2_FPVERS | CPUCFG2_LLFTP | CPUCFG2_LLFTPREV |
633 CPUCFG2_LSPW | CPUCFG2_LAM;
634 /*
635 * For the ISA extensions listed below, if one is supported
636 * by the host, then it is also supported by KVM.
637 */
638 if (cpu_has_lsx)
639 *v |= CPUCFG2_LSX;
640 if (cpu_has_lasx)
641 *v |= CPUCFG2_LASX;
642 if (cpu_has_lbt_x86)
643 *v |= CPUCFG2_X86BT;
644 if (cpu_has_lbt_arm)
645 *v |= CPUCFG2_ARMBT;
646 if (cpu_has_lbt_mips)
647 *v |= CPUCFG2_MIPSBT;
648
649 return 0;
650 case LOONGARCH_CPUCFG3:
651 *v = GENMASK(16, 0);
652 return 0;
653 case LOONGARCH_CPUCFG4:
654 case LOONGARCH_CPUCFG5:
655 *v = GENMASK(31, 0);
656 return 0;
657 case LOONGARCH_CPUCFG6:
658 if (cpu_has_pmp)
659 *v = GENMASK(14, 0);
660 else
661 *v = 0;
662 return 0;
663 case LOONGARCH_CPUCFG16:
664 *v = GENMASK(16, 0);
665 return 0;
666 case LOONGARCH_CPUCFG17 ... LOONGARCH_CPUCFG20:
667 *v = GENMASK(30, 0);
668 return 0;
669 default:
670 /*
671 * CPUCFG bits should be zero if reserved by HW or not
672 * supported by KVM.
673 */
674 *v = 0;
675 return 0;
676 }
677 }
678
679 static int kvm_check_cpucfg(int id, u64 val)
680 {
681 int ret;
682 u64 mask = 0;
683
684 ret = _kvm_get_cpucfg_mask(id, &mask);
685 if (ret)
686 return ret;
687
688 if (val & ~mask)
689 /* Unsupported features and/or the higher 32 bits should not be set */
690 return -EINVAL;
691
692 switch (id) {
693 case LOONGARCH_CPUCFG2:
694 if (!(val & CPUCFG2_LLFTP))
695 /* Guests must have a constant timer */
696 return -EINVAL;
697 if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP)))
698 /* Single- and double-precision floating point must both be set when FP is enabled */
699 return -EINVAL;
700 if ((val & CPUCFG2_LSX) && !(val & CPUCFG2_FP))
701 /* LSX architecturally implies FP but val does not satisfy that */
702 return -EINVAL;
703 if ((val & CPUCFG2_LASX) && !(val & CPUCFG2_LSX))
704 /* LASX architecturally implies LSX and FP but val does not satisfy that */
705 return -EINVAL;
706 return 0;
707 case LOONGARCH_CPUCFG6:
708 if (val & CPUCFG6_PMP) {
709 u32 host = read_cpucfg(LOONGARCH_CPUCFG6);
710 if ((val & CPUCFG6_PMBITS) != (host & CPUCFG6_PMBITS))
711 return -EINVAL;
712 if ((val & CPUCFG6_PMNUM) > (host & CPUCFG6_PMNUM))
713 return -EINVAL;
714 if ((val & CPUCFG6_UPM) && !(host & CPUCFG6_UPM))
715 return -EINVAL;
716 }
717 return 0;
718 default:
719 /*
720 * Values for the other CPUCFG IDs are not being further validated
721 * besides the mask check above.
722 */
723 return 0;
724 }
725 }
726
727 static int kvm_get_one_reg(struct kvm_vcpu *vcpu,
728 const struct kvm_one_reg *reg, u64 *v)
729 {
730 int id, ret = 0;
731 u64 type = reg->id & KVM_REG_LOONGARCH_MASK;
732
733 switch (type) {
734 case KVM_REG_LOONGARCH_CSR:
735 id = KVM_GET_IOC_CSR_IDX(reg->id);
736 ret = _kvm_getcsr(vcpu, id, v);
737 break;
738 case KVM_REG_LOONGARCH_CPUCFG:
739 id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
740 if (id >= 0 && id < KVM_MAX_CPUCFG_REGS)
741 *v = vcpu->arch.cpucfg[id];
742 else
743 ret = -EINVAL;
744 break;
745 case KVM_REG_LOONGARCH_LBT:
746 if (!kvm_guest_has_lbt(&vcpu->arch))
747 return -ENXIO;
748
749 switch (reg->id) {
750 case KVM_REG_LOONGARCH_LBT_SCR0:
751 *v = vcpu->arch.lbt.scr0;
752 break;
753 case KVM_REG_LOONGARCH_LBT_SCR1:
754 *v = vcpu->arch.lbt.scr1;
755 break;
756 case KVM_REG_LOONGARCH_LBT_SCR2:
757 *v = vcpu->arch.lbt.scr2;
758 break;
759 case KVM_REG_LOONGARCH_LBT_SCR3:
760 *v = vcpu->arch.lbt.scr3;
761 break;
762 case KVM_REG_LOONGARCH_LBT_EFLAGS:
763 *v = vcpu->arch.lbt.eflags;
764 break;
765 case KVM_REG_LOONGARCH_LBT_FTOP:
766 *v = vcpu->arch.fpu.ftop;
767 break;
768 default:
769 ret = -EINVAL;
770 break;
771 }
772 break;
773 case KVM_REG_LOONGARCH_KVM:
774 switch (reg->id) {
775 case KVM_REG_LOONGARCH_COUNTER:
776 *v = drdtime() + vcpu->kvm->arch.time_offset;
777 break;
778 case KVM_REG_LOONGARCH_DEBUG_INST:
779 *v = INSN_HVCL | KVM_HCALL_SWDBG;
780 break;
781 default:
782 ret = -EINVAL;
783 break;
784 }
785 break;
786 default:
787 ret = -EINVAL;
788 break;
789 }
790
791 return ret;
792 }
793
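/*
 * kvm_get_reg() and kvm_set_reg() back the KVM_GET_ONE_REG and
 * KVM_SET_ONE_REG vcpu ioctls. As an illustrative sketch (not taken from
 * this file), a userspace caller reads a register roughly like this, where
 * "id" is a KVM_REG_LOONGARCH_* register id with size KVM_REG_SIZE_U64:
 *
 *	struct kvm_one_reg reg = {
 *		.id   = id,
 *		.addr = (__u64)(unsigned long)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 *
 * Only 64-bit register accesses are accepted below.
 */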
794 static int kvm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
795 {
796 int ret = 0;
797 u64 v, size = reg->id & KVM_REG_SIZE_MASK;
798
799 switch (size) {
800 case KVM_REG_SIZE_U64:
801 ret = kvm_get_one_reg(vcpu, reg, &v);
802 if (ret)
803 return ret;
804 ret = put_user(v, (u64 __user *)(long)reg->addr);
805 break;
806 default:
807 ret = -EINVAL;
808 break;
809 }
810
811 return ret;
812 }
813
814 static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
815 const struct kvm_one_reg *reg, u64 v)
816 {
817 int id, ret = 0;
818 u64 type = reg->id & KVM_REG_LOONGARCH_MASK;
819
820 switch (type) {
821 case KVM_REG_LOONGARCH_CSR:
822 id = KVM_GET_IOC_CSR_IDX(reg->id);
823 ret = _kvm_setcsr(vcpu, id, v);
824 break;
825 case KVM_REG_LOONGARCH_CPUCFG:
826 id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
827 ret = kvm_check_cpucfg(id, v);
828 if (ret)
829 break;
830 vcpu->arch.cpucfg[id] = (u32)v;
831 if (id == LOONGARCH_CPUCFG6)
832 vcpu->arch.max_pmu_csrid =
833 LOONGARCH_CSR_PERFCTRL0 + 2 * kvm_get_pmu_num(&vcpu->arch) + 1;
834 break;
835 case KVM_REG_LOONGARCH_LBT:
836 if (!kvm_guest_has_lbt(&vcpu->arch))
837 return -ENXIO;
838
839 switch (reg->id) {
840 case KVM_REG_LOONGARCH_LBT_SCR0:
841 vcpu->arch.lbt.scr0 = v;
842 break;
843 case KVM_REG_LOONGARCH_LBT_SCR1:
844 vcpu->arch.lbt.scr1 = v;
845 break;
846 case KVM_REG_LOONGARCH_LBT_SCR2:
847 vcpu->arch.lbt.scr2 = v;
848 break;
849 case KVM_REG_LOONGARCH_LBT_SCR3:
850 vcpu->arch.lbt.scr3 = v;
851 break;
852 case KVM_REG_LOONGARCH_LBT_EFLAGS:
853 vcpu->arch.lbt.eflags = v;
854 break;
855 case KVM_REG_LOONGARCH_LBT_FTOP:
856 vcpu->arch.fpu.ftop = v;
857 break;
858 default:
859 ret = -EINVAL;
860 break;
861 }
862 break;
863 case KVM_REG_LOONGARCH_KVM:
864 switch (reg->id) {
865 case KVM_REG_LOONGARCH_COUNTER:
866 /*
867 * gftoffset is relative to the board, not the vcpu;
868 * on an SMP system it is only set the first time (by vcpu 0)
869 */
870 if (vcpu->vcpu_id == 0)
871 vcpu->kvm->arch.time_offset = (signed long)(v - drdtime());
872 break;
873 case KVM_REG_LOONGARCH_VCPU_RESET:
874 vcpu->arch.st.guest_addr = 0;
875 memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
876 memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
877 break;
878 default:
879 ret = -EINVAL;
880 break;
881 }
882 break;
883 default:
884 ret = -EINVAL;
885 break;
886 }
887
888 return ret;
889 }
890
891 static int kvm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
892 {
893 int ret = 0;
894 u64 v, size = reg->id & KVM_REG_SIZE_MASK;
895
896 switch (size) {
897 case KVM_REG_SIZE_U64:
898 ret = get_user(v, (u64 __user *)(long)reg->addr);
899 if (ret)
900 return ret;
901 break;
902 default:
903 return -EINVAL;
904 }
905
906 return kvm_set_one_reg(vcpu, reg, v);
907 }
908
909 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
910 {
911 return -ENOIOCTLCMD;
912 }
913
914 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
915 {
916 return -ENOIOCTLCMD;
917 }
918
919 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
920 {
921 int i;
922
923 for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
924 regs->gpr[i] = vcpu->arch.gprs[i];
925
926 regs->pc = vcpu->arch.pc;
927
928 return 0;
929 }
930
931 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
932 {
933 int i;
934
935 for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
936 vcpu->arch.gprs[i] = regs->gpr[i];
937
938 vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
939 vcpu->arch.pc = regs->pc;
940
941 return 0;
942 }
943
944 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
945 struct kvm_enable_cap *cap)
946 {
947 /* FPU is enabled by default, will support LSX/LASX later. */
948 return -EINVAL;
949 }
950
951 static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu,
952 struct kvm_device_attr *attr)
953 {
954 switch (attr->attr) {
955 case LOONGARCH_CPUCFG2:
956 case LOONGARCH_CPUCFG6:
957 return 0;
958 case CPUCFG_KVM_FEATURE:
959 return 0;
960 default:
961 return -ENXIO;
962 }
963
964 return -ENXIO;
965 }
966
967 static int kvm_loongarch_pvtime_has_attr(struct kvm_vcpu *vcpu,
968 struct kvm_device_attr *attr)
969 {
970 if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
971 || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
972 return -ENXIO;
973
974 return 0;
975 }
976
977 static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
978 struct kvm_device_attr *attr)
979 {
980 int ret = -ENXIO;
981
982 switch (attr->group) {
983 case KVM_LOONGARCH_VCPU_CPUCFG:
984 ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr);
985 break;
986 case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
987 ret = kvm_loongarch_pvtime_has_attr(vcpu, attr);
988 break;
989 default:
990 break;
991 }
992
993 return ret;
994 }
995
996 static int kvm_loongarch_cpucfg_get_attr(struct kvm_vcpu *vcpu,
997 struct kvm_device_attr *attr)
998 {
999 int ret = 0;
1000 uint64_t val;
1001 uint64_t __user *uaddr = (uint64_t __user *)attr->addr;
1002
1003 switch (attr->attr) {
1004 case 0 ... (KVM_MAX_CPUCFG_REGS - 1):
1005 ret = _kvm_get_cpucfg_mask(attr->attr, &val);
1006 if (ret)
1007 return ret;
1008 break;
1009 case CPUCFG_KVM_FEATURE:
1010 val = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK;
1011 break;
1012 default:
1013 return -ENXIO;
1014 }
1015
1016 put_user(val, uaddr);
1017
1018 return ret;
1019 }
1020
1021 static int kvm_loongarch_pvtime_get_attr(struct kvm_vcpu *vcpu,
1022 struct kvm_device_attr *attr)
1023 {
1024 u64 gpa;
1025 u64 __user *user = (u64 __user *)attr->addr;
1026
1027 if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
1028 || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
1029 return -ENXIO;
1030
1031 gpa = vcpu->arch.st.guest_addr;
1032 if (put_user(gpa, user))
1033 return -EFAULT;
1034
1035 return 0;
1036 }
1037
1038 static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu,
1039 struct kvm_device_attr *attr)
1040 {
1041 int ret = -ENXIO;
1042
1043 switch (attr->group) {
1044 case KVM_LOONGARCH_VCPU_CPUCFG:
1045 ret = kvm_loongarch_cpucfg_get_attr(vcpu, attr);
1046 break;
1047 case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
1048 ret = kvm_loongarch_pvtime_get_attr(vcpu, attr);
1049 break;
1050 default:
1051 break;
1052 }
1053
1054 return ret;
1055 }
1056
1057 static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu,
1058 struct kvm_device_attr *attr)
1059 {
1060 u64 val, valid;
1061 u64 __user *user = (u64 __user *)attr->addr;
1062 struct kvm *kvm = vcpu->kvm;
1063
1064 switch (attr->attr) {
1065 case CPUCFG_KVM_FEATURE:
1066 if (get_user(val, user))
1067 return -EFAULT;
1068
1069 valid = LOONGARCH_PV_FEAT_MASK;
1070 if (val & ~valid)
1071 return -EINVAL;
1072
1073 /* All vCPUs need to set the same PV features */
1074 if ((kvm->arch.pv_features & LOONGARCH_PV_FEAT_UPDATED)
1075 && ((kvm->arch.pv_features & valid) != val))
1076 return -EINVAL;
1077 kvm->arch.pv_features = val | LOONGARCH_PV_FEAT_UPDATED;
1078 return 0;
1079 default:
1080 return -ENXIO;
1081 }
1082 }
1083
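/*
 * Set the guest physical address of the steal-time area. As an illustrative
 * sketch (not taken from this file), userspace fills a struct kvm_device_attr
 * with group KVM_LOONGARCH_VCPU_PVTIME_CTRL, attr KVM_LOONGARCH_VCPU_PVTIME_GPA
 * and addr pointing at a u64 GPA, then issues the KVM_SET_DEVICE_ATTR vcpu
 * ioctl. The GPA must fall in a valid memslot whenever its
 * KVM_STEAL_PHYS_VALID bit is set.
 */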
1084 static int kvm_loongarch_pvtime_set_attr(struct kvm_vcpu *vcpu,
1085 struct kvm_device_attr *attr)
1086 {
1087 int idx, ret = 0;
1088 u64 gpa, __user *user = (u64 __user *)attr->addr;
1089 struct kvm *kvm = vcpu->kvm;
1090
1091 if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
1092 || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
1093 return -ENXIO;
1094
1095 if (get_user(gpa, user))
1096 return -EFAULT;
1097
1098 if (gpa & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
1099 return -EINVAL;
1100
1101 if (!(gpa & KVM_STEAL_PHYS_VALID)) {
1102 vcpu->arch.st.guest_addr = gpa;
1103 return 0;
1104 }
1105
1106 /* Check the address is in a valid memslot */
1107 idx = srcu_read_lock(&kvm->srcu);
1108 if (kvm_is_error_hva(gfn_to_hva(kvm, gpa >> PAGE_SHIFT)))
1109 ret = -EINVAL;
1110 srcu_read_unlock(&kvm->srcu, idx);
1111
1112 if (!ret) {
1113 vcpu->arch.st.guest_addr = gpa;
1114 vcpu->arch.st.last_steal = current->sched_info.run_delay;
1115 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
1116 }
1117
1118 return ret;
1119 }
1120
1121 static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu,
1122 struct kvm_device_attr *attr)
1123 {
1124 int ret = -ENXIO;
1125
1126 switch (attr->group) {
1127 case KVM_LOONGARCH_VCPU_CPUCFG:
1128 ret = kvm_loongarch_cpucfg_set_attr(vcpu, attr);
1129 break;
1130 case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
1131 ret = kvm_loongarch_pvtime_set_attr(vcpu, attr);
1132 break;
1133 default:
1134 break;
1135 }
1136
1137 return ret;
1138 }
1139
1140 long kvm_arch_vcpu_ioctl(struct file *filp,
1141 unsigned int ioctl, unsigned long arg)
1142 {
1143 long r;
1144 struct kvm_device_attr attr;
1145 void __user *argp = (void __user *)arg;
1146 struct kvm_vcpu *vcpu = filp->private_data;
1147
1148 /*
1149 * Only the software CSR state should be modified here.
1150 *
1151 * If any hardware CSR register is modified, a vcpu_load/vcpu_put pair
1152 * should be used. Since the CSR registers are owned by this vcpu, a
1153 * switch to another vcpu requires that vcpu to reload its CSR registers.
1154 *
1155 * If the software CSR state is modified, the KVM_LARCH_HWCSR_USABLE bit
1156 * should be cleared in vcpu->arch.aux_inuse, so that vcpu_load checks
1157 * the aux_inuse flag and reloads the CSR registers from software.
1158 */
1159
1160 switch (ioctl) {
1161 case KVM_SET_ONE_REG:
1162 case KVM_GET_ONE_REG: {
1163 struct kvm_one_reg reg;
1164
1165 r = -EFAULT;
1166 if (copy_from_user(&reg, argp, sizeof(reg)))
1167 break;
1168 if (ioctl == KVM_SET_ONE_REG) {
1169 r = kvm_set_reg(vcpu, &reg);
1170 vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
1171 } else
1172 r = kvm_get_reg(vcpu, &reg);
1173 break;
1174 }
1175 case KVM_ENABLE_CAP: {
1176 struct kvm_enable_cap cap;
1177
1178 r = -EFAULT;
1179 if (copy_from_user(&cap, argp, sizeof(cap)))
1180 break;
1181 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
1182 break;
1183 }
1184 case KVM_HAS_DEVICE_ATTR: {
1185 r = -EFAULT;
1186 if (copy_from_user(&attr, argp, sizeof(attr)))
1187 break;
1188 r = kvm_loongarch_vcpu_has_attr(vcpu, &attr);
1189 break;
1190 }
1191 case KVM_GET_DEVICE_ATTR: {
1192 r = -EFAULT;
1193 if (copy_from_user(&attr, argp, sizeof(attr)))
1194 break;
1195 r = kvm_loongarch_vcpu_get_attr(vcpu, &attr);
1196 break;
1197 }
1198 case KVM_SET_DEVICE_ATTR: {
1199 r = -EFAULT;
1200 if (copy_from_user(&attr, argp, sizeof(attr)))
1201 break;
1202 r = kvm_loongarch_vcpu_set_attr(vcpu, &attr);
1203 break;
1204 }
1205 default:
1206 r = -ENOIOCTLCMD;
1207 break;
1208 }
1209
1210 return r;
1211 }
1212
1213 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1214 {
1215 int i = 0;
1216
1217 fpu->fcc = vcpu->arch.fpu.fcc;
1218 fpu->fcsr = vcpu->arch.fpu.fcsr;
1219 for (i = 0; i < NUM_FPU_REGS; i++)
1220 memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i], FPU_REG_WIDTH / 64);
1221
1222 return 0;
1223 }
1224
1225 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1226 {
1227 int i = 0;
1228
1229 vcpu->arch.fpu.fcc = fpu->fcc;
1230 vcpu->arch.fpu.fcsr = fpu->fcsr;
1231 for (i = 0; i < NUM_FPU_REGS; i++)
1232 memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i], FPU_REG_WIDTH / 64);
1233
1234 return 0;
1235 }
1236
1237 #ifdef CONFIG_CPU_HAS_LBT
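/*
 * LBT (binary translation) register ownership: the guest's scratch registers
 * and eflags are loaded on demand in kvm_own_lbt() and saved back in
 * kvm_lose_lbt() when the vcpu gives up the hardware.
 */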
1238 int kvm_own_lbt(struct kvm_vcpu *vcpu)
1239 {
1240 if (!kvm_guest_has_lbt(&vcpu->arch))
1241 return -EINVAL;
1242
1243 preempt_disable();
1244 set_csr_euen(CSR_EUEN_LBTEN);
1245 _restore_lbt(&vcpu->arch.lbt);
1246 vcpu->arch.aux_inuse |= KVM_LARCH_LBT;
1247 preempt_enable();
1248
1249 return 0;
1250 }
1251
1252 static void kvm_lose_lbt(struct kvm_vcpu *vcpu)
1253 {
1254 preempt_disable();
1255 if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) {
1256 _save_lbt(&vcpu->arch.lbt);
1257 clear_csr_euen(CSR_EUEN_LBTEN);
1258 vcpu->arch.aux_inuse &= ~KVM_LARCH_LBT;
1259 }
1260 preempt_enable();
1261 }
1262
1263 static void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr)
1264 {
1265 /*
1266 * If TM is enabled, the TOP register save/restore will
1267 * cause an LBT exception, so enable LBT here in advance
1268 */
1269 if (fcsr & FPU_CSR_TM)
1270 kvm_own_lbt(vcpu);
1271 }
1272
1273 static void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu)
1274 {
1275 if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1276 if (vcpu->arch.aux_inuse & KVM_LARCH_LBT)
1277 return;
1278 kvm_check_fcsr(vcpu, read_fcsr(LOONGARCH_FCSR0));
1279 }
1280 }
1281 #else
1282 static inline void kvm_lose_lbt(struct kvm_vcpu *vcpu) { }
1283 static inline void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr) { }
1284 static inline void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu) { }
1285 #endif
1286
1287 /* Enable FPU and restore context */
1288 void kvm_own_fpu(struct kvm_vcpu *vcpu)
1289 {
1290 preempt_disable();
1291
1292 /*
1293 * Enable FPU for guest
1294 * Set FR and FRE according to guest context
1295 */
1296 kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1297 set_csr_euen(CSR_EUEN_FPEN);
1298
1299 kvm_restore_fpu(&vcpu->arch.fpu);
1300 vcpu->arch.aux_inuse |= KVM_LARCH_FPU;
1301 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
1302
1303 preempt_enable();
1304 }
1305
1306 #ifdef CONFIG_CPU_HAS_LSX
1307 /* Enable LSX and restore context */
1308 int kvm_own_lsx(struct kvm_vcpu *vcpu)
1309 {
1310 if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch))
1311 return -EINVAL;
1312
1313 preempt_disable();
1314
1315 /* Enable LSX for guest */
1316 kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1317 set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN);
1318 switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1319 case KVM_LARCH_FPU:
1320 /*
1321 * Guest FPU state already loaded,
1322 * only restore upper LSX state
1323 */
1324 _restore_lsx_upper(&vcpu->arch.fpu);
1325 break;
1326 default:
1327 /* Neither FP nor LSX is already active,
1328 * restore full LSX state
1329 */
1330 kvm_restore_lsx(&vcpu->arch.fpu);
1331 break;
1332 }
1333
1334 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX);
1335 vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU;
1336 preempt_enable();
1337
1338 return 0;
1339 }
1340 #endif
1341
1342 #ifdef CONFIG_CPU_HAS_LASX
1343 /* Enable LASX and restore context */
1344 int kvm_own_lasx(struct kvm_vcpu *vcpu)
1345 {
1346 if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch) || !kvm_guest_has_lasx(&vcpu->arch))
1347 return -EINVAL;
1348
1349 preempt_disable();
1350
1351 kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1352 set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
1353 switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) {
1354 case KVM_LARCH_LSX:
1355 case KVM_LARCH_LSX | KVM_LARCH_FPU:
1356 /* Guest LSX state already loaded, only restore upper LASX state */
1357 _restore_lasx_upper(&vcpu->arch.fpu);
1358 break;
1359 case KVM_LARCH_FPU:
1360 /* Guest FP state already loaded, only restore upper LSX & LASX state */
1361 _restore_lsx_upper(&vcpu->arch.fpu);
1362 _restore_lasx_upper(&vcpu->arch.fpu);
1363 break;
1364 default:
1365 /* Neither FP nor LSX is already active, restore full LASX state */
1366 kvm_restore_lasx(&vcpu->arch.fpu);
1367 break;
1368 }
1369
1370 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX);
1371 vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU;
1372 preempt_enable();
1373
1374 return 0;
1375 }
1376 #endif
1377
1378 /* Save context and disable FPU */
1379 void kvm_lose_fpu(struct kvm_vcpu *vcpu)
1380 {
1381 preempt_disable();
1382
1383 kvm_check_fcsr_alive(vcpu);
1384 if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) {
1385 kvm_save_lasx(&vcpu->arch.fpu);
1386 vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX);
1387 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LASX);
1388
1389 /* Disable LASX & LSX & FPU */
1390 clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
1391 } else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) {
1392 kvm_save_lsx(&vcpu->arch.fpu);
1393 vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU);
1394 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX);
1395
1396 /* Disable LSX & FPU */
1397 clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN);
1398 } else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1399 kvm_save_fpu(&vcpu->arch.fpu);
1400 vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU;
1401 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);
1402
1403 /* Disable FPU */
1404 clear_csr_euen(CSR_EUEN_FPEN);
1405 }
1406 kvm_lose_lbt(vcpu);
1407
1408 preempt_enable();
1409 }
1410
1411 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
1412 {
1413 int intr = (int)irq->irq;
1414
1415 if (intr > 0)
1416 kvm_queue_irq(vcpu, intr);
1417 else if (intr < 0)
1418 kvm_dequeue_irq(vcpu, -intr);
1419 else {
1420 kvm_err("%s: invalid interrupt ioctl %d\n", __func__, irq->irq);
1421 return -EINVAL;
1422 }
1423
1424 kvm_vcpu_kick(vcpu);
1425
1426 return 0;
1427 }
1428
1429 long kvm_arch_vcpu_async_ioctl(struct file *filp,
1430 unsigned int ioctl, unsigned long arg)
1431 {
1432 void __user *argp = (void __user *)arg;
1433 struct kvm_vcpu *vcpu = filp->private_data;
1434
1435 if (ioctl == KVM_INTERRUPT) {
1436 struct kvm_interrupt irq;
1437
1438 if (copy_from_user(&irq, argp, sizeof(irq)))
1439 return -EFAULT;
1440
1441 kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq);
1442
1443 return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
1444 }
1445
1446 return -ENOIOCTLCMD;
1447 }
1448
1449 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
1450 {
1451 return 0;
1452 }
1453
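/*
 * Allocate and initialize the architecture-specific vcpu state: the software
 * CSR array, the wakeup hrtimer, the constant-frequency guest timer and the
 * architectural reset values of the guest CSRs.
 */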
1454 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
1455 {
1456 unsigned long timer_hz;
1457 struct loongarch_csrs *csr;
1458
1459 vcpu->arch.vpid = 0;
1460 vcpu->arch.flush_gpa = INVALID_GPA;
1461
1462 hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD);
1463 vcpu->arch.swtimer.function = kvm_swtimer_wakeup;
1464
1465 vcpu->arch.handle_exit = kvm_handle_exit;
1466 vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry;
1467 vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL);
1468 if (!vcpu->arch.csr)
1469 return -ENOMEM;
1470
1471 /*
1472 * All kvm exceptions share one exception entry, and the host <-> guest
1473 * switch also switches the ECFG.VS field, so keep the host ECFG.VS info here.
1474 */
1475 vcpu->arch.host_ecfg = (read_csr_ecfg() & CSR_ECFG_VS);
1476
1477 /* Init */
1478 vcpu->arch.last_sched_cpu = -1;
1479
1480 /* Init ipi_state lock */
1481 spin_lock_init(&vcpu->arch.ipi_state.lock);
1482
1483 /*
1484 * Initialize guest register state to valid architectural reset state.
1485 */
1486 timer_hz = calc_const_freq();
1487 kvm_init_timer(vcpu, timer_hz);
1488
1489 /* Set Initialize mode for guest */
1490 csr = vcpu->arch.csr;
1491 kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CRMD, CSR_CRMD_DA);
1492
1493 /* Set cpuid */
1494 kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id);
1495 kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);
1496
1497 /* Start with no pending virtual guest interrupts */
1498 csr->csrs[LOONGARCH_CSR_GINTC] = 0;
1499
1500 return 0;
1501 }
1502
1503 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
1504 {
1505 }
1506
1507 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1508 {
1509 int cpu;
1510 struct kvm_context *context;
1511
1512 hrtimer_cancel(&vcpu->arch.swtimer);
1513 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
1514 kvm_drop_cpuid(vcpu);
1515 kfree(vcpu->arch.csr);
1516
1517 /*
1518 * If the vCPU is freed and reused as another vCPU, we don't want the
1519 * matching pointer wrongly hanging around in last_vcpu.
1520 */
1521 for_each_possible_cpu(cpu) {
1522 context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
1523 if (context->last_vcpu == vcpu)
1524 context->last_vcpu = NULL;
1525 }
1526 }
1527
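/*
 * Called with interrupts disabled from kvm_arch_vcpu_load(). Reloads the
 * guest CSR state into hardware unless this vcpu was the last one to run on
 * this physical CPU and KVM_LARCH_HWCSR_USABLE is still set.
 */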
1528 static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1529 {
1530 bool migrated;
1531 struct kvm_context *context;
1532 struct loongarch_csrs *csr = vcpu->arch.csr;
1533
1534 /*
1535 * Have we migrated to a different CPU?
1536 * If so, any old guest TLB state may be stale.
1537 */
1538 migrated = (vcpu->arch.last_sched_cpu != cpu);
1539
1540 /*
1541 * Was this the last vCPU to run on this CPU?
1542 * If not, any old guest state from this vCPU will have been clobbered.
1543 */
1544 context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
1545 if (migrated || (context->last_vcpu != vcpu))
1546 vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
1547 context->last_vcpu = vcpu;
1548
1549 /* Restore timer state regardless */
1550 kvm_restore_timer(vcpu);
1551
1552 /* Control guest page CCA attribute */
1553 change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT);
1554 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
1555
1556 /* Restore hardware PMU CSRs */
1557 kvm_restore_pmu(vcpu);
1558
1559 /* Don't bother restoring registers multiple times unless necessary */
1560 if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE)
1561 return 0;
1562
1563 write_csr_gcntc((ulong)vcpu->kvm->arch.time_offset);
1564
1565 /* Restore guest CSR registers */
1566 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
1567 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
1568 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
1569 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_MISC);
1570 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
1571 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ERA);
1572 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADV);
1573 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADI);
1574 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
1575 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
1576 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
1577 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
1578 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
1579 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ASID);
1580 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
1581 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
1582 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
1583 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
1584 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
1585 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
1586 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
1587 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS0);
1588 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS1);
1589 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS2);
1590 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS3);
1591 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS4);
1592 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS5);
1593 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS6);
1594 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS7);
1595 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TMID);
1596 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
1597 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
1598 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
1599 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
1600 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
1601 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
1602 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
1603 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
1604 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
1605 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
1606 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
1607 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
1608 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
1609 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
1610
1611 /* Restore Root.GINTC from unused Guest.GINTC register */
1612 write_csr_gintc(csr->csrs[LOONGARCH_CSR_GINTC]);
1613
1614 /*
1615 * We should clear linked load bit to break interrupted atomics. This
1616 * prevents an SC on the next vCPU from succeeding by matching an LL on
1617 * the previous vCPU.
1618 */
1619 if (vcpu->kvm->created_vcpus > 1)
1620 set_gcsr_llbctl(CSR_LLBCTL_WCLLB);
1621
1622 vcpu->arch.aux_inuse |= KVM_LARCH_HWCSR_USABLE;
1623
1624 return 0;
1625 }
1626
1627 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1628 {
1629 unsigned long flags;
1630
1631 local_irq_save(flags);
1632 /* Restore guest state to registers */
1633 _kvm_vcpu_load(vcpu, cpu);
1634 local_irq_restore(flags);
1635 }
1636
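/*
 * Called with interrupts disabled from kvm_arch_vcpu_put(). Saves the
 * FPU/SIMD/LBT state and, if the software CSR copy is stale, snapshots the
 * hardware guest CSRs back into the software array.
 */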
1637 static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
1638 {
1639 struct loongarch_csrs *csr = vcpu->arch.csr;
1640
1641 kvm_lose_fpu(vcpu);
1642
1643 /*
1644 * Update the CSR state from hardware if the software CSR state is
1645 * stale; most CSR registers are kept unchanged across a process
1646 * context switch, except for registers like the remaining timer
1647 * tick value and the injected interrupt state.
1648 */
1649 if (vcpu->arch.aux_inuse & KVM_LARCH_SWCSR_LATEST)
1650 goto out;
1651
1652 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
1653 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
1654 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
1655 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_MISC);
1656 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
1657 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ERA);
1658 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADV);
1659 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADI);
1660 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
1661 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
1662 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
1663 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
1664 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
1665 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ASID);
1666 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
1667 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
1668 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
1669 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
1670 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
1671 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
1672 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
1673 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG1);
1674 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG2);
1675 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG3);
1676 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS0);
1677 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS1);
1678 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS2);
1679 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS3);
1680 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS4);
1681 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS5);
1682 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS6);
1683 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS7);
1684 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TMID);
1685 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
1686 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
1687 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
1688 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
1689 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
1690 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
1691 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
1692 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
1693 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
1694 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
1695 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
1696 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
1697 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
1698 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
1699
1700 vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST;
1701
1702 out:
1703 kvm_save_timer(vcpu);
1704 /* Save Root.GINTC into unused Guest.GINTC register */
1705 csr->csrs[LOONGARCH_CSR_GINTC] = read_csr_gintc();
1706
1707 return 0;
1708 }
1709
1710 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1711 {
1712 int cpu;
1713 unsigned long flags;
1714
1715 local_irq_save(flags);
1716 cpu = smp_processor_id();
1717 vcpu->arch.last_sched_cpu = cpu;
1718
1719 /* Save guest state in registers */
1720 _kvm_vcpu_put(vcpu, cpu);
1721 local_irq_restore(flags);
1722 }
1723
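/*
 * Main KVM_RUN entry point: complete any pending MMIO/IOCSR read, prepare
 * for guest entry, then enter the guest via kvm_loongarch_ops->enter_guest().
 * Exits that the kernel can handle re-enter the guest from kvm_handle_exit();
 * everything else returns to userspace.
 */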
1724 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
1725 {
1726 int r = -EINTR;
1727 struct kvm_run *run = vcpu->run;
1728
1729 if (vcpu->mmio_needed) {
1730 if (!vcpu->mmio_is_write)
1731 kvm_complete_mmio_read(vcpu, run);
1732 vcpu->mmio_needed = 0;
1733 }
1734
1735 if (run->exit_reason == KVM_EXIT_LOONGARCH_IOCSR) {
1736 if (!run->iocsr_io.is_write)
1737 kvm_complete_iocsr_read(vcpu, run);
1738 }
1739
1740 if (!vcpu->wants_to_run)
1741 return r;
1742
1743 /* Clear exit_reason */
1744 run->exit_reason = KVM_EXIT_UNKNOWN;
1745 lose_fpu(1);
1746 vcpu_load(vcpu);
1747 kvm_sigset_activate(vcpu);
1748 r = kvm_pre_enter_guest(vcpu);
1749 if (r != RESUME_GUEST)
1750 goto out;
1751
1752 guest_timing_enter_irqoff();
1753 guest_state_enter_irqoff();
1754 trace_kvm_enter(vcpu);
1755 r = kvm_loongarch_ops->enter_guest(run, vcpu);
1756
1757 trace_kvm_out(vcpu);
1758 /*
1759 * Guest exit is already recorded at kvm_handle_exit();
1760 * the return value must not be RESUME_GUEST
1761 */
1762 local_irq_enable();
1763 out:
1764 kvm_sigset_deactivate(vcpu);
1765 vcpu_put(vcpu);
1766
1767 return r;
1768 }
1769