1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
4 */
5
6 #include <linux/kvm_host.h>
7 #include <asm/fpu.h>
8 #include <asm/lbt.h>
9 #include <asm/loongarch.h>
10 #include <asm/setup.h>
11 #include <asm/time.h>
12
13 #define CREATE_TRACE_POINTS
14 #include "trace.h"
15
16 const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
17 KVM_GENERIC_VCPU_STATS(),
18 STATS_DESC_COUNTER(VCPU, int_exits),
19 STATS_DESC_COUNTER(VCPU, idle_exits),
20 STATS_DESC_COUNTER(VCPU, cpucfg_exits),
21 STATS_DESC_COUNTER(VCPU, signal_exits),
22 STATS_DESC_COUNTER(VCPU, hypercall_exits),
23 STATS_DESC_COUNTER(VCPU, ipi_read_exits),
24 STATS_DESC_COUNTER(VCPU, ipi_write_exits),
25 STATS_DESC_COUNTER(VCPU, eiointc_read_exits),
26 STATS_DESC_COUNTER(VCPU, eiointc_write_exits),
27 STATS_DESC_COUNTER(VCPU, pch_pic_read_exits),
28 STATS_DESC_COUNTER(VCPU, pch_pic_write_exits)
29 };
30
31 const struct kvm_stats_header kvm_vcpu_stats_header = {
32 .name_size = KVM_STATS_NAME_SIZE,
33 .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
34 .id_offset = sizeof(struct kvm_stats_header),
35 .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
36 .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
37 sizeof(kvm_vcpu_stats_desc),
38 };
39
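/*
* Save the host PMU counters and control registers into the per-CPU
* kvm_context, then stop the host counters by clearing the control
* registers so that the guest PMU state can be loaded.
*/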
static inline void kvm_save_host_pmu(struct kvm_vcpu *vcpu)
41 {
42 struct kvm_context *context;
43
44 context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
45 context->perf_cntr[0] = read_csr_perfcntr0();
46 context->perf_cntr[1] = read_csr_perfcntr1();
47 context->perf_cntr[2] = read_csr_perfcntr2();
48 context->perf_cntr[3] = read_csr_perfcntr3();
49 context->perf_ctrl[0] = write_csr_perfctrl0(0);
50 context->perf_ctrl[1] = write_csr_perfctrl1(0);
51 context->perf_ctrl[2] = write_csr_perfctrl2(0);
52 context->perf_ctrl[3] = write_csr_perfctrl3(0);
53 }
54
static inline void kvm_restore_host_pmu(struct kvm_vcpu *vcpu)
56 {
57 struct kvm_context *context;
58
59 context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
60 write_csr_perfcntr0(context->perf_cntr[0]);
61 write_csr_perfcntr1(context->perf_cntr[1]);
62 write_csr_perfcntr2(context->perf_cntr[2]);
63 write_csr_perfcntr3(context->perf_cntr[3]);
64 write_csr_perfctrl0(context->perf_ctrl[0]);
65 write_csr_perfctrl1(context->perf_ctrl[1]);
66 write_csr_perfctrl2(context->perf_ctrl[2]);
67 write_csr_perfctrl3(context->perf_ctrl[3]);
68 }
69
70
static inline void kvm_save_guest_pmu(struct kvm_vcpu *vcpu)
72 {
73 struct loongarch_csrs *csr = vcpu->arch.csr;
74
75 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
76 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
77 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
78 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
79 kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
80 kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
81 kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
82 kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
83 }
84
static inline void kvm_restore_guest_pmu(struct kvm_vcpu *vcpu)
86 {
87 struct loongarch_csrs *csr = vcpu->arch.csr;
88
89 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
90 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
91 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
92 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
93 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
94 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
95 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
96 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
97 }
98
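/*
* Pass the hardware PMU through to the guest: save the host PMU state,
* expose PM0..PM(num) to the guest via the GCFG.GPERF field, and then
* restore the guest PMU CSR context.
*/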
static int kvm_own_pmu(struct kvm_vcpu *vcpu)
100 {
101 unsigned long val;
102
103 if (!kvm_guest_has_pmu(&vcpu->arch))
104 return -EINVAL;
105
106 kvm_save_host_pmu(vcpu);
107
108 /* Set PM0-PM(num) to guest */
109 val = read_csr_gcfg() & ~CSR_GCFG_GPERF;
110 val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT;
111 write_csr_gcfg(val);
112
113 kvm_restore_guest_pmu(vcpu);
114
115 return 0;
116 }
117
static void kvm_lose_pmu(struct kvm_vcpu *vcpu)
119 {
120 unsigned long val;
121 struct loongarch_csrs *csr = vcpu->arch.csr;
122
123 if (!(vcpu->arch.aux_inuse & KVM_LARCH_PMU))
124 return;
125
126 kvm_save_guest_pmu(vcpu);
127
128 /* Disable pmu access from guest */
129 write_csr_gcfg(read_csr_gcfg() & ~CSR_GCFG_GPERF);
130
/*
* Clear KVM_LARCH_PMU if the guest is not using the PMU CSRs when
* exiting the guest, so that the PMU CSR context does not need to be
* handled the next time we trap into the guest.
*
* Otherwise set the KVM_REQ_PMU request bit so that the guest PMU
* context is restored before the next guest entry.
*/
139 val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
140 val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
141 val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
142 val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
143 if (!(val & KVM_PMU_EVENT_ENABLED))
144 vcpu->arch.aux_inuse &= ~KVM_LARCH_PMU;
145 else
146 kvm_make_request(KVM_REQ_PMU, vcpu);
147
148 kvm_restore_host_pmu(vcpu);
149 }
150
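/*
* If a KVM_REQ_PMU request is pending, hand the PMU over to the guest
* and mark the guest PMU context as live in aux_inuse.
*/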
static void kvm_check_pmu(struct kvm_vcpu *vcpu)
152 {
153 if (kvm_check_request(KVM_REQ_PMU, vcpu)) {
154 kvm_own_pmu(vcpu);
155 vcpu->arch.aux_inuse |= KVM_LARCH_PMU;
156 }
157 }
158
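/*
* Publish the accumulated steal time to the guest's kvm_steal_time
* area. The version field is bumped to an odd value before the update
* and back to an even value afterwards, so the guest can detect a
* concurrent update and retry.
*/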
static void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
160 {
161 u32 version;
162 u64 steal;
163 gpa_t gpa;
164 struct kvm_memslots *slots;
165 struct kvm_steal_time __user *st;
166 struct gfn_to_hva_cache *ghc;
167
168 ghc = &vcpu->arch.st.cache;
169 gpa = vcpu->arch.st.guest_addr;
170 if (!(gpa & KVM_STEAL_PHYS_VALID))
171 return;
172
173 gpa &= KVM_STEAL_PHYS_MASK;
174 slots = kvm_memslots(vcpu->kvm);
175 if (slots->generation != ghc->generation || gpa != ghc->gpa) {
176 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) {
177 ghc->gpa = INVALID_GPA;
178 return;
179 }
180 }
181
182 st = (struct kvm_steal_time __user *)ghc->hva;
183 unsafe_get_user(version, &st->version, out);
184 if (version & 1)
185 version += 1; /* first time write, random junk */
186
187 version += 1;
188 unsafe_put_user(version, &st->version, out);
189 smp_wmb();
190
191 unsafe_get_user(steal, &st->steal, out);
192 steal += current->sched_info.run_delay - vcpu->arch.st.last_steal;
193 vcpu->arch.st.last_steal = current->sched_info.run_delay;
194 unsafe_put_user(steal, &st->steal, out);
195
196 smp_wmb();
197 version += 1;
198 unsafe_put_user(version, &st->version, out);
199 out:
200 mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
201 }
202
203 /*
204 * kvm_check_requests - check and handle pending vCPU requests
205 *
206 * Return: RESUME_GUEST if we should enter the guest
207 * RESUME_HOST if we should exit to userspace
208 */
static int kvm_check_requests(struct kvm_vcpu *vcpu)
210 {
211 if (!kvm_request_pending(vcpu))
212 return RESUME_GUEST;
213
214 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
215 vcpu->arch.vpid = 0; /* Drop vpid for this vCPU */
216
217 if (kvm_dirty_ring_check_request(vcpu))
218 return RESUME_HOST;
219
220 if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
221 kvm_update_stolen_time(vcpu);
222
223 return RESUME_GUEST;
224 }
225
static void kvm_late_check_requests(struct kvm_vcpu *vcpu)
227 {
228 lockdep_assert_irqs_disabled();
229 if (kvm_check_request(KVM_REQ_TLB_FLUSH_GPA, vcpu))
230 if (vcpu->arch.flush_gpa != INVALID_GPA) {
231 kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa);
232 vcpu->arch.flush_gpa = INVALID_GPA;
233 }
234 }
235
236 /*
* Check and handle pending signals, vCPU requests, etc.
* Runs with irqs and preemption enabled.
239 *
240 * Return: RESUME_GUEST if we should enter the guest
241 * RESUME_HOST if we should exit to userspace
242 * < 0 if we should exit to userspace, where the return value
243 * indicates an error
244 */
static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
246 {
247 int idx, ret;
248
249 /*
250 * Check conditions before entering the guest
251 */
252 ret = kvm_xfer_to_guest_mode_handle_work(vcpu);
253 if (ret < 0)
254 return ret;
255
256 idx = srcu_read_lock(&vcpu->kvm->srcu);
257 ret = kvm_check_requests(vcpu);
258 srcu_read_unlock(&vcpu->kvm->srcu, idx);
259
260 return ret;
261 }
262
263 /*
264 * Called with irq enabled
265 *
266 * Return: RESUME_GUEST if we should enter the guest, and irq disabled
267 * Others if we should exit to userspace
268 */
static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
270 {
271 int ret;
272
273 do {
274 ret = kvm_enter_guest_check(vcpu);
275 if (ret != RESUME_GUEST)
276 break;
277
278 /*
279 * Handle vcpu timer, interrupts, check requests and
280 * check vmid before vcpu enter guest
281 */
282 local_irq_disable();
283 kvm_deliver_intr(vcpu);
284 kvm_deliver_exception(vcpu);
285 /* Make sure the vcpu mode has been written */
286 smp_store_mb(vcpu->mode, IN_GUEST_MODE);
287 kvm_check_vpid(vcpu);
288 kvm_check_pmu(vcpu);
289
/*
* Must be called after kvm_check_vpid(), since that function updates
* CSR.GSTAT, which is used by kvm_flush_tlb_gpa(), and it may also
* clear the pending KVM_REQ_TLB_FLUSH_GPA bit.
*/
295 kvm_late_check_requests(vcpu);
296 vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY);
297 /* Clear KVM_LARCH_SWCSR_LATEST as CSR will change when enter guest */
298 vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
299
300 if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) {
301 if (vcpu->arch.aux_inuse & KVM_LARCH_PMU) {
302 kvm_lose_pmu(vcpu);
303 kvm_make_request(KVM_REQ_PMU, vcpu);
304 }
305 /* make sure the vcpu mode has been written */
306 smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE);
307 local_irq_enable();
308 ret = -EAGAIN;
309 }
310 } while (ret != RESUME_GUEST);
311
312 return ret;
313 }
314
315 /*
316 * Return 1 for resume guest and "<= 0" for resume host.
317 */
static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
319 {
320 int ret = RESUME_GUEST;
321 unsigned long estat = vcpu->arch.host_estat;
322 u32 intr = estat & CSR_ESTAT_IS;
323 u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
324
325 vcpu->mode = OUTSIDE_GUEST_MODE;
326
327 /* Set a default exit reason */
328 run->exit_reason = KVM_EXIT_UNKNOWN;
329
330 kvm_lose_pmu(vcpu);
331
332 guest_timing_exit_irqoff();
333 guest_state_exit_irqoff();
334 local_irq_enable();
335
336 trace_kvm_exit(vcpu, ecode);
337 if (ecode) {
338 ret = kvm_handle_fault(vcpu, ecode);
339 } else {
340 WARN(!intr, "vm exiting with suspicious irq\n");
341 ++vcpu->stat.int_exits;
342 }
343
344 if (ret == RESUME_GUEST)
345 ret = kvm_pre_enter_guest(vcpu);
346
347 if (ret != RESUME_GUEST) {
348 local_irq_disable();
349 return ret;
350 }
351
352 guest_timing_enter_irqoff();
353 guest_state_enter_irqoff();
354 trace_kvm_reenter(vcpu);
355
356 return RESUME_GUEST;
357 }
358
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
360 {
361 return !!(vcpu->arch.irq_pending) &&
362 vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE;
363 }
364
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
366 {
367 return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
368 }
369
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
371 {
372 unsigned long val;
373
374 preempt_disable();
375 val = gcsr_read(LOONGARCH_CSR_CRMD);
376 preempt_enable();
377
378 return (val & CSR_PRMD_PPLV) == PLV_KERN;
379 }
380
381 #ifdef CONFIG_GUEST_PERF_EVENTS
unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
383 {
384 return vcpu->arch.pc;
385 }
386
387 /*
388 * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
389 * arrived in guest context. For LoongArch64, if PMU is not passthrough to VM,
390 * any event that arrives while a vCPU is loaded is considered to be "in guest".
391 */
bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
393 {
394 return (vcpu && !(vcpu->arch.aux_inuse & KVM_LARCH_PMU));
395 }
396 #endif
397
bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
399 {
400 return false;
401 }
402
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
404 {
405 return VM_FAULT_SIGBUS;
406 }
407
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
409 struct kvm_translation *tr)
410 {
411 return -EINVAL;
412 }
413
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
415 {
416 int ret;
417
418 /* Protect from TOD sync and vcpu_load/put() */
419 preempt_disable();
420 ret = kvm_pending_timer(vcpu) ||
421 kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT) & (1 << INT_TI);
422 preempt_enable();
423
424 return ret;
425 }
426
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
428 {
429 int i;
430
431 kvm_debug("vCPU Register Dump:\n");
432 kvm_debug("\tPC = 0x%08lx\n", vcpu->arch.pc);
433 kvm_debug("\tExceptions: %08lx\n", vcpu->arch.irq_pending);
434
435 for (i = 0; i < 32; i += 4) {
436 kvm_debug("\tGPR%02d: %08lx %08lx %08lx %08lx\n", i,
437 vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1],
438 vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
439 }
440
441 kvm_debug("\tCRMD: 0x%08lx, ESTAT: 0x%08lx\n",
442 kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD),
443 kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT));
444
445 kvm_debug("\tERA: 0x%08lx\n", kvm_read_hw_gcsr(LOONGARCH_CSR_ERA));
446
447 return 0;
448 }
449
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
451 struct kvm_mp_state *mp_state)
452 {
453 *mp_state = vcpu->arch.mp_state;
454
455 return 0;
456 }
457
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
459 struct kvm_mp_state *mp_state)
460 {
461 int ret = 0;
462
463 switch (mp_state->mp_state) {
464 case KVM_MP_STATE_RUNNABLE:
465 vcpu->arch.mp_state = *mp_state;
466 break;
467 default:
468 ret = -EINVAL;
469 }
470
471 return ret;
472 }
473
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
475 struct kvm_guest_debug *dbg)
476 {
477 if (dbg->control & ~KVM_GUESTDBG_VALID_MASK)
478 return -EINVAL;
479
480 if (dbg->control & KVM_GUESTDBG_ENABLE)
481 vcpu->guest_debug = dbg->control;
482 else
483 vcpu->guest_debug = 0;
484
485 return 0;
486 }
487
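/*
* Bind a physical CPUID to this vCPU in the VM-wide phyid_map. Changing
* an already-set CPUID or sharing one CPUID between vCPUs is rejected;
* setting the same value again is treated as a no-op.
*/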
static inline int kvm_set_cpuid(struct kvm_vcpu *vcpu, u64 val)
489 {
490 int cpuid;
491 struct kvm_phyid_map *map;
492 struct loongarch_csrs *csr = vcpu->arch.csr;
493
494 if (val >= KVM_MAX_PHYID)
495 return -EINVAL;
496
497 map = vcpu->kvm->arch.phyid_map;
498 cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);
499
500 spin_lock(&vcpu->kvm->arch.phyid_map_lock);
501 if ((cpuid < KVM_MAX_PHYID) && map->phys_map[cpuid].enabled) {
502 /* Discard duplicated CPUID set operation */
503 if (cpuid == val) {
504 spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
505 return 0;
506 }
507
508 /*
509 * CPUID is already set before
510 * Forbid changing to a different CPUID at runtime
511 */
512 spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
513 return -EINVAL;
514 }
515
516 if (map->phys_map[val].enabled) {
517 /* Discard duplicated CPUID set operation */
518 if (vcpu == map->phys_map[val].vcpu) {
519 spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
520 return 0;
521 }
522
523 /*
524 * New CPUID is already set with other vcpu
525 * Forbid sharing the same CPUID between different vcpus
526 */
527 spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
528 return -EINVAL;
529 }
530
531 kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, val);
532 map->phys_map[val].enabled = true;
533 map->phys_map[val].vcpu = vcpu;
534 spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
535
536 return 0;
537 }
538
static inline void kvm_drop_cpuid(struct kvm_vcpu *vcpu)
540 {
541 int cpuid;
542 struct kvm_phyid_map *map;
543 struct loongarch_csrs *csr = vcpu->arch.csr;
544
545 map = vcpu->kvm->arch.phyid_map;
546 cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);
547
548 if (cpuid >= KVM_MAX_PHYID)
549 return;
550
551 spin_lock(&vcpu->kvm->arch.phyid_map_lock);
552 if (map->phys_map[cpuid].enabled) {
553 map->phys_map[cpuid].vcpu = NULL;
554 map->phys_map[cpuid].enabled = false;
555 kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);
556 }
557 spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
558 }
559
struct kvm_vcpu *kvm_get_vcpu_by_cpuid(struct kvm *kvm, int cpuid)
561 {
562 struct kvm_phyid_map *map;
563
564 if (cpuid >= KVM_MAX_PHYID)
565 return NULL;
566
567 map = kvm->arch.phyid_map;
568 if (!map->phys_map[cpuid].enabled)
569 return NULL;
570
571 return map->phys_map[cpuid].vcpu;
572 }
573
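/*
* Read a guest CSR into *val from the software CSR state. ESTAT is
* special-cased: pending interrupts are delivered first and the IP
* bits are taken from GINTC.
*/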
static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val)
575 {
576 unsigned long gintc;
577 struct loongarch_csrs *csr = vcpu->arch.csr;
578
579 if (get_gcsr_flag(id) & INVALID_GCSR)
580 return -EINVAL;
581
582 if (id == LOONGARCH_CSR_ESTAT) {
583 preempt_disable();
584 vcpu_load(vcpu);
/*
* Sync pending interrupts into ESTAT so that they are preserved
* across the VM migration stage
*/
589 kvm_deliver_intr(vcpu);
590 vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
591 vcpu_put(vcpu);
592 preempt_enable();
593
594 /* ESTAT IP0~IP7 get from GINTC */
595 gintc = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC) & 0xff;
596 *val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT) | (gintc << 2);
597 return 0;
598 }
599
600 /*
601 * Get software CSR state since software state is consistent
602 * with hardware for synchronous ioctl
603 */
604 *val = kvm_read_sw_gcsr(csr, id);
605
606 return 0;
607 }
608
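/*
* Write a guest CSR in the software CSR state. CPUID and ESTAT are
* special-cased, and a write to a PMU CSR raises KVM_REQ_PMU when a
* counter is enabled, so that the guest PMU context is loaded on entry.
*/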
static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
610 {
611 int ret = 0, gintc;
612 struct loongarch_csrs *csr = vcpu->arch.csr;
613
614 if (get_gcsr_flag(id) & INVALID_GCSR)
615 return -EINVAL;
616
617 if (id == LOONGARCH_CSR_CPUID)
618 return kvm_set_cpuid(vcpu, val);
619
620 if (id == LOONGARCH_CSR_ESTAT) {
621 /* ESTAT IP0~IP7 inject through GINTC */
622 gintc = (val >> 2) & 0xff;
623 kvm_set_sw_gcsr(csr, LOONGARCH_CSR_GINTC, gintc);
624
625 gintc = val & ~(0xffUL << 2);
626 kvm_set_sw_gcsr(csr, LOONGARCH_CSR_ESTAT, gintc);
627
628 return ret;
629 }
630
631 kvm_write_sw_gcsr(csr, id, val);
632
/*
* After modifying a PMU CSR value of the vcpu, set KVM_REQ_PMU if any
* of the PMU counters is enabled, so that the guest PMU context is
* restored before the next guest entry.
*/
637 if (id >= LOONGARCH_CSR_PERFCTRL0 && id <= LOONGARCH_CSR_PERFCNTR3) {
638 unsigned long val;
639
640 val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0) |
641 kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1) |
642 kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2) |
643 kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
644
645 if (val & KVM_PMU_EVENT_ENABLED)
646 kvm_make_request(KVM_REQ_PMU, vcpu);
647 }
648
649 return ret;
650 }
651
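/*
* Return in *v the mask of CPUCFG bits that userspace is allowed to
* set for the given CPUCFG index; bits reserved by hardware or not
* supported by KVM read as zero.
*/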
static int _kvm_get_cpucfg_mask(int id, u64 *v)
653 {
654 if (id < 0 || id >= KVM_MAX_CPUCFG_REGS)
655 return -EINVAL;
656
657 switch (id) {
658 case LOONGARCH_CPUCFG0:
659 *v = GENMASK(31, 0);
660 return 0;
661 case LOONGARCH_CPUCFG1:
662 *v = GENMASK(26, 0);
663 return 0;
664 case LOONGARCH_CPUCFG2:
665 /* CPUCFG2 features unconditionally supported by KVM */
666 *v = CPUCFG2_FP | CPUCFG2_FPSP | CPUCFG2_FPDP |
667 CPUCFG2_FPVERS | CPUCFG2_LLFTP | CPUCFG2_LLFTPREV |
668 CPUCFG2_LSPW | CPUCFG2_LAM;
669 /*
670 * For the ISA extensions listed below, if one is supported
671 * by the host, then it is also supported by KVM.
672 */
673 if (cpu_has_lsx)
674 *v |= CPUCFG2_LSX;
675 if (cpu_has_lasx)
676 *v |= CPUCFG2_LASX;
677 if (cpu_has_lbt_x86)
678 *v |= CPUCFG2_X86BT;
679 if (cpu_has_lbt_arm)
680 *v |= CPUCFG2_ARMBT;
681 if (cpu_has_lbt_mips)
682 *v |= CPUCFG2_MIPSBT;
683 if (cpu_has_ptw)
684 *v |= CPUCFG2_PTW;
685
686 return 0;
687 case LOONGARCH_CPUCFG3:
688 *v = GENMASK(16, 0);
689 return 0;
690 case LOONGARCH_CPUCFG4:
691 case LOONGARCH_CPUCFG5:
692 *v = GENMASK(31, 0);
693 return 0;
694 case LOONGARCH_CPUCFG6:
695 if (cpu_has_pmp)
696 *v = GENMASK(14, 0);
697 else
698 *v = 0;
699 return 0;
700 case LOONGARCH_CPUCFG16:
701 *v = GENMASK(16, 0);
702 return 0;
703 case LOONGARCH_CPUCFG17 ... LOONGARCH_CPUCFG20:
704 *v = GENMASK(30, 0);
705 return 0;
706 default:
707 /*
708 * CPUCFG bits should be zero if reserved by HW or not
709 * supported by KVM.
710 */
711 *v = 0;
712 return 0;
713 }
714 }
715
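/*
* Validate a user-supplied CPUCFG value against the writable mask and
* against dependencies between features (e.g. LSX requires FP, LASX
* requires LSX, PMU settings must not exceed the host's).
*/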
static int kvm_check_cpucfg(int id, u64 val)
717 {
718 int ret;
719 u64 mask = 0;
720
721 ret = _kvm_get_cpucfg_mask(id, &mask);
722 if (ret)
723 return ret;
724
725 if (val & ~mask)
726 /* Unsupported features and/or the higher 32 bits should not be set */
727 return -EINVAL;
728
729 switch (id) {
730 case LOONGARCH_CPUCFG1:
731 if ((val & CPUCFG1_MSGINT) && !cpu_has_msgint)
732 return -EINVAL;
733 return 0;
734 case LOONGARCH_CPUCFG2:
735 if (!(val & CPUCFG2_LLFTP))
736 /* Guests must have a constant timer */
737 return -EINVAL;
738 if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP)))
/* Single- and double-precision floating point must both be set when FP is enabled */
740 return -EINVAL;
741 if ((val & CPUCFG2_LSX) && !(val & CPUCFG2_FP))
742 /* LSX architecturally implies FP but val does not satisfy that */
743 return -EINVAL;
744 if ((val & CPUCFG2_LASX) && !(val & CPUCFG2_LSX))
745 /* LASX architecturally implies LSX and FP but val does not satisfy that */
746 return -EINVAL;
747 return 0;
748 case LOONGARCH_CPUCFG6:
749 if (val & CPUCFG6_PMP) {
750 u32 host = read_cpucfg(LOONGARCH_CPUCFG6);
751 if ((val & CPUCFG6_PMBITS) != (host & CPUCFG6_PMBITS))
752 return -EINVAL;
753 if ((val & CPUCFG6_PMNUM) > (host & CPUCFG6_PMNUM))
754 return -EINVAL;
755 if ((val & CPUCFG6_UPM) && !(host & CPUCFG6_UPM))
756 return -EINVAL;
757 }
758 return 0;
759 default:
760 /*
761 * Values for the other CPUCFG IDs are not being further validated
762 * besides the mask check above.
763 */
764 return 0;
765 }
766 }
767
static int kvm_get_one_reg(struct kvm_vcpu *vcpu,
769 const struct kvm_one_reg *reg, u64 *v)
770 {
771 int id, ret = 0;
772 u64 type = reg->id & KVM_REG_LOONGARCH_MASK;
773
774 switch (type) {
775 case KVM_REG_LOONGARCH_CSR:
776 id = KVM_GET_IOC_CSR_IDX(reg->id);
777 ret = _kvm_getcsr(vcpu, id, v);
778 break;
779 case KVM_REG_LOONGARCH_CPUCFG:
780 id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
781 if (id >= 0 && id < KVM_MAX_CPUCFG_REGS)
782 *v = vcpu->arch.cpucfg[id];
783 else
784 ret = -EINVAL;
785 break;
786 case KVM_REG_LOONGARCH_LBT:
787 if (!kvm_guest_has_lbt(&vcpu->arch))
788 return -ENXIO;
789
790 switch (reg->id) {
791 case KVM_REG_LOONGARCH_LBT_SCR0:
792 *v = vcpu->arch.lbt.scr0;
793 break;
794 case KVM_REG_LOONGARCH_LBT_SCR1:
795 *v = vcpu->arch.lbt.scr1;
796 break;
797 case KVM_REG_LOONGARCH_LBT_SCR2:
798 *v = vcpu->arch.lbt.scr2;
799 break;
800 case KVM_REG_LOONGARCH_LBT_SCR3:
801 *v = vcpu->arch.lbt.scr3;
802 break;
803 case KVM_REG_LOONGARCH_LBT_EFLAGS:
804 *v = vcpu->arch.lbt.eflags;
805 break;
806 case KVM_REG_LOONGARCH_LBT_FTOP:
807 *v = vcpu->arch.fpu.ftop;
808 break;
809 default:
810 ret = -EINVAL;
811 break;
812 }
813 break;
814 case KVM_REG_LOONGARCH_KVM:
815 switch (reg->id) {
816 case KVM_REG_LOONGARCH_COUNTER:
817 *v = drdtime() + vcpu->kvm->arch.time_offset;
818 break;
819 case KVM_REG_LOONGARCH_DEBUG_INST:
820 *v = INSN_HVCL | KVM_HCALL_SWDBG;
821 break;
822 default:
823 ret = -EINVAL;
824 break;
825 }
826 break;
827 default:
828 ret = -EINVAL;
829 break;
830 }
831
832 return ret;
833 }
834
static int kvm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
836 {
837 int ret = 0;
838 u64 v, size = reg->id & KVM_REG_SIZE_MASK;
839
840 switch (size) {
841 case KVM_REG_SIZE_U64:
842 ret = kvm_get_one_reg(vcpu, reg, &v);
843 if (ret)
844 return ret;
845 ret = put_user(v, (u64 __user *)(long)reg->addr);
846 break;
847 default:
848 ret = -EINVAL;
849 break;
850 }
851
852 return ret;
853 }
854
static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
856 const struct kvm_one_reg *reg, u64 v)
857 {
858 int id, ret = 0;
859 u64 type = reg->id & KVM_REG_LOONGARCH_MASK;
860
861 switch (type) {
862 case KVM_REG_LOONGARCH_CSR:
863 id = KVM_GET_IOC_CSR_IDX(reg->id);
864 ret = _kvm_setcsr(vcpu, id, v);
865 break;
866 case KVM_REG_LOONGARCH_CPUCFG:
867 id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
868 ret = kvm_check_cpucfg(id, v);
869 if (ret)
870 break;
871 vcpu->arch.cpucfg[id] = (u32)v;
872 if (id == LOONGARCH_CPUCFG6)
873 vcpu->arch.max_pmu_csrid =
874 LOONGARCH_CSR_PERFCTRL0 + 2 * kvm_get_pmu_num(&vcpu->arch) + 1;
875 break;
876 case KVM_REG_LOONGARCH_LBT:
877 if (!kvm_guest_has_lbt(&vcpu->arch))
878 return -ENXIO;
879
880 switch (reg->id) {
881 case KVM_REG_LOONGARCH_LBT_SCR0:
882 vcpu->arch.lbt.scr0 = v;
883 break;
884 case KVM_REG_LOONGARCH_LBT_SCR1:
885 vcpu->arch.lbt.scr1 = v;
886 break;
887 case KVM_REG_LOONGARCH_LBT_SCR2:
888 vcpu->arch.lbt.scr2 = v;
889 break;
890 case KVM_REG_LOONGARCH_LBT_SCR3:
891 vcpu->arch.lbt.scr3 = v;
892 break;
893 case KVM_REG_LOONGARCH_LBT_EFLAGS:
894 vcpu->arch.lbt.eflags = v;
895 break;
896 case KVM_REG_LOONGARCH_LBT_FTOP:
897 vcpu->arch.fpu.ftop = v;
898 break;
899 default:
900 ret = -EINVAL;
901 break;
902 }
903 break;
904 case KVM_REG_LOONGARCH_KVM:
905 switch (reg->id) {
906 case KVM_REG_LOONGARCH_COUNTER:
/*
* The guest timer offset is relative to the board, not to a vcpu,
* so it is only set once (by vCPU 0) on an SMP system
*/
911 if (vcpu->vcpu_id == 0)
912 vcpu->kvm->arch.time_offset = (signed long)(v - drdtime());
913 break;
914 case KVM_REG_LOONGARCH_VCPU_RESET:
915 vcpu->arch.st.guest_addr = 0;
916 memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
917 memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
918
/*
* On vCPU reset, clear the ESTAT and GINTC registers here;
* the other CSR registers are cleared via _kvm_setcsr().
*/
923 kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_GINTC, 0);
924 kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_ESTAT, 0);
925 break;
926 default:
927 ret = -EINVAL;
928 break;
929 }
930 break;
931 default:
932 ret = -EINVAL;
933 break;
934 }
935
936 return ret;
937 }
938
static int kvm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
940 {
941 int ret = 0;
942 u64 v, size = reg->id & KVM_REG_SIZE_MASK;
943
944 switch (size) {
945 case KVM_REG_SIZE_U64:
946 ret = get_user(v, (u64 __user *)(long)reg->addr);
947 if (ret)
948 return ret;
949 break;
950 default:
951 return -EINVAL;
952 }
953
954 return kvm_set_one_reg(vcpu, reg, v);
955 }
956
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
958 {
959 return -ENOIOCTLCMD;
960 }
961
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
963 {
964 return -ENOIOCTLCMD;
965 }
966
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
968 {
969 int i;
970
971 for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
972 regs->gpr[i] = vcpu->arch.gprs[i];
973
974 regs->pc = vcpu->arch.pc;
975
976 return 0;
977 }
978
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
980 {
981 int i;
982
983 for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
984 vcpu->arch.gprs[i] = regs->gpr[i];
985
986 vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
987 vcpu->arch.pc = regs->pc;
988
989 return 0;
990 }
991
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
993 struct kvm_enable_cap *cap)
994 {
995 /* FPU is enabled by default, will support LSX/LASX later. */
996 return -EINVAL;
997 }
998
static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu,
1000 struct kvm_device_attr *attr)
1001 {
1002 switch (attr->attr) {
1003 case LOONGARCH_CPUCFG2:
1004 case LOONGARCH_CPUCFG6:
1005 return 0;
1006 case CPUCFG_KVM_FEATURE:
1007 return 0;
1008 default:
1009 return -ENXIO;
1010 }
1011
1012 return -ENXIO;
1013 }
1014
static int kvm_loongarch_pvtime_has_attr(struct kvm_vcpu *vcpu,
1016 struct kvm_device_attr *attr)
1017 {
1018 if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
1019 || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
1020 return -ENXIO;
1021
1022 return 0;
1023 }
1024
static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
1026 struct kvm_device_attr *attr)
1027 {
1028 int ret = -ENXIO;
1029
1030 switch (attr->group) {
1031 case KVM_LOONGARCH_VCPU_CPUCFG:
1032 ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr);
1033 break;
1034 case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
1035 ret = kvm_loongarch_pvtime_has_attr(vcpu, attr);
1036 break;
1037 default:
1038 break;
1039 }
1040
1041 return ret;
1042 }
1043
static int kvm_loongarch_cpucfg_get_attr(struct kvm_vcpu *vcpu,
1045 struct kvm_device_attr *attr)
1046 {
1047 int ret = 0;
1048 uint64_t val;
1049 uint64_t __user *uaddr = (uint64_t __user *)attr->addr;
1050
1051 switch (attr->attr) {
1052 case 0 ... (KVM_MAX_CPUCFG_REGS - 1):
1053 ret = _kvm_get_cpucfg_mask(attr->attr, &val);
1054 if (ret)
1055 return ret;
1056 break;
1057 case CPUCFG_KVM_FEATURE:
1058 val = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK;
1059 break;
1060 default:
1061 return -ENXIO;
1062 }
1063
1064 put_user(val, uaddr);
1065
1066 return ret;
1067 }
1068
static int kvm_loongarch_pvtime_get_attr(struct kvm_vcpu *vcpu,
1070 struct kvm_device_attr *attr)
1071 {
1072 u64 gpa;
1073 u64 __user *user = (u64 __user *)attr->addr;
1074
1075 if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
1076 || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
1077 return -ENXIO;
1078
1079 gpa = vcpu->arch.st.guest_addr;
1080 if (put_user(gpa, user))
1081 return -EFAULT;
1082
1083 return 0;
1084 }
1085
static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu,
1087 struct kvm_device_attr *attr)
1088 {
1089 int ret = -ENXIO;
1090
1091 switch (attr->group) {
1092 case KVM_LOONGARCH_VCPU_CPUCFG:
1093 ret = kvm_loongarch_cpucfg_get_attr(vcpu, attr);
1094 break;
1095 case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
1096 ret = kvm_loongarch_pvtime_get_attr(vcpu, attr);
1097 break;
1098 default:
1099 break;
1100 }
1101
1102 return ret;
1103 }
1104
static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu,
1106 struct kvm_device_attr *attr)
1107 {
1108 u64 val, valid;
1109 u64 __user *user = (u64 __user *)attr->addr;
1110 struct kvm *kvm = vcpu->kvm;
1111
1112 switch (attr->attr) {
1113 case CPUCFG_KVM_FEATURE:
1114 if (get_user(val, user))
1115 return -EFAULT;
1116
1117 valid = LOONGARCH_PV_FEAT_MASK;
1118 if (val & ~valid)
1119 return -EINVAL;
1120
/* All vCPUs must be configured with the same PV features */
1122 if ((kvm->arch.pv_features & LOONGARCH_PV_FEAT_UPDATED)
1123 && ((kvm->arch.pv_features & valid) != val))
1124 return -EINVAL;
1125 kvm->arch.pv_features = val | LOONGARCH_PV_FEAT_UPDATED;
1126 return 0;
1127 default:
1128 return -ENXIO;
1129 }
1130 }
1131
static int kvm_loongarch_pvtime_set_attr(struct kvm_vcpu *vcpu,
1133 struct kvm_device_attr *attr)
1134 {
1135 int idx, ret = 0;
1136 u64 gpa, __user *user = (u64 __user *)attr->addr;
1137 struct kvm *kvm = vcpu->kvm;
1138
1139 if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
1140 || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
1141 return -ENXIO;
1142
1143 if (get_user(gpa, user))
1144 return -EFAULT;
1145
1146 if (gpa & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
1147 return -EINVAL;
1148
1149 if (!(gpa & KVM_STEAL_PHYS_VALID)) {
1150 vcpu->arch.st.guest_addr = gpa;
1151 return 0;
1152 }
1153
1154 /* Check the address is in a valid memslot */
1155 idx = srcu_read_lock(&kvm->srcu);
1156 if (kvm_is_error_hva(gfn_to_hva(kvm, gpa >> PAGE_SHIFT)))
1157 ret = -EINVAL;
1158 srcu_read_unlock(&kvm->srcu, idx);
1159
1160 if (!ret) {
1161 vcpu->arch.st.guest_addr = gpa;
1162 vcpu->arch.st.last_steal = current->sched_info.run_delay;
1163 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
1164 }
1165
1166 return ret;
1167 }
1168
static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu,
1170 struct kvm_device_attr *attr)
1171 {
1172 int ret = -ENXIO;
1173
1174 switch (attr->group) {
1175 case KVM_LOONGARCH_VCPU_CPUCFG:
1176 ret = kvm_loongarch_cpucfg_set_attr(vcpu, attr);
1177 break;
1178 case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
1179 ret = kvm_loongarch_pvtime_set_attr(vcpu, attr);
1180 break;
1181 default:
1182 break;
1183 }
1184
1185 return ret;
1186 }
1187
long kvm_arch_vcpu_ioctl(struct file *filp,
1189 unsigned int ioctl, unsigned long arg)
1190 {
1191 long r;
1192 struct kvm_device_attr attr;
1193 void __user *argp = (void __user *)arg;
1194 struct kvm_vcpu *vcpu = filp->private_data;
1195
/*
* Only the software CSR state should be modified here.
*
* If a hardware CSR register were modified directly, a vcpu_load()/
* vcpu_put() pair would be required: the hardware CSR registers are
* owned by the currently loaded vcpu, so after switching to another
* vcpu the CSR registers would need to be reloaded.
*
* When the software CSR state is modified, the KVM_LARCH_HWCSR_USABLE
* bit should be cleared in vcpu->arch.aux_inuse, so that vcpu_load()
* checks the aux_inuse flag and reloads the CSR registers from the
* software state.
*/
1207
1208 switch (ioctl) {
1209 case KVM_SET_ONE_REG:
1210 case KVM_GET_ONE_REG: {
1211 struct kvm_one_reg reg;
1212
1213 r = -EFAULT;
if (copy_from_user(&reg, argp, sizeof(reg)))
break;
if (ioctl == KVM_SET_ONE_REG) {
r = kvm_set_reg(vcpu, &reg);
vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
} else
r = kvm_get_reg(vcpu, &reg);
1221 break;
1222 }
1223 case KVM_ENABLE_CAP: {
1224 struct kvm_enable_cap cap;
1225
1226 r = -EFAULT;
1227 if (copy_from_user(&cap, argp, sizeof(cap)))
1228 break;
1229 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
1230 break;
1231 }
1232 case KVM_HAS_DEVICE_ATTR: {
1233 r = -EFAULT;
1234 if (copy_from_user(&attr, argp, sizeof(attr)))
1235 break;
1236 r = kvm_loongarch_vcpu_has_attr(vcpu, &attr);
1237 break;
1238 }
1239 case KVM_GET_DEVICE_ATTR: {
1240 r = -EFAULT;
1241 if (copy_from_user(&attr, argp, sizeof(attr)))
1242 break;
1243 r = kvm_loongarch_vcpu_get_attr(vcpu, &attr);
1244 break;
1245 }
1246 case KVM_SET_DEVICE_ATTR: {
1247 r = -EFAULT;
1248 if (copy_from_user(&attr, argp, sizeof(attr)))
1249 break;
1250 r = kvm_loongarch_vcpu_set_attr(vcpu, &attr);
1251 break;
1252 }
1253 default:
1254 r = -ENOIOCTLCMD;
1255 break;
1256 }
1257
1258 return r;
1259 }
1260
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1262 {
1263 int i = 0;
1264
1265 fpu->fcc = vcpu->arch.fpu.fcc;
1266 fpu->fcsr = vcpu->arch.fpu.fcsr;
1267 for (i = 0; i < NUM_FPU_REGS; i++)
1268 memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i], FPU_REG_WIDTH / 64);
1269
1270 return 0;
1271 }
1272
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1274 {
1275 int i = 0;
1276
1277 vcpu->arch.fpu.fcc = fpu->fcc;
1278 vcpu->arch.fpu.fcsr = fpu->fcsr;
1279 for (i = 0; i < NUM_FPU_REGS; i++)
1280 memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i], FPU_REG_WIDTH / 64);
1281
1282 return 0;
1283 }
1284
1285 #ifdef CONFIG_CPU_HAS_LBT
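/* Enable LBT for the guest and restore its LBT context if not already owned */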
int kvm_own_lbt(struct kvm_vcpu *vcpu)
1287 {
1288 if (!kvm_guest_has_lbt(&vcpu->arch))
1289 return -EINVAL;
1290
1291 preempt_disable();
1292 if (!(vcpu->arch.aux_inuse & KVM_LARCH_LBT)) {
1293 set_csr_euen(CSR_EUEN_LBTEN);
1294 _restore_lbt(&vcpu->arch.lbt);
1295 vcpu->arch.aux_inuse |= KVM_LARCH_LBT;
1296 }
1297 preempt_enable();
1298
1299 return 0;
1300 }
1301
static void kvm_lose_lbt(struct kvm_vcpu *vcpu)
1303 {
1304 preempt_disable();
1305 if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) {
1306 _save_lbt(&vcpu->arch.lbt);
1307 clear_csr_euen(CSR_EUEN_LBTEN);
1308 vcpu->arch.aux_inuse &= ~KVM_LARCH_LBT;
1309 }
1310 preempt_enable();
1311 }
1312
static void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr)
1314 {
/*
* If TM is enabled, saving/restoring the TOP register will trigger
* an LBT exception, so enable LBT here in advance
*/
1319 if (fcsr & FPU_CSR_TM)
1320 kvm_own_lbt(vcpu);
1321 }
1322
static void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu)
1324 {
1325 if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1326 if (vcpu->arch.aux_inuse & KVM_LARCH_LBT)
1327 return;
1328 kvm_check_fcsr(vcpu, read_fcsr(LOONGARCH_FCSR0));
1329 }
1330 }
1331 #else
static inline void kvm_lose_lbt(struct kvm_vcpu *vcpu) { }
static inline void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr) { }
static inline void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu) { }
1335 #endif
1336
1337 /* Enable FPU and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
1339 {
1340 preempt_disable();
1341
1342 /*
1343 * Enable FPU for guest
1344 * Set FR and FRE according to guest context
1345 */
1346 kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1347 set_csr_euen(CSR_EUEN_FPEN);
1348
1349 kvm_restore_fpu(&vcpu->arch.fpu);
1350 vcpu->arch.aux_inuse |= KVM_LARCH_FPU;
1351 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
1352
1353 preempt_enable();
1354 }
1355
1356 #ifdef CONFIG_CPU_HAS_LSX
1357 /* Enable LSX and restore context */
int kvm_own_lsx(struct kvm_vcpu *vcpu)
1359 {
1360 if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch))
1361 return -EINVAL;
1362
1363 preempt_disable();
1364
1365 /* Enable LSX for guest */
1366 kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1367 set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN);
1368 switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1369 case KVM_LARCH_FPU:
1370 /*
1371 * Guest FPU state already loaded,
1372 * only restore upper LSX state
1373 */
1374 _restore_lsx_upper(&vcpu->arch.fpu);
1375 break;
1376 default:
/*
* Neither FP nor LSX state is already active,
* restore the full LSX state
*/
1380 kvm_restore_lsx(&vcpu->arch.fpu);
1381 break;
1382 }
1383
1384 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX);
1385 vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU;
1386 preempt_enable();
1387
1388 return 0;
1389 }
1390 #endif
1391
1392 #ifdef CONFIG_CPU_HAS_LASX
1393 /* Enable LASX and restore context */
int kvm_own_lasx(struct kvm_vcpu *vcpu)
1395 {
1396 if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch) || !kvm_guest_has_lasx(&vcpu->arch))
1397 return -EINVAL;
1398
1399 preempt_disable();
1400
1401 kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1402 set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
1403 switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) {
1404 case KVM_LARCH_LSX:
1405 case KVM_LARCH_LSX | KVM_LARCH_FPU:
1406 /* Guest LSX state already loaded, only restore upper LASX state */
1407 _restore_lasx_upper(&vcpu->arch.fpu);
1408 break;
1409 case KVM_LARCH_FPU:
1410 /* Guest FP state already loaded, only restore upper LSX & LASX state */
1411 _restore_lsx_upper(&vcpu->arch.fpu);
1412 _restore_lasx_upper(&vcpu->arch.fpu);
1413 break;
1414 default:
/* Neither FP nor LSX state is already active, restore the full LASX state */
1416 kvm_restore_lasx(&vcpu->arch.fpu);
1417 break;
1418 }
1419
1420 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX);
1421 vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU;
1422 preempt_enable();
1423
1424 return 0;
1425 }
1426 #endif
1427
1428 /* Save context and disable FPU */
void kvm_lose_fpu(struct kvm_vcpu *vcpu)
1430 {
1431 preempt_disable();
1432
1433 kvm_check_fcsr_alive(vcpu);
1434 if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) {
1435 kvm_save_lasx(&vcpu->arch.fpu);
1436 vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX);
1437 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LASX);
1438
1439 /* Disable LASX & LSX & FPU */
1440 clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
1441 } else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) {
1442 kvm_save_lsx(&vcpu->arch.fpu);
1443 vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU);
1444 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX);
1445
1446 /* Disable LSX & FPU */
1447 clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN);
1448 } else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1449 kvm_save_fpu(&vcpu->arch.fpu);
1450 vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU;
1451 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);
1452
1453 /* Disable FPU */
1454 clear_csr_euen(CSR_EUEN_FPEN);
1455 }
1456 kvm_lose_lbt(vcpu);
1457
1458 preempt_enable();
1459 }
1460
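/*
* KVM_INTERRUPT ioctl handler: a positive irq number queues the
* interrupt, a negative number dequeues it, and zero is rejected.
*/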
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
1462 {
1463 int intr = (int)irq->irq;
1464
1465 if (intr > 0)
1466 kvm_queue_irq(vcpu, intr);
1467 else if (intr < 0)
1468 kvm_dequeue_irq(vcpu, -intr);
1469 else {
1470 kvm_err("%s: invalid interrupt ioctl %d\n", __func__, irq->irq);
1471 return -EINVAL;
1472 }
1473
1474 kvm_vcpu_kick(vcpu);
1475
1476 return 0;
1477 }
1478
long kvm_arch_vcpu_unlocked_ioctl(struct file *filp, unsigned int ioctl,
1480 unsigned long arg)
1481 {
1482 void __user *argp = (void __user *)arg;
1483 struct kvm_vcpu *vcpu = filp->private_data;
1484
1485 if (ioctl == KVM_INTERRUPT) {
1486 struct kvm_interrupt irq;
1487
1488 if (copy_from_user(&irq, argp, sizeof(irq)))
1489 return -EFAULT;
1490
1491 kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq);
1492
1493 return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
1494 }
1495
1496 return -ENOIOCTLCMD;
1497 }
1498
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
1500 {
1501 return 0;
1502 }
1503
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
1505 {
1506 unsigned long timer_hz;
1507 struct loongarch_csrs *csr;
1508
1509 vcpu->arch.vpid = 0;
1510 vcpu->arch.flush_gpa = INVALID_GPA;
1511
1512 hrtimer_setup(&vcpu->arch.swtimer, kvm_swtimer_wakeup, CLOCK_MONOTONIC,
1513 HRTIMER_MODE_ABS_PINNED_HARD);
1514
1515 /* Get GPA (=HVA) of PGD for kvm hypervisor */
1516 vcpu->arch.kvm_pgd = __pa(vcpu->kvm->arch.pgd);
1517
/*
* Get the PGD for the primary MMU. The virtual address is used here since
* memory is accessed after loading from CSR_PGD in the TLB exception fast path.
*/
1522 vcpu->arch.host_pgd = (unsigned long)vcpu->kvm->mm->pgd;
1523
1524 vcpu->arch.handle_exit = kvm_handle_exit;
1525 vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry;
1526 vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL);
1527 if (!vcpu->arch.csr)
1528 return -ENOMEM;
1529
/*
* All KVM exceptions share one exception entry, and the host <-> guest
* switch also switches the ECFG.VS field, so keep the host ECFG.VS value here.
*/
1534 vcpu->arch.host_ecfg = (read_csr_ecfg() & CSR_ECFG_VS);
1535
1536 /* Init */
1537 vcpu->arch.last_sched_cpu = -1;
1538
1539 /* Init ipi_state lock */
1540 spin_lock_init(&vcpu->arch.ipi_state.lock);
1541
1542 /*
1543 * Initialize guest register state to valid architectural reset state.
1544 */
1545 timer_hz = calc_const_freq();
1546 kvm_init_timer(vcpu, timer_hz);
1547
1548 /* Set Initialize mode for guest */
1549 csr = vcpu->arch.csr;
1550 kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CRMD, CSR_CRMD_DA);
1551
1552 /* Set cpuid */
1553 kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id);
1554 kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);
1555
1556 /* Start with no pending virtual guest interrupts */
1557 csr->csrs[LOONGARCH_CSR_GINTC] = 0;
1558
1559 return 0;
1560 }
1561
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
1563 {
1564 }
1565
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1567 {
1568 int cpu;
1569 struct kvm_context *context;
1570
1571 hrtimer_cancel(&vcpu->arch.swtimer);
1572 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
1573 kvm_drop_cpuid(vcpu);
1574 kfree(vcpu->arch.csr);
1575
1576 /*
1577 * If the vCPU is freed and reused as another vCPU, we don't want the
1578 * matching pointer wrongly hanging around in last_vcpu.
1579 */
1580 for_each_possible_cpu(cpu) {
1581 context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
1582 if (context->last_vcpu == vcpu)
1583 context->last_vcpu = NULL;
1584 }
1585 }
1586
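/*
* Load the vCPU context onto this physical CPU: restore the timer and,
* unless the hardware CSR state is still usable, reload the guest CSR
* registers from the software copy.
*/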
static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1588 {
1589 bool migrated;
1590 struct kvm_context *context;
1591 struct loongarch_csrs *csr = vcpu->arch.csr;
1592
1593 /*
1594 * Have we migrated to a different CPU?
1595 * If so, any old guest TLB state may be stale.
1596 */
1597 migrated = (vcpu->arch.last_sched_cpu != cpu);
1598
1599 /*
1600 * Was this the last vCPU to run on this CPU?
1601 * If not, any old guest state from this vCPU will have been clobbered.
1602 */
1603 context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
1604 if (migrated || (context->last_vcpu != vcpu))
1605 vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
1606 context->last_vcpu = vcpu;
1607
1608 /* Restore timer state regardless */
1609 kvm_restore_timer(vcpu);
1610 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
1611
1612 /* Don't bother restoring registers multiple times unless necessary */
1613 if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE)
1614 return 0;
1615
1616 write_csr_gcntc((ulong)vcpu->kvm->arch.time_offset);
1617
1618 /* Restore guest CSR registers */
1619 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
1620 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
1621 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
1622 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_MISC);
1623 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
1624 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ERA);
1625 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADV);
1626 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADI);
1627 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
1628 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
1629 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
1630 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
1631 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
1632 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ASID);
1633 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
1634 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
1635 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
1636 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
1637 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
1638 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
1639 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
1640 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS0);
1641 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS1);
1642 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS2);
1643 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS3);
1644 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS4);
1645 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS5);
1646 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS6);
1647 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS7);
1648 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TMID);
1649 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
1650 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
1651 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
1652 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
1653 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
1654 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
1655 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
1656 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
1657 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
1658 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
1659 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
1660 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
1661 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
1662 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
1663 if (cpu_has_msgint) {
1664 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ISR0);
1665 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ISR1);
1666 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ISR2);
1667 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ISR3);
1668 }
1669
1670 /* Restore Root.GINTC from unused Guest.GINTC register */
1671 write_csr_gintc(csr->csrs[LOONGARCH_CSR_GINTC]);
1672
1673 /*
1674 * We should clear linked load bit to break interrupted atomics. This
1675 * prevents a SC on the next vCPU from succeeding by matching a LL on
1676 * the previous vCPU.
1677 */
1678 if (vcpu->kvm->created_vcpus > 1)
1679 set_gcsr_llbctl(CSR_LLBCTL_WCLLB);
1680
1681 vcpu->arch.aux_inuse |= KVM_LARCH_HWCSR_USABLE;
1682
1683 return 0;
1684 }
1685
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1687 {
1688 unsigned long flags;
1689
1690 local_irq_save(flags);
1691 /* Restore guest state to registers */
1692 _kvm_vcpu_load(vcpu, cpu);
1693 local_irq_restore(flags);
1694 }
1695
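/*
* Save the vCPU context when it is descheduled: give up the FPU/SIMD/LBT
* state, sync the hardware guest CSRs back to the software copy if needed,
* and save the timer and Root.GINTC state.
*/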
static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
1697 {
1698 struct loongarch_csrs *csr = vcpu->arch.csr;
1699
1700 kvm_lose_fpu(vcpu);
1701
/*
* Update the software CSR state from hardware if it is stale. Most
* CSR registers are kept unchanged across a process context switch,
* except for registers such as the remaining timer tick value and
* the injected interrupt state.
*/
1708 if (vcpu->arch.aux_inuse & KVM_LARCH_SWCSR_LATEST)
1709 goto out;
1710
1711 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
1712 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
1713 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
1714 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_MISC);
1715 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
1716 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ERA);
1717 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADV);
1718 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADI);
1719 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
1720 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
1721 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
1722 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
1723 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
1724 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ASID);
1725 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
1726 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
1727 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
1728 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
1729 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
1730 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
1731 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
1732 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG1);
1733 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG2);
1734 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG3);
1735 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS0);
1736 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS1);
1737 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS2);
1738 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS3);
1739 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS4);
1740 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS5);
1741 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS6);
1742 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS7);
1743 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TMID);
1744 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
1745 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
1746 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
1747 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
1748 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
1749 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
1750 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
1751 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
1752 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
1753 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
1754 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
1755 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
1756 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
1757 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
1758 if (cpu_has_msgint) {
1759 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ISR0);
1760 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ISR1);
1761 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ISR2);
1762 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ISR3);
1763 }
1764
1765 vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST;
1766
1767 out:
1768 kvm_save_timer(vcpu);
1769 /* Save Root.GINTC into unused Guest.GINTC register */
1770 csr->csrs[LOONGARCH_CSR_GINTC] = read_csr_gintc();
1771
1772 return 0;
1773 }
1774
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1776 {
1777 int cpu;
1778 unsigned long flags;
1779
1780 local_irq_save(flags);
1781 cpu = smp_processor_id();
1782 vcpu->arch.last_sched_cpu = cpu;
1783
1784 /* Save guest state in registers */
1785 _kvm_vcpu_put(vcpu, cpu);
1786 local_irq_restore(flags);
1787 }
1788
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
1790 {
1791 int r = -EINTR;
1792 struct kvm_run *run = vcpu->run;
1793
1794 if (vcpu->mmio_needed) {
1795 if (!vcpu->mmio_is_write)
1796 kvm_complete_mmio_read(vcpu, run);
1797 vcpu->mmio_needed = 0;
1798 }
1799
1800 switch (run->exit_reason) {
1801 case KVM_EXIT_HYPERCALL:
1802 kvm_complete_user_service(vcpu, run);
1803 break;
1804 case KVM_EXIT_LOONGARCH_IOCSR:
1805 if (!run->iocsr_io.is_write)
1806 kvm_complete_iocsr_read(vcpu, run);
1807 break;
1808 }
1809
1810 if (!vcpu->wants_to_run)
1811 return r;
1812
1813 /* Clear exit_reason */
1814 run->exit_reason = KVM_EXIT_UNKNOWN;
1815 lose_fpu(1);
1816 vcpu_load(vcpu);
1817 kvm_sigset_activate(vcpu);
1818 r = kvm_pre_enter_guest(vcpu);
1819 if (r != RESUME_GUEST)
1820 goto out;
1821
1822 guest_timing_enter_irqoff();
1823 guest_state_enter_irqoff();
1824 trace_kvm_enter(vcpu);
1825 r = kvm_loongarch_ops->enter_guest(run, vcpu);
1826
1827 trace_kvm_out(vcpu);
/*
* The guest exit has already been recorded in kvm_handle_exit(),
* and its return value must not be RESUME_GUEST here.
*/
1832 local_irq_enable();
1833 out:
1834 kvm_sigset_deactivate(vcpu);
1835 vcpu_put(vcpu);
1836
1837 return r;
1838 }
1839