1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
4 */
5
6 #include <linux/kvm_host.h>
7 #include <linux/entry-kvm.h>
8 #include <asm/fpu.h>
9 #include <asm/lbt.h>
10 #include <asm/loongarch.h>
11 #include <asm/setup.h>
12 #include <asm/time.h>
13
14 #define CREATE_TRACE_POINTS
15 #include "trace.h"
16
17 const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
18 KVM_GENERIC_VCPU_STATS(),
19 STATS_DESC_COUNTER(VCPU, int_exits),
20 STATS_DESC_COUNTER(VCPU, idle_exits),
21 STATS_DESC_COUNTER(VCPU, cpucfg_exits),
22 STATS_DESC_COUNTER(VCPU, signal_exits),
23 STATS_DESC_COUNTER(VCPU, hypercall_exits),
24 STATS_DESC_COUNTER(VCPU, ipi_read_exits),
25 STATS_DESC_COUNTER(VCPU, ipi_write_exits),
26 STATS_DESC_COUNTER(VCPU, eiointc_read_exits),
27 STATS_DESC_COUNTER(VCPU, eiointc_write_exits),
28 STATS_DESC_COUNTER(VCPU, pch_pic_read_exits),
29 STATS_DESC_COUNTER(VCPU, pch_pic_write_exits)
30 };
31
32 const struct kvm_stats_header kvm_vcpu_stats_header = {
33 .name_size = KVM_STATS_NAME_SIZE,
34 .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
35 .id_offset = sizeof(struct kvm_stats_header),
36 .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
37 .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
38 sizeof(kvm_vcpu_stats_desc),
39 };
40
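/* Save the host PMU counter and control state, and stop the host counters */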
41 static inline void kvm_save_host_pmu(struct kvm_vcpu *vcpu)
42 {
43 struct kvm_context *context;
44
45 context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
46 context->perf_cntr[0] = read_csr_perfcntr0();
47 context->perf_cntr[1] = read_csr_perfcntr1();
48 context->perf_cntr[2] = read_csr_perfcntr2();
49 context->perf_cntr[3] = read_csr_perfcntr3();
50 context->perf_ctrl[0] = write_csr_perfctrl0(0);
51 context->perf_ctrl[1] = write_csr_perfctrl1(0);
52 context->perf_ctrl[2] = write_csr_perfctrl2(0);
53 context->perf_ctrl[3] = write_csr_perfctrl3(0);
54 }
55
56 static inline void kvm_restore_host_pmu(struct kvm_vcpu *vcpu)
57 {
58 struct kvm_context *context;
59
60 context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
61 write_csr_perfcntr0(context->perf_cntr[0]);
62 write_csr_perfcntr1(context->perf_cntr[1]);
63 write_csr_perfcntr2(context->perf_cntr[2]);
64 write_csr_perfcntr3(context->perf_cntr[3]);
65 write_csr_perfctrl0(context->perf_ctrl[0]);
66 write_csr_perfctrl1(context->perf_ctrl[1]);
67 write_csr_perfctrl2(context->perf_ctrl[2]);
68 write_csr_perfctrl3(context->perf_ctrl[3]);
69 }
70
72 static inline void kvm_save_guest_pmu(struct kvm_vcpu *vcpu)
73 {
74 struct loongarch_csrs *csr = vcpu->arch.csr;
75
76 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
77 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
78 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
79 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
80 kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
81 kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
82 kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
83 kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
84 }
85
86 static inline void kvm_restore_guest_pmu(struct kvm_vcpu *vcpu)
87 {
88 struct loongarch_csrs *csr = vcpu->arch.csr;
89
90 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
91 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
92 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
93 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
94 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
95 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
96 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
97 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
98 }
99
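/*
 * Hand the hardware PMU to the guest: save the host PMU state, grant the
 * guest access to the counters via GCFG.GPERF, then load the guest PMU CSRs.
 */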
100 static int kvm_own_pmu(struct kvm_vcpu *vcpu)
101 {
102 unsigned long val;
103
104 if (!kvm_guest_has_pmu(&vcpu->arch))
105 return -EINVAL;
106
107 kvm_save_host_pmu(vcpu);
108
109 /* Set PM0-PM(num) to guest */
110 val = read_csr_gcfg() & ~CSR_GCFG_GPERF;
111 val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT;
112 write_csr_gcfg(val);
113
114 kvm_restore_guest_pmu(vcpu);
115
116 return 0;
117 }
118
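/*
 * Take the hardware PMU back from the guest: save the guest PMU CSRs,
 * revoke guest access via GCFG.GPERF and restore the host PMU state.
 */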
119 static void kvm_lose_pmu(struct kvm_vcpu *vcpu)
120 {
121 unsigned long val;
122 struct loongarch_csrs *csr = vcpu->arch.csr;
123
124 if (!(vcpu->arch.aux_inuse & KVM_LARCH_PMU))
125 return;
126
127 kvm_save_guest_pmu(vcpu);
128
129 /* Disable pmu access from guest */
130 write_csr_gcfg(read_csr_gcfg() & ~CSR_GCFG_GPERF);
131
132 /*
133 * Clear KVM_LARCH_PMU if the guest is not using the PMU CSRs when
134 * exiting the guest, so that the PMU CSR context does not need to
135 * be dealt with on the next trap into the guest.
136 */
137 val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
138 val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
139 val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
140 val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
141 if (!(val & KVM_PMU_EVENT_ENABLED))
142 vcpu->arch.aux_inuse &= ~KVM_LARCH_PMU;
143
144 kvm_restore_host_pmu(vcpu);
145 }
146
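/* Defer the PMU context restore to guest entry by raising KVM_REQ_PMU */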
147 static void kvm_restore_pmu(struct kvm_vcpu *vcpu)
148 {
149 if ((vcpu->arch.aux_inuse & KVM_LARCH_PMU))
150 kvm_make_request(KVM_REQ_PMU, vcpu);
151 }
152
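/* Handle a pending KVM_REQ_PMU request just before entering the guest */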
153 static void kvm_check_pmu(struct kvm_vcpu *vcpu)
154 {
155 if (kvm_check_request(KVM_REQ_PMU, vcpu)) {
156 kvm_own_pmu(vcpu);
157 vcpu->arch.aux_inuse |= KVM_LARCH_PMU;
158 }
159 }
160
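/*
 * Publish the accumulated steal time to the guest's kvm_steal_time area.
 * The version field is bumped before and after the update, like a seqcount,
 * so the guest can detect a torn read.
 */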
161 static void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
162 {
163 u32 version;
164 u64 steal;
165 gpa_t gpa;
166 struct kvm_memslots *slots;
167 struct kvm_steal_time __user *st;
168 struct gfn_to_hva_cache *ghc;
169
170 ghc = &vcpu->arch.st.cache;
171 gpa = vcpu->arch.st.guest_addr;
172 if (!(gpa & KVM_STEAL_PHYS_VALID))
173 return;
174
175 gpa &= KVM_STEAL_PHYS_MASK;
176 slots = kvm_memslots(vcpu->kvm);
177 if (slots->generation != ghc->generation || gpa != ghc->gpa) {
178 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) {
179 ghc->gpa = INVALID_GPA;
180 return;
181 }
182 }
183
184 st = (struct kvm_steal_time __user *)ghc->hva;
185 unsafe_get_user(version, &st->version, out);
186 if (version & 1)
187 version += 1; /* first time write, random junk */
188
189 version += 1;
190 unsafe_put_user(version, &st->version, out);
191 smp_wmb();
192
193 unsafe_get_user(steal, &st->steal, out);
194 steal += current->sched_info.run_delay - vcpu->arch.st.last_steal;
195 vcpu->arch.st.last_steal = current->sched_info.run_delay;
196 unsafe_put_user(steal, &st->steal, out);
197
198 smp_wmb();
199 version += 1;
200 unsafe_put_user(version, &st->version, out);
201 out:
202 mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
203 }
204
205 /*
206 * kvm_check_requests - check and handle pending vCPU requests
207 *
208 * Return: RESUME_GUEST if we should enter the guest
209 * RESUME_HOST if we should exit to userspace
210 */
211 static int kvm_check_requests(struct kvm_vcpu *vcpu)
212 {
213 if (!kvm_request_pending(vcpu))
214 return RESUME_GUEST;
215
216 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
217 vcpu->arch.vpid = 0; /* Drop vpid for this vCPU */
218
219 if (kvm_dirty_ring_check_request(vcpu))
220 return RESUME_HOST;
221
222 if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
223 kvm_update_stolen_time(vcpu);
224
225 return RESUME_GUEST;
226 }
227
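/*
 * Handle requests which must be processed with interrupts disabled right
 * before guest entry, currently only the per-GPA TLB flush.
 */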
228 static void kvm_late_check_requests(struct kvm_vcpu *vcpu)
229 {
230 lockdep_assert_irqs_disabled();
231 if (kvm_check_request(KVM_REQ_TLB_FLUSH_GPA, vcpu))
232 if (vcpu->arch.flush_gpa != INVALID_GPA) {
233 kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa);
234 vcpu->arch.flush_gpa = INVALID_GPA;
235 }
236 }
237
238 /*
239 * Check and handle pending signal and vCPU requests etc
240 * Run with irq enabled and preempt enabled
241 *
242 * Return: RESUME_GUEST if we should enter the guest
243 * RESUME_HOST if we should exit to userspace
244 * < 0 if we should exit to userspace, where the return value
245 * indicates an error
246 */
247 static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
248 {
249 int idx, ret;
250
251 /*
252 * Check conditions before entering the guest
253 */
254 ret = xfer_to_guest_mode_handle_work(vcpu);
255 if (ret < 0)
256 return ret;
257
258 idx = srcu_read_lock(&vcpu->kvm->srcu);
259 ret = kvm_check_requests(vcpu);
260 srcu_read_unlock(&vcpu->kvm->srcu, idx);
261
262 return ret;
263 }
264
265 /*
266 * Called with irq enabled
267 *
268 * Return: RESUME_GUEST if we should enter the guest, and irq disabled
269 * Others if we should exit to userspace
270 */
271 static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
272 {
273 int ret;
274
275 do {
276 ret = kvm_enter_guest_check(vcpu);
277 if (ret != RESUME_GUEST)
278 break;
279
280 /*
281 * Handle vcpu timer, interrupts, check requests and
282 * check vmid before vcpu enter guest
283 */
284 local_irq_disable();
285 kvm_deliver_intr(vcpu);
286 kvm_deliver_exception(vcpu);
287 /* Make sure the vcpu mode has been written */
288 smp_store_mb(vcpu->mode, IN_GUEST_MODE);
289 kvm_check_vpid(vcpu);
290 kvm_check_pmu(vcpu);
291
292 /*
293 * Must be called after kvm_check_vpid(), since that updates
294 * CSR.GSTAT which is used by kvm_flush_tlb_gpa(), and it may
295 * also clear the pending KVM_REQ_TLB_FLUSH_GPA bit
296 */
297 kvm_late_check_requests(vcpu);
298 vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY);
299 /* Clear KVM_LARCH_SWCSR_LATEST as CSR will change when enter guest */
300 vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
301
302 if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) {
303 kvm_lose_pmu(vcpu);
304 /* make sure the vcpu mode has been written */
305 smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE);
306 local_irq_enable();
307 ret = -EAGAIN;
308 }
309 } while (ret != RESUME_GUEST);
310
311 return ret;
312 }
313
314 /*
315 * Return 1 for resume guest and "<= 0" for resume host.
316 */
317 static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
318 {
319 int ret = RESUME_GUEST;
320 unsigned long estat = vcpu->arch.host_estat;
321 u32 intr = estat & CSR_ESTAT_IS;
322 u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
323
324 vcpu->mode = OUTSIDE_GUEST_MODE;
325
326 /* Set a default exit reason */
327 run->exit_reason = KVM_EXIT_UNKNOWN;
328
329 kvm_lose_pmu(vcpu);
330
331 guest_timing_exit_irqoff();
332 guest_state_exit_irqoff();
333 local_irq_enable();
334
335 trace_kvm_exit(vcpu, ecode);
336 if (ecode) {
337 ret = kvm_handle_fault(vcpu, ecode);
338 } else {
339 WARN(!intr, "vm exiting with suspicious irq\n");
340 ++vcpu->stat.int_exits;
341 }
342
343 if (ret == RESUME_GUEST)
344 ret = kvm_pre_enter_guest(vcpu);
345
346 if (ret != RESUME_GUEST) {
347 local_irq_disable();
348 return ret;
349 }
350
351 guest_timing_enter_irqoff();
352 guest_state_enter_irqoff();
353 trace_kvm_reenter(vcpu);
354
355 return RESUME_GUEST;
356 }
357
358 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
359 {
360 return !!(vcpu->arch.irq_pending) &&
361 vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE;
362 }
363
364 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
365 {
366 return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
367 }
368
369 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
370 {
371 unsigned long val;
372
373 preempt_disable();
374 val = gcsr_read(LOONGARCH_CSR_CRMD);
375 preempt_enable();
376
377 return (val & CSR_PRMD_PPLV) == PLV_KERN;
378 }
379
380 #ifdef CONFIG_GUEST_PERF_EVENTS
381 unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
382 {
383 return vcpu->arch.pc;
384 }
385
386 /*
387 * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
388 * arrived in guest context. For LoongArch64, if PMU is not passthrough to VM,
389 * any event that arrives while a vCPU is loaded is considered to be "in guest".
390 */
391 bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
392 {
393 return (vcpu && !(vcpu->arch.aux_inuse & KVM_LARCH_PMU));
394 }
395 #endif
396
397 bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
398 {
399 return false;
400 }
401
402 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
403 {
404 return VM_FAULT_SIGBUS;
405 }
406
407 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
408 struct kvm_translation *tr)
409 {
410 return -EINVAL;
411 }
412
413 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
414 {
415 int ret;
416
417 /* Protect from TOD sync and vcpu_load/put() */
418 preempt_disable();
419 ret = kvm_pending_timer(vcpu) ||
420 kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT) & (1 << INT_TI);
421 preempt_enable();
422
423 return ret;
424 }
425
426 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
427 {
428 int i;
429
430 kvm_debug("vCPU Register Dump:\n");
431 kvm_debug("\tPC = 0x%08lx\n", vcpu->arch.pc);
432 kvm_debug("\tExceptions: %08lx\n", vcpu->arch.irq_pending);
433
434 for (i = 0; i < 32; i += 4) {
435 kvm_debug("\tGPR%02d: %08lx %08lx %08lx %08lx\n", i,
436 vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1],
437 vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
438 }
439
440 kvm_debug("\tCRMD: 0x%08lx, ESTAT: 0x%08lx\n",
441 kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD),
442 kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT));
443
444 kvm_debug("\tERA: 0x%08lx\n", kvm_read_hw_gcsr(LOONGARCH_CSR_ERA));
445
446 return 0;
447 }
448
449 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
450 struct kvm_mp_state *mp_state)
451 {
452 *mp_state = vcpu->arch.mp_state;
453
454 return 0;
455 }
456
457 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
458 struct kvm_mp_state *mp_state)
459 {
460 int ret = 0;
461
462 switch (mp_state->mp_state) {
463 case KVM_MP_STATE_RUNNABLE:
464 vcpu->arch.mp_state = *mp_state;
465 break;
466 default:
467 ret = -EINVAL;
468 }
469
470 return ret;
471 }
472
473 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
474 struct kvm_guest_debug *dbg)
475 {
476 if (dbg->control & ~KVM_GUESTDBG_VALID_MASK)
477 return -EINVAL;
478
479 if (dbg->control & KVM_GUESTDBG_ENABLE)
480 vcpu->guest_debug = dbg->control;
481 else
482 vcpu->guest_debug = 0;
483
484 return 0;
485 }
486
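/*
 * Bind a physical CPUID to this vCPU in the VM-wide phyid map. Changing an
 * already-set CPUID or sharing one CPUID between vCPUs is rejected.
 */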
487 static inline int kvm_set_cpuid(struct kvm_vcpu *vcpu, u64 val)
488 {
489 int cpuid;
490 struct kvm_phyid_map *map;
491 struct loongarch_csrs *csr = vcpu->arch.csr;
492
493 if (val >= KVM_MAX_PHYID)
494 return -EINVAL;
495
496 map = vcpu->kvm->arch.phyid_map;
497 cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);
498
499 spin_lock(&vcpu->kvm->arch.phyid_map_lock);
500 if ((cpuid < KVM_MAX_PHYID) && map->phys_map[cpuid].enabled) {
501 /* Discard duplicated CPUID set operation */
502 if (cpuid == val) {
503 spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
504 return 0;
505 }
506
507 /*
508 * CPUID is already set before
509 * Forbid changing to a different CPUID at runtime
510 */
511 spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
512 return -EINVAL;
513 }
514
515 if (map->phys_map[val].enabled) {
516 /* Discard duplicated CPUID set operation */
517 if (vcpu == map->phys_map[val].vcpu) {
518 spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
519 return 0;
520 }
521
522 /*
523 * New CPUID is already set with other vcpu
524 * Forbid sharing the same CPUID between different vcpus
525 */
526 spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
527 return -EINVAL;
528 }
529
530 kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, val);
531 map->phys_map[val].enabled = true;
532 map->phys_map[val].vcpu = vcpu;
533 spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
534
535 return 0;
536 }
537
538 static inline void kvm_drop_cpuid(struct kvm_vcpu *vcpu)
539 {
540 int cpuid;
541 struct kvm_phyid_map *map;
542 struct loongarch_csrs *csr = vcpu->arch.csr;
543
544 map = vcpu->kvm->arch.phyid_map;
545 cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);
546
547 if (cpuid >= KVM_MAX_PHYID)
548 return;
549
550 spin_lock(&vcpu->kvm->arch.phyid_map_lock);
551 if (map->phys_map[cpuid].enabled) {
552 map->phys_map[cpuid].vcpu = NULL;
553 map->phys_map[cpuid].enabled = false;
554 kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);
555 }
556 spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
557 }
558
559 struct kvm_vcpu *kvm_get_vcpu_by_cpuid(struct kvm *kvm, int cpuid)
560 {
561 struct kvm_phyid_map *map;
562
563 if (cpuid >= KVM_MAX_PHYID)
564 return NULL;
565
566 map = kvm->arch.phyid_map;
567 if (!map->phys_map[cpuid].enabled)
568 return NULL;
569
570 return map->phys_map[cpuid].vcpu;
571 }
572
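/*
 * Read a guest CSR value for KVM_GET_ONE_REG; ESTAT is special-cased so
 * that pending interrupts from GINTC are folded into the returned value.
 */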
573 static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val)
574 {
575 unsigned long gintc;
576 struct loongarch_csrs *csr = vcpu->arch.csr;
577
578 if (get_gcsr_flag(id) & INVALID_GCSR)
579 return -EINVAL;
580
581 if (id == LOONGARCH_CSR_ESTAT) {
582 preempt_disable();
583 vcpu_load(vcpu);
584 /*
585 * Sync pending interrupts into ESTAT so that the interrupt
586 * state is preserved during the VM migration stage
587 */
588 kvm_deliver_intr(vcpu);
589 vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
590 vcpu_put(vcpu);
591 preempt_enable();
592
593 /* ESTAT IP0~IP7 get from GINTC */
594 gintc = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC) & 0xff;
595 *val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT) | (gintc << 2);
596 return 0;
597 }
598
599 /*
600 * Get software CSR state since software state is consistent
601 * with hardware for synchronous ioctl
602 */
603 *val = kvm_read_sw_gcsr(csr, id);
604
605 return 0;
606 }
607
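/*
 * Write a guest CSR value for KVM_SET_ONE_REG; CPUID and ESTAT are
 * special-cased, and writes that enable a PMU event raise KVM_REQ_PMU.
 */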
608 static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
609 {
610 int ret = 0, gintc;
611 struct loongarch_csrs *csr = vcpu->arch.csr;
612
613 if (get_gcsr_flag(id) & INVALID_GCSR)
614 return -EINVAL;
615
616 if (id == LOONGARCH_CSR_CPUID)
617 return kvm_set_cpuid(vcpu, val);
618
619 if (id == LOONGARCH_CSR_ESTAT) {
620 /* ESTAT IP0~IP7 inject through GINTC */
621 gintc = (val >> 2) & 0xff;
622 kvm_set_sw_gcsr(csr, LOONGARCH_CSR_GINTC, gintc);
623
624 gintc = val & ~(0xffUL << 2);
625 kvm_set_sw_gcsr(csr, LOONGARCH_CSR_ESTAT, gintc);
626
627 return ret;
628 }
629
630 kvm_write_sw_gcsr(csr, id, val);
631
632 /*
633 * After modifying the vCPU's PMU CSR values, set KVM_REQ_PMU
634 * if any of the PMU CSRs now has an event enabled.
635 */
636 if (id >= LOONGARCH_CSR_PERFCTRL0 && id <= LOONGARCH_CSR_PERFCNTR3) {
637 unsigned long val;
638
639 val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0) |
640 kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1) |
641 kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2) |
642 kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
643
644 if (val & KVM_PMU_EVENT_ENABLED)
645 kvm_make_request(KVM_REQ_PMU, vcpu);
646 }
647
648 return ret;
649 }
650
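/* Return the mask of bits userspace may set for the given CPUCFG index */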
651 static int _kvm_get_cpucfg_mask(int id, u64 *v)
652 {
653 if (id < 0 || id >= KVM_MAX_CPUCFG_REGS)
654 return -EINVAL;
655
656 switch (id) {
657 case LOONGARCH_CPUCFG0:
658 *v = GENMASK(31, 0);
659 return 0;
660 case LOONGARCH_CPUCFG1:
661 /* CPUCFG1_MSGINT is not supported by KVM */
662 *v = GENMASK(25, 0);
663 return 0;
664 case LOONGARCH_CPUCFG2:
665 /* CPUCFG2 features unconditionally supported by KVM */
666 *v = CPUCFG2_FP | CPUCFG2_FPSP | CPUCFG2_FPDP |
667 CPUCFG2_FPVERS | CPUCFG2_LLFTP | CPUCFG2_LLFTPREV |
668 CPUCFG2_LSPW | CPUCFG2_LAM;
669 /*
670 * For the ISA extensions listed below, if one is supported
671 * by the host, then it is also supported by KVM.
672 */
673 if (cpu_has_lsx)
674 *v |= CPUCFG2_LSX;
675 if (cpu_has_lasx)
676 *v |= CPUCFG2_LASX;
677 if (cpu_has_lbt_x86)
678 *v |= CPUCFG2_X86BT;
679 if (cpu_has_lbt_arm)
680 *v |= CPUCFG2_ARMBT;
681 if (cpu_has_lbt_mips)
682 *v |= CPUCFG2_MIPSBT;
683
684 return 0;
685 case LOONGARCH_CPUCFG3:
686 *v = GENMASK(16, 0);
687 return 0;
688 case LOONGARCH_CPUCFG4:
689 case LOONGARCH_CPUCFG5:
690 *v = GENMASK(31, 0);
691 return 0;
692 case LOONGARCH_CPUCFG6:
693 if (cpu_has_pmp)
694 *v = GENMASK(14, 0);
695 else
696 *v = 0;
697 return 0;
698 case LOONGARCH_CPUCFG16:
699 *v = GENMASK(16, 0);
700 return 0;
701 case LOONGARCH_CPUCFG17 ... LOONGARCH_CPUCFG20:
702 *v = GENMASK(30, 0);
703 return 0;
704 default:
705 /*
706 * CPUCFG bits should be zero if reserved by HW or not
707 * supported by KVM.
708 */
709 *v = 0;
710 return 0;
711 }
712 }
713
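/*
 * Validate a userspace-supplied CPUCFG value against the writable mask and
 * the architectural feature dependencies (FP/LSX/LASX and the PMU fields).
 */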
714 static int kvm_check_cpucfg(int id, u64 val)
715 {
716 int ret;
717 u64 mask = 0;
718
719 ret = _kvm_get_cpucfg_mask(id, &mask);
720 if (ret)
721 return ret;
722
723 if (val & ~mask)
724 /* Unsupported features and/or the higher 32 bits should not be set */
725 return -EINVAL;
726
727 switch (id) {
728 case LOONGARCH_CPUCFG2:
729 if (!(val & CPUCFG2_LLFTP))
730 /* Guests must have a constant timer */
731 return -EINVAL;
732 if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP)))
733 /* Single and double float point must both be set when FP is enabled */
734 return -EINVAL;
735 if ((val & CPUCFG2_LSX) && !(val & CPUCFG2_FP))
736 /* LSX architecturally implies FP but val does not satisfy that */
737 return -EINVAL;
738 if ((val & CPUCFG2_LASX) && !(val & CPUCFG2_LSX))
739 /* LASX architecturally implies LSX and FP but val does not satisfy that */
740 return -EINVAL;
741 return 0;
742 case LOONGARCH_CPUCFG6:
743 if (val & CPUCFG6_PMP) {
744 u32 host = read_cpucfg(LOONGARCH_CPUCFG6);
745 if ((val & CPUCFG6_PMBITS) != (host & CPUCFG6_PMBITS))
746 return -EINVAL;
747 if ((val & CPUCFG6_PMNUM) > (host & CPUCFG6_PMNUM))
748 return -EINVAL;
749 if ((val & CPUCFG6_UPM) && !(host & CPUCFG6_UPM))
750 return -EINVAL;
751 }
752 return 0;
753 default:
754 /*
755 * Values for the other CPUCFG IDs are not being further validated
756 * besides the mask check above.
757 */
758 return 0;
759 }
760 }
761
762 static int kvm_get_one_reg(struct kvm_vcpu *vcpu,
763 const struct kvm_one_reg *reg, u64 *v)
764 {
765 int id, ret = 0;
766 u64 type = reg->id & KVM_REG_LOONGARCH_MASK;
767
768 switch (type) {
769 case KVM_REG_LOONGARCH_CSR:
770 id = KVM_GET_IOC_CSR_IDX(reg->id);
771 ret = _kvm_getcsr(vcpu, id, v);
772 break;
773 case KVM_REG_LOONGARCH_CPUCFG:
774 id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
775 if (id >= 0 && id < KVM_MAX_CPUCFG_REGS)
776 *v = vcpu->arch.cpucfg[id];
777 else
778 ret = -EINVAL;
779 break;
780 case KVM_REG_LOONGARCH_LBT:
781 if (!kvm_guest_has_lbt(&vcpu->arch))
782 return -ENXIO;
783
784 switch (reg->id) {
785 case KVM_REG_LOONGARCH_LBT_SCR0:
786 *v = vcpu->arch.lbt.scr0;
787 break;
788 case KVM_REG_LOONGARCH_LBT_SCR1:
789 *v = vcpu->arch.lbt.scr1;
790 break;
791 case KVM_REG_LOONGARCH_LBT_SCR2:
792 *v = vcpu->arch.lbt.scr2;
793 break;
794 case KVM_REG_LOONGARCH_LBT_SCR3:
795 *v = vcpu->arch.lbt.scr3;
796 break;
797 case KVM_REG_LOONGARCH_LBT_EFLAGS:
798 *v = vcpu->arch.lbt.eflags;
799 break;
800 case KVM_REG_LOONGARCH_LBT_FTOP:
801 *v = vcpu->arch.fpu.ftop;
802 break;
803 default:
804 ret = -EINVAL;
805 break;
806 }
807 break;
808 case KVM_REG_LOONGARCH_KVM:
809 switch (reg->id) {
810 case KVM_REG_LOONGARCH_COUNTER:
811 *v = drdtime() + vcpu->kvm->arch.time_offset;
812 break;
813 case KVM_REG_LOONGARCH_DEBUG_INST:
814 *v = INSN_HVCL | KVM_HCALL_SWDBG;
815 break;
816 default:
817 ret = -EINVAL;
818 break;
819 }
820 break;
821 default:
822 ret = -EINVAL;
823 break;
824 }
825
826 return ret;
827 }
828
829 static int kvm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
830 {
831 int ret = 0;
832 u64 v, size = reg->id & KVM_REG_SIZE_MASK;
833
834 switch (size) {
835 case KVM_REG_SIZE_U64:
836 ret = kvm_get_one_reg(vcpu, reg, &v);
837 if (ret)
838 return ret;
839 ret = put_user(v, (u64 __user *)(long)reg->addr);
840 break;
841 default:
842 ret = -EINVAL;
843 break;
844 }
845
846 return ret;
847 }
848
849 static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
850 const struct kvm_one_reg *reg, u64 v)
851 {
852 int id, ret = 0;
853 u64 type = reg->id & KVM_REG_LOONGARCH_MASK;
854
855 switch (type) {
856 case KVM_REG_LOONGARCH_CSR:
857 id = KVM_GET_IOC_CSR_IDX(reg->id);
858 ret = _kvm_setcsr(vcpu, id, v);
859 break;
860 case KVM_REG_LOONGARCH_CPUCFG:
861 id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
862 ret = kvm_check_cpucfg(id, v);
863 if (ret)
864 break;
865 vcpu->arch.cpucfg[id] = (u32)v;
866 if (id == LOONGARCH_CPUCFG6)
867 vcpu->arch.max_pmu_csrid =
868 LOONGARCH_CSR_PERFCTRL0 + 2 * kvm_get_pmu_num(&vcpu->arch) + 1;
869 break;
870 case KVM_REG_LOONGARCH_LBT:
871 if (!kvm_guest_has_lbt(&vcpu->arch))
872 return -ENXIO;
873
874 switch (reg->id) {
875 case KVM_REG_LOONGARCH_LBT_SCR0:
876 vcpu->arch.lbt.scr0 = v;
877 break;
878 case KVM_REG_LOONGARCH_LBT_SCR1:
879 vcpu->arch.lbt.scr1 = v;
880 break;
881 case KVM_REG_LOONGARCH_LBT_SCR2:
882 vcpu->arch.lbt.scr2 = v;
883 break;
884 case KVM_REG_LOONGARCH_LBT_SCR3:
885 vcpu->arch.lbt.scr3 = v;
886 break;
887 case KVM_REG_LOONGARCH_LBT_EFLAGS:
888 vcpu->arch.lbt.eflags = v;
889 break;
890 case KVM_REG_LOONGARCH_LBT_FTOP:
891 vcpu->arch.fpu.ftop = v;
892 break;
893 default:
894 ret = -EINVAL;
895 break;
896 }
897 break;
898 case KVM_REG_LOONGARCH_KVM:
899 switch (reg->id) {
900 case KVM_REG_LOONGARCH_COUNTER:
901 /*
902 * The guest timer offset is board-wide, not per-vCPU, so on
903 * SMP systems it is only set once, via vCPU 0
904 */
905 if (vcpu->vcpu_id == 0)
906 vcpu->kvm->arch.time_offset = (signed long)(v - drdtime());
907 break;
908 case KVM_REG_LOONGARCH_VCPU_RESET:
909 vcpu->arch.st.guest_addr = 0;
910 memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
911 memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
912
913 /*
914 * On vCPU reset, clear the ESTAT and GINTC registers here;
915 * other CSR registers are cleared via _kvm_setcsr().
916 */
917 kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_GINTC, 0);
918 kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_ESTAT, 0);
919 break;
920 default:
921 ret = -EINVAL;
922 break;
923 }
924 break;
925 default:
926 ret = -EINVAL;
927 break;
928 }
929
930 return ret;
931 }
932
933 static int kvm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
934 {
935 int ret = 0;
936 u64 v, size = reg->id & KVM_REG_SIZE_MASK;
937
938 switch (size) {
939 case KVM_REG_SIZE_U64:
940 ret = get_user(v, (u64 __user *)(long)reg->addr);
941 if (ret)
942 return ret;
943 break;
944 default:
945 return -EINVAL;
946 }
947
948 return kvm_set_one_reg(vcpu, reg, v);
949 }
950
951 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
952 {
953 return -ENOIOCTLCMD;
954 }
955
956 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
957 {
958 return -ENOIOCTLCMD;
959 }
960
961 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
962 {
963 int i;
964
965 for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
966 regs->gpr[i] = vcpu->arch.gprs[i];
967
968 regs->pc = vcpu->arch.pc;
969
970 return 0;
971 }
972
973 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
974 {
975 int i;
976
977 for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
978 vcpu->arch.gprs[i] = regs->gpr[i];
979
980 vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
981 vcpu->arch.pc = regs->pc;
982
983 return 0;
984 }
985
986 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
987 struct kvm_enable_cap *cap)
988 {
989 /* FPU is enabled by default, will support LSX/LASX later. */
990 return -EINVAL;
991 }
992
993 static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu,
994 struct kvm_device_attr *attr)
995 {
996 switch (attr->attr) {
997 case LOONGARCH_CPUCFG2:
998 case LOONGARCH_CPUCFG6:
999 return 0;
1000 case CPUCFG_KVM_FEATURE:
1001 return 0;
1002 default:
1003 return -ENXIO;
1004 }
1005
1006 return -ENXIO;
1007 }
1008
1009 static int kvm_loongarch_pvtime_has_attr(struct kvm_vcpu *vcpu,
1010 struct kvm_device_attr *attr)
1011 {
1012 if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
1013 || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
1014 return -ENXIO;
1015
1016 return 0;
1017 }
1018
1019 static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
1020 struct kvm_device_attr *attr)
1021 {
1022 int ret = -ENXIO;
1023
1024 switch (attr->group) {
1025 case KVM_LOONGARCH_VCPU_CPUCFG:
1026 ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr);
1027 break;
1028 case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
1029 ret = kvm_loongarch_pvtime_has_attr(vcpu, attr);
1030 break;
1031 default:
1032 break;
1033 }
1034
1035 return ret;
1036 }
1037
1038 static int kvm_loongarch_cpucfg_get_attr(struct kvm_vcpu *vcpu,
1039 struct kvm_device_attr *attr)
1040 {
1041 int ret = 0;
1042 uint64_t val;
1043 uint64_t __user *uaddr = (uint64_t __user *)attr->addr;
1044
1045 switch (attr->attr) {
1046 case 0 ... (KVM_MAX_CPUCFG_REGS - 1):
1047 ret = _kvm_get_cpucfg_mask(attr->attr, &val);
1048 if (ret)
1049 return ret;
1050 break;
1051 case CPUCFG_KVM_FEATURE:
1052 val = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK;
1053 break;
1054 default:
1055 return -ENXIO;
1056 }
1057
1058 put_user(val, uaddr);
1059
1060 return ret;
1061 }
1062
1063 static int kvm_loongarch_pvtime_get_attr(struct kvm_vcpu *vcpu,
1064 struct kvm_device_attr *attr)
1065 {
1066 u64 gpa;
1067 u64 __user *user = (u64 __user *)attr->addr;
1068
1069 if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
1070 || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
1071 return -ENXIO;
1072
1073 gpa = vcpu->arch.st.guest_addr;
1074 if (put_user(gpa, user))
1075 return -EFAULT;
1076
1077 return 0;
1078 }
1079
1080 static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu,
1081 struct kvm_device_attr *attr)
1082 {
1083 int ret = -ENXIO;
1084
1085 switch (attr->group) {
1086 case KVM_LOONGARCH_VCPU_CPUCFG:
1087 ret = kvm_loongarch_cpucfg_get_attr(vcpu, attr);
1088 break;
1089 case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
1090 ret = kvm_loongarch_pvtime_get_attr(vcpu, attr);
1091 break;
1092 default:
1093 break;
1094 }
1095
1096 return ret;
1097 }
1098
1099 static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu,
1100 struct kvm_device_attr *attr)
1101 {
1102 u64 val, valid;
1103 u64 __user *user = (u64 __user *)attr->addr;
1104 struct kvm *kvm = vcpu->kvm;
1105
1106 switch (attr->attr) {
1107 case CPUCFG_KVM_FEATURE:
1108 if (get_user(val, user))
1109 return -EFAULT;
1110
1111 valid = LOONGARCH_PV_FEAT_MASK;
1112 if (val & ~valid)
1113 return -EINVAL;
1114
1115 /* All vCPUs need to set the same PV features */
1116 if ((kvm->arch.pv_features & LOONGARCH_PV_FEAT_UPDATED)
1117 && ((kvm->arch.pv_features & valid) != val))
1118 return -EINVAL;
1119 kvm->arch.pv_features = val | LOONGARCH_PV_FEAT_UPDATED;
1120 return 0;
1121 default:
1122 return -ENXIO;
1123 }
1124 }
1125
1126 static int kvm_loongarch_pvtime_set_attr(struct kvm_vcpu *vcpu,
1127 struct kvm_device_attr *attr)
1128 {
1129 int idx, ret = 0;
1130 u64 gpa, __user *user = (u64 __user *)attr->addr;
1131 struct kvm *kvm = vcpu->kvm;
1132
1133 if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
1134 || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
1135 return -ENXIO;
1136
1137 if (get_user(gpa, user))
1138 return -EFAULT;
1139
1140 if (gpa & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
1141 return -EINVAL;
1142
1143 if (!(gpa & KVM_STEAL_PHYS_VALID)) {
1144 vcpu->arch.st.guest_addr = gpa;
1145 return 0;
1146 }
1147
1148 /* Check the address is in a valid memslot */
1149 idx = srcu_read_lock(&kvm->srcu);
1150 if (kvm_is_error_hva(gfn_to_hva(kvm, gpa >> PAGE_SHIFT)))
1151 ret = -EINVAL;
1152 srcu_read_unlock(&kvm->srcu, idx);
1153
1154 if (!ret) {
1155 vcpu->arch.st.guest_addr = gpa;
1156 vcpu->arch.st.last_steal = current->sched_info.run_delay;
1157 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
1158 }
1159
1160 return ret;
1161 }
1162
1163 static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu,
1164 struct kvm_device_attr *attr)
1165 {
1166 int ret = -ENXIO;
1167
1168 switch (attr->group) {
1169 case KVM_LOONGARCH_VCPU_CPUCFG:
1170 ret = kvm_loongarch_cpucfg_set_attr(vcpu, attr);
1171 break;
1172 case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
1173 ret = kvm_loongarch_pvtime_set_attr(vcpu, attr);
1174 break;
1175 default:
1176 break;
1177 }
1178
1179 return ret;
1180 }
1181
1182 long kvm_arch_vcpu_ioctl(struct file *filp,
1183 unsigned int ioctl, unsigned long arg)
1184 {
1185 long r;
1186 struct kvm_device_attr attr;
1187 void __user *argp = (void __user *)arg;
1188 struct kvm_vcpu *vcpu = filp->private_data;
1189
1190 /*
1191 * Only the software CSR state should be modified here.
1192 *
1193 * If any hardware CSR register is modified, a vcpu_load/vcpu_put pair
1194 * should be used, since the hardware CSR registers are owned by the
1195 * current vCPU and other vCPUs must reload them when switched in.
1196 *
1197 * If the software CSR state is modified, the KVM_LARCH_HWCSR_USABLE bit
1198 * should be cleared in vcpu->arch.aux_inuse, so that vcpu_load checks
1199 * the aux_inuse flag and reloads the CSR registers from software state.
1200 */
1201
1202 switch (ioctl) {
1203 case KVM_SET_ONE_REG:
1204 case KVM_GET_ONE_REG: {
1205 struct kvm_one_reg reg;
1206
1207 r = -EFAULT;
1208 if (copy_from_user(&reg, argp, sizeof(reg)))
1209 break;
1210 if (ioctl == KVM_SET_ONE_REG) {
1211 r = kvm_set_reg(vcpu, &reg);
1212 vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
1213 } else
1214 r = kvm_get_reg(vcpu, &reg);
1215 break;
1216 }
1217 case KVM_ENABLE_CAP: {
1218 struct kvm_enable_cap cap;
1219
1220 r = -EFAULT;
1221 if (copy_from_user(&cap, argp, sizeof(cap)))
1222 break;
1223 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
1224 break;
1225 }
1226 case KVM_HAS_DEVICE_ATTR: {
1227 r = -EFAULT;
1228 if (copy_from_user(&attr, argp, sizeof(attr)))
1229 break;
1230 r = kvm_loongarch_vcpu_has_attr(vcpu, &attr);
1231 break;
1232 }
1233 case KVM_GET_DEVICE_ATTR: {
1234 r = -EFAULT;
1235 if (copy_from_user(&attr, argp, sizeof(attr)))
1236 break;
1237 r = kvm_loongarch_vcpu_get_attr(vcpu, &attr);
1238 break;
1239 }
1240 case KVM_SET_DEVICE_ATTR: {
1241 r = -EFAULT;
1242 if (copy_from_user(&attr, argp, sizeof(attr)))
1243 break;
1244 r = kvm_loongarch_vcpu_set_attr(vcpu, &attr);
1245 break;
1246 }
1247 default:
1248 r = -ENOIOCTLCMD;
1249 break;
1250 }
1251
1252 return r;
1253 }
1254
1255 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1256 {
1257 int i = 0;
1258
1259 fpu->fcc = vcpu->arch.fpu.fcc;
1260 fpu->fcsr = vcpu->arch.fpu.fcsr;
1261 for (i = 0; i < NUM_FPU_REGS; i++)
1262 memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i], FPU_REG_WIDTH / 64);
1263
1264 return 0;
1265 }
1266
1267 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1268 {
1269 int i = 0;
1270
1271 vcpu->arch.fpu.fcc = fpu->fcc;
1272 vcpu->arch.fpu.fcsr = fpu->fcsr;
1273 for (i = 0; i < NUM_FPU_REGS; i++)
1274 memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i], FPU_REG_WIDTH / 64);
1275
1276 return 0;
1277 }
1278
1279 #ifdef CONFIG_CPU_HAS_LBT
1280 int kvm_own_lbt(struct kvm_vcpu *vcpu)
1281 {
1282 if (!kvm_guest_has_lbt(&vcpu->arch))
1283 return -EINVAL;
1284
1285 preempt_disable();
1286 set_csr_euen(CSR_EUEN_LBTEN);
1287 _restore_lbt(&vcpu->arch.lbt);
1288 vcpu->arch.aux_inuse |= KVM_LARCH_LBT;
1289 preempt_enable();
1290
1291 return 0;
1292 }
1293
1294 static void kvm_lose_lbt(struct kvm_vcpu *vcpu)
1295 {
1296 preempt_disable();
1297 if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) {
1298 _save_lbt(&vcpu->arch.lbt);
1299 clear_csr_euen(CSR_EUEN_LBTEN);
1300 vcpu->arch.aux_inuse &= ~KVM_LARCH_LBT;
1301 }
1302 preempt_enable();
1303 }
1304
1305 static void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr)
1306 {
1307 /*
1308 * If TM is enabled, saving/restoring the FPU top register will
1309 * cause an LBT exception, so enable LBT in advance here
1310 */
1311 if (fcsr & FPU_CSR_TM)
1312 kvm_own_lbt(vcpu);
1313 }
1314
1315 static void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu)
1316 {
1317 if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1318 if (vcpu->arch.aux_inuse & KVM_LARCH_LBT)
1319 return;
1320 kvm_check_fcsr(vcpu, read_fcsr(LOONGARCH_FCSR0));
1321 }
1322 }
1323 #else
1324 static inline void kvm_lose_lbt(struct kvm_vcpu *vcpu) { }
1325 static inline void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr) { }
1326 static inline void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu) { }
1327 #endif
1328
1329 /* Enable FPU and restore context */
1330 void kvm_own_fpu(struct kvm_vcpu *vcpu)
1331 {
1332 preempt_disable();
1333
1334 /*
1335 * Enable FPU for guest
1336 * Set FR and FRE according to guest context
1337 */
1338 kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1339 set_csr_euen(CSR_EUEN_FPEN);
1340
1341 kvm_restore_fpu(&vcpu->arch.fpu);
1342 vcpu->arch.aux_inuse |= KVM_LARCH_FPU;
1343 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
1344
1345 preempt_enable();
1346 }
1347
1348 #ifdef CONFIG_CPU_HAS_LSX
1349 /* Enable LSX and restore context */
1350 int kvm_own_lsx(struct kvm_vcpu *vcpu)
1351 {
1352 if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch))
1353 return -EINVAL;
1354
1355 preempt_disable();
1356
1357 /* Enable LSX for guest */
1358 kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1359 set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN);
1360 switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1361 case KVM_LARCH_FPU:
1362 /*
1363 * Guest FPU state already loaded,
1364 * only restore upper LSX state
1365 */
1366 _restore_lsx_upper(&vcpu->arch.fpu);
1367 break;
1368 default:
1369 /* Neither FP nor LSX already active,
1370 * restore full LSX state
1371 */
1372 kvm_restore_lsx(&vcpu->arch.fpu);
1373 break;
1374 }
1375
1376 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX);
1377 vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU;
1378 preempt_enable();
1379
1380 return 0;
1381 }
1382 #endif
1383
1384 #ifdef CONFIG_CPU_HAS_LASX
1385 /* Enable LASX and restore context */
1386 int kvm_own_lasx(struct kvm_vcpu *vcpu)
1387 {
1388 if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch) || !kvm_guest_has_lasx(&vcpu->arch))
1389 return -EINVAL;
1390
1391 preempt_disable();
1392
1393 kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1394 set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
1395 switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) {
1396 case KVM_LARCH_LSX:
1397 case KVM_LARCH_LSX | KVM_LARCH_FPU:
1398 /* Guest LSX state already loaded, only restore upper LASX state */
1399 _restore_lasx_upper(&vcpu->arch.fpu);
1400 break;
1401 case KVM_LARCH_FPU:
1402 /* Guest FP state already loaded, only restore upper LSX & LASX state */
1403 _restore_lsx_upper(&vcpu->arch.fpu);
1404 _restore_lasx_upper(&vcpu->arch.fpu);
1405 break;
1406 default:
1407 /* Neither FP nor LSX already active, restore full LASX state */
1408 kvm_restore_lasx(&vcpu->arch.fpu);
1409 break;
1410 }
1411
1412 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX);
1413 vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU;
1414 preempt_enable();
1415
1416 return 0;
1417 }
1418 #endif
1419
1420 /* Save context and disable FPU */
1421 void kvm_lose_fpu(struct kvm_vcpu *vcpu)
1422 {
1423 preempt_disable();
1424
1425 kvm_check_fcsr_alive(vcpu);
1426 if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) {
1427 kvm_save_lasx(&vcpu->arch.fpu);
1428 vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX);
1429 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LASX);
1430
1431 /* Disable LASX & LSX & FPU */
1432 clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
1433 } else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) {
1434 kvm_save_lsx(&vcpu->arch.fpu);
1435 vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU);
1436 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX);
1437
1438 /* Disable LSX & FPU */
1439 clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN);
1440 } else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1441 kvm_save_fpu(&vcpu->arch.fpu);
1442 vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU;
1443 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);
1444
1445 /* Disable FPU */
1446 clear_csr_euen(CSR_EUEN_FPEN);
1447 }
1448 kvm_lose_lbt(vcpu);
1449
1450 preempt_enable();
1451 }
1452
1453 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
1454 {
1455 int intr = (int)irq->irq;
1456
1457 if (intr > 0)
1458 kvm_queue_irq(vcpu, intr);
1459 else if (intr < 0)
1460 kvm_dequeue_irq(vcpu, -intr);
1461 else {
1462 kvm_err("%s: invalid interrupt ioctl %d\n", __func__, irq->irq);
1463 return -EINVAL;
1464 }
1465
1466 kvm_vcpu_kick(vcpu);
1467
1468 return 0;
1469 }
1470
1471 long kvm_arch_vcpu_async_ioctl(struct file *filp,
1472 unsigned int ioctl, unsigned long arg)
1473 {
1474 void __user *argp = (void __user *)arg;
1475 struct kvm_vcpu *vcpu = filp->private_data;
1476
1477 if (ioctl == KVM_INTERRUPT) {
1478 struct kvm_interrupt irq;
1479
1480 if (copy_from_user(&irq, argp, sizeof(irq)))
1481 return -EFAULT;
1482
1483 kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq);
1484
1485 return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
1486 }
1487
1488 return -ENOIOCTLCMD;
1489 }
1490
1491 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
1492 {
1493 return 0;
1494 }
1495
1496 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
1497 {
1498 unsigned long timer_hz;
1499 struct loongarch_csrs *csr;
1500
1501 vcpu->arch.vpid = 0;
1502 vcpu->arch.flush_gpa = INVALID_GPA;
1503
1504 hrtimer_setup(&vcpu->arch.swtimer, kvm_swtimer_wakeup, CLOCK_MONOTONIC,
1505 HRTIMER_MODE_ABS_PINNED_HARD);
1506
1507 /* Get GPA (=HVA) of PGD for kvm hypervisor */
1508 vcpu->arch.kvm_pgd = __pa(vcpu->kvm->arch.pgd);
1509
1510 /*
1511 * Get the PGD for the primary MMU; a virtual address is used since
1512 * memory is accessed after loading from CSR_PGD in the TLB exception fast path.
1513 */
1514 vcpu->arch.host_pgd = (unsigned long)vcpu->kvm->mm->pgd;
1515
1516 vcpu->arch.handle_exit = kvm_handle_exit;
1517 vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry;
1518 vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL);
1519 if (!vcpu->arch.csr)
1520 return -ENOMEM;
1521
1522 /*
1523 * All KVM exceptions share one exception entry, and the host <-> guest
1524 * switch also switches the ECFG.VS field, so keep the host ECFG.VS value here.
1525 */
1526 vcpu->arch.host_ecfg = (read_csr_ecfg() & CSR_ECFG_VS);
1527
1528 /* Init */
1529 vcpu->arch.last_sched_cpu = -1;
1530
1531 /* Init ipi_state lock */
1532 spin_lock_init(&vcpu->arch.ipi_state.lock);
1533
1534 /*
1535 * Initialize guest register state to valid architectural reset state.
1536 */
1537 timer_hz = calc_const_freq();
1538 kvm_init_timer(vcpu, timer_hz);
1539
1540 /* Set Initialize mode for guest */
1541 csr = vcpu->arch.csr;
1542 kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CRMD, CSR_CRMD_DA);
1543
1544 /* Set cpuid */
1545 kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id);
1546 kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);
1547
1548 /* Start with no pending virtual guest interrupts */
1549 csr->csrs[LOONGARCH_CSR_GINTC] = 0;
1550
1551 return 0;
1552 }
1553
1554 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
1555 {
1556 }
1557
1558 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1559 {
1560 int cpu;
1561 struct kvm_context *context;
1562
1563 hrtimer_cancel(&vcpu->arch.swtimer);
1564 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
1565 kvm_drop_cpuid(vcpu);
1566 kfree(vcpu->arch.csr);
1567
1568 /*
1569 * If the vCPU is freed and reused as another vCPU, we don't want the
1570 * matching pointer wrongly hanging around in last_vcpu.
1571 */
1572 for_each_possible_cpu(cpu) {
1573 context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
1574 if (context->last_vcpu == vcpu)
1575 context->last_vcpu = NULL;
1576 }
1577 }
1578
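/*
 * Restore the guest timer and CSR context to hardware, called with
 * interrupts disabled; the full CSR reload is skipped while the hardware
 * CSR state is still valid for this vCPU.
 */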
1579 static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1580 {
1581 bool migrated;
1582 struct kvm_context *context;
1583 struct loongarch_csrs *csr = vcpu->arch.csr;
1584
1585 /*
1586 * Have we migrated to a different CPU?
1587 * If so, any old guest TLB state may be stale.
1588 */
1589 migrated = (vcpu->arch.last_sched_cpu != cpu);
1590
1591 /*
1592 * Was this the last vCPU to run on this CPU?
1593 * If not, any old guest state from this vCPU will have been clobbered.
1594 */
1595 context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
1596 if (migrated || (context->last_vcpu != vcpu))
1597 vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
1598 context->last_vcpu = vcpu;
1599
1600 /* Restore timer state regardless */
1601 kvm_restore_timer(vcpu);
1602 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
1603
1604 /* Restore hardware PMU CSRs */
1605 kvm_restore_pmu(vcpu);
1606
1607 /* Don't bother restoring registers multiple times unless necessary */
1608 if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE)
1609 return 0;
1610
1611 write_csr_gcntc((ulong)vcpu->kvm->arch.time_offset);
1612
1613 /* Restore guest CSR registers */
1614 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
1615 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
1616 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
1617 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_MISC);
1618 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
1619 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ERA);
1620 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADV);
1621 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADI);
1622 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
1623 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
1624 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
1625 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
1626 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
1627 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ASID);
1628 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
1629 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
1630 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
1631 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
1632 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
1633 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
1634 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
1635 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS0);
1636 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS1);
1637 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS2);
1638 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS3);
1639 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS4);
1640 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS5);
1641 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS6);
1642 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS7);
1643 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TMID);
1644 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
1645 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
1646 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
1647 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
1648 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
1649 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
1650 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
1651 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
1652 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
1653 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
1654 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
1655 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
1656 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
1657 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
1658
1659 /* Restore Root.GINTC from unused Guest.GINTC register */
1660 write_csr_gintc(csr->csrs[LOONGARCH_CSR_GINTC]);
1661
1662 /*
1663 * We should clear linked load bit to break interrupted atomics. This
1664 * prevents a SC on the next vCPU from succeeding by matching a LL on
1665 * the previous vCPU.
1666 */
1667 if (vcpu->kvm->created_vcpus > 1)
1668 set_gcsr_llbctl(CSR_LLBCTL_WCLLB);
1669
1670 vcpu->arch.aux_inuse |= KVM_LARCH_HWCSR_USABLE;
1671
1672 return 0;
1673 }
1674
1675 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1676 {
1677 unsigned long flags;
1678
1679 local_irq_save(flags);
1680 /* Restore guest state to registers */
1681 _kvm_vcpu_load(vcpu, cpu);
1682 local_irq_restore(flags);
1683 }
1684
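/*
 * Save the guest timer and CSR context from hardware, called with
 * interrupts disabled; the full CSR save is skipped when the software
 * copy is already up to date.
 */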
1685 static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
1686 {
1687 struct loongarch_csrs *csr = vcpu->arch.csr;
1688
1689 kvm_lose_fpu(vcpu);
1690
1691 /*
1692 * Update the software CSR state from hardware if it is stale. Most
1693 * CSR registers are kept unchanged across a process context switch,
1694 * except for registers such as the remaining timer tick value and
1695 * the injected interrupt state.
1696 */
1697 if (vcpu->arch.aux_inuse & KVM_LARCH_SWCSR_LATEST)
1698 goto out;
1699
1700 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
1701 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
1702 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
1703 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_MISC);
1704 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
1705 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ERA);
1706 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADV);
1707 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADI);
1708 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
1709 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
1710 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
1711 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
1712 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
1713 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ASID);
1714 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
1715 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
1716 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
1717 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
1718 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
1719 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
1720 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
1721 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG1);
1722 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG2);
1723 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG3);
1724 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS0);
1725 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS1);
1726 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS2);
1727 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS3);
1728 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS4);
1729 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS5);
1730 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS6);
1731 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS7);
1732 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TMID);
1733 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
1734 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
1735 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
1736 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
1737 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
1738 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
1739 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
1740 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
1741 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
1742 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
1743 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
1744 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
1745 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
1746 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
1747
1748 vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST;
1749
1750 out:
1751 kvm_save_timer(vcpu);
1752 /* Save Root.GINTC into unused Guest.GINTC register */
1753 csr->csrs[LOONGARCH_CSR_GINTC] = read_csr_gintc();
1754
1755 return 0;
1756 }
1757
1758 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1759 {
1760 int cpu;
1761 unsigned long flags;
1762
1763 local_irq_save(flags);
1764 cpu = smp_processor_id();
1765 vcpu->arch.last_sched_cpu = cpu;
1766
1767 /* Save guest state in registers */
1768 _kvm_vcpu_put(vcpu, cpu);
1769 local_irq_restore(flags);
1770 }
1771
1772 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
1773 {
1774 int r = -EINTR;
1775 struct kvm_run *run = vcpu->run;
1776
1777 if (vcpu->mmio_needed) {
1778 if (!vcpu->mmio_is_write)
1779 kvm_complete_mmio_read(vcpu, run);
1780 vcpu->mmio_needed = 0;
1781 }
1782
1783 switch (run->exit_reason) {
1784 case KVM_EXIT_HYPERCALL:
1785 kvm_complete_user_service(vcpu, run);
1786 break;
1787 case KVM_EXIT_LOONGARCH_IOCSR:
1788 if (!run->iocsr_io.is_write)
1789 kvm_complete_iocsr_read(vcpu, run);
1790 break;
1791 }
1792
1793 if (!vcpu->wants_to_run)
1794 return r;
1795
1796 /* Clear exit_reason */
1797 run->exit_reason = KVM_EXIT_UNKNOWN;
1798 lose_fpu(1);
1799 vcpu_load(vcpu);
1800 kvm_sigset_activate(vcpu);
1801 r = kvm_pre_enter_guest(vcpu);
1802 if (r != RESUME_GUEST)
1803 goto out;
1804
1805 guest_timing_enter_irqoff();
1806 guest_state_enter_irqoff();
1807 trace_kvm_enter(vcpu);
1808 r = kvm_loongarch_ops->enter_guest(run, vcpu);
1809
1810 trace_kvm_out(vcpu);
1811 /*
1812 * The guest exit was already recorded in kvm_handle_exit();
1813 * the return value must not be RESUME_GUEST
1814 */
1815 local_irq_enable();
1816 out:
1817 kvm_sigset_deactivate(vcpu);
1818 vcpu_put(vcpu);
1819
1820 return r;
1821 }
1822