1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
4 */
5
6 #include <linux/kvm_host.h>
7 #include <linux/entry-kvm.h>
8 #include <asm/fpu.h>
9 #include <asm/lbt.h>
10 #include <asm/loongarch.h>
11 #include <asm/setup.h>
12 #include <asm/time.h>
13
14 #define CREATE_TRACE_POINTS
15 #include "trace.h"
16
17 const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
18 KVM_GENERIC_VCPU_STATS(),
19 STATS_DESC_COUNTER(VCPU, int_exits),
20 STATS_DESC_COUNTER(VCPU, idle_exits),
21 STATS_DESC_COUNTER(VCPU, cpucfg_exits),
22 STATS_DESC_COUNTER(VCPU, signal_exits),
23 STATS_DESC_COUNTER(VCPU, hypercall_exits)
24 };
25
26 const struct kvm_stats_header kvm_vcpu_stats_header = {
27 .name_size = KVM_STATS_NAME_SIZE,
28 .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
29 .id_offset = sizeof(struct kvm_stats_header),
30 .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
31 .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
32 sizeof(kvm_vcpu_stats_desc),
33 };
34
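/*
 * PMU context-switch helpers: the host perf counter/control CSRs are
 * stashed in the per-CPU kvm_context, while the guest values live in
 * the vCPU's software CSR array.
 */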
35 static inline void kvm_save_host_pmu(struct kvm_vcpu *vcpu)
36 {
37 struct kvm_context *context;
38
39 context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
40 context->perf_cntr[0] = read_csr_perfcntr0();
41 context->perf_cntr[1] = read_csr_perfcntr1();
42 context->perf_cntr[2] = read_csr_perfcntr2();
43 context->perf_cntr[3] = read_csr_perfcntr3();
44 context->perf_ctrl[0] = write_csr_perfctrl0(0);
45 context->perf_ctrl[1] = write_csr_perfctrl1(0);
46 context->perf_ctrl[2] = write_csr_perfctrl2(0);
47 context->perf_ctrl[3] = write_csr_perfctrl3(0);
48 }
49
50 static inline void kvm_restore_host_pmu(struct kvm_vcpu *vcpu)
51 {
52 struct kvm_context *context;
53
54 context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
55 write_csr_perfcntr0(context->perf_cntr[0]);
56 write_csr_perfcntr1(context->perf_cntr[1]);
57 write_csr_perfcntr2(context->perf_cntr[2]);
58 write_csr_perfcntr3(context->perf_cntr[3]);
59 write_csr_perfctrl0(context->perf_ctrl[0]);
60 write_csr_perfctrl1(context->perf_ctrl[1]);
61 write_csr_perfctrl2(context->perf_ctrl[2]);
62 write_csr_perfctrl3(context->perf_ctrl[3]);
63 }
64
65
66 static inline void kvm_save_guest_pmu(struct kvm_vcpu *vcpu)
67 {
68 struct loongarch_csrs *csr = vcpu->arch.csr;
69
70 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
71 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
72 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
73 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
74 kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
75 kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
76 kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
77 kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
78 }
79
80 static inline void kvm_restore_guest_pmu(struct kvm_vcpu *vcpu)
81 {
82 struct loongarch_csrs *csr = vcpu->arch.csr;
83
84 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
85 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
86 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
87 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
88 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
89 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
90 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
91 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
92 }
93
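/*
 * Grant the guest direct access to the hardware PMU: save the host PMU
 * state, enable guest PM CSR access via GCFG.GPERF, then load the
 * guest's PMU CSRs.
 */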
94 static int kvm_own_pmu(struct kvm_vcpu *vcpu)
95 {
96 unsigned long val;
97
98 if (!kvm_guest_has_pmu(&vcpu->arch))
99 return -EINVAL;
100
101 kvm_save_host_pmu(vcpu);
102
103 /* Set PM0-PM(num) to guest */
104 val = read_csr_gcfg() & ~CSR_GCFG_GPERF;
105 val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT;
106 write_csr_gcfg(val);
107
108 kvm_restore_guest_pmu(vcpu);
109
110 return 0;
111 }
112
113 static void kvm_lose_pmu(struct kvm_vcpu *vcpu)
114 {
115 unsigned long val;
116 struct loongarch_csrs *csr = vcpu->arch.csr;
117
118 if (!(vcpu->arch.aux_inuse & KVM_LARCH_PMU))
119 return;
120
121 kvm_save_guest_pmu(vcpu);
122
123 /* Disable pmu access from guest */
124 write_csr_gcfg(read_csr_gcfg() & ~CSR_GCFG_GPERF);
125
126 /*
127 * Clear KVM_LARCH_PMU if the guest is not using the PMU CSRs when
128 * exiting the guest, so that the PMU CSR context does not need to
129 * be saved and restored on the next trap into the guest.
130 */
131 val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
132 val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
133 val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
134 val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
135 if (!(val & KVM_PMU_EVENT_ENABLED))
136 vcpu->arch.aux_inuse &= ~KVM_LARCH_PMU;
137
138 kvm_restore_host_pmu(vcpu);
139 }
140
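/*
 * Called on vcpu_load(): if the guest owned the PMU when it was last
 * scheduled out, raise KVM_REQ_PMU so that kvm_check_pmu() re-installs
 * the guest PMU context right before the next VM entry.
 */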
141 static void kvm_restore_pmu(struct kvm_vcpu *vcpu)
142 {
143 if ((vcpu->arch.aux_inuse & KVM_LARCH_PMU))
144 kvm_make_request(KVM_REQ_PMU, vcpu);
145 }
146
147 static void kvm_check_pmu(struct kvm_vcpu *vcpu)
148 {
149 if (kvm_check_request(KVM_REQ_PMU, vcpu)) {
150 kvm_own_pmu(vcpu);
151 vcpu->arch.aux_inuse |= KVM_LARCH_PMU;
152 }
153 }
154
155 static void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
156 {
157 u32 version;
158 u64 steal;
159 gpa_t gpa;
160 struct kvm_memslots *slots;
161 struct kvm_steal_time __user *st;
162 struct gfn_to_hva_cache *ghc;
163
164 ghc = &vcpu->arch.st.cache;
165 gpa = vcpu->arch.st.guest_addr;
166 if (!(gpa & KVM_STEAL_PHYS_VALID))
167 return;
168
169 gpa &= KVM_STEAL_PHYS_MASK;
170 slots = kvm_memslots(vcpu->kvm);
171 if (slots->generation != ghc->generation || gpa != ghc->gpa) {
172 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) {
173 ghc->gpa = INVALID_GPA;
174 return;
175 }
176 }
177
178 st = (struct kvm_steal_time __user *)ghc->hva;
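/*
 * Seqcount-like protocol: the version is made odd while the record is
 * being updated and even again afterwards, so the guest can detect and
 * retry torn reads of the steal time value.
 */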
179 unsafe_get_user(version, &st->version, out);
180 if (version & 1)
181 version += 1; /* first time write, random junk */
182
183 version += 1;
184 unsafe_put_user(version, &st->version, out);
185 smp_wmb();
186
187 unsafe_get_user(steal, &st->steal, out);
188 steal += current->sched_info.run_delay - vcpu->arch.st.last_steal;
189 vcpu->arch.st.last_steal = current->sched_info.run_delay;
190 unsafe_put_user(steal, &st->steal, out);
191
192 smp_wmb();
193 version += 1;
194 unsafe_put_user(version, &st->version, out);
195 out:
196 mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
197 }
198
199 /*
200 * kvm_check_requests - check and handle pending vCPU requests
201 *
202 * Return: RESUME_GUEST if we should enter the guest
203 * RESUME_HOST if we should exit to userspace
204 */
205 static int kvm_check_requests(struct kvm_vcpu *vcpu)
206 {
207 if (!kvm_request_pending(vcpu))
208 return RESUME_GUEST;
209
210 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
211 vcpu->arch.vpid = 0; /* Drop vpid for this vCPU */
212
213 if (kvm_dirty_ring_check_request(vcpu))
214 return RESUME_HOST;
215
216 if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
217 kvm_update_stolen_time(vcpu);
218
219 return RESUME_GUEST;
220 }
221
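/*
 * Late requests are handled with IRQs disabled, right before entering
 * the guest, because they depend on state set up by kvm_check_vpid()
 * (see kvm_pre_enter_guest()).
 */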
222 static void kvm_late_check_requests(struct kvm_vcpu *vcpu)
223 {
224 lockdep_assert_irqs_disabled();
225 if (kvm_check_request(KVM_REQ_TLB_FLUSH_GPA, vcpu))
226 if (vcpu->arch.flush_gpa != INVALID_GPA) {
227 kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa);
228 vcpu->arch.flush_gpa = INVALID_GPA;
229 }
230 }
231
232 /*
233 * Check and handle pending signals, vCPU requests, etc.
234 * Run with IRQs and preemption enabled.
235 *
236 * Return: RESUME_GUEST if we should enter the guest
237 * RESUME_HOST if we should exit to userspace
238 * < 0 if we should exit to userspace, where the return value
239 * indicates an error
240 */
241 static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
242 {
243 int idx, ret;
244
245 /*
246 * Check conditions before entering the guest
247 */
248 ret = xfer_to_guest_mode_handle_work(vcpu);
249 if (ret < 0)
250 return ret;
251
252 idx = srcu_read_lock(&vcpu->kvm->srcu);
253 ret = kvm_check_requests(vcpu);
254 srcu_read_unlock(&vcpu->kvm->srcu, idx);
255
256 return ret;
257 }
258
259 /*
260 * Called with irq enabled
261 *
262 * Return: RESUME_GUEST if we should enter the guest, and irq disabled
263 * Others if we should exit to userspace
264 */
265 static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
266 {
267 int ret;
268
269 do {
270 ret = kvm_enter_guest_check(vcpu);
271 if (ret != RESUME_GUEST)
272 break;
273
274 /*
275 * Handle the vCPU timer and interrupts, check requests and
276 * check the VMID before the vCPU enters the guest
277 */
278 local_irq_disable();
279 kvm_deliver_intr(vcpu);
280 kvm_deliver_exception(vcpu);
281 /* Make sure the vcpu mode has been written */
282 smp_store_mb(vcpu->mode, IN_GUEST_MODE);
283 kvm_check_vpid(vcpu);
284 kvm_check_pmu(vcpu);
285
286 /*
287 * Must be called after kvm_check_vpid(), since kvm_check_vpid()
288 * updates CSR.GSTAT, which is used by kvm_flush_tlb_gpa(), and it
289 * may also clear the pending KVM_REQ_TLB_FLUSH_GPA bit
290 */
291 kvm_late_check_requests(vcpu);
292 vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY);
293 /* Clear KVM_LARCH_SWCSR_LATEST as CSR will change when enter guest */
294 vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
295
296 if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) {
297 /* make sure the vcpu mode has been written */
298 smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE);
299 local_irq_enable();
300 ret = -EAGAIN;
301 }
302 } while (ret != RESUME_GUEST);
303
304 return ret;
305 }
306
307 /*
308 * Return 1 for resume guest and "<= 0" for resume host.
309 */
310 static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
311 {
312 int ret = RESUME_GUEST;
313 unsigned long estat = vcpu->arch.host_estat;
314 u32 intr = estat & CSR_ESTAT_IS;
315 u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
316
317 vcpu->mode = OUTSIDE_GUEST_MODE;
318
319 /* Set a default exit reason */
320 run->exit_reason = KVM_EXIT_UNKNOWN;
321
322 kvm_lose_pmu(vcpu);
323
324 guest_timing_exit_irqoff();
325 guest_state_exit_irqoff();
326 local_irq_enable();
327
328 trace_kvm_exit(vcpu, ecode);
329 if (ecode) {
330 ret = kvm_handle_fault(vcpu, ecode);
331 } else {
332 WARN(!intr, "vm exiting with suspicious irq\n");
333 ++vcpu->stat.int_exits;
334 }
335
336 if (ret == RESUME_GUEST)
337 ret = kvm_pre_enter_guest(vcpu);
338
339 if (ret != RESUME_GUEST) {
340 local_irq_disable();
341 return ret;
342 }
343
344 guest_timing_enter_irqoff();
345 guest_state_enter_irqoff();
346 trace_kvm_reenter(vcpu);
347
348 return RESUME_GUEST;
349 }
350
351 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
352 {
353 return !!(vcpu->arch.irq_pending) &&
354 vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE;
355 }
356
357 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
358 {
359 return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
360 }
361
362 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
363 {
364 unsigned long val;
365
366 preempt_disable();
367 val = gcsr_read(LOONGARCH_CSR_CRMD);
368 preempt_enable();
369
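/*
 * CRMD.PLV and PRMD.PPLV occupy the same bit positions, so the PRMD
 * mask can be used to extract the current privilege level from CRMD.
 */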
370 return (val & CSR_PRMD_PPLV) == PLV_KERN;
371 }
372
373 #ifdef CONFIG_GUEST_PERF_EVENTS
374 unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
375 {
376 return vcpu->arch.pc;
377 }
378
379 /*
380 * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
381 * arrived in guest context. For LoongArch64, if PMU is not passthrough to VM,
382 * any event that arrives while a vCPU is loaded is considered to be "in guest".
383 */
384 bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
385 {
386 return (vcpu && !(vcpu->arch.aux_inuse & KVM_LARCH_PMU));
387 }
388 #endif
389
390 bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
391 {
392 return false;
393 }
394
395 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
396 {
397 return VM_FAULT_SIGBUS;
398 }
399
400 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
401 struct kvm_translation *tr)
402 {
403 return -EINVAL;
404 }
405
406 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
407 {
408 int ret;
409
410 /* Protect from TOD sync and vcpu_load/put() */
411 preempt_disable();
412 ret = kvm_pending_timer(vcpu) ||
413 kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT) & (1 << INT_TI);
414 preempt_enable();
415
416 return ret;
417 }
418
419 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
420 {
421 int i;
422
423 kvm_debug("vCPU Register Dump:\n");
424 kvm_debug("\tPC = 0x%08lx\n", vcpu->arch.pc);
425 kvm_debug("\tExceptions: %08lx\n", vcpu->arch.irq_pending);
426
427 for (i = 0; i < 32; i += 4) {
428 kvm_debug("\tGPR%02d: %08lx %08lx %08lx %08lx\n", i,
429 vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1],
430 vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
431 }
432
433 kvm_debug("\tCRMD: 0x%08lx, ESTAT: 0x%08lx\n",
434 kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD),
435 kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT));
436
437 kvm_debug("\tERA: 0x%08lx\n", kvm_read_hw_gcsr(LOONGARCH_CSR_ERA));
438
439 return 0;
440 }
441
442 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
443 struct kvm_mp_state *mp_state)
444 {
445 *mp_state = vcpu->arch.mp_state;
446
447 return 0;
448 }
449
450 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
451 struct kvm_mp_state *mp_state)
452 {
453 int ret = 0;
454
455 switch (mp_state->mp_state) {
456 case KVM_MP_STATE_RUNNABLE:
457 vcpu->arch.mp_state = *mp_state;
458 break;
459 default:
460 ret = -EINVAL;
461 }
462
463 return ret;
464 }
465
466 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
467 struct kvm_guest_debug *dbg)
468 {
469 if (dbg->control & ~KVM_GUESTDBG_VALID_MASK)
470 return -EINVAL;
471
472 if (dbg->control & KVM_GUESTDBG_ENABLE)
473 vcpu->guest_debug = dbg->control;
474 else
475 vcpu->guest_debug = 0;
476
477 return 0;
478 }
479
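/*
 * Bind the vCPU to a guest physical CPUID. A CPUID can be set only once
 * per vCPU and must be unique within the VM; the mapping is recorded in
 * kvm->arch.phyid_map so that kvm_get_vcpu_by_cpuid() can resolve it.
 */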
480 static inline int kvm_set_cpuid(struct kvm_vcpu *vcpu, u64 val)
481 {
482 int cpuid;
483 struct kvm_phyid_map *map;
484 struct loongarch_csrs *csr = vcpu->arch.csr;
485
486 if (val >= KVM_MAX_PHYID)
487 return -EINVAL;
488
489 map = vcpu->kvm->arch.phyid_map;
490 cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);
491
492 spin_lock(&vcpu->kvm->arch.phyid_map_lock);
493 if ((cpuid < KVM_MAX_PHYID) && map->phys_map[cpuid].enabled) {
494 /* Discard duplicated CPUID set operation */
495 if (cpuid == val) {
496 spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
497 return 0;
498 }
499
500 /*
501 * The CPUID has already been set;
502 * forbid changing it to a different value at runtime
503 */
504 spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
505 return -EINVAL;
506 }
507
508 if (map->phys_map[val].enabled) {
509 /* Discard duplicated CPUID set operation */
510 if (vcpu == map->phys_map[val].vcpu) {
511 spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
512 return 0;
513 }
514
515 /*
516 * The new CPUID is already used by another vCPU;
517 * forbid sharing the same CPUID between different vCPUs
518 */
519 spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
520 return -EINVAL;
521 }
522
523 kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, val);
524 map->phys_map[val].enabled = true;
525 map->phys_map[val].vcpu = vcpu;
526 spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
527
528 return 0;
529 }
530
531 static inline void kvm_drop_cpuid(struct kvm_vcpu *vcpu)
532 {
533 int cpuid;
534 struct kvm_phyid_map *map;
535 struct loongarch_csrs *csr = vcpu->arch.csr;
536
537 map = vcpu->kvm->arch.phyid_map;
538 cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);
539
540 if (cpuid >= KVM_MAX_PHYID)
541 return;
542
543 spin_lock(&vcpu->kvm->arch.phyid_map_lock);
544 if (map->phys_map[cpuid].enabled) {
545 map->phys_map[cpuid].vcpu = NULL;
546 map->phys_map[cpuid].enabled = false;
547 kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);
548 }
549 spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
550 }
551
552 struct kvm_vcpu *kvm_get_vcpu_by_cpuid(struct kvm *kvm, int cpuid)
553 {
554 struct kvm_phyid_map *map;
555
556 if (cpuid >= KVM_MAX_PHYID)
557 return NULL;
558
559 map = kvm->arch.phyid_map;
560 if (!map->phys_map[cpuid].enabled)
561 return NULL;
562
563 return map->phys_map[cpuid].vcpu;
564 }
565
566 static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val)
567 {
568 unsigned long gintc;
569 struct loongarch_csrs *csr = vcpu->arch.csr;
570
571 if (get_gcsr_flag(id) & INVALID_GCSR)
572 return -EINVAL;
573
574 if (id == LOONGARCH_CSR_ESTAT) {
575 preempt_disable();
576 vcpu_load(vcpu);
577 /*
578 * Sync pending interrupts into ESTAT so that they are not
579 * lost during the VM migration stage
580 */
581 kvm_deliver_intr(vcpu);
582 vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
583 vcpu_put(vcpu);
584 preempt_enable();
585
586 /* ESTAT IP0~IP7 are taken from GINTC */
587 gintc = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC) & 0xff;
588 *val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT) | (gintc << 2);
589 return 0;
590 }
591
592 /*
593 * Get software CSR state since software state is consistent
594 * with hardware for synchronous ioctl
595 */
596 *val = kvm_read_sw_gcsr(csr, id);
597
598 return 0;
599 }
600
601 static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
602 {
603 int ret = 0, gintc;
604 struct loongarch_csrs *csr = vcpu->arch.csr;
605
606 if (get_gcsr_flag(id) & INVALID_GCSR)
607 return -EINVAL;
608
609 if (id == LOONGARCH_CSR_CPUID)
610 return kvm_set_cpuid(vcpu, val);
611
612 if (id == LOONGARCH_CSR_ESTAT) {
613 /* ESTAT IP0~IP7 are injected through GINTC */
614 gintc = (val >> 2) & 0xff;
615 kvm_set_sw_gcsr(csr, LOONGARCH_CSR_GINTC, gintc);
616
617 gintc = val & ~(0xffUL << 2);
618 kvm_set_sw_gcsr(csr, LOONGARCH_CSR_ESTAT, gintc);
619
620 return ret;
621 }
622
623 kvm_write_sw_gcsr(csr, id, val);
624
625 /*
626 * After modifying a PMU CSR value of the vCPU, set KVM_REQ_PMU
627 * if any of the PMU CSRs are in use.
628 */
629 if (id >= LOONGARCH_CSR_PERFCTRL0 && id <= LOONGARCH_CSR_PERFCNTR3) {
630 unsigned long val;
631
632 val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0) |
633 kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1) |
634 kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2) |
635 kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
636
637 if (val & KVM_PMU_EVENT_ENABLED)
638 kvm_make_request(KVM_REQ_PMU, vcpu);
639 }
640
641 return ret;
642 }
643
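/*
 * Return the mask of CPUCFG bits that userspace is allowed to set for
 * the given CPUCFG word; bits outside the mask must remain zero.
 */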
644 static int _kvm_get_cpucfg_mask(int id, u64 *v)
645 {
646 if (id < 0 || id >= KVM_MAX_CPUCFG_REGS)
647 return -EINVAL;
648
649 switch (id) {
650 case LOONGARCH_CPUCFG0:
651 *v = GENMASK(31, 0);
652 return 0;
653 case LOONGARCH_CPUCFG1:
654 /* CPUCFG1_MSGINT is not supported by KVM */
655 *v = GENMASK(25, 0);
656 return 0;
657 case LOONGARCH_CPUCFG2:
658 /* CPUCFG2 features unconditionally supported by KVM */
659 *v = CPUCFG2_FP | CPUCFG2_FPSP | CPUCFG2_FPDP |
660 CPUCFG2_FPVERS | CPUCFG2_LLFTP | CPUCFG2_LLFTPREV |
661 CPUCFG2_LSPW | CPUCFG2_LAM;
662 /*
663 * For the ISA extensions listed below, if one is supported
664 * by the host, then it is also supported by KVM.
665 */
666 if (cpu_has_lsx)
667 *v |= CPUCFG2_LSX;
668 if (cpu_has_lasx)
669 *v |= CPUCFG2_LASX;
670 if (cpu_has_lbt_x86)
671 *v |= CPUCFG2_X86BT;
672 if (cpu_has_lbt_arm)
673 *v |= CPUCFG2_ARMBT;
674 if (cpu_has_lbt_mips)
675 *v |= CPUCFG2_MIPSBT;
676
677 return 0;
678 case LOONGARCH_CPUCFG3:
679 *v = GENMASK(16, 0);
680 return 0;
681 case LOONGARCH_CPUCFG4:
682 case LOONGARCH_CPUCFG5:
683 *v = GENMASK(31, 0);
684 return 0;
685 case LOONGARCH_CPUCFG6:
686 if (cpu_has_pmp)
687 *v = GENMASK(14, 0);
688 else
689 *v = 0;
690 return 0;
691 case LOONGARCH_CPUCFG16:
692 *v = GENMASK(16, 0);
693 return 0;
694 case LOONGARCH_CPUCFG17 ... LOONGARCH_CPUCFG20:
695 *v = GENMASK(30, 0);
696 return 0;
697 default:
698 /*
699 * CPUCFG bits should be zero if reserved by HW or not
700 * supported by KVM.
701 */
702 *v = 0;
703 return 0;
704 }
705 }
706
707 static int kvm_check_cpucfg(int id, u64 val)
708 {
709 int ret;
710 u64 mask = 0;
711
712 ret = _kvm_get_cpucfg_mask(id, &mask);
713 if (ret)
714 return ret;
715
716 if (val & ~mask)
717 /* Unsupported features and/or the higher 32 bits should not be set */
718 return -EINVAL;
719
720 switch (id) {
721 case LOONGARCH_CPUCFG2:
722 if (!(val & CPUCFG2_LLFTP))
723 /* Guests must have a constant timer */
724 return -EINVAL;
725 if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP)))
726 /* Single and double float point must both be set when FP is enabled */
727 return -EINVAL;
728 if ((val & CPUCFG2_LSX) && !(val & CPUCFG2_FP))
729 /* LSX architecturally implies FP but val does not satisfy that */
730 return -EINVAL;
731 if ((val & CPUCFG2_LASX) && !(val & CPUCFG2_LSX))
732 /* LASX architecturally implies LSX and FP but val does not satisfy that */
733 return -EINVAL;
734 return 0;
735 case LOONGARCH_CPUCFG6:
736 if (val & CPUCFG6_PMP) {
737 u32 host = read_cpucfg(LOONGARCH_CPUCFG6);
738 if ((val & CPUCFG6_PMBITS) != (host & CPUCFG6_PMBITS))
739 return -EINVAL;
740 if ((val & CPUCFG6_PMNUM) > (host & CPUCFG6_PMNUM))
741 return -EINVAL;
742 if ((val & CPUCFG6_UPM) && !(host & CPUCFG6_UPM))
743 return -EINVAL;
744 }
745 return 0;
746 default:
747 /*
748 * Values for the other CPUCFG IDs are not being further validated
749 * besides the mask check above.
750 */
751 return 0;
752 }
753 }
754
755 static int kvm_get_one_reg(struct kvm_vcpu *vcpu,
756 const struct kvm_one_reg *reg, u64 *v)
757 {
758 int id, ret = 0;
759 u64 type = reg->id & KVM_REG_LOONGARCH_MASK;
760
761 switch (type) {
762 case KVM_REG_LOONGARCH_CSR:
763 id = KVM_GET_IOC_CSR_IDX(reg->id);
764 ret = _kvm_getcsr(vcpu, id, v);
765 break;
766 case KVM_REG_LOONGARCH_CPUCFG:
767 id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
768 if (id >= 0 && id < KVM_MAX_CPUCFG_REGS)
769 *v = vcpu->arch.cpucfg[id];
770 else
771 ret = -EINVAL;
772 break;
773 case KVM_REG_LOONGARCH_LBT:
774 if (!kvm_guest_has_lbt(&vcpu->arch))
775 return -ENXIO;
776
777 switch (reg->id) {
778 case KVM_REG_LOONGARCH_LBT_SCR0:
779 *v = vcpu->arch.lbt.scr0;
780 break;
781 case KVM_REG_LOONGARCH_LBT_SCR1:
782 *v = vcpu->arch.lbt.scr1;
783 break;
784 case KVM_REG_LOONGARCH_LBT_SCR2:
785 *v = vcpu->arch.lbt.scr2;
786 break;
787 case KVM_REG_LOONGARCH_LBT_SCR3:
788 *v = vcpu->arch.lbt.scr3;
789 break;
790 case KVM_REG_LOONGARCH_LBT_EFLAGS:
791 *v = vcpu->arch.lbt.eflags;
792 break;
793 case KVM_REG_LOONGARCH_LBT_FTOP:
794 *v = vcpu->arch.fpu.ftop;
795 break;
796 default:
797 ret = -EINVAL;
798 break;
799 }
800 break;
801 case KVM_REG_LOONGARCH_KVM:
802 switch (reg->id) {
803 case KVM_REG_LOONGARCH_COUNTER:
804 *v = drdtime() + vcpu->kvm->arch.time_offset;
805 break;
806 case KVM_REG_LOONGARCH_DEBUG_INST:
807 *v = INSN_HVCL | KVM_HCALL_SWDBG;
808 break;
809 default:
810 ret = -EINVAL;
811 break;
812 }
813 break;
814 default:
815 ret = -EINVAL;
816 break;
817 }
818
819 return ret;
820 }
821
822 static int kvm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
823 {
824 int ret = 0;
825 u64 v, size = reg->id & KVM_REG_SIZE_MASK;
826
827 switch (size) {
828 case KVM_REG_SIZE_U64:
829 ret = kvm_get_one_reg(vcpu, reg, &v);
830 if (ret)
831 return ret;
832 ret = put_user(v, (u64 __user *)(long)reg->addr);
833 break;
834 default:
835 ret = -EINVAL;
836 break;
837 }
838
839 return ret;
840 }
841
842 static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
843 const struct kvm_one_reg *reg, u64 v)
844 {
845 int id, ret = 0;
846 u64 type = reg->id & KVM_REG_LOONGARCH_MASK;
847
848 switch (type) {
849 case KVM_REG_LOONGARCH_CSR:
850 id = KVM_GET_IOC_CSR_IDX(reg->id);
851 ret = _kvm_setcsr(vcpu, id, v);
852 break;
853 case KVM_REG_LOONGARCH_CPUCFG:
854 id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
855 ret = kvm_check_cpucfg(id, v);
856 if (ret)
857 break;
858 vcpu->arch.cpucfg[id] = (u32)v;
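/*
 * PMU control/counter CSRs are interleaved (PERFCTRLn, PERFCNTRn),
 * so the last valid PMU CSR id is PERFCTRL0 + 2 * pmu_num + 1.
 */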
859 if (id == LOONGARCH_CPUCFG6)
860 vcpu->arch.max_pmu_csrid =
861 LOONGARCH_CSR_PERFCTRL0 + 2 * kvm_get_pmu_num(&vcpu->arch) + 1;
862 break;
863 case KVM_REG_LOONGARCH_LBT:
864 if (!kvm_guest_has_lbt(&vcpu->arch))
865 return -ENXIO;
866
867 switch (reg->id) {
868 case KVM_REG_LOONGARCH_LBT_SCR0:
869 vcpu->arch.lbt.scr0 = v;
870 break;
871 case KVM_REG_LOONGARCH_LBT_SCR1:
872 vcpu->arch.lbt.scr1 = v;
873 break;
874 case KVM_REG_LOONGARCH_LBT_SCR2:
875 vcpu->arch.lbt.scr2 = v;
876 break;
877 case KVM_REG_LOONGARCH_LBT_SCR3:
878 vcpu->arch.lbt.scr3 = v;
879 break;
880 case KVM_REG_LOONGARCH_LBT_EFLAGS:
881 vcpu->arch.lbt.eflags = v;
882 break;
883 case KVM_REG_LOONGARCH_LBT_FTOP:
884 vcpu->arch.fpu.ftop = v;
885 break;
886 default:
887 ret = -EINVAL;
888 break;
889 }
890 break;
891 case KVM_REG_LOONGARCH_KVM:
892 switch (reg->id) {
893 case KVM_REG_LOONGARCH_COUNTER:
894 /*
895 * gftoffset is a property of the board (VM), not of the vCPU,
896 * so for an SMP system it is only set once, by the first vCPU
897 */
898 if (vcpu->vcpu_id == 0)
899 vcpu->kvm->arch.time_offset = (signed long)(v - drdtime());
900 break;
901 case KVM_REG_LOONGARCH_VCPU_RESET:
902 vcpu->arch.st.guest_addr = 0;
903 memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
904 memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
905 break;
906 default:
907 ret = -EINVAL;
908 break;
909 }
910 break;
911 default:
912 ret = -EINVAL;
913 break;
914 }
915
916 return ret;
917 }
918
919 static int kvm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
920 {
921 int ret = 0;
922 u64 v, size = reg->id & KVM_REG_SIZE_MASK;
923
924 switch (size) {
925 case KVM_REG_SIZE_U64:
926 ret = get_user(v, (u64 __user *)(long)reg->addr);
927 if (ret)
928 return ret;
929 break;
930 default:
931 return -EINVAL;
932 }
933
934 return kvm_set_one_reg(vcpu, reg, v);
935 }
936
937 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
938 {
939 return -ENOIOCTLCMD;
940 }
941
942 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
943 {
944 return -ENOIOCTLCMD;
945 }
946
947 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
948 {
949 int i;
950
951 for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
952 regs->gpr[i] = vcpu->arch.gprs[i];
953
954 regs->pc = vcpu->arch.pc;
955
956 return 0;
957 }
958
959 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
960 {
961 int i;
962
963 for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
964 vcpu->arch.gprs[i] = regs->gpr[i];
965
966 vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
967 vcpu->arch.pc = regs->pc;
968
969 return 0;
970 }
971
972 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
973 struct kvm_enable_cap *cap)
974 {
975 /* FPU is enabled by default, will support LSX/LASX later. */
976 return -EINVAL;
977 }
978
979 static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu,
980 struct kvm_device_attr *attr)
981 {
982 switch (attr->attr) {
983 case LOONGARCH_CPUCFG2:
984 case LOONGARCH_CPUCFG6:
985 return 0;
986 case CPUCFG_KVM_FEATURE:
987 return 0;
988 default:
989 return -ENXIO;
990 }
991
992 return -ENXIO;
993 }
994
995 static int kvm_loongarch_pvtime_has_attr(struct kvm_vcpu *vcpu,
996 struct kvm_device_attr *attr)
997 {
998 if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
999 || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
1000 return -ENXIO;
1001
1002 return 0;
1003 }
1004
1005 static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
1006 struct kvm_device_attr *attr)
1007 {
1008 int ret = -ENXIO;
1009
1010 switch (attr->group) {
1011 case KVM_LOONGARCH_VCPU_CPUCFG:
1012 ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr);
1013 break;
1014 case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
1015 ret = kvm_loongarch_pvtime_has_attr(vcpu, attr);
1016 break;
1017 default:
1018 break;
1019 }
1020
1021 return ret;
1022 }
1023
1024 static int kvm_loongarch_cpucfg_get_attr(struct kvm_vcpu *vcpu,
1025 struct kvm_device_attr *attr)
1026 {
1027 int ret = 0;
1028 uint64_t val;
1029 uint64_t __user *uaddr = (uint64_t __user *)attr->addr;
1030
1031 switch (attr->attr) {
1032 case 0 ... (KVM_MAX_CPUCFG_REGS - 1):
1033 ret = _kvm_get_cpucfg_mask(attr->attr, &val);
1034 if (ret)
1035 return ret;
1036 break;
1037 case CPUCFG_KVM_FEATURE:
1038 val = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK;
1039 break;
1040 default:
1041 return -ENXIO;
1042 }
1043
1044 put_user(val, uaddr);
1045
1046 return ret;
1047 }
1048
1049 static int kvm_loongarch_pvtime_get_attr(struct kvm_vcpu *vcpu,
1050 struct kvm_device_attr *attr)
1051 {
1052 u64 gpa;
1053 u64 __user *user = (u64 __user *)attr->addr;
1054
1055 if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
1056 || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
1057 return -ENXIO;
1058
1059 gpa = vcpu->arch.st.guest_addr;
1060 if (put_user(gpa, user))
1061 return -EFAULT;
1062
1063 return 0;
1064 }
1065
1066 static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu,
1067 struct kvm_device_attr *attr)
1068 {
1069 int ret = -ENXIO;
1070
1071 switch (attr->group) {
1072 case KVM_LOONGARCH_VCPU_CPUCFG:
1073 ret = kvm_loongarch_cpucfg_get_attr(vcpu, attr);
1074 break;
1075 case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
1076 ret = kvm_loongarch_pvtime_get_attr(vcpu, attr);
1077 break;
1078 default:
1079 break;
1080 }
1081
1082 return ret;
1083 }
1084
1085 static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu,
1086 struct kvm_device_attr *attr)
1087 {
1088 u64 val, valid;
1089 u64 __user *user = (u64 __user *)attr->addr;
1090 struct kvm *kvm = vcpu->kvm;
1091
1092 switch (attr->attr) {
1093 case CPUCFG_KVM_FEATURE:
1094 if (get_user(val, user))
1095 return -EFAULT;
1096
1097 valid = LOONGARCH_PV_FEAT_MASK;
1098 if (val & ~valid)
1099 return -EINVAL;
1100
1101 /* All vCPUs need to be set with the same PV features */
1102 if ((kvm->arch.pv_features & LOONGARCH_PV_FEAT_UPDATED)
1103 && ((kvm->arch.pv_features & valid) != val))
1104 return -EINVAL;
1105 kvm->arch.pv_features = val | LOONGARCH_PV_FEAT_UPDATED;
1106 return 0;
1107 default:
1108 return -ENXIO;
1109 }
1110 }
1111
1112 static int kvm_loongarch_pvtime_set_attr(struct kvm_vcpu *vcpu,
1113 struct kvm_device_attr *attr)
1114 {
1115 int idx, ret = 0;
1116 u64 gpa, __user *user = (u64 __user *)attr->addr;
1117 struct kvm *kvm = vcpu->kvm;
1118
1119 if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
1120 || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
1121 return -ENXIO;
1122
1123 if (get_user(gpa, user))
1124 return -EFAULT;
1125
1126 if (gpa & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
1127 return -EINVAL;
1128
1129 if (!(gpa & KVM_STEAL_PHYS_VALID)) {
1130 vcpu->arch.st.guest_addr = gpa;
1131 return 0;
1132 }
1133
1134 /* Check the address is in a valid memslot */
1135 idx = srcu_read_lock(&kvm->srcu);
1136 if (kvm_is_error_hva(gfn_to_hva(kvm, gpa >> PAGE_SHIFT)))
1137 ret = -EINVAL;
1138 srcu_read_unlock(&kvm->srcu, idx);
1139
1140 if (!ret) {
1141 vcpu->arch.st.guest_addr = gpa;
1142 vcpu->arch.st.last_steal = current->sched_info.run_delay;
1143 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
1144 }
1145
1146 return ret;
1147 }
1148
1149 static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu,
1150 struct kvm_device_attr *attr)
1151 {
1152 int ret = -ENXIO;
1153
1154 switch (attr->group) {
1155 case KVM_LOONGARCH_VCPU_CPUCFG:
1156 ret = kvm_loongarch_cpucfg_set_attr(vcpu, attr);
1157 break;
1158 case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
1159 ret = kvm_loongarch_pvtime_set_attr(vcpu, attr);
1160 break;
1161 default:
1162 break;
1163 }
1164
1165 return ret;
1166 }
1167
1168 long kvm_arch_vcpu_ioctl(struct file *filp,
1169 unsigned int ioctl, unsigned long arg)
1170 {
1171 long r;
1172 struct kvm_device_attr attr;
1173 void __user *argp = (void __user *)arg;
1174 struct kvm_vcpu *vcpu = filp->private_data;
1175
1176 /*
1177 * Only the software CSR state should be modified here.
1178 *
1179 * If a hardware CSR register were modified directly, a vcpu_load/
1180 * vcpu_put pair would be needed: hardware CSR registers belong to
1181 * the currently loaded vCPU, so other vCPUs would have to reload
1182 * them after switching.
1183 *
1184 * When the software CSR state is modified, KVM_LARCH_HWCSR_USABLE is
1185 * cleared in vcpu->arch.aux_inuse and vcpu_load reloads the CSRs from software.
1186 */
1187
1188 switch (ioctl) {
1189 case KVM_SET_ONE_REG:
1190 case KVM_GET_ONE_REG: {
1191 struct kvm_one_reg reg;
1192
1193 r = -EFAULT;
1194 if (copy_from_user(&reg, argp, sizeof(reg)))
1195 break;
1196 if (ioctl == KVM_SET_ONE_REG) {
1197 r = kvm_set_reg(vcpu, &reg);
1198 vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
1199 } else
1200 r = kvm_get_reg(vcpu, &reg);
1201 break;
1202 }
1203 case KVM_ENABLE_CAP: {
1204 struct kvm_enable_cap cap;
1205
1206 r = -EFAULT;
1207 if (copy_from_user(&cap, argp, sizeof(cap)))
1208 break;
1209 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
1210 break;
1211 }
1212 case KVM_HAS_DEVICE_ATTR: {
1213 r = -EFAULT;
1214 if (copy_from_user(&attr, argp, sizeof(attr)))
1215 break;
1216 r = kvm_loongarch_vcpu_has_attr(vcpu, &attr);
1217 break;
1218 }
1219 case KVM_GET_DEVICE_ATTR: {
1220 r = -EFAULT;
1221 if (copy_from_user(&attr, argp, sizeof(attr)))
1222 break;
1223 r = kvm_loongarch_vcpu_get_attr(vcpu, &attr);
1224 break;
1225 }
1226 case KVM_SET_DEVICE_ATTR: {
1227 r = -EFAULT;
1228 if (copy_from_user(&attr, argp, sizeof(attr)))
1229 break;
1230 r = kvm_loongarch_vcpu_set_attr(vcpu, &attr);
1231 break;
1232 }
1233 default:
1234 r = -ENOIOCTLCMD;
1235 break;
1236 }
1237
1238 return r;
1239 }
1240
1241 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1242 {
1243 int i = 0;
1244
1245 fpu->fcc = vcpu->arch.fpu.fcc;
1246 fpu->fcsr = vcpu->arch.fpu.fcsr;
1247 for (i = 0; i < NUM_FPU_REGS; i++)
1248 memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i], FPU_REG_WIDTH / 64);
1249
1250 return 0;
1251 }
1252
1253 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1254 {
1255 int i = 0;
1256
1257 vcpu->arch.fpu.fcc = fpu->fcc;
1258 vcpu->arch.fpu.fcsr = fpu->fcsr;
1259 for (i = 0; i < NUM_FPU_REGS; i++)
1260 memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i], FPU_REG_WIDTH / 64);
1261
1262 return 0;
1263 }
1264
1265 #ifdef CONFIG_CPU_HAS_LBT
1266 int kvm_own_lbt(struct kvm_vcpu *vcpu)
1267 {
1268 if (!kvm_guest_has_lbt(&vcpu->arch))
1269 return -EINVAL;
1270
1271 preempt_disable();
1272 set_csr_euen(CSR_EUEN_LBTEN);
1273 _restore_lbt(&vcpu->arch.lbt);
1274 vcpu->arch.aux_inuse |= KVM_LARCH_LBT;
1275 preempt_enable();
1276
1277 return 0;
1278 }
1279
1280 static void kvm_lose_lbt(struct kvm_vcpu *vcpu)
1281 {
1282 preempt_disable();
1283 if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) {
1284 _save_lbt(&vcpu->arch.lbt);
1285 clear_csr_euen(CSR_EUEN_LBTEN);
1286 vcpu->arch.aux_inuse &= ~KVM_LARCH_LBT;
1287 }
1288 preempt_enable();
1289 }
1290
1291 static void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr)
1292 {
1293 /*
1294 * If TM is enabled, saving/restoring the FPU top register will
1295 * cause an LBT exception, so enable LBT here in advance
1296 */
1297 if (fcsr & FPU_CSR_TM)
1298 kvm_own_lbt(vcpu);
1299 }
1300
1301 static void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu)
1302 {
1303 if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1304 if (vcpu->arch.aux_inuse & KVM_LARCH_LBT)
1305 return;
1306 kvm_check_fcsr(vcpu, read_fcsr(LOONGARCH_FCSR0));
1307 }
1308 }
1309 #else
1310 static inline void kvm_lose_lbt(struct kvm_vcpu *vcpu) { }
1311 static inline void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr) { }
1312 static inline void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu) { }
1313 #endif
1314
1315 /* Enable FPU and restore context */
1316 void kvm_own_fpu(struct kvm_vcpu *vcpu)
1317 {
1318 preempt_disable();
1319
1320 /*
1321 * Enable FPU for guest
1322 * Set FR and FRE according to guest context
1323 */
1324 kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1325 set_csr_euen(CSR_EUEN_FPEN);
1326
1327 kvm_restore_fpu(&vcpu->arch.fpu);
1328 vcpu->arch.aux_inuse |= KVM_LARCH_FPU;
1329 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
1330
1331 preempt_enable();
1332 }
1333
1334 #ifdef CONFIG_CPU_HAS_LSX
1335 /* Enable LSX and restore context */
1336 int kvm_own_lsx(struct kvm_vcpu *vcpu)
1337 {
1338 if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch))
1339 return -EINVAL;
1340
1341 preempt_disable();
1342
1343 /* Enable LSX for guest */
1344 kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1345 set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN);
1346 switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1347 case KVM_LARCH_FPU:
1348 /*
1349 * Guest FPU state already loaded,
1350 * only restore upper LSX state
1351 */
1352 _restore_lsx_upper(&vcpu->arch.fpu);
1353 break;
1354 default:
1355 /*
1356 * Neither FP nor LSX is active yet, restore the full LSX state
1357 */
1358 kvm_restore_lsx(&vcpu->arch.fpu);
1359 break;
1360 }
1361
1362 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX);
1363 vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU;
1364 preempt_enable();
1365
1366 return 0;
1367 }
1368 #endif
1369
1370 #ifdef CONFIG_CPU_HAS_LASX
1371 /* Enable LASX and restore context */
1372 int kvm_own_lasx(struct kvm_vcpu *vcpu)
1373 {
1374 if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch) || !kvm_guest_has_lasx(&vcpu->arch))
1375 return -EINVAL;
1376
1377 preempt_disable();
1378
1379 kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1380 set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
1381 switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) {
1382 case KVM_LARCH_LSX:
1383 case KVM_LARCH_LSX | KVM_LARCH_FPU:
1384 /* Guest LSX state already loaded, only restore upper LASX state */
1385 _restore_lasx_upper(&vcpu->arch.fpu);
1386 break;
1387 case KVM_LARCH_FPU:
1388 /* Guest FP state already loaded, only restore upper LSX & LASX state */
1389 _restore_lsx_upper(&vcpu->arch.fpu);
1390 _restore_lasx_upper(&vcpu->arch.fpu);
1391 break;
1392 default:
1393 /* Neither FP nor LSX is active yet, restore the full LASX state */
1394 kvm_restore_lasx(&vcpu->arch.fpu);
1395 break;
1396 }
1397
1398 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX);
1399 vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU;
1400 preempt_enable();
1401
1402 return 0;
1403 }
1404 #endif
1405
1406 /* Save context and disable FPU */
1407 void kvm_lose_fpu(struct kvm_vcpu *vcpu)
1408 {
1409 preempt_disable();
1410
1411 kvm_check_fcsr_alive(vcpu);
1412 if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) {
1413 kvm_save_lasx(&vcpu->arch.fpu);
1414 vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX);
1415 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LASX);
1416
1417 /* Disable LASX & LSX & FPU */
1418 clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
1419 } else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) {
1420 kvm_save_lsx(&vcpu->arch.fpu);
1421 vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU);
1422 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX);
1423
1424 /* Disable LSX & FPU */
1425 clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN);
1426 } else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1427 kvm_save_fpu(&vcpu->arch.fpu);
1428 vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU;
1429 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);
1430
1431 /* Disable FPU */
1432 clear_csr_euen(CSR_EUEN_FPEN);
1433 }
1434 kvm_lose_lbt(vcpu);
1435
1436 preempt_enable();
1437 }
1438
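/*
 * KVM_INTERRUPT ioctl: a positive irq number queues the interrupt,
 * a negative number dequeues it, and zero is rejected as invalid.
 */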
1439 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
1440 {
1441 int intr = (int)irq->irq;
1442
1443 if (intr > 0)
1444 kvm_queue_irq(vcpu, intr);
1445 else if (intr < 0)
1446 kvm_dequeue_irq(vcpu, -intr);
1447 else {
1448 kvm_err("%s: invalid interrupt ioctl %d\n", __func__, irq->irq);
1449 return -EINVAL;
1450 }
1451
1452 kvm_vcpu_kick(vcpu);
1453
1454 return 0;
1455 }
1456
1457 long kvm_arch_vcpu_async_ioctl(struct file *filp,
1458 unsigned int ioctl, unsigned long arg)
1459 {
1460 void __user *argp = (void __user *)arg;
1461 struct kvm_vcpu *vcpu = filp->private_data;
1462
1463 if (ioctl == KVM_INTERRUPT) {
1464 struct kvm_interrupt irq;
1465
1466 if (copy_from_user(&irq, argp, sizeof(irq)))
1467 return -EFAULT;
1468
1469 kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq);
1470
1471 return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
1472 }
1473
1474 return -ENOIOCTLCMD;
1475 }
1476
1477 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
1478 {
1479 return 0;
1480 }
1481
1482 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
1483 {
1484 unsigned long timer_hz;
1485 struct loongarch_csrs *csr;
1486
1487 vcpu->arch.vpid = 0;
1488 vcpu->arch.flush_gpa = INVALID_GPA;
1489
1490 hrtimer_setup(&vcpu->arch.swtimer, kvm_swtimer_wakeup, CLOCK_MONOTONIC,
1491 HRTIMER_MODE_ABS_PINNED_HARD);
1492
1493 /* Get GPA (=HVA) of PGD for kvm hypervisor */
1494 vcpu->arch.kvm_pgd = __pa(vcpu->kvm->arch.pgd);
1495
1496 /*
1497 * Get the PGD for the primary MMU. The virtual address is used, since
1498 * memory is accessed after loading from CSR_PGD in the TLB exception fast path.
1499 */
1500 vcpu->arch.host_pgd = (unsigned long)vcpu->kvm->mm->pgd;
1501
1502 vcpu->arch.handle_exit = kvm_handle_exit;
1503 vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry;
1504 vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL);
1505 if (!vcpu->arch.csr)
1506 return -ENOMEM;
1507
1508 /*
1509 * All KVM exceptions share one exception entry, and the host <-> guest
1510 * switch also switches the ECFG.VS field, so keep the host ECFG.VS value here.
1511 */
1512 vcpu->arch.host_ecfg = (read_csr_ecfg() & CSR_ECFG_VS);
1513
1514 /* Init */
1515 vcpu->arch.last_sched_cpu = -1;
1516
1517 /* Init ipi_state lock */
1518 spin_lock_init(&vcpu->arch.ipi_state.lock);
1519
1520 /*
1521 * Initialize guest register state to valid architectural reset state.
1522 */
1523 timer_hz = calc_const_freq();
1524 kvm_init_timer(vcpu, timer_hz);
1525
1526 /* Set Initialize mode for guest */
1527 csr = vcpu->arch.csr;
1528 kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CRMD, CSR_CRMD_DA);
1529
1530 /* Set cpuid */
1531 kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id);
1532 kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);
1533
1534 /* Start with no pending virtual guest interrupts */
1535 csr->csrs[LOONGARCH_CSR_GINTC] = 0;
1536
1537 return 0;
1538 }
1539
1540 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
1541 {
1542 }
1543
1544 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1545 {
1546 int cpu;
1547 struct kvm_context *context;
1548
1549 hrtimer_cancel(&vcpu->arch.swtimer);
1550 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
1551 kvm_drop_cpuid(vcpu);
1552 kfree(vcpu->arch.csr);
1553
1554 /*
1555 * If the vCPU is freed and reused as another vCPU, we don't want the
1556 * matching pointer wrongly hanging around in last_vcpu.
1557 */
1558 for_each_possible_cpu(cpu) {
1559 context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
1560 if (context->last_vcpu == vcpu)
1561 context->last_vcpu = NULL;
1562 }
1563 }
1564
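/*
 * Restore per-vCPU hardware state on vcpu_load(): the timer is always
 * restored, a PMU restore is requested if needed, and the full guest
 * CSR context is reloaded unless it is still live on this CPU.
 */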
1565 static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1566 {
1567 bool migrated;
1568 struct kvm_context *context;
1569 struct loongarch_csrs *csr = vcpu->arch.csr;
1570
1571 /*
1572 * Have we migrated to a different CPU?
1573 * If so, any old guest TLB state may be stale.
1574 */
1575 migrated = (vcpu->arch.last_sched_cpu != cpu);
1576
1577 /*
1578 * Was this the last vCPU to run on this CPU?
1579 * If not, any old guest state from this vCPU will have been clobbered.
1580 */
1581 context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
1582 if (migrated || (context->last_vcpu != vcpu))
1583 vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
1584 context->last_vcpu = vcpu;
1585
1586 /* Restore timer state regardless */
1587 kvm_restore_timer(vcpu);
1588 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
1589
1590 /* Restore hardware PMU CSRs */
1591 kvm_restore_pmu(vcpu);
1592
1593 /* Don't bother restoring registers multiple times unless necessary */
1594 if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE)
1595 return 0;
1596
1597 write_csr_gcntc((ulong)vcpu->kvm->arch.time_offset);
1598
1599 /* Restore guest CSR registers */
1600 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
1601 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
1602 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
1603 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_MISC);
1604 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
1605 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ERA);
1606 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADV);
1607 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADI);
1608 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
1609 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
1610 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
1611 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
1612 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
1613 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ASID);
1614 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
1615 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
1616 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
1617 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
1618 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
1619 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
1620 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
1621 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS0);
1622 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS1);
1623 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS2);
1624 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS3);
1625 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS4);
1626 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS5);
1627 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS6);
1628 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS7);
1629 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TMID);
1630 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
1631 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
1632 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
1633 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
1634 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
1635 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
1636 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
1637 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
1638 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
1639 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
1640 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
1641 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
1642 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
1643 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
1644
1645 /* Restore Root.GINTC from unused Guest.GINTC register */
1646 write_csr_gintc(csr->csrs[LOONGARCH_CSR_GINTC]);
1647
1648 /*
1649 * We should clear linked load bit to break interrupted atomics. This
1650 * prevents a SC on the next vCPU from succeeding by matching a LL on
1651 * the previous vCPU.
1652 */
1653 if (vcpu->kvm->created_vcpus > 1)
1654 set_gcsr_llbctl(CSR_LLBCTL_WCLLB);
1655
1656 vcpu->arch.aux_inuse |= KVM_LARCH_HWCSR_USABLE;
1657
1658 return 0;
1659 }
1660
1661 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1662 {
1663 unsigned long flags;
1664
1665 local_irq_save(flags);
1666 /* Restore guest state to registers */
1667 _kvm_vcpu_load(vcpu, cpu);
1668 local_irq_restore(flags);
1669 }
1670
1671 static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
1672 {
1673 struct loongarch_csrs *csr = vcpu->arch.csr;
1674
1675 kvm_lose_fpu(vcpu);
1676
1677 /*
1678 * Update CSR state from hardware if software CSR state is stale,
1679 * most CSR registers are kept unchanged during process context
1680 * switch except CSR registers like remaining timer tick value and
1681 * injected interrupt state.
1682 */
1683 if (vcpu->arch.aux_inuse & KVM_LARCH_SWCSR_LATEST)
1684 goto out;
1685
1686 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
1687 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
1688 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
1689 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_MISC);
1690 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
1691 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ERA);
1692 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADV);
1693 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADI);
1694 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
1695 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
1696 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
1697 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
1698 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
1699 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ASID);
1700 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
1701 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
1702 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
1703 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
1704 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
1705 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
1706 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
1707 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG1);
1708 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG2);
1709 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG3);
1710 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS0);
1711 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS1);
1712 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS2);
1713 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS3);
1714 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS4);
1715 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS5);
1716 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS6);
1717 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS7);
1718 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TMID);
1719 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
1720 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
1721 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
1722 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
1723 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
1724 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
1725 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
1726 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
1727 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
1728 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
1729 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
1730 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
1731 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
1732 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
1733
1734 vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST;
1735
1736 out:
1737 kvm_save_timer(vcpu);
1738 /* Save Root.GINTC into unused Guest.GINTC register */
1739 csr->csrs[LOONGARCH_CSR_GINTC] = read_csr_gintc();
1740
1741 return 0;
1742 }
1743
1744 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1745 {
1746 int cpu;
1747 unsigned long flags;
1748
1749 local_irq_save(flags);
1750 cpu = smp_processor_id();
1751 vcpu->arch.last_sched_cpu = cpu;
1752
1753 /* Save guest state in registers */
1754 _kvm_vcpu_put(vcpu, cpu);
1755 local_irq_restore(flags);
1756 }
1757
1758 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
1759 {
1760 int r = -EINTR;
1761 struct kvm_run *run = vcpu->run;
1762
1763 if (vcpu->mmio_needed) {
1764 if (!vcpu->mmio_is_write)
1765 kvm_complete_mmio_read(vcpu, run);
1766 vcpu->mmio_needed = 0;
1767 }
1768
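/* Complete any pending hypercall or IOCSR read started in userspace */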
1769 switch (run->exit_reason) {
1770 case KVM_EXIT_HYPERCALL:
1771 kvm_complete_user_service(vcpu, run);
1772 break;
1773 case KVM_EXIT_LOONGARCH_IOCSR:
1774 if (!run->iocsr_io.is_write)
1775 kvm_complete_iocsr_read(vcpu, run);
1776 break;
1777 }
1778
1779 if (!vcpu->wants_to_run)
1780 return r;
1781
1782 /* Clear exit_reason */
1783 run->exit_reason = KVM_EXIT_UNKNOWN;
1784 lose_fpu(1);
1785 vcpu_load(vcpu);
1786 kvm_sigset_activate(vcpu);
1787 r = kvm_pre_enter_guest(vcpu);
1788 if (r != RESUME_GUEST)
1789 goto out;
1790
1791 guest_timing_enter_irqoff();
1792 guest_state_enter_irqoff();
1793 trace_kvm_enter(vcpu);
1794 r = kvm_loongarch_ops->enter_guest(run, vcpu);
1795
1796 trace_kvm_out(vcpu);
1797 /*
1798 * The guest exit has already been recorded in kvm_handle_exit();
1799 * its return value must not be RESUME_GUEST
1800 */
1801 local_irq_enable();
1802 out:
1803 kvm_sigset_deactivate(vcpu);
1804 vcpu_put(vcpu);
1805
1806 return r;
1807 }
1808