1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
4 */
5
6 #include <linux/kvm_host.h>
7 #include <linux/entry-kvm.h>
8 #include <asm/fpu.h>
9 #include <asm/lbt.h>
10 #include <asm/loongarch.h>
11 #include <asm/setup.h>
12 #include <asm/time.h>
13
14 #define CREATE_TRACE_POINTS
15 #include "trace.h"
16
17 const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
18 KVM_GENERIC_VCPU_STATS(),
19 STATS_DESC_COUNTER(VCPU, int_exits),
20 STATS_DESC_COUNTER(VCPU, idle_exits),
21 STATS_DESC_COUNTER(VCPU, cpucfg_exits),
22 STATS_DESC_COUNTER(VCPU, signal_exits),
23 STATS_DESC_COUNTER(VCPU, hypercall_exits)
24 };
25
26 const struct kvm_stats_header kvm_vcpu_stats_header = {
27 .name_size = KVM_STATS_NAME_SIZE,
28 .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
29 .id_offset = sizeof(struct kvm_stats_header),
30 .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
31 .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
32 sizeof(kvm_vcpu_stats_desc),
33 };
34
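/*
* Save the host's hardware PMU counters and control registers into the
* per-CPU kvm_context, and stop the host counters by writing zero to
* the control registers, before the PMU is handed over to the guest.
*/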
35 static inline void kvm_save_host_pmu(struct kvm_vcpu *vcpu)
36 {
37 struct kvm_context *context;
38
39 context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
40 context->perf_cntr[0] = read_csr_perfcntr0();
41 context->perf_cntr[1] = read_csr_perfcntr1();
42 context->perf_cntr[2] = read_csr_perfcntr2();
43 context->perf_cntr[3] = read_csr_perfcntr3();
44 context->perf_ctrl[0] = write_csr_perfctrl0(0);
45 context->perf_ctrl[1] = write_csr_perfctrl1(0);
46 context->perf_ctrl[2] = write_csr_perfctrl2(0);
47 context->perf_ctrl[3] = write_csr_perfctrl3(0);
48 }
49
50 static inline void kvm_restore_host_pmu(struct kvm_vcpu *vcpu)
51 {
52 struct kvm_context *context;
53
54 context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
55 write_csr_perfcntr0(context->perf_cntr[0]);
56 write_csr_perfcntr1(context->perf_cntr[1]);
57 write_csr_perfcntr2(context->perf_cntr[2]);
58 write_csr_perfcntr3(context->perf_cntr[3]);
59 write_csr_perfctrl0(context->perf_ctrl[0]);
60 write_csr_perfctrl1(context->perf_ctrl[1]);
61 write_csr_perfctrl2(context->perf_ctrl[2]);
62 write_csr_perfctrl3(context->perf_ctrl[3]);
63 }
64
65
66 static inline void kvm_save_guest_pmu(struct kvm_vcpu *vcpu)
67 {
68 struct loongarch_csrs *csr = vcpu->arch.csr;
69
70 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
71 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
72 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
73 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
74 kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
75 kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
76 kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
77 kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
78 }
79
80 static inline void kvm_restore_guest_pmu(struct kvm_vcpu *vcpu)
81 {
82 struct loongarch_csrs *csr = vcpu->arch.csr;
83
84 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
85 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
86 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
87 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
88 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
89 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
90 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
91 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
92 }
93
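/*
* Give the guest direct access to the hardware PMU: stash the host PMU
* state, program GCFG.GPERF with the counter count exposed to the
* guest, then load the guest's PMU CSRs.
*/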
94 static int kvm_own_pmu(struct kvm_vcpu *vcpu)
95 {
96 unsigned long val;
97
98 if (!kvm_guest_has_pmu(&vcpu->arch))
99 return -EINVAL;
100
101 kvm_save_host_pmu(vcpu);
102
103 /* Set PM0-PM(num) to guest */
104 val = read_csr_gcfg() & ~CSR_GCFG_GPERF;
105 val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT;
106 write_csr_gcfg(val);
107
108 kvm_restore_guest_pmu(vcpu);
109
110 return 0;
111 }
112
113 static void kvm_lose_pmu(struct kvm_vcpu *vcpu)
114 {
115 unsigned long val;
116 struct loongarch_csrs *csr = vcpu->arch.csr;
117
118 if (!(vcpu->arch.aux_inuse & KVM_LARCH_PMU))
119 return;
120
121 kvm_save_guest_pmu(vcpu);
122
123 /* Disable pmu access from guest */
124 write_csr_gcfg(read_csr_gcfg() & ~CSR_GCFG_GPERF);
125
126 /*
127 * Clear KVM_LARCH_PMU if the guest is not using the PMU CSRs when
128 * exiting the guest, so that the PMU CSR context does not need to be
129 * switched on the next trap into the guest.
130 */
131 val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
132 val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
133 val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
134 val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
135 if (!(val & KVM_PMU_EVENT_ENABLED))
136 vcpu->arch.aux_inuse &= ~KVM_LARCH_PMU;
137
138 kvm_restore_host_pmu(vcpu);
139 }
140
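/*
* Restoring the guest PMU is deferred: if the vCPU was using the PMU,
* only raise KVM_REQ_PMU here; kvm_check_pmu() performs the actual
* switch right before entering the guest.
*/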
141 static void kvm_restore_pmu(struct kvm_vcpu *vcpu)
142 {
143 if ((vcpu->arch.aux_inuse & KVM_LARCH_PMU))
144 kvm_make_request(KVM_REQ_PMU, vcpu);
145 }
146
147 static void kvm_check_pmu(struct kvm_vcpu *vcpu)
148 {
149 if (kvm_check_request(KVM_REQ_PMU, vcpu)) {
150 kvm_own_pmu(vcpu);
151 vcpu->arch.aux_inuse |= KVM_LARCH_PMU;
152 }
153 }
154
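/*
* Publish the accumulated steal time to the guest's kvm_steal_time area.
* The version field works like a seqcount: it is bumped to an odd value
* before the update and to an even value afterwards, so the guest can
* detect and retry a torn read.
*/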
155 static void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
156 {
157 u32 version;
158 u64 steal;
159 gpa_t gpa;
160 struct kvm_memslots *slots;
161 struct kvm_steal_time __user *st;
162 struct gfn_to_hva_cache *ghc;
163
164 ghc = &vcpu->arch.st.cache;
165 gpa = vcpu->arch.st.guest_addr;
166 if (!(gpa & KVM_STEAL_PHYS_VALID))
167 return;
168
169 gpa &= KVM_STEAL_PHYS_MASK;
170 slots = kvm_memslots(vcpu->kvm);
171 if (slots->generation != ghc->generation || gpa != ghc->gpa) {
172 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) {
173 ghc->gpa = INVALID_GPA;
174 return;
175 }
176 }
177
178 st = (struct kvm_steal_time __user *)ghc->hva;
179 unsafe_get_user(version, &st->version, out);
180 if (version & 1)
181 version += 1; /* first time write, random junk */
182
183 version += 1;
184 unsafe_put_user(version, &st->version, out);
185 smp_wmb();
186
187 unsafe_get_user(steal, &st->steal, out);
188 steal += current->sched_info.run_delay - vcpu->arch.st.last_steal;
189 vcpu->arch.st.last_steal = current->sched_info.run_delay;
190 unsafe_put_user(steal, &st->steal, out);
191
192 smp_wmb();
193 version += 1;
194 unsafe_put_user(version, &st->version, out);
195 out:
196 mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
197 }
198
199 /*
200 * kvm_check_requests - check and handle pending vCPU requests
201 *
202 * Return: RESUME_GUEST if we should enter the guest
203 * RESUME_HOST if we should exit to userspace
204 */
205 static int kvm_check_requests(struct kvm_vcpu *vcpu)
206 {
207 if (!kvm_request_pending(vcpu))
208 return RESUME_GUEST;
209
210 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
211 vcpu->arch.vpid = 0; /* Drop vpid for this vCPU */
212
213 if (kvm_dirty_ring_check_request(vcpu))
214 return RESUME_HOST;
215
216 if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
217 kvm_update_stolen_time(vcpu);
218
219 return RESUME_GUEST;
220 }
221
222 static void kvm_late_check_requests(struct kvm_vcpu *vcpu)
223 {
224 lockdep_assert_irqs_disabled();
225 if (kvm_check_request(KVM_REQ_TLB_FLUSH_GPA, vcpu))
226 if (vcpu->arch.flush_gpa != INVALID_GPA) {
227 kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa);
228 vcpu->arch.flush_gpa = INVALID_GPA;
229 }
230 }
231
232 /*
233 * Check and handle pending signals, vCPU requests, etc.
234 * Runs with IRQs and preemption enabled.
235 *
236 * Return: RESUME_GUEST if we should enter the guest
237 * RESUME_HOST if we should exit to userspace
238 * < 0 if we should exit to userspace, where the return value
239 * indicates an error
240 */
241 static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
242 {
243 int idx, ret;
244
245 /*
246 * Check conditions before entering the guest
247 */
248 ret = xfer_to_guest_mode_handle_work(vcpu);
249 if (ret < 0)
250 return ret;
251
252 idx = srcu_read_lock(&vcpu->kvm->srcu);
253 ret = kvm_check_requests(vcpu);
254 srcu_read_unlock(&vcpu->kvm->srcu, idx);
255
256 return ret;
257 }
258
259 /*
260 * Called with IRQs enabled
261 *
262 * Return: RESUME_GUEST if we should enter the guest, with IRQs disabled
263 * anything else if we should exit to userspace
264 */
265 static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
266 {
267 int ret;
268
269 do {
270 ret = kvm_enter_guest_check(vcpu);
271 if (ret != RESUME_GUEST)
272 break;
273
274 /*
275 * Handle the vCPU timer and interrupts, check requests and
276 * check the VMID before the vCPU enters the guest
277 */
278 local_irq_disable();
279 kvm_deliver_intr(vcpu);
280 kvm_deliver_exception(vcpu);
281 /* Make sure the vcpu mode has been written */
282 smp_store_mb(vcpu->mode, IN_GUEST_MODE);
283 kvm_check_vpid(vcpu);
284 kvm_check_pmu(vcpu);
285
286 /*
287 * Must be called after kvm_check_vpid(), since that updates
288 * CSR.GSTAT which is used by kvm_flush_tlb_gpa(), and it may
289 * also clear the pending KVM_REQ_TLB_FLUSH_GPA bit
290 */
291 kvm_late_check_requests(vcpu);
292 vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY);
293 /* Clear KVM_LARCH_SWCSR_LATEST as CSR will change when enter guest */
294 vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
295
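/*
* Re-check for pending requests or work now that IRQs are disabled and
* vcpu->mode is IN_GUEST_MODE; if anything slipped in, roll back and
* retry the whole sequence.
*/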
296 if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) {
297 kvm_lose_pmu(vcpu);
298 /* make sure the vcpu mode has been written */
299 smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE);
300 local_irq_enable();
301 ret = -EAGAIN;
302 }
303 } while (ret != RESUME_GUEST);
304
305 return ret;
306 }
307
308 /*
309 * Return 1 to resume the guest and "<= 0" to resume the host.
310 */
311 static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
312 {
313 int ret = RESUME_GUEST;
314 unsigned long estat = vcpu->arch.host_estat;
315 u32 intr = estat & CSR_ESTAT_IS;
316 u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
317
318 vcpu->mode = OUTSIDE_GUEST_MODE;
319
320 /* Set a default exit reason */
321 run->exit_reason = KVM_EXIT_UNKNOWN;
322
323 kvm_lose_pmu(vcpu);
324
325 guest_timing_exit_irqoff();
326 guest_state_exit_irqoff();
327 local_irq_enable();
328
329 trace_kvm_exit(vcpu, ecode);
330 if (ecode) {
331 ret = kvm_handle_fault(vcpu, ecode);
332 } else {
333 WARN(!intr, "vm exiting with suspicious irq\n");
334 ++vcpu->stat.int_exits;
335 }
336
337 if (ret == RESUME_GUEST)
338 ret = kvm_pre_enter_guest(vcpu);
339
340 if (ret != RESUME_GUEST) {
341 local_irq_disable();
342 return ret;
343 }
344
345 guest_timing_enter_irqoff();
346 guest_state_enter_irqoff();
347 trace_kvm_reenter(vcpu);
348
349 return RESUME_GUEST;
350 }
351
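/* A vCPU is runnable when it has a pending interrupt and its mp_state is RUNNABLE */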
352 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
353 {
354 return !!(vcpu->arch.irq_pending) &&
355 vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE;
356 }
357
358 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
359 {
360 return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
361 }
362
363 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
364 {
365 unsigned long val;
366
367 preempt_disable();
368 val = gcsr_read(LOONGARCH_CSR_CRMD);
369 preempt_enable();
370
371 return (val & CSR_PRMD_PPLV) == PLV_KERN;
372 }
373
374 #ifdef CONFIG_GUEST_PERF_EVENTS
375 unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
376 {
377 return vcpu->arch.pc;
378 }
379
380 /*
381 * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
382 * arrived in guest context. For LoongArch64, if the PMU is not passed through to the VM,
383 * any event that arrives while a vCPU is loaded is considered to be "in guest".
384 */
385 bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
386 {
387 return (vcpu && !(vcpu->arch.aux_inuse & KVM_LARCH_PMU));
388 }
389 #endif
390
391 bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
392 {
393 return false;
394 }
395
396 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
397 {
398 return VM_FAULT_SIGBUS;
399 }
400
401 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
402 struct kvm_translation *tr)
403 {
404 return -EINVAL;
405 }
406
407 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
408 {
409 int ret;
410
411 /* Protect from TOD sync and vcpu_load/put() */
412 preempt_disable();
413 ret = kvm_pending_timer(vcpu) ||
414 kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT) & (1 << INT_TI);
415 preempt_enable();
416
417 return ret;
418 }
419
420 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
421 {
422 int i;
423
424 kvm_debug("vCPU Register Dump:\n");
425 kvm_debug("\tPC = 0x%08lx\n", vcpu->arch.pc);
426 kvm_debug("\tExceptions: %08lx\n", vcpu->arch.irq_pending);
427
428 for (i = 0; i < 32; i += 4) {
429 kvm_debug("\tGPR%02d: %08lx %08lx %08lx %08lx\n", i,
430 vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1],
431 vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
432 }
433
434 kvm_debug("\tCRMD: 0x%08lx, ESTAT: 0x%08lx\n",
435 kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD),
436 kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT));
437
438 kvm_debug("\tERA: 0x%08lx\n", kvm_read_hw_gcsr(LOONGARCH_CSR_ERA));
439
440 return 0;
441 }
442
443 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
444 struct kvm_mp_state *mp_state)
445 {
446 *mp_state = vcpu->arch.mp_state;
447
448 return 0;
449 }
450
451 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
452 struct kvm_mp_state *mp_state)
453 {
454 int ret = 0;
455
456 switch (mp_state->mp_state) {
457 case KVM_MP_STATE_RUNNABLE:
458 vcpu->arch.mp_state = *mp_state;
459 break;
460 default:
461 ret = -EINVAL;
462 }
463
464 return ret;
465 }
466
467 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
468 struct kvm_guest_debug *dbg)
469 {
470 if (dbg->control & ~KVM_GUESTDBG_VALID_MASK)
471 return -EINVAL;
472
473 if (dbg->control & KVM_GUESTDBG_ENABLE)
474 vcpu->guest_debug = dbg->control;
475 else
476 vcpu->guest_debug = 0;
477
478 return 0;
479 }
480
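/*
* Bind a guest physical CPUID to this vCPU via the VM-wide phyid_map.
* A CPUID may be set only once per vCPU and must be unique across vCPUs.
*/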
481 static inline int kvm_set_cpuid(struct kvm_vcpu *vcpu, u64 val)
482 {
483 int cpuid;
484 struct kvm_phyid_map *map;
485 struct loongarch_csrs *csr = vcpu->arch.csr;
486
487 if (val >= KVM_MAX_PHYID)
488 return -EINVAL;
489
490 map = vcpu->kvm->arch.phyid_map;
491 cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);
492
493 spin_lock(&vcpu->kvm->arch.phyid_map_lock);
494 if ((cpuid < KVM_MAX_PHYID) && map->phys_map[cpuid].enabled) {
495 /* Discard duplicated CPUID set operation */
496 if (cpuid == val) {
497 spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
498 return 0;
499 }
500
501 /*
502 * A CPUID has already been set for this vCPU
503 * Forbid changing it to a different CPUID at runtime
504 */
505 spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
506 return -EINVAL;
507 }
508
509 if (map->phys_map[val].enabled) {
510 /* Discard duplicated CPUID set operation */
511 if (vcpu == map->phys_map[val].vcpu) {
512 spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
513 return 0;
514 }
515
516 /*
517 * The new CPUID is already taken by another vCPU
518 * Forbid sharing the same CPUID between different vcpus
519 */
520 spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
521 return -EINVAL;
522 }
523
524 kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, val);
525 map->phys_map[val].enabled = true;
526 map->phys_map[val].vcpu = vcpu;
527 spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
528
529 return 0;
530 }
531
532 static inline void kvm_drop_cpuid(struct kvm_vcpu *vcpu)
533 {
534 int cpuid;
535 struct kvm_phyid_map *map;
536 struct loongarch_csrs *csr = vcpu->arch.csr;
537
538 map = vcpu->kvm->arch.phyid_map;
539 cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);
540
541 if (cpuid >= KVM_MAX_PHYID)
542 return;
543
544 spin_lock(&vcpu->kvm->arch.phyid_map_lock);
545 if (map->phys_map[cpuid].enabled) {
546 map->phys_map[cpuid].vcpu = NULL;
547 map->phys_map[cpuid].enabled = false;
548 kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);
549 }
550 spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
551 }
552
553 struct kvm_vcpu *kvm_get_vcpu_by_cpuid(struct kvm *kvm, int cpuid)
554 {
555 struct kvm_phyid_map *map;
556
557 if (cpuid >= KVM_MAX_PHYID)
558 return NULL;
559
560 map = kvm->arch.phyid_map;
561 if (!map->phys_map[cpuid].enabled)
562 return NULL;
563
564 return map->phys_map[cpuid].vcpu;
565 }
566
567 static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val)
568 {
569 unsigned long gintc;
570 struct loongarch_csrs *csr = vcpu->arch.csr;
571
572 if (get_gcsr_flag(id) & INVALID_GCSR)
573 return -EINVAL;
574
575 if (id == LOONGARCH_CSR_ESTAT) {
576 preempt_disable();
577 vcpu_load(vcpu);
578 /*
579 * Sync pending interrupts into ESTAT so that they are
580 * preserved across the VM migration stage
581 */
582 kvm_deliver_intr(vcpu);
583 vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
584 vcpu_put(vcpu);
585 preempt_enable();
586
587 /* ESTAT IP0~IP7 are taken from GINTC */
588 gintc = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC) & 0xff;
589 *val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT) | (gintc << 2);
590 return 0;
591 }
592
593 /*
594 * Get software CSR state since software state is consistent
595 * with hardware for synchronous ioctl
596 */
597 *val = kvm_read_sw_gcsr(csr, id);
598
599 return 0;
600 }
601
602 static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
603 {
604 int ret = 0, gintc;
605 struct loongarch_csrs *csr = vcpu->arch.csr;
606
607 if (get_gcsr_flag(id) & INVALID_GCSR)
608 return -EINVAL;
609
610 if (id == LOONGARCH_CSR_CPUID)
611 return kvm_set_cpuid(vcpu, val);
612
613 if (id == LOONGARCH_CSR_ESTAT) {
614 /* ESTAT IP0~IP7 are injected through GINTC */
615 gintc = (val >> 2) & 0xff;
616 kvm_set_sw_gcsr(csr, LOONGARCH_CSR_GINTC, gintc);
617
618 gintc = val & ~(0xffUL << 2);
619 kvm_set_sw_gcsr(csr, LOONGARCH_CSR_ESTAT, gintc);
620
621 return ret;
622 }
623
624 kvm_write_sw_gcsr(csr, id, val);
625
626 /*
627 * After modifying a PMU CSR value of the vCPU, set KVM_REQ_PMU
628 * if any of the PMU CSRs are in use.
629 */
630 if (id >= LOONGARCH_CSR_PERFCTRL0 && id <= LOONGARCH_CSR_PERFCNTR3) {
631 unsigned long val;
632
633 val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0) |
634 kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1) |
635 kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2) |
636 kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
637
638 if (val & KVM_PMU_EVENT_ENABLED)
639 kvm_make_request(KVM_REQ_PMU, vcpu);
640 }
641
642 return ret;
643 }
644
645 static int _kvm_get_cpucfg_mask(int id, u64 *v)
646 {
647 if (id < 0 || id >= KVM_MAX_CPUCFG_REGS)
648 return -EINVAL;
649
650 switch (id) {
651 case LOONGARCH_CPUCFG0:
652 *v = GENMASK(31, 0);
653 return 0;
654 case LOONGARCH_CPUCFG1:
655 /* CPUCFG1_MSGINT is not supported by KVM */
656 *v = GENMASK(25, 0);
657 return 0;
658 case LOONGARCH_CPUCFG2:
659 /* CPUCFG2 features unconditionally supported by KVM */
660 *v = CPUCFG2_FP | CPUCFG2_FPSP | CPUCFG2_FPDP |
661 CPUCFG2_FPVERS | CPUCFG2_LLFTP | CPUCFG2_LLFTPREV |
662 CPUCFG2_LSPW | CPUCFG2_LAM;
663 /*
664 * For the ISA extensions listed below, if one is supported
665 * by the host, then it is also supported by KVM.
666 */
667 if (cpu_has_lsx)
668 *v |= CPUCFG2_LSX;
669 if (cpu_has_lasx)
670 *v |= CPUCFG2_LASX;
671 if (cpu_has_lbt_x86)
672 *v |= CPUCFG2_X86BT;
673 if (cpu_has_lbt_arm)
674 *v |= CPUCFG2_ARMBT;
675 if (cpu_has_lbt_mips)
676 *v |= CPUCFG2_MIPSBT;
677
678 return 0;
679 case LOONGARCH_CPUCFG3:
680 *v = GENMASK(16, 0);
681 return 0;
682 case LOONGARCH_CPUCFG4:
683 case LOONGARCH_CPUCFG5:
684 *v = GENMASK(31, 0);
685 return 0;
686 case LOONGARCH_CPUCFG6:
687 if (cpu_has_pmp)
688 *v = GENMASK(14, 0);
689 else
690 *v = 0;
691 return 0;
692 case LOONGARCH_CPUCFG16:
693 *v = GENMASK(16, 0);
694 return 0;
695 case LOONGARCH_CPUCFG17 ... LOONGARCH_CPUCFG20:
696 *v = GENMASK(30, 0);
697 return 0;
698 default:
699 /*
700 * CPUCFG bits should be zero if reserved by HW or not
701 * supported by KVM.
702 */
703 *v = 0;
704 return 0;
705 }
706 }
707
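/*
* Validate a user-supplied CPUCFG value against the writable mask and,
* for CPUCFG2/CPUCFG6, against architectural consistency and host
* capability constraints.
*/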
708 static int kvm_check_cpucfg(int id, u64 val)
709 {
710 int ret;
711 u64 mask = 0;
712
713 ret = _kvm_get_cpucfg_mask(id, &mask);
714 if (ret)
715 return ret;
716
717 if (val & ~mask)
718 /* Unsupported features and/or the higher 32 bits should not be set */
719 return -EINVAL;
720
721 switch (id) {
722 case LOONGARCH_CPUCFG2:
723 if (!(val & CPUCFG2_LLFTP))
724 /* Guests must have a constant timer */
725 return -EINVAL;
726 if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP)))
727 /* Single- and double-precision floating point must both be set when FP is enabled */
728 return -EINVAL;
729 if ((val & CPUCFG2_LSX) && !(val & CPUCFG2_FP))
730 /* LSX architecturally implies FP but val does not satisfy that */
731 return -EINVAL;
732 if ((val & CPUCFG2_LASX) && !(val & CPUCFG2_LSX))
733 /* LASX architecturally implies LSX and FP but val does not satisfy that */
734 return -EINVAL;
735 return 0;
736 case LOONGARCH_CPUCFG6:
737 if (val & CPUCFG6_PMP) {
738 u32 host = read_cpucfg(LOONGARCH_CPUCFG6);
739 if ((val & CPUCFG6_PMBITS) != (host & CPUCFG6_PMBITS))
740 return -EINVAL;
741 if ((val & CPUCFG6_PMNUM) > (host & CPUCFG6_PMNUM))
742 return -EINVAL;
743 if ((val & CPUCFG6_UPM) && !(host & CPUCFG6_UPM))
744 return -EINVAL;
745 }
746 return 0;
747 default:
748 /*
749 * Values for the other CPUCFG IDs are not being further validated
750 * besides the mask check above.
751 */
752 return 0;
753 }
754 }
755
756 static int kvm_get_one_reg(struct kvm_vcpu *vcpu,
757 const struct kvm_one_reg *reg, u64 *v)
758 {
759 int id, ret = 0;
760 u64 type = reg->id & KVM_REG_LOONGARCH_MASK;
761
762 switch (type) {
763 case KVM_REG_LOONGARCH_CSR:
764 id = KVM_GET_IOC_CSR_IDX(reg->id);
765 ret = _kvm_getcsr(vcpu, id, v);
766 break;
767 case KVM_REG_LOONGARCH_CPUCFG:
768 id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
769 if (id >= 0 && id < KVM_MAX_CPUCFG_REGS)
770 *v = vcpu->arch.cpucfg[id];
771 else
772 ret = -EINVAL;
773 break;
774 case KVM_REG_LOONGARCH_LBT:
775 if (!kvm_guest_has_lbt(&vcpu->arch))
776 return -ENXIO;
777
778 switch (reg->id) {
779 case KVM_REG_LOONGARCH_LBT_SCR0:
780 *v = vcpu->arch.lbt.scr0;
781 break;
782 case KVM_REG_LOONGARCH_LBT_SCR1:
783 *v = vcpu->arch.lbt.scr1;
784 break;
785 case KVM_REG_LOONGARCH_LBT_SCR2:
786 *v = vcpu->arch.lbt.scr2;
787 break;
788 case KVM_REG_LOONGARCH_LBT_SCR3:
789 *v = vcpu->arch.lbt.scr3;
790 break;
791 case KVM_REG_LOONGARCH_LBT_EFLAGS:
792 *v = vcpu->arch.lbt.eflags;
793 break;
794 case KVM_REG_LOONGARCH_LBT_FTOP:
795 *v = vcpu->arch.fpu.ftop;
796 break;
797 default:
798 ret = -EINVAL;
799 break;
800 }
801 break;
802 case KVM_REG_LOONGARCH_KVM:
803 switch (reg->id) {
804 case KVM_REG_LOONGARCH_COUNTER:
805 *v = drdtime() + vcpu->kvm->arch.time_offset;
806 break;
807 case KVM_REG_LOONGARCH_DEBUG_INST:
808 *v = INSN_HVCL | KVM_HCALL_SWDBG;
809 break;
810 default:
811 ret = -EINVAL;
812 break;
813 }
814 break;
815 default:
816 ret = -EINVAL;
817 break;
818 }
819
820 return ret;
821 }
822
823 static int kvm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
824 {
825 int ret = 0;
826 u64 v, size = reg->id & KVM_REG_SIZE_MASK;
827
828 switch (size) {
829 case KVM_REG_SIZE_U64:
830 ret = kvm_get_one_reg(vcpu, reg, &v);
831 if (ret)
832 return ret;
833 ret = put_user(v, (u64 __user *)(long)reg->addr);
834 break;
835 default:
836 ret = -EINVAL;
837 break;
838 }
839
840 return ret;
841 }
842
843 static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
844 const struct kvm_one_reg *reg, u64 v)
845 {
846 int id, ret = 0;
847 u64 type = reg->id & KVM_REG_LOONGARCH_MASK;
848
849 switch (type) {
850 case KVM_REG_LOONGARCH_CSR:
851 id = KVM_GET_IOC_CSR_IDX(reg->id);
852 ret = _kvm_setcsr(vcpu, id, v);
853 break;
854 case KVM_REG_LOONGARCH_CPUCFG:
855 id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
856 ret = kvm_check_cpucfg(id, v);
857 if (ret)
858 break;
859 vcpu->arch.cpucfg[id] = (u32)v;
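/*
* Each PMU counter has a control and a counter CSR, so the highest
* valid PMU CSR id follows from the counter count in CPUCFG6.
*/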
860 if (id == LOONGARCH_CPUCFG6)
861 vcpu->arch.max_pmu_csrid =
862 LOONGARCH_CSR_PERFCTRL0 + 2 * kvm_get_pmu_num(&vcpu->arch) + 1;
863 break;
864 case KVM_REG_LOONGARCH_LBT:
865 if (!kvm_guest_has_lbt(&vcpu->arch))
866 return -ENXIO;
867
868 switch (reg->id) {
869 case KVM_REG_LOONGARCH_LBT_SCR0:
870 vcpu->arch.lbt.scr0 = v;
871 break;
872 case KVM_REG_LOONGARCH_LBT_SCR1:
873 vcpu->arch.lbt.scr1 = v;
874 break;
875 case KVM_REG_LOONGARCH_LBT_SCR2:
876 vcpu->arch.lbt.scr2 = v;
877 break;
878 case KVM_REG_LOONGARCH_LBT_SCR3:
879 vcpu->arch.lbt.scr3 = v;
880 break;
881 case KVM_REG_LOONGARCH_LBT_EFLAGS:
882 vcpu->arch.lbt.eflags = v;
883 break;
884 case KVM_REG_LOONGARCH_LBT_FTOP:
885 vcpu->arch.fpu.ftop = v;
886 break;
887 default:
888 ret = -EINVAL;
889 break;
890 }
891 break;
892 case KVM_REG_LOONGARCH_KVM:
893 switch (reg->id) {
894 case KVM_REG_LOONGARCH_COUNTER:
895 /*
896 * gftoffset is per-board, not per-vCPU;
897 * only set it once (by vCPU 0) on an SMP system
898 */
899 if (vcpu->vcpu_id == 0)
900 vcpu->kvm->arch.time_offset = (signed long)(v - drdtime());
901 break;
902 case KVM_REG_LOONGARCH_VCPU_RESET:
903 vcpu->arch.st.guest_addr = 0;
904 memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
905 memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
906
907 /*
908 * On vCPU reset, clear the ESTAT and GINTC registers here;
909 * the other CSR registers are cleared via _kvm_setcsr().
910 */
911 kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_GINTC, 0);
912 kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_ESTAT, 0);
913 break;
914 default:
915 ret = -EINVAL;
916 break;
917 }
918 break;
919 default:
920 ret = -EINVAL;
921 break;
922 }
923
924 return ret;
925 }
926
927 static int kvm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
928 {
929 int ret = 0;
930 u64 v, size = reg->id & KVM_REG_SIZE_MASK;
931
932 switch (size) {
933 case KVM_REG_SIZE_U64:
934 ret = get_user(v, (u64 __user *)(long)reg->addr);
935 if (ret)
936 return ret;
937 break;
938 default:
939 return -EINVAL;
940 }
941
942 return kvm_set_one_reg(vcpu, reg, v);
943 }
944
945 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
946 {
947 return -ENOIOCTLCMD;
948 }
949
950 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
951 {
952 return -ENOIOCTLCMD;
953 }
954
955 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
956 {
957 int i;
958
959 for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
960 regs->gpr[i] = vcpu->arch.gprs[i];
961
962 regs->pc = vcpu->arch.pc;
963
964 return 0;
965 }
966
967 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
968 {
969 int i;
970
971 for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
972 vcpu->arch.gprs[i] = regs->gpr[i];
973
974 vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
975 vcpu->arch.pc = regs->pc;
976
977 return 0;
978 }
979
980 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
981 struct kvm_enable_cap *cap)
982 {
983 /* FPU is enabled by default; LSX/LASX will be supported later. */
984 return -EINVAL;
985 }
986
987 static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu,
988 struct kvm_device_attr *attr)
989 {
990 switch (attr->attr) {
991 case LOONGARCH_CPUCFG2:
992 case LOONGARCH_CPUCFG6:
993 return 0;
994 case CPUCFG_KVM_FEATURE:
995 return 0;
996 default:
997 return -ENXIO;
998 }
999
1000 return -ENXIO;
1001 }
1002
1003 static int kvm_loongarch_pvtime_has_attr(struct kvm_vcpu *vcpu,
1004 struct kvm_device_attr *attr)
1005 {
1006 if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
1007 || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
1008 return -ENXIO;
1009
1010 return 0;
1011 }
1012
1013 static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
1014 struct kvm_device_attr *attr)
1015 {
1016 int ret = -ENXIO;
1017
1018 switch (attr->group) {
1019 case KVM_LOONGARCH_VCPU_CPUCFG:
1020 ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr);
1021 break;
1022 case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
1023 ret = kvm_loongarch_pvtime_has_attr(vcpu, attr);
1024 break;
1025 default:
1026 break;
1027 }
1028
1029 return ret;
1030 }
1031
1032 static int kvm_loongarch_cpucfg_get_attr(struct kvm_vcpu *vcpu,
1033 struct kvm_device_attr *attr)
1034 {
1035 int ret = 0;
1036 uint64_t val;
1037 uint64_t __user *uaddr = (uint64_t __user *)attr->addr;
1038
1039 switch (attr->attr) {
1040 case 0 ... (KVM_MAX_CPUCFG_REGS - 1):
1041 ret = _kvm_get_cpucfg_mask(attr->attr, &val);
1042 if (ret)
1043 return ret;
1044 break;
1045 case CPUCFG_KVM_FEATURE:
1046 val = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK;
1047 break;
1048 default:
1049 return -ENXIO;
1050 }
1051
1052 put_user(val, uaddr);
1053
1054 return ret;
1055 }
1056
1057 static int kvm_loongarch_pvtime_get_attr(struct kvm_vcpu *vcpu,
1058 struct kvm_device_attr *attr)
1059 {
1060 u64 gpa;
1061 u64 __user *user = (u64 __user *)attr->addr;
1062
1063 if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
1064 || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
1065 return -ENXIO;
1066
1067 gpa = vcpu->arch.st.guest_addr;
1068 if (put_user(gpa, user))
1069 return -EFAULT;
1070
1071 return 0;
1072 }
1073
1074 static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu,
1075 struct kvm_device_attr *attr)
1076 {
1077 int ret = -ENXIO;
1078
1079 switch (attr->group) {
1080 case KVM_LOONGARCH_VCPU_CPUCFG:
1081 ret = kvm_loongarch_cpucfg_get_attr(vcpu, attr);
1082 break;
1083 case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
1084 ret = kvm_loongarch_pvtime_get_attr(vcpu, attr);
1085 break;
1086 default:
1087 break;
1088 }
1089
1090 return ret;
1091 }
1092
1093 static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu,
1094 struct kvm_device_attr *attr)
1095 {
1096 u64 val, valid;
1097 u64 __user *user = (u64 __user *)attr->addr;
1098 struct kvm *kvm = vcpu->kvm;
1099
1100 switch (attr->attr) {
1101 case CPUCFG_KVM_FEATURE:
1102 if (get_user(val, user))
1103 return -EFAULT;
1104
1105 valid = LOONGARCH_PV_FEAT_MASK;
1106 if (val & ~valid)
1107 return -EINVAL;
1108
1109 /* All vCPUs must be set with the same PV features */
1110 if ((kvm->arch.pv_features & LOONGARCH_PV_FEAT_UPDATED)
1111 && ((kvm->arch.pv_features & valid) != val))
1112 return -EINVAL;
1113 kvm->arch.pv_features = val | LOONGARCH_PV_FEAT_UPDATED;
1114 return 0;
1115 default:
1116 return -ENXIO;
1117 }
1118 }
1119
1120 static int kvm_loongarch_pvtime_set_attr(struct kvm_vcpu *vcpu,
1121 struct kvm_device_attr *attr)
1122 {
1123 int idx, ret = 0;
1124 u64 gpa, __user *user = (u64 __user *)attr->addr;
1125 struct kvm *kvm = vcpu->kvm;
1126
1127 if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
1128 || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
1129 return -ENXIO;
1130
1131 if (get_user(gpa, user))
1132 return -EFAULT;
1133
1134 if (gpa & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
1135 return -EINVAL;
1136
1137 if (!(gpa & KVM_STEAL_PHYS_VALID)) {
1138 vcpu->arch.st.guest_addr = gpa;
1139 return 0;
1140 }
1141
1142 /* Check the address is in a valid memslot */
1143 idx = srcu_read_lock(&kvm->srcu);
1144 if (kvm_is_error_hva(gfn_to_hva(kvm, gpa >> PAGE_SHIFT)))
1145 ret = -EINVAL;
1146 srcu_read_unlock(&kvm->srcu, idx);
1147
1148 if (!ret) {
1149 vcpu->arch.st.guest_addr = gpa;
1150 vcpu->arch.st.last_steal = current->sched_info.run_delay;
1151 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
1152 }
1153
1154 return ret;
1155 }
1156
1157 static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu,
1158 struct kvm_device_attr *attr)
1159 {
1160 int ret = -ENXIO;
1161
1162 switch (attr->group) {
1163 case KVM_LOONGARCH_VCPU_CPUCFG:
1164 ret = kvm_loongarch_cpucfg_set_attr(vcpu, attr);
1165 break;
1166 case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
1167 ret = kvm_loongarch_pvtime_set_attr(vcpu, attr);
1168 break;
1169 default:
1170 break;
1171 }
1172
1173 return ret;
1174 }
1175
1176 long kvm_arch_vcpu_ioctl(struct file *filp,
1177 unsigned int ioctl, unsigned long arg)
1178 {
1179 long r;
1180 struct kvm_device_attr attr;
1181 void __user *argp = (void __user *)arg;
1182 struct kvm_vcpu *vcpu = filp->private_data;
1183
1184 /*
1185 * Only software CSR should be modified
1186 *
1187 * If any hardware CSR register is modified, a vcpu_load/vcpu_put pair
1188 * should be used. Since the CSR registers are owned by this vCPU,
1189 * switching to another vCPU requires that vCPU to reload them.
1190 *
1191 * If a software CSR is modified, the KVM_LARCH_HWCSR_USABLE bit should
1192 * be cleared in vcpu->arch.aux_inuse, and vcpu_load will check the
1193 * aux_inuse flag and reload the CSR registers from software state.
1194 */
1195
1196 switch (ioctl) {
1197 case KVM_SET_ONE_REG:
1198 case KVM_GET_ONE_REG: {
1199 struct kvm_one_reg reg;
1200
1201 r = -EFAULT;
1202 if (copy_from_user(&reg, argp, sizeof(reg)))
1203 break;
1204 if (ioctl == KVM_SET_ONE_REG) {
1205 r = kvm_set_reg(vcpu, &reg);
1206 vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
1207 } else
1208 r = kvm_get_reg(vcpu, &reg);
1209 break;
1210 }
1211 case KVM_ENABLE_CAP: {
1212 struct kvm_enable_cap cap;
1213
1214 r = -EFAULT;
1215 if (copy_from_user(&cap, argp, sizeof(cap)))
1216 break;
1217 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
1218 break;
1219 }
1220 case KVM_HAS_DEVICE_ATTR: {
1221 r = -EFAULT;
1222 if (copy_from_user(&attr, argp, sizeof(attr)))
1223 break;
1224 r = kvm_loongarch_vcpu_has_attr(vcpu, &attr);
1225 break;
1226 }
1227 case KVM_GET_DEVICE_ATTR: {
1228 r = -EFAULT;
1229 if (copy_from_user(&attr, argp, sizeof(attr)))
1230 break;
1231 r = kvm_loongarch_vcpu_get_attr(vcpu, &attr);
1232 break;
1233 }
1234 case KVM_SET_DEVICE_ATTR: {
1235 r = -EFAULT;
1236 if (copy_from_user(&attr, argp, sizeof(attr)))
1237 break;
1238 r = kvm_loongarch_vcpu_set_attr(vcpu, &attr);
1239 break;
1240 }
1241 default:
1242 r = -ENOIOCTLCMD;
1243 break;
1244 }
1245
1246 return r;
1247 }
1248
1249 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1250 {
1251 int i = 0;
1252
1253 fpu->fcc = vcpu->arch.fpu.fcc;
1254 fpu->fcsr = vcpu->arch.fpu.fcsr;
1255 for (i = 0; i < NUM_FPU_REGS; i++)
1256 memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i], FPU_REG_WIDTH / 64);
1257
1258 return 0;
1259 }
1260
1261 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1262 {
1263 int i = 0;
1264
1265 vcpu->arch.fpu.fcc = fpu->fcc;
1266 vcpu->arch.fpu.fcsr = fpu->fcsr;
1267 for (i = 0; i < NUM_FPU_REGS; i++)
1268 memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i], FPU_REG_WIDTH / 64);
1269
1270 return 0;
1271 }
1272
1273 #ifdef CONFIG_CPU_HAS_LBT
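/*
* Enable the LoongArch Binary Translation (LBT) extension for the guest
* and restore its LBT context (scratch registers and eflags).
*/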
1274 int kvm_own_lbt(struct kvm_vcpu *vcpu)
1275 {
1276 if (!kvm_guest_has_lbt(&vcpu->arch))
1277 return -EINVAL;
1278
1279 preempt_disable();
1280 set_csr_euen(CSR_EUEN_LBTEN);
1281 _restore_lbt(&vcpu->arch.lbt);
1282 vcpu->arch.aux_inuse |= KVM_LARCH_LBT;
1283 preempt_enable();
1284
1285 return 0;
1286 }
1287
1288 static void kvm_lose_lbt(struct kvm_vcpu *vcpu)
1289 {
1290 preempt_disable();
1291 if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) {
1292 _save_lbt(&vcpu->arch.lbt);
1293 clear_csr_euen(CSR_EUEN_LBTEN);
1294 vcpu->arch.aux_inuse &= ~KVM_LARCH_LBT;
1295 }
1296 preempt_enable();
1297 }
1298
1299 static void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr)
1300 {
1301 /*
1302 * If TM is enabled, saving/restoring the top register (FTOP) would
1303 * raise an LBT exception, so enable LBT in advance
1304 */
1305 if (fcsr & FPU_CSR_TM)
1306 kvm_own_lbt(vcpu);
1307 }
1308
1309 static void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu)
1310 {
1311 if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1312 if (vcpu->arch.aux_inuse & KVM_LARCH_LBT)
1313 return;
1314 kvm_check_fcsr(vcpu, read_fcsr(LOONGARCH_FCSR0));
1315 }
1316 }
1317 #else
1318 static inline void kvm_lose_lbt(struct kvm_vcpu *vcpu) { }
1319 static inline void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr) { }
1320 static inline void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu) { }
1321 #endif
1322
1323 /* Enable FPU and restore context */
1324 void kvm_own_fpu(struct kvm_vcpu *vcpu)
1325 {
1326 preempt_disable();
1327
1328 /*
1329 * Enable FPU for the guest
1330 * Check the guest FCSR first so that LBT is enabled in advance when TM is set
1331 */
1332 kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1333 set_csr_euen(CSR_EUEN_FPEN);
1334
1335 kvm_restore_fpu(&vcpu->arch.fpu);
1336 vcpu->arch.aux_inuse |= KVM_LARCH_FPU;
1337 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
1338
1339 preempt_enable();
1340 }
1341
1342 #ifdef CONFIG_CPU_HAS_LSX
1343 /* Enable LSX and restore context */
1344 int kvm_own_lsx(struct kvm_vcpu *vcpu)
1345 {
1346 if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch))
1347 return -EINVAL;
1348
1349 preempt_disable();
1350
1351 /* Enable LSX for guest */
1352 kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1353 set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN);
1354 switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1355 case KVM_LARCH_FPU:
1356 /*
1357 * Guest FPU state already loaded,
1358 * only restore upper LSX state
1359 */
1360 _restore_lsx_upper(&vcpu->arch.fpu);
1361 break;
1362 default:
1363 /* Neither FP nor LSX is already active,
1364 * restore full LSX state
1365 */
1366 kvm_restore_lsx(&vcpu->arch.fpu);
1367 break;
1368 }
1369
1370 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX);
1371 vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU;
1372 preempt_enable();
1373
1374 return 0;
1375 }
1376 #endif
1377
1378 #ifdef CONFIG_CPU_HAS_LASX
1379 /* Enable LASX and restore context */
1380 int kvm_own_lasx(struct kvm_vcpu *vcpu)
1381 {
1382 if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch) || !kvm_guest_has_lasx(&vcpu->arch))
1383 return -EINVAL;
1384
1385 preempt_disable();
1386
1387 kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1388 set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
1389 switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) {
1390 case KVM_LARCH_LSX:
1391 case KVM_LARCH_LSX | KVM_LARCH_FPU:
1392 /* Guest LSX state already loaded, only restore upper LASX state */
1393 _restore_lasx_upper(&vcpu->arch.fpu);
1394 break;
1395 case KVM_LARCH_FPU:
1396 /* Guest FP state already loaded, only restore upper LSX & LASX state */
1397 _restore_lsx_upper(&vcpu->arch.fpu);
1398 _restore_lasx_upper(&vcpu->arch.fpu);
1399 break;
1400 default:
1401 /* Neither FP nor LSX is already active, restore full LASX state */
1402 kvm_restore_lasx(&vcpu->arch.fpu);
1403 break;
1404 }
1405
1406 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX);
1407 vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU;
1408 preempt_enable();
1409
1410 return 0;
1411 }
1412 #endif
1413
1414 /* Save context and disable FPU */
1415 void kvm_lose_fpu(struct kvm_vcpu *vcpu)
1416 {
1417 preempt_disable();
1418
1419 kvm_check_fcsr_alive(vcpu);
1420 if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) {
1421 kvm_save_lasx(&vcpu->arch.fpu);
1422 vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX);
1423 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LASX);
1424
1425 /* Disable LASX & LSX & FPU */
1426 clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
1427 } else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) {
1428 kvm_save_lsx(&vcpu->arch.fpu);
1429 vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU);
1430 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX);
1431
1432 /* Disable LSX & FPU */
1433 clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN);
1434 } else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1435 kvm_save_fpu(&vcpu->arch.fpu);
1436 vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU;
1437 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);
1438
1439 /* Disable FPU */
1440 clear_csr_euen(CSR_EUEN_FPEN);
1441 }
1442 kvm_lose_lbt(vcpu);
1443
1444 preempt_enable();
1445 }
1446
1447 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
1448 {
1449 int intr = (int)irq->irq;
1450
1451 if (intr > 0)
1452 kvm_queue_irq(vcpu, intr);
1453 else if (intr < 0)
1454 kvm_dequeue_irq(vcpu, -intr);
1455 else {
1456 kvm_err("%s: invalid interrupt ioctl %d\n", __func__, irq->irq);
1457 return -EINVAL;
1458 }
1459
1460 kvm_vcpu_kick(vcpu);
1461
1462 return 0;
1463 }
1464
1465 long kvm_arch_vcpu_async_ioctl(struct file *filp,
1466 unsigned int ioctl, unsigned long arg)
1467 {
1468 void __user *argp = (void __user *)arg;
1469 struct kvm_vcpu *vcpu = filp->private_data;
1470
1471 if (ioctl == KVM_INTERRUPT) {
1472 struct kvm_interrupt irq;
1473
1474 if (copy_from_user(&irq, argp, sizeof(irq)))
1475 return -EFAULT;
1476
1477 kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq);
1478
1479 return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
1480 }
1481
1482 return -ENOIOCTLCMD;
1483 }
1484
1485 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
1486 {
1487 return 0;
1488 }
1489
1490 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
1491 {
1492 unsigned long timer_hz;
1493 struct loongarch_csrs *csr;
1494
1495 vcpu->arch.vpid = 0;
1496 vcpu->arch.flush_gpa = INVALID_GPA;
1497
1498 hrtimer_setup(&vcpu->arch.swtimer, kvm_swtimer_wakeup, CLOCK_MONOTONIC,
1499 HRTIMER_MODE_ABS_PINNED_HARD);
1500
1501 /* Get GPA (=HVA) of PGD for kvm hypervisor */
1502 vcpu->arch.kvm_pgd = __pa(vcpu->kvm->arch.pgd);
1503
1504 /*
1505 * Get the PGD for the primary MMU. The virtual address is used, since
1506 * memory is accessed after loading from CSR_PGD in the TLB exception fast path.
1507 */
1508 vcpu->arch.host_pgd = (unsigned long)vcpu->kvm->mm->pgd;
1509
1510 vcpu->arch.handle_exit = kvm_handle_exit;
1511 vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry;
1512 vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL);
1513 if (!vcpu->arch.csr)
1514 return -ENOMEM;
1515
1516 /*
1517 * All KVM exceptions share one exception entry, and the host <-> guest
1518 * switch also switches the ECFG.VS field, so keep the host ECFG.VS value here.
1519 */
1520 vcpu->arch.host_ecfg = (read_csr_ecfg() & CSR_ECFG_VS);
1521
1522 /* Init */
1523 vcpu->arch.last_sched_cpu = -1;
1524
1525 /* Init ipi_state lock */
1526 spin_lock_init(&vcpu->arch.ipi_state.lock);
1527
1528 /*
1529 * Initialize guest register state to valid architectural reset state.
1530 */
1531 timer_hz = calc_const_freq();
1532 kvm_init_timer(vcpu, timer_hz);
1533
1534 /* Set the initial mode for the guest: direct address (DA) mode */
1535 csr = vcpu->arch.csr;
1536 kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CRMD, CSR_CRMD_DA);
1537
1538 /* Set cpuid */
1539 kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id);
1540 kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);
1541
1542 /* Start with no pending virtual guest interrupts */
1543 csr->csrs[LOONGARCH_CSR_GINTC] = 0;
1544
1545 return 0;
1546 }
1547
1548 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
1549 {
1550 }
1551
1552 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1553 {
1554 int cpu;
1555 struct kvm_context *context;
1556
1557 hrtimer_cancel(&vcpu->arch.swtimer);
1558 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
1559 kvm_drop_cpuid(vcpu);
1560 kfree(vcpu->arch.csr);
1561
1562 /*
1563 * If the vCPU is freed and reused as another vCPU, we don't want the
1564 * matching pointer wrongly hanging around in last_vcpu.
1565 */
1566 for_each_possible_cpu(cpu) {
1567 context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
1568 if (context->last_vcpu == vcpu)
1569 context->last_vcpu = NULL;
1570 }
1571 }
1572
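/*
* Load the guest CSR context onto this physical CPU. Hardware CSRs are
* only reloaded when KVM_LARCH_HWCSR_USABLE is clear, i.e. when the vCPU
* migrated, another vCPU ran here, or software CSR state was modified.
*/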
1573 static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1574 {
1575 bool migrated;
1576 struct kvm_context *context;
1577 struct loongarch_csrs *csr = vcpu->arch.csr;
1578
1579 /*
1580 * Have we migrated to a different CPU?
1581 * If so, any old guest TLB state may be stale.
1582 */
1583 migrated = (vcpu->arch.last_sched_cpu != cpu);
1584
1585 /*
1586 * Was this the last vCPU to run on this CPU?
1587 * If not, any old guest state from this vCPU will have been clobbered.
1588 */
1589 context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
1590 if (migrated || (context->last_vcpu != vcpu))
1591 vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
1592 context->last_vcpu = vcpu;
1593
1594 /* Restore timer state regardless */
1595 kvm_restore_timer(vcpu);
1596 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
1597
1598 /* Restore hardware PMU CSRs */
1599 kvm_restore_pmu(vcpu);
1600
1601 /* Don't bother restoring registers multiple times unless necessary */
1602 if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE)
1603 return 0;
1604
1605 write_csr_gcntc((ulong)vcpu->kvm->arch.time_offset);
1606
1607 /* Restore guest CSR registers */
1608 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
1609 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
1610 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
1611 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_MISC);
1612 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
1613 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ERA);
1614 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADV);
1615 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADI);
1616 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
1617 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
1618 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
1619 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
1620 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
1621 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ASID);
1622 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
1623 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
1624 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
1625 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
1626 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
1627 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
1628 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
1629 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS0);
1630 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS1);
1631 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS2);
1632 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS3);
1633 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS4);
1634 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS5);
1635 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS6);
1636 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS7);
1637 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TMID);
1638 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
1639 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
1640 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
1641 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
1642 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
1643 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
1644 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
1645 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
1646 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
1647 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
1648 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
1649 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
1650 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
1651 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
1652
1653 /* Restore Root.GINTC from unused Guest.GINTC register */
1654 write_csr_gintc(csr->csrs[LOONGARCH_CSR_GINTC]);
1655
1656 /*
1657 * We should clear linked load bit to break interrupted atomics. This
1658 * prevents a SC on the next vCPU from succeeding by matching a LL on
1659 * the previous vCPU.
1660 */
1661 if (vcpu->kvm->created_vcpus > 1)
1662 set_gcsr_llbctl(CSR_LLBCTL_WCLLB);
1663
1664 vcpu->arch.aux_inuse |= KVM_LARCH_HWCSR_USABLE;
1665
1666 return 0;
1667 }
1668
1669 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1670 {
1671 unsigned long flags;
1672
1673 local_irq_save(flags);
1674 /* Restore guest state to registers */
1675 _kvm_vcpu_load(vcpu, cpu);
1676 local_irq_restore(flags);
1677 }
1678
1679 static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
1680 {
1681 struct loongarch_csrs *csr = vcpu->arch.csr;
1682
1683 kvm_lose_fpu(vcpu);
1684
1685 /*
1686 * Update CSR state from hardware if the software CSR state is stale.
1687 * Most CSR registers are kept unchanged across a process context
1688 * switch, except for registers such as the remaining timer tick value
1689 * and the injected interrupt state.
1690 */
1691 if (vcpu->arch.aux_inuse & KVM_LARCH_SWCSR_LATEST)
1692 goto out;
1693
1694 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
1695 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
1696 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
1697 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_MISC);
1698 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
1699 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ERA);
1700 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADV);
1701 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADI);
1702 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
1703 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
1704 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
1705 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
1706 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
1707 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ASID);
1708 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
1709 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
1710 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
1711 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
1712 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
1713 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
1714 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
1715 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG1);
1716 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG2);
1717 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG3);
1718 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS0);
1719 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS1);
1720 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS2);
1721 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS3);
1722 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS4);
1723 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS5);
1724 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS6);
1725 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS7);
1726 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TMID);
1727 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
1728 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
1729 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
1730 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
1731 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
1732 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
1733 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
1734 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
1735 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
1736 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
1737 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
1738 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
1739 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
1740 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
1741
1742 vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST;
1743
1744 out:
1745 kvm_save_timer(vcpu);
1746 /* Save Root.GINTC into unused Guest.GINTC register */
1747 csr->csrs[LOONGARCH_CSR_GINTC] = read_csr_gintc();
1748
1749 return 0;
1750 }
1751
1752 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1753 {
1754 int cpu;
1755 unsigned long flags;
1756
1757 local_irq_save(flags);
1758 cpu = smp_processor_id();
1759 vcpu->arch.last_sched_cpu = cpu;
1760
1761 /* Save guest state in registers */
1762 _kvm_vcpu_put(vcpu, cpu);
1763 local_irq_restore(flags);
1764 }
1765
1766 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
1767 {
1768 int r = -EINTR;
1769 struct kvm_run *run = vcpu->run;
1770
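/*
* Complete any MMIO, IOCSR or hypercall access that caused the previous
* exit to userspace before the vCPU is allowed to run again.
*/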
1771 if (vcpu->mmio_needed) {
1772 if (!vcpu->mmio_is_write)
1773 kvm_complete_mmio_read(vcpu, run);
1774 vcpu->mmio_needed = 0;
1775 }
1776
1777 switch (run->exit_reason) {
1778 case KVM_EXIT_HYPERCALL:
1779 kvm_complete_user_service(vcpu, run);
1780 break;
1781 case KVM_EXIT_LOONGARCH_IOCSR:
1782 if (!run->iocsr_io.is_write)
1783 kvm_complete_iocsr_read(vcpu, run);
1784 break;
1785 }
1786
1787 if (!vcpu->wants_to_run)
1788 return r;
1789
1790 /* Clear exit_reason */
1791 run->exit_reason = KVM_EXIT_UNKNOWN;
1792 lose_fpu(1);
1793 vcpu_load(vcpu);
1794 kvm_sigset_activate(vcpu);
1795 r = kvm_pre_enter_guest(vcpu);
1796 if (r != RESUME_GUEST)
1797 goto out;
1798
1799 guest_timing_enter_irqoff();
1800 guest_state_enter_irqoff();
1801 trace_kvm_enter(vcpu);
1802 r = kvm_loongarch_ops->enter_guest(run, vcpu);
1803
1804 trace_kvm_out(vcpu);
1805 /*
1806 * Guest exit is already recorded in kvm_handle_exit();
1807 * the return value must not be RESUME_GUEST
1808 */
1809 local_irq_enable();
1810 out:
1811 kvm_sigset_deactivate(vcpu);
1812 vcpu_put(vcpu);
1813
1814 return r;
1815 }
1816