// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/kvm_host.h>
#include <linux/entry-kvm.h>
#include <asm/fpu.h>
#include <asm/lbt.h>
#include <asm/loongarch.h>
#include <asm/setup.h>
#include <asm/time.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, int_exits),
	STATS_DESC_COUNTER(VCPU, idle_exits),
	STATS_DESC_COUNTER(VCPU, cpucfg_exits),
	STATS_DESC_COUNTER(VCPU, signal_exits),
	STATS_DESC_COUNTER(VCPU, hypercall_exits)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};

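/*
 * Host/guest PMU context swap helpers. The host PMU state is stashed in
 * the per-CPU kvm_context, while the guest PMU state lives in the vCPU's
 * GCSR image; writing 0 to the perfctrl CSRs (csrwr returns the old
 * value) stops the counters while they are being switched.
 */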
static inline void kvm_save_host_pmu(struct kvm_vcpu *vcpu)
{
	struct kvm_context *context;

	context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
	context->perf_cntr[0] = read_csr_perfcntr0();
	context->perf_cntr[1] = read_csr_perfcntr1();
	context->perf_cntr[2] = read_csr_perfcntr2();
	context->perf_cntr[3] = read_csr_perfcntr3();
	context->perf_ctrl[0] = write_csr_perfctrl0(0);
	context->perf_ctrl[1] = write_csr_perfctrl1(0);
	context->perf_ctrl[2] = write_csr_perfctrl2(0);
	context->perf_ctrl[3] = write_csr_perfctrl3(0);
}

static inline void kvm_restore_host_pmu(struct kvm_vcpu *vcpu)
{
	struct kvm_context *context;

	context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
	write_csr_perfcntr0(context->perf_cntr[0]);
	write_csr_perfcntr1(context->perf_cntr[1]);
	write_csr_perfcntr2(context->perf_cntr[2]);
	write_csr_perfcntr3(context->perf_cntr[3]);
	write_csr_perfctrl0(context->perf_ctrl[0]);
	write_csr_perfctrl1(context->perf_ctrl[1]);
	write_csr_perfctrl2(context->perf_ctrl[2]);
	write_csr_perfctrl3(context->perf_ctrl[3]);
}

static inline void kvm_save_guest_pmu(struct kvm_vcpu *vcpu)
{
	struct loongarch_csrs *csr = vcpu->arch.csr;

	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
}

static inline void kvm_restore_guest_pmu(struct kvm_vcpu *vcpu)
{
	struct loongarch_csrs *csr = vcpu->arch.csr;

	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
}

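/*
 * Grant the guest direct access to the hardware PMU: save the host PMU
 * context, expose PM0..PM(num) to the guest through GCFG.GPERF, then
 * load the guest PMU CSRs. Returns -EINVAL if the guest has no PMU.
 */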
static int kvm_own_pmu(struct kvm_vcpu *vcpu)
{
	unsigned long val;

	if (!kvm_guest_has_pmu(&vcpu->arch))
		return -EINVAL;

	kvm_save_host_pmu(vcpu);

	/* Set PM0-PM(num) to guest */
	val = read_csr_gcfg() & ~CSR_GCFG_GPERF;
	val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT;
	write_csr_gcfg(val);

	kvm_restore_guest_pmu(vcpu);

	return 0;
}

static void kvm_lose_pmu(struct kvm_vcpu *vcpu)
{
	unsigned long val;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (!(vcpu->arch.aux_inuse & KVM_LARCH_PMU))
		return;

	kvm_save_guest_pmu(vcpu);

	/* Disable PMU access from guest */
	write_csr_gcfg(read_csr_gcfg() & ~CSR_GCFG_GPERF);

	/*
	 * Clear KVM_LARCH_PMU if the guest is not using the PMU CSRs when
	 * exiting the guest, so that on the next trap into the guest there
	 * is no PMU CSR context to deal with.
	 */
	val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
	val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
	val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
	val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
	if (!(val & KVM_PMU_EVENT_ENABLED))
		vcpu->arch.aux_inuse &= ~KVM_LARCH_PMU;

	kvm_restore_host_pmu(vcpu);
}

static void kvm_restore_pmu(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.aux_inuse & KVM_LARCH_PMU))
		kvm_make_request(KVM_REQ_PMU, vcpu);
}

static void kvm_check_pmu(struct kvm_vcpu *vcpu)
{
	if (kvm_check_request(KVM_REQ_PMU, vcpu)) {
		kvm_own_pmu(vcpu);
		vcpu->arch.aux_inuse |= KVM_LARCH_PMU;
	}
}

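/*
 * Publish the accumulated steal time to the guest's kvm_steal_time page.
 * The version field acts as a sequence counter: it is odd while an update
 * is in flight, so the guest can retry reads that race with this writer.
 */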
static void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
{
	u32 version;
	u64 steal;
	gpa_t gpa;
	struct kvm_memslots *slots;
	struct kvm_steal_time __user *st;
	struct gfn_to_hva_cache *ghc;

	ghc = &vcpu->arch.st.cache;
	gpa = vcpu->arch.st.guest_addr;
	if (!(gpa & KVM_STEAL_PHYS_VALID))
		return;

	gpa &= KVM_STEAL_PHYS_MASK;
	slots = kvm_memslots(vcpu->kvm);
	if (slots->generation != ghc->generation || gpa != ghc->gpa) {
		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) {
			ghc->gpa = INVALID_GPA;
			return;
		}
	}

	st = (struct kvm_steal_time __user *)ghc->hva;
	unsafe_get_user(version, &st->version, out);
	if (version & 1)
		version += 1; /* first time write, random junk */

	version += 1;
	unsafe_put_user(version, &st->version, out);
	smp_wmb();

	unsafe_get_user(steal, &st->steal, out);
	steal += current->sched_info.run_delay - vcpu->arch.st.last_steal;
	vcpu->arch.st.last_steal = current->sched_info.run_delay;
	unsafe_put_user(steal, &st->steal, out);

	smp_wmb();
	version += 1;
	unsafe_put_user(version, &st->version, out);
out:
	mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
}

/*
 * kvm_check_requests - check and handle pending vCPU requests
 *
 * Return: RESUME_GUEST if we should enter the guest
 *         RESUME_HOST  if we should exit to userspace
 */
static int kvm_check_requests(struct kvm_vcpu *vcpu)
{
	if (!kvm_request_pending(vcpu))
		return RESUME_GUEST;

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		vcpu->arch.vpid = 0; /* Drop vpid for this vCPU */

	if (kvm_dirty_ring_check_request(vcpu))
		return RESUME_HOST;

	if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
		kvm_update_stolen_time(vcpu);

	return RESUME_GUEST;
}

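/*
 * Handle requests that must be processed with interrupts disabled, right
 * before entering the guest. Currently this only covers flushing a single
 * GPA from the guest TLB.
 */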
static void kvm_late_check_requests(struct kvm_vcpu *vcpu)
{
	lockdep_assert_irqs_disabled();
	if (kvm_check_request(KVM_REQ_TLB_FLUSH_GPA, vcpu))
		if (vcpu->arch.flush_gpa != INVALID_GPA) {
			kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa);
			vcpu->arch.flush_gpa = INVALID_GPA;
		}
}

/*
 * Check and handle pending signals and vCPU requests etc.
 * Run with irqs enabled and preemption enabled.
 *
 * Return: RESUME_GUEST if we should enter the guest
 *         RESUME_HOST  if we should exit to userspace
 *         < 0 if we should exit to userspace, where the return value
 *         indicates an error
 */
static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
{
	int ret;

	/*
	 * Check conditions before entering the guest
	 */
	ret = xfer_to_guest_mode_handle_work(vcpu);
	if (ret < 0)
		return ret;

	ret = kvm_check_requests(vcpu);

	return ret;
}

/*
 * Called with irqs enabled
 *
 * Return: RESUME_GUEST if we should enter the guest, with irqs disabled
 *         Others if we should exit to userspace
 */
static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
{
	int ret;

	do {
		ret = kvm_enter_guest_check(vcpu);
		if (ret != RESUME_GUEST)
			break;

		/*
		 * Handle the vCPU timer and interrupts, check requests and
		 * check the VMID before the vCPU enters the guest.
		 */
		local_irq_disable();
		kvm_deliver_intr(vcpu);
		kvm_deliver_exception(vcpu);
		/* Make sure the vcpu mode has been written */
		smp_store_mb(vcpu->mode, IN_GUEST_MODE);
		kvm_check_vpid(vcpu);
		kvm_check_pmu(vcpu);

		/*
		 * Called after kvm_check_vpid(), since that updates
		 * CSR.GSTAT, which is used by kvm_flush_tlb_gpa(); it may
		 * also clear the pending KVM_REQ_TLB_FLUSH_GPA bit.
		 */
		kvm_late_check_requests(vcpu);
		vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY);
		/* Clear KVM_LARCH_SWCSR_LATEST as CSRs will change when entering the guest */
		vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;

		if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) {
			/* Make sure the vcpu mode has been written */
			smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE);
			local_irq_enable();
			ret = -EAGAIN;
		}
	} while (ret != RESUME_GUEST);

	return ret;
}

/*
 * Return 1 to resume the guest and "<= 0" to resume the host.
 */
static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int ret = RESUME_GUEST;
	unsigned long estat = vcpu->arch.host_estat;
	u32 intr = estat & 0x1fff; /* Ignore NMI */
	u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;

	vcpu->mode = OUTSIDE_GUEST_MODE;

	/* Set a default exit reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;

	kvm_lose_pmu(vcpu);

	guest_timing_exit_irqoff();
	guest_state_exit_irqoff();
	local_irq_enable();

	trace_kvm_exit(vcpu, ecode);
	if (ecode) {
		ret = kvm_handle_fault(vcpu, ecode);
	} else {
		WARN(!intr, "vm exiting with suspicious irq\n");
		++vcpu->stat.int_exits;
	}

	if (ret == RESUME_GUEST)
		ret = kvm_pre_enter_guest(vcpu);

	if (ret != RESUME_GUEST) {
		local_irq_disable();
		return ret;
	}

	guest_timing_enter_irqoff();
	guest_state_enter_irqoff();
	trace_kvm_reenter(vcpu);

	return RESUME_GUEST;
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.irq_pending) &&
		vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	int ret;

	/* Protect from TOD sync and vcpu_load/put() */
	preempt_disable();
	ret = kvm_pending_timer(vcpu) ||
		kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT) & (1 << INT_TI);
	preempt_enable();

	return ret;
}

int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
{
	int i;

	kvm_debug("vCPU Register Dump:\n");
	kvm_debug("\tPC = 0x%08lx\n", vcpu->arch.pc);
	kvm_debug("\tExceptions: %08lx\n", vcpu->arch.irq_pending);

	for (i = 0; i < 32; i += 4) {
		kvm_debug("\tGPR%02d: %08lx %08lx %08lx %08lx\n", i,
			  vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1],
			  vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
	}

	kvm_debug("\tCRMD: 0x%08lx, ESTAT: 0x%08lx\n",
		  kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD),
		  kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT));

	kvm_debug("\tERA: 0x%08lx\n", kvm_read_hw_gcsr(LOONGARCH_CSR_ERA));

	return 0;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	*mp_state = vcpu->arch.mp_state;

	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret = 0;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_RUNNABLE:
		vcpu->arch.mp_state = *mp_state;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	if (dbg->control & ~KVM_GUESTDBG_VALID_MASK)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE)
		vcpu->guest_debug = dbg->control;
	else
		vcpu->guest_debug = 0;

	return 0;
}

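/*
 * Bind a physical CPUID to this vCPU. The per-VM phyid_map enforces two
 * invariants: a vCPU's CPUID cannot be changed once set, and no two vCPUs
 * may share the same CPUID. Duplicate writes of the current value are
 * accepted as no-ops.
 */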
static inline int kvm_set_cpuid(struct kvm_vcpu *vcpu, u64 val)
{
	int cpuid;
	struct kvm_phyid_map *map;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (val >= KVM_MAX_PHYID)
		return -EINVAL;

	map = vcpu->kvm->arch.phyid_map;
	cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);

	spin_lock(&vcpu->kvm->arch.phyid_map_lock);
	if ((cpuid < KVM_MAX_PHYID) && map->phys_map[cpuid].enabled) {
		/* Discard duplicated CPUID set operation */
		if (cpuid == val) {
			spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
			return 0;
		}

		/*
		 * CPUID has already been set.
		 * Forbid changing to a different CPUID at runtime.
		 */
		spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
		return -EINVAL;
	}

	if (map->phys_map[val].enabled) {
		/* Discard duplicated CPUID set operation */
		if (vcpu == map->phys_map[val].vcpu) {
			spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
			return 0;
		}

		/*
		 * The new CPUID is already set on another vCPU.
		 * Forbid sharing the same CPUID between different vCPUs.
		 */
		spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
		return -EINVAL;
	}

	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, val);
	map->phys_map[val].enabled = true;
	map->phys_map[val].vcpu = vcpu;
	spin_unlock(&vcpu->kvm->arch.phyid_map_lock);

	return 0;
}

static inline void kvm_drop_cpuid(struct kvm_vcpu *vcpu)
{
	int cpuid;
	struct kvm_phyid_map *map;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	map = vcpu->kvm->arch.phyid_map;
	cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);

	if (cpuid >= KVM_MAX_PHYID)
		return;

	spin_lock(&vcpu->kvm->arch.phyid_map_lock);
	if (map->phys_map[cpuid].enabled) {
		map->phys_map[cpuid].vcpu = NULL;
		map->phys_map[cpuid].enabled = false;
		kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);
	}
	spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
}

struct kvm_vcpu *kvm_get_vcpu_by_cpuid(struct kvm *kvm, int cpuid)
{
	struct kvm_phyid_map *map;

	if (cpuid >= KVM_MAX_PHYID)
		return NULL;

	map = kvm->arch.phyid_map;
	if (!map->phys_map[cpuid].enabled)
		return NULL;

	return map->phys_map[cpuid].vcpu;
}

static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val)
{
	unsigned long gintc;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (get_gcsr_flag(id) & INVALID_GCSR)
		return -EINVAL;

	if (id == LOONGARCH_CSR_ESTAT) {
		preempt_disable();
		vcpu_load(vcpu);
		/*
		 * Sync pending interrupts into ESTAT so that the interrupt
		 * state is preserved across the VM migration stage.
		 */
		kvm_deliver_intr(vcpu);
		vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
		vcpu_put(vcpu);
		preempt_enable();

		/* ESTAT IP0~IP7 are read from GINTC */
		gintc = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC) & 0xff;
		*val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT) | (gintc << 2);
		return 0;
	}

	/*
	 * Get the software CSR state, since the software state is
	 * consistent with the hardware state for a synchronous ioctl.
	 */
	*val = kvm_read_sw_gcsr(csr, id);

	return 0;
}

static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
{
	int ret = 0, gintc;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (get_gcsr_flag(id) & INVALID_GCSR)
		return -EINVAL;

	if (id == LOONGARCH_CSR_CPUID)
		return kvm_set_cpuid(vcpu, val);

	if (id == LOONGARCH_CSR_ESTAT) {
		/* ESTAT IP0~IP7 are injected through GINTC */
		gintc = (val >> 2) & 0xff;
		kvm_set_sw_gcsr(csr, LOONGARCH_CSR_GINTC, gintc);

		gintc = val & ~(0xffUL << 2);
		kvm_set_sw_gcsr(csr, LOONGARCH_CSR_ESTAT, gintc);

		return ret;
	}

	kvm_write_sw_gcsr(csr, id, val);

	/*
	 * After modifying a PMU CSR of the vCPU, set KVM_REQ_PMU if any
	 * PMU event is enabled, so the hardware PMU context is reloaded.
	 */
	if (id >= LOONGARCH_CSR_PERFCTRL0 && id <= LOONGARCH_CSR_PERFCNTR3) {
		unsigned long val;

		val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0) |
		      kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1) |
		      kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2) |
		      kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);

		if (val & KVM_PMU_EVENT_ENABLED)
			kvm_make_request(KVM_REQ_PMU, vcpu);
	}

	return ret;
}

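/*
 * Return the mask of writable bits for a given CPUCFG word. Bits outside
 * the mask are either reserved by hardware or not supported by KVM and
 * must read as zero for the guest.
 */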
static int _kvm_get_cpucfg_mask(int id, u64 *v)
{
	if (id < 0 || id >= KVM_MAX_CPUCFG_REGS)
		return -EINVAL;

	switch (id) {
	case LOONGARCH_CPUCFG0:
		*v = GENMASK(31, 0);
		return 0;
	case LOONGARCH_CPUCFG1:
		/* CPUCFG1_MSGINT is not supported by KVM */
		*v = GENMASK(25, 0);
		return 0;
	case LOONGARCH_CPUCFG2:
		/* CPUCFG2 features unconditionally supported by KVM */
		*v = CPUCFG2_FP | CPUCFG2_FPSP | CPUCFG2_FPDP |
		     CPUCFG2_FPVERS | CPUCFG2_LLFTP | CPUCFG2_LLFTPREV |
		     CPUCFG2_LSPW | CPUCFG2_LAM;
		/*
		 * For the ISA extensions listed below, if one is supported
		 * by the host, then it is also supported by KVM.
		 */
		if (cpu_has_lsx)
			*v |= CPUCFG2_LSX;
		if (cpu_has_lasx)
			*v |= CPUCFG2_LASX;
		if (cpu_has_lbt_x86)
			*v |= CPUCFG2_X86BT;
		if (cpu_has_lbt_arm)
			*v |= CPUCFG2_ARMBT;
		if (cpu_has_lbt_mips)
			*v |= CPUCFG2_MIPSBT;

		return 0;
	case LOONGARCH_CPUCFG3:
		*v = GENMASK(16, 0);
		return 0;
	case LOONGARCH_CPUCFG4:
	case LOONGARCH_CPUCFG5:
		*v = GENMASK(31, 0);
		return 0;
	case LOONGARCH_CPUCFG6:
		if (cpu_has_pmp)
			*v = GENMASK(14, 0);
		else
			*v = 0;
		return 0;
	case LOONGARCH_CPUCFG16:
		*v = GENMASK(16, 0);
		return 0;
	case LOONGARCH_CPUCFG17 ... LOONGARCH_CPUCFG20:
		*v = GENMASK(30, 0);
		return 0;
	default:
		/*
		 * CPUCFG bits should be zero if reserved by HW or not
		 * supported by KVM.
		 */
		*v = 0;
		return 0;
	}
}

static int kvm_check_cpucfg(int id, u64 val)
{
	int ret;
	u64 mask = 0;

	ret = _kvm_get_cpucfg_mask(id, &mask);
	if (ret)
		return ret;

	if (val & ~mask)
		/* Unsupported features and/or the higher 32 bits should not be set */
		return -EINVAL;

	switch (id) {
	case LOONGARCH_CPUCFG2:
		if (!(val & CPUCFG2_LLFTP))
			/* Guests must have a constant timer */
			return -EINVAL;
		if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP)))
			/* Single- and double-precision FP must both be set when FP is enabled */
			return -EINVAL;
		if ((val & CPUCFG2_LSX) && !(val & CPUCFG2_FP))
			/* LSX architecturally implies FP but val does not satisfy that */
			return -EINVAL;
		if ((val & CPUCFG2_LASX) && !(val & CPUCFG2_LSX))
			/* LASX architecturally implies LSX and FP but val does not satisfy that */
			return -EINVAL;
		return 0;
	case LOONGARCH_CPUCFG6:
		if (val & CPUCFG6_PMP) {
			u32 host = read_cpucfg(LOONGARCH_CPUCFG6);
			if ((val & CPUCFG6_PMBITS) != (host & CPUCFG6_PMBITS))
				return -EINVAL;
			if ((val & CPUCFG6_PMNUM) > (host & CPUCFG6_PMNUM))
				return -EINVAL;
			if ((val & CPUCFG6_UPM) && !(host & CPUCFG6_UPM))
				return -EINVAL;
		}
		return 0;
	default:
		/*
		 * Values for the other CPUCFG IDs are not being further
		 * validated besides the mask check above.
		 */
		return 0;
	}
}

static int kvm_get_one_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, u64 *v)
{
	int id, ret = 0;
	u64 type = reg->id & KVM_REG_LOONGARCH_MASK;

	switch (type) {
	case KVM_REG_LOONGARCH_CSR:
		id = KVM_GET_IOC_CSR_IDX(reg->id);
		ret = _kvm_getcsr(vcpu, id, v);
		break;
	case KVM_REG_LOONGARCH_CPUCFG:
		id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
		if (id >= 0 && id < KVM_MAX_CPUCFG_REGS)
			*v = vcpu->arch.cpucfg[id];
		else
			ret = -EINVAL;
		break;
	case KVM_REG_LOONGARCH_LBT:
		if (!kvm_guest_has_lbt(&vcpu->arch))
			return -ENXIO;

		switch (reg->id) {
		case KVM_REG_LOONGARCH_LBT_SCR0:
			*v = vcpu->arch.lbt.scr0;
			break;
		case KVM_REG_LOONGARCH_LBT_SCR1:
			*v = vcpu->arch.lbt.scr1;
			break;
		case KVM_REG_LOONGARCH_LBT_SCR2:
			*v = vcpu->arch.lbt.scr2;
			break;
		case KVM_REG_LOONGARCH_LBT_SCR3:
			*v = vcpu->arch.lbt.scr3;
			break;
		case KVM_REG_LOONGARCH_LBT_EFLAGS:
			*v = vcpu->arch.lbt.eflags;
			break;
		case KVM_REG_LOONGARCH_LBT_FTOP:
			*v = vcpu->arch.fpu.ftop;
			break;
		default:
			ret = -EINVAL;
			break;
		}
		break;
	case KVM_REG_LOONGARCH_KVM:
		switch (reg->id) {
		case KVM_REG_LOONGARCH_COUNTER:
			*v = drdtime() + vcpu->kvm->arch.time_offset;
			break;
		case KVM_REG_LOONGARCH_DEBUG_INST:
			*v = INSN_HVCL | KVM_HCALL_SWDBG;
			break;
		default:
			ret = -EINVAL;
			break;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int kvm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret = 0;
	u64 v, size = reg->id & KVM_REG_SIZE_MASK;

	switch (size) {
	case KVM_REG_SIZE_U64:
		ret = kvm_get_one_reg(vcpu, reg, &v);
		if (ret)
			return ret;
		ret = put_user(v, (u64 __user *)(long)reg->addr);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, u64 v)
{
	int id, ret = 0;
	u64 type = reg->id & KVM_REG_LOONGARCH_MASK;

	switch (type) {
	case KVM_REG_LOONGARCH_CSR:
		id = KVM_GET_IOC_CSR_IDX(reg->id);
		ret = _kvm_setcsr(vcpu, id, v);
		break;
	case KVM_REG_LOONGARCH_CPUCFG:
		id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
		ret = kvm_check_cpucfg(id, v);
		if (ret)
			break;
		vcpu->arch.cpucfg[id] = (u32)v;
		if (id == LOONGARCH_CPUCFG6)
			vcpu->arch.max_pmu_csrid =
				LOONGARCH_CSR_PERFCTRL0 + 2 * kvm_get_pmu_num(&vcpu->arch) + 1;
		break;
	case KVM_REG_LOONGARCH_LBT:
		if (!kvm_guest_has_lbt(&vcpu->arch))
			return -ENXIO;

		switch (reg->id) {
		case KVM_REG_LOONGARCH_LBT_SCR0:
			vcpu->arch.lbt.scr0 = v;
			break;
		case KVM_REG_LOONGARCH_LBT_SCR1:
			vcpu->arch.lbt.scr1 = v;
			break;
		case KVM_REG_LOONGARCH_LBT_SCR2:
			vcpu->arch.lbt.scr2 = v;
			break;
		case KVM_REG_LOONGARCH_LBT_SCR3:
			vcpu->arch.lbt.scr3 = v;
			break;
		case KVM_REG_LOONGARCH_LBT_EFLAGS:
			vcpu->arch.lbt.eflags = v;
			break;
		case KVM_REG_LOONGARCH_LBT_FTOP:
			vcpu->arch.fpu.ftop = v;
			break;
		default:
			ret = -EINVAL;
			break;
		}
		break;
	case KVM_REG_LOONGARCH_KVM:
		switch (reg->id) {
		case KVM_REG_LOONGARCH_COUNTER:
			/*
			 * The guest timer offset is relative to the board,
			 * not to a vCPU; on an SMP system only set it for
			 * the first vCPU.
			 */
			if (vcpu->vcpu_id == 0)
				vcpu->kvm->arch.time_offset = (signed long)(v - drdtime());
			break;
		case KVM_REG_LOONGARCH_VCPU_RESET:
			vcpu->arch.st.guest_addr = 0;
			memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
			memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
			break;
		default:
			ret = -EINVAL;
			break;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int kvm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret = 0;
	u64 v, size = reg->id & KVM_REG_SIZE_MASK;

	switch (size) {
	case KVM_REG_SIZE_U64:
		ret = get_user(v, (u64 __user *)(long)reg->addr);
		if (ret)
			return ret;
		break;
	default:
		return -EINVAL;
	}

	return kvm_set_one_reg(vcpu, reg, v);
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		regs->gpr[i] = vcpu->arch.gprs[i];

	regs->pc = vcpu->arch.pc;

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		vcpu->arch.gprs[i] = regs->gpr[i];

	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
	vcpu->arch.pc = regs->pc;

	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	/* FPU is enabled by default, will support LSX/LASX later. */
	return -EINVAL;
}

static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu,
					 struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case LOONGARCH_CPUCFG2:
	case LOONGARCH_CPUCFG6:
		return 0;
	case CPUCFG_KVM_FEATURE:
		return 0;
	default:
		return -ENXIO;
	}

	return -ENXIO;
}

static int kvm_loongarch_pvtime_has_attr(struct kvm_vcpu *vcpu,
					 struct kvm_device_attr *attr)
{
	if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
			|| attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
		return -ENXIO;

	return 0;
}

static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
				       struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	case KVM_LOONGARCH_VCPU_CPUCFG:
		ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr);
		break;
	case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
		ret = kvm_loongarch_pvtime_has_attr(vcpu, attr);
		break;
	default:
		break;
	}

	return ret;
}

static int kvm_loongarch_cpucfg_get_attr(struct kvm_vcpu *vcpu,
					 struct kvm_device_attr *attr)
{
	int ret = 0;
	uint64_t val;
	uint64_t __user *uaddr = (uint64_t __user *)attr->addr;

	switch (attr->attr) {
	case 0 ... (KVM_MAX_CPUCFG_REGS - 1):
		ret = _kvm_get_cpucfg_mask(attr->attr, &val);
		if (ret)
			return ret;
		break;
	case CPUCFG_KVM_FEATURE:
		val = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK;
		break;
	default:
		return -ENXIO;
	}

	put_user(val, uaddr);

	return ret;
}

static int kvm_loongarch_pvtime_get_attr(struct kvm_vcpu *vcpu,
					 struct kvm_device_attr *attr)
{
	u64 gpa;
	u64 __user *user = (u64 __user *)attr->addr;

	if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
			|| attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
		return -ENXIO;

	gpa = vcpu->arch.st.guest_addr;
	if (put_user(gpa, user))
		return -EFAULT;

	return 0;
}

static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu,
				       struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	case KVM_LOONGARCH_VCPU_CPUCFG:
		ret = kvm_loongarch_cpucfg_get_attr(vcpu, attr);
		break;
	case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
		ret = kvm_loongarch_pvtime_get_attr(vcpu, attr);
		break;
	default:
		break;
	}

	return ret;
}

static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu,
					 struct kvm_device_attr *attr)
{
	u64 val, valid;
	u64 __user *user = (u64 __user *)attr->addr;
	struct kvm *kvm = vcpu->kvm;

	switch (attr->attr) {
	case CPUCFG_KVM_FEATURE:
		if (get_user(val, user))
			return -EFAULT;

		valid = LOONGARCH_PV_FEAT_MASK;
		if (val & ~valid)
			return -EINVAL;

		/* All vCPUs must have the same PV features set */
		if ((kvm->arch.pv_features & LOONGARCH_PV_FEAT_UPDATED)
				&& ((kvm->arch.pv_features & valid) != val))
			return -EINVAL;
		kvm->arch.pv_features = val | LOONGARCH_PV_FEAT_UPDATED;
		return 0;
	default:
		return -ENXIO;
	}
}

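/*
 * Userspace registers the steal-time page through the device-attr API.
 * A minimal sketch (vcpu_fd and STEAL_TIME_GPA are placeholders, error
 * handling omitted):
 *
 *	__u64 gpa = STEAL_TIME_GPA | KVM_STEAL_PHYS_VALID;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_LOONGARCH_VCPU_PVTIME_CTRL,
 *		.attr  = KVM_LOONGARCH_VCPU_PVTIME_GPA,
 *		.addr  = (__u64)&gpa,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
 */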
static int kvm_loongarch_pvtime_set_attr(struct kvm_vcpu *vcpu,
					 struct kvm_device_attr *attr)
{
	int idx, ret = 0;
	u64 gpa, __user *user = (u64 __user *)attr->addr;
	struct kvm *kvm = vcpu->kvm;

	if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
			|| attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
		return -ENXIO;

	if (get_user(gpa, user))
		return -EFAULT;

	if (gpa & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
		return -EINVAL;

	if (!(gpa & KVM_STEAL_PHYS_VALID)) {
		vcpu->arch.st.guest_addr = gpa;
		return 0;
	}

	/* Check the address is in a valid memslot */
	idx = srcu_read_lock(&kvm->srcu);
	if (kvm_is_error_hva(gfn_to_hva(kvm, gpa >> PAGE_SHIFT)))
		ret = -EINVAL;
	srcu_read_unlock(&kvm->srcu, idx);

	if (!ret) {
		vcpu->arch.st.guest_addr = gpa;
		vcpu->arch.st.last_steal = current->sched_info.run_delay;
		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
	}

	return ret;
}

static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu,
				       struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	case KVM_LOONGARCH_VCPU_CPUCFG:
		ret = kvm_loongarch_cpucfg_set_attr(vcpu, attr);
		break;
	case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
		ret = kvm_loongarch_pvtime_set_attr(vcpu, attr);
		break;
	default:
		break;
	}

	return ret;
}

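/*
 * Registers are exposed to userspace through the generic ONE_REG API.
 * A minimal sketch reading the guest stable counter (vcpu_fd is a
 * placeholder for an open vCPU file descriptor):
 *
 *	__u64 counter;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_LOONGARCH_COUNTER,
 *		.addr = (__u64)&counter,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */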
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	long r;
	struct kvm_device_attr attr;
	void __user *argp = (void __user *)arg;
	struct kvm_vcpu *vcpu = filp->private_data;

	/*
	 * Only the software CSR state should be modified here.
	 *
	 * If any hardware CSR register is modified, the vcpu_load/vcpu_put
	 * pair should be used. Since the CSR registers are owned by this
	 * vCPU, when switching to other vCPUs those vCPUs need to reload
	 * the CSR registers.
	 *
	 * If the software CSR state is modified, bit KVM_LARCH_HWCSR_USABLE
	 * should be cleared in vcpu->arch.aux_inuse, and vcpu_load will
	 * check the aux_inuse flag and reload the CSR registers from the
	 * software state.
	 */

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG) {
			r = kvm_set_reg(vcpu, &reg);
			vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
		} else
			r = kvm_get_reg(vcpu, &reg);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_loongarch_vcpu_has_attr(vcpu, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_loongarch_vcpu_get_attr(vcpu, &attr);
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_loongarch_vcpu_set_attr(vcpu, &attr);
		break;
	}
	default:
		r = -ENOIOCTLCMD;
		break;
	}

	return r;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	int i = 0;

	fpu->fcc = vcpu->arch.fpu.fcc;
	fpu->fcsr = vcpu->arch.fpu.fcsr;
	for (i = 0; i < NUM_FPU_REGS; i++)
		memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i], FPU_REG_WIDTH / 64);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	int i = 0;

	vcpu->arch.fpu.fcc = fpu->fcc;
	vcpu->arch.fpu.fcsr = fpu->fcsr;
	for (i = 0; i < NUM_FPU_REGS; i++)
		memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i], FPU_REG_WIDTH / 64);

	return 0;
}

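/*
 * LBT (binary translation) state is handled lazily: the guest owns the
 * hardware LBT registers only between kvm_own_lbt() and kvm_lose_lbt(),
 * tracked by the KVM_LARCH_LBT bit in aux_inuse.
 */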
#ifdef CONFIG_CPU_HAS_LBT
int kvm_own_lbt(struct kvm_vcpu *vcpu)
{
	if (!kvm_guest_has_lbt(&vcpu->arch))
		return -EINVAL;

	preempt_disable();
	set_csr_euen(CSR_EUEN_LBTEN);
	_restore_lbt(&vcpu->arch.lbt);
	vcpu->arch.aux_inuse |= KVM_LARCH_LBT;
	preempt_enable();

	return 0;
}

static void kvm_lose_lbt(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) {
		_save_lbt(&vcpu->arch.lbt);
		clear_csr_euen(CSR_EUEN_LBTEN);
		vcpu->arch.aux_inuse &= ~KVM_LARCH_LBT;
	}
	preempt_enable();
}

static void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr)
{
	/*
	 * If TM is enabled, saving/restoring the top register will cause
	 * an LBT exception, so enable LBT in advance here.
	 */
	if (fcsr & FPU_CSR_TM)
		kvm_own_lbt(vcpu);
}

static void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
		if (vcpu->arch.aux_inuse & KVM_LARCH_LBT)
			return;
		kvm_check_fcsr(vcpu, read_fcsr(LOONGARCH_FCSR0));
	}
}
#else
static inline void kvm_lose_lbt(struct kvm_vcpu *vcpu) { }
static inline void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr) { }
static inline void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu) { }
#endif

/* Enable FPU and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();

	/*
	 * Enable FPU for guest
	 * Set FR and FRE according to guest context
	 */
	kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
	set_csr_euen(CSR_EUEN_FPEN);

	kvm_restore_fpu(&vcpu->arch.fpu);
	vcpu->arch.aux_inuse |= KVM_LARCH_FPU;
	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);

	preempt_enable();
}

#ifdef CONFIG_CPU_HAS_LSX
/* Enable LSX and restore context */
int kvm_own_lsx(struct kvm_vcpu *vcpu)
{
	if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch))
		return -EINVAL;

	preempt_disable();

	/* Enable LSX for guest */
	kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
	set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN);
	switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
	case KVM_LARCH_FPU:
		/*
		 * Guest FPU state already loaded,
		 * only restore upper LSX state
		 */
		_restore_lsx_upper(&vcpu->arch.fpu);
		break;
	default:
		/*
		 * Neither FP nor LSX already active,
		 * restore full LSX state
		 */
		kvm_restore_lsx(&vcpu->arch.fpu);
		break;
	}

	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX);
	vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU;
	preempt_enable();

	return 0;
}
#endif

#ifdef CONFIG_CPU_HAS_LASX
/* Enable LASX and restore context */
int kvm_own_lasx(struct kvm_vcpu *vcpu)
{
	if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch) || !kvm_guest_has_lasx(&vcpu->arch))
		return -EINVAL;

	preempt_disable();

	kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
	set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
	switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) {
	case KVM_LARCH_LSX:
	case KVM_LARCH_LSX | KVM_LARCH_FPU:
		/* Guest LSX state already loaded, only restore upper LASX state */
		_restore_lasx_upper(&vcpu->arch.fpu);
		break;
	case KVM_LARCH_FPU:
		/* Guest FP state already loaded, only restore upper LSX & LASX state */
		_restore_lsx_upper(&vcpu->arch.fpu);
		_restore_lasx_upper(&vcpu->arch.fpu);
		break;
	default:
		/* Neither FP nor LSX already active, restore full LASX state */
		kvm_restore_lasx(&vcpu->arch.fpu);
		break;
	}

	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX);
	vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU;
	preempt_enable();

	return 0;
}
#endif

/* Save context and disable FPU */
void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();

	kvm_check_fcsr_alive(vcpu);
	if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) {
		kvm_save_lasx(&vcpu->arch.fpu);
		vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LASX);

		/* Disable LASX & LSX & FPU */
		clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
	} else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) {
		kvm_save_lsx(&vcpu->arch.fpu);
		vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX);

		/* Disable LSX & FPU */
		clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN);
	} else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
		kvm_save_fpu(&vcpu->arch.fpu);
		vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);

		/* Disable FPU */
		clear_csr_euen(CSR_EUEN_FPEN);
	}
	kvm_lose_lbt(vcpu);

	preempt_enable();
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	int intr = (int)irq->irq;

	if (intr > 0)
		kvm_queue_irq(vcpu, intr);
	else if (intr < 0)
		kvm_dequeue_irq(vcpu, -intr);
	else {
		kvm_err("%s: invalid interrupt ioctl %d\n", __func__, irq->irq);
		return -EINVAL;
	}

	kvm_vcpu_kick(vcpu);

	return 0;
}

long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct kvm_vcpu *vcpu = filp->private_data;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_interrupt irq;

		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;

		kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq);

		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}

	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	unsigned long timer_hz;
	struct loongarch_csrs *csr;

	vcpu->arch.vpid = 0;
	vcpu->arch.flush_gpa = INVALID_GPA;

	hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD);
	vcpu->arch.swtimer.function = kvm_swtimer_wakeup;

	vcpu->arch.handle_exit = kvm_handle_exit;
	vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry;
	vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL);
	if (!vcpu->arch.csr)
		return -ENOMEM;

	/*
	 * All KVM exceptions share one exception entry, and the host <->
	 * guest switch also switches the ECFG.VS field, so keep the host
	 * ECFG.VS info here.
	 */
	vcpu->arch.host_ecfg = (read_csr_ecfg() & CSR_ECFG_VS);

	/* Init */
	vcpu->arch.last_sched_cpu = -1;

	/*
	 * Initialize guest register state to valid architectural reset state.
	 */
	timer_hz = calc_const_freq();
	kvm_init_timer(vcpu, timer_hz);

	/* Set Initialize mode for guest */
	csr = vcpu->arch.csr;
	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CRMD, CSR_CRMD_DA);

	/* Set cpuid */
	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id);
	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);

	/* Start with no pending virtual guest interrupts */
	csr->csrs[LOONGARCH_CSR_GINTC] = 0;

	return 0;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	int cpu;
	struct kvm_context *context;

	hrtimer_cancel(&vcpu->arch.swtimer);
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
	kvm_drop_cpuid(vcpu);
	kfree(vcpu->arch.csr);

	/*
	 * If the vCPU is freed and reused as another vCPU, we don't want the
	 * matching pointer wrongly hanging around in last_vcpu.
	 */
	for_each_possible_cpu(cpu) {
		context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
		if (context->last_vcpu == vcpu)
			context->last_vcpu = NULL;
	}
}

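/*
 * Load this vCPU's guest state onto the current physical CPU. The full
 * GCSR reload is skipped when KVM_LARCH_HWCSR_USABLE indicates the
 * hardware still holds this vCPU's CSR state (no migration, and this
 * vCPU was the last to run here).
 */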
static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	bool migrated;
	struct kvm_context *context;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	/*
	 * Have we migrated to a different CPU?
	 * If so, any old guest TLB state may be stale.
	 */
	migrated = (vcpu->arch.last_sched_cpu != cpu);

	/*
	 * Was this the last vCPU to run on this CPU?
	 * If not, any old guest state from this vCPU will have been clobbered.
	 */
	context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
	if (migrated || (context->last_vcpu != vcpu))
		vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
	context->last_vcpu = vcpu;

	/* Restore timer state regardless */
	kvm_restore_timer(vcpu);

	/* Control guest page CCA attribute */
	change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT);
	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);

	/* Restore hardware PMU CSRs */
	kvm_restore_pmu(vcpu);

	/* Don't bother restoring registers multiple times unless necessary */
	if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE)
		return 0;

	write_csr_gcntc((ulong)vcpu->kvm->arch.time_offset);

	/* Restore guest CSR registers */
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_MISC);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ERA);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADV);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADI);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ASID);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS2);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS3);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS4);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS5);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS6);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS7);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TMID);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);

	/* Restore Root.GINTC from unused Guest.GINTC register */
	write_csr_gintc(csr->csrs[LOONGARCH_CSR_GINTC]);

	/*
	 * We should clear the linked load bit to break interrupted atomics.
	 * This prevents an SC on the next vCPU from succeeding by matching
	 * an LL on the previous vCPU.
	 */
	if (vcpu->kvm->created_vcpus > 1)
		set_gcsr_llbctl(CSR_LLBCTL_WCLLB);

	vcpu->arch.aux_inuse |= KVM_LARCH_HWCSR_USABLE;

	return 0;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	/* Restore guest state to registers */
	_kvm_vcpu_load(vcpu, cpu);
	local_irq_restore(flags);
}

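/*
 * Save this vCPU's guest state back into the software CSR image on
 * schedule-out. FPU/SIMD and LBT state is saved first via kvm_lose_fpu();
 * the GCSR dump is skipped when the software copy is already up to date.
 */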
static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
	struct loongarch_csrs *csr = vcpu->arch.csr;

	kvm_lose_fpu(vcpu);

	/*
	 * Update the CSR state from hardware if the software CSR state is
	 * stale. Most CSR registers are kept unchanged during a process
	 * context switch, except for CSR registers like the remaining
	 * timer tick value and the injected interrupt state.
	 */
	if (vcpu->arch.aux_inuse & KVM_LARCH_SWCSR_LATEST)
		goto out;

	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_MISC);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ERA);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADV);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADI);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ASID);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG2);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG3);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS2);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS3);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS4);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS5);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS6);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS7);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TMID);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);

	vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST;

out:
	kvm_save_timer(vcpu);
	/* Save Root.GINTC into unused Guest.GINTC register */
	csr->csrs[LOONGARCH_CSR_GINTC] = read_csr_gintc();

	return 0;
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	int cpu;
	unsigned long flags;

	local_irq_save(flags);
	cpu = smp_processor_id();
	vcpu->arch.last_sched_cpu = cpu;

	/* Save guest state in registers */
	_kvm_vcpu_put(vcpu, cpu);
	local_irq_restore(flags);
}

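/*
 * Main vCPU run-loop entry: complete any outstanding MMIO/IOCSR read,
 * load guest context, and enter the guest via kvm_loongarch_ops until
 * an exit occurs that must be handled in userspace.
 */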
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	int r = -EINTR;
	struct kvm_run *run = vcpu->run;

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvm_complete_mmio_read(vcpu, run);
		vcpu->mmio_needed = 0;
	}

	if (run->exit_reason == KVM_EXIT_LOONGARCH_IOCSR) {
		if (!run->iocsr_io.is_write)
			kvm_complete_iocsr_read(vcpu, run);
	}

	if (!vcpu->wants_to_run)
		return r;

	/* Clear exit_reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;
	lose_fpu(1);
	vcpu_load(vcpu);
	kvm_sigset_activate(vcpu);
	r = kvm_pre_enter_guest(vcpu);
	if (r != RESUME_GUEST)
		goto out;

	guest_timing_enter_irqoff();
	guest_state_enter_irqoff();
	trace_kvm_enter(vcpu);
	r = kvm_loongarch_ops->enter_guest(run, vcpu);

	trace_kvm_out(vcpu);
	/*
	 * The guest exit is already recorded in kvm_handle_exit(); the
	 * return value must not be RESUME_GUEST here.
	 */
	local_irq_enable();
out:
	kvm_sigset_deactivate(vcpu);
	vcpu_put(vcpu);

	return r;
}