// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/kvm_host.h>
#include <asm/fpu.h>
#include <asm/lbt.h>
#include <asm/loongarch.h>
#include <asm/setup.h>
#include <asm/time.h>
#include <asm/timex.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

const struct kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, int_exits),
	STATS_DESC_COUNTER(VCPU, idle_exits),
	STATS_DESC_COUNTER(VCPU, cpucfg_exits),
	STATS_DESC_COUNTER(VCPU, signal_exits),
	STATS_DESC_COUNTER(VCPU, hypercall_exits),
	STATS_DESC_COUNTER(VCPU, ipi_read_exits),
	STATS_DESC_COUNTER(VCPU, ipi_write_exits),
	STATS_DESC_COUNTER(VCPU, eiointc_read_exits),
	STATS_DESC_COUNTER(VCPU, eiointc_write_exits),
	STATS_DESC_COUNTER(VCPU, pch_pic_read_exits),
	STATS_DESC_COUNTER(VCPU, pch_pic_write_exits)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};

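/*
 * Save the host PMU context and stop the host counters before the PMU is
 * handed to the guest. Note: write_csr_perfctrlN(0) is relied upon here to
 * return the previous control value while disabling the counter (a LoongArch
 * CSR write exchanges the old value), so saving and stopping happen in one
 * step.
 */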
static inline void kvm_save_host_pmu(struct kvm_vcpu *vcpu)
{
	struct kvm_context *context;

	context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
	context->perf_cntr[0] = read_csr_perfcntr0();
	context->perf_cntr[1] = read_csr_perfcntr1();
	context->perf_cntr[2] = read_csr_perfcntr2();
	context->perf_cntr[3] = read_csr_perfcntr3();
	context->perf_ctrl[0] = write_csr_perfctrl0(0);
	context->perf_ctrl[1] = write_csr_perfctrl1(0);
	context->perf_ctrl[2] = write_csr_perfctrl2(0);
	context->perf_ctrl[3] = write_csr_perfctrl3(0);
}

static inline void kvm_restore_host_pmu(struct kvm_vcpu *vcpu)
{
	struct kvm_context *context;

	context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
	write_csr_perfcntr0(context->perf_cntr[0]);
	write_csr_perfcntr1(context->perf_cntr[1]);
	write_csr_perfcntr2(context->perf_cntr[2]);
	write_csr_perfcntr3(context->perf_cntr[3]);
	write_csr_perfctrl0(context->perf_ctrl[0]);
	write_csr_perfctrl1(context->perf_ctrl[1]);
	write_csr_perfctrl2(context->perf_ctrl[2]);
	write_csr_perfctrl3(context->perf_ctrl[3]);
}

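/*
 * Save the guest PMU context into the software GCSR state. The control
 * registers are read-and-cleared so that guest counters stop counting
 * while the host owns the PMU.
 */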
static inline void kvm_save_guest_pmu(struct kvm_vcpu *vcpu)
{
	struct loongarch_csrs *csr = vcpu->arch.csr;

	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
}

static inline void kvm_restore_guest_pmu(struct kvm_vcpu *vcpu)
{
	struct loongarch_csrs *csr = vcpu->arch.csr;

	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
}

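/*
 * Grant the guest direct access to the hardware PMU: save the host PMU
 * context, then program the GCFG.GPERF field with the number of counters
 * implemented for the guest and load the guest PMU context.
 */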
static int kvm_own_pmu(struct kvm_vcpu *vcpu)
{
	unsigned long val;

	if (!kvm_guest_has_pmu(&vcpu->arch))
		return -EINVAL;

	kvm_save_host_pmu(vcpu);

	/* Set PM0-PM(num) to guest */
	val = read_csr_gcfg() & ~CSR_GCFG_GPERF;
	val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT;
	write_csr_gcfg(val);

	kvm_restore_guest_pmu(vcpu);

	return 0;
}

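/*
 * Return the PMU to the host: save the guest PMU context, revoke guest PMU
 * access through GCFG.GPERF and restore the host PMU context. Does nothing
 * unless the guest currently owns the PMU (KVM_LARCH_PMU).
 */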
static void kvm_lose_pmu(struct kvm_vcpu *vcpu)
{
	unsigned long val;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (!(vcpu->arch.aux_inuse & KVM_LARCH_PMU))
		return;

	kvm_save_guest_pmu(vcpu);

	/* Disable pmu access from guest */
	write_csr_gcfg(read_csr_gcfg() & ~CSR_GCFG_GPERF);

	/*
	 * Clear KVM_LARCH_PMU if the guest is not using the PMU CSRs when
	 * exiting the guest, so that the PMU context need not be handled
	 * on the next trap into the guest.
	 *
	 * Otherwise set the request bit KVM_REQ_PMU to restore the guest
	 * PMU context before entering the guest VM.
	 */
	val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
	val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
	val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
	val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
	if (!(val & KVM_PMU_EVENT_ENABLED))
		vcpu->arch.aux_inuse &= ~KVM_LARCH_PMU;
	else
		kvm_make_request(KVM_REQ_PMU, vcpu);

	kvm_restore_host_pmu(vcpu);
}

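/*
 * Publish updated steal time to the guest. The version field works like a
 * seqcount: it is odd while an update is in progress and even when the
 * record is consistent, so the guest can retry reads that race with this
 * writer.
 */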
static void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
{
	u32 version;
	u64 steal;
	gpa_t gpa;
	struct kvm_memslots *slots;
	struct kvm_steal_time __user *st;
	struct gfn_to_hva_cache *ghc;

	ghc = &vcpu->arch.st.cache;
	gpa = vcpu->arch.st.guest_addr;
	if (!(gpa & KVM_STEAL_PHYS_VALID))
		return;

	gpa &= KVM_STEAL_PHYS_MASK;
	slots = kvm_memslots(vcpu->kvm);
	if (slots->generation != ghc->generation || gpa != ghc->gpa) {
		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) {
			ghc->gpa = INVALID_GPA;
			return;
		}
	}

	st = (struct kvm_steal_time __user *)ghc->hva;
	if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_PREEMPT)) {
		unsafe_put_user(0, &st->preempted, out);
		vcpu->arch.st.preempted = 0;
	}

	unsafe_get_user(version, &st->version, out);
	if (version & 1)
		version += 1; /* first time write, random junk */

	version += 1;
	unsafe_put_user(version, &st->version, out);
	smp_wmb();

	unsafe_get_user(steal, &st->steal, out);
	steal += current->sched_info.run_delay - vcpu->arch.st.last_steal;
	vcpu->arch.st.last_steal = current->sched_info.run_delay;
	unsafe_put_user(steal, &st->steal, out);

	smp_wmb();
	version += 1;
	unsafe_put_user(version, &st->version, out);
out:
	mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
}

/*
 * kvm_check_requests - check and handle pending vCPU requests
 *
 * Return: RESUME_GUEST if we should enter the guest
 *         RESUME_HOST  if we should exit to userspace
 */
static int kvm_check_requests(struct kvm_vcpu *vcpu)
{
	if (!kvm_request_pending(vcpu))
		return RESUME_GUEST;

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		vcpu->arch.vpid = 0; /* Drop vpid for this vCPU */

	if (kvm_dirty_ring_check_request(vcpu))
		return RESUME_HOST;

	if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
		kvm_update_stolen_time(vcpu);

	return RESUME_GUEST;
}

static void kvm_late_check_requests(struct kvm_vcpu *vcpu)
{
	lockdep_assert_irqs_disabled();

	if (!kvm_request_pending(vcpu))
		return;

	if (kvm_check_request(KVM_REQ_PMU, vcpu)) {
		kvm_own_pmu(vcpu);
		vcpu->arch.aux_inuse |= KVM_LARCH_PMU;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH_GPA, vcpu))
		if (vcpu->arch.flush_gpa != INVALID_GPA) {
			kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa);
			vcpu->arch.flush_gpa = INVALID_GPA;
		}

	if (kvm_check_request(KVM_REQ_AUX_LOAD, vcpu)) {
		switch (vcpu->arch.aux_ldtype) {
		case KVM_LARCH_FPU:
			kvm_own_fpu(vcpu);
			break;
		case KVM_LARCH_LSX:
			kvm_own_lsx(vcpu);
			break;
		case KVM_LARCH_LASX:
			kvm_own_lasx(vcpu);
			break;
		case KVM_LARCH_LBT:
			kvm_own_lbt(vcpu);
			break;
		default:
			break;
		}

		vcpu->arch.aux_ldtype = 0;
	}
}

/*
 * Check and handle pending signals and vCPU requests etc.
 * Run with irqs enabled and preemption enabled.
 *
 * Return: RESUME_GUEST if we should enter the guest
 *         RESUME_HOST  if we should exit to userspace
 *         < 0 if we should exit to userspace, where the return value
 *         indicates an error
 */
static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
{
	int idx, ret;

	/*
	 * Check conditions before entering the guest
	 */
	ret = kvm_xfer_to_guest_mode_handle_work(vcpu);
	if (ret < 0)
		return ret;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_check_requests(vcpu);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return ret;
}

/*
 * Called with irqs enabled
 *
 * Return: RESUME_GUEST if we should enter the guest, with irqs disabled
 *         Others if we should exit to userspace
 */
static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
{
	int ret;

	do {
		ret = kvm_enter_guest_check(vcpu);
		if (ret != RESUME_GUEST)
			break;

		/*
		 * Handle the vcpu timer, deliver interrupts and exceptions,
		 * and check the vmid before the vcpu enters guest mode.
		 */
		local_irq_disable();
		kvm_deliver_intr(vcpu);
		kvm_deliver_exception(vcpu);
		/* Make sure the vcpu mode has been written */
		smp_store_mb(vcpu->mode, IN_GUEST_MODE);
		kvm_check_vpid(vcpu);

		/*
		 * Called after kvm_check_vpid(), since that updates the
		 * CSR.GSTAT state used by kvm_flush_tlb_gpa(), and it may
		 * also clear the KVM_REQ_TLB_FLUSH_GPA pending bit.
		 */
		kvm_late_check_requests(vcpu);
		/* Clear KVM_LARCH_SWCSR_LATEST as CSR will change when enter guest */
		vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;

		if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) {
			if (vcpu->arch.aux_inuse & KVM_LARCH_PMU) {
				kvm_lose_pmu(vcpu);
				kvm_make_request(KVM_REQ_PMU, vcpu);
			}
			/* Make sure the vcpu mode has been written */
			smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE);
			local_irq_enable();
			ret = -EAGAIN;
		}
	} while (ret != RESUME_GUEST);

	return ret;
}

/*
 * Return 1 for resume guest and "<= 0" for resume host.
 */
static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int ret = RESUME_GUEST;
	unsigned long estat = vcpu->arch.host_estat;
	u32 intr = estat & CSR_ESTAT_IS;
	u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;

	vcpu->mode = OUTSIDE_GUEST_MODE;

	/* Set a default exit reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;

	kvm_lose_pmu(vcpu);

	guest_timing_exit_irqoff();
	guest_state_exit_irqoff();
	local_irq_enable();

	trace_kvm_exit(vcpu, ecode);
	if (ecode) {
		ret = kvm_handle_fault(vcpu, ecode);
	} else {
		WARN(!intr, "vm exiting with suspicious irq\n");
		++vcpu->stat.int_exits;
	}

	if (ret == RESUME_GUEST)
		ret = kvm_pre_enter_guest(vcpu);

	if (ret != RESUME_GUEST) {
		local_irq_disable();
		return ret;
	}

	guest_timing_enter_irqoff();
	guest_state_enter_irqoff();
	trace_kvm_reenter(vcpu);

	return RESUME_GUEST;
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.irq_pending) &&
		vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	unsigned long val;

	preempt_disable();
	val = gcsr_read(LOONGARCH_CSR_CRMD);
	preempt_enable();

	return (val & CSR_CRMD_PLV) == PLV_KERN;
}

#ifdef CONFIG_GUEST_PERF_EVENTS
unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.pc;
}

/*
 * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
 * arrived in guest context. For LoongArch64, if the PMU is not passed through
 * to the VM, any event that arrives while a vCPU is loaded is considered to be
 * "in guest".
 */
bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
{
	return (vcpu && !(vcpu->arch.aux_inuse & KVM_LARCH_PMU));
}
#endif

bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	int ret;

	/* Protect from TOD sync and vcpu_load/put() */
	preempt_disable();
	ret = kvm_pending_timer(vcpu) ||
		kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT) & (1 << INT_TI);
	preempt_enable();

	return ret;
}

int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
{
	int i;

	kvm_debug("vCPU Register Dump:\n");
	kvm_debug("\tPC = 0x%08lx\n", vcpu->arch.pc);
	kvm_debug("\tExceptions: %08lx\n", vcpu->arch.irq_pending);

	for (i = 0; i < 32; i += 4) {
		kvm_debug("\tGPR%02d: %08lx %08lx %08lx %08lx\n", i,
			  vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1],
			  vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
	}

	kvm_debug("\tCRMD: 0x%08lx, ESTAT: 0x%08lx\n",
		  kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD),
		  kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT));

	kvm_debug("\tERA: 0x%08lx\n", kvm_read_hw_gcsr(LOONGARCH_CSR_ERA));

	return 0;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	*mp_state = vcpu->arch.mp_state;

	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret = 0;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_RUNNABLE:
		vcpu->arch.mp_state = *mp_state;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	if (dbg->control & ~KVM_GUESTDBG_VALID_MASK)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE)
		vcpu->guest_debug = dbg->control;
	else
		vcpu->guest_debug = 0;

	return 0;
}

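/*
 * Bind a physical CPUID to this vCPU. The phyid_map enforces a one-to-one
 * mapping: a vCPU may not change its CPUID once set, and two vCPUs may not
 * share the same CPUID.
 */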
static inline int kvm_set_cpuid(struct kvm_vcpu *vcpu, u64 val)
{
	int cpuid;
	struct kvm_phyid_map *map;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (val >= KVM_MAX_PHYID)
		return -EINVAL;

	map = vcpu->kvm->arch.phyid_map;
	cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);

	spin_lock(&vcpu->kvm->arch.phyid_map_lock);
	if ((cpuid < KVM_MAX_PHYID) && map->phys_map[cpuid].enabled) {
		/* Discard duplicated CPUID set operation */
		if (cpuid == val) {
			spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
			return 0;
		}

		/*
		 * CPUID is already set; forbid changing to a different
		 * CPUID at runtime.
		 */
		spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
		return -EINVAL;
	}

	if (map->phys_map[val].enabled) {
		/* Discard duplicated CPUID set operation */
		if (vcpu == map->phys_map[val].vcpu) {
			spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
			return 0;
		}

		/*
		 * The new CPUID is already taken by another vcpu; forbid
		 * sharing the same CPUID between different vcpus.
		 */
		spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
		return -EINVAL;
	}

	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, val);
	map->phys_map[val].enabled = true;
	map->phys_map[val].vcpu = vcpu;
	spin_unlock(&vcpu->kvm->arch.phyid_map_lock);

	return 0;
}

static inline void kvm_drop_cpuid(struct kvm_vcpu *vcpu)
{
	int cpuid;
	struct kvm_phyid_map *map;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	map = vcpu->kvm->arch.phyid_map;
	cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);

	if (cpuid >= KVM_MAX_PHYID)
		return;

	spin_lock(&vcpu->kvm->arch.phyid_map_lock);
	if (map->phys_map[cpuid].enabled) {
		map->phys_map[cpuid].vcpu = NULL;
		map->phys_map[cpuid].enabled = false;
		kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);
	}
	spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
}

struct kvm_vcpu *kvm_get_vcpu_by_cpuid(struct kvm *kvm, int cpuid)
{
	struct kvm_phyid_map *map;

	if (cpuid < 0)
		return NULL;

	if (cpuid >= KVM_MAX_PHYID)
		return NULL;

	map = kvm->arch.phyid_map;
	if (!map->phys_map[cpuid].enabled)
		return NULL;

	return map->phys_map[cpuid].vcpu;
}

static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val)
{
	unsigned long gintc;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (get_gcsr_flag(id) & INVALID_GCSR)
		return -EINVAL;

	if (id == LOONGARCH_CSR_ESTAT) {
		preempt_disable();
		vcpu_load(vcpu);
		/*
		 * Sync pending interrupts into ESTAT so that no interrupt
		 * is lost during the VM migration stage.
		 */
		kvm_deliver_intr(vcpu);
		vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
		vcpu_put(vcpu);
		preempt_enable();

		/* ESTAT IP0~IP7 get from GINTC */
		gintc = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC) & 0xff;
		*val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT) | (gintc << 2);
		return 0;
	}

	/*
	 * Get the software CSR state, since the software state is kept
	 * consistent with hardware for this synchronous ioctl.
	 */
	*val = kvm_read_sw_gcsr(csr, id);

	return 0;
}

static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
{
	int ret = 0, gintc;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (get_gcsr_flag(id) & INVALID_GCSR)
		return -EINVAL;

	if (id == LOONGARCH_CSR_CPUID)
		return kvm_set_cpuid(vcpu, val);

	if (id == LOONGARCH_CSR_ESTAT) {
		/* ESTAT IP0~IP7 inject through GINTC */
		gintc = (val >> 2) & 0xff;
		kvm_set_sw_gcsr(csr, LOONGARCH_CSR_GINTC, gintc);

		gintc = val & ~(0xffUL << 2);
		kvm_set_sw_gcsr(csr, LOONGARCH_CSR_ESTAT, gintc);

		return ret;
	}

	kvm_write_sw_gcsr(csr, id, val);

	/*
	 * After modifying a PMU CSR value of the vcpu: if any of the PMU
	 * counters is enabled, set KVM_REQ_PMU so that the guest PMU
	 * context is restored before the next VM-entry.
	 */
	if (id >= LOONGARCH_CSR_PERFCTRL0 && id <= LOONGARCH_CSR_PERFCNTR3) {
		unsigned long val;

		val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0) |
		      kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1) |
		      kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2) |
		      kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);

		if (val & KVM_PMU_EVENT_ENABLED)
			kvm_make_request(KVM_REQ_PMU, vcpu);
	}

	return ret;
}

static int _kvm_get_cpucfg_mask(int id, u64 *v)
{
	unsigned int config;

	if (id < 0 || id >= KVM_MAX_CPUCFG_REGS)
		return -EINVAL;

	switch (id) {
	case LOONGARCH_CPUCFG0:
		*v = GENMASK(31, 0);
		return 0;
	case LOONGARCH_CPUCFG1:
		*v = GENMASK(26, 0);
		return 0;
	case LOONGARCH_CPUCFG2:
		/* CPUCFG2 features unconditionally supported by KVM */
		*v = CPUCFG2_FP | CPUCFG2_FPSP | CPUCFG2_FPDP |
		     CPUCFG2_FPVERS | CPUCFG2_LLFTP | CPUCFG2_LLFTPREV |
		     CPUCFG2_LSPW | CPUCFG2_LAM;
		/*
		 * For the ISA extensions listed below, if one is supported
		 * by the host, then it is also supported by KVM.
		 */
		if (cpu_has_lsx)
			*v |= CPUCFG2_LSX;
		if (cpu_has_lasx)
			*v |= CPUCFG2_LASX;
		if (cpu_has_lbt_x86)
			*v |= CPUCFG2_X86BT;
		if (cpu_has_lbt_arm)
			*v |= CPUCFG2_ARMBT;
		if (cpu_has_lbt_mips)
			*v |= CPUCFG2_MIPSBT;
		if (cpu_has_ptw)
			*v |= CPUCFG2_PTW;

		config = read_cpucfg(LOONGARCH_CPUCFG2);
		*v |= config & (CPUCFG2_FRECIPE | CPUCFG2_DIV32 | CPUCFG2_LAM_BH);
		*v |= config & (CPUCFG2_LAMCAS | CPUCFG2_LLACQ_SCREL | CPUCFG2_SCQ);
		return 0;
	case LOONGARCH_CPUCFG3:
		*v = GENMASK(23, 0);

		/* The VM does not support configuring memory order or SFB */
		config = read_cpucfg(LOONGARCH_CPUCFG3);
		*v &= config & ~(CPUCFG3_SFB);
		*v &= config & ~(CPUCFG3_ALDORDER_CAP | CPUCFG3_ASTORDER_CAP | CPUCFG3_SLDORDER_CAP);
		return 0;
	case LOONGARCH_CPUCFG4:
	case LOONGARCH_CPUCFG5:
		*v = GENMASK(31, 0);
		return 0;
	case LOONGARCH_CPUCFG6:
		if (cpu_has_pmp)
			*v = GENMASK(14, 0);
		else
			*v = 0;
		return 0;
	case LOONGARCH_CPUCFG16:
		*v = GENMASK(16, 0);
		return 0;
	case LOONGARCH_CPUCFG17 ... LOONGARCH_CPUCFG20:
		*v = GENMASK(30, 0);
		return 0;
	default:
		/*
		 * CPUCFG bits should be zero if reserved by HW or not
		 * supported by KVM.
		 */
		*v = 0;
		return 0;
	}
}

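/*
 * Validate a userspace-supplied CPUCFG value: bits outside the mask from
 * _kvm_get_cpucfg_mask() are rejected, with additional cross-field checks
 * for CPUCFG1/2/3/6 (e.g. LASX requires LSX, which in turn requires FP).
 */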
static int kvm_check_cpucfg(int id, u64 val)
{
	int ret;
	u32 host;
	u64 mask = 0;

	ret = _kvm_get_cpucfg_mask(id, &mask);
	if (ret)
		return ret;

	if (val & ~mask)
		/* Unsupported features and/or the higher 32 bits should not be set */
		return -EINVAL;

	switch (id) {
	case LOONGARCH_CPUCFG1:
		if ((val & CPUCFG1_MSGINT) && !cpu_has_msgint)
			return -EINVAL;
		return 0;
	case LOONGARCH_CPUCFG2:
		if (!(val & CPUCFG2_LLFTP))
			/* Guests must have a constant timer */
			return -EINVAL;
		if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP)))
			/* Single- and double-precision FP must both be set when FP is enabled */
			return -EINVAL;
		if ((val & CPUCFG2_LSX) && !(val & CPUCFG2_FP))
			/* LSX architecturally implies FP but val does not satisfy that */
			return -EINVAL;
		if ((val & CPUCFG2_LASX) && !(val & CPUCFG2_LSX))
			/* LASX architecturally implies LSX and FP but val does not satisfy that */
			return -EINVAL;
		return 0;
	case LOONGARCH_CPUCFG3:
		host = read_cpucfg(LOONGARCH_CPUCFG3);
		if ((val & CPUCFG3_RVAMAX) > (host & CPUCFG3_RVAMAX))
			return -EINVAL;
		if ((val & CPUCFG3_SPW_LVL) > (host & CPUCFG3_SPW_LVL))
			return -EINVAL;
		return 0;
	case LOONGARCH_CPUCFG6:
		if (val & CPUCFG6_PMP) {
			host = read_cpucfg(LOONGARCH_CPUCFG6);
			if ((val & CPUCFG6_PMBITS) != (host & CPUCFG6_PMBITS))
				return -EINVAL;
			if ((val & CPUCFG6_PMNUM) > (host & CPUCFG6_PMNUM))
				return -EINVAL;
			if ((val & CPUCFG6_UPM) && !(host & CPUCFG6_UPM))
				return -EINVAL;
		}
		return 0;
	default:
		/*
		 * Values for the other CPUCFG IDs are not validated further,
		 * besides the mask check above.
		 */
		return 0;
	}
}

static int kvm_get_one_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, u64 *v)
{
	int id, ret = 0;
	u64 type = reg->id & KVM_REG_LOONGARCH_MASK;

	switch (type) {
	case KVM_REG_LOONGARCH_CSR:
		id = KVM_GET_IOC_CSR_IDX(reg->id);
		ret = _kvm_getcsr(vcpu, id, v);
		break;
	case KVM_REG_LOONGARCH_CPUCFG:
		id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
		if (id >= 0 && id < KVM_MAX_CPUCFG_REGS)
			*v = vcpu->arch.cpucfg[id];
		else
			ret = -EINVAL;
		break;
	case KVM_REG_LOONGARCH_LBT:
		if (!kvm_guest_has_lbt(&vcpu->arch))
			return -ENXIO;

		switch (reg->id) {
		case KVM_REG_LOONGARCH_LBT_SCR0:
			*v = vcpu->arch.lbt.scr0;
			break;
		case KVM_REG_LOONGARCH_LBT_SCR1:
			*v = vcpu->arch.lbt.scr1;
			break;
		case KVM_REG_LOONGARCH_LBT_SCR2:
			*v = vcpu->arch.lbt.scr2;
			break;
		case KVM_REG_LOONGARCH_LBT_SCR3:
			*v = vcpu->arch.lbt.scr3;
			break;
		case KVM_REG_LOONGARCH_LBT_EFLAGS:
			*v = vcpu->arch.lbt.eflags;
			break;
		case KVM_REG_LOONGARCH_LBT_FTOP:
			*v = vcpu->arch.fpu.ftop;
			break;
		default:
			ret = -EINVAL;
			break;
		}
		break;
	case KVM_REG_LOONGARCH_KVM:
		switch (reg->id) {
		case KVM_REG_LOONGARCH_COUNTER:
			*v = get_cycles() + vcpu->kvm->arch.time_offset;
			break;
		case KVM_REG_LOONGARCH_DEBUG_INST:
			*v = INSN_HVCL | KVM_HCALL_SWDBG;
			break;
		default:
			ret = -EINVAL;
			break;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int kvm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret = 0;
	u64 v, size = reg->id & KVM_REG_SIZE_MASK;

	switch (size) {
	case KVM_REG_SIZE_U64:
		ret = kvm_get_one_reg(vcpu, reg, &v);
		if (ret)
			return ret;
		ret = put_user(v, (u64 __user *)(long)reg->addr);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, u64 v)
{
	int id, ret = 0;
	u64 type = reg->id & KVM_REG_LOONGARCH_MASK;

	switch (type) {
	case KVM_REG_LOONGARCH_CSR:
		id = KVM_GET_IOC_CSR_IDX(reg->id);
		ret = _kvm_setcsr(vcpu, id, v);
		break;
	case KVM_REG_LOONGARCH_CPUCFG:
		id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
		ret = kvm_check_cpucfg(id, v);
		if (ret)
			break;
		vcpu->arch.cpucfg[id] = (u32)v;
		if (id == LOONGARCH_CPUCFG6)
			vcpu->arch.max_pmu_csrid =
				LOONGARCH_CSR_PERFCTRL0 + 2 * kvm_get_pmu_num(&vcpu->arch) + 1;
		break;
	case KVM_REG_LOONGARCH_LBT:
		if (!kvm_guest_has_lbt(&vcpu->arch))
			return -ENXIO;

		switch (reg->id) {
		case KVM_REG_LOONGARCH_LBT_SCR0:
			vcpu->arch.lbt.scr0 = v;
			break;
		case KVM_REG_LOONGARCH_LBT_SCR1:
			vcpu->arch.lbt.scr1 = v;
			break;
		case KVM_REG_LOONGARCH_LBT_SCR2:
			vcpu->arch.lbt.scr2 = v;
			break;
		case KVM_REG_LOONGARCH_LBT_SCR3:
			vcpu->arch.lbt.scr3 = v;
			break;
		case KVM_REG_LOONGARCH_LBT_EFLAGS:
			vcpu->arch.lbt.eflags = v;
			break;
		case KVM_REG_LOONGARCH_LBT_FTOP:
			vcpu->arch.fpu.ftop = v;
			break;
		default:
			ret = -EINVAL;
			break;
		}
		break;
	case KVM_REG_LOONGARCH_KVM:
		switch (reg->id) {
		case KVM_REG_LOONGARCH_COUNTER:
			/*
			 * The counter offset is board-wide rather than
			 * per-vCPU, so only set it once (from vCPU 0) on
			 * SMP systems.
			 */
			if (vcpu->vcpu_id == 0)
				vcpu->kvm->arch.time_offset = (signed long)(v - get_cycles());
			break;
		case KVM_REG_LOONGARCH_VCPU_RESET:
			vcpu->arch.st.guest_addr = 0;
			memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
			memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));

			/*
			 * On vCPU reset, clear the ESTAT and GINTC registers.
			 * The other CSR registers are cleared through
			 * _kvm_setcsr().
			 */
			kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_GINTC, 0);
			kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_ESTAT, 0);
			break;
		default:
			ret = -EINVAL;
			break;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int kvm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret = 0;
	u64 v, size = reg->id & KVM_REG_SIZE_MASK;

	switch (size) {
	case KVM_REG_SIZE_U64:
		ret = get_user(v, (u64 __user *)(long)reg->addr);
		if (ret)
			return ret;
		break;
	default:
		return -EINVAL;
	}

	return kvm_set_one_reg(vcpu, reg, v);
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		regs->gpr[i] = vcpu->arch.gprs[i];

	regs->pc = vcpu->arch.pc;

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		vcpu->arch.gprs[i] = regs->gpr[i];

	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
	vcpu->arch.pc = regs->pc;

	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	/* FPU is enabled by default, will support LSX/LASX later. */
	return -EINVAL;
}

static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu,
					 struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case LOONGARCH_CPUCFG2:
	case LOONGARCH_CPUCFG6:
		return 0;
	case CPUCFG_KVM_FEATURE:
		return 0;
	default:
		return -ENXIO;
	}

	return -ENXIO;
}

static int kvm_loongarch_pvtime_has_attr(struct kvm_vcpu *vcpu,
					 struct kvm_device_attr *attr)
{
	if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
			|| attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
		return -ENXIO;

	return 0;
}

static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
				       struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	case KVM_LOONGARCH_VCPU_CPUCFG:
		ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr);
		break;
	case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
		ret = kvm_loongarch_pvtime_has_attr(vcpu, attr);
		break;
	default:
		break;
	}

	return ret;
}

static int kvm_loongarch_cpucfg_get_attr(struct kvm_vcpu *vcpu,
					 struct kvm_device_attr *attr)
{
	int ret = 0;
	uint64_t val;
	uint64_t __user *uaddr = (uint64_t __user *)attr->addr;

	switch (attr->attr) {
	case 0 ... (KVM_MAX_CPUCFG_REGS - 1):
		ret = _kvm_get_cpucfg_mask(attr->attr, &val);
		if (ret)
			return ret;
		break;
	case CPUCFG_KVM_FEATURE:
		val = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK;
		break;
	default:
		return -ENXIO;
	}

	put_user(val, uaddr);

	return ret;
}

static int kvm_loongarch_pvtime_get_attr(struct kvm_vcpu *vcpu,
					 struct kvm_device_attr *attr)
{
	u64 gpa;
	u64 __user *user = (u64 __user *)attr->addr;

	if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
			|| attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
		return -ENXIO;

	gpa = vcpu->arch.st.guest_addr;
	if (put_user(gpa, user))
		return -EFAULT;

	return 0;
}

static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu,
				       struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	case KVM_LOONGARCH_VCPU_CPUCFG:
		ret = kvm_loongarch_cpucfg_get_attr(vcpu, attr);
		break;
	case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
		ret = kvm_loongarch_pvtime_get_attr(vcpu, attr);
		break;
	default:
		break;
	}

	return ret;
}

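/*
 * Set the PV feature bitmap. The feature set is VM-wide: once it has been
 * updated, every subsequent write (e.g. from another vCPU) must supply the
 * same value.
 */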
static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu,
					 struct kvm_device_attr *attr)
{
	u64 val, valid;
	u64 __user *user = (u64 __user *)attr->addr;
	struct kvm *kvm = vcpu->kvm;

	switch (attr->attr) {
	case CPUCFG_KVM_FEATURE:
		if (get_user(val, user))
			return -EFAULT;

		valid = LOONGARCH_PV_FEAT_MASK;
		if (val & ~valid)
			return -EINVAL;

		/* All vCPUs must be set with the same PV features */
		if ((kvm->arch.pv_features & LOONGARCH_PV_FEAT_UPDATED)
				&& ((kvm->arch.pv_features & valid) != val))
			return -EINVAL;
		kvm->arch.pv_features = val | LOONGARCH_PV_FEAT_UPDATED;
		return 0;
	default:
		return -ENXIO;
	}
}

static int kvm_loongarch_pvtime_set_attr(struct kvm_vcpu *vcpu,
					 struct kvm_device_attr *attr)
{
	int idx, ret = 0;
	u64 gpa, __user *user = (u64 __user *)attr->addr;
	struct kvm *kvm = vcpu->kvm;

	if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
			|| attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
		return -ENXIO;

	if (get_user(gpa, user))
		return -EFAULT;

	if (gpa & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
		return -EINVAL;

	if (!(gpa & KVM_STEAL_PHYS_VALID)) {
		vcpu->arch.st.guest_addr = gpa;
		return 0;
	}

	/* Check that the address is in a valid memslot */
	idx = srcu_read_lock(&kvm->srcu);
	if (kvm_is_error_hva(gfn_to_hva(kvm, gpa >> PAGE_SHIFT)))
		ret = -EINVAL;
	srcu_read_unlock(&kvm->srcu, idx);

	if (!ret) {
		vcpu->arch.st.guest_addr = gpa;
		vcpu->arch.st.last_steal = current->sched_info.run_delay;
		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
	}

	return ret;
}

static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu,
				       struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	case KVM_LOONGARCH_VCPU_CPUCFG:
		ret = kvm_loongarch_cpucfg_set_attr(vcpu, attr);
		break;
	case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
		ret = kvm_loongarch_pvtime_set_attr(vcpu, attr);
		break;
	default:
		break;
	}

	return ret;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	long r;
	struct kvm_device_attr attr;
	void __user *argp = (void __user *)arg;
	struct kvm_vcpu *vcpu = filp->private_data;

	/*
	 * Only the software CSR state should be modified here.
	 *
	 * If any hardware CSR register were modified directly, a
	 * vcpu_load/vcpu_put pair would be needed: the hardware CSR
	 * registers are owned by the currently loaded vcpu, so other
	 * vcpus would need to reload them when switched in.
	 *
	 * When the software CSR state is modified, the bit
	 * KVM_LARCH_HWCSR_USABLE is cleared in vcpu->arch.aux_inuse, and
	 * vcpu_load checks this flag and reloads the CSR registers from
	 * the software state.
	 */

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG) {
			r = kvm_set_reg(vcpu, &reg);
			vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
		} else
			r = kvm_get_reg(vcpu, &reg);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_loongarch_vcpu_has_attr(vcpu, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_loongarch_vcpu_get_attr(vcpu, &attr);
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_loongarch_vcpu_set_attr(vcpu, &attr);
		break;
	}
	default:
		r = -ENOIOCTLCMD;
		break;
	}

	return r;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	int i = 0;

	fpu->fcc = vcpu->arch.fpu.fcc;
	fpu->fcsr = vcpu->arch.fpu.fcsr;
	for (i = 0; i < NUM_FPU_REGS; i++)
		memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i], FPU_REG_WIDTH / 64);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	int i = 0;

	vcpu->arch.fpu.fcc = fpu->fcc;
	vcpu->arch.fpu.fcsr = fpu->fcsr;
	for (i = 0; i < NUM_FPU_REGS; i++)
		memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i], FPU_REG_WIDTH / 64);

	return 0;
}

#ifdef CONFIG_CPU_HAS_LBT
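/*
 * Lazily enable LBT for the guest on first use and restore its scratch
 * register context; KVM_LARCH_LBT tracks ownership until the context is
 * saved again in kvm_lose_lbt().
 */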
int kvm_own_lbt(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.aux_inuse & KVM_LARCH_LBT)) {
		set_csr_euen(CSR_EUEN_LBTEN);
		_restore_lbt(&vcpu->arch.lbt);
		vcpu->arch.aux_inuse |= KVM_LARCH_LBT;
	}

	return 0;
}

static void kvm_lose_lbt(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) {
		_save_lbt(&vcpu->arch.lbt);
		clear_csr_euen(CSR_EUEN_LBTEN);
		vcpu->arch.aux_inuse &= ~KVM_LARCH_LBT;
	}
	preempt_enable();
}

static void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr)
{
	/*
	 * If TM mode is enabled, saving/restoring the top register
	 * causes an LBT exception, so enable LBT here in advance.
	 */
	if (fcsr & FPU_CSR_TM)
		kvm_own_lbt(vcpu);
}

static void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
		if (vcpu->arch.aux_inuse & KVM_LARCH_LBT)
			return;
		kvm_check_fcsr(vcpu, read_fcsr(LOONGARCH_FCSR0));
	}
}
#else
static inline void kvm_lose_lbt(struct kvm_vcpu *vcpu) { }
static inline void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr) { }
static inline void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu) { }
#endif

/* Enable FPU and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
	/*
	 * Enable FPU for guest
	 * Set FR and FRE according to guest context
	 */
	kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
	set_csr_euen(CSR_EUEN_FPEN);

	kvm_restore_fpu(&vcpu->arch.fpu);
	vcpu->arch.aux_inuse |= KVM_LARCH_FPU;
	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
}

#ifdef CONFIG_CPU_HAS_LSX
/* Enable LSX and restore context */
int kvm_own_lsx(struct kvm_vcpu *vcpu)
{
	/* Enable LSX for guest */
	kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
	set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN);
	switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
	case KVM_LARCH_FPU:
		/*
		 * Guest FPU state already loaded,
		 * only restore upper LSX state
		 */
		_restore_lsx_upper(&vcpu->arch.fpu);
		break;
	default:
		/*
		 * Neither FP nor LSX is active,
		 * restore the full LSX state
		 */
		kvm_restore_lsx(&vcpu->arch.fpu);
		break;
	}

	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX);
	vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU;

	return 0;
}
#endif

#ifdef CONFIG_CPU_HAS_LASX
/* Enable LASX and restore context */
int kvm_own_lasx(struct kvm_vcpu *vcpu)
{
	kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
	set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
	switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) {
	case KVM_LARCH_LSX:
	case KVM_LARCH_LSX | KVM_LARCH_FPU:
		/* Guest LSX state already loaded, only restore upper LASX state */
		_restore_lasx_upper(&vcpu->arch.fpu);
		break;
	case KVM_LARCH_FPU:
		/* Guest FP state already loaded, only restore upper LSX & LASX state */
		_restore_lsx_upper(&vcpu->arch.fpu);
		_restore_lasx_upper(&vcpu->arch.fpu);
		break;
	default:
		/* Neither FP nor LSX is active, restore the full LASX state */
		kvm_restore_lasx(&vcpu->arch.fpu);
		break;
	}

	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX);
	vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU;

	return 0;
}
#endif

/* Save context and disable FPU */
void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();

	kvm_check_fcsr_alive(vcpu);
	if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) {
		kvm_save_lasx(&vcpu->arch.fpu);
		vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LASX);

		/* Disable LASX & LSX & FPU */
		clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
	} else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) {
		kvm_save_lsx(&vcpu->arch.fpu);
		vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX);

		/* Disable LSX & FPU */
		clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN);
	} else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
		kvm_save_fpu(&vcpu->arch.fpu);
		vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);

		/* Disable FPU */
		clear_csr_euen(CSR_EUEN_FPEN);
	}
	kvm_lose_lbt(vcpu);

	preempt_enable();
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	int intr = (int)irq->irq;

	if (intr > 0)
		kvm_queue_irq(vcpu, intr);
	else if (intr < 0)
		kvm_dequeue_irq(vcpu, -intr);
	else {
		kvm_err("%s: invalid interrupt ioctl %d\n", __func__, irq->irq);
		return -EINVAL;
	}

	kvm_vcpu_kick(vcpu);

	return 0;
}

long kvm_arch_vcpu_unlocked_ioctl(struct file *filp, unsigned int ioctl,
				  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct kvm_vcpu *vcpu = filp->private_data;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_interrupt irq;

		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;

		kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq);

		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}

	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	unsigned long timer_hz;
	struct loongarch_csrs *csr;

	vcpu->arch.vpid = 0;
	vcpu->arch.flush_gpa = INVALID_GPA;

	hrtimer_setup(&vcpu->arch.swtimer, kvm_swtimer_wakeup, CLOCK_MONOTONIC,
		      HRTIMER_MODE_ABS_PINNED_HARD);

	/* Get GPA (=HVA) of PGD for kvm hypervisor */
	vcpu->arch.kvm_pgd = __pa(vcpu->kvm->arch.pgd);

	/*
	 * Get the PGD for the primary mmu; the virtual address is used since
	 * there is memory access after loading from CSR_PGD in the tlb
	 * exception fast path.
	 */
	vcpu->arch.host_pgd = (unsigned long)vcpu->kvm->mm->pgd;

	vcpu->arch.handle_exit = kvm_handle_exit;
	vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry;
	vcpu->arch.csr = kzalloc_obj(struct loongarch_csrs);
	if (!vcpu->arch.csr)
		return -ENOMEM;

	/*
	 * All kvm exceptions share one exception entry, and host <-> guest
	 * switches also switch the ECFG.VS field, so keep the host ECFG.VS
	 * info here.
	 */
	vcpu->arch.host_ecfg = (read_csr_ecfg() & CSR_ECFG_VS);

	/* Init */
	vcpu->arch.last_sched_cpu = -1;

	/* Init ipi_state lock */
	spin_lock_init(&vcpu->arch.ipi_state.lock);

	/*
	 * Initialize guest register state to valid architectural reset state.
	 */
	timer_hz = calc_const_freq();
	kvm_init_timer(vcpu, timer_hz);

	/* Set Initialize mode for guest */
	csr = vcpu->arch.csr;
	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CRMD, CSR_CRMD_DA);

	/* Set cpuid */
	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id);
	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);

	/* Start with no pending virtual guest interrupts */
	csr->csrs[LOONGARCH_CSR_GINTC] = 0;

	return 0;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	int cpu;
	struct kvm_context *context;

	hrtimer_cancel(&vcpu->arch.swtimer);
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
	kvm_drop_cpuid(vcpu);
	kfree(vcpu->arch.csr);

	/*
	 * If the vCPU is freed and reused as another vCPU, we don't want the
	 * matching pointer wrongly hanging around in last_vcpu.
	 */
	for_each_possible_cpu(cpu) {
		context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
		if (context->last_vcpu == vcpu)
			context->last_vcpu = NULL;
	}
}
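/*
 * Restore per-CPU and guest CSR state on vcpu_load. The full CSR restore is
 * skipped while KVM_LARCH_HWCSR_USABLE is set, i.e. when the hardware GCSRs
 * still hold this vCPU's latest state.
 */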
static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	bool migrated;
	struct kvm_context *context;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	/*
	 * Have we migrated to a different CPU?
	 * If so, any old guest TLB state may be stale.
	 */
	migrated = (vcpu->arch.last_sched_cpu != cpu);

	/*
	 * Was this the last vCPU to run on this CPU?
	 * If not, any old guest state from this vCPU will have been clobbered.
	 */
	context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
	if (migrated || (context->last_vcpu != vcpu)) {
		context->last_vcpu = vcpu;
		vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
		vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY);
	}

	/* Restore timer state regardless */
	kvm_restore_timer(vcpu);
	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);

	/* Don't bother restoring registers multiple times unless necessary */
	if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE)
		return 0;

	write_csr_gcntc((ulong)vcpu->kvm->arch.time_offset);

	/* Restore guest CSR registers */
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_MISC);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ERA);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADV);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADI);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ASID);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS2);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS3);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS4);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS5);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS6);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS7);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TMID);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);

	if (kvm_guest_has_msgint(&vcpu->arch)) {
		kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_IPR);
		kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ISR0);
		kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ISR1);
		kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ISR2);
		kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ISR3);
	}

	/* Restore Root.GINTC from the unused Guest.GINTC register */
	write_csr_gintc(csr->csrs[LOONGARCH_CSR_GINTC]);
	write_csr_gstat(csr->csrs[LOONGARCH_CSR_GSTAT]);

	/*
	 * We should clear the linked-load bit to break interrupted atomics.
	 * This prevents an SC on the next vCPU from succeeding by matching
	 * an LL on the previous vCPU.
	 */
	if (vcpu->kvm->created_vcpus > 1)
		set_gcsr_llbctl(CSR_LLBCTL_WCLLB);

	vcpu->arch.aux_inuse |= KVM_LARCH_HWCSR_USABLE;

	return 0;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	/* Restore guest state to registers */
	_kvm_vcpu_load(vcpu, cpu);
	local_irq_restore(flags);
}

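/*
 * Save guest state on vcpu_put. The full CSR save is skipped while
 * KVM_LARCH_SWCSR_LATEST is set, i.e. when the software GCSR copy is
 * already up to date; the timer and GINTC state are saved regardless.
 */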
static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
	struct loongarch_csrs *csr = vcpu->arch.csr;

	kvm_lose_fpu(vcpu);

	/*
	 * Update the CSR state from hardware if the software CSR state is
	 * stale. Most CSR registers are kept unchanged during a process
	 * context switch, except for registers like the remaining timer
	 * tick value and the injected interrupt state.
	 */
	if (vcpu->arch.aux_inuse & KVM_LARCH_SWCSR_LATEST)
		goto out;

	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_MISC);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ERA);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADV);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADI);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ASID);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG2);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG3);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS2);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS3);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS4);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS5);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS6);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS7);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TMID);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);

	if (kvm_guest_has_msgint(&vcpu->arch)) {
		kvm_save_hw_gcsr(csr, LOONGARCH_CSR_IPR);
		kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ISR0);
		kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ISR1);
		kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ISR2);
		kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ISR3);
	}

	csr->csrs[LOONGARCH_CSR_GSTAT] = read_csr_gstat();
	vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST;

out:
	kvm_save_timer(vcpu);
	/* Save Root.GINTC into the unused Guest.GINTC register */
	csr->csrs[LOONGARCH_CSR_GINTC] = read_csr_gintc();

	return 0;
}
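/*
 * Mark this vCPU as preempted in its steal-time record so that the guest
 * can detect it (e.g. through vcpu_is_preempted-style checks). Called from
 * kvm_arch_vcpu_put() when the vCPU was preempted.
 */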
static void kvm_vcpu_set_pv_preempted(struct kvm_vcpu *vcpu)
{
	gpa_t gpa;
	struct gfn_to_hva_cache *ghc;
	struct kvm_memslots *slots;
	struct kvm_steal_time __user *st;

	gpa = vcpu->arch.st.guest_addr;
	if (!(gpa & KVM_STEAL_PHYS_VALID))
		return;

	/* The vCPU may be preempted multiple times */
	if (vcpu->arch.st.preempted)
		return;

	/* This happens on process exit */
	if (unlikely(current->mm != vcpu->kvm->mm))
		return;

	gpa &= KVM_STEAL_PHYS_MASK;
	ghc = &vcpu->arch.st.cache;
	slots = kvm_memslots(vcpu->kvm);
	if (slots->generation != ghc->generation || gpa != ghc->gpa) {
		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) {
			ghc->gpa = INVALID_GPA;
			return;
		}
	}

	st = (struct kvm_steal_time __user *)ghc->hva;
	unsafe_put_user(KVM_VCPU_PREEMPTED, &st->preempted, out);
	vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
out:
	mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	int cpu, idx;
	unsigned long flags;

	if (vcpu->preempted && kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_PREEMPT)) {
		/*
		 * Take the srcu lock as memslots will be accessed to check
		 * the gfn cache generation against the memslots generation.
		 */
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		kvm_vcpu_set_pv_preempted(vcpu);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
	}

	local_irq_save(flags);
	cpu = smp_processor_id();
	vcpu->arch.last_sched_cpu = cpu;

	/* Save guest state in registers */
	_kvm_vcpu_put(vcpu, cpu);
	local_irq_restore(flags);
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	int r = -EINTR;
	struct kvm_run *run = vcpu->run;

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvm_complete_mmio_read(vcpu, run);
		vcpu->mmio_needed = 0;
	}

	switch (run->exit_reason) {
	case KVM_EXIT_HYPERCALL:
		kvm_complete_user_service(vcpu, run);
		break;
	case KVM_EXIT_LOONGARCH_IOCSR:
		if (!run->iocsr_io.is_write)
			kvm_complete_iocsr_read(vcpu, run);
		break;
	}

	if (!vcpu->wants_to_run)
		return r;

	/* Clear exit_reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;
	lose_fpu(1);
	vcpu_load(vcpu);
	kvm_sigset_activate(vcpu);
	r = kvm_pre_enter_guest(vcpu);
	if (r != RESUME_GUEST)
		goto out;

	guest_timing_enter_irqoff();
	guest_state_enter_irqoff();
	trace_kvm_enter(vcpu);
	r = kvm_loongarch_ops->enter_guest(run, vcpu);

	trace_kvm_out(vcpu);
	/*
	 * The guest exit was already recorded in kvm_handle_exit(),
	 * and the return value must not be RESUME_GUEST here.
	 */
	local_irq_enable();
out:
	kvm_sigset_deactivate(vcpu);
	vcpu_put(vcpu);

	return r;
}
