Lines Matching full:vcpu
18 STATS_DESC_COUNTER(VCPU, int_exits),
19 STATS_DESC_COUNTER(VCPU, idle_exits),
20 STATS_DESC_COUNTER(VCPU, cpucfg_exits),
21 STATS_DESC_COUNTER(VCPU, signal_exits),
22 STATS_DESC_COUNTER(VCPU, hypercall_exits),
23 STATS_DESC_COUNTER(VCPU, ipi_read_exits),
24 STATS_DESC_COUNTER(VCPU, ipi_write_exits),
25 STATS_DESC_COUNTER(VCPU, eiointc_read_exits),
26 STATS_DESC_COUNTER(VCPU, eiointc_write_exits),
27 STATS_DESC_COUNTER(VCPU, pch_pic_read_exits),
28 STATS_DESC_COUNTER(VCPU, pch_pic_write_exits)
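These are the per-vCPU exit counters exported through KVM's binary stats interface. As a rough, self-contained illustration of the descriptor-plus-counter pattern (struct vcpu_stat, struct stat_desc and STAT_COUNTER below are simplified stand-ins, not the kernel's actual types or macros):

    #include <stddef.h>

    /* toy stats struct: one field per counter listed above */
    struct vcpu_stat {
        unsigned long long int_exits;
        unsigned long long idle_exits;
    };

    /* descriptor naming a counter and recording where it lives */
    struct stat_desc {
        const char *name;
        size_t offset;
    };

    #define STAT_COUNTER(field) { #field, offsetof(struct vcpu_stat, field) }

    static const struct stat_desc vcpu_stat_desc[] = {
        STAT_COUNTER(int_exits),
        STAT_COUNTER(idle_exits),
    };

The counters themselves are bumped inline on the relevant exit path, e.g. ++vcpu->stat.int_exits as seen further down in kvm_handle_exit().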
40 static inline void kvm_save_host_pmu(struct kvm_vcpu *vcpu) in kvm_save_host_pmu() argument
44 context = this_cpu_ptr(vcpu->kvm->arch.vmcs); in kvm_save_host_pmu()
55 static inline void kvm_restore_host_pmu(struct kvm_vcpu *vcpu) in kvm_restore_host_pmu() argument
59 context = this_cpu_ptr(vcpu->kvm->arch.vmcs); in kvm_restore_host_pmu()
71 static inline void kvm_save_guest_pmu(struct kvm_vcpu *vcpu) in kvm_save_guest_pmu() argument
73 struct loongarch_csrs *csr = vcpu->arch.csr; in kvm_save_guest_pmu()
85 static inline void kvm_restore_guest_pmu(struct kvm_vcpu *vcpu) in kvm_restore_guest_pmu() argument
87 struct loongarch_csrs *csr = vcpu->arch.csr; in kvm_restore_guest_pmu()
99 static int kvm_own_pmu(struct kvm_vcpu *vcpu) in kvm_own_pmu() argument
103 if (!kvm_guest_has_pmu(&vcpu->arch)) in kvm_own_pmu()
106 kvm_save_host_pmu(vcpu); in kvm_own_pmu()
110 val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT; in kvm_own_pmu()
113 kvm_restore_guest_pmu(vcpu); in kvm_own_pmu()
118 static void kvm_lose_pmu(struct kvm_vcpu *vcpu) in kvm_lose_pmu() argument
121 struct loongarch_csrs *csr = vcpu->arch.csr; in kvm_lose_pmu()
123 if (!(vcpu->arch.aux_inuse & KVM_LARCH_PMU)) in kvm_lose_pmu()
126 kvm_save_guest_pmu(vcpu); in kvm_lose_pmu()
141 vcpu->arch.aux_inuse &= ~KVM_LARCH_PMU; in kvm_lose_pmu()
143 kvm_restore_host_pmu(vcpu); in kvm_lose_pmu()
146 static void kvm_restore_pmu(struct kvm_vcpu *vcpu) in kvm_restore_pmu() argument
148 if ((vcpu->arch.aux_inuse & KVM_LARCH_PMU)) in kvm_restore_pmu()
149 kvm_make_request(KVM_REQ_PMU, vcpu); in kvm_restore_pmu()
152 static void kvm_check_pmu(struct kvm_vcpu *vcpu) in kvm_check_pmu() argument
154 if (kvm_check_request(KVM_REQ_PMU, vcpu)) { in kvm_check_pmu()
155 kvm_own_pmu(vcpu); in kvm_check_pmu()
156 vcpu->arch.aux_inuse |= KVM_LARCH_PMU; in kvm_check_pmu()
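Taken together these helpers give the PMU lazy, request-driven switching: kvm_restore_pmu() only posts KVM_REQ_PMU, kvm_check_pmu() performs the actual switch right before guest entry, and kvm_lose_pmu() undoes it on the way out only if the guest context really went live. A minimal stand-alone sketch of that flag-and-request pattern, using simplified stand-in names (toy_vcpu, AUX_PMU, REQ_PMU are illustrative, not the kernel's):

    #define AUX_PMU (1u << 0)       /* guest PMU state is live on the CPU */
    #define REQ_PMU (1u << 0)       /* switch to guest PMU before next entry */

    struct toy_vcpu {
        unsigned aux_inuse;         /* which guest contexts currently own hardware */
        unsigned requests;          /* work to do before the next guest entry */
    };

    static void own_pmu(struct toy_vcpu *v)
    {
        /* ... save host PMU CSRs, load the guest's copy ... */
        v->aux_inuse |= AUX_PMU;
    }

    static void check_pmu_before_entry(struct toy_vcpu *v)
    {
        if (v->requests & REQ_PMU) {
            v->requests &= ~REQ_PMU;
            own_pmu(v);
        }
    }

    static void lose_pmu_on_exit(struct toy_vcpu *v)
    {
        if (!(v->aux_inuse & AUX_PMU))
            return;                 /* guest never owned the PMU: nothing to undo */
        /* ... save guest PMU CSRs, restore the host's copy ... */
        v->aux_inuse &= ~AUX_PMU;
    }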
160 static void kvm_update_stolen_time(struct kvm_vcpu *vcpu) in kvm_update_stolen_time() argument
169 ghc = &vcpu->arch.st.cache; in kvm_update_stolen_time()
170 gpa = vcpu->arch.st.guest_addr; in kvm_update_stolen_time()
175 slots = kvm_memslots(vcpu->kvm); in kvm_update_stolen_time()
177 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) { in kvm_update_stolen_time()
193 steal += current->sched_info.run_delay - vcpu->arch.st.last_steal; in kvm_update_stolen_time()
194 vcpu->arch.st.last_steal = current->sched_info.run_delay; in kvm_update_stolen_time()
201 mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa)); in kvm_update_stolen_time()
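kvm_update_stolen_time() follows the usual PV steal-time recipe: accumulate the delta of current->sched_info.run_delay since the last update and publish it to a guest-shared structure, bracketing the write with a version bump so the guest can detect a torn read. A simplified user-space analogue (struct steal_time here is an illustrative layout, not the exact guest ABI):

    #include <stdint.h>

    struct steal_time {
        uint64_t steal;     /* nanoseconds the vCPU was runnable but not running */
        uint32_t version;   /* odd while an update is in flight */
    };

    static void update_stolen_time(struct steal_time *st,
                                   uint64_t run_delay, uint64_t *last_steal)
    {
        uint64_t steal = st->steal;

        steal += run_delay - *last_steal;
        *last_steal = run_delay;

        st->version += 1;   /* now odd: guest retries its read */
        st->steal = steal;
        st->version += 1;   /* back to even: snapshot is consistent */
    }

In the kernel the target lives in guest memory, so the write goes through the gfn_to_hva cache initialised above and the page is marked dirty afterwards.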
205 * kvm_check_requests - check and handle pending vCPU requests
210 static int kvm_check_requests(struct kvm_vcpu *vcpu) in kvm_check_requests() argument
212 if (!kvm_request_pending(vcpu)) in kvm_check_requests()
215 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) in kvm_check_requests()
216 vcpu->arch.vpid = 0; /* Drop vpid for this vCPU */ in kvm_check_requests()
218 if (kvm_dirty_ring_check_request(vcpu)) in kvm_check_requests()
221 if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu)) in kvm_check_requests()
222 kvm_update_stolen_time(vcpu); in kvm_check_requests()
227 static void kvm_late_check_requests(struct kvm_vcpu *vcpu) in kvm_late_check_requests() argument
230 if (kvm_check_request(KVM_REQ_TLB_FLUSH_GPA, vcpu)) in kvm_late_check_requests()
231 if (vcpu->arch.flush_gpa != INVALID_GPA) { in kvm_late_check_requests()
232 kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa); in kvm_late_check_requests()
233 vcpu->arch.flush_gpa = INVALID_GPA; in kvm_late_check_requests()
238 * Check and handle pending signal and vCPU requests etc
246 static int kvm_enter_guest_check(struct kvm_vcpu *vcpu) in kvm_enter_guest_check() argument
253 ret = kvm_xfer_to_guest_mode_handle_work(vcpu); in kvm_enter_guest_check()
257 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_enter_guest_check()
258 ret = kvm_check_requests(vcpu); in kvm_enter_guest_check()
259 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_enter_guest_check()
270 static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu) in kvm_pre_enter_guest() argument
275 ret = kvm_enter_guest_check(vcpu); in kvm_pre_enter_guest()
280 * Handle vcpu timer, interrupts, check requests and in kvm_pre_enter_guest()
281 * check vmid before vcpu enter guest in kvm_pre_enter_guest()
284 kvm_deliver_intr(vcpu); in kvm_pre_enter_guest()
285 kvm_deliver_exception(vcpu); in kvm_pre_enter_guest()
286 /* Make sure the vcpu mode has been written */ in kvm_pre_enter_guest()
287 smp_store_mb(vcpu->mode, IN_GUEST_MODE); in kvm_pre_enter_guest()
288 kvm_check_vpid(vcpu); in kvm_pre_enter_guest()
289 kvm_check_pmu(vcpu); in kvm_pre_enter_guest()
296 kvm_late_check_requests(vcpu); in kvm_pre_enter_guest()
297 vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY); in kvm_pre_enter_guest()
299 vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST; in kvm_pre_enter_guest()
301 if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) { in kvm_pre_enter_guest()
302 kvm_lose_pmu(vcpu); in kvm_pre_enter_guest()
303 /* make sure the vcpu mode has been written */ in kvm_pre_enter_guest()
304 smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE); in kvm_pre_enter_guest()
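The ordering here is the classic KVM entry race: the vCPU publishes IN_GUEST_MODE with a full barrier (smp_store_mb) and only then re-checks for pending requests, while a remote CPU posts its request first and only then looks at vcpu->mode to decide whether to kick. Either the entering CPU sees the late request and backs out, or the requester sees IN_GUEST_MODE and sends an IPI. A self-contained sketch of the two sides using C11 atomics (toy_vcpu and the helpers are illustrative stand-ins):

    #include <stdatomic.h>
    #include <stdbool.h>

    enum { OUTSIDE_GUEST_MODE, IN_GUEST_MODE };

    struct toy_vcpu {
        _Atomic int mode;
        _Atomic unsigned requests;
    };

    /* entry side: publish the mode *before* the final request check */
    static bool try_enter_guest(struct toy_vcpu *v)
    {
        atomic_store(&v->mode, IN_GUEST_MODE);          /* smp_store_mb() analogue */
        if (atomic_load(&v->requests)) {
            atomic_store(&v->mode, OUTSIDE_GUEST_MODE); /* late request: back out */
            return false;
        }
        return true;
    }

    /* requester side: post the request, then kick only if the vCPU is in guest mode */
    static void make_request(struct toy_vcpu *v, unsigned req)
    {
        atomic_fetch_or(&v->requests, req);
        if (atomic_load(&v->mode) == IN_GUEST_MODE) {
            /* the vCPU may already be running the guest: send an IPI to force an exit */
        }
    }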
316 static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) in kvm_handle_exit() argument
319 unsigned long estat = vcpu->arch.host_estat; in kvm_handle_exit()
323 vcpu->mode = OUTSIDE_GUEST_MODE; in kvm_handle_exit()
328 kvm_lose_pmu(vcpu); in kvm_handle_exit()
334 trace_kvm_exit(vcpu, ecode); in kvm_handle_exit()
336 ret = kvm_handle_fault(vcpu, ecode); in kvm_handle_exit()
339 ++vcpu->stat.int_exits; in kvm_handle_exit()
343 ret = kvm_pre_enter_guest(vcpu); in kvm_handle_exit()
352 trace_kvm_reenter(vcpu); in kvm_handle_exit()
357 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_runnable() argument
359 return !!(vcpu->arch.irq_pending) && in kvm_arch_vcpu_runnable()
360 vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE; in kvm_arch_vcpu_runnable()
363 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_should_kick() argument
365 return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; in kvm_arch_vcpu_should_kick()
368 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_in_kernel() argument
380 unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_get_ip() argument
382 return vcpu->arch.pc; in kvm_arch_vcpu_get_ip()
388 * any event that arrives while a vCPU is loaded is considered to be "in guest".
390 bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu) in kvm_arch_pmi_in_guest() argument
392 return (vcpu && !(vcpu->arch.aux_inuse & KVM_LARCH_PMU)); in kvm_arch_pmi_in_guest()
396 bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_preempted_in_kernel() argument
401 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) in kvm_arch_vcpu_fault() argument
406 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_translate() argument
412 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) in kvm_cpu_has_pending_timer() argument
418 ret = kvm_pending_timer(vcpu) || in kvm_cpu_has_pending_timer()
425 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_dump_regs() argument
429 kvm_debug("vCPU Register Dump:\n"); in kvm_arch_vcpu_dump_regs()
430 kvm_debug("\tPC = 0x%08lx\n", vcpu->arch.pc); in kvm_arch_vcpu_dump_regs()
431 kvm_debug("\tExceptions: %08lx\n", vcpu->arch.irq_pending); in kvm_arch_vcpu_dump_regs()
435 vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1], in kvm_arch_vcpu_dump_regs()
436 vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]); in kvm_arch_vcpu_dump_regs()
448 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_mpstate() argument
451 *mp_state = vcpu->arch.mp_state; in kvm_arch_vcpu_ioctl_get_mpstate()
456 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_mpstate() argument
463 vcpu->arch.mp_state = *mp_state; in kvm_arch_vcpu_ioctl_set_mpstate()
472 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_guest_debug() argument
479 vcpu->guest_debug = dbg->control; in kvm_arch_vcpu_ioctl_set_guest_debug()
481 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
486 static inline int kvm_set_cpuid(struct kvm_vcpu *vcpu, u64 val) in kvm_set_cpuid() argument
490 struct loongarch_csrs *csr = vcpu->arch.csr; in kvm_set_cpuid()
495 map = vcpu->kvm->arch.phyid_map; in kvm_set_cpuid()
498 spin_lock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
502 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
510 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
516 if (vcpu == map->phys_map[val].vcpu) { in kvm_set_cpuid()
517 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
522 * New CPUID is already set with other vcpu in kvm_set_cpuid()
525 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
531 map->phys_map[val].vcpu = vcpu; in kvm_set_cpuid()
532 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
537 static inline void kvm_drop_cpuid(struct kvm_vcpu *vcpu) in kvm_drop_cpuid() argument
541 struct loongarch_csrs *csr = vcpu->arch.csr; in kvm_drop_cpuid()
543 map = vcpu->kvm->arch.phyid_map; in kvm_drop_cpuid()
549 spin_lock(&vcpu->kvm->arch.phyid_map_lock); in kvm_drop_cpuid()
551 map->phys_map[cpuid].vcpu = NULL; in kvm_drop_cpuid()
555 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_drop_cpuid()
569 return map->phys_map[cpuid].vcpu; in kvm_get_vcpu_by_cpuid()
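kvm_set_cpuid()/kvm_drop_cpuid() maintain a table from guest physical CPU id to vCPU, guarded by phyid_map_lock, so that IPIs and other id-addressed operations can be routed to the right vCPU; kvm_get_vcpu_by_cpuid() then performs a lock-free lookup. A simplified user-space analogue of that map (names and sizes below are illustrative, not the kernel's):

    #include <pthread.h>
    #include <stddef.h>

    #define MAX_PHYID 256

    struct toy_vcpu;

    struct phyid_map {
        pthread_mutex_t lock;
        struct toy_vcpu *phys_map[MAX_PHYID];
    };

    /* returns 0 on success, -1 if the id is out of range or taken by another vCPU */
    static int set_cpuid(struct phyid_map *map, struct toy_vcpu *vcpu, unsigned id)
    {
        int ret = 0;

        if (id >= MAX_PHYID)
            return -1;

        pthread_mutex_lock(&map->lock);
        if (map->phys_map[id] && map->phys_map[id] != vcpu)
            ret = -1;                   /* already claimed by a different vCPU */
        else
            map->phys_map[id] = vcpu;   /* setting the same id twice is a no-op */
        pthread_mutex_unlock(&map->lock);

        return ret;
    }

    static struct toy_vcpu *vcpu_by_cpuid(struct phyid_map *map, unsigned id)
    {
        return id < MAX_PHYID ? map->phys_map[id] : NULL;   /* lock-free read */
    }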
572 static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val) in _kvm_getcsr() argument
575 struct loongarch_csrs *csr = vcpu->arch.csr; in _kvm_getcsr()
582 vcpu_load(vcpu); in _kvm_getcsr()
587 kvm_deliver_intr(vcpu); in _kvm_getcsr()
588 vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST; in _kvm_getcsr()
589 vcpu_put(vcpu); in _kvm_getcsr()
607 static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val) in _kvm_setcsr() argument
610 struct loongarch_csrs *csr = vcpu->arch.csr; in _kvm_setcsr()
616 return kvm_set_cpuid(vcpu, val); in _kvm_setcsr()
632 * After modifying the PMU CSR register value of the vcpu. in _kvm_setcsr()
644 kvm_make_request(KVM_REQ_PMU, vcpu); in _kvm_setcsr()
763 static int kvm_get_one_reg(struct kvm_vcpu *vcpu, in kvm_get_one_reg() argument
772 ret = _kvm_getcsr(vcpu, id, v); in kvm_get_one_reg()
777 *v = vcpu->arch.cpucfg[id]; in kvm_get_one_reg()
782 if (!kvm_guest_has_lbt(&vcpu->arch)) in kvm_get_one_reg()
787 *v = vcpu->arch.lbt.scr0; in kvm_get_one_reg()
790 *v = vcpu->arch.lbt.scr1; in kvm_get_one_reg()
793 *v = vcpu->arch.lbt.scr2; in kvm_get_one_reg()
796 *v = vcpu->arch.lbt.scr3; in kvm_get_one_reg()
799 *v = vcpu->arch.lbt.eflags; in kvm_get_one_reg()
802 *v = vcpu->arch.fpu.ftop; in kvm_get_one_reg()
812 *v = drdtime() + vcpu->kvm->arch.time_offset; in kvm_get_one_reg()
830 static int kvm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) in kvm_get_reg() argument
837 ret = kvm_get_one_reg(vcpu, reg, &v); in kvm_get_reg()
850 static int kvm_set_one_reg(struct kvm_vcpu *vcpu, in kvm_set_one_reg() argument
859 ret = _kvm_setcsr(vcpu, id, v); in kvm_set_one_reg()
866 vcpu->arch.cpucfg[id] = (u32)v; in kvm_set_one_reg()
868 vcpu->arch.max_pmu_csrid = in kvm_set_one_reg()
869 LOONGARCH_CSR_PERFCTRL0 + 2 * kvm_get_pmu_num(&vcpu->arch) + 1; in kvm_set_one_reg()
872 if (!kvm_guest_has_lbt(&vcpu->arch)) in kvm_set_one_reg()
877 vcpu->arch.lbt.scr0 = v; in kvm_set_one_reg()
880 vcpu->arch.lbt.scr1 = v; in kvm_set_one_reg()
883 vcpu->arch.lbt.scr2 = v; in kvm_set_one_reg()
886 vcpu->arch.lbt.scr3 = v; in kvm_set_one_reg()
889 vcpu->arch.lbt.eflags = v; in kvm_set_one_reg()
892 vcpu->arch.fpu.ftop = v; in kvm_set_one_reg()
903 * gftoffset is relative with board, not vcpu in kvm_set_one_reg()
906 if (vcpu->vcpu_id == 0) in kvm_set_one_reg()
907 vcpu->kvm->arch.time_offset = (signed long)(v - drdtime()); in kvm_set_one_reg()
910 vcpu->arch.st.guest_addr = 0; in kvm_set_one_reg()
911 memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending)); in kvm_set_one_reg()
912 memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear)); in kvm_set_one_reg()
915 * When vCPU reset, clear the ESTAT and GINTC registers in kvm_set_one_reg()
918 kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_GINTC, 0); in kvm_set_one_reg()
919 kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_ESTAT, 0); in kvm_set_one_reg()
934 static int kvm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) in kvm_set_reg() argument
949 return kvm_set_one_reg(vcpu, reg, v); in kvm_set_reg()
952 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) in kvm_arch_vcpu_ioctl_get_sregs() argument
957 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) in kvm_arch_vcpu_ioctl_set_sregs() argument
962 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in kvm_arch_vcpu_ioctl_get_regs() argument
966 for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++) in kvm_arch_vcpu_ioctl_get_regs()
967 regs->gpr[i] = vcpu->arch.gprs[i]; in kvm_arch_vcpu_ioctl_get_regs()
969 regs->pc = vcpu->arch.pc; in kvm_arch_vcpu_ioctl_get_regs()
974 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in kvm_arch_vcpu_ioctl_set_regs() argument
978 for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++) in kvm_arch_vcpu_ioctl_set_regs()
979 vcpu->arch.gprs[i] = regs->gpr[i]; in kvm_arch_vcpu_ioctl_set_regs()
981 vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */ in kvm_arch_vcpu_ioctl_set_regs()
982 vcpu->arch.pc = regs->pc; in kvm_arch_vcpu_ioctl_set_regs()
987 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_enable_cap() argument
994 static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu, in kvm_loongarch_cpucfg_has_attr() argument
1010 static int kvm_loongarch_pvtime_has_attr(struct kvm_vcpu *vcpu, in kvm_loongarch_pvtime_has_attr() argument
1013 if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME) in kvm_loongarch_pvtime_has_attr()
1020 static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu, in kvm_loongarch_vcpu_has_attr() argument
1027 ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr); in kvm_loongarch_vcpu_has_attr()
1030 ret = kvm_loongarch_pvtime_has_attr(vcpu, attr); in kvm_loongarch_vcpu_has_attr()
1039 static int kvm_loongarch_cpucfg_get_attr(struct kvm_vcpu *vcpu, in kvm_loongarch_cpucfg_get_attr() argument
1053 val = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK; in kvm_loongarch_cpucfg_get_attr()
1064 static int kvm_loongarch_pvtime_get_attr(struct kvm_vcpu *vcpu, in kvm_loongarch_pvtime_get_attr() argument
1070 if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME) in kvm_loongarch_pvtime_get_attr()
1074 gpa = vcpu->arch.st.guest_addr; in kvm_loongarch_pvtime_get_attr()
1081 static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu, in kvm_loongarch_vcpu_get_attr() argument
1088 ret = kvm_loongarch_cpucfg_get_attr(vcpu, attr); in kvm_loongarch_vcpu_get_attr()
1091 ret = kvm_loongarch_pvtime_get_attr(vcpu, attr); in kvm_loongarch_vcpu_get_attr()
1100 static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu, in kvm_loongarch_cpucfg_set_attr() argument
1105 struct kvm *kvm = vcpu->kvm; in kvm_loongarch_cpucfg_set_attr()
1127 static int kvm_loongarch_pvtime_set_attr(struct kvm_vcpu *vcpu, in kvm_loongarch_pvtime_set_attr() argument
1132 struct kvm *kvm = vcpu->kvm; in kvm_loongarch_pvtime_set_attr()
1134 if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME) in kvm_loongarch_pvtime_set_attr()
1145 vcpu->arch.st.guest_addr = gpa; in kvm_loongarch_pvtime_set_attr()
1156 vcpu->arch.st.guest_addr = gpa; in kvm_loongarch_pvtime_set_attr()
1157 vcpu->arch.st.last_steal = current->sched_info.run_delay; in kvm_loongarch_pvtime_set_attr()
1158 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); in kvm_loongarch_pvtime_set_attr()
1164 static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu, in kvm_loongarch_vcpu_set_attr() argument
1171 ret = kvm_loongarch_cpucfg_set_attr(vcpu, attr); in kvm_loongarch_vcpu_set_attr()
1174 ret = kvm_loongarch_pvtime_set_attr(vcpu, attr); in kvm_loongarch_vcpu_set_attr()
1189 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl() local
1195 * should be used. Since CSR registers owns by this vcpu, if switch in kvm_arch_vcpu_ioctl()
1199 * be clear in vcpu->arch.aux_inuse, and vcpu_load will check in kvm_arch_vcpu_ioctl()
1212 r = kvm_set_reg(vcpu, &reg); in kvm_arch_vcpu_ioctl()
1213 vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE; in kvm_arch_vcpu_ioctl()
1215 r = kvm_get_reg(vcpu, &reg); in kvm_arch_vcpu_ioctl()
1224 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); in kvm_arch_vcpu_ioctl()
1231 r = kvm_loongarch_vcpu_has_attr(vcpu, &attr); in kvm_arch_vcpu_ioctl()
1238 r = kvm_loongarch_vcpu_get_attr(vcpu, &attr); in kvm_arch_vcpu_ioctl()
1245 r = kvm_loongarch_vcpu_set_attr(vcpu, &attr); in kvm_arch_vcpu_ioctl()
1256 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) in kvm_arch_vcpu_ioctl_get_fpu() argument
1260 fpu->fcc = vcpu->arch.fpu.fcc; in kvm_arch_vcpu_ioctl_get_fpu()
1261 fpu->fcsr = vcpu->arch.fpu.fcsr; in kvm_arch_vcpu_ioctl_get_fpu()
1263 memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i], FPU_REG_WIDTH / 64); in kvm_arch_vcpu_ioctl_get_fpu()
1268 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) in kvm_arch_vcpu_ioctl_set_fpu() argument
1272 vcpu->arch.fpu.fcc = fpu->fcc; in kvm_arch_vcpu_ioctl_set_fpu()
1273 vcpu->arch.fpu.fcsr = fpu->fcsr; in kvm_arch_vcpu_ioctl_set_fpu()
1275 memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i], FPU_REG_WIDTH / 64); in kvm_arch_vcpu_ioctl_set_fpu()
1281 int kvm_own_lbt(struct kvm_vcpu *vcpu) in kvm_own_lbt() argument
1283 if (!kvm_guest_has_lbt(&vcpu->arch)) in kvm_own_lbt()
1287 if (!(vcpu->arch.aux_inuse & KVM_LARCH_LBT)) { in kvm_own_lbt()
1289 _restore_lbt(&vcpu->arch.lbt); in kvm_own_lbt()
1290 vcpu->arch.aux_inuse |= KVM_LARCH_LBT; in kvm_own_lbt()
1297 static void kvm_lose_lbt(struct kvm_vcpu *vcpu) in kvm_lose_lbt() argument
1300 if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) { in kvm_lose_lbt()
1301 _save_lbt(&vcpu->arch.lbt); in kvm_lose_lbt()
1303 vcpu->arch.aux_inuse &= ~KVM_LARCH_LBT; in kvm_lose_lbt()
1308 static void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr) in kvm_check_fcsr() argument
1315 kvm_own_lbt(vcpu); in kvm_check_fcsr()
1318 static void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu) in kvm_check_fcsr_alive() argument
1320 if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { in kvm_check_fcsr_alive()
1321 if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) in kvm_check_fcsr_alive()
1323 kvm_check_fcsr(vcpu, read_fcsr(LOONGARCH_FCSR0)); in kvm_check_fcsr_alive()
1327 static inline void kvm_lose_lbt(struct kvm_vcpu *vcpu) { } in kvm_lose_lbt() argument
1328 static inline void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr) { } in kvm_check_fcsr() argument
1329 static inline void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu) { } in kvm_check_fcsr_alive() argument
1333 void kvm_own_fpu(struct kvm_vcpu *vcpu) in kvm_own_fpu() argument
1341 kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr); in kvm_own_fpu()
1344 kvm_restore_fpu(&vcpu->arch.fpu); in kvm_own_fpu()
1345 vcpu->arch.aux_inuse |= KVM_LARCH_FPU; in kvm_own_fpu()
1346 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU); in kvm_own_fpu()
1353 int kvm_own_lsx(struct kvm_vcpu *vcpu) in kvm_own_lsx() argument
1355 if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch)) in kvm_own_lsx()
1361 kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr); in kvm_own_lsx()
1363 switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { in kvm_own_lsx()
1369 _restore_lsx_upper(&vcpu->arch.fpu); in kvm_own_lsx()
1375 kvm_restore_lsx(&vcpu->arch.fpu); in kvm_own_lsx()
1379 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX); in kvm_own_lsx()
1380 vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU; in kvm_own_lsx()
1389 int kvm_own_lasx(struct kvm_vcpu *vcpu) in kvm_own_lasx() argument
1391 …if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch) || !kvm_guest_has_lasx(&vcp… in kvm_own_lasx()
1396 kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr); in kvm_own_lasx()
1398 switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) { in kvm_own_lasx()
1402 _restore_lasx_upper(&vcpu->arch.fpu); in kvm_own_lasx()
1406 _restore_lsx_upper(&vcpu->arch.fpu); in kvm_own_lasx()
1407 _restore_lasx_upper(&vcpu->arch.fpu); in kvm_own_lasx()
1411 kvm_restore_lasx(&vcpu->arch.fpu); in kvm_own_lasx()
1415 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX); in kvm_own_lasx()
1416 vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU; in kvm_own_lasx()
1424 void kvm_lose_fpu(struct kvm_vcpu *vcpu) in kvm_lose_fpu() argument
1428 kvm_check_fcsr_alive(vcpu); in kvm_lose_fpu()
1429 if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) { in kvm_lose_fpu()
1430 kvm_save_lasx(&vcpu->arch.fpu); in kvm_lose_fpu()
1431 vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX); in kvm_lose_fpu()
1432 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LASX); in kvm_lose_fpu()
1436 } else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) { in kvm_lose_fpu()
1437 kvm_save_lsx(&vcpu->arch.fpu); in kvm_lose_fpu()
1438 vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU); in kvm_lose_fpu()
1439 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX); in kvm_lose_fpu()
1443 } else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { in kvm_lose_fpu()
1444 kvm_save_fpu(&vcpu->arch.fpu); in kvm_lose_fpu()
1445 vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU; in kvm_lose_fpu()
1446 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU); in kvm_lose_fpu()
1451 kvm_lose_lbt(vcpu); in kvm_lose_fpu()
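The vector ownership helpers exploit the nesting of the register files (FPU ⊂ LSX ⊂ LASX): if a narrower unit is already live on the CPU, only the upper halves of the wider registers need restoring, and kvm_lose_fpu() saves whichever superset is active and clears all the corresponding bits at once. A compact sketch of the restore decision in kvm_own_lasx(), with simplified flag names (the restore actions are placeholders):

    #define F_FPU  (1u << 0)
    #define F_LSX  (1u << 1)
    #define F_LASX (1u << 2)

    static void own_lasx(unsigned *aux_inuse)
    {
        switch (*aux_inuse & (F_FPU | F_LSX)) {
        case F_FPU | F_LSX:
            /* 128-bit state already live: restore only bits 255..128 */
            break;
        case F_FPU:
            /* scalar FP live: restore LSX upper halves, then LASX upper halves */
            break;
        default:
            /* nothing live: restore the full 256-bit register file */
            break;
        }
        *aux_inuse |= F_FPU | F_LSX | F_LASX;
    }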
1456 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) in kvm_vcpu_ioctl_interrupt() argument
1461 kvm_queue_irq(vcpu, intr); in kvm_vcpu_ioctl_interrupt()
1463 kvm_dequeue_irq(vcpu, -intr); in kvm_vcpu_ioctl_interrupt()
1469 kvm_vcpu_kick(vcpu); in kvm_vcpu_ioctl_interrupt()
1478 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_async_ioctl() local
1486 kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq); in kvm_arch_vcpu_async_ioctl()
1488 return kvm_vcpu_ioctl_interrupt(vcpu, &irq); in kvm_arch_vcpu_async_ioctl()
1499 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_create() argument
1504 vcpu->arch.vpid = 0; in kvm_arch_vcpu_create()
1505 vcpu->arch.flush_gpa = INVALID_GPA; in kvm_arch_vcpu_create()
1507 hrtimer_setup(&vcpu->arch.swtimer, kvm_swtimer_wakeup, CLOCK_MONOTONIC, in kvm_arch_vcpu_create()
1511 vcpu->arch.kvm_pgd = __pa(vcpu->kvm->arch.pgd); in kvm_arch_vcpu_create()
1517 vcpu->arch.host_pgd = (unsigned long)vcpu->kvm->mm->pgd; in kvm_arch_vcpu_create()
1519 vcpu->arch.handle_exit = kvm_handle_exit; in kvm_arch_vcpu_create()
1520 vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry; in kvm_arch_vcpu_create()
1521 vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL); in kvm_arch_vcpu_create()
1522 if (!vcpu->arch.csr) in kvm_arch_vcpu_create()
1529 vcpu->arch.host_ecfg = (read_csr_ecfg() & CSR_ECFG_VS); in kvm_arch_vcpu_create()
1532 vcpu->arch.last_sched_cpu = -1; in kvm_arch_vcpu_create()
1535 spin_lock_init(&vcpu->arch.ipi_state.lock); in kvm_arch_vcpu_create()
1541 kvm_init_timer(vcpu, timer_hz); in kvm_arch_vcpu_create()
1544 csr = vcpu->arch.csr; in kvm_arch_vcpu_create()
1548 kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id); in kvm_arch_vcpu_create()
1557 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_postcreate() argument
1561 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_destroy() argument
1566 hrtimer_cancel(&vcpu->arch.swtimer); in kvm_arch_vcpu_destroy()
1567 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); in kvm_arch_vcpu_destroy()
1568 kvm_drop_cpuid(vcpu); in kvm_arch_vcpu_destroy()
1569 kfree(vcpu->arch.csr); in kvm_arch_vcpu_destroy()
1572 * If the vCPU is freed and reused as another vCPU, we don't want the in kvm_arch_vcpu_destroy()
1576 context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu); in kvm_arch_vcpu_destroy()
1577 if (context->last_vcpu == vcpu) in kvm_arch_vcpu_destroy()
1582 static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) in _kvm_vcpu_load() argument
1586 struct loongarch_csrs *csr = vcpu->arch.csr; in _kvm_vcpu_load()
1592 migrated = (vcpu->arch.last_sched_cpu != cpu); in _kvm_vcpu_load()
1595 * Was this the last vCPU to run on this CPU? in _kvm_vcpu_load()
1596 * If not, any old guest state from this vCPU will have been clobbered. in _kvm_vcpu_load()
1598 context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu); in _kvm_vcpu_load()
1599 if (migrated || (context->last_vcpu != vcpu)) in _kvm_vcpu_load()
1600 vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE; in _kvm_vcpu_load()
1601 context->last_vcpu = vcpu; in _kvm_vcpu_load()
1604 kvm_restore_timer(vcpu); in _kvm_vcpu_load()
1605 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); in _kvm_vcpu_load()
1608 kvm_restore_pmu(vcpu); in _kvm_vcpu_load()
1611 if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE) in _kvm_vcpu_load()
1614 write_csr_gcntc((ulong)vcpu->kvm->arch.time_offset); in _kvm_vcpu_load()
1667 * prevents a SC on the next vCPU from succeeding by matching a LL on in _kvm_vcpu_load()
1668 * the previous vCPU. in _kvm_vcpu_load()
1670 if (vcpu->kvm->created_vcpus > 1) in _kvm_vcpu_load()
1673 vcpu->arch.aux_inuse |= KVM_LARCH_HWCSR_USABLE; in _kvm_vcpu_load()
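_kvm_vcpu_load() keeps the hardware CSR image valid only while the same vCPU stays on the same physical CPU: after a migration, or if another vCPU ran on this CPU in between, KVM_LARCH_HWCSR_USABLE is cleared so the software CSR copy is written back to hardware before the next entry. A simplified sketch of that check (toy_context/toy_vcpu are illustrative stand-ins):

    struct toy_vcpu;

    struct toy_context {
        struct toy_vcpu *last_vcpu;     /* last vCPU that ran on this physical CPU */
    };

    #define HWCSR_USABLE (1u << 0)

    static void vcpu_load_csr(struct toy_vcpu *vcpu, struct toy_context *ctx,
                              int cpu, int last_cpu, unsigned *aux_inuse)
    {
        int migrated = (last_cpu != cpu);

        if (migrated || ctx->last_vcpu != vcpu)
            *aux_inuse &= ~HWCSR_USABLE;    /* hardware copy is stale */
        ctx->last_vcpu = vcpu;

        if (*aux_inuse & HWCSR_USABLE)
            return;                         /* fast path: CSRs still live in hardware */

        /* ... write the software CSR image back to the hardware CSRs ... */
        *aux_inuse |= HWCSR_USABLE;
    }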
1678 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) in kvm_arch_vcpu_load() argument
1684 _kvm_vcpu_load(vcpu, cpu); in kvm_arch_vcpu_load()
1688 static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu) in _kvm_vcpu_put() argument
1690 struct loongarch_csrs *csr = vcpu->arch.csr; in _kvm_vcpu_put()
1692 kvm_lose_fpu(vcpu); in _kvm_vcpu_put()
1700 if (vcpu->arch.aux_inuse & KVM_LARCH_SWCSR_LATEST) in _kvm_vcpu_put()
1751 vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST; in _kvm_vcpu_put()
1754 kvm_save_timer(vcpu); in _kvm_vcpu_put()
1761 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_put() argument
1768 vcpu->arch.last_sched_cpu = cpu; in kvm_arch_vcpu_put()
1771 _kvm_vcpu_put(vcpu, cpu); in kvm_arch_vcpu_put()
1775 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_ioctl_run() argument
1778 struct kvm_run *run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
1780 if (vcpu->mmio_needed) { in kvm_arch_vcpu_ioctl_run()
1781 if (!vcpu->mmio_is_write) in kvm_arch_vcpu_ioctl_run()
1782 kvm_complete_mmio_read(vcpu, run); in kvm_arch_vcpu_ioctl_run()
1783 vcpu->mmio_needed = 0; in kvm_arch_vcpu_ioctl_run()
1788 kvm_complete_user_service(vcpu, run); in kvm_arch_vcpu_ioctl_run()
1792 kvm_complete_iocsr_read(vcpu, run); in kvm_arch_vcpu_ioctl_run()
1796 if (!vcpu->wants_to_run) in kvm_arch_vcpu_ioctl_run()
1802 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_run()
1803 kvm_sigset_activate(vcpu); in kvm_arch_vcpu_ioctl_run()
1804 r = kvm_pre_enter_guest(vcpu); in kvm_arch_vcpu_ioctl_run()
1810 trace_kvm_enter(vcpu); in kvm_arch_vcpu_ioctl_run()
1811 r = kvm_loongarch_ops->enter_guest(run, vcpu); in kvm_arch_vcpu_ioctl_run()
1813 trace_kvm_out(vcpu); in kvm_arch_vcpu_ioctl_run()
1820 kvm_sigset_deactivate(vcpu); in kvm_arch_vcpu_ioctl_run()
1821 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_run()
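kvm_arch_vcpu_ioctl_run() first completes whatever MMIO/IOCSR read caused the previous exit, then loops: run the pre-entry checks, enter the guest, and keep going as long as the result says to resume the guest. Roughly, as a self-contained sketch (the RESUME_* values and stubs below are stand-ins for the real entry/exit machinery):

    enum { RESUME_GUEST, RESUME_HOST };

    struct toy_vcpu { int mmio_needed; };

    static int pre_enter_guest(struct toy_vcpu *v) { (void)v; return RESUME_GUEST; } /* stub */
    static int enter_guest(struct toy_vcpu *v)     { (void)v; return RESUME_HOST;  } /* stub */

    static int toy_vcpu_run(struct toy_vcpu *vcpu)
    {
        int ret = RESUME_GUEST;

        if (vcpu->mmio_needed)
            vcpu->mmio_needed = 0;          /* finish the interrupted MMIO read first */

        while (ret == RESUME_GUEST) {
            ret = pre_enter_guest(vcpu);    /* requests, interrupts, vpid, PMU */
            if (ret != RESUME_GUEST)
                break;
            ret = enter_guest(vcpu);        /* runs the guest; the exit handler,
                                             * invoked on the way out, decides
                                             * whether to re-enter or return */
        }

        return ret;
    }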