1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
7 #include <linux/entry-kvm.h>
39 context = this_cpu_ptr(vcpu->kvm->arch.vmcs); in kvm_save_host_pmu()
40 context->perf_cntr[0] = read_csr_perfcntr0(); in kvm_save_host_pmu()
41 context->perf_cntr[1] = read_csr_perfcntr1(); in kvm_save_host_pmu()
42 context->perf_cntr[2] = read_csr_perfcntr2(); in kvm_save_host_pmu()
43 context->perf_cntr[3] = read_csr_perfcntr3(); in kvm_save_host_pmu()
44 context->perf_ctrl[0] = write_csr_perfctrl0(0); in kvm_save_host_pmu()
45 context->perf_ctrl[1] = write_csr_perfctrl1(0); in kvm_save_host_pmu()
46 context->perf_ctrl[2] = write_csr_perfctrl2(0); in kvm_save_host_pmu()
47 context->perf_ctrl[3] = write_csr_perfctrl3(0); in kvm_save_host_pmu()
54 context = this_cpu_ptr(vcpu->kvm->arch.vmcs); in kvm_restore_host_pmu()
55 write_csr_perfcntr0(context->perf_cntr[0]); in kvm_restore_host_pmu()
56 write_csr_perfcntr1(context->perf_cntr[1]); in kvm_restore_host_pmu()
57 write_csr_perfcntr2(context->perf_cntr[2]); in kvm_restore_host_pmu()
58 write_csr_perfcntr3(context->perf_cntr[3]); in kvm_restore_host_pmu()
59 write_csr_perfctrl0(context->perf_ctrl[0]); in kvm_restore_host_pmu()
60 write_csr_perfctrl1(context->perf_ctrl[1]); in kvm_restore_host_pmu()
61 write_csr_perfctrl2(context->perf_ctrl[2]); in kvm_restore_host_pmu()
62 write_csr_perfctrl3(context->perf_ctrl[3]); in kvm_restore_host_pmu()
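
The two helpers above spill and reload the four host performance counter/control CSR pairs into a per-CPU context reached through vcpu->kvm->arch.vmcs. A minimal sketch of what that context has to carry for this to work (field names inferred from the accesses above; the real per-CPU struct also holds other state such as the last_vcpu pointer used later in this file):

    #include <stdint.h>

    struct kvm_vcpu;                     /* opaque here; only a pointer is stored */

    /* Per-physical-CPU save area, one instance per host CPU (vmcs is a
     * per-cpu pointer).  Array sizes follow the four perfcntr/perfctrl
     * CSRs saved and restored above. */
    struct kvm_context_sketch {
            struct kvm_vcpu *last_vcpu;  /* last vCPU that ran here, see _kvm_vcpu_load() */
            uint64_t perf_cntr[4];       /* host PERFCNTR0..3 */
            uint64_t perf_ctrl[4];       /* host PERFCTRL0..3, zeroed while the guest owns the PMU */
    };
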
68 struct loongarch_csrs *csr = vcpu->arch.csr; in kvm_save_guest_pmu()
82 struct loongarch_csrs *csr = vcpu->arch.csr; in kvm_restore_guest_pmu()
98 if (!kvm_guest_has_pmu(&vcpu->arch)) in kvm_own_pmu()
99 return -EINVAL; in kvm_own_pmu()
103 /* Set PM0-PM(num) to guest */ in kvm_own_pmu()
105 val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT; in kvm_own_pmu()
116 struct loongarch_csrs *csr = vcpu->arch.csr; in kvm_lose_pmu()
118 if (!(vcpu->arch.aux_inuse & KVM_LARCH_PMU)) in kvm_lose_pmu()
136 vcpu->arch.aux_inuse &= ~KVM_LARCH_PMU; in kvm_lose_pmu()
143 if ((vcpu->arch.aux_inuse & KVM_LARCH_PMU)) in kvm_restore_pmu()
151 vcpu->arch.aux_inuse |= KVM_LARCH_PMU; in kvm_check_pmu()
159 gpa_t gpa; in kvm_update_stolen_time() local
164 ghc = &vcpu->arch.st.cache; in kvm_update_stolen_time()
165 gpa = vcpu->arch.st.guest_addr; in kvm_update_stolen_time()
166 if (!(gpa & KVM_STEAL_PHYS_VALID)) in kvm_update_stolen_time()
169 gpa &= KVM_STEAL_PHYS_MASK; in kvm_update_stolen_time()
170 slots = kvm_memslots(vcpu->kvm); in kvm_update_stolen_time()
171 if (slots->generation != ghc->generation || gpa != ghc->gpa) { in kvm_update_stolen_time()
172 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) { in kvm_update_stolen_time()
173 ghc->gpa = INVALID_GPA; in kvm_update_stolen_time()
178 st = (struct kvm_steal_time __user *)ghc->hva; in kvm_update_stolen_time()
179 unsafe_get_user(version, &st->version, out); in kvm_update_stolen_time()
180 if (version & 1) in kvm_update_stolen_time()
181 version += 1; /* first time write, random junk */ in kvm_update_stolen_time()
183 version += 1; in kvm_update_stolen_time()
184 unsafe_put_user(version, &st->version, out); in kvm_update_stolen_time()
187 unsafe_get_user(steal, &st->steal, out); in kvm_update_stolen_time()
188 steal += current->sched_info.run_delay - vcpu->arch.st.last_steal; in kvm_update_stolen_time()
189 vcpu->arch.st.last_steal = current->sched_info.run_delay; in kvm_update_stolen_time()
190 unsafe_put_user(steal, &st->steal, out); in kvm_update_stolen_time()
193 version += 1; in kvm_update_stolen_time()
194 unsafe_put_user(version, &st->version, out); in kvm_update_stolen_time()
196 mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa)); in kvm_update_stolen_time()
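
kvm_update_stolen_time() brackets its writes with two version increments, so the guest observes an odd version while the record is being rewritten. A guest-side read loop, as a rough sketch (the struct below only mirrors the st->version / st->steal fields touched above, not the full uapi layout):

    #include <stdint.h>

    struct steal_time_sketch {
            uint64_t steal;          /* accumulated run_delay, updated by the host */
            uint32_t version;        /* even = stable, odd = update in progress */
    };

    /* Retry until a stable, even version is observed both before and after
     * reading the payload, mirroring the version += 1 ... version += 1
     * bracket in kvm_update_stolen_time(). */
    static uint64_t read_steal_time(volatile struct steal_time_sketch *st)
    {
            uint32_t v;
            uint64_t steal;

            do {
                    v = st->version;
                    __sync_synchronize();
                    steal = st->steal;
                    __sync_synchronize();
            } while ((v & 1) || v != st->version);

            return steal;
    }
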
200 * kvm_check_requests - check and handle pending vCPU requests
211 vcpu->arch.vpid = 0; /* Drop vpid for this vCPU */ in kvm_check_requests()
226 if (vcpu->arch.flush_gpa != INVALID_GPA) { in kvm_late_check_requests()
227 kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa); in kvm_late_check_requests()
228 vcpu->arch.flush_gpa = INVALID_GPA; in kvm_late_check_requests()
252 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_enter_guest_check()
254 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_enter_guest_check()
282 smp_store_mb(vcpu->mode, IN_GUEST_MODE); in kvm_pre_enter_guest()
292 vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY); in kvm_pre_enter_guest()
294 vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST; in kvm_pre_enter_guest()
298 smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE); in kvm_pre_enter_guest()
300 ret = -EAGAIN; in kvm_pre_enter_guest()
308 * Return 1 for resume guest and "<= 0" for resume host.
313 unsigned long estat = vcpu->arch.host_estat; in kvm_handle_exit()
317 vcpu->mode = OUTSIDE_GUEST_MODE; in kvm_handle_exit()
320 run->exit_reason = KVM_EXIT_UNKNOWN; in kvm_handle_exit()
333 ++vcpu->stat.int_exits; in kvm_handle_exit()
353 return !!(vcpu->arch.irq_pending) && in kvm_arch_vcpu_runnable()
354 vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE; in kvm_arch_vcpu_runnable()
375 return -EINVAL; in kvm_arch_vcpu_ioctl_translate()
385 kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT) & (1 << INT_TI); in kvm_cpu_has_pending_timer()
396 kvm_debug("\tPC = 0x%08lx\n", vcpu->arch.pc); in kvm_arch_vcpu_dump_regs()
397 kvm_debug("\tExceptions: %08lx\n", vcpu->arch.irq_pending); in kvm_arch_vcpu_dump_regs()
401 vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1], in kvm_arch_vcpu_dump_regs()
402 vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]); in kvm_arch_vcpu_dump_regs()
417 *mp_state = vcpu->arch.mp_state; in kvm_arch_vcpu_ioctl_get_mpstate()
427 switch (mp_state->mp_state) { in kvm_arch_vcpu_ioctl_set_mpstate()
429 vcpu->arch.mp_state = *mp_state; in kvm_arch_vcpu_ioctl_set_mpstate()
432 ret = -EINVAL; in kvm_arch_vcpu_ioctl_set_mpstate()
441 if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) in kvm_arch_vcpu_ioctl_set_guest_debug()
442 return -EINVAL; in kvm_arch_vcpu_ioctl_set_guest_debug()
444 if (dbg->control & KVM_GUESTDBG_ENABLE) in kvm_arch_vcpu_ioctl_set_guest_debug()
445 vcpu->guest_debug = dbg->control; in kvm_arch_vcpu_ioctl_set_guest_debug()
447 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
456 struct loongarch_csrs *csr = vcpu->arch.csr; in kvm_set_cpuid()
459 return -EINVAL; in kvm_set_cpuid()
461 map = vcpu->kvm->arch.phyid_map; in kvm_set_cpuid()
464 spin_lock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
465 if ((cpuid < KVM_MAX_PHYID) && map->phys_map[cpuid].enabled) { in kvm_set_cpuid()
468 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
476 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
477 return -EINVAL; in kvm_set_cpuid()
480 if (map->phys_map[val].enabled) { in kvm_set_cpuid()
482 if (vcpu == map->phys_map[val].vcpu) { in kvm_set_cpuid()
483 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
491 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
492 return -EINVAL; in kvm_set_cpuid()
496 map->phys_map[val].enabled = true; in kvm_set_cpuid()
497 map->phys_map[val].vcpu = vcpu; in kvm_set_cpuid()
498 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
507 struct loongarch_csrs *csr = vcpu->arch.csr; in kvm_drop_cpuid()
509 map = vcpu->kvm->arch.phyid_map; in kvm_drop_cpuid()
515 spin_lock(&vcpu->kvm->arch.phyid_map_lock); in kvm_drop_cpuid()
516 if (map->phys_map[cpuid].enabled) { in kvm_drop_cpuid()
517 map->phys_map[cpuid].vcpu = NULL; in kvm_drop_cpuid()
518 map->phys_map[cpuid].enabled = false; in kvm_drop_cpuid()
521 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_drop_cpuid()
531 map = kvm->arch.phyid_map; in kvm_get_vcpu_by_cpuid()
532 if (!map->phys_map[cpuid].enabled) in kvm_get_vcpu_by_cpuid()
535 return map->phys_map[cpuid].vcpu; in kvm_get_vcpu_by_cpuid()
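
kvm_set_cpuid(), kvm_drop_cpuid() and kvm_get_vcpu_by_cpuid() all index kvm->arch.phyid_map under phyid_map_lock. A sketch of the map they assume, one slot per guest physical CPUID (the bound and field names are inferred from the accesses above, not taken from the header):

    #include <stdbool.h>

    #define KVM_MAX_PHYID_SKETCH 256          /* illustrative bound, not the real value */

    struct kvm_vcpu;                          /* opaque owner pointer */

    struct kvm_phyid_info_sketch {
            struct kvm_vcpu *vcpu;            /* vCPU bound to this CPUID, if enabled */
            bool enabled;
    };

    struct kvm_phyid_map_sketch {
            struct kvm_phyid_info_sketch phys_map[KVM_MAX_PHYID_SKETCH];
    };
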
541 struct loongarch_csrs *csr = vcpu->arch.csr; in _kvm_getcsr()
544 return -EINVAL; in _kvm_getcsr()
554 vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST; in _kvm_getcsr()
576 struct loongarch_csrs *csr = vcpu->arch.csr; in _kvm_setcsr()
579 return -EINVAL; in _kvm_setcsr()
619 return -EINVAL; in _kvm_get_cpucfg_mask()
690 return -EINVAL; in kvm_check_cpucfg()
696 return -EINVAL; in kvm_check_cpucfg()
699 return -EINVAL; in kvm_check_cpucfg()
702 return -EINVAL; in kvm_check_cpucfg()
705 return -EINVAL; in kvm_check_cpucfg()
711 return -EINVAL; in kvm_check_cpucfg()
713 return -EINVAL; in kvm_check_cpucfg()
715 return -EINVAL; in kvm_check_cpucfg()
731 u64 type = reg->id & KVM_REG_LOONGARCH_MASK; in kvm_get_one_reg()
735 id = KVM_GET_IOC_CSR_IDX(reg->id); in kvm_get_one_reg()
739 id = KVM_GET_IOC_CPUCFG_IDX(reg->id); in kvm_get_one_reg()
741 *v = vcpu->arch.cpucfg[id]; in kvm_get_one_reg()
743 ret = -EINVAL; in kvm_get_one_reg()
746 if (!kvm_guest_has_lbt(&vcpu->arch)) in kvm_get_one_reg()
747 return -ENXIO; in kvm_get_one_reg()
749 switch (reg->id) { in kvm_get_one_reg()
751 *v = vcpu->arch.lbt.scr0; in kvm_get_one_reg()
754 *v = vcpu->arch.lbt.scr1; in kvm_get_one_reg()
757 *v = vcpu->arch.lbt.scr2; in kvm_get_one_reg()
760 *v = vcpu->arch.lbt.scr3; in kvm_get_one_reg()
763 *v = vcpu->arch.lbt.eflags; in kvm_get_one_reg()
766 *v = vcpu->arch.fpu.ftop; in kvm_get_one_reg()
769 ret = -EINVAL; in kvm_get_one_reg()
774 switch (reg->id) { in kvm_get_one_reg()
776 *v = drdtime() + vcpu->kvm->arch.time_offset; in kvm_get_one_reg()
782 ret = -EINVAL; in kvm_get_one_reg()
787 ret = -EINVAL; in kvm_get_one_reg()
797 u64 v, size = reg->id & KVM_REG_SIZE_MASK; in kvm_get_reg()
804 ret = put_user(v, (u64 __user *)(long)reg->addr); in kvm_get_reg()
807 ret = -EINVAL; in kvm_get_reg()
818 u64 type = reg->id & KVM_REG_LOONGARCH_MASK; in kvm_set_one_reg()
822 id = KVM_GET_IOC_CSR_IDX(reg->id); in kvm_set_one_reg()
826 id = KVM_GET_IOC_CPUCFG_IDX(reg->id); in kvm_set_one_reg()
830 vcpu->arch.cpucfg[id] = (u32)v; in kvm_set_one_reg()
832 vcpu->arch.max_pmu_csrid = in kvm_set_one_reg()
833 LOONGARCH_CSR_PERFCTRL0 + 2 * kvm_get_pmu_num(&vcpu->arch) + 1; in kvm_set_one_reg()
836 if (!kvm_guest_has_lbt(&vcpu->arch)) in kvm_set_one_reg()
837 return -ENXIO; in kvm_set_one_reg()
839 switch (reg->id) { in kvm_set_one_reg()
841 vcpu->arch.lbt.scr0 = v; in kvm_set_one_reg()
844 vcpu->arch.lbt.scr1 = v; in kvm_set_one_reg()
847 vcpu->arch.lbt.scr2 = v; in kvm_set_one_reg()
850 vcpu->arch.lbt.scr3 = v; in kvm_set_one_reg()
853 vcpu->arch.lbt.eflags = v; in kvm_set_one_reg()
856 vcpu->arch.fpu.ftop = v; in kvm_set_one_reg()
859 ret = -EINVAL; in kvm_set_one_reg()
864 switch (reg->id) { in kvm_set_one_reg()
870 if (vcpu->vcpu_id == 0) in kvm_set_one_reg()
871 vcpu->kvm->arch.time_offset = (signed long)(v - drdtime()); in kvm_set_one_reg()
874 vcpu->arch.st.guest_addr = 0; in kvm_set_one_reg()
875 memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending)); in kvm_set_one_reg()
876 memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear)); in kvm_set_one_reg()
879 ret = -EINVAL; in kvm_set_one_reg()
884 ret = -EINVAL; in kvm_set_one_reg()
894 u64 v, size = reg->id & KVM_REG_SIZE_MASK; in kvm_set_reg()
898 ret = get_user(v, (u64 __user *)(long)reg->addr); in kvm_set_reg()
903 return -EINVAL; in kvm_set_reg()
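
kvm_get_reg()/kvm_set_reg() above back the generic KVM_GET_ONE_REG/KVM_SET_ONE_REG vCPU ioctls: the register id selects CSR, CPUCFG, LBT or KVM-special registers, and addr points at a u64 in userspace. A hedged userspace sketch, assuming a LoongArch host and the KVM_REG_LOONGARCH_COUNTER id from the arch uapi header:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Read the guest stable-counter value handled by the
     * KVM_REG_LOONGARCH_COUNTER case in kvm_get_one_reg() above. */
    static int get_guest_counter(int vcpu_fd, uint64_t *val)
    {
            struct kvm_one_reg reg = {
                    .id   = KVM_REG_LOONGARCH_COUNTER,
                    .addr = (uint64_t)(unsigned long)val,
            };

            return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
    }

    /* Write it back; on vCPU 0 this also updates the VM-wide time offset. */
    static int set_guest_counter(int vcpu_fd, uint64_t val)
    {
            struct kvm_one_reg reg = {
                    .id   = KVM_REG_LOONGARCH_COUNTER,
                    .addr = (uint64_t)(unsigned long)&val,
            };

            return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
    }
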
911 return -ENOIOCTLCMD; in kvm_arch_vcpu_ioctl_get_sregs()
916 return -ENOIOCTLCMD; in kvm_arch_vcpu_ioctl_set_sregs()
923 for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++) in kvm_arch_vcpu_ioctl_get_regs()
924 regs->gpr[i] = vcpu->arch.gprs[i]; in kvm_arch_vcpu_ioctl_get_regs()
926 regs->pc = vcpu->arch.pc; in kvm_arch_vcpu_ioctl_get_regs()
935 for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++) in kvm_arch_vcpu_ioctl_set_regs()
936 vcpu->arch.gprs[i] = regs->gpr[i]; in kvm_arch_vcpu_ioctl_set_regs()
938 vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */ in kvm_arch_vcpu_ioctl_set_regs()
939 vcpu->arch.pc = regs->pc; in kvm_arch_vcpu_ioctl_set_regs()
948 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
954 switch (attr->attr) { in kvm_loongarch_cpucfg_has_attr()
961 return -ENXIO; in kvm_loongarch_cpucfg_has_attr()
964 return -ENXIO; in kvm_loongarch_cpucfg_has_attr()
971 || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA) in kvm_loongarch_pvtime_has_attr()
972 return -ENXIO; in kvm_loongarch_pvtime_has_attr()
980 int ret = -ENXIO; in kvm_loongarch_vcpu_has_attr()
982 switch (attr->group) { in kvm_loongarch_vcpu_has_attr()
1001 uint64_t __user *uaddr = (uint64_t __user *)attr->addr; in kvm_loongarch_cpucfg_get_attr()
1003 switch (attr->attr) { in kvm_loongarch_cpucfg_get_attr()
1004 case 0 ... (KVM_MAX_CPUCFG_REGS - 1): in kvm_loongarch_cpucfg_get_attr()
1005 ret = _kvm_get_cpucfg_mask(attr->attr, &val); in kvm_loongarch_cpucfg_get_attr()
1010 val = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK; in kvm_loongarch_cpucfg_get_attr()
1013 return -ENXIO; in kvm_loongarch_cpucfg_get_attr()
1024 u64 gpa; in kvm_loongarch_pvtime_get_attr() local
1025 u64 __user *user = (u64 __user *)attr->addr; in kvm_loongarch_pvtime_get_attr()
1028 || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA) in kvm_loongarch_pvtime_get_attr()
1029 return -ENXIO; in kvm_loongarch_pvtime_get_attr()
1031 gpa = vcpu->arch.st.guest_addr; in kvm_loongarch_pvtime_get_attr()
1032 if (put_user(gpa, user)) in kvm_loongarch_pvtime_get_attr()
1033 return -EFAULT; in kvm_loongarch_pvtime_get_attr()
1041 int ret = -ENXIO; in kvm_loongarch_vcpu_get_attr()
1043 switch (attr->group) { in kvm_loongarch_vcpu_get_attr()
1061 u64 __user *user = (u64 __user *)attr->addr; in kvm_loongarch_cpucfg_set_attr()
1062 struct kvm *kvm = vcpu->kvm; in kvm_loongarch_cpucfg_set_attr()
1064 switch (attr->attr) { in kvm_loongarch_cpucfg_set_attr()
1067 return -EFAULT; in kvm_loongarch_cpucfg_set_attr()
1071 return -EINVAL; in kvm_loongarch_cpucfg_set_attr()
1074 if ((kvm->arch.pv_features & LOONGARCH_PV_FEAT_UPDATED) in kvm_loongarch_cpucfg_set_attr()
1075 && ((kvm->arch.pv_features & valid) != val)) in kvm_loongarch_cpucfg_set_attr()
1076 return -EINVAL; in kvm_loongarch_cpucfg_set_attr()
1077 kvm->arch.pv_features = val | LOONGARCH_PV_FEAT_UPDATED; in kvm_loongarch_cpucfg_set_attr()
1080 return -ENXIO; in kvm_loongarch_cpucfg_set_attr()
1088 u64 gpa, __user *user = (u64 __user *)attr->addr; in kvm_loongarch_pvtime_set_attr() local
1089 struct kvm *kvm = vcpu->kvm; in kvm_loongarch_pvtime_set_attr()
1092 || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA) in kvm_loongarch_pvtime_set_attr()
1093 return -ENXIO; in kvm_loongarch_pvtime_set_attr()
1095 if (get_user(gpa, user)) in kvm_loongarch_pvtime_set_attr()
1096 return -EFAULT; in kvm_loongarch_pvtime_set_attr()
1098 if (gpa & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID)) in kvm_loongarch_pvtime_set_attr()
1099 return -EINVAL; in kvm_loongarch_pvtime_set_attr()
1101 if (!(gpa & KVM_STEAL_PHYS_VALID)) { in kvm_loongarch_pvtime_set_attr()
1102 vcpu->arch.st.guest_addr = gpa; in kvm_loongarch_pvtime_set_attr()
1107 idx = srcu_read_lock(&kvm->srcu); in kvm_loongarch_pvtime_set_attr()
1108 if (kvm_is_error_hva(gfn_to_hva(kvm, gpa >> PAGE_SHIFT))) in kvm_loongarch_pvtime_set_attr()
1109 ret = -EINVAL; in kvm_loongarch_pvtime_set_attr()
1110 srcu_read_unlock(&kvm->srcu, idx); in kvm_loongarch_pvtime_set_attr()
1113 vcpu->arch.st.guest_addr = gpa; in kvm_loongarch_pvtime_set_attr()
1114 vcpu->arch.st.last_steal = current->sched_info.run_delay; in kvm_loongarch_pvtime_set_attr()
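
The PVTIME attribute handlers above are reached through the vCPU device-attribute ioctls, which is how userspace saves and restores the steal-time address across migration (the guest itself normally programs it via hypercall). A hedged sketch, assuming the KVM_LOONGARCH_VCPU_PVTIME_CTRL group and KVM_LOONGARCH_VCPU_PVTIME_GPA attribute from the LoongArch uapi header:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Fetch the current steal-time GPA (valid bit included) from the vCPU. */
    static int save_pvtime_gpa(int vcpu_fd, uint64_t *gpa)
    {
            struct kvm_device_attr attr = {
                    .group = KVM_LOONGARCH_VCPU_PVTIME_CTRL,
                    .attr  = KVM_LOONGARCH_VCPU_PVTIME_GPA,
                    .addr  = (uint64_t)(unsigned long)gpa,
            };

            return ioctl(vcpu_fd, KVM_GET_DEVICE_ATTR, &attr);
    }

    /* Hand it back after migration; kvm_loongarch_pvtime_set_attr() rejects
     * addresses that fall outside any memslot. */
    static int restore_pvtime_gpa(int vcpu_fd, uint64_t gpa)
    {
            struct kvm_device_attr attr = {
                    .group = KVM_LOONGARCH_VCPU_PVTIME_CTRL,
                    .attr  = KVM_LOONGARCH_VCPU_PVTIME_GPA,
                    .addr  = (uint64_t)(unsigned long)&gpa,
            };

            return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
    }
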
1124 int ret = -ENXIO; in kvm_loongarch_vcpu_set_attr()
1126 switch (attr->group) { in kvm_loongarch_vcpu_set_attr()
1146 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl()
1156 * be clear in vcpu->arch.aux_inuse, and vcpu_load will check in kvm_arch_vcpu_ioctl()
1165 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1170 vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE; in kvm_arch_vcpu_ioctl()
1178 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1185 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1192 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1199 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1206 r = -ENOIOCTLCMD; in kvm_arch_vcpu_ioctl()
1217 fpu->fcc = vcpu->arch.fpu.fcc; in kvm_arch_vcpu_ioctl_get_fpu()
1218 fpu->fcsr = vcpu->arch.fpu.fcsr; in kvm_arch_vcpu_ioctl_get_fpu()
1220 memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i], FPU_REG_WIDTH / 64); in kvm_arch_vcpu_ioctl_get_fpu()
1229 vcpu->arch.fpu.fcc = fpu->fcc; in kvm_arch_vcpu_ioctl_set_fpu()
1230 vcpu->arch.fpu.fcsr = fpu->fcsr; in kvm_arch_vcpu_ioctl_set_fpu()
1232 memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i], FPU_REG_WIDTH / 64); in kvm_arch_vcpu_ioctl_set_fpu()
1240 if (!kvm_guest_has_lbt(&vcpu->arch)) in kvm_own_lbt()
1241 return -EINVAL; in kvm_own_lbt()
1245 _restore_lbt(&vcpu->arch.lbt); in kvm_own_lbt()
1246 vcpu->arch.aux_inuse |= KVM_LARCH_LBT; in kvm_own_lbt()
1255 if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) { in kvm_lose_lbt()
1256 _save_lbt(&vcpu->arch.lbt); in kvm_lose_lbt()
1258 vcpu->arch.aux_inuse &= ~KVM_LARCH_LBT; in kvm_lose_lbt()
1275 if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { in kvm_check_fcsr_alive()
1276 if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) in kvm_check_fcsr_alive()
1296 kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr); in kvm_own_fpu()
1299 kvm_restore_fpu(&vcpu->arch.fpu); in kvm_own_fpu()
1300 vcpu->arch.aux_inuse |= KVM_LARCH_FPU; in kvm_own_fpu()
1310 if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch)) in kvm_own_lsx()
1311 return -EINVAL; in kvm_own_lsx()
1316 kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr); in kvm_own_lsx()
1318 switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { in kvm_own_lsx()
1324 _restore_lsx_upper(&vcpu->arch.fpu); in kvm_own_lsx()
1330 kvm_restore_lsx(&vcpu->arch.fpu); in kvm_own_lsx()
1335 vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU; in kvm_own_lsx()
1346 if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch) || !kvm_guest_has_lasx(&vcpu->arch)) in kvm_own_lasx()
1347 return -EINVAL; in kvm_own_lasx()
1351 kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr); in kvm_own_lasx()
1353 switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) { in kvm_own_lasx()
1357 _restore_lasx_upper(&vcpu->arch.fpu); in kvm_own_lasx()
1361 _restore_lsx_upper(&vcpu->arch.fpu); in kvm_own_lasx()
1362 _restore_lasx_upper(&vcpu->arch.fpu); in kvm_own_lasx()
1366 kvm_restore_lasx(&vcpu->arch.fpu); in kvm_own_lasx()
1371 vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU; in kvm_own_lasx()
1384 if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) { in kvm_lose_fpu()
1385 kvm_save_lasx(&vcpu->arch.fpu); in kvm_lose_fpu()
1386 vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX); in kvm_lose_fpu()
1391 } else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) { in kvm_lose_fpu()
1392 kvm_save_lsx(&vcpu->arch.fpu); in kvm_lose_fpu()
1393 vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU); in kvm_lose_fpu()
1398 } else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { in kvm_lose_fpu()
1399 kvm_save_fpu(&vcpu->arch.fpu); in kvm_lose_fpu()
1400 vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU; in kvm_lose_fpu()
1413 int intr = (int)irq->irq; in kvm_vcpu_ioctl_interrupt()
1418 kvm_dequeue_irq(vcpu, -intr); in kvm_vcpu_ioctl_interrupt()
1420 kvm_err("%s: invalid interrupt ioctl %d\n", __func__, irq->irq); in kvm_vcpu_ioctl_interrupt()
1421 return -EINVAL; in kvm_vcpu_ioctl_interrupt()
1433 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_async_ioctl()
1439 return -EFAULT; in kvm_arch_vcpu_async_ioctl()
1441 kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq); in kvm_arch_vcpu_async_ioctl()
1446 return -ENOIOCTLCMD; in kvm_arch_vcpu_async_ioctl()
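
kvm_arch_vcpu_async_ioctl() above copies a struct kvm_interrupt from userspace and hands it to kvm_vcpu_ioctl_interrupt(), where a positive number queues the interrupt line and its negative counterpart dequeues it. A minimal userspace sketch:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Assert or deassert a guest interrupt line through KVM_INTERRUPT. */
    static int set_guest_irq_line(int vcpu_fd, int line, int asserted)
    {
            struct kvm_interrupt irq = {
                    .irq = asserted ? line : -line,
            };

            return ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
    }
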
1459 vcpu->arch.vpid = 0; in kvm_arch_vcpu_create()
1460 vcpu->arch.flush_gpa = INVALID_GPA; in kvm_arch_vcpu_create()
1462 hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD); in kvm_arch_vcpu_create()
1463 vcpu->arch.swtimer.function = kvm_swtimer_wakeup; in kvm_arch_vcpu_create()
1465 vcpu->arch.handle_exit = kvm_handle_exit; in kvm_arch_vcpu_create()
1466 vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry; in kvm_arch_vcpu_create()
1467 vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL); in kvm_arch_vcpu_create()
1468 if (!vcpu->arch.csr) in kvm_arch_vcpu_create()
1469 return -ENOMEM; in kvm_arch_vcpu_create()
1472 * All kvm exceptions share one exception entry, and host <-> guest in kvm_arch_vcpu_create()
1475 vcpu->arch.host_ecfg = (read_csr_ecfg() & CSR_ECFG_VS); in kvm_arch_vcpu_create()
1478 vcpu->arch.last_sched_cpu = -1; in kvm_arch_vcpu_create()
1481 spin_lock_init(&vcpu->arch.ipi_state.lock); in kvm_arch_vcpu_create()
1490 csr = vcpu->arch.csr; in kvm_arch_vcpu_create()
1494 kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id); in kvm_arch_vcpu_create()
1498 csr->csrs[LOONGARCH_CSR_GINTC] = 0; in kvm_arch_vcpu_create()
1512 hrtimer_cancel(&vcpu->arch.swtimer); in kvm_arch_vcpu_destroy()
1513 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); in kvm_arch_vcpu_destroy()
1515 kfree(vcpu->arch.csr); in kvm_arch_vcpu_destroy()
1522 context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu); in kvm_arch_vcpu_destroy()
1523 if (context->last_vcpu == vcpu) in kvm_arch_vcpu_destroy()
1524 context->last_vcpu = NULL; in kvm_arch_vcpu_destroy()
1532 struct loongarch_csrs *csr = vcpu->arch.csr; in _kvm_vcpu_load()
1538 migrated = (vcpu->arch.last_sched_cpu != cpu); in _kvm_vcpu_load()
1544 context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu); in _kvm_vcpu_load()
1545 if (migrated || (context->last_vcpu != vcpu)) in _kvm_vcpu_load()
1546 vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE; in _kvm_vcpu_load()
1547 context->last_vcpu = vcpu; in _kvm_vcpu_load()
1557 if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE) in _kvm_vcpu_load()
1560 write_csr_gcntc((ulong)vcpu->kvm->arch.time_offset); in _kvm_vcpu_load()
1609 write_csr_gintc(csr->csrs[LOONGARCH_CSR_GINTC]); in _kvm_vcpu_load()
1616 if (vcpu->kvm->created_vcpus > 1) in _kvm_vcpu_load()
1619 vcpu->arch.aux_inuse |= KVM_LARCH_HWCSR_USABLE; in _kvm_vcpu_load()
1636 struct loongarch_csrs *csr = vcpu->arch.csr; in _kvm_vcpu_put()
1646 if (vcpu->arch.aux_inuse & KVM_LARCH_SWCSR_LATEST) in _kvm_vcpu_put()
1697 vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST; in _kvm_vcpu_put()
1702 csr->csrs[LOONGARCH_CSR_GINTC] = read_csr_gintc(); in _kvm_vcpu_put()
1714 vcpu->arch.last_sched_cpu = cpu; in kvm_arch_vcpu_put()
1723 int r = -EINTR; in kvm_arch_vcpu_ioctl_run()
1724 struct kvm_run *run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
1726 if (vcpu->mmio_needed) { in kvm_arch_vcpu_ioctl_run()
1727 if (!vcpu->mmio_is_write) in kvm_arch_vcpu_ioctl_run()
1729 vcpu->mmio_needed = 0; in kvm_arch_vcpu_ioctl_run()
1732 switch (run->exit_reason) { in kvm_arch_vcpu_ioctl_run()
1737 if (!run->iocsr_io.is_write) in kvm_arch_vcpu_ioctl_run()
1742 if (!vcpu->wants_to_run) in kvm_arch_vcpu_ioctl_run()
1746 run->exit_reason = KVM_EXIT_UNKNOWN; in kvm_arch_vcpu_ioctl_run()
1747 lose_fpu(1); in kvm_arch_vcpu_ioctl_run()
1757 r = kvm_loongarch_ops->enter_guest(run, vcpu); in kvm_arch_vcpu_ioctl_run()
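
kvm_arch_vcpu_ioctl_run() first completes any pending MMIO or IOCSR read, then clears the exit reason and enters the guest, so userspace is expected to fill in the read data before calling KVM_RUN again. A hedged sketch of that loop (error handling trimmed; exit-reason constants taken from linux/kvm.h):

    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <linux/kvm.h>

    static int run_vcpu(int kvm_fd, int vcpu_fd)
    {
            int size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
            struct kvm_run *run;

            if (size < 0)
                    return -1;

            /* kvm_run is shared with the kernel; run->exit_reason and the
             * mmio/iocsr_io payloads are what the handler above inspects. */
            run = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu_fd, 0);
            if (run == MAP_FAILED)
                    return -1;

            for (;;) {
                    if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
                            return -1;

                    switch (run->exit_reason) {
                    case KVM_EXIT_MMIO:
                            /* emulate the access; on reads, place the result in
                             * run->mmio.data so the next KVM_RUN can complete it */
                            break;
                    case KVM_EXIT_LOONGARCH_IOCSR:
                            /* likewise for IOCSR accesses (run->iocsr_io) */
                            break;
                    case KVM_EXIT_SHUTDOWN:
                            return 0;
                    default:
                            return -1;
                    }
            }
    }
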