Lines Matching +full:csr +full:- +full:mask

1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
7 #include <linux/entry-kvm.h>
45 context = this_cpu_ptr(vcpu->kvm->arch.vmcs); in kvm_save_host_pmu()
46 context->perf_cntr[0] = read_csr_perfcntr0(); in kvm_save_host_pmu()
47 context->perf_cntr[1] = read_csr_perfcntr1(); in kvm_save_host_pmu()
48 context->perf_cntr[2] = read_csr_perfcntr2(); in kvm_save_host_pmu()
49 context->perf_cntr[3] = read_csr_perfcntr3(); in kvm_save_host_pmu()
50 context->perf_ctrl[0] = write_csr_perfctrl0(0); in kvm_save_host_pmu()
51 context->perf_ctrl[1] = write_csr_perfctrl1(0); in kvm_save_host_pmu()
52 context->perf_ctrl[2] = write_csr_perfctrl2(0); in kvm_save_host_pmu()
53 context->perf_ctrl[3] = write_csr_perfctrl3(0); in kvm_save_host_pmu()
60 context = this_cpu_ptr(vcpu->kvm->arch.vmcs); in kvm_restore_host_pmu()
61 write_csr_perfcntr0(context->perf_cntr[0]); in kvm_restore_host_pmu()
62 write_csr_perfcntr1(context->perf_cntr[1]); in kvm_restore_host_pmu()
63 write_csr_perfcntr2(context->perf_cntr[2]); in kvm_restore_host_pmu()
64 write_csr_perfcntr3(context->perf_cntr[3]); in kvm_restore_host_pmu()
65 write_csr_perfctrl0(context->perf_ctrl[0]); in kvm_restore_host_pmu()
66 write_csr_perfctrl1(context->perf_ctrl[1]); in kvm_restore_host_pmu()
67 write_csr_perfctrl2(context->perf_ctrl[2]); in kvm_restore_host_pmu()
68 write_csr_perfctrl3(context->perf_ctrl[3]); in kvm_restore_host_pmu()
74 struct loongarch_csrs *csr = vcpu->arch.csr; in kvm_save_guest_pmu() local
76 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0); in kvm_save_guest_pmu()
77 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1); in kvm_save_guest_pmu()
78 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2); in kvm_save_guest_pmu()
79 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3); in kvm_save_guest_pmu()
80 kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0); in kvm_save_guest_pmu()
81 kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1); in kvm_save_guest_pmu()
82 kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2); in kvm_save_guest_pmu()
83 kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3); in kvm_save_guest_pmu()
88 struct loongarch_csrs *csr = vcpu->arch.csr; in kvm_restore_guest_pmu() local
90 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0); in kvm_restore_guest_pmu()
91 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1); in kvm_restore_guest_pmu()
92 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2); in kvm_restore_guest_pmu()
93 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3); in kvm_restore_guest_pmu()
94 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0); in kvm_restore_guest_pmu()
95 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1); in kvm_restore_guest_pmu()
96 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2); in kvm_restore_guest_pmu()
97 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3); in kvm_restore_guest_pmu()
104 if (!kvm_guest_has_pmu(&vcpu->arch)) in kvm_own_pmu()
105 return -EINVAL; in kvm_own_pmu()
109 /* Set PM0-PM(num) to guest */ in kvm_own_pmu()
111 val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT; in kvm_own_pmu()
122 struct loongarch_csrs *csr = vcpu->arch.csr; in kvm_lose_pmu() local
124 if (!(vcpu->arch.aux_inuse & KVM_LARCH_PMU)) in kvm_lose_pmu()
137 val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0); in kvm_lose_pmu()
138 val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1); in kvm_lose_pmu()
139 val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2); in kvm_lose_pmu()
140 val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3); in kvm_lose_pmu()
142 vcpu->arch.aux_inuse &= ~KVM_LARCH_PMU; in kvm_lose_pmu()
149 if ((vcpu->arch.aux_inuse & KVM_LARCH_PMU)) in kvm_restore_pmu()
157 vcpu->arch.aux_inuse |= KVM_LARCH_PMU; in kvm_check_pmu()
170 ghc = &vcpu->arch.st.cache; in kvm_update_stolen_time()
171 gpa = vcpu->arch.st.guest_addr; in kvm_update_stolen_time()
176 slots = kvm_memslots(vcpu->kvm); in kvm_update_stolen_time()
177 if (slots->generation != ghc->generation || gpa != ghc->gpa) { in kvm_update_stolen_time()
178 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) { in kvm_update_stolen_time()
179 ghc->gpa = INVALID_GPA; in kvm_update_stolen_time()
184 st = (struct kvm_steal_time __user *)ghc->hva; in kvm_update_stolen_time()
185 unsafe_get_user(version, &st->version, out); in kvm_update_stolen_time()
190 unsafe_put_user(version, &st->version, out); in kvm_update_stolen_time()
193 unsafe_get_user(steal, &st->steal, out); in kvm_update_stolen_time()
194 steal += current->sched_info.run_delay - vcpu->arch.st.last_steal; in kvm_update_stolen_time()
195 vcpu->arch.st.last_steal = current->sched_info.run_delay; in kvm_update_stolen_time()
196 unsafe_put_user(steal, &st->steal, out); in kvm_update_stolen_time()
200 unsafe_put_user(version, &st->version, out); in kvm_update_stolen_time()
202 mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa)); in kvm_update_stolen_time()
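
The kvm_update_stolen_time() lines above follow the usual even/odd version protocol for paravirtual steal time: version is bumped to an odd value before steal is rewritten and bumped again afterwards, so a guest reader must retry while the version is odd or changes under it. A minimal guest-side read loop, sketched with a placeholder two-field layout (the real struct kvm_steal_time has a fixed ABI layout that is not visible in these matched lines):

#include <stdint.h>

/* Placeholder layout: only the version/steal retry protocol matters here;
 * the real field order and padding come from the kernel uapi header. */
struct steal_time_sketch {
	volatile uint32_t version;
	volatile uint64_t steal;
};

/* Seqcount-style read: retry while an update is in flight (odd version)
 * or the version changed between the two reads. */
static uint64_t read_steal_time(const struct steal_time_sketch *st)
{
	uint32_t before, after;
	uint64_t steal;

	do {
		before = st->version;
		__sync_synchronize();	/* order the steal read against both version reads */
		steal = st->steal;
		__sync_synchronize();
		after = st->version;
	} while ((before & 1) || before != after);

	return steal;
}
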
206 * kvm_check_requests - check and handle pending vCPU requests
217 vcpu->arch.vpid = 0; /* Drop vpid for this vCPU */ in kvm_check_requests()
232 if (vcpu->arch.flush_gpa != INVALID_GPA) { in kvm_late_check_requests()
233 kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa); in kvm_late_check_requests()
234 vcpu->arch.flush_gpa = INVALID_GPA; in kvm_late_check_requests()
258 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_enter_guest_check()
260 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_enter_guest_check()
288 smp_store_mb(vcpu->mode, IN_GUEST_MODE); in kvm_pre_enter_guest()
294 * Since it updates CSR.GSTAT used by kvm_flush_tlb_gpa(), in kvm_pre_enter_guest()
298 vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY); in kvm_pre_enter_guest()
299 /* Clear KVM_LARCH_SWCSR_LATEST as CSRs will change when entering the guest */ in kvm_pre_enter_guest()
300 vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST; in kvm_pre_enter_guest()
305 smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE); in kvm_pre_enter_guest()
307 ret = -EAGAIN; in kvm_pre_enter_guest()
320 unsigned long estat = vcpu->arch.host_estat; in kvm_handle_exit()
324 vcpu->mode = OUTSIDE_GUEST_MODE; in kvm_handle_exit()
327 run->exit_reason = KVM_EXIT_UNKNOWN; in kvm_handle_exit()
340 ++vcpu->stat.int_exits; in kvm_handle_exit()
360 return !!(vcpu->arch.irq_pending) && in kvm_arch_vcpu_runnable()
361 vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE; in kvm_arch_vcpu_runnable()
383 return vcpu->arch.pc; in kvm_arch_vcpu_get_ip()
393 return (vcpu && !(vcpu->arch.aux_inuse & KVM_LARCH_PMU)); in kvm_arch_pmi_in_guest()
410 return -EINVAL; in kvm_arch_vcpu_ioctl_translate()
431 kvm_debug("\tPC = 0x%08lx\n", vcpu->arch.pc); in kvm_arch_vcpu_dump_regs()
432 kvm_debug("\tExceptions: %08lx\n", vcpu->arch.irq_pending); in kvm_arch_vcpu_dump_regs()
436 vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1], in kvm_arch_vcpu_dump_regs()
437 vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]); in kvm_arch_vcpu_dump_regs()
452 *mp_state = vcpu->arch.mp_state; in kvm_arch_vcpu_ioctl_get_mpstate()
462 switch (mp_state->mp_state) { in kvm_arch_vcpu_ioctl_set_mpstate()
464 vcpu->arch.mp_state = *mp_state; in kvm_arch_vcpu_ioctl_set_mpstate()
467 ret = -EINVAL; in kvm_arch_vcpu_ioctl_set_mpstate()
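
The mp_state accessors above back the generic KVM_GET_MP_STATE / KVM_SET_MP_STATE vCPU ioctls; in the matched lines the accepted state is stored and everything else falls through to -EINVAL. A minimal userspace sketch (vcpu_fd is assumed to be an already-created KVM vCPU file descriptor):

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int make_vcpu_runnable(int vcpu_fd)
{
	/* KVM_MP_STATE_RUNNABLE is the standard "runnable" state from <linux/kvm.h>;
	 * the request is handled by kvm_arch_vcpu_ioctl_set_mpstate() above. */
	struct kvm_mp_state st = { .mp_state = KVM_MP_STATE_RUNNABLE };

	return ioctl(vcpu_fd, KVM_SET_MP_STATE, &st);
}
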
476 if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) in kvm_arch_vcpu_ioctl_set_guest_debug()
477 return -EINVAL; in kvm_arch_vcpu_ioctl_set_guest_debug()
479 if (dbg->control & KVM_GUESTDBG_ENABLE) in kvm_arch_vcpu_ioctl_set_guest_debug()
480 vcpu->guest_debug = dbg->control; in kvm_arch_vcpu_ioctl_set_guest_debug()
482 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
491 struct loongarch_csrs *csr = vcpu->arch.csr; in kvm_set_cpuid() local
494 return -EINVAL; in kvm_set_cpuid()
496 map = vcpu->kvm->arch.phyid_map; in kvm_set_cpuid()
497 cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID); in kvm_set_cpuid()
499 spin_lock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
500 if ((cpuid < KVM_MAX_PHYID) && map->phys_map[cpuid].enabled) { in kvm_set_cpuid()
503 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
511 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
512 return -EINVAL; in kvm_set_cpuid()
515 if (map->phys_map[val].enabled) { in kvm_set_cpuid()
517 if (vcpu == map->phys_map[val].vcpu) { in kvm_set_cpuid()
518 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
526 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
527 return -EINVAL; in kvm_set_cpuid()
530 kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, val); in kvm_set_cpuid()
531 map->phys_map[val].enabled = true; in kvm_set_cpuid()
532 map->phys_map[val].vcpu = vcpu; in kvm_set_cpuid()
533 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
542 struct loongarch_csrs *csr = vcpu->arch.csr; in kvm_drop_cpuid() local
544 map = vcpu->kvm->arch.phyid_map; in kvm_drop_cpuid()
545 cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID); in kvm_drop_cpuid()
550 spin_lock(&vcpu->kvm->arch.phyid_map_lock); in kvm_drop_cpuid()
551 if (map->phys_map[cpuid].enabled) { in kvm_drop_cpuid()
552 map->phys_map[cpuid].vcpu = NULL; in kvm_drop_cpuid()
553 map->phys_map[cpuid].enabled = false; in kvm_drop_cpuid()
554 kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID); in kvm_drop_cpuid()
556 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_drop_cpuid()
566 map = kvm->arch.phyid_map; in kvm_get_vcpu_by_cpuid()
567 if (!map->phys_map[cpuid].enabled) in kvm_get_vcpu_by_cpuid()
570 return map->phys_map[cpuid].vcpu; in kvm_get_vcpu_by_cpuid()
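
kvm_set_cpuid(), kvm_drop_cpuid() and kvm_get_vcpu_by_cpuid() above all index one per-VM table by the guest's CSR.CPUID value under kvm->arch.phyid_map_lock. The shape implied by the map->phys_map[cpuid].enabled / .vcpu accesses is roughly the following sketch (type names and the array bound are placeholders; only the two fields actually referenced above are shown):

#include <stdbool.h>

#define MAX_PHYID_SKETCH 256	/* stand-in for KVM_MAX_PHYID, whose value is not visible here */

struct phyid_slot_sketch {
	bool  enabled;	/* slot claimed in kvm_set_cpuid(), released in kvm_drop_cpuid() */
	void *vcpu;	/* owning vCPU while the slot is enabled */
};

struct phyid_map_sketch {
	struct phyid_slot_sketch phys_map[MAX_PHYID_SKETCH];
};
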
576 struct loongarch_csrs *csr = vcpu->arch.csr; in _kvm_getcsr() local
579 return -EINVAL; in _kvm_getcsr()
589 vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST; in _kvm_getcsr()
594 gintc = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC) & 0xff; in _kvm_getcsr()
595 *val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT) | (gintc << 2); in _kvm_getcsr()
600 * Get software CSR state since software state is consistent in _kvm_getcsr()
603 *val = kvm_read_sw_gcsr(csr, id); in _kvm_getcsr()
611 struct loongarch_csrs *csr = vcpu->arch.csr; in _kvm_setcsr() local
614 return -EINVAL; in _kvm_setcsr()
622 kvm_set_sw_gcsr(csr, LOONGARCH_CSR_GINTC, gintc); in _kvm_setcsr()
625 kvm_set_sw_gcsr(csr, LOONGARCH_CSR_ESTAT, gintc); in _kvm_setcsr()
630 kvm_write_sw_gcsr(csr, id, val); in _kvm_setcsr()
633 * After modifying the PMU CSR register value of the vcpu. in _kvm_setcsr()
639 val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0) | in _kvm_setcsr()
640 kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1) | in _kvm_setcsr()
641 kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2) | in _kvm_setcsr()
642 kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3); in _kvm_setcsr()
654 return -EINVAL; in _kvm_get_cpucfg_mask()
717 u64 mask = 0; in kvm_check_cpucfg() local
719 ret = _kvm_get_cpucfg_mask(id, &mask); in kvm_check_cpucfg()
723 if (val & ~mask) in kvm_check_cpucfg()
725 return -EINVAL; in kvm_check_cpucfg()
731 return -EINVAL; in kvm_check_cpucfg()
734 return -EINVAL; in kvm_check_cpucfg()
737 return -EINVAL; in kvm_check_cpucfg()
740 return -EINVAL; in kvm_check_cpucfg()
746 return -EINVAL; in kvm_check_cpucfg()
748 return -EINVAL; in kvm_check_cpucfg()
750 return -EINVAL; in kvm_check_cpucfg()
756 * besides the mask check above. in kvm_check_cpucfg()
766 u64 type = reg->id & KVM_REG_LOONGARCH_MASK; in kvm_get_one_reg()
770 id = KVM_GET_IOC_CSR_IDX(reg->id); in kvm_get_one_reg()
774 id = KVM_GET_IOC_CPUCFG_IDX(reg->id); in kvm_get_one_reg()
776 *v = vcpu->arch.cpucfg[id]; in kvm_get_one_reg()
778 ret = -EINVAL; in kvm_get_one_reg()
781 if (!kvm_guest_has_lbt(&vcpu->arch)) in kvm_get_one_reg()
782 return -ENXIO; in kvm_get_one_reg()
784 switch (reg->id) { in kvm_get_one_reg()
786 *v = vcpu->arch.lbt.scr0; in kvm_get_one_reg()
789 *v = vcpu->arch.lbt.scr1; in kvm_get_one_reg()
792 *v = vcpu->arch.lbt.scr2; in kvm_get_one_reg()
795 *v = vcpu->arch.lbt.scr3; in kvm_get_one_reg()
798 *v = vcpu->arch.lbt.eflags; in kvm_get_one_reg()
801 *v = vcpu->arch.fpu.ftop; in kvm_get_one_reg()
804 ret = -EINVAL; in kvm_get_one_reg()
809 switch (reg->id) { in kvm_get_one_reg()
811 *v = drdtime() + vcpu->kvm->arch.time_offset; in kvm_get_one_reg()
817 ret = -EINVAL; in kvm_get_one_reg()
822 ret = -EINVAL; in kvm_get_one_reg()
832 u64 v, size = reg->id & KVM_REG_SIZE_MASK; in kvm_get_reg()
839 ret = put_user(v, (u64 __user *)(long)reg->addr); in kvm_get_reg()
842 ret = -EINVAL; in kvm_get_reg()
853 u64 type = reg->id & KVM_REG_LOONGARCH_MASK; in kvm_set_one_reg()
857 id = KVM_GET_IOC_CSR_IDX(reg->id); in kvm_set_one_reg()
861 id = KVM_GET_IOC_CPUCFG_IDX(reg->id); in kvm_set_one_reg()
865 vcpu->arch.cpucfg[id] = (u32)v; in kvm_set_one_reg()
867 vcpu->arch.max_pmu_csrid = in kvm_set_one_reg()
868 LOONGARCH_CSR_PERFCTRL0 + 2 * kvm_get_pmu_num(&vcpu->arch) + 1; in kvm_set_one_reg()
871 if (!kvm_guest_has_lbt(&vcpu->arch)) in kvm_set_one_reg()
872 return -ENXIO; in kvm_set_one_reg()
874 switch (reg->id) { in kvm_set_one_reg()
876 vcpu->arch.lbt.scr0 = v; in kvm_set_one_reg()
879 vcpu->arch.lbt.scr1 = v; in kvm_set_one_reg()
882 vcpu->arch.lbt.scr2 = v; in kvm_set_one_reg()
885 vcpu->arch.lbt.scr3 = v; in kvm_set_one_reg()
888 vcpu->arch.lbt.eflags = v; in kvm_set_one_reg()
891 vcpu->arch.fpu.ftop = v; in kvm_set_one_reg()
894 ret = -EINVAL; in kvm_set_one_reg()
899 switch (reg->id) { in kvm_set_one_reg()
905 if (vcpu->vcpu_id == 0) in kvm_set_one_reg()
906 vcpu->kvm->arch.time_offset = (signed long)(v - drdtime()); in kvm_set_one_reg()
909 vcpu->arch.st.guest_addr = 0; in kvm_set_one_reg()
910 memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending)); in kvm_set_one_reg()
911 memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear)); in kvm_set_one_reg()
915 * Other CSR registers are cleared with function _kvm_setcsr(). in kvm_set_one_reg()
917 kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_GINTC, 0); in kvm_set_one_reg()
918 kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_ESTAT, 0); in kvm_set_one_reg()
921 ret = -EINVAL; in kvm_set_one_reg()
926 ret = -EINVAL; in kvm_set_one_reg()
936 u64 v, size = reg->id & KVM_REG_SIZE_MASK; in kvm_set_reg()
940 ret = get_user(v, (u64 __user *)(long)reg->addr); in kvm_set_reg()
945 return -EINVAL; in kvm_set_reg()
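
kvm_get_reg() and kvm_set_reg() above copy a u64 to or from reg->addr and dispatch on the register-ID type bits (CSR, CPUCFG, LBT, KVM); from userspace this is the standard KVM_GET_ONE_REG / KVM_SET_ONE_REG interface. A hedged sketch of reading one guest CSR; the KVM_REG_LOONGARCH_CSR encoding and the ESTAT index are assumptions to be checked against the uapi headers and the LoongArch manual:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>	/* struct kvm_one_reg, KVM_GET_ONE_REG, KVM_REG_SIZE_U64 */

#ifndef KVM_REG_LOONGARCH_CSR
#define KVM_REG_LOONGARCH_CSR	(KVM_REG_LOONGARCH | 0x10000ULL)	/* assumed encoding */
#endif
#define CSR_IDX_ESTAT		0x5	/* ESTAT CSR index, assumed per the LoongArch manual */

static int read_guest_estat(int vcpu_fd, uint64_t *val)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | CSR_IDX_ESTAT,
		.addr = (uint64_t)(unsigned long)val,
	};

	/* _kvm_getcsr() above folds the pending GINTC bits into the returned ESTAT. */
	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}
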
953 return -ENOIOCTLCMD; in kvm_arch_vcpu_ioctl_get_sregs()
958 return -ENOIOCTLCMD; in kvm_arch_vcpu_ioctl_set_sregs()
965 for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++) in kvm_arch_vcpu_ioctl_get_regs()
966 regs->gpr[i] = vcpu->arch.gprs[i]; in kvm_arch_vcpu_ioctl_get_regs()
968 regs->pc = vcpu->arch.pc; in kvm_arch_vcpu_ioctl_get_regs()
977 for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++) in kvm_arch_vcpu_ioctl_set_regs()
978 vcpu->arch.gprs[i] = regs->gpr[i]; in kvm_arch_vcpu_ioctl_set_regs()
980 vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */ in kvm_arch_vcpu_ioctl_set_regs()
981 vcpu->arch.pc = regs->pc; in kvm_arch_vcpu_ioctl_set_regs()
990 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
996 switch (attr->attr) { in kvm_loongarch_cpucfg_has_attr()
1003 return -ENXIO; in kvm_loongarch_cpucfg_has_attr()
1006 return -ENXIO; in kvm_loongarch_cpucfg_has_attr()
1013 || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA) in kvm_loongarch_pvtime_has_attr()
1014 return -ENXIO; in kvm_loongarch_pvtime_has_attr()
1022 int ret = -ENXIO; in kvm_loongarch_vcpu_has_attr()
1024 switch (attr->group) { in kvm_loongarch_vcpu_has_attr()
1043 uint64_t __user *uaddr = (uint64_t __user *)attr->addr; in kvm_loongarch_cpucfg_get_attr()
1045 switch (attr->attr) { in kvm_loongarch_cpucfg_get_attr()
1046 case 0 ... (KVM_MAX_CPUCFG_REGS - 1): in kvm_loongarch_cpucfg_get_attr()
1047 ret = _kvm_get_cpucfg_mask(attr->attr, &val); in kvm_loongarch_cpucfg_get_attr()
1052 val = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK; in kvm_loongarch_cpucfg_get_attr()
1055 return -ENXIO; in kvm_loongarch_cpucfg_get_attr()
1067 u64 __user *user = (u64 __user *)attr->addr; in kvm_loongarch_pvtime_get_attr()
1070 || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA) in kvm_loongarch_pvtime_get_attr()
1071 return -ENXIO; in kvm_loongarch_pvtime_get_attr()
1073 gpa = vcpu->arch.st.guest_addr; in kvm_loongarch_pvtime_get_attr()
1075 return -EFAULT; in kvm_loongarch_pvtime_get_attr()
1083 int ret = -ENXIO; in kvm_loongarch_vcpu_get_attr()
1085 switch (attr->group) { in kvm_loongarch_vcpu_get_attr()
1103 u64 __user *user = (u64 __user *)attr->addr; in kvm_loongarch_cpucfg_set_attr()
1104 struct kvm *kvm = vcpu->kvm; in kvm_loongarch_cpucfg_set_attr()
1106 switch (attr->attr) { in kvm_loongarch_cpucfg_set_attr()
1109 return -EFAULT; in kvm_loongarch_cpucfg_set_attr()
1113 return -EINVAL; in kvm_loongarch_cpucfg_set_attr()
1116 if ((kvm->arch.pv_features & LOONGARCH_PV_FEAT_UPDATED) in kvm_loongarch_cpucfg_set_attr()
1117 && ((kvm->arch.pv_features & valid) != val)) in kvm_loongarch_cpucfg_set_attr()
1118 return -EINVAL; in kvm_loongarch_cpucfg_set_attr()
1119 kvm->arch.pv_features = val | LOONGARCH_PV_FEAT_UPDATED; in kvm_loongarch_cpucfg_set_attr()
1122 return -ENXIO; in kvm_loongarch_cpucfg_set_attr()
1130 u64 gpa, __user *user = (u64 __user *)attr->addr; in kvm_loongarch_pvtime_set_attr()
1131 struct kvm *kvm = vcpu->kvm; in kvm_loongarch_pvtime_set_attr()
1134 || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA) in kvm_loongarch_pvtime_set_attr()
1135 return -ENXIO; in kvm_loongarch_pvtime_set_attr()
1138 return -EFAULT; in kvm_loongarch_pvtime_set_attr()
1141 return -EINVAL; in kvm_loongarch_pvtime_set_attr()
1144 vcpu->arch.st.guest_addr = gpa; in kvm_loongarch_pvtime_set_attr()
1149 idx = srcu_read_lock(&kvm->srcu); in kvm_loongarch_pvtime_set_attr()
1151 ret = -EINVAL; in kvm_loongarch_pvtime_set_attr()
1152 srcu_read_unlock(&kvm->srcu, idx); in kvm_loongarch_pvtime_set_attr()
1155 vcpu->arch.st.guest_addr = gpa; in kvm_loongarch_pvtime_set_attr()
1156 vcpu->arch.st.last_steal = current->sched_info.run_delay; in kvm_loongarch_pvtime_set_attr()
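
kvm_loongarch_pvtime_set_attr() above receives the steal-time guest physical address through the per-vCPU device-attribute interface, i.e. KVM_HAS_DEVICE_ATTR / KVM_SET_DEVICE_ATTR on the vCPU fd. A hedged userspace sketch; KVM_LOONGARCH_VCPU_PVTIME_GPA is the attribute checked in the lines above, while the group constant name is an assumption to verify against the uapi header:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_steal_time_gpa(int vcpu_fd, uint64_t gpa)
{
	struct kvm_device_attr attr = {
		.group = KVM_LOONGARCH_VCPU_PVTIME_CTRL,	/* assumed group name */
		.attr  = KVM_LOONGARCH_VCPU_PVTIME_GPA,
		.addr  = (uint64_t)(unsigned long)&gpa,	/* the kernel reads the gpa from this address */
	};

	if (ioctl(vcpu_fd, KVM_HAS_DEVICE_ATTR, &attr))
		return -1;	/* PV time not supported for this vCPU */
	return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
}
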
1166 int ret = -ENXIO; in kvm_loongarch_vcpu_set_attr()
1168 switch (attr->group) { in kvm_loongarch_vcpu_set_attr()
1188 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl()
1191 * Only software CSR should be modified in kvm_arch_vcpu_ioctl()
1193 * If any hardware CSR register is modified, the vcpu_load/vcpu_put pair in kvm_arch_vcpu_ioctl()
1194 * should be used. Since the CSR registers are owned by this vcpu, switching in kvm_arch_vcpu_ioctl()
1195 * to another vcpu requires that vcpu to reload its CSR registers. in kvm_arch_vcpu_ioctl()
1197 * If software CSR state is modified, bit KVM_LARCH_HWCSR_USABLE should in kvm_arch_vcpu_ioctl()
1198 * be cleared in vcpu->arch.aux_inuse, and vcpu_load will check the in kvm_arch_vcpu_ioctl()
1199 * aux_inuse flag and reload the CSR registers from software state. in kvm_arch_vcpu_ioctl()
1207 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1212 vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE; in kvm_arch_vcpu_ioctl()
1220 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1227 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1234 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1241 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1248 r = -ENOIOCTLCMD; in kvm_arch_vcpu_ioctl()
1259 fpu->fcc = vcpu->arch.fpu.fcc; in kvm_arch_vcpu_ioctl_get_fpu()
1260 fpu->fcsr = vcpu->arch.fpu.fcsr; in kvm_arch_vcpu_ioctl_get_fpu()
1262 memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i], FPU_REG_WIDTH / 64); in kvm_arch_vcpu_ioctl_get_fpu()
1271 vcpu->arch.fpu.fcc = fpu->fcc; in kvm_arch_vcpu_ioctl_set_fpu()
1272 vcpu->arch.fpu.fcsr = fpu->fcsr; in kvm_arch_vcpu_ioctl_set_fpu()
1274 memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i], FPU_REG_WIDTH / 64); in kvm_arch_vcpu_ioctl_set_fpu()
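
The two helpers above back the generic KVM_GET_FPU / KVM_SET_FPU vCPU ioctls, exchanging fcc, fcsr and the fpr[] array through struct kvm_fpu. A minimal userspace sketch for reading the guest FCSR (the exact LoongArch struct kvm_fpu layout lives in the uapi header and is not spelled out in the matched lines):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int dump_guest_fcsr(int vcpu_fd)
{
	struct kvm_fpu fpu;

	if (ioctl(vcpu_fd, KVM_GET_FPU, &fpu) < 0)
		return -1;

	/* fcsr is one of the fields copied out by kvm_arch_vcpu_ioctl_get_fpu() above. */
	printf("guest fcsr = 0x%x\n", (unsigned int)fpu.fcsr);
	return 0;
}
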
1282 if (!kvm_guest_has_lbt(&vcpu->arch)) in kvm_own_lbt()
1283 return -EINVAL; in kvm_own_lbt()
1286 if (!(vcpu->arch.aux_inuse & KVM_LARCH_LBT)) { in kvm_own_lbt()
1288 _restore_lbt(&vcpu->arch.lbt); in kvm_own_lbt()
1289 vcpu->arch.aux_inuse |= KVM_LARCH_LBT; in kvm_own_lbt()
1299 if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) { in kvm_lose_lbt()
1300 _save_lbt(&vcpu->arch.lbt); in kvm_lose_lbt()
1302 vcpu->arch.aux_inuse &= ~KVM_LARCH_LBT; in kvm_lose_lbt()
1319 if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { in kvm_check_fcsr_alive()
1320 if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) in kvm_check_fcsr_alive()
1340 kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr); in kvm_own_fpu()
1343 kvm_restore_fpu(&vcpu->arch.fpu); in kvm_own_fpu()
1344 vcpu->arch.aux_inuse |= KVM_LARCH_FPU; in kvm_own_fpu()
1354 if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch)) in kvm_own_lsx()
1355 return -EINVAL; in kvm_own_lsx()
1360 kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr); in kvm_own_lsx()
1362 switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { in kvm_own_lsx()
1368 _restore_lsx_upper(&vcpu->arch.fpu); in kvm_own_lsx()
1374 kvm_restore_lsx(&vcpu->arch.fpu); in kvm_own_lsx()
1379 vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU; in kvm_own_lsx()
1390 if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch) || !kvm_guest_has_lasx(&vcpu->arch)) in kvm_own_lasx()
1391 return -EINVAL; in kvm_own_lasx()
1395 kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr); in kvm_own_lasx()
1397 switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) { in kvm_own_lasx()
1401 _restore_lasx_upper(&vcpu->arch.fpu); in kvm_own_lasx()
1405 _restore_lsx_upper(&vcpu->arch.fpu); in kvm_own_lasx()
1406 _restore_lasx_upper(&vcpu->arch.fpu); in kvm_own_lasx()
1410 kvm_restore_lasx(&vcpu->arch.fpu); in kvm_own_lasx()
1415 vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU; in kvm_own_lasx()
1428 if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) { in kvm_lose_fpu()
1429 kvm_save_lasx(&vcpu->arch.fpu); in kvm_lose_fpu()
1430 vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX); in kvm_lose_fpu()
1435 } else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) { in kvm_lose_fpu()
1436 kvm_save_lsx(&vcpu->arch.fpu); in kvm_lose_fpu()
1437 vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU); in kvm_lose_fpu()
1442 } else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { in kvm_lose_fpu()
1443 kvm_save_fpu(&vcpu->arch.fpu); in kvm_lose_fpu()
1444 vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU; in kvm_lose_fpu()
1457 int intr = (int)irq->irq; in kvm_vcpu_ioctl_interrupt()
1462 kvm_dequeue_irq(vcpu, -intr); in kvm_vcpu_ioctl_interrupt()
1464 kvm_err("%s: invalid interrupt ioctl %d\n", __func__, irq->irq); in kvm_vcpu_ioctl_interrupt()
1465 return -EINVAL; in kvm_vcpu_ioctl_interrupt()
1477 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_async_ioctl()
1483 return -EFAULT; in kvm_arch_vcpu_async_ioctl()
1485 kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq); in kvm_arch_vcpu_async_ioctl()
1490 return -ENOIOCTLCMD; in kvm_arch_vcpu_async_ioctl()
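
kvm_arch_vcpu_async_ioctl() above copies a struct kvm_interrupt from userspace for KVM_INTERRUPT, and kvm_vcpu_ioctl_interrupt() queues the interrupt when the number is positive or dequeues it when it is negative (kvm_dequeue_irq(vcpu, -intr)). A hedged userspace sketch; the meaning of individual interrupt numbers follows the LoongArch interrupt numbering, which these matched lines do not show:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* assert != 0 queues the interrupt, assert == 0 clears it again. */
static int set_guest_irq(int vcpu_fd, int irq_num, int assert)
{
	struct kvm_interrupt irq = {
		.irq = assert ? (__u32)irq_num : (__u32)-irq_num,
	};

	return ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
}
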
1501 struct loongarch_csrs *csr; in kvm_arch_vcpu_create() local
1503 vcpu->arch.vpid = 0; in kvm_arch_vcpu_create()
1504 vcpu->arch.flush_gpa = INVALID_GPA; in kvm_arch_vcpu_create()
1506 hrtimer_setup(&vcpu->arch.swtimer, kvm_swtimer_wakeup, CLOCK_MONOTONIC, in kvm_arch_vcpu_create()
1510 vcpu->arch.kvm_pgd = __pa(vcpu->kvm->arch.pgd); in kvm_arch_vcpu_create()
1516 vcpu->arch.host_pgd = (unsigned long)vcpu->kvm->mm->pgd; in kvm_arch_vcpu_create()
1518 vcpu->arch.handle_exit = kvm_handle_exit; in kvm_arch_vcpu_create()
1519 vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry; in kvm_arch_vcpu_create()
1520 vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL); in kvm_arch_vcpu_create()
1521 if (!vcpu->arch.csr) in kvm_arch_vcpu_create()
1522 return -ENOMEM; in kvm_arch_vcpu_create()
1525 * All kvm exceptions share one exception entry, and host <-> guest in kvm_arch_vcpu_create()
1528 vcpu->arch.host_ecfg = (read_csr_ecfg() & CSR_ECFG_VS); in kvm_arch_vcpu_create()
1531 vcpu->arch.last_sched_cpu = -1; in kvm_arch_vcpu_create()
1534 spin_lock_init(&vcpu->arch.ipi_state.lock); in kvm_arch_vcpu_create()
1543 csr = vcpu->arch.csr; in kvm_arch_vcpu_create()
1544 kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CRMD, CSR_CRMD_DA); in kvm_arch_vcpu_create()
1547 kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id); in kvm_arch_vcpu_create()
1548 kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID); in kvm_arch_vcpu_create()
1551 csr->csrs[LOONGARCH_CSR_GINTC] = 0; in kvm_arch_vcpu_create()
1565 hrtimer_cancel(&vcpu->arch.swtimer); in kvm_arch_vcpu_destroy()
1566 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); in kvm_arch_vcpu_destroy()
1568 kfree(vcpu->arch.csr); in kvm_arch_vcpu_destroy()
1575 context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu); in kvm_arch_vcpu_destroy()
1576 if (context->last_vcpu == vcpu) in kvm_arch_vcpu_destroy()
1577 context->last_vcpu = NULL; in kvm_arch_vcpu_destroy()
1585 struct loongarch_csrs *csr = vcpu->arch.csr; in _kvm_vcpu_load() local
1591 migrated = (vcpu->arch.last_sched_cpu != cpu); in _kvm_vcpu_load()
1597 context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu); in _kvm_vcpu_load()
1598 if (migrated || (context->last_vcpu != vcpu)) in _kvm_vcpu_load()
1599 vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE; in _kvm_vcpu_load()
1600 context->last_vcpu = vcpu; in _kvm_vcpu_load()
1610 if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE) in _kvm_vcpu_load()
1613 write_csr_gcntc((ulong)vcpu->kvm->arch.time_offset); in _kvm_vcpu_load()
1615 /* Restore guest CSR registers */ in _kvm_vcpu_load()
1616 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CRMD); in _kvm_vcpu_load()
1617 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PRMD); in _kvm_vcpu_load()
1618 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EUEN); in _kvm_vcpu_load()
1619 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_MISC); in _kvm_vcpu_load()
1620 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ECFG); in _kvm_vcpu_load()
1621 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ERA); in _kvm_vcpu_load()
1622 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADV); in _kvm_vcpu_load()
1623 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADI); in _kvm_vcpu_load()
1624 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EENTRY); in _kvm_vcpu_load()
1625 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX); in _kvm_vcpu_load()
1626 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI); in _kvm_vcpu_load()
1627 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0); in _kvm_vcpu_load()
1628 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1); in _kvm_vcpu_load()
1629 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ASID); in _kvm_vcpu_load()
1630 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDL); in _kvm_vcpu_load()
1631 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDH); in _kvm_vcpu_load()
1632 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0); in _kvm_vcpu_load()
1633 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1); in _kvm_vcpu_load()
1634 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE); in _kvm_vcpu_load()
1635 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_RVACFG); in _kvm_vcpu_load()
1636 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CPUID); in _kvm_vcpu_load()
1637 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS0); in _kvm_vcpu_load()
1638 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS1); in _kvm_vcpu_load()
1639 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS2); in _kvm_vcpu_load()
1640 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS3); in _kvm_vcpu_load()
1641 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS4); in _kvm_vcpu_load()
1642 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS5); in _kvm_vcpu_load()
1643 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS6); in _kvm_vcpu_load()
1644 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS7); in _kvm_vcpu_load()
1645 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TMID); in _kvm_vcpu_load()
1646 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CNTC); in _kvm_vcpu_load()
1647 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY); in _kvm_vcpu_load()
1648 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV); in _kvm_vcpu_load()
1649 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA); in _kvm_vcpu_load()
1650 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE); in _kvm_vcpu_load()
1651 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0); in _kvm_vcpu_load()
1652 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1); in _kvm_vcpu_load()
1653 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI); in _kvm_vcpu_load()
1654 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD); in _kvm_vcpu_load()
1655 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0); in _kvm_vcpu_load()
1656 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1); in _kvm_vcpu_load()
1657 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2); in _kvm_vcpu_load()
1658 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3); in _kvm_vcpu_load()
1659 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL); in _kvm_vcpu_load()
1662 write_csr_gintc(csr->csrs[LOONGARCH_CSR_GINTC]); in _kvm_vcpu_load()
1669 if (vcpu->kvm->created_vcpus > 1) in _kvm_vcpu_load()
1672 vcpu->arch.aux_inuse |= KVM_LARCH_HWCSR_USABLE; in _kvm_vcpu_load()
1689 struct loongarch_csrs *csr = vcpu->arch.csr; in _kvm_vcpu_put() local
1694 * Update CSR state from hardware if software CSR state is stale; in _kvm_vcpu_put()
1695 * most CSR registers are kept unchanged during process context in _kvm_vcpu_put()
1696 * switch, except for CSR registers like the remaining timer tick value and in _kvm_vcpu_put()
1699 if (vcpu->arch.aux_inuse & KVM_LARCH_SWCSR_LATEST) in _kvm_vcpu_put()
1702 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CRMD); in _kvm_vcpu_put()
1703 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRMD); in _kvm_vcpu_put()
1704 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EUEN); in _kvm_vcpu_put()
1705 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_MISC); in _kvm_vcpu_put()
1706 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ECFG); in _kvm_vcpu_put()
1707 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ERA); in _kvm_vcpu_put()
1708 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADV); in _kvm_vcpu_put()
1709 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADI); in _kvm_vcpu_put()
1710 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EENTRY); in _kvm_vcpu_put()
1711 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX); in _kvm_vcpu_put()
1712 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI); in _kvm_vcpu_put()
1713 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0); in _kvm_vcpu_put()
1714 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1); in _kvm_vcpu_put()
1715 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ASID); in _kvm_vcpu_put()
1716 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDL); in _kvm_vcpu_put()
1717 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDH); in _kvm_vcpu_put()
1718 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0); in _kvm_vcpu_put()
1719 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1); in _kvm_vcpu_put()
1720 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE); in _kvm_vcpu_put()
1721 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_RVACFG); in _kvm_vcpu_put()
1722 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CPUID); in _kvm_vcpu_put()
1723 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG1); in _kvm_vcpu_put()
1724 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG2); in _kvm_vcpu_put()
1725 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG3); in _kvm_vcpu_put()
1726 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS0); in _kvm_vcpu_put()
1727 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS1); in _kvm_vcpu_put()
1728 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS2); in _kvm_vcpu_put()
1729 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS3); in _kvm_vcpu_put()
1730 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS4); in _kvm_vcpu_put()
1731 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS5); in _kvm_vcpu_put()
1732 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS6); in _kvm_vcpu_put()
1733 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS7); in _kvm_vcpu_put()
1734 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TMID); in _kvm_vcpu_put()
1735 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CNTC); in _kvm_vcpu_put()
1736 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL); in _kvm_vcpu_put()
1737 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY); in _kvm_vcpu_put()
1738 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV); in _kvm_vcpu_put()
1739 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA); in _kvm_vcpu_put()
1740 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE); in _kvm_vcpu_put()
1741 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0); in _kvm_vcpu_put()
1742 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1); in _kvm_vcpu_put()
1743 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI); in _kvm_vcpu_put()
1744 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD); in _kvm_vcpu_put()
1745 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0); in _kvm_vcpu_put()
1746 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1); in _kvm_vcpu_put()
1747 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2); in _kvm_vcpu_put()
1748 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3); in _kvm_vcpu_put()
1750 vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST; in _kvm_vcpu_put()
1755 csr->csrs[LOONGARCH_CSR_GINTC] = read_csr_gintc(); in _kvm_vcpu_put()
1767 vcpu->arch.last_sched_cpu = cpu; in kvm_arch_vcpu_put()
1776 int r = -EINTR; in kvm_arch_vcpu_ioctl_run()
1777 struct kvm_run *run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
1779 if (vcpu->mmio_needed) { in kvm_arch_vcpu_ioctl_run()
1780 if (!vcpu->mmio_is_write) in kvm_arch_vcpu_ioctl_run()
1782 vcpu->mmio_needed = 0; in kvm_arch_vcpu_ioctl_run()
1785 switch (run->exit_reason) { in kvm_arch_vcpu_ioctl_run()
1790 if (!run->iocsr_io.is_write) in kvm_arch_vcpu_ioctl_run()
1795 if (!vcpu->wants_to_run) in kvm_arch_vcpu_ioctl_run()
1799 run->exit_reason = KVM_EXIT_UNKNOWN; in kvm_arch_vcpu_ioctl_run()
1810 r = kvm_loongarch_ops->enter_guest(run, vcpu); in kvm_arch_vcpu_ioctl_run()