Lines Matching full:vcpu

56 STATS_DESC_COUNTER(VCPU, wait_exits),
57 STATS_DESC_COUNTER(VCPU, cache_exits),
58 STATS_DESC_COUNTER(VCPU, signal_exits),
59 STATS_DESC_COUNTER(VCPU, int_exits),
60 STATS_DESC_COUNTER(VCPU, cop_unusable_exits),
61 STATS_DESC_COUNTER(VCPU, tlbmod_exits),
62 STATS_DESC_COUNTER(VCPU, tlbmiss_ld_exits),
63 STATS_DESC_COUNTER(VCPU, tlbmiss_st_exits),
64 STATS_DESC_COUNTER(VCPU, addrerr_st_exits),
65 STATS_DESC_COUNTER(VCPU, addrerr_ld_exits),
66 STATS_DESC_COUNTER(VCPU, syscall_exits),
67 STATS_DESC_COUNTER(VCPU, resvd_inst_exits),
68 STATS_DESC_COUNTER(VCPU, break_inst_exits),
69 STATS_DESC_COUNTER(VCPU, trap_inst_exits),
70 STATS_DESC_COUNTER(VCPU, msa_fpe_exits),
71 STATS_DESC_COUNTER(VCPU, fpe_exits),
72 STATS_DESC_COUNTER(VCPU, msa_disabled_exits),
73 STATS_DESC_COUNTER(VCPU, flush_dcache_exits),
74 STATS_DESC_COUNTER(VCPU, vz_gpsi_exits),
75 STATS_DESC_COUNTER(VCPU, vz_gsfc_exits),
76 STATS_DESC_COUNTER(VCPU, vz_hc_exits),
77 STATS_DESC_COUNTER(VCPU, vz_grr_exits),
78 STATS_DESC_COUNTER(VCPU, vz_gva_exits),
79 STATS_DESC_COUNTER(VCPU, vz_ghfc_exits),
80 STATS_DESC_COUNTER(VCPU, vz_gpa_exits),
81 STATS_DESC_COUNTER(VCPU, vz_resvd_exits),
83 STATS_DESC_COUNTER(VCPU, vz_cpucfg_exits),
113 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_runnable() argument
115 return !!(vcpu->arch.pending_exceptions); in kvm_arch_vcpu_runnable()
118 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_in_kernel() argument
123 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_should_kick() argument
261 struct kvm_vcpu *vcpu; in kvm_mips_comparecount_wakeup() local
263 vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer); in kvm_mips_comparecount_wakeup()
265 kvm_mips_callbacks->queue_timer_int(vcpu); in kvm_mips_comparecount_wakeup()
267 vcpu->arch.wait = 0; in kvm_mips_comparecount_wakeup()
268 rcuwait_wake_up(&vcpu->wait); in kvm_mips_comparecount_wakeup()
270 return kvm_mips_count_timeout(vcpu); in kvm_mips_comparecount_wakeup()
278 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_create() argument
285 vcpu->kvm, vcpu->vcpu_id, vcpu); in kvm_arch_vcpu_create()
287 err = kvm_mips_callbacks->vcpu_init(vcpu); in kvm_arch_vcpu_create()
291 hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC, in kvm_arch_vcpu_create()
293 vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup; in kvm_arch_vcpu_create()
326 vcpu->arch.guest_ebase = gebase; in kvm_arch_vcpu_create()
353 vcpu->arch.vcpu_run = p; in kvm_arch_vcpu_create()
360 dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p); in kvm_arch_vcpu_create()
363 dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run); in kvm_arch_vcpu_create()
370 vcpu->arch.last_sched_cpu = -1; in kvm_arch_vcpu_create()
371 vcpu->arch.last_exec_cpu = -1; in kvm_arch_vcpu_create()
374 err = kvm_mips_callbacks->vcpu_setup(vcpu); in kvm_arch_vcpu_create()
383 kvm_mips_callbacks->vcpu_uninit(vcpu); in kvm_arch_vcpu_create()
387 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_destroy() argument
389 hrtimer_cancel(&vcpu->arch.comparecount_timer); in kvm_arch_vcpu_destroy()
391 kvm_mips_dump_stats(vcpu); in kvm_arch_vcpu_destroy()
393 kvm_mmu_free_memory_caches(vcpu); in kvm_arch_vcpu_destroy()
394 kfree(vcpu->arch.guest_ebase); in kvm_arch_vcpu_destroy()
396 kvm_mips_callbacks->vcpu_uninit(vcpu); in kvm_arch_vcpu_destroy()
399 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_guest_debug() argument
406 * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
407 * the vCPU is running.
412 static int noinstr kvm_mips_vcpu_enter_exit(struct kvm_vcpu *vcpu) in kvm_mips_vcpu_enter_exit() argument
417 ret = kvm_mips_callbacks->vcpu_run(vcpu); in kvm_mips_vcpu_enter_exit()
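
The comment at lines 406-407 states the contract of this wrapper: the vcpu_run callback must execute inside an RCU extended quiescent state, with no instrumentable code in between (hence noinstr). A minimal sketch of such a wrapper, assuming the elided lines around 417 use the generic guest_state_enter_irqoff()/guest_state_exit_irqoff() helpers from <linux/kvm_host.h>; this is a plausible shape, not necessarily the verbatim elided code:

static int noinstr kvm_mips_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
	int ret;

	/* Enter the RCU EQS; the guest is about to run. */
	guest_state_enter_irqoff();
	ret = kvm_mips_callbacks->vcpu_run(vcpu);
	/* Leave the EQS before any instrumented host code executes. */
	guest_state_exit_irqoff();

	return ret;
}
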
423 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_ioctl_run() argument
427 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_run()
429 kvm_sigset_activate(vcpu); in kvm_arch_vcpu_ioctl_run()
431 if (vcpu->mmio_needed) { in kvm_arch_vcpu_ioctl_run()
432 if (!vcpu->mmio_is_write) in kvm_arch_vcpu_ioctl_run()
433 kvm_mips_complete_mmio_load(vcpu); in kvm_arch_vcpu_ioctl_run()
434 vcpu->mmio_needed = 0; in kvm_arch_vcpu_ioctl_run()
437 if (!vcpu->wants_to_run) in kvm_arch_vcpu_ioctl_run()
444 trace_kvm_enter(vcpu); in kvm_arch_vcpu_ioctl_run()
447 * Make sure the read of VCPU requests in vcpu_run() callback is not in kvm_arch_vcpu_ioctl_run()
448 * reordered ahead of the write to vcpu->mode, or we could miss a TLB in kvm_arch_vcpu_ioctl_run()
449 * flush request while the requester sees the VCPU as outside of guest in kvm_arch_vcpu_ioctl_run()
452 smp_store_mb(vcpu->mode, IN_GUEST_MODE); in kvm_arch_vcpu_ioctl_run()
454 r = kvm_mips_vcpu_enter_exit(vcpu); in kvm_arch_vcpu_ioctl_run()
469 trace_kvm_out(vcpu); in kvm_arch_vcpu_ioctl_run()
474 kvm_sigset_deactivate(vcpu); in kvm_arch_vcpu_ioctl_run()
476 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_run()
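
The ordering comment at lines 447-452 is the vCPU-side half of KVM's request protocol: smp_store_mb() is a store followed by a full memory barrier, so the write of IN_GUEST_MODE is globally visible before the run path loads the request bits. A condensed sketch of that discipline; kvm_request_pending() is the real helper from <linux/kvm_host.h>, while kvm_mips_handle_requests() is a hypothetical stand-in for whatever the elided entry code does with pending requests:

	/* vCPU side: publish IN_GUEST_MODE, *then* look at requests. */
	smp_store_mb(vcpu->mode, IN_GUEST_MODE);	/* store + smp_mb() */
	if (kvm_request_pending(vcpu))			/* load cannot be hoisted above the store */
		kvm_mips_handle_requests(vcpu);		/* hypothetical helper */
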
480 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_interrupt() argument
494 dvcpu = vcpu; in kvm_vcpu_ioctl_interrupt()
496 dvcpu = kvm_get_vcpu(vcpu->kvm, irq->cpu); in kvm_vcpu_ioctl_interrupt()
516 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_mpstate() argument
522 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_mpstate() argument
579 static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu) in kvm_mips_num_regs() argument
584 if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) { in kvm_mips_num_regs()
590 if (kvm_mips_guest_can_have_msa(&vcpu->arch)) in kvm_mips_num_regs()
592 ret += kvm_mips_callbacks->num_regs(vcpu); in kvm_mips_num_regs()
597 static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices) in kvm_mips_copy_reg_indices() argument
607 if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) { in kvm_mips_copy_reg_indices()
630 if (kvm_mips_guest_can_have_msa(&vcpu->arch)) { in kvm_mips_copy_reg_indices()
644 return kvm_mips_callbacks->copy_reg_indices(vcpu, indices); in kvm_mips_copy_reg_indices()
647 static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, in kvm_mips_get_reg() argument
650 struct mips_coproc *cop0 = &vcpu->arch.cop0; in kvm_mips_get_reg()
651 struct mips_fpu_struct *fpu = &vcpu->arch.fpu; in kvm_mips_get_reg()
660 v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0]; in kvm_mips_get_reg()
664 v = (long)vcpu->arch.hi; in kvm_mips_get_reg()
667 v = (long)vcpu->arch.lo; in kvm_mips_get_reg()
671 v = (long)vcpu->arch.pc; in kvm_mips_get_reg()
676 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_get_reg()
686 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_get_reg()
695 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_get_reg()
700 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_get_reg()
707 if (!kvm_mips_guest_has_msa(&vcpu->arch)) in kvm_mips_get_reg()
724 if (!kvm_mips_guest_has_msa(&vcpu->arch)) in kvm_mips_get_reg()
729 if (!kvm_mips_guest_has_msa(&vcpu->arch)) in kvm_mips_get_reg()
736 ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v); in kvm_mips_get_reg()
759 static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, in kvm_mips_set_reg() argument
762 struct mips_coproc *cop0 = &vcpu->arch.cop0; in kvm_mips_set_reg()
763 struct mips_fpu_struct *fpu = &vcpu->arch.fpu; in kvm_mips_set_reg()
794 vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v; in kvm_mips_set_reg()
798 vcpu->arch.hi = v; in kvm_mips_set_reg()
801 vcpu->arch.lo = v; in kvm_mips_set_reg()
805 vcpu->arch.pc = v; in kvm_mips_set_reg()
810 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_set_reg()
820 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_set_reg()
829 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_set_reg()
834 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_set_reg()
841 if (!kvm_mips_guest_has_msa(&vcpu->arch)) in kvm_mips_set_reg()
855 if (!kvm_mips_guest_has_msa(&vcpu->arch)) in kvm_mips_set_reg()
860 if (!kvm_mips_guest_has_msa(&vcpu->arch)) in kvm_mips_set_reg()
867 return kvm_mips_callbacks->set_one_reg(vcpu, reg, v); in kvm_mips_set_reg()
872 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_enable_cap() argument
877 if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap)) in kvm_vcpu_ioctl_enable_cap()
886 vcpu->arch.fpu_enabled = true; in kvm_vcpu_ioctl_enable_cap()
889 vcpu->arch.msa_enabled = true; in kvm_vcpu_ioctl_enable_cap()
902 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_async_ioctl() local
910 kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, in kvm_arch_vcpu_async_ioctl()
913 return kvm_vcpu_ioctl_interrupt(vcpu, &irq); in kvm_arch_vcpu_async_ioctl()
922 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl() local
926 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl()
937 r = kvm_mips_set_reg(vcpu, &reg); in kvm_arch_vcpu_ioctl()
939 r = kvm_mips_get_reg(vcpu, &reg); in kvm_arch_vcpu_ioctl()
951 reg_list.n = kvm_mips_num_regs(vcpu); in kvm_arch_vcpu_ioctl()
957 r = kvm_mips_copy_reg_indices(vcpu, user_list->reg); in kvm_arch_vcpu_ioctl()
966 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); in kvm_arch_vcpu_ioctl()
973 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl()
1000 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_sregs() argument
1006 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_sregs() argument
1012 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_postcreate() argument
1016 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) in kvm_arch_vcpu_ioctl_get_fpu() argument
1021 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) in kvm_arch_vcpu_ioctl_set_fpu() argument
1026 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) in kvm_arch_vcpu_fault() argument
1078 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) in kvm_cpu_has_pending_timer() argument
1080 return kvm_mips_pending_timer(vcpu) || in kvm_cpu_has_pending_timer()
1081 kvm_read_c0_guest_cause(&vcpu->arch.cop0) & C_TI; in kvm_cpu_has_pending_timer()
1084 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_dump_regs() argument
1089 if (!vcpu) in kvm_arch_vcpu_dump_regs()
1092 kvm_debug("VCPU Register Dump:\n"); in kvm_arch_vcpu_dump_regs()
1093 kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc); in kvm_arch_vcpu_dump_regs()
1094 kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions); in kvm_arch_vcpu_dump_regs()
1098 vcpu->arch.gprs[i], in kvm_arch_vcpu_dump_regs()
1099 vcpu->arch.gprs[i + 1], in kvm_arch_vcpu_dump_regs()
1100 vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]); in kvm_arch_vcpu_dump_regs()
1102 kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi); in kvm_arch_vcpu_dump_regs()
1103 kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo); in kvm_arch_vcpu_dump_regs()
1105 cop0 = &vcpu->arch.cop0; in kvm_arch_vcpu_dump_regs()
1115 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in kvm_arch_vcpu_ioctl_set_regs() argument
1119 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_set_regs()
1121 for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++) in kvm_arch_vcpu_ioctl_set_regs()
1122 vcpu->arch.gprs[i] = regs->gpr[i]; in kvm_arch_vcpu_ioctl_set_regs()
1123 vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */ in kvm_arch_vcpu_ioctl_set_regs()
1124 vcpu->arch.hi = regs->hi; in kvm_arch_vcpu_ioctl_set_regs()
1125 vcpu->arch.lo = regs->lo; in kvm_arch_vcpu_ioctl_set_regs()
1126 vcpu->arch.pc = regs->pc; in kvm_arch_vcpu_ioctl_set_regs()
1128 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_set_regs()
1132 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in kvm_arch_vcpu_ioctl_get_regs() argument
1136 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_get_regs()
1138 for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++) in kvm_arch_vcpu_ioctl_get_regs()
1139 regs->gpr[i] = vcpu->arch.gprs[i]; in kvm_arch_vcpu_ioctl_get_regs()
1141 regs->hi = vcpu->arch.hi; in kvm_arch_vcpu_ioctl_get_regs()
1142 regs->lo = vcpu->arch.lo; in kvm_arch_vcpu_ioctl_get_regs()
1143 regs->pc = vcpu->arch.pc; in kvm_arch_vcpu_ioctl_get_regs()
1145 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_get_regs()
1149 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_translate() argument
1169 static int __kvm_mips_handle_exit(struct kvm_vcpu *vcpu) in __kvm_mips_handle_exit() argument
1171 struct kvm_run *run = vcpu->run; in __kvm_mips_handle_exit()
1172 u32 cause = vcpu->arch.host_cp0_cause; in __kvm_mips_handle_exit()
1174 u32 __user *opc = (u32 __user *) vcpu->arch.pc; in __kvm_mips_handle_exit()
1175 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; in __kvm_mips_handle_exit()
1180 vcpu->mode = OUTSIDE_GUEST_MODE; in __kvm_mips_handle_exit()
1195 cause, opc, run, vcpu); in __kvm_mips_handle_exit()
1196 trace_kvm_exit(vcpu, exccode); in __kvm_mips_handle_exit()
1200 kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc); in __kvm_mips_handle_exit()
1202 ++vcpu->stat.int_exits; in __kvm_mips_handle_exit()
1213 ++vcpu->stat.cop_unusable_exits; in __kvm_mips_handle_exit()
1214 ret = kvm_mips_callbacks->handle_cop_unusable(vcpu); in __kvm_mips_handle_exit()
1221 ++vcpu->stat.tlbmod_exits; in __kvm_mips_handle_exit()
1222 ret = kvm_mips_callbacks->handle_tlb_mod(vcpu); in __kvm_mips_handle_exit()
1227 cause, kvm_read_c0_guest_status(&vcpu->arch.cop0), opc, in __kvm_mips_handle_exit()
1230 ++vcpu->stat.tlbmiss_st_exits; in __kvm_mips_handle_exit()
1231 ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu); in __kvm_mips_handle_exit()
1238 ++vcpu->stat.tlbmiss_ld_exits; in __kvm_mips_handle_exit()
1239 ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu); in __kvm_mips_handle_exit()
1243 ++vcpu->stat.addrerr_st_exits; in __kvm_mips_handle_exit()
1244 ret = kvm_mips_callbacks->handle_addr_err_st(vcpu); in __kvm_mips_handle_exit()
1248 ++vcpu->stat.addrerr_ld_exits; in __kvm_mips_handle_exit()
1249 ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu); in __kvm_mips_handle_exit()
1253 ++vcpu->stat.syscall_exits; in __kvm_mips_handle_exit()
1254 ret = kvm_mips_callbacks->handle_syscall(vcpu); in __kvm_mips_handle_exit()
1258 ++vcpu->stat.resvd_inst_exits; in __kvm_mips_handle_exit()
1259 ret = kvm_mips_callbacks->handle_res_inst(vcpu); in __kvm_mips_handle_exit()
1263 ++vcpu->stat.break_inst_exits; in __kvm_mips_handle_exit()
1264 ret = kvm_mips_callbacks->handle_break(vcpu); in __kvm_mips_handle_exit()
1268 ++vcpu->stat.trap_inst_exits; in __kvm_mips_handle_exit()
1269 ret = kvm_mips_callbacks->handle_trap(vcpu); in __kvm_mips_handle_exit()
1273 ++vcpu->stat.msa_fpe_exits; in __kvm_mips_handle_exit()
1274 ret = kvm_mips_callbacks->handle_msa_fpe(vcpu); in __kvm_mips_handle_exit()
1278 ++vcpu->stat.fpe_exits; in __kvm_mips_handle_exit()
1279 ret = kvm_mips_callbacks->handle_fpe(vcpu); in __kvm_mips_handle_exit()
1283 ++vcpu->stat.msa_disabled_exits; in __kvm_mips_handle_exit()
1284 ret = kvm_mips_callbacks->handle_msa_disabled(vcpu); in __kvm_mips_handle_exit()
1289 ret = kvm_mips_callbacks->handle_guest_exit(vcpu); in __kvm_mips_handle_exit()
1296 kvm_get_badinstr(opc, vcpu, &inst); in __kvm_mips_handle_exit()
1299 kvm_read_c0_guest_status(&vcpu->arch.cop0)); in __kvm_mips_handle_exit()
1300 kvm_arch_vcpu_dump_regs(vcpu); in __kvm_mips_handle_exit()
1310 kvm_vz_acquire_htimer(vcpu); in __kvm_mips_handle_exit()
1313 kvm_mips_deliver_interrupts(vcpu, cause); in __kvm_mips_handle_exit()
1320 ++vcpu->stat.signal_exits; in __kvm_mips_handle_exit()
1321 trace_kvm_exit(vcpu, KVM_TRACE_EXIT_SIGNAL); in __kvm_mips_handle_exit()
1326 trace_kvm_reenter(vcpu); in __kvm_mips_handle_exit()
1329 * Make sure the read of VCPU requests in vcpu_reenter() in __kvm_mips_handle_exit()
1330 * callback is not reordered ahead of the write to vcpu->mode, in __kvm_mips_handle_exit()
1332 * the VCPU as outside of guest mode and not needing an IPI. in __kvm_mips_handle_exit()
1334 smp_store_mb(vcpu->mode, IN_GUEST_MODE); in __kvm_mips_handle_exit()
1336 kvm_mips_callbacks->vcpu_reenter(vcpu); in __kvm_mips_handle_exit()
1347 if (kvm_mips_guest_has_fpu(&vcpu->arch) && in __kvm_mips_handle_exit()
1349 __kvm_restore_fcsr(&vcpu->arch); in __kvm_mips_handle_exit()
1351 if (kvm_mips_guest_has_msa(&vcpu->arch) && in __kvm_mips_handle_exit()
1353 __kvm_restore_msacsr(&vcpu->arch); in __kvm_mips_handle_exit()
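
Lines 1329-1334 repeat the same ordering requirement on the re-entry path. The other half of the protocol belongs to the requester: it sets a request bit and kicks the vCPU, and the kick only needs an IPI when the target is seen IN_GUEST_MODE, which is what kvm_arch_vcpu_should_kick() (line 123 above) decides. A sketch of both sides, assuming the generic request API; kvm_mips_request_tlb_flush() is a hypothetical illustration, and the should_kick body shown is the likely one-liner over the generic kvm_vcpu_exiting_guest_mode() cmpxchg helper:

/* Requester side (hypothetical illustration): set the bit, then kick. */
static void kvm_mips_request_tlb_flush(struct kvm_vcpu *vcpu)
{
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	kvm_vcpu_kick(vcpu);	/* sends an IPI only if the target may be running guest code */
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/*
	 * cmpxchg to EXITING_GUEST_MODE so the exit path can tell that a
	 * kick raced with it; only IN_GUEST_MODE actually needs the IPI.
	 */
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}
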
1358 int noinstr kvm_mips_handle_exit(struct kvm_vcpu *vcpu) in kvm_mips_handle_exit() argument
1363 ret = __kvm_mips_handle_exit(vcpu); in kvm_mips_handle_exit()
1370 void kvm_own_fpu(struct kvm_vcpu *vcpu) in kvm_own_fpu() argument
1372 struct mips_coproc *cop0 = &vcpu->arch.cop0; in kvm_own_fpu()
1386 vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) in kvm_own_fpu()
1387 kvm_lose_fpu(vcpu); in kvm_own_fpu()
1401 if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) { in kvm_own_fpu()
1402 __kvm_restore_fpu(&vcpu->arch); in kvm_own_fpu()
1403 vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU; in kvm_own_fpu()
1404 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU); in kvm_own_fpu()
1406 trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_FPU); in kvm_own_fpu()
1414 void kvm_own_msa(struct kvm_vcpu *vcpu) in kvm_own_msa() argument
1416 struct mips_coproc *cop0 = &vcpu->arch.cop0; in kvm_own_msa()
1425 if (kvm_mips_guest_has_fpu(&vcpu->arch)) { in kvm_own_msa()
1433 (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | in kvm_own_msa()
1435 kvm_lose_fpu(vcpu); in kvm_own_msa()
1448 switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) { in kvm_own_msa()
1453 __kvm_restore_msa_upper(&vcpu->arch); in kvm_own_msa()
1454 vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA; in kvm_own_msa()
1455 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_MSA); in kvm_own_msa()
1459 __kvm_restore_msa(&vcpu->arch); in kvm_own_msa()
1460 vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA; in kvm_own_msa()
1461 if (kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_own_msa()
1462 vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU; in kvm_own_msa()
1463 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, in kvm_own_msa()
1467 trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_MSA); in kvm_own_msa()
1476 void kvm_drop_fpu(struct kvm_vcpu *vcpu) in kvm_drop_fpu() argument
1479 if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) { in kvm_drop_fpu()
1481 trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_MSA); in kvm_drop_fpu()
1482 vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA; in kvm_drop_fpu()
1484 if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) { in kvm_drop_fpu()
1486 trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_FPU); in kvm_drop_fpu()
1487 vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU; in kvm_drop_fpu()
1493 void kvm_lose_fpu(struct kvm_vcpu *vcpu) in kvm_lose_fpu() argument
1503 if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) { in kvm_lose_fpu()
1504 __kvm_save_msa(&vcpu->arch); in kvm_lose_fpu()
1505 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA); in kvm_lose_fpu()
1509 if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) { in kvm_lose_fpu()
1513 vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA); in kvm_lose_fpu()
1514 } else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) { in kvm_lose_fpu()
1515 __kvm_save_fpu(&vcpu->arch); in kvm_lose_fpu()
1516 vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU; in kvm_lose_fpu()
1517 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU); in kvm_lose_fpu()