Lines matching defs:vcpu
31 static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u64 esr)
34 kvm_inject_vabt(vcpu);
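
These matches appear to come from KVM/arm64's exit handling path (upstream arch/arm64/kvm/handle_exit.c). As a reading aid, here is a sketch of the SError handler around the two matched lines; the unmatched lines are inferred from that file and should be checked against the actual source:

static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u64 esr)
{
        /* Inject a virtual abort unless this is a survivable RAS SError */
        if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(NULL, esr))
                kvm_inject_vabt(vcpu);
}
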
37 static int handle_hvc(struct kvm_vcpu *vcpu)
39 trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
40 kvm_vcpu_hvc_get_imm(vcpu));
41 vcpu->stat.hvc_exit_stat++;
44 if (vcpu_has_nv(vcpu)) {
45 if (vcpu_read_sys_reg(vcpu, HCR_EL2) & HCR_HCD)
46 kvm_inject_undefined(vcpu);
48 kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
53 return kvm_smccc_call_handler(vcpu);
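
Putting the handle_hvc matches together: with nested virt, an HVC either UNDEFs (the guest hypervisor set HCR_EL2.HCD) or is reinjected into virtual EL2; otherwise it is treated as an SMCCC/PSCI call. Only the else branch and the return are inferred:

static int handle_hvc(struct kvm_vcpu *vcpu)
{
        trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
                            kvm_vcpu_hvc_get_imm(vcpu));
        vcpu->stat.hvc_exit_stat++;

        /* Forward HVC to the virtual EL2 if the guest has one */
        if (vcpu_has_nv(vcpu)) {
                if (vcpu_read_sys_reg(vcpu, HCR_EL2) & HCR_HCD)
                        kvm_inject_undefined(vcpu);
                else
                        kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));

                return 1;
        }

        return kvm_smccc_call_handler(vcpu);
}
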
56 static int handle_smc(struct kvm_vcpu *vcpu)
62 if (forward_smc_trap(vcpu))
75 kvm_incr_pc(vcpu);
81 if (kvm_vcpu_hvc_get_imm(vcpu)) {
82 vcpu_set_reg(vcpu, 0, ~0UL);
93 return kvm_smccc_call_handler(vcpu);
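
For handle_smc, note that a trapped SMC does not advance the PC, hence the explicit kvm_incr_pc() before any further handling, and that SMCs with a nonzero immediate are reserved by the SMCCC, so the handler fails them with ~0UL (i.e. NOT_SUPPORTED) in x0. A hedged reconstruction around the matched lines:

static int handle_smc(struct kvm_vcpu *vcpu)
{
        /* Let the virtual EL2 handle the SMC if it asked to trap it */
        if (forward_smc_trap(vcpu))
                return 1;

        /* A trapped SMC doesn't advance the PC, so do it manually */
        kvm_incr_pc(vcpu);

        /* SMCs with a nonzero immediate are reserved by the SMCCC */
        if (kvm_vcpu_hvc_get_imm(vcpu)) {
                vcpu_set_reg(vcpu, 0, ~0UL);    /* NOT_SUPPORTED */
                return 1;
        }

        /* A zero immediate is likely a genuine SMCCC call */
        return kvm_smccc_call_handler(vcpu);
}
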
104 static int kvm_handle_fpasimd(struct kvm_vcpu *vcpu)
106 if (guest_hyp_fpsimd_traps_enabled(vcpu))
107 return kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
110 kvm_inject_undefined(vcpu);
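
FP/ASIMD traps reach this handler only when the feature cannot be offered to the guest; under NV the trap may instead belong to the guest hypervisor. Only the final return is inferred beyond the matched lines:

static int kvm_handle_fpasimd(struct kvm_vcpu *vcpu)
{
        /* The guest hypervisor may have set up its own FP/SIMD traps */
        if (guest_hyp_fpsimd_traps_enabled(vcpu))
                return kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));

        /* Otherwise the system simply lacks FP/ASIMD: UNDEF */
        kvm_inject_undefined(vcpu);
        return 1;
}
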
118 * @vcpu: the vcpu pointer
120 * WFE[T]: Yield the CPU and come back to this vcpu when the scheduler
129 static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
131 u64 esr = kvm_vcpu_get_esr(vcpu);
134 trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
135 vcpu->stat.wfe_exit_stat++;
137 trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
138 vcpu->stat.wfi_exit_stat++;
145 now = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_TIMER_CNT);
146 val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));
157 kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
160 vcpu_set_flag(vcpu, IN_WFIT);
162 kvm_vcpu_wfi(vcpu);
165 kvm_incr_pc(vcpu);
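
Putting the WFx matches together: WFE yields via kvm_vcpu_on_spin(), WFI blocks via kvm_vcpu_wfi(), and the WFxT variants first compare the register-supplied deadline against the virtual counter, returning immediately if it has already passed. A sketch with the control flow inferred around the matched lines (the ESR_ELx_WFx_* tests are assumptions based on the upstream handler):

static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
{
        u64 esr = kvm_vcpu_get_esr(vcpu);

        if (esr & ESR_ELx_WFx_ISS_WFE) {
                trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
                vcpu->stat.wfe_exit_stat++;
        } else {
                trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
                vcpu->stat.wfi_exit_stat++;
        }

        if (esr & ESR_ELx_WFx_ISS_WFxT) {
                if (esr & ESR_ELx_WFx_ISS_RV) {
                        u64 val, now;

                        /* WF{I,E}T returns at once if the deadline passed */
                        now = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_TIMER_CNT);
                        val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));
                        if (now >= val)
                                goto out;
                } else {
                        /* Degrade WFxT to plain WFx if the register form is invalid */
                        esr &= ~ESR_ELx_WFx_ISS_WFxT;
                }
        }

        if (esr & ESR_ELx_WFx_ISS_WFE) {
                kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
        } else {
                if (esr & ESR_ELx_WFx_ISS_WFxT)
                        vcpu_set_flag(vcpu, IN_WFIT);

                kvm_vcpu_wfi(vcpu);
        }
out:
        kvm_incr_pc(vcpu);

        return 1;
}
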
173 * @vcpu: the vcpu pointer
179 * @return: 0 (while setting vcpu->run->exit_reason)
181 static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu)
183 struct kvm_run *run = vcpu->run;
184 u64 esr = kvm_vcpu_get_esr(vcpu);
186 if (!vcpu->guest_debug && forward_debug_exception(vcpu))
196 run->debug.arch.far = vcpu->arch.fault.far_el2;
199 *vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
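
The debug handler returns 0 to hand the exception to userspace after stashing the ESR (and, for watchpoints, the FAR) in the run structure; software-step exits also set SPSR.SS so the guest makes progress when the step is retried. The KVM_EXIT_DEBUG setup and the switch are inferred around the matched lines:

static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u64 esr = kvm_vcpu_get_esr(vcpu);

        /* Under NV, the exception may belong to the guest hypervisor */
        if (!vcpu->guest_debug && forward_debug_exception(vcpu))
                return 1;

        run->exit_reason = KVM_EXIT_DEBUG;
        run->debug.arch.hsr = lower_32_bits(esr);
        run->debug.arch.hsr_high = upper_32_bits(esr);
        run->flags = 0;

        switch (ESR_ELx_EC(esr)) {
        case ESR_ELx_EC_WATCHPT_LOW:
                run->debug.arch.far = vcpu->arch.fault.far_el2;
                break;
        case ESR_ELx_EC_SOFTSTP_LOW:
                *vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
                break;
        }

        return 0;       /* exit to userspace */
}
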
206 static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu)
208 u64 esr = kvm_vcpu_get_esr(vcpu);
213 kvm_inject_undefined(vcpu);
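
Unknown exception classes are logged and turned into an UNDEF for the guest; the kvm_pr_unimpl() line is an assumption based on the upstream file:

static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu)
{
        u64 esr = kvm_vcpu_get_esr(vcpu);

        kvm_pr_unimpl("Unknown exception class: esr: %#016llx -- %s\n",
                      esr, esr_get_class_string(esr));

        kvm_inject_undefined(vcpu);
        return 1;
}
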
221 static int handle_sve(struct kvm_vcpu *vcpu)
223 if (guest_hyp_sve_traps_enabled(vcpu))
224 return kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
226 kvm_inject_undefined(vcpu);
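
handle_sve follows the same shape as the FP/ASIMD handler above: forward to virtual EL2 if the guest hypervisor trapped SVE, otherwise UNDEF. Only the final return is inferred:

static int handle_sve(struct kvm_vcpu *vcpu)
{
        if (guest_hyp_sve_traps_enabled(vcpu))
                return kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));

        kvm_inject_undefined(vcpu);
        return 1;
}
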
243 static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu)
245 if (!vcpu_has_ptrauth(vcpu)) {
246 kvm_inject_undefined(vcpu);
250 if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
251 kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
257 kvm_inject_undefined(vcpu);
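
Both kvm_inject_undefined() calls appear in the matches because the ptrauth handler has two UNDEF paths: the vCPU has no ptrauth at all, and a should-not-happen fallback. The middle case reinjects into virtual EL2 for an L2 guest trapped by its L1 hypervisor. The WARN is an assumption:

static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu)
{
        /* Guest used ptrauth without being allowed to: UNDEF */
        if (!vcpu_has_ptrauth(vcpu)) {
                kvm_inject_undefined(vcpu);
                return 1;
        }

        /* L2 guest trapped by the L1 hypervisor's HCR_EL2 settings */
        if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
                kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
                return 1;
        }

        /* Really shouldn't be here: emulation bug */
        WARN_ON_ONCE(1);
        kvm_inject_undefined(vcpu);
        return 1;
}
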
261 static int kvm_handle_eret(struct kvm_vcpu *vcpu)
263 if (esr_iss_is_eretax(kvm_vcpu_get_esr(vcpu)) &&
264 !vcpu_has_ptrauth(vcpu))
265 return kvm_handle_ptrauth(vcpu);
278 if (is_hyp_ctxt(vcpu))
279 kvm_emulate_nested_eret(vcpu);
281 kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
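
ERETAA/ERETAB without ptrauth are rerouted through the ptrauth handler; otherwise the ERET is either fully emulated (guest running in virtual EL2) or reinjected (guest at EL1 under NV). Only the else and the return are inferred:

static int kvm_handle_eret(struct kvm_vcpu *vcpu)
{
        /* ERETAx without ptrauth is handled like any other ptrauth trap */
        if (esr_iss_is_eretax(kvm_vcpu_get_esr(vcpu)) &&
            !vcpu_has_ptrauth(vcpu))
                return kvm_handle_ptrauth(vcpu);

        if (is_hyp_ctxt(vcpu))
                kvm_emulate_nested_eret(vcpu);
        else
                kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));

        return 1;
}
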
286 static int handle_svc(struct kvm_vcpu *vcpu)
293 kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
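
SVC only traps when a guest hypervisor asked for it (a fine-grained trap via HFGITR_EL2), so the handler unconditionally reinjects it:

static int handle_svc(struct kvm_vcpu *vcpu)
{
        /* SVC traps only for NV; hand it to the virtual EL2 */
        kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
        return 1;
}
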
325 static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
327 u64 esr = kvm_vcpu_get_esr(vcpu);
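
Dispatch is a plain table lookup keyed by the ESR exception class, mapping each ESR_ELx_EC_* value to one of the handlers above. The table name and the EC extraction are assumptions based on the upstream file:

static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
{
        u64 esr = kvm_vcpu_get_esr(vcpu);
        u8 esr_ec = ESR_ELx_EC(esr);

        return arm_exit_handlers[esr_ec];
}
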
339 static int handle_trap_exceptions(struct kvm_vcpu *vcpu)
347 if (!kvm_condition_valid(vcpu)) {
348 kvm_incr_pc(vcpu);
353 exit_handler = kvm_get_exit_handler(vcpu);
354 handled = exit_handler(vcpu);
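
Trapped instructions that fail their AArch32 condition check are simply skipped rather than emulated; everything else goes through the dispatch table. Sketch with the locals inferred:

static int handle_trap_exceptions(struct kvm_vcpu *vcpu)
{
        int handled;

        /* A trapped instruction that fails its condition check is a NOP */
        if (!kvm_condition_valid(vcpu)) {
                kvm_incr_pc(vcpu);
                handled = 1;
        } else {
                exit_handle_fn exit_handler;

                exit_handler = kvm_get_exit_handler(vcpu);
                handled = exit_handler(vcpu);
        }

        return handled;
}
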
364 int handle_exit(struct kvm_vcpu *vcpu, int exception_index)
366 struct kvm_run *run = vcpu->run;
384 return handle_trap_exceptions(vcpu);
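
handle_exit() is the top-level dispatcher: a return value > 0 resumes the guest, 0 exits to userspace with run->exit_reason set, and < 0 is an error. A trimmed sketch of the switch; the non-TRAP cases are assumptions based on the upstream dispatcher:

int handle_exit(struct kvm_vcpu *vcpu, int exception_index)
{
        struct kvm_run *run = vcpu->run;

        /* Pending SErrors have already been dealt with in handle_exit_early() */
        if (ARM_SERROR_PENDING(exception_index))
                return 1;

        exception_index = ARM_EXCEPTION_CODE(exception_index);

        switch (exception_index) {
        case ARM_EXCEPTION_IRQ:
        case ARM_EXCEPTION_EL1_SERROR:
                return 1;
        case ARM_EXCEPTION_TRAP:
                return handle_trap_exceptions(vcpu);
        case ARM_EXCEPTION_HYP_GONE:
                /* EL2 was reset to the hyp-stub (e.g. across kexec/reboot) */
                run->exit_reason = KVM_EXIT_FAIL_ENTRY;
                return 0;
        default:
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                return 0;
        }
}
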
408 void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
412 u64 disr = kvm_vcpu_get_disr(vcpu);
414 kvm_handle_guest_serror(vcpu, disr_to_esr(disr));
416 kvm_inject_vabt(vcpu);
425 kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
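
SErrors are handled before the host can be preempted: with the RAS extension the deferred syndrome comes from DISR_EL1, otherwise a virtual abort is injected directly; a synchronous EL1 SError exit uses the ESR instead. Reconstruction, with the capability check assumed:

void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
{
        if (ARM_SERROR_PENDING(exception_index)) {
                if (this_cpu_has_cap(ARM64_HAS_RAS_EXTN)) {
                        u64 disr = kvm_vcpu_get_disr(vcpu);

                        kvm_handle_guest_serror(vcpu, disr_to_esr(disr));
                } else {
                        kvm_inject_vabt(vcpu);
                }

                return;
        }

        exception_index = ARM_EXCEPTION_CODE(exception_index);

        if (exception_index == ARM_EXCEPTION_EL1_SERROR)
                kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
}
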
444 u64 par, uintptr_t vcpu,
489 spsr, elr_virt, esr, far, hpfar, par, vcpu);
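
The last two matches are not a handler but the nVHE hyp panic path: here `vcpu` arrives as a raw uintptr_t (a hypervisor-side pointer the host must not blindly dereference) in the panic handler's parameter list, and is finally printed in the panic() call. A sketch of the two sites; the surrounding parameters and format string are assumptions based on the upstream signature:

void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
                                              u64 elr_virt, u64 elr_phys,
                                              u64 par, uintptr_t vcpu,
                                              u64 far, u64 hpfar)
{
        /* ... decode the faulting hyp context ... */

        panic("HYP panic:\nPS:%08llx PC:%016llx ESR:%016llx\n"
              "FAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%016lx\n",
              spsr, elr_virt, esr, far, hpfar, par, vcpu);
}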