Lines matching refs: vcpu

31 static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u64 esr)
34 kvm_inject_vabt(vcpu);
37 static int handle_hvc(struct kvm_vcpu *vcpu)
39 trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
40 kvm_vcpu_hvc_get_imm(vcpu));
41 vcpu->stat.hvc_exit_stat++;
44 if (vcpu_has_nv(vcpu)) {
45 if (vcpu_read_sys_reg(vcpu, HCR_EL2) & HCR_HCD)
46 kvm_inject_undefined(vcpu);
48 kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
53 return kvm_smccc_call_handler(vcpu);
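
The refs above come from handle_hvc(): with nested virtualization, an HVC either injects UNDEF (when the guest's HCR_EL2.HCD disables HVC) or is reflected to the virtual EL2 as a synchronous exception; without nested virt it is treated as an SMCCC/PSCI call. As a rough illustration only, here is a minimal self-contained userspace sketch of that decision flow; every demo_* name and the struct layout are stand-ins invented for this example, not kernel definitions.

/*
 * Simplified model of the handle_hvc() decision flow. Illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_HCR_HCD (1ULL << 29)	/* stand-in for HCR_EL2.HCD (HVC disable) */

struct demo_vcpu {
	bool has_nested_virt;		/* guest has a virtual EL2 */
	unsigned long long hcr_el2;	/* guest's view of HCR_EL2 */
};

/* Returns 1 to resume the guest, mirroring the handler's convention. */
static int demo_handle_hvc(struct demo_vcpu *vcpu)
{
	if (vcpu->has_nested_virt) {
		if (vcpu->hcr_el2 & DEMO_HCR_HCD)
			printf("HVC disabled at vEL2: inject UNDEF into the guest\n");
		else
			printf("forward the HVC as a synchronous exception to vEL2\n");
		return 1;
	}

	printf("no nested virt: treat the HVC as an SMCCC/PSCI call\n");
	return 1;
}

int main(void)
{
	struct demo_vcpu v = { .has_nested_virt = true, .hcr_el2 = DEMO_HCR_HCD };
	return demo_handle_hvc(&v) == 1 ? 0 : 1;
}
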
56 static int handle_smc(struct kvm_vcpu *vcpu)
62 if (forward_smc_trap(vcpu))
75 kvm_incr_pc(vcpu);
81 if (kvm_vcpu_hvc_get_imm(vcpu)) {
82 vcpu_set_reg(vcpu, 0, ~0UL);
93 return kvm_smccc_call_handler(vcpu);
104 static int kvm_handle_fpasimd(struct kvm_vcpu *vcpu)
106 if (guest_hyp_fpsimd_traps_enabled(vcpu))
107 return kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
110 kvm_inject_undefined(vcpu);
118 * @vcpu: the vcpu pointer
120 * WFE[T]: Yield the CPU and come back to this vcpu when the scheduler
129 static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
131 u64 esr = kvm_vcpu_get_esr(vcpu);
134 if (guest_hyp_wfx_traps_enabled(vcpu))
135 return kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
138 trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
139 vcpu->stat.wfe_exit_stat++;
141 trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
142 vcpu->stat.wfi_exit_stat++;
149 now = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_TIMER_CNT);
150 val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));
161 kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
164 vcpu_set_flag(vcpu, IN_WFIT);
166 kvm_vcpu_wfi(vcpu);
169 kvm_incr_pc(vcpu);
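
The refs above belong to kvm_handle_wfx(): WFE yields the physical CPU to another runnable vCPU, WFI blocks the vCPU until a wake-up event, the timed variants (WFET/WFIT) first compare the requested deadline against the virtual counter, and the trapped instruction is always skipped before resuming. A minimal userspace sketch of that flow follows; the demo_* names and struct are stand-ins for illustration, not the kernel's types.

/*
 * Simplified model of the kvm_handle_wfx() decision flow. Illustrative only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_wfx_trap {
	bool is_wfe;		/* WFE/WFET rather than WFI/WFIT */
	bool has_timeout;	/* timed variant (WFET/WFIT) */
	uint64_t timeout;	/* requested wake-up value of the counter */
	uint64_t now;		/* current virtual counter */
};

/* Returns 1 to resume the guest after skipping the trapped instruction. */
static int demo_handle_wfx(const struct demo_wfx_trap *t)
{
	/* A WFET/WFIT whose deadline has already passed is a no-op. */
	if (t->has_timeout && t->now >= t->timeout)
		goto out;

	if (t->is_wfe)
		printf("WFE: yield this physical CPU to another runnable vCPU\n");
	else
		printf("WFI: block the vCPU until an interrupt (or timeout) arrives\n");

out:
	printf("advance the guest PC past the WFx instruction\n");
	return 1;
}

int main(void)
{
	struct demo_wfx_trap t = { .is_wfe = false, .has_timeout = true,
				   .timeout = 100, .now = 150 };
	return demo_handle_wfx(&t) == 1 ? 0 : 1;
}
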
177 * @vcpu: the vcpu pointer
183 * @return: 0 (while setting vcpu->run->exit_reason)
185 static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu)
187 struct kvm_run *run = vcpu->run;
188 u64 esr = kvm_vcpu_get_esr(vcpu);
190 if (!vcpu->guest_debug && forward_debug_exception(vcpu))
200 run->debug.arch.far = vcpu->arch.fault.far_el2;
203 *vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
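
kvm_handle_guest_debug(), covered by the refs above, illustrates the exit-handler return convention: returning 1 re-enters the guest, while returning 0 hands the exit to userspace with vcpu->run->exit_reason (here KVM_EXIT_DEBUG) and the fault details filled in. The sketch below models only that convention; demo_run is a stand-in, not the real struct kvm_run, and the constant value is copied for illustration.

/*
 * Sketch of the exit-to-userspace convention. Illustrative only.
 */
#include <stdint.h>

#define DEMO_EXIT_DEBUG 4	/* stand-in mirroring KVM_EXIT_DEBUG */

struct demo_run {
	uint32_t exit_reason;
	uint64_t debug_esr;	/* syndrome describing the debug exception */
	uint64_t debug_far;	/* faulting address, when relevant */
};

static int demo_handle_guest_debug(struct demo_run *run, uint64_t esr, uint64_t far)
{
	run->exit_reason = DEMO_EXIT_DEBUG;
	run->debug_esr = esr;
	run->debug_far = far;
	return 0;	/* 0 = return to userspace; 1 would resume the guest */
}

int main(void)
{
	struct demo_run run = { 0 };
	return demo_handle_guest_debug(&run, 0, 0) == 0 ? 0 : 1;
}
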
210 static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu)
212 u64 esr = kvm_vcpu_get_esr(vcpu);
217 kvm_inject_undefined(vcpu);
225 static int handle_sve(struct kvm_vcpu *vcpu)
227 if (guest_hyp_sve_traps_enabled(vcpu))
228 return kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
230 kvm_inject_undefined(vcpu);
247 static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu)
249 if (!vcpu_has_ptrauth(vcpu)) {
250 kvm_inject_undefined(vcpu);
254 if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
255 kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
261 kvm_inject_undefined(vcpu);
265 static int kvm_handle_eret(struct kvm_vcpu *vcpu)
267 if (esr_iss_is_eretax(kvm_vcpu_get_esr(vcpu)) &&
268 !vcpu_has_ptrauth(vcpu))
269 return kvm_handle_ptrauth(vcpu);
282 if (is_hyp_ctxt(vcpu))
283 kvm_emulate_nested_eret(vcpu);
285 kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
290 static int handle_svc(struct kvm_vcpu *vcpu)
297 kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
329 static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
331 u64 esr = kvm_vcpu_get_esr(vcpu);
343 static int handle_trap_exceptions(struct kvm_vcpu *vcpu)
351 if (!kvm_condition_valid(vcpu)) {
352 kvm_incr_pc(vcpu);
357 exit_handler = kvm_get_exit_handler(vcpu);
358 handled = exit_handler(vcpu);
368 int handle_exit(struct kvm_vcpu *vcpu, int exception_index)
370 struct kvm_run *run = vcpu->run;
388 return handle_trap_exceptions(vcpu);
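
The refs from kvm_get_exit_handler() and handle_trap_exceptions() above reflect the dispatch pattern used for trap exits: a table of handler function pointers indexed by the exception class (EC) field of ESR_EL2, with a default entry for classes KVM does not expect. The sketch below models that pattern in standalone C; the table contents, demo_* names, and handler bodies are illustrative, while the EC field position (ESR_ELx[31:26]) and the EC values for WFx (0x01) and HVC from AArch64 (0x16) are architectural.

/*
 * Minimal model of EC-indexed exit dispatch. Illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

typedef int (*exit_handler_fn)(void);

enum { DEMO_EC_WFX = 0x01, DEMO_EC_HVC64 = 0x16, DEMO_EC_MAX = 0x3f };

static int demo_handle_unknown(void) { printf("unknown EC: inject UNDEF\n"); return 1; }
static int demo_handle_wfx(void)     { printf("WFx trap\n"); return 1; }
static int demo_handle_hvc(void)     { printf("HVC trap\n"); return 1; }

static exit_handler_fn demo_exit_handlers[DEMO_EC_MAX + 1];

static exit_handler_fn demo_get_exit_handler(uint64_t esr)
{
	uint8_t ec = (esr >> 26) & 0x3f;	/* ESR_ELx[31:26] is the EC field */

	return demo_exit_handlers[ec] ? demo_exit_handlers[ec] : demo_handle_unknown;
}

int main(void)
{
	demo_exit_handlers[DEMO_EC_WFX] = demo_handle_wfx;
	demo_exit_handlers[DEMO_EC_HVC64] = demo_handle_hvc;

	/* An HVC from AArch64 encodes EC 0x16 in ESR_EL2[31:26]. */
	uint64_t esr = (uint64_t)DEMO_EC_HVC64 << 26;
	return demo_get_exit_handler(esr)() == 1 ? 0 : 1;
}
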
412 void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
416 u64 disr = kvm_vcpu_get_disr(vcpu);
418 kvm_handle_guest_serror(vcpu, disr_to_esr(disr));
420 kvm_inject_vabt(vcpu);
429 kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
448 u64 par, uintptr_t vcpu,
493 spsr, elr_virt, esr, far, hpfar, par, vcpu);