Lines Matching defs:vcpu

185  * value, the vcpu support for the relevant features, and the additional
188 static u64 get_restricted_features(const struct kvm_vcpu *vcpu,
206 if (vm_supported && !vm_supported(vcpu->kvm))
217 static u64 pvm_calc_id_reg(const struct kvm_vcpu *vcpu, u32 id)
221 return get_restricted_features(vcpu, id_aa64pfr0_el1_sys_val, pvmid_aa64pfr0);
223 return get_restricted_features(vcpu, id_aa64pfr1_el1_sys_val, pvmid_aa64pfr1);
227 return get_restricted_features(vcpu, id_aa64isar1_el1_sys_val, pvmid_aa64isar1);
229 return get_restricted_features(vcpu, id_aa64isar2_el1_sys_val, pvmid_aa64isar2);
231 return get_restricted_features(vcpu, id_aa64mmfr0_el1_sys_val, pvmid_aa64mmfr0);
233 return get_restricted_features(vcpu, id_aa64mmfr1_el1_sys_val, pvmid_aa64mmfr1);
235 return get_restricted_features(vcpu, id_aa64mmfr2_el1_sys_val, pvmid_aa64mmfr2);
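The matches at 188-235 are the feature-clamping path: pvm_calc_id_reg() dispatches each AArch64 ID register to get_restricted_features(), which combines the host's sanitised register value with a per-register restriction table. Below is a minimal sketch of that combining step, assuming a hypothetical pvm_ftr_bits layout and unsigned feature fields only; none of these details are taken from the listing itself.

/* Hypothetical restriction-table entry; names and layout are assumptions. */
struct pvm_ftr_bits {
        u8 shift;                                       /* field position within the ID register */
        u8 width;                                       /* field width in bits; 0 terminates the table */
        u64 max_val;                                    /* ceiling allowed for protected VMs */
        bool (*vm_supported)(const struct kvm *kvm);    /* optional capability gate */
};

static u64 get_restricted_features(const struct kvm_vcpu *vcpu,
                                   u64 sys_reg_val,
                                   const struct pvm_ftr_bits *restrictions)
{
        u64 val = 0;
        int i;

        for (i = 0; restrictions[i].width; i++) {
                const struct pvm_ftr_bits *ftr = &restrictions[i];
                bool (*vm_supported)(const struct kvm *) = ftr->vm_supported;
                u64 mask = GENMASK_ULL(ftr->shift + ftr->width - 1, ftr->shift);
                u64 host = (sys_reg_val & mask) >> ftr->shift;

                /* Hide fields whose capability this VM does not support. */
                if (vm_supported && !vm_supported(vcpu->kvm))
                        continue;

                /* Unsigned fields: expose the more restrictive of host value and pVM ceiling. */
                val |= min(host, ftr->max_val) << ftr->shift;
        }

        return val;
}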
250 static void inject_undef64(struct kvm_vcpu *vcpu)
254 *vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
255 *vcpu_cpsr(vcpu) = read_sysreg_el2(SYS_SPSR);
257 kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
259 __kvm_adjust_pc(vcpu);
263 write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
264 write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);
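The matches at 250-264 outline undef injection while the guest's EL1 sysregs are live at EL2: snapshot ELR/SPSR into the vcpu, pend an EL1 synchronous exception, let __kvm_adjust_pc() resolve the new PC and PSTATE, then write the result back to the live registers. A sketch of how those pieces fit together; the ESR_EL1/ELR_EL1 writes and the syndrome value are assumptions, as they are not among the matched lines.

static void inject_undef64(struct kvm_vcpu *vcpu)
{
        u64 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);

        /* Snapshot the live guest PC and PSTATE into the vcpu. */
        *vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
        *vcpu_cpsr(vcpu) = read_sysreg_el2(SYS_SPSR);

        /* Pend a synchronous exception to EL1 and compute the new PC/PSTATE. */
        kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
        __kvm_adjust_pc(vcpu);

        /* Assumed: syndrome and preferred return address for the guest's handler. */
        write_sysreg_el1(esr, SYS_ESR);
        write_sysreg_el1(read_sysreg_el2(SYS_ELR), SYS_ELR);

        /* Resume the guest at its EL1 vector with the adjusted PSTATE. */
        write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
        write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);
}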
267 static u64 read_id_reg(const struct kvm_vcpu *vcpu,
270 struct kvm *kvm = vcpu->kvm;
283 static bool pvm_access_raz_wi(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
298 static bool pvm_access_id_aarch32(struct kvm_vcpu *vcpu,
303 inject_undef64(vcpu);
307 return pvm_access_raz_wi(vcpu, p, r);
317 static bool pvm_access_id_aarch64(struct kvm_vcpu *vcpu,
322 inject_undef64(vcpu);
326 p->regval = read_id_reg(vcpu, r);
330 static bool pvm_gic_read_sre(struct kvm_vcpu *vcpu,
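The matches at 283-330 are the access handlers wired into the pVM register table; the search only surfaces their control flow. Below is a sketch of the two common shapes, assuming the conventional sys_reg_params fields: a read-as-zero/write-ignore handler, and the AArch64 ID-register path that faults writes and returns the clamped value on reads.

static bool pvm_access_raz_wi(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                              const struct sys_reg_desc *r)
{
        /* Reads return zero; writes are silently ignored. */
        if (!p->is_write)
                p->regval = 0;

        return true;
}

static bool pvm_access_id_aarch64(struct kvm_vcpu *vcpu,
                                  struct sys_reg_params *p,
                                  const struct sys_reg_desc *r)
{
        /* ID registers are read-only: fault writes back to the guest. */
        if (p->is_write) {
                inject_undef64(vcpu);
                return false;
        }

        p->regval = read_id_reg(vcpu, r);
        return true;
}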
480 void kvm_init_pvm_id_regs(struct kvm_vcpu *vcpu)
482 struct kvm *kvm = vcpu->kvm;
496 ka->id_regs[IDREG_IDX(r)] = pvm_calc_id_reg(vcpu, r);
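The matches at 480-496 show kvm_init_pvm_id_regs() caching per-VM ID-register values computed by pvm_calc_id_reg(). A minimal sketch of that initialisation using an explicit register list; the real routine's iteration range, locking, and any already-initialised check are not visible in the listing.

void kvm_init_pvm_id_regs(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_arch *ka = &kvm->arch;
        /* Illustrative subset of the AArch64 ID registers handled above. */
        static const u32 regs[] = {
                SYS_ID_AA64PFR0_EL1, SYS_ID_AA64PFR1_EL1,
                SYS_ID_AA64ISAR1_EL1, SYS_ID_AA64ISAR2_EL1,
                SYS_ID_AA64MMFR0_EL1, SYS_ID_AA64MMFR1_EL1,
                SYS_ID_AA64MMFR2_EL1,
        };
        int i;

        for (i = 0; i < ARRAY_SIZE(regs); i++)
                ka->id_regs[IDREG_IDX(regs[i])] = pvm_calc_id_reg(vcpu, regs[i]);
}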
524 bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
528 unsigned long esr = kvm_vcpu_get_esr(vcpu);
529 int Rt = kvm_vcpu_sys_get_rt(vcpu);
532 params.regval = vcpu_get_reg(vcpu, Rt);
538 inject_undef64(vcpu);
547 if (r->access(vcpu, &params, r))
548 __kvm_skip_instr(vcpu);
551 vcpu_set_reg(vcpu, Rt, params.regval);
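The matches at 524-551 are the MSR/MRS trap handler for protected VMs: decode the access from ESR_EL2, look the register up in the pVM descriptor table, inject an UNDEF if it is restricted, defer to the host if there is no EL2 handler, otherwise run the handler, skip the trapped instruction, and write any read result back to Rt. A sketch of that flow; the descriptor-table name and the lookup helper are assumptions.

bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
{
        const struct sys_reg_desc *r;
        struct sys_reg_params params;
        unsigned long esr = kvm_vcpu_get_esr(vcpu);
        int Rt = kvm_vcpu_sys_get_rt(vcpu);

        params = esr_sys64_to_params(esr);
        params.regval = vcpu_get_reg(vcpu, Rt);

        /* Table name and lookup helper assumed for illustration. */
        r = find_reg(&params, pvm_sys_reg_descs, ARRAY_SIZE(pvm_sys_reg_descs));

        /* Not in the table: the access is restricted, fault the guest. */
        if (!r) {
                inject_undef64(vcpu);
                return true;
        }

        /* No EL2 handler: let the host handle the exit. */
        if (!r->access)
                return false;

        /* Handled at EL2; skip the trapping instruction if requested. */
        if (r->access(vcpu, &params, r))
                __kvm_skip_instr(vcpu);

        /* For reads, propagate the result to the guest's Rt. */
        if (!params.is_write)
                vcpu_set_reg(vcpu, Rt, params.regval);

        return true;
}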
562 bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code)
564 inject_undef64(vcpu);
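The final matches at 562-564 are the catch-all for restricted traps. Sketched in full below, assuming the handler does nothing beyond the injection shown in the listing: the access simply becomes an UNDEF in the guest and the exit is reported as handled.

bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code)
{
        /* Any access to a restricted feature turns into an UNDEF in the guest. */
        inject_undef64(vcpu);
        return true;
}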