
Search query full:vcpu, results 1–25 of 504, sorted by relevance.


/linux/arch/powerpc/kvm/
booke.c
   56  STATS_DESC_COUNTER(VCPU, sum_exits),
   57  STATS_DESC_COUNTER(VCPU, mmio_exits),
   58  STATS_DESC_COUNTER(VCPU, signal_exits),
   59  STATS_DESC_COUNTER(VCPU, light_exits),
   60  STATS_DESC_COUNTER(VCPU, itlb_real_miss_exits),
   61  STATS_DESC_COUNTER(VCPU, itlb_virt_miss_exits),
   62  STATS_DESC_COUNTER(VCPU, dtlb_real_miss_exits),
   63  STATS_DESC_COUNTER(VCPU, dtlb_virt_miss_exits),
   64  STATS_DESC_COUNTER(VCPU, syscall_exits),
   65  STATS_DESC_COUNTER(VCPU, isi_exits),
  [all …]
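
Each STATS_DESC_COUNTER(VCPU, …) entry above only describes a counter; the counter itself is a field of the architecture's struct kvm_vcpu_stat, exported through KVM's binary stats interface, and is bumped on the matching exit path. A minimal sketch of that producer side, with a hypothetical handler name around the real mmio_exits field:

    /* Sketch: how a counter described above gets incremented.
     * handle_mmio_exit() is a hypothetical wrapper; the field access and
     * exit_reason handoff mirror the real exit paths. */
    static int handle_mmio_exit(struct kvm_vcpu *vcpu)
    {
            vcpu->stat.mmio_exits++;                /* field of struct kvm_vcpu_stat */
            vcpu->run->exit_reason = KVM_EXIT_MMIO; /* hand the access to userspace */
            return RESUME_HOST;
    }
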
book3s_emulate.c
   70  static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level)
   73          if (vcpu->arch.papr_enabled && (level > PRIV_SUPER))
   77          if ((kvmppc_get_msr(vcpu) & MSR_PR) && level > PRIV_PROBLEM)
   84  static inline void kvmppc_copyto_vcpu_tm(struct kvm_vcpu *vcpu)
   86          memcpy(&vcpu->arch.gpr_tm[0], &vcpu->arch.regs.gpr[0],
   87                 sizeof(vcpu->arch.gpr_tm));
   88          memcpy(&vcpu->arch.fp_tm, &vcpu->arch.fp,
   90          memcpy(&vcpu->arch.vr_tm, &vcpu->arch.vr,
   92          vcpu->arch.ppr_tm = vcpu->arch.ppr;
   93          vcpu->arch.dscr_tm = vcpu->arch.dscr;
  [all …]
booke_emulate.c
   24  static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
   26          vcpu->arch.regs.nip = vcpu->arch.shared->srr0;
   27          kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1);
   30  static void kvmppc_emul_rfdi(struct kvm_vcpu *vcpu)
   32          vcpu->arch.regs.nip = vcpu->arch.dsrr0;
   33          kvmppc_set_msr(vcpu, vcpu->arch.dsrr1);
   36  static void kvmppc_emul_rfci(struct kvm_vcpu *vcpu)
   38          vcpu->arch.regs.nip = vcpu->arch.csrr0;
   39          kvmppc_set_msr(vcpu, vcpu->arch.csrr1);
   42  int kvmppc_booke_emulate_op(struct kvm_vcpu *vcpu,
  [all …]
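
The three helpers above are the same operation over different save/restore register pairs: rfi restores from SRR0/SRR1, rfdi from the debug pair DSRR0/DSRR1, and rfci from the critical pair CSRR0/CSRR1. A sketch of the shared shape, with a hypothetical common helper (the kernel keeps the variants separate):

    /* Sketch: the common return-from-interrupt shape. emul_return_from()
     * is a hypothetical factoring of the three helpers shown above. */
    static void emul_return_from(struct kvm_vcpu *vcpu, ulong saved_pc, ulong saved_msr)
    {
            vcpu->arch.regs.nip = saved_pc;  /* resume at the saved address */
            kvmppc_set_msr(vcpu, saved_msr); /* restore the saved machine state */
    }
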
emulate_loadstore.c
   28  static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
   30          if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
   31                  kvmppc_core_queue_fpunavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
   40  static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
   42          if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
   43                  kvmppc_core_queue_vsx_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
   52  static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
   54          if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
   55                  kvmppc_core_queue_vec_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
   72  int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
  [all …]
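
All three checks share one shape: if the facility bit (MSR_FP, MSR_VSX, or MSR_VEC) is clear in the guest MSR, queue the matching "facility unavailable" interrupt and return true, so load/store emulation bails out and lets the guest's handler enable the facility first. A generic sketch of that shape; the helper name and callback parameter are hypothetical:

    /* Sketch: the facility-disabled check generalized over the MSR bit and
     * the interrupt-queueing callback. Hypothetical helper, real call shape. */
    static bool check_facility_disabled(struct kvm_vcpu *vcpu, ulong msr_bit,
                                        void (*queue_unavail)(struct kvm_vcpu *, ulong))
    {
            if (!(kvmppc_get_msr(vcpu) & msr_bit)) {
                    queue_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
                    return true;    /* let the guest's unavailable handler run */
            }
            return false;
    }
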
book3s_paired_singles.c
  150  static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt)
  152          kvm_cvt_df(&VCPU_FPR(vcpu, rt), &vcpu->arch.qpr[rt]);
  155  static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store)
  158          u64 msr = kvmppc_get_msr(vcpu);
  162          kvmppc_set_msr(vcpu, msr);
  163          kvmppc_set_dar(vcpu, eaddr);
  168          kvmppc_set_dsisr(vcpu, dsisr);
  169          kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
  172  static int kvmppc_emulate_fpr_load(struct kvm_vcpu *vcpu,
  184          r = kvmppc_ld(vcpu, &addr, len, tmp, true);
  [all …]
powerpc.c
   55  bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
   57          return kvm_arch_vcpu_runnable(vcpu);
   60  bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
   65  int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
   79  int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
   95          kvmppc_account_exit(vcpu, SIGNAL_EXITS);
   96          vcpu->run->exit_reason = KVM_EXIT_INTR;
  101          vcpu->mode = IN_GUEST_MODE;
  104           * Reading vcpu->requests must happen after setting vcpu->mode,
  109           * to the page tables done while the VCPU is running.
  [all …]
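
The truncated comment at lines 104–109 is about memory ordering: vcpu->mode must be set to IN_GUEST_MODE before vcpu->requests is read, so a remote CPU that makes a request and kicks the vCPU either observes IN_GUEST_MODE (and sends an IPI) or has its request observed here before guest entry. A sketch of that entry-side protocol, with a hypothetical function name and the barrier pairing as I understand generic KVM's usage:

    /* Sketch: entry-side ordering behind kvmppc_prepare_to_enter()'s comment. */
    static int enter_guest_mode_checked(struct kvm_vcpu *vcpu)
    {
            vcpu->mode = IN_GUEST_MODE;
            smp_mb();       /* order the mode store before the requests load */
            if (kvm_request_pending(vcpu)) {
                    vcpu->mode = OUTSIDE_GUEST_MODE;
                    return -EAGAIN; /* caller services requests and retries */
            }
            return 0;
    }
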
book3s_pr_papr.c
   23  static unsigned long get_pteg_addr(struct kvm_vcpu *vcpu, long pte_index)
   25          struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
   36  static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
   38          long flags = kvmppc_get_gpr(vcpu, 4);
   39          long pte_index = kvmppc_get_gpr(vcpu, 5);
   47          pteg_addr = get_pteg_addr(vcpu, pte_index);
   49          mutex_lock(&vcpu->kvm->arch.hpt_mutex);
   70          hpte[0] = cpu_to_be64(kvmppc_get_gpr(vcpu, 6));
   71          hpte[1] = cpu_to_be64(kvmppc_get_gpr(vcpu, 7));
   76          kvmppc_set_gpr(vcpu, 4, pte_index | i);
  [all …]
book3s_hv_tm.c
   16  static void emulate_tx_failure(struct kvm_vcpu *vcpu, u64 failure_cause)
   19          u64 msr = vcpu->arch.shregs.msr;
   21          tfiar = vcpu->arch.regs.nip & ~0x3ull;
   23          if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr))
   29          vcpu->arch.tfiar = tfiar;
   31          vcpu->arch.texasr = (vcpu->arch.texasr & 0x3ffffff) | texasr;
   37   * instruction image is in vcpu->arch.emul_inst. If the guest was in
   39   * reclaimed and is in the vcpu struct. The CPU is in virtual mode in
   42  int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
   44          u32 instr = vcpu->arch.emul_inst;
  [all …]
/linux/arch/x86/kvm/vmx/
main.c
   68  static int vt_vcpu_create(struct kvm_vcpu *vcpu)
   70          if (is_td_vcpu(vcpu))
   71                  return tdx_vcpu_create(vcpu);
   73          return vmx_vcpu_create(vcpu);
   76  static void vt_vcpu_free(struct kvm_vcpu *vcpu)
   78          if (is_td_vcpu(vcpu)) {
   79                  tdx_vcpu_free(vcpu);
   83          vmx_vcpu_free(vcpu);
   86  static void vt_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
   88          if (is_td_vcpu(vcpu)) {
  [all …]
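
Every vt_* callback in this file dispatches the same way: TDX vCPUs (is_td_vcpu()) go to the tdx_* implementation, everything else falls through to plain VMX. A sketch of the pattern with hypothetical callback names:

    /* Sketch: the TDX-vs-VMX dispatch used throughout main.c. The *_do_thing
     * names are hypothetical stand-ins for any of the real callback triples. */
    static void vt_vcpu_do_thing(struct kvm_vcpu *vcpu)
    {
            if (is_td_vcpu(vcpu)) {
                    tdx_vcpu_do_thing(vcpu); /* TDX guest: TDX module path */
                    return;
            }
            vmx_vcpu_do_thing(vcpu);         /* regular VMX guest */
    }
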
x86_ops.h
   22  int vmx_vcpu_create(struct kvm_vcpu *vcpu);
   23  int vmx_vcpu_pre_run(struct kvm_vcpu *vcpu);
   24  fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags);
   25  void vmx_vcpu_free(struct kvm_vcpu *vcpu);
   26  void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
   27  void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
   28  void vmx_vcpu_put(struct kvm_vcpu *vcpu);
   29  int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath);
   30  void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu);
   31  int vmx_skip_emulated_instruction(struct kvm_vcpu *vcpu);
  [all …]
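
These prototypes are the VMX implementations behind the kvm_x86_ops callback table. Roughly, generic KVM drives them in the order below over a vCPU's lifetime; this is a simplified sketch only (real calls go through static_call dispatch, and reset, error, and preemption paths are omitted):

    /* Sketch only: approximate call order, not literal kernel code. */
    static void vcpu_lifetime_sketch(struct kvm_vcpu *vcpu, int cpu)
    {
            vmx_vcpu_create(vcpu);                  /* KVM_CREATE_VCPU */
            vmx_vcpu_load(vcpu, cpu);               /* task scheduled onto a pCPU */
            while (vmx_vcpu_pre_run(vcpu) > 0) {    /* per KVM_RUN iteration */
                    fastpath_t fp = vmx_vcpu_run(vcpu, 0);
                    if (vmx_handle_exit(vcpu, fp) <= 0)
                            break;                  /* return to userspace */
            }
            vmx_vcpu_put(vcpu);                     /* task scheduled out */
            vmx_vcpu_free(vcpu);                    /* vCPU destroyed */
    }
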
/linux/arch/riscv/kvm/
vcpu.c
   29  STATS_DESC_COUNTER(VCPU, ecall_exit_stat),
   30  STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
   31  STATS_DESC_COUNTER(VCPU, wrs_exit_stat),
   32  STATS_DESC_COUNTER(VCPU, mmio_exit_user),
   33  STATS_DESC_COUNTER(VCPU, mmio_exit_kernel),
   34  STATS_DESC_COUNTER(VCPU, csr_exit_user),
   35  STATS_DESC_COUNTER(VCPU, csr_exit_kernel),
   36  STATS_DESC_COUNTER(VCPU, signal_exits),
   37  STATS_DESC_COUNTER(VCPU, exits),
   38  STATS_DESC_COUNTER(VCPU, instr_illegal_exits),
  [all …]
/linux/arch/s390/kvm/
priv.c
   32  static int handle_ri(struct kvm_vcpu *vcpu)
   34          vcpu->stat.instruction_ri++;
   36          if (test_kvm_facility(vcpu->kvm, 64)) {
   37                  VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (lazy)");
   38                  vcpu->arch.sie_block->ecb3 |= ECB3_RI;
   39                  kvm_s390_retry_instr(vcpu);
   42          return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
   45  int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
   47          if ((vcpu->arch.sie_block->ipa & 0xf) <= 4)
   48                  return handle_ri(vcpu);
  [all …]
intercept.c
   25  u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu)
   27          struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;
   30          switch (vcpu->arch.sie_block->icptcode) {
   37                  ilen = insn_length(vcpu->arch.sie_block->ipa >> 8);
   47                  ilen = vcpu->arch.sie_block->pgmilc & 0x6;
   53  static int handle_stop(struct kvm_vcpu *vcpu)
   55          struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
   59          vcpu->stat.exit_stop_request++;
   62          if (kvm_s390_vcpu_has_irq(vcpu, 1))
   68          stop_pending = kvm_s390_is_stop_irq_pending(vcpu);
  [all …]
guestdbg.c
   59  static void enable_all_hw_bp(struct kvm_vcpu *vcpu)
   62          u64 *cr9 = &vcpu->arch.sie_block->gcr[9];
   63          u64 *cr10 = &vcpu->arch.sie_block->gcr[10];
   64          u64 *cr11 = &vcpu->arch.sie_block->gcr[11];
   67          if (vcpu->arch.guestdbg.nr_hw_bp <= 0 ||
   68              vcpu->arch.guestdbg.hw_bp_info == NULL)
   79          for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) {
   80                  start = vcpu->arch.guestdbg.hw_bp_info[i].addr;
   81                  len = vcpu->arch.guestdbg.hw_bp_info[i].len;
   99  static void enable_all_hw_wp(struct kvm_vcpu *vcpu)
  [all …]
sigp.c
   20  static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
   39          VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", dst_vcpu->vcpu_id,
   44  static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
   49                  .u.emerg.code = vcpu->vcpu_id,
   55          VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x",
   61  static int __sigp_emergency(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
   63          return __inject_sigp_emergency(vcpu, dst_vcpu);
   66  static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu,
   75          idle = is_vcpu_idle(vcpu);
   81          if (!is_vcpu_stopped(vcpu)
  [all …]
/linux/arch/loongarch/kvm/
vcpu.c
   18  STATS_DESC_COUNTER(VCPU, int_exits),
   19  STATS_DESC_COUNTER(VCPU, idle_exits),
   20  STATS_DESC_COUNTER(VCPU, cpucfg_exits),
   21  STATS_DESC_COUNTER(VCPU, signal_exits),
   22  STATS_DESC_COUNTER(VCPU, hypercall_exits),
   23  STATS_DESC_COUNTER(VCPU, ipi_read_exits),
   24  STATS_DESC_COUNTER(VCPU, ipi_write_exits),
   25  STATS_DESC_COUNTER(VCPU, eiointc_read_exits),
   26  STATS_DESC_COUNTER(VCPU, eiointc_write_exits),
   27  STATS_DESC_COUNTER(VCPU, pch_pic_read_exits),
  [all …]
/linux/arch/x86/kvm/
kvm_cache_regs.h
   20  static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
   22          return vcpu->arch.regs[VCPU_REGS_##uname];                      \
   24  static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu,  \
   27          vcpu->arch.regs[VCPU_REGS_##uname] = val;                       \
   51   * the vCPU task is in the process of updating the cache. The exception is if
   56  #define kvm_assert_register_caching_allowed(vcpu)                       \
   57          lockdep_assert_once(in_task() || kvm_arch_pmi_in_guest(vcpu))
   63   *      1         0       register in vcpu->arch
   64   *      1         1       register in vcpu->arch, needs to be stored back
   66  static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
  [all …]
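
The first four lines are a token-pasting template: each BUILD_KVM_GPR_ACCESSORS(lname, UNAME) invocation stamps out an inline read/write pair over vcpu->arch.regs, and the truncated two-column comment at lines 63–64 enumerates the register cache's (available, dirty) states. Following the template shown above, one instantiation expands to:

    /* Expansion of BUILD_KVM_GPR_ACCESSORS(rax, RAX) per the template above. */
    static __always_inline unsigned long kvm_rax_read(struct kvm_vcpu *vcpu)
    {
            return vcpu->arch.regs[VCPU_REGS_RAX];
    }

    static __always_inline void kvm_rax_write(struct kvm_vcpu *vcpu,
                                              unsigned long val)
    {
            vcpu->arch.regs[VCPU_REGS_RAX] = val;
    }
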
x86.c
  107          ((struct kvm_vcpu *)(ctxt)->vcpu)
  127  static void update_cr8_intercept(struct kvm_vcpu *vcpu);
  128  static void process_nmi(struct kvm_vcpu *vcpu);
  129  static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
  130  static void store_regs(struct kvm_vcpu *vcpu);
  131  static int sync_regs(struct kvm_vcpu *vcpu);
  132  static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu);
  134  static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
  135  static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
  138  static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
  [all …]
smm.c
  112  void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm)
  114          trace_kvm_smm_transition(vcpu->vcpu_id, vcpu->arch.smbase, entering_smm);
  117          vcpu->arch.hflags |= HF_SMM_MASK;
  119          vcpu->arch.hflags &= ~(HF_SMM_MASK | HF_SMM_INSIDE_NMI_MASK);
  122          kvm_make_request(KVM_REQ_EVENT, vcpu);
  129          vcpu->arch.pdptrs_from_userspace = false;
  132          kvm_mmu_reset_context(vcpu);
  136  void process_smi(struct kvm_vcpu *vcpu)
  138          vcpu->arch.smi_pending = true;
  139          kvm_make_request(KVM_REQ_EVENT, vcpu);
  [all …]
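
HF_SMM_MASK, toggled above, is what the rest of x86 KVM consults to know whether the vCPU is in System Management Mode, and the MMU context is reset because SMM uses a separate address space. The consumer side is a one-line hflags test; this mirrors the is_smm() helper in smm.h, to the best of my knowledge:

    /* Sketch: consuming the flag that kvm_smm_changed() flips. */
    static inline bool is_smm(struct kvm_vcpu *vcpu)
    {
            return vcpu->arch.hflags & HF_SMM_MASK;
    }
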
hyperv.h
   64  static inline struct kvm_vcpu_hv *to_hv_vcpu(struct kvm_vcpu *vcpu)
   66          return vcpu->arch.hyperv;
   69  static inline struct kvm_vcpu_hv_synic *to_hv_synic(struct kvm_vcpu *vcpu)
   71          struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
   80          return hv_vcpu->vcpu;   /* in hv_synic_to_vcpu() */
   83  static inline struct kvm_hv_syndbg *to_hv_syndbg(struct kvm_vcpu *vcpu)
   85          return &vcpu->kvm->arch.hyperv.hv_syndbg;
   88  static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu)
   90          struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
   92          return hv_vcpu ? hv_vcpu->vp_index : vcpu->vcpu_idx;
  [all …]
lapic.h
   67          struct kvm_vcpu *vcpu;  /* struct member */
   93  int kvm_create_lapic(struct kvm_vcpu *vcpu);
   94  void kvm_free_lapic(struct kvm_vcpu *vcpu);
   96  int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu);
   97  void kvm_apic_ack_interrupt(struct kvm_vcpu *vcpu, int vector);
   98  int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu);
   99  int kvm_apic_accept_events(struct kvm_vcpu *vcpu);
  100  void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event);
  101  u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu);
  102  void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8);
  [all …]
/linux/arch/arm64/kvm/hyp/vhe/
sysreg-sr.c
   18  static void __sysreg_save_vel2_state(struct kvm_vcpu *vcpu)
   21          __vcpu_assign_sys_reg(vcpu, PAR_EL1, read_sysreg(par_el1));
   22          __vcpu_assign_sys_reg(vcpu, TPIDR_EL1, read_sysreg(tpidr_el1));
   24          __vcpu_assign_sys_reg(vcpu, ESR_EL2, read_sysreg_el1(SYS_ESR));
   25          __vcpu_assign_sys_reg(vcpu, AFSR0_EL2, read_sysreg_el1(SYS_AFSR0));
   26          __vcpu_assign_sys_reg(vcpu, AFSR1_EL2, read_sysreg_el1(SYS_AFSR1));
   27          __vcpu_assign_sys_reg(vcpu, FAR_EL2, read_sysreg_el1(SYS_FAR));
   28          __vcpu_assign_sys_reg(vcpu, MAIR_EL2, read_sysreg_el1(SYS_MAIR));
   29          __vcpu_assign_sys_reg(vcpu, VBAR_EL2, read_sysreg_el1(SYS_VBAR));
   30          __vcpu_assign_sys_reg(vcpu, CONTEXTIDR_EL2, read_sysreg_el1(SYS_CONTEXTIDR));
  [all …]
switch.c
   52  static u64 __compute_hcr(struct kvm_vcpu *vcpu)
   54          u64 guest_hcr, hcr = vcpu->arch.hcr_el2;
   56          if (!vcpu_has_nv(vcpu))
   60           * We rely on the invariant that a vcpu entered from HYP
   65          if (is_hyp_ctxt(vcpu)) {
   70                  if (!vcpu_el2_e2h_is_set(vcpu))
   81                  guest_hcr = kvm_vcpu_apply_reg_masks(vcpu, HCR_EL2, 0);
   83                  write_sysreg_s(vcpu->arch.ctxt.vncr_array, SYS_VNCR_EL2);
   87                  guest_hcr = __vcpu_sys_reg(vcpu, HCR_EL2);
   92                  va |= __vcpu_sys_reg(vcpu, VNCR_EL2) & GENMASK(PAGE_SHIFT - 1, 0);
  [all …]
/linux/arch/arm64/kvm/hyp/include/hyp/
switch.h
   43  static inline void __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
   45          if (!vcpu_el1_is_32bit(vcpu))
   48          __vcpu_assign_sys_reg(vcpu, FPEXC32_EL2, read_sysreg(fpexc32_el2));
   51  static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
   62          if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd()) {
   68  static inline void __activate_cptr_traps_nvhe(struct kvm_vcpu *vcpu)
   78          if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
   87  static inline void __activate_cptr_traps_vhe(struct kvm_vcpu *vcpu)
  102          if (vcpu_has_sve(vcpu))
  106          if (!vcpu_has_nv(vcpu))
  [all …]
/linux/arch/mips/kvm/
emulate.c
   40  static int kvm_compute_return_epc(struct kvm_vcpu *vcpu, unsigned long instpc,
   45          struct kvm_vcpu_arch *arch = &vcpu->arch;
   56          err = kvm_get_badinstrp((u32 *)epc, vcpu, &insn.word);
  243  enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause)
  248          err = kvm_compute_return_epc(vcpu, vcpu->arch.pc,
  249                                       &vcpu->arch.pc);
  253          vcpu->arch.pc += 4;
  256          kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
  264   * @vcpu: KVM VCPU information.
  272  int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
  [all …]
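
update_pc() advances the guest PC after an emulated instruction, with one MIPS-specific wrinkle: if the instruction sat in a branch delay slot (the BD bit in the cause register), the next PC must be computed from the branch itself via kvm_compute_return_epc(); otherwise it is simply pc + 4. A condensed sketch of that logic, with a hypothetical function name mirroring the excerpt:

    /* Sketch: PC advance after emulation, delay-slot aware. */
    static enum emulation_result update_pc_sketch(struct kvm_vcpu *vcpu, u32 cause)
    {
            if (cause & CAUSEF_BD) {
                    /* Faulting instruction was in a delay slot: follow the branch. */
                    if (kvm_compute_return_epc(vcpu, vcpu->arch.pc, &vcpu->arch.pc))
                            return EMULATE_FAIL;
            } else {
                    vcpu->arch.pc += 4;     /* sequential instruction */
            }
            return EMULATE_DONE;
    }
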
