/linux/arch/x86/kvm/vmx/

main.c:68

static int vt_vcpu_create(struct kvm_vcpu *vcpu)
{
	if (is_td_vcpu(vcpu))
		return tdx_vcpu_create(vcpu);

	return vmx_vcpu_create(vcpu);
}

static void vt_vcpu_free(struct kvm_vcpu *vcpu)
{
	if (is_td_vcpu(vcpu)) {
		tdx_vcpu_free(vcpu);
		return;
	}

	vmx_vcpu_free(vcpu);
}

static void vt_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
{
	if (is_td_vcpu(vcpu)) {
[all …]
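The vt_* handlers above all share one shape: test is_td_vcpu() once and route the operation to the TDX or plain-VMX back end. A minimal userspace sketch of that dispatch pattern (not kernel code; struct vcpu, tdx_create and vmx_create are invented for illustration):

/*
 * Minimal userspace sketch (not kernel code) of the vt_* dispatch
 * pattern above: one front-end op tests a per-vCPU type flag and
 * forwards to the TDX or VMX back end. All names are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

struct vcpu {
	bool is_td;		/* true when the vCPU belongs to a TDX guest */
};

static int tdx_create(struct vcpu *v)
{
	(void)v;
	puts("tdx create");
	return 0;
}

static int vmx_create(struct vcpu *v)
{
	(void)v;
	puts("vmx create");
	return 0;
}

/* Front-end op, mirroring the shape of vt_vcpu_create() */
static int vt_create(struct vcpu *v)
{
	if (v->is_td)
		return tdx_create(v);

	return vmx_create(v);
}

int main(void)
{
	struct vcpu td = { .is_td = true }, vmx = { .is_td = false };

	vt_create(&td);		/* prints "tdx create" */
	vt_create(&vmx);	/* prints "vmx create" */
	return 0;
}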
x86_ops.h:22

int vmx_vcpu_create(struct kvm_vcpu *vcpu);
int vmx_vcpu_pre_run(struct kvm_vcpu *vcpu);
fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags);
void vmx_vcpu_free(struct kvm_vcpu *vcpu);
void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void vmx_vcpu_put(struct kvm_vcpu *vcpu);
int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath);
void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu);
int vmx_skip_emulated_instruction(struct kvm_vcpu *vcpu);
[all …]
/linux/arch/s390/kvm/

intercept.c:25

u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;
	...
	switch (vcpu->arch.sie_block->icptcode) {
	...
		ilen = insn_length(vcpu->arch.sie_block->ipa >> 8);
	...
		ilen = vcpu->arch.sie_block->pgmilc & 0x6;
	...
static int handle_stop(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	...
	vcpu->stat.exit_stop_request++;
	...
	if (kvm_s390_vcpu_has_irq(vcpu, 1))
	...
	stop_pending = kvm_s390_is_stop_irq_pending(vcpu);
[all …]
guestdbg.c:59

static void enable_all_hw_bp(struct kvm_vcpu *vcpu)
{
	...
	u64 *cr9 = &vcpu->arch.sie_block->gcr[9];
	u64 *cr10 = &vcpu->arch.sie_block->gcr[10];
	u64 *cr11 = &vcpu->arch.sie_block->gcr[11];
	...
	if (vcpu->arch.guestdbg.nr_hw_bp <= 0 ||
	    vcpu->arch.guestdbg.hw_bp_info == NULL)
	...
	for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) {
		start = vcpu->arch.guestdbg.hw_bp_info[i].addr;
		len = vcpu->arch.guestdbg.hw_bp_info[i].len;
	...
static void enable_all_hw_wp(struct kvm_vcpu *vcpu)
[all …]
kvm-s390.h:44

#define IS_TE_ENABLED(vcpu)	((vcpu->arch.sie_block->ecb & ECB_TE))
...
#define IS_ITDB_VALID(vcpu) \
	((*(char *)phys_to_virt((vcpu)->arch.sie_block->itdba) == TDB_FORMAT1))
...
static inline void kvm_s390_set_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
{
	atomic_or(flags, &vcpu->arch.sie_block->cpuflags);
}

static inline void kvm_s390_clear_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
{
	atomic_andnot(flags, &vcpu->arch.sie_block->cpuflags);
}

static inline bool kvm_s390_test_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
{
	return (atomic_read(&vcpu->arch.sie_block->cpuflags) & flags) == flags;
}

static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
[all …]
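The cpuflags helpers above update the SIE control block with lock-free read-modify-write operations: set with an atomic OR, clear with an atomic AND-NOT, and test by requiring every requested bit. A self-contained sketch of the same pattern using C11 atomics in place of the kernel's atomic_t API (flag values and helper names are illustrative):

/*
 * Self-contained sketch of the cpuflags helpers above, using C11
 * atomics in place of the kernel's atomic_t API. Flag values and
 * helper names are illustrative.
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static atomic_uint cpuflags;

static void set_cpuflags(uint32_t flags)
{
	atomic_fetch_or(&cpuflags, flags);	/* like atomic_or() */
}

static void clear_cpuflags(uint32_t flags)
{
	atomic_fetch_and(&cpuflags, ~flags);	/* like atomic_andnot() */
}

static bool test_cpuflags(uint32_t flags)
{
	/* true only if *every* requested bit is set */
	return (atomic_load(&cpuflags) & flags) == flags;
}

int main(void)
{
	set_cpuflags(0x3);
	assert(test_cpuflags(0x1) && test_cpuflags(0x3));
	clear_cpuflags(0x2);
	assert(test_cpuflags(0x1) && !test_cpuflags(0x2));
	return 0;
}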
/linux/arch/powerpc/kvm/

booke.c:90

void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	...
	printk("pc: %08lx msr: %08llx\n", vcpu->arch.regs.nip,
	       vcpu->arch.shared->msr);
	printk("lr: %08lx ctr: %08lx\n", vcpu->arch.regs.link,
	       vcpu->arch.regs.ctr);
	printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
	       vcpu->arch.shared->srr1);
	...
	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);
	...
		kvmppc_get_gpr(vcpu, i),
		kvmppc_get_gpr(vcpu, i+1),
[all …]
book3s_emulate.c:70

static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level)
{
	...
	if (vcpu->arch.papr_enabled && (level > PRIV_SUPER))
	...
	if ((kvmppc_get_msr(vcpu) & MSR_PR) && level > PRIV_PROBLEM)
	...
static inline void kvmppc_copyto_vcpu_tm(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.gpr_tm[0], &vcpu->arch.regs.gpr[0],
	       sizeof(vcpu->arch.gpr_tm));
	memcpy(&vcpu->arch.fp_tm, &vcpu->arch.fp,
	...
	memcpy(&vcpu->arch.vr_tm, &vcpu->arch.vr,
	...
	vcpu->arch.ppr_tm = vcpu->arch.ppr;
	vcpu->arch.dscr_tm = vcpu->arch.dscr;
[all …]
booke_emulate.c:24

static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
{
	vcpu->arch.regs.nip = vcpu->arch.shared->srr0;
	kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1);
}

static void kvmppc_emul_rfdi(struct kvm_vcpu *vcpu)
{
	vcpu->arch.regs.nip = vcpu->arch.dsrr0;
	kvmppc_set_msr(vcpu, vcpu->arch.dsrr1);
}

static void kvmppc_emul_rfci(struct kvm_vcpu *vcpu)
{
	vcpu->arch.regs.nip = vcpu->arch.csrr0;
	kvmppc_set_msr(vcpu, vcpu->arch.csrr1);
}

int kvmppc_booke_emulate_op(struct kvm_vcpu *vcpu,
[all …]
emulate_loadstore.c:28

static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
		kvmppc_core_queue_fpunavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
	...
static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
		kvmppc_core_queue_vsx_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
	...
static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
		kvmppc_core_queue_vec_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
	...
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
[all …]
book3s_hv_tm.c:16

static void emulate_tx_failure(struct kvm_vcpu *vcpu, u64 failure_cause)
{
	...
	u64 msr = vcpu->arch.shregs.msr;
	...
	tfiar = vcpu->arch.regs.nip & ~0x3ull;
	...
	if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr))
	...
	vcpu->arch.tfiar = tfiar;
	...
	vcpu->arch.texasr = (vcpu->arch.texasr & 0x3ffffff) | texasr;
	...
int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
{
	u32 instr = vcpu->arch.emul_inst;
	u64 msr = vcpu->arch.shregs.msr;
	...
		vcpu->arch.regs.nip -= 4;
[all …]
book3s_paired_singles.c:150

static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt)
{
	kvm_cvt_df(&VCPU_FPR(vcpu, rt), &vcpu->arch.qpr[rt]);
}

static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store)
{
	...
	u64 msr = kvmppc_get_msr(vcpu);
	...
	kvmppc_set_msr(vcpu, msr);
	kvmppc_set_dar(vcpu, eaddr);
	...
	kvmppc_set_dsisr(vcpu, dsisr);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
}

static int kvmppc_emulate_fpr_load(struct kvm_vcpu *vcpu,
	...
	r = kvmppc_ld(vcpu, &addr, len, tmp, true);
[all …]
book3s_pr_papr.c:23

static unsigned long get_pteg_addr(struct kvm_vcpu *vcpu, long pte_index)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	...
static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
{
	long flags = kvmppc_get_gpr(vcpu, 4);
	long pte_index = kvmppc_get_gpr(vcpu, 5);
	...
	pteg_addr = get_pteg_addr(vcpu, pte_index);
	...
	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
	...
	hpte[0] = cpu_to_be64(kvmppc_get_gpr(vcpu, 6));
	hpte[1] = cpu_to_be64(kvmppc_get_gpr(vcpu, 7));
	...
	kvmppc_set_gpr(vcpu, 4, pte_index | i);
[all …]
book3s_hv_p9_entry.c:10

static void load_spr_state(struct kvm_vcpu *vcpu,
	...
	mtspr(SPRN_TAR, vcpu->arch.tar);
	...
	    current->thread.vrsave != vcpu->arch.vrsave)
		mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
	...
	if (vcpu->arch.hfscr & HFSCR_EBB) {
		if (current->thread.ebbhr != vcpu->arch.ebbhr)
			mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);
		if (current->thread.ebbrr != vcpu->arch.ebbrr)
			mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);
		if (current->thread.bescr != vcpu->arch.bescr)
[all …]
powerpc.c:55

bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_arch_vcpu_runnable(vcpu);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	...
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	...
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	...
	kvmppc_account_exit(vcpu, SIGNAL_EXITS);
	vcpu->run->exit_reason = KVM_EXIT_INTR;
	...
	vcpu->mode = IN_GUEST_MODE;
	...
	if (kvm_request_pending(vcpu)) {
	...
		trace_kvm_check_requests(vcpu);
[all …]
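kvmppc_prepare_to_enter() above follows the common KVM entry protocol: bail out to userspace with KVM_EXIT_INTR when a signal is pending, otherwise mark the vCPU IN_GUEST_MODE and recheck pending requests before entering the guest. A simplified, hypothetical userspace model of that control flow (none of these types are kernel types):

/*
 * Simplified, hypothetical model of the kvmppc_prepare_to_enter()
 * control flow above: refuse to enter when a signal is pending
 * (reporting an "interrupted" exit), otherwise flip the vCPU into
 * guest mode and recheck requests. All types here are invented.
 */
#include <stdbool.h>
#include <stdio.h>

enum vcpu_mode { OUTSIDE_GUEST_MODE, IN_GUEST_MODE };
enum exit_reason { EXIT_NONE, EXIT_INTR };	/* EXIT_INTR ~ KVM_EXIT_INTR */

struct vcpu {
	enum vcpu_mode mode;
	enum exit_reason exit_reason;
	bool signal_pending;
	bool request_pending;
};

/* Returns 1 when it is safe to enter the guest, <= 0 otherwise. */
static int prepare_to_enter(struct vcpu *v)
{
	if (v->signal_pending) {
		v->exit_reason = EXIT_INTR;
		return -1;
	}

	v->mode = IN_GUEST_MODE;

	if (v->request_pending) {
		/* process pending requests before entry */
		v->request_pending = false;
	}

	return 1;
}

int main(void)
{
	struct vcpu v = { .request_pending = true };

	printf("enter? %d\n", prepare_to_enter(&v));	/* 1 */
	v.signal_pending = true;
	printf("enter? %d\n", prepare_to_enter(&v));	/* -1 */
	return 0;
}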
/linux/arch/riscv/kvm/

vcpu.c:54

static void kvm_riscv_vcpu_context_reset(struct kvm_vcpu *vcpu,
	...
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	...
	memset(&vcpu->arch.smstateen_csr, 0, sizeof(vcpu->arch.smstateen_csr));
	...
	kvm_riscv_vcpu_sbi_load_reset_state(vcpu);
	...
static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu, bool kvm_sbi_reset)
{
	...
	loaded = (vcpu->cpu != -1);
	...
		kvm_arch_vcpu_put(vcpu);

	vcpu->arch.last_exit_cpu = -1;

	kvm_riscv_vcpu_context_reset(vcpu, kvm_sbi_reset);
[all …]
/linux/arch/arm64/kvm/hyp/vhe/

switch.c:52

static u64 __compute_hcr(struct kvm_vcpu *vcpu)
{
	u64 guest_hcr, hcr = vcpu->arch.hcr_el2;

	if (!vcpu_has_nv(vcpu))
	...
	if (is_hyp_ctxt(vcpu)) {
	...
		if (!vcpu_el2_e2h_is_set(vcpu))
	...
		guest_hcr = kvm_vcpu_apply_reg_masks(vcpu, HCR_EL2, 0);
	...
		write_sysreg_s(vcpu->arch.ctxt.vncr_array, SYS_VNCR_EL2);
	...
		guest_hcr = __vcpu_sys_reg(vcpu, HCR_EL2);
	...
		va |= __vcpu_sys_reg(vcpu, VNCR_EL2) & GENMASK(PAGE_SHIFT - 1, 0);
	...
static void __activate_traps(struct kvm_vcpu *vcpu)
[all …]
sysreg-sr.c:18

static void __sysreg_save_vel2_state(struct kvm_vcpu *vcpu)
{
	...
	__vcpu_assign_sys_reg(vcpu, PAR_EL1, read_sysreg(par_el1));
	__vcpu_assign_sys_reg(vcpu, TPIDR_EL1, read_sysreg(tpidr_el1));
	...
	__vcpu_assign_sys_reg(vcpu, ESR_EL2, read_sysreg_el1(SYS_ESR));
	__vcpu_assign_sys_reg(vcpu, AFSR0_EL2, read_sysreg_el1(SYS_AFSR0));
	__vcpu_assign_sys_reg(vcpu, AFSR1_EL2, read_sysreg_el1(SYS_AFSR1));
	__vcpu_assign_sys_reg(vcpu, FAR_EL2, read_sysreg_el1(SYS_FAR));
	__vcpu_assign_sys_reg(vcpu, MAIR_EL2, read_sysreg_el1(SYS_MAIR));
	__vcpu_assign_sys_reg(vcpu, VBAR_EL2, read_sysreg_el1(SYS_VBAR));
	__vcpu_assign_sys_reg(vcpu, CONTEXTIDR_EL2, read_sysreg_el1(SYS_CONTEXTIDR));
[all …]
/linux/arch/mips/kvm/

emulate.c:40

static int kvm_compute_return_epc(struct kvm_vcpu *vcpu, unsigned long instpc,
	...
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	...
	err = kvm_get_badinstrp((u32 *)epc, vcpu, &insn.word);
	...
enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause)
{
	...
		err = kvm_compute_return_epc(vcpu, vcpu->arch.pc,
					     &vcpu->arch.pc);
	...
		vcpu->arch.pc += 4;
	...
	kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
	...
int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
{
	...
		*out = vcpu->arch.host_cp0_badinstr;
[all …]
/linux/arch/arm64/kvm/hyp/include/hyp/

switch.h:43

static inline void __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
{
	if (!vcpu_el1_is_32bit(vcpu))
	...
	__vcpu_assign_sys_reg(vcpu, FPEXC32_EL2, read_sysreg(fpexc32_el2));
}

static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
{
	...
	if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd()) {
	...
static inline void __activate_cptr_traps_nvhe(struct kvm_vcpu *vcpu)
{
	...
	if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
	...
static inline void __activate_cptr_traps_vhe(struct kvm_vcpu *vcpu)
{
	...
	if (vcpu_has_sve(vcpu))
	...
	if (!vcpu_has_nv(vcpu))
[all …]
/linux/arch/arm64/kvm/

handle_exit.c:32

static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u64 esr)
{
	...
	kvm_inject_serror(vcpu);
}

static int handle_hvc(struct kvm_vcpu *vcpu)
{
	trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
			    kvm_vcpu_hvc_get_imm(vcpu));
	vcpu->stat.hvc_exit_stat++;
	...
	if (vcpu_has_nv(vcpu)) {
		if (vcpu_read_sys_reg(vcpu, HCR_EL2) & HCR_HCD)
			kvm_inject_undefined(vcpu);
		else
			kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
[all …]
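handle_hvc() above shows the nested-virt decision: with a virtual EL2 present, an HVC from the guest either becomes UNDEFINED when HCR_EL2.HCD is set, or is reflected to the guest hypervisor as a synchronous exception. A small illustrative classifier of that logic (HCR_HCD is the architectural HVC-disable bit, bit 29 of HCR_EL2; the enum and function are invented for this sketch):

/*
 * Illustrative classifier (not the kernel's code) for the nested HVC
 * decision in handle_hvc() above. HCR_HCD is the architectural HVC
 * disable bit (bit 29 of HCR_EL2); the enum and function are invented
 * for this sketch.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HCR_HCD		(1ull << 29)

enum hvc_action { HVC_HANDLE_IN_HOST, HVC_UNDEF, HVC_FORWARD_TO_VEL2 };

static enum hvc_action classify_hvc(bool guest_has_el2, uint64_t hcr_el2)
{
	/* No virtual EL2: the host hypervisor services the call itself. */
	if (!guest_has_el2)
		return HVC_HANDLE_IN_HOST;

	/* Virtual EL2 present: UNDEF if HVC is disabled, else reflect. */
	return (hcr_el2 & HCR_HCD) ? HVC_UNDEF : HVC_FORWARD_TO_VEL2;
}

int main(void)
{
	printf("%d\n", classify_hvc(false, 0));		/* HVC_HANDLE_IN_HOST */
	printf("%d\n", classify_hvc(true, HCR_HCD));	/* HVC_UNDEF */
	printf("%d\n", classify_hvc(true, 0));		/* HVC_FORWARD_TO_VEL2 */
	return 0;
}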
/linux/arch/x86/kvm/

hyperv.h:64

static inline struct kvm_vcpu_hv *to_hv_vcpu(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hyperv;
}

static inline struct kvm_vcpu_hv_synic *to_hv_synic(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	...
	return hv_vcpu->vcpu;		/* in hv_synic_to_vcpu() */
	...
static inline struct kvm_hv_syndbg *to_hv_syndbg(struct kvm_vcpu *vcpu)
{
	return &vcpu->kvm->arch.hyperv.hv_syndbg;
}

static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	return hv_vcpu ? hv_vcpu->vp_index : vcpu->vcpu_idx;
[all …]
kvm_cache_regs.h:20

static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
{									\
	return vcpu->arch.regs[VCPU_REGS_##uname];			\
}									\
static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu,	\
	...
	vcpu->arch.regs[VCPU_REGS_##uname] = val;			\
	...
#define kvm_assert_register_caching_allowed(vcpu)			\
	lockdep_assert_once(in_task() || kvm_arch_pmi_in_guest(vcpu))
	...
static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
	...
{
	kvm_assert_register_caching_allowed(vcpu);
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
[all …]
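kvm_register_is_available() above consults a per-vCPU bitmap (regs_avail, with a companion regs_dirty) so guest registers are read from hardware only once per exit and written back only when modified. A standalone sketch of that caching pattern, with plain C bit operations standing in for test_bit()/__set_bit() (all names are illustrative):

/*
 * Standalone sketch of the register-caching pattern above: a bitmap
 * records which cached registers hold a valid copy (avail) and which
 * have been modified relative to hardware (dirty). Plain C bit ops
 * stand in for test_bit()/__set_bit(); all names are illustrative.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum reg { REG_RAX, REG_RIP, NR_REGS };

struct reg_cache {
	uint64_t regs[NR_REGS];
	uint64_t avail;		/* cache holds a valid copy */
	uint64_t dirty;		/* cache differs from hardware */
};

static bool register_is_available(const struct reg_cache *c, enum reg r)
{
	return c->avail & (1ull << r);
}

static bool register_is_dirty(const struct reg_cache *c, enum reg r)
{
	return c->dirty & (1ull << r);
}

static void cache_write(struct reg_cache *c, enum reg r, uint64_t val)
{
	c->regs[r] = val;
	c->avail |= 1ull << r;
	c->dirty |= 1ull << r;	/* must be written back before next entry */
}

int main(void)
{
	struct reg_cache c = { { 0 }, 0, 0 };

	printf("rip avail? %d\n", register_is_available(&c, REG_RIP));	/* 0 */
	cache_write(&c, REG_RIP, 0xfffffff0);
	printf("rip avail? %d dirty? %d\n",
	       register_is_available(&c, REG_RIP),
	       register_is_dirty(&c, REG_RIP));				/* 1 1 */
	return 0;
}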
x86.h:148

void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu);
int kvm_check_nested_events(struct kvm_vcpu *vcpu);
...
static inline void kvm_leave_nested(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops.nested_ops->leave_nested(vcpu);
}
...
static inline void kvm_nested_vmexit_handle_ibrs(struct kvm_vcpu *vcpu)
{
	...
	if (guest_cpu_cap_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
	    guest_cpu_cap_has(vcpu, X86_FEATURE_AMD_IBRS))
	...
static inline bool kvm_vcpu_has_run(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.last_vmentry_cpu != -1;
}

static inline void kvm_set_mp_state(struct kvm_vcpu *vcpu, int mp_state)
[all …]
/linux/arch/loongarch/kvm/

vcpu.c:41

static inline void kvm_save_host_pmu(struct kvm_vcpu *vcpu)
{
	...
	context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
	...
static inline void kvm_restore_host_pmu(struct kvm_vcpu *vcpu)
{
	...
	context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
	...
static inline void kvm_save_guest_pmu(struct kvm_vcpu *vcpu)
{
	struct loongarch_csrs *csr = vcpu->arch.csr;
	...
static inline void kvm_restore_guest_pmu(struct kvm_vcpu *vcpu)
{
	struct loongarch_csrs *csr = vcpu->arch.csr;
	...
static int kvm_own_pmu(struct kvm_vcpu *vcpu)
{
	...
	if (!kvm_guest_has_pmu(&vcpu->arch))
[all …]
/linux/arch/powerpc/include/asm/

kvm_ppc.h:62

extern int kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
...
extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_vcpu *vcpu,
	...
extern int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
	...
extern int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
	...
extern int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
	...
extern int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
	...
extern int kvmppc_handle_store(struct kvm_vcpu *vcpu,
	...
extern int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
[all …]