Searched refs:vcpu (results 1–25 of 415), sorted by relevance.


/linux/arch/s390/kvm/
priv.c

    static int handle_ri(struct kvm_vcpu *vcpu)
    {
        vcpu->stat.instruction_ri++;

        if (test_kvm_facility(vcpu->kvm, 64)) {
            VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (lazy)");
            vcpu->arch.sie_block->ecb3 |= ECB3_RI;
            kvm_s390_retry_instr(vcpu);
            return 0;
        } else
            return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
    }

    int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
    {
        if ((vcpu->arch.sie_block->ipa & 0xf) <= 4)
            return handle_ri(vcpu);
[all …]
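
Aside: handle_ri() is a lazy-enable pattern. The first guest use of the runtime-instrumentation facility traps, KVM flips the ECB3_RI enable bit in the SIE control block and retries the same instruction, so later uses run without an exit; if the facility does not exist, the guest gets an operation exception instead. A toy userspace simulation of that control flow (all names here are hypothetical, not kernel API):

    #include <stdio.h>
    #include <stdbool.h>

    struct ctrl_block { bool ri_enabled; };   /* stand-in for the SIE ECB bit */

    /* returns true if the trapped instruction should be retried after enabling */
    static bool handle_ri_trap(struct ctrl_block *cb, bool facility_available)
    {
        if (facility_available) {
            cb->ri_enabled = true;   /* flip the enable bit ... */
            return true;             /* ... and retry the instruction */
        }
        return false;                /* no facility: fault the guest */
    }

    int main(void)
    {
        struct ctrl_block cb = { .ri_enabled = false };

        if (handle_ri_trap(&cb, true))
            printf("RI enabled lazily, instruction retried\n");
        else
            printf("program interrupt: operation exception\n");
        return 0;
    }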
diag.c

    static int diag_release_pages(struct kvm_vcpu *vcpu)
    {
        unsigned long start, end;
        unsigned long prefix = kvm_s390_get_prefix(vcpu);

        start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
        end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + PAGE_SIZE;
        vcpu->stat.instruction_diagnose_10++;

        if (…)
            return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        VCPU_EVENT(vcpu, 5, "diag release pages %lX %lX", start, end);
        …
        gmap_discard(vcpu->arch.gmap, start, end);
        …
        gmap_discard(vcpu->arch.gmap, start, prefix);
        …
        gmap_discard(vcpu->arch.gmap, 0, PAGE_SIZE);
[all …]
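
The two gpr indexing expressions above decode the DIAGNOSE instruction's register fields straight out of the intercepted opcode half-word (ipa): bits 4–7 name the register holding the start address, bits 0–3 the one holding the end. A standalone sketch of that mask-and-shift decode (the opcode value and register contents are made up for illustration):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint16_t ipa = 0x8310;   /* hypothetical intercepted opcode half-word */
        unsigned long gprs[16] = { [1] = 0x1000UL, [0] = 0x3000UL };

        unsigned r1 = (ipa & 0xf0) >> 4;   /* first operand register number */
        unsigned r2 = ipa & 0x0f;          /* second operand register number */

        unsigned long start = gprs[r1];
        unsigned long end = gprs[r2] + 4096;   /* PAGE_SIZE on s390 */

        printf("r%u..r%u -> range %#lx..%#lx\n", r1, r2, start, end);
        return 0;
    }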
guestdbg.c

    static void enable_all_hw_bp(struct kvm_vcpu *vcpu)
    {
        …
        u64 *cr9 = &vcpu->arch.sie_block->gcr[9];
        u64 *cr10 = &vcpu->arch.sie_block->gcr[10];
        u64 *cr11 = &vcpu->arch.sie_block->gcr[11];
        …
        if (vcpu->arch.guestdbg.nr_hw_bp <= 0 ||
            vcpu->arch.guestdbg.hw_bp_info == NULL)
            return;
        …
        for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) {
            start = vcpu->arch.guestdbg.hw_bp_info[i].addr;
            len = vcpu->arch.guestdbg.hw_bp_info[i].len;
            …
        }
    }

    static void enable_all_hw_wp(struct kvm_vcpu *vcpu)
[all …]
kvm-s390.h

    #define IS_TE_ENABLED(vcpu)    ((vcpu->arch.sie_block->ecb & ECB_TE))
    …
    #define IS_ITDB_VALID(vcpu) \
        ((*(char *)phys_to_virt((vcpu)->arch.sie_block->itdba) == TDB_FORMAT1))
    …
    static inline void kvm_s390_set_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
    {
        atomic_or(flags, &vcpu->arch.sie_block->cpuflags);
    }

    static inline void kvm_s390_clear_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
    {
        atomic_andnot(flags, &vcpu->arch.sie_block->cpuflags);
    }

    static inline bool kvm_s390_test_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
    {
        return (atomic_read(&vcpu->arch.sie_block->cpuflags) & flags) == flags;
    }

    static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
[all …]
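
These helpers are the usual atomic bit-flags idiom: OR to set, AND-NOT to clear, and read-then-mask to test that every requested flag is set. A userspace analogue using C11 atomics rather than the kernel's atomic_t API (the shape is the same, the types and helpers are not):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static void set_flags(atomic_uint *w, unsigned f)   { atomic_fetch_or(w, f); }
    static void clear_flags(atomic_uint *w, unsigned f) { atomic_fetch_and(w, ~f); }

    static bool test_flags(atomic_uint *w, unsigned f)
    {
        return (atomic_load(w) & f) == f;   /* true only if *all* bits are set */
    }

    int main(void)
    {
        atomic_uint cpuflags = 0;

        set_flags(&cpuflags, 0x3);
        printf("both set: %d\n", test_flags(&cpuflags, 0x3));  /* 1 */
        clear_flags(&cpuflags, 0x1);
        printf("both set: %d\n", test_flags(&cpuflags, 0x3));  /* 0 */
        printf("one set:  %d\n", test_flags(&cpuflags, 0x2));  /* 1 */
        return 0;
    }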
/linux/arch/powerpc/kvm/
booke.c

    void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
    {
        …
        printk("pc: %08lx msr: %08llx\n", vcpu->arch.regs.nip,
               vcpu->arch.shared->msr);
        printk("lr: %08lx ctr: %08lx\n", vcpu->arch.regs.link,
               vcpu->arch.regs.ctr);
        printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
               vcpu->arch.shared->srr1);
        …
        printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);
        …
               kvmppc_get_gpr(vcpu, i),
               kvmppc_get_gpr(vcpu, i+1),
        …
[all …]
book3s_emulate.c

    static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level)
    {
        …
        if (vcpu->arch.papr_enabled && (level > PRIV_SUPER))
            return false;
        …
        if ((kvmppc_get_msr(vcpu) & MSR_PR) && level > PRIV_PROBLEM)
            return false;

        return true;
    }

    static inline void kvmppc_copyto_vcpu_tm(struct kvm_vcpu *vcpu)
    {
        memcpy(&vcpu->arch.gpr_tm[0], &vcpu->arch.regs.gpr[0],
               sizeof(vcpu->arch.gpr_tm));
        memcpy(&vcpu->arch.fp_tm, &vcpu->arch.fp,
               sizeof(struct thread_fp_state));
        memcpy(&vcpu->arch.vr_tm, &vcpu->arch.vr,
               sizeof(struct thread_vr_state));
        vcpu->arch.ppr_tm = vcpu->arch.ppr;
        vcpu->arch.dscr_tm = vcpu->arch.dscr;
[all …]
booke_emulate.c

    static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
    {
        vcpu->arch.regs.nip = vcpu->arch.shared->srr0;
        kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1);
    }

    static void kvmppc_emul_rfdi(struct kvm_vcpu *vcpu)
    {
        vcpu->arch.regs.nip = vcpu->arch.dsrr0;
        kvmppc_set_msr(vcpu, vcpu->arch.dsrr1);
    }

    static void kvmppc_emul_rfci(struct kvm_vcpu *vcpu)
    {
        vcpu->arch.regs.nip = vcpu->arch.csrr0;
        kvmppc_set_msr(vcpu, vcpu->arch.csrr1);
    }

    int kvmppc_booke_emulate_op(struct kvm_vcpu *vcpu,
[all …]
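
rfi, rfdi and rfci above differ only in which saved PC/MSR pair they restore from: Book E keeps separate save/restore registers for normal (SRR0/1), debug (DSRR0/1) and critical (CSRR0/1) interrupts. A minimal sketch of that shared shape, with a hypothetical mini_vcpu standing in for struct kvm_vcpu:

    #include <stdio.h>
    #include <stdint.h>

    /* each interrupt class has its own saved-PC / saved-MSR pair,
     * and the matching "rfXi" just restores from its pair */
    struct mini_vcpu { uint64_t nip, msr; };
    struct save_pair { uint64_t srr0, srr1; };

    static void emul_return_from_interrupt(struct mini_vcpu *v,
                                           const struct save_pair *p)
    {
        v->nip = p->srr0;   /* resume address */
        v->msr = p->srr1;   /* restored machine state */
    }

    int main(void)
    {
        struct mini_vcpu v = { 0, 0 };
        struct save_pair normal = { 0x1000, 0x8000 };
        struct save_pair critical = { 0x2000, 0x9000 };

        emul_return_from_interrupt(&v, &normal);   /* rfi */
        printf("after rfi:  nip=%#llx msr=%#llx\n",
               (unsigned long long)v.nip, (unsigned long long)v.msr);
        emul_return_from_interrupt(&v, &critical); /* rfci */
        printf("after rfci: nip=%#llx msr=%#llx\n",
               (unsigned long long)v.nip, (unsigned long long)v.msr);
        return 0;
    }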
emulate_loadstore.c

    static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
    {
        if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
            kvmppc_core_queue_fpunavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
            return true;
        }
        return false;
    }

    static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
    {
        if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
            kvmppc_core_queue_vsx_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
            return true;
        }
        return false;
    }

    static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
    {
        if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
            kvmppc_core_queue_vec_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
            return true;
        }
        return false;
    }
    …
    int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
[all …]
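
All three checks follow one pattern: if the guest's MSR has the facility bit off, queue the matching "facility unavailable" interrupt and return true, so emulation bails out and the guest's own handler runs first. A self-contained sketch (bit positions and helper names are illustrative, not the real MSR layout):

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define MSR_FP  (1u << 13)   /* illustrative bit positions */
    #define MSR_VEC (1u << 25)

    /* stand-in for the kvmppc_core_queue_*_unavail() family */
    static void queue_unavail(const char *facility)
    {
        printf("queue '%s unavailable' interrupt for the guest\n", facility);
    }

    /* returns true if emulation must stop and let the guest handle it */
    static bool check_facility_disabled(uint32_t msr, uint32_t bit, const char *name)
    {
        if (!(msr & bit)) {
            queue_unavail(name);
            return true;
        }
        return false;
    }

    int main(void)
    {
        uint32_t guest_msr = MSR_FP;   /* FP on, VEC off */

        if (!check_facility_disabled(guest_msr, MSR_FP, "FP"))
            printf("FP load/store can be emulated directly\n");
        check_facility_disabled(guest_msr, MSR_VEC, "AltiVec");
        return 0;
    }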
book3s_hv_tm.c

    static void emulate_tx_failure(struct kvm_vcpu *vcpu, u64 failure_cause)
    {
        …
        u64 msr = vcpu->arch.shregs.msr;

        tfiar = vcpu->arch.regs.nip & ~0x3ull;
        …
        if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr))
            …
        vcpu->arch.tfiar = tfiar;
        …
        vcpu->arch.texasr = (vcpu->arch.texasr & 0x3ffffff) | texasr;
    }

    int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
    {
        u32 instr = vcpu->arch.emul_inst;
        u64 msr = vcpu->arch.shregs.msr;
        …
        vcpu->arch.regs.nip -= 4;
[all …]
book3s_paired_singles.c

    static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt)
    {
        kvm_cvt_df(&VCPU_FPR(vcpu, rt), &vcpu->arch.qpr[rt]);
    }

    static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store)
    {
        …
        u64 msr = kvmppc_get_msr(vcpu);
        …
        kvmppc_set_msr(vcpu, msr);
        kvmppc_set_dar(vcpu, eaddr);
        …
        kvmppc_set_dsisr(vcpu, dsisr);
        kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
    }

    static int kvmppc_emulate_fpr_load(struct kvm_vcpu *vcpu,
                                       …)
    {
        …
        r = kvmppc_ld(vcpu, &addr, len, tmp, true);
[all …]
book3s_pr_papr.c

    static unsigned long get_pteg_addr(struct kvm_vcpu *vcpu, long pte_index)
    {
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
        …
    }

    static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
    {
        long flags = kvmppc_get_gpr(vcpu, 4);
        long pte_index = kvmppc_get_gpr(vcpu, 5);
        …
        pteg_addr = get_pteg_addr(vcpu, pte_index);
        …
        mutex_lock(&vcpu->kvm->arch.hpt_mutex);
        …
        hpte[0] = cpu_to_be64(kvmppc_get_gpr(vcpu, 6));
        hpte[1] = cpu_to_be64(kvmppc_get_gpr(vcpu, 7));
        …
        kvmppc_set_gpr(vcpu, 4, pte_index | i);
[all …]
book3s_hv_p9_entry.c

    static void load_spr_state(struct kvm_vcpu *vcpu,
                               …)
    {
        …
        mtspr(SPRN_TAR, vcpu->arch.tar);
        …
        if (… &&
            current->thread.vrsave != vcpu->arch.vrsave)
            mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);

        if (vcpu->arch.hfscr & HFSCR_EBB) {
            if (current->thread.ebbhr != vcpu->arch.ebbhr)
                mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);
            if (current->thread.ebbrr != vcpu->arch.ebbrr)
                mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);
            if (current->thread.bescr != vcpu->arch.bescr)
[all …]
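
load_spr_state() is a lazy-restore pattern: mtspr on the entry path is comparatively expensive, so each guest SPR is written only when it differs from the value the host thread last left in the register. A sketch of the idea with hypothetical names (the kernel compares against current->thread's cached copies; it never reads the SPR back):

    #include <stdio.h>
    #include <stdint.h>

    static unsigned long nwrites;

    /* stand-in for mtspr: pretend every write is expensive */
    static void write_spr(uint64_t *hw, uint64_t val)
    {
        *hw = val;
        nwrites++;
    }

    /* skip the write when the register already holds the wanted value */
    static void lazy_restore(uint64_t *hw, uint64_t cached, uint64_t want)
    {
        if (cached != want)
            write_spr(hw, want);
    }

    int main(void)
    {
        uint64_t ebbhr_hw = 0x10, ebbrr_hw = 0x20;

        lazy_restore(&ebbhr_hw, 0x10, 0x10);   /* unchanged: no mtspr */
        lazy_restore(&ebbrr_hw, 0x20, 0x99);   /* changed: one mtspr */
        printf("SPR writes issued: %lu\n", nwrites);   /* 1 */
        return 0;
    }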
/linux/arch/arm64/kvm/
debug.c

    static void save_guest_debug_regs(struct kvm_vcpu *vcpu)
    {
        u64 val = vcpu_read_sys_reg(vcpu, MDSCR_EL1);

        vcpu->arch.guest_debug_preserved.mdscr_el1 = val;
        …
            vcpu->arch.guest_debug_preserved.mdscr_el1);

        vcpu->arch.guest_debug_preserved.pstate_ss =
            (*vcpu_cpsr(vcpu) & DBG_SPSR_SS);
    }

    static void restore_guest_debug_regs(struct kvm_vcpu *vcpu)
    {
        u64 val = vcpu->arch.guest_debug_preserved.mdscr_el1;

        vcpu_write_sys_reg(vcpu, val, MDSCR_EL1);
        …
            vcpu_read_sys_reg(vcpu, MDSCR_EL1));
[all …]
inject_fault.c

    static void pend_sync_exception(struct kvm_vcpu *vcpu)
    {
        …
        if (likely(!vcpu_has_nv(vcpu))) {
            kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
            return;
        }
        …
        switch(*vcpu_cpsr(vcpu) & PSR_MODE_MASK) {
        …
            kvm_pend_exception(vcpu, EXCEPT_AA64_EL2_SYNC);
        …
            kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
        …
            if (vcpu_el2_tge_is_set(vcpu))
                kvm_pend_exception(vcpu, EXCEPT_AA64_EL2_SYNC);
            else
                kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
        }
    }

    static bool match_target_el(struct kvm_vcpu *vcpu, unsigned long target)
[all …]
handle_exit.c

    static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u64 esr)
    {
        …
        kvm_inject_vabt(vcpu);
    }

    static int handle_hvc(struct kvm_vcpu *vcpu)
    {
        trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
                            kvm_vcpu_hvc_get_imm(vcpu));
        vcpu->stat.hvc_exit_stat++;
        …
        if (vcpu_has_nv(vcpu)) {
            if (vcpu_read_sys_reg(vcpu, HCR_EL2) & HCR_HCD)
                kvm_inject_undefined(vcpu);
            else
                kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
[all …]
/linux/arch/riscv/kvm/
vcpu.c

    static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
    {
        …
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
        struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
        struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
        struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context;
        …
        loaded = (vcpu->cpu != -1);
        if (loaded)
            kvm_arch_vcpu_put(vcpu);

        vcpu->arch.last_exit_cpu = -1;
        …
        spin_lock(&vcpu->arch.reset_cntx_lock);
        …
        spin_unlock(&vcpu->arch.reset_cntx_lock);
[all …]
/linux/arch/mips/kvm/
emulate.c

    static int kvm_compute_return_epc(struct kvm_vcpu *vcpu, unsigned long instpc,
                                      …)
    {
        …
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        …
        err = kvm_get_badinstrp((u32 *)epc, vcpu, &insn.word);
        …
    }

    enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause)
    {
        …
        err = kvm_compute_return_epc(vcpu, vcpu->arch.pc,
                                     &vcpu->arch.pc);
        …
        vcpu->arch.pc += 4;
        …
        kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
        …
    }

    int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
    {
        …
        *out = vcpu->arch.host_cp0_badinstr;
[all …]
mips.c

    int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
    {
        return !!(vcpu->arch.pending_exceptions);
    }

    bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
    …

    int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
    …

    static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
    {
        struct kvm_vcpu *vcpu;

        vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
        …
        kvm_mips_callbacks->queue_timer_int(vcpu);

        vcpu->arch.wait = 0;
        rcuwait_wake_up(&vcpu->wait);
        …
        return kvm_mips_count_timeout(vcpu);
    }
[all …]
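
kvm_mips_comparecount_wakeup() receives only the hrtimer embedded in the vcpu and recovers the enclosing kvm_vcpu with container_of(). A userspace sketch of that recovery, with a local copy of the macro and made-up structure names:

    #include <stdio.h>
    #include <stddef.h>

    /* userspace copy of the kernel's container_of() */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct timer { int pending; };

    struct mini_vcpu {
        int id;
        struct timer comparecount_timer;   /* embedded, like in kvm_vcpu.arch */
    };

    /* the callback gets the embedded timer, not the vcpu */
    static void wakeup(struct timer *t)
    {
        struct mini_vcpu *vcpu =
            container_of(t, struct mini_vcpu, comparecount_timer);
        printf("timer fired for vcpu %d\n", vcpu->id);
    }

    int main(void)
    {
        struct mini_vcpu v = { .id = 3 };
        wakeup(&v.comparecount_timer);
        return 0;
    }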
/linux/arch/x86/kvm/vmx/
x86_ops.h

    int vmx_vcpu_create(struct kvm_vcpu *vcpu);
    int vmx_vcpu_pre_run(struct kvm_vcpu *vcpu);
    fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit);
    void vmx_vcpu_free(struct kvm_vcpu *vcpu);
    void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
    void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
    void vmx_vcpu_put(struct kvm_vcpu *vcpu);
    int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath);
    void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu);
    int vmx_skip_emulated_instruction(struct kvm_vcpu *vcpu);
[all …]
/linux/arch/arm64/kvm/hyp/vhe/
switch.c

    static u64 __compute_hcr(struct kvm_vcpu *vcpu)
    {
        u64 hcr = vcpu->arch.hcr_el2;

        if (!vcpu_has_nv(vcpu))
            return hcr;

        if (is_hyp_ctxt(vcpu)) {
            …
            if (!vcpu_el2_e2h_is_set(vcpu))
                …
            write_sysreg_s(vcpu->arch.ctxt.vncr_array, SYS_VNCR_EL2);
        }

        return hcr | (__vcpu_sys_reg(vcpu, HCR_EL2) & ~NV_HCR_GUEST_EXCLUDE);
    }

    static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
    {
        …
        if (vcpu_has_sve(vcpu))
            …
        __activate_traps_fpsimd32(vcpu);
[all …]
/linux/arch/loongarch/kvm/
vcpu.c

    static inline void kvm_save_host_pmu(struct kvm_vcpu *vcpu)
    {
        …
        context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
        …
    }

    static inline void kvm_restore_host_pmu(struct kvm_vcpu *vcpu)
    {
        …
        context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
        …
    }

    static inline void kvm_save_guest_pmu(struct kvm_vcpu *vcpu)
    {
        struct loongarch_csrs *csr = vcpu->arch.csr;
        …
    }

    static inline void kvm_restore_guest_pmu(struct kvm_vcpu *vcpu)
    {
        struct loongarch_csrs *csr = vcpu->arch.csr;
        …
    }

    static int kvm_own_pmu(struct kvm_vcpu *vcpu)
    {
        …
        if (!kvm_guest_has_pmu(&vcpu->arch))
[all …]
exit.c

    static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst)
    {
        …
        ++vcpu->stat.cpucfg_exits;
        index = vcpu->arch.gprs[rj];
        …
            vcpu->arch.gprs[rd] = vcpu->arch.cpucfg[index];
        …
            vcpu->arch.gprs[rd] = *(unsigned int *)KVM_SIGNATURE;
        …
            ret = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK;
            vcpu->arch.gprs[rd] = ret;
        …
            vcpu->arch.gprs[rd] = 0;
        …
    }

    static unsigned long kvm_emu_read_csr(struct kvm_vcpu *vcpu, int csrid)
    {
        …
        struct loongarch_csrs *csr = vcpu->arch.csr;
[all …]
/linux/arch/x86/kvm/
hyperv.h

    static inline struct kvm_vcpu_hv *to_hv_vcpu(struct kvm_vcpu *vcpu)
    {
        return vcpu->arch.hyperv;
    }

    static inline struct kvm_vcpu_hv_synic *to_hv_synic(struct kvm_vcpu *vcpu)
    {
        struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
        …
    }

    static inline struct kvm_vcpu *hv_synic_to_vcpu(struct kvm_vcpu_hv_synic *synic)
    {
        …
        return hv_vcpu->vcpu;
    }

    static inline struct kvm_hv_syndbg *to_hv_syndbg(struct kvm_vcpu *vcpu)
    {
        return &vcpu->kvm->arch.hyperv.hv_syndbg;
    }

    static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu)
    {
        struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

        return hv_vcpu ? hv_vcpu->vp_index : vcpu->vcpu_idx;
    }
[all …]
x86.c

    #define emul_to_vcpu(ctxt) \
        ((struct kvm_vcpu *)(ctxt)->vcpu)
    …
    static void update_cr8_intercept(struct kvm_vcpu *vcpu);
    static void process_nmi(struct kvm_vcpu *vcpu);
    static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
    static void store_regs(struct kvm_vcpu *vcpu);
    static int sync_regs(struct kvm_vcpu *vcpu);
    static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu);
    …
    static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
    static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
    …
    typedef int (*msr_access_t)(struct kvm_vcpu *vcpu, u32 index, u64 *data,
[all …]
/linux/arch/powerpc/include/asm/
kvm_ppc.h

    extern int kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
    extern int __kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
    …
    extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
    extern int kvmppc_handle_load(struct kvm_vcpu *vcpu, …);
    …
    extern int kvmppc_handle_loads(struct kvm_vcpu *vcpu, …);
    …
    extern int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu, …);
    …
    extern int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu, …);
    extern int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu, …);
    …
    extern int kvmppc_handle_store(struct kvm_vcpu *vcpu, …);
    …
    extern int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu, …);
[all …]
