Lines Matching full:vcpu — arch/powerpc/kvm/powerpc.c (Linux kernel, KVM PowerPC core)
55 bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) in kvm_arch_dy_runnable() argument
57 return kvm_arch_vcpu_runnable(vcpu); in kvm_arch_dy_runnable()
60 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_in_kernel() argument
65 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_should_kick() argument
79 int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu) in kvmppc_prepare_to_enter() argument
95 kvmppc_account_exit(vcpu, SIGNAL_EXITS); in kvmppc_prepare_to_enter()
96 vcpu->run->exit_reason = KVM_EXIT_INTR; in kvmppc_prepare_to_enter()
101 vcpu->mode = IN_GUEST_MODE; in kvmppc_prepare_to_enter()
104 * Reading vcpu->requests must happen after setting vcpu->mode, in kvmppc_prepare_to_enter()
109 * to the page tables done while the VCPU is running. in kvmppc_prepare_to_enter()
114 if (kvm_request_pending(vcpu)) { in kvmppc_prepare_to_enter()
117 trace_kvm_check_requests(vcpu); in kvmppc_prepare_to_enter()
118 r = kvmppc_core_check_requests(vcpu); in kvmppc_prepare_to_enter()
125 if (kvmppc_core_prepare_to_enter(vcpu)) { in kvmppc_prepare_to_enter()
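A note on the ordering visible above: kvmppc_prepare_to_enter() stores vcpu->mode = IN_GUEST_MODE before loading vcpu->requests, with a full barrier in between, so a remote kvm_make_request() is either observed here or triggers a kick at the requester. A minimal userspace analogue of that store/load pairing, using C11 seq_cst atomics in place of the kernel's smp_mb() (names are illustrative, not the kernel's):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int mode;      /* 0 = outside guest, 1 = IN_GUEST_MODE */
static atomic_int requests;  /* pending-request bitmask */

static bool try_enter_guest(void)
{
	atomic_store(&mode, 1);            /* vcpu->mode = IN_GUEST_MODE */
	/* the seq_cst store->load pair stands in for the kernel's smp_mb() */
	if (atomic_load(&requests) != 0) { /* kvm_request_pending() */
		atomic_store(&mode, 0);
		return false;              /* service requests first */
	}
	return true;                       /* safe to enter the guest */
}

static void make_request(int req)          /* kvm_make_request() + kick */
{
	atomic_fetch_or(&requests, 1 << req);
	if (atomic_load(&mode) == 1) {
		/* target may already be in guest mode: an IPI kick would go here */
	}
}

Because both sides use sequentially consistent accesses, at least one thread observes the other's store, so a request can never be lost between the check and guest entry.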
142 static void kvmppc_swab_shared(struct kvm_vcpu *vcpu) in kvmppc_swab_shared() argument
144 struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared; in kvmppc_swab_shared()
162 int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) in kvmppc_kvm_pv() argument
164 int nr = kvmppc_get_gpr(vcpu, 11); in kvmppc_kvm_pv()
166 unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3); in kvmppc_kvm_pv()
167 unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4); in kvmppc_kvm_pv()
168 unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5); in kvmppc_kvm_pv()
169 unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6); in kvmppc_kvm_pv()
172 if (!(kvmppc_get_msr(vcpu) & MSR_SF)) { in kvmppc_kvm_pv()
186 if (vcpu->arch.intr_msr & MSR_LE) in kvmppc_kvm_pv()
188 if (shared_big_endian != vcpu->arch.shared_big_endian) in kvmppc_kvm_pv()
189 kvmppc_swab_shared(vcpu); in kvmppc_kvm_pv()
190 vcpu->arch.shared_big_endian = shared_big_endian; in kvmppc_kvm_pv()
199 vcpu->arch.disable_kernel_nx = true; in kvmppc_kvm_pv()
200 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); in kvmppc_kvm_pv()
203 vcpu->arch.magic_page_pa = param1 & ~0xfffULL; in kvmppc_kvm_pv()
204 vcpu->arch.magic_page_ea = param2 & ~0xfffULL; in kvmppc_kvm_pv()
211 if ((vcpu->arch.magic_page_pa & 0xf000) != in kvmppc_kvm_pv()
212 ((ulong)vcpu->arch.shared & 0xf000)) { in kvmppc_kvm_pv()
213 void *old_shared = vcpu->arch.shared; in kvmppc_kvm_pv()
214 ulong shared = (ulong)vcpu->arch.shared; in kvmppc_kvm_pv()
218 shared |= vcpu->arch.magic_page_pa & 0xf000; in kvmppc_kvm_pv()
221 vcpu->arch.shared = new_shared; in kvmppc_kvm_pv()
240 kvm_vcpu_halt(vcpu); in kvmppc_kvm_pv()
247 kvmppc_set_gpr(vcpu, 4, r2); in kvmppc_kvm_pv()
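The GPR accesses above encode the paravirtual hypercall ABI: the call number arrives in GPR11, up to four parameters in GPR3-GPR6, and results are returned in GPR3 (status) and GPR4 (the r2 value set at the end); the MSR_SF test clamps everything to 32 bits for a 32-bit guest. A userspace model of that convention, with hypothetical helpers and a hypothetical status value standing in for kvmppc_get_gpr()/kvmppc_set_gpr() and the ePAPR codes:

#include <stdint.h>

#define EV_UNIMPLEMENTED 12            /* hypothetical status value */

struct regs { uint64_t gpr[32]; };

static int pv_dispatch(struct regs *r, int msr_sf)
{
	uint64_t nr = r->gpr[11];              /* kvmppc_get_gpr(vcpu, 11) */
	uint64_t p1 = r->gpr[3], p2 = r->gpr[4];
	uint64_t p3 = r->gpr[5], p4 = r->gpr[6];
	uint64_t ret = EV_UNIMPLEMENTED, r2 = 0;

	if (!msr_sf) {                         /* 32-bit guest: clamp */
		nr &= 0xffffffff; p1 &= 0xffffffff; p2 &= 0xffffffff;
		p3 &= 0xffffffff; p4 &= 0xffffffff;
	}

	/* the kernel marks the params __maybe_unused, as the listing shows */
	(void)p1; (void)p2; (void)p3; (void)p4;

	switch (nr) {
	/* magic-page mapping, feature queries, idle ... */
	default:
		break;
	}

	r->gpr[3] = ret;                       /* primary status */
	r->gpr[4] = r2;                        /* kvmppc_set_gpr(vcpu, 4, r2) */
	return 0;
}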
253 int kvmppc_sanity_check(struct kvm_vcpu *vcpu) in kvmppc_sanity_check() argument
258 if (!vcpu->arch.pvr) in kvmppc_sanity_check()
262 if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled) in kvmppc_sanity_check()
266 if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm)) in kvmppc_sanity_check()
277 vcpu->arch.sane = r; in kvmppc_sanity_check()
282 int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu) in kvmppc_emulate_mmio() argument
287 er = kvmppc_emulate_loadstore(vcpu); in kvmppc_emulate_mmio()
298 vcpu->run->exit_reason = KVM_EXIT_MMIO; in kvmppc_emulate_mmio()
309 kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst); in kvmppc_emulate_mmio()
321 if (vcpu->mmio_is_write) in kvmppc_emulate_mmio()
324 kvmppc_core_queue_data_storage(vcpu, in kvmppc_emulate_mmio()
325 kvmppc_get_msr(vcpu) & SRR1_PREFIXED, in kvmppc_emulate_mmio()
326 vcpu->arch.vaddr_accessed, dsisr); in kvmppc_emulate_mmio()
333 kvmppc_core_queue_program(vcpu, 0); in kvmppc_emulate_mmio()
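When in-kernel emulation cannot complete the access, the exit_reason set above hands it to userspace; on the next KVM_RUN the loaded data is folded back into the guest register (see kvmppc_complete_mmio_load() further down). A minimal userspace service loop under those assumptions; vcpu_fd and the mmap'ed kvm_run are presumed already set up, and handle_mmio_read()/handle_mmio_write() are hypothetical device-model hooks:

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdio.h>
#include <stdlib.h>

extern void handle_mmio_read(__u64 addr, __u8 *data, __u32 len);
extern void handle_mmio_write(__u64 addr, const __u8 *data, __u32 len);

static void run_loop(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0) {
			perror("KVM_RUN");
			exit(1);
		}
		switch (run->exit_reason) {
		case KVM_EXIT_MMIO:
			if (run->mmio.is_write)
				handle_mmio_write(run->mmio.phys_addr,
						  run->mmio.data, run->mmio.len);
			else	/* the filled data flows back into the
				 * guest register on the next KVM_RUN */
				handle_mmio_read(run->mmio.phys_addr,
						 run->mmio.data, run->mmio.len);
			break;
		case KVM_EXIT_INTR:
			break;                 /* interrupted: just re-enter */
		default:
			fprintf(stderr, "unhandled exit %u\n", run->exit_reason);
			exit(1);
		}
	}
}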
348 int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, in kvmppc_st() argument
351 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK; in kvmppc_st()
355 vcpu->stat.st++; in kvmppc_st()
357 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr) in kvmppc_st()
358 r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr, in kvmppc_st()
364 r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST, in kvmppc_st()
375 if (kvmppc_supports_magic_page(vcpu) && mp_pa && in kvmppc_st()
377 !(kvmppc_get_msr(vcpu) & MSR_PR)) { in kvmppc_st()
378 void *magic = vcpu->arch.shared; in kvmppc_st()
384 if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size)) in kvmppc_st()
391 int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, in kvmppc_ld() argument
394 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK; in kvmppc_ld()
398 vcpu->stat.ld++; in kvmppc_ld()
400 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr) in kvmppc_ld()
401 rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr, in kvmppc_ld()
407 rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST, in kvmppc_ld()
421 if (kvmppc_supports_magic_page(vcpu) && mp_pa && in kvmppc_ld()
423 !(kvmppc_get_msr(vcpu) & MSR_PR)) { in kvmppc_ld()
424 void *magic = vcpu->arch.shared; in kvmppc_ld()
430 kvm_vcpu_srcu_read_lock(vcpu); in kvmppc_ld()
431 rc = kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size); in kvmppc_ld()
432 kvm_vcpu_srcu_read_unlock(vcpu); in kvmppc_ld()
748 struct kvm_vcpu *vcpu; in kvmppc_decrementer_wakeup() local
750 vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer); in kvmppc_decrementer_wakeup()
751 kvmppc_decrementer_func(vcpu); in kvmppc_decrementer_wakeup()
756 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_create() argument
760 hrtimer_setup(&vcpu->arch.dec_timer, kvmppc_decrementer_wakeup, CLOCK_REALTIME, in kvm_arch_vcpu_create()
764 mutex_init(&vcpu->arch.exit_timing_lock); in kvm_arch_vcpu_create()
766 err = kvmppc_subarch_vcpu_init(vcpu); in kvm_arch_vcpu_create()
770 err = kvmppc_core_vcpu_create(vcpu); in kvm_arch_vcpu_create()
774 rcuwait_init(&vcpu->arch.wait); in kvm_arch_vcpu_create()
775 vcpu->arch.waitp = &vcpu->arch.wait; in kvm_arch_vcpu_create()
779 kvmppc_subarch_vcpu_uninit(vcpu); in kvm_arch_vcpu_create()
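The timer wiring above is the standard hrtimer-embedded-in-a-struct pattern: kvm_arch_vcpu_create() arms the callback with hrtimer_setup() (the newer combined init helper), and the callback recovers its vcpu with container_of(), as kvmppc_decrementer_wakeup() does. A kernel-context sketch of the same shape, assuming only the hrtimer API; struct and function names here are illustrative:

#include <linux/hrtimer.h>
#include <linux/container_of.h>

struct demo_vcpu {
	struct hrtimer dec_timer;
	/* ... */
};

static enum hrtimer_restart demo_wakeup(struct hrtimer *timer)
{
	struct demo_vcpu *v =
		container_of(timer, struct demo_vcpu, dec_timer);

	/* inject the decrementer exception into v here */
	(void)v;
	return HRTIMER_NORESTART;
}

static void demo_vcpu_init(struct demo_vcpu *v)
{
	hrtimer_setup(&v->dec_timer, demo_wakeup, CLOCK_REALTIME,
		      HRTIMER_MODE_ABS);
}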
783 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_postcreate() argument
787 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_destroy() argument
789 /* Make sure we're not using the vcpu anymore */ in kvm_arch_vcpu_destroy()
790 hrtimer_cancel(&vcpu->arch.dec_timer); in kvm_arch_vcpu_destroy()
792 switch (vcpu->arch.irq_type) { in kvm_arch_vcpu_destroy()
794 kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu); in kvm_arch_vcpu_destroy()
798 kvmppc_xive_cleanup_vcpu(vcpu); in kvm_arch_vcpu_destroy()
800 kvmppc_xics_free_icp(vcpu); in kvm_arch_vcpu_destroy()
803 kvmppc_xive_native_cleanup_vcpu(vcpu); in kvm_arch_vcpu_destroy()
807 kvmppc_core_vcpu_free(vcpu); in kvm_arch_vcpu_destroy()
809 kvmppc_subarch_vcpu_uninit(vcpu); in kvm_arch_vcpu_destroy()
812 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) in kvm_cpu_has_pending_timer() argument
814 return kvmppc_core_pending_dec(vcpu); in kvm_cpu_has_pending_timer()
817 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) in kvm_arch_vcpu_load() argument
827 mtspr(SPRN_VRSAVE, vcpu->arch.vrsave); in kvm_arch_vcpu_load()
829 kvmppc_core_vcpu_load(vcpu, cpu); in kvm_arch_vcpu_load()
832 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_put() argument
834 kvmppc_core_vcpu_put(vcpu); in kvm_arch_vcpu_put()
836 vcpu->arch.vrsave = mfspr(SPRN_VRSAVE); in kvm_arch_vcpu_put()
908 static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu, in kvmppc_set_vsr_dword() argument
912 int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_set_vsr_dword()
913 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_dword()
919 kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval); in kvmppc_set_vsr_dword()
921 kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval); in kvmppc_set_vsr_dword()
923 kvmppc_set_vsx_fpr(vcpu, index, offset, gpr); in kvmppc_set_vsr_dword()
927 static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu, in kvmppc_set_vsr_dword_dump() argument
931 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_dword_dump()
934 kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval); in kvmppc_set_vsr_dword_dump()
937 kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval); in kvmppc_set_vsr_dword_dump()
939 kvmppc_set_vsx_fpr(vcpu, index, 0, gpr); in kvmppc_set_vsr_dword_dump()
940 kvmppc_set_vsx_fpr(vcpu, index, 1, gpr); in kvmppc_set_vsr_dword_dump()
944 static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu, in kvmppc_set_vsr_word_dump() argument
948 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_word_dump()
955 kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval); in kvmppc_set_vsr_word_dump()
959 kvmppc_set_vsx_fpr(vcpu, index, 0, val.vsxval[0]); in kvmppc_set_vsr_word_dump()
960 kvmppc_set_vsx_fpr(vcpu, index, 1, val.vsxval[0]); in kvmppc_set_vsr_word_dump()
964 static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu, in kvmppc_set_vsr_word() argument
968 int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_set_vsr_word()
969 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_word()
976 kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval); in kvmppc_set_vsr_word()
978 kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval); in kvmppc_set_vsr_word()
982 val.vsxval[0] = kvmppc_get_vsx_fpr(vcpu, index, dword_offset); in kvmppc_set_vsr_word()
984 kvmppc_set_vsx_fpr(vcpu, index, dword_offset, val.vsxval[0]); in kvmppc_set_vsr_word()
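The index >= 32 tests above reflect the Power ISA register-file overlay: VSX registers 0-31 share storage with the scalar FPRs while VSX 32-63 share storage with the VMX VRs, so KVM keeps the two halves in separate arrays and the helpers subtract 32 for the VR half. The val.vval/val.vsxval juggling goes through a per-register union (the kernel's is union kvmppc_one_reg); an illustrative userspace model:

#include <stdint.h>

typedef struct { uint32_t u[4]; } vector128;  /* 128-bit register image */

union one_reg {
	uint64_t  vsxval[2];    /* the same 128 bits as two doublewords */
	uint32_t  vsx32val[4];  /* ... or four words                    */
	vector128 vval;         /* ... or one VMX value                 */
};

/* mirror of kvmppc_set_vsr_dword(): patch one doubleword lane */
static void set_vsr_dword(union one_reg *reg, int offset, uint64_t gpr)
{
	if (offset != -1)
		reg->vsxval[offset] = gpr;
}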
990 static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu, in kvmppc_get_vmx_offset_generic() argument
999 if (kvmppc_need_byteswap(vcpu)) in kvmppc_get_vmx_offset_generic()
1007 static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu, in kvmppc_get_vmx_dword_offset() argument
1010 return kvmppc_get_vmx_offset_generic(vcpu, index, 8); in kvmppc_get_vmx_dword_offset()
1013 static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu, in kvmppc_get_vmx_word_offset() argument
1016 return kvmppc_get_vmx_offset_generic(vcpu, index, 4); in kvmppc_get_vmx_word_offset()
1019 static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu, in kvmppc_get_vmx_hword_offset() argument
1022 return kvmppc_get_vmx_offset_generic(vcpu, index, 2); in kvmppc_get_vmx_hword_offset()
1025 static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu, in kvmppc_get_vmx_byte_offset() argument
1028 return kvmppc_get_vmx_offset_generic(vcpu, index, 1); in kvmppc_get_vmx_byte_offset()
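The four wrappers above all funnel into kvmppc_get_vmx_offset_generic(), which converts an element index within a 128-bit VR into a lane offset, mirroring the index when guest and host disagree on endianness. A hedged userspace reconstruction of that logic:

#include <stdbool.h>

/* A 16-byte vector holds 16/element_size elements; for a cross-endian
 * guest (the kvmppc_need_byteswap() case) the index is reflected so
 * guest element 0 hits the right lane of the host image. */
static int vmx_offset_generic(bool need_byteswap, int index, int element_size)
{
	int elts = 16 / element_size;      /* sizeof(vector128) / size */

	if (index < 0 || index >= elts)
		return -1;

	return need_byteswap ? elts - index - 1 : index;
}

/* e.g. vmx_offset_generic(true, 0, 4) == 3 */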
1032 static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu, in kvmppc_set_vmx_dword() argument
1036 int offset = kvmppc_get_vmx_dword_offset(vcpu, in kvmppc_set_vmx_dword()
1037 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_dword()
1038 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_dword()
1043 kvmppc_get_vsx_vr(vcpu, index, &val.vval); in kvmppc_set_vmx_dword()
1045 kvmppc_set_vsx_vr(vcpu, index, &val.vval); in kvmppc_set_vmx_dword()
1048 static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu, in kvmppc_set_vmx_word() argument
1052 int offset = kvmppc_get_vmx_word_offset(vcpu, in kvmppc_set_vmx_word()
1053 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_word()
1054 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_word()
1059 kvmppc_get_vsx_vr(vcpu, index, &val.vval); in kvmppc_set_vmx_word()
1061 kvmppc_set_vsx_vr(vcpu, index, &val.vval); in kvmppc_set_vmx_word()
1064 static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu, in kvmppc_set_vmx_hword() argument
1068 int offset = kvmppc_get_vmx_hword_offset(vcpu, in kvmppc_set_vmx_hword()
1069 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_hword()
1070 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_hword()
1075 kvmppc_get_vsx_vr(vcpu, index, &val.vval); in kvmppc_set_vmx_hword()
1077 kvmppc_set_vsx_vr(vcpu, index, &val.vval); in kvmppc_set_vmx_hword()
1080 static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu, in kvmppc_set_vmx_byte() argument
1084 int offset = kvmppc_get_vmx_byte_offset(vcpu, in kvmppc_set_vmx_byte()
1085 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_byte()
1086 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_byte()
1091 kvmppc_get_vsx_vr(vcpu, index, &val.vval); in kvmppc_set_vmx_byte()
1093 kvmppc_set_vsx_vr(vcpu, index, &val.vval); in kvmppc_set_vmx_byte()
1127 static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu) in kvmppc_complete_mmio_load() argument
1129 struct kvm_run *run = vcpu->run; in kvmppc_complete_mmio_load()
1135 if (!vcpu->arch.mmio_host_swabbed) { in kvmppc_complete_mmio_load()
1152 if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4)) in kvmppc_complete_mmio_load()
1155 if (vcpu->arch.mmio_sign_extend) { in kvmppc_complete_mmio_load()
1171 switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) { in kvmppc_complete_mmio_load()
1173 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); in kvmppc_complete_mmio_load()
1176 if (vcpu->kvm->arch.kvm_ops->giveup_ext) in kvmppc_complete_mmio_load()
1177 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP); in kvmppc_complete_mmio_load()
1179 kvmppc_set_fpr(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK, gpr); in kvmppc_complete_mmio_load()
1183 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; in kvmppc_complete_mmio_load()
1186 kvmppc_set_fpr(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK, gpr); in kvmppc_complete_mmio_load()
1187 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; in kvmppc_complete_mmio_load()
1192 if (vcpu->kvm->arch.kvm_ops->giveup_ext) in kvmppc_complete_mmio_load()
1193 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX); in kvmppc_complete_mmio_load()
1195 if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD) in kvmppc_complete_mmio_load()
1196 kvmppc_set_vsr_dword(vcpu, gpr); in kvmppc_complete_mmio_load()
1197 else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD) in kvmppc_complete_mmio_load()
1198 kvmppc_set_vsr_word(vcpu, gpr); in kvmppc_complete_mmio_load()
1199 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1201 kvmppc_set_vsr_dword_dump(vcpu, gpr); in kvmppc_complete_mmio_load()
1202 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1204 kvmppc_set_vsr_word_dump(vcpu, gpr); in kvmppc_complete_mmio_load()
1209 if (vcpu->kvm->arch.kvm_ops->giveup_ext) in kvmppc_complete_mmio_load()
1210 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC); in kvmppc_complete_mmio_load()
1212 if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD) in kvmppc_complete_mmio_load()
1213 kvmppc_set_vmx_dword(vcpu, gpr); in kvmppc_complete_mmio_load()
1214 else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD) in kvmppc_complete_mmio_load()
1215 kvmppc_set_vmx_word(vcpu, gpr); in kvmppc_complete_mmio_load()
1216 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1218 kvmppc_set_vmx_hword(vcpu, gpr); in kvmppc_complete_mmio_load()
1219 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1221 kvmppc_set_vmx_byte(vcpu, gpr); in kvmppc_complete_mmio_load()
1226 if (kvmppc_need_byteswap(vcpu)) in kvmppc_complete_mmio_load()
1228 kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr, in kvmppc_complete_mmio_load()
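The switch above works because vcpu->arch.io_gpr is not a plain register number: the low bits (KVM_MMIO_REG_MASK) index a register file, and the high bits (KVM_MMIO_REG_EXT_MASK) select which file (GPR, FPR, QPR, VSX, VMX, nested GPR). An illustrative model of that tagged routing; the tag values here are hypothetical, the kernel's live in its arch headers:

#include <stdint.h>

#define REG_MASK  0x003f   /* low bits: register index (hypothetical) */
#define TAG_GPR   0x0000   /* tag values are hypothetical too */
#define TAG_FPR   0x0040
#define TAG_VSX   0x0080
#define TAG_VMX   0x00c0

static void route_mmio_load(uint32_t io_gpr, uint64_t gpr)
{
	int idx = io_gpr & REG_MASK;

	switch (io_gpr & ~REG_MASK) {
	case TAG_GPR: /* kvmppc_set_gpr(vcpu, idx, gpr)     */ break;
	case TAG_FPR: /* kvmppc_set_fpr(vcpu, idx, gpr)     */ break;
	case TAG_VSX: /* VSX copy-type dispatch, as above   */ break;
	case TAG_VMX: /* VMX copy-type dispatch, as above   */ break;
	}
	(void)idx; (void)gpr;
}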
1237 static int __kvmppc_handle_load(struct kvm_vcpu *vcpu, in __kvmppc_handle_load() argument
1241 struct kvm_run *run = vcpu->run; in __kvmppc_handle_load()
1246 if (kvmppc_need_byteswap(vcpu)) { in __kvmppc_handle_load()
1255 run->mmio.phys_addr = vcpu->arch.paddr_accessed; in __kvmppc_handle_load()
1259 vcpu->arch.io_gpr = rt; in __kvmppc_handle_load()
1260 vcpu->arch.mmio_host_swabbed = host_swabbed; in __kvmppc_handle_load()
1261 vcpu->mmio_needed = 1; in __kvmppc_handle_load()
1262 vcpu->mmio_is_write = 0; in __kvmppc_handle_load()
1263 vcpu->arch.mmio_sign_extend = sign_extend; in __kvmppc_handle_load()
1265 idx = srcu_read_lock(&vcpu->kvm->srcu); in __kvmppc_handle_load()
1267 ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr, in __kvmppc_handle_load()
1270 srcu_read_unlock(&vcpu->kvm->srcu, idx); in __kvmppc_handle_load()
1273 kvmppc_complete_mmio_load(vcpu); in __kvmppc_handle_load()
1274 vcpu->mmio_needed = 0; in __kvmppc_handle_load()
1281 int kvmppc_handle_load(struct kvm_vcpu *vcpu, in kvmppc_handle_load() argument
1285 return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 0); in kvmppc_handle_load()
1290 int kvmppc_handle_loads(struct kvm_vcpu *vcpu, in kvmppc_handle_loads() argument
1294 return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 1); in kvmppc_handle_loads()
1298 int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu, in kvmppc_handle_vsx_load() argument
1305 if (vcpu->arch.mmio_vsx_copy_nums > 4) in kvmppc_handle_vsx_load()
1308 while (vcpu->arch.mmio_vsx_copy_nums) { in kvmppc_handle_vsx_load()
1309 emulated = __kvmppc_handle_load(vcpu, rt, bytes, in kvmppc_handle_vsx_load()
1315 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vsx_load()
1317 vcpu->arch.mmio_vsx_copy_nums--; in kvmppc_handle_vsx_load()
1318 vcpu->arch.mmio_vsx_offset++; in kvmppc_handle_vsx_load()
1324 int kvmppc_handle_store(struct kvm_vcpu *vcpu, in kvmppc_handle_store() argument
1327 struct kvm_run *run = vcpu->run; in kvmppc_handle_store()
1333 if (kvmppc_need_byteswap(vcpu)) { in kvmppc_handle_store()
1342 run->mmio.phys_addr = vcpu->arch.paddr_accessed; in kvmppc_handle_store()
1345 vcpu->mmio_needed = 1; in kvmppc_handle_store()
1346 vcpu->mmio_is_write = 1; in kvmppc_handle_store()
1348 if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4)) in kvmppc_handle_store()
1368 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvmppc_handle_store()
1370 ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr, in kvmppc_handle_store()
1373 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvmppc_handle_store()
1376 vcpu->mmio_needed = 0; in kvmppc_handle_store()
1385 static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val) in kvmppc_get_vsr_data() argument
1390 int copy_type = vcpu->arch.mmio_copy_type; in kvmppc_get_vsr_data()
1396 kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_get_vsr_data()
1404 *val = kvmppc_get_vsx_fpr(vcpu, rs, vsx_offset); in kvmppc_get_vsr_data()
1406  kvmppc_get_vsx_vr(vcpu, rs - 32, &reg.vval); in kvmppc_get_vsr_data()
1413 kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_get_vsr_data()
1423 reg.vsxval[0] = kvmppc_get_vsx_fpr(vcpu, rs, dword_offset); in kvmppc_get_vsr_data()
1426  kvmppc_get_vsx_vr(vcpu, rs - 32, &reg.vval); in kvmppc_get_vsr_data()
1439 int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu, in kvmppc_handle_vsx_store() argument
1445 vcpu->arch.io_gpr = rs; in kvmppc_handle_vsx_store()
1448 if (vcpu->arch.mmio_vsx_copy_nums > 4) in kvmppc_handle_vsx_store()
1451 while (vcpu->arch.mmio_vsx_copy_nums) { in kvmppc_handle_vsx_store()
1452 if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1) in kvmppc_handle_vsx_store()
1455 emulated = kvmppc_handle_store(vcpu, in kvmppc_handle_vsx_store()
1461 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vsx_store()
1463 vcpu->arch.mmio_vsx_copy_nums--; in kvmppc_handle_vsx_store()
1464 vcpu->arch.mmio_vsx_offset++; in kvmppc_handle_vsx_store()
1470 static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu) in kvmppc_emulate_mmio_vsx_loadstore() argument
1472 struct kvm_run *run = vcpu->run; in kvmppc_emulate_mmio_vsx_loadstore()
1476 vcpu->arch.paddr_accessed += run->mmio.len; in kvmppc_emulate_mmio_vsx_loadstore()
1478 if (!vcpu->mmio_is_write) { in kvmppc_emulate_mmio_vsx_loadstore()
1479 emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr, in kvmppc_emulate_mmio_vsx_loadstore()
1480 run->mmio.len, 1, vcpu->arch.mmio_sign_extend); in kvmppc_emulate_mmio_vsx_loadstore()
1482 emulated = kvmppc_handle_vsx_store(vcpu, in kvmppc_emulate_mmio_vsx_loadstore()
1483 vcpu->arch.io_gpr, run->mmio.len, 1); in kvmppc_emulate_mmio_vsx_loadstore()
1506 int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu, in kvmppc_handle_vmx_load() argument
1511 if (vcpu->arch.mmio_vmx_copy_nums > 2) in kvmppc_handle_vmx_load()
1514 while (vcpu->arch.mmio_vmx_copy_nums) { in kvmppc_handle_vmx_load()
1515 emulated = __kvmppc_handle_load(vcpu, rt, bytes, in kvmppc_handle_vmx_load()
1521 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vmx_load()
1522 vcpu->arch.mmio_vmx_copy_nums--; in kvmppc_handle_vmx_load()
1523 vcpu->arch.mmio_vmx_offset++; in kvmppc_handle_vmx_load()
1529 static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val) in kvmppc_get_vmx_dword() argument
1536 kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_dword()
1541  kvmppc_get_vsx_vr(vcpu, index, &reg.vval); in kvmppc_get_vmx_dword()
1547 static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val) in kvmppc_get_vmx_word() argument
1554 kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_word()
1559  kvmppc_get_vsx_vr(vcpu, index, &reg.vval); in kvmppc_get_vmx_word()
1565 static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val) in kvmppc_get_vmx_hword() argument
1572 kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_hword()
1577  kvmppc_get_vsx_vr(vcpu, index, &reg.vval); in kvmppc_get_vmx_hword()
1583 static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val) in kvmppc_get_vmx_byte() argument
1590 kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_byte()
1595  kvmppc_get_vsx_vr(vcpu, index, &reg.vval); in kvmppc_get_vmx_byte()
1601 int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu, in kvmppc_handle_vmx_store() argument
1608 if (vcpu->arch.mmio_vmx_copy_nums > 2) in kvmppc_handle_vmx_store()
1611 vcpu->arch.io_gpr = rs; in kvmppc_handle_vmx_store()
1613 while (vcpu->arch.mmio_vmx_copy_nums) { in kvmppc_handle_vmx_store()
1614 switch (vcpu->arch.mmio_copy_type) { in kvmppc_handle_vmx_store()
1616 if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1) in kvmppc_handle_vmx_store()
1621 if (kvmppc_get_vmx_word(vcpu, index, &val) == -1) in kvmppc_handle_vmx_store()
1625 if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1) in kvmppc_handle_vmx_store()
1629 if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1) in kvmppc_handle_vmx_store()
1636 emulated = kvmppc_handle_store(vcpu, val, bytes, in kvmppc_handle_vmx_store()
1641 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vmx_store()
1642 vcpu->arch.mmio_vmx_copy_nums--; in kvmppc_handle_vmx_store()
1643 vcpu->arch.mmio_vmx_offset++; in kvmppc_handle_vmx_store()
1649 static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu) in kvmppc_emulate_mmio_vmx_loadstore() argument
1651 struct kvm_run *run = vcpu->run; in kvmppc_emulate_mmio_vmx_loadstore()
1655 vcpu->arch.paddr_accessed += run->mmio.len; in kvmppc_emulate_mmio_vmx_loadstore()
1657 if (!vcpu->mmio_is_write) { in kvmppc_emulate_mmio_vmx_loadstore()
1658 emulated = kvmppc_handle_vmx_load(vcpu, in kvmppc_emulate_mmio_vmx_loadstore()
1659 vcpu->arch.io_gpr, run->mmio.len, 1); in kvmppc_emulate_mmio_vmx_loadstore()
1661 emulated = kvmppc_handle_vmx_store(vcpu, in kvmppc_emulate_mmio_vmx_loadstore()
1662 vcpu->arch.io_gpr, run->mmio.len, 1); in kvmppc_emulate_mmio_vmx_loadstore()
1684 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) in kvm_vcpu_ioctl_get_one_reg() argument
1694 r = kvmppc_get_one_reg(vcpu, reg->id, &val); in kvm_vcpu_ioctl_get_one_reg()
1704 kvmppc_get_vsx_vr(vcpu, reg->id - KVM_REG_PPC_VR0, &val.vval); in kvm_vcpu_ioctl_get_one_reg()
1711 val = get_reg_val(reg->id, kvmppc_get_vscr(vcpu)); in kvm_vcpu_ioctl_get_one_reg()
1714 val = get_reg_val(reg->id, kvmppc_get_vrsave(vcpu)); in kvm_vcpu_ioctl_get_one_reg()
1732 int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) in kvm_vcpu_ioctl_set_one_reg() argument
1745 r = kvmppc_set_one_reg(vcpu, reg->id, &val); in kvm_vcpu_ioctl_set_one_reg()
1755 kvmppc_set_vsx_vr(vcpu, reg->id - KVM_REG_PPC_VR0, &val.vval); in kvm_vcpu_ioctl_set_one_reg()
1762 kvmppc_set_vscr(vcpu, set_reg_val(reg->id, val)); in kvm_vcpu_ioctl_set_one_reg()
1769 kvmppc_set_vrsave(vcpu, set_reg_val(reg->id, val)); in kvm_vcpu_ioctl_set_one_reg()
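These two handlers implement the generic ONE_REG interface: userspace passes a 64-bit register ID plus a pointer, with the transfer size encoded in the ID itself. A minimal userspace write of the VRSAVE register shown above, assuming an open vcpu fd (PPC register IDs come from asm/kvm.h, pulled in by linux/kvm.h on powerpc):

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>
#include <stdio.h>

static int set_vrsave(int vcpu_fd, uint32_t value)
{
	uint32_t buf = value;              /* VRSAVE is a U32-sized reg */
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_VRSAVE,
		.addr = (uintptr_t)&buf,
	};

	if (ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg) < 0) {
		perror("KVM_SET_ONE_REG");
		return -1;
	}
	return 0;
}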
1781 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_ioctl_run() argument
1783 struct kvm_run *run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
1786 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_run()
1788 if (vcpu->mmio_needed) { in kvm_arch_vcpu_ioctl_run()
1789 vcpu->mmio_needed = 0; in kvm_arch_vcpu_ioctl_run()
1790 if (!vcpu->mmio_is_write) in kvm_arch_vcpu_ioctl_run()
1791 kvmppc_complete_mmio_load(vcpu); in kvm_arch_vcpu_ioctl_run()
1793 if (vcpu->arch.mmio_vsx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1794 vcpu->arch.mmio_vsx_copy_nums--; in kvm_arch_vcpu_ioctl_run()
1795 vcpu->arch.mmio_vsx_offset++; in kvm_arch_vcpu_ioctl_run()
1798 if (vcpu->arch.mmio_vsx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1799 r = kvmppc_emulate_mmio_vsx_loadstore(vcpu); in kvm_arch_vcpu_ioctl_run()
1801 vcpu->mmio_needed = 1; in kvm_arch_vcpu_ioctl_run()
1807 if (vcpu->arch.mmio_vmx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1808 vcpu->arch.mmio_vmx_copy_nums--; in kvm_arch_vcpu_ioctl_run()
1809 vcpu->arch.mmio_vmx_offset++; in kvm_arch_vcpu_ioctl_run()
1812 if (vcpu->arch.mmio_vmx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1813 r = kvmppc_emulate_mmio_vmx_loadstore(vcpu); in kvm_arch_vcpu_ioctl_run()
1815 vcpu->mmio_needed = 1; in kvm_arch_vcpu_ioctl_run()
1820 } else if (vcpu->arch.osi_needed) { in kvm_arch_vcpu_ioctl_run()
1825 kvmppc_set_gpr(vcpu, i, gprs[i]); in kvm_arch_vcpu_ioctl_run()
1826 vcpu->arch.osi_needed = 0; in kvm_arch_vcpu_ioctl_run()
1827 } else if (vcpu->arch.hcall_needed) { in kvm_arch_vcpu_ioctl_run()
1830 kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret); in kvm_arch_vcpu_ioctl_run()
1832 kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]); in kvm_arch_vcpu_ioctl_run()
1833 vcpu->arch.hcall_needed = 0; in kvm_arch_vcpu_ioctl_run()
1835 } else if (vcpu->arch.epr_needed) { in kvm_arch_vcpu_ioctl_run()
1836 kvmppc_set_epr(vcpu, run->epr.epr); in kvm_arch_vcpu_ioctl_run()
1837 vcpu->arch.epr_needed = 0; in kvm_arch_vcpu_ioctl_run()
1841 kvm_sigset_activate(vcpu); in kvm_arch_vcpu_ioctl_run()
1843 if (!vcpu->wants_to_run) in kvm_arch_vcpu_ioctl_run()
1846 r = kvmppc_vcpu_run(vcpu); in kvm_arch_vcpu_ioctl_run()
1848 kvm_sigset_deactivate(vcpu); in kvm_arch_vcpu_ioctl_run()
1861 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_run()
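Note the ordering inside the run ioctl: all pending completion work (MMIO load write-back, remaining VSX/VMX copies, OSI/hcall/EPR return values) happens before the wants_to_run check, so userspace can harvest results without re-entering the guest. In recent kernels vcpu->wants_to_run reflects !run->immediate_exit, which gives a completion-only KVM_RUN:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* complete pending MMIO/hcall state, but do not enter the guest;
 * in this mode KVM_RUN returns -1 with errno == EINTR */
static void complete_pending_only(int vcpu_fd, struct kvm_run *run)
{
	run->immediate_exit = 1;
	ioctl(vcpu_fd, KVM_RUN, 0);
	run->immediate_exit = 0;
}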
1865 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) in kvm_vcpu_ioctl_interrupt() argument
1868 kvmppc_core_dequeue_external(vcpu); in kvm_vcpu_ioctl_interrupt()
1872 kvmppc_core_queue_external(vcpu, irq); in kvm_vcpu_ioctl_interrupt()
1874 kvm_vcpu_kick(vcpu); in kvm_vcpu_ioctl_interrupt()
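This is the handler behind the KVM_INTERRUPT vcpu ioctl. On PPC the payload is a single line state; KVM_INTERRUPT_UNSET is what the dequeue branch above services. A minimal userspace caller, assuming the PPC values from asm/kvm.h:

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int set_external_irq(int vcpu_fd, int raise)
{
	struct kvm_interrupt irq = {
		.irq = raise ? KVM_INTERRUPT_SET : KVM_INTERRUPT_UNSET,
	};

	return ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
}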
1879 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_enable_cap() argument
1890 vcpu->arch.osi_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1894 vcpu->arch.papr_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1899 vcpu->arch.epr_flags |= KVMPPC_EPR_USER; in kvm_vcpu_ioctl_enable_cap()
1901 vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER; in kvm_vcpu_ioctl_enable_cap()
1906 vcpu->arch.watchdog_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1918 r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg); in kvm_vcpu_ioctl_enable_cap()
1934 r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]); in kvm_vcpu_ioctl_enable_cap()
1952 r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]); in kvm_vcpu_ioctl_enable_cap()
1954 r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]); in kvm_vcpu_ioctl_enable_cap()
1975 r = kvmppc_xive_native_connect_vcpu(dev, vcpu, in kvm_vcpu_ioctl_enable_cap()
1983 if (!is_kvmppc_hv_enabled(vcpu->kvm)) in kvm_vcpu_ioctl_enable_cap()
1986 vcpu->kvm->arch.fwnmi_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1995 r = kvmppc_sanity_check(vcpu); in kvm_vcpu_ioctl_enable_cap()
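The per-vcpu capabilities above (OSI, PAPR, EPR, watchdog, in-kernel irqchip connections) are all toggled through the KVM_ENABLE_CAP ioctl on the vcpu fd; the kvmppc_sanity_check() call at the end records whether the resulting combination is valid (vcpu->arch.sane, set by the function listed earlier). A minimal caller:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int enable_papr(int vcpu_fd)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_PPC_PAPR;    /* sets vcpu->arch.papr_enabled */

	return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}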
2013 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_mpstate() argument
2019 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_mpstate() argument
2028 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_unlocked_ioctl() local
2035 return kvm_vcpu_ioctl_interrupt(vcpu, &irq); in kvm_arch_vcpu_unlocked_ioctl()
2043 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl() local
2054 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl()
2055 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); in kvm_arch_vcpu_ioctl()
2056 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl()
2068  r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg); in kvm_arch_vcpu_ioctl()
2070  r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg); in kvm_arch_vcpu_ioctl()
2080 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl()
2081 r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty); in kvm_arch_vcpu_ioctl()
2082 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl()
2094 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) in kvm_arch_vcpu_fault() argument
2515 void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry) in kvm_arch_create_vcpu_debugfs() argument
2517 if (vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs) in kvm_arch_create_vcpu_debugfs()
2518 vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs(vcpu, debugfs_dentry); in kvm_arch_create_vcpu_debugfs()