Lines Matching full:arch

52 	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);  in kvm_arch_vcpu_runnable()
144 struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared; in kvmppc_swab_shared()
186 if (vcpu->arch.intr_msr & MSR_LE) in kvmppc_kvm_pv()
188 if (shared_big_endian != vcpu->arch.shared_big_endian) in kvmppc_kvm_pv()
190 vcpu->arch.shared_big_endian = shared_big_endian; in kvmppc_kvm_pv()
199 vcpu->arch.disable_kernel_nx = true; in kvmppc_kvm_pv()
203 vcpu->arch.magic_page_pa = param1 & ~0xfffULL; in kvmppc_kvm_pv()
204 vcpu->arch.magic_page_ea = param2 & ~0xfffULL; in kvmppc_kvm_pv()
211 if ((vcpu->arch.magic_page_pa & 0xf000) != in kvmppc_kvm_pv()
212 ((ulong)vcpu->arch.shared & 0xf000)) { in kvmppc_kvm_pv()
213 void *old_shared = vcpu->arch.shared; in kvmppc_kvm_pv()
214 ulong shared = (ulong)vcpu->arch.shared; in kvmppc_kvm_pv()
218 shared |= vcpu->arch.magic_page_pa & 0xf000; in kvmppc_kvm_pv()
221 vcpu->arch.shared = new_shared; in kvmppc_kvm_pv()
258 if (!vcpu->arch.pvr) in kvmppc_sanity_check()
262 if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled) in kvmppc_sanity_check()
266 if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm)) in kvmppc_sanity_check()
277 vcpu->arch.sane = r; in kvmppc_sanity_check()
326 vcpu->arch.vaddr_accessed, dsisr); in kvmppc_emulate_mmio()
351 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK; in kvmppc_st()
357 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr) in kvmppc_st()
358 r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr, in kvmppc_st()
378 void *magic = vcpu->arch.shared; in kvmppc_st()
394 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK; in kvmppc_ld()
400 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr) in kvmppc_ld()
401 rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr, in kvmppc_ld()
424 void *magic = vcpu->arch.shared; in kvmppc_ld()
469 kvm->arch.kvm_ops = kvm_ops; in kvm_arch_init_vm()
499 module_put(kvm->arch.kvm_ops->owner); in kvm_arch_destroy_vm()
591 if (kvm->arch.emul_smt_mode > 1) in kvm_vm_ioctl_check_extension()
592 r = kvm->arch.emul_smt_mode; in kvm_vm_ioctl_check_extension()
594 r = kvm->arch.smt_mode; in kvm_vm_ioctl_check_extension()
756 vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer); in kvmppc_decrementer_wakeup()
766 hrtimer_setup(&vcpu->arch.dec_timer, kvmppc_decrementer_wakeup, CLOCK_REALTIME, in kvm_arch_vcpu_create()
770 mutex_init(&vcpu->arch.exit_timing_lock); in kvm_arch_vcpu_create()
780 rcuwait_init(&vcpu->arch.wait); in kvm_arch_vcpu_create()
781 vcpu->arch.waitp = &vcpu->arch.wait; in kvm_arch_vcpu_create()
796 hrtimer_cancel(&vcpu->arch.dec_timer); in kvm_arch_vcpu_destroy()
798 switch (vcpu->arch.irq_type) { in kvm_arch_vcpu_destroy()
800 kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu); in kvm_arch_vcpu_destroy()
833 mtspr(SPRN_VRSAVE, vcpu->arch.vrsave); in kvm_arch_vcpu_load()
842 vcpu->arch.vrsave = mfspr(SPRN_VRSAVE); in kvm_arch_vcpu_put()
865 if (kvm->arch.kvm_ops->irq_bypass_add_producer) in kvm_arch_irq_bypass_add_producer()
866 return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod); in kvm_arch_irq_bypass_add_producer()
878 if (kvm->arch.kvm_ops->irq_bypass_del_producer) in kvm_arch_irq_bypass_del_producer()
879 kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod); in kvm_arch_irq_bypass_del_producer()
918 int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_set_vsr_dword()
919 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_dword()
937 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_dword_dump()
954 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_word_dump()
974 int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_set_vsr_word()
975 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_word()
1043 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_dword()
1044 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_dword()
1059 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_word()
1060 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_word()
1075 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_hword()
1076 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_hword()
1091 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_byte()
1092 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_byte()
1141 if (!vcpu->arch.mmio_host_swabbed) { in kvmppc_complete_mmio_load()
1158 if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4)) in kvmppc_complete_mmio_load()
1161 if (vcpu->arch.mmio_sign_extend) { in kvmppc_complete_mmio_load()
1177 switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) { in kvmppc_complete_mmio_load()
1179 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); in kvmppc_complete_mmio_load()
1182 if (vcpu->kvm->arch.kvm_ops->giveup_ext) in kvmppc_complete_mmio_load()
1183 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP); in kvmppc_complete_mmio_load()
1185 kvmppc_set_fpr(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK, gpr); in kvmppc_complete_mmio_load()
1189 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; in kvmppc_complete_mmio_load()
1192 kvmppc_set_fpr(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK, gpr); in kvmppc_complete_mmio_load()
1193 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; in kvmppc_complete_mmio_load()
1198 if (vcpu->kvm->arch.kvm_ops->giveup_ext) in kvmppc_complete_mmio_load()
1199 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX); in kvmppc_complete_mmio_load()
1201 if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD) in kvmppc_complete_mmio_load()
1203 else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD) in kvmppc_complete_mmio_load()
1205 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1208 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1215 if (vcpu->kvm->arch.kvm_ops->giveup_ext) in kvmppc_complete_mmio_load()
1216 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC); in kvmppc_complete_mmio_load()
1218 if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD) in kvmppc_complete_mmio_load()
1220 else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD) in kvmppc_complete_mmio_load()
1222 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1225 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1234 kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr, in kvmppc_complete_mmio_load()
1261 run->mmio.phys_addr = vcpu->arch.paddr_accessed; in __kvmppc_handle_load()
1265 vcpu->arch.io_gpr = rt; in __kvmppc_handle_load()
1266 vcpu->arch.mmio_host_swabbed = host_swabbed; in __kvmppc_handle_load()
1269 vcpu->arch.mmio_sign_extend = sign_extend; in __kvmppc_handle_load()
1311 if (vcpu->arch.mmio_vsx_copy_nums > 4) in kvmppc_handle_vsx_load()
1314 while (vcpu->arch.mmio_vsx_copy_nums) { in kvmppc_handle_vsx_load()
1321 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vsx_load()
1323 vcpu->arch.mmio_vsx_copy_nums--; in kvmppc_handle_vsx_load()
1324 vcpu->arch.mmio_vsx_offset++; in kvmppc_handle_vsx_load()
1348 run->mmio.phys_addr = vcpu->arch.paddr_accessed; in kvmppc_handle_store()
1354 if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4)) in kvmppc_handle_store()
1396 int copy_type = vcpu->arch.mmio_copy_type; in kvmppc_get_vsr_data()
1402 kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_get_vsr_data()
1419 kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_get_vsr_data()
1451 vcpu->arch.io_gpr = rs; in kvmppc_handle_vsx_store()
1454 if (vcpu->arch.mmio_vsx_copy_nums > 4) in kvmppc_handle_vsx_store()
1457 while (vcpu->arch.mmio_vsx_copy_nums) { in kvmppc_handle_vsx_store()
1467 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vsx_store()
1469 vcpu->arch.mmio_vsx_copy_nums--; in kvmppc_handle_vsx_store()
1470 vcpu->arch.mmio_vsx_offset++; in kvmppc_handle_vsx_store()
1482 vcpu->arch.paddr_accessed += run->mmio.len; in kvmppc_emulate_mmio_vsx_loadstore()
1485 emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr, in kvmppc_emulate_mmio_vsx_loadstore()
1486 run->mmio.len, 1, vcpu->arch.mmio_sign_extend); in kvmppc_emulate_mmio_vsx_loadstore()
1489 vcpu->arch.io_gpr, run->mmio.len, 1); in kvmppc_emulate_mmio_vsx_loadstore()
1517 if (vcpu->arch.mmio_vmx_copy_nums > 2) in kvmppc_handle_vmx_load()
1520 while (vcpu->arch.mmio_vmx_copy_nums) { in kvmppc_handle_vmx_load()
1527 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vmx_load()
1528 vcpu->arch.mmio_vmx_copy_nums--; in kvmppc_handle_vmx_load()
1529 vcpu->arch.mmio_vmx_offset++; in kvmppc_handle_vmx_load()
1542 kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_dword()
1560 kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_word()
1578 kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_hword()
1596 kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_byte()
1614 if (vcpu->arch.mmio_vmx_copy_nums > 2) in kvmppc_handle_vmx_store()
1617 vcpu->arch.io_gpr = rs; in kvmppc_handle_vmx_store()
1619 while (vcpu->arch.mmio_vmx_copy_nums) { in kvmppc_handle_vmx_store()
1620 switch (vcpu->arch.mmio_copy_type) { in kvmppc_handle_vmx_store()
1647 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vmx_store()
1648 vcpu->arch.mmio_vmx_copy_nums--; in kvmppc_handle_vmx_store()
1649 vcpu->arch.mmio_vmx_offset++; in kvmppc_handle_vmx_store()
1661 vcpu->arch.paddr_accessed += run->mmio.len; in kvmppc_emulate_mmio_vmx_loadstore()
1665 vcpu->arch.io_gpr, run->mmio.len, 1); in kvmppc_emulate_mmio_vmx_loadstore()
1668 vcpu->arch.io_gpr, run->mmio.len, 1); in kvmppc_emulate_mmio_vmx_loadstore()
1799 if (vcpu->arch.mmio_vsx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1800 vcpu->arch.mmio_vsx_copy_nums--; in kvm_arch_vcpu_ioctl_run()
1801 vcpu->arch.mmio_vsx_offset++; in kvm_arch_vcpu_ioctl_run()
1804 if (vcpu->arch.mmio_vsx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1813 if (vcpu->arch.mmio_vmx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1814 vcpu->arch.mmio_vmx_copy_nums--; in kvm_arch_vcpu_ioctl_run()
1815 vcpu->arch.mmio_vmx_offset++; in kvm_arch_vcpu_ioctl_run()
1818 if (vcpu->arch.mmio_vmx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1826 } else if (vcpu->arch.osi_needed) { in kvm_arch_vcpu_ioctl_run()
1832 vcpu->arch.osi_needed = 0; in kvm_arch_vcpu_ioctl_run()
1833 } else if (vcpu->arch.hcall_needed) { in kvm_arch_vcpu_ioctl_run()
1839 vcpu->arch.hcall_needed = 0; in kvm_arch_vcpu_ioctl_run()
1841 } else if (vcpu->arch.epr_needed) { in kvm_arch_vcpu_ioctl_run()
1843 vcpu->arch.epr_needed = 0; in kvm_arch_vcpu_ioctl_run()
1896 vcpu->arch.osi_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1900 vcpu->arch.papr_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1905 vcpu->arch.epr_flags |= KVMPPC_EPR_USER; in kvm_vcpu_ioctl_enable_cap()
1907 vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER; in kvm_vcpu_ioctl_enable_cap()
1912 vcpu->arch.watchdog_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1992 vcpu->kvm->arch.fwnmi_enabled = true; in kvm_vcpu_ioctl_enable_cap()
2009 if (kvm->arch.mpic) in kvm_arch_intc_initialized()
2013 if (kvm->arch.xics || kvm->arch.xive) in kvm_arch_intc_initialized()
2145 ret = ret || (kvm->arch.mpic != NULL); in kvm_arch_irqchip_in_kernel()
2148 ret = ret || (kvm->arch.xics != NULL); in kvm_arch_irqchip_in_kernel()
2149 ret = ret || (kvm->arch.xive != NULL); in kvm_arch_irqchip_in_kernel()
2188 set_bit(hcall / 4, kvm->arch.enabled_hcalls); in kvm_vm_ioctl_enable_cap()
2190 clear_bit(hcall / 4, kvm->arch.enabled_hcalls); in kvm_vm_ioctl_enable_cap()
2199 if (kvm->arch.kvm_ops->set_smt_mode) in kvm_vm_ioctl_enable_cap()
2200 r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags); in kvm_vm_ioctl_enable_cap()
2207 !kvm->arch.kvm_ops->enable_nested) in kvm_vm_ioctl_enable_cap()
2209 r = kvm->arch.kvm_ops->enable_nested(kvm); in kvm_vm_ioctl_enable_cap()
2215 if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_svm) in kvm_vm_ioctl_enable_cap()
2217 r = kvm->arch.kvm_ops->enable_svm(kvm); in kvm_vm_ioctl_enable_cap()
2221 if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_dawr1) in kvm_vm_ioctl_enable_cap()
2223 r = kvm->arch.kvm_ops->enable_dawr1(kvm); in kvm_vm_ioctl_enable_cap()
2418 r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info); in kvm_arch_vm_ioctl()
2434 if (!kvm->arch.kvm_ops->configure_mmu) in kvm_arch_vm_ioctl()
2439 r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg); in kvm_arch_vm_ioctl()
2447 if (!kvm->arch.kvm_ops->get_rmmu_info) in kvm_arch_vm_ioctl()
2449 r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info); in kvm_arch_vm_ioctl()
2466 if (!kvm->arch.kvm_ops->svm_off) in kvm_arch_vm_ioctl()
2469 r = kvm->arch.kvm_ops->svm_off(kvm); in kvm_arch_vm_ioctl()
2474 r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg); in kvm_arch_vm_ioctl()
2523 if (vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs) in kvm_arch_create_vcpu_debugfs()
2524 vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs(vcpu, debugfs_dentry); in kvm_arch_create_vcpu_debugfs()
2529 if (kvm->arch.kvm_ops->create_vm_debugfs) in kvm_arch_create_vm_debugfs()
2530 kvm->arch.kvm_ops->create_vm_debugfs(kvm); in kvm_arch_create_vm_debugfs()
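
The matches above repeatedly show one idiom: optional per-implementation hooks hung off kvm->arch.kvm_ops are null-checked before being invoked (see the irq_bypass_*, set_smt_mode, enable_nested, and create_vm_debugfs hits). The following is a minimal, self-contained C sketch of that guarded-dispatch pattern only; the struct layout and function names here are simplified stand-ins for illustration, not the kernel's actual definitions.

#include <stdio.h>

/* Simplified stand-ins for the ops table reached through kvm->arch.kvm_ops
 * in the listing above (hypothetical types, not the kernel's). */
struct kvm;

struct kvmppc_ops {
	/* Optional hook: an implementation may leave this NULL. */
	int (*enable_nested)(struct kvm *kvm);
};

struct kvm_arch {
	struct kvmppc_ops *kvm_ops;
};

struct kvm {
	struct kvm_arch arch;
};

/* Guarded dispatch: test that the hook exists before calling it, as the
 * kvm_vm_ioctl_enable_cap() and kvm_arch_create_vm_debugfs() matches do. */
static int demo_enable_nested(struct kvm *kvm)
{
	if (!kvm->arch.kvm_ops->enable_nested)
		return -22; /* stands in for -EINVAL */
	return kvm->arch.kvm_ops->enable_nested(kvm);
}

static int stub_enable_nested(struct kvm *kvm)
{
	(void)kvm;
	return 0;
}

int main(void)
{
	struct kvmppc_ops ops = { .enable_nested = stub_enable_nested };
	struct kvm vm = { .arch = { .kvm_ops = &ops } };

	printf("enable_nested -> %d\n", demo_enable_nested(&vm));
	return 0;
}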