1 // SPDX-License-Identifier: GPL-2.0-only
3 * Kernel-based Virtual Machine driver for Linux
16 * Ben-Ami Yassour <benami@il.ibm.com>
48 #include <linux/user-return-notifier.h>
107 ((struct kvm_vcpu *)(ctxt)->vcpu)
110 * - enable syscall per default because it's emulated by KVM
111 * - enable LME and LMA per default on 64 bit KVM
145 *(((struct kvm_x86_ops *)0)->func));
148 #include <asm/kvm-x86-ops.h>
165 /* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
174 * Flags to manipulate forced emulation behavior (any non-zero value will
181 int __read_mostly pi_inject_timer = -1;
323 * MSRs that KVM emulates without strictly requiring host support.
461 * List of MSRs that control the existence of MSR-based features, i.e. MSRs
474 (KVM_LAST_EMULATED_VMX_MSR - KVM_FIRST_EMULATED_VMX_MSR + 1)];
566 size - useroffset, NULL); in kvm_alloc_emulator_cache()
575 vcpu->arch.apf.gfns[i] = ~0; in kvm_async_pf_hash_reset()
591 if (msrs->registered) { in kvm_on_user_return()
592 msrs->registered = false; in kvm_on_user_return()
597 values = &msrs->values[slot]; in kvm_on_user_return()
598 if (values->host != values->curr) { in kvm_on_user_return()
599 wrmsrq(kvm_uret_msrs_list[slot], values->host); in kvm_on_user_return()
600 values->curr = values->host; in kvm_on_user_return()
625 return -1; in kvm_add_user_return_msr()
640 return -1; in kvm_find_user_return_msr()
652 msrs->values[i].host = value; in kvm_user_return_msr_cpu_online()
653 msrs->values[i].curr = value; in kvm_user_return_msr_cpu_online()
659 if (!msrs->registered) { in kvm_user_return_register_notifier()
660 msrs->urn.on_user_return = kvm_on_user_return; in kvm_user_return_register_notifier()
661 user_return_notifier_register(&msrs->urn); in kvm_user_return_register_notifier()
662 msrs->registered = true; in kvm_user_return_register_notifier()
671 value = (value & mask) | (msrs->values[slot].host & ~mask); in kvm_set_user_return_msr()
672 if (value == msrs->values[slot].curr) in kvm_set_user_return_msr()
678 msrs->values[slot].curr = value; in kvm_set_user_return_msr()
688 msrs->values[slot].curr = value; in kvm_user_return_msr_update_cache()
695 return this_cpu_ptr(user_return_msrs)->values[slot].curr; in kvm_get_user_return_msr()
703 if (msrs->registered) in drop_user_return_notifiers()
704 kvm_on_user_return(&msrs->urn); in drop_user_return_notifiers()
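/*
 * Editor's note: a minimal user-space model of the user-return MSR
 * bookkeeping shown in kvm_on_user_return() / kvm_set_user_return_msr()
 * above. KVM tracks, per CPU, the host value and the currently-loaded
 * value of each shared MSR, skips redundant WRMSRs, and registers a
 * notifier that restores host values on return to userspace. wrmsrq()
 * and user_return_notifier_register() are kernel-internal; they are
 * stubbed here, so this sketch only illustrates the bookkeeping.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_URET_MSRS 4

struct uret_msr { uint64_t host, curr; };

static struct uret_msr uret_msrs[NR_URET_MSRS];
static bool uret_registered;

static void wrmsr_stub(int slot, uint64_t val)  /* stands in for wrmsrq() */
{
	printf("slot %d <- %#llx\n", slot, (unsigned long long)val);
}

static void set_user_return_msr(int slot, uint64_t value, uint64_t mask)
{
	value = (value & mask) | (uret_msrs[slot].host & ~mask);
	if (value == uret_msrs[slot].curr)
		return;                 /* already loaded, skip the WRMSR */
	wrmsr_stub(slot, value);
	uret_msrs[slot].curr = value;
	uret_registered = true;         /* models the notifier registration */
}

static void on_user_return(void)    /* models kvm_on_user_return() */
{
	if (!uret_registered)
		return;
	uret_registered = false;
	for (int slot = 0; slot < NR_URET_MSRS; slot++) {
		if (uret_msrs[slot].curr != uret_msrs[slot].host) {
			wrmsr_stub(slot, uret_msrs[slot].host);
			uret_msrs[slot].curr = uret_msrs[slot].host;
		}
	}
}

int main(void)
{
	uret_msrs[0].host = uret_msrs[0].curr = 0x10;
	set_user_return_msr(0, 0xff, 0x0f); /* writes 0x1f: guest bits under mask */
	on_user_return();                   /* restores 0x10 */
	return 0;
}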
758 * #DBs can be trap-like or fault-like, the caller must check other CPU in exception_type()
777 if (!ex->has_payload) in kvm_deliver_exception_payload()
780 switch (ex->vector) { in kvm_deliver_exception_payload()
783 * "Certain debug exceptions may clear bit 0-3. The in kvm_deliver_exception_payload()
787 vcpu->arch.dr6 &= ~DR_TRAP_BITS; in kvm_deliver_exception_payload()
796 * Active low bits should be cleared if 1-setting in payload. in kvm_deliver_exception_payload()
797 * Active high bits should be set if 1-setting in payload. in kvm_deliver_exception_payload()
804 vcpu->arch.dr6 |= DR6_ACTIVE_LOW; in kvm_deliver_exception_payload()
805 vcpu->arch.dr6 |= ex->payload; in kvm_deliver_exception_payload()
806 vcpu->arch.dr6 ^= ex->payload & DR6_ACTIVE_LOW; in kvm_deliver_exception_payload()
814 vcpu->arch.dr6 &= ~BIT(12); in kvm_deliver_exception_payload()
817 vcpu->arch.cr2 = ex->payload; in kvm_deliver_exception_payload()
821 ex->has_payload = false; in kvm_deliver_exception_payload()
822 ex->payload = 0; in kvm_deliver_exception_payload()
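/*
 * Editor's note: stand-alone model of the DR6 payload merge above. DR6 has
 * "active low" bits that read as 1 when inactive (DR6_ACTIVE_LOW), while the
 * #DB payload encodes every bit as "1 == set", so merging needs the
 * OR-then-XOR dance. Constants match the kernel headers / SDM; the logic is
 * copied from kvm_deliver_exception_payload() above.
 */
#include <assert.h>
#include <stdint.h>

#define DR6_ACTIVE_LOW  0xffff0ff0ULL  /* DR6 value when nothing is set */
#define DR6_BT          (1ULL << 15)   /* active-high: reads 1 when set */
#define DR6_RTM         (1ULL << 16)   /* active-low: reads 0 when set */

static uint64_t merge_db_payload(uint64_t dr6, uint64_t payload)
{
	dr6 |= DR6_ACTIVE_LOW;            /* start from the quiescent state */
	dr6 |= payload;                   /* set the active-high payload bits */
	dr6 ^= payload & DR6_ACTIVE_LOW;  /* flip active-low payload bits to 0 */
	return dr6;
}

int main(void)
{
	uint64_t dr6 = merge_db_payload(0, DR6_BT | DR6_RTM);
	assert(dr6 & DR6_BT);             /* BT delivered as 1 */
	assert(!(dr6 & DR6_RTM));         /* RTM delivered as 0 (active low) */
	return 0;
}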
830 struct kvm_queued_exception *ex = &vcpu->arch.exception_vmexit; in kvm_queue_exception_vmexit()
832 ex->vector = vector; in kvm_queue_exception_vmexit()
833 ex->injected = false; in kvm_queue_exception_vmexit()
834 ex->pending = true; in kvm_queue_exception_vmexit()
835 ex->has_error_code = has_error_code; in kvm_queue_exception_vmexit()
836 ex->error_code = error_code; in kvm_queue_exception_vmexit()
837 ex->has_payload = has_payload; in kvm_queue_exception_vmexit()
838 ex->payload = payload; in kvm_queue_exception_vmexit()
851 * If the exception is destined for L2, morph it to a VM-Exit if L1 in kvm_multiple_exception()
855 kvm_x86_ops.nested_ops->is_exception_vmexit(vcpu, nr, error_code)) { in kvm_multiple_exception()
861 if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) { in kvm_multiple_exception()
863 vcpu->arch.exception.pending = true; in kvm_multiple_exception()
864 vcpu->arch.exception.injected = false; in kvm_multiple_exception()
866 vcpu->arch.exception.has_error_code = has_error; in kvm_multiple_exception()
867 vcpu->arch.exception.vector = nr; in kvm_multiple_exception()
868 vcpu->arch.exception.error_code = error_code; in kvm_multiple_exception()
869 vcpu->arch.exception.has_payload = has_payload; in kvm_multiple_exception()
870 vcpu->arch.exception.payload = payload; in kvm_multiple_exception()
873 &vcpu->arch.exception); in kvm_multiple_exception()
878 prev_nr = vcpu->arch.exception.vector; in kvm_multiple_exception()
880 /* triple fault -> shutdown */ in kvm_multiple_exception()
892 vcpu->arch.exception.injected = false; in kvm_multiple_exception()
893 vcpu->arch.exception.pending = false; in kvm_multiple_exception()
898 that instruction re-execution will regenerate lost in kvm_multiple_exception()
929 * On VM-Entry, an exception can be pending if and only if event in kvm_requeue_exception()
939 * re-checking is incorrect if _L1_ injected the exception, in which in kvm_requeue_exception()
944 vcpu->arch.exception.injected = true; in kvm_requeue_exception()
945 vcpu->arch.exception.has_error_code = has_error_code; in kvm_requeue_exception()
946 vcpu->arch.exception.vector = nr; in kvm_requeue_exception()
947 vcpu->arch.exception.error_code = error_code; in kvm_requeue_exception()
948 vcpu->arch.exception.has_payload = false; in kvm_requeue_exception()
949 vcpu->arch.exception.payload = 0; in kvm_requeue_exception()
977 ++vcpu->stat.pf_guest; in kvm_inject_page_fault()
980 * Async #PF in L2 is always forwarded to L1 as a VM-Exit regardless of in kvm_inject_page_fault()
983 if (is_guest_mode(vcpu) && fault->async_page_fault) in kvm_inject_page_fault()
985 true, fault->error_code, in kvm_inject_page_fault()
986 true, fault->address); in kvm_inject_page_fault()
988 kvm_queue_exception_e_p(vcpu, PF_VECTOR, fault->error_code, in kvm_inject_page_fault()
989 fault->address); in kvm_inject_page_fault()
996 WARN_ON_ONCE(fault->vector != PF_VECTOR); in kvm_inject_emulated_page_fault()
998 fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu : in kvm_inject_emulated_page_fault()
999 vcpu->arch.walk_mmu; in kvm_inject_emulated_page_fault()
1005 if ((fault->error_code & PFERR_PRESENT_MASK) && in kvm_inject_emulated_page_fault()
1006 !(fault->error_code & PFERR_RSVD_MASK)) in kvm_inject_emulated_page_fault()
1007 kvm_mmu_invalidate_addr(vcpu, fault_mmu, fault->address, in kvm_inject_emulated_page_fault()
1010 fault_mmu->inject_page_fault(vcpu, fault); in kvm_inject_emulated_page_fault()
1016 atomic_inc(&vcpu->arch.nmi_queued); in kvm_inject_nmi()
1050 return vcpu->arch.reserved_gpa_bits | rsvd_bits(5, 8) | rsvd_bits(1, 2); in pdptr_rsvd_bits()
1058 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in load_pdptrs()
1063 u64 pdpte[ARRAY_SIZE(mmu->pdptrs)]; in load_pdptrs()
1091 if (!tdp_enabled && memcmp(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs))) in load_pdptrs()
1092 kvm_mmu_free_roots(vcpu->kvm, mmu, KVM_MMU_ROOT_CURRENT); in load_pdptrs()
1094 memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs)); in load_pdptrs()
1097 vcpu->arch.pdptrs_from_userspace = false; in load_pdptrs()
1122 * CR0.WP is incorporated into the MMU role, but only for non-nested, in kvm_post_set_cr0()
1169 if ((vcpu->arch.efer & EFER_LME) && !is_paging(vcpu) && in kvm_set_cr0()
1180 if (!(vcpu->arch.efer & EFER_LME) && (cr0 & X86_CR0_PG) && in kvm_set_cr0()
1208 if (vcpu->arch.guest_state_protected) in kvm_load_guest_xsave_state()
1213 if (vcpu->arch.xcr0 != kvm_host.xcr0) in kvm_load_guest_xsave_state()
1214 xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); in kvm_load_guest_xsave_state()
1217 vcpu->arch.ia32_xss != kvm_host.xss) in kvm_load_guest_xsave_state()
1218 wrmsrq(MSR_IA32_XSS, vcpu->arch.ia32_xss); in kvm_load_guest_xsave_state()
1222 vcpu->arch.pkru != vcpu->arch.host_pkru && in kvm_load_guest_xsave_state()
1223 ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) || in kvm_load_guest_xsave_state()
1225 wrpkru(vcpu->arch.pkru); in kvm_load_guest_xsave_state()
1231 if (vcpu->arch.guest_state_protected) in kvm_load_host_xsave_state()
1235 ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) || in kvm_load_host_xsave_state()
1237 vcpu->arch.pkru = rdpkru(); in kvm_load_host_xsave_state()
1238 if (vcpu->arch.pkru != vcpu->arch.host_pkru) in kvm_load_host_xsave_state()
1239 wrpkru(vcpu->arch.host_pkru); in kvm_load_host_xsave_state()
1244 if (vcpu->arch.xcr0 != kvm_host.xcr0) in kvm_load_host_xsave_state()
1248 vcpu->arch.ia32_xss != kvm_host.xss) in kvm_load_host_xsave_state()
1258 return vcpu->arch.guest_supported_xcr0 & XFEATURE_MASK_USER_DYNAMIC; in kvm_guest_supported_xfd()
1265 u64 old_xcr0 = vcpu->arch.xcr0; in __kvm_set_xcr()
1281 valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP; in __kvm_set_xcr()
1300 vcpu->arch.xcr0 = xcr0; in __kvm_set_xcr()
1303 vcpu->arch.cpuid_dynamic_bits_dirty = true; in __kvm_set_xcr()
1333 * If CR4.PCIDE is changed 0 -> 1, there is no need to flush the TLB in kvm_post_set_cr4()
1347 * - CR4.PCIDE is changed from 1 to 0 in kvm_post_set_cr4()
1348 * - CR4.PGE is toggled in kvm_post_set_cr4()
1359 * - CR4.SMEP is changed from 0 to 1 in kvm_post_set_cr4()
1360 * - CR4.PAE is toggled in kvm_post_set_cr4()
1405 struct kvm_mmu *mmu = vcpu->arch.mmu; in kvm_invalidate_pcid()
1440 if (kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd) == pcid) in kvm_invalidate_pcid()
1443 kvm_mmu_free_roots(vcpu->kvm, mmu, roots_to_free); in kvm_invalidate_pcid()
1476 vcpu->arch.cr3 = cr3; in kvm_set_cr3()
1485 * and it's impossible to use a non-zero PCID when PCID is disabled, in kvm_set_cr3()
1502 vcpu->arch.cr8 = cr8; in kvm_set_cr8()
1512 return vcpu->arch.cr8; in kvm_get_cr8()
1520 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) { in kvm_update_dr0123()
1522 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; in kvm_update_dr0123()
1530 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) in kvm_update_dr7()
1531 dr7 = vcpu->arch.guest_debug_dr7; in kvm_update_dr7()
1533 dr7 = vcpu->arch.dr7; in kvm_update_dr7()
1535 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED; in kvm_update_dr7()
1537 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED; in kvm_update_dr7()
1555 size_t size = ARRAY_SIZE(vcpu->arch.db); in kvm_set_dr()
1559 vcpu->arch.db[array_index_nospec(dr, size)] = val; in kvm_set_dr()
1560 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) in kvm_set_dr()
1561 vcpu->arch.eff_db[dr] = val; in kvm_set_dr()
1567 vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu); in kvm_set_dr()
1573 vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1; in kvm_set_dr()
1584 size_t size = ARRAY_SIZE(vcpu->arch.db); in kvm_get_dr()
1588 return vcpu->arch.db[array_index_nospec(dr, size)]; in kvm_get_dr()
1591 return vcpu->arch.dr6; in kvm_get_dr()
1594 return vcpu->arch.dr7; in kvm_get_dr()
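/*
 * Editor's note: the db[] accesses above go through array_index_nospec() so
 * that a mispredicted bounds check cannot speculatively index past the
 * array. A user-space model of the branchless clamp (the kernel helper is
 * equivalent: it returns the index if in bounds, else 0):
 */
#include <stddef.h>
#include <stdint.h>

static size_t index_nospec(size_t idx, size_t size)
{
	size_t mask = (size_t)0 - (idx < size); /* all-ones if in bounds, else 0 */
	return idx & mask;
}

static uint64_t get_db(const uint64_t db[4], size_t dr)
{
	return db[index_nospec(dr, 4)];   /* never reads past db[3], even speculatively */
}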
1618 * 10 - MISC_PACKAGE_CTRLS
1619 * 11 - ENERGY_FILTERING_CTL
1620 * 12 - DOITM
1621 * 18 - FB_CLEAR_CTRL
1622 * 21 - XAPIC_DISABLE_STATUS
1623 * 23 - OVERCLOCKING_STATUS
1756 u64 old_efer = vcpu->arch.efer; in set_efer()
1757 u64 efer = msr_info->data; in set_efer()
1763 if (!msr_info->host_initiated) { in set_efer()
1768 (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) in set_efer()
1773 efer |= vcpu->arch.efer & EFER_LMA; in set_efer()
1801 struct kvm *kvm = vcpu->kvm; in kvm_msr_allowed()
1810 idx = srcu_read_lock(&kvm->srcu); in kvm_msr_allowed()
1812 msr_filter = srcu_dereference(kvm->arch.msr_filter, &kvm->srcu); in kvm_msr_allowed()
1818 allowed = msr_filter->default_allow; in kvm_msr_allowed()
1819 ranges = msr_filter->ranges; in kvm_msr_allowed()
1821 for (i = 0; i < msr_filter->count; i++) { in kvm_msr_allowed()
1828 allowed = test_bit(index - start, bitmap); in kvm_msr_allowed()
1834 srcu_read_unlock(&kvm->srcu, idx); in kvm_msr_allowed()
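/*
 * Editor's note: sketch of the range-based MSR filter lookup in
 * kvm_msr_allowed() above. Each range covers [base, base + nmsrs) with a
 * one-bit-per-MSR bitmap; the first hit overrides the default policy. The
 * struct below is a simplified stand-in for the real UAPI types.
 */
#include <stdbool.h>
#include <stdint.h>

struct msr_range {
	uint32_t base;          /* first MSR index covered */
	uint32_t nmsrs;         /* number of MSRs covered */
	const uint8_t *bitmap;  /* one bit per MSR, 1 == allowed */
};

static bool test_bit_u8(const uint8_t *bm, uint32_t nr)
{
	return bm[nr / 8] & (1u << (nr % 8));
}

static bool msr_allowed(uint32_t index, const struct msr_range *r,
			int count, bool default_allow)
{
	for (int i = 0; i < count; i++, r++) {
		if (index >= r->base && index - r->base < r->nmsrs)
			return test_bit_u8(r->bitmap, index - r->base);
	}
	return default_allow;   /* no range matched */
}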
1843 * Returns 0 on success, non-0 otherwise.
1864 * non-canonical address is written on Intel but not on in __kvm_set_msr()
1865 * AMD (which ignores the top 32-bits, because it does in __kvm_set_msr()
1866 * not implement 64-bit SYSENTER). in __kvm_set_msr()
1868 * 64-bit code should hence be able to write a non-canonical in __kvm_set_msr()
1870 * vmentry does not fail on Intel after writing a non-canonical in __kvm_set_msr()
1872 * invokes 64-bit SYSENTER. in __kvm_set_msr()
1891 * clear the bits. This ensures cross-vendor migration will in __kvm_set_msr()
1933 /* All SSP MSRs except MSR_IA32_INT_SSP_TAB must be 4-byte aligned */ in __kvm_set_msr()
1962 * Returns 0 on success, non-0 otherwise.
2056 if (!vcpu->run->msr.error) { in complete_userspace_rdmsr()
2057 kvm_rax_write(vcpu, (u32)vcpu->run->msr.data); in complete_userspace_rdmsr()
2058 kvm_rdx_write(vcpu, vcpu->run->msr.data >> 32); in complete_userspace_rdmsr()
2064 return complete_emulated_insn_gp(vcpu, vcpu->run->msr.error); in complete_emulated_msr_access()
2075 return kvm_x86_call(complete_emulated_msr)(vcpu, vcpu->run->msr.error); in complete_fast_msr_access()
2086 if (!vcpu->run->msr.error) in complete_fast_rdmsr_imm()
2087 kvm_register_write(vcpu, vcpu->arch.cui_rdmsr_imm_reg, in complete_fast_rdmsr_imm()
2088 vcpu->run->msr.data); in complete_fast_rdmsr_imm()
2113 if (!(vcpu->kvm->arch.user_space_msr_mask & msr_reason)) in kvm_msr_user_space()
2116 vcpu->run->exit_reason = exit_reason; in kvm_msr_user_space()
2117 vcpu->run->msr.error = 0; in kvm_msr_user_space()
2118 memset(vcpu->run->msr.pad, 0, sizeof(vcpu->run->msr.pad)); in kvm_msr_user_space()
2119 vcpu->run->msr.reason = msr_reason; in kvm_msr_user_space()
2120 vcpu->run->msr.index = index; in kvm_msr_user_space()
2121 vcpu->run->msr.data = data; in kvm_msr_user_space()
2122 vcpu->arch.complete_userspace_io = completion; in kvm_msr_user_space()
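/*
 * Editor's note: kvm_msr_user_space() above fills vcpu->run->msr and
 * defers the access to the VMM. A hedged sketch of the userspace side,
 * using the real KVM_EXIT_X86_RDMSR/WRMSR UAPI; emulate_rdmsr() is a
 * hypothetical VMM helper, not part of KVM.
 */
#include <linux/kvm.h>
#include <stdint.h>

static uint64_t emulate_rdmsr(uint32_t index) { (void)index; return 0; }

static void handle_msr_exit(struct kvm_run *run)
{
	switch (run->exit_reason) {
	case KVM_EXIT_X86_RDMSR:
		run->msr.data = emulate_rdmsr(run->msr.index);
		run->msr.error = 0;   /* 0: success, KVM loads RAX/RDX on re-entry */
		break;
	case KVM_EXIT_X86_WRMSR:
		run->msr.error = 0;   /* nonzero would make KVM inject #GP instead */
		break;
	}
	/*
	 * The VMM then calls ioctl(vcpu_fd, KVM_RUN, 0) again; KVM finishes
	 * the access via the complete_userspace_io callback set above.
	 */
}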
2139 kvm_rax_write(vcpu, data & -1u); in __kvm_emulate_rdmsr()
2140 kvm_rdx_write(vcpu, (data >> 32) & -1u); in __kvm_emulate_rdmsr()
2157 return __kvm_emulate_rdmsr(vcpu, kvm_rcx_read(vcpu), -1, in kvm_emulate_rdmsr()
2164 vcpu->arch.cui_rdmsr_imm_reg = reg; in kvm_emulate_rdmsr_imm()
2237 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS)) in kvm_emulate_monitor_mwait()
2240 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) in kvm_emulate_monitor_mwait()
2243 enabled = vcpu->arch.ia32_misc_enable_msr & MSR_IA32_MISC_ENABLE_MWAIT; in kvm_emulate_monitor_mwait()
2268 return READ_ONCE(vcpu->mode) == EXITING_GUEST_MODE || in kvm_vcpu_exit_request()
2276 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(vcpu->arch.apic) || in __handle_fastpath_wrmsr()
2277 kvm_x2apic_icr_write_fast(vcpu->arch.apic, data)) in __handle_fastpath_wrmsr()
2329 return -EINVAL; in do_set_msr()
2361 write_seqcount_begin(&vdata->seq); in update_pvclock_gtod()
2364 vdata->clock.vclock_mode = tk->tkr_mono.clock->vdso_clock_mode; in update_pvclock_gtod()
2365 vdata->clock.cycle_last = tk->tkr_mono.cycle_last; in update_pvclock_gtod()
2366 vdata->clock.mask = tk->tkr_mono.mask; in update_pvclock_gtod()
2367 vdata->clock.mult = tk->tkr_mono.mult; in update_pvclock_gtod()
2368 vdata->clock.shift = tk->tkr_mono.shift; in update_pvclock_gtod()
2369 vdata->clock.base_cycles = tk->tkr_mono.xtime_nsec; in update_pvclock_gtod()
2370 vdata->clock.offset = tk->tkr_mono.base; in update_pvclock_gtod()
2372 vdata->raw_clock.vclock_mode = tk->tkr_raw.clock->vdso_clock_mode; in update_pvclock_gtod()
2373 vdata->raw_clock.cycle_last = tk->tkr_raw.cycle_last; in update_pvclock_gtod()
2374 vdata->raw_clock.mask = tk->tkr_raw.mask; in update_pvclock_gtod()
2375 vdata->raw_clock.mult = tk->tkr_raw.mult; in update_pvclock_gtod()
2376 vdata->raw_clock.shift = tk->tkr_raw.shift; in update_pvclock_gtod()
2377 vdata->raw_clock.base_cycles = tk->tkr_raw.xtime_nsec; in update_pvclock_gtod()
2378 vdata->raw_clock.offset = tk->tkr_raw.base; in update_pvclock_gtod()
2380 vdata->wall_time_sec = tk->xtime_sec; in update_pvclock_gtod()
2382 vdata->offs_boot = tk->offs_boot; in update_pvclock_gtod()
2384 write_seqcount_end(&vdata->seq); in update_pvclock_gtod()
2444 struct kvm_arch *ka = &vcpu->kvm->arch; in kvm_write_system_time()
2446 if (vcpu->vcpu_id == 0 && !host_initiated) { in kvm_write_system_time()
2447 if (ka->boot_vcpu_runs_old_kvmclock != old_msr) in kvm_write_system_time()
2450 ka->boot_vcpu_runs_old_kvmclock = old_msr; in kvm_write_system_time()
2453 vcpu->arch.time = system_time; in kvm_write_system_time()
2458 kvm_gpc_activate(&vcpu->arch.pv_time, system_time & ~1ULL, in kvm_write_system_time()
2461 kvm_gpc_deactivate(&vcpu->arch.pv_time); in kvm_write_system_time()
2484 shift--; in kvm_get_time_scale()
2529 vcpu->arch.tsc_catchup = 1; in set_tsc_khz()
2530 vcpu->arch.tsc_always_catchup = 1; in set_tsc_khz()
2534 return -1; in set_tsc_khz()
2538 /* TSC scaling required - calculate ratio */ in set_tsc_khz()
2543 pr_warn_ratelimited("Invalid TSC scaling ratio - virtual-tsc-khz=%u\n", in set_tsc_khz()
2545 return -1; in set_tsc_khz()
2557 /* tsc_khz can be zero if TSC calibration fails */ in kvm_set_tsc_khz()
2561 return -1; in kvm_set_tsc_khz()
2566 &vcpu->arch.virtual_tsc_shift, in kvm_set_tsc_khz()
2567 &vcpu->arch.virtual_tsc_mult); in kvm_set_tsc_khz()
2568 vcpu->arch.virtual_tsc_khz = user_tsc_khz; in kvm_set_tsc_khz()
2576 thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm); in kvm_set_tsc_khz()
2588 u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec, in compute_guest_tsc()
2589 vcpu->arch.virtual_tsc_mult, in compute_guest_tsc()
2590 vcpu->arch.virtual_tsc_shift); in compute_guest_tsc()
2591 tsc += vcpu->arch.this_tsc_write; in compute_guest_tsc()
2605 struct kvm_arch *ka = &vcpu->kvm->arch; in kvm_track_tsc_matching()
2613 bool use_master_clock = (ka->nr_vcpus_matched_tsc + 1 == in kvm_track_tsc_matching()
2614 atomic_read(&vcpu->kvm->online_vcpus)) && in kvm_track_tsc_matching()
2615 gtod_is_based_on_tsc(gtod->clock.vclock_mode); in kvm_track_tsc_matching()
2623 if ((ka->use_master_clock && new_generation) || in kvm_track_tsc_matching()
2624 (ka->use_master_clock != use_master_clock)) in kvm_track_tsc_matching()
2627 trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc, in kvm_track_tsc_matching()
2628 atomic_read(&vcpu->kvm->online_vcpus), in kvm_track_tsc_matching()
2629 ka->use_master_clock, gtod->clock.vclock_mode); in kvm_track_tsc_matching()
2636 * The most significant 64-N bits (mult) of ratio represent the
2639 * point number (mult + frac * 2^(-N)).
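/*
 * Editor's note: a worked example of the fixed-point format described in
 * the comment above. With N fractional bits, ratio == 1ULL << N means
 * "scale by 1.0", and kvm_scale_tsc() is effectively (tsc * ratio) >> N in
 * 128-bit arithmetic. N is 48 on VMX and 32 on SVM; 48 is assumed here.
 * unsigned __int128 is a GCC/Clang extension.
 */
#include <assert.h>
#include <stdint.h>

#define FRAC_BITS 48  /* models kvm_caps.tsc_scaling_ratio_frac_bits (VMX) */

static uint64_t scale_tsc(uint64_t tsc, uint64_t ratio)
{
	return (uint64_t)(((unsigned __int128)tsc * ratio) >> FRAC_BITS);
}

int main(void)
{
	uint64_t one = 1ULL << FRAC_BITS;              /* ratio 1.0  */
	assert(scale_tsc(1000000, one) == 1000000);    /* identity   */
	assert(scale_tsc(1000000, one / 2) == 500000); /* ratio 0.5  */
	return 0;
}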
2662 tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio); in kvm_compute_l1_tsc_offset()
2664 return target_tsc - tsc; in kvm_compute_l1_tsc_offset()
2669 return vcpu->arch.l1_tsc_offset + in kvm_read_l1_tsc()
2670 kvm_scale_tsc(host_tsc, vcpu->arch.l1_tsc_scaling_ratio); in kvm_read_l1_tsc()
2701 if (vcpu->arch.guest_tsc_protected) in kvm_vcpu_write_tsc_offset()
2704 trace_kvm_write_tsc_offset(vcpu->vcpu_id, in kvm_vcpu_write_tsc_offset()
2705 vcpu->arch.l1_tsc_offset, in kvm_vcpu_write_tsc_offset()
2708 vcpu->arch.l1_tsc_offset = l1_offset; in kvm_vcpu_write_tsc_offset()
2716 vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset( in kvm_vcpu_write_tsc_offset()
2721 vcpu->arch.tsc_offset = l1_offset; in kvm_vcpu_write_tsc_offset()
2728 vcpu->arch.l1_tsc_scaling_ratio = l1_multiplier; in kvm_vcpu_write_tsc_multiplier()
2732 vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier( in kvm_vcpu_write_tsc_multiplier()
2736 vcpu->arch.tsc_scaling_ratio = l1_multiplier; in kvm_vcpu_write_tsc_multiplier()
2746 * TSC is marked unstable when we're running on Hyper-V, in kvm_check_tsc_unstable()
2763 struct kvm *kvm = vcpu->kvm; in __kvm_synchronize_tsc()
2765 lockdep_assert_held(&kvm->arch.tsc_write_lock); in __kvm_synchronize_tsc()
2767 if (vcpu->arch.guest_tsc_protected) in __kvm_synchronize_tsc()
2771 vcpu->kvm->arch.user_set_tsc = true; in __kvm_synchronize_tsc()
2777 kvm->arch.last_tsc_nsec = ns; in __kvm_synchronize_tsc()
2778 kvm->arch.last_tsc_write = tsc; in __kvm_synchronize_tsc()
2779 kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz; in __kvm_synchronize_tsc()
2780 kvm->arch.last_tsc_offset = offset; in __kvm_synchronize_tsc()
2782 vcpu->arch.last_guest_tsc = tsc; in __kvm_synchronize_tsc()
2794 * These values are tracked in kvm->arch.cur_xxx variables. in __kvm_synchronize_tsc()
2796 kvm->arch.cur_tsc_generation++; in __kvm_synchronize_tsc()
2797 kvm->arch.cur_tsc_nsec = ns; in __kvm_synchronize_tsc()
2798 kvm->arch.cur_tsc_write = tsc; in __kvm_synchronize_tsc()
2799 kvm->arch.cur_tsc_offset = offset; in __kvm_synchronize_tsc()
2800 kvm->arch.nr_vcpus_matched_tsc = 0; in __kvm_synchronize_tsc()
2801 } else if (vcpu->arch.this_tsc_generation != kvm->arch.cur_tsc_generation) { in __kvm_synchronize_tsc()
2802 kvm->arch.nr_vcpus_matched_tsc++; in __kvm_synchronize_tsc()
2806 vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation; in __kvm_synchronize_tsc()
2807 vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec; in __kvm_synchronize_tsc()
2808 vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write; in __kvm_synchronize_tsc()
2816 struct kvm *kvm = vcpu->kvm; in kvm_synchronize_tsc()
2822 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); in kvm_synchronize_tsc()
2825 elapsed = ns - kvm->arch.last_tsc_nsec; in kvm_synchronize_tsc()
2827 if (vcpu->arch.virtual_tsc_khz) { in kvm_synchronize_tsc()
2834 } else if (kvm->arch.user_set_tsc) { in kvm_synchronize_tsc()
2835 u64 tsc_exp = kvm->arch.last_tsc_write + in kvm_synchronize_tsc()
2837 u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL; in kvm_synchronize_tsc()
2839 * Here lies UAPI baggage: when a user-initiated TSC write has in kvm_synchronize_tsc()
2850 * come from the kernel's default vCPU creation. Make the 1-second in kvm_synchronize_tsc()
2866 vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) { in kvm_synchronize_tsc()
2868 offset = kvm->arch.cur_tsc_offset; in kvm_synchronize_tsc()
2878 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); in kvm_synchronize_tsc()
2884 u64 tsc_offset = vcpu->arch.l1_tsc_offset; in adjust_tsc_offset_guest()
2890 if (vcpu->arch.l1_tsc_scaling_ratio != kvm_caps.default_tsc_scaling_ratio) in adjust_tsc_offset_host()
2893 vcpu->arch.l1_tsc_scaling_ratio); in adjust_tsc_offset_host()
2925 switch (clock->vclock_mode) { in vgettsc()
2931 v = (tsc_pg_val - clock->cycle_last) & in vgettsc()
2932 clock->mask; in vgettsc()
2941 v = (*tsc_timestamp - clock->cycle_last) & in vgettsc()
2942 clock->mask; in vgettsc()
2951 return v * clock->mult; in vgettsc()
2956 * frequency of CLOCK_MONOTONIC_RAW (hence adding gtod->offs_boot).
2966 seq = read_seqcount_begin(&gtod->seq); in do_kvmclock_base()
2967 ns = gtod->raw_clock.base_cycles; in do_kvmclock_base()
2968 ns += vgettsc(&gtod->raw_clock, tsc_timestamp, &mode); in do_kvmclock_base()
2969 ns >>= gtod->raw_clock.shift; in do_kvmclock_base()
2970 ns += ktime_to_ns(ktime_add(gtod->raw_clock.offset, gtod->offs_boot)); in do_kvmclock_base()
2971 } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); in do_kvmclock_base()
2989 seq = read_seqcount_begin(&gtod->seq); in do_monotonic()
2990 ns = gtod->clock.base_cycles; in do_monotonic()
2991 ns += vgettsc(&gtod->clock, tsc_timestamp, &mode); in do_monotonic()
2992 ns >>= gtod->clock.shift; in do_monotonic()
2993 ns += ktime_to_ns(gtod->clock.offset); in do_monotonic()
2994 } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); in do_monotonic()
3008 seq = read_seqcount_begin(&gtod->seq); in do_realtime()
3009 ts->tv_sec = gtod->wall_time_sec; in do_realtime()
3010 ns = gtod->clock.base_cycles; in do_realtime()
3011 ns += vgettsc(&gtod->clock, tsc_timestamp, &mode); in do_realtime()
3012 ns >>= gtod->clock.shift; in do_realtime()
3013 } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); in do_realtime()
3015 ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns); in do_realtime()
3016 ts->tv_nsec = ns; in do_realtime()
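/*
 * Editor's note: do_kvmclock_base(), do_monotonic() and do_realtime() above
 * all use the same lockless seqcount read pattern: snapshot the sequence,
 * read the clock fields, and retry if a writer ran in between. A
 * self-contained model with C11 atomics (the kernel seqcount has stronger
 * fencing; only the retry shape is illustrated):
 */
#include <stdatomic.h>
#include <stdint.h>

struct clock_data {
	atomic_uint seq;      /* odd while a writer is mid-update */
	uint64_t base_ns;
	uint64_t mult;
	unsigned int shift;
};

static uint64_t read_clock(struct clock_data *c, uint64_t cycles)
{
	unsigned int seq;
	uint64_t ns;

	for (;;) {
		seq = atomic_load_explicit(&c->seq, memory_order_acquire);
		if (seq & 1)
			continue;       /* writer in progress, retry */
		ns = c->base_ns + ((cycles * c->mult) >> c->shift);
		if (atomic_load_explicit(&c->seq, memory_order_acquire) == seq)
			return ns;      /* no writer interleaved, value is consistent */
	}
}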
3084 * 4. ret0 = timespec0 + (rdtsc - tsc0) |
3085 * 5. | ret1 = timespec1 + (rdtsc - tsc1)
3086 * | ret1 = timespec0 + N + (rdtsc - (tsc0 + M))
3090 * - ret0 < ret1
3091 * - timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M))
3093 * - 0 < N - M => M < N
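% Editor's note: the masterclock monotonicity argument above, written out.
% Two reads of the paravirt clock, with the second sample taken after the
% host clock advanced by N ns while the TSC advanced by M cycles:
\begin{align*}
ret_0 &= timespec_0 + (rdtsc - tsc_0) \\
ret_1 &= timespec_0 + N + (rdtsc - (tsc_0 + M)) \\
ret_1 - ret_0 &= N - M
\end{align*}
% Hence ret_0 < ret_1 iff M < N: the sampled kernel time must advance at
% least as much as the TSC delta implies, which holds exactly when both are
% the same global function of the TSC, i.e. the clocksource is the TSC.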
3112 struct kvm_arch *ka = &kvm->arch; in pvclock_update_vm_gtod_copy()
3116 lockdep_assert_held(&kvm->arch.tsc_write_lock); in pvclock_update_vm_gtod_copy()
3117 vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == in pvclock_update_vm_gtod_copy()
3118 atomic_read(&kvm->online_vcpus)); in pvclock_update_vm_gtod_copy()
3125 &ka->master_kernel_ns, in pvclock_update_vm_gtod_copy()
3126 &ka->master_cycle_now); in pvclock_update_vm_gtod_copy()
3128 ka->use_master_clock = host_tsc_clocksource && vcpus_matched in pvclock_update_vm_gtod_copy()
3129 && !ka->backwards_tsc_observed in pvclock_update_vm_gtod_copy()
3130 && !ka->boot_vcpu_runs_old_kvmclock; in pvclock_update_vm_gtod_copy()
3132 if (ka->use_master_clock) in pvclock_update_vm_gtod_copy()
3136 trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode, in pvclock_update_vm_gtod_copy()
3148 raw_spin_lock_irq(&kvm->arch.tsc_write_lock); in __kvm_start_pvclock_update()
3149 write_seqcount_begin(&kvm->arch.pvclock_sc); in __kvm_start_pvclock_update()
3162 struct kvm_arch *ka = &kvm->arch; in kvm_end_pvclock_update()
3166 write_seqcount_end(&ka->pvclock_sc); in kvm_end_pvclock_update()
3167 raw_spin_unlock_irq(&ka->tsc_write_lock); in kvm_end_pvclock_update()
3186 * per-CPU value (which may be zero if a CPU is going offline). Note, tsc_khz
3200 /* Called within read_seqcount_begin/retry for kvm->pvclock_sc. */
3203 struct kvm_arch *ka = &kvm->arch; in __get_kvmclock()
3209 data->flags = 0; in __get_kvmclock()
3210 if (ka->use_master_clock && in __get_kvmclock()
3215 if (kvm_get_walltime_and_clockread(&ts, &data->host_tsc)) { in __get_kvmclock()
3216 data->realtime = ts.tv_nsec + NSEC_PER_SEC * ts.tv_sec; in __get_kvmclock()
3217 data->flags |= KVM_CLOCK_REALTIME | KVM_CLOCK_HOST_TSC; in __get_kvmclock()
3220 data->host_tsc = rdtsc(); in __get_kvmclock()
3222 data->flags |= KVM_CLOCK_TSC_STABLE; in __get_kvmclock()
3223 hv_clock.tsc_timestamp = ka->master_cycle_now; in __get_kvmclock()
3224 hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset; in __get_kvmclock()
3228 data->clock = __pvclock_read_cycles(&hv_clock, data->host_tsc); in __get_kvmclock()
3230 data->clock = get_kvmclock_base_ns() + ka->kvmclock_offset; in __get_kvmclock()
3238 struct kvm_arch *ka = &kvm->arch; in get_kvmclock()
3242 seq = read_seqcount_begin(&ka->pvclock_sc); in get_kvmclock()
3244 } while (read_seqcount_retry(&ka->pvclock_sc, seq)); in get_kvmclock()
3266 read_lock_irqsave(&gpc->lock, flags); in kvm_setup_guest_pvclock()
3268 read_unlock_irqrestore(&gpc->lock, flags); in kvm_setup_guest_pvclock()
3273 read_lock_irqsave(&gpc->lock, flags); in kvm_setup_guest_pvclock()
3276 guest_hv_clock = (void *)(gpc->khva + offset); in kvm_setup_guest_pvclock()
3285 guest_hv_clock->version = hv_clock.version = (guest_hv_clock->version + 1) | 1; in kvm_setup_guest_pvclock()
3289 hv_clock.flags |= (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED); in kvm_setup_guest_pvclock()
3295 guest_hv_clock->version = ++hv_clock.version; in kvm_setup_guest_pvclock()
3298 read_unlock_irqrestore(&gpc->lock, flags); in kvm_setup_guest_pvclock()
3300 trace_kvm_pvclock_update(vcpu->vcpu_id, &hv_clock); in kvm_setup_guest_pvclock()
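/*
 * Editor's note: kvm_setup_guest_pvclock() above is the writer side of the
 * pvclock version protocol -- version is made odd before the fields are
 * updated and even afterwards. A sketch of the matching guest-side reader
 * (simplified struct; TSC scaling by mul/shift omitted):
 */
#include <stdatomic.h>
#include <stdint.h>

struct pvti_model {                    /* models struct pvclock_vcpu_time_info */
	volatile uint32_t version;
	volatile uint64_t tsc_timestamp;
	volatile uint64_t system_time;
};

static uint64_t read_pvclock(const struct pvti_model *ti, uint64_t tsc)
{
	uint32_t version;
	uint64_t ns;

	for (;;) {
		version = ti->version;
		if (version & 1)
			continue;       /* hypervisor mid-update, retry */
		atomic_thread_fence(memory_order_acquire);
		ns = ti->system_time + (tsc - ti->tsc_timestamp);
		atomic_thread_fence(memory_order_acquire);
		if (ti->version == version)
			return ns;      /* fields read from one consistent update */
	}
}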
3308 struct kvm_vcpu_arch *vcpu = &v->arch; in kvm_guest_time_update()
3309 struct kvm_arch *ka = &v->kvm->arch; in kvm_guest_time_update()
3322 seq = read_seqcount_begin(&ka->pvclock_sc); in kvm_guest_time_update()
3323 use_master_clock = ka->use_master_clock; in kvm_guest_time_update()
3325 host_tsc = ka->master_cycle_now; in kvm_guest_time_update()
3326 kernel_ns = ka->master_kernel_ns; in kvm_guest_time_update()
3328 } while (read_seqcount_retry(&ka->pvclock_sc, seq)); in kvm_guest_time_update()
3355 if (vcpu->tsc_catchup) { in kvm_guest_time_update()
3358 adjust_tsc_offset_guest(v, tsc - tsc_timestamp); in kvm_guest_time_update()
3369 v->arch.l1_tsc_scaling_ratio); in kvm_guest_time_update()
3373 if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) { in kvm_guest_time_update()
3375 &vcpu->pvclock_tsc_shift, in kvm_guest_time_update()
3376 &vcpu->pvclock_tsc_mul); in kvm_guest_time_update()
3377 vcpu->hw_tsc_khz = tgt_tsc_khz; in kvm_guest_time_update()
3380 hv_clock.tsc_shift = vcpu->pvclock_tsc_shift; in kvm_guest_time_update()
3381 hv_clock.tsc_to_system_mul = vcpu->pvclock_tsc_mul; in kvm_guest_time_update()
3383 hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset; in kvm_guest_time_update()
3384 vcpu->last_guest_tsc = tsc_timestamp; in kvm_guest_time_update()
3391 if (vcpu->pv_time.active) { in kvm_guest_time_update()
3397 if (vcpu->pvclock_set_guest_stopped_request) { in kvm_guest_time_update()
3399 vcpu->pvclock_set_guest_stopped_request = false; in kvm_guest_time_update()
3401 kvm_setup_guest_pvclock(&hv_clock, v, &vcpu->pv_time, 0); in kvm_guest_time_update()
3406 kvm_hv_setup_tsc_page(v->kvm, &hv_clock); in kvm_guest_time_update()
3417 if (ka->xen.hvm_config.flags & KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE) in kvm_guest_time_update()
3420 if (vcpu->xen.vcpu_info_cache.active) in kvm_guest_time_update()
3421 kvm_setup_guest_pvclock(&hv_clock, v, &vcpu->xen.vcpu_info_cache, in kvm_guest_time_update()
3423 if (vcpu->xen.vcpu_time_info_cache.active) in kvm_guest_time_update()
3424 kvm_setup_guest_pvclock(&hv_clock, v, &vcpu->xen.vcpu_time_info_cache, 0); in kvm_guest_time_update()
3435 * simple function of the TSC without any such adjustment.
3452 struct kvm_arch *ka = &kvm->arch; in kvm_get_wall_clock_epoch()
3458 seq = read_seqcount_begin(&ka->pvclock_sc); in kvm_get_wall_clock_epoch()
3461 if (!ka->use_master_clock) in kvm_get_wall_clock_epoch()
3483 hv_clock.tsc_timestamp = ka->master_cycle_now; in kvm_get_wall_clock_epoch()
3484 hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset; in kvm_get_wall_clock_epoch()
3486 } while (read_seqcount_retry(&ka->pvclock_sc, seq)); in kvm_get_wall_clock_epoch()
3492 * since 1970-01-01. in kvm_get_wall_clock_epoch()
3498 return ts.tv_nsec + NSEC_PER_SEC * ts.tv_sec - in kvm_get_wall_clock_epoch()
3502 return ktime_get_real_ns() - get_kvmclock_ns(kvm); in kvm_get_wall_clock_epoch()
3507 * vcpu->cpu migration, should not allow system_timestamp from
3513 * We need to rate-limit these requests though, as they can
3516 * by the delay we use to rate-limit the updates.
3538 struct kvm *kvm = v->kvm; in kvm_gen_kvmclock_update()
3541 schedule_delayed_work(&kvm->arch.kvmclock_update_work, in kvm_gen_kvmclock_update()
3554 schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0); in kvmclock_sync_fn()
3555 schedule_delayed_work(&kvm->arch.kvmclock_sync_work, in kvmclock_sync_fn()
3576 return !!(vcpu->arch.msr_hwcr & BIT_ULL(18)); in can_set_mci_status()
3583 u64 mcg_cap = vcpu->arch.mcg_cap; in set_msr_mce()
3585 u32 msr = msr_info->index; in set_msr_mce()
3586 u64 data = msr_info->data; in set_msr_mce()
3591 vcpu->arch.mcg_status = data; in set_msr_mce()
3595 (data || !msr_info->host_initiated)) in set_msr_mce()
3599 vcpu->arch.mcg_ctl = data; in set_msr_mce()
3601 case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1: in set_msr_mce()
3602 last_msr = MSR_IA32_MCx_CTL2(bank_num) - 1; in set_msr_mce()
3606 if (!(mcg_cap & MCG_CMCI_P) && (data || !msr_info->host_initiated)) in set_msr_mce()
3611 offset = array_index_nospec(msr - MSR_IA32_MC0_CTL2, in set_msr_mce()
3612 last_msr + 1 - MSR_IA32_MC0_CTL2); in set_msr_mce()
3613 vcpu->arch.mci_ctl2_banks[offset] = data; in set_msr_mce()
3615 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: in set_msr_mce()
3616 last_msr = MSR_IA32_MCx_CTL(bank_num) - 1; in set_msr_mce()
3628 * single-bit ECC data errors. in set_msr_mce()
3636 * AMD-based CPUs allow non-zero values, but if and only if in set_msr_mce()
3639 if (!msr_info->host_initiated && is_mci_status_msr(msr) && in set_msr_mce()
3643 offset = array_index_nospec(msr - MSR_IA32_MC0_CTL, in set_msr_mce()
3644 last_msr + 1 - MSR_IA32_MC0_CTL); in set_msr_mce()
3645 vcpu->arch.mce_banks[offset] = data; in set_msr_mce()
3657 return (vcpu->arch.apf.msr_en_val & mask) == mask; in kvm_pv_async_pf_enabled()
3679 vcpu->arch.apf.msr_en_val = data; in kvm_pv_enable_async_pf()
3687 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, in kvm_pv_enable_async_pf()
3691 vcpu->arch.apf.send_always = (data & KVM_ASYNC_PF_SEND_ALWAYS); in kvm_pv_enable_async_pf()
3692 vcpu->arch.apf.delivery_as_pf_vmexit = data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT; in kvm_pv_enable_async_pf()
3701 /* Bits 8-63 are reserved */ in kvm_pv_enable_async_pf_int()
3708 vcpu->arch.apf.msr_int_val = data; in kvm_pv_enable_async_pf_int()
3710 vcpu->arch.apf.vec = data & KVM_ASYNC_PF_VEC_MASK; in kvm_pv_enable_async_pf_int()
3717 kvm_gpc_deactivate(&vcpu->arch.pv_time); in kvmclock_reset()
3718 vcpu->arch.time = 0; in kvmclock_reset()
3723 ++vcpu->stat.tlb_flush; in kvm_vcpu_flush_tlb_all()
3732 ++vcpu->stat.tlb_flush; in kvm_vcpu_flush_tlb_guest()
3748 * Flushing all "guest" TLB is always a superset of Hyper-V's fine in kvm_vcpu_flush_tlb_guest()
3757 ++vcpu->stat.tlb_flush; in kvm_vcpu_flush_tlb_current()
3765 * prior to nested VM-Enter/VM-Exit.
3779 struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache; in record_steal_time()
3782 gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS; in record_steal_time()
3786 if (kvm_xen_msr_enabled(vcpu->kvm)) { in record_steal_time()
3791 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) in record_steal_time()
3794 if (WARN_ON_ONCE(current->mm != vcpu->kvm->mm)) in record_steal_time()
3797 slots = kvm_memslots(vcpu->kvm); in record_steal_time()
3799 if (unlikely(slots->generation != ghc->generation || in record_steal_time()
3800 gpa != ghc->gpa || in record_steal_time()
3801 kvm_is_error_hva(ghc->hva) || !ghc->memslot)) { in record_steal_time()
3803 BUILD_BUG_ON((sizeof(*st) - 1) & KVM_STEAL_VALID_BITS); in record_steal_time()
3805 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st)) || in record_steal_time()
3806 kvm_is_error_hva(ghc->hva) || !ghc->memslot) in record_steal_time()
3810 st = (struct kvm_steal_time __user *)ghc->hva; in record_steal_time()
3817 int err = -EFAULT; in record_steal_time()
3828 "+m" (st->preempted)); in record_steal_time()
3834 vcpu->arch.st.preempted = 0; in record_steal_time()
3836 trace_kvm_pv_tlb_flush(vcpu->vcpu_id, in record_steal_time()
3847 unsafe_put_user(0, &st->preempted, out); in record_steal_time()
3848 vcpu->arch.st.preempted = 0; in record_steal_time()
3851 unsafe_get_user(version, &st->version, out); in record_steal_time()
3856 unsafe_put_user(version, &st->version, out); in record_steal_time()
3860 unsafe_get_user(steal, &st->steal, out); in record_steal_time()
3861 steal += current->sched_info.run_delay - in record_steal_time()
3862 vcpu->arch.st.last_steal; in record_steal_time()
3863 vcpu->arch.st.last_steal = current->sched_info.run_delay; in record_steal_time()
3864 unsafe_put_user(steal, &st->steal, out); in record_steal_time()
3867 unsafe_put_user(version, &st->version, out); in record_steal_time()
3872 mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa)); in record_steal_time()
3879 * Because S_CET is loaded on VM-Enter and VM-Exit via dedicated VMCS fields,
3883 * PL[0-3]_SSP while executing in the kernel is safe, as U_CET is specific to
3884 * userspace, and PL[0-3]_SSP are only consumed when transitioning to lower
3904 * Lock (and if necessary, re-load) the guest FPU, i.e. XSTATE, and access an
3915 KVM_BUG_ON(!is_xstate_managed_msr(vcpu, msr_info->index), vcpu->kvm); in kvm_access_xstate_msr()
3916 KVM_BUG_ON(!vcpu->arch.guest_fpu.fpstate->in_use, vcpu->kvm); in kvm_access_xstate_msr()
3920 rdmsrq(msr_info->index, msr_info->data); in kvm_access_xstate_msr()
3922 wrmsrq(msr_info->index, msr_info->data); in kvm_access_xstate_msr()
3938 u32 msr = msr_info->index; in kvm_set_msr_common()
3939 u64 data = msr_info->data; in kvm_set_msr_common()
3942 * Do not allow host-initiated writes to trigger the Xen hypercall in kvm_set_msr_common()
3946 if (kvm_xen_is_hypercall_page_msr(vcpu->kvm, msr) && in kvm_set_msr_common()
3947 !msr_info->host_initiated) in kvm_set_msr_common()
3962 if (msr_info->host_initiated) in kvm_set_msr_common()
3963 vcpu->arch.microcode_version = data; in kvm_set_msr_common()
3966 if (!msr_info->host_initiated || in kvm_set_msr_common()
3969 vcpu->arch.arch_capabilities = data; in kvm_set_msr_common()
3972 if (!msr_info->host_initiated || in kvm_set_msr_common()
3984 if (vcpu->arch.perf_capabilities == data) in kvm_set_msr_common()
3987 vcpu->arch.perf_capabilities = data; in kvm_set_msr_common()
3993 if (!msr_info->host_initiated) { in kvm_set_msr_common()
4021 if (!msr_info->host_initiated && in kvm_set_msr_common()
4048 vcpu->arch.msr_hwcr = data; in kvm_set_msr_common()
4060 vcpu->arch.pat = data; in kvm_set_msr_common()
4066 return kvm_apic_set_base(vcpu, data, msr_info->host_initiated); in kvm_set_msr_common()
4074 if (!msr_info->host_initiated) { in kvm_set_msr_common()
4075 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr; in kvm_set_msr_common()
4082 vcpu->arch.ia32_tsc_adjust_msr = data; in kvm_set_msr_common()
4086 u64 old_val = vcpu->arch.ia32_misc_enable_msr; in kvm_set_msr_common()
4088 if (!msr_info->host_initiated) { in kvm_set_msr_common()
4098 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT) && in kvm_set_msr_common()
4102 vcpu->arch.ia32_misc_enable_msr = data; in kvm_set_msr_common()
4103 vcpu->arch.cpuid_dynamic_bits_dirty = true; in kvm_set_msr_common()
4105 vcpu->arch.ia32_misc_enable_msr = data; in kvm_set_msr_common()
4110 if (!IS_ENABLED(CONFIG_KVM_SMM) || !msr_info->host_initiated) in kvm_set_msr_common()
4112 vcpu->arch.smbase = data; in kvm_set_msr_common()
4115 vcpu->arch.msr_ia32_power_ctl = data; in kvm_set_msr_common()
4118 if (msr_info->host_initiated) { in kvm_set_msr_common()
4120 } else if (!vcpu->arch.guest_tsc_protected) { in kvm_set_msr_common()
4121 u64 adj = kvm_compute_l1_tsc_offset(vcpu, data) - vcpu->arch.l1_tsc_offset; in kvm_set_msr_common()
4123 vcpu->arch.ia32_tsc_adjust_msr += adj; in kvm_set_msr_common()
4130 if (data & ~vcpu->arch.guest_supported_xss) in kvm_set_msr_common()
4132 if (vcpu->arch.ia32_xss == data) in kvm_set_msr_common()
4134 vcpu->arch.ia32_xss = data; in kvm_set_msr_common()
4135 vcpu->arch.cpuid_dynamic_bits_dirty = true; in kvm_set_msr_common()
4138 if (!msr_info->host_initiated) in kvm_set_msr_common()
4140 vcpu->arch.smi_count = data; in kvm_set_msr_common()
4146 vcpu->kvm->arch.wall_clock = data; in kvm_set_msr_common()
4147 kvm_write_wall_clock(vcpu->kvm, data, 0); in kvm_set_msr_common()
4153 vcpu->kvm->arch.wall_clock = data; in kvm_set_msr_common()
4154 kvm_write_wall_clock(vcpu->kvm, data, 0); in kvm_set_msr_common()
4160 kvm_write_system_time(vcpu, data, false, msr_info->host_initiated); in kvm_set_msr_common()
4166 kvm_write_system_time(vcpu, data, true, msr_info->host_initiated); in kvm_set_msr_common()
4186 vcpu->arch.apf.pageready_pending = false; in kvm_set_msr_common()
4200 vcpu->arch.st.msr_val = data; in kvm_set_msr_common()
4221 if (data & (-1ULL << 1)) in kvm_set_msr_common()
4224 vcpu->arch.msr_kvm_poll_control = data; in kvm_set_msr_common()
4229 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: in kvm_set_msr_common()
4230 case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1: in kvm_set_msr_common()
4247 * all pre-dating SVM, but a recommended workaround from in kvm_set_msr_common()
4265 msr_info->host_initiated); in kvm_set_msr_common()
4268 /* Drop writes to this legacy MSR -- see rdmsr in kvm_set_msr_common()
4276 vcpu->arch.osvw.length = data; in kvm_set_msr_common()
4281 vcpu->arch.osvw.status = data; in kvm_set_msr_common()
4284 if (!msr_info->host_initiated) in kvm_set_msr_common()
4286 vcpu->arch.msr_platform_info = data; in kvm_set_msr_common()
4293 vcpu->arch.msr_misc_features_enables = data; in kvm_set_msr_common()
4297 if (!msr_info->host_initiated && in kvm_set_msr_common()
4304 fpu_update_guest_xfd(&vcpu->arch.guest_fpu, data); in kvm_set_msr_common()
4307 if (!msr_info->host_initiated && in kvm_set_msr_common()
4314 vcpu->arch.guest_fpu.xfd_err = data; in kvm_set_msr_common()
4334 u64 mcg_cap = vcpu->arch.mcg_cap; in get_msr_mce()
4344 data = vcpu->arch.mcg_cap; in get_msr_mce()
4349 data = vcpu->arch.mcg_ctl; in get_msr_mce()
4352 data = vcpu->arch.mcg_status; in get_msr_mce()
4354 case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1: in get_msr_mce()
4355 last_msr = MSR_IA32_MCx_CTL2(bank_num) - 1; in get_msr_mce()
4361 offset = array_index_nospec(msr - MSR_IA32_MC0_CTL2, in get_msr_mce()
4362 last_msr + 1 - MSR_IA32_MC0_CTL2); in get_msr_mce()
4363 data = vcpu->arch.mci_ctl2_banks[offset]; in get_msr_mce()
4365 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: in get_msr_mce()
4366 last_msr = MSR_IA32_MCx_CTL(bank_num) - 1; in get_msr_mce()
4370 offset = array_index_nospec(msr - MSR_IA32_MC0_CTL, in get_msr_mce()
4371 last_msr + 1 - MSR_IA32_MC0_CTL); in get_msr_mce()
4372 data = vcpu->arch.mce_banks[offset]; in get_msr_mce()
4383 switch (msr_info->index) { in kvm_get_msr_common()
4406 * so for existing CPU-specific MSRs. in kvm_get_msr_common()
4413 msr_info->data = 0; in kvm_get_msr_common()
4419 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) in kvm_get_msr_common()
4421 msr_info->data = 0; in kvm_get_msr_common()
4424 msr_info->data = vcpu->arch.microcode_version; in kvm_get_msr_common()
4429 msr_info->data = vcpu->arch.arch_capabilities; in kvm_get_msr_common()
4434 msr_info->data = vcpu->arch.perf_capabilities; in kvm_get_msr_common()
4437 msr_info->data = vcpu->arch.msr_ia32_power_ctl; in kvm_get_msr_common()
4446 * return L1's TSC value to ensure backwards-compatible in kvm_get_msr_common()
4451 if (msr_info->host_initiated) { in kvm_get_msr_common()
4452 offset = vcpu->arch.l1_tsc_offset; in kvm_get_msr_common()
4453 ratio = vcpu->arch.l1_tsc_scaling_ratio; in kvm_get_msr_common()
4455 offset = vcpu->arch.tsc_offset; in kvm_get_msr_common()
4456 ratio = vcpu->arch.tsc_scaling_ratio; in kvm_get_msr_common()
4459 msr_info->data = kvm_scale_tsc(rdtsc(), ratio) + offset; in kvm_get_msr_common()
4463 msr_info->data = vcpu->arch.pat; in kvm_get_msr_common()
4468 return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data); in kvm_get_msr_common()
4470 msr_info->data = 3; in kvm_get_msr_common()
4484 msr_info->data = 1 << 24; in kvm_get_msr_common()
4487 msr_info->data = vcpu->arch.apic_base; in kvm_get_msr_common()
4490 return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data); in kvm_get_msr_common()
4492 msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu); in kvm_get_msr_common()
4495 msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr; in kvm_get_msr_common()
4498 msr_info->data = vcpu->arch.ia32_misc_enable_msr; in kvm_get_msr_common()
4501 if (!IS_ENABLED(CONFIG_KVM_SMM) || !msr_info->host_initiated) in kvm_get_msr_common()
4503 msr_info->data = vcpu->arch.smbase; in kvm_get_msr_common()
4506 msr_info->data = vcpu->arch.smi_count; in kvm_get_msr_common()
4510 msr_info->data = 1000ULL; in kvm_get_msr_common()
4512 msr_info->data |= (((uint64_t)4ULL) << 40); in kvm_get_msr_common()
4515 msr_info->data = vcpu->arch.efer; in kvm_get_msr_common()
4521 msr_info->data = vcpu->kvm->arch.wall_clock; in kvm_get_msr_common()
4527 msr_info->data = vcpu->kvm->arch.wall_clock; in kvm_get_msr_common()
4533 msr_info->data = vcpu->arch.time; in kvm_get_msr_common()
4539 msr_info->data = vcpu->arch.time; in kvm_get_msr_common()
4545 msr_info->data = vcpu->arch.apf.msr_en_val; in kvm_get_msr_common()
4551 msr_info->data = vcpu->arch.apf.msr_int_val; in kvm_get_msr_common()
4557 msr_info->data = 0; in kvm_get_msr_common()
4563 msr_info->data = vcpu->arch.st.msr_val; in kvm_get_msr_common()
4569 msr_info->data = vcpu->arch.pv_eoi.msr_val; in kvm_get_msr_common()
4575 msr_info->data = vcpu->arch.msr_kvm_poll_control; in kvm_get_msr_common()
4582 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: in kvm_get_msr_common()
4583 case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1: in kvm_get_msr_common()
4584 return get_msr_mce(vcpu, msr_info->index, &msr_info->data, in kvm_get_msr_common()
4585 msr_info->host_initiated); in kvm_get_msr_common()
4587 if (!msr_info->host_initiated && in kvm_get_msr_common()
4590 msr_info->data = vcpu->arch.ia32_xss; in kvm_get_msr_common()
4594 * Provide expected ramp-up count for K7. All other in kvm_get_msr_common()
4602 msr_info->data = 0x20000000; in kvm_get_msr_common()
4616 msr_info->index, &msr_info->data, in kvm_get_msr_common()
4617 msr_info->host_initiated); in kvm_get_msr_common()
4630 msr_info->data = 0xbe702111; in kvm_get_msr_common()
4635 msr_info->data = vcpu->arch.osvw.length; in kvm_get_msr_common()
4640 msr_info->data = vcpu->arch.osvw.status; in kvm_get_msr_common()
4643 if (!msr_info->host_initiated && in kvm_get_msr_common()
4644 !vcpu->kvm->arch.guest_can_read_msr_platform_info) in kvm_get_msr_common()
4646 msr_info->data = vcpu->arch.msr_platform_info; in kvm_get_msr_common()
4649 msr_info->data = vcpu->arch.msr_misc_features_enables; in kvm_get_msr_common()
4652 msr_info->data = vcpu->arch.msr_hwcr; in kvm_get_msr_common()
4656 if (!msr_info->host_initiated && in kvm_get_msr_common()
4660 msr_info->data = vcpu->arch.guest_fpu.fpstate->xfd; in kvm_get_msr_common()
4663 if (!msr_info->host_initiated && in kvm_get_msr_common()
4667 msr_info->data = vcpu->arch.guest_fpu.xfd_err; in kvm_get_msr_common()
4675 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) in kvm_get_msr_common()
4697 for (i = 0; i < msrs->nmsrs; ++i) { in __msr_io()
4699 * If userspace is accessing one or more XSTATE-managed MSRs, in __msr_io()
4732 r = -EFAULT; in msr_io()
4736 r = -E2BIG; in msr_io()
4741 entries = memdup_user(user_msrs->entries, size); in msr_io()
4749 if (writeback && copy_to_user(user_msrs->entries, entries, size)) in msr_io()
4750 r = -EFAULT; in msr_io()
4788 r = -EFAULT; in kvm_ioctl_get_supported_hv_cpuid()
4792 r = kvm_get_hv_cpuid(vcpu, &cpuid, cpuid_arg->entries); in kvm_ioctl_get_supported_hv_cpuid()
4796 r = -EFAULT; in kvm_ioctl_get_supported_hv_cpuid()
4811 return kvm && kvm->arch.has_protected_state ? 0 : KVM_SYNC_X86_VALID_FIELDS; in kvm_sync_valid_fields()
4954 r = kvm->max_vcpus; in kvm_vm_ioctl_check_extension()
4976 r = kvm_x86_ops.nested_ops->get_state ? in kvm_vm_ioctl_check_extension()
4977 kvm_x86_ops.nested_ops->get_state(NULL, NULL, 0) : 0; in kvm_vm_ioctl_check_extension()
4984 r = kvm_x86_ops.nested_ops->enable_evmcs != NULL; in kvm_vm_ioctl_check_extension()
5029 if (attr->group) { in __kvm_x86_dev_get_attr()
5031 return kvm_x86_call(dev_get_attr)(attr->group, attr->attr, val); in __kvm_x86_dev_get_attr()
5032 return -ENXIO; in __kvm_x86_dev_get_attr()
5035 switch (attr->attr) { in __kvm_x86_dev_get_attr()
5040 return -ENXIO; in __kvm_x86_dev_get_attr()
5046 u64 __user *uaddr = u64_to_user_ptr(attr->addr); in kvm_x86_dev_get_attr()
5055 return -EFAULT; in kvm_x86_dev_get_attr()
5079 r = -EFAULT; in kvm_arch_dev_ioctl()
5086 r = -E2BIG; in kvm_arch_dev_ioctl()
5089 r = -EFAULT; in kvm_arch_dev_ioctl()
5090 if (copy_to_user(user_msr_list->indices, &msrs_to_save, in kvm_arch_dev_ioctl()
5093 if (copy_to_user(user_msr_list->indices + num_msrs_to_save, in kvm_arch_dev_ioctl()
5105 r = -EFAULT; in kvm_arch_dev_ioctl()
5109 r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries, in kvm_arch_dev_ioctl()
5114 r = -EFAULT; in kvm_arch_dev_ioctl()
5121 r = -EFAULT; in kvm_arch_dev_ioctl()
5132 r = -EFAULT; in kvm_arch_dev_ioctl()
5139 r = -E2BIG; in kvm_arch_dev_ioctl()
5142 r = -EFAULT; in kvm_arch_dev_ioctl()
5143 if (copy_to_user(user_msr_list->indices, &msr_based_features, in kvm_arch_dev_ioctl()
5159 r = -EFAULT; in kvm_arch_dev_ioctl()
5167 r = -EFAULT; in kvm_arch_dev_ioctl()
5174 r = -EINVAL; in kvm_arch_dev_ioctl()
5183 return kvm_arch_has_noncoherent_dma(vcpu->kvm); in need_emulate_wbinvd()
5192 vcpu->arch.l1tf_flush_l1d = true; in kvm_arch_vcpu_load()
5194 if (vcpu->scheduled_out && pmu->version && pmu->event_count) { in kvm_arch_vcpu_load()
5195 pmu->need_cleanup = true; in kvm_arch_vcpu_load()
5202 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_load()
5203 else if (vcpu->cpu != -1 && vcpu->cpu != cpu) in kvm_arch_vcpu_load()
5204 wbinvd_on_cpu(vcpu->cpu); in kvm_arch_vcpu_load()
5215 * is handled on the nested VM-Exit path. in kvm_arch_vcpu_load()
5223 vcpu->arch.host_pkru = read_pkru(); in kvm_arch_vcpu_load()
5226 if (unlikely(vcpu->arch.tsc_offset_adjustment)) { in kvm_arch_vcpu_load()
5227 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment); in kvm_arch_vcpu_load()
5228 vcpu->arch.tsc_offset_adjustment = 0; in kvm_arch_vcpu_load()
5232 if (unlikely(vcpu->cpu != cpu) || kvm_check_tsc_unstable()) { in kvm_arch_vcpu_load()
5233 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 : in kvm_arch_vcpu_load()
5234 rdtsc() - vcpu->arch.last_host_tsc; in kvm_arch_vcpu_load()
5240 vcpu->arch.last_guest_tsc); in kvm_arch_vcpu_load()
5242 if (!vcpu->arch.guest_tsc_protected) in kvm_arch_vcpu_load()
5243 vcpu->arch.tsc_catchup = 1; in kvm_arch_vcpu_load()
5251 * kvmclock on vcpu->cpu migration in kvm_arch_vcpu_load()
5253 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1) in kvm_arch_vcpu_load()
5255 if (vcpu->cpu != cpu) in kvm_arch_vcpu_load()
5257 vcpu->cpu = cpu; in kvm_arch_vcpu_load()
5265 struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache; in kvm_steal_time_set_preempted()
5269 gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS; in kvm_steal_time_set_preempted()
5272 * The vCPU can be marked preempted if and only if the VM-Exit was on in kvm_steal_time_set_preempted()
5276 * preempted if and only if the VM-Exit was due to a host interrupt. in kvm_steal_time_set_preempted()
5278 if (!vcpu->arch.at_instruction_boundary) { in kvm_steal_time_set_preempted()
5279 vcpu->stat.preemption_other++; in kvm_steal_time_set_preempted()
5283 vcpu->stat.preemption_reported++; in kvm_steal_time_set_preempted()
5284 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) in kvm_steal_time_set_preempted()
5287 if (vcpu->arch.st.preempted) in kvm_steal_time_set_preempted()
5291 if (unlikely(current->mm != vcpu->kvm->mm)) in kvm_steal_time_set_preempted()
5294 slots = kvm_memslots(vcpu->kvm); in kvm_steal_time_set_preempted()
5296 if (unlikely(slots->generation != ghc->generation || in kvm_steal_time_set_preempted()
5297 gpa != ghc->gpa || in kvm_steal_time_set_preempted()
5298 kvm_is_error_hva(ghc->hva) || !ghc->memslot)) in kvm_steal_time_set_preempted()
5301 st = (struct kvm_steal_time __user *)ghc->hva; in kvm_steal_time_set_preempted()
5302 BUILD_BUG_ON(sizeof(st->preempted) != sizeof(preempted)); in kvm_steal_time_set_preempted()
5304 if (!copy_to_user_nofault(&st->preempted, &preempted, sizeof(preempted))) in kvm_steal_time_set_preempted()
5305 vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED; in kvm_steal_time_set_preempted()
5307 mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa)); in kvm_steal_time_set_preempted()
5314 if (vcpu->preempted) { in kvm_arch_vcpu_put()
5316 * Assume protected guests are in-kernel. Inefficient yielding in kvm_arch_vcpu_put()
5320 vcpu->arch.preempted_in_kernel = vcpu->arch.guest_state_protected || in kvm_arch_vcpu_put()
5327 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_put()
5328 if (kvm_xen_msr_enabled(vcpu->kvm)) in kvm_arch_vcpu_put()
5332 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_put()
5336 vcpu->arch.last_host_tsc = rdtsc(); in kvm_arch_vcpu_put()
5342 if (vcpu->arch.apic->guest_apic_protected) in kvm_vcpu_ioctl_get_lapic()
5343 return -EINVAL; in kvm_vcpu_ioctl_get_lapic()
5355 if (vcpu->arch.apic->guest_apic_protected) in kvm_vcpu_ioctl_set_lapic()
5356 return -EINVAL; in kvm_vcpu_ioctl_set_lapic()
5389 * instruction boundary and with no events half-injected. in kvm_vcpu_ready_for_interrupt_injection()
5400 if (irq->irq >= KVM_NR_INTERRUPTS) in kvm_vcpu_ioctl_interrupt()
5401 return -EINVAL; in kvm_vcpu_ioctl_interrupt()
5403 if (!irqchip_in_kernel(vcpu->kvm)) { in kvm_vcpu_ioctl_interrupt()
5404 kvm_queue_interrupt(vcpu, irq->irq, false); in kvm_vcpu_ioctl_interrupt()
5410 * With in-kernel LAPIC, we only use this to inject EXTINT, so in kvm_vcpu_ioctl_interrupt()
5411 * fail for in-kernel 8259. in kvm_vcpu_ioctl_interrupt()
5413 if (pic_in_kernel(vcpu->kvm)) in kvm_vcpu_ioctl_interrupt()
5414 return -ENXIO; in kvm_vcpu_ioctl_interrupt()
5416 if (vcpu->arch.pending_external_vector != -1) in kvm_vcpu_ioctl_interrupt()
5417 return -EEXIST; in kvm_vcpu_ioctl_interrupt()
5419 vcpu->arch.pending_external_vector = irq->irq; in kvm_vcpu_ioctl_interrupt()
5434 if (tac->flags) in vcpu_ioctl_tpr_access_reporting()
5435 return -EINVAL; in vcpu_ioctl_tpr_access_reporting()
5436 vcpu->arch.tpr_access_reporting = !!tac->enabled; in vcpu_ioctl_tpr_access_reporting()
5446 r = -EINVAL; in kvm_vcpu_ioctl_x86_setup_mce()
5452 vcpu->arch.mcg_cap = mcg_cap; in kvm_vcpu_ioctl_x86_setup_mce()
5455 vcpu->arch.mcg_ctl = ~(u64)0; in kvm_vcpu_ioctl_x86_setup_mce()
5458 vcpu->arch.mce_banks[bank*4] = ~(u64)0; in kvm_vcpu_ioctl_x86_setup_mce()
5460 vcpu->arch.mci_ctl2_banks[bank] = 0; in kvm_vcpu_ioctl_x86_setup_mce()
5473 * - none of the bits for Machine Check Exceptions are set
5474 * - both the VAL (valid) and UC (uncorrectable) bits are set
5475 * MCI_STATUS_PCC - Processor Context Corrupted
5476 * MCI_STATUS_S - Signaled as a Machine Check Exception
5477 * MCI_STATUS_AR - Software recoverable Action Required
5481 return !mce->mcg_status && in is_ucna()
5482 !(mce->status & (MCI_STATUS_PCC | MCI_STATUS_S | MCI_STATUS_AR)) && in is_ucna()
5483 (mce->status & MCI_STATUS_VAL) && in is_ucna()
5484 (mce->status & MCI_STATUS_UC); in is_ucna()
5489 u64 mcg_cap = vcpu->arch.mcg_cap; in kvm_vcpu_x86_set_ucna()
5491 banks[1] = mce->status; in kvm_vcpu_x86_set_ucna()
5492 banks[2] = mce->addr; in kvm_vcpu_x86_set_ucna()
5493 banks[3] = mce->misc; in kvm_vcpu_x86_set_ucna()
5494 vcpu->arch.mcg_status = mce->mcg_status; in kvm_vcpu_x86_set_ucna()
5497 !(vcpu->arch.mci_ctl2_banks[mce->bank] & MCI_CTL2_CMCI_EN)) in kvm_vcpu_x86_set_ucna()
5501 kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTCMCI); in kvm_vcpu_x86_set_ucna()
5509 u64 mcg_cap = vcpu->arch.mcg_cap; in kvm_vcpu_ioctl_x86_set_mce()
5511 u64 *banks = vcpu->arch.mce_banks; in kvm_vcpu_ioctl_x86_set_mce()
5513 if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL)) in kvm_vcpu_ioctl_x86_set_mce()
5514 return -EINVAL; in kvm_vcpu_ioctl_x86_set_mce()
5516 banks += array_index_nospec(4 * mce->bank, 4 * bank_num); in kvm_vcpu_ioctl_x86_set_mce()
5525 if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) && in kvm_vcpu_ioctl_x86_set_mce()
5526 vcpu->arch.mcg_ctl != ~(u64)0) in kvm_vcpu_ioctl_x86_set_mce()
5532 if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0) in kvm_vcpu_ioctl_x86_set_mce()
5534 if (mce->status & MCI_STATUS_UC) { in kvm_vcpu_ioctl_x86_set_mce()
5535 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) || in kvm_vcpu_ioctl_x86_set_mce()
5541 mce->status |= MCI_STATUS_OVER; in kvm_vcpu_ioctl_x86_set_mce()
5542 banks[2] = mce->addr; in kvm_vcpu_ioctl_x86_set_mce()
5543 banks[3] = mce->misc; in kvm_vcpu_ioctl_x86_set_mce()
5544 vcpu->arch.mcg_status = mce->mcg_status; in kvm_vcpu_ioctl_x86_set_mce()
5545 banks[1] = mce->status; in kvm_vcpu_ioctl_x86_set_mce()
5550 mce->status |= MCI_STATUS_OVER; in kvm_vcpu_ioctl_x86_set_mce()
5551 banks[2] = mce->addr; in kvm_vcpu_ioctl_x86_set_mce()
5552 banks[3] = mce->misc; in kvm_vcpu_ioctl_x86_set_mce()
5553 banks[1] = mce->status; in kvm_vcpu_ioctl_x86_set_mce()
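/*
 * Editor's note: the banks[1..3] stores above rely on mce_banks[] being a
 * flat array with four u64 registers per bank, in the architectural order
 * CTL, STATUS, ADDR, MISC -- hence the bank*4 indexing seen in
 * kvm_vcpu_ioctl_x86_setup_mce() and set_msr_mce(). Sketch of the layout:
 */
#include <stdint.h>

enum { MCE_CTL, MCE_STATUS, MCE_ADDR, MCE_MISC };  /* banks[bank * 4 + reg] */

static uint64_t *mce_bank(uint64_t *banks, unsigned int bank)
{
	return &banks[bank * 4];
}

/* e.g.: mce_bank(banks, mce->bank)[MCE_STATUS] = mce->status; */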
5574 * non-exiting _injected_ exception, and a pending exiting exception. in kvm_vcpu_ioctl_x86_get_vcpu_events()
5575 * In that case, ignore the VM-Exiting exception as it's an extension in kvm_vcpu_ioctl_x86_get_vcpu_events()
5578 if (vcpu->arch.exception_vmexit.pending && in kvm_vcpu_ioctl_x86_get_vcpu_events()
5579 !vcpu->arch.exception.pending && in kvm_vcpu_ioctl_x86_get_vcpu_events()
5580 !vcpu->arch.exception.injected) in kvm_vcpu_ioctl_x86_get_vcpu_events()
5581 ex = &vcpu->arch.exception_vmexit; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5583 ex = &vcpu->arch.exception; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5588 * intercepts #PF, ditto for DR6 and #DBs. If the per-VM capability, in kvm_vcpu_ioctl_x86_get_vcpu_events()
5593 if (!vcpu->kvm->arch.exception_payload_enabled && in kvm_vcpu_ioctl_x86_get_vcpu_events()
5594 ex->pending && ex->has_payload) in kvm_vcpu_ioctl_x86_get_vcpu_events()
5605 if (!kvm_exception_is_soft(ex->vector)) { in kvm_vcpu_ioctl_x86_get_vcpu_events()
5606 events->exception.injected = ex->injected; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5607 events->exception.pending = ex->pending; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5613 if (!vcpu->kvm->arch.exception_payload_enabled) in kvm_vcpu_ioctl_x86_get_vcpu_events()
5614 events->exception.injected |= ex->pending; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5616 events->exception.nr = ex->vector; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5617 events->exception.has_error_code = ex->has_error_code; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5618 events->exception.error_code = ex->error_code; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5619 events->exception_has_payload = ex->has_payload; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5620 events->exception_payload = ex->payload; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5622 events->interrupt.injected = in kvm_vcpu_ioctl_x86_get_vcpu_events()
5623 vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5624 events->interrupt.nr = vcpu->arch.interrupt.nr; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5625 events->interrupt.shadow = kvm_x86_call(get_interrupt_shadow)(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
5627 events->nmi.injected = vcpu->arch.nmi_injected; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5628 events->nmi.pending = kvm_get_nr_pending_nmis(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
5629 events->nmi.masked = kvm_x86_call(get_nmi_mask)(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
5631 /* events->sipi_vector is never valid when reporting to user space */ in kvm_vcpu_ioctl_x86_get_vcpu_events()
5634 events->smi.smm = is_smm(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
5635 events->smi.pending = vcpu->arch.smi_pending; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5636 events->smi.smm_inside_nmi = in kvm_vcpu_ioctl_x86_get_vcpu_events()
5637 !!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK); in kvm_vcpu_ioctl_x86_get_vcpu_events()
5639 events->smi.latched_init = kvm_lapic_latched_init(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
5641 events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING in kvm_vcpu_ioctl_x86_get_vcpu_events()
5644 if (vcpu->kvm->arch.exception_payload_enabled) in kvm_vcpu_ioctl_x86_get_vcpu_events()
5645 events->flags |= KVM_VCPUEVENT_VALID_PAYLOAD; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5646 if (vcpu->kvm->arch.triple_fault_event) { in kvm_vcpu_ioctl_x86_get_vcpu_events()
5647 events->triple_fault.pending = kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
5648 events->flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT; in kvm_vcpu_ioctl_x86_get_vcpu_events()
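
/*
 * Userspace-side sketch: reading event state with KVM_GET_VCPU_EVENTS.
 * Enabling KVM_CAP_EXCEPTION_PAYLOAD on the VM first makes KVM report
 * pending and injected exceptions separately, along with the payload,
 * as described in the code above.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int dump_vcpu_events(int vm_fd, int vcpu_fd)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_EXCEPTION_PAYLOAD,
		.args = { 1 },
	};
	struct kvm_vcpu_events events;

	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);	/* best effort on older kernels */

	if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events))
		return -1;

	printf("exception: pending=%u injected=%u nr=%u payload=0x%llx\n",
	       events.exception.pending, events.exception.injected,
	       events.exception.nr,
	       (unsigned long long)events.exception_payload);
	return 0;
}
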
5655 if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING in kvm_vcpu_ioctl_x86_set_vcpu_events()
5661 return -EINVAL; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5663 if (events->flags & KVM_VCPUEVENT_VALID_PAYLOAD) { in kvm_vcpu_ioctl_x86_set_vcpu_events()
5664 if (!vcpu->kvm->arch.exception_payload_enabled) in kvm_vcpu_ioctl_x86_set_vcpu_events()
5665 return -EINVAL; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5666 if (events->exception.pending) in kvm_vcpu_ioctl_x86_set_vcpu_events()
5667 events->exception.injected = 0; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5669 events->exception_has_payload = 0; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5671 events->exception.pending = 0; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5672 events->exception_has_payload = 0; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5675 if ((events->exception.injected || events->exception.pending) && in kvm_vcpu_ioctl_x86_set_vcpu_events()
5676 (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR)) in kvm_vcpu_ioctl_x86_set_vcpu_events()
5677 return -EINVAL; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5683 * morph the exception to a VM-Exit if appropriate. Do this only for in kvm_vcpu_ioctl_x86_set_vcpu_events()
5684 * pending exceptions, already-injected exceptions are not subject to in kvm_vcpu_ioctl_x86_set_vcpu_events()
5687 * pending exception, which in turn may cause a spurious VM-Exit. in kvm_vcpu_ioctl_x86_set_vcpu_events()
5689 vcpu->arch.exception_from_userspace = events->exception.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5691 vcpu->arch.exception_vmexit.pending = false; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5693 vcpu->arch.exception.injected = events->exception.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5694 vcpu->arch.exception.pending = events->exception.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5695 vcpu->arch.exception.vector = events->exception.nr; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5696 vcpu->arch.exception.has_error_code = events->exception.has_error_code; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5697 vcpu->arch.exception.error_code = events->exception.error_code; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5698 vcpu->arch.exception.has_payload = events->exception_has_payload; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5699 vcpu->arch.exception.payload = events->exception_payload; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5701 vcpu->arch.interrupt.injected = events->interrupt.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5702 vcpu->arch.interrupt.nr = events->interrupt.nr; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5703 vcpu->arch.interrupt.soft = events->interrupt.soft; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5704 if (events->flags & KVM_VCPUEVENT_VALID_SHADOW) in kvm_vcpu_ioctl_x86_set_vcpu_events()
5706 events->interrupt.shadow); in kvm_vcpu_ioctl_x86_set_vcpu_events()
5708 vcpu->arch.nmi_injected = events->nmi.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5709 if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) { in kvm_vcpu_ioctl_x86_set_vcpu_events()
5710 vcpu->arch.nmi_pending = 0; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5711 atomic_set(&vcpu->arch.nmi_queued, events->nmi.pending); in kvm_vcpu_ioctl_x86_set_vcpu_events()
5712 if (events->nmi.pending) in kvm_vcpu_ioctl_x86_set_vcpu_events()
5715 kvm_x86_call(set_nmi_mask)(vcpu, events->nmi.masked); in kvm_vcpu_ioctl_x86_set_vcpu_events()
5717 if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR && in kvm_vcpu_ioctl_x86_set_vcpu_events()
5719 vcpu->arch.apic->sipi_vector = events->sipi_vector; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5721 if (events->flags & KVM_VCPUEVENT_VALID_SMM) { in kvm_vcpu_ioctl_x86_set_vcpu_events()
5723 if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) { in kvm_vcpu_ioctl_x86_set_vcpu_events()
5725 kvm_smm_changed(vcpu, events->smi.smm); in kvm_vcpu_ioctl_x86_set_vcpu_events()
5728 vcpu->arch.smi_pending = events->smi.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5730 if (events->smi.smm) { in kvm_vcpu_ioctl_x86_set_vcpu_events()
5731 if (events->smi.smm_inside_nmi) in kvm_vcpu_ioctl_x86_set_vcpu_events()
5732 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5734 vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5738 if (events->smi.smm || events->smi.pending || in kvm_vcpu_ioctl_x86_set_vcpu_events()
5739 events->smi.smm_inside_nmi) in kvm_vcpu_ioctl_x86_set_vcpu_events()
5740 return -EINVAL; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5744 if (events->smi.latched_init) in kvm_vcpu_ioctl_x86_set_vcpu_events()
5745 set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); in kvm_vcpu_ioctl_x86_set_vcpu_events()
5747 clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); in kvm_vcpu_ioctl_x86_set_vcpu_events()
5751 if (events->flags & KVM_VCPUEVENT_VALID_TRIPLE_FAULT) { in kvm_vcpu_ioctl_x86_set_vcpu_events()
5752 if (!vcpu->kvm->arch.triple_fault_event) in kvm_vcpu_ioctl_x86_set_vcpu_events()
5753 return -EINVAL; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5754 if (events->triple_fault.pending) in kvm_vcpu_ioctl_x86_set_vcpu_events()
5770 if (vcpu->kvm->arch.has_protected_state && in kvm_vcpu_ioctl_x86_get_debugregs()
5771 vcpu->arch.guest_state_protected) in kvm_vcpu_ioctl_x86_get_debugregs()
5772 return -EINVAL; in kvm_vcpu_ioctl_x86_get_debugregs()
5776 BUILD_BUG_ON(ARRAY_SIZE(vcpu->arch.db) != ARRAY_SIZE(dbgregs->db)); in kvm_vcpu_ioctl_x86_get_debugregs()
5777 for (i = 0; i < ARRAY_SIZE(vcpu->arch.db); i++) in kvm_vcpu_ioctl_x86_get_debugregs()
5778 dbgregs->db[i] = vcpu->arch.db[i]; in kvm_vcpu_ioctl_x86_get_debugregs()
5780 dbgregs->dr6 = vcpu->arch.dr6; in kvm_vcpu_ioctl_x86_get_debugregs()
5781 dbgregs->dr7 = vcpu->arch.dr7; in kvm_vcpu_ioctl_x86_get_debugregs()
5790 if (vcpu->kvm->arch.has_protected_state && in kvm_vcpu_ioctl_x86_set_debugregs()
5791 vcpu->arch.guest_state_protected) in kvm_vcpu_ioctl_x86_set_debugregs()
5792 return -EINVAL; in kvm_vcpu_ioctl_x86_set_debugregs()
5794 if (dbgregs->flags) in kvm_vcpu_ioctl_x86_set_debugregs()
5795 return -EINVAL; in kvm_vcpu_ioctl_x86_set_debugregs()
5797 if (!kvm_dr6_valid(dbgregs->dr6)) in kvm_vcpu_ioctl_x86_set_debugregs()
5798 return -EINVAL; in kvm_vcpu_ioctl_x86_set_debugregs()
5799 if (!kvm_dr7_valid(dbgregs->dr7)) in kvm_vcpu_ioctl_x86_set_debugregs()
5800 return -EINVAL; in kvm_vcpu_ioctl_x86_set_debugregs()
5802 for (i = 0; i < ARRAY_SIZE(vcpu->arch.db); i++) in kvm_vcpu_ioctl_x86_set_debugregs()
5803 vcpu->arch.db[i] = dbgregs->db[i]; in kvm_vcpu_ioctl_x86_set_debugregs()
5806 vcpu->arch.dr6 = dbgregs->dr6; in kvm_vcpu_ioctl_x86_set_debugregs()
5807 vcpu->arch.dr7 = dbgregs->dr7; in kvm_vcpu_ioctl_x86_set_debugregs()
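
/*
 * Userspace-side sketch: round-tripping the debug registers.  As the
 * checks above require, dbgregs.flags must be zero and dr6/dr7 must be
 * architecturally valid, or KVM_SET_DEBUGREGS fails with -EINVAL.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_hw_breakpoint(int vcpu_fd, __u64 linear_addr)
{
	struct kvm_debugregs dbg;

	if (ioctl(vcpu_fd, KVM_GET_DEBUGREGS, &dbg))
		return -1;

	dbg.db[0] = linear_addr;
	dbg.dr7  |= 0x1;	/* L0: enable breakpoint 0, execute, 1 byte */

	return ioctl(vcpu_fd, KVM_SET_DEBUGREGS, &dbg);
}
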
5822 * compatible host without the features that are NOT exposed to the in kvm_vcpu_ioctl_x86_get_xsave2()
5829 u64 supported_xcr0 = vcpu->arch.guest_supported_xcr0 | in kvm_vcpu_ioctl_x86_get_xsave2()
5832 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) in kvm_vcpu_ioctl_x86_get_xsave2()
5833 return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0; in kvm_vcpu_ioctl_x86_get_xsave2()
5835 fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu, state, size, in kvm_vcpu_ioctl_x86_get_xsave2()
5836 supported_xcr0, vcpu->arch.pkru); in kvm_vcpu_ioctl_x86_get_xsave2()
5843 return kvm_vcpu_ioctl_x86_get_xsave2(vcpu, (void *)guest_xsave->region, in kvm_vcpu_ioctl_x86_get_xsave()
5844 sizeof(guest_xsave->region)); in kvm_vcpu_ioctl_x86_get_xsave()
5850 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) in kvm_vcpu_ioctl_x86_set_xsave()
5851 return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0; in kvm_vcpu_ioctl_x86_set_xsave()
5853 return fpu_copy_uabi_to_guest_fpstate(&vcpu->arch.guest_fpu, in kvm_vcpu_ioctl_x86_set_xsave()
5854 guest_xsave->region, in kvm_vcpu_ioctl_x86_set_xsave()
5856 &vcpu->arch.pkru); in kvm_vcpu_ioctl_x86_set_xsave()
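
/*
 * Userspace-side sketch: fetching guest XSAVE state with KVM_GET_XSAVE2.
 * The buffer must be as large as KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2)
 * reports for this VM, never less than the legacy 4096-byte kvm_xsave.
 */
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static struct kvm_xsave *get_xsave2(int vm_fd, int vcpu_fd)
{
	int size = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_XSAVE2);
	struct kvm_xsave *xs;

	if (size < (int)sizeof(struct kvm_xsave))
		size = sizeof(struct kvm_xsave);

	xs = calloc(1, size);
	if (xs && ioctl(vcpu_fd, KVM_GET_XSAVE2, xs)) {
		free(xs);
		xs = NULL;
	}
	return xs;
}
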
5862 if (vcpu->kvm->arch.has_protected_state && in kvm_vcpu_ioctl_x86_get_xcrs()
5863 vcpu->arch.guest_state_protected) in kvm_vcpu_ioctl_x86_get_xcrs()
5864 return -EINVAL; in kvm_vcpu_ioctl_x86_get_xcrs()
5867 guest_xcrs->nr_xcrs = 0; in kvm_vcpu_ioctl_x86_get_xcrs()
5871 guest_xcrs->nr_xcrs = 1; in kvm_vcpu_ioctl_x86_get_xcrs()
5872 guest_xcrs->flags = 0; in kvm_vcpu_ioctl_x86_get_xcrs()
5873 guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK; in kvm_vcpu_ioctl_x86_get_xcrs()
5874 guest_xcrs->xcrs[0].value = vcpu->arch.xcr0; in kvm_vcpu_ioctl_x86_get_xcrs()
5883 if (vcpu->kvm->arch.has_protected_state && in kvm_vcpu_ioctl_x86_set_xcrs()
5884 vcpu->arch.guest_state_protected) in kvm_vcpu_ioctl_x86_set_xcrs()
5885 return -EINVAL; in kvm_vcpu_ioctl_x86_set_xcrs()
5888 return -EINVAL; in kvm_vcpu_ioctl_x86_set_xcrs()
5890 if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags) in kvm_vcpu_ioctl_x86_set_xcrs()
5891 return -EINVAL; in kvm_vcpu_ioctl_x86_set_xcrs()
5893 for (i = 0; i < guest_xcrs->nr_xcrs; i++) in kvm_vcpu_ioctl_x86_set_xcrs()
5895 if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) { in kvm_vcpu_ioctl_x86_set_xcrs()
5897 guest_xcrs->xcrs[i].value); in kvm_vcpu_ioctl_x86_set_xcrs()
5901 r = -EINVAL; in kvm_vcpu_ioctl_x86_set_xcrs()
5913 if (!vcpu->arch.pv_time.active) in kvm_set_guest_paused()
5914 return -EINVAL; in kvm_set_guest_paused()
5915 vcpu->arch.pvclock_set_guest_stopped_request = true; in kvm_set_guest_paused()
5925 switch (attr->attr) { in kvm_arch_tsc_has_attr()
5930 r = -ENXIO; in kvm_arch_tsc_has_attr()
5939 u64 __user *uaddr = u64_to_user_ptr(attr->addr); in kvm_arch_tsc_get_attr()
5942 switch (attr->attr) { in kvm_arch_tsc_get_attr()
5944 r = -EFAULT; in kvm_arch_tsc_get_attr()
5945 if (put_user(vcpu->arch.l1_tsc_offset, uaddr)) in kvm_arch_tsc_get_attr()
5950 r = -ENXIO; in kvm_arch_tsc_get_attr()
5959 u64 __user *uaddr = u64_to_user_ptr(attr->addr); in kvm_arch_tsc_set_attr()
5960 struct kvm *kvm = vcpu->kvm; in kvm_arch_tsc_set_attr()
5963 switch (attr->attr) { in kvm_arch_tsc_set_attr()
5969 r = -EFAULT; in kvm_arch_tsc_set_attr()
5973 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); in kvm_arch_tsc_set_attr()
5975 matched = (vcpu->arch.virtual_tsc_khz && in kvm_arch_tsc_set_attr()
5976 kvm->arch.last_tsc_khz == vcpu->arch.virtual_tsc_khz && in kvm_arch_tsc_set_attr()
5977 kvm->arch.last_tsc_offset == offset); in kvm_arch_tsc_set_attr()
5979 tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio) + offset; in kvm_arch_tsc_set_attr()
5983 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); in kvm_arch_tsc_set_attr()
5989 r = -ENXIO; in kvm_arch_tsc_set_attr()
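
/*
 * Userspace-side sketch: writing the L1 TSC offset through the vCPU
 * device-attr interface served above (group KVM_VCPU_TSC_CTRL, attribute
 * KVM_VCPU_TSC_OFFSET).  A write takes the tsc_write_lock path shown in
 * kvm_arch_tsc_set_attr().
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_tsc_offset(int vcpu_fd, uint64_t offset)
{
	struct kvm_device_attr attr = {
		.group = KVM_VCPU_TSC_CTRL,
		.attr  = KVM_VCPU_TSC_OFFSET,
		.addr  = (uint64_t)(uintptr_t)&offset,
	};

	if (ioctl(vcpu_fd, KVM_HAS_DEVICE_ATTR, &attr))
		return -1;	/* not supported by this kernel */

	return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
}
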
6003 return -EFAULT; in kvm_vcpu_ioctl_device_attr()
6006 return -ENXIO; in kvm_vcpu_ioctl_device_attr()
6026 if (cap->flags) in kvm_vcpu_ioctl_enable_cap()
6027 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
6029 switch (cap->cap) { in kvm_vcpu_ioctl_enable_cap()
6032 if (cap->args[0]) in kvm_vcpu_ioctl_enable_cap()
6033 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
6037 if (!irqchip_in_kernel(vcpu->kvm)) in kvm_vcpu_ioctl_enable_cap()
6038 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
6039 return kvm_hv_activate_synic(vcpu, cap->cap == in kvm_vcpu_ioctl_enable_cap()
6047 if (!kvm_x86_ops.nested_ops->enable_evmcs) in kvm_vcpu_ioctl_enable_cap()
6048 return -ENOTTY; in kvm_vcpu_ioctl_enable_cap()
6049 r = kvm_x86_ops.nested_ops->enable_evmcs(vcpu, &vmcs_version); in kvm_vcpu_ioctl_enable_cap()
6051 user_ptr = (void __user *)(uintptr_t)cap->args[0]; in kvm_vcpu_ioctl_enable_cap()
6054 r = -EFAULT; in kvm_vcpu_ioctl_enable_cap()
6060 return -ENOTTY; in kvm_vcpu_ioctl_enable_cap()
6065 return kvm_hv_set_enforce_cpuid(vcpu, cap->args[0]); in kvm_vcpu_ioctl_enable_cap()
6069 vcpu->arch.pv_cpuid.enforce = cap->args[0]; in kvm_vcpu_ioctl_enable_cap()
6072 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
6088 switch (reg->index) { in kvm_translate_kvm_reg()
6091 * FIXME: If host-initiated accesses are ever exempted from in kvm_translate_kvm_reg()
6097 return -EINVAL; in kvm_translate_kvm_reg()
6099 reg->type = KVM_X86_REG_TYPE_MSR; in kvm_translate_kvm_reg()
6100 reg->index = MSR_KVM_INTERNAL_GUEST_SSP; in kvm_translate_kvm_reg()
6103 return -EINVAL; in kvm_translate_kvm_reg()
6113 return -EINVAL; in kvm_get_one_msr()
6116 return -EFAULT; in kvm_get_one_msr()
6126 return -EFAULT; in kvm_set_one_msr()
6129 return -EINVAL; in kvm_set_one_msr()
6144 return -EFAULT; in kvm_get_set_one_reg()
6147 return -EINVAL; in kvm_get_set_one_reg()
6150 if (reg->rsvd1 || reg->rsvd2) in kvm_get_set_one_reg()
6151 return -EINVAL; in kvm_get_set_one_reg()
6153 if (reg->type == KVM_X86_REG_TYPE_KVM) { in kvm_get_set_one_reg()
6159 if (reg->type != KVM_X86_REG_TYPE_MSR) in kvm_get_set_one_reg()
6160 return -EINVAL; in kvm_get_set_one_reg()
6163 return -EINVAL; in kvm_get_set_one_reg()
6165 guard(srcu)(&vcpu->kvm->srcu); in kvm_get_set_one_reg()
6167 load_fpu = is_xstate_managed_msr(vcpu, reg->index); in kvm_get_set_one_reg()
6173 r = kvm_get_one_msr(vcpu, reg->index, user_val); in kvm_get_set_one_reg()
6175 r = kvm_set_one_msr(vcpu, reg->index, user_val); in kvm_get_set_one_reg()
6188 if (get_user(user_nr_regs, &user_list->n)) in kvm_get_reg_list()
6189 return -EFAULT; in kvm_get_reg_list()
6191 if (put_user(nr_regs, &user_list->n)) in kvm_get_reg_list()
6192 return -EFAULT; in kvm_get_reg_list()
6195 return -E2BIG; in kvm_get_reg_list()
6198 put_user(KVM_X86_REG_KVM(KVM_REG_GUEST_SSP), &user_list->reg[0])) in kvm_get_reg_list()
6199 return -EFAULT; in kvm_get_reg_list()
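
/*
 * Userspace-side sketch: reading the guest shadow-stack pointer via
 * KVM_GET_ONE_REG, assuming a kernel that exposes the KVM-defined
 * register encoding used above (KVM_X86_REG_KVM(KVM_REG_GUEST_SSP));
 * those macro names are taken from this tree's uAPI headers and may
 * not exist elsewhere.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int read_guest_ssp(int vcpu_fd, uint64_t *ssp)
{
	struct kvm_one_reg reg = {
		.id   = KVM_X86_REG_KVM(KVM_REG_GUEST_SSP),
		.addr = (uintptr_t)ssp,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}
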
6207 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl()
6223 r = -EINVAL; in kvm_arch_vcpu_ioctl()
6228 r = -ENOMEM; in kvm_arch_vcpu_ioctl()
6234 r = -EFAULT; in kvm_arch_vcpu_ioctl()
6241 r = -EINVAL; in kvm_arch_vcpu_ioctl()
6256 r = -EFAULT; in kvm_arch_vcpu_ioctl()
6274 r = -EFAULT; in kvm_arch_vcpu_ioctl()
6277 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries); in kvm_arch_vcpu_ioctl()
6284 r = -EFAULT; in kvm_arch_vcpu_ioctl()
6288 cpuid_arg->entries); in kvm_arch_vcpu_ioctl()
6295 r = -EFAULT; in kvm_arch_vcpu_ioctl()
6299 cpuid_arg->entries); in kvm_arch_vcpu_ioctl()
6302 r = -EFAULT; in kvm_arch_vcpu_ioctl()
6309 int idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
6311 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
6315 int idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
6317 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
6330 r = -EFAULT; in kvm_arch_vcpu_ioctl()
6336 r = -EFAULT; in kvm_arch_vcpu_ioctl()
6346 r = -EINVAL; in kvm_arch_vcpu_ioctl()
6349 r = -EFAULT; in kvm_arch_vcpu_ioctl()
6352 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
6354 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
6360 r = -EFAULT; in kvm_arch_vcpu_ioctl()
6369 r = -EFAULT; in kvm_arch_vcpu_ioctl()
6380 r = -EFAULT; in kvm_arch_vcpu_ioctl()
6389 r = -EFAULT; in kvm_arch_vcpu_ioctl()
6405 r = -EFAULT; in kvm_arch_vcpu_ioctl()
6415 r = -EFAULT; in kvm_arch_vcpu_ioctl()
6424 r = -EINVAL; in kvm_arch_vcpu_ioctl()
6425 if (vcpu->arch.guest_fpu.uabi_size > sizeof(struct kvm_xsave)) in kvm_arch_vcpu_ioctl()
6429 r = -ENOMEM; in kvm_arch_vcpu_ioctl()
6437 r = -EFAULT; in kvm_arch_vcpu_ioctl()
6444 int size = vcpu->arch.guest_fpu.uabi_size; in kvm_arch_vcpu_ioctl()
6457 int size = vcpu->arch.guest_fpu.uabi_size; in kvm_arch_vcpu_ioctl()
6460 r = -ENOMEM; in kvm_arch_vcpu_ioctl()
6468 r = -EFAULT; in kvm_arch_vcpu_ioctl()
6478 r = -ENOMEM; in kvm_arch_vcpu_ioctl()
6486 r = -EFAULT; in kvm_arch_vcpu_ioctl()
6506 r = -EINVAL; in kvm_arch_vcpu_ioctl()
6508 if (vcpu->arch.guest_tsc_protected) in kvm_arch_vcpu_ioctl()
6526 r = vcpu->arch.virtual_tsc_khz; in kvm_arch_vcpu_ioctl()
6536 r = -EFAULT; in kvm_arch_vcpu_ioctl()
6546 r = -EINVAL; in kvm_arch_vcpu_ioctl()
6547 if (!kvm_x86_ops.nested_ops->get_state) in kvm_arch_vcpu_ioctl()
6550 BUILD_BUG_ON(sizeof(user_data_size) != sizeof(user_kvm_nested_state->size)); in kvm_arch_vcpu_ioctl()
6551 r = -EFAULT; in kvm_arch_vcpu_ioctl()
6552 if (get_user(user_data_size, &user_kvm_nested_state->size)) in kvm_arch_vcpu_ioctl()
6555 r = kvm_x86_ops.nested_ops->get_state(vcpu, user_kvm_nested_state, in kvm_arch_vcpu_ioctl()
6561 if (put_user(r, &user_kvm_nested_state->size)) in kvm_arch_vcpu_ioctl()
6562 r = -EFAULT; in kvm_arch_vcpu_ioctl()
6564 r = -E2BIG; in kvm_arch_vcpu_ioctl()
6576 r = -EINVAL; in kvm_arch_vcpu_ioctl()
6577 if (!kvm_x86_ops.nested_ops->set_state) in kvm_arch_vcpu_ioctl()
6580 r = -EFAULT; in kvm_arch_vcpu_ioctl()
6584 r = -EINVAL; in kvm_arch_vcpu_ioctl()
6599 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
6600 r = kvm_x86_ops.nested_ops->set_state(vcpu, user_kvm_nested_state, &kvm_state); in kvm_arch_vcpu_ioctl()
6601 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
6613 r = -EFAULT; in kvm_arch_vcpu_ioctl()
6618 r = -EFAULT; in kvm_arch_vcpu_ioctl()
6624 r = -EFAULT; in kvm_arch_vcpu_ioctl()
6632 r = -EINVAL; in kvm_arch_vcpu_ioctl()
6633 if (vcpu->kvm->arch.has_protected_state && in kvm_arch_vcpu_ioctl()
6634 vcpu->arch.guest_state_protected) in kvm_arch_vcpu_ioctl()
6638 r = -ENOMEM; in kvm_arch_vcpu_ioctl()
6642 r = -EFAULT; in kvm_arch_vcpu_ioctl()
6649 r = -EINVAL; in kvm_arch_vcpu_ioctl()
6650 if (vcpu->kvm->arch.has_protected_state && in kvm_arch_vcpu_ioctl()
6651 vcpu->arch.guest_state_protected) in kvm_arch_vcpu_ioctl()
6669 r = -ENOTTY; in kvm_arch_vcpu_ioctl()
6675 r = -EINVAL; in kvm_arch_vcpu_ioctl()
6693 if (addr > (unsigned int)(-3 * PAGE_SIZE)) in kvm_vm_ioctl_set_tss_addr()
6694 return -EINVAL; in kvm_vm_ioctl_set_tss_addr()
6709 return -EINVAL; in kvm_vm_ioctl_set_nr_mmu_pages()
6711 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_set_nr_mmu_pages()
6714 kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages; in kvm_vm_ioctl_set_nr_mmu_pages()
6716 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_set_nr_mmu_pages()
6726 * on all VM-Exits, thus we only need to kick running vCPUs to force a in kvm_arch_sync_dirty_log()
6727 * VM-Exit. in kvm_arch_sync_dirty_log()
6732 if (!kvm->arch.cpu_dirty_log_size) in kvm_arch_sync_dirty_log()
6744 if (cap->flags) in kvm_vm_ioctl_enable_cap()
6745 return -EINVAL; in kvm_vm_ioctl_enable_cap()
6747 switch (cap->cap) { in kvm_vm_ioctl_enable_cap()
6749 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
6750 if (cap->args[0] & ~kvm_caps.supported_quirks) in kvm_vm_ioctl_enable_cap()
6754 kvm->arch.disabled_quirks |= cap->args[0] & kvm_caps.supported_quirks; in kvm_vm_ioctl_enable_cap()
6758 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
6759 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
6760 if (cap->args[0] > MAX_NR_RESERVED_IOAPIC_PINS) in kvm_vm_ioctl_enable_cap()
6762 r = -EEXIST; in kvm_vm_ioctl_enable_cap()
6765 if (kvm->created_vcpus) in kvm_vm_ioctl_enable_cap()
6769 kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT; in kvm_vm_ioctl_enable_cap()
6770 kvm->arch.nr_reserved_ioapic_pins = cap->args[0]; in kvm_vm_ioctl_enable_cap()
6774 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
6778 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
6779 if (cap->args[0] & ~KVM_X2APIC_API_VALID_FLAGS) in kvm_vm_ioctl_enable_cap()
6782 if (cap->args[0] & KVM_X2APIC_API_USE_32BIT_IDS) in kvm_vm_ioctl_enable_cap()
6783 kvm->arch.x2apic_format = true; in kvm_vm_ioctl_enable_cap()
6784 if (cap->args[0] & KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK) in kvm_vm_ioctl_enable_cap()
6785 kvm->arch.x2apic_broadcast_quirk_disabled = true; in kvm_vm_ioctl_enable_cap()
6790 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
6791 if (cap->args[0] & ~kvm_get_allowed_disable_exits()) in kvm_vm_ioctl_enable_cap()
6794 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
6795 if (kvm->created_vcpus) in kvm_vm_ioctl_enable_cap()
6798 #define SMT_RSB_MSG "This processor is affected by the Cross-Thread Return Predictions vulnerabilit… in kvm_vm_ioctl_enable_cap()
6803 (cap->args[0] & ~(KVM_X86_DISABLE_EXITS_PAUSE | in kvm_vm_ioctl_enable_cap()
6807 kvm_disable_exits(kvm, cap->args[0]); in kvm_vm_ioctl_enable_cap()
6810 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
6813 kvm->arch.guest_can_read_msr_platform_info = cap->args[0]; in kvm_vm_ioctl_enable_cap()
6817 kvm->arch.exception_payload_enabled = cap->args[0]; in kvm_vm_ioctl_enable_cap()
6821 kvm->arch.triple_fault_event = cap->args[0]; in kvm_vm_ioctl_enable_cap()
6825 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
6826 if (cap->args[0] & ~KVM_MSR_EXIT_REASON_VALID_MASK) in kvm_vm_ioctl_enable_cap()
6828 kvm->arch.user_space_msr_mask = cap->args[0]; in kvm_vm_ioctl_enable_cap()
6832 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
6833 if (cap->args[0] & ~KVM_BUS_LOCK_DETECTION_VALID_MODE) in kvm_vm_ioctl_enable_cap()
6836 if ((cap->args[0] & KVM_BUS_LOCK_DETECTION_OFF) && in kvm_vm_ioctl_enable_cap()
6837 (cap->args[0] & KVM_BUS_LOCK_DETECTION_EXIT)) in kvm_vm_ioctl_enable_cap()
6841 cap->args[0] & KVM_BUS_LOCK_DETECTION_EXIT) in kvm_vm_ioctl_enable_cap()
6842 kvm->arch.bus_lock_detection_enabled = true; in kvm_vm_ioctl_enable_cap()
6849 r = sgx_set_attribute(&allowed_attributes, cap->args[0]); in kvm_vm_ioctl_enable_cap()
6856 kvm->arch.sgx_provisioning_allowed = true; in kvm_vm_ioctl_enable_cap()
6858 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
6863 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
6867 r = kvm_x86_call(vm_copy_enc_context_from)(kvm, cap->args[0]); in kvm_vm_ioctl_enable_cap()
6870 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
6874 r = kvm_x86_call(vm_move_enc_context_from)(kvm, cap->args[0]); in kvm_vm_ioctl_enable_cap()
6877 if (cap->args[0] & ~KVM_EXIT_HYPERCALL_VALID_MASK) { in kvm_vm_ioctl_enable_cap()
6878 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
6881 kvm->arch.hypercall_exit_enabled = cap->args[0]; in kvm_vm_ioctl_enable_cap()
6885 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
6886 if (cap->args[0] & ~1) in kvm_vm_ioctl_enable_cap()
6888 kvm->arch.exit_on_emulation_error = cap->args[0]; in kvm_vm_ioctl_enable_cap()
6892 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
6893 if (!enable_pmu || (cap->args[0] & ~KVM_CAP_PMU_VALID_MASK)) in kvm_vm_ioctl_enable_cap()
6896 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
6897 if (!kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
6898 kvm->arch.enable_pmu = !(cap->args[0] & KVM_PMU_CAP_DISABLE); in kvm_vm_ioctl_enable_cap()
6901 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
6904 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
6905 if (cap->args[0] > KVM_MAX_VCPU_IDS) in kvm_vm_ioctl_enable_cap()
6908 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
6909 if (kvm->arch.bsp_vcpu_id > cap->args[0]) { in kvm_vm_ioctl_enable_cap()
6911 } else if (kvm->arch.max_vcpu_ids == cap->args[0]) { in kvm_vm_ioctl_enable_cap()
6913 } else if (!kvm->arch.max_vcpu_ids) { in kvm_vm_ioctl_enable_cap()
6914 kvm->arch.max_vcpu_ids = cap->args[0]; in kvm_vm_ioctl_enable_cap()
6917 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
6920 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
6921 if ((u32)cap->args[0] & ~KVM_X86_NOTIFY_VMEXIT_VALID_BITS) in kvm_vm_ioctl_enable_cap()
6925 if (!((u32)cap->args[0] & KVM_X86_NOTIFY_VMEXIT_ENABLED)) in kvm_vm_ioctl_enable_cap()
6927 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
6928 if (!kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
6929 kvm->arch.notify_window = cap->args[0] >> 32; in kvm_vm_ioctl_enable_cap()
6930 kvm->arch.notify_vmexit_flags = (u32)cap->args[0]; in kvm_vm_ioctl_enable_cap()
6933 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
6936 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
6950 r = -EPERM; in kvm_vm_ioctl_enable_cap()
6954 if (cap->args[0]) in kvm_vm_ioctl_enable_cap()
6957 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
6958 if (!kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
6959 kvm->arch.disable_nx_huge_pages = true; in kvm_vm_ioctl_enable_cap()
6962 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
6965 u64 bus_cycle_ns = cap->args[0]; in kvm_vm_ioctl_enable_cap()
6972 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
6978 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
6980 r = -ENXIO; in kvm_vm_ioctl_enable_cap()
6981 else if (kvm->created_vcpus) in kvm_vm_ioctl_enable_cap()
6982 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
6984 kvm->arch.apic_bus_cycle_ns = bus_cycle_ns; in kvm_vm_ioctl_enable_cap()
6985 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
6989 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
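
/*
 * Userspace-side sketch: the KVM_ENABLE_CAP pattern served by the switch
 * above, here opting in to 32-bit x2APIC IDs and disabling the broadcast
 * quirk.  args[0] carries the per-cap payload; flags must be zero.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int enable_x2apic_api(int vm_fd)
{
	struct kvm_enable_cap cap = {
		.cap  = KVM_CAP_X2APIC_API,
		.args = { KVM_X2APIC_API_USE_32BIT_IDS |
			  KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK },
	};

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}
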
7003 msr_filter->default_allow = default_allow; in kvm_alloc_msr_filter()
7014 for (i = 0; i < msr_filter->count; i++) in kvm_free_msr_filter()
7015 kfree(msr_filter->ranges[i].bitmap); in kvm_free_msr_filter()
7026 if (!user_range->nmsrs) in kvm_add_msr_filter()
7029 if (user_range->flags & ~KVM_MSR_FILTER_RANGE_VALID_MASK) in kvm_add_msr_filter()
7030 return -EINVAL; in kvm_add_msr_filter()
7032 if (!user_range->flags) in kvm_add_msr_filter()
7033 return -EINVAL; in kvm_add_msr_filter()
7035 bitmap_size = BITS_TO_LONGS(user_range->nmsrs) * sizeof(long); in kvm_add_msr_filter()
7037 return -EINVAL; in kvm_add_msr_filter()
7039 bitmap = memdup_user((__user u8*)user_range->bitmap, bitmap_size); in kvm_add_msr_filter()
7043 msr_filter->ranges[msr_filter->count] = (struct msr_bitmap_range) { in kvm_add_msr_filter()
7044 .flags = user_range->flags, in kvm_add_msr_filter()
7045 .base = user_range->base, in kvm_add_msr_filter()
7046 .nmsrs = user_range->nmsrs, in kvm_add_msr_filter()
7050 msr_filter->count++; in kvm_add_msr_filter()
7063 if (filter->flags & ~KVM_MSR_FILTER_VALID_MASK) in kvm_vm_ioctl_set_msr_filter()
7064 return -EINVAL; in kvm_vm_ioctl_set_msr_filter()
7066 for (i = 0; i < ARRAY_SIZE(filter->ranges); i++) in kvm_vm_ioctl_set_msr_filter()
7067 empty &= !filter->ranges[i].nmsrs; in kvm_vm_ioctl_set_msr_filter()
7069 default_allow = !(filter->flags & KVM_MSR_FILTER_DEFAULT_DENY); in kvm_vm_ioctl_set_msr_filter()
7071 return -EINVAL; in kvm_vm_ioctl_set_msr_filter()
7075 return -ENOMEM; in kvm_vm_ioctl_set_msr_filter()
7077 for (i = 0; i < ARRAY_SIZE(filter->ranges); i++) { in kvm_vm_ioctl_set_msr_filter()
7078 r = kvm_add_msr_filter(new_filter, &filter->ranges[i]); in kvm_vm_ioctl_set_msr_filter()
7085 mutex_lock(&kvm->lock); in kvm_vm_ioctl_set_msr_filter()
7086 old_filter = rcu_replace_pointer(kvm->arch.msr_filter, new_filter, in kvm_vm_ioctl_set_msr_filter()
7087 mutex_is_locked(&kvm->lock)); in kvm_vm_ioctl_set_msr_filter()
7088 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_set_msr_filter()
7089 synchronize_srcu(&kvm->srcu); in kvm_vm_ioctl_set_msr_filter()
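
/*
 * Userspace-side sketch: installing a filter via KVM_X86_SET_MSR_FILTER,
 * matching the range validation in kvm_add_msr_filter() above.  One
 * bitmap bit per MSR starting at .base; a set bit means the access is
 * allowed.  Here everything is allowed by default except writes to one
 * MSR; a denied write injects #GP unless userspace has enabled
 * KVM_MSR_EXIT_REASON_FILTER exits.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int deny_msr_write(int vm_fd, uint32_t msr)
{
	uint8_t bitmap = 0;	/* one MSR, bit clear => denied */
	struct kvm_msr_filter filter = {
		.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
		.ranges[0] = {
			.flags  = KVM_MSR_FILTER_WRITE,
			.base   = msr,
			.nmsrs  = 1,
			.bitmap = &bitmap,
		},
	};

	return ioctl(vm_fd, KVM_X86_SET_MSR_FILTER, &filter);
}
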
7122 struct kvm *kvm = filp->private_data; in kvm_arch_vm_compat_ioctl()
7123 long r = -ENOTTY; in kvm_arch_vm_compat_ioctl()
7134 return -EFAULT; in kvm_arch_vm_compat_ioctl()
7142 .flags = cr->flags, in kvm_arch_vm_compat_ioctl()
7143 .nmsrs = cr->nmsrs, in kvm_arch_vm_compat_ioctl()
7144 .base = cr->base, in kvm_arch_vm_compat_ioctl()
7145 .bitmap = (__u8 *)(ulong)cr->bitmap, in kvm_arch_vm_compat_ioctl()
7165 * Ignore the return, marking the guest paused only "fails" if the vCPU in kvm_arch_suspend_notifier()
7192 return -EFAULT; in kvm_vm_ioctl_get_clock()
7199 struct kvm_arch *ka = &kvm->arch; in kvm_vm_ioctl_set_clock()
7204 return -EFAULT; in kvm_vm_ioctl_set_clock()
7211 return -EINVAL; in kvm_vm_ioctl_set_clock()
7231 data.clock += now_real_ns - data.realtime; in kvm_vm_ioctl_set_clock()
7234 if (ka->use_master_clock) in kvm_vm_ioctl_set_clock()
7235 now_raw_ns = ka->master_kernel_ns; in kvm_vm_ioctl_set_clock()
7238 ka->kvmclock_offset = data.clock - now_raw_ns; in kvm_vm_ioctl_set_clock()
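
/*
 * Userspace-side sketch: restoring kvmclock after migration with
 * KVM_SET_CLOCK.  Passing KVM_CLOCK_REALTIME lets KVM add the elapsed
 * host real time to .clock, which is exactly the adjustment computed
 * above from now_real_ns - data.realtime.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int restore_kvmclock(int vm_fd, __u64 saved_clock, __u64 saved_realtime)
{
	struct kvm_clock_data data = {
		.clock    = saved_clock,
		.flags    = KVM_CLOCK_REALTIME,
		.realtime = saved_realtime,
	};

	return ioctl(vm_fd, KVM_SET_CLOCK, &data);
}
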
7245 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl()
7247 int r = -ENOTTY; in kvm_arch_vm_ioctl()
7251 * This union makes it completely explicit to gcc-3.x in kvm_arch_vm_ioctl()
7269 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
7270 r = -EINVAL; in kvm_arch_vm_ioctl()
7271 if (kvm->created_vcpus) in kvm_arch_vm_ioctl()
7273 r = -EFAULT; in kvm_arch_vm_ioctl()
7278 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
7286 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
7288 r = -EEXIST; in kvm_arch_vm_ioctl()
7293 * Disallow an in-kernel I/O APIC if the VM has protected EOIs, in kvm_arch_vm_ioctl()
7295 * emulate level-triggered interrupts. in kvm_arch_vm_ioctl()
7297 r = -ENOTTY; in kvm_arch_vm_ioctl()
7298 if (kvm->arch.has_protected_eoi) in kvm_arch_vm_ioctl()
7301 r = -EINVAL; in kvm_arch_vm_ioctl()
7302 if (kvm->created_vcpus) in kvm_arch_vm_ioctl()
7321 /* Write kvm->irq_routing before enabling irqchip_in_kernel. */ in kvm_arch_vm_ioctl()
7323 kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL; in kvm_arch_vm_ioctl()
7326 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
7333 r = -EFAULT; in kvm_arch_vm_ioctl()
7338 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
7339 r = -EEXIST; in kvm_arch_vm_ioctl()
7340 if (kvm->arch.vpit) in kvm_arch_vm_ioctl()
7342 r = -ENOENT; in kvm_arch_vm_ioctl()
7345 r = -ENOMEM; in kvm_arch_vm_ioctl()
7346 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags); in kvm_arch_vm_ioctl()
7347 if (kvm->arch.vpit) in kvm_arch_vm_ioctl()
7350 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
7362 r = -ENXIO; in kvm_arch_vm_ioctl()
7368 r = -EFAULT; in kvm_arch_vm_ioctl()
7386 r = -ENXIO; in kvm_arch_vm_ioctl()
7395 r = -EFAULT; in kvm_arch_vm_ioctl()
7398 r = -ENXIO; in kvm_arch_vm_ioctl()
7399 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
7404 r = -EFAULT; in kvm_arch_vm_ioctl()
7411 r = -EFAULT; in kvm_arch_vm_ioctl()
7414 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
7415 r = -ENXIO; in kvm_arch_vm_ioctl()
7416 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
7420 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
7424 r = -ENXIO; in kvm_arch_vm_ioctl()
7425 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
7430 r = -EFAULT; in kvm_arch_vm_ioctl()
7437 r = -EFAULT; in kvm_arch_vm_ioctl()
7440 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
7441 r = -ENXIO; in kvm_arch_vm_ioctl()
7442 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
7446 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
7451 r = -EFAULT; in kvm_arch_vm_ioctl()
7454 r = -ENXIO; in kvm_arch_vm_ioctl()
7455 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
7463 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
7464 if (kvm->created_vcpus) in kvm_arch_vm_ioctl()
7465 r = -EBUSY; in kvm_arch_vm_ioctl()
7467 (kvm->arch.max_vcpu_ids && arg > kvm->arch.max_vcpu_ids)) in kvm_arch_vm_ioctl()
7468 r = -EINVAL; in kvm_arch_vm_ioctl()
7470 kvm->arch.bsp_vcpu_id = arg; in kvm_arch_vm_ioctl()
7471 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
7476 r = -EFAULT; in kvm_arch_vm_ioctl()
7485 r = -EFAULT; in kvm_arch_vm_ioctl()
7490 r = -EFAULT; in kvm_arch_vm_ioctl()
7496 r = -EFAULT; in kvm_arch_vm_ioctl()
7505 r = -EFAULT; in kvm_arch_vm_ioctl()
7521 r = -EINVAL; in kvm_arch_vm_ioctl()
7531 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
7532 if (!kvm->created_vcpus) { in kvm_arch_vm_ioctl()
7533 WRITE_ONCE(kvm->arch.default_tsc_khz, user_tsc_khz); in kvm_arch_vm_ioctl()
7536 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
7540 r = READ_ONCE(kvm->arch.default_tsc_khz); in kvm_arch_vm_ioctl()
7544 r = -ENOTTY; in kvm_arch_vm_ioctl()
7553 r = -EFAULT; in kvm_arch_vm_ioctl()
7557 r = -ENOTTY; in kvm_arch_vm_ioctl()
7567 r = -EFAULT; in kvm_arch_vm_ioctl()
7571 r = -ENOTTY; in kvm_arch_vm_ioctl()
7582 r = -EFAULT; in kvm_arch_vm_ioctl()
7597 return -EFAULT; in kvm_arch_vm_ioctl()
7603 r = -ENOTTY; in kvm_arch_vm_ioctl()
7663 (msr_index - MSR_IA32_RTIT_ADDR0_A >= in kvm_probe_msr_to_save()
7668 MSR_ARCH_PERFMON_PERFCTR0 + KVM_MAX_NR_GP_COUNTERS - 1: in kvm_probe_msr_to_save()
7669 if (msr_index - MSR_ARCH_PERFMON_PERFCTR0 >= in kvm_probe_msr_to_save()
7674 MSR_ARCH_PERFMON_EVENTSEL0 + KVM_MAX_NR_GP_COUNTERS - 1: in kvm_probe_msr_to_save()
7675 if (msr_index - MSR_ARCH_PERFMON_EVENTSEL0 >= in kvm_probe_msr_to_save()
7680 MSR_ARCH_PERFMON_FIXED_CTR0 + KVM_MAX_NR_FIXED_COUNTERS - 1: in kvm_probe_msr_to_save()
7681 if (msr_index - MSR_ARCH_PERFMON_FIXED_CTR0 >= in kvm_probe_msr_to_save()
7769 !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v)) in vcpu_mmio_write()
7774 len -= n; in vcpu_mmio_write()
7789 !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev, in vcpu_mmio_read()
7796 len -= n; in vcpu_mmio_read()
7818 struct kvm_mmu *mmu = vcpu->arch.mmu; in translate_nested_gpa()
7823 /* NPT walks are always user-walks */ in translate_nested_gpa()
7825 t_gpa = mmu->gva_to_gpa(vcpu, mmu, gpa, access, exception); in translate_nested_gpa()
7833 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_mmu_gva_to_gpa_read()
7836 return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); in kvm_mmu_gva_to_gpa_read()
7843 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_mmu_gva_to_gpa_write()
7847 return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); in kvm_mmu_gva_to_gpa_write()
7851 /* uses this to access any guest's mapped memory without checking CPL */
7855 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_mmu_gva_to_gpa_system()
7857 return mmu->gva_to_gpa(vcpu, mmu, gva, 0, exception); in kvm_mmu_gva_to_gpa_system()
7864 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_read_guest_virt_helper()
7869 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access, exception); in kvm_read_guest_virt_helper()
7870 unsigned offset = addr & (PAGE_SIZE-1); in kvm_read_guest_virt_helper()
7871 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset); in kvm_read_guest_virt_helper()
7883 bytes -= toread; in kvm_read_guest_virt_helper()
7897 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_fetch_guest_virt()
7903 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access|PFERR_FETCH_MASK, in kvm_fetch_guest_virt()
7908 offset = addr & (PAGE_SIZE-1); in kvm_fetch_guest_virt()
7910 bytes = (unsigned)PAGE_SIZE - offset; in kvm_fetch_guest_virt()
7956 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_write_guest_virt_helper()
7961 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access, exception); in kvm_write_guest_virt_helper()
7962 unsigned offset = addr & (PAGE_SIZE-1); in kvm_write_guest_virt_helper()
7963 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset); in kvm_write_guest_virt_helper()
7974 bytes -= towrite; in kvm_write_guest_virt_helper()
8002 vcpu->arch.l1tf_flush_l1d = true; in kvm_write_guest_virt_system()
8062 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in vcpu_mmio_gva_to_gpa()
8072 !permission_fault(vcpu, vcpu->arch.walk_mmu, in vcpu_mmio_gva_to_gpa()
8073 vcpu->arch.mmio_access, 0, access))) { in vcpu_mmio_gva_to_gpa()
8074 *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT | in vcpu_mmio_gva_to_gpa()
8075 (gva & (PAGE_SIZE - 1)); in vcpu_mmio_gva_to_gpa()
8080 *gpa = mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); in vcpu_mmio_gva_to_gpa()
8083 return -1; in vcpu_mmio_gva_to_gpa()
8114 if (vcpu->mmio_read_completed) { in read_prepare()
8116 vcpu->mmio_fragments[0].gpa, val); in read_prepare()
8117 vcpu->mmio_read_completed = 0; in read_prepare()
8152 struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0]; in write_exit_mmio()
8154 memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len)); in write_exit_mmio()
8180 bool write = ops->write; in emulator_read_write_onepage()
8182 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in emulator_read_write_onepage()
8191 if (ctxt->gpa_available && emulator_can_use_gpa(ctxt) && in emulator_read_write_onepage()
8192 (addr & ~PAGE_MASK) == (ctxt->gpa_val & ~PAGE_MASK)) { in emulator_read_write_onepage()
8193 gpa = ctxt->gpa_val; in emulator_read_write_onepage()
8201 if (!ret && ops->read_write_emulate(vcpu, gpa, val, bytes)) in emulator_read_write_onepage()
8207 handled = ops->read_write_mmio(vcpu, gpa, bytes, val); in emulator_read_write_onepage()
8212 bytes -= handled; in emulator_read_write_onepage()
8215 WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS); in emulator_read_write_onepage()
8216 frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++]; in emulator_read_write_onepage()
8217 frag->gpa = gpa; in emulator_read_write_onepage()
8218 frag->data = val; in emulator_read_write_onepage()
8219 frag->len = bytes; in emulator_read_write_onepage()
8233 if (ops->read_write_prepare && in emulator_read_write()
8234 ops->read_write_prepare(vcpu, val, bytes)) in emulator_read_write()
8237 vcpu->mmio_nr_fragments = 0; in emulator_read_write()
8240 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) { in emulator_read_write()
8243 now = -addr & ~PAGE_MASK; in emulator_read_write()
8250 if (ctxt->mode != X86EMUL_MODE_PROT64) in emulator_read_write()
8253 bytes -= now; in emulator_read_write()
8261 if (!vcpu->mmio_nr_fragments) in emulator_read_write()
8264 gpa = vcpu->mmio_fragments[0].gpa; in emulator_read_write()
8266 vcpu->mmio_needed = 1; in emulator_read_write()
8267 vcpu->mmio_cur_fragment = 0; in emulator_read_write()
8269 vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len); in emulator_read_write()
8270 vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write; in emulator_read_write()
8271 vcpu->run->exit_reason = KVM_EXIT_MMIO; in emulator_read_write()
8272 vcpu->run->mmio.phys_addr = gpa; in emulator_read_write()
8274 return ops->read_write_exit_mmio(vcpu, gpa, val, bytes); in emulator_read_write()
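
/*
 * Userspace-side sketch: completing the KVM_EXIT_MMIO exit produced
 * above.  Each fragment is at most 8 bytes; userspace services it and
 * re-enters with KVM_RUN, and KVM feeds the next fragment until done.
 * device_read()/device_write() are hypothetical device-model hooks.
 */
#include <linux/kvm.h>

void device_read(__u64 gpa, void *data, __u32 len);		/* hypothetical */
void device_write(__u64 gpa, const void *data, __u32 len);	/* hypothetical */

static void handle_mmio_exit(struct kvm_run *run)
{
	if (run->mmio.is_write)
		device_write(run->mmio.phys_addr, run->mmio.data,
			     run->mmio.len);
	else
		device_read(run->mmio.phys_addr, run->mmio.data,
			    run->mmio.len);
}
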
8314 if (bytes > 8 || (bytes & (bytes - 1))) in emulator_cmpxchg_emulated()
8328 page_line_mask = ~(cache_line_size() - 1); in emulator_cmpxchg_emulated()
8332 if (((gpa + bytes - 1) & page_line_mask) != (gpa & page_line_mask)) in emulator_cmpxchg_emulated()
8390 WARN_ON_ONCE(vcpu->arch.pio.count); in emulator_pio_in_out()
8406 memset(data, 0, size * (count - i)); in emulator_pio_in_out()
8415 vcpu->arch.pio.port = port; in emulator_pio_in_out()
8416 vcpu->arch.pio.in = in; in emulator_pio_in_out()
8417 vcpu->arch.pio.count = count; in emulator_pio_in_out()
8418 vcpu->arch.pio.size = size; in emulator_pio_in_out()
8421 memset(vcpu->arch.pio_data, 0, size * count); in emulator_pio_in_out()
8423 memcpy(vcpu->arch.pio_data, data, size * count); in emulator_pio_in_out()
8425 vcpu->run->exit_reason = KVM_EXIT_IO; in emulator_pio_in_out()
8426 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; in emulator_pio_in_out()
8427 vcpu->run->io.size = size; in emulator_pio_in_out()
8428 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE; in emulator_pio_in_out()
8429 vcpu->run->io.count = count; in emulator_pio_in_out()
8430 vcpu->run->io.port = port; in emulator_pio_in_out()
8446 int size = vcpu->arch.pio.size; in complete_emulator_pio_in()
8447 unsigned int count = vcpu->arch.pio.count; in complete_emulator_pio_in()
8448 memcpy(val, vcpu->arch.pio_data, size * count); in complete_emulator_pio_in()
8449 trace_kvm_pio(KVM_PIO_IN, vcpu->arch.pio.port, size, count, vcpu->arch.pio_data); in complete_emulator_pio_in()
8450 vcpu->arch.pio.count = 0; in complete_emulator_pio_in()
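
/*
 * Userspace-side sketch: servicing the KVM_EXIT_IO exit filled in by
 * emulator_pio_in_out() above.  The data sits inside the shared kvm_run
 * mapping at io.data_offset; string I/O repeats .count times.
 * port_read()/port_write() are hypothetical device-model hooks.
 */
#include <stdint.h>
#include <linux/kvm.h>

void port_read(__u16 port, void *data, __u8 size);	/* hypothetical */
void port_write(__u16 port, const void *data, __u8 size); /* hypothetical */

static void handle_io_exit(struct kvm_run *run)
{
	uint8_t *data = (uint8_t *)run + run->io.data_offset;
	uint32_t i;

	for (i = 0; i < run->io.count; i++, data += run->io.size) {
		if (run->io.direction == KVM_EXIT_IO_IN)
			port_read(run->io.port, data, run->io.size);
		else
			port_write(run->io.port, data, run->io.size);
	}
}
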
8458 if (vcpu->arch.pio.count) { in emulator_pio_in_emulated()
8506 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); in kvm_emulate_wbinvd_noskip()
8507 wbinvd_on_cpus_mask(vcpu->arch.wbinvd_dirty_mask); in kvm_emulate_wbinvd_noskip()
8509 cpumask_clear(vcpu->arch.wbinvd_dirty_mask); in kvm_emulate_wbinvd_noskip()
8543 return (curr_cr & ~((1ULL << 32) - 1)) | new_val; in mk_cr_64()
8556 value = vcpu->arch.cr2; in emulator_get_cr()
8585 vcpu->arch.cr2 = val; in emulator_set_cr()
8598 res = -1; in emulator_set_cr()
8659 desc->type = var.type; in emulator_get_segment()
8660 desc->s = var.s; in emulator_get_segment()
8661 desc->dpl = var.dpl; in emulator_get_segment()
8662 desc->p = var.present; in emulator_get_segment()
8663 desc->avl = var.avl; in emulator_get_segment()
8664 desc->l = var.l; in emulator_get_segment()
8665 desc->d = var.db; in emulator_get_segment()
8666 desc->g = var.g; in emulator_get_segment()
8684 if (desc->g) in emulator_set_segment()
8686 var.type = desc->type; in emulator_set_segment()
8687 var.dpl = desc->dpl; in emulator_set_segment()
8688 var.db = desc->d; in emulator_set_segment()
8689 var.s = desc->s; in emulator_set_segment()
8690 var.l = desc->l; in emulator_set_segment()
8691 var.g = desc->g; in emulator_set_segment()
8692 var.avl = desc->avl; in emulator_set_segment()
8693 var.present = desc->p; in emulator_set_segment()
8751 * Treat emulator accesses to the current shadow stack pointer as host- in emulator_get_msr()
8754 * so the index is fully KVM-controlled. in emulator_get_msr()
8775 emul_to_vcpu(ctxt)->arch.halt_request = 1; in emulator_halt()
8783 &ctxt->exception); in emulator_intercept()
8853 struct kvm *kvm = emul_to_vcpu(ctxt)->kvm; in emulator_vm_bugged()
8855 if (!kvm->vm_bugged) in emulator_vm_bugged()
8944 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in inject_emulated_exception()
8946 if (ctxt->exception.vector == PF_VECTOR) in inject_emulated_exception()
8947 kvm_inject_emulated_page_fault(vcpu, &ctxt->exception); in inject_emulated_exception()
8948 else if (ctxt->exception.error_code_valid) in inject_emulated_exception()
8949 kvm_queue_exception_e(vcpu, ctxt->exception.vector, in inject_emulated_exception()
8950 ctxt->exception.error_code); in inject_emulated_exception()
8952 kvm_queue_exception(vcpu, ctxt->exception.vector); in inject_emulated_exception()
8965 ctxt->vcpu = vcpu; in alloc_emulate_ctxt()
8966 ctxt->ops = &emulate_ops; in alloc_emulate_ctxt()
8967 vcpu->arch.emulate_ctxt = ctxt; in alloc_emulate_ctxt()
8974 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in init_emulate_ctxt()
8979 ctxt->gpa_available = false; in init_emulate_ctxt()
8980 ctxt->eflags = kvm_get_rflags(vcpu); in init_emulate_ctxt()
8981 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0; in init_emulate_ctxt()
8983 ctxt->eip = kvm_rip_read(vcpu); in init_emulate_ctxt()
8984 ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL : in init_emulate_ctxt()
8985 (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 : in init_emulate_ctxt()
8989 ctxt->interruptibility = 0; in init_emulate_ctxt()
8990 ctxt->have_exception = false; in init_emulate_ctxt()
8991 ctxt->exception.vector = -1; in init_emulate_ctxt()
8992 ctxt->perm_ok = false; in init_emulate_ctxt()
8995 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; in init_emulate_ctxt()
9000 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in kvm_inject_realmode_interrupt()
9005 ctxt->op_bytes = 2; in kvm_inject_realmode_interrupt()
9006 ctxt->ad_bytes = 2; in kvm_inject_realmode_interrupt()
9007 ctxt->_eip = ctxt->eip + inc_eip; in kvm_inject_realmode_interrupt()
9013 ctxt->eip = ctxt->_eip; in kvm_inject_realmode_interrupt()
9014 kvm_rip_write(vcpu, ctxt->eip); in kvm_inject_realmode_interrupt()
9015 kvm_set_rflags(vcpu, ctxt->eflags); in kvm_inject_realmode_interrupt()
9023 struct kvm_run *run = vcpu->run; in prepare_emulation_failure_exit()
9036 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in prepare_emulation_failure_exit()
9037 run->emulation_failure.suberror = KVM_INTERNAL_ERROR_EMULATION; in prepare_emulation_failure_exit()
9049 run->emulation_failure.flags = 0; in prepare_emulation_failure_exit()
9052 BUILD_BUG_ON((sizeof(run->emulation_failure.insn_size) + in prepare_emulation_failure_exit()
9053 sizeof(run->emulation_failure.insn_bytes) != 16)); in prepare_emulation_failure_exit()
9055 run->emulation_failure.flags |= in prepare_emulation_failure_exit()
9057 run->emulation_failure.insn_size = insn_size; in prepare_emulation_failure_exit()
9058 memset(run->emulation_failure.insn_bytes, 0x90, in prepare_emulation_failure_exit()
9059 sizeof(run->emulation_failure.insn_bytes)); in prepare_emulation_failure_exit()
9060 memcpy(run->emulation_failure.insn_bytes, insn_bytes, insn_size); in prepare_emulation_failure_exit()
9063 memcpy(&run->internal.data[info_start], info, sizeof(info)); in prepare_emulation_failure_exit()
9064 memcpy(&run->internal.data[info_start + ARRAY_SIZE(info)], data, in prepare_emulation_failure_exit()
9067 run->emulation_failure.ndata = info_start + ARRAY_SIZE(info) + ndata; in prepare_emulation_failure_exit()
9072 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in prepare_emulation_ctxt_failure_exit()
9074 prepare_emulation_failure_exit(vcpu, NULL, 0, ctxt->fetch.data, in prepare_emulation_ctxt_failure_exit()
9075 ctxt->fetch.end - ctxt->fetch.data); in prepare_emulation_ctxt_failure_exit()
9094 struct kvm_run *run = vcpu->run; in kvm_prepare_event_vectoring_exit()
9101 run->internal.data[ndata++] = info2; in kvm_prepare_event_vectoring_exit()
9102 run->internal.data[ndata++] = reason; in kvm_prepare_event_vectoring_exit()
9103 run->internal.data[ndata++] = info1; in kvm_prepare_event_vectoring_exit()
9104 run->internal.data[ndata++] = gpa; in kvm_prepare_event_vectoring_exit()
9105 run->internal.data[ndata++] = vcpu->arch.last_vmentry_cpu; in kvm_prepare_event_vectoring_exit()
9107 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_prepare_event_vectoring_exit()
9108 run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV; in kvm_prepare_event_vectoring_exit()
9109 run->internal.ndata = ndata; in kvm_prepare_event_vectoring_exit()
9115 struct kvm *kvm = vcpu->kvm; in handle_emulation_failure()
9117 ++vcpu->stat.insn_emulation_fail; in handle_emulation_failure()
9125 if (kvm->arch.exit_on_emulation_error || in handle_emulation_failure()
9154 * a SPTE and write-protect the gfn to resolve the !PRESENT fault, and in kvm_unprotect_and_retry_on_failure()
9163 * table, unprotect the gfn (zap any relevant SPTEs) and re-enter the in kvm_unprotect_and_retry_on_failure()
9164 * guest to let the CPU re-execute the instruction in the hope that the in kvm_unprotect_and_retry_on_failure()
9199 struct kvm_run *kvm_run = vcpu->run; in kvm_vcpu_do_singlestep()
9201 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { in kvm_vcpu_do_singlestep()
9202 kvm_run->debug.arch.dr6 = DR6_BS | DR6_ACTIVE_LOW; in kvm_vcpu_do_singlestep()
9203 kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu); in kvm_vcpu_do_singlestep()
9204 kvm_run->debug.arch.exception = DB_VECTOR; in kvm_vcpu_do_singlestep()
9205 kvm_run->exit_reason = KVM_EXIT_DEBUG; in kvm_vcpu_do_singlestep()
9268 * to behave as if KVM intercepted the instruction without an exception in kvm_vcpu_check_code_breakpoint()
9269 * and without a prefix. in kvm_vcpu_check_code_breakpoint()
9275 if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) && in kvm_vcpu_check_code_breakpoint()
9276 (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) { in kvm_vcpu_check_code_breakpoint()
9277 struct kvm_run *kvm_run = vcpu->run; in kvm_vcpu_check_code_breakpoint()
9280 vcpu->arch.guest_debug_dr7, in kvm_vcpu_check_code_breakpoint()
9281 vcpu->arch.eff_db); in kvm_vcpu_check_code_breakpoint()
9284 kvm_run->debug.arch.dr6 = dr6 | DR6_ACTIVE_LOW; in kvm_vcpu_check_code_breakpoint()
9285 kvm_run->debug.arch.pc = eip; in kvm_vcpu_check_code_breakpoint()
9286 kvm_run->debug.arch.exception = DB_VECTOR; in kvm_vcpu_check_code_breakpoint()
9287 kvm_run->exit_reason = KVM_EXIT_DEBUG; in kvm_vcpu_check_code_breakpoint()
9293 if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) && in kvm_vcpu_check_code_breakpoint()
9297 vcpu->arch.dr7, in kvm_vcpu_check_code_breakpoint()
9298 vcpu->arch.db); in kvm_vcpu_check_code_breakpoint()
9312 switch (ctxt->opcode_len) { in is_vmware_backdoor_opcode()
9314 switch (ctxt->b) { in is_vmware_backdoor_opcode()
9331 switch (ctxt->b) { in is_vmware_backdoor_opcode()
9344 * (and wrong) when emulating on an intercepted fault-like exception[*], as
9354 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in x86_decode_emulated_instruction()
9362 ++vcpu->stat.insn_emulation; in x86_decode_emulated_instruction()
9372 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in x86_emulate_instruction()
9398 vcpu->arch.l1tf_flush_l1d = true; in x86_emulate_instruction()
9405 * are fault-like and are higher priority than any faults on in x86_emulate_instruction()
9423 if (ctxt->have_exception && in x86_emulate_instruction()
9426 * #UD should result in just EMULATION_FAILED, and trap-like in x86_emulate_instruction()
9429 WARN_ON_ONCE(ctxt->exception.vector == UD_VECTOR || in x86_emulate_instruction()
9430 exception_type(ctxt->exception.vector) == EXCPT_TRAP); in x86_emulate_instruction()
9445 * EMULTYPE_SKIP without EMULTYPE_COMPLETE_USER_EXIT is intended for in x86_emulate_instruction()
9448 * injecting single-step #DBs. in x86_emulate_instruction()
9451 if (ctxt->mode != X86EMUL_MODE_PROT64) in x86_emulate_instruction()
9452 ctxt->eip = (u32)ctxt->_eip; in x86_emulate_instruction()
9454 ctxt->eip = ctxt->_eip; in x86_emulate_instruction()
9461 kvm_rip_write(vcpu, ctxt->eip); in x86_emulate_instruction()
9462 if (ctxt->eflags & X86_EFLAGS_RF) in x86_emulate_instruction()
9463 kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF); in x86_emulate_instruction()
9468 * If emulation was caused by a write-protection #PF on a non-page_table in x86_emulate_instruction()
9480 if (vcpu->arch.emulate_regs_need_sync_from_vcpu) { in x86_emulate_instruction()
9481 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; in x86_emulate_instruction()
9488 ctxt->exception.address = cr2_or_gpa; in x86_emulate_instruction()
9491 if (vcpu->arch.mmu->root_role.direct) { in x86_emulate_instruction()
9492 ctxt->gpa_available = true; in x86_emulate_instruction()
9493 ctxt->gpa_val = cr2_or_gpa; in x86_emulate_instruction()
9497 ctxt->exception.address = 0; in x86_emulate_instruction()
9502 * L2, unless KVM is re-emulating a previously decoded instruction, in x86_emulate_instruction()
9520 if (ctxt->have_exception) { in x86_emulate_instruction()
9521 WARN_ON_ONCE(vcpu->mmio_needed && !vcpu->mmio_is_write); in x86_emulate_instruction()
9522 vcpu->mmio_needed = false; in x86_emulate_instruction()
9525 } else if (vcpu->arch.pio.count) { in x86_emulate_instruction()
9526 if (!vcpu->arch.pio.in) { in x86_emulate_instruction()
9527 /* FIXME: return into emulator if single-stepping. */ in x86_emulate_instruction()
9528 vcpu->arch.pio.count = 0; in x86_emulate_instruction()
9531 vcpu->arch.complete_userspace_io = complete_emulated_pio; in x86_emulate_instruction()
9534 } else if (vcpu->mmio_needed) { in x86_emulate_instruction()
9535 ++vcpu->stat.mmio_exits; in x86_emulate_instruction()
9537 if (!vcpu->mmio_is_write) in x86_emulate_instruction()
9540 vcpu->arch.complete_userspace_io = complete_emulated_mmio; in x86_emulate_instruction()
9541 } else if (vcpu->arch.complete_userspace_io) { in x86_emulate_instruction()
9552 toggle_interruptibility(vcpu, ctxt->interruptibility); in x86_emulate_instruction()
9553 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in x86_emulate_instruction()
9556 * Note, EXCPT_DB is assumed to be fault-like as the emulator in x86_emulate_instruction()
9558 * of which are fault-like. in x86_emulate_instruction()
9560 if (!ctxt->have_exception || in x86_emulate_instruction()
9561 exception_type(ctxt->exception.vector) == EXCPT_TRAP) { in x86_emulate_instruction()
9563 if (ctxt->is_branch) in x86_emulate_instruction()
9565 kvm_rip_write(vcpu, ctxt->eip); in x86_emulate_instruction()
9566 if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) in x86_emulate_instruction()
9569 __kvm_set_rflags(vcpu, ctxt->eflags); in x86_emulate_instruction()
9578 if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF)) in x86_emulate_instruction()
9581 vcpu->arch.emulate_regs_need_sync_to_vcpu = true; in x86_emulate_instruction()
9601 vcpu->arch.pio.count = 0; in complete_fast_pio_out_port_0x7e()
9607 vcpu->arch.pio.count = 0; in complete_fast_pio_out()
9609 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.cui_linear_rip))) in complete_fast_pio_out()
9629 kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_OUT_7E_INC_RIP)) { in kvm_fast_pio_out()
9630 vcpu->arch.complete_userspace_io = in kvm_fast_pio_out()
9634 vcpu->arch.cui_linear_rip = kvm_get_linear_rip(vcpu); in kvm_fast_pio_out()
9635 vcpu->arch.complete_userspace_io = complete_fast_pio_out; in kvm_fast_pio_out()
9645 BUG_ON(vcpu->arch.pio.count != 1); in complete_fast_pio_in()
9647 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.cui_linear_rip))) { in complete_fast_pio_in()
9648 vcpu->arch.pio.count = 0; in complete_fast_pio_in()
9653 val = (vcpu->arch.pio.size < 4) ? kvm_rax_read(vcpu) : 0; in complete_fast_pio_in()
9676 vcpu->arch.cui_linear_rip = kvm_get_linear_rip(vcpu); in kvm_fast_pio_in()
9677 vcpu->arch.complete_userspace_io = complete_fast_pio_in; in kvm_fast_pio_in()
9708 khz = freq->new; in tsc_khz_changed()
9729 /* TSC frequency always matches when on Hyper-V */ in kvm_hyperv_tsc_notifier()
9797 if (vcpu->cpu != cpu) in __kvmclock_cpufreq_notifier()
9800 if (vcpu->cpu != raw_smp_processor_id()) in __kvmclock_cpufreq_notifier()
9806 if (freq->old < freq->new && send_ipi) { in __kvmclock_cpufreq_notifier()
9829 if (val == CPUFREQ_PRECHANGE && freq->old > freq->new) in kvmclock_cpufreq_notifier()
9831 if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new) in kvmclock_cpufreq_notifier()
9834 for_each_cpu(cpu, freq->policy->cpus) in kvmclock_cpufreq_notifier()
9862 if (policy->cpuinfo.max_freq) in kvm_timer_init()
9863 max_tsc_khz = policy->cpuinfo.max_freq; in kvm_timer_init()
9921 if (!gtod_is_based_on_tsc(gtod->clock.vclock_mode) && in pvclock_gtod_notify()
9934 memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops)); in kvm_ops_update()
9944 #include <asm/kvm-x86-ops.h> in kvm_ops_update()
9947 kvm_pmu_ops_update(ops->pmu_ops); in kvm_ops_update()
9965 return -EIO; in kvm_x86_check_processor_compatibility()
9984 return -EEXIST; in kvm_x86_vendor_init()
9994 return -EOPNOTSUPP; in kvm_x86_vendor_init()
9999 return -EOPNOTSUPP; in kvm_x86_vendor_init()
10012 return -EIO; in kvm_x86_vendor_init()
10024 return -EIO; in kvm_x86_vendor_init()
10032 return -ENOMEM; in kvm_x86_vendor_init()
10038 r = -ENOMEM; in kvm_x86_vendor_init()
10065 kvm_init_pmu_capability(ops->pmu_ops); in kvm_x86_vendor_init()
10070 r = ops->hardware_setup(); in kvm_x86_vendor_init()
10092 if (pi_inject_timer == -1) in kvm_x86_vendor_init()
10101 kvm_register_perf_callbacks(ops->handle_intel_pt_intr); in kvm_x86_vendor_init()
10195 return -KVM_EOPNOTSUPP; in kvm_pv_clock_pairing()
10201 if (vcpu->arch.tsc_always_catchup) in kvm_pv_clock_pairing()
10202 return -KVM_EOPNOTSUPP; in kvm_pv_clock_pairing()
10205 return -KVM_EOPNOTSUPP; in kvm_pv_clock_pairing()
10214 if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing, in kvm_pv_clock_pairing()
10216 ret = -KVM_EFAULT; in kvm_pv_clock_pairing()
10225 * @apicid: APIC ID of the vCPU to be kicked.
10245 return (READ_ONCE(kvm->arch.apicv_inhibit_reasons) == 0); in kvm_apicv_activated()
10251 ulong vm_reasons = READ_ONCE(vcpu->kvm->arch.apicv_inhibit_reasons); in kvm_vcpu_apicv_activated()
10279 set_or_clear_apicv_inhibit(&kvm->arch.apicv_inhibit_reasons, reason, true); in kvm_apicv_init()
10281 init_rwsem(&kvm->arch.apicv_update_lock); in kvm_apicv_init()
10289 vcpu->stat.directed_yield_attempted++; in kvm_sched_yield()
10295 map = rcu_dereference(vcpu->kvm->arch.apic_map); in kvm_sched_yield()
10297 if (likely(map) && dest_id <= map->max_apic_id) { in kvm_sched_yield()
10298 dest_id = array_index_nospec(dest_id, map->max_apic_id + 1); in kvm_sched_yield()
10299 if (map->phys_map[dest_id]) in kvm_sched_yield()
10300 target = map->phys_map[dest_id]->vcpu; in kvm_sched_yield()
10305 if (!target || !READ_ONCE(target->ready)) in kvm_sched_yield()
10315 vcpu->stat.directed_yield_successful++; in kvm_sched_yield()
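/*
 * The dest_id lookup in kvm_sched_yield() pairs its bounds check with
 * array_index_nospec() so a mispredicted "id <= max" branch cannot be used
 * to speculatively index past the map. Self-contained sketch of that
 * pattern with a hypothetical table.
 */
#include <linux/nospec.h>

struct demo_map {
	unsigned int max_id;
	void *entries[];
};

static void *demo_lookup(struct demo_map *map, unsigned int id)
{
	if (id > map->max_id)
		return NULL;
	/* Clamp the index even on the speculative path. */
	id = array_index_nospec(id, map->max_id + 1);
	return map->entries[id];
}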
10323 u64 ret = vcpu->run->hypercall.ret; in complete_hypercall_exit()
10342 ++vcpu->stat.hypercalls; in ____kvm_emulate_hypercall()
10355 ret = -KVM_EPERM; in ____kvm_emulate_hypercall()
10359 ret = -KVM_ENOSYS; in ____kvm_emulate_hypercall()
10369 kvm_pv_kick_cpu_op(vcpu->kvm, a1); in ____kvm_emulate_hypercall()
10382 ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit); in ____kvm_emulate_hypercall()
10394 ret = -KVM_ENOSYS; in ____kvm_emulate_hypercall()
10395 if (!user_exit_on_hypercall(vcpu->kvm, KVM_HC_MAP_GPA_RANGE)) in ____kvm_emulate_hypercall()
10400 ret = -KVM_EINVAL; in ____kvm_emulate_hypercall()
10404 vcpu->run->exit_reason = KVM_EXIT_HYPERCALL; in ____kvm_emulate_hypercall()
10405 vcpu->run->hypercall.nr = KVM_HC_MAP_GPA_RANGE; in ____kvm_emulate_hypercall()
10407 * In principle this should have been -KVM_ENOSYS, but userspace (QEMU <=9.2) in ____kvm_emulate_hypercall()
10408 * assumed that vcpu->run->hypercall.ret is never changed by KVM and thus that in ____kvm_emulate_hypercall()
10409 * it was always zero on KVM_EXIT_HYPERCALL; KVM therefore explicitly initializes in ____kvm_emulate_hypercall()
10410 * vcpu->run->hypercall.ret, ensuring that it is zero to not break QEMU. in ____kvm_emulate_hypercall()
10412 vcpu->run->hypercall.ret = 0; in ____kvm_emulate_hypercall()
10413 vcpu->run->hypercall.args[0] = gpa; in ____kvm_emulate_hypercall()
10414 vcpu->run->hypercall.args[1] = npages; in ____kvm_emulate_hypercall()
10415 vcpu->run->hypercall.args[2] = attrs; in ____kvm_emulate_hypercall()
10416 vcpu->run->hypercall.flags = 0; in ____kvm_emulate_hypercall()
10418 vcpu->run->hypercall.flags |= KVM_EXIT_HYPERCALL_LONG_MODE; in ____kvm_emulate_hypercall()
10420 WARN_ON_ONCE(vcpu->run->hypercall.flags & KVM_EXIT_HYPERCALL_MBZ); in ____kvm_emulate_hypercall()
10421 vcpu->arch.complete_userspace_io = complete_hypercall; in ____kvm_emulate_hypercall()
10425 ret = -KVM_ENOSYS; in ____kvm_emulate_hypercall()
10430 vcpu->run->hypercall.ret = ret; in ____kvm_emulate_hypercall()
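/*
 * Userspace side of the KVM_HC_MAP_GPA_RANGE exit assembled above: the VMM
 * performs the shared<->private conversion and leaves run->hypercall.ret as
 * KVM pre-initialized it (zero) on success. Minimal sketch;
 * set_memory_attributes() is a hypothetical stand-in for the VMM's actual
 * conversion logic.
 */
#include <linux/kvm.h>
#include <linux/kvm_para.h>	/* KVM_HC_MAP_GPA_RANGE */
#include <stdint.h>

void set_memory_attributes(uint64_t gpa, uint64_t npages, uint64_t attrs); /* hypothetical */

static int handle_hypercall_exit(struct kvm_run *run)
{
	if (run->hypercall.nr != KVM_HC_MAP_GPA_RANGE)
		return -1;

	/* args[0] = gpa, args[1] = page count, args[2] = attributes */
	set_memory_attributes(run->hypercall.args[0],
			      run->hypercall.args[1],
			      run->hypercall.args[2]);

	return 0;	/* re-enter with KVM_RUN; ret stays 0 */
}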
10437 if (kvm_xen_hypercall_enabled(vcpu->kvm)) in kvm_emulate_hypercall()
10458 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_FIX_HYPERCALL_INSN)) { in emulator_fix_hypercall()
10459 ctxt->exception.error_code_valid = false; in emulator_fix_hypercall()
10460 ctxt->exception.vector = UD_VECTOR; in emulator_fix_hypercall()
10461 ctxt->have_exception = true; in emulator_fix_hypercall()
10468 &ctxt->exception); in emulator_fix_hypercall()
10473 return vcpu->run->request_interrupt_window && in dm_request_for_irq_injection()
10474 likely(!pic_in_kernel(vcpu->kvm)); in dm_request_for_irq_injection()
10477 /* Called within kvm->srcu read side. */
10480 struct kvm_run *kvm_run = vcpu->run; in post_kvm_run_save()
10482 kvm_run->if_flag = kvm_x86_call(get_if_flag)(vcpu); in post_kvm_run_save()
10483 kvm_run->cr8 = kvm_get_cr8(vcpu); in post_kvm_run_save()
10484 kvm_run->apic_base = vcpu->arch.apic_base; in post_kvm_run_save()
10486 kvm_run->ready_for_interrupt_injection = in post_kvm_run_save()
10487 pic_in_kernel(vcpu->kvm) || in post_kvm_run_save()
10491 kvm_run->flags |= KVM_RUN_X86_SMM; in post_kvm_run_save()
10493 kvm_run->flags |= KVM_RUN_X86_GUEST_MODE; in post_kvm_run_save()
10506 if (vcpu->arch.apic->apicv_active) in update_cr8_intercept()
10509 if (!vcpu->arch.apic->vapic_addr) in update_cr8_intercept()
10512 max_irr = -1; in update_cr8_intercept()
10514 if (max_irr != -1) in update_cr8_intercept()
10526 kvm_x86_ops.nested_ops->triple_fault(vcpu); in kvm_check_nested_events()
10530 return kvm_x86_ops.nested_ops->check_events(vcpu); in kvm_check_nested_events()
10539 * is injected as intercepted #PF VM-Exits for AMD's Paged Real Mode do in kvm_inject_exception()
10542 vcpu->arch.exception.has_error_code &= is_protmode(vcpu); in kvm_inject_exception()
10544 trace_kvm_inj_exception(vcpu->arch.exception.vector, in kvm_inject_exception()
10545 vcpu->arch.exception.has_error_code, in kvm_inject_exception()
10546 vcpu->arch.exception.error_code, in kvm_inject_exception()
10547 vcpu->arch.exception.injected); in kvm_inject_exception()
10557 * injected as part of a previous VM-Enter, but weren't successfully delivered
10558 * and need to be re-injected.
10563 * also be able to re-inject NMIs and IRQs in the middle of an instruction.
10564 * I.e. for exceptions and re-injected events, NOT invoking this on instruction
10569 * instruction boundaries for asynchronous events. However, because VM-Exits
10575 * But, if a VM-Exit occurs during instruction execution, and KVM does NOT skip
10598 * Process nested events first, as nested VM-Exit supersedes event in kvm_check_and_inject_events()
10599 * re-injection. If there's an event queued for re-injection, it will in kvm_check_and_inject_events()
10600 * be saved into the appropriate vmc{b,s}12 fields on nested VM-Exit. in kvm_check_and_inject_events()
10608 * Re-inject exceptions and events *especially* if immediate entry+exit in kvm_check_and_inject_events()
10612 * Don't re-inject an NMI or interrupt if there is a pending exception. in kvm_check_and_inject_events()
10621 * as the exception "occurred" before the exit to userspace. Trap-like in kvm_check_and_inject_events()
10623 * And while fault-like exceptions, e.g. #GP and #PF, are the lowest in kvm_check_and_inject_events()
10626 * Thus a pending fault-like exception means the fault occurred on the in kvm_check_and_inject_events()
10630 if (vcpu->arch.exception.injected) in kvm_check_and_inject_events()
10634 else if (vcpu->arch.nmi_injected) in kvm_check_and_inject_events()
10636 else if (vcpu->arch.interrupt.injected) in kvm_check_and_inject_events()
10640 * Exceptions that morph to VM-Exits are handled above, and pending in kvm_check_and_inject_events()
10641 * exceptions on top of injected exceptions that do not VM-Exit should in kvm_check_and_inject_events()
10644 WARN_ON_ONCE(vcpu->arch.exception.injected && in kvm_check_and_inject_events()
10645 vcpu->arch.exception.pending); in kvm_check_and_inject_events()
10649 * nested VM-Enter or event re-injection so that a different pending in kvm_check_and_inject_events()
10652 * Otherwise, continue processing events even if VM-Exit occurred. The in kvm_check_and_inject_events()
10653 * VM-Exit will have cleared exceptions that were meant for L2, but in kvm_check_and_inject_events()
10660 * A pending exception VM-Exit should either result in nested VM-Exit in kvm_check_and_inject_events()
10661 * or force an immediate re-entry and exit to/from L2, and exception in kvm_check_and_inject_events()
10662 * VM-Exits cannot be injected (flag should _never_ be set). in kvm_check_and_inject_events()
10664 WARN_ON_ONCE(vcpu->arch.exception_vmexit.injected || in kvm_check_and_inject_events()
10665 vcpu->arch.exception_vmexit.pending); in kvm_check_and_inject_events()
10669 * to re-inject a previous event. See above comments on re-injecting in kvm_check_and_inject_events()
10674 if (vcpu->arch.exception.pending) { in kvm_check_and_inject_events()
10676 * Fault-class exceptions, except #DBs, set RF=1 in the RFLAGS in kvm_check_and_inject_events()
10677 * value pushed on the stack. Trap-like exception and all #DBs in kvm_check_and_inject_events()
10678 * leave RF as-is (KVM follows Intel's behavior in this regard; in kvm_check_and_inject_events()
10683 * fault-like. They do _not_ set RF, a la code breakpoints. in kvm_check_and_inject_events()
10685 if (exception_type(vcpu->arch.exception.vector) == EXCPT_FAULT) in kvm_check_and_inject_events()
10689 if (vcpu->arch.exception.vector == DB_VECTOR) { in kvm_check_and_inject_events()
10690 kvm_deliver_exception_payload(vcpu, &vcpu->arch.exception); in kvm_check_and_inject_events()
10691 if (vcpu->arch.dr7 & DR7_GD) { in kvm_check_and_inject_events()
10692 vcpu->arch.dr7 &= ~DR7_GD; in kvm_check_and_inject_events()
10699 vcpu->arch.exception.pending = false; in kvm_check_and_inject_events()
10700 vcpu->arch.exception.injected = true; in kvm_check_and_inject_events()
10706 if (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ) in kvm_check_and_inject_events()
10711 * due to architectural conditions (e.g. IF=0) a window-open exit in kvm_check_and_inject_events()
10712 * will re-request KVM_REQ_EVENT. Sometimes however an event is pending in kvm_check_and_inject_events()
10718 * The kvm_x86_ops hooks communicate this by returning -EBUSY. in kvm_check_and_inject_events()
10721 if (vcpu->arch.smi_pending) { in kvm_check_and_inject_events()
10723 -EBUSY; in kvm_check_and_inject_events()
10727 vcpu->arch.smi_pending = false; in kvm_check_and_inject_events()
10728 ++vcpu->arch.smi_count; in kvm_check_and_inject_events()
10736 if (vcpu->arch.nmi_pending) { in kvm_check_and_inject_events()
10738 -EBUSY; in kvm_check_and_inject_events()
10742 --vcpu->arch.nmi_pending; in kvm_check_and_inject_events()
10743 vcpu->arch.nmi_injected = true; in kvm_check_and_inject_events()
10748 if (vcpu->arch.nmi_pending) in kvm_check_and_inject_events()
10754 -EBUSY; in kvm_check_and_inject_events()
10760 if (!WARN_ON_ONCE(irq == -1)) { in kvm_check_and_inject_events()
10771 kvm_x86_ops.nested_ops->has_events && in kvm_check_and_inject_events()
10772 kvm_x86_ops.nested_ops->has_events(vcpu, true)) in kvm_check_and_inject_events()
10777 * is done emulating and should only propagate the to-be-injected event in kvm_check_and_inject_events()
10779 * infinite loop as KVM will bail from VM-Enter to inject the pending in kvm_check_and_inject_events()
10784 * VMX without unrestricted guest, as that requires KVM to emulate Real in kvm_check_and_inject_events()
10787 WARN_ON_ONCE(vcpu->arch.exception.pending || in kvm_check_and_inject_events()
10788 vcpu->arch.exception_vmexit.pending); in kvm_check_and_inject_events()
10792 if (r == -EBUSY) { in kvm_check_and_inject_events()
10813 if (kvm_x86_call(get_nmi_mask)(vcpu) || vcpu->arch.nmi_injected) in process_nmi()
10820 * tracked in vcpu->arch.nmi_pending. in process_nmi()
10823 limit--; in process_nmi()
10825 vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); in process_nmi()
10826 vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit); in process_nmi()
10828 if (vcpu->arch.nmi_pending && in process_nmi()
10830 vcpu->arch.nmi_pending--; in process_nmi()
10832 if (vcpu->arch.nmi_pending) in process_nmi()
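/*
 * Self-contained model of the collapsing done in process_nmi():
 * asynchronously queued NMIs are drained with an atomic exchange and clamped
 * so at most one NMI can be latched behind the one being handled (limit 2,
 * or 1 while an NMI is masked or already injected). C11 atomics stand in
 * for the kernel's atomic_t; a sketch that ignores the vNMI special cases.
 */
#include <stdatomic.h>

struct demo_vcpu {
	atomic_uint nmi_queued;		/* producers: atomic increments */
	unsigned int nmi_pending;	/* consumer-private count */
	int nmi_injected;		/* an NMI is being delivered */
};

static void demo_process_nmi(struct demo_vcpu *v, int nmi_masked)
{
	unsigned int limit = 2;

	if (nmi_masked || v->nmi_injected)
		limit--;	/* only one more NMI can be latched */

	v->nmi_pending += atomic_exchange(&v->nmi_queued, 0);
	if (v->nmi_pending > limit)
		v->nmi_pending = limit;
}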
10839 return vcpu->arch.nmi_pending + in kvm_get_nr_pending_nmis()
10856 struct kvm_lapic *apic = vcpu->arch.apic; in __kvm_vcpu_update_apicv()
10862 down_read(&vcpu->kvm->arch.apicv_update_lock); in __kvm_vcpu_update_apicv()
10869 if (apic->apicv_active == activate) in __kvm_vcpu_update_apicv()
10872 apic->apicv_active = activate; in __kvm_vcpu_update_apicv()
10882 if (!apic->apicv_active) in __kvm_vcpu_update_apicv()
10887 up_read(&vcpu->kvm->arch.apicv_update_lock); in __kvm_vcpu_update_apicv()
10907 if (apic_x2apic_mode(vcpu->arch.apic) && in kvm_vcpu_update_apicv()
10919 lockdep_assert_held_write(&kvm->arch.apicv_update_lock); in __kvm_set_or_clear_apicv_inhibit()
10924 old = new = kvm->arch.apicv_inhibit_reasons; in __kvm_set_or_clear_apicv_inhibit()
10935 * redo vcpu_enter_guest() without seeing the new inhibit state. in __kvm_set_or_clear_apicv_inhibit()
10942 kvm->arch.apicv_inhibit_reasons = new; in __kvm_set_or_clear_apicv_inhibit()
10945 int idx = srcu_read_lock(&kvm->srcu); in __kvm_set_or_clear_apicv_inhibit()
10948 srcu_read_unlock(&kvm->srcu, idx); in __kvm_set_or_clear_apicv_inhibit()
10951 kvm->arch.apicv_inhibit_reasons = new; in __kvm_set_or_clear_apicv_inhibit()
10961 down_write(&kvm->arch.apicv_update_lock); in kvm_set_or_clear_apicv_inhibit()
10963 up_write(&kvm->arch.apicv_update_lock); in kvm_set_or_clear_apicv_inhibit()
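/*
 * Shape of the inhibit tracking used above: a bitmask of reasons, mutated
 * under the write side of apicv_update_lock, where only 0 <-> non-zero
 * transitions actually toggle APICv. Self-contained model; the real code
 * additionally kicks vCPUs and updates the APIC access page on a toggle.
 */
#include <linux/bits.h>
#include <linux/types.h>

static void demo_set_or_clear_inhibit(unsigned long *reasons,
				      unsigned int reason, bool set)
{
	unsigned long old = *reasons, new;

	new = set ? (old | BIT(reason)) : (old & ~BIT(reason));
	if (!!old != !!new) {
		/* Activation state flipped: (de)activate APICv here. */
	}
	*reasons = new;
}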
10972 bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256); in vcpu_scan_ioapic()
10973 vcpu->arch.highest_stale_pending_ioapic_eoi = -1; in vcpu_scan_ioapic()
10977 if (irqchip_split(vcpu->kvm)) in vcpu_scan_ioapic()
10978 kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors); in vcpu_scan_ioapic()
10980 else if (ioapic_in_kernel(vcpu->kvm)) in vcpu_scan_ioapic()
10981 kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors); in vcpu_scan_ioapic()
10985 vcpu->arch.load_eoi_exitmap_pending = true; in vcpu_scan_ioapic()
10992 if (!kvm_apic_hw_enabled(vcpu->arch.apic)) in vcpu_load_eoi_exitmap()
11000 vcpu->arch.ioapic_handled_vectors, in vcpu_load_eoi_exitmap()
11001 to_hv_synic(vcpu)->vec_bitmap, 256); in vcpu_load_eoi_exitmap()
11007 vcpu, (u64 *)vcpu->arch.ioapic_handled_vectors); in vcpu_load_eoi_exitmap()
11024 * Called within kvm->srcu read side.
11025 * Returns 1 to let vcpu_run() continue the guest execution loop without
11042 r = -EIO; in vcpu_enter_guest()
11052 if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) { in vcpu_enter_guest()
11062 kvm_update_masterclock(vcpu->kvm); in vcpu_enter_guest()
11086 * Fall back to a "full" guest flush if Hyper-V's precise in vcpu_enter_guest()
11087 * flushing fails. Note, Hyper-V's flushing is per-vCPU, but in vcpu_enter_guest()
11098 vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS; in vcpu_enter_guest()
11104 kvm_x86_ops.nested_ops->triple_fault(vcpu); in vcpu_enter_guest()
11107 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; in vcpu_enter_guest()
11108 vcpu->mmio_needed = 0; in vcpu_enter_guest()
11115 vcpu->arch.apf.halted = true; in vcpu_enter_guest()
11132 BUG_ON(vcpu->arch.pending_ioapic_eoi > 255); in vcpu_enter_guest()
11133 if (test_bit(vcpu->arch.pending_ioapic_eoi, in vcpu_enter_guest()
11134 vcpu->arch.ioapic_handled_vectors)) { in vcpu_enter_guest()
11135 vcpu->run->exit_reason = KVM_EXIT_IOAPIC_EOI; in vcpu_enter_guest()
11136 vcpu->run->eoi.vector = in vcpu_enter_guest()
11137 vcpu->arch.pending_ioapic_eoi; in vcpu_enter_guest()
11150 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; in vcpu_enter_guest()
11151 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH; in vcpu_enter_guest()
11152 vcpu->run->system_event.ndata = 0; in vcpu_enter_guest()
11157 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; in vcpu_enter_guest()
11158 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET; in vcpu_enter_guest()
11159 vcpu->run->system_event.ndata = 0; in vcpu_enter_guest()
11166 vcpu->run->exit_reason = KVM_EXIT_HYPERV; in vcpu_enter_guest()
11167 vcpu->run->hyperv = hv_vcpu->exit; in vcpu_enter_guest()
11174 * KVM_REQ_CLOCK_UPDATE, because Hyper-V SynIC timers in vcpu_enter_guest()
11175 * depend on the guest clock being up-to-date in vcpu_enter_guest()
11193 if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE) { in vcpu_enter_guest()
11202 ++vcpu->stat.req_event; in vcpu_enter_guest()
11208 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { in vcpu_enter_guest()
11243 /* Store vcpu->apicv_active before vcpu->mode. */ in vcpu_enter_guest()
11244 smp_store_release(&vcpu->mode, IN_GUEST_MODE); in vcpu_enter_guest()
11249 * 1) We should set ->mode before checking ->requests. Please see in vcpu_enter_guest()
11252 * 2) For APICv, we should set ->mode before checking PID.ON. This in vcpu_enter_guest()
11273 vcpu->mode = OUTSIDE_GUEST_MODE; in vcpu_enter_guest()
11292 if (vcpu->arch.guest_fpu.xfd_err) in vcpu_enter_guest()
11293 wrmsrq(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err); in vcpu_enter_guest()
11295 if (unlikely(vcpu->arch.switch_db_regs && in vcpu_enter_guest()
11296 !(vcpu->arch.switch_db_regs & KVM_DEBUGREG_AUTO_SWITCH))) { in vcpu_enter_guest()
11298 set_debugreg(vcpu->arch.eff_db[0], 0); in vcpu_enter_guest()
11299 set_debugreg(vcpu->arch.eff_db[1], 1); in vcpu_enter_guest()
11300 set_debugreg(vcpu->arch.eff_db[2], 2); in vcpu_enter_guest()
11301 set_debugreg(vcpu->arch.eff_db[3], 3); in vcpu_enter_guest()
11303 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) in vcpu_enter_guest()
11312 * vendor code if any host-owned bits were changed, e.g. so that the in vcpu_enter_guest()
11316 if ((debug_ctl ^ vcpu->arch.host_debugctl) & kvm_x86_ops.HOST_OWNED_DEBUGCTL && in vcpu_enter_guest()
11317 !vcpu->arch.guest_state_protected) in vcpu_enter_guest()
11319 vcpu->arch.host_debugctl = debug_ctl; in vcpu_enter_guest()
11327 * per-VM state, and responding vCPUs must wait for the update in vcpu_enter_guest()
11347 /* Note, VM-Exits that go down the "slow" path are accounted below. */ in vcpu_enter_guest()
11348 ++vcpu->stat.exits; in vcpu_enter_guest()
11357 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) { in vcpu_enter_guest()
11358 WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP); in vcpu_enter_guest()
11359 WARN_ON(vcpu->arch.switch_db_regs & KVM_DEBUGREG_AUTO_SWITCH); in vcpu_enter_guest()
11375 vcpu->arch.last_vmentry_cpu = vcpu->cpu; in vcpu_enter_guest()
11376 vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc()); in vcpu_enter_guest()
11378 vcpu->mode = OUTSIDE_GUEST_MODE; in vcpu_enter_guest()
11383 * rely on the fact that guest_fpu::xfd is up-to-date (e.g. in vcpu_enter_guest()
11386 if (vcpu->arch.xfd_no_write_intercept) in vcpu_enter_guest()
11391 if (vcpu->arch.guest_fpu.xfd_err) in vcpu_enter_guest()
11405 * VM-Exit on SVM and any ticks that occur between VM-Exit and now. in vcpu_enter_guest()
11412 ++vcpu->stat.exits; in vcpu_enter_guest()
11440 !vcpu->arch.guest_state_protected)) { in vcpu_enter_guest()
11445 if (unlikely(vcpu->arch.tsc_always_catchup)) in vcpu_enter_guest()
11448 if (vcpu->arch.apic_attention) in vcpu_enter_guest()
11461 if (unlikely(vcpu->arch.apic_attention)) in vcpu_enter_guest()
11469 return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && in kvm_vcpu_running()
11470 !vcpu->arch.apf.halted); in kvm_vcpu_running()
11475 if (!list_empty_careful(&vcpu->async_pf.done)) in kvm_vcpu_has_events()
11486 (vcpu->arch.nmi_pending && in kvm_vcpu_has_events()
11492 (vcpu->arch.smi_pending && in kvm_vcpu_has_events()
11510 kvm_x86_ops.nested_ops->has_events && in kvm_vcpu_has_events()
11511 kvm_x86_ops.nested_ops->has_events(vcpu, false)) in kvm_vcpu_has_events()
11523 return kvm_vcpu_running(vcpu) || vcpu->arch.pv.pv_unhalted || in kvm_arch_vcpu_runnable()
11527 /* Called within kvm->srcu read side. */
11534 * Switch to the software timer before halt-polling/blocking as in vcpu_block()
11537 * Switch before halt-polling so that KVM recognizes an expired in vcpu_block()
11545 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) in vcpu_block()
11556 * of some kind is pending; service it without changing the in vcpu_block()
11566 * state field (AMD does not have a similar field and a VM-Exit always in vcpu_block()
11572 WARN_ON_ONCE(r == -EBUSY); in vcpu_block()
11579 switch(vcpu->arch.mp_state) { in vcpu_block()
11585 vcpu->arch.apf.halted = false; in vcpu_block()
11596 /* Called within kvm->srcu read side. */
11601 vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; in vcpu_run()
11610 vcpu->arch.at_instruction_boundary = false; in vcpu_run()
11630 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; in vcpu_run()
11631 ++vcpu->stat.request_irq_exits; in vcpu_run()
11651 * local APIC is in-kernel, the run loop will detect the non-runnable in __kvm_emulate_halt()
11656 ++vcpu->stat.halt_exits; in __kvm_emulate_halt()
11658 if (kvm_vcpu_has_events(vcpu) || vcpu->arch.pv.pv_unhalted) in __kvm_emulate_halt()
11663 vcpu->run->exit_reason = reason; in __kvm_emulate_halt()
11678 * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered in kvm_emulate_halt()
11714 return vcpu->arch.preempted_in_kernel; in kvm_arch_vcpu_preempted_in_kernel()
11719 if (READ_ONCE(vcpu->arch.pv.pv_unhalted)) in kvm_arch_dy_runnable()
11739 BUG_ON(!vcpu->arch.pio.count); in complete_emulated_pio()
11764 struct kvm_run *run = vcpu->run; in complete_emulated_mmio()
11768 BUG_ON(!vcpu->mmio_needed); in complete_emulated_mmio()
11771 frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment]; in complete_emulated_mmio()
11772 len = min(8u, frag->len); in complete_emulated_mmio()
11773 if (!vcpu->mmio_is_write) in complete_emulated_mmio()
11774 memcpy(frag->data, run->mmio.data, len); in complete_emulated_mmio()
11776 if (frag->len <= 8) { in complete_emulated_mmio()
11779 vcpu->mmio_cur_fragment++; in complete_emulated_mmio()
11782 frag->data += len; in complete_emulated_mmio()
11783 frag->gpa += len; in complete_emulated_mmio()
11784 frag->len -= len; in complete_emulated_mmio()
11787 if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) { in complete_emulated_mmio()
11788 vcpu->mmio_needed = 0; in complete_emulated_mmio()
11790 /* FIXME: return into emulator if single-stepping. */ in complete_emulated_mmio()
11791 if (vcpu->mmio_is_write) in complete_emulated_mmio()
11793 vcpu->mmio_read_completed = 1; in complete_emulated_mmio()
11797 run->exit_reason = KVM_EXIT_MMIO; in complete_emulated_mmio()
11798 run->mmio.phys_addr = frag->gpa; in complete_emulated_mmio()
11799 if (vcpu->mmio_is_write) in complete_emulated_mmio()
11800 memcpy(run->mmio.data, frag->data, min(8u, frag->len)); in complete_emulated_mmio()
11801 run->mmio.len = min(8u, frag->len); in complete_emulated_mmio()
11802 run->mmio.is_write = vcpu->mmio_is_write; in complete_emulated_mmio()
11803 vcpu->arch.complete_userspace_io = complete_emulated_mmio; in complete_emulated_mmio()
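/*
 * Userspace half of the MMIO fragment machinery above: each KVM_EXIT_MMIO
 * carries at most 8 bytes, so a wide or page-crossing access arrives as
 * several exits, each completed by re-entering the guest. Minimal sketch;
 * mmio_read()/mmio_write() are hypothetical device-model callbacks.
 */
#include <linux/kvm.h>
#include <stdint.h>

void mmio_read(uint64_t gpa, void *data, uint32_t len);		/* hypothetical */
void mmio_write(uint64_t gpa, const void *data, uint32_t len);	/* hypothetical */

static void handle_mmio_exit(struct kvm_run *run)
{
	if (run->mmio.is_write)
		mmio_write(run->mmio.phys_addr, run->mmio.data, run->mmio.len);
	else
		mmio_read(run->mmio.phys_addr, run->mmio.data, run->mmio.len);
	/* The next KVM_RUN hits complete_emulated_mmio() for the next fragment. */
}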
11810 /* Exclude PKRU, it's restored separately immediately after VM-Exit. */ in kvm_load_guest_fpu()
11811 fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, true); in kvm_load_guest_fpu()
11818 fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, false); in kvm_put_guest_fpu()
11819 ++vcpu->stat.fpu_reload; in kvm_put_guest_fpu()
11826 * SIPI_RECEIVED is obsolete; KVM leaves the vCPU in Wait-For-SIPI and in kvm_x86_vcpu_pre_run()
11831 if (WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) in kvm_x86_vcpu_pre_run()
11832 return -EINVAL; in kvm_x86_vcpu_pre_run()
11838 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED && in kvm_x86_vcpu_pre_run()
11840 return -EINVAL; in kvm_x86_vcpu_pre_run()
11847 struct kvm_queued_exception *ex = &vcpu->arch.exception; in kvm_arch_vcpu_ioctl_run()
11848 struct kvm_run *kvm_run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
11852 r = kvm_mmu_post_init_vm(vcpu->kvm); in kvm_arch_vcpu_ioctl_run()
11858 kvm_run->flags = 0; in kvm_arch_vcpu_ioctl_run()
11862 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { in kvm_arch_vcpu_ioctl_run()
11863 if (!vcpu->wants_to_run) { in kvm_arch_vcpu_ioctl_run()
11864 r = -EINTR; in kvm_arch_vcpu_ioctl_run()
11873 * will transition the vCPU out of UNINITIALIZED (without more in kvm_arch_vcpu_ioctl_run()
11886 r = -EAGAIN; in kvm_arch_vcpu_ioctl_run()
11888 r = -EINTR; in kvm_arch_vcpu_ioctl_run()
11889 kvm_run->exit_reason = KVM_EXIT_INTR; in kvm_arch_vcpu_ioctl_run()
11890 ++vcpu->stat.signal_exits; in kvm_arch_vcpu_ioctl_run()
11895 sync_valid_fields = kvm_sync_valid_fields(vcpu->kvm); in kvm_arch_vcpu_ioctl_run()
11896 if ((kvm_run->kvm_valid_regs & ~sync_valid_fields) || in kvm_arch_vcpu_ioctl_run()
11897 (kvm_run->kvm_dirty_regs & ~sync_valid_fields)) { in kvm_arch_vcpu_ioctl_run()
11898 r = -EINVAL; in kvm_arch_vcpu_ioctl_run()
11902 if (kvm_run->kvm_dirty_regs) { in kvm_arch_vcpu_ioctl_run()
11908 /* re-sync apic's tpr */ in kvm_arch_vcpu_ioctl_run()
11910 if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) { in kvm_arch_vcpu_ioctl_run()
11911 r = -EINVAL; in kvm_arch_vcpu_ioctl_run()
11918 * a pending VM-Exit if L1 wants to intercept the exception. in kvm_arch_vcpu_ioctl_run()
11920 if (vcpu->arch.exception_from_userspace && is_guest_mode(vcpu) && in kvm_arch_vcpu_ioctl_run()
11921 kvm_x86_ops.nested_ops->is_exception_vmexit(vcpu, ex->vector, in kvm_arch_vcpu_ioctl_run()
11922 ex->error_code)) { in kvm_arch_vcpu_ioctl_run()
11923 kvm_queue_exception_vmexit(vcpu, ex->vector, in kvm_arch_vcpu_ioctl_run()
11924 ex->has_error_code, ex->error_code, in kvm_arch_vcpu_ioctl_run()
11925 ex->has_payload, ex->payload); in kvm_arch_vcpu_ioctl_run()
11926 ex->injected = false; in kvm_arch_vcpu_ioctl_run()
11927 ex->pending = false; in kvm_arch_vcpu_ioctl_run()
11929 vcpu->arch.exception_from_userspace = false; in kvm_arch_vcpu_ioctl_run()
11931 if (unlikely(vcpu->arch.complete_userspace_io)) { in kvm_arch_vcpu_ioctl_run()
11932 int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io; in kvm_arch_vcpu_ioctl_run()
11933 vcpu->arch.complete_userspace_io = NULL; in kvm_arch_vcpu_ioctl_run()
11938 WARN_ON_ONCE(vcpu->arch.pio.count); in kvm_arch_vcpu_ioctl_run()
11939 WARN_ON_ONCE(vcpu->mmio_needed); in kvm_arch_vcpu_ioctl_run()
11942 if (!vcpu->wants_to_run) { in kvm_arch_vcpu_ioctl_run()
11943 r = -EINTR; in kvm_arch_vcpu_ioctl_run()
11955 if (kvm_run->kvm_valid_regs && likely(!vcpu->arch.guest_state_protected)) in kvm_arch_vcpu_ioctl_run()
11967 if (vcpu->arch.emulate_regs_need_sync_to_vcpu) { in __get_regs()
11975 emulator_writeback_register_cache(vcpu->arch.emulate_ctxt); in __get_regs()
11976 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in __get_regs()
11978 regs->rax = kvm_rax_read(vcpu); in __get_regs()
11979 regs->rbx = kvm_rbx_read(vcpu); in __get_regs()
11980 regs->rcx = kvm_rcx_read(vcpu); in __get_regs()
11981 regs->rdx = kvm_rdx_read(vcpu); in __get_regs()
11982 regs->rsi = kvm_rsi_read(vcpu); in __get_regs()
11983 regs->rdi = kvm_rdi_read(vcpu); in __get_regs()
11984 regs->rsp = kvm_rsp_read(vcpu); in __get_regs()
11985 regs->rbp = kvm_rbp_read(vcpu); in __get_regs()
11987 regs->r8 = kvm_r8_read(vcpu); in __get_regs()
11988 regs->r9 = kvm_r9_read(vcpu); in __get_regs()
11989 regs->r10 = kvm_r10_read(vcpu); in __get_regs()
11990 regs->r11 = kvm_r11_read(vcpu); in __get_regs()
11991 regs->r12 = kvm_r12_read(vcpu); in __get_regs()
11992 regs->r13 = kvm_r13_read(vcpu); in __get_regs()
11993 regs->r14 = kvm_r14_read(vcpu); in __get_regs()
11994 regs->r15 = kvm_r15_read(vcpu); in __get_regs()
11997 regs->rip = kvm_rip_read(vcpu); in __get_regs()
11998 regs->rflags = kvm_get_rflags(vcpu); in __get_regs()
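/*
 * The accessors above back the KVM_GET_REGS/KVM_SET_REGS ioctls. A minimal
 * userspace round trip looks like this, for an assumed open vCPU file
 * descriptor.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int bump_rip(int vcpu_fd)
{
	struct kvm_regs regs;

	if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) < 0)
		return -1;
	regs.rip += 1;	/* e.g. step over a one-byte instruction */
	return ioctl(vcpu_fd, KVM_SET_REGS, &regs);
}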
12003 if (vcpu->kvm->arch.has_protected_state && in kvm_arch_vcpu_ioctl_get_regs()
12004 vcpu->arch.guest_state_protected) in kvm_arch_vcpu_ioctl_get_regs()
12005 return -EINVAL; in kvm_arch_vcpu_ioctl_get_regs()
12015 vcpu->arch.emulate_regs_need_sync_from_vcpu = true; in __set_regs()
12016 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in __set_regs()
12018 kvm_rax_write(vcpu, regs->rax); in __set_regs()
12019 kvm_rbx_write(vcpu, regs->rbx); in __set_regs()
12020 kvm_rcx_write(vcpu, regs->rcx); in __set_regs()
12021 kvm_rdx_write(vcpu, regs->rdx); in __set_regs()
12022 kvm_rsi_write(vcpu, regs->rsi); in __set_regs()
12023 kvm_rdi_write(vcpu, regs->rdi); in __set_regs()
12024 kvm_rsp_write(vcpu, regs->rsp); in __set_regs()
12025 kvm_rbp_write(vcpu, regs->rbp); in __set_regs()
12027 kvm_r8_write(vcpu, regs->r8); in __set_regs()
12028 kvm_r9_write(vcpu, regs->r9); in __set_regs()
12029 kvm_r10_write(vcpu, regs->r10); in __set_regs()
12030 kvm_r11_write(vcpu, regs->r11); in __set_regs()
12031 kvm_r12_write(vcpu, regs->r12); in __set_regs()
12032 kvm_r13_write(vcpu, regs->r13); in __set_regs()
12033 kvm_r14_write(vcpu, regs->r14); in __set_regs()
12034 kvm_r15_write(vcpu, regs->r15); in __set_regs()
12037 kvm_rip_write(vcpu, regs->rip); in __set_regs()
12038 kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED); in __set_regs()
12040 vcpu->arch.exception.pending = false; in __set_regs()
12041 vcpu->arch.exception_vmexit.pending = false; in __set_regs()
12048 if (vcpu->kvm->arch.has_protected_state && in kvm_arch_vcpu_ioctl_set_regs()
12049 vcpu->arch.guest_state_protected) in kvm_arch_vcpu_ioctl_set_regs()
12050 return -EINVAL; in kvm_arch_vcpu_ioctl_set_regs()
12062 if (vcpu->arch.guest_state_protected) in __get_sregs_common()
12065 kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS); in __get_sregs_common()
12066 kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS); in __get_sregs_common()
12067 kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES); in __get_sregs_common()
12068 kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS); in __get_sregs_common()
12069 kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS); in __get_sregs_common()
12070 kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS); in __get_sregs_common()
12072 kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR); in __get_sregs_common()
12073 kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); in __get_sregs_common()
12076 sregs->idt.limit = dt.size; in __get_sregs_common()
12077 sregs->idt.base = dt.address; in __get_sregs_common()
12079 sregs->gdt.limit = dt.size; in __get_sregs_common()
12080 sregs->gdt.base = dt.address; in __get_sregs_common()
12082 sregs->cr2 = vcpu->arch.cr2; in __get_sregs_common()
12083 sregs->cr3 = kvm_read_cr3(vcpu); in __get_sregs_common()
12086 sregs->cr0 = kvm_read_cr0(vcpu); in __get_sregs_common()
12087 sregs->cr4 = kvm_read_cr4(vcpu); in __get_sregs_common()
12088 sregs->cr8 = kvm_get_cr8(vcpu); in __get_sregs_common()
12089 sregs->efer = vcpu->arch.efer; in __get_sregs_common()
12090 sregs->apic_base = vcpu->arch.apic_base; in __get_sregs_common()
12097 if (vcpu->arch.guest_state_protected) in __get_sregs()
12100 if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft) in __get_sregs()
12101 set_bit(vcpu->arch.interrupt.nr, in __get_sregs()
12102 (unsigned long *)sregs->interrupt_bitmap); in __get_sregs()
12111 if (vcpu->arch.guest_state_protected) in __get_sregs2()
12116 sregs2->pdptrs[i] = kvm_pdptr_read(vcpu, i); in __get_sregs2()
12117 sregs2->flags |= KVM_SREGS2_FLAGS_PDPTRS_VALID; in __get_sregs2()
12124 if (vcpu->kvm->arch.has_protected_state && in kvm_arch_vcpu_ioctl_get_sregs()
12125 vcpu->arch.guest_state_protected) in kvm_arch_vcpu_ioctl_get_sregs()
12126 return -EINVAL; in kvm_arch_vcpu_ioctl_get_sregs()
12150 if ((vcpu->arch.mp_state == KVM_MP_STATE_HALTED || in kvm_arch_vcpu_ioctl_get_mpstate()
12151 vcpu->arch.mp_state == KVM_MP_STATE_AP_RESET_HOLD) && in kvm_arch_vcpu_ioctl_get_mpstate()
12152 vcpu->arch.pv.pv_unhalted) in kvm_arch_vcpu_ioctl_get_mpstate()
12153 mp_state->mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_vcpu_ioctl_get_mpstate()
12155 mp_state->mp_state = vcpu->arch.mp_state; in kvm_arch_vcpu_ioctl_get_mpstate()
12169 int ret = -EINVAL; in kvm_arch_vcpu_ioctl_set_mpstate()
12173 switch (mp_state->mp_state) { in kvm_arch_vcpu_ioctl_set_mpstate()
12192 * leaves the vCPU in INIT_RECEIVED (Wait-For-SIPI) and pends the SIPI. in kvm_arch_vcpu_ioctl_set_mpstate()
12195 if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) { in kvm_arch_vcpu_ioctl_set_mpstate()
12196 mp_state->mp_state = KVM_MP_STATE_INIT_RECEIVED; in kvm_arch_vcpu_ioctl_set_mpstate()
12197 set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events); in kvm_arch_vcpu_ioctl_set_mpstate()
12200 kvm_set_mp_state(vcpu, mp_state->mp_state); in kvm_arch_vcpu_ioctl_set_mpstate()
12212 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in kvm_task_switch()
12219 * Check both User and Supervisor on task switches as inter- in kvm_task_switch()
12243 if (ret || vcpu->mmio_needed) in kvm_task_switch()
12246 kvm_rip_write(vcpu, ctxt->eip); in kvm_task_switch()
12247 kvm_set_rflags(vcpu, ctxt->eflags); in kvm_task_switch()
12251 vcpu->mmio_needed = false; in kvm_task_switch()
12252 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_task_switch()
12253 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; in kvm_task_switch()
12254 vcpu->run->internal.ndata = 0; in kvm_task_switch()
12261 if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) { in kvm_is_valid_sregs()
12264 * 64-bit mode (though maybe in a 32-bit code segment). in kvm_is_valid_sregs()
12267 if (!(sregs->cr4 & X86_CR4_PAE) || !(sregs->efer & EFER_LMA)) in kvm_is_valid_sregs()
12269 if (!kvm_vcpu_is_legal_cr3(vcpu, sregs->cr3)) in kvm_is_valid_sregs()
12273 * Not in 64-bit mode: EFER.LMA is clear and the code in kvm_is_valid_sregs()
12274 * segment cannot be 64-bit. in kvm_is_valid_sregs()
12276 if (sregs->efer & EFER_LMA || sregs->cs.l) in kvm_is_valid_sregs()
12280 return kvm_is_valid_cr4(vcpu, sregs->cr4) && in kvm_is_valid_sregs()
12281 kvm_is_valid_cr0(vcpu, sregs->cr0); in kvm_is_valid_sregs()
12291 return -EINVAL; in __set_sregs_common()
12293 if (kvm_apic_set_base(vcpu, sregs->apic_base, true)) in __set_sregs_common()
12294 return -EINVAL; in __set_sregs_common()
12296 if (vcpu->arch.guest_state_protected) in __set_sregs_common()
12299 dt.size = sregs->idt.limit; in __set_sregs_common()
12300 dt.address = sregs->idt.base; in __set_sregs_common()
12302 dt.size = sregs->gdt.limit; in __set_sregs_common()
12303 dt.address = sregs->gdt.base; in __set_sregs_common()
12306 vcpu->arch.cr2 = sregs->cr2; in __set_sregs_common()
12307 *mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; in __set_sregs_common()
12308 vcpu->arch.cr3 = sregs->cr3; in __set_sregs_common()
12310 kvm_x86_call(post_set_cr3)(vcpu, sregs->cr3); in __set_sregs_common()
12312 kvm_set_cr8(vcpu, sregs->cr8); in __set_sregs_common()
12314 *mmu_reset_needed |= vcpu->arch.efer != sregs->efer; in __set_sregs_common()
12315 kvm_x86_call(set_efer)(vcpu, sregs->efer); in __set_sregs_common()
12317 *mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0; in __set_sregs_common()
12318 kvm_x86_call(set_cr0)(vcpu, sregs->cr0); in __set_sregs_common()
12320 *mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; in __set_sregs_common()
12321 kvm_x86_call(set_cr4)(vcpu, sregs->cr4); in __set_sregs_common()
12324 idx = srcu_read_lock(&vcpu->kvm->srcu); in __set_sregs_common()
12329 srcu_read_unlock(&vcpu->kvm->srcu, idx); in __set_sregs_common()
12332 kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS); in __set_sregs_common()
12333 kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS); in __set_sregs_common()
12334 kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES); in __set_sregs_common()
12335 kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS); in __set_sregs_common()
12336 kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS); in __set_sregs_common()
12337 kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS); in __set_sregs_common()
12339 kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR); in __set_sregs_common()
12340 kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); in __set_sregs_common()
12346 sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 && in __set_sregs_common()
12369 (const unsigned long *)sregs->interrupt_bitmap, max_bits); in __set_sregs()
12382 bool valid_pdptrs = sregs2->flags & KVM_SREGS2_FLAGS_PDPTRS_VALID; in __set_sregs2()
12383 bool pae = (sregs2->cr0 & X86_CR0_PG) && (sregs2->cr4 & X86_CR4_PAE) && in __set_sregs2()
12384 !(sregs2->efer & EFER_LMA); in __set_sregs2()
12387 if (sregs2->flags & ~KVM_SREGS2_FLAGS_PDPTRS_VALID) in __set_sregs2()
12388 return -EINVAL; in __set_sregs2()
12390 if (valid_pdptrs && (!pae || vcpu->arch.guest_state_protected)) in __set_sregs2()
12391 return -EINVAL; in __set_sregs2()
12400 kvm_pdptr_write(vcpu, i, sregs2->pdptrs[i]); in __set_sregs2()
12404 vcpu->arch.pdptrs_from_userspace = true; in __set_sregs2()
12418 if (vcpu->kvm->arch.has_protected_state && in kvm_arch_vcpu_ioctl_set_sregs()
12419 vcpu->arch.guest_state_protected) in kvm_arch_vcpu_ioctl_set_sregs()
12420 return -EINVAL; in kvm_arch_vcpu_ioctl_set_sregs()
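/*
 * Userspace view of the __set_sregs() path: read the special registers,
 * tweak them, and push them back; KVM then revalidates CR0/CR4/EFER
 * consistency exactly as kvm_is_valid_sregs() does above. Illustrative only
 * (a real mode switch also needs segment setup), for an assumed vcpu_fd.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int set_cr0_pe(int vcpu_fd)
{
	struct kvm_sregs sregs;

	if (ioctl(vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
		return -1;
	sregs.cr0 |= 0x1;	/* CR0.PE: enter protected mode */
	return ioctl(vcpu_fd, KVM_SET_SREGS, &sregs);
}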
12437 down_write(&kvm->arch.apicv_update_lock); in kvm_arch_vcpu_guestdbg_update_apicv_inhibit()
12440 if (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ) { in kvm_arch_vcpu_guestdbg_update_apicv_inhibit()
12446 up_write(&kvm->arch.apicv_update_lock); in kvm_arch_vcpu_guestdbg_update_apicv_inhibit()
12455 if (vcpu->arch.guest_state_protected) in kvm_arch_vcpu_ioctl_set_guest_debug()
12456 return -EINVAL; in kvm_arch_vcpu_ioctl_set_guest_debug()
12460 if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) { in kvm_arch_vcpu_ioctl_set_guest_debug()
12461 r = -EBUSY; in kvm_arch_vcpu_ioctl_set_guest_debug()
12464 if (dbg->control & KVM_GUESTDBG_INJECT_DB) in kvm_arch_vcpu_ioctl_set_guest_debug()
12476 vcpu->guest_debug = dbg->control; in kvm_arch_vcpu_ioctl_set_guest_debug()
12477 if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)) in kvm_arch_vcpu_ioctl_set_guest_debug()
12478 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
12480 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { in kvm_arch_vcpu_ioctl_set_guest_debug()
12482 vcpu->arch.eff_db[i] = dbg->arch.debugreg[i]; in kvm_arch_vcpu_ioctl_set_guest_debug()
12483 vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7]; in kvm_arch_vcpu_ioctl_set_guest_debug()
12486 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; in kvm_arch_vcpu_ioctl_set_guest_debug()
12490 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) in kvm_arch_vcpu_ioctl_set_guest_debug()
12491 vcpu->arch.singlestep_rip = kvm_get_linear_rip(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
12501 kvm_arch_vcpu_guestdbg_update_apicv_inhibit(vcpu->kvm); in kvm_arch_vcpu_ioctl_set_guest_debug()
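/*
 * Userspace entry point for the code above: KVM_SET_GUEST_DEBUG takes a
 * control word and, for hardware breakpoints, the debug registers. Minimal
 * single-step example for an assumed vcpu_fd; afterwards every guest
 * instruction causes a KVM_EXIT_DEBUG.
 */
#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int enable_singlestep(int vcpu_fd)
{
	struct kvm_guest_debug dbg;

	memset(&dbg, 0, sizeof(dbg));
	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}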
12516 unsigned long vaddr = tr->linear_address; in kvm_arch_vcpu_ioctl_translate()
12522 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl_translate()
12524 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl_translate()
12525 tr->physical_address = gpa; in kvm_arch_vcpu_ioctl_translate()
12526 tr->valid = gpa != INVALID_GPA; in kvm_arch_vcpu_ioctl_translate()
12527 tr->writeable = 1; in kvm_arch_vcpu_ioctl_translate()
12528 tr->usermode = 0; in kvm_arch_vcpu_ioctl_translate()
12538 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) in kvm_arch_vcpu_ioctl_get_fpu()
12539 return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0; in kvm_arch_vcpu_ioctl_get_fpu()
12543 fxsave = &vcpu->arch.guest_fpu.fpstate->regs.fxsave; in kvm_arch_vcpu_ioctl_get_fpu()
12544 memcpy(fpu->fpr, fxsave->st_space, 128); in kvm_arch_vcpu_ioctl_get_fpu()
12545 fpu->fcw = fxsave->cwd; in kvm_arch_vcpu_ioctl_get_fpu()
12546 fpu->fsw = fxsave->swd; in kvm_arch_vcpu_ioctl_get_fpu()
12547 fpu->ftwx = fxsave->twd; in kvm_arch_vcpu_ioctl_get_fpu()
12548 fpu->last_opcode = fxsave->fop; in kvm_arch_vcpu_ioctl_get_fpu()
12549 fpu->last_ip = fxsave->rip; in kvm_arch_vcpu_ioctl_get_fpu()
12550 fpu->last_dp = fxsave->rdp; in kvm_arch_vcpu_ioctl_get_fpu()
12551 memcpy(fpu->xmm, fxsave->xmm_space, sizeof(fxsave->xmm_space)); in kvm_arch_vcpu_ioctl_get_fpu()
12561 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) in kvm_arch_vcpu_ioctl_set_fpu()
12562 return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0; in kvm_arch_vcpu_ioctl_set_fpu()
12566 fxsave = &vcpu->arch.guest_fpu.fpstate->regs.fxsave; in kvm_arch_vcpu_ioctl_set_fpu()
12568 memcpy(fxsave->st_space, fpu->fpr, 128); in kvm_arch_vcpu_ioctl_set_fpu()
12569 fxsave->cwd = fpu->fcw; in kvm_arch_vcpu_ioctl_set_fpu()
12570 fxsave->swd = fpu->fsw; in kvm_arch_vcpu_ioctl_set_fpu()
12571 fxsave->twd = fpu->ftwx; in kvm_arch_vcpu_ioctl_set_fpu()
12572 fxsave->fop = fpu->last_opcode; in kvm_arch_vcpu_ioctl_set_fpu()
12573 fxsave->rip = fpu->last_ip; in kvm_arch_vcpu_ioctl_set_fpu()
12574 fxsave->rdp = fpu->last_dp; in kvm_arch_vcpu_ioctl_set_fpu()
12575 memcpy(fxsave->xmm_space, fpu->xmm, sizeof(fxsave->xmm_space)); in kvm_arch_vcpu_ioctl_set_fpu()
12585 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_REGS) in store_regs()
12586 __get_regs(vcpu, &vcpu->run->s.regs.regs); in store_regs()
12588 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_SREGS) in store_regs()
12589 __get_sregs(vcpu, &vcpu->run->s.regs.sregs); in store_regs()
12591 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_EVENTS) in store_regs()
12593 vcpu, &vcpu->run->s.regs.events); in store_regs()
12598 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_REGS) { in sync_regs()
12599 __set_regs(vcpu, &vcpu->run->s.regs.regs); in sync_regs()
12600 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_REGS; in sync_regs()
12603 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_SREGS) { in sync_regs()
12604 struct kvm_sregs sregs = vcpu->run->s.regs.sregs; in sync_regs()
12607 return -EINVAL; in sync_regs()
12609 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_SREGS; in sync_regs()
12612 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_EVENTS) { in sync_regs()
12613 struct kvm_vcpu_events events = vcpu->run->s.regs.events; in sync_regs()
12616 return -EINVAL; in sync_regs()
12618 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_EVENTS; in sync_regs()
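/*
 * store_regs()/sync_regs() implement KVM_CAP_SYNC_REGS: with valid bits set
 * in kvm_run, register state is mirrored through the shared run page around
 * every KVM_RUN, avoiding separate GET/SET ioctls. Sketch for an assumed
 * mmap'd run page on a host that reports the capability.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int run_with_synced_regs(int vcpu_fd, struct kvm_run *run)
{
	run->kvm_valid_regs = KVM_SYNC_X86_REGS;	/* KVM fills s.regs.regs on exit */
	if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
		return -1;

	run->s.regs.regs.rax = 0;			/* mutate in place ... */
	run->kvm_dirty_regs = KVM_SYNC_X86_REGS;	/* ... pushed back on next KVM_RUN */
	return 0;
}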
12626 if (kvm_check_tsc_unstable() && kvm->created_vcpus) in kvm_arch_vcpu_precreate()
12630 if (!kvm->arch.max_vcpu_ids) in kvm_arch_vcpu_precreate()
12631 kvm->arch.max_vcpu_ids = KVM_MAX_VCPU_IDS; in kvm_arch_vcpu_precreate()
12633 if (id >= kvm->arch.max_vcpu_ids) in kvm_arch_vcpu_precreate()
12634 return -EINVAL; in kvm_arch_vcpu_precreate()
12644 vcpu->arch.last_vmentry_cpu = -1; in kvm_arch_vcpu_create()
12645 vcpu->arch.regs_avail = ~0; in kvm_arch_vcpu_create()
12646 vcpu->arch.regs_dirty = ~0; in kvm_arch_vcpu_create()
12648 kvm_gpc_init(&vcpu->arch.pv_time, vcpu->kvm); in kvm_arch_vcpu_create()
12650 if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu)) in kvm_arch_vcpu_create()
12663 r = -ENOMEM; in kvm_arch_vcpu_create()
12668 vcpu->arch.pio_data = page_address(page); in kvm_arch_vcpu_create()
12670 vcpu->arch.mce_banks = kcalloc(KVM_MAX_MCE_BANKS * 4, sizeof(u64), in kvm_arch_vcpu_create()
12672 vcpu->arch.mci_ctl2_banks = kcalloc(KVM_MAX_MCE_BANKS, sizeof(u64), in kvm_arch_vcpu_create()
12674 if (!vcpu->arch.mce_banks || !vcpu->arch.mci_ctl2_banks) in kvm_arch_vcpu_create()
12676 vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; in kvm_arch_vcpu_create()
12678 if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, in kvm_arch_vcpu_create()
12685 if (!fpu_alloc_guest_fpstate(&vcpu->arch.guest_fpu)) { in kvm_arch_vcpu_create()
12692 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_STUFF_FEATURE_MSRS)) { in kvm_arch_vcpu_create()
12693 vcpu->arch.arch_capabilities = kvm_get_arch_capabilities(); in kvm_arch_vcpu_create()
12694 vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT; in kvm_arch_vcpu_create()
12695 vcpu->arch.perf_capabilities = kvm_caps.supported_perf_cap; in kvm_arch_vcpu_create()
12699 vcpu->arch.pending_external_vector = -1; in kvm_arch_vcpu_create()
12700 vcpu->arch.preempted_in_kernel = false; in kvm_arch_vcpu_create()
12703 vcpu->arch.hv_root_tdp = INVALID_PAGE; in kvm_arch_vcpu_create()
12713 kvm_set_tsc_khz(vcpu, vcpu->kvm->arch.default_tsc_khz); in kvm_arch_vcpu_create()
12720 fpu_free_guest_fpstate(&vcpu->arch.guest_fpu); in kvm_arch_vcpu_create()
12722 kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt); in kvm_arch_vcpu_create()
12724 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_create()
12726 kfree(vcpu->arch.mce_banks); in kvm_arch_vcpu_create()
12727 kfree(vcpu->arch.mci_ctl2_banks); in kvm_arch_vcpu_create()
12728 free_page((unsigned long)vcpu->arch.pio_data); in kvm_arch_vcpu_create()
12738 struct kvm *kvm = vcpu->kvm; in kvm_arch_vcpu_postcreate()
12740 if (mutex_lock_killable(&vcpu->mutex)) in kvm_arch_vcpu_postcreate()
12747 vcpu->arch.msr_kvm_poll_control = 1; in kvm_arch_vcpu_postcreate()
12749 mutex_unlock(&vcpu->mutex); in kvm_arch_vcpu_postcreate()
12751 if (kvmclock_periodic_sync && vcpu->vcpu_idx == 0) in kvm_arch_vcpu_postcreate()
12752 schedule_delayed_work(&kvm->arch.kvmclock_sync_work, in kvm_arch_vcpu_postcreate()
12770 kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt); in kvm_arch_vcpu_destroy()
12771 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_destroy()
12772 fpu_free_guest_fpstate(&vcpu->arch.guest_fpu); in kvm_arch_vcpu_destroy()
12777 kfree(vcpu->arch.mce_banks); in kvm_arch_vcpu_destroy()
12778 kfree(vcpu->arch.mci_ctl2_banks); in kvm_arch_vcpu_destroy()
12780 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_destroy()
12782 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_destroy()
12783 free_page((unsigned long)vcpu->arch.pio_data); in kvm_arch_vcpu_destroy()
12784 kvfree(vcpu->arch.cpuid_entries); in kvm_arch_vcpu_destroy()
12789 struct fpstate *fpstate = vcpu->arch.guest_fpu.fpstate; in kvm_xstate_reset()
12830 * Several of the "set" flows, e.g. ->set_cr0(), read other registers in kvm_vcpu_reset()
12840 * SVM doesn't unconditionally VM-Exit on INIT and SHUTDOWN, thus it's in kvm_vcpu_reset()
12851 vcpu->arch.hflags = 0; in kvm_vcpu_reset()
12853 vcpu->arch.smi_pending = 0; in kvm_vcpu_reset()
12854 vcpu->arch.smi_count = 0; in kvm_vcpu_reset()
12855 atomic_set(&vcpu->arch.nmi_queued, 0); in kvm_vcpu_reset()
12856 vcpu->arch.nmi_pending = 0; in kvm_vcpu_reset()
12857 vcpu->arch.nmi_injected = false; in kvm_vcpu_reset()
12861 memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); in kvm_vcpu_reset()
12863 vcpu->arch.dr6 = DR6_ACTIVE_LOW; in kvm_vcpu_reset()
12864 vcpu->arch.dr7 = DR7_FIXED_1; in kvm_vcpu_reset()
12867 vcpu->arch.cr2 = 0; in kvm_vcpu_reset()
12870 vcpu->arch.apf.msr_en_val = 0; in kvm_vcpu_reset()
12871 vcpu->arch.apf.msr_int_val = 0; in kvm_vcpu_reset()
12872 vcpu->arch.st.msr_val = 0; in kvm_vcpu_reset()
12878 vcpu->arch.apf.halted = false; in kvm_vcpu_reset()
12883 vcpu->arch.smbase = 0x30000; in kvm_vcpu_reset()
12885 vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT; in kvm_vcpu_reset()
12887 vcpu->arch.msr_misc_features_enables = 0; in kvm_vcpu_reset()
12888 vcpu->arch.ia32_misc_enable_msr = MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL | in kvm_vcpu_reset()
12896 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); in kvm_vcpu_reset()
12907 kvm_rdx_write(vcpu, cpuid_0x1 ? cpuid_0x1->eax : 0x600); in kvm_vcpu_reset()
12914 vcpu->arch.cr3 = 0; in kvm_vcpu_reset()
12918 * CR0.CD/NW are set on RESET, preserved on INIT. Note, some versions in kvm_vcpu_reset()
12919 * of Intel's SDM list CD/NW as being set on INIT, but they contradict in kvm_vcpu_reset()
12920 * (or qualify) that with a footnote stating that CD/NW are preserved. in kvm_vcpu_reset()
12936 * which PCIDs have to be flushed. However, CR0.WP and the paging-related in kvm_vcpu_reset()
13006 if (!stable && vcpu->cpu == smp_processor_id()) in kvm_arch_enable_virtualization_cpu()
13008 if (stable && vcpu->arch.last_host_tsc > local_tsc) { in kvm_arch_enable_virtualization_cpu()
13010 if (vcpu->arch.last_host_tsc > max_tsc) in kvm_arch_enable_virtualization_cpu()
13011 max_tsc = vcpu->arch.last_host_tsc; in kvm_arch_enable_virtualization_cpu()
13041 * N.B. - this code below runs only on platforms with reliable TSC, in kvm_arch_enable_virtualization_cpu()
13055 u64 delta_cyc = max_tsc - local_tsc; in kvm_arch_enable_virtualization_cpu()
13057 kvm->arch.backwards_tsc_observed = true; in kvm_arch_enable_virtualization_cpu()
13059 vcpu->arch.tsc_offset_adjustment += delta_cyc; in kvm_arch_enable_virtualization_cpu()
13060 vcpu->arch.last_host_tsc = local_tsc; in kvm_arch_enable_virtualization_cpu()
13070 kvm->arch.last_tsc_nsec = 0; in kvm_arch_enable_virtualization_cpu()
13071 kvm->arch.last_tsc_write = 0; in kvm_arch_enable_virtualization_cpu()
13086 return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id; in kvm_vcpu_is_reset_bsp()
13092 return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0; in kvm_vcpu_is_bsp()
13098 kfree(kvm->arch.hv_pa_pg); in kvm_arch_free_vm()
13110 return -EINVAL; in kvm_arch_init_vm()
13112 kvm->arch.vm_type = type; in kvm_arch_init_vm()
13113 kvm->arch.has_private_mem = in kvm_arch_init_vm()
13116 kvm->arch.pre_fault_allowed = in kvm_arch_init_vm()
13118 kvm->arch.disabled_quirks = kvm_caps.inapplicable_quirks & kvm_caps.supported_quirks; in kvm_arch_init_vm()
13132 atomic_set(&kvm->arch.noncoherent_dma_count, 0); in kvm_arch_init_vm()
13134 raw_spin_lock_init(&kvm->arch.tsc_write_lock); in kvm_arch_init_vm()
13135 mutex_init(&kvm->arch.apic_map_lock); in kvm_arch_init_vm()
13136 seqcount_raw_spinlock_init(&kvm->arch.pvclock_sc, &kvm->arch.tsc_write_lock); in kvm_arch_init_vm()
13137 kvm->arch.kvmclock_offset = -get_kvmclock_base_ns(); in kvm_arch_init_vm()
13139 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); in kvm_arch_init_vm()
13141 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); in kvm_arch_init_vm()
13143 kvm->arch.default_tsc_khz = max_tsc_khz ? : tsc_khz; in kvm_arch_init_vm()
13144 kvm->arch.apic_bus_cycle_ns = APIC_BUS_CYCLE_NS_DEFAULT; in kvm_arch_init_vm()
13145 kvm->arch.guest_can_read_msr_platform_info = true; in kvm_arch_init_vm()
13146 kvm->arch.enable_pmu = enable_pmu; in kvm_arch_init_vm()
13149 spin_lock_init(&kvm->arch.hv_root_tdp_lock); in kvm_arch_init_vm()
13150 kvm->arch.hv_root_tdp = INVALID_PAGE; in kvm_arch_init_vm()
13153 INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn); in kvm_arch_init_vm()
13154 INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn); in kvm_arch_init_vm()
13164 "does not run without ignore_msrs=1, please report it to kvm@vger.kernel.org.\n"); in kvm_arch_init_vm()
13167 once_init(&kvm->arch.nx_once); in kvm_arch_init_vm()
13191 * -errno: on error
13196 * GPA->HVA translation will not change. However, the HVA is a user
13208 lockdep_assert_held(&kvm->slots_lock); in __x86_set_memory_region()
13211 return ERR_PTR_USR(-EINVAL); in __x86_set_memory_region()
13215 if (slot && slot->npages) in __x86_set_memory_region()
13216 return ERR_PTR_USR(-EEXIST); in __x86_set_memory_region()
13227 if (!slot || !slot->npages) in __x86_set_memory_region()
13230 old_npages = slot->npages; in __x86_set_memory_region()
13231 hva = slot->userspace_addr; in __x86_set_memory_region()
13259 * is unsafe, i.e. will lead to use-after-free. The PIT also needs to in kvm_arch_pre_destroy_vm()
13262 cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work); in kvm_arch_pre_destroy_vm()
13263 cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work); in kvm_arch_pre_destroy_vm()
13275 if (current->mm == kvm->mm) { in kvm_arch_destroy_vm()
13281 mutex_lock(&kvm->slots_lock); in kvm_arch_destroy_vm()
13287 mutex_unlock(&kvm->slots_lock); in kvm_arch_destroy_vm()
13290 kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1)); in kvm_arch_destroy_vm()
13295 kvfree(rcu_dereference_check(kvm->arch.apic_map, 1)); in kvm_arch_destroy_vm()
13296 kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1)); in kvm_arch_destroy_vm()
13309 vfree(slot->arch.rmap[i]); in memslot_rmap_free()
13310 slot->arch.rmap[i] = NULL; in memslot_rmap_free()
13321 vfree(slot->arch.lpage_info[i - 1]); in kvm_arch_free_memslot()
13322 slot->arch.lpage_info[i - 1] = NULL; in kvm_arch_free_memslot()
13330 const int sz = sizeof(*slot->arch.rmap[0]); in memslot_rmap_alloc()
13337 if (slot->arch.rmap[i]) in memslot_rmap_alloc()
13340 slot->arch.rmap[i] = __vcalloc(lpages, sz, GFP_KERNEL_ACCOUNT); in memslot_rmap_alloc()
13341 if (!slot->arch.rmap[i]) { in memslot_rmap_alloc()
13343 return -ENOMEM; in memslot_rmap_alloc()
13353 unsigned long npages = slot->npages; in kvm_alloc_memslot_metadata()
13361 memset(&slot->arch, 0, sizeof(slot->arch)); in kvm_alloc_memslot_metadata()
13381 slot->arch.lpage_info[i - 1] = linfo; in kvm_alloc_memslot_metadata()
13383 if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1)) in kvm_alloc_memslot_metadata()
13385 if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1)) in kvm_alloc_memslot_metadata()
13386 linfo[lpages - 1].disallow_lpage = 1; in kvm_alloc_memslot_metadata()
13387 ugfn = slot->userspace_addr >> PAGE_SHIFT; in kvm_alloc_memslot_metadata()
13392 if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1)) { in kvm_alloc_memslot_metadata()
13413 vfree(slot->arch.lpage_info[i - 1]); in kvm_alloc_memslot_metadata()
13414 slot->arch.lpage_info[i - 1] = NULL; in kvm_alloc_memslot_metadata()
13416 return -ENOMEM; in kvm_alloc_memslot_metadata()
13425 * memslots->generation has been incremented. in kvm_arch_memslots_updated()
13430 /* Force re-initialization of steal_time cache */ in kvm_arch_memslots_updated()
13445 return -EINVAL; in kvm_arch_prepare_memory_region()
13448 if ((new->base_gfn + new->npages - 1) > kvm_mmu_max_gfn()) in kvm_arch_prepare_memory_region()
13449 return -EINVAL; in kvm_arch_prepare_memory_region()
13451 if (kvm_is_gfn_alias(kvm, new->base_gfn + new->npages - 1)) in kvm_arch_prepare_memory_region()
13452 return -EINVAL; in kvm_arch_prepare_memory_region()
13458 memcpy(&new->arch, &old->arch, sizeof(old->arch)); in kvm_arch_prepare_memory_region()
13460 return -EIO; in kvm_arch_prepare_memory_region()
13470 if (!kvm->arch.cpu_dirty_log_size) in kvm_mmu_update_cpu_dirty_logging()
13473 nr_slots = atomic_read(&kvm->nr_memslots_dirty_logging); in kvm_mmu_update_cpu_dirty_logging()
13483 u32 old_flags = old ? old->flags : 0; in kvm_mmu_slot_apply_flags()
13484 u32 new_flags = new ? new->flags : 0; in kvm_mmu_slot_apply_flags()
13504 * CREATE: No shadow pages exist, thus nothing to write-protect in kvm_mmu_slot_apply_flags()
13513 * READONLY and non-flags changes were filtered out above, and the only in kvm_mmu_slot_apply_flags()
13533 * Initially-all-set does not require write protecting any page, in kvm_mmu_slot_apply_flags()
13542 if (kvm->arch.cpu_dirty_log_size) { in kvm_mmu_slot_apply_flags()
13558 * write-protected before returning to userspace, i.e. before in kvm_mmu_slot_apply_flags()
13565 * Specifically, KVM also write-protects guest page tables to in kvm_mmu_slot_apply_flags()
13572 * e.g. to allow dirty logging without taking mmu_lock. in kvm_mmu_slot_apply_flags()
13574 * To handle these scenarios, KVM uses a separate software-only in kvm_mmu_slot_apply_flags()
13575 * bit (MMU-writable) to track if a SPTE is !writable due to in kvm_mmu_slot_apply_flags()
13576 * a guest page table being write-protected (KVM clears the in kvm_mmu_slot_apply_flags()
13577 * MMU-writable flag when write-protecting for shadow paging). in kvm_mmu_slot_apply_flags()
13579 * The use of MMU-writable is also the primary motivation for in kvm_mmu_slot_apply_flags()
13582 * !MMU-writable SPTE, KVM must flush if it encounters any in kvm_mmu_slot_apply_flags()
13583 * MMU-writable SPTE regardless of whether the actual hardware in kvm_mmu_slot_apply_flags()
13586 * write access" helpers to ignore MMU-writable entirely. in kvm_mmu_slot_apply_flags()
13589 * access-tracked SPTEs is particularly relevant). in kvm_mmu_slot_apply_flags()
13603 if (!kvm->arch.n_requested_mmu_pages && in kvm_arch_commit_memory_region()
13607 nr_mmu_pages = kvm->nr_memslot_pages / KVM_MEMSLOT_PAGES_TO_MMU_PAGES_RATIO; in kvm_arch_commit_memory_region()
13623 if (vcpu->arch.guest_state_protected) in kvm_arch_vcpu_in_kernel()
13633 if (vcpu->arch.guest_state_protected) in kvm_arch_vcpu_get_ip()
13652 if (vcpu->arch.guest_state_protected) in kvm_get_linear_rip()
13673 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) in kvm_get_rflags()
13681 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP && in __kvm_set_rflags()
13682 kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) in __kvm_set_rflags()
13703 return (key + 1) & (ASYNC_PF_PER_VCPU - 1); in kvm_async_pf_next_probe()
13710 while (vcpu->arch.apf.gfns[key] != ~0) in kvm_add_async_pf_gfn()
13713 vcpu->arch.apf.gfns[key] = gfn; in kvm_add_async_pf_gfn()
13722 (vcpu->arch.apf.gfns[key] != gfn && in kvm_async_pf_gfn_slot()
13723 vcpu->arch.apf.gfns[key] != ~0); i++) in kvm_async_pf_gfn_slot()
13731 return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn; in kvm_find_async_pf_gfn()
13740 if (WARN_ON_ONCE(vcpu->arch.apf.gfns[i] != gfn)) in kvm_del_async_pf_gfn()
13744 vcpu->arch.apf.gfns[i] = ~0; in kvm_del_async_pf_gfn()
13747 if (vcpu->arch.apf.gfns[j] == ~0) in kvm_del_async_pf_gfn()
13749 k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]); in kvm_del_async_pf_gfn()
13756 vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j]; in kvm_del_async_pf_gfn()
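/*
 * kvm_del_async_pf_gfn() is classic open-addressing deletion: after
 * emptying slot i, keep probing and pull back any entry whose home bucket
 * k lies cyclically outside (i, j], so no probe chain is broken by the new
 * hole. Self-contained model over a small power-of-two table; EMPTY and
 * hash() stand in for ~0 and kvm_async_pf_hash_fn().
 */
#include <stdint.h>

#define TABLE_SIZE 64u		/* power of two, like ASYNC_PF_PER_VCPU */
#define EMPTY UINT64_MAX

static uint32_t hash(uint64_t key) { return key & (TABLE_SIZE - 1); }

static void table_del(uint64_t *t, uint32_t i)
{
	uint32_t j = i, k;

	while (1) {
		t[i] = EMPTY;
		do {
			j = (j + 1) & (TABLE_SIZE - 1);
			if (t[j] == EMPTY)
				return;
			k = hash(t[j]);
			/* skip entries whose home slot k lies cyclically in (i, j] */
		} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
		t[i] = t[j];	/* move the displaced entry back over the hole */
		i = j;
	}
}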
13765 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &reason, in apf_put_user_notpresent()
13773 return kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data, in apf_put_user_ready()
13782 if (kvm_read_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data, in apf_pageready_slot_free()
13795 if (!vcpu->arch.apf.send_always && in kvm_can_deliver_async_pf()
13796 (vcpu->arch.guest_state_protected || !kvm_x86_call(get_cpl)(vcpu))) in kvm_can_deliver_async_pf()
13804 return vcpu->arch.apf.delivery_as_pf_vmexit; in kvm_can_deliver_async_pf()
13822 if (kvm_hlt_in_guest(vcpu->kvm) && !kvm_can_deliver_async_pf(vcpu)) in kvm_can_do_async_pf()
13837 trace_kvm_async_pf_not_present(work->arch.token, work->cr2_or_gpa); in kvm_arch_async_page_not_present()
13838 kvm_add_async_pf_gfn(vcpu, work->arch.gfn); in kvm_arch_async_page_not_present()
13846 fault.address = work->arch.token; in kvm_arch_async_page_not_present()
13869 .vector = vcpu->arch.apf.vec in kvm_arch_async_page_present()
13872 if (work->wakeup_all) in kvm_arch_async_page_present()
13873 work->arch.token = ~0; /* broadcast wakeup */ in kvm_arch_async_page_present()
13875 kvm_del_async_pf_gfn(vcpu, work->arch.gfn); in kvm_arch_async_page_present()
13876 trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa); in kvm_arch_async_page_present()
13878 if ((work->wakeup_all || work->notpresent_injected) && in kvm_arch_async_page_present()
13880 !apf_put_user_ready(vcpu, work->arch.token)) { in kvm_arch_async_page_present()
13881 vcpu->arch.apf.pageready_pending = true; in kvm_arch_async_page_present()
13885 vcpu->arch.apf.halted = false; in kvm_arch_async_page_present()
13892 if (!vcpu->arch.apf.pageready_pending) in kvm_arch_async_page_present_queued()
13907 * Non-coherent DMA assignment and de-assignment may affect whether or in kvm_noncoherent_dma_assignment_start_or_stop()
13910 * (or last) non-coherent device is (un)registered so that new SPTEs in kvm_noncoherent_dma_assignment_start_or_stop()
13921 if (atomic_inc_return(&kvm->arch.noncoherent_dma_count) == 1) in kvm_arch_register_noncoherent_dma()
13927 if (!atomic_dec_return(&kvm->arch.noncoherent_dma_count)) in kvm_arch_unregister_noncoherent_dma()
13933 return atomic_read(&kvm->arch.noncoherent_dma_count); in kvm_arch_has_noncoherent_dma()
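
The registration helpers above implement a first/last refcount: only the 0 -> 1 and 1 -> 0 transitions of the non-coherent device count can change the effective guest memory type, so only those transitions reach the start-or-stop path. A hedged C11 sketch of the same pattern (the names are stand-ins):

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_int noncoherent_dma_count;

	static void start_or_stop(bool start)
	{
		(void)start;	/* stub: e.g. zap SPTEs so the new memtype takes effect */
	}

	static void register_noncoherent_dma(void)
	{
		/* fetch_add returns the old value: old 0 means new count is 1. */
		if (atomic_fetch_add(&noncoherent_dma_count, 1) == 0)
			start_or_stop(true);
	}

	static void unregister_noncoherent_dma(void)
	{
		/* old 1 means new count is 0: the last device just went away. */
		if (atomic_fetch_sub(&noncoherent_dma_count, 1) == 1)
			start_or_stop(false);
	}
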
13939 return (vcpu->arch.msr_kvm_poll_control & 1) == 0; in kvm_arch_no_poll()
13971 * test that setting IA32_SPEC_CTRL to the given value in kvm_spec_ctrl_test_value()
13996 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_fixup_and_inject_pf_error()
14002 mmu->gva_to_gpa(vcpu, mmu, gva, access, &fault) != INVALID_GPA) { in kvm_fixup_and_inject_pf_error()
14004 * If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page in kvm_fixup_and_inject_pf_error()
14015 vcpu->arch.walk_mmu->inject_page_fault(vcpu, &fault); in kvm_fixup_and_inject_pf_error()
14028 if (KVM_BUG_ON(!e, vcpu->kvm)) in kvm_handle_memory_failure()
14029 return -EIO; in kvm_handle_memory_failure()
14039 * doesn't seem to be a real use case behind such requests, just return in kvm_handle_memory_failure()
14095 * page tables, so a non-global flush just degenerates to a in kvm_handle_invpcid()
14114 struct kvm_run *run = vcpu->run; in complete_sev_es_emulated_mmio()
14118 BUG_ON(!vcpu->mmio_needed); in complete_sev_es_emulated_mmio()
14121 frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment]; in complete_sev_es_emulated_mmio()
14122 len = min(8u, frag->len); in complete_sev_es_emulated_mmio()
14123 if (!vcpu->mmio_is_write) in complete_sev_es_emulated_mmio()
14124 memcpy(frag->data, run->mmio.data, len); in complete_sev_es_emulated_mmio()
14126 if (frag->len <= 8) { in complete_sev_es_emulated_mmio()
14129 vcpu->mmio_cur_fragment++; in complete_sev_es_emulated_mmio()
14132 frag->data += len; in complete_sev_es_emulated_mmio()
14133 frag->gpa += len; in complete_sev_es_emulated_mmio()
14134 frag->len -= len; in complete_sev_es_emulated_mmio()
14137 if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) { in complete_sev_es_emulated_mmio()
14138 vcpu->mmio_needed = 0; in complete_sev_es_emulated_mmio()
14146 run->mmio.phys_addr = frag->gpa; in complete_sev_es_emulated_mmio()
14147 run->mmio.len = min(8u, frag->len); in complete_sev_es_emulated_mmio()
14148 run->mmio.is_write = vcpu->mmio_is_write; in complete_sev_es_emulated_mmio()
14149 if (run->mmio.is_write) in complete_sev_es_emulated_mmio()
14150 memcpy(run->mmio.data, frag->data, min(8u, frag->len)); in complete_sev_es_emulated_mmio()
14151 run->exit_reason = KVM_EXIT_MMIO; in complete_sev_es_emulated_mmio()
14153 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio; in complete_sev_es_emulated_mmio()
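
complete_sev_es_emulated_mmio() above drains an MMIO access at most 8 bytes per exit to userspace, advancing the fragment cursor on each round trip. A simplified single-fragment sketch of that chunking (the struct and field names are stand-ins):

	#include <stdint.h>
	#include <string.h>

	struct mmio_frag {
		uint64_t gpa;		/* guest physical address of the access */
		uint8_t *data;		/* backing buffer, advanced as bytes drain */
		unsigned int len;	/* bytes remaining in this fragment */
	};

	/* Consume up to 8 bytes; returns nonzero while more exits are needed. */
	static int advance_mmio_frag(struct mmio_frag *frag, const void *run_data,
				     int is_write)
	{
		unsigned int len = frag->len < 8 ? frag->len : 8;

		if (!is_write)		/* read: copy userspace's result in */
			memcpy(frag->data, run_data, len);

		frag->data += len;
		frag->gpa  += len;
		frag->len  -= len;

		return frag->len != 0;
	}
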
14165 return -EINVAL; in kvm_sev_es_mmio_write()
14171 bytes -= handled; in kvm_sev_es_mmio_write()
14176 frag = vcpu->mmio_fragments; in kvm_sev_es_mmio_write()
14177 vcpu->mmio_nr_fragments = 1; in kvm_sev_es_mmio_write()
14178 frag->len = bytes; in kvm_sev_es_mmio_write()
14179 frag->gpa = gpa; in kvm_sev_es_mmio_write()
14180 frag->data = data; in kvm_sev_es_mmio_write()
14182 vcpu->mmio_needed = 1; in kvm_sev_es_mmio_write()
14183 vcpu->mmio_cur_fragment = 0; in kvm_sev_es_mmio_write()
14185 vcpu->run->mmio.phys_addr = gpa; in kvm_sev_es_mmio_write()
14186 vcpu->run->mmio.len = min(8u, frag->len); in kvm_sev_es_mmio_write()
14187 vcpu->run->mmio.is_write = 1; in kvm_sev_es_mmio_write()
14188 memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len)); in kvm_sev_es_mmio_write()
14189 vcpu->run->exit_reason = KVM_EXIT_MMIO; in kvm_sev_es_mmio_write()
14191 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio; in kvm_sev_es_mmio_write()
14204 return -EINVAL; in kvm_sev_es_mmio_read()
14210 bytes -= handled; in kvm_sev_es_mmio_read()
14215 frag = vcpu->mmio_fragments; in kvm_sev_es_mmio_read()
14216 vcpu->mmio_nr_fragments = 1; in kvm_sev_es_mmio_read()
14217 frag->len = bytes; in kvm_sev_es_mmio_read()
14218 frag->gpa = gpa; in kvm_sev_es_mmio_read()
14219 frag->data = data; in kvm_sev_es_mmio_read()
14221 vcpu->mmio_needed = 1; in kvm_sev_es_mmio_read()
14222 vcpu->mmio_cur_fragment = 0; in kvm_sev_es_mmio_read()
14224 vcpu->run->mmio.phys_addr = gpa; in kvm_sev_es_mmio_read()
14225 vcpu->run->mmio.len = min(8u, frag->len); in kvm_sev_es_mmio_read()
14226 vcpu->run->mmio.is_write = 0; in kvm_sev_es_mmio_read()
14227 vcpu->run->exit_reason = KVM_EXIT_MMIO; in kvm_sev_es_mmio_read()
14229 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio; in kvm_sev_es_mmio_read()
14237 vcpu->arch.sev_pio_count -= count; in advance_sev_es_emulated_pio()
14238 vcpu->arch.sev_pio_data += count * size; in advance_sev_es_emulated_pio()
14246 int size = vcpu->arch.pio.size; in complete_sev_es_emulated_outs()
14247 int port = vcpu->arch.pio.port; in complete_sev_es_emulated_outs()
14249 vcpu->arch.pio.count = 0; in complete_sev_es_emulated_outs()
14250 if (vcpu->arch.sev_pio_count) in complete_sev_es_emulated_outs()
14260 min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count); in kvm_sev_es_outs()
14261 int ret = emulator_pio_out(vcpu, size, port, vcpu->arch.sev_pio_data, count); in kvm_sev_es_outs()
14269 if (!vcpu->arch.sev_pio_count) in kvm_sev_es_outs()
14273 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_outs; in kvm_sev_es_outs()
14282 unsigned count = vcpu->arch.pio.count; in complete_sev_es_emulated_ins()
14283 int size = vcpu->arch.pio.size; in complete_sev_es_emulated_ins()
14284 int port = vcpu->arch.pio.port; in complete_sev_es_emulated_ins()
14286 complete_emulator_pio_in(vcpu, vcpu->arch.sev_pio_data); in complete_sev_es_emulated_ins()
14288 if (vcpu->arch.sev_pio_count) in complete_sev_es_emulated_ins()
14298 min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count); in kvm_sev_es_ins()
14299 if (!emulator_pio_in(vcpu, size, port, vcpu->arch.sev_pio_data, count)) in kvm_sev_es_ins()
14304 if (!vcpu->arch.sev_pio_count) in kvm_sev_es_ins()
14308 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins; in kvm_sev_es_ins()
14316 vcpu->arch.sev_pio_data = data; in kvm_sev_es_string_io()
14317 vcpu->arch.sev_pio_count = count; in kvm_sev_es_string_io()
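
kvm_sev_es_string_io() and the outs/ins helpers above emulate a string I/O of count elements at most one page of data per pass, advancing sev_pio_data and decrementing sev_pio_count after each chunk. A hedged sketch of the loop shape follows; emulate_pio_chunk() is a hypothetical stand-in for emulator_pio_in()/emulator_pio_out(), which may instead exit to userspace and resume through complete_userspace_io.

	#include <stdbool.h>

	#define PAGE_SIZE 4096U

	/* Stand-in: returns true if the chunk completed without a userspace exit. */
	static bool emulate_pio_chunk(void *data, unsigned int size,
				      unsigned int count)
	{
		(void)data; (void)size; (void)count;
		return true;
	}

	static void sev_es_string_io(char *data, unsigned int size,
				     unsigned int total)
	{
		while (total) {
			unsigned int chunk = PAGE_SIZE / size;

			if (chunk > total)
				chunk = total;

			if (!emulate_pio_chunk(data, size, chunk))
				return;		/* resume here after the exit */

			data  += chunk * size;	/* advance like sev_pio_data */
			total -= chunk;		/* decrement like sev_pio_count */
		}
	}
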