Lines Matching +full:0 +full:x8ff
145 *(((struct kvm_x86_ops *)0)->func));
152 static bool __read_mostly ignore_msrs = 0;
231 bool __read_mostly allow_smaller_maxphyaddr = 0;
488 for (i = 0; i < ARRAY_SIZE(msr_based_features_all_except_vmx); i++) { in kvm_is_immutable_feature_msr()
500 for (i = 0; i < num_msrs_to_save; i++) { in kvm_is_advertised_msr()
505 for (i = 0; i < num_emulated_msrs; i++) { in kvm_is_advertised_msr()
532 *data = 0; in kvm_do_msr_access()
538 * Userspace is allowed to read MSRs, and write '0' to MSRs, that KVM in kvm_do_msr_access()
540 * Simply check that @data is '0', which covers both the write '0' case in kvm_do_msr_access()
544 return 0; in kvm_do_msr_access()
547 kvm_debug_ratelimited("unhandled %s: 0x%x data 0x%llx\n", in kvm_do_msr_access()
553 kvm_pr_unimpl("ignored %s: 0x%x data 0x%llx\n", op, msr, *data); in kvm_do_msr_access()
555 return 0; in kvm_do_msr_access()
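The kvm_do_msr_access() hits above are its unhandled-MSR path: when the ignore_msrs knob is set, a read of an unknown MSR completes with '0', a write is dropped, and an (optionally ratelimited) message may be logged. A minimal standalone sketch of that policy, assuming two booleans that stand in for the ignore_msrs/report_ignored_msrs module parameters:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the ignore_msrs / report_ignored_msrs parameters. */
static bool ignore_msrs = true;
static bool report_ignored_msrs = true;

/* Returns 0 ("handled") for an unknown MSR when ignoring is enabled:
 * reads come back as 0 and writes are dropped.  Returns 1 to signal #GP. */
static int unhandled_msr_access(uint32_t msr, bool write, uint64_t *data)
{
	if (!ignore_msrs)
		return 1;			/* inject #GP into the guest */

	if (report_ignored_msrs)
		fprintf(stderr, "ignored %s: 0x%x data 0x%llx\n",
			write ? "wrmsr" : "rdmsr", msr,
			(unsigned long long)*data);

	if (!write)
		*data = 0;			/* reads of ignored MSRs return 0 */
	return 0;
}

int main(void)
{
	uint64_t val = 0;
	printf("rdmsr -> %d, val=%llu\n",
	       unhandled_msr_access(0x1234, false, &val),
	       (unsigned long long)val);
	return 0;
}
```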
574 for (i = 0; i < ASYNC_PF_PER_VCPU; i++) in kvm_async_pf_hash_reset()
575 vcpu->arch.apf.gfns[i] = ~0; in kvm_async_pf_hash_reset()
596 for (slot = 0; slot < kvm_nr_uret_msrs; ++slot) { in kvm_on_user_return()
636 for (i = 0; i < kvm_nr_uret_msrs; ++i) { in kvm_find_user_return_msr()
650 for (i = 0; i < kvm_nr_uret_msrs; ++i) { in kvm_user_return_msr_cpu_online()
673 return 0; in kvm_set_user_return_msr()
680 return 0; in kvm_set_user_return_msr()
721 #define EXCPT_BENIGN 0
742 #define EXCPT_FAULT 0
783 * "Certain debug exceptions may clear bit 0-3. The in kvm_deliver_exception_payload()
822 ex->payload = 0; in kvm_deliver_exception_payload()
895 kvm_queue_exception_e(vcpu, DF_VECTOR, 0); in kvm_multiple_exception()
906 kvm_multiple_exception(vcpu, nr, false, 0, false, 0); in kvm_queue_exception()
914 kvm_multiple_exception(vcpu, nr, false, 0, true, payload); in kvm_queue_exception_p()
949 vcpu->arch.exception.payload = 0; in kvm_requeue_exception()
956 kvm_inject_gp(vcpu, 0); in kvm_complete_insn_gp()
967 kvm_inject_gp(vcpu, 0); in complete_emulated_insn_gp()
1022 kvm_multiple_exception(vcpu, nr, true, error_code, false, 0); in kvm_queue_exception_e()
1034 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); in kvm_require_cpl()
1054 * Load the pae pdptrs. Return 1 if they are all valid, 0 otherwise.
1072 return 0; in load_pdptrs()
1077 if (ret < 0) in load_pdptrs()
1078 return 0; in load_pdptrs()
1080 for (i = 0; i < ARRAY_SIZE(pdpte); ++i) { in load_pdptrs()
1083 return 0; in load_pdptrs()
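load_pdptrs(), matched above, returns 1 only when every present PAE page-directory-pointer entry is free of reserved bits. A compact sketch of that validity rule, with the reserved-bit mask left as a caller-supplied parameter since it depends on the CPU's physical-address width:

```c
#include <stdbool.h>
#include <stdint.h>

#define PDPTE_PRESENT	(1ULL << 0)

/* Returns true iff every present PDPTE has no reserved bits set.
 * rsvd_mask is whatever the CPU model treats as reserved (illustrative). */
static bool pdptes_valid(const uint64_t pdpte[4], uint64_t rsvd_mask)
{
	for (int i = 0; i < 4; i++) {
		if ((pdpte[i] & PDPTE_PRESENT) && (pdpte[i] & rsvd_mask))
			return false;	/* present entry with a reserved bit */
	}
	return true;
}
```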
1106 if (cr0 & 0xffffffff00000000UL) in kvm_is_valid_cr0()
1196 return 0; in kvm_set_cr0()
1202 (void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f)); in kvm_lmsw()
1278 * saving. However, xcr0 bit 0 is always set, even if the in __kvm_set_xcr()
1304 return 0; in __kvm_set_xcr()
1310 /* Note, #UD due to CR4.OSXSAVE=0 has priority over the intercept. */ in kvm_emulate_xsetbv()
1311 if (kvm_x86_call(get_cpl)(vcpu) != 0 || in kvm_emulate_xsetbv()
1313 kvm_inject_gp(vcpu, 0); in kvm_emulate_xsetbv()
1333 * If CR4.PCIDE is changed 0 -> 1, there is no need to flush the TLB in kvm_post_set_cr4()
1347 * - CR4.PCIDE is changed from 1 to 0 in kvm_post_set_cr4()
1359 * - CR4.SMEP is changed from 0 to 1 in kvm_post_set_cr4()
1387 /* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */ in kvm_set_cr4()
1399 return 0; in kvm_set_cr4()
1406 unsigned long roots_to_free = 0; in kvm_invalidate_pcid()
1411 * this is reachable when running EPT=1 and unrestricted_guest=0, and in kvm_invalidate_pcid()
1433 * PCIDs for them are also 0, because MOV to CR3 always flushes the TLB in kvm_invalidate_pcid()
1434 * with PCIDE=0. in kvm_invalidate_pcid()
1439 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) in kvm_invalidate_pcid()
1449 unsigned long pcid = 0; in kvm_set_cr3()
1483 * even if PCID is disabled, in which case PCID=0 is flushed. It's a in kvm_set_cr3()
1486 * i.e. only PCID=0 can be relevant. in kvm_set_cr3()
1491 return 0; in kvm_set_cr3()
1503 return 0; in kvm_set_cr8()
1521 for (i = 0; i < KVM_NR_DB_REGS; i++) in kvm_update_dr0123()
1558 case 0 ... 3: in kvm_set_dr()
1578 return 0; in kvm_set_dr()
1587 case 0 ... 3: in kvm_get_dr()
1605 kvm_inject_gp(vcpu, 0); in kvm_emulate_rdpmc()
1671 * If RTM=0 because the kernel has disabled TSX, the host might in kvm_get_arch_capabilities()
1672 * have TAA_NO or TSX_CTRL. Clear TAA_NO (the guest sees RTM=0 in kvm_get_arch_capabilities()
1715 return 0; in kvm_get_feature_msr()
1777 WARN_ON(r > 0); in set_efer()
1788 return 0; in set_efer()
1807 if (index >= 0x800 && index <= 0x8ff) in kvm_msr_allowed()
1821 for (i = 0; i < msr_filter->count; i++) { in kvm_msr_allowed()
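kvm_msr_allowed() is the hit this search was really after: MSR indices 0x800-0x8ff (the x2APIC range) are always exempted from the userspace MSR filter, and every other index is looked up in the installed ranges' bitmaps. A self-contained sketch of that range-plus-bitmap lookup; the struct layout is illustrative rather than the kvm_msr_filter uAPI, and the real code additionally distinguishes read ranges from write ranges:

```c
#include <stdbool.h>
#include <stdint.h>

struct msr_range {
	uint32_t base;		/* first MSR index covered by the range */
	uint32_t nmsrs;		/* number of MSRs in the range */
	const uint8_t *bitmap;	/* one bit per MSR; a set bit permits access */
};

static bool msr_allowed(uint32_t index, const struct msr_range *ranges,
			int count, bool default_allow)
{
	/* x2APIC MSRs are never subject to the filter. */
	if (index >= 0x800 && index <= 0x8ff)
		return true;

	for (int i = 0; i < count; i++) {
		const struct msr_range *r = &ranges[i];

		if (index >= r->base && index - r->base < r->nmsrs) {
			uint32_t off = index - r->base;

			return r->bitmap[off / 8] & (1u << (off % 8));
		}
	}
	return default_allow;
}
```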
1843 * Returns 0 on success, non-0 otherwise.
1894 if (guest_cpuid_is_intel_compatible(vcpu) && (data >> 32) != 0) in __kvm_set_msr()
1915 * bits do not exist and should always read '0'. However, in __kvm_set_msr()
1962 * Returns 0 on success, non-0 otherwise.
2114 return 0; in kvm_msr_user_space()
2117 vcpu->run->msr.error = 0; in kvm_msr_user_space()
2118 memset(vcpu->run->msr.pad, 0, sizeof(vcpu->run->msr.pad)); in kvm_msr_user_space()
2138 if (reg < 0) { in __kvm_emulate_rdmsr()
2146 if (kvm_msr_user_space(vcpu, msr, KVM_EXIT_X86_RDMSR, 0, in __kvm_emulate_rdmsr()
2148 return 0; in __kvm_emulate_rdmsr()
2181 return 0; in __kvm_emulate_wrmsr()
2183 if (r < 0) in __kvm_emulate_wrmsr()
2446 if (vcpu->vcpu_id == 0 && !host_initiated) { in kvm_write_system_time()
2476 int32_t shift = 0; in kvm_get_time_scale()
2482 while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) { in kvm_get_time_scale()
2488 while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) { in kvm_get_time_scale()
2489 if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000) in kvm_get_time_scale()
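kvm_get_time_scale(), matched above, searches for a shift and a 32-bit multiplier so a counter running at one frequency can be converted to another with only a multiply and a shift. A simplified standalone version of the same idea; unlike the kernel loop it fixes the shift at 32, which only keeps the multiplier in 32 bits when scaling down (scaled_hz < base_hz):

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Pick mult/shift so that (value * mult) >> shift converts a count at
 * base_hz into units of scaled_hz.  Simplified: shift is fixed at 32,
 * so this only handles scaled_hz < base_hz; the kernel searches for a
 * shift that works for any ratio. */
static void get_scale(uint64_t scaled_hz, uint64_t base_hz,
		      uint32_t *mult, int *shift)
{
	assert(scaled_hz < base_hz);
	*shift = 32;
	*mult  = (uint32_t)(((unsigned __int128)scaled_hz << 32) / base_hz);
}

static uint64_t scale(uint64_t value, uint32_t mult, int shift)
{
	return (uint64_t)(((unsigned __int128)value * mult) >> shift);
}

int main(void)
{
	uint32_t mult;
	int shift;

	get_scale(1000000000ULL, 2816000000ULL, &mult, &shift); /* 2.816 GHz TSC -> ns */
	printf("one second of TSC ticks ~= %llu ns\n",
	       (unsigned long long)scale(2816000000ULL, mult, shift));
	return 0;
}
```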
2501 static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
2523 return 0; in set_tsc_khz()
2531 return 0; in set_tsc_khz()
2542 if (ratio == 0 || ratio >= kvm_caps.max_tsc_scaling_ratio) { in set_tsc_khz()
2549 return 0; in set_tsc_khz()
2555 int use_scaling = 0; in kvm_set_tsc_khz()
2558 if (user_tsc_khz == 0) { in kvm_set_tsc_khz()
2800 kvm->arch.nr_vcpus_matched_tsc = 0; in __kvm_synchronize_tsc()
2815 u64 data = user_value ? *user_value : 0; in kvm_synchronize_tsc()
2828 if (data == 0) { in kvm_synchronize_tsc()
2891 WARN_ON(adjustment < 0); in adjust_tsc_offset_host()
2949 *tsc_timestamp = v = 0; in vgettsc()
3093 * - 0 < N - M => M < N
3209 data->flags = 0; in __get_kvmclock()
3314 kernel_ns = 0; in kvm_guest_time_update()
3315 host_tsc = 0; in kvm_guest_time_update()
3333 if (unlikely(tgt_tsc_khz == 0)) { in kvm_guest_time_update()
3387 hv_clock.flags = 0; in kvm_guest_time_update()
3401 kvm_setup_guest_pvclock(&hv_clock, v, &vcpu->pv_time, 0); in kvm_guest_time_update()
3424 kvm_setup_guest_pvclock(&hv_clock, v, &vcpu->xen.vcpu_time_info_cache, 0); in kvm_guest_time_update()
3426 return 0; in kvm_guest_time_update()
3460 local_tsc_khz = 0; in kvm_get_wall_clock_epoch()
3474 local_tsc_khz = 0; /* Fall back to old method */ in kvm_get_wall_clock_epoch()
3554 schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0); in kvmclock_sync_fn()
3562 return (msr & 3) == 0; in is_mci_control_msr()
3584 unsigned bank_num = mcg_cap & 0xff; in set_msr_mce()
3597 if (data != 0 && data != ~(u64)0) in set_msr_mce()
3621 * Only 0 or all 1s can be written to IA32_MCi_CTL, all other in set_msr_mce()
3627 * UNIXWARE clears bit 0 of MC1_CTL to ignore correctable, in set_msr_mce()
3631 data != 0 && (data | (1 << 10) | 1) != ~(u64)0) in set_msr_mce()
3635 * All CPUs allow writing 0 to MCi_STATUS MSRs to clear the MSR. in set_msr_mce()
3640 data != 0 && !can_set_mci_status(vcpu)) in set_msr_mce()
3650 return 0; in set_msr_mce()
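The set_msr_mce() hits enforce the architectural rule that IA32_MCG_CTL and IA32_MCi_CTL accept only 0 or all-ones, with a carve-out (per the comments above) for legacy guests that clear bit 0 of MC1_CTL (old UnixWare) or bit 10. A sketch of that acceptance check, folding the quirk in exactly as the snippet does:

```c
#include <stdbool.h>
#include <stdint.h>

/* 0 or all-ones is always accepted; otherwise tolerate a value that is
 * all-ones except possibly bits 0 and 10 (legacy guest quirk). */
static bool mci_ctl_write_ok(uint64_t data)
{
	if (data == 0 || data == ~(uint64_t)0)
		return true;

	return (data | (1ULL << 10) | 1ULL) == ~(uint64_t)0;
}
```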
3662 gpa_t gpa = data & ~0x3f; in kvm_pv_enable_async_pf()
3665 if (data & 0x30) in kvm_pv_enable_async_pf()
3677 return data ? 1 : 0; in kvm_pv_enable_async_pf()
3684 return 0; in kvm_pv_enable_async_pf()
3696 return 0; in kvm_pv_enable_async_pf()
3712 return 0; in kvm_pv_enable_async_pf_int()
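The kvm_pv_enable_async_pf*() hits show how MSR_KVM_ASYNC_PF_EN packs everything into a single value: the shared area's GPA is 64-byte aligned, so the low 6 bits are free for the enable bit and flags, and bits 4:5 are reserved (the "data & 0x30" check). A small decoding sketch; the struct and field names are mine, not the kernel's:

```c
#include <stdbool.h>
#include <stdint.h>

struct async_pf_msr {
	uint64_t gpa;		/* 64-byte-aligned address of the shared area */
	bool enabled;		/* bit 0 */
	uint8_t flags;		/* the remaining low bits (illustrative) */
};

/* Returns false when reserved bits 4:5 are set, mirroring the
 * "data & 0x30" rejection in the snippet above. */
static bool decode_async_pf_en(uint64_t data, struct async_pf_msr *out)
{
	if (data & 0x30)
		return false;

	out->gpa     = data & ~0x3fULL;
	out->enabled = data & 1;
	out->flags   = data & 0x3f;
	return true;
}
```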
3718 vcpu->arch.time = 0; in kvmclock_reset()
3816 u8 st_preempted = 0; in record_steal_time()
3822 asm volatile("1: xchgb %0, %2\n" in record_steal_time()
3834 vcpu->arch.st.preempted = 0; in record_steal_time()
3847 unsafe_put_user(0, &st->preempted, out); in record_steal_time()
3848 vcpu->arch.st.preempted = 0; in record_steal_time()
3883 * PL[0-3]_SSP while executing in the kernel is safe, as U_CET is specific to
3884 * userspace, and PL[0-3]_SSP are only consumed when transitioning to lower
4035 data &= ~(u64)0x40; /* ignore flush filter disable */ in kvm_set_msr_common()
4036 data &= ~(u64)0x100; /* ignore ignne emulation enable */ in kvm_set_msr_common()
4037 data &= ~(u64)0x8; /* ignore TLB cache disable */ in kvm_set_msr_common()
4051 if (data != 0) { in kvm_set_msr_common()
4062 case MTRRphysBase_MSR(0) ... MSR_MTRRfix4K_F8000: in kvm_set_msr_common()
4067 case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff: in kvm_set_msr_common()
4147 kvm_write_wall_clock(vcpu->kvm, data, 0); in kvm_set_msr_common()
4154 kvm_write_wall_clock(vcpu->kvm, data, 0); in kvm_set_msr_common()
4185 if (data & 0x1) { in kvm_set_msr_common()
4327 return 0; in kvm_set_msr_common()
4335 unsigned bank_num = mcg_cap & 0xff; in get_msr_mce()
4341 data = 0; in get_msr_mce()
4378 return 0; in get_msr_mce()
4404 * limit) MSRs. Just return 0, as we do not want to expose the host in kvm_get_msr_common()
4409 case MSR_PP0_ENERGY_STATUS: /* Power plane 0 (core) */ in kvm_get_msr_common()
4413 msr_info->data = 0; in kvm_get_msr_common()
4421 msr_info->data = 0; in kvm_get_msr_common()
4466 case MTRRphysBase_MSR(0) ... MSR_MTRRfix4K_F8000: in kvm_get_msr_common()
4469 case 0xcd: /* fsb frequency */ in kvm_get_msr_common()
4475 * Models 0,1: 000 in bits 23:21 indicating a bus speed of in kvm_get_msr_common()
4489 case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff: in kvm_get_msr_common()
4557 msr_info->data = 0; in kvm_get_msr_common()
4602 msr_info->data = 0x20000000; in kvm_get_msr_common()
4628 * enabled, latency 0x1, configured in kvm_get_msr_common()
4630 msr_info->data = 0xbe702111; in kvm_get_msr_common()
4680 return 0; in kvm_get_msr_common()
4697 for (i = 0; i < msrs->nmsrs; ++i) { in __msr_io()
4800 return 0; in kvm_ioctl_get_supported_hv_cpuid()
4811 return kvm && kvm->arch.has_protected_state ? 0 : KVM_SYNC_X86_VALID_FIELDS; in kvm_sync_valid_fields()
4816 int r = 0; in kvm_vm_ioctl_check_extension()
4960 r = 0; in kvm_vm_ioctl_check_extension()
4977 kvm_x86_ops.nested_ops->get_state(NULL, NULL, 0) : 0; in kvm_vm_ioctl_check_extension()
4998 r = 0; in kvm_vm_ioctl_check_extension()
5007 r = enable_pmu ? KVM_CAP_PMU_VALID_MASK : 0; in kvm_vm_ioctl_check_extension()
5038 return 0; in __kvm_x86_dev_get_attr()
5051 if (r < 0) in kvm_x86_dev_get_attr()
5057 return 0; in kvm_x86_dev_get_attr()
5097 r = 0; in kvm_arch_dev_ioctl()
5117 r = 0; in kvm_arch_dev_ioctl()
5125 r = 0; in kvm_arch_dev_ioctl()
5146 r = 0; in kvm_arch_dev_ioctl()
5228 vcpu->arch.tsc_offset_adjustment = 0; in kvm_arch_vcpu_load()
5233 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 : in kvm_arch_vcpu_load()
5235 if (tsc_delta < 0) in kvm_arch_vcpu_load()
5363 return 0; in kvm_vcpu_ioctl_set_lapic()
5406 return 0; in kvm_vcpu_ioctl_interrupt()
5421 return 0; in kvm_vcpu_ioctl_interrupt()
5428 return 0; in kvm_vcpu_ioctl_nmi()
5437 return 0; in vcpu_ioctl_tpr_access_reporting()
5444 unsigned bank_num = mcg_cap & 0xff, bank; in kvm_vcpu_ioctl_x86_setup_mce()
5449 if (mcg_cap & ~(kvm_caps.supported_mce_cap | 0xff | 0xff0000)) in kvm_vcpu_ioctl_x86_setup_mce()
5451 r = 0; in kvm_vcpu_ioctl_x86_setup_mce()
5455 vcpu->arch.mcg_ctl = ~(u64)0; in kvm_vcpu_ioctl_x86_setup_mce()
5456 /* Init IA32_MCi_CTL to all 1s, IA32_MCi_CTL2 to all 0s */ in kvm_vcpu_ioctl_x86_setup_mce()
5457 for (bank = 0; bank < bank_num; bank++) { in kvm_vcpu_ioctl_x86_setup_mce()
5458 vcpu->arch.mce_banks[bank*4] = ~(u64)0; in kvm_vcpu_ioctl_x86_setup_mce()
5460 vcpu->arch.mci_ctl2_banks[bank] = 0; in kvm_vcpu_ioctl_x86_setup_mce()
5498 return 0; in kvm_vcpu_x86_set_ucna()
5503 return 0; in kvm_vcpu_x86_set_ucna()
5510 unsigned bank_num = mcg_cap & 0xff; in kvm_vcpu_ioctl_x86_set_mce()
5526 vcpu->arch.mcg_ctl != ~(u64)0) in kvm_vcpu_ioctl_x86_set_mce()
5527 return 0; in kvm_vcpu_ioctl_x86_set_mce()
5532 if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0) in kvm_vcpu_ioctl_x86_set_mce()
5533 return 0; in kvm_vcpu_ioctl_x86_set_mce()
5538 return 0; in kvm_vcpu_ioctl_x86_set_mce()
5556 return 0; in kvm_vcpu_ioctl_x86_set_mce()
5597 memset(events, 0, sizeof(*events)); in kvm_vcpu_ioctl_x86_get_vcpu_events()
5667 events->exception.injected = 0; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5669 events->exception_has_payload = 0; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5671 events->exception.pending = 0; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5672 events->exception_has_payload = 0; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5710 vcpu->arch.nmi_pending = 0; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5762 return 0; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5774 memset(dbgregs, 0, sizeof(*dbgregs)); in kvm_vcpu_ioctl_x86_get_debugregs()
5777 for (i = 0; i < ARRAY_SIZE(vcpu->arch.db); i++) in kvm_vcpu_ioctl_x86_get_debugregs()
5782 return 0; in kvm_vcpu_ioctl_x86_get_debugregs()
5802 for (i = 0; i < ARRAY_SIZE(vcpu->arch.db); i++) in kvm_vcpu_ioctl_x86_set_debugregs()
5810 return 0; in kvm_vcpu_ioctl_x86_set_debugregs()
5833 return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0; in kvm_vcpu_ioctl_x86_get_xsave2()
5837 return 0; in kvm_vcpu_ioctl_x86_get_xsave2()
5851 return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0; in kvm_vcpu_ioctl_x86_set_xsave()
5867 guest_xcrs->nr_xcrs = 0; in kvm_vcpu_ioctl_x86_get_xcrs()
5868 return 0; in kvm_vcpu_ioctl_x86_get_xcrs()
5872 guest_xcrs->flags = 0; in kvm_vcpu_ioctl_x86_get_xcrs()
5873 guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK; in kvm_vcpu_ioctl_x86_get_xcrs()
5874 guest_xcrs->xcrs[0].value = vcpu->arch.xcr0; in kvm_vcpu_ioctl_x86_get_xcrs()
5875 return 0; in kvm_vcpu_ioctl_x86_get_xcrs()
5881 int i, r = 0; in kvm_vcpu_ioctl_x86_set_xcrs()
5893 for (i = 0; i < guest_xcrs->nr_xcrs; i++) in kvm_vcpu_ioctl_x86_set_xcrs()
5917 return 0; in kvm_set_guest_paused()
5927 r = 0; in kvm_arch_tsc_has_attr()
5947 r = 0; in kvm_arch_tsc_get_attr()
5985 r = 0; in kvm_arch_tsc_set_attr()
6032 if (cap->args[0]) in kvm_vcpu_ioctl_enable_cap()
6051 user_ptr = (void __user *)(uintptr_t)cap->args[0]; in kvm_vcpu_ioctl_enable_cap()
6065 return kvm_hv_set_enforce_cpuid(vcpu, cap->args[0]); in kvm_vcpu_ioctl_enable_cap()
6069 vcpu->arch.pv_cpuid.enforce = cap->args[0]; in kvm_vcpu_ioctl_enable_cap()
6070 return 0; in kvm_vcpu_ioctl_enable_cap()
6105 return 0; in kvm_translate_kvm_reg()
6118 return 0; in kvm_get_one_msr()
6131 return 0; in kvm_set_one_msr()
6185 u64 nr_regs = guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK) ? 1 : 0; in kvm_get_reg_list()
6198 put_user(KVM_X86_REG_KVM(KVM_REG_GUEST_SSP), &user_list->reg[0])) in kvm_get_reg_list()
6201 return 0; in kvm_get_reg_list()
6237 r = 0; in kvm_arch_vcpu_ioctl()
6305 r = 0; in kvm_arch_vcpu_ioctl()
6316 r = msr_io(vcpu, argp, do_set_msr, 0); in kvm_arch_vcpu_ioctl()
6339 r = 0; in kvm_arch_vcpu_ioctl()
6383 r = 0; in kvm_arch_vcpu_ioctl()
6402 if (r < 0) in kvm_arch_vcpu_ioctl()
6409 r = 0; in kvm_arch_vcpu_ioctl()
6434 if (r < 0) in kvm_arch_vcpu_ioctl()
6440 r = 0; in kvm_arch_vcpu_ioctl()
6465 if (r < 0) in kvm_arch_vcpu_ioctl()
6472 r = 0; in kvm_arch_vcpu_ioctl()
6483 if (r < 0) in kvm_arch_vcpu_ioctl()
6490 r = 0; in kvm_arch_vcpu_ioctl()
6517 if (user_tsc_khz == 0) in kvm_arch_vcpu_ioctl()
6521 r = 0; in kvm_arch_vcpu_ioctl()
6557 if (r < 0) in kvm_arch_vcpu_ioctl()
6568 r = 0; in kvm_arch_vcpu_ioctl()
6645 r = 0; in kvm_arch_vcpu_ioctl()
6717 return 0; in kvm_vm_ioctl_set_nr_mmu_pages()
6750 if (cap->args[0] & ~kvm_caps.supported_quirks) in kvm_vm_ioctl_enable_cap()
6754 kvm->arch.disabled_quirks |= cap->args[0] & kvm_caps.supported_quirks; in kvm_vm_ioctl_enable_cap()
6755 r = 0; in kvm_vm_ioctl_enable_cap()
6760 if (cap->args[0] > MAX_NR_RESERVED_IOAPIC_PINS) in kvm_vm_ioctl_enable_cap()
6770 kvm->arch.nr_reserved_ioapic_pins = cap->args[0]; in kvm_vm_ioctl_enable_cap()
6772 r = 0; in kvm_vm_ioctl_enable_cap()
6779 if (cap->args[0] & ~KVM_X2APIC_API_VALID_FLAGS) in kvm_vm_ioctl_enable_cap()
6782 if (cap->args[0] & KVM_X2APIC_API_USE_32BIT_IDS) in kvm_vm_ioctl_enable_cap()
6784 if (cap->args[0] & KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK) in kvm_vm_ioctl_enable_cap()
6787 r = 0; in kvm_vm_ioctl_enable_cap()
6791 if (cap->args[0] & ~kvm_get_allowed_disable_exits()) in kvm_vm_ioctl_enable_cap()
6803 (cap->args[0] & ~(KVM_X86_DISABLE_EXITS_PAUSE | in kvm_vm_ioctl_enable_cap()
6807 kvm_disable_exits(kvm, cap->args[0]); in kvm_vm_ioctl_enable_cap()
6808 r = 0; in kvm_vm_ioctl_enable_cap()
6813 kvm->arch.guest_can_read_msr_platform_info = cap->args[0]; in kvm_vm_ioctl_enable_cap()
6814 r = 0; in kvm_vm_ioctl_enable_cap()
6817 kvm->arch.exception_payload_enabled = cap->args[0]; in kvm_vm_ioctl_enable_cap()
6818 r = 0; in kvm_vm_ioctl_enable_cap()
6821 kvm->arch.triple_fault_event = cap->args[0]; in kvm_vm_ioctl_enable_cap()
6822 r = 0; in kvm_vm_ioctl_enable_cap()
6826 if (cap->args[0] & ~KVM_MSR_EXIT_REASON_VALID_MASK) in kvm_vm_ioctl_enable_cap()
6828 kvm->arch.user_space_msr_mask = cap->args[0]; in kvm_vm_ioctl_enable_cap()
6829 r = 0; in kvm_vm_ioctl_enable_cap()
6833 if (cap->args[0] & ~KVM_BUS_LOCK_DETECTION_VALID_MODE) in kvm_vm_ioctl_enable_cap()
6836 if ((cap->args[0] & KVM_BUS_LOCK_DETECTION_OFF) && in kvm_vm_ioctl_enable_cap()
6837 (cap->args[0] & KVM_BUS_LOCK_DETECTION_EXIT)) in kvm_vm_ioctl_enable_cap()
6841 cap->args[0] & KVM_BUS_LOCK_DETECTION_EXIT) in kvm_vm_ioctl_enable_cap()
6843 r = 0; in kvm_vm_ioctl_enable_cap()
6847 unsigned long allowed_attributes = 0; in kvm_vm_ioctl_enable_cap()
6849 r = sgx_set_attribute(&allowed_attributes, cap->args[0]); in kvm_vm_ioctl_enable_cap()
6867 r = kvm_x86_call(vm_copy_enc_context_from)(kvm, cap->args[0]); in kvm_vm_ioctl_enable_cap()
6874 r = kvm_x86_call(vm_move_enc_context_from)(kvm, cap->args[0]); in kvm_vm_ioctl_enable_cap()
6877 if (cap->args[0] & ~KVM_EXIT_HYPERCALL_VALID_MASK) { in kvm_vm_ioctl_enable_cap()
6881 kvm->arch.hypercall_exit_enabled = cap->args[0]; in kvm_vm_ioctl_enable_cap()
6882 r = 0; in kvm_vm_ioctl_enable_cap()
6886 if (cap->args[0] & ~1) in kvm_vm_ioctl_enable_cap()
6888 kvm->arch.exit_on_emulation_error = cap->args[0]; in kvm_vm_ioctl_enable_cap()
6889 r = 0; in kvm_vm_ioctl_enable_cap()
6893 if (!enable_pmu || (cap->args[0] & ~KVM_CAP_PMU_VALID_MASK)) in kvm_vm_ioctl_enable_cap()
6898 kvm->arch.enable_pmu = !(cap->args[0] & KVM_PMU_CAP_DISABLE); in kvm_vm_ioctl_enable_cap()
6899 r = 0; in kvm_vm_ioctl_enable_cap()
6905 if (cap->args[0] > KVM_MAX_VCPU_IDS) in kvm_vm_ioctl_enable_cap()
6909 if (kvm->arch.bsp_vcpu_id > cap->args[0]) { in kvm_vm_ioctl_enable_cap()
6911 } else if (kvm->arch.max_vcpu_ids == cap->args[0]) { in kvm_vm_ioctl_enable_cap()
6912 r = 0; in kvm_vm_ioctl_enable_cap()
6914 kvm->arch.max_vcpu_ids = cap->args[0]; in kvm_vm_ioctl_enable_cap()
6915 r = 0; in kvm_vm_ioctl_enable_cap()
6921 if ((u32)cap->args[0] & ~KVM_X86_NOTIFY_VMEXIT_VALID_BITS) in kvm_vm_ioctl_enable_cap()
6925 if (!((u32)cap->args[0] & KVM_X86_NOTIFY_VMEXIT_ENABLED)) in kvm_vm_ioctl_enable_cap()
6929 kvm->arch.notify_window = cap->args[0] >> 32; in kvm_vm_ioctl_enable_cap()
6930 kvm->arch.notify_vmexit_flags = (u32)cap->args[0]; in kvm_vm_ioctl_enable_cap()
6931 r = 0; in kvm_vm_ioctl_enable_cap()
6954 if (cap->args[0]) in kvm_vm_ioctl_enable_cap()
6960 r = 0; in kvm_vm_ioctl_enable_cap()
6965 u64 bus_cycle_ns = cap->args[0]; in kvm_vm_ioctl_enable_cap()
6977 r = 0; in kvm_vm_ioctl_enable_cap()
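Most of the kvm_vm_ioctl_enable_cap() cases above follow one pattern: reject unknown bits in cap->args[0], commit the value, return 0. KVM_CAP_X86_NOTIFY_VMEXIT additionally packs a 32-bit notify window into the upper half of args[0] and flags into the lower half. A sketch of that unpack-and-validate step, using placeholder flag constants rather than the real uAPI values:

```c
#include <stdbool.h>
#include <stdint.h>

/* Placeholder flag values, for illustration only. */
#define NOTIFY_VMEXIT_ENABLED		(1u << 0)
#define NOTIFY_VMEXIT_VALID_BITS	(NOTIFY_VMEXIT_ENABLED | (1u << 1))

struct notify_cfg {
	uint32_t window;
	uint32_t flags;
};

static bool enable_notify_vmexit(uint64_t arg0, struct notify_cfg *cfg)
{
	uint32_t flags = (uint32_t)arg0;

	if (flags & ~NOTIFY_VMEXIT_VALID_BITS)
		return false;			/* unknown flag bits */
	if (!(flags & NOTIFY_VMEXIT_ENABLED))
		return false;

	cfg->window = (uint32_t)(arg0 >> 32);	/* upper half: notify window */
	cfg->flags  = flags;			/* lower half: flags */
	return true;
}
```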
7014 for (i = 0; i < msr_filter->count; i++) in kvm_free_msr_filter()
7027 return 0; in kvm_add_msr_filter()
7051 return 0; in kvm_add_msr_filter()
7066 for (i = 0; i < ARRAY_SIZE(filter->ranges); i++) in kvm_vm_ioctl_set_msr_filter()
7077 for (i = 0; i < ARRAY_SIZE(filter->ranges); i++) { in kvm_vm_ioctl_set_msr_filter()
7099 return 0; in kvm_vm_ioctl_set_msr_filter()
7116 #define KVM_X86_SET_MSR_FILTER_COMPAT _IOW(KVMIO, 0xc6, struct kvm_msr_filter_compat)
7137 for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) { in kvm_arch_vm_compat_ioctl()
7188 struct kvm_clock_data data = { 0 }; in kvm_vm_ioctl_get_clock()
7194 return 0; in kvm_vm_ioctl_get_clock()
7240 return 0; in kvm_vm_ioctl_set_clock()
7348 r = 0; in kvm_arch_vm_ioctl()
7353 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ in kvm_arch_vm_ioctl()
7371 r = 0; in kvm_arch_vm_ioctl()
7377 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ in kvm_arch_vm_ioctl()
7407 r = 0; in kvm_arch_vm_ioctl()
7433 r = 0; in kvm_arch_vm_ioctl()
7462 r = 0; in kvm_arch_vm_ioctl()
7528 if (user_tsc_khz == 0) in kvm_arch_vm_ioctl()
7534 r = 0; in kvm_arch_vm_ioctl()
7623 if (rdmsr_safe(msr_index, &dummy[0], &dummy[1])) in kvm_probe_msr_to_save()
7733 num_msrs_to_save = 0; in kvm_init_msr_lists()
7734 num_emulated_msrs = 0; in kvm_init_msr_lists()
7735 num_msr_based_features = 0; in kvm_init_msr_lists()
7737 for (i = 0; i < ARRAY_SIZE(msrs_to_save_base); i++) in kvm_init_msr_lists()
7741 for (i = 0; i < ARRAY_SIZE(msrs_to_save_pmu); i++) in kvm_init_msr_lists()
7745 for (i = 0; i < ARRAY_SIZE(emulated_msrs_all); i++) { in kvm_init_msr_lists()
7756 for (i = 0; i < ARRAY_SIZE(msr_based_features_all_except_vmx); i++) in kvm_init_msr_lists()
7763 int handled = 0; in vcpu_mmio_write()
7783 int handled = 0; in vcpu_mmio_read()
7835 u64 access = (kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_mmu_gva_to_gpa_read()
7845 u64 access = (kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_mmu_gva_to_gpa_write()
7857 return mmu->gva_to_gpa(vcpu, mmu, gva, 0, exception); in kvm_mmu_gva_to_gpa_system()
7878 if (ret < 0) { in kvm_read_guest_virt_helper()
7898 u64 access = (kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_fetch_guest_virt()
7913 if (unlikely(ret < 0)) in kvm_fetch_guest_virt()
7923 u64 access = (kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_read_guest_virt()
7931 memset(exception, 0, sizeof(*exception)); in kvm_read_guest_virt()
7942 u64 access = 0; in emulator_read_std()
7969 if (ret < 0) { in kvm_write_guest_virt_helper()
8025 r = kvm_check_emulate_insn(vcpu, emul_type, NULL, 0); in handle_ud()
8031 sig, sizeof(sig), &e) == 0 && in handle_ud()
8032 memcmp(sig, kvm_emulate_prefix, sizeof(sig)) == 0) { in handle_ud()
8055 return 0; in vcpu_is_mmio_gpa()
8063 u64 access = ((kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0) in vcpu_mmio_gva_to_gpa()
8064 | (write ? PFERR_WRITE_MASK : 0); in vcpu_mmio_gva_to_gpa()
8073 vcpu->arch.mmio_access, 0, access))) { in vcpu_mmio_gva_to_gpa()
8094 if (ret < 0) in emulator_write_phys()
8095 return 0; in emulator_write_phys()
8116 vcpu->mmio_fragments[0].gpa, val); in read_prepare()
8117 vcpu->mmio_read_completed = 0; in read_prepare()
8121 return 0; in read_prepare()
8152 struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0]; in write_exit_mmio()
8197 if (ret < 0) in emulator_read_write_onepage()
8237 vcpu->mmio_nr_fragments = 0; in emulator_read_write()
8264 gpa = vcpu->mmio_fragments[0].gpa; in emulator_read_write()
8267 vcpu->mmio_cur_fragment = 0; in emulator_read_write()
8269 vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len); in emulator_read_write()
8358 if (r < 0) in emulator_cmpxchg_emulated()
8391 for (i = 0; i < count; i++) { in emulator_pio_in_out()
8398 if (i == 0) in emulator_pio_in_out()
8403 * was running. Drop writes / read as 0. in emulator_pio_in_out()
8406 memset(data, 0, size * (count - i)); in emulator_pio_in_out()
8421 memset(vcpu->arch.pio_data, 0, size * count); in emulator_pio_in_out()
8431 return 0; in emulator_pio_in_out()
8450 vcpu->arch.pio.count = 0; in complete_emulator_pio_in()
8552 case 0: in emulator_get_cr()
8569 return 0; in emulator_get_cr()
8578 int res = 0; in emulator_set_cr()
8581 case 0: in emulator_set_cr()
8645 memset(desc, 0, sizeof(*desc)); in emulator_get_segment()
8647 *base3 = 0; in emulator_get_segment()
8685 var.limit = (var.limit << 12) | 0xfff; in emulator_set_segment()
8695 var.padding = 0; in emulator_set_segment()
8708 if (r < 0) in emulator_get_msr_with_filter()
8712 if (kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_RDMSR, 0, in emulator_get_msr_with_filter()
8731 if (r < 0) in emulator_set_msr_with_filter()
8934 mask = 0; in toggle_interruptibility()
8981 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0; in init_emulate_ctxt()
8989 ctxt->interruptibility = 0; in init_emulate_ctxt()
9031 memset(&info, 0, sizeof(info)); in prepare_emulation_failure_exit()
9033 kvm_x86_call(get_exit_info)(vcpu, (u32 *)&info[0], &info[1], &info[2], in prepare_emulation_failure_exit()
9049 run->emulation_failure.flags = 0; in prepare_emulation_failure_exit()
9058 memset(run->emulation_failure.insn_bytes, 0x90, in prepare_emulation_failure_exit()
9065 ndata * sizeof(data[0])); in prepare_emulation_failure_exit()
9074 prepare_emulation_failure_exit(vcpu, NULL, 0, ctxt->fetch.data, in prepare_emulation_ctxt_failure_exit()
9081 prepare_emulation_failure_exit(vcpu, data, ndata, NULL, 0); in __kvm_prepare_emulation_failure_exit()
9087 __kvm_prepare_emulation_failure_exit(vcpu, NULL, 0); in kvm_prepare_emulation_failure_exit()
9096 int ndata = 0; in kvm_prepare_event_vectoring_exit()
9121 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); in handle_emulation_failure()
9128 return 0; in handle_emulation_failure()
9133 if (!is_guest_mode(vcpu) && kvm_x86_call(get_cpl)(vcpu) == 0) { in handle_emulation_failure()
9135 return 0; in handle_emulation_failure()
9185 u32 dr6 = 0; in kvm_vcpu_check_hw_bp()
9191 for (i = 0; i < 4; i++, enable >>= 2, rwlen >>= 4) in kvm_vcpu_check_hw_bp()
9206 return 0; in kvm_vcpu_do_singlestep()
9219 return 0; in kvm_skip_emulated_instruction()
9279 u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0, in kvm_vcpu_check_code_breakpoint()
9283 if (dr6 != 0) { in kvm_vcpu_check_code_breakpoint()
9288 *r = 0; in kvm_vcpu_check_code_breakpoint()
9296 u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0, in kvm_vcpu_check_code_breakpoint()
9300 if (dr6 != 0) { in kvm_vcpu_check_code_breakpoint()
9315 case 0xe4: /* IN */ in is_vmware_backdoor_opcode()
9316 case 0xe5: in is_vmware_backdoor_opcode()
9317 case 0xec: in is_vmware_backdoor_opcode()
9318 case 0xed: in is_vmware_backdoor_opcode()
9319 case 0xe6: /* OUT */ in is_vmware_backdoor_opcode()
9320 case 0xe7: in is_vmware_backdoor_opcode()
9321 case 0xee: in is_vmware_backdoor_opcode()
9322 case 0xef: in is_vmware_backdoor_opcode()
9323 case 0x6c: /* INS */ in is_vmware_backdoor_opcode()
9324 case 0x6d: in is_vmware_backdoor_opcode()
9325 case 0x6e: /* OUTS */ in is_vmware_backdoor_opcode()
9326 case 0x6f: in is_vmware_backdoor_opcode()
9332 case 0x33: /* RDPMC */ in is_vmware_backdoor_opcode()
9391 return 0; in x86_emulate_instruction()
9440 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); in x86_emulate_instruction()
9497 ctxt->exception.address = 0; in x86_emulate_instruction()
9528 vcpu->arch.pio.count = 0; in x86_emulate_instruction()
9533 r = 0; in x86_emulate_instruction()
9539 r = 0; in x86_emulate_instruction()
9543 r = 0; in x86_emulate_instruction()
9588 return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0); in kvm_emulate_instruction()
9595 return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len); in kvm_emulate_instruction_from_buffer()
9601 vcpu->arch.pio.count = 0; in complete_fast_pio_out_port_0x7e()
9607 vcpu->arch.pio.count = 0; in complete_fast_pio_out()
9626 * incremented prior to exiting to userspace to handle "OUT 0x7e". in kvm_fast_pio_out()
9628 if (port == 0x7e && in kvm_fast_pio_out()
9637 return 0; in kvm_fast_pio_out()
9648 vcpu->arch.pio.count = 0; in complete_fast_pio_in()
9653 val = (vcpu->arch.pio.size < 4) ? kvm_rax_read(vcpu) : 0; in complete_fast_pio_in()
9668 val = (size < 4) ? kvm_rax_read(vcpu) : 0; in kvm_fast_pio_in()
9679 return 0; in kvm_fast_pio_in()
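complete_fast_pio_in()/kvm_fast_pio_in() read the current RAX only for 1- and 2-byte ports because those sizes must preserve the untouched bytes, whereas a 4-byte IN zero-extends like any 32-bit register write; hence "val = (size < 4) ? kvm_rax_read(vcpu) : 0". A sketch of that merge as I read it:

```c
#include <stdint.h>
#include <stdio.h>

/* Merge a 'size'-byte port-read result into RAX: 1- and 2-byte reads
 * preserve the untouched bytes, a 4-byte read zero-extends the result. */
static uint64_t merge_pio_in(uint64_t rax, uint32_t data, int size)
{
	uint64_t keep = (size < 4) ? rax : 0;
	uint64_t mask = (size == 4) ? 0xffffffffULL
				    : ((1ULL << (size * 8)) - 1);

	return (keep & ~mask) | (data & mask);
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)
	       merge_pio_in(0x1122334455667788ULL, 0xab, 1));
	/* prints 0x11223344556677ab */
	return 0;
}
```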
9696 __this_cpu_write(cpu_tsc_khz, 0); in kvmclock_cpu_down_prep()
9697 return 0; in kvmclock_cpu_down_prep()
9750 int send_ipi = 0; in __kvmclock_cpufreq_notifier()
9830 return 0; in kvmclock_cpufreq_notifier()
9832 return 0; in kvmclock_cpufreq_notifier()
9837 return 0; in kvmclock_cpufreq_notifier()
9847 return 0; in kvmclock_cpu_online()
9887 atomic_set(&kvm_guest_has_master_clock, 0); in pvclock_gtod_update_fn()
9922 atomic_read(&kvm_guest_has_master_clock) != 0) in pvclock_gtod_notify()
9924 return 0; in pvclock_gtod_notify()
10003 * KVM assumes that PAT entry '0' encodes WB memtype and simply zeroes in kvm_x86_vendor_init()
10004 * the PAT bits in SPTEs. Bail if PAT[0] is programmed to something in kvm_x86_vendor_init()
10006 * with an exception. PAT[0] is set to WB on RESET and also by the in kvm_x86_vendor_init()
10010 (host_pat & GENMASK(2, 0)) != 6) { in kvm_x86_vendor_init()
10011 pr_err("host PAT[0] is not WB\n"); in kvm_x86_vendor_init()
10027 memset(&kvm_caps, 0, sizeof(kvm_caps)); in kvm_x86_vendor_init()
10041 kvm_nr_uret_msrs = 0; in kvm_x86_vendor_init()
10071 if (r != 0) in kvm_x86_vendor_init()
10081 if (r < 0) in kvm_x86_vendor_init()
10111 kvm_caps.supported_xss = 0; in kvm_x86_vendor_init()
10130 u64 max = min(0x7fffffffULL, in kvm_x86_vendor_init()
10136 return 0; in kvm_x86_vendor_init()
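Within the kvm_x86_vendor_init() hits, the host-PAT check insists that PAT entry 0 still encodes write-back (value 6), since KVM zeroes the PAT bits in SPTEs and therefore inherits whatever PA0 selects. A sketch of pulling the 3-bit memory type out of each byte-wide IA32_PAT field and checking entry 0:

```c
#include <stdbool.h>
#include <stdint.h>

#define PAT_WB	0x6	/* write-back memory type encoding */

/* IA32_PAT holds eight byte-wide entries; the type lives in bits 2:0. */
static inline uint8_t pat_entry(uint64_t pat_msr, int idx)
{
	return (pat_msr >> (idx * 8)) & 0x7;
}

static bool pat0_is_wb(uint64_t pat_msr)
{
	return pat_entry(pat_msr, 0) == PAT_WB;
}
```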
10210 clock_pairing.flags = 0; in kvm_pv_clock_pairing()
10211 memset(&clock_pairing.pad, 0, sizeof(clock_pairing.pad)); in kvm_pv_clock_pairing()
10213 ret = 0; in kvm_pv_clock_pairing()
10245 return (READ_ONCE(kvm->arch.apicv_inhibit_reasons) == 0); in kvm_apicv_activated()
10255 return (vm_reasons | vcpu_reasons) == 0; in kvm_vcpu_apicv_activated()
10312 if (kvm_vcpu_yield_to(target) <= 0) in kvm_sched_yield()
10347 nr &= 0xFFFFFFFF; in ____kvm_emulate_hypercall()
10348 a0 &= 0xFFFFFFFF; in ____kvm_emulate_hypercall()
10349 a1 &= 0xFFFFFFFF; in ____kvm_emulate_hypercall()
10350 a2 &= 0xFFFFFFFF; in ____kvm_emulate_hypercall()
10351 a3 &= 0xFFFFFFFF; in ____kvm_emulate_hypercall()
10363 ret = 0; in ____kvm_emulate_hypercall()
10371 ret = 0; in ____kvm_emulate_hypercall()
10389 ret = 0; in ____kvm_emulate_hypercall()
10412 vcpu->run->hypercall.ret = 0; in ____kvm_emulate_hypercall()
10413 vcpu->run->hypercall.args[0] = gpa; in ____kvm_emulate_hypercall()
10416 vcpu->run->hypercall.flags = 0; in ____kvm_emulate_hypercall()
10422 return 0; in ____kvm_emulate_hypercall()
10605 r = 0; in kvm_check_and_inject_events()
10656 if (r < 0) in kvm_check_and_inject_events()
10679 * AMD states that code breakpoint #DBs explicitly clear RF=0). in kvm_check_and_inject_events()
10707 return 0; in kvm_check_and_inject_events()
10711 * due to architectural conditions (e.g. IF=0) a window-open exit in kvm_check_and_inject_events()
10724 if (r < 0) in kvm_check_and_inject_events()
10739 if (r < 0) in kvm_check_and_inject_events()
10746 WARN_ON(kvm_x86_call(nmi_allowed)(vcpu, true) < 0); in kvm_check_and_inject_events()
10755 if (r < 0) in kvm_check_and_inject_events()
10763 WARN_ON(kvm_x86_call(interrupt_allowed)(vcpu, true) < 0); in kvm_check_and_inject_events()
10789 return 0; in kvm_check_and_inject_events()
10794 r = 0; in kvm_check_and_inject_events()
10825 vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); in process_nmi()
11047 r = 0; in vcpu_enter_guest()
11053 r = 0; in vcpu_enter_guest()
11099 r = 0; in vcpu_enter_guest()
11108 vcpu->mmio_needed = 0; in vcpu_enter_guest()
11109 r = 0; in vcpu_enter_guest()
11138 r = 0; in vcpu_enter_guest()
11152 vcpu->run->system_event.ndata = 0; in vcpu_enter_guest()
11153 r = 0; in vcpu_enter_guest()
11159 vcpu->run->system_event.ndata = 0; in vcpu_enter_guest()
11160 r = 0; in vcpu_enter_guest()
11168 r = 0; in vcpu_enter_guest()
11204 if (r < 0) { in vcpu_enter_guest()
11205 r = 0; in vcpu_enter_guest()
11214 if (r < 0) { in vcpu_enter_guest()
11215 r = 0; in vcpu_enter_guest()
11282 run_flags = 0; in vcpu_enter_guest()
11298 set_debugreg(vcpu->arch.eff_db[0], 0); in vcpu_enter_guest()
11345 run_flags = 0; in vcpu_enter_guest()
11392 wrmsrq(MSR_IA32_XFD_ERR, 0); in vcpu_enter_guest()
11452 return 0; in vcpu_enter_guest()
11573 if (r < 0) in vcpu_block()
11574 return 0; in vcpu_block()
11577 if (kvm_apic_accept_events(vcpu) < 0) in vcpu_block()
11578 return 0; in vcpu_block()
11617 if (r <= 0) in vcpu_run()
11629 r = 0; in vcpu_run()
11664 return 0; in __kvm_emulate_halt()
11788 vcpu->mmio_needed = 0; in complete_emulated_mmio()
11804 return 0; in complete_emulated_mmio()
11820 trace_kvm_fpu(0); in kvm_put_guest_fpu()
11858 kvm_run->flags = 0; in kvm_arch_vcpu_ioctl_run()
11882 if (kvm_apic_accept_events(vcpu) < 0) { in kvm_arch_vcpu_ioctl_run()
11883 r = 0; in kvm_arch_vcpu_ioctl_run()
11904 if (r != 0) in kvm_arch_vcpu_ioctl_run()
11910 if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) { in kvm_arch_vcpu_ioctl_run()
11935 if (r <= 0) in kvm_arch_vcpu_ioctl_run()
11948 if (r <= 0) in kvm_arch_vcpu_ioctl_run()
12010 return 0; in kvm_arch_vcpu_ioctl_get_regs()
12055 return 0; in kvm_arch_vcpu_ioctl_set_regs()
12115 for (i = 0 ; i < 4 ; i++) in __get_sregs2()
12131 return 0; in kvm_arch_vcpu_ioctl_get_sregs()
12146 if (r < 0) in kvm_arch_vcpu_ioctl_get_mpstate()
12148 r = 0; in kvm_arch_vcpu_ioctl_get_mpstate()
12203 ret = 0; in kvm_arch_vcpu_ioctl_set_mpstate()
12254 vcpu->run->internal.ndata = 0; in kvm_task_switch()
12255 return 0; in kvm_task_switch()
12297 return 0; in __set_sregs_common()
12345 if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 && in __set_sregs_common()
12346 sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 && in __set_sregs_common()
12350 return 0; in __set_sregs_common()
12356 int mmu_reset_needed = 0; in __set_sregs()
12376 return 0; in __set_sregs()
12381 int mmu_reset_needed = 0; in __set_sregs2()
12399 for (i = 0; i < 4 ; i++) in __set_sregs2()
12410 return 0; in __set_sregs2()
12478 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
12481 for (i = 0; i < KVM_NR_DB_REGS; ++i) in kvm_arch_vcpu_ioctl_set_guest_debug()
12485 for (i = 0; i < KVM_NR_DB_REGS; i++) in kvm_arch_vcpu_ioctl_set_guest_debug()
12503 r = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
12528 tr->usermode = 0; in kvm_arch_vcpu_ioctl_translate()
12531 return 0; in kvm_arch_vcpu_ioctl_translate()
12539 return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0; in kvm_arch_vcpu_ioctl_get_fpu()
12554 return 0; in kvm_arch_vcpu_ioctl_get_fpu()
12562 return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0; in kvm_arch_vcpu_ioctl_set_fpu()
12578 return 0; in kvm_arch_vcpu_ioctl_set_fpu()
12621 return 0; in sync_regs()
12645 vcpu->arch.regs_avail = ~0; in kvm_arch_vcpu_create()
12646 vcpu->arch.regs_dirty = ~0; in kvm_arch_vcpu_create()
12656 if (r < 0) in kvm_arch_vcpu_create()
12660 if (r < 0) in kvm_arch_vcpu_create()
12717 return 0; in kvm_arch_vcpu_create()
12751 if (kvmclock_periodic_sync && vcpu->vcpu_idx == 0) in kvm_arch_vcpu_postcreate()
12851 vcpu->arch.hflags = 0; in kvm_vcpu_reset()
12853 vcpu->arch.smi_pending = 0; in kvm_vcpu_reset()
12854 vcpu->arch.smi_count = 0; in kvm_vcpu_reset()
12855 atomic_set(&vcpu->arch.nmi_queued, 0); in kvm_vcpu_reset()
12856 vcpu->arch.nmi_pending = 0; in kvm_vcpu_reset()
12861 memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); in kvm_vcpu_reset()
12867 vcpu->arch.cr2 = 0; in kvm_vcpu_reset()
12870 vcpu->arch.apf.msr_en_val = 0; in kvm_vcpu_reset()
12871 vcpu->arch.apf.msr_int_val = 0; in kvm_vcpu_reset()
12872 vcpu->arch.st.msr_val = 0; in kvm_vcpu_reset()
12883 vcpu->arch.smbase = 0x30000; in kvm_vcpu_reset()
12887 vcpu->arch.msr_misc_features_enables = 0; in kvm_vcpu_reset()
12891 __kvm_set_xcr(vcpu, 0, XFEATURE_MASK_FP); in kvm_vcpu_reset()
12892 kvm_msr_write(vcpu, MSR_IA32_XSS, 0); in kvm_vcpu_reset()
12896 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); in kvm_vcpu_reset()
12900 * Fall back to KVM's default Family/Model/Stepping of 0x600 (P6/Athlon) in kvm_vcpu_reset()
12907 kvm_rdx_write(vcpu, cpuid_0x1 ? cpuid_0x1->eax : 0x600); in kvm_vcpu_reset()
12912 kvm_rip_write(vcpu, 0xfff0); in kvm_vcpu_reset()
12914 vcpu->arch.cr3 = 0; in kvm_vcpu_reset()
12929 kvm_x86_call(set_cr4)(vcpu, 0); in kvm_vcpu_reset()
12930 kvm_x86_call(set_efer)(vcpu, 0); in kvm_vcpu_reset()
12937 * bits in CR4 and EFER are irrelevant if CR0.PG was '0'; and a reset+flush in kvm_vcpu_reset()
12939 * CR0 will be '0' prior to RESET). So we only need to check CR0.PG here. in kvm_vcpu_reset()
12968 kvm_rip_write(vcpu, 0); in kvm_vcpu_deliver_sipi_vector()
12989 u64 max_tsc = 0; in kvm_arch_enable_virtualization_cpu()
12999 if (ret != 0) in kvm_arch_enable_virtualization_cpu()
13070 kvm->arch.last_tsc_nsec = 0; in kvm_arch_enable_virtualization_cpu()
13071 kvm->arch.last_tsc_write = 0; in kvm_arch_enable_virtualization_cpu()
13075 return 0; in kvm_arch_enable_virtualization_cpu()
13092 return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0; in kvm_vcpu_is_bsp()
13132 atomic_set(&kvm->arch.noncoherent_dma_count, 0); in kvm_arch_init_vm()
13161 pr_warn_once("Running KVM with ignore_msrs=1 and report_ignored_msrs=0 is not a\n" in kvm_arch_init_vm()
13168 return 0; in kvm_arch_init_vm()
13183 * @gpa: the GPA to install the slot (unused when @size == 0).
13187 * @size > 0 to install a new slot, while @size == 0 to uninstall a
13222 hva = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE, in __x86_set_memory_region()
13223 MAP_SHARED | MAP_ANONYMOUS, 0); in __x86_set_memory_region()
13234 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) { in __x86_set_memory_region()
13238 m.flags = 0; in __x86_set_memory_region()
13243 if (r < 0) in __x86_set_memory_region()
13283 0, 0); in kvm_arch_destroy_vm()
13285 0, 0); in kvm_arch_destroy_vm()
13286 __x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0); in kvm_arch_destroy_vm()
13308 for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { in memslot_rmap_free()
13330 const int sz = sizeof(*slot->arch.rmap[0]); in memslot_rmap_alloc()
13333 for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { in memslot_rmap_alloc()
13347 return 0; in memslot_rmap_alloc()
13361 memset(&slot->arch, 0, sizeof(slot->arch)); in kvm_alloc_memslot_metadata()
13384 linfo[0].disallow_lpage = 1; in kvm_alloc_memslot_metadata()
13395 for (j = 0; j < lpages; ++j) in kvm_alloc_memslot_metadata()
13407 return 0; in kvm_alloc_memslot_metadata()
13462 return 0; in kvm_arch_prepare_memory_region()
13483 u32 old_flags = old ? old->flags : 0; in kvm_mmu_slot_apply_flags()
13484 u32 new_flags = new ? new->flags : 0; in kvm_mmu_slot_apply_flags()
13626 return kvm_x86_call(get_cpl)(vcpu) == 0; in kvm_arch_vcpu_in_kernel()
13634 return 0; in kvm_arch_vcpu_get_ip()
13651 /* Can't read the RIP when guest state is protected, just return 0 */ in kvm_get_linear_rip()
13653 return 0; in kvm_get_linear_rip()
13698 return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU)); in kvm_async_pf_hash_fn()
13710 while (vcpu->arch.apf.gfns[key] != ~0) in kvm_add_async_pf_gfn()
13721 for (i = 0; i < ASYNC_PF_PER_VCPU && in kvm_async_pf_gfn_slot()
13723 vcpu->arch.apf.gfns[key] != ~0); i++) in kvm_async_pf_gfn_slot()
13744 vcpu->arch.apf.gfns[i] = ~0; in kvm_del_async_pf_gfn()
13747 if (vcpu->arch.apf.gfns[j] == ~0) in kvm_del_async_pf_gfn()
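The async-PF GFN table matched at the end is a tiny open-addressed hash: hash_32() picks a slot, ~0 marks an empty slot, collisions probe linearly, and deletion re-slots followers rather than leaving tombstones. A standalone sketch of the insert/lookup half, assuming a 64-entry table like ASYNC_PF_PER_VCPU:

```c
#include <stdbool.h>
#include <stdint.h>

#define NSLOTS	64		/* power of two, like ASYNC_PF_PER_VCPU */
#define EMPTY	(~0ULL)		/* same sentinel the reset loop writes */

static uint64_t slots[NSLOTS] = { [0 ... NSLOTS - 1] = EMPTY };

static unsigned int hash_gfn(uint64_t gfn)
{
	/* any decent 32-bit mix works; the kernel uses hash_32() */
	uint32_t x = (uint32_t)gfn * 0x61C88647u;

	return x >> (32 - 6);		/* 6 == log2(NSLOTS) */
}

static void add_gfn(uint64_t gfn)
{
	unsigned int key = hash_gfn(gfn);

	while (slots[key] != EMPTY)	/* linear probing */
		key = (key + 1) & (NSLOTS - 1);
	slots[key] = gfn;
}

static bool find_gfn(uint64_t gfn)
{
	unsigned int key = hash_gfn(gfn);

	for (int i = 0; i < NSLOTS && slots[key] != EMPTY; i++) {
		if (slots[key] == gfn)
			return true;
		key = (key + 1) & (NSLOTS - 1);
	}
	return false;
}
```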
13844 fault.error_code = 0; in kvm_arch_async_page_not_present()
13873 work->arch.token = ~0; /* broadcast wakeup */ in kvm_arch_async_page_present()
13916 kvm_zap_gfn_range(kvm, gpa_to_gfn(0), gpa_to_gfn(~0ULL)); in kvm_noncoherent_dma_assignment_start_or_stop()
13939 return (vcpu->arch.msr_kvm_poll_control & 1) == 0; in kvm_arch_no_poll()
13977 int ret = 0; in kvm_spec_ctrl_test_value()
14044 return 0; in kvm_handle_memory_failure()
14062 if (operand.pcid >> 12 != 0) { in kvm_handle_invpcid()
14063 kvm_inject_gp(vcpu, 0); in kvm_handle_invpcid()
14075 if ((!pcid_enabled && (operand.pcid != 0)) || in kvm_handle_invpcid()
14077 kvm_inject_gp(vcpu, 0); in kvm_handle_invpcid()
14084 if (!pcid_enabled && (operand.pcid != 0)) { in kvm_handle_invpcid()
14085 kvm_inject_gp(vcpu, 0); in kvm_handle_invpcid()
14106 kvm_inject_gp(vcpu, 0); in kvm_handle_invpcid()
14138 vcpu->mmio_needed = 0; in complete_sev_es_emulated_mmio()
14155 return 0; in complete_sev_es_emulated_mmio()
14183 vcpu->mmio_cur_fragment = 0; in kvm_sev_es_mmio_write()
14193 return 0; in kvm_sev_es_mmio_write()
14222 vcpu->mmio_cur_fragment = 0; in kvm_sev_es_mmio_read()
14226 vcpu->run->mmio.is_write = 0; in kvm_sev_es_mmio_read()
14231 return 0; in kvm_sev_es_mmio_read()
14249 vcpu->arch.pio.count = 0; in complete_sev_es_emulated_outs()
14274 return 0; in kvm_sev_es_outs()
14309 return 0; in kvm_sev_es_ins()
14360 return 0; in kvm_x86_init()