x86.c (cfef5af3cb0e57501dcac2816ab11a20c074866d, before) | x86.c (b4f69df0f65e97fec439130a0d0a8b9c7cc02df2, after) |
---|---|
1// SPDX-License-Identifier: GPL-2.0-only 2/* 3 * Kernel-based Virtual Machine driver for Linux 4 * 5 * derived from drivers/kvm/kvm_main.c 6 * 7 * Copyright (C) 2006 Qumranet, Inc. 8 * Copyright (C) 2008 Qumranet, Inc. --- 1490 unchanged lines hidden (view full) --- 1499 1500static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_base) + 1501 ARRAY_SIZE(msrs_to_save_pmu)]; 1502static unsigned num_msrs_to_save; 1503 1504static const u32 emulated_msrs_all[] = { 1505 MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK, 1506 MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW, | 1// SPDX-License-Identifier: GPL-2.0-only 2/* 3 * Kernel-based Virtual Machine driver for Linux 4 * 5 * derived from drivers/kvm/kvm_main.c 6 * 7 * Copyright (C) 2006 Qumranet, Inc. 8 * Copyright (C) 2008 Qumranet, Inc. --- 1490 unchanged lines hidden (view full) --- 1499 1500static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_base) + 1501 ARRAY_SIZE(msrs_to_save_pmu)]; 1502static unsigned num_msrs_to_save; 1503 1504static const u32 emulated_msrs_all[] = { 1505 MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK, 1506 MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW, |
| 1507 1508#ifdef CONFIG_KVM_HYPERV |
1507 HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL, 1508 HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC, 1509 HV_X64_MSR_TSC_FREQUENCY, HV_X64_MSR_APIC_FREQUENCY, 1510 HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2, 1511 HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL, 1512 HV_X64_MSR_RESET, 1513 HV_X64_MSR_VP_INDEX, 1514 HV_X64_MSR_VP_RUNTIME, 1515 HV_X64_MSR_SCONTROL, 1516 HV_X64_MSR_STIMER0_CONFIG, 1517 HV_X64_MSR_VP_ASSIST_PAGE, 1518 HV_X64_MSR_REENLIGHTENMENT_CONTROL, HV_X64_MSR_TSC_EMULATION_CONTROL, 1519 HV_X64_MSR_TSC_EMULATION_STATUS, HV_X64_MSR_TSC_INVARIANT_CONTROL, 1520 HV_X64_MSR_SYNDBG_OPTIONS, 1521 HV_X64_MSR_SYNDBG_CONTROL, HV_X64_MSR_SYNDBG_STATUS, 1522 HV_X64_MSR_SYNDBG_SEND_BUFFER, HV_X64_MSR_SYNDBG_RECV_BUFFER, 1523 HV_X64_MSR_SYNDBG_PENDING_BUFFER, | 1509 HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL, 1510 HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC, 1511 HV_X64_MSR_TSC_FREQUENCY, HV_X64_MSR_APIC_FREQUENCY, 1512 HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2, 1513 HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL, 1514 HV_X64_MSR_RESET, 1515 HV_X64_MSR_VP_INDEX, 1516 HV_X64_MSR_VP_RUNTIME, 1517 HV_X64_MSR_SCONTROL, 1518 HV_X64_MSR_STIMER0_CONFIG, 1519 HV_X64_MSR_VP_ASSIST_PAGE, 1520 HV_X64_MSR_REENLIGHTENMENT_CONTROL, HV_X64_MSR_TSC_EMULATION_CONTROL, 1521 HV_X64_MSR_TSC_EMULATION_STATUS, HV_X64_MSR_TSC_INVARIANT_CONTROL, 1522 HV_X64_MSR_SYNDBG_OPTIONS, 1523 HV_X64_MSR_SYNDBG_CONTROL, HV_X64_MSR_SYNDBG_STATUS, 1524 HV_X64_MSR_SYNDBG_SEND_BUFFER, HV_X64_MSR_SYNDBG_RECV_BUFFER, 1525 HV_X64_MSR_SYNDBG_PENDING_BUFFER, |
| 1526#endif |
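
The hunk above guards the Hyper-V entries of `emulated_msrs_all` so the table, and every count derived from it, shrinks automatically when `CONFIG_KVM_HYPERV=n`. A minimal standalone sketch of that pattern (demo values only, not kernel code):

```c
#include <stdio.h>

#define CONFIG_KVM_HYPERV 1	/* comment out to drop the guarded entries */

static const unsigned int emulated_msrs_demo[] = {
	0x11, 0x12,		/* always-present MSR indices */
#ifdef CONFIG_KVM_HYPERV
	0x40000000, 0x40000001,	/* Hyper-V MSR indices, guarded */
#endif
	0x3b,			/* tail entry, unconditional */
};

int main(void)
{
	/* A sizeof-based count tracks the #ifdef with no extra bookkeeping. */
	printf("%zu entries\n",
	       sizeof(emulated_msrs_demo) / sizeof(emulated_msrs_demo[0]));
	return 0;
}
```
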
1524 1525 MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME, 1526 MSR_KVM_PV_EOI_EN, MSR_KVM_ASYNC_PF_INT, MSR_KVM_ASYNC_PF_ACK, 1527 1528 MSR_IA32_TSC_ADJUST, 1529 MSR_IA32_TSC_DEADLINE, 1530 MSR_IA32_ARCH_CAPABILITIES, 1531 MSR_IA32_PERF_CAPABILITIES, --- 2483 unchanged lines hidden (view full) --- 4015 * Ignore all writes to this no longer documented MSR. 4016 * Writes are only relevant for old K7 processors, 4017 * all pre-dating SVM, but a recommended workaround from 4018 * AMD for these chips. It is possible to specify the 4019 * affected processor models on the command line, hence 4020 * the need to ignore the workaround. 4021 */ 4022 break; | 1527 1528 MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME, 1529 MSR_KVM_PV_EOI_EN, MSR_KVM_ASYNC_PF_INT, MSR_KVM_ASYNC_PF_ACK, 1530 1531 MSR_IA32_TSC_ADJUST, 1532 MSR_IA32_TSC_DEADLINE, 1533 MSR_IA32_ARCH_CAPABILITIES, 1534 MSR_IA32_PERF_CAPABILITIES, --- 2483 unchanged lines hidden (view full) --- 4018 * Ignore all writes to this no longer documented MSR. 4019 * Writes are only relevant for old K7 processors, 4020 * all pre-dating SVM, but a recommended workaround from 4021 * AMD for these chips. It is possible to specify the 4022 * affected processor models on the command line, hence 4023 * the need to ignore the workaround. 4024 */ 4025 break; |
| 4026#ifdef CONFIG_KVM_HYPERV |
4023 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: 4024 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER: 4025 case HV_X64_MSR_SYNDBG_OPTIONS: 4026 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: 4027 case HV_X64_MSR_CRASH_CTL: 4028 case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT: 4029 case HV_X64_MSR_REENLIGHTENMENT_CONTROL: 4030 case HV_X64_MSR_TSC_EMULATION_CONTROL: 4031 case HV_X64_MSR_TSC_EMULATION_STATUS: 4032 case HV_X64_MSR_TSC_INVARIANT_CONTROL: 4033 return kvm_hv_set_msr_common(vcpu, msr, data, 4034 msr_info->host_initiated); | 4027 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: 4028 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER: 4029 case HV_X64_MSR_SYNDBG_OPTIONS: 4030 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: 4031 case HV_X64_MSR_CRASH_CTL: 4032 case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT: 4033 case HV_X64_MSR_REENLIGHTENMENT_CONTROL: 4034 case HV_X64_MSR_TSC_EMULATION_CONTROL: 4035 case HV_X64_MSR_TSC_EMULATION_STATUS: 4036 case HV_X64_MSR_TSC_INVARIANT_CONTROL: 4037 return kvm_hv_set_msr_common(vcpu, msr, data, 4038 msr_info->host_initiated); |
| 4039#endif |
4035 case MSR_IA32_BBL_CR_CTL3: 4036 /* Drop writes to this legacy MSR -- see rdmsr 4037 * counterpart for further detail. 4038 */ 4039 kvm_pr_unimpl_wrmsr(vcpu, msr, data); 4040 break; 4041 case MSR_AMD64_OSVW_ID_LENGTH: 4042 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) --- 329 unchanged lines hidden (view full) --- 4372 * every field. 4373 * 4374 * This prevents guest kernels on AMD host with CPU 4375 * type 6, model 8 and higher from exploding due to 4376 * the rdmsr failing. 4377 */ 4378 msr_info->data = 0x20000000; 4379 break; | 4040 case MSR_IA32_BBL_CR_CTL3: 4041 /* Drop writes to this legacy MSR -- see rdmsr 4042 * counterpart for further detail. 4043 */ 4044 kvm_pr_unimpl_wrmsr(vcpu, msr, data); 4045 break; 4046 case MSR_AMD64_OSVW_ID_LENGTH: 4047 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) --- 329 unchanged lines hidden (view full) --- 4377 * every field. 4378 * 4379 * This prevents guest kernels on AMD host with CPU 4380 * type 6, model 8 and higher from exploding due to 4381 * the rdmsr failing. 4382 */ 4383 msr_info->data = 0x20000000; 4384 break; |
| 4385#ifdef CONFIG_KVM_HYPERV |
4380 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: 4381 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER: 4382 case HV_X64_MSR_SYNDBG_OPTIONS: 4383 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: 4384 case HV_X64_MSR_CRASH_CTL: 4385 case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT: 4386 case HV_X64_MSR_REENLIGHTENMENT_CONTROL: 4387 case HV_X64_MSR_TSC_EMULATION_CONTROL: 4388 case HV_X64_MSR_TSC_EMULATION_STATUS: 4389 case HV_X64_MSR_TSC_INVARIANT_CONTROL: 4390 return kvm_hv_get_msr_common(vcpu, 4391 msr_info->index, &msr_info->data, 4392 msr_info->host_initiated); | 4386 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: 4387 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER: 4388 case HV_X64_MSR_SYNDBG_OPTIONS: 4389 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: 4390 case HV_X64_MSR_CRASH_CTL: 4391 case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT: 4392 case HV_X64_MSR_REENLIGHTENMENT_CONTROL: 4393 case HV_X64_MSR_TSC_EMULATION_CONTROL: 4394 case HV_X64_MSR_TSC_EMULATION_STATUS: 4395 case HV_X64_MSR_TSC_INVARIANT_CONTROL: 4396 return kvm_hv_get_msr_common(vcpu, 4397 msr_info->index, &msr_info->data, 4398 msr_info->host_initiated); |
| 4399#endif |
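
With both the wrmsr and rdmsr ranges above guarded, host-initiated accesses to the Hyper-V MSR range no longer reach `kvm_hv_get_msr_common()` when the option is off. A hedged userspace sketch of such a read via `KVM_GET_MSRS` (`vcpu_fd` and the helper name are assumptions, not from the patch):

```c
#include <linux/kvm.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>

#define HV_X64_MSR_GUEST_OS_ID 0x40000000	/* first MSR of the guarded range */

/* Read one Hyper-V MSR; returns 0 on success, -1 if KVM rejects the index. */
static int read_hv_guest_os_id(int vcpu_fd)
{
	struct {
		struct kvm_msrs hdr;
		struct kvm_msr_entry entry;	/* lands in hdr.entries[] */
	} msrs;

	memset(&msrs, 0, sizeof(msrs));
	msrs.hdr.nmsrs = 1;
	msrs.entry.index = HV_X64_MSR_GUEST_OS_ID;

	/* KVM_GET_MSRS returns the number of MSRs successfully read. */
	if (ioctl(vcpu_fd, KVM_GET_MSRS, &msrs) != 1)
		return -1;	/* expected outcome with CONFIG_KVM_HYPERV=n */

	printf("HV_X64_MSR_GUEST_OS_ID = 0x%llx\n",
	       (unsigned long long)msrs.entry.data);
	return 0;
}
```
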
4393 case MSR_IA32_BBL_CR_CTL3: 4394 /* This legacy MSR exists but isn't fully documented in current 4395 * silicon. It is however accessed by winxp in very narrow 4396 * scenarios where it sets bit #19, itself documented as 4397 * a "reserved" bit. Best effort attempt to source coherent 4398 * read data here should the balance of the register be 4399 * interpreted by the guest: 4400 * --- 121 unchanged lines hidden (view full) --- 4522 4523static inline bool kvm_can_mwait_in_guest(void) 4524{ 4525 return boot_cpu_has(X86_FEATURE_MWAIT) && 4526 !boot_cpu_has_bug(X86_BUG_MONITOR) && 4527 boot_cpu_has(X86_FEATURE_ARAT); 4528} 4529 | 4400 case MSR_IA32_BBL_CR_CTL3: 4401 /* This legacy MSR exists but isn't fully documented in current 4402 * silicon. It is however accessed by winxp in very narrow 4403 * scenarios where it sets bit #19, itself documented as 4404 * a "reserved" bit. Best effort attempt to source coherent 4405 * read data here should the balance of the register be 4406 * interpreted by the guest: 4407 * --- 121 unchanged lines hidden (view full) --- 4529 4530static inline bool kvm_can_mwait_in_guest(void) 4531{ 4532 return boot_cpu_has(X86_FEATURE_MWAIT) && 4533 !boot_cpu_has_bug(X86_BUG_MONITOR) && 4534 boot_cpu_has(X86_FEATURE_ARAT); 4535} 4536 |
| 4537#ifdef CONFIG_KVM_HYPERV |
4530static int kvm_ioctl_get_supported_hv_cpuid(struct kvm_vcpu *vcpu, 4531 struct kvm_cpuid2 __user *cpuid_arg) 4532{ 4533 struct kvm_cpuid2 cpuid; 4534 int r; 4535 4536 r = -EFAULT; 4537 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) --- 4 unchanged lines hidden (view full) --- 4542 return r; 4543 4544 r = -EFAULT; 4545 if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid))) 4546 return r; 4547 4548 return 0; 4549} | 4538static int kvm_ioctl_get_supported_hv_cpuid(struct kvm_vcpu *vcpu, 4539 struct kvm_cpuid2 __user *cpuid_arg) 4540{ 4541 struct kvm_cpuid2 cpuid; 4542 int r; 4543 4544 r = -EFAULT; 4545 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) --- 4 unchanged lines hidden (view full) --- 4550 return r; 4551 4552 r = -EFAULT; 4553 if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid))) 4554 return r; 4555 4556 return 0; 4557} |
| 4558#endif |
4550 4551static bool kvm_is_vm_type_supported(unsigned long type) 4552{ 4553 return type == KVM_X86_DEFAULT_VM || 4554 (type == KVM_X86_SW_PROTECTED_VM && 4555 IS_ENABLED(CONFIG_KVM_SW_PROTECTED_VM) && tdp_enabled); 4556} 4557 --- 17 unchanged lines hidden (view full) --- 4575 case KVM_CAP_REINJECT_CONTROL: 4576 case KVM_CAP_IRQ_INJECT_STATUS: 4577 case KVM_CAP_IOEVENTFD: 4578 case KVM_CAP_IOEVENTFD_NO_LENGTH: 4579 case KVM_CAP_PIT2: 4580 case KVM_CAP_PIT_STATE2: 4581 case KVM_CAP_SET_IDENTITY_MAP_ADDR: 4582 case KVM_CAP_VCPU_EVENTS: | 4559 4560static bool kvm_is_vm_type_supported(unsigned long type) 4561{ 4562 return type == KVM_X86_DEFAULT_VM || 4563 (type == KVM_X86_SW_PROTECTED_VM && 4564 IS_ENABLED(CONFIG_KVM_SW_PROTECTED_VM) && tdp_enabled); 4565} 4566 --- 17 unchanged lines hidden (view full) --- 4584 case KVM_CAP_REINJECT_CONTROL: 4585 case KVM_CAP_IRQ_INJECT_STATUS: 4586 case KVM_CAP_IOEVENTFD: 4587 case KVM_CAP_IOEVENTFD_NO_LENGTH: 4588 case KVM_CAP_PIT2: 4589 case KVM_CAP_PIT_STATE2: 4590 case KVM_CAP_SET_IDENTITY_MAP_ADDR: 4591 case KVM_CAP_VCPU_EVENTS: |
| 4592#ifdef CONFIG_KVM_HYPERV |
4583 case KVM_CAP_HYPERV: 4584 case KVM_CAP_HYPERV_VAPIC: 4585 case KVM_CAP_HYPERV_SPIN: | 4593 case KVM_CAP_HYPERV: 4594 case KVM_CAP_HYPERV_VAPIC: 4595 case KVM_CAP_HYPERV_SPIN: |
| 4596 case KVM_CAP_HYPERV_TIME: |
4586 case KVM_CAP_HYPERV_SYNIC: 4587 case KVM_CAP_HYPERV_SYNIC2: 4588 case KVM_CAP_HYPERV_VP_INDEX: 4589 case KVM_CAP_HYPERV_EVENTFD: 4590 case KVM_CAP_HYPERV_TLBFLUSH: 4591 case KVM_CAP_HYPERV_SEND_IPI: 4592 case KVM_CAP_HYPERV_CPUID: 4593 case KVM_CAP_HYPERV_ENFORCE_CPUID: 4594 case KVM_CAP_SYS_HYPERV_CPUID: | 4597 case KVM_CAP_HYPERV_SYNIC: 4598 case KVM_CAP_HYPERV_SYNIC2: 4599 case KVM_CAP_HYPERV_VP_INDEX: 4600 case KVM_CAP_HYPERV_EVENTFD: 4601 case KVM_CAP_HYPERV_TLBFLUSH: 4602 case KVM_CAP_HYPERV_SEND_IPI: 4603 case KVM_CAP_HYPERV_CPUID: 4604 case KVM_CAP_HYPERV_ENFORCE_CPUID: 4605 case KVM_CAP_SYS_HYPERV_CPUID: |
| 4606#endif |
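
Note the hunk also moves `KVM_CAP_HYPERV_TIME` (deleted further down at old line 4604) into the guarded block. Userspace can observe these guards directly: with `CONFIG_KVM_HYPERV=n` the cases are compiled out and `KVM_CHECK_EXTENSION` reports 0 for them. A small sketch, error handling omitted:

```c
#include <fcntl.h>
#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);

	/* A positive return means the capability is compiled in and available. */
	int r = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_HYPERV);

	printf("KVM_CAP_HYPERV: %s\n", r > 0 ? "supported" : "absent");
	return 0;
}
```
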
4595 case KVM_CAP_PCI_SEGMENT: 4596 case KVM_CAP_DEBUGREGS: 4597 case KVM_CAP_X86_ROBUST_SINGLESTEP: 4598 case KVM_CAP_XSAVE: 4599 case KVM_CAP_ASYNC_PF: 4600 case KVM_CAP_ASYNC_PF_INT: 4601 case KVM_CAP_GET_TSC_KHZ: 4602 case KVM_CAP_KVMCLOCK_CTRL: 4603 case KVM_CAP_READONLY_MEM: | 4607 case KVM_CAP_PCI_SEGMENT: 4608 case KVM_CAP_DEBUGREGS: 4609 case KVM_CAP_X86_ROBUST_SINGLESTEP: 4610 case KVM_CAP_XSAVE: 4611 case KVM_CAP_ASYNC_PF: 4612 case KVM_CAP_ASYNC_PF_INT: 4613 case KVM_CAP_GET_TSC_KHZ: 4614 case KVM_CAP_KVMCLOCK_CTRL: 4615 case KVM_CAP_READONLY_MEM: |
4604 case KVM_CAP_HYPERV_TIME: | |
4605 case KVM_CAP_IOAPIC_POLARITY_IGNORED: 4606 case KVM_CAP_TSC_DEADLINE_TIMER: 4607 case KVM_CAP_DISABLE_QUIRKS: 4608 case KVM_CAP_SET_BOOT_CPU_ID: 4609 case KVM_CAP_SPLIT_IRQCHIP: 4610 case KVM_CAP_IMMEDIATE_EXIT: 4611 case KVM_CAP_PMU_EVENT_FILTER: 4612 case KVM_CAP_PMU_EVENT_MASKED_EVENTS: --- 94 unchanged lines hidden (view full) --- 4707 break; 4708 case KVM_CAP_X2APIC_API: 4709 r = KVM_X2APIC_API_VALID_FLAGS; 4710 break; 4711 case KVM_CAP_NESTED_STATE: 4712 r = kvm_x86_ops.nested_ops->get_state ? 4713 kvm_x86_ops.nested_ops->get_state(NULL, NULL, 0) : 0; 4714 break; | 4616 case KVM_CAP_IOAPIC_POLARITY_IGNORED: 4617 case KVM_CAP_TSC_DEADLINE_TIMER: 4618 case KVM_CAP_DISABLE_QUIRKS: 4619 case KVM_CAP_SET_BOOT_CPU_ID: 4620 case KVM_CAP_SPLIT_IRQCHIP: 4621 case KVM_CAP_IMMEDIATE_EXIT: 4622 case KVM_CAP_PMU_EVENT_FILTER: 4623 case KVM_CAP_PMU_EVENT_MASKED_EVENTS: --- 94 unchanged lines hidden (view full) --- 4718 break; 4719 case KVM_CAP_X2APIC_API: 4720 r = KVM_X2APIC_API_VALID_FLAGS; 4721 break; 4722 case KVM_CAP_NESTED_STATE: 4723 r = kvm_x86_ops.nested_ops->get_state ? 4724 kvm_x86_ops.nested_ops->get_state(NULL, NULL, 0) : 0; 4725 break; |
| 4726#ifdef CONFIG_KVM_HYPERV |
4715 case KVM_CAP_HYPERV_DIRECT_TLBFLUSH: 4716 r = kvm_x86_ops.enable_l2_tlb_flush != NULL; 4717 break; 4718 case KVM_CAP_HYPERV_ENLIGHTENED_VMCS: 4719 r = kvm_x86_ops.nested_ops->enable_evmcs != NULL; 4720 break; | 4727 case KVM_CAP_HYPERV_DIRECT_TLBFLUSH: 4728 r = kvm_x86_ops.enable_l2_tlb_flush != NULL; 4729 break; 4730 case KVM_CAP_HYPERV_ENLIGHTENED_VMCS: 4731 r = kvm_x86_ops.nested_ops->enable_evmcs != NULL; 4732 break; |
| 4733#endif |
4721 case KVM_CAP_SMALLER_MAXPHYADDR: 4722 r = (int) allow_smaller_maxphyaddr; 4723 break; 4724 case KVM_CAP_STEAL_TIME: 4725 r = sched_info_on(); 4726 break; 4727 case KVM_CAP_X86_BUS_LOCK_EXIT: 4728 if (kvm_caps.has_bus_lock_exit) --- 150 unchanged lines hidden (view full) --- 4879 num_msr_based_features * sizeof(u32))) 4880 goto out; 4881 r = 0; 4882 break; 4883 } 4884 case KVM_GET_MSRS: 4885 r = msr_io(NULL, argp, do_get_msr_feature, 1); 4886 break; | 4734 case KVM_CAP_SMALLER_MAXPHYADDR: 4735 r = (int) allow_smaller_maxphyaddr; 4736 break; 4737 case KVM_CAP_STEAL_TIME: 4738 r = sched_info_on(); 4739 break; 4740 case KVM_CAP_X86_BUS_LOCK_EXIT: 4741 if (kvm_caps.has_bus_lock_exit) --- 150 unchanged lines hidden (view full) --- 4892 num_msr_based_features * sizeof(u32))) 4893 goto out; 4894 r = 0; 4895 break; 4896 } 4897 case KVM_GET_MSRS: 4898 r = msr_io(NULL, argp, do_get_msr_feature, 1); 4899 break; |
| 4900#ifdef CONFIG_KVM_HYPERV |
4887 case KVM_GET_SUPPORTED_HV_CPUID: 4888 r = kvm_ioctl_get_supported_hv_cpuid(NULL, argp); 4889 break; | 4901 case KVM_GET_SUPPORTED_HV_CPUID: 4902 r = kvm_ioctl_get_supported_hv_cpuid(NULL, argp); 4903 break; |
| 4904#endif |
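
The guarded `KVM_GET_SUPPORTED_HV_CPUID` case above is the system-fd variant (vcpu passed as NULL). A hedged sketch of calling it on `/dev/kvm`; the buffer size is an arbitrary assumption:

```c
#include <fcntl.h>
#include <linux/kvm.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	struct kvm_cpuid2 *cpuid;

	/* Room for 64 entries; the kernel rewrites nent with the real count. */
	cpuid = calloc(1, sizeof(*cpuid) + 64 * sizeof(struct kvm_cpuid_entry2));
	cpuid->nent = 64;

	if (ioctl(kvm, KVM_GET_SUPPORTED_HV_CPUID, cpuid) < 0)
		perror("KVM_GET_SUPPORTED_HV_CPUID"); /* fails when compiled out */
	else
		printf("%u Hyper-V CPUID leaves\n", cpuid->nent);

	free(cpuid);
	return 0;
}
```
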
4890 case KVM_GET_DEVICE_ATTR: { 4891 struct kvm_device_attr attr; 4892 r = -EFAULT; 4893 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 4894 break; 4895 r = kvm_x86_dev_get_attr(&attr); 4896 break; 4897 } --- 809 unchanged lines hidden (view full) --- 5707 } 5708 5709 return r; 5710} 5711 5712static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, 5713 struct kvm_enable_cap *cap) 5714{ | 4905 case KVM_GET_DEVICE_ATTR: { 4906 struct kvm_device_attr attr; 4907 r = -EFAULT; 4908 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 4909 break; 4910 r = kvm_x86_dev_get_attr(&attr); 4911 break; 4912 } --- 809 unchanged lines hidden (view full) --- 5722 } 5723 5724 return r; 5725} 5726 5727static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, 5728 struct kvm_enable_cap *cap) 5729{ |
5715 int r; 5716 uint16_t vmcs_version; 5717 void __user *user_ptr; 5718 | |
5719 if (cap->flags) 5720 return -EINVAL; 5721 5722 switch (cap->cap) { | 5730 if (cap->flags) 5731 return -EINVAL; 5732 5733 switch (cap->cap) { |
| 5734#ifdef CONFIG_KVM_HYPERV |
5723 case KVM_CAP_HYPERV_SYNIC2: 5724 if (cap->args[0]) 5725 return -EINVAL; 5726 fallthrough; 5727 5728 case KVM_CAP_HYPERV_SYNIC: 5729 if (!irqchip_in_kernel(vcpu->kvm)) 5730 return -EINVAL; 5731 return kvm_hv_activate_synic(vcpu, cap->cap == 5732 KVM_CAP_HYPERV_SYNIC2); 5733 case KVM_CAP_HYPERV_ENLIGHTENED_VMCS: | 5735 case KVM_CAP_HYPERV_SYNIC2: 5736 if (cap->args[0]) 5737 return -EINVAL; 5738 fallthrough; 5739 5740 case KVM_CAP_HYPERV_SYNIC: 5741 if (!irqchip_in_kernel(vcpu->kvm)) 5742 return -EINVAL; 5743 return kvm_hv_activate_synic(vcpu, cap->cap == 5744 KVM_CAP_HYPERV_SYNIC2); 5745 case KVM_CAP_HYPERV_ENLIGHTENED_VMCS: |
5734 if (!kvm_x86_ops.nested_ops->enable_evmcs) 5735 return -ENOTTY; 5736 r = kvm_x86_ops.nested_ops->enable_evmcs(vcpu, &vmcs_version); 5737 if (!r) { 5738 user_ptr = (void __user *)(uintptr_t)cap->args[0]; 5739 if (copy_to_user(user_ptr, &vmcs_version, 5740 sizeof(vmcs_version))) 5741 r = -EFAULT; | 5746 { 5747 int r; 5748 uint16_t vmcs_version; 5749 void __user *user_ptr; 5750 5751 if (!kvm_x86_ops.nested_ops->enable_evmcs) 5752 return -ENOTTY; 5753 r = kvm_x86_ops.nested_ops->enable_evmcs(vcpu, &vmcs_version); 5754 if (!r) { 5755 user_ptr = (void __user *)(uintptr_t)cap->args[0]; 5756 if (copy_to_user(user_ptr, &vmcs_version, 5757 sizeof(vmcs_version))) 5758 r = -EFAULT; 5759 } 5760 return r; |
5742 } | 5761 } |
5743 return r; | |
5744 case KVM_CAP_HYPERV_DIRECT_TLBFLUSH: 5745 if (!kvm_x86_ops.enable_l2_tlb_flush) 5746 return -ENOTTY; 5747 5748 return static_call(kvm_x86_enable_l2_tlb_flush)(vcpu); 5749 5750 case KVM_CAP_HYPERV_ENFORCE_CPUID: 5751 return kvm_hv_set_enforce_cpuid(vcpu, cap->args[0]); | 5762 case KVM_CAP_HYPERV_DIRECT_TLBFLUSH: 5763 if (!kvm_x86_ops.enable_l2_tlb_flush) 5764 return -ENOTTY; 5765 5766 return static_call(kvm_x86_enable_l2_tlb_flush)(vcpu); 5767 5768 case KVM_CAP_HYPERV_ENFORCE_CPUID: 5769 return kvm_hv_set_enforce_cpuid(vcpu, cap->args[0]); |
| 5770#endif |
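
The right-hand side also narrows `r`, `vmcs_version`, and `user_ptr` into the eVMCS case (old lines 5715-5717 and 5743 go away), since they are only used under the guard. From userspace, that guarded path is driven as in this hedged sketch; `vcpu_fd` and the helper name are assumptions:

```c
#include <linux/kvm.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>

/* Enable enlightened VMCS on one vCPU; returns 0 on success. */
static int enable_evmcs(int vcpu_fd)
{
	uint16_t vmcs_version = 0;
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS;
	/* args[0] is the user pointer the kernel copy_to_user()s into. */
	cap.args[0] = (uintptr_t)&vmcs_version;

	if (ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap) < 0)
		return -1;	/* e.g. eVMCS or Hyper-V support absent */

	printf("eVMCS vmcs_version: 0x%x\n", vmcs_version);
	return 0;
}
```
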
5752 5753 case KVM_CAP_ENFORCE_PV_FEATURE_CPUID: 5754 vcpu->arch.pv_cpuid.enforce = cap->args[0]; 5755 if (vcpu->arch.pv_cpuid.enforce) 5756 kvm_update_pv_runtime(vcpu); 5757 5758 return 0; 5759 default: --- 376 unchanged lines hidden (view full) --- 6136 && !(kvm_state.flags & KVM_STATE_NESTED_GUEST_MODE)) 6137 break; 6138 6139 idx = srcu_read_lock(&vcpu->kvm->srcu); 6140 r = kvm_x86_ops.nested_ops->set_state(vcpu, user_kvm_nested_state, &kvm_state); 6141 srcu_read_unlock(&vcpu->kvm->srcu, idx); 6142 break; 6143 } | 5771 5772 case KVM_CAP_ENFORCE_PV_FEATURE_CPUID: 5773 vcpu->arch.pv_cpuid.enforce = cap->args[0]; 5774 if (vcpu->arch.pv_cpuid.enforce) 5775 kvm_update_pv_runtime(vcpu); 5776 5777 return 0; 5778 default: --- 376 unchanged lines hidden (view full) --- 6155 && !(kvm_state.flags & KVM_STATE_NESTED_GUEST_MODE)) 6156 break; 6157 6158 idx = srcu_read_lock(&vcpu->kvm->srcu); 6159 r = kvm_x86_ops.nested_ops->set_state(vcpu, user_kvm_nested_state, &kvm_state); 6160 srcu_read_unlock(&vcpu->kvm->srcu, idx); 6161 break; 6162 } |
| 6163#ifdef CONFIG_KVM_HYPERV |
6144 case KVM_GET_SUPPORTED_HV_CPUID: 6145 r = kvm_ioctl_get_supported_hv_cpuid(vcpu, argp); 6146 break; | 6164 case KVM_GET_SUPPORTED_HV_CPUID: 6165 r = kvm_ioctl_get_supported_hv_cpuid(vcpu, argp); 6166 break; |
| 6167#endif |
6147#ifdef CONFIG_KVM_XEN 6148 case KVM_XEN_VCPU_GET_ATTR: { 6149 struct kvm_xen_vcpu_attr xva; 6150 6151 r = -EFAULT; 6152 if (copy_from_user(&xva, argp, sizeof(xva))) 6153 goto out; 6154 r = kvm_xen_vcpu_get_attr(vcpu, &xva); --- 1041 unchanged lines hidden (view full) --- 7196 7197 r = -ENOTTY; 7198 if (!kvm_x86_ops.mem_enc_unregister_region) 7199 goto out; 7200 7201 r = static_call(kvm_x86_mem_enc_unregister_region)(kvm, ®ion); 7202 break; 7203 } | 6168#ifdef CONFIG_KVM_XEN 6169 case KVM_XEN_VCPU_GET_ATTR: { 6170 struct kvm_xen_vcpu_attr xva; 6171 6172 r = -EFAULT; 6173 if (copy_from_user(&xva, argp, sizeof(xva))) 6174 goto out; 6175 r = kvm_xen_vcpu_get_attr(vcpu, &xva); --- 1041 unchanged lines hidden (view full) --- 7217 7218 r = -ENOTTY; 7219 if (!kvm_x86_ops.mem_enc_unregister_region) 7220 goto out; 7221 7222 r = static_call(kvm_x86_mem_enc_unregister_region)(kvm, ®ion); 7223 break; 7224 } |
| 7225#ifdef CONFIG_KVM_HYPERV |
7204 case KVM_HYPERV_EVENTFD: { 7205 struct kvm_hyperv_eventfd hvevfd; 7206 7207 r = -EFAULT; 7208 if (copy_from_user(&hvevfd, argp, sizeof(hvevfd))) 7209 goto out; 7210 r = kvm_vm_ioctl_hv_eventfd(kvm, &hvevfd); 7211 break; 7212 } | 7226 case KVM_HYPERV_EVENTFD: { 7227 struct kvm_hyperv_eventfd hvevfd; 7228 7229 r = -EFAULT; 7230 if (copy_from_user(&hvevfd, argp, sizeof(hvevfd))) 7231 goto out; 7232 r = kvm_vm_ioctl_hv_eventfd(kvm, &hvevfd); 7233 break; 7234 } |
| 7235#endif |
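
`KVM_HYPERV_EVENTFD` is the VM-level ioctl guarded above; it binds an eventfd to a Hyper-V connection id so a guest `HvSignalEvent` hypercall signals userspace without a heavyweight exit. A hedged sketch (`vm_fd` and the helper name are assumptions; eventfd error handling omitted):

```c
#include <linux/kvm.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>

/* Route guest HvSignalEvent(conn_id) to an eventfd; returns the fd or -1. */
static int bind_hv_event(int vm_fd, unsigned int conn_id)
{
	struct kvm_hyperv_eventfd hvevfd;
	int efd = eventfd(0, EFD_CLOEXEC);

	memset(&hvevfd, 0, sizeof(hvevfd));
	hvevfd.conn_id = conn_id;	/* connection id the guest signals */
	hvevfd.fd = efd;
	hvevfd.flags = 0;		/* KVM_HYPERV_EVENTFD_DEASSIGN unbinds */

	if (ioctl(vm_fd, KVM_HYPERV_EVENTFD, &hvevfd) < 0)
		return -1;
	return efd;
}
```
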
7213 case KVM_SET_PMU_EVENT_FILTER: 7214 r = kvm_vm_ioctl_set_pmu_event_filter(kvm, argp); 7215 break; 7216 case KVM_X86_SET_MSR_FILTER: { 7217 struct kvm_msr_filter __user *user_msr_filter = argp; 7218 struct kvm_msr_filter filter; 7219 7220 if (copy_from_user(&filter, user_msr_filter, sizeof(filter))) --- 3362 unchanged lines hidden (view full) --- 10583 if (is_guest_mode(vcpu)) 10584 vcpu->arch.load_eoi_exitmap_pending = true; 10585 else 10586 kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu); 10587} 10588 10589static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu) 10590{ | 7236 case KVM_SET_PMU_EVENT_FILTER: 7237 r = kvm_vm_ioctl_set_pmu_event_filter(kvm, argp); 7238 break; 7239 case KVM_X86_SET_MSR_FILTER: { 7240 struct kvm_msr_filter __user *user_msr_filter = argp; 7241 struct kvm_msr_filter filter; 7242 7243 if (copy_from_user(&filter, user_msr_filter, sizeof(filter))) --- 3362 unchanged lines hidden (view full) --- 10606 if (is_guest_mode(vcpu)) 10607 vcpu->arch.load_eoi_exitmap_pending = true; 10608 else 10609 kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu); 10610} 10611 10612static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu) 10613{ |
10591 u64 eoi_exit_bitmap[4]; 10592 | |
10593 if (!kvm_apic_hw_enabled(vcpu->arch.apic)) 10594 return; 10595 | 10614 if (!kvm_apic_hw_enabled(vcpu->arch.apic)) 10615 return; 10616 |
| 10617#ifdef CONFIG_KVM_HYPERV |
10596 if (to_hv_vcpu(vcpu)) { | 10618 if (to_hv_vcpu(vcpu)) { |
| 10619 u64 eoi_exit_bitmap[4]; 10620 |
10597 bitmap_or((ulong *)eoi_exit_bitmap, 10598 vcpu->arch.ioapic_handled_vectors, 10599 to_hv_synic(vcpu)->vec_bitmap, 256); 10600 static_call_cond(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap); 10601 return; 10602 } | 10621 bitmap_or((ulong *)eoi_exit_bitmap, 10622 vcpu->arch.ioapic_handled_vectors, 10623 to_hv_synic(vcpu)->vec_bitmap, 256); 10624 static_call_cond(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap); 10625 return; 10626 } |
10603 | 10627#endif |
10604 static_call_cond(kvm_x86_load_eoi_exitmap)( 10605 vcpu, (u64 *)vcpu->arch.ioapic_handled_vectors); 10606} 10607 10608void kvm_arch_guest_memory_reclaimed(struct kvm *kvm) 10609{ 10610 static_call_cond(kvm_x86_guest_memory_reclaimed)(kvm); 10611} --- 74 unchanged lines hidden (view full) --- 10686 kvm_service_local_tlb_flush_requests(vcpu); 10687 10688 /* 10689 * Fall back to a "full" guest flush if Hyper-V's precise 10690 * flushing fails. Note, Hyper-V's flushing is per-vCPU, but 10691 * the flushes are considered "remote" and not "local" because 10692 * the requests can be initiated from other vCPUs. 10693 */ | 10628 static_call_cond(kvm_x86_load_eoi_exitmap)( 10629 vcpu, (u64 *)vcpu->arch.ioapic_handled_vectors); 10630} 10631 10632void kvm_arch_guest_memory_reclaimed(struct kvm *kvm) 10633{ 10634 static_call_cond(kvm_x86_guest_memory_reclaimed)(kvm); 10635} --- 74 unchanged lines hidden (view full) --- 10710 kvm_service_local_tlb_flush_requests(vcpu); 10711 10712 /* 10713 * Fall back to a "full" guest flush if Hyper-V's precise 10714 * flushing fails. Note, Hyper-V's flushing is per-vCPU, but 10715 * the flushes are considered "remote" and not "local" because 10716 * the requests can be initiated from other vCPUs. 10717 */ |
| 10718#ifdef CONFIG_KVM_HYPERV |
10694 if (kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu) && 10695 kvm_hv_vcpu_flush_tlb(vcpu)) 10696 kvm_vcpu_flush_tlb_guest(vcpu); | 10719 if (kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu) && 10720 kvm_hv_vcpu_flush_tlb(vcpu)) 10721 kvm_vcpu_flush_tlb_guest(vcpu); |
| 10722#endif |
10697 10698 if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) { 10699 vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS; 10700 r = 0; 10701 goto out; 10702 } 10703 if (kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { 10704 if (is_guest_mode(vcpu)) --- 36 unchanged lines hidden (view full) --- 10741 } 10742 } 10743 if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu)) 10744 vcpu_scan_ioapic(vcpu); 10745 if (kvm_check_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu)) 10746 vcpu_load_eoi_exitmap(vcpu); 10747 if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu)) 10748 kvm_vcpu_reload_apic_access_page(vcpu); | 10723 10724 if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) { 10725 vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS; 10726 r = 0; 10727 goto out; 10728 } 10729 if (kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { 10730 if (is_guest_mode(vcpu)) --- 36 unchanged lines hidden (view full) --- 10767 } 10768 } 10769 if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu)) 10770 vcpu_scan_ioapic(vcpu); 10771 if (kvm_check_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu)) 10772 vcpu_load_eoi_exitmap(vcpu); 10773 if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu)) 10774 kvm_vcpu_reload_apic_access_page(vcpu); |
| 10775#ifdef CONFIG_KVM_HYPERV |
10749 if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) { 10750 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; 10751 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH; 10752 vcpu->run->system_event.ndata = 0; 10753 r = 0; 10754 goto out; 10755 } 10756 if (kvm_check_request(KVM_REQ_HV_RESET, vcpu)) { --- 14 unchanged lines hidden (view full) --- 10771 10772 /* 10773 * KVM_REQ_HV_STIMER has to be processed after 10774 * KVM_REQ_CLOCK_UPDATE, because Hyper-V SynIC timers 10775 * depend on the guest clock being up-to-date 10776 */ 10777 if (kvm_check_request(KVM_REQ_HV_STIMER, vcpu)) 10778 kvm_hv_process_stimers(vcpu); | 10776 if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) { 10777 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; 10778 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH; 10779 vcpu->run->system_event.ndata = 0; 10780 r = 0; 10781 goto out; 10782 } 10783 if (kvm_check_request(KVM_REQ_HV_RESET, vcpu)) { --- 14 unchanged lines hidden (view full) --- 10798 10799 /* 10800 * KVM_REQ_HV_STIMER has to be processed after 10801 * KVM_REQ_CLOCK_UPDATE, because Hyper-V SynIC timers 10802 * depend on the guest clock being up-to-date 10803 */ 10804 if (kvm_check_request(KVM_REQ_HV_STIMER, vcpu)) 10805 kvm_hv_process_stimers(vcpu); |
| 10806#endif |
10779 if (kvm_check_request(KVM_REQ_APICV_UPDATE, vcpu)) 10780 kvm_vcpu_update_apicv(vcpu); 10781 if (kvm_check_request(KVM_REQ_APF_READY, vcpu)) 10782 kvm_check_async_pf_completion(vcpu); 10783 if (kvm_check_request(KVM_REQ_MSR_FILTER_CHANGED, vcpu)) 10784 static_call(kvm_x86_msr_filter_changed)(vcpu); 10785 10786 if (kvm_check_request(KVM_REQ_UPDATE_CPU_DIRTY_LOGGING, vcpu)) --- 3065 unchanged lines hidden --- | 10807 if (kvm_check_request(KVM_REQ_APICV_UPDATE, vcpu)) 10808 kvm_vcpu_update_apicv(vcpu); 10809 if (kvm_check_request(KVM_REQ_APF_READY, vcpu)) 10810 kvm_check_async_pf_completion(vcpu); 10811 if (kvm_check_request(KVM_REQ_MSR_FILTER_CHANGED, vcpu)) 10812 static_call(kvm_x86_msr_filter_changed)(vcpu); 10813 10814 if (kvm_check_request(KVM_REQ_UPDATE_CPU_DIRTY_LOGGING, vcpu)) --- 3065 unchanged lines hidden --- |
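
Guards like the ones in `vcpu_enter_guest()` above are typically paired with `static inline` stubs in the feature's header so most call sites stay `#ifdef`-free; the explicit guards remain where whole request-handling blocks go away. A standalone demo of the stub idiom (names and values are illustrative, not kernel code):

```c
#include <stdio.h>

/* #define CONFIG_KVM_HYPERV 1 */	/* toggle the feature for the demo */

#ifdef CONFIG_KVM_HYPERV
static int hv_process_stimers(int vcpu_id)
{
	printf("processing Hyper-V stimers for vCPU %d\n", vcpu_id);
	return 0;
}
#else
/* Feature compiled out: the stub keeps call sites free of #ifdefs. */
static inline int hv_process_stimers(int vcpu_id)
{
	(void)vcpu_id;
	return 0;
}
#endif

int main(void)
{
	return hv_process_stimers(0);	/* identical call either way */
}
```
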