1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Kernel-based Virtual Machine driver for Linux
4 *
5 * derived from drivers/kvm/kvm_main.c
6 *
7 * Copyright (C) 2006 Qumranet, Inc.
8 * Copyright (C) 2008 Qumranet, Inc.
9 * Copyright IBM Corporation, 2008
10 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
11 *
12 * Authors:
13 * Avi Kivity <avi@qumranet.com>
14 * Yaniv Kamay <yaniv@qumranet.com>
15 * Amit Shah <amit.shah@qumranet.com>
16 * Ben-Ami Yassour <benami@il.ibm.com>
17 */
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20 #include <linux/kvm_host.h>
21 #include "irq.h"
22 #include "ioapic.h"
23 #include "mmu.h"
24 #include "i8254.h"
25 #include "tss.h"
26 #include "kvm_cache_regs.h"
27 #include "kvm_emulate.h"
28 #include "mmu/page_track.h"
29 #include "x86.h"
30 #include "cpuid.h"
31 #include "pmu.h"
32 #include "hyperv.h"
33 #include "lapic.h"
34 #include "xen.h"
35 #include "smm.h"
36
37 #include <linux/clocksource.h>
38 #include <linux/interrupt.h>
39 #include <linux/kvm.h>
40 #include <linux/fs.h>
41 #include <linux/vmalloc.h>
42 #include <linux/export.h>
43 #include <linux/moduleparam.h>
44 #include <linux/mman.h>
45 #include <linux/highmem.h>
46 #include <linux/iommu.h>
47 #include <linux/cpufreq.h>
48 #include <linux/user-return-notifier.h>
49 #include <linux/srcu.h>
50 #include <linux/slab.h>
51 #include <linux/perf_event.h>
52 #include <linux/uaccess.h>
53 #include <linux/hash.h>
54 #include <linux/pci.h>
55 #include <linux/timekeeper_internal.h>
56 #include <linux/pvclock_gtod.h>
57 #include <linux/kvm_irqfd.h>
58 #include <linux/irqbypass.h>
59 #include <linux/sched/stat.h>
60 #include <linux/sched/isolation.h>
61 #include <linux/mem_encrypt.h>
62 #include <linux/suspend.h>
63 #include <linux/smp.h>
64
65 #include <trace/events/ipi.h>
66 #include <trace/events/kvm.h>
67
68 #include <asm/debugreg.h>
69 #include <asm/msr.h>
70 #include <asm/desc.h>
71 #include <asm/mce.h>
72 #include <asm/pkru.h>
73 #include <linux/kernel_stat.h>
74 #include <asm/fpu/api.h>
75 #include <asm/fpu/xcr.h>
76 #include <asm/fpu/xstate.h>
77 #include <asm/pvclock.h>
78 #include <asm/div64.h>
79 #include <asm/irq_remapping.h>
80 #include <asm/mshyperv.h>
81 #include <asm/hypervisor.h>
82 #include <asm/tlbflush.h>
83 #include <asm/intel_pt.h>
84 #include <asm/emulate_prefix.h>
85 #include <asm/sgx.h>
86 #include <clocksource/hyperv_timer.h>
87
88 #define CREATE_TRACE_POINTS
89 #include "trace.h"
90
91 #define MAX_IO_MSRS 256
92
93 /*
94 * Note, kvm_caps fields should *never* have default values, all fields must be
95 * recomputed from scratch during vendor module load, e.g. to account for a
96 * vendor module being reloaded with different module parameters.
97 */
98 struct kvm_caps kvm_caps __read_mostly;
99 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_caps);
100
101 struct kvm_host_values kvm_host __read_mostly;
102 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_host);
103
104 #define ERR_PTR_USR(e) ((void __user *)ERR_PTR(e))
105
106 #define emul_to_vcpu(ctxt) \
107 ((struct kvm_vcpu *)(ctxt)->vcpu)
108
109 /* EFER defaults:
110 * - enable syscall by default because it's emulated by KVM
111 * - enable LME and LMA by default on 64-bit KVM
112 */
113 #ifdef CONFIG_X86_64
114 static
115 u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
116 #else
117 static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
118 #endif
119
120 #define KVM_EXIT_HYPERCALL_VALID_MASK (1 << KVM_HC_MAP_GPA_RANGE)
121
122 #define KVM_CAP_PMU_VALID_MASK KVM_PMU_CAP_DISABLE
123
124 #define KVM_X2APIC_API_VALID_FLAGS (KVM_X2APIC_API_USE_32BIT_IDS | \
125 KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK | \
126 KVM_X2APIC_ENABLE_SUPPRESS_EOI_BROADCAST | \
127 KVM_X2APIC_DISABLE_SUPPRESS_EOI_BROADCAST)
128
129 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
130 static void process_nmi(struct kvm_vcpu *vcpu);
131 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
132 static void store_regs(struct kvm_vcpu *vcpu);
133 static int sync_regs(struct kvm_vcpu *vcpu);
134 static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu);
135
136 static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
137 static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
138
139 static DEFINE_MUTEX(vendor_module_lock);
140 static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
141 static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
142
143 struct kvm_x86_ops kvm_x86_ops __read_mostly;
144
145 #define KVM_X86_OP(func) \
146 DEFINE_STATIC_CALL_NULL(kvm_x86_##func, \
147 *(((struct kvm_x86_ops *)0)->func));
148 #define KVM_X86_OP_OPTIONAL KVM_X86_OP
149 #define KVM_X86_OP_OPTIONAL_RET0 KVM_X86_OP
150 #include <asm/kvm-x86-ops.h>
151 EXPORT_STATIC_CALL_GPL(kvm_x86_get_cs_db_l_bits);
152 EXPORT_STATIC_CALL_GPL(kvm_x86_cache_reg);
153
154 static bool __read_mostly ignore_msrs = 0;
155 module_param(ignore_msrs, bool, 0644);
156
157 bool __read_mostly report_ignored_msrs = true;
158 module_param(report_ignored_msrs, bool, 0644);
159 EXPORT_SYMBOL_FOR_KVM_INTERNAL(report_ignored_msrs);
160
161 unsigned int min_timer_period_us = 200;
162 module_param(min_timer_period_us, uint, 0644);
163
164 /* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
165 static u32 __read_mostly tsc_tolerance_ppm = 250;
166 module_param(tsc_tolerance_ppm, uint, 0644);
167
168 bool __read_mostly enable_vmware_backdoor = false;
169 module_param(enable_vmware_backdoor, bool, 0444);
170 EXPORT_SYMBOL_FOR_KVM_INTERNAL(enable_vmware_backdoor);
171
172 /*
173 * Flags to manipulate forced emulation behavior (any non-zero value will
174 * enable forced emulation).
175 */
176 #define KVM_FEP_CLEAR_RFLAGS_RF BIT(1)
177 static int __read_mostly force_emulation_prefix;
178 module_param(force_emulation_prefix, int, 0644);
179
180 int __read_mostly pi_inject_timer = -1;
181 module_param(pi_inject_timer, bint, 0644);
182
183 /* Enable/disable PMU virtualization */
184 bool __read_mostly enable_pmu = true;
185 EXPORT_SYMBOL_FOR_KVM_INTERNAL(enable_pmu);
186 module_param(enable_pmu, bool, 0444);
187
188 /* Enable/disable mediated PMU virtualization. */
189 bool __read_mostly enable_mediated_pmu;
190 EXPORT_SYMBOL_FOR_KVM_INTERNAL(enable_mediated_pmu);
191
192 bool __read_mostly eager_page_split = true;
193 module_param(eager_page_split, bool, 0644);
194
195 /* Enable/disable SMT_RSB bug mitigation */
196 static bool __read_mostly mitigate_smt_rsb;
197 module_param(mitigate_smt_rsb, bool, 0444);
198
199 /*
200 * Restoring the host value for MSRs that are only consumed when running in
201 * usermode, e.g. SYSCALL MSRs and TSC_AUX, can be deferred until the CPU
202 * returns to userspace, i.e. the kernel can run with the guest's value.
203 */
204 #define KVM_MAX_NR_USER_RETURN_MSRS 16
205
206 struct kvm_user_return_msrs {
207 struct user_return_notifier urn;
208 bool registered;
209 struct kvm_user_return_msr_values {
210 u64 host;
211 u64 curr;
212 } values[KVM_MAX_NR_USER_RETURN_MSRS];
213 };
214
215 u32 __read_mostly kvm_nr_uret_msrs;
216 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_nr_uret_msrs);
217 static u32 __read_mostly kvm_uret_msrs_list[KVM_MAX_NR_USER_RETURN_MSRS];
218 static DEFINE_PER_CPU(struct kvm_user_return_msrs, user_return_msrs);
219
220 #define KVM_SUPPORTED_XCR0 (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
221 | XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
222 | XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \
223 | XFEATURE_MASK_PKRU | XFEATURE_MASK_XTILE)
224
225 #define XFEATURE_MASK_CET_ALL (XFEATURE_MASK_CET_USER | XFEATURE_MASK_CET_KERNEL)
226 /*
227 * Note, KVM supports exposing PT to the guest, but does not support context
228 * switching PT via XSTATE (KVM's PT virtualization relies on perf; swapping
229 * PT via guest XSTATE would clobber perf state), i.e. KVM doesn't support
230 * IA32_XSS[bit 8] (guests can/must use RDMSR/WRMSR to save/restore PT MSRs).
231 */
232 #define KVM_SUPPORTED_XSS (XFEATURE_MASK_CET_ALL)
233
234 bool __read_mostly allow_smaller_maxphyaddr = 0;
235 EXPORT_SYMBOL_FOR_KVM_INTERNAL(allow_smaller_maxphyaddr);
236
237 bool __read_mostly enable_apicv = true;
238 EXPORT_SYMBOL_FOR_KVM_INTERNAL(enable_apicv);
239
240 bool __read_mostly enable_ipiv = true;
241 EXPORT_SYMBOL_FOR_KVM_INTERNAL(enable_ipiv);
242
243 bool __read_mostly enable_device_posted_irqs = true;
244 EXPORT_SYMBOL_FOR_KVM_INTERNAL(enable_device_posted_irqs);
245
246 const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
247 KVM_GENERIC_VM_STATS(),
248 STATS_DESC_COUNTER(VM, mmu_shadow_zapped),
249 STATS_DESC_COUNTER(VM, mmu_pte_write),
250 STATS_DESC_COUNTER(VM, mmu_pde_zapped),
251 STATS_DESC_COUNTER(VM, mmu_flooded),
252 STATS_DESC_COUNTER(VM, mmu_recycled),
253 STATS_DESC_COUNTER(VM, mmu_cache_miss),
254 STATS_DESC_ICOUNTER(VM, mmu_unsync),
255 STATS_DESC_ICOUNTER(VM, pages_4k),
256 STATS_DESC_ICOUNTER(VM, pages_2m),
257 STATS_DESC_ICOUNTER(VM, pages_1g),
258 STATS_DESC_ICOUNTER(VM, nx_lpage_splits),
259 STATS_DESC_PCOUNTER(VM, max_mmu_rmap_size),
260 STATS_DESC_PCOUNTER(VM, max_mmu_page_hash_collisions)
261 };
262
263 const struct kvm_stats_header kvm_vm_stats_header = {
264 .name_size = KVM_STATS_NAME_SIZE,
265 .num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
266 .id_offset = sizeof(struct kvm_stats_header),
267 .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
268 .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
269 sizeof(kvm_vm_stats_desc),
270 };
271
272 const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
273 KVM_GENERIC_VCPU_STATS(),
274 STATS_DESC_COUNTER(VCPU, pf_taken),
275 STATS_DESC_COUNTER(VCPU, pf_fixed),
276 STATS_DESC_COUNTER(VCPU, pf_emulate),
277 STATS_DESC_COUNTER(VCPU, pf_spurious),
278 STATS_DESC_COUNTER(VCPU, pf_fast),
279 STATS_DESC_COUNTER(VCPU, pf_mmio_spte_created),
280 STATS_DESC_COUNTER(VCPU, pf_guest),
281 STATS_DESC_COUNTER(VCPU, tlb_flush),
282 STATS_DESC_COUNTER(VCPU, invlpg),
283 STATS_DESC_COUNTER(VCPU, exits),
284 STATS_DESC_COUNTER(VCPU, io_exits),
285 STATS_DESC_COUNTER(VCPU, mmio_exits),
286 STATS_DESC_COUNTER(VCPU, signal_exits),
287 STATS_DESC_COUNTER(VCPU, irq_window_exits),
288 STATS_DESC_COUNTER(VCPU, nmi_window_exits),
289 STATS_DESC_COUNTER(VCPU, l1d_flush),
290 STATS_DESC_COUNTER(VCPU, halt_exits),
291 STATS_DESC_COUNTER(VCPU, request_irq_exits),
292 STATS_DESC_COUNTER(VCPU, irq_exits),
293 STATS_DESC_COUNTER(VCPU, host_state_reload),
294 STATS_DESC_COUNTER(VCPU, fpu_reload),
295 STATS_DESC_COUNTER(VCPU, insn_emulation),
296 STATS_DESC_COUNTER(VCPU, insn_emulation_fail),
297 STATS_DESC_COUNTER(VCPU, hypercalls),
298 STATS_DESC_COUNTER(VCPU, irq_injections),
299 STATS_DESC_COUNTER(VCPU, nmi_injections),
300 STATS_DESC_COUNTER(VCPU, req_event),
301 STATS_DESC_COUNTER(VCPU, nested_run),
302 STATS_DESC_COUNTER(VCPU, directed_yield_attempted),
303 STATS_DESC_COUNTER(VCPU, directed_yield_successful),
304 STATS_DESC_COUNTER(VCPU, preemption_reported),
305 STATS_DESC_COUNTER(VCPU, preemption_other),
306 STATS_DESC_IBOOLEAN(VCPU, guest_mode),
307 STATS_DESC_COUNTER(VCPU, notify_window_exits),
308 };
309
310 const struct kvm_stats_header kvm_vcpu_stats_header = {
311 .name_size = KVM_STATS_NAME_SIZE,
312 .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
313 .id_offset = sizeof(struct kvm_stats_header),
314 .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
315 .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
316 sizeof(kvm_vcpu_stats_desc),
317 };
318
319 static struct kmem_cache *x86_emulator_cache;
320
321 /*
322 * The three MSR lists (msrs_to_save, emulated_msrs, msr_based_features) track
323 * the set of MSRs that KVM exposes to userspace through KVM_GET_MSRS,
324 * KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST. msrs_to_save holds MSRs that
325 * require host support, i.e. should be probed via RDMSR. emulated_msrs holds
326 * MSRs that KVM emulates without strictly requiring host support.
327 * msr_based_features holds MSRs that enumerate features, i.e. are effectively
328 * CPUID leafs. Note, msr_based_features isn't mutually exclusive with
329 * msrs_to_save and emulated_msrs.
330 */
331
332 static const u32 msrs_to_save_base[] = {
333 MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
334 MSR_STAR,
335 #ifdef CONFIG_X86_64
336 MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
337 #endif
338 MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
339 MSR_IA32_FEAT_CTL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
340 MSR_IA32_SPEC_CTRL, MSR_IA32_TSX_CTRL,
341 MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH,
342 MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK,
343 MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B,
344 MSR_IA32_RTIT_ADDR1_A, MSR_IA32_RTIT_ADDR1_B,
345 MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B,
346 MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B,
347 MSR_IA32_UMWAIT_CONTROL,
348
349 MSR_IA32_XFD, MSR_IA32_XFD_ERR, MSR_IA32_XSS,
350
351 MSR_IA32_U_CET, MSR_IA32_S_CET,
352 MSR_IA32_PL0_SSP, MSR_IA32_PL1_SSP, MSR_IA32_PL2_SSP,
353 MSR_IA32_PL3_SSP, MSR_IA32_INT_SSP_TAB,
354 };
355
356 static const u32 msrs_to_save_pmu[] = {
357 MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1,
358 MSR_ARCH_PERFMON_FIXED_CTR0 + 2,
359 MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS,
360 MSR_CORE_PERF_GLOBAL_CTRL,
361 MSR_IA32_PEBS_ENABLE, MSR_IA32_DS_AREA, MSR_PEBS_DATA_CFG,
362
363 /* This part of MSRs should match KVM_MAX_NR_INTEL_GP_COUNTERS. */
364 MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1,
365 MSR_ARCH_PERFMON_PERFCTR0 + 2, MSR_ARCH_PERFMON_PERFCTR0 + 3,
366 MSR_ARCH_PERFMON_PERFCTR0 + 4, MSR_ARCH_PERFMON_PERFCTR0 + 5,
367 MSR_ARCH_PERFMON_PERFCTR0 + 6, MSR_ARCH_PERFMON_PERFCTR0 + 7,
368 MSR_ARCH_PERFMON_EVENTSEL0, MSR_ARCH_PERFMON_EVENTSEL1,
369 MSR_ARCH_PERFMON_EVENTSEL0 + 2, MSR_ARCH_PERFMON_EVENTSEL0 + 3,
370 MSR_ARCH_PERFMON_EVENTSEL0 + 4, MSR_ARCH_PERFMON_EVENTSEL0 + 5,
371 MSR_ARCH_PERFMON_EVENTSEL0 + 6, MSR_ARCH_PERFMON_EVENTSEL0 + 7,
372
373 MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3,
374 MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3,
375
376 /* This part of MSRs should match KVM_MAX_NR_AMD_GP_COUNTERS. */
377 MSR_F15H_PERF_CTL0, MSR_F15H_PERF_CTL1, MSR_F15H_PERF_CTL2,
378 MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5,
379 MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2,
380 MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5,
381
382 MSR_AMD64_PERF_CNTR_GLOBAL_CTL,
383 MSR_AMD64_PERF_CNTR_GLOBAL_STATUS,
384 MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,
385 MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET,
386 };
387
388 static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_base) +
389 ARRAY_SIZE(msrs_to_save_pmu)];
390 static unsigned num_msrs_to_save;
391
392 static const u32 emulated_msrs_all[] = {
393 MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
394 MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
395
396 #ifdef CONFIG_KVM_HYPERV
397 HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
398 HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
399 HV_X64_MSR_TSC_FREQUENCY, HV_X64_MSR_APIC_FREQUENCY,
400 HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2,
401 HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL,
402 HV_X64_MSR_RESET,
403 HV_X64_MSR_VP_INDEX,
404 HV_X64_MSR_VP_RUNTIME,
405 HV_X64_MSR_SCONTROL,
406 HV_X64_MSR_STIMER0_CONFIG,
407 HV_X64_MSR_VP_ASSIST_PAGE,
408 HV_X64_MSR_REENLIGHTENMENT_CONTROL, HV_X64_MSR_TSC_EMULATION_CONTROL,
409 HV_X64_MSR_TSC_EMULATION_STATUS, HV_X64_MSR_TSC_INVARIANT_CONTROL,
410 HV_X64_MSR_SYNDBG_OPTIONS,
411 HV_X64_MSR_SYNDBG_CONTROL, HV_X64_MSR_SYNDBG_STATUS,
412 HV_X64_MSR_SYNDBG_SEND_BUFFER, HV_X64_MSR_SYNDBG_RECV_BUFFER,
413 HV_X64_MSR_SYNDBG_PENDING_BUFFER,
414 #endif
415
416 MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
417 MSR_KVM_PV_EOI_EN, MSR_KVM_ASYNC_PF_INT, MSR_KVM_ASYNC_PF_ACK,
418
419 MSR_IA32_TSC_ADJUST,
420 MSR_IA32_TSC_DEADLINE,
421 MSR_IA32_ARCH_CAPABILITIES,
422 MSR_IA32_PERF_CAPABILITIES,
423 MSR_IA32_MISC_ENABLE,
424 MSR_IA32_MCG_STATUS,
425 MSR_IA32_MCG_CTL,
426 MSR_IA32_MCG_EXT_CTL,
427 MSR_IA32_SMBASE,
428 MSR_SMI_COUNT,
429 MSR_PLATFORM_INFO,
430 MSR_MISC_FEATURES_ENABLES,
431 MSR_AMD64_VIRT_SPEC_CTRL,
432 MSR_AMD64_TSC_RATIO,
433 MSR_IA32_POWER_CTL,
434 MSR_IA32_UCODE_REV,
435
436 /*
437 * KVM always supports the "true" VMX control MSRs, even if the host
438 * does not. The VMX MSRs as a whole are considered "emulated" as KVM
439 * doesn't strictly require them to exist in the host (ignoring that
440 * KVM would refuse to load in the first place if the core set of MSRs
441 * aren't supported).
442 */
443 MSR_IA32_VMX_BASIC,
444 MSR_IA32_VMX_TRUE_PINBASED_CTLS,
445 MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
446 MSR_IA32_VMX_TRUE_EXIT_CTLS,
447 MSR_IA32_VMX_TRUE_ENTRY_CTLS,
448 MSR_IA32_VMX_MISC,
449 MSR_IA32_VMX_CR0_FIXED0,
450 MSR_IA32_VMX_CR4_FIXED0,
451 MSR_IA32_VMX_VMCS_ENUM,
452 MSR_IA32_VMX_PROCBASED_CTLS2,
453 MSR_IA32_VMX_EPT_VPID_CAP,
454 MSR_IA32_VMX_VMFUNC,
455
456 MSR_K7_HWCR,
457 MSR_KVM_POLL_CONTROL,
458 };
459
460 static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)];
461 static unsigned num_emulated_msrs;
462
463 /*
464 * List of MSRs that control the existence of MSR-based features, i.e. MSRs
465 * that are effectively CPUID leafs. VMX MSRs are also included in the set of
466 * feature MSRs, but are handled separately to allow expedited lookups.
467 */
468 static const u32 msr_based_features_all_except_vmx[] = {
469 MSR_AMD64_DE_CFG,
470 MSR_IA32_UCODE_REV,
471 MSR_IA32_ARCH_CAPABILITIES,
472 MSR_IA32_PERF_CAPABILITIES,
473 MSR_PLATFORM_INFO,
474 };
475
476 static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all_except_vmx) +
477 (KVM_LAST_EMULATED_VMX_MSR - KVM_FIRST_EMULATED_VMX_MSR + 1)];
478 static unsigned int num_msr_based_features;
479
480 /*
481 * All feature MSRs except uCode revID, which tracks the currently loaded uCode
482 * patch, are immutable once the vCPU model is defined.
483 */
484 static bool kvm_is_immutable_feature_msr(u32 msr)
485 {
486 int i;
487
488 if (msr >= KVM_FIRST_EMULATED_VMX_MSR && msr <= KVM_LAST_EMULATED_VMX_MSR)
489 return true;
490
491 for (i = 0; i < ARRAY_SIZE(msr_based_features_all_except_vmx); i++) {
492 if (msr == msr_based_features_all_except_vmx[i])
493 return msr != MSR_IA32_UCODE_REV;
494 }
495
496 return false;
497 }
498
499 static bool kvm_is_advertised_msr(u32 msr_index)
500 {
501 unsigned int i;
502
503 for (i = 0; i < num_msrs_to_save; i++) {
504 if (msrs_to_save[i] == msr_index)
505 return true;
506 }
507
508 for (i = 0; i < num_emulated_msrs; i++) {
509 if (emulated_msrs[i] == msr_index)
510 return true;
511 }
512
513 return false;
514 }
515
516 typedef int (*msr_access_t)(struct kvm_vcpu *vcpu, u32 index, u64 *data,
517 bool host_initiated);
518
519 static __always_inline int kvm_do_msr_access(struct kvm_vcpu *vcpu, u32 msr,
520 u64 *data, bool host_initiated,
521 enum kvm_msr_access rw,
522 msr_access_t msr_access_fn)
523 {
524 const char *op = rw == MSR_TYPE_W ? "wrmsr" : "rdmsr";
525 int ret;
526
527 BUILD_BUG_ON(rw != MSR_TYPE_R && rw != MSR_TYPE_W);
528
529 /*
530 * Zero the data on read failures to avoid leaking stack data to the
531 * guest and/or userspace, e.g. if the failure is ignored below.
532 */
533 ret = msr_access_fn(vcpu, msr, data, host_initiated);
534 if (ret && rw == MSR_TYPE_R)
535 *data = 0;
536
537 if (ret != KVM_MSR_RET_UNSUPPORTED)
538 return ret;
539
540 /*
541 * Userspace is allowed to read MSRs, and write '0' to MSRs, that KVM
542 * advertises to userspace, even if an MSR isn't fully supported.
543 * Simply check that @data is '0', which covers both the write '0' case
544 * and all reads (in which case @data is zeroed on failure; see above).
545 */
546 if (host_initiated && !*data && kvm_is_advertised_msr(msr))
547 return 0;
548
549 if (!ignore_msrs) {
550 kvm_debug_ratelimited("unhandled %s: 0x%x data 0x%llx\n",
551 op, msr, *data);
552 return ret;
553 }
554
555 if (report_ignored_msrs)
556 kvm_pr_unimpl("ignored %s: 0x%x data 0x%llx\n", op, msr, *data);
557
558 return 0;
559 }
560
561 static struct kmem_cache *kvm_alloc_emulator_cache(void)
562 {
563 unsigned int useroffset = offsetof(struct x86_emulate_ctxt, src);
564 unsigned int size = sizeof(struct x86_emulate_ctxt);
565
566 return kmem_cache_create_usercopy("x86_emulator", size,
567 __alignof__(struct x86_emulate_ctxt),
568 SLAB_ACCOUNT, useroffset,
569 size - useroffset, NULL);
570 }
571
572 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);
573
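/*
 * Reset the async #PF GFN hash table; ~0 marks a slot as empty since it is
 * never a valid tracked GFN.
 */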
574 static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
575 {
576 int i;
577 for (i = 0; i < ASYNC_PF_PER_VCPU; i++)
578 vcpu->arch.apf.gfns[i] = ~0;
579 }
580
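/*
 * Tear down user-return MSR tracking, warning if any CPU still has a
 * registered user-return notifier (all notifiers should be gone by now).
 */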
581 static void kvm_destroy_user_return_msrs(void)
582 {
583 int cpu;
584
585 for_each_possible_cpu(cpu)
586 WARN_ON_ONCE(per_cpu(user_return_msrs, cpu).registered);
587
588 kvm_nr_uret_msrs = 0;
589 }
590
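/*
 * User-return notifier callback: restore the host values of any MSRs that
 * were changed while running a guest, then unregister the notifier.
 */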
591 static void kvm_on_user_return(struct user_return_notifier *urn)
592 {
593 unsigned slot;
594 struct kvm_user_return_msrs *msrs
595 = container_of(urn, struct kvm_user_return_msrs, urn);
596 struct kvm_user_return_msr_values *values;
597
598 msrs->registered = false;
599 user_return_notifier_unregister(urn);
600
601 for (slot = 0; slot < kvm_nr_uret_msrs; ++slot) {
602 values = &msrs->values[slot];
603 if (values->host != values->curr) {
604 wrmsrq(kvm_uret_msrs_list[slot], values->host);
605 values->curr = values->host;
606 }
607 }
608 }
609
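/*
 * Probe whether the MSR exists and is writable on this CPU by reading it
 * and writing the value back, with preemption disabled so the RDMSR/WRMSR
 * pair hits the same CPU.
 */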
610 static int kvm_probe_user_return_msr(u32 msr)
611 {
612 u64 val;
613 int ret;
614
615 preempt_disable();
616 ret = rdmsrq_safe(msr, &val);
617 if (ret)
618 goto out;
619 ret = wrmsrq_safe(msr, val);
620 out:
621 preempt_enable();
622 return ret;
623 }
624
625 int kvm_add_user_return_msr(u32 msr)
626 {
627 BUG_ON(kvm_nr_uret_msrs >= KVM_MAX_NR_USER_RETURN_MSRS);
628
629 if (kvm_probe_user_return_msr(msr))
630 return -1;
631
632 kvm_uret_msrs_list[kvm_nr_uret_msrs] = msr;
633 return kvm_nr_uret_msrs++;
634 }
635 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_add_user_return_msr);
636
637 int kvm_find_user_return_msr(u32 msr)
638 {
639 int i;
640
641 for (i = 0; i < kvm_nr_uret_msrs; ++i) {
642 if (kvm_uret_msrs_list[i] == msr)
643 return i;
644 }
645 return -1;
646 }
647 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_find_user_return_msr);
648
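/* Snapshot the host values of all user-return MSRs for this CPU. */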
649 static void kvm_user_return_msr_cpu_online(void)
650 {
651 struct kvm_user_return_msrs *msrs = this_cpu_ptr(&user_return_msrs);
652 u64 value;
653 int i;
654
655 for (i = 0; i < kvm_nr_uret_msrs; ++i) {
656 rdmsrq_safe(kvm_uret_msrs_list[i], &value);
657 msrs->values[i].host = value;
658 msrs->values[i].curr = value;
659 }
660 }
661
662 static void kvm_user_return_register_notifier(struct kvm_user_return_msrs *msrs)
663 {
664 if (!msrs->registered) {
665 msrs->urn.on_user_return = kvm_on_user_return;
666 user_return_notifier_register(&msrs->urn);
667 msrs->registered = true;
668 }
669 }
670
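/*
 * Load a guest value (masked with the host value) into a user-return MSR,
 * skipping the WRMSR if the value is unchanged, and register the notifier
 * so the host value is restored before returning to userspace.
 */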
671 int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask)
672 {
673 struct kvm_user_return_msrs *msrs = this_cpu_ptr(&user_return_msrs);
674 int err;
675
676 value = (value & mask) | (msrs->values[slot].host & ~mask);
677 if (value == msrs->values[slot].curr)
678 return 0;
679 err = wrmsrq_safe(kvm_uret_msrs_list[slot], value);
680 if (err)
681 return 1;
682
683 msrs->values[slot].curr = value;
684 kvm_user_return_register_notifier(msrs);
685 return 0;
686 }
687 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_user_return_msr);
688
689 u64 kvm_get_user_return_msr(unsigned int slot)
690 {
691 return this_cpu_ptr(&user_return_msrs)->values[slot].curr;
692 }
693 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_get_user_return_msr);
694
695 static void drop_user_return_notifiers(void)
696 {
697 struct kvm_user_return_msrs *msrs = this_cpu_ptr(&user_return_msrs);
698
699 if (msrs->registered)
700 kvm_on_user_return(&msrs->urn);
701 }
702
703 /*
704 * Handle a fault on a hardware virtualization (VMX or SVM) instruction.
705 *
706 * Hardware virtualization extension instructions may fault if a reboot turns
707 * off virtualization while processes are running. Usually after catching the
708 * fault we just panic; during reboot instead the instruction is ignored.
709 */
710 noinstr void kvm_spurious_fault(void)
711 {
712 /* Fault while not rebooting. We want the trace. */
713 BUG_ON(!kvm_rebooting);
714 }
715 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_spurious_fault);
716
717 #define EXCPT_BENIGN 0
718 #define EXCPT_CONTRIBUTORY 1
719 #define EXCPT_PF 2
720
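/*
 * Classify an exception for double-fault purposes: per the SDM, two
 * contributory exceptions, or a #PF followed by any non-benign exception,
 * combine into #DF.
 */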
721 static int exception_class(int vector)
722 {
723 switch (vector) {
724 case PF_VECTOR:
725 return EXCPT_PF;
726 case DE_VECTOR:
727 case TS_VECTOR:
728 case NP_VECTOR:
729 case SS_VECTOR:
730 case GP_VECTOR:
731 return EXCPT_CONTRIBUTORY;
732 default:
733 break;
734 }
735 return EXCPT_BENIGN;
736 }
737
738 #define EXCPT_FAULT 0
739 #define EXCPT_TRAP 1
740 #define EXCPT_ABORT 2
741 #define EXCPT_INTERRUPT 3
742 #define EXCPT_DB 4
743
744 static int exception_type(int vector)
745 {
746 unsigned int mask;
747
748 if (WARN_ON(vector > 31 || vector == NMI_VECTOR))
749 return EXCPT_INTERRUPT;
750
751 mask = 1 << vector;
752
753 /*
754 * #DBs can be trap-like or fault-like, the caller must check other CPU
755 * state, e.g. DR6, to determine whether a #DB is a trap or fault.
756 */
757 if (mask & (1 << DB_VECTOR))
758 return EXCPT_DB;
759
760 if (mask & ((1 << BP_VECTOR) | (1 << OF_VECTOR)))
761 return EXCPT_TRAP;
762
763 if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR)))
764 return EXCPT_ABORT;
765
766 /* Reserved exceptions will result in fault */
767 return EXCPT_FAULT;
768 }
769
770 void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu,
771 struct kvm_queued_exception *ex)
772 {
773 if (!ex->has_payload)
774 return;
775
776 switch (ex->vector) {
777 case DB_VECTOR:
778 /*
779 * "Certain debug exceptions may clear bit 0-3. The
780 * remaining contents of the DR6 register are never
781 * cleared by the processor".
782 */
783 vcpu->arch.dr6 &= ~DR_TRAP_BITS;
784 /*
785 * In order to reflect the #DB exception payload in guest
786 * dr6, three components need to be considered: active low
787 * bit, FIXED_1 bits and active high bits (e.g. DR6_BD,
788 * DR6_BS and DR6_BT)
789 * DR6_ACTIVE_LOW contains the FIXED_1 and active low bits.
790 * In the target guest dr6:
791 * FIXED_1 bits should always be set.
792 * Active low bits should be cleared if 1-setting in payload.
793 * Active high bits should be set if 1-setting in payload.
794 *
795 * Note, the payload is compatible with the pending debug
796 * exceptions/exit qualification under VMX, that active_low bits
797 * are active high in payload.
798 * So they need to be flipped for DR6.
799 */
800 vcpu->arch.dr6 |= DR6_ACTIVE_LOW;
801 vcpu->arch.dr6 |= ex->payload;
802 vcpu->arch.dr6 ^= ex->payload & DR6_ACTIVE_LOW;
803
804 /*
805 * The #DB payload is defined as compatible with the 'pending
806 * debug exceptions' field under VMX, not DR6. While bit 12 is
807 * defined in the 'pending debug exceptions' field (enabled
808 * breakpoint), it is reserved and must be zero in DR6.
809 */
810 vcpu->arch.dr6 &= ~BIT(12);
811 break;
812 case PF_VECTOR:
813 vcpu->arch.cr2 = ex->payload;
814 break;
815 }
816
817 ex->has_payload = false;
818 ex->payload = 0;
819 }
820 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_deliver_exception_payload);
821
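/*
 * Queue an exception that is destined to be delivered to L1 as a nested
 * VM-Exit instead of being injected into the (L2) guest.
 */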
822 static void kvm_queue_exception_vmexit(struct kvm_vcpu *vcpu, unsigned int vector,
823 bool has_error_code, u32 error_code,
824 bool has_payload, unsigned long payload)
825 {
826 struct kvm_queued_exception *ex = &vcpu->arch.exception_vmexit;
827
828 ex->vector = vector;
829 ex->injected = false;
830 ex->pending = true;
831 ex->has_error_code = has_error_code;
832 ex->error_code = error_code;
833 ex->has_payload = has_payload;
834 ex->payload = payload;
835 }
836
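/*
 * Common exception queueing: morph the exception into a VM-Exit if L1
 * intercepts it, queue it if no exception is outstanding, and otherwise
 * combine it with the outstanding exception (#DF, triple fault, or replace)
 * per the SDM's exception classification rules.
 */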
837 static void kvm_multiple_exception(struct kvm_vcpu *vcpu, unsigned int nr,
838 bool has_error, u32 error_code,
839 bool has_payload, unsigned long payload)
840 {
841 u32 prev_nr;
842 int class1, class2;
843
844 kvm_make_request(KVM_REQ_EVENT, vcpu);
845
846 /*
847 * If the exception is destined for L2, morph it to a VM-Exit if L1
848 * wants to intercept the exception.
849 */
850 if (is_guest_mode(vcpu) &&
851 kvm_x86_ops.nested_ops->is_exception_vmexit(vcpu, nr, error_code)) {
852 kvm_queue_exception_vmexit(vcpu, nr, has_error, error_code,
853 has_payload, payload);
854 return;
855 }
856
857 if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) {
858 queue:
859 vcpu->arch.exception.pending = true;
860 vcpu->arch.exception.injected = false;
861
862 vcpu->arch.exception.has_error_code = has_error;
863 vcpu->arch.exception.vector = nr;
864 vcpu->arch.exception.error_code = error_code;
865 vcpu->arch.exception.has_payload = has_payload;
866 vcpu->arch.exception.payload = payload;
867 if (!is_guest_mode(vcpu))
868 kvm_deliver_exception_payload(vcpu,
869 &vcpu->arch.exception);
870 return;
871 }
872
873 /* A previous exception is pending or injected; see how the two combine. */
874 prev_nr = vcpu->arch.exception.vector;
875 if (prev_nr == DF_VECTOR) {
876 /* triple fault -> shutdown */
877 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
878 return;
879 }
880 class1 = exception_class(prev_nr);
881 class2 = exception_class(nr);
882 if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY) ||
883 (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
884 /*
885 * Synthesize #DF. Clear the previously injected or pending
886 * exception so as not to incorrectly trigger shutdown.
887 */
888 vcpu->arch.exception.injected = false;
889 vcpu->arch.exception.pending = false;
890
891 kvm_queue_exception_e(vcpu, DF_VECTOR, 0);
892 } else {
893 /* replace previous exception with a new one in the hope
894 that instruction re-execution will regenerate the lost
895 exception */
896 goto queue;
897 }
898 }
899
900 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
901 {
902 kvm_multiple_exception(vcpu, nr, false, 0, false, 0);
903 }
904 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_queue_exception);
905
906
907 void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr,
908 unsigned long payload)
909 {
910 kvm_multiple_exception(vcpu, nr, false, 0, true, payload);
911 }
912 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_queue_exception_p);
913
914 static void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr,
915 u32 error_code, unsigned long payload)
916 {
917 kvm_multiple_exception(vcpu, nr, true, error_code, true, payload);
918 }
919
920 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned int nr,
921 bool has_error_code, u32 error_code)
922 {
923
924 /*
925 * On VM-Entry, an exception can be pending if and only if event
926 * injection was blocked by nested_run_pending. In that case, however,
927 * vcpu_enter_guest() requests an immediate exit, and the guest
928 * shouldn't proceed far enough to need reinjection.
929 */
930 WARN_ON_ONCE(kvm_is_exception_pending(vcpu));
931
932 /*
933 * Do not check for interception when injecting an event for L2, as the
934 * exception was checked for intercept when it was originally queued, and
935 * re-checking is incorrect if _L1_ injected the exception, in which
936 * case it's exempt from interception.
937 */
938 kvm_make_request(KVM_REQ_EVENT, vcpu);
939
940 vcpu->arch.exception.injected = true;
941 vcpu->arch.exception.has_error_code = has_error_code;
942 vcpu->arch.exception.vector = nr;
943 vcpu->arch.exception.error_code = error_code;
944 vcpu->arch.exception.has_payload = false;
945 vcpu->arch.exception.payload = 0;
946 }
947 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_requeue_exception);
948
949 int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
950 {
951 if (err)
952 kvm_inject_gp(vcpu, 0);
953 else
954 return kvm_skip_emulated_instruction(vcpu);
955
956 return 1;
957 }
958 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_complete_insn_gp);
959
960 static int complete_emulated_insn_gp(struct kvm_vcpu *vcpu, int err)
961 {
962 if (err) {
963 kvm_inject_gp(vcpu, 0);
964 return 1;
965 }
966
967 return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE | EMULTYPE_SKIP |
968 EMULTYPE_COMPLETE_USER_EXIT);
969 }
970
971 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
972 {
973 ++vcpu->stat.pf_guest;
974
975 /*
976 * Async #PF in L2 is always forwarded to L1 as a VM-Exit regardless of
977 * whether or not L1 wants to intercept "regular" #PF.
978 */
979 if (is_guest_mode(vcpu) && fault->async_page_fault)
980 kvm_queue_exception_vmexit(vcpu, PF_VECTOR,
981 true, fault->error_code,
982 true, fault->address);
983 else
984 kvm_queue_exception_e_p(vcpu, PF_VECTOR, fault->error_code,
985 fault->address);
986 }
987
988 void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
989 struct x86_exception *fault)
990 {
991 struct kvm_mmu *fault_mmu;
992 WARN_ON_ONCE(fault->vector != PF_VECTOR);
993
994 fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu :
995 vcpu->arch.walk_mmu;
996
997 /*
998 * Invalidate the TLB entry for the faulting address, if it exists,
999 * else the access will fault indefinitely (and to emulate hardware).
1000 */
1001 if ((fault->error_code & PFERR_PRESENT_MASK) &&
1002 !(fault->error_code & PFERR_RSVD_MASK))
1003 kvm_mmu_invalidate_addr(vcpu, fault_mmu, fault->address,
1004 KVM_MMU_ROOT_CURRENT);
1005
1006 fault_mmu->inject_page_fault(vcpu, fault);
1007 }
1008 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_inject_emulated_page_fault);
1009
1010 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
1011 {
1012 atomic_inc(&vcpu->arch.nmi_queued);
1013 kvm_make_request(KVM_REQ_NMI, vcpu);
1014 }
1015
1016 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
1017 {
1018 kvm_multiple_exception(vcpu, nr, true, error_code, false, 0);
1019 }
1020 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_queue_exception_e);
1021
1022 /*
1023 * Checks if cpl <= required_cpl; if true, return true. Otherwise queue
1024 * a #GP and return false.
1025 */
1026 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
1027 {
1028 if (kvm_x86_call(get_cpl)(vcpu) <= required_cpl)
1029 return true;
1030 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
1031 return false;
1032 }
1033
1034 bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
1035 {
1036 if ((dr != 4 && dr != 5) || !kvm_is_cr4_bit_set(vcpu, X86_CR4_DE))
1037 return true;
1038
1039 kvm_queue_exception(vcpu, UD_VECTOR);
1040 return false;
1041 }
1042 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_require_dr);
1043
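/* Async #PF is usable only when enabled with interrupt-based delivery. */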
1044 static bool kvm_pv_async_pf_enabled(struct kvm_vcpu *vcpu)
1045 {
1046 u64 mask = KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT;
1047
1048 return (vcpu->arch.apf.msr_en_val & mask) == mask;
1049 }
1050
1051 static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu)
1052 {
1053 return vcpu->arch.reserved_gpa_bits | rsvd_bits(5, 8) | rsvd_bits(1, 2);
1054 }
1055
1056 /*
1057 * Load the pae pdptrs. Return 1 if they are all valid, 0 otherwise.
1058 */
1059 int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
1060 {
1061 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
1062 gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
1063 gpa_t real_gpa;
1064 int i;
1065 int ret;
1066 u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];
1067
1068 /*
1069 * If the MMU is nested, CR3 holds an L2 GPA and needs to be translated
1070 * to an L1 GPA.
1071 */
1072 real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(pdpt_gfn),
1073 PFERR_USER_MASK | PFERR_WRITE_MASK, NULL);
1074 if (real_gpa == INVALID_GPA)
1075 return 0;
1076
1077 /* Note the offset, PDPTRs are 32 byte aligned when using PAE paging. */
1078 ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(real_gpa), pdpte,
1079 cr3 & GENMASK(11, 5), sizeof(pdpte));
1080 if (ret < 0)
1081 return 0;
1082
1083 for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
1084 if ((pdpte[i] & PT_PRESENT_MASK) &&
1085 (pdpte[i] & pdptr_rsvd_bits(vcpu))) {
1086 return 0;
1087 }
1088 }
1089
1090 /*
1091 * Marking VCPU_EXREG_PDPTR dirty doesn't work for !tdp_enabled.
1092 * Shadow page roots need to be reconstructed instead.
1093 */
1094 if (!tdp_enabled && memcmp(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs)))
1095 kvm_mmu_free_roots(vcpu->kvm, mmu, KVM_MMU_ROOT_CURRENT);
1096
1097 memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
1098 kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
1099 kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
1100 vcpu->arch.pdptrs_from_userspace = false;
1101
1102 return 1;
1103 }
1104 EXPORT_SYMBOL_FOR_KVM_INTERNAL(load_pdptrs);
1105
1106 static bool kvm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1107 {
1108 #ifdef CONFIG_X86_64
1109 if (cr0 & 0xffffffff00000000UL)
1110 return false;
1111 #endif
1112
1113 if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
1114 return false;
1115
1116 if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
1117 return false;
1118
1119 return kvm_x86_call(is_valid_cr0)(vcpu, cr0);
1120 }
1121
1122 void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0)
1123 {
1124 /*
1125 * CR0.WP is incorporated into the MMU role, but only for non-nested,
1126 * indirect shadow MMUs. If paging is disabled, no updates are needed
1127 * as there are no permission bits to emulate. If TDP is enabled, the
1128 * MMU's metadata needs to be updated, e.g. so that emulating guest
1129 * translations does the right thing, but there's no need to unload the
1130 * root as CR0.WP doesn't affect SPTEs.
1131 */
1132 if ((cr0 ^ old_cr0) == X86_CR0_WP) {
1133 if (!(cr0 & X86_CR0_PG))
1134 return;
1135
1136 if (tdp_enabled) {
1137 kvm_init_mmu(vcpu);
1138 return;
1139 }
1140 }
1141
1142 if ((cr0 ^ old_cr0) & X86_CR0_PG) {
1143 /*
1144 * Clearing CR0.PG is defined to flush the TLB from the guest's
1145 * perspective.
1146 */
1147 if (!(cr0 & X86_CR0_PG))
1148 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
1149 /*
1150 * Check for async #PF completion events when enabling paging,
1151 * as the vCPU may have previously encountered async #PFs (it's
1152 * entirely legal for the guest to toggle paging on/off without
1153 * waiting for the async #PF queue to drain).
1154 */
1155 else if (kvm_pv_async_pf_enabled(vcpu))
1156 kvm_make_request(KVM_REQ_APF_READY, vcpu);
1157 }
1158
1159 if ((cr0 ^ old_cr0) & KVM_MMU_CR0_ROLE_BITS)
1160 kvm_mmu_reset_context(vcpu);
1161 }
1162 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_post_set_cr0);
1163
1164 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1165 {
1166 unsigned long old_cr0 = kvm_read_cr0(vcpu);
1167
1168 if (!kvm_is_valid_cr0(vcpu, cr0))
1169 return 1;
1170
1171 cr0 |= X86_CR0_ET;
1172
1173 /* Writes to CR0 reserved bits are ignored, even on Intel. */
1174 cr0 &= ~CR0_RESERVED_BITS;
1175
1176 #ifdef CONFIG_X86_64
1177 if ((vcpu->arch.efer & EFER_LME) && !is_paging(vcpu) &&
1178 (cr0 & X86_CR0_PG)) {
1179 int cs_db, cs_l;
1180
1181 if (!is_pae(vcpu))
1182 return 1;
1183 kvm_x86_call(get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
1184 if (cs_l)
1185 return 1;
1186 }
1187 #endif
1188 if (!(vcpu->arch.efer & EFER_LME) && (cr0 & X86_CR0_PG) &&
1189 is_pae(vcpu) && ((cr0 ^ old_cr0) & X86_CR0_PDPTR_BITS) &&
1190 !load_pdptrs(vcpu, kvm_read_cr3(vcpu)))
1191 return 1;
1192
1193 if (!(cr0 & X86_CR0_PG) &&
1194 (is_64_bit_mode(vcpu) || kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)))
1195 return 1;
1196
1197 if (!(cr0 & X86_CR0_WP) && kvm_is_cr4_bit_set(vcpu, X86_CR4_CET))
1198 return 1;
1199
1200 kvm_x86_call(set_cr0)(vcpu, cr0);
1201
1202 kvm_post_set_cr0(vcpu, old_cr0, cr0);
1203
1204 return 0;
1205 }
1206 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_cr0);
1207
1208 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
1209 {
1210 (void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
1211 }
1212 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_lmsw);
1213
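/*
 * Switch XCR0 and, if the guest can use XSAVES, IA32_XSS between host and
 * guest values. Skipped for protected guests and when the guest hasn't
 * enabled CR4.OSXSAVE.
 */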
1214 static void kvm_load_xfeatures(struct kvm_vcpu *vcpu, bool load_guest)
1215 {
1216 if (vcpu->arch.guest_state_protected)
1217 return;
1218
1219 if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE))
1220 return;
1221
1222 if (vcpu->arch.xcr0 != kvm_host.xcr0)
1223 xsetbv(XCR_XFEATURE_ENABLED_MASK,
1224 load_guest ? vcpu->arch.xcr0 : kvm_host.xcr0);
1225
1226 if (guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVES) &&
1227 vcpu->arch.ia32_xss != kvm_host.xss)
1228 wrmsrq(MSR_IA32_XSS, load_guest ? vcpu->arch.ia32_xss : kvm_host.xss);
1229 }
1230
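/* Load the guest's PKRU if the guest can access it and it differs from the host's. */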
1231 static void kvm_load_guest_pkru(struct kvm_vcpu *vcpu)
1232 {
1233 if (vcpu->arch.guest_state_protected)
1234 return;
1235
1236 if (cpu_feature_enabled(X86_FEATURE_PKU) &&
1237 vcpu->arch.pkru != vcpu->arch.host_pkru &&
1238 ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
1239 kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE)))
1240 wrpkru(vcpu->arch.pkru);
1241 }
1242
1243 static void kvm_load_host_pkru(struct kvm_vcpu *vcpu)
1244 {
1245 if (vcpu->arch.guest_state_protected)
1246 return;
1247
1248 if (cpu_feature_enabled(X86_FEATURE_PKU) &&
1249 ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
1250 kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE))) {
1251 vcpu->arch.pkru = rdpkru();
1252 if (vcpu->arch.pkru != vcpu->arch.host_pkru)
1253 wrpkru(vcpu->arch.host_pkru);
1254 }
1255 }
1256
1257 #ifdef CONFIG_X86_64
1258 static inline u64 kvm_guest_supported_xfd(struct kvm_vcpu *vcpu)
1259 {
1260 return vcpu->arch.guest_supported_xcr0 & XFEATURE_MASK_USER_DYNAMIC;
1261 }
1262 #endif
1263
1264 int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
1265 {
1266 u64 xcr0 = xcr;
1267 u64 old_xcr0 = vcpu->arch.xcr0;
1268 u64 valid_bits;
1269
1270 /* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now */
1271 if (index != XCR_XFEATURE_ENABLED_MASK)
1272 return 1;
1273 if (!(xcr0 & XFEATURE_MASK_FP))
1274 return 1;
1275 if ((xcr0 & XFEATURE_MASK_YMM) && !(xcr0 & XFEATURE_MASK_SSE))
1276 return 1;
1277
1278 /*
1279 * Do not allow the guest to set bits that we do not support
1280 * saving. However, xcr0 bit 0 is always set, even if the
1281 * emulated CPU does not support XSAVE (see kvm_vcpu_reset()).
1282 */
1283 valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP;
1284 if (xcr0 & ~valid_bits)
1285 return 1;
1286
1287 if ((!(xcr0 & XFEATURE_MASK_BNDREGS)) !=
1288 (!(xcr0 & XFEATURE_MASK_BNDCSR)))
1289 return 1;
1290
1291 if (xcr0 & XFEATURE_MASK_AVX512) {
1292 if (!(xcr0 & XFEATURE_MASK_YMM))
1293 return 1;
1294 if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
1295 return 1;
1296 }
1297
1298 if ((xcr0 & XFEATURE_MASK_XTILE) &&
1299 ((xcr0 & XFEATURE_MASK_XTILE) != XFEATURE_MASK_XTILE))
1300 return 1;
1301
1302 vcpu->arch.xcr0 = xcr0;
1303
1304 if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
1305 vcpu->arch.cpuid_dynamic_bits_dirty = true;
1306 return 0;
1307 }
1308 EXPORT_SYMBOL_FOR_KVM_INTERNAL(__kvm_set_xcr);
1309
1310 int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu)
1311 {
1312 /* Note, #UD due to CR4.OSXSAVE=0 has priority over the intercept. */
1313 if (kvm_x86_call(get_cpl)(vcpu) != 0 ||
1314 __kvm_set_xcr(vcpu, kvm_rcx_read(vcpu), kvm_read_edx_eax(vcpu))) {
1315 kvm_inject_gp(vcpu, 0);
1316 return 1;
1317 }
1318
1319 return kvm_skip_emulated_instruction(vcpu);
1320 }
1321 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_xsetbv);
1322
1323 static bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1324 {
1325 return __kvm_is_valid_cr4(vcpu, cr4) &&
1326 kvm_x86_call(is_valid_cr4)(vcpu, cr4);
1327 }
1328
1329 void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4)
1330 {
1331 if ((cr4 ^ old_cr4) & KVM_MMU_CR4_ROLE_BITS)
1332 kvm_mmu_reset_context(vcpu);
1333
1334 /*
1335 * If CR4.PCIDE is changed 0 -> 1, there is no need to flush the TLB
1336 * according to the SDM; however, stale prev_roots could be reused
1337 * incorrectly in the future after a MOV to CR3 with NOFLUSH=1, so we
1338 * free them all. This is *not* a superset of KVM_REQ_TLB_FLUSH_GUEST
1339 * or KVM_REQ_TLB_FLUSH_CURRENT, because the hardware TLB is not flushed,
1340 * so fall through.
1341 */
1342 if (!tdp_enabled &&
1343 (cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE))
1344 kvm_mmu_unload(vcpu);
1345
1346 /*
1347 * The TLB has to be flushed for all PCIDs if any of the following
1348 * (architecturally required) changes happen:
1349 * - CR4.PCIDE is changed from 1 to 0
1350 * - CR4.PGE is toggled
1351 *
1352 * This is a superset of KVM_REQ_TLB_FLUSH_CURRENT.
1353 */
1354 if (((cr4 ^ old_cr4) & X86_CR4_PGE) ||
1355 (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
1356 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
1357
1358 /*
1359 * The TLB has to be flushed for the current PCID if any of the
1360 * following (architecturally required) changes happen:
1361 * - CR4.SMEP is changed from 0 to 1
1362 * - CR4.PAE is toggled
1363 */
1364 else if (((cr4 ^ old_cr4) & X86_CR4_PAE) ||
1365 ((cr4 & X86_CR4_SMEP) && !(old_cr4 & X86_CR4_SMEP)))
1366 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
1367
1368 }
1369 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_post_set_cr4);
1370
1371 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1372 {
1373 unsigned long old_cr4 = kvm_read_cr4(vcpu);
1374
1375 if (!kvm_is_valid_cr4(vcpu, cr4))
1376 return 1;
1377
1378 if (is_long_mode(vcpu)) {
1379 if (!(cr4 & X86_CR4_PAE))
1380 return 1;
1381 if ((cr4 ^ old_cr4) & X86_CR4_LA57)
1382 return 1;
1383 } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
1384 && ((cr4 ^ old_cr4) & X86_CR4_PDPTR_BITS)
1385 && !load_pdptrs(vcpu, kvm_read_cr3(vcpu)))
1386 return 1;
1387
1388 if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
1389 /* PCID cannot be enabled when CR3[11:0] != 000H or EFER.LMA = 0 */
1390 if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
1391 return 1;
1392 }
1393
1394 if ((cr4 & X86_CR4_CET) && !kvm_is_cr0_bit_set(vcpu, X86_CR0_WP))
1395 return 1;
1396
1397 kvm_x86_call(set_cr4)(vcpu, cr4);
1398
1399 kvm_post_set_cr4(vcpu, old_cr4, cr4);
1400
1401 return 0;
1402 }
1403 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_cr4);
1404
1405 static void kvm_invalidate_pcid(struct kvm_vcpu *vcpu, unsigned long pcid)
1406 {
1407 struct kvm_mmu *mmu = vcpu->arch.mmu;
1408 unsigned long roots_to_free = 0;
1409 int i;
1410
1411 /*
1412 * MOV CR3 and INVPCID are usually not intercepted when using TDP, but
1413 * this is reachable when running EPT=1 and unrestricted_guest=0, and
1414 * also via the emulator. KVM's TDP page tables are not in the scope of
1415 * the invalidation, but the guest's TLB entries need to be flushed as
1416 * the CPU may have cached entries in its TLB for the target PCID.
1417 */
1418 if (unlikely(tdp_enabled)) {
1419 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
1420 return;
1421 }
1422
1423 /*
1424 * If neither the current CR3 nor any of the prev_roots use the given
1425 * PCID, then nothing needs to be done here because a resync will
1426 * happen anyway before switching to any other CR3.
1427 */
1428 if (kvm_get_active_pcid(vcpu) == pcid) {
1429 kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
1430 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
1431 }
1432
1433 /*
1434 * If PCID is disabled, there is no need to free prev_roots even if the
1435 * PCIDs for them are also 0, because MOV to CR3 always flushes the TLB
1436 * with PCIDE=0.
1437 */
1438 if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE))
1439 return;
1440
1441 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
1442 if (kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd) == pcid)
1443 roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
1444
1445 kvm_mmu_free_roots(vcpu->kvm, mmu, roots_to_free);
1446 }
1447
1448 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
1449 {
1450 bool skip_tlb_flush = false;
1451 unsigned long pcid = 0;
1452 #ifdef CONFIG_X86_64
1453 if (kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)) {
1454 skip_tlb_flush = cr3 & X86_CR3_PCID_NOFLUSH;
1455 cr3 &= ~X86_CR3_PCID_NOFLUSH;
1456 pcid = cr3 & X86_CR3_PCID_MASK;
1457 }
1458 #endif
1459
1460 /* PDPTRs are always reloaded for PAE paging. */
1461 if (cr3 == kvm_read_cr3(vcpu) && !is_pae_paging(vcpu))
1462 goto handle_tlb_flush;
1463
1464 /*
1465 * Do not condition the GPA check on long mode, this helper is used to
1466 * stuff CR3, e.g. for RSM emulation, and there is no guarantee that
1467 * the current vCPU mode is accurate.
1468 */
1469 if (!kvm_vcpu_is_legal_cr3(vcpu, cr3))
1470 return 1;
1471
1472 if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, cr3))
1473 return 1;
1474
1475 if (cr3 != kvm_read_cr3(vcpu))
1476 kvm_mmu_new_pgd(vcpu, cr3);
1477
1478 vcpu->arch.cr3 = cr3;
1479 kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
1480 /* Do not call post_set_cr3, we do not get here for confidential guests. */
1481
1482 handle_tlb_flush:
1483 /*
1484 * A load of CR3 that flushes the TLB flushes only the current PCID,
1485 * even if PCID is disabled, in which case PCID=0 is flushed. It's a
1486 * moot point in the end because _disabling_ PCID will flush all PCIDs,
1487 * and it's impossible to use a non-zero PCID when PCID is disabled,
1488 * i.e. only PCID=0 can be relevant.
1489 */
1490 if (!skip_tlb_flush)
1491 kvm_invalidate_pcid(vcpu, pcid);
1492
1493 return 0;
1494 }
1495 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_cr3);
1496
1497 int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
1498 {
1499 if (cr8 & CR8_RESERVED_BITS)
1500 return 1;
1501 if (lapic_in_kernel(vcpu))
1502 kvm_lapic_set_tpr(vcpu, cr8);
1503 else
1504 vcpu->arch.cr8 = cr8;
1505 return 0;
1506 }
1507 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_cr8);
1508
1509 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
1510 {
1511 if (lapic_in_kernel(vcpu))
1512 return kvm_lapic_get_cr8(vcpu);
1513 else
1514 return vcpu->arch.cr8;
1515 }
1516 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_get_cr8);
1517
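/*
 * Sync the guest's DR0-DR3 into the effective debug registers, unless
 * userspace is using hardware breakpoints to debug the guest.
 */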
1518 static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
1519 {
1520 int i;
1521
1522 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
1523 for (i = 0; i < KVM_NR_DB_REGS; i++)
1524 vcpu->arch.eff_db[i] = vcpu->arch.db[i];
1525 }
1526 }
1527
1528 void kvm_update_dr7(struct kvm_vcpu *vcpu)
1529 {
1530 unsigned long dr7;
1531
1532 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
1533 dr7 = vcpu->arch.guest_debug_dr7;
1534 else
1535 dr7 = vcpu->arch.dr7;
1536 kvm_x86_call(set_dr7)(vcpu, dr7);
1537 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED;
1538 if (dr7 & DR7_BP_EN_MASK)
1539 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED;
1540 }
1541 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_update_dr7);
1542
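/*
 * DR6 bits that must read as '1' for this vCPU: the RTM and bus-lock-detect
 * bits are reserved-1 unless the guest has the respective feature.
 */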
1543 static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
1544 {
1545 u64 fixed = DR6_FIXED_1;
1546
1547 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_RTM))
1548 fixed |= DR6_RTM;
1549
1550 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT))
1551 fixed |= DR6_BUS_LOCK;
1552 return fixed;
1553 }
1554
1555 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
1556 {
1557 size_t size = ARRAY_SIZE(vcpu->arch.db);
1558
1559 switch (dr) {
1560 case 0 ... 3:
1561 vcpu->arch.db[array_index_nospec(dr, size)] = val;
1562 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
1563 vcpu->arch.eff_db[dr] = val;
1564 break;
1565 case 4:
1566 case 6:
1567 if (!kvm_dr6_valid(val))
1568 return 1; /* #GP */
1569 vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu);
1570 break;
1571 case 5:
1572 default: /* 7 */
1573 if (!kvm_dr7_valid(val))
1574 return 1; /* #GP */
1575 vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
1576 kvm_update_dr7(vcpu);
1577 break;
1578 }
1579
1580 return 0;
1581 }
1582 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_dr);
1583
1584 unsigned long kvm_get_dr(struct kvm_vcpu *vcpu, int dr)
1585 {
1586 size_t size = ARRAY_SIZE(vcpu->arch.db);
1587
1588 switch (dr) {
1589 case 0 ... 3:
1590 return vcpu->arch.db[array_index_nospec(dr, size)];
1591 case 4:
1592 case 6:
1593 return vcpu->arch.dr6;
1594 case 5:
1595 default: /* 7 */
1596 return vcpu->arch.dr7;
1597 }
1598 }
1599 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_get_dr);
1600
1601 int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu)
1602 {
1603 u32 pmc = kvm_rcx_read(vcpu);
1604 u64 data;
1605
1606 if (kvm_pmu_rdpmc(vcpu, pmc, &data)) {
1607 kvm_inject_gp(vcpu, 0);
1608 return 1;
1609 }
1610
1611 kvm_rax_write(vcpu, (u32)data);
1612 kvm_rdx_write(vcpu, data >> 32);
1613 return kvm_skip_emulated_instruction(vcpu);
1614 }
1615 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_rdpmc);
1616
1617 /*
1618 * Some IA32_ARCH_CAPABILITIES bits have dependencies on MSRs that KVM
1619 * does not yet virtualize. These include:
1620 * 10 - MISC_PACKAGE_CTRLS
1621 * 11 - ENERGY_FILTERING_CTL
1622 * 12 - DOITM
1623 * 18 - FB_CLEAR_CTRL
1624 * 21 - XAPIC_DISABLE_STATUS
1625 * 23 - OVERCLOCKING_STATUS
1626 */
1627
1628 #define KVM_SUPPORTED_ARCH_CAP \
1629 (ARCH_CAP_RDCL_NO | ARCH_CAP_IBRS_ALL | ARCH_CAP_RSBA | \
1630 ARCH_CAP_SKIP_VMENTRY_L1DFLUSH | ARCH_CAP_SSB_NO | ARCH_CAP_MDS_NO | \
1631 ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \
1632 ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \
1633 ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO | ARCH_CAP_GDS_NO | \
1634 ARCH_CAP_RFDS_NO | ARCH_CAP_RFDS_CLEAR | ARCH_CAP_BHI_NO | ARCH_CAP_ITS_NO)
1635
1636 static u64 kvm_get_arch_capabilities(void)
1637 {
1638 u64 data = kvm_host.arch_capabilities & KVM_SUPPORTED_ARCH_CAP;
1639
1640 /*
1641 * If nx_huge_pages is enabled, KVM's shadow paging will ensure that
1642 * the nested hypervisor runs with NX huge pages. If it is not,
1643 * L1 is anyway vulnerable to ITLB_MULTIHIT exploits from other
1644 * L1 guests, so it need not worry about its own (L2) guests.
1645 */
1646 data |= ARCH_CAP_PSCHANGE_MC_NO;
1647
1648 /*
1649 * If we're doing cache flushes (either "always" or "cond")
1650 * we will do one whenever the guest does a vmlaunch/vmresume.
1651 * If an outer hypervisor is doing the cache flush for us
1652 * (ARCH_CAP_SKIP_VMENTRY_L1DFLUSH), we can safely pass that
1653 * capability to the guest too, and if EPT is disabled we're not
1654 * vulnerable. Overall, only VMENTER_L1D_FLUSH_NEVER will
1655 * require a nested hypervisor to do a flush of its own.
1656 */
1657 if (l1tf_vmx_mitigation != VMENTER_L1D_FLUSH_NEVER)
1658 data |= ARCH_CAP_SKIP_VMENTRY_L1DFLUSH;
1659
1660 if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
1661 data |= ARCH_CAP_RDCL_NO;
1662 if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
1663 data |= ARCH_CAP_SSB_NO;
1664 if (!boot_cpu_has_bug(X86_BUG_MDS))
1665 data |= ARCH_CAP_MDS_NO;
1666 if (!boot_cpu_has_bug(X86_BUG_RFDS))
1667 data |= ARCH_CAP_RFDS_NO;
1668 if (!boot_cpu_has_bug(X86_BUG_ITS))
1669 data |= ARCH_CAP_ITS_NO;
1670
1671 if (!boot_cpu_has(X86_FEATURE_RTM)) {
1672 /*
1673 * If RTM=0 because the kernel has disabled TSX, the host might
1674 * have TAA_NO or TSX_CTRL. Clear TAA_NO (the guest sees RTM=0
1675 * and therefore knows that there cannot be TAA) but keep
1676 * TSX_CTRL: some buggy userspaces leave it set on tsx=on hosts,
1677 * and we want to allow migrating those guests to tsx=off hosts.
1678 */
1679 data &= ~ARCH_CAP_TAA_NO;
1680 } else if (!boot_cpu_has_bug(X86_BUG_TAA)) {
1681 data |= ARCH_CAP_TAA_NO;
1682 } else {
1683 /*
1684 * Nothing to do here; we emulate TSX_CTRL if present on the
1685 * host so the guest can choose between disabling TSX or
1686 * using VERW to clear CPU buffers.
1687 */
1688 }
1689
1690 if (!boot_cpu_has_bug(X86_BUG_GDS) || gds_ucode_mitigated())
1691 data |= ARCH_CAP_GDS_NO;
1692
1693 return data;
1694 }
1695
kvm_get_feature_msr(struct kvm_vcpu * vcpu,u32 index,u64 * data,bool host_initiated)1696 static int kvm_get_feature_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
1697 bool host_initiated)
1698 {
1699 WARN_ON_ONCE(!host_initiated);
1700
1701 switch (index) {
1702 case MSR_IA32_ARCH_CAPABILITIES:
1703 *data = kvm_get_arch_capabilities();
1704 break;
1705 case MSR_IA32_PERF_CAPABILITIES:
1706 *data = kvm_caps.supported_perf_cap;
1707 break;
1708 case MSR_PLATFORM_INFO:
1709 *data = MSR_PLATFORM_INFO_CPUID_FAULT;
1710 break;
1711 case MSR_IA32_UCODE_REV:
1712 rdmsrq_safe(index, data);
1713 break;
1714 default:
1715 return kvm_x86_call(get_feature_msr)(index, data);
1716 }
1717 return 0;
1718 }
1719
do_get_feature_msr(struct kvm_vcpu * vcpu,unsigned index,u64 * data)1720 static int do_get_feature_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
1721 {
1722 return kvm_do_msr_access(vcpu, index, data, true, MSR_TYPE_R,
1723 kvm_get_feature_msr);
1724 }
1725
__kvm_valid_efer(struct kvm_vcpu * vcpu,u64 efer)1726 static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
1727 {
1728 if (efer & EFER_AUTOIBRS && !guest_cpu_cap_has(vcpu, X86_FEATURE_AUTOIBRS))
1729 return false;
1730
1731 if (efer & EFER_FFXSR && !guest_cpu_cap_has(vcpu, X86_FEATURE_FXSR_OPT))
1732 return false;
1733
1734 if (efer & EFER_SVME && !guest_cpu_cap_has(vcpu, X86_FEATURE_SVM))
1735 return false;
1736
1737 if (efer & (EFER_LME | EFER_LMA) &&
1738 !guest_cpu_cap_has(vcpu, X86_FEATURE_LM))
1739 return false;
1740
1741 if (efer & EFER_NX && !guest_cpu_cap_has(vcpu, X86_FEATURE_NX))
1742 return false;
1743
1744 return true;
1745
1746 }
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & efer_reserved_bits)
		return false;

	return __kvm_valid_efer(vcpu, efer);
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_valid_efer);

static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	u64 old_efer = vcpu->arch.efer;
	u64 efer = msr_info->data;
	int r;

	if (efer & efer_reserved_bits)
		return 1;

	if (!msr_info->host_initiated) {
		if (!__kvm_valid_efer(vcpu, efer))
			return 1;

		if (is_paging(vcpu) &&
		    (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
			return 1;
	}

	efer &= ~EFER_LMA;
	efer |= vcpu->arch.efer & EFER_LMA;

	r = kvm_x86_call(set_efer)(vcpu, efer);
	if (r) {
		WARN_ON(r > 0);
		return r;
	}

	if ((efer ^ old_efer) & KVM_MMU_EFER_ROLE_BITS)
		kvm_mmu_reset_context(vcpu);

	if (!static_cpu_has(X86_FEATURE_XSAVES) &&
	    (efer & EFER_SVME))
		kvm_hv_xsaves_xsavec_maybe_warn(vcpu);

	return 0;
}

void kvm_enable_efer_bits(u64 mask)
{
	efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_enable_efer_bits);

bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type)
{
	struct kvm_x86_msr_filter *msr_filter;
	struct msr_bitmap_range *ranges;
	struct kvm *kvm = vcpu->kvm;
	bool allowed;
	int idx;
	u32 i;

	/* x2APIC MSRs do not support filtering. */
	if (index >= 0x800 && index <= 0x8ff)
		return true;

	idx = srcu_read_lock(&kvm->srcu);

	msr_filter = srcu_dereference(kvm->arch.msr_filter, &kvm->srcu);
	if (!msr_filter) {
		allowed = true;
		goto out;
	}

	allowed = msr_filter->default_allow;
	ranges = msr_filter->ranges;

	for (i = 0; i < msr_filter->count; i++) {
		u32 start = ranges[i].base;
		u32 end = start + ranges[i].nmsrs;
		u32 flags = ranges[i].flags;
		unsigned long *bitmap = ranges[i].bitmap;

		if ((index >= start) && (index < end) && (flags & type)) {
			allowed = test_bit(index - start, bitmap);
			break;
		}
	}

out:
	srcu_read_unlock(&kvm->srcu, idx);

	return allowed;
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_msr_allowed);
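
/*
 * Illustrative sketch (not part of KVM) of the filter layout consumed by
 * kvm_msr_allowed(): each range covers nmsrs MSRs starting at base, and
 * bit i of the bitmap decides MSR base + i (set bit == allow).  A
 * userspace filter denying guest reads of one MSR could look like:
 *
 *	__u8 bitmap[1] = { 0x00 };		// bit 0 clear: deny
 *	struct kvm_msr_filter_range range = {
 *		.flags = KVM_MSR_FILTER_READ,
 *		.base  = MSR_IA32_UCODE_REV,
 *		.nmsrs = 1,
 *		.bitmap = bitmap,
 *	};
 *	// installed from userspace via the KVM_X86_SET_MSR_FILTER ioctl
 */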

/*
 * Write @data into the MSR specified by @index.  Select MSR specific fault
 * checks are bypassed if @host_initiated is %true.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
			 bool host_initiated)
{
	struct msr_data msr;

	switch (index) {
	case MSR_FS_BASE:
	case MSR_GS_BASE:
	case MSR_KERNEL_GS_BASE:
	case MSR_CSTAR:
	case MSR_LSTAR:
		if (is_noncanonical_msr_address(data, vcpu))
			return 1;
		break;
	case MSR_IA32_SYSENTER_EIP:
	case MSR_IA32_SYSENTER_ESP:
		/*
		 * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
		 * non-canonical address is written on Intel but not on
		 * AMD (which ignores the top 32-bits, because it does
		 * not implement 64-bit SYSENTER).
		 *
		 * 64-bit code should hence be able to write a non-canonical
		 * value on AMD.  Making the address canonical ensures that
		 * vmentry does not fail on Intel after writing a non-canonical
		 * value, and that something deterministic happens if the guest
		 * invokes 64-bit SYSENTER.
		 */
		data = __canonical_address(data, max_host_virt_addr_bits());
		break;
	case MSR_TSC_AUX:
		if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX))
			return 1;

		if (!host_initiated &&
		    !guest_cpu_cap_has(vcpu, X86_FEATURE_RDTSCP) &&
		    !guest_cpu_cap_has(vcpu, X86_FEATURE_RDPID))
			return 1;

		/*
		 * Per Intel's SDM, bits 63:32 are reserved, but AMD's APM has
		 * incomplete and conflicting architectural behavior.  Current
		 * AMD CPUs completely ignore bits 63:32, i.e. they aren't
		 * reserved and always read as zeros.  Enforce Intel's reserved
		 * bits check if the guest CPU is Intel compatible, otherwise
		 * clear the bits.  This ensures cross-vendor migration will
		 * provide consistent behavior for the guest.
		 */
		if (guest_cpuid_is_intel_compatible(vcpu) && (data >> 32) != 0)
			return 1;

		data = (u32)data;
		break;
	case MSR_IA32_U_CET:
	case MSR_IA32_S_CET:
		if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK) &&
		    !guest_cpu_cap_has(vcpu, X86_FEATURE_IBT))
			return KVM_MSR_RET_UNSUPPORTED;
		if (!kvm_is_valid_u_s_cet(vcpu, data))
			return 1;
		break;
	case MSR_KVM_INTERNAL_GUEST_SSP:
		if (!host_initiated)
			return 1;
		fallthrough;
		/*
		 * Note that the MSR emulation here is flawed when a vCPU
		 * doesn't support the Intel 64 architecture.  The expected
		 * architectural behavior in this case is that the upper 32
		 * bits do not exist and should always read '0'.  However,
		 * because the actual hardware on which the virtual CPU is
		 * running does support Intel 64, XRSTORS/XSAVES in the
		 * guest could observe behavior that violates the
		 * architecture.  Intercepting XRSTORS/XSAVES for this
		 * special case isn't deemed worthwhile.
		 */
	case MSR_IA32_PL0_SSP ... MSR_IA32_INT_SSP_TAB:
		if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK))
			return KVM_MSR_RET_UNSUPPORTED;
		/*
		 * MSR_IA32_INT_SSP_TAB is not present on processors that do
		 * not support Intel 64 architecture.
		 */
		if (index == MSR_IA32_INT_SSP_TAB && !guest_cpu_cap_has(vcpu, X86_FEATURE_LM))
			return KVM_MSR_RET_UNSUPPORTED;
		if (is_noncanonical_msr_address(data, vcpu))
			return 1;
		/* All SSP MSRs except MSR_IA32_INT_SSP_TAB must be 4-byte aligned */
		if (index != MSR_IA32_INT_SSP_TAB && !IS_ALIGNED(data, 4))
			return 1;
		break;
	}

	msr.data = data;
	msr.index = index;
	msr.host_initiated = host_initiated;

	return kvm_x86_call(set_msr)(vcpu, &msr);
}

static int _kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
			bool host_initiated)
{
	return __kvm_set_msr(vcpu, index, *data, host_initiated);
}

static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu,
				     u32 index, u64 data, bool host_initiated)
{
	return kvm_do_msr_access(vcpu, index, &data, host_initiated, MSR_TYPE_W,
				 _kvm_set_msr);
}

/*
 * Read the MSR specified by @index into @data.  Select MSR specific fault
 * checks are bypassed if @host_initiated is %true.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
			 bool host_initiated)
{
	struct msr_data msr;
	int ret;

	switch (index) {
	case MSR_TSC_AUX:
		if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX))
			return 1;

		if (!host_initiated &&
		    !guest_cpu_cap_has(vcpu, X86_FEATURE_RDTSCP) &&
		    !guest_cpu_cap_has(vcpu, X86_FEATURE_RDPID))
			return 1;
		break;
	case MSR_IA32_U_CET:
	case MSR_IA32_S_CET:
		if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK) &&
		    !guest_cpu_cap_has(vcpu, X86_FEATURE_IBT))
			return KVM_MSR_RET_UNSUPPORTED;
		break;
	case MSR_KVM_INTERNAL_GUEST_SSP:
		if (!host_initiated)
			return 1;
		fallthrough;
	case MSR_IA32_PL0_SSP ... MSR_IA32_INT_SSP_TAB:
		if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK))
			return KVM_MSR_RET_UNSUPPORTED;
		break;
	}

	msr.index = index;
	msr.host_initiated = host_initiated;

	ret = kvm_x86_call(get_msr)(vcpu, &msr);
	if (!ret)
		*data = msr.data;
	return ret;
}

int kvm_msr_write(struct kvm_vcpu *vcpu, u32 index, u64 data)
{
	return __kvm_set_msr(vcpu, index, data, true);
}

int kvm_msr_read(struct kvm_vcpu *vcpu, u32 index, u64 *data)
{
	return __kvm_get_msr(vcpu, index, data, true);
}

static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu,
				     u32 index, u64 *data, bool host_initiated)
{
	return kvm_do_msr_access(vcpu, index, data, host_initiated, MSR_TYPE_R,
				 __kvm_get_msr);
}

int __kvm_emulate_msr_read(struct kvm_vcpu *vcpu, u32 index, u64 *data)
{
	return kvm_get_msr_ignored_check(vcpu, index, data, false);
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(__kvm_emulate_msr_read);

int __kvm_emulate_msr_write(struct kvm_vcpu *vcpu, u32 index, u64 data)
{
	return kvm_set_msr_ignored_check(vcpu, index, data, false);
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(__kvm_emulate_msr_write);

int kvm_emulate_msr_read(struct kvm_vcpu *vcpu, u32 index, u64 *data)
{
	if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ))
		return KVM_MSR_RET_FILTERED;

	return __kvm_emulate_msr_read(vcpu, index, data);
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_msr_read);

int kvm_emulate_msr_write(struct kvm_vcpu *vcpu, u32 index, u64 data)
{
	if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE))
		return KVM_MSR_RET_FILTERED;

	return __kvm_emulate_msr_write(vcpu, index, data);
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_msr_write);

static void complete_userspace_rdmsr(struct kvm_vcpu *vcpu)
{
	if (!vcpu->run->msr.error) {
		kvm_rax_write(vcpu, (u32)vcpu->run->msr.data);
		kvm_rdx_write(vcpu, vcpu->run->msr.data >> 32);
	}
}

static int complete_emulated_msr_access(struct kvm_vcpu *vcpu)
{
	return complete_emulated_insn_gp(vcpu, vcpu->run->msr.error);
}

static int complete_emulated_rdmsr(struct kvm_vcpu *vcpu)
{
	complete_userspace_rdmsr(vcpu);
	return complete_emulated_msr_access(vcpu);
}

static int complete_fast_msr_access(struct kvm_vcpu *vcpu)
{
	return kvm_x86_call(complete_emulated_msr)(vcpu, vcpu->run->msr.error);
}

static int complete_fast_rdmsr(struct kvm_vcpu *vcpu)
{
	complete_userspace_rdmsr(vcpu);
	return complete_fast_msr_access(vcpu);
}

static int complete_fast_rdmsr_imm(struct kvm_vcpu *vcpu)
{
	if (!vcpu->run->msr.error)
		kvm_register_write(vcpu, vcpu->arch.cui_rdmsr_imm_reg,
				   vcpu->run->msr.data);

	return complete_fast_msr_access(vcpu);
}

static u64 kvm_msr_reason(int r)
{
	switch (r) {
	case KVM_MSR_RET_UNSUPPORTED:
		return KVM_MSR_EXIT_REASON_UNKNOWN;
	case KVM_MSR_RET_FILTERED:
		return KVM_MSR_EXIT_REASON_FILTER;
	default:
		return KVM_MSR_EXIT_REASON_INVAL;
	}
}

static int kvm_msr_user_space(struct kvm_vcpu *vcpu, u32 index,
			      u32 exit_reason, u64 data,
			      int (*completion)(struct kvm_vcpu *vcpu),
			      int r)
{
	u64 msr_reason = kvm_msr_reason(r);

	/* Check if the user wanted to know about this MSR fault */
	if (!(vcpu->kvm->arch.user_space_msr_mask & msr_reason))
		return 0;

	vcpu->run->exit_reason = exit_reason;
	vcpu->run->msr.error = 0;
	memset(vcpu->run->msr.pad, 0, sizeof(vcpu->run->msr.pad));
	vcpu->run->msr.reason = msr_reason;
	vcpu->run->msr.index = index;
	vcpu->run->msr.data = data;
	vcpu->arch.complete_userspace_io = completion;

	return 1;
}
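
/*
 * Illustrative sketch (not part of KVM) of the userspace side of the exit
 * that kvm_msr_user_space() prepares.  A VMM that enabled
 * KVM_CAP_X86_USER_SPACE_MSR might handle the exit roughly as:
 *
 *	if (run->exit_reason == KVM_EXIT_X86_RDMSR) {
 *		run->msr.error = vmm_rdmsr(run->msr.index,	// hypothetical
 *					   &run->msr.data);
 *	} else if (run->exit_reason == KVM_EXIT_X86_WRMSR) {
 *		run->msr.error = vmm_wrmsr(run->msr.index,	// hypothetical
 *					   run->msr.data);
 *	}
 *	ioctl(vcpu_fd, KVM_RUN, 0);	// completion callback consumes msr.error
 */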

static int __kvm_emulate_rdmsr(struct kvm_vcpu *vcpu, u32 msr, int reg,
			       int (*complete_rdmsr)(struct kvm_vcpu *))
{
	u64 data;
	int r;

	r = kvm_emulate_msr_read(vcpu, msr, &data);

	if (!r) {
		trace_kvm_msr_read(msr, data);

		if (reg < 0) {
			kvm_rax_write(vcpu, data & -1u);
			kvm_rdx_write(vcpu, (data >> 32) & -1u);
		} else {
			kvm_register_write(vcpu, reg, data);
		}
	} else {
		/* MSR read failed?  See if we should ask user space */
		if (kvm_msr_user_space(vcpu, msr, KVM_EXIT_X86_RDMSR, 0,
				       complete_rdmsr, r))
			return 0;
		trace_kvm_msr_read_ex(msr);
	}

	return kvm_x86_call(complete_emulated_msr)(vcpu, r);
}

int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu)
{
	return __kvm_emulate_rdmsr(vcpu, kvm_rcx_read(vcpu), -1,
				   complete_fast_rdmsr);
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_rdmsr);

int kvm_emulate_rdmsr_imm(struct kvm_vcpu *vcpu, u32 msr, int reg)
{
	vcpu->arch.cui_rdmsr_imm_reg = reg;

	return __kvm_emulate_rdmsr(vcpu, msr, reg, complete_fast_rdmsr_imm);
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_rdmsr_imm);

static int __kvm_emulate_wrmsr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	int r;

	r = kvm_emulate_msr_write(vcpu, msr, data);
	if (!r) {
		trace_kvm_msr_write(msr, data);
	} else {
		/* MSR write failed?  See if we should ask user space */
		if (kvm_msr_user_space(vcpu, msr, KVM_EXIT_X86_WRMSR, data,
				       complete_fast_msr_access, r))
			return 0;
		/* Signal all other negative errors to userspace */
		if (r < 0)
			return r;
		trace_kvm_msr_write_ex(msr, data);
	}

	return kvm_x86_call(complete_emulated_msr)(vcpu, r);
}

int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
{
	return __kvm_emulate_wrmsr(vcpu, kvm_rcx_read(vcpu),
				   kvm_read_edx_eax(vcpu));
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_wrmsr);

int kvm_emulate_wrmsr_imm(struct kvm_vcpu *vcpu, u32 msr, int reg)
{
	return __kvm_emulate_wrmsr(vcpu, msr, kvm_register_read(vcpu, reg));
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_wrmsr_imm);

int kvm_emulate_as_nop(struct kvm_vcpu *vcpu)
{
	return kvm_skip_emulated_instruction(vcpu);
}

int kvm_emulate_invd(struct kvm_vcpu *vcpu)
{
	/* Treat an INVD instruction as a NOP and just skip it. */
	return kvm_emulate_as_nop(vcpu);
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_invd);

fastpath_t handle_fastpath_invd(struct kvm_vcpu *vcpu)
{
	if (!kvm_pmu_is_fastpath_emulation_allowed(vcpu))
		return EXIT_FASTPATH_NONE;

	if (!kvm_emulate_invd(vcpu))
		return EXIT_FASTPATH_EXIT_USERSPACE;

	return EXIT_FASTPATH_REENTER_GUEST;
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(handle_fastpath_invd);

int kvm_handle_invalid_op(struct kvm_vcpu *vcpu)
{
	kvm_queue_exception(vcpu, UD_VECTOR);
	return 1;
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_handle_invalid_op);

static int kvm_emulate_monitor_mwait(struct kvm_vcpu *vcpu, const char *insn)
{
	bool enabled;

	if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS))
		goto emulate_as_nop;

	if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT))
		enabled = guest_cpu_cap_has(vcpu, X86_FEATURE_MWAIT);
	else
		enabled = vcpu->arch.ia32_misc_enable_msr & MSR_IA32_MISC_ENABLE_MWAIT;

	if (!enabled)
		return kvm_handle_invalid_op(vcpu);

emulate_as_nop:
	pr_warn_once("%s instruction emulated as NOP!\n", insn);
	return kvm_emulate_as_nop(vcpu);
}

int kvm_emulate_mwait(struct kvm_vcpu *vcpu)
{
	return kvm_emulate_monitor_mwait(vcpu, "MWAIT");
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_mwait);

int kvm_emulate_monitor(struct kvm_vcpu *vcpu)
{
	return kvm_emulate_monitor_mwait(vcpu, "MONITOR");
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_monitor);

static inline bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
{
	xfer_to_guest_mode_prepare();

	return READ_ONCE(vcpu->mode) == EXITING_GUEST_MODE ||
	       kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending();
}

static fastpath_t __handle_fastpath_wrmsr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	if (!kvm_pmu_is_fastpath_emulation_allowed(vcpu))
		return EXIT_FASTPATH_NONE;

	switch (msr) {
	case APIC_BASE_MSR + (APIC_ICR >> 4):
		if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(vcpu->arch.apic) ||
		    kvm_x2apic_icr_write_fast(vcpu->arch.apic, data))
			return EXIT_FASTPATH_NONE;
		break;
	case MSR_IA32_TSC_DEADLINE:
		kvm_set_lapic_tscdeadline_msr(vcpu, data);
		break;
	default:
		return EXIT_FASTPATH_NONE;
	}

	trace_kvm_msr_write(msr, data);

	if (!kvm_skip_emulated_instruction(vcpu))
		return EXIT_FASTPATH_EXIT_USERSPACE;

	return EXIT_FASTPATH_REENTER_GUEST;
}

fastpath_t handle_fastpath_wrmsr(struct kvm_vcpu *vcpu)
{
	return __handle_fastpath_wrmsr(vcpu, kvm_rcx_read(vcpu),
				       kvm_read_edx_eax(vcpu));
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(handle_fastpath_wrmsr);

fastpath_t handle_fastpath_wrmsr_imm(struct kvm_vcpu *vcpu, u32 msr, int reg)
{
	return __handle_fastpath_wrmsr(vcpu, msr, kvm_register_read(vcpu, reg));
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(handle_fastpath_wrmsr_imm);

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	return kvm_get_msr_ignored_check(vcpu, index, data, true);
}

static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	u64 val;

	/*
	 * Reject writes to immutable feature MSRs if the vCPU model is frozen,
	 * as KVM doesn't support modifying the guest vCPU model on the fly,
	 * e.g. changing the VMX capabilities MSRs while L2 is active is
	 * nonsensical.  Allow writes of the same value, e.g. so that userspace
	 * can blindly stuff all MSRs when emulating RESET.
	 */
	if (!kvm_can_set_cpuid_and_feature_msrs(vcpu) &&
	    kvm_is_immutable_feature_msr(index) &&
	    (do_get_msr(vcpu, index, &val) || *data != val))
		return -EINVAL;

	return kvm_set_msr_ignored_check(vcpu, index, *data, true);
}

#ifdef CONFIG_X86_64
struct pvclock_clock {
	int vclock_mode;
	u64 cycle_last;
	u64 mask;
	u32 mult;
	u32 shift;
	u64 base_cycles;
	u64 offset;
};

struct pvclock_gtod_data {
	seqcount_t seq;

	struct pvclock_clock clock;	/* extract of a clocksource struct */
	struct pvclock_clock raw_clock;	/* extract of a clocksource struct */

	ktime_t offs_boot;
	u64 wall_time_sec;
};

static struct pvclock_gtod_data pvclock_gtod_data;

static void update_pvclock_gtod(struct timekeeper *tk)
{
	struct pvclock_gtod_data *vdata = &pvclock_gtod_data;

	write_seqcount_begin(&vdata->seq);

	/* copy pvclock gtod data */
	vdata->clock.vclock_mode = tk->tkr_mono.clock->vdso_clock_mode;
	vdata->clock.cycle_last = tk->tkr_mono.cycle_last;
	vdata->clock.mask = tk->tkr_mono.mask;
	vdata->clock.mult = tk->tkr_mono.mult;
	vdata->clock.shift = tk->tkr_mono.shift;
	vdata->clock.base_cycles = tk->tkr_mono.xtime_nsec;
	vdata->clock.offset = tk->tkr_mono.base;

	vdata->raw_clock.vclock_mode = tk->tkr_raw.clock->vdso_clock_mode;
	vdata->raw_clock.cycle_last = tk->tkr_raw.cycle_last;
	vdata->raw_clock.mask = tk->tkr_raw.mask;
	vdata->raw_clock.mult = tk->tkr_raw.mult;
	vdata->raw_clock.shift = tk->tkr_raw.shift;
	vdata->raw_clock.base_cycles = tk->tkr_raw.xtime_nsec;
	vdata->raw_clock.offset = tk->tkr_raw.base;

	vdata->wall_time_sec = tk->xtime_sec;

	vdata->offs_boot = tk->offs_boot;

	write_seqcount_end(&vdata->seq);
}

static s64 get_kvmclock_base_ns(void)
{
	/* Count up from boot time, but with the frequency of the raw clock. */
	return ktime_to_ns(ktime_add(ktime_get_raw(), pvclock_gtod_data.offs_boot));
}
#else
static s64 get_kvmclock_base_ns(void)
{
	/* Master clock not used, so we can just use CLOCK_BOOTTIME. */
	return ktime_get_boottime_ns();
}
#endif

static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock, int sec_hi_ofs)
{
	int version;
	int r;
	struct pvclock_wall_clock wc;
	u32 wc_sec_hi;
	u64 wall_nsec;

	if (!wall_clock)
		return;

	r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
	if (r)
		return;

	if (version & 1)
		++version;  /* first time write, random junk */

	++version;

	if (kvm_write_guest(kvm, wall_clock, &version, sizeof(version)))
		return;

	wall_nsec = kvm_get_wall_clock_epoch(kvm);

	wc.nsec = do_div(wall_nsec, NSEC_PER_SEC);
	wc.sec = (u32)wall_nsec; /* overflow in 2106 guest time */
	wc.version = version;

	kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));

	if (sec_hi_ofs) {
		wc_sec_hi = wall_nsec >> 32;
		kvm_write_guest(kvm, wall_clock + sec_hi_ofs,
				&wc_sec_hi, sizeof(wc_sec_hi));
	}

	version++;
	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
}
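
/*
 * Illustrative sketch (not part of KVM) of the guest-side reader that the
 * even/odd version dance above protects: the guest must see an even
 * version and re-read it unchanged around the payload.
 *
 *	do {
 *		version = wc->version;		// wc: guest mapping (hypothetical)
 *		rmb();
 *		sec  = wc->sec;
 *		nsec = wc->nsec;
 *		rmb();
 *	} while ((version & 1) || version != wc->version);
 */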

static void kvm_write_system_time(struct kvm_vcpu *vcpu, gpa_t system_time,
				  bool old_msr, bool host_initiated)
{
	struct kvm_arch *ka = &vcpu->kvm->arch;

	if (vcpu->vcpu_id == 0 && !host_initiated) {
		if (ka->boot_vcpu_runs_old_kvmclock != old_msr)
			kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);

		ka->boot_vcpu_runs_old_kvmclock = old_msr;
	}

	vcpu->arch.time = system_time;
	kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);

	/* we verify if the enable bit is set... */
	if (system_time & 1)
		kvm_gpc_activate(&vcpu->arch.pv_time, system_time & ~1ULL,
				 sizeof(struct pvclock_vcpu_time_info));
	else
		kvm_gpc_deactivate(&vcpu->arch.pv_time);
}

static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
	do_shl32_div32(dividend, divisor);
	return dividend;
}

static void kvm_get_time_scale(uint64_t scaled_hz, uint64_t base_hz,
			       s8 *pshift, u32 *pmultiplier)
{
	uint64_t scaled64;
	int32_t shift = 0;
	uint64_t tps64;
	uint32_t tps32;

	tps64 = base_hz;
	scaled64 = scaled_hz;
	while (tps64 > scaled64 * 2 || tps64 & 0xffffffff00000000ULL) {
		tps64 >>= 1;
		shift--;
	}

	tps32 = (uint32_t)tps64;
	while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) {
		if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
			scaled64 >>= 1;
		else
			tps32 <<= 1;
		shift++;
	}

	*pshift = shift;
	*pmultiplier = div_frac(scaled64, tps32);
}
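
/*
 * Worked example (illustrative, not part of KVM): the resulting pair
 * satisfies scaled_hz ~= base_hz * (*pmultiplier / 2^32) * 2^(*pshift).
 * For scaled_hz = 2,000,000,000 and base_hz = 1,000,000,000 the loops
 * produce *pshift = 2 and *pmultiplier = 0x80000000:
 *
 *	1 GHz * (0x80000000 / 2^32) * 2^2 = 2 GHz
 *
 * which is the (shift, mul) format that pvclock_scale_delta() consumes.
 */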

#ifdef CONFIG_X86_64
static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
#endif

static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
static unsigned long max_tsc_khz;

static u32 adjust_tsc_khz(u32 khz, s32 ppm)
{
	u64 v = (u64)khz * (1000000 + ppm);
	do_div(v, 1000000);
	return v;
}
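
/*
 * Worked example (illustrative, not part of KVM): with the default
 * tsc_tolerance_ppm of 250, a host tsc_khz of 2,500,000 gives
 * adjust_tsc_khz(2500000, -250) = 2500000 * 999750 / 1000000 = 2499375,
 * i.e. a tolerated band of roughly +/-0.025% around the host rate.
 */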

static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier);

static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
{
	u64 ratio;

	/* Guest TSC same frequency as host TSC? */
	if (!scale) {
		kvm_vcpu_write_tsc_multiplier(vcpu, kvm_caps.default_tsc_scaling_ratio);
		return 0;
	}

	/* TSC scaling supported? */
	if (!kvm_caps.has_tsc_control) {
		if (user_tsc_khz > tsc_khz) {
			vcpu->arch.tsc_catchup = 1;
			vcpu->arch.tsc_always_catchup = 1;
			return 0;
		} else {
			pr_warn_ratelimited("user requested TSC rate below hardware speed\n");
			return -1;
		}
	}

	/* TSC scaling required - calculate ratio */
	ratio = mul_u64_u32_div(1ULL << kvm_caps.tsc_scaling_ratio_frac_bits,
				user_tsc_khz, tsc_khz);

	if (ratio == 0 || ratio >= kvm_caps.max_tsc_scaling_ratio) {
		pr_warn_ratelimited("Invalid TSC scaling ratio - virtual-tsc-khz=%u\n",
				    user_tsc_khz);
		return -1;
	}

	kvm_vcpu_write_tsc_multiplier(vcpu, ratio);
	return 0;
}

static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
{
	u32 thresh_lo, thresh_hi;
	int use_scaling = 0;

	/* tsc_khz can be zero if TSC calibration fails */
	if (user_tsc_khz == 0) {
		/* set tsc_scaling_ratio to a safe value */
		kvm_vcpu_write_tsc_multiplier(vcpu, kvm_caps.default_tsc_scaling_ratio);
		return -1;
	}

	/* Compute a scale to convert nanoseconds in TSC cycles */
	kvm_get_time_scale(user_tsc_khz * 1000LL, NSEC_PER_SEC,
			   &vcpu->arch.virtual_tsc_shift,
			   &vcpu->arch.virtual_tsc_mult);
	vcpu->arch.virtual_tsc_khz = user_tsc_khz;

	/*
	 * Compute the acceptable variation in TSC rate within the range
	 * of tolerance, and decide whether the rate being requested falls
	 * within those bounds of the hardware rate.  If so, no scaling
	 * or compensation need be done.
	 */
	thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm);
	thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm);
	if (user_tsc_khz < thresh_lo || user_tsc_khz > thresh_hi) {
		pr_debug("requested TSC rate %u falls outside tolerance [%u,%u]\n",
			 user_tsc_khz, thresh_lo, thresh_hi);
		use_scaling = 1;
	}
	return set_tsc_khz(vcpu, user_tsc_khz, use_scaling);
}

static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
{
	u64 tsc = pvclock_scale_delta(kernel_ns - vcpu->arch.this_tsc_nsec,
				      vcpu->arch.virtual_tsc_mult,
				      vcpu->arch.virtual_tsc_shift);
	tsc += vcpu->arch.this_tsc_write;
	return tsc;
}

#ifdef CONFIG_X86_64
static inline bool gtod_is_based_on_tsc(int mode)
{
	return mode == VDSO_CLOCKMODE_TSC || mode == VDSO_CLOCKMODE_HVCLOCK;
}
#endif

static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu, bool new_generation)
{
#ifdef CONFIG_X86_64
	struct kvm_arch *ka = &vcpu->kvm->arch;
	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;

	/*
	 * To use the masterclock, the host clocksource must be based on TSC
	 * and all vCPUs must have matching TSCs.  Note, the count for matching
	 * vCPUs doesn't include the reference vCPU, hence "+1".
	 */
	bool use_master_clock = (ka->nr_vcpus_matched_tsc + 1 ==
				 atomic_read(&vcpu->kvm->online_vcpus)) &&
				gtod_is_based_on_tsc(gtod->clock.vclock_mode);

	/*
	 * Request a masterclock update if the masterclock needs to be toggled
	 * on/off, or when starting a new generation and the masterclock is
	 * enabled (compute_guest_tsc() requires the masterclock snapshot to be
	 * taken _after_ the new generation is created).
	 */
	if ((ka->use_master_clock && new_generation) ||
	    (ka->use_master_clock != use_master_clock))
		kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);

	trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc,
			    atomic_read(&vcpu->kvm->online_vcpus),
			    ka->use_master_clock, gtod->clock.vclock_mode);
#endif
}

/*
 * Multiply tsc by a fixed point number represented by ratio.
 *
 * The most significant 64-N bits (mult) of ratio represent the
 * integral part of the fixed point number; the remaining N bits
 * (frac) represent the fractional part, ie. ratio represents a fixed
 * point number (mult + frac * 2^(-N)).
 *
 * N equals to kvm_caps.tsc_scaling_ratio_frac_bits.
 */
static inline u64 __scale_tsc(u64 ratio, u64 tsc)
{
	return mul_u64_u64_shr(tsc, ratio, kvm_caps.tsc_scaling_ratio_frac_bits);
}
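
/*
 * Worked example (illustrative, not part of KVM): assuming VMX's 48
 * fractional bits, running a 1500 MHz guest on a 1000 MHz host uses
 * ratio = 1.5 * 2^48, so
 *
 *	__scale_tsc(ratio, 1000000) = (1000000 * 1.5 * 2^48) >> 48 = 1500000
 */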

u64 kvm_scale_tsc(u64 tsc, u64 ratio)
{
	u64 _tsc = tsc;

	if (ratio != kvm_caps.default_tsc_scaling_ratio)
		_tsc = __scale_tsc(ratio, tsc);

	return _tsc;
}

static u64 kvm_compute_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
{
	u64 tsc;

	tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio);

	return target_tsc - tsc;
}

u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
{
	return vcpu->arch.l1_tsc_offset +
	       kvm_scale_tsc(host_tsc, vcpu->arch.l1_tsc_scaling_ratio);
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_read_l1_tsc);

u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier)
{
	u64 nested_offset;

	if (l2_multiplier == kvm_caps.default_tsc_scaling_ratio)
		nested_offset = l1_offset;
	else
		nested_offset = mul_s64_u64_shr((s64) l1_offset, l2_multiplier,
						kvm_caps.tsc_scaling_ratio_frac_bits);

	nested_offset += l2_offset;
	return nested_offset;
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_calc_nested_tsc_offset);
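
/*
 * Derivation (illustrative, not part of KVM): hardware applies the
 * multiplier before the offset at each level, so L2 observes
 *
 *	tsc_l2 = ((tsc_host * r1 + o1) * r2) + o2
 *	       = tsc_host * (r1 * r2) + (o1 * r2 + o2)
 *
 * hence the combined offset o1 * r2 + o2 computed above, and the combined
 * multiplier r1 * r2 computed by kvm_calc_nested_tsc_multiplier() below.
 */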

u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier)
{
	if (l2_multiplier != kvm_caps.default_tsc_scaling_ratio)
		return mul_u64_u64_shr(l1_multiplier, l2_multiplier,
				       kvm_caps.tsc_scaling_ratio_frac_bits);

	return l1_multiplier;
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_calc_nested_tsc_multiplier);

static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 l1_offset)
{
	if (vcpu->arch.guest_tsc_protected)
		return;

	trace_kvm_write_tsc_offset(vcpu->vcpu_id,
				   vcpu->arch.l1_tsc_offset,
				   l1_offset);

	vcpu->arch.l1_tsc_offset = l1_offset;

	/*
	 * If we are here because L1 chose not to trap WRMSR to TSC then
	 * according to the spec this should set L1's TSC (as opposed to
	 * setting L1's offset for L2).
	 */
	if (is_guest_mode(vcpu))
		vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
			l1_offset,
			kvm_x86_call(get_l2_tsc_offset)(vcpu),
			kvm_x86_call(get_l2_tsc_multiplier)(vcpu));
	else
		vcpu->arch.tsc_offset = l1_offset;

	kvm_x86_call(write_tsc_offset)(vcpu);
}

static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier)
{
	vcpu->arch.l1_tsc_scaling_ratio = l1_multiplier;

	/* Userspace is changing the multiplier while L2 is active */
	if (is_guest_mode(vcpu))
		vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier(
			l1_multiplier,
			kvm_x86_call(get_l2_tsc_multiplier)(vcpu));
	else
		vcpu->arch.tsc_scaling_ratio = l1_multiplier;

	if (kvm_caps.has_tsc_control)
		kvm_x86_call(write_tsc_multiplier)(vcpu);
}

static inline bool kvm_check_tsc_unstable(void)
{
#ifdef CONFIG_X86_64
	/*
	 * The TSC is marked unstable when we're running on Hyper-V, but the
	 * 'TSC page' clocksource there is good enough to treat the TSC as
	 * stable for KVM's purposes.
	 */
	if (pvclock_gtod_data.clock.vclock_mode == VDSO_CLOCKMODE_HVCLOCK)
		return false;
#endif
	return check_tsc_unstable();
}

/*
 * Infers attempts to synchronize the guest's tsc from host writes. Sets the
 * offset for the vcpu and tracks the TSC matching generation that the vcpu
 * participates in.
 */
static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc,
				  u64 ns, bool matched, bool user_set_tsc)
{
	struct kvm *kvm = vcpu->kvm;

	lockdep_assert_held(&kvm->arch.tsc_write_lock);

	if (vcpu->arch.guest_tsc_protected)
		return;

	if (user_set_tsc)
		vcpu->kvm->arch.user_set_tsc = true;

	/*
	 * We also track the most recent recorded KHZ, write and time to
	 * allow the matching interval to be extended at each write.
	 */
	kvm->arch.last_tsc_nsec = ns;
	kvm->arch.last_tsc_write = tsc;
	kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
	kvm->arch.last_tsc_offset = offset;

	vcpu->arch.last_guest_tsc = tsc;

	kvm_vcpu_write_tsc_offset(vcpu, offset);

	if (!matched) {
		/*
		 * We split periods of matched TSC writes into generations.
		 * For each generation, we track the original measured
		 * nanosecond time, offset, and write, so if TSCs are in
		 * sync, we can match exact offset, and if not, we can match
		 * exact software computation in compute_guest_tsc()
		 *
		 * These values are tracked in kvm->arch.cur_xxx variables.
		 */
		kvm->arch.cur_tsc_generation++;
		kvm->arch.cur_tsc_nsec = ns;
		kvm->arch.cur_tsc_write = tsc;
		kvm->arch.cur_tsc_offset = offset;
		kvm->arch.nr_vcpus_matched_tsc = 0;
	} else if (vcpu->arch.this_tsc_generation != kvm->arch.cur_tsc_generation) {
		kvm->arch.nr_vcpus_matched_tsc++;
	}

	/* Keep track of which generation this VCPU has synchronized to */
	vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
	vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
	vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;

	kvm_track_tsc_matching(vcpu, !matched);
}

static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 *user_value)
{
	u64 data = user_value ? *user_value : 0;
	struct kvm *kvm = vcpu->kvm;
	u64 offset, ns, elapsed;
	unsigned long flags;
	bool matched = false;
	bool synchronizing = false;

	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
	offset = kvm_compute_l1_tsc_offset(vcpu, data);
	ns = get_kvmclock_base_ns();
	elapsed = ns - kvm->arch.last_tsc_nsec;

	if (vcpu->arch.virtual_tsc_khz) {
		if (data == 0) {
			/*
			 * Force synchronization when creating a vCPU, or when
			 * userspace explicitly writes a zero value.
			 */
			synchronizing = true;
		} else if (kvm->arch.user_set_tsc) {
			u64 tsc_exp = kvm->arch.last_tsc_write +
				      nsec_to_cycles(vcpu, elapsed);
			u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL;
			/*
			 * Here lies UAPI baggage: when a user-initiated TSC write has
			 * a small delta (1 second) of virtual cycle time against the
			 * previously set vCPU, we assume that they were intended to be
			 * in sync and the delta was only due to the racy nature of the
			 * legacy API.
			 *
			 * This trick falls down when restoring a guest which genuinely
			 * has been running for less time than the 1 second of imprecision
			 * which we allow for in the legacy API. In this case, the first
			 * value written by userspace (on any vCPU) should not be subject
			 * to this 'correction' to make it sync up with values that only
			 * come from the kernel's default vCPU creation. Make the 1-second
			 * slop hack only trigger if the user_set_tsc flag is already set.
			 */
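			/*
			 * The check below is equivalent to
			 * |data - tsc_exp| < tsc_hz, i.e. the write lands
			 * within one second's worth of guest cycles of the
			 * expected value.
			 */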
			synchronizing = data < tsc_exp + tsc_hz &&
					data + tsc_hz > tsc_exp;
		}
	}

	/*
	 * For a reliable TSC, we can match TSC offsets, and for an unstable
	 * TSC, we add elapsed time in this computation.  We could let the
	 * compensation code attempt to catch up if we fall behind, but
	 * it's better to try to match offsets from the beginning.
	 */
	if (synchronizing &&
	    vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
		if (!kvm_check_tsc_unstable()) {
			offset = kvm->arch.cur_tsc_offset;
		} else {
			u64 delta = nsec_to_cycles(vcpu, elapsed);
			data += delta;
			offset = kvm_compute_l1_tsc_offset(vcpu, data);
		}
		matched = true;
	}

	__kvm_synchronize_tsc(vcpu, offset, data, ns, matched, !!user_value);
	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
}

static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
					   s64 adjustment)
{
	u64 tsc_offset = vcpu->arch.l1_tsc_offset;
	kvm_vcpu_write_tsc_offset(vcpu, tsc_offset + adjustment);
}

static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
{
	if (vcpu->arch.l1_tsc_scaling_ratio != kvm_caps.default_tsc_scaling_ratio)
		WARN_ON(adjustment < 0);
	adjustment = kvm_scale_tsc((u64) adjustment,
				   vcpu->arch.l1_tsc_scaling_ratio);
	adjust_tsc_offset_guest(vcpu, adjustment);
}

#ifdef CONFIG_X86_64

static u64 read_tsc(void)
{
	u64 ret = (u64)rdtsc_ordered();
	u64 last = pvclock_gtod_data.clock.cycle_last;

	if (likely(ret >= last))
		return ret;

	/*
	 * GCC likes to generate cmov here, but this branch is extremely
	 * predictable (it's just a function of time and the likely is
	 * very likely) and there's a data dependence, so force GCC
	 * to generate a branch instead.  I don't barrier() because
	 * we don't actually need a barrier, and if this function
	 * ever gets inlined it will generate worse code.
	 */
	asm volatile ("");
	return last;
}

static inline u64 vgettsc(struct pvclock_clock *clock, u64 *tsc_timestamp,
			  int *mode)
{
	u64 tsc_pg_val;
	long v;

	switch (clock->vclock_mode) {
	case VDSO_CLOCKMODE_HVCLOCK:
		if (hv_read_tsc_page_tsc(hv_get_tsc_page(),
					 tsc_timestamp, &tsc_pg_val)) {
			/* TSC page valid */
			*mode = VDSO_CLOCKMODE_HVCLOCK;
			v = (tsc_pg_val - clock->cycle_last) &
				clock->mask;
		} else {
			/* TSC page invalid */
			*mode = VDSO_CLOCKMODE_NONE;
		}
		break;
	case VDSO_CLOCKMODE_TSC:
		*mode = VDSO_CLOCKMODE_TSC;
		*tsc_timestamp = read_tsc();
		v = (*tsc_timestamp - clock->cycle_last) &
			clock->mask;
		break;
	default:
		*mode = VDSO_CLOCKMODE_NONE;
	}

	if (*mode == VDSO_CLOCKMODE_NONE)
		*tsc_timestamp = v = 0;

	return v * clock->mult;
}
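
/*
 * Note (illustrative, not part of KVM): vgettsc() returns the cycles
 * elapsed since the timekeeper snapshot, pre-multiplied by the
 * clocksource mult, so the do_*() readers below recover nanoseconds as
 *
 *	ns = (base_cycles + (tsc - cycle_last) * mult) >> shift
 *
 * mirroring the kernel's standard clocksource delta-to-ns conversion.
 */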

/*
 * As with get_kvmclock_base_ns(), this counts from boot time, at the
 * frequency of CLOCK_MONOTONIC_RAW (hence adding gtod->offs_boot).
 */
static int do_kvmclock_base(s64 *t, u64 *tsc_timestamp)
{
	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
	unsigned long seq;
	int mode;
	u64 ns;

	do {
		seq = read_seqcount_begin(&gtod->seq);
		ns = gtod->raw_clock.base_cycles;
		ns += vgettsc(&gtod->raw_clock, tsc_timestamp, &mode);
		ns >>= gtod->raw_clock.shift;
		ns += ktime_to_ns(ktime_add(gtod->raw_clock.offset, gtod->offs_boot));
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
	*t = ns;

	return mode;
}

/*
 * This calculates CLOCK_MONOTONIC at the time of the TSC snapshot, with
 * no boot time offset.
 */
static int do_monotonic(s64 *t, u64 *tsc_timestamp)
{
	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
	unsigned long seq;
	int mode;
	u64 ns;

	do {
		seq = read_seqcount_begin(&gtod->seq);
		ns = gtod->clock.base_cycles;
		ns += vgettsc(&gtod->clock, tsc_timestamp, &mode);
		ns >>= gtod->clock.shift;
		ns += ktime_to_ns(gtod->clock.offset);
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
	*t = ns;

	return mode;
}

static int do_realtime(struct timespec64 *ts, u64 *tsc_timestamp)
{
	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
	unsigned long seq;
	int mode;
	u64 ns;

	do {
		seq = read_seqcount_begin(&gtod->seq);
		ts->tv_sec = gtod->wall_time_sec;
		ns = gtod->clock.base_cycles;
		ns += vgettsc(&gtod->clock, tsc_timestamp, &mode);
		ns >>= gtod->clock.shift;
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));

	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return mode;
}

/*
 * Calculates the kvmclock_base_ns (CLOCK_MONOTONIC_RAW + boot time) and
 * reports the TSC value from which it did so.  Returns true if host is
 * using TSC based clocksource.
 */
static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp)
{
	/* checked again under seqlock below */
	if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode))
		return false;

	return gtod_is_based_on_tsc(do_kvmclock_base(kernel_ns,
						     tsc_timestamp));
}

/*
 * Calculates CLOCK_MONOTONIC and reports the TSC value from which it did
 * so.  Returns true if host is using TSC based clocksource.
 */
bool kvm_get_monotonic_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp)
{
	/* checked again under seqlock below */
	if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode))
		return false;

	return gtod_is_based_on_tsc(do_monotonic(kernel_ns,
						 tsc_timestamp));
}

/*
 * Calculates CLOCK_REALTIME and reports the TSC value from which it did
 * so.  Returns true if host is using TSC based clocksource.
 *
 * DO NOT USE this for anything related to migration. You want CLOCK_TAI
 * for that.
 */
static bool kvm_get_walltime_and_clockread(struct timespec64 *ts,
					   u64 *tsc_timestamp)
{
	/* checked again under seqlock below */
	if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode))
		return false;

	return gtod_is_based_on_tsc(do_realtime(ts, tsc_timestamp));
}
#endif

/*
 *
 * Assuming a stable TSC across physical CPUs, and a stable TSC
 * across virtual CPUs, the following condition is possible.
 * Each numbered line represents an event visible to both
 * CPUs at the next numbered event.
 *
 * "timespecX" represents host monotonic time. "tscX" represents
 * RDTSC value.
 *
 *		VCPU0 on CPU0			|	VCPU1 on CPU1
 *
 * 1. read timespec0,tsc0
 * 2.						| timespec1 = timespec0 + N
 *						| tsc1 = tsc0 + M
 * 3. transition to guest			| transition to guest
 * 4. ret0 = timespec0 + (rdtsc - tsc0)		|
 * 5.						| ret1 = timespec1 + (rdtsc - tsc1)
 *						| ret1 = timespec0 + N + (rdtsc - (tsc0 + M))
 *
 * Since ret0 update is visible to VCPU1 at time 5, to obey monotonicity:
 *
 *	- ret0 < ret1
 *	- timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M))
 *		...
 *	- 0 < N - M => M < N
 *
 * That is, when timespec0 != timespec1, M < N. Unfortunately that is not
 * always the case (the difference between two distinct xtime instances
 * might be smaller than the difference between corresponding TSC reads,
 * when updating guest vcpus pvclock areas).
 *
 * To avoid that problem, do not allow visibility of distinct
 * system_timestamp/tsc_timestamp values simultaneously: use a master
 * copy of host monotonic time values. Update that master copy
 * in lockstep.
 *
 * Rely on synchronization of host TSCs and guest TSCs for monotonicity.
 *
 */

static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
{
#ifdef CONFIG_X86_64
	struct kvm_arch *ka = &kvm->arch;
	int vclock_mode;
	bool host_tsc_clocksource, vcpus_matched;

	lockdep_assert_held(&kvm->arch.tsc_write_lock);
	vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
			 atomic_read(&kvm->online_vcpus));

	/*
	 * If the host uses TSC clock, then passthrough TSC as stable
	 * to the guest.
	 */
	host_tsc_clocksource = kvm_get_time_and_clockread(
					&ka->master_kernel_ns,
					&ka->master_cycle_now);

	ka->use_master_clock = host_tsc_clocksource && vcpus_matched
				&& !ka->backwards_tsc_observed
				&& !ka->boot_vcpu_runs_old_kvmclock;

	if (ka->use_master_clock)
		atomic_set(&kvm_guest_has_master_clock, 1);

	vclock_mode = pvclock_gtod_data.clock.vclock_mode;
	trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode,
				      vcpus_matched);
#endif
}

static void kvm_make_mclock_inprogress_request(struct kvm *kvm)
{
	kvm_make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
}

static void __kvm_start_pvclock_update(struct kvm *kvm)
{
	raw_spin_lock_irq(&kvm->arch.tsc_write_lock);
	write_seqcount_begin(&kvm->arch.pvclock_sc);
}

static void kvm_start_pvclock_update(struct kvm *kvm)
{
	kvm_make_mclock_inprogress_request(kvm);

	/* no guest entries from this point */
	__kvm_start_pvclock_update(kvm);
}

static void kvm_end_pvclock_update(struct kvm *kvm)
{
	struct kvm_arch *ka = &kvm->arch;
	struct kvm_vcpu *vcpu;
	unsigned long i;

	write_seqcount_end(&ka->pvclock_sc);
	raw_spin_unlock_irq(&ka->tsc_write_lock);
	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);

	/* guest entries allowed */
	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
}

static void kvm_update_masterclock(struct kvm *kvm)
{
	kvm_hv_request_tsc_page_update(kvm);
	kvm_start_pvclock_update(kvm);
	pvclock_update_vm_gtod_copy(kvm);
	kvm_end_pvclock_update(kvm);
}

/*
 * Use the kernel's tsc_khz directly if the TSC is constant, otherwise use KVM's
 * per-CPU value (which may be zero if a CPU is going offline).  Note, tsc_khz
 * can change during boot even if the TSC is constant, as it's possible for KVM
 * to be loaded before TSC calibration completes.  Ideally, KVM would get a
 * notification when calibration completes, but practically speaking calibration
 * will complete before userspace is alive enough to create VMs.
 */
static unsigned long get_cpu_tsc_khz(void)
{
	if (static_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return tsc_khz;
	else
		return __this_cpu_read(cpu_tsc_khz);
}

/* Called within read_seqcount_begin/retry for kvm->pvclock_sc. */
static void __get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data)
{
	struct kvm_arch *ka = &kvm->arch;
	struct pvclock_vcpu_time_info hv_clock;

	/* both __this_cpu_read() and rdtsc() should be on the same cpu */
	get_cpu();

	data->flags = 0;
	if (ka->use_master_clock &&
	    (static_cpu_has(X86_FEATURE_CONSTANT_TSC) || __this_cpu_read(cpu_tsc_khz))) {
#ifdef CONFIG_X86_64
		struct timespec64 ts;

		if (kvm_get_walltime_and_clockread(&ts, &data->host_tsc)) {
			data->realtime = ts.tv_nsec + NSEC_PER_SEC * ts.tv_sec;
			data->flags |= KVM_CLOCK_REALTIME | KVM_CLOCK_HOST_TSC;
		} else
#endif
		data->host_tsc = rdtsc();

		data->flags |= KVM_CLOCK_TSC_STABLE;
		hv_clock.tsc_timestamp = ka->master_cycle_now;
		hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
		kvm_get_time_scale(NSEC_PER_SEC, get_cpu_tsc_khz() * 1000LL,
				   &hv_clock.tsc_shift,
				   &hv_clock.tsc_to_system_mul);
		data->clock = __pvclock_read_cycles(&hv_clock, data->host_tsc);
	} else {
		data->clock = get_kvmclock_base_ns() + ka->kvmclock_offset;
	}

	put_cpu();
}
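
/*
 * Note (illustrative, not part of KVM): __pvclock_read_cycles() above
 * applies the kvmclock guest-side formula,
 *
 *	clock = system_time + pvclock_scale_delta(host_tsc - tsc_timestamp,
 *						  tsc_to_system_mul, tsc_shift)
 *
 * i.e. nanoseconds since the master clock snapshot, derived from the TSC
 * delta, added to the snapshot's system time.
 */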
3244
get_kvmclock(struct kvm * kvm,struct kvm_clock_data * data)3245 static void get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data)
3246 {
3247 struct kvm_arch *ka = &kvm->arch;
3248 unsigned seq;
3249
3250 do {
3251 seq = read_seqcount_begin(&ka->pvclock_sc);
3252 __get_kvmclock(kvm, data);
3253 } while (read_seqcount_retry(&ka->pvclock_sc, seq));
3254 }
3255
get_kvmclock_ns(struct kvm * kvm)3256 u64 get_kvmclock_ns(struct kvm *kvm)
3257 {
3258 struct kvm_clock_data data;
3259
3260 get_kvmclock(kvm, &data);
3261 return data.clock;
3262 }
3263
kvm_setup_guest_pvclock(struct pvclock_vcpu_time_info * ref_hv_clock,struct kvm_vcpu * vcpu,struct gfn_to_pfn_cache * gpc,unsigned int offset)3264 static void kvm_setup_guest_pvclock(struct pvclock_vcpu_time_info *ref_hv_clock,
3265 struct kvm_vcpu *vcpu,
3266 struct gfn_to_pfn_cache *gpc,
3267 unsigned int offset)
3268 {
3269 struct pvclock_vcpu_time_info *guest_hv_clock;
3270 struct pvclock_vcpu_time_info hv_clock;
3271 unsigned long flags;
3272
3273 memcpy(&hv_clock, ref_hv_clock, sizeof(hv_clock));
3274
3275 read_lock_irqsave(&gpc->lock, flags);
3276 while (!kvm_gpc_check(gpc, offset + sizeof(*guest_hv_clock))) {
3277 read_unlock_irqrestore(&gpc->lock, flags);
3278
3279 if (kvm_gpc_refresh(gpc, offset + sizeof(*guest_hv_clock)))
3280 return;
3281
3282 read_lock_irqsave(&gpc->lock, flags);
3283 }
3284
3285 guest_hv_clock = (void *)(gpc->khva + offset);
3286
3287 /*
3288 * This VCPU is paused, but it's legal for a guest to read another
3289 * VCPU's kvmclock, so we really have to follow the specification where
3290 * it says that version is odd if data is being modified, and even after
3291 * it is consistent.
3292 */

	guest_hv_clock->version = hv_clock.version = (guest_hv_clock->version + 1) | 1;
	smp_wmb();

	/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
	hv_clock.flags |= (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);

	memcpy(guest_hv_clock, &hv_clock, sizeof(*guest_hv_clock));

	smp_wmb();

	guest_hv_clock->version = ++hv_clock.version;

	kvm_gpc_mark_dirty_in_slot(gpc);
	read_unlock_irqrestore(&gpc->lock, flags);

	trace_kvm_pvclock_update(vcpu->vcpu_id, &hv_clock);
}

int kvm_guest_time_update(struct kvm_vcpu *v)
{
	struct pvclock_vcpu_time_info hv_clock = {};
	unsigned long flags, tgt_tsc_khz;
	unsigned seq;
	struct kvm_vcpu_arch *vcpu = &v->arch;
	struct kvm_arch *ka = &v->kvm->arch;
	s64 kernel_ns;
	u64 tsc_timestamp, host_tsc;
	bool use_master_clock;

	kernel_ns = 0;
	host_tsc = 0;

	/*
	 * If the host uses the TSC as its clocksource, then pass the TSC
	 * through to the guest as stable.
	 */
	do {
		seq = read_seqcount_begin(&ka->pvclock_sc);
		use_master_clock = ka->use_master_clock;
		if (use_master_clock) {
			host_tsc = ka->master_cycle_now;
			kernel_ns = ka->master_kernel_ns;
		}
	} while (read_seqcount_retry(&ka->pvclock_sc, seq));

	/* Keep irq disabled to prevent changes to the clock */
	local_irq_save(flags);
	tgt_tsc_khz = get_cpu_tsc_khz();
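	/*
	 * A frequency of zero means this CPU's TSC frequency isn't known
	 * (yet), e.g. a cpufreq transition hasn't been reflected on this
	 * CPU; punt and retry the update on the next VM-Entry.
	 */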
	if (unlikely(tgt_tsc_khz == 0)) {
		local_irq_restore(flags);
		kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
		return 1;
	}
	if (!use_master_clock) {
		host_tsc = rdtsc();
		kernel_ns = get_kvmclock_base_ns();
	}

	tsc_timestamp = kvm_read_l1_tsc(v, host_tsc);

	/*
	 * We may have to catch up the TSC to match elapsed wall clock
	 * time for two reasons, even if kvmclock is used.
	 * 1) CPU could have been running below the maximum TSC rate
	 * 2) Broken TSC compensation resets the base at each VCPU
	 *    entry to avoid unknown leaps of TSC even when running
	 *    again on the same CPU. This may cause apparent elapsed
	 *    time to disappear, and the guest to stand still or run
	 *    very slowly.
	 */
	if (vcpu->tsc_catchup) {
		u64 tsc = compute_guest_tsc(v, kernel_ns);
		if (tsc > tsc_timestamp) {
			adjust_tsc_offset_guest(v, tsc - tsc_timestamp);
			tsc_timestamp = tsc;
		}
	}

	local_irq_restore(flags);

	/* With all the info we got, fill in the values */

	if (kvm_caps.has_tsc_control) {
		tgt_tsc_khz = kvm_scale_tsc(tgt_tsc_khz,
					    v->arch.l1_tsc_scaling_ratio);
		tgt_tsc_khz = tgt_tsc_khz ? : 1;
	}

	if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) {
		kvm_get_time_scale(NSEC_PER_SEC, tgt_tsc_khz * 1000LL,
				   &vcpu->pvclock_tsc_shift,
				   &vcpu->pvclock_tsc_mul);
		vcpu->hw_tsc_khz = tgt_tsc_khz;
	}

	hv_clock.tsc_shift = vcpu->pvclock_tsc_shift;
	hv_clock.tsc_to_system_mul = vcpu->pvclock_tsc_mul;
	hv_clock.tsc_timestamp = tsc_timestamp;
	hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
	vcpu->last_guest_tsc = tsc_timestamp;

	/* If the host uses TSC clocksource, then it is stable */
	hv_clock.flags = 0;
	if (use_master_clock)
		hv_clock.flags |= PVCLOCK_TSC_STABLE_BIT;

	if (vcpu->pv_time.active) {
		/*
		 * GUEST_STOPPED is only supported by kvmclock, and KVM's
		 * historic behavior is to only process the request if kvmclock
		 * is active/enabled.
		 */
		if (vcpu->pvclock_set_guest_stopped_request) {
			hv_clock.flags |= PVCLOCK_GUEST_STOPPED;
			vcpu->pvclock_set_guest_stopped_request = false;
		}
		kvm_setup_guest_pvclock(&hv_clock, v, &vcpu->pv_time, 0);

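		/*
		 * GUEST_STOPPED is a one-shot, kvmclock-only flag; strip it
		 * so that it can't leak into the Hyper-V TSC page or the Xen
		 * pvclock copies written below.
		 */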
		hv_clock.flags &= ~PVCLOCK_GUEST_STOPPED;
	}

	kvm_hv_setup_tsc_page(v->kvm, &hv_clock);

#ifdef CONFIG_KVM_XEN
	/*
	 * For Xen guests we may need to override PVCLOCK_TSC_STABLE_BIT as,
	 * unless explicitly told to use the TSC as its clocksource, Xen will
	 * not set this bit. This default behaviour led to bugs in some guest
	 * kernels which cause problems if they observe PVCLOCK_TSC_STABLE_BIT
	 * in the pvclock flags.
	 *
	 * Note! Clear TSC_STABLE only for Xen clocks, i.e. the order matters!
	 */
	if (ka->xen.hvm_config.flags & KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE)
		hv_clock.flags &= ~PVCLOCK_TSC_STABLE_BIT;

	if (vcpu->xen.vcpu_info_cache.active)
		kvm_setup_guest_pvclock(&hv_clock, v, &vcpu->xen.vcpu_info_cache,
					offsetof(struct compat_vcpu_info, time));
	if (vcpu->xen.vcpu_time_info_cache.active)
		kvm_setup_guest_pvclock(&hv_clock, v, &vcpu->xen.vcpu_time_info_cache, 0);
#endif
	return 0;
}

/*
 * The pvclock_wall_clock ABI tells the guest the wall clock time at
 * which it started (i.e. its epoch, when its kvmclock was zero).
 *
 * In fact those clocks are subtly different; wall clock frequency is
 * adjusted by NTP and has leap seconds, while the kvmclock is a
 * simple function of the TSC without any such adjustment.
 *
 * Perhaps the ABI should have exposed CLOCK_TAI and a ratio between
 * that and kvmclock, but even that would be subject to change over
 * time.
 *
 * Attempt to calculate the epoch at a given moment using the *same*
 * TSC reading via kvm_get_walltime_and_clockread() to obtain both
 * wallclock and kvmclock times, and subtracting one from the other.
 *
 * Fall back to using their values at slightly different moments by
 * calling ktime_get_real_ns() and get_kvmclock_ns() separately.
 */
uint64_t kvm_get_wall_clock_epoch(struct kvm *kvm)
{
#ifdef CONFIG_X86_64
	struct pvclock_vcpu_time_info hv_clock;
	struct kvm_arch *ka = &kvm->arch;
	unsigned long seq, local_tsc_khz;
	struct timespec64 ts;
	uint64_t host_tsc;

	do {
		seq = read_seqcount_begin(&ka->pvclock_sc);

		local_tsc_khz = 0;
		if (!ka->use_master_clock)
			break;

		/*
		 * The TSC read and the call to get_cpu_tsc_khz() must happen
		 * on the same CPU.
		 */
		get_cpu();

		local_tsc_khz = get_cpu_tsc_khz();

		if (local_tsc_khz &&
		    !kvm_get_walltime_and_clockread(&ts, &host_tsc))
			local_tsc_khz = 0; /* Fall back to old method */

		put_cpu();

		/*
		 * These values must be snapshotted within the seqcount loop.
		 * After that, it's just mathematics which can happen on any
		 * CPU at any time.
		 */
		hv_clock.tsc_timestamp = ka->master_cycle_now;
		hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;

	} while (read_seqcount_retry(&ka->pvclock_sc, seq));

	/*
	 * If the conditions were right, and obtaining the wallclock+TSC was
	 * successful, calculate the KVM clock at the corresponding time and
	 * subtract one from the other to get the guest's epoch in nanoseconds
	 * since 1970-01-01.
	 */
	if (local_tsc_khz) {
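		/*
		 * local_tsc_khz * NSEC_PER_USEC is the TSC frequency in Hz,
		 * i.e. the same base as the tgt_tsc_khz * 1000LL passed to
		 * kvm_get_time_scale() elsewhere.
		 */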
		kvm_get_time_scale(NSEC_PER_SEC, local_tsc_khz * NSEC_PER_USEC,
				   &hv_clock.tsc_shift,
				   &hv_clock.tsc_to_system_mul);
		return ts.tv_nsec + NSEC_PER_SEC * ts.tv_sec -
			__pvclock_read_cycles(&hv_clock, host_tsc);
	}
#endif
	return ktime_get_real_ns() - get_kvmclock_ns(kvm);
}

/*
 * kvmclock updates which are isolated to a given vcpu, such as
 * vcpu->cpu migration, should not allow system_timestamp from
 * the rest of the vcpus to remain static.
 *
 * So in those cases, request a kvmclock update for all vcpus.
 * The worst case for a remote vcpu to update its kvmclock
 * is then bounded by maximum nohz sleep latency.
 */
static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;
	struct kvm *kvm = v->kvm;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
		kvm_vcpu_kick(vcpu);
	}
}

/* These helpers are safe iff @msr is known to be an MCx bank MSR. */
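/*
 * Each MCA bank occupies four consecutive MSRs (CTL, STATUS, ADDR, MISC),
 * so the low two bits of the index select the register within a bank.
 */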
static bool is_mci_control_msr(u32 msr)
{
	return (msr & 3) == 0;
}
static bool is_mci_status_msr(u32 msr)
{
	return (msr & 3) == 1;
}

/*
 * On AMD, HWCR[McStatusWrEn] controls whether setting MCi_STATUS results in #GP.
 */
static bool can_set_mci_status(struct kvm_vcpu *vcpu)
{
	/* McStatusWrEn enabled? */
	if (guest_cpuid_is_amd_compatible(vcpu))
		return !!(vcpu->arch.msr_hwcr & BIT_ULL(18));

	return false;
}

static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	u64 mcg_cap = vcpu->arch.mcg_cap;
	unsigned bank_num = mcg_cap & 0xff;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;
	u32 offset, last_msr;

	switch (msr) {
	case MSR_IA32_MCG_STATUS:
		vcpu->arch.mcg_status = data;
		break;
	case MSR_IA32_MCG_CTL:
		if (!(mcg_cap & MCG_CTL_P) &&
		    (data || !msr_info->host_initiated))
			return 1;
		if (data != 0 && data != ~(u64)0)
			return 1;
		vcpu->arch.mcg_ctl = data;
		break;
	case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1:
		last_msr = MSR_IA32_MCx_CTL2(bank_num) - 1;
		if (msr > last_msr)
			return 1;

		if (!(mcg_cap & MCG_CMCI_P) && (data || !msr_info->host_initiated))
			return 1;
		/* An attempt to write a 1 to a reserved bit raises #GP */
		if (data & ~(MCI_CTL2_CMCI_EN | MCI_CTL2_CMCI_THRESHOLD_MASK))
			return 1;
		offset = array_index_nospec(msr - MSR_IA32_MC0_CTL2,
					    last_msr + 1 - MSR_IA32_MC0_CTL2);
		vcpu->arch.mci_ctl2_banks[offset] = data;
		break;
	case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
		last_msr = MSR_IA32_MCx_CTL(bank_num) - 1;
		if (msr > last_msr)
			return 1;

		/*
		 * Only 0 or all 1s can be written to IA32_MCi_CTL, all other
		 * values are architecturally undefined. But, some Linux
		 * kernels clear bit 10 in bank 4 to work around a BIOS/GART
		 * TLB issue on AMD K8s, so allow bit 10 to be clear when
		 * setting all other bits in order to avoid an uncaught #GP
		 * in the guest.
		 *
		 * UNIXWARE clears bit 0 of MC1_CTL to ignore correctable,
		 * single-bit ECC data errors.
		 */
		if (is_mci_control_msr(msr) &&
		    data != 0 && (data | (1 << 10) | 1) != ~(u64)0)
			return 1;

		/*
		 * All CPUs allow writing 0 to MCi_STATUS MSRs to clear the MSR.
		 * AMD-based CPUs allow non-zero values, but if and only if
		 * HWCR[McStatusWrEn] is set.
		 */
		if (!msr_info->host_initiated && is_mci_status_msr(msr) &&
		    data != 0 && !can_set_mci_status(vcpu))
			return 1;

		offset = array_index_nospec(msr - MSR_IA32_MC0_CTL,
					    last_msr + 1 - MSR_IA32_MC0_CTL);
		vcpu->arch.mce_banks[offset] = data;
		break;
	default:
		return 1;
	}
	return 0;
}

static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
{
	gpa_t gpa = data & ~0x3f;
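	/*
	 * The shared buffer GPA occupies the upper bits and is thus 64-byte
	 * aligned; the low six bits carry the enable bit and config flags.
	 */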

	/* Bits 4:5 are reserved; should be zero. */
	if (data & 0x30)
		return 1;

	if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_VMEXIT) &&
	    (data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT))
		return 1;

	if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT) &&
	    (data & KVM_ASYNC_PF_DELIVERY_AS_INT))
		return 1;

	if (!lapic_in_kernel(vcpu))
		return data ? 1 : 0;

	vcpu->arch.apf.msr_en_val = data;

	if (!kvm_pv_async_pf_enabled(vcpu)) {
		kvm_clear_async_pf_completion_queue(vcpu);
		kvm_async_pf_hash_reset(vcpu);
		return 0;
	}

	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
				      sizeof(u64)))
		return 1;

	vcpu->arch.apf.send_always = (data & KVM_ASYNC_PF_SEND_ALWAYS);
	vcpu->arch.apf.delivery_as_pf_vmexit = data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;

	kvm_async_pf_wakeup_all(vcpu);

	return 0;
}

static int kvm_pv_enable_async_pf_int(struct kvm_vcpu *vcpu, u64 data)
{
	/* Bits 8-63 are reserved */
	if (data >> 8)
		return 1;

	if (!lapic_in_kernel(vcpu))
		return 1;

	vcpu->arch.apf.msr_int_val = data;

	vcpu->arch.apf.vec = data & KVM_ASYNC_PF_VEC_MASK;

	return 0;
}

static void kvmclock_reset(struct kvm_vcpu *vcpu)
{
	kvm_gpc_deactivate(&vcpu->arch.pv_time);
	vcpu->arch.time = 0;
}

static void kvm_vcpu_flush_tlb_all(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.tlb_flush;
	kvm_x86_call(flush_tlb_all)(vcpu);

	/* Flushing all ASIDs flushes the current ASID... */
	kvm_clear_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
}

static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.tlb_flush;

	if (!tdp_enabled) {
		/*
		 * A TLB flush on behalf of the guest is equivalent to
		 * INVPCID(all), toggling CR4.PGE, etc., which requires
		 * a forced sync of the shadow page tables. Ensure all the
		 * roots are synced and the guest TLB in hardware is clean.
		 */
		kvm_mmu_sync_roots(vcpu);
		kvm_mmu_sync_prev_roots(vcpu);
	}

	kvm_x86_call(flush_tlb_guest)(vcpu);

	/*
	 * Flushing all "guest" TLB is always a superset of Hyper-V's fine
	 * grained flushing.
	 */
	kvm_hv_vcpu_purge_flush_tlb(vcpu);
}


static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.tlb_flush;
	kvm_x86_call(flush_tlb_current)(vcpu);
}

/*
 * Service "local" TLB flush requests, which are specific to the current MMU
 * context. In addition to the generic event handling in vcpu_enter_guest(),
 * TLB flushes that are targeted at an MMU context also need to be serviced
 * prior to nested VM-Enter/VM-Exit.
 */
void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu)
{
	if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
		kvm_vcpu_flush_tlb_current(vcpu);

	if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu))
		kvm_vcpu_flush_tlb_guest(vcpu);
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_service_local_tlb_flush_requests);

static void record_steal_time(struct kvm_vcpu *vcpu)
{
	struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache;
	struct kvm_steal_time __user *st;
	struct kvm_memslots *slots;
	gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS;
	u64 steal;
	u32 version;

	if (kvm_xen_msr_enabled(vcpu->kvm)) {
		kvm_xen_runstate_set_running(vcpu);
		return;
	}

	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
		return;

	if (WARN_ON_ONCE(current->mm != vcpu->kvm->mm))
		return;

	slots = kvm_memslots(vcpu->kvm);

	if (unlikely(slots->generation != ghc->generation ||
		     gpa != ghc->gpa ||
		     kvm_is_error_hva(ghc->hva) || !ghc->memslot)) {
		/* We rely on the fact that it fits in a single page. */
		BUILD_BUG_ON((sizeof(*st) - 1) & KVM_STEAL_VALID_BITS);

		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st)) ||
		    kvm_is_error_hva(ghc->hva) || !ghc->memslot)
			return;
	}

	st = (struct kvm_steal_time __user *)ghc->hva;
	/*
	 * Doing a TLB flush here, on the guest's behalf, can avoid
	 * expensive IPIs.
	 */
	if (guest_pv_has(vcpu, KVM_FEATURE_PV_TLB_FLUSH)) {
		u8 st_preempted = 0;
		int err = -EFAULT;

		if (!user_access_begin(st, sizeof(*st)))
			return;

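		/*
		 * Atomically fetch and zero st->preempted: on success the
		 * "xor" clears err, while a faulting access jumps straight
		 * to label 2 via the extable entry, leaving err as -EFAULT.
		 */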
		asm volatile("1: xchgb %0, %2\n"
			     "xor %1, %1\n"
			     "2:\n"
			     _ASM_EXTABLE_UA(1b, 2b)
			     : "+q" (st_preempted),
			       "+&r" (err),
			       "+m" (st->preempted));
		if (err)
			goto out;

		user_access_end();

		vcpu->arch.st.preempted = 0;

		trace_kvm_pv_tlb_flush(vcpu->vcpu_id,
				       st_preempted & KVM_VCPU_FLUSH_TLB);
		if (st_preempted & KVM_VCPU_FLUSH_TLB)
			kvm_vcpu_flush_tlb_guest(vcpu);

		if (!user_access_begin(st, sizeof(*st)))
			goto dirty;
	} else {
		if (!user_access_begin(st, sizeof(*st)))
			return;

		unsafe_put_user(0, &st->preempted, out);
		vcpu->arch.st.preempted = 0;
	}

	unsafe_get_user(version, &st->version, out);
	if (version & 1)
		version += 1;  /* first time write, random junk */

	version += 1;
	unsafe_put_user(version, &st->version, out);

	smp_wmb();

	unsafe_get_user(steal, &st->steal, out);
	steal += current->sched_info.run_delay -
		vcpu->arch.st.last_steal;
	vcpu->arch.st.last_steal = current->sched_info.run_delay;
	unsafe_put_user(steal, &st->steal, out);

	version += 1;
	unsafe_put_user(version, &st->version, out);

 out:
	user_access_end();
 dirty:
	mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
}

/*
 * Returns true if the MSR in question is managed via XSTATE, i.e. is context
 * switched with the rest of guest FPU state.
 *
 * Note, S_CET is _not_ saved/restored via XSAVES/XRSTORS.
 */
static bool is_xstate_managed_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	if (!vcpu)
		return false;

	switch (msr) {
	case MSR_IA32_U_CET:
		return guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK) ||
		       guest_cpu_cap_has(vcpu, X86_FEATURE_IBT);
	case MSR_IA32_PL0_SSP ... MSR_IA32_PL3_SSP:
		return guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK);
	default:
		return false;
	}
}

/*
 * Lock (and if necessary, re-load) the guest FPU, i.e. XSTATE, and access an
 * MSR that is managed via XSTATE. Note, the caller is responsible for doing
 * the initial FPU load, this helper only ensures that guest state is resident
 * in hardware (the kernel can load its FPU state in IRQ context).
 *
 * Note, loading guest values for U_CET and PL[0-3]_SSP while executing in the
 * kernel is safe, as U_CET is specific to userspace, and PL[0-3]_SSP are only
 * consumed when transitioning to lower privilege levels, i.e. are effectively
 * only consumed by userspace as well.
 */
static __always_inline void kvm_access_xstate_msr(struct kvm_vcpu *vcpu,
						  struct msr_data *msr_info,
						  int access)
{
	BUILD_BUG_ON(access != MSR_TYPE_R && access != MSR_TYPE_W);

	KVM_BUG_ON(!is_xstate_managed_msr(vcpu, msr_info->index), vcpu->kvm);
	KVM_BUG_ON(!vcpu->arch.guest_fpu.fpstate->in_use, vcpu->kvm);

	kvm_fpu_get();
	if (access == MSR_TYPE_R)
		rdmsrq(msr_info->index, msr_info->data);
	else
		wrmsrq(msr_info->index, msr_info->data);
	kvm_fpu_put();
}

static void kvm_set_xstate_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	kvm_access_xstate_msr(vcpu, msr_info, MSR_TYPE_W);
}

static void kvm_get_xstate_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	kvm_access_xstate_msr(vcpu, msr_info, MSR_TYPE_R);
}

int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	/*
	 * Do not allow host-initiated writes to trigger the Xen hypercall
	 * page setup; it could incur locking paths which are not expected
	 * if userspace sets the MSR in an unusual location.
	 */
	if (kvm_xen_is_hypercall_page_msr(vcpu->kvm, msr) &&
	    !msr_info->host_initiated)
		return kvm_xen_write_hypercall_page(vcpu, data);

	switch (msr) {
	case MSR_AMD64_NB_CFG:
	case MSR_IA32_UCODE_WRITE:
	case MSR_VM_HSAVE_PA:
	case MSR_AMD64_PATCH_LOADER:
	case MSR_AMD64_BU_CFG2:
	case MSR_AMD64_DC_CFG:
	case MSR_AMD64_TW_CFG:
	case MSR_F15H_EX_CFG:
		break;

	case MSR_IA32_UCODE_REV:
		if (msr_info->host_initiated)
			vcpu->arch.microcode_version = data;
		break;
	case MSR_IA32_ARCH_CAPABILITIES:
		if (!msr_info->host_initiated ||
		    !guest_cpu_cap_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
			return KVM_MSR_RET_UNSUPPORTED;
		vcpu->arch.arch_capabilities = data;
		break;
	case MSR_IA32_PERF_CAPABILITIES:
		if (!msr_info->host_initiated ||
		    !guest_cpu_cap_has(vcpu, X86_FEATURE_PDCM))
			return KVM_MSR_RET_UNSUPPORTED;

		if (data & ~kvm_caps.supported_perf_cap)
			return 1;

		/*
		 * Note, this is not just a performance optimization! KVM
		 * disallows changing feature MSRs after the vCPU has run; PMU
		 * refresh will bug the VM if called after the vCPU has run.
		 */
		if (vcpu->arch.perf_capabilities == data)
			break;

		vcpu->arch.perf_capabilities = data;
		kvm_pmu_refresh(vcpu);
		kvm_make_request(KVM_REQ_RECALC_INTERCEPTS, vcpu);
		break;
	case MSR_IA32_PRED_CMD: {
		u64 reserved_bits = ~(PRED_CMD_IBPB | PRED_CMD_SBPB);

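		/*
		 * Everything outside IBPB and SBPB is always reserved; each
		 * command bit is additionally reserved if the guest (for
		 * guest-initiated writes) or the host lacks the feature.
		 */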
		if (!msr_info->host_initiated) {
			if (!guest_has_pred_cmd_msr(vcpu))
				return 1;

			if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
			    !guest_cpu_cap_has(vcpu, X86_FEATURE_AMD_IBPB))
				reserved_bits |= PRED_CMD_IBPB;

			if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SBPB))
				reserved_bits |= PRED_CMD_SBPB;
		}

		if (!boot_cpu_has(X86_FEATURE_IBPB))
			reserved_bits |= PRED_CMD_IBPB;

		if (!boot_cpu_has(X86_FEATURE_SBPB))
			reserved_bits |= PRED_CMD_SBPB;

		if (data & reserved_bits)
			return 1;

		if (!data)
			break;

		wrmsrq(MSR_IA32_PRED_CMD, data);
		break;
	}
	case MSR_IA32_FLUSH_CMD:
		if (!msr_info->host_initiated &&
		    !guest_cpu_cap_has(vcpu, X86_FEATURE_FLUSH_L1D))
			return 1;

		if (!boot_cpu_has(X86_FEATURE_FLUSH_L1D) || (data & ~L1D_FLUSH))
			return 1;
		if (!data)
			break;

		wrmsrq(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
		break;
	case MSR_EFER:
		return set_efer(vcpu, msr_info);
	case MSR_K7_HWCR:
		data &= ~(u64)0x40;	/* ignore flush filter disable */
		data &= ~(u64)0x100;	/* ignore ignne emulation enable */
		data &= ~(u64)0x8;	/* ignore TLB cache disable */

		/*
		 * Allow McStatusWrEn and TscFreqSel (Linux guests from v3.2
		 * through at least v6.6 whine if TscFreqSel is clear,
		 * depending on F/M/S).
		 */
		if (data & ~(BIT_ULL(18) | BIT_ULL(24))) {
			kvm_pr_unimpl_wrmsr(vcpu, msr, data);
			return 1;
		}
		vcpu->arch.msr_hwcr = data;
		break;
	case MSR_FAM10H_MMIO_CONF_BASE:
		if (data != 0) {
			kvm_pr_unimpl_wrmsr(vcpu, msr, data);
			return 1;
		}
		break;
	case MSR_IA32_CR_PAT:
		if (!kvm_pat_valid(data))
			return 1;

		vcpu->arch.pat = data;
		break;
	case MTRRphysBase_MSR(0) ... MSR_MTRRfix4K_F8000:
	case MSR_MTRRdefType:
		return kvm_mtrr_set_msr(vcpu, msr, data);
	case MSR_IA32_APICBASE:
		return kvm_apic_set_base(vcpu, data, msr_info->host_initiated);
	case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
		return kvm_x2apic_msr_write(vcpu, msr, data);
	case MSR_IA32_TSC_DEADLINE:
		kvm_set_lapic_tscdeadline_msr(vcpu, data);
		break;
	case MSR_IA32_TSC_ADJUST:
		if (guest_cpu_cap_has(vcpu, X86_FEATURE_TSC_ADJUST)) {
			if (!msr_info->host_initiated) {
				s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
				adjust_tsc_offset_guest(vcpu, adj);
				/*
				 * Before returning to the guest, tsc_timestamp
				 * must be adjusted as well, otherwise the
				 * guest's per-CPU pvclock time could jump.
				 */
				kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
			}
			vcpu->arch.ia32_tsc_adjust_msr = data;
		}
		break;
	case MSR_IA32_MISC_ENABLE: {
		u64 old_val = vcpu->arch.ia32_misc_enable_msr;

		if (!msr_info->host_initiated) {
			/* RO bits */
			if ((old_val ^ data) & MSR_IA32_MISC_ENABLE_PMU_RO_MASK)
				return 1;

			/* R bits, i.e. writes are ignored, but don't fault. */
			data = data & ~MSR_IA32_MISC_ENABLE_EMON;
			data |= old_val & MSR_IA32_MISC_ENABLE_EMON;
		}

		if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT) &&
		    ((old_val ^ data) & MSR_IA32_MISC_ENABLE_MWAIT)) {
			if (!guest_cpu_cap_has(vcpu, X86_FEATURE_XMM3))
				return 1;
			vcpu->arch.ia32_misc_enable_msr = data;
			vcpu->arch.cpuid_dynamic_bits_dirty = true;
		} else {
			vcpu->arch.ia32_misc_enable_msr = data;
		}
		break;
	}
	case MSR_IA32_SMBASE:
		if (!IS_ENABLED(CONFIG_KVM_SMM) || !msr_info->host_initiated)
			return 1;
		vcpu->arch.smbase = data;
		break;
	case MSR_IA32_POWER_CTL:
		vcpu->arch.msr_ia32_power_ctl = data;
		break;
	case MSR_IA32_TSC:
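		/*
		 * Host-initiated writes go through TSC synchronization so
		 * that migration-style writes are detected, whereas a guest
		 * WRMSR is handled as an offset adjustment, mirrored into
		 * IA32_TSC_ADJUST as the architecture requires.
		 */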
		if (msr_info->host_initiated) {
			kvm_synchronize_tsc(vcpu, &data);
		} else if (!vcpu->arch.guest_tsc_protected) {
			u64 adj = kvm_compute_l1_tsc_offset(vcpu, data) - vcpu->arch.l1_tsc_offset;
			adjust_tsc_offset_guest(vcpu, adj);
			vcpu->arch.ia32_tsc_adjust_msr += adj;
		}
		break;
	case MSR_IA32_XSS:
		if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
			return KVM_MSR_RET_UNSUPPORTED;

		if (data & ~vcpu->arch.guest_supported_xss)
			return 1;
		if (vcpu->arch.ia32_xss == data)
			break;
		vcpu->arch.ia32_xss = data;
		vcpu->arch.cpuid_dynamic_bits_dirty = true;
		break;
	case MSR_SMI_COUNT:
		if (!msr_info->host_initiated)
			return 1;
		vcpu->arch.smi_count = data;
		break;
	case MSR_KVM_WALL_CLOCK_NEW:
		if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
			return KVM_MSR_RET_UNSUPPORTED;

		vcpu->kvm->arch.wall_clock = data;
		kvm_write_wall_clock(vcpu->kvm, data, 0);
		break;
	case MSR_KVM_WALL_CLOCK:
		if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
			return KVM_MSR_RET_UNSUPPORTED;

		vcpu->kvm->arch.wall_clock = data;
		kvm_write_wall_clock(vcpu->kvm, data, 0);
		break;
	case MSR_KVM_SYSTEM_TIME_NEW:
		if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
			return KVM_MSR_RET_UNSUPPORTED;

		kvm_write_system_time(vcpu, data, false, msr_info->host_initiated);
		break;
	case MSR_KVM_SYSTEM_TIME:
		if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
			return KVM_MSR_RET_UNSUPPORTED;

		kvm_write_system_time(vcpu, data, true, msr_info->host_initiated);
		break;
	case MSR_KVM_ASYNC_PF_EN:
		if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF))
			return KVM_MSR_RET_UNSUPPORTED;

		if (kvm_pv_enable_async_pf(vcpu, data))
			return 1;
		break;
	case MSR_KVM_ASYNC_PF_INT:
		if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
			return KVM_MSR_RET_UNSUPPORTED;

		if (kvm_pv_enable_async_pf_int(vcpu, data))
			return 1;
		break;
	case MSR_KVM_ASYNC_PF_ACK:
		if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
			return KVM_MSR_RET_UNSUPPORTED;
		if (data & 0x1) {
			/*
			 * Pairs with the smp_mb__after_atomic() in
			 * kvm_arch_async_page_present_queued().
			 */
			smp_store_mb(vcpu->arch.apf.pageready_pending, false);

			kvm_check_async_pf_completion(vcpu);
		}
		break;
	case MSR_KVM_STEAL_TIME:
		if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME))
			return KVM_MSR_RET_UNSUPPORTED;

		if (unlikely(!sched_info_on()))
			return 1;

		if (data & KVM_STEAL_RESERVED_MASK)
			return 1;

		vcpu->arch.st.msr_val = data;

		if (!(data & KVM_MSR_ENABLED))
			break;

		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);

		break;
	case MSR_KVM_PV_EOI_EN:
		if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI))
			return KVM_MSR_RET_UNSUPPORTED;

		if (kvm_lapic_set_pv_eoi(vcpu, data, sizeof(u8)))
			return 1;
		break;

	case MSR_KVM_POLL_CONTROL:
		if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL))
			return KVM_MSR_RET_UNSUPPORTED;

		/* only enable bit supported */
		if (data & (-1ULL << 1))
			return 1;

		vcpu->arch.msr_kvm_poll_control = data;
		break;

	case MSR_IA32_MCG_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
	case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1:
		return set_msr_mce(vcpu, msr_info);

	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
	case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
	case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
		if (kvm_pmu_is_valid_msr(vcpu, msr))
			return kvm_pmu_set_msr(vcpu, msr_info);

		if (data)
			kvm_pr_unimpl_wrmsr(vcpu, msr, data);
		break;
	case MSR_K7_CLK_CTL:
		/*
		 * Ignore all writes to this no longer documented MSR.
		 * Writes are only relevant for old K7 processors,
		 * all pre-dating SVM, but a recommended workaround from
		 * AMD for these chips. It is possible to specify the
		 * affected processor models on the command line, hence
		 * the need to ignore the workaround.
		 */
		break;
#ifdef CONFIG_KVM_HYPERV
	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
	case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
	case HV_X64_MSR_SYNDBG_OPTIONS:
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
	case HV_X64_MSR_CRASH_CTL:
	case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_STATUS:
	case HV_X64_MSR_TSC_INVARIANT_CONTROL:
		return kvm_hv_set_msr_common(vcpu, msr, data,
					     msr_info->host_initiated);
#endif
	case MSR_IA32_BBL_CR_CTL3:
		/*
		 * Drop writes to this legacy MSR -- see rdmsr
		 * counterpart for further detail.
		 */
		kvm_pr_unimpl_wrmsr(vcpu, msr, data);
		break;
	case MSR_AMD64_OSVW_ID_LENGTH:
		if (!guest_cpu_cap_has(vcpu, X86_FEATURE_OSVW))
			return 1;
		vcpu->arch.osvw.length = data;
		break;
	case MSR_AMD64_OSVW_STATUS:
		if (!guest_cpu_cap_has(vcpu, X86_FEATURE_OSVW))
			return 1;
		vcpu->arch.osvw.status = data;
		break;
	case MSR_PLATFORM_INFO:
		if (!msr_info->host_initiated)
			return 1;
		vcpu->arch.msr_platform_info = data;
		break;
	case MSR_MISC_FEATURES_ENABLES:
		if (data & ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT ||
		    (data & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
		     !supports_cpuid_fault(vcpu)))
			return 1;
		vcpu->arch.msr_misc_features_enables = data;
		break;
#ifdef CONFIG_X86_64
	case MSR_IA32_XFD:
		if (!msr_info->host_initiated &&
		    !guest_cpu_cap_has(vcpu, X86_FEATURE_XFD))
			return 1;

		if (data & ~kvm_guest_supported_xfd(vcpu))
			return 1;

		fpu_update_guest_xfd(&vcpu->arch.guest_fpu, data);
		break;
	case MSR_IA32_XFD_ERR:
		if (!msr_info->host_initiated &&
		    !guest_cpu_cap_has(vcpu, X86_FEATURE_XFD))
			return 1;

		if (data & ~kvm_guest_supported_xfd(vcpu))
			return 1;

		vcpu->arch.guest_fpu.xfd_err = data;
		break;
#endif
	case MSR_IA32_U_CET:
	case MSR_IA32_PL0_SSP ... MSR_IA32_PL3_SSP:
		kvm_set_xstate_msr(vcpu, msr_info);
		break;
	default:
		if (kvm_pmu_is_valid_msr(vcpu, msr))
			return kvm_pmu_set_msr(vcpu, msr_info);

		return KVM_MSR_RET_UNSUPPORTED;
	}
	return 0;
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_msr_common);

static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
{
	u64 data;
	u64 mcg_cap = vcpu->arch.mcg_cap;
	unsigned bank_num = mcg_cap & 0xff;
	u32 offset, last_msr;

	switch (msr) {
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
		data = 0;
		break;
	case MSR_IA32_MCG_CAP:
		data = vcpu->arch.mcg_cap;
		break;
	case MSR_IA32_MCG_CTL:
		if (!(mcg_cap & MCG_CTL_P) && !host)
			return 1;
		data = vcpu->arch.mcg_ctl;
		break;
	case MSR_IA32_MCG_STATUS:
		data = vcpu->arch.mcg_status;
		break;
	case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1:
		last_msr = MSR_IA32_MCx_CTL2(bank_num) - 1;
		if (msr > last_msr)
			return 1;

		if (!(mcg_cap & MCG_CMCI_P) && !host)
			return 1;
		offset = array_index_nospec(msr - MSR_IA32_MC0_CTL2,
					    last_msr + 1 - MSR_IA32_MC0_CTL2);
		data = vcpu->arch.mci_ctl2_banks[offset];
		break;
	case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
		last_msr = MSR_IA32_MCx_CTL(bank_num) - 1;
		if (msr > last_msr)
			return 1;

		offset = array_index_nospec(msr - MSR_IA32_MC0_CTL,
					    last_msr + 1 - MSR_IA32_MC0_CTL);
		data = vcpu->arch.mce_banks[offset];
		break;
	default:
		return 1;
	}
	*pdata = data;
	return 0;
}

int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	switch (msr_info->index) {
	case MSR_IA32_PLATFORM_ID:
	case MSR_IA32_EBL_CR_POWERON:
	case MSR_IA32_LASTBRANCHFROMIP:
	case MSR_IA32_LASTBRANCHTOIP:
	case MSR_IA32_LASTINTFROMIP:
	case MSR_IA32_LASTINTTOIP:
	case MSR_AMD64_SYSCFG:
	case MSR_K8_TSEG_ADDR:
	case MSR_K8_TSEG_MASK:
	case MSR_VM_HSAVE_PA:
	case MSR_K8_INT_PENDING_MSG:
	case MSR_AMD64_NB_CFG:
	case MSR_FAM10H_MMIO_CONF_BASE:
	case MSR_AMD64_BU_CFG2:
	case MSR_IA32_PERF_CTL:
	case MSR_AMD64_DC_CFG:
	case MSR_AMD64_TW_CFG:
	case MSR_F15H_EX_CFG:
	/*
	 * Intel Sandy Bridge CPUs must support the RAPL (running average power
	 * limit) MSRs. Just return 0, as we do not want to expose the host
	 * data here. Do not conditionalize this on CPUID, as KVM does not do
	 * so for existing CPU-specific MSRs.
	 */
	case MSR_RAPL_POWER_UNIT:
	case MSR_PP0_ENERGY_STATUS:	/* Power plane 0 (core) */
	case MSR_PP1_ENERGY_STATUS:	/* Power plane 1 (graphics uncore) */
	case MSR_PKG_ENERGY_STATUS:	/* Total package */
	case MSR_DRAM_ENERGY_STATUS:	/* DRAM controller */
		msr_info->data = 0;
		break;
	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
	case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
	case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
		if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
			return kvm_pmu_get_msr(vcpu, msr_info);
		msr_info->data = 0;
		break;
	case MSR_IA32_UCODE_REV:
		msr_info->data = vcpu->arch.microcode_version;
		break;
	case MSR_IA32_ARCH_CAPABILITIES:
		if (!guest_cpu_cap_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
			return KVM_MSR_RET_UNSUPPORTED;
		msr_info->data = vcpu->arch.arch_capabilities;
		break;
	case MSR_IA32_PERF_CAPABILITIES:
		if (!guest_cpu_cap_has(vcpu, X86_FEATURE_PDCM))
			return KVM_MSR_RET_UNSUPPORTED;
		msr_info->data = vcpu->arch.perf_capabilities;
		break;
	case MSR_IA32_POWER_CTL:
		msr_info->data = vcpu->arch.msr_ia32_power_ctl;
		break;
	case MSR_IA32_TSC: {
		/*
		 * Intel SDM states that MSR_IA32_TSC read adds the TSC offset
		 * even when not intercepted. AMD manual doesn't explicitly
		 * state this but appears to behave the same.
		 *
		 * On userspace reads and writes, however, we unconditionally
		 * return L1's TSC value to ensure backwards-compatible
		 * behavior for migration.
		 */
		u64 offset, ratio;

		if (msr_info->host_initiated) {
			offset = vcpu->arch.l1_tsc_offset;
			ratio = vcpu->arch.l1_tsc_scaling_ratio;
		} else {
			offset = vcpu->arch.tsc_offset;
			ratio = vcpu->arch.tsc_scaling_ratio;
		}

		msr_info->data = kvm_scale_tsc(rdtsc(), ratio) + offset;
		break;
	}
	case MSR_IA32_CR_PAT:
		msr_info->data = vcpu->arch.pat;
		break;
	case MSR_MTRRcap:
	case MTRRphysBase_MSR(0) ... MSR_MTRRfix4K_F8000:
	case MSR_MTRRdefType:
		return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data);
	case 0xcd: /* fsb frequency */
		msr_info->data = 3;
		break;
	/*
	 * MSR_EBC_FREQUENCY_ID
	 * Conservative value valid for even the basic CPU models.
	 * Models 0,1: 000 in bits 23:21 indicating a bus speed of
	 * 100MHz, model 2 000 in bits 18:16 indicating 100MHz,
	 * and 266MHz for models 3 and 4. Set Core Clock
	 * Frequency to System Bus Frequency Ratio to 1 (bits
	 * 31:24) even though these are only valid for CPU
	 * models > 2, however guests may end up dividing or
	 * multiplying by zero otherwise.
	 */
	case MSR_EBC_FREQUENCY_ID:
		msr_info->data = 1 << 24;
		break;
	case MSR_IA32_APICBASE:
		msr_info->data = vcpu->arch.apic_base;
		break;
	case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
		return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data);
	case MSR_IA32_TSC_DEADLINE:
		msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu);
		break;
	case MSR_IA32_TSC_ADJUST:
		msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr;
		break;
	case MSR_IA32_MISC_ENABLE:
		msr_info->data = vcpu->arch.ia32_misc_enable_msr;
		break;
	case MSR_IA32_SMBASE:
		if (!IS_ENABLED(CONFIG_KVM_SMM) || !msr_info->host_initiated)
			return 1;
		msr_info->data = vcpu->arch.smbase;
		break;
	case MSR_SMI_COUNT:
		msr_info->data = vcpu->arch.smi_count;
		break;
	case MSR_IA32_PERF_STATUS:
		/* TSC increment by tick */
		msr_info->data = 1000ULL;
		/* CPU multiplier */
		msr_info->data |= (((uint64_t)4ULL) << 40);
		break;
	case MSR_EFER:
		msr_info->data = vcpu->arch.efer;
		break;
	case MSR_KVM_WALL_CLOCK:
		if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
			return KVM_MSR_RET_UNSUPPORTED;

		msr_info->data = vcpu->kvm->arch.wall_clock;
		break;
	case MSR_KVM_WALL_CLOCK_NEW:
		if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
			return KVM_MSR_RET_UNSUPPORTED;

		msr_info->data = vcpu->kvm->arch.wall_clock;
		break;
	case MSR_KVM_SYSTEM_TIME:
		if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
			return KVM_MSR_RET_UNSUPPORTED;

		msr_info->data = vcpu->arch.time;
		break;
	case MSR_KVM_SYSTEM_TIME_NEW:
		if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
			return KVM_MSR_RET_UNSUPPORTED;

		msr_info->data = vcpu->arch.time;
		break;
	case MSR_KVM_ASYNC_PF_EN:
		if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF))
			return KVM_MSR_RET_UNSUPPORTED;

		msr_info->data = vcpu->arch.apf.msr_en_val;
		break;
	case MSR_KVM_ASYNC_PF_INT:
		if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
			return KVM_MSR_RET_UNSUPPORTED;

		msr_info->data = vcpu->arch.apf.msr_int_val;
		break;
	case MSR_KVM_ASYNC_PF_ACK:
		if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
			return KVM_MSR_RET_UNSUPPORTED;

		msr_info->data = 0;
		break;
	case MSR_KVM_STEAL_TIME:
		if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME))
			return KVM_MSR_RET_UNSUPPORTED;

		msr_info->data = vcpu->arch.st.msr_val;
		break;
	case MSR_KVM_PV_EOI_EN:
		if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI))
			return KVM_MSR_RET_UNSUPPORTED;

		msr_info->data = vcpu->arch.pv_eoi.msr_val;
		break;
	case MSR_KVM_POLL_CONTROL:
		if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL))
			return KVM_MSR_RET_UNSUPPORTED;

		msr_info->data = vcpu->arch.msr_kvm_poll_control;
		break;
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
	case MSR_IA32_MCG_CAP:
	case MSR_IA32_MCG_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
	case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1:
		return get_msr_mce(vcpu, msr_info->index, &msr_info->data,
				   msr_info->host_initiated);
	case MSR_IA32_XSS:
		if (!msr_info->host_initiated &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
			return 1;
		msr_info->data = vcpu->arch.ia32_xss;
		break;
	case MSR_K7_CLK_CTL:
		/*
		 * Provide expected ramp-up count for K7. All others
		 * are set to zero, indicating minimum divisors for
		 * every field.
		 *
		 * This prevents guest kernels on AMD host with CPU
		 * type 6, model 8 and higher from exploding due to
		 * the rdmsr failing.
		 */
		msr_info->data = 0x20000000;
		break;
#ifdef CONFIG_KVM_HYPERV
	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
	case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
	case HV_X64_MSR_SYNDBG_OPTIONS:
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
	case HV_X64_MSR_CRASH_CTL:
	case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_STATUS:
	case HV_X64_MSR_TSC_INVARIANT_CONTROL:
		return kvm_hv_get_msr_common(vcpu,
					     msr_info->index, &msr_info->data,
					     msr_info->host_initiated);
#endif
	case MSR_IA32_BBL_CR_CTL3:
		/*
		 * This legacy MSR exists but isn't fully documented in current
		 * silicon. It is however accessed by winxp in very narrow
		 * scenarios where it sets bit #19, itself documented as
		 * a "reserved" bit. Best effort attempt to source coherent
		 * read data here should the balance of the register be
		 * interpreted by the guest:
		 *
		 * L2 cache control register 3: 64GB range, 256KB size,
		 * enabled, latency 0x1, configured
		 */
		msr_info->data = 0xbe702111;
		break;
	case MSR_AMD64_OSVW_ID_LENGTH:
		if (!guest_cpu_cap_has(vcpu, X86_FEATURE_OSVW))
			return 1;
		msr_info->data = vcpu->arch.osvw.length;
		break;
	case MSR_AMD64_OSVW_STATUS:
		if (!guest_cpu_cap_has(vcpu, X86_FEATURE_OSVW))
			return 1;
		msr_info->data = vcpu->arch.osvw.status;
		break;
	case MSR_PLATFORM_INFO:
		if (!msr_info->host_initiated &&
		    !vcpu->kvm->arch.guest_can_read_msr_platform_info)
			return 1;
		msr_info->data = vcpu->arch.msr_platform_info;
		break;
	case MSR_MISC_FEATURES_ENABLES:
		msr_info->data = vcpu->arch.msr_misc_features_enables;
		break;
	case MSR_K7_HWCR:
		msr_info->data = vcpu->arch.msr_hwcr;
		break;
#ifdef CONFIG_X86_64
	case MSR_IA32_XFD:
		if (!msr_info->host_initiated &&
		    !guest_cpu_cap_has(vcpu, X86_FEATURE_XFD))
			return 1;

		msr_info->data = vcpu->arch.guest_fpu.fpstate->xfd;
		break;
	case MSR_IA32_XFD_ERR:
		if (!msr_info->host_initiated &&
		    !guest_cpu_cap_has(vcpu, X86_FEATURE_XFD))
			return 1;

		msr_info->data = vcpu->arch.guest_fpu.xfd_err;
		break;
#endif
	case MSR_IA32_U_CET:
	case MSR_IA32_PL0_SSP ... MSR_IA32_PL3_SSP:
		kvm_get_xstate_msr(vcpu, msr_info);
		break;
	default:
		if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
			return kvm_pmu_get_msr(vcpu, msr_info);

		return KVM_MSR_RET_UNSUPPORTED;
	}
	return 0;
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_get_msr_common);

/*
 * Read or write a bunch of msrs. All parameters are kernel addresses.
 *
 * @return number of msrs set successfully.
 */
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
		    struct kvm_msr_entry *entries,
		    int (*do_msr)(struct kvm_vcpu *vcpu,
				  unsigned index, u64 *data))
{
	bool fpu_loaded = false;
	int i;

	for (i = 0; i < msrs->nmsrs; ++i) {
		/*
		 * If userspace is accessing one or more XSTATE-managed MSRs,
		 * temporarily load the guest's FPU state so that the guest's
		 * MSR value(s) is resident in hardware and thus can be accessed
		 * via RDMSR/WRMSR.
		 */
		if (!fpu_loaded && is_xstate_managed_msr(vcpu, entries[i].index)) {
			kvm_load_guest_fpu(vcpu);
			fpu_loaded = true;
		}
		if (do_msr(vcpu, entries[i].index, &entries[i].data))
			break;
	}
	if (fpu_loaded)
		kvm_put_guest_fpu(vcpu);

	return i;
}

/*
 * Read or write a bunch of msrs. Parameters are user addresses.
 *
 * @return number of msrs set successfully.
 */
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
		  int (*do_msr)(struct kvm_vcpu *vcpu,
				unsigned index, u64 *data),
		  int writeback)
{
	struct kvm_msrs msrs;
	struct kvm_msr_entry *entries;
	unsigned size;
	int r;

	r = -EFAULT;
	if (copy_from_user(&msrs, user_msrs, sizeof(msrs)))
		goto out;

	r = -E2BIG;
	if (msrs.nmsrs >= MAX_IO_MSRS)
		goto out;

	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
	entries = memdup_user(user_msrs->entries, size);
	if (IS_ERR(entries)) {
		r = PTR_ERR(entries);
		goto out;
	}

	r = __msr_io(vcpu, &msrs, entries, do_msr);

	if (writeback && copy_to_user(user_msrs->entries, entries, size))
		r = -EFAULT;

	kfree(entries);
out:
	return r;
}

static inline bool kvm_can_mwait_in_guest(void)
{
	return boot_cpu_has(X86_FEATURE_MWAIT) &&
		!boot_cpu_has_bug(X86_BUG_MONITOR) &&
		boot_cpu_has(X86_FEATURE_ARAT);
}

static u64 kvm_get_allowed_disable_exits(void)
{
	u64 r = KVM_X86_DISABLE_EXITS_PAUSE;

	if (boot_cpu_has(X86_FEATURE_APERFMPERF))
		r |= KVM_X86_DISABLE_EXITS_APERFMPERF;

	if (!mitigate_smt_rsb) {
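		/*
		 * When mitigate_smt_rsb is set, KVM relies on intercepting
		 * HLT and MWAIT/C-state transitions to apply the cross-thread
		 * RSB mitigation, so only offer to disable those exits when
		 * the mitigation is off.
		 */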
		r |= KVM_X86_DISABLE_EXITS_HLT |
		     KVM_X86_DISABLE_EXITS_CSTATE;

		if (kvm_can_mwait_in_guest())
			r |= KVM_X86_DISABLE_EXITS_MWAIT;
	}
	return r;
}

#ifdef CONFIG_KVM_HYPERV
static int kvm_ioctl_get_supported_hv_cpuid(struct kvm_vcpu *vcpu,
					    struct kvm_cpuid2 __user *cpuid_arg)
{
	struct kvm_cpuid2 cpuid;
	int r;

	r = -EFAULT;
	if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
		return r;

	r = kvm_get_hv_cpuid(vcpu, &cpuid, cpuid_arg->entries);
	if (r)
		return r;

	r = -EFAULT;
	if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid)))
		return r;

	return 0;
}
#endif

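/* Bounds-check @type before BIT() so that the shift is always well-defined. */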
static bool kvm_is_vm_type_supported(unsigned long type)
{
	return type < 32 && (kvm_caps.supported_vm_types & BIT(type));
}

static inline u64 kvm_sync_valid_fields(struct kvm *kvm)
{
	return kvm && kvm->arch.has_protected_state ? 0 : KVM_SYNC_X86_VALID_FIELDS;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r = 0;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_HLT:
	case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
	case KVM_CAP_SET_TSS_ADDR:
	case KVM_CAP_EXT_CPUID:
	case KVM_CAP_EXT_EMUL_CPUID:
	case KVM_CAP_CLOCKSOURCE:
#ifdef CONFIG_KVM_IOAPIC
	case KVM_CAP_PIT:
	case KVM_CAP_PIT2:
	case KVM_CAP_PIT_STATE2:
	case KVM_CAP_REINJECT_CONTROL:
#endif
	case KVM_CAP_NOP_IO_DELAY:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_USER_NMI:
	case KVM_CAP_IRQ_INJECT_STATUS:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_IOEVENTFD_NO_LENGTH:
	case KVM_CAP_SET_IDENTITY_MAP_ADDR:
	case KVM_CAP_VCPU_EVENTS:
#ifdef CONFIG_KVM_HYPERV
	case KVM_CAP_HYPERV:
	case KVM_CAP_HYPERV_VAPIC:
	case KVM_CAP_HYPERV_SPIN:
	case KVM_CAP_HYPERV_TIME:
	case KVM_CAP_HYPERV_SYNIC:
	case KVM_CAP_HYPERV_SYNIC2:
	case KVM_CAP_HYPERV_VP_INDEX:
	case KVM_CAP_HYPERV_EVENTFD:
	case KVM_CAP_HYPERV_TLBFLUSH:
	case KVM_CAP_HYPERV_SEND_IPI:
	case KVM_CAP_HYPERV_CPUID:
	case KVM_CAP_HYPERV_ENFORCE_CPUID:
	case KVM_CAP_SYS_HYPERV_CPUID:
#endif
	case KVM_CAP_PCI_SEGMENT:
	case KVM_CAP_DEBUGREGS:
	case KVM_CAP_X86_ROBUST_SINGLESTEP:
	case KVM_CAP_XSAVE:
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_ASYNC_PF_INT:
	case KVM_CAP_GET_TSC_KHZ:
	case KVM_CAP_KVMCLOCK_CTRL:
	case KVM_CAP_IOAPIC_POLARITY_IGNORED:
	case KVM_CAP_TSC_DEADLINE_TIMER:
	case KVM_CAP_DISABLE_QUIRKS:
	case KVM_CAP_SET_BOOT_CPU_ID:
	case KVM_CAP_SPLIT_IRQCHIP:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_PMU_EVENT_FILTER:
	case KVM_CAP_PMU_EVENT_MASKED_EVENTS:
	case KVM_CAP_GET_MSR_FEATURES:
	case KVM_CAP_MSR_PLATFORM_INFO:
	case KVM_CAP_EXCEPTION_PAYLOAD:
	case KVM_CAP_X86_TRIPLE_FAULT_EVENT:
	case KVM_CAP_SET_GUEST_DEBUG:
	case KVM_CAP_LAST_CPU:
	case KVM_CAP_X86_USER_SPACE_MSR:
	case KVM_CAP_X86_MSR_FILTER:
	case KVM_CAP_ENFORCE_PV_FEATURE_CPUID:
#ifdef CONFIG_X86_SGX_KVM
	case KVM_CAP_SGX_ATTRIBUTE:
#endif
	case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM:
	case KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM:
	case KVM_CAP_SREGS2:
	case KVM_CAP_EXIT_ON_EMULATION_FAILURE:
	case KVM_CAP_VCPU_ATTRIBUTES:
	case KVM_CAP_SYS_ATTRIBUTES:
	case KVM_CAP_VAPIC:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_VM_DISABLE_NX_HUGE_PAGES:
	case KVM_CAP_IRQFD_RESAMPLE:
	case KVM_CAP_MEMORY_FAULT_INFO:
	case KVM_CAP_X86_GUEST_MODE:
	case KVM_CAP_ONE_REG:
		r = 1;
		break;
	case KVM_CAP_PRE_FAULT_MEMORY:
		r = tdp_enabled;
		break;
	case KVM_CAP_X86_APIC_BUS_CYCLES_NS:
		r = APIC_BUS_CYCLE_NS_DEFAULT;
		break;
	case KVM_CAP_EXIT_HYPERCALL:
		r = KVM_EXIT_HYPERCALL_VALID_MASK;
		break;
	case KVM_CAP_SET_GUEST_DEBUG2:
		return KVM_GUESTDBG_VALID_MASK;
#ifdef CONFIG_KVM_XEN
	case KVM_CAP_XEN_HVM:
		r = KVM_XEN_HVM_CONFIG_HYPERCALL_MSR |
		    KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL |
		    KVM_XEN_HVM_CONFIG_SHARED_INFO |
		    KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL |
		    KVM_XEN_HVM_CONFIG_EVTCHN_SEND |
		    KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE |
		    KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA;
		if (sched_info_on())
			r |= KVM_XEN_HVM_CONFIG_RUNSTATE |
			     KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG;
		break;
#endif
	case KVM_CAP_SYNC_REGS:
		r = kvm_sync_valid_fields(kvm);
		break;
	case KVM_CAP_ADJUST_CLOCK:
		r = KVM_CLOCK_VALID_FLAGS;
		break;
	case KVM_CAP_X86_DISABLE_EXITS:
		r = kvm_get_allowed_disable_exits();
		break;
	case KVM_CAP_X86_SMM:
		if (!IS_ENABLED(CONFIG_KVM_SMM))
			break;

		/*
		 * SMBASE is usually relocated above 1M on modern chipsets,
		 * and SMM handlers might indeed rely on 4G segment limits,
		 * so do not report SMM to be available if real mode is
		 * emulated via vm86 mode. Still, do not go to great lengths
		 * to avoid userspace's usage of the feature, because it is a
		 * fringe case that is not enabled except via specific settings
		 * of the module parameters.
		 */
		r = kvm_x86_call(has_emulated_msr)(kvm, MSR_IA32_SMBASE);
		break;
	case KVM_CAP_NR_VCPUS:
		r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		if (kvm)
			r = kvm->max_vcpus;
		break;
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_MAX_VCPU_IDS;
		break;
	case KVM_CAP_PV_MMU:	/* obsolete */
		r = 0;
		break;
	case KVM_CAP_MCE:
		r = KVM_MAX_MCE_BANKS;
		break;
	case KVM_CAP_XCRS:
		r = boot_cpu_has(X86_FEATURE_XSAVE);
		break;
	case KVM_CAP_TSC_CONTROL:
	case KVM_CAP_VM_TSC_CONTROL:
		r = kvm_caps.has_tsc_control;
		break;
	case KVM_CAP_X2APIC_API:
		r = KVM_X2APIC_API_VALID_FLAGS;
		if (kvm && !irqchip_split(kvm))
			r &= ~KVM_X2APIC_ENABLE_SUPPRESS_EOI_BROADCAST;
		break;
	case KVM_CAP_NESTED_STATE:
		r = kvm_x86_ops.nested_ops->get_state ?
			kvm_x86_ops.nested_ops->get_state(NULL, NULL, 0) : 0;
		break;
#ifdef CONFIG_KVM_HYPERV
	case KVM_CAP_HYPERV_DIRECT_TLBFLUSH:
		r = kvm_x86_ops.enable_l2_tlb_flush != NULL;
		break;
	case KVM_CAP_HYPERV_ENLIGHTENED_VMCS:
		r = kvm_x86_ops.nested_ops->enable_evmcs != NULL;
		break;
#endif
	case KVM_CAP_SMALLER_MAXPHYADDR:
		r = (int) allow_smaller_maxphyaddr;
		break;
	case KVM_CAP_STEAL_TIME:
		r = sched_info_on();
		break;
	case KVM_CAP_X86_BUS_LOCK_EXIT:
		if (kvm_caps.has_bus_lock_exit)
			r = KVM_BUS_LOCK_DETECTION_OFF |
			    KVM_BUS_LOCK_DETECTION_EXIT;
		else
			r = 0;
		break;
	case KVM_CAP_XSAVE2: {
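		/*
		 * Report the XSAVE area size needed for the guest's filtered
		 * XCR0, but never less than the legacy struct kvm_xsave.
		 */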
		r = xstate_required_size(kvm_get_filtered_xcr0(), false);
		if (r < sizeof(struct kvm_xsave))
			r = sizeof(struct kvm_xsave);
		break;
	}
	case KVM_CAP_PMU_CAPABILITY:
		r = enable_pmu ? KVM_CAP_PMU_VALID_MASK : 0;
		break;
	case KVM_CAP_DISABLE_QUIRKS2:
		r = kvm_caps.supported_quirks;
		break;
	case KVM_CAP_X86_NOTIFY_VMEXIT:
		r = kvm_caps.has_notify_vmexit;
		break;
	case KVM_CAP_VM_TYPES:
		r = kvm_caps.supported_vm_types;
		break;
	case KVM_CAP_READONLY_MEM:
		r = kvm ? kvm_arch_has_readonly_mem(kvm) : 1;
		break;
	default:
		break;
	}
	return r;
}

static int __kvm_x86_dev_get_attr(struct kvm_device_attr *attr, u64 *val)
{
	if (attr->group) {
		if (kvm_x86_ops.dev_get_attr)
			return kvm_x86_call(dev_get_attr)(attr->group, attr->attr, val);
		return -ENXIO;
	}

	switch (attr->attr) {
	case KVM_X86_XCOMP_GUEST_SUPP:
		*val = kvm_caps.supported_xcr0;
		return 0;
	default:
		return -ENXIO;
	}
}

static int kvm_x86_dev_get_attr(struct kvm_device_attr *attr)
{
	u64 __user *uaddr = u64_to_user_ptr(attr->addr);
	int r;
	u64 val;

	r = __kvm_x86_dev_get_attr(attr, &val);
	if (r < 0)
		return r;

	if (put_user(val, uaddr))
		return -EFAULT;

	return 0;
}

static int kvm_x86_dev_has_attr(struct kvm_device_attr *attr)
{
	u64 val;

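	/* Probe for existence only; the value itself is discarded. */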
	return __kvm_x86_dev_get_attr(attr, &val);
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_GET_MSR_INDEX_LIST: {
		struct kvm_msr_list __user *user_msr_list = argp;
		struct kvm_msr_list msr_list;
		unsigned n;

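		/*
		 * Two-pass list protocol: always report the required count
		 * back to userspace, then fail with -E2BIG if the supplied
		 * array was too small.
		 */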
5054 r = -EFAULT;
5055 if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list)))
5056 goto out;
5057 n = msr_list.nmsrs;
5058 msr_list.nmsrs = num_msrs_to_save + num_emulated_msrs;
5059 if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list)))
5060 goto out;
5061 r = -E2BIG;
5062 if (n < msr_list.nmsrs)
5063 goto out;
5064 r = -EFAULT;
5065 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
5066 num_msrs_to_save * sizeof(u32)))
5067 goto out;
5068 if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
5069 &emulated_msrs,
5070 num_emulated_msrs * sizeof(u32)))
5071 goto out;
5072 r = 0;
5073 break;
5074 }
5075 case KVM_GET_SUPPORTED_CPUID:
5076 case KVM_GET_EMULATED_CPUID: {
5077 struct kvm_cpuid2 __user *cpuid_arg = argp;
5078 struct kvm_cpuid2 cpuid;
5079
5080 r = -EFAULT;
5081 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
5082 goto out;
5083
5084 r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries,
5085 ioctl);
5086 if (r)
5087 goto out;
5088
5089 r = -EFAULT;
5090 if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid)))
5091 goto out;
5092 r = 0;
5093 break;
5094 }
5095 case KVM_X86_GET_MCE_CAP_SUPPORTED:
5096 r = -EFAULT;
5097 if (copy_to_user(argp, &kvm_caps.supported_mce_cap,
5098 sizeof(kvm_caps.supported_mce_cap)))
5099 goto out;
5100 r = 0;
5101 break;
5102 case KVM_GET_MSR_FEATURE_INDEX_LIST: {
5103 struct kvm_msr_list __user *user_msr_list = argp;
5104 struct kvm_msr_list msr_list;
5105 unsigned int n;
5106
5107 r = -EFAULT;
5108 if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list)))
5109 goto out;
5110 n = msr_list.nmsrs;
5111 msr_list.nmsrs = num_msr_based_features;
5112 if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list)))
5113 goto out;
5114 r = -E2BIG;
5115 if (n < msr_list.nmsrs)
5116 goto out;
5117 r = -EFAULT;
5118 if (copy_to_user(user_msr_list->indices, &msr_based_features,
5119 num_msr_based_features * sizeof(u32)))
5120 goto out;
5121 r = 0;
5122 break;
5123 }
5124 case KVM_GET_MSRS:
5125 r = msr_io(NULL, argp, do_get_feature_msr, 1);
5126 break;
5127 #ifdef CONFIG_KVM_HYPERV
5128 case KVM_GET_SUPPORTED_HV_CPUID:
5129 r = kvm_ioctl_get_supported_hv_cpuid(NULL, argp);
5130 break;
5131 #endif
5132 case KVM_GET_DEVICE_ATTR: {
5133 struct kvm_device_attr attr;
5134 r = -EFAULT;
5135 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
5136 break;
5137 r = kvm_x86_dev_get_attr(&attr);
5138 break;
5139 }
5140 case KVM_HAS_DEVICE_ATTR: {
5141 struct kvm_device_attr attr;
5142 r = -EFAULT;
5143 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
5144 break;
5145 r = kvm_x86_dev_has_attr(&attr);
5146 break;
5147 }
5148 default:
5149 r = -EINVAL;
5150 break;
5151 }
5152 out:
5153 return r;
5154 }
5155
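/*
 * WBINVD must be emulated (rather than treated as a nop) only when the VM
 * has non-coherent DMA, i.e. when a cache flush can actually be observed.
 */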
5156 static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
5157 {
5158 return kvm_arch_has_noncoherent_dma(vcpu->kvm);
5159 }
5160
5161 static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu);
5162
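/* Prepare vCPU state when @vcpu is (re)loaded onto physical CPU @cpu. */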
5163 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
5164 {
5165 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
5166
5167 kvm_request_l1tf_flush_l1d();
5168
5169 if (vcpu->scheduled_out && pmu->version && pmu->event_count) {
5170 pmu->need_cleanup = true;
5171 kvm_make_request(KVM_REQ_PMU, vcpu);
5172 }
5173
5174 /* Handle the case where the guest may execute WBINVD */
5175 if (need_emulate_wbinvd(vcpu)) {
5176 if (kvm_x86_call(has_wbinvd_exit)())
5177 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
5178 else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
5179 wbinvd_on_cpu(vcpu->cpu);
5180 }
5181
5182 kvm_x86_call(vcpu_load)(vcpu, cpu);
5183
5184 if (vcpu != per_cpu(last_vcpu, cpu)) {
5185 /*
5186 * Flush the branch predictor when switching vCPUs on the same
5187 * physical CPU, as each vCPU needs its own branch prediction
5188 * domain. No IBPB is needed when switching between L1 and L2
5189 * on the same vCPU unless IBRS is advertised to the vCPU; that
5190 * is handled on the nested VM-Exit path.
5191 */
5192 if (static_branch_likely(&switch_vcpu_ibpb))
5193 indirect_branch_prediction_barrier();
5194 per_cpu(last_vcpu, cpu) = vcpu;
5195 }
5196
5197 /* Save host pkru register if supported */
5198 vcpu->arch.host_pkru = read_pkru();
5199
5200 /* Apply any externally detected TSC adjustments (due to suspend) */
5201 if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
5202 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
5203 vcpu->arch.tsc_offset_adjustment = 0;
5204 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
5205 }
5206
5207 if (unlikely(vcpu->cpu != cpu) || kvm_check_tsc_unstable()) {
5208 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
5209 rdtsc() - vcpu->arch.last_host_tsc;
5210 if (tsc_delta < 0)
5211 mark_tsc_unstable("KVM discovered backwards TSC");
5212
5213 if (kvm_check_tsc_unstable()) {
5214 u64 offset = kvm_compute_l1_tsc_offset(vcpu,
5215 vcpu->arch.last_guest_tsc);
5216 kvm_vcpu_write_tsc_offset(vcpu, offset);
5217 if (!vcpu->arch.guest_tsc_protected)
5218 vcpu->arch.tsc_catchup = 1;
5219 }
5220
5221 if (kvm_lapic_hv_timer_in_use(vcpu))
5222 kvm_lapic_restart_hv_timer(vcpu);
5223
5224 /*
5225 * On a host with synchronized TSC, there is no need to update
5226 * kvmclock on vcpu->cpu migration
5227 */
5228 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1)
5229 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
5230 if (vcpu->cpu != cpu)
5231 kvm_make_request(KVM_REQ_MIGRATE_TIMER, vcpu);
5232 vcpu->cpu = cpu;
5233 }
5234
5235 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
5236 }
5237
5238 static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
5239 {
5240 struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache;
5241 struct kvm_steal_time __user *st;
5242 struct kvm_memslots *slots;
5243 static const u8 preempted = KVM_VCPU_PREEMPTED;
5244 gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS;
5245
5246 /*
5247 * The vCPU can be marked preempted if and only if the VM-Exit was on
5248 * an instruction boundary and will not trigger guest emulation of any
5249 * kind (see vcpu_run). Vendor specific code controls (conservatively)
5250 * when this is true, for example allowing the vCPU to be marked
5251 * preempted if and only if the VM-Exit was due to a host interrupt.
5252 */
5253 if (!vcpu->arch.at_instruction_boundary) {
5254 vcpu->stat.preemption_other++;
5255 return;
5256 }
5257
5258 vcpu->stat.preemption_reported++;
5259 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
5260 return;
5261
5262 if (vcpu->arch.st.preempted)
5263 return;
5264
5265 /* This happens on process exit */
5266 if (unlikely(current->mm != vcpu->kvm->mm))
5267 return;
5268
5269 slots = kvm_memslots(vcpu->kvm);
5270
5271 if (unlikely(slots->generation != ghc->generation ||
5272 gpa != ghc->gpa ||
5273 kvm_is_error_hva(ghc->hva) || !ghc->memslot))
5274 return;
5275
5276 st = (struct kvm_steal_time __user *)ghc->hva;
5277 BUILD_BUG_ON(sizeof(st->preempted) != sizeof(preempted));
5278
5279 if (!copy_to_user_nofault(&st->preempted, &preempted, sizeof(preempted)))
5280 vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
5281
5282 mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
5283 }
5284
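/*
 * Counterpart to kvm_arch_vcpu_load(): record preemption/steal-time state
 * for the guest and snapshot the host TSC as the vCPU is put.
 */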
5285 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
5286 {
5287 int idx;
5288
5289 if (vcpu->preempted) {
5290 /*
5291 * Assume protected guests are in-kernel. Inefficient yielding
5292 * due to false positives is preferable to never yielding due
5293 * to false negatives.
5294 */
5295 vcpu->arch.preempted_in_kernel = vcpu->arch.guest_state_protected ||
5296 !kvm_x86_call(get_cpl_no_cache)(vcpu);
5297
5298 /*
5299 * Take the srcu lock as memslots will be accessed to check the gfn
5300 * cache generation against the memslots generation.
5301 */
5302 idx = srcu_read_lock(&vcpu->kvm->srcu);
5303 if (kvm_xen_msr_enabled(vcpu->kvm))
5304 kvm_xen_runstate_set_preempted(vcpu);
5305 else
5306 kvm_steal_time_set_preempted(vcpu);
5307 srcu_read_unlock(&vcpu->kvm->srcu, idx);
5308 }
5309
5310 kvm_x86_call(vcpu_put)(vcpu);
5311 vcpu->arch.last_host_tsc = rdtsc();
5312 }
5313
5314 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
5315 struct kvm_lapic_state *s)
5316 {
5317 if (vcpu->arch.apic->guest_apic_protected)
5318 return -EINVAL;
5319
5320 kvm_x86_call(sync_pir_to_irr)(vcpu);
5321
5322 return kvm_apic_get_state(vcpu, s);
5323 }
5324
5325 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
5326 struct kvm_lapic_state *s)
5327 {
5328 int r;
5329
5330 if (vcpu->arch.apic->guest_apic_protected)
5331 return -EINVAL;
5332
5333 r = kvm_apic_set_state(vcpu, s);
5334 if (r)
5335 return r;
5336 update_cr8_intercept(vcpu);
5337
5338 return 0;
5339 }
5340
5341 static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
5342 {
5343 /*
5344 * We can accept userspace's request for interrupt injection
5345 * as long as we have a place to store the interrupt number.
5346 * The actual injection will happen when the CPU is able to
5347 * deliver the interrupt.
5348 */
5349 if (kvm_cpu_has_extint(vcpu))
5350 return false;
5351
5352 /* Acknowledging ExtINT does not happen if LINT0 is masked. */
5353 return (!lapic_in_kernel(vcpu) ||
5354 kvm_apic_accept_pic_intr(vcpu));
5355 }
5356
5357 static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
5358 {
5359 /*
5360 * Do not cause an interrupt window exit if an exception
5361 * is pending or an event needs reinjection; userspace
5362 * might want to inject the interrupt manually using KVM_SET_REGS
5363 * or KVM_SET_SREGS. For that to work, we must be at an
5364 * instruction boundary and with no events half-injected.
5365 */
5366 return (kvm_arch_interrupt_allowed(vcpu) &&
5367 kvm_cpu_accept_dm_intr(vcpu) &&
5368 !kvm_event_needs_reinjection(vcpu) &&
5369 !kvm_is_exception_pending(vcpu));
5370 }
5371
5372 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
5373 struct kvm_interrupt *irq)
5374 {
5375 if (irq->irq >= KVM_NR_INTERRUPTS)
5376 return -EINVAL;
5377
5378 if (!irqchip_in_kernel(vcpu->kvm)) {
5379 kvm_queue_interrupt(vcpu, irq->irq, false);
5380 kvm_make_request(KVM_REQ_EVENT, vcpu);
5381 return 0;
5382 }
5383
5384 /*
5385 * With in-kernel LAPIC, we only use this to inject EXTINT, so
5386 * fail for in-kernel 8259.
5387 */
5388 if (pic_in_kernel(vcpu->kvm))
5389 return -ENXIO;
5390
5391 if (vcpu->arch.pending_external_vector != -1)
5392 return -EEXIST;
5393
5394 vcpu->arch.pending_external_vector = irq->irq;
5395 kvm_make_request(KVM_REQ_EVENT, vcpu);
5396 return 0;
5397 }
5398
5399 static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
5400 {
5401 kvm_inject_nmi(vcpu);
5402
5403 return 0;
5404 }
5405
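/* Enable or disable TPR access reporting (KVM_TPR_ACCESS_REPORTING). */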
5406 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
5407 struct kvm_tpr_access_ctl *tac)
5408 {
5409 if (tac->flags)
5410 return -EINVAL;
5411 vcpu->arch.tpr_access_reporting = !!tac->enabled;
5412 return 0;
5413 }
5414
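/*
 * Configure the vCPU's machine-check architecture (KVM_X86_SETUP_MCE):
 * validate the requested bank count and capabilities, then initialize the
 * MCG/MCi control MSRs to their power-on values.
 */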
5415 static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
5416 u64 mcg_cap)
5417 {
5418 int r;
5419 unsigned int bank_num = mcg_cap & 0xff, bank;
5420
5421 r = -EINVAL;
5422 if (!bank_num || bank_num > KVM_MAX_MCE_BANKS)
5423 goto out;
5424 if (mcg_cap & ~(kvm_caps.supported_mce_cap | 0xff | 0xff0000))
5425 goto out;
5426 r = 0;
5427 vcpu->arch.mcg_cap = mcg_cap;
5428 /* Init IA32_MCG_CTL to all 1s */
5429 if (mcg_cap & MCG_CTL_P)
5430 vcpu->arch.mcg_ctl = ~(u64)0;
5431 /* Init IA32_MCi_CTL to all 1s, IA32_MCi_CTL2 to all 0s */
5432 for (bank = 0; bank < bank_num; bank++) {
5433 vcpu->arch.mce_banks[bank*4] = ~(u64)0;
5434 if (mcg_cap & MCG_CMCI_P)
5435 vcpu->arch.mci_ctl2_banks[bank] = 0;
5436 }
5437
5438 kvm_apic_after_set_mcg_cap(vcpu);
5439
5440 kvm_x86_call(setup_mce)(vcpu);
5441 out:
5442 return r;
5443 }
5444
5445 /*
5446 * Validate this is a UCNA (uncorrectable no action) error by checking the
5447 * MCG_STATUS and MCi_STATUS registers:
5448 * - none of the bits for Machine Check Exceptions are set
5449 * - both the VAL (valid) and UC (uncorrectable) bits are set
5450 * MCI_STATUS_PCC - Processor Context Corrupted
5451 * MCI_STATUS_S - Signaled as a Machine Check Exception
5452 * MCI_STATUS_AR - Software recoverable Action Required
5453 */
5454 static bool is_ucna(struct kvm_x86_mce *mce)
5455 {
5456 return !mce->mcg_status &&
5457 !(mce->status & (MCI_STATUS_PCC | MCI_STATUS_S | MCI_STATUS_AR)) &&
5458 (mce->status & MCI_STATUS_VAL) &&
5459 (mce->status & MCI_STATUS_UC);
5460 }
5461
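/*
 * Record a UCNA in the target bank and, if CMCI is available and enabled
 * for that bank, deliver a CMCI interrupt via the local APIC.
 */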
5462 static int kvm_vcpu_x86_set_ucna(struct kvm_vcpu *vcpu, struct kvm_x86_mce *mce, u64 *banks)
5463 {
5464 u64 mcg_cap = vcpu->arch.mcg_cap;
5465
5466 banks[1] = mce->status;
5467 banks[2] = mce->addr;
5468 banks[3] = mce->misc;
5469 vcpu->arch.mcg_status = mce->mcg_status;
5470
5471 if (!(mcg_cap & MCG_CMCI_P) ||
5472 !(vcpu->arch.mci_ctl2_banks[mce->bank] & MCI_CTL2_CMCI_EN))
5473 return 0;
5474
5475 if (lapic_in_kernel(vcpu))
5476 kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTCMCI);
5477
5478 return 0;
5479 }
5480
5481 static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
5482 struct kvm_x86_mce *mce)
5483 {
5484 u64 mcg_cap = vcpu->arch.mcg_cap;
5485 unsigned int bank_num = mcg_cap & 0xff;
5486 u64 *banks = vcpu->arch.mce_banks;
5487
5488 if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
5489 return -EINVAL;
5490
5491 banks += array_index_nospec(4 * mce->bank, 4 * bank_num);
5492
5493 if (is_ucna(mce))
5494 return kvm_vcpu_x86_set_ucna(vcpu, mce, banks);
5495
5496 /*
5497 * if IA32_MCG_CTL is not all 1s, the uncorrected error
5498 * reporting is disabled
5499 */
5500 if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
5501 vcpu->arch.mcg_ctl != ~(u64)0)
5502 return 0;
5503 /*
5504 * if IA32_MCi_CTL is not all 1s, the uncorrected error
5505 * reporting is disabled for the bank
5506 */
5507 if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
5508 return 0;
5509 if (mce->status & MCI_STATUS_UC) {
5510 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
5511 !kvm_is_cr4_bit_set(vcpu, X86_CR4_MCE)) {
5512 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
5513 return 0;
5514 }
5515 if (banks[1] & MCI_STATUS_VAL)
5516 mce->status |= MCI_STATUS_OVER;
5517 banks[2] = mce->addr;
5518 banks[3] = mce->misc;
5519 vcpu->arch.mcg_status = mce->mcg_status;
5520 banks[1] = mce->status;
5521 kvm_queue_exception(vcpu, MC_VECTOR);
5522 } else if (!(banks[1] & MCI_STATUS_VAL)
5523 || !(banks[1] & MCI_STATUS_UC)) {
5524 if (banks[1] & MCI_STATUS_VAL)
5525 mce->status |= MCI_STATUS_OVER;
5526 banks[2] = mce->addr;
5527 banks[3] = mce->misc;
5528 banks[1] = mce->status;
5529 } else
5530 banks[1] |= MCI_STATUS_OVER;
5531 return 0;
5532 }
5533
5534 static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
5535 struct kvm_vcpu_events *events)
5536 {
5537 struct kvm_queued_exception *ex;
5538
5539 process_nmi(vcpu);
5540
5541 #ifdef CONFIG_KVM_SMM
5542 if (kvm_check_request(KVM_REQ_SMI, vcpu))
5543 process_smi(vcpu);
5544 #endif
5545
5546 /*
5547 * KVM's ABI only allows for one exception to be migrated. Luckily,
5548 * the only time there can be two queued exceptions is if there's a
5549 * non-exiting _injected_ exception, and a pending exiting exception.
5550 * In that case, ignore the VM-Exiting exception as it's an extension
5551 * of the injected exception.
5552 */
5553 if (vcpu->arch.exception_vmexit.pending &&
5554 !vcpu->arch.exception.pending &&
5555 !vcpu->arch.exception.injected)
5556 ex = &vcpu->arch.exception_vmexit;
5557 else
5558 ex = &vcpu->arch.exception;
5559
5560 /*
5561 * In guest mode, payload delivery should be deferred if the exception
5562 * will be intercepted by L1, e.g. KVM should not modify CR2 if L1
5563 * intercepts #PF, ditto for DR6 and #DBs. If the per-VM capability,
5564 * KVM_CAP_EXCEPTION_PAYLOAD, is not set, userspace may or may not
5565 * propagate the payload and so it cannot be safely deferred. Deliver
5566 * the payload if the capability hasn't been requested.
5567 */
5568 if (!vcpu->kvm->arch.exception_payload_enabled &&
5569 ex->pending && ex->has_payload)
5570 kvm_deliver_exception_payload(vcpu, ex);
5571
5572 memset(events, 0, sizeof(*events));
5573
5574 /*
5575 * The API doesn't provide the instruction length for software
5576 * exceptions, so don't report them. As long as the guest RIP
5577 * isn't advanced, we should expect to encounter the exception
5578 * again.
5579 */
5580 if (!kvm_exception_is_soft(ex->vector)) {
5581 events->exception.injected = ex->injected;
5582 events->exception.pending = ex->pending;
5583 /*
5584 * For ABI compatibility, deliberately conflate
5585 * pending and injected exceptions when
5586 * KVM_CAP_EXCEPTION_PAYLOAD isn't enabled.
5587 */
5588 if (!vcpu->kvm->arch.exception_payload_enabled)
5589 events->exception.injected |= ex->pending;
5590 }
5591 events->exception.nr = ex->vector;
5592 events->exception.has_error_code = ex->has_error_code;
5593 events->exception.error_code = ex->error_code;
5594 events->exception_has_payload = ex->has_payload;
5595 events->exception_payload = ex->payload;
5596
5597 events->interrupt.injected =
5598 vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft;
5599 events->interrupt.nr = vcpu->arch.interrupt.nr;
5600 events->interrupt.shadow = kvm_x86_call(get_interrupt_shadow)(vcpu);
5601
5602 events->nmi.injected = vcpu->arch.nmi_injected;
5603 events->nmi.pending = kvm_get_nr_pending_nmis(vcpu);
5604 events->nmi.masked = kvm_x86_call(get_nmi_mask)(vcpu);
5605
5606 /* events->sipi_vector is never valid when reporting to user space */
5607
5608 #ifdef CONFIG_KVM_SMM
5609 events->smi.smm = is_smm(vcpu);
5610 events->smi.pending = vcpu->arch.smi_pending;
5611 events->smi.smm_inside_nmi =
5612 !!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK);
5613 #endif
5614 events->smi.latched_init = kvm_lapic_latched_init(vcpu);
5615
5616 events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
5617 | KVM_VCPUEVENT_VALID_SHADOW
5618 | KVM_VCPUEVENT_VALID_SMM);
5619 if (vcpu->kvm->arch.exception_payload_enabled)
5620 events->flags |= KVM_VCPUEVENT_VALID_PAYLOAD;
5621 if (vcpu->kvm->arch.triple_fault_event) {
5622 events->triple_fault.pending = kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu);
5623 events->flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT;
5624 }
5625 }
5626
5627 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
5628 struct kvm_vcpu_events *events)
5629 {
5630 if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
5631 | KVM_VCPUEVENT_VALID_SIPI_VECTOR
5632 | KVM_VCPUEVENT_VALID_SHADOW
5633 | KVM_VCPUEVENT_VALID_SMM
5634 | KVM_VCPUEVENT_VALID_PAYLOAD
5635 | KVM_VCPUEVENT_VALID_TRIPLE_FAULT))
5636 return -EINVAL;
5637
5638 if (events->flags & KVM_VCPUEVENT_VALID_PAYLOAD) {
5639 if (!vcpu->kvm->arch.exception_payload_enabled)
5640 return -EINVAL;
5641 if (events->exception.pending)
5642 events->exception.injected = 0;
5643 else
5644 events->exception_has_payload = 0;
5645 } else {
5646 events->exception.pending = 0;
5647 events->exception_has_payload = 0;
5648 }
5649
5650 if ((events->exception.injected || events->exception.pending) &&
5651 (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR))
5652 return -EINVAL;
5653
5654 process_nmi(vcpu);
5655
5656 /*
5657 * Flag that userspace is stuffing an exception, the next KVM_RUN will
5658 * morph the exception to a VM-Exit if appropriate. Do this only for
5659 * pending exceptions, already-injected exceptions are not subject to
5660 * interception. Note, userspace that conflates pending and injected
5661 * is hosed, and will incorrectly convert an injected exception into a
5662 * pending exception, which in turn may cause a spurious VM-Exit.
5663 */
5664 vcpu->arch.exception_from_userspace = events->exception.pending;
5665
5666 vcpu->arch.exception_vmexit.pending = false;
5667
5668 vcpu->arch.exception.injected = events->exception.injected;
5669 vcpu->arch.exception.pending = events->exception.pending;
5670 vcpu->arch.exception.vector = events->exception.nr;
5671 vcpu->arch.exception.has_error_code = events->exception.has_error_code;
5672 vcpu->arch.exception.error_code = events->exception.error_code;
5673 vcpu->arch.exception.has_payload = events->exception_has_payload;
5674 vcpu->arch.exception.payload = events->exception_payload;
5675
5676 vcpu->arch.interrupt.injected = events->interrupt.injected;
5677 vcpu->arch.interrupt.nr = events->interrupt.nr;
5678 vcpu->arch.interrupt.soft = events->interrupt.soft;
5679 if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
5680 kvm_x86_call(set_interrupt_shadow)(vcpu,
5681 events->interrupt.shadow);
5682
5683 vcpu->arch.nmi_injected = events->nmi.injected;
5684 if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) {
5685 vcpu->arch.nmi_pending = 0;
5686 atomic_set(&vcpu->arch.nmi_queued, events->nmi.pending);
5687 if (events->nmi.pending)
5688 kvm_make_request(KVM_REQ_NMI, vcpu);
5689 }
5690 kvm_x86_call(set_nmi_mask)(vcpu, events->nmi.masked);
5691
5692 if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR &&
5693 lapic_in_kernel(vcpu))
5694 vcpu->arch.apic->sipi_vector = events->sipi_vector;
5695
5696 if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
5697 #ifdef CONFIG_KVM_SMM
5698 if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) {
5699 kvm_leave_nested(vcpu);
5700 kvm_smm_changed(vcpu, events->smi.smm);
5701 }
5702
5703 vcpu->arch.smi_pending = events->smi.pending;
5704
5705 if (events->smi.smm) {
5706 if (events->smi.smm_inside_nmi)
5707 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
5708 else
5709 vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK;
5710 }
5711
5712 #else
5713 if (events->smi.smm || events->smi.pending ||
5714 events->smi.smm_inside_nmi)
5715 return -EINVAL;
5716 #endif
5717
5718 if (lapic_in_kernel(vcpu)) {
5719 if (events->smi.latched_init)
5720 set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
5721 else
5722 clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
5723 }
5724 }
5725
5726 if (events->flags & KVM_VCPUEVENT_VALID_TRIPLE_FAULT) {
5727 if (!vcpu->kvm->arch.triple_fault_event)
5728 return -EINVAL;
5729 if (events->triple_fault.pending)
5730 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
5731 else
5732 kvm_clear_request(KVM_REQ_TRIPLE_FAULT, vcpu);
5733 }
5734
5735 kvm_make_request(KVM_REQ_EVENT, vcpu);
5736
5737 return 0;
5738 }
5739
5740 static int kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
5741 struct kvm_debugregs *dbgregs)
5742 {
5743 unsigned int i;
5744
5745 if (vcpu->kvm->arch.has_protected_state &&
5746 vcpu->arch.guest_state_protected)
5747 return -EINVAL;
5748
5749 memset(dbgregs, 0, sizeof(*dbgregs));
5750
5751 BUILD_BUG_ON(ARRAY_SIZE(vcpu->arch.db) != ARRAY_SIZE(dbgregs->db));
5752 for (i = 0; i < ARRAY_SIZE(vcpu->arch.db); i++)
5753 dbgregs->db[i] = vcpu->arch.db[i];
5754
5755 dbgregs->dr6 = vcpu->arch.dr6;
5756 dbgregs->dr7 = vcpu->arch.dr7;
5757 return 0;
5758 }
5759
5760 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
5761 struct kvm_debugregs *dbgregs)
5762 {
5763 unsigned int i;
5764
5765 if (vcpu->kvm->arch.has_protected_state &&
5766 vcpu->arch.guest_state_protected)
5767 return -EINVAL;
5768
5769 if (dbgregs->flags)
5770 return -EINVAL;
5771
5772 if (!kvm_dr6_valid(dbgregs->dr6))
5773 return -EINVAL;
5774 if (!kvm_dr7_valid(dbgregs->dr7))
5775 return -EINVAL;
5776
5777 for (i = 0; i < ARRAY_SIZE(vcpu->arch.db); i++)
5778 vcpu->arch.db[i] = dbgregs->db[i];
5779
5780 kvm_update_dr0123(vcpu);
5781 vcpu->arch.dr6 = dbgregs->dr6;
5782 vcpu->arch.dr7 = dbgregs->dr7;
5783 kvm_update_dr7(vcpu);
5784
5785 return 0;
5786 }
5787
5788
5789 static int kvm_vcpu_ioctl_x86_get_xsave2(struct kvm_vcpu *vcpu,
5790 u8 *state, unsigned int size)
5791 {
5792 /*
5793 * Only copy state for features that are enabled for the guest. The
5794 * state itself isn't problematic, but setting bits in the header for
5795 * features that are supported in *this* host but not exposed to the
5796 * guest can result in KVM_SET_XSAVE failing when live migrating to a
5797 * compatible host without the features that are NOT exposed to the
5798 * guest.
5799 *
5800 * FP+SSE can always be saved/restored via KVM_{G,S}ET_XSAVE, even if
5801 * XSAVE/XCR0 are not exposed to the guest, and even if XSAVE isn't
5802 * supported by the host.
5803 */
5804 u64 supported_xcr0 = vcpu->arch.guest_supported_xcr0 |
5805 XFEATURE_MASK_FPSSE;
5806
5807 if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
5808 return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0;
5809
5810 fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu, state, size,
5811 supported_xcr0, vcpu->arch.pkru);
5812 return 0;
5813 }
5814
5815 static int kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
5816 struct kvm_xsave *guest_xsave)
5817 {
5818 return kvm_vcpu_ioctl_x86_get_xsave2(vcpu, (void *)guest_xsave->region,
5819 sizeof(guest_xsave->region));
5820 }
5821
5822 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
5823 struct kvm_xsave *guest_xsave)
5824 {
5825 union fpregs_state *xstate = (union fpregs_state *)guest_xsave->region;
5826
5827 if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
5828 return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0;
5829
5830 /*
5831 * For backwards compatibility, do not expect disabled features to be in
5832 * their initial state. XSTATE_BV[i] must still be cleared whenever
5833 * XFD[i]=1, or XRSTOR would cause a #NM.
5834 */
5835 xstate->xsave.header.xfeatures &= ~vcpu->arch.guest_fpu.fpstate->xfd;
5836
5837 return fpu_copy_uabi_to_guest_fpstate(&vcpu->arch.guest_fpu,
5838 guest_xsave->region,
5839 kvm_caps.supported_xcr0,
5840 &vcpu->arch.pkru);
5841 }
5842
5843 static int kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
5844 struct kvm_xcrs *guest_xcrs)
5845 {
5846 if (vcpu->kvm->arch.has_protected_state &&
5847 vcpu->arch.guest_state_protected)
5848 return -EINVAL;
5849
5850 if (!boot_cpu_has(X86_FEATURE_XSAVE)) {
5851 guest_xcrs->nr_xcrs = 0;
5852 return 0;
5853 }
5854
5855 guest_xcrs->nr_xcrs = 1;
5856 guest_xcrs->flags = 0;
5857 guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK;
5858 guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
5859 return 0;
5860 }
5861
5862 static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
5863 struct kvm_xcrs *guest_xcrs)
5864 {
5865 int i, r = 0;
5866
5867 if (vcpu->kvm->arch.has_protected_state &&
5868 vcpu->arch.guest_state_protected)
5869 return -EINVAL;
5870
5871 if (!boot_cpu_has(X86_FEATURE_XSAVE))
5872 return -EINVAL;
5873
5874 if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags)
5875 return -EINVAL;
5876
5877 for (i = 0; i < guest_xcrs->nr_xcrs; i++)
5878 /* Only support XCR0 currently */
5879 if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) {
5880 r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
5881 guest_xcrs->xcrs[i].value);
5882 break;
5883 }
5884 if (r)
5885 r = -EINVAL;
5886 return r;
5887 }
5888
5889 /*
5890 * kvm_set_guest_paused() indicates to the guest kernel that it has been
5891 * stopped by the hypervisor. This function will be called from the host only.
5892 * -EINVAL is returned when the host attempts to set the flag for a guest that
5893 * does not support pv clocks.
5894 */
5895 static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
5896 {
5897 if (!vcpu->arch.pv_time.active)
5898 return -EINVAL;
5899 vcpu->arch.pvclock_set_guest_stopped_request = true;
5900 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
5901 return 0;
5902 }
5903
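/* KVM_VCPU_TSC_CTRL group: only the KVM_VCPU_TSC_OFFSET attribute exists. */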
5904 static int kvm_arch_tsc_has_attr(struct kvm_vcpu *vcpu,
5905 struct kvm_device_attr *attr)
5906 {
5907 int r;
5908
5909 switch (attr->attr) {
5910 case KVM_VCPU_TSC_OFFSET:
5911 r = 0;
5912 break;
5913 default:
5914 r = -ENXIO;
5915 }
5916
5917 return r;
5918 }
5919
5920 static int kvm_arch_tsc_get_attr(struct kvm_vcpu *vcpu,
5921 struct kvm_device_attr *attr)
5922 {
5923 u64 __user *uaddr = u64_to_user_ptr(attr->addr);
5924 int r;
5925
5926 switch (attr->attr) {
5927 case KVM_VCPU_TSC_OFFSET:
5928 r = -EFAULT;
5929 if (put_user(vcpu->arch.l1_tsc_offset, uaddr))
5930 break;
5931 r = 0;
5932 break;
5933 default:
5934 r = -ENXIO;
5935 }
5936
5937 return r;
5938 }
5939
5940 static int kvm_arch_tsc_set_attr(struct kvm_vcpu *vcpu,
5941 struct kvm_device_attr *attr)
5942 {
5943 u64 __user *uaddr = u64_to_user_ptr(attr->addr);
5944 struct kvm *kvm = vcpu->kvm;
5945 int r;
5946
5947 switch (attr->attr) {
5948 case KVM_VCPU_TSC_OFFSET: {
5949 u64 offset, tsc, ns;
5950 unsigned long flags;
5951 bool matched;
5952
5953 r = -EFAULT;
5954 if (get_user(offset, uaddr))
5955 break;
5956
5957 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
5958
5959 matched = (vcpu->arch.virtual_tsc_khz &&
5960 kvm->arch.last_tsc_khz == vcpu->arch.virtual_tsc_khz &&
5961 kvm->arch.last_tsc_offset == offset);
5962
5963 tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio) + offset;
5964 ns = get_kvmclock_base_ns();
5965
5966 __kvm_synchronize_tsc(vcpu, offset, tsc, ns, matched, true);
5967 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
5968
5969 r = 0;
5970 break;
5971 }
5972 default:
5973 r = -ENXIO;
5974 }
5975
5976 return r;
5977 }
5978
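/*
 * Dispatch vCPU-scoped device attribute ioctls; only the KVM_VCPU_TSC_CTRL
 * group is supported.
 */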
5979 static int kvm_vcpu_ioctl_device_attr(struct kvm_vcpu *vcpu,
5980 unsigned int ioctl,
5981 void __user *argp)
5982 {
5983 struct kvm_device_attr attr;
5984 int r;
5985
5986 if (copy_from_user(&attr, argp, sizeof(attr)))
5987 return -EFAULT;
5988
5989 if (attr.group != KVM_VCPU_TSC_CTRL)
5990 return -ENXIO;
5991
5992 switch (ioctl) {
5993 case KVM_HAS_DEVICE_ATTR:
5994 r = kvm_arch_tsc_has_attr(vcpu, &attr);
5995 break;
5996 case KVM_GET_DEVICE_ATTR:
5997 r = kvm_arch_tsc_get_attr(vcpu, &attr);
5998 break;
5999 case KVM_SET_DEVICE_ATTR:
6000 r = kvm_arch_tsc_set_attr(vcpu, &attr);
6001 break;
6002 }
6003
6004 return r;
6005 }
6006
6007 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
6008 struct kvm_enable_cap *cap)
6009 {
6010 if (cap->flags)
6011 return -EINVAL;
6012
6013 switch (cap->cap) {
6014 #ifdef CONFIG_KVM_HYPERV
6015 case KVM_CAP_HYPERV_SYNIC2:
6016 if (cap->args[0])
6017 return -EINVAL;
6018 fallthrough;
6019
6020 case KVM_CAP_HYPERV_SYNIC:
6021 if (!irqchip_in_kernel(vcpu->kvm))
6022 return -EINVAL;
6023 return kvm_hv_activate_synic(vcpu, cap->cap ==
6024 KVM_CAP_HYPERV_SYNIC2);
6025 case KVM_CAP_HYPERV_ENLIGHTENED_VMCS:
6026 {
6027 int r;
6028 uint16_t vmcs_version;
6029 void __user *user_ptr;
6030
6031 if (!kvm_x86_ops.nested_ops->enable_evmcs)
6032 return -ENOTTY;
6033 r = kvm_x86_ops.nested_ops->enable_evmcs(vcpu, &vmcs_version);
6034 if (!r) {
6035 user_ptr = (void __user *)(uintptr_t)cap->args[0];
6036 if (copy_to_user(user_ptr, &vmcs_version,
6037 sizeof(vmcs_version)))
6038 r = -EFAULT;
6039 }
6040 return r;
6041 }
6042 case KVM_CAP_HYPERV_DIRECT_TLBFLUSH:
6043 if (!kvm_x86_ops.enable_l2_tlb_flush)
6044 return -ENOTTY;
6045
6046 return kvm_x86_call(enable_l2_tlb_flush)(vcpu);
6047
6048 case KVM_CAP_HYPERV_ENFORCE_CPUID:
6049 return kvm_hv_set_enforce_cpuid(vcpu, cap->args[0]);
6050 #endif
6051
6052 case KVM_CAP_ENFORCE_PV_FEATURE_CPUID:
6053 vcpu->arch.pv_cpuid.enforce = cap->args[0];
6054 return 0;
6055 default:
6056 return -EINVAL;
6057 }
6058 }
6059
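/*
 * x86 view of the register ID used by KVM_{GET,SET}_ONE_REG; the fields
 * overlay the index, type, size and architecture bits of the 64-bit "id"
 * in struct kvm_one_reg.
 */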
6060 struct kvm_x86_reg_id {
6061 __u32 index;
6062 __u8 type;
6063 __u8 rsvd1;
6064 __u8 rsvd2:4;
6065 __u8 size:4;
6066 __u8 x86;
6067 };
6068
6069 static int kvm_translate_kvm_reg(struct kvm_vcpu *vcpu,
6070 struct kvm_x86_reg_id *reg)
6071 {
6072 switch (reg->index) {
6073 case KVM_REG_GUEST_SSP:
6074 /*
6075 * FIXME: If host-initiated accesses are ever exempted from
6076 * ignore_msrs (in kvm_do_msr_access()), drop this manual check
6077 * and rely on KVM's standard checks to reject accesses to regs
6078 * that don't exist.
6079 */
6080 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK))
6081 return -EINVAL;
6082
6083 reg->type = KVM_X86_REG_TYPE_MSR;
6084 reg->index = MSR_KVM_INTERNAL_GUEST_SSP;
6085 break;
6086 default:
6087 return -EINVAL;
6088 }
6089 return 0;
6090 }
6091
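/* Read an MSR on the vCPU's behalf and copy the value out to userspace. */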
6092 static int kvm_get_one_msr(struct kvm_vcpu *vcpu, u32 msr, u64 __user *user_val)
6093 {
6094 u64 val;
6095
6096 if (do_get_msr(vcpu, msr, &val))
6097 return -EINVAL;
6098
6099 if (put_user(val, user_val))
6100 return -EFAULT;
6101
6102 return 0;
6103 }
6104
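/* Write an MSR on the vCPU's behalf with a value taken from userspace. */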
6105 static int kvm_set_one_msr(struct kvm_vcpu *vcpu, u32 msr, u64 __user *user_val)
6106 {
6107 u64 val;
6108
6109 if (get_user(val, user_val))
6110 return -EFAULT;
6111
6112 if (do_set_msr(vcpu, msr, &val))
6113 return -EINVAL;
6114
6115 return 0;
6116 }
6117
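/*
 * Handle KVM_GET_ONE_REG and KVM_SET_ONE_REG.  All registers reachable via
 * this API are currently backed by MSRs, so KVM-defined registers are first
 * translated to their internal MSR index, and the guest FPU is loaded
 * around accesses to xstate-managed MSRs.
 */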
6118 static int kvm_get_set_one_reg(struct kvm_vcpu *vcpu, unsigned int ioctl,
6119 void __user *argp)
6120 {
6121 struct kvm_one_reg one_reg;
6122 struct kvm_x86_reg_id *reg;
6123 u64 __user *user_val;
6124 bool load_fpu;
6125 int r;
6126
6127 if (copy_from_user(&one_reg, argp, sizeof(one_reg)))
6128 return -EFAULT;
6129
6130 if ((one_reg.id & KVM_REG_ARCH_MASK) != KVM_REG_X86)
6131 return -EINVAL;
6132
6133 reg = (struct kvm_x86_reg_id *)&one_reg.id;
6134 if (reg->rsvd1 || reg->rsvd2)
6135 return -EINVAL;
6136
6137 if (reg->type == KVM_X86_REG_TYPE_KVM) {
6138 r = kvm_translate_kvm_reg(vcpu, reg);
6139 if (r)
6140 return r;
6141 }
6142
6143 if (reg->type != KVM_X86_REG_TYPE_MSR)
6144 return -EINVAL;
6145
6146 if ((one_reg.id & KVM_REG_SIZE_MASK) != KVM_REG_SIZE_U64)
6147 return -EINVAL;
6148
6149 guard(srcu)(&vcpu->kvm->srcu);
6150
6151 load_fpu = is_xstate_managed_msr(vcpu, reg->index);
6152 if (load_fpu)
6153 kvm_load_guest_fpu(vcpu);
6154
6155 user_val = u64_to_user_ptr(one_reg.addr);
6156 if (ioctl == KVM_GET_ONE_REG)
6157 r = kvm_get_one_msr(vcpu, reg->index, user_val);
6158 else
6159 r = kvm_set_one_msr(vcpu, reg->index, user_val);
6160
6161 if (load_fpu)
6162 kvm_put_guest_fpu(vcpu);
6163 return r;
6164 }
6165
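/*
 * Handle KVM_GET_REG_LIST: enumerate the KVM-defined registers accessible
 * via KVM_{GET,SET}_ONE_REG, currently just KVM_REG_GUEST_SSP when shadow
 * stacks are exposed to the guest.
 */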
6166 static int kvm_get_reg_list(struct kvm_vcpu *vcpu,
6167 struct kvm_reg_list __user *user_list)
6168 {
6169 u64 nr_regs = guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK) ? 1 : 0;
6170 u64 user_nr_regs;
6171
6172 if (get_user(user_nr_regs, &user_list->n))
6173 return -EFAULT;
6174
6175 if (put_user(nr_regs, &user_list->n))
6176 return -EFAULT;
6177
6178 if (user_nr_regs < nr_regs)
6179 return -E2BIG;
6180
6181 if (nr_regs &&
6182 put_user(KVM_X86_REG_KVM(KVM_REG_GUEST_SSP), &user_list->reg[0]))
6183 return -EFAULT;
6184
6185 return 0;
6186 }
6187
6188 long kvm_arch_vcpu_ioctl(struct file *filp,
6189 unsigned int ioctl, unsigned long arg)
6190 {
6191 struct kvm_vcpu *vcpu = filp->private_data;
6192 void __user *argp = (void __user *)arg;
6193 int r;
6194 union {
6195 struct kvm_sregs2 *sregs2;
6196 struct kvm_lapic_state *lapic;
6197 struct kvm_xsave *xsave;
6198 struct kvm_xcrs *xcrs;
6199 void *buffer;
6200 } u;
6201
6202 vcpu_load(vcpu);
6203
6204 u.buffer = NULL;
6205 switch (ioctl) {
6206 case KVM_GET_LAPIC: {
6207 r = -EINVAL;
6208 if (!lapic_in_kernel(vcpu))
6209 goto out;
6210 u.lapic = kzalloc_obj(struct kvm_lapic_state);
6211
6212 r = -ENOMEM;
6213 if (!u.lapic)
6214 goto out;
6215 r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic);
6216 if (r)
6217 goto out;
6218 r = -EFAULT;
6219 if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state)))
6220 goto out;
6221 r = 0;
6222 break;
6223 }
6224 case KVM_SET_LAPIC: {
6225 r = -EINVAL;
6226 if (!lapic_in_kernel(vcpu))
6227 goto out;
6228 u.lapic = memdup_user(argp, sizeof(*u.lapic));
6229 if (IS_ERR(u.lapic)) {
6230 r = PTR_ERR(u.lapic);
6231 goto out_nofree;
6232 }
6233
6234 r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic);
6235 break;
6236 }
6237 case KVM_INTERRUPT: {
6238 struct kvm_interrupt irq;
6239
6240 r = -EFAULT;
6241 if (copy_from_user(&irq, argp, sizeof(irq)))
6242 goto out;
6243 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
6244 break;
6245 }
6246 case KVM_NMI: {
6247 r = kvm_vcpu_ioctl_nmi(vcpu);
6248 break;
6249 }
6250 case KVM_SMI: {
6251 r = kvm_inject_smi(vcpu);
6252 break;
6253 }
6254 case KVM_SET_CPUID: {
6255 struct kvm_cpuid __user *cpuid_arg = argp;
6256 struct kvm_cpuid cpuid;
6257
6258 r = -EFAULT;
6259 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
6260 goto out;
6261 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
6262 break;
6263 }
6264 case KVM_SET_CPUID2: {
6265 struct kvm_cpuid2 __user *cpuid_arg = argp;
6266 struct kvm_cpuid2 cpuid;
6267
6268 r = -EFAULT;
6269 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
6270 goto out;
6271 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
6272 cpuid_arg->entries);
6273 break;
6274 }
6275 case KVM_GET_CPUID2: {
6276 struct kvm_cpuid2 __user *cpuid_arg = argp;
6277 struct kvm_cpuid2 cpuid;
6278
6279 r = -EFAULT;
6280 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
6281 goto out;
6282 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
6283 cpuid_arg->entries);
6284 if (r)
6285 goto out;
6286 r = -EFAULT;
6287 if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid)))
6288 goto out;
6289 r = 0;
6290 break;
6291 }
6292 case KVM_GET_MSRS: {
6293 int idx = srcu_read_lock(&vcpu->kvm->srcu);
6294 r = msr_io(vcpu, argp, do_get_msr, 1);
6295 srcu_read_unlock(&vcpu->kvm->srcu, idx);
6296 break;
6297 }
6298 case KVM_SET_MSRS: {
6299 int idx = srcu_read_lock(&vcpu->kvm->srcu);
6300 r = msr_io(vcpu, argp, do_set_msr, 0);
6301 srcu_read_unlock(&vcpu->kvm->srcu, idx);
6302 break;
6303 }
6304 case KVM_GET_ONE_REG:
6305 case KVM_SET_ONE_REG:
6306 r = kvm_get_set_one_reg(vcpu, ioctl, argp);
6307 break;
6308 case KVM_GET_REG_LIST:
6309 r = kvm_get_reg_list(vcpu, argp);
6310 break;
6311 case KVM_TPR_ACCESS_REPORTING: {
6312 struct kvm_tpr_access_ctl tac;
6313
6314 r = -EFAULT;
6315 if (copy_from_user(&tac, argp, sizeof(tac)))
6316 goto out;
6317 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
6318 if (r)
6319 goto out;
6320 r = -EFAULT;
6321 if (copy_to_user(argp, &tac, sizeof(tac)))
6322 goto out;
6323 r = 0;
6324 break;
6325 }
6326 case KVM_SET_VAPIC_ADDR: {
6327 struct kvm_vapic_addr va;
6328 int idx;
6329
6330 r = -EINVAL;
6331 if (!lapic_in_kernel(vcpu))
6332 goto out;
6333 r = -EFAULT;
6334 if (copy_from_user(&va, argp, sizeof(va)))
6335 goto out;
6336 idx = srcu_read_lock(&vcpu->kvm->srcu);
6337 r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
6338 srcu_read_unlock(&vcpu->kvm->srcu, idx);
6339 break;
6340 }
6341 case KVM_X86_SETUP_MCE: {
6342 u64 mcg_cap;
6343
6344 r = -EFAULT;
6345 if (copy_from_user(&mcg_cap, argp, sizeof(mcg_cap)))
6346 goto out;
6347 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
6348 break;
6349 }
6350 case KVM_X86_SET_MCE: {
6351 struct kvm_x86_mce mce;
6352
6353 r = -EFAULT;
6354 if (copy_from_user(&mce, argp, sizeof(mce)))
6355 goto out;
6356 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
6357 break;
6358 }
6359 case KVM_GET_VCPU_EVENTS: {
6360 struct kvm_vcpu_events events;
6361
6362 kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);
6363
6364 r = -EFAULT;
6365 if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
6366 break;
6367 r = 0;
6368 break;
6369 }
6370 case KVM_SET_VCPU_EVENTS: {
6371 struct kvm_vcpu_events events;
6372
6373 r = -EFAULT;
6374 if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
6375 break;
6376
6377 kvm_vcpu_srcu_read_lock(vcpu);
6378 r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
6379 kvm_vcpu_srcu_read_unlock(vcpu);
6380 break;
6381 }
6382 case KVM_GET_DEBUGREGS: {
6383 struct kvm_debugregs dbgregs;
6384
6385 r = kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
6386 if (r < 0)
6387 break;
6388
6389 r = -EFAULT;
6390 if (copy_to_user(argp, &dbgregs,
6391 sizeof(struct kvm_debugregs)))
6392 break;
6393 r = 0;
6394 break;
6395 }
6396 case KVM_SET_DEBUGREGS: {
6397 struct kvm_debugregs dbgregs;
6398
6399 r = -EFAULT;
6400 if (copy_from_user(&dbgregs, argp,
6401 sizeof(struct kvm_debugregs)))
6402 break;
6403
6404 r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
6405 break;
6406 }
6407 case KVM_GET_XSAVE: {
6408 r = -EINVAL;
6409 if (vcpu->arch.guest_fpu.uabi_size > sizeof(struct kvm_xsave))
6410 break;
6411
6412 u.xsave = kzalloc_obj(struct kvm_xsave);
6413 r = -ENOMEM;
6414 if (!u.xsave)
6415 break;
6416
6417 r = kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave);
6418 if (r < 0)
6419 break;
6420
6421 r = -EFAULT;
6422 if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave)))
6423 break;
6424 r = 0;
6425 break;
6426 }
6427 case KVM_SET_XSAVE: {
6428 int size = vcpu->arch.guest_fpu.uabi_size;
6429
6430 u.xsave = memdup_user(argp, size);
6431 if (IS_ERR(u.xsave)) {
6432 r = PTR_ERR(u.xsave);
6433 goto out_nofree;
6434 }
6435
6436 r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
6437 break;
6438 }
6439
6440 case KVM_GET_XSAVE2: {
6441 int size = vcpu->arch.guest_fpu.uabi_size;
6442
6443 u.xsave = kzalloc(size, GFP_KERNEL);
6444 r = -ENOMEM;
6445 if (!u.xsave)
6446 break;
6447
6448 r = kvm_vcpu_ioctl_x86_get_xsave2(vcpu, u.buffer, size);
6449 if (r < 0)
6450 break;
6451
6452 r = -EFAULT;
6453 if (copy_to_user(argp, u.xsave, size))
6454 break;
6455
6456 r = 0;
6457 break;
6458 }
6459
6460 case KVM_GET_XCRS: {
6461 u.xcrs = kzalloc_obj(struct kvm_xcrs);
6462 r = -ENOMEM;
6463 if (!u.xcrs)
6464 break;
6465
6466 r = kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs);
6467 if (r < 0)
6468 break;
6469
6470 r = -EFAULT;
6471 if (copy_to_user(argp, u.xcrs,
6472 sizeof(struct kvm_xcrs)))
6473 break;
6474 r = 0;
6475 break;
6476 }
6477 case KVM_SET_XCRS: {
6478 u.xcrs = memdup_user(argp, sizeof(*u.xcrs));
6479 if (IS_ERR(u.xcrs)) {
6480 r = PTR_ERR(u.xcrs);
6481 goto out_nofree;
6482 }
6483
6484 r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);
6485 break;
6486 }
6487 case KVM_SET_TSC_KHZ: {
6488 u32 user_tsc_khz;
6489
6490 r = -EINVAL;
6491
6492 if (vcpu->arch.guest_tsc_protected)
6493 goto out;
6494
6495 user_tsc_khz = (u32)arg;
6496
6497 if (kvm_caps.has_tsc_control &&
6498 user_tsc_khz >= kvm_caps.max_guest_tsc_khz)
6499 goto out;
6500
6501 if (user_tsc_khz == 0)
6502 user_tsc_khz = tsc_khz;
6503
6504 if (!kvm_set_tsc_khz(vcpu, user_tsc_khz))
6505 r = 0;
6506
6507 goto out;
6508 }
6509 case KVM_GET_TSC_KHZ: {
6510 r = vcpu->arch.virtual_tsc_khz;
6511 goto out;
6512 }
6513 case KVM_KVMCLOCK_CTRL: {
6514 r = kvm_set_guest_paused(vcpu);
6515 goto out;
6516 }
6517 case KVM_ENABLE_CAP: {
6518 struct kvm_enable_cap cap;
6519
6520 r = -EFAULT;
6521 if (copy_from_user(&cap, argp, sizeof(cap)))
6522 goto out;
6523 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
6524 break;
6525 }
6526 case KVM_GET_NESTED_STATE: {
6527 struct kvm_nested_state __user *user_kvm_nested_state = argp;
6528 u32 user_data_size;
6529
6530 r = -EINVAL;
6531 if (!kvm_x86_ops.nested_ops->get_state)
6532 break;
6533
6534 BUILD_BUG_ON(sizeof(user_data_size) != sizeof(user_kvm_nested_state->size));
6535 r = -EFAULT;
6536 if (get_user(user_data_size, &user_kvm_nested_state->size))
6537 break;
6538
6539 r = kvm_x86_ops.nested_ops->get_state(vcpu, user_kvm_nested_state,
6540 user_data_size);
6541 if (r < 0)
6542 break;
6543
6544 if (r > user_data_size) {
6545 if (put_user(r, &user_kvm_nested_state->size))
6546 r = -EFAULT;
6547 else
6548 r = -E2BIG;
6549 break;
6550 }
6551
6552 r = 0;
6553 break;
6554 }
6555 case KVM_SET_NESTED_STATE: {
6556 struct kvm_nested_state __user *user_kvm_nested_state = argp;
6557 struct kvm_nested_state kvm_state;
6558 int idx;
6559
6560 r = -EINVAL;
6561 if (!kvm_x86_ops.nested_ops->set_state)
6562 break;
6563
6564 r = -EFAULT;
6565 if (copy_from_user(&kvm_state, user_kvm_nested_state, sizeof(kvm_state)))
6566 break;
6567
6568 r = -EINVAL;
6569 if (kvm_state.size < sizeof(kvm_state))
6570 break;
6571
6572 if (kvm_state.flags &
6573 ~(KVM_STATE_NESTED_RUN_PENDING | KVM_STATE_NESTED_GUEST_MODE
6574 | KVM_STATE_NESTED_EVMCS | KVM_STATE_NESTED_MTF_PENDING
6575 | KVM_STATE_NESTED_GIF_SET))
6576 break;
6577
6578 /* nested_run_pending implies guest_mode. */
6579 if ((kvm_state.flags & KVM_STATE_NESTED_RUN_PENDING)
6580 && !(kvm_state.flags & KVM_STATE_NESTED_GUEST_MODE))
6581 break;
6582
6583 idx = srcu_read_lock(&vcpu->kvm->srcu);
6584 r = kvm_x86_ops.nested_ops->set_state(vcpu, user_kvm_nested_state, &kvm_state);
6585 srcu_read_unlock(&vcpu->kvm->srcu, idx);
6586 break;
6587 }
6588 #ifdef CONFIG_KVM_HYPERV
6589 case KVM_GET_SUPPORTED_HV_CPUID:
6590 r = kvm_ioctl_get_supported_hv_cpuid(vcpu, argp);
6591 break;
6592 #endif
6593 #ifdef CONFIG_KVM_XEN
6594 case KVM_XEN_VCPU_GET_ATTR: {
6595 struct kvm_xen_vcpu_attr xva;
6596
6597 r = -EFAULT;
6598 if (copy_from_user(&xva, argp, sizeof(xva)))
6599 goto out;
6600 r = kvm_xen_vcpu_get_attr(vcpu, &xva);
6601 if (!r && copy_to_user(argp, &xva, sizeof(xva)))
6602 r = -EFAULT;
6603 break;
6604 }
6605 case KVM_XEN_VCPU_SET_ATTR: {
6606 struct kvm_xen_vcpu_attr xva;
6607
6608 r = -EFAULT;
6609 if (copy_from_user(&xva, argp, sizeof(xva)))
6610 goto out;
6611 r = kvm_xen_vcpu_set_attr(vcpu, &xva);
6612 break;
6613 }
6614 #endif
6615 case KVM_GET_SREGS2: {
6616 r = -EINVAL;
6617 if (vcpu->kvm->arch.has_protected_state &&
6618 vcpu->arch.guest_state_protected)
6619 goto out;
6620
6621 u.sregs2 = kzalloc_obj(struct kvm_sregs2);
6622 r = -ENOMEM;
6623 if (!u.sregs2)
6624 goto out;
6625 __get_sregs2(vcpu, u.sregs2);
6626 r = -EFAULT;
6627 if (copy_to_user(argp, u.sregs2, sizeof(struct kvm_sregs2)))
6628 goto out;
6629 r = 0;
6630 break;
6631 }
6632 case KVM_SET_SREGS2: {
6633 r = -EINVAL;
6634 if (vcpu->kvm->arch.has_protected_state &&
6635 vcpu->arch.guest_state_protected)
6636 goto out;
6637
6638 u.sregs2 = memdup_user(argp, sizeof(struct kvm_sregs2));
6639 if (IS_ERR(u.sregs2)) {
6640 r = PTR_ERR(u.sregs2);
6641 u.sregs2 = NULL;
6642 goto out;
6643 }
6644 r = __set_sregs2(vcpu, u.sregs2);
6645 break;
6646 }
6647 case KVM_HAS_DEVICE_ATTR:
6648 case KVM_GET_DEVICE_ATTR:
6649 case KVM_SET_DEVICE_ATTR:
6650 r = kvm_vcpu_ioctl_device_attr(vcpu, ioctl, argp);
6651 break;
6652 case KVM_MEMORY_ENCRYPT_OP:
6653 r = -ENOTTY;
6654 if (!kvm_x86_ops.vcpu_mem_enc_ioctl)
6655 goto out;
6656 r = kvm_x86_ops.vcpu_mem_enc_ioctl(vcpu, argp);
6657 break;
6658 default:
6659 r = -EINVAL;
6660 }
6661 out:
6662 kfree(u.buffer);
6663 out_nofree:
6664 vcpu_put(vcpu);
6665 return r;
6666 }
6667
6668 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
6669 {
6670 return VM_FAULT_SIGBUS;
6671 }
6672
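/*
 * The TSS used for real-mode emulation spans three pages; reject base
 * addresses that would push it beyond the 32-bit address space.
 */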
6673 static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
6674 {
6675 int ret;
6676
6677 if (addr > (unsigned int)(-3 * PAGE_SIZE))
6678 return -EINVAL;
6679 ret = kvm_x86_call(set_tss_addr)(kvm, addr);
6680 return ret;
6681 }
6682
6683 static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
6684 u64 ident_addr)
6685 {
6686 return kvm_x86_call(set_identity_map_addr)(kvm, ident_addr);
6687 }
6688
6689 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
6690 unsigned long kvm_nr_mmu_pages)
6691 {
6692 if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
6693 return -EINVAL;
6694
6695 mutex_lock(&kvm->slots_lock);
6696
6697 kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
6698 kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
6699
6700 mutex_unlock(&kvm->slots_lock);
6701 return 0;
6702 }
6703
6704 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
6705 {
6706
6707 /*
6708 * Flush all CPUs' dirty log buffers to the dirty_bitmap. Called
6709 * before reporting dirty_bitmap to userspace. KVM flushes the buffers
6710 * on all VM-Exits, thus we only need to kick running vCPUs to force a
6711 * VM-Exit.
6712 */
6713 struct kvm_vcpu *vcpu;
6714 unsigned long i;
6715
6716 if (!kvm->arch.cpu_dirty_log_size)
6717 return;
6718
6719 kvm_for_each_vcpu(i, vcpu, kvm)
6720 kvm_vcpu_kick(vcpu);
6721 }
6722
6723 int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
6724 struct kvm_enable_cap *cap)
6725 {
6726 int r;
6727
6728 if (cap->flags)
6729 return -EINVAL;
6730
6731 switch (cap->cap) {
6732 case KVM_CAP_DISABLE_QUIRKS2:
6733 r = -EINVAL;
6734 if (cap->args[0] & ~kvm_caps.supported_quirks)
6735 break;
6736 fallthrough;
6737 case KVM_CAP_DISABLE_QUIRKS:
6738 kvm->arch.disabled_quirks |= cap->args[0] & kvm_caps.supported_quirks;
6739 r = 0;
6740 break;
6741 case KVM_CAP_SPLIT_IRQCHIP: {
6742 mutex_lock(&kvm->lock);
6743 r = -EINVAL;
6744 if (cap->args[0] > KVM_MAX_IRQ_ROUTES)
6745 goto split_irqchip_unlock;
6746 r = -EEXIST;
6747 if (irqchip_in_kernel(kvm))
6748 goto split_irqchip_unlock;
6749 if (kvm->created_vcpus)
6750 goto split_irqchip_unlock;
6751 /* Pairs with irqchip_in_kernel. */
6752 smp_wmb();
6753 kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT;
6754 kvm->arch.nr_reserved_ioapic_pins = cap->args[0];
6755 kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_ABSENT);
6756 r = 0;
6757 split_irqchip_unlock:
6758 mutex_unlock(&kvm->lock);
6759 break;
6760 }
6761 case KVM_CAP_X2APIC_API:
6762 r = -EINVAL;
6763 if (cap->args[0] & ~KVM_X2APIC_API_VALID_FLAGS)
6764 break;
6765
6766 if ((cap->args[0] & KVM_X2APIC_ENABLE_SUPPRESS_EOI_BROADCAST) &&
6767 (cap->args[0] & KVM_X2APIC_DISABLE_SUPPRESS_EOI_BROADCAST))
6768 break;
6769
6770 if ((cap->args[0] & KVM_X2APIC_ENABLE_SUPPRESS_EOI_BROADCAST) &&
6771 !irqchip_split(kvm))
6772 break;
6773
6774 if (cap->args[0] & KVM_X2APIC_API_USE_32BIT_IDS)
6775 kvm->arch.x2apic_format = true;
6776 if (cap->args[0] & KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)
6777 kvm->arch.x2apic_broadcast_quirk_disabled = true;
6778
6779 if (cap->args[0] & KVM_X2APIC_ENABLE_SUPPRESS_EOI_BROADCAST)
6780 kvm->arch.suppress_eoi_broadcast_mode = KVM_SUPPRESS_EOI_BROADCAST_ENABLED;
6781 if (cap->args[0] & KVM_X2APIC_DISABLE_SUPPRESS_EOI_BROADCAST)
6782 kvm->arch.suppress_eoi_broadcast_mode = KVM_SUPPRESS_EOI_BROADCAST_DISABLED;
6783
6784 r = 0;
6785 break;
6786 case KVM_CAP_X86_DISABLE_EXITS:
6787 r = -EINVAL;
6788 if (cap->args[0] & ~kvm_get_allowed_disable_exits())
6789 break;
6790
6791 mutex_lock(&kvm->lock);
6792 if (kvm->created_vcpus)
6793 goto disable_exits_unlock;
6794
6795 #define SMT_RSB_MSG "This processor is affected by the Cross-Thread Return Predictions vulnerability. " \
6796 "KVM_CAP_X86_DISABLE_EXITS should only be used with SMT disabled or trusted guests."
6797
6798 if (!mitigate_smt_rsb && boot_cpu_has_bug(X86_BUG_SMT_RSB) &&
6799 cpu_smt_possible() &&
6800 (cap->args[0] & ~(KVM_X86_DISABLE_EXITS_PAUSE |
6801 KVM_X86_DISABLE_EXITS_APERFMPERF)))
6802 pr_warn_once(SMT_RSB_MSG);
6803
6804 kvm_disable_exits(kvm, cap->args[0]);
6805 r = 0;
6806 disable_exits_unlock:
6807 mutex_unlock(&kvm->lock);
6808 break;
6809 case KVM_CAP_MSR_PLATFORM_INFO:
6810 kvm->arch.guest_can_read_msr_platform_info = cap->args[0];
6811 r = 0;
6812 break;
6813 case KVM_CAP_EXCEPTION_PAYLOAD:
6814 kvm->arch.exception_payload_enabled = cap->args[0];
6815 r = 0;
6816 break;
6817 case KVM_CAP_X86_TRIPLE_FAULT_EVENT:
6818 kvm->arch.triple_fault_event = cap->args[0];
6819 r = 0;
6820 break;
6821 case KVM_CAP_X86_USER_SPACE_MSR:
6822 r = -EINVAL;
6823 if (cap->args[0] & ~KVM_MSR_EXIT_REASON_VALID_MASK)
6824 break;
6825 kvm->arch.user_space_msr_mask = cap->args[0];
6826 r = 0;
6827 break;
6828 case KVM_CAP_X86_BUS_LOCK_EXIT:
6829 r = -EINVAL;
6830 if (cap->args[0] & ~KVM_BUS_LOCK_DETECTION_VALID_MODE)
6831 break;
6832
6833 if ((cap->args[0] & KVM_BUS_LOCK_DETECTION_OFF) &&
6834 (cap->args[0] & KVM_BUS_LOCK_DETECTION_EXIT))
6835 break;
6836
6837 if (kvm_caps.has_bus_lock_exit &&
6838 cap->args[0] & KVM_BUS_LOCK_DETECTION_EXIT)
6839 kvm->arch.bus_lock_detection_enabled = true;
6840 r = 0;
6841 break;
6842 #ifdef CONFIG_X86_SGX_KVM
6843 case KVM_CAP_SGX_ATTRIBUTE: {
6844 unsigned long allowed_attributes = 0;
6845
6846 r = sgx_set_attribute(&allowed_attributes, cap->args[0]);
6847 if (r)
6848 break;
6849
6850 /* KVM only supports the PROVISIONKEY privileged attribute. */
6851 if ((allowed_attributes & SGX_ATTR_PROVISIONKEY) &&
6852 !(allowed_attributes & ~SGX_ATTR_PROVISIONKEY))
6853 kvm->arch.sgx_provisioning_allowed = true;
6854 else
6855 r = -EINVAL;
6856 break;
6857 }
6858 #endif
6859 case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM:
6860 r = -EINVAL;
6861 if (!kvm_x86_ops.vm_copy_enc_context_from)
6862 break;
6863
6864 r = kvm_x86_call(vm_copy_enc_context_from)(kvm, cap->args[0]);
6865 break;
6866 case KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM:
6867 r = -EINVAL;
6868 if (!kvm_x86_ops.vm_move_enc_context_from)
6869 break;
6870
6871 r = kvm_x86_call(vm_move_enc_context_from)(kvm, cap->args[0]);
6872 break;
6873 case KVM_CAP_EXIT_HYPERCALL:
6874 if (cap->args[0] & ~KVM_EXIT_HYPERCALL_VALID_MASK) {
6875 r = -EINVAL;
6876 break;
6877 }
6878 kvm->arch.hypercall_exit_enabled = cap->args[0];
6879 r = 0;
6880 break;
6881 case KVM_CAP_EXIT_ON_EMULATION_FAILURE:
6882 r = -EINVAL;
6883 if (cap->args[0] & ~1)
6884 break;
6885 kvm->arch.exit_on_emulation_error = cap->args[0];
6886 r = 0;
6887 break;
6888 case KVM_CAP_PMU_CAPABILITY:
6889 r = -EINVAL;
6890 if (!enable_pmu || (cap->args[0] & ~KVM_CAP_PMU_VALID_MASK))
6891 break;
6892
6893 mutex_lock(&kvm->lock);
6894 if (!kvm->created_vcpus && !kvm->arch.created_mediated_pmu) {
6895 kvm->arch.enable_pmu = !(cap->args[0] & KVM_PMU_CAP_DISABLE);
6896 r = 0;
6897 }
6898 mutex_unlock(&kvm->lock);
6899 break;
6900 case KVM_CAP_MAX_VCPU_ID:
6901 r = -EINVAL;
6902 if (cap->args[0] > KVM_MAX_VCPU_IDS)
6903 break;
6904
6905 mutex_lock(&kvm->lock);
6906 if (kvm->arch.bsp_vcpu_id > cap->args[0]) {
6907 ;
6908 } else if (kvm->arch.max_vcpu_ids == cap->args[0]) {
6909 r = 0;
6910 } else if (!kvm->arch.max_vcpu_ids) {
6911 kvm->arch.max_vcpu_ids = cap->args[0];
6912 r = 0;
6913 }
6914 mutex_unlock(&kvm->lock);
6915 break;
6916 case KVM_CAP_X86_NOTIFY_VMEXIT:
6917 r = -EINVAL;
6918 if ((u32)cap->args[0] & ~KVM_X86_NOTIFY_VMEXIT_VALID_BITS)
6919 break;
6920 if (!kvm_caps.has_notify_vmexit)
6921 break;
6922 if (!((u32)cap->args[0] & KVM_X86_NOTIFY_VMEXIT_ENABLED))
6923 break;
6924 mutex_lock(&kvm->lock);
6925 if (!kvm->created_vcpus) {
6926 kvm->arch.notify_window = cap->args[0] >> 32;
6927 kvm->arch.notify_vmexit_flags = (u32)cap->args[0];
6928 r = 0;
6929 }
6930 mutex_unlock(&kvm->lock);
6931 break;
6932 case KVM_CAP_VM_DISABLE_NX_HUGE_PAGES:
6933 r = -EINVAL;
6934
6935 /*
6936 * Since the risk of disabling NX hugepages is a guest crashing
6937 * the system, ensure the userspace process has permission to
6938 * reboot the system.
6939 *
6940 * Note that unlike the reboot() syscall, the process must have
6941 * this capability in the root namespace because exposing
6942 * /dev/kvm into a container does not limit the scope of the
6943 * iTLB multihit bug to that container. In other words,
6944 * this must use capable(), not ns_capable().
6945 */
6946 if (!capable(CAP_SYS_BOOT)) {
6947 r = -EPERM;
6948 break;
6949 }
6950
6951 if (cap->args[0])
6952 break;
6953
6954 mutex_lock(&kvm->lock);
6955 if (!kvm->created_vcpus) {
6956 kvm->arch.disable_nx_huge_pages = true;
6957 r = 0;
6958 }
6959 mutex_unlock(&kvm->lock);
6960 break;
6961 case KVM_CAP_X86_APIC_BUS_CYCLES_NS: {
6962 u64 bus_cycle_ns = cap->args[0];
6963 u64 unused;
6964
6965 /*
6966 * Guard against overflow in tmict_to_ns(). 128 is the highest
6967 * divide value that can be programmed in APIC_TDCR.
6968 */
6969 r = -EINVAL;
6970 if (!bus_cycle_ns ||
6971 check_mul_overflow((u64)U32_MAX * 128, bus_cycle_ns, &unused))
6972 break;
6973
6974 r = 0;
6975 mutex_lock(&kvm->lock);
6976 if (!irqchip_in_kernel(kvm))
6977 r = -ENXIO;
6978 else if (kvm->created_vcpus)
6979 r = -EINVAL;
6980 else
6981 kvm->arch.apic_bus_cycle_ns = bus_cycle_ns;
6982 mutex_unlock(&kvm->lock);
6983 break;
6984 }
6985 default:
6986 r = -EINVAL;
6987 break;
6988 }
6989 return r;
6990 }
6991
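/* Allocate an empty MSR filter with the requested default allow policy. */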
6992 static struct kvm_x86_msr_filter *kvm_alloc_msr_filter(bool default_allow)
6993 {
6994 struct kvm_x86_msr_filter *msr_filter;
6995
6996 msr_filter = kzalloc_obj(*msr_filter, GFP_KERNEL_ACCOUNT);
6997 if (!msr_filter)
6998 return NULL;
6999
7000 msr_filter->default_allow = default_allow;
7001 return msr_filter;
7002 }
7003
7004 static void kvm_free_msr_filter(struct kvm_x86_msr_filter *msr_filter)
7005 {
7006 u32 i;
7007
7008 if (!msr_filter)
7009 return;
7010
7011 for (i = 0; i < msr_filter->count; i++)
7012 kfree(msr_filter->ranges[i].bitmap);
7013
7014 kfree(msr_filter);
7015 }
7016
7017 static int kvm_add_msr_filter(struct kvm_x86_msr_filter *msr_filter,
7018 struct kvm_msr_filter_range *user_range)
7019 {
7020 unsigned long *bitmap;
7021 size_t bitmap_size;
7022
7023 if (!user_range->nmsrs)
7024 return 0;
7025
7026 if (user_range->flags & ~KVM_MSR_FILTER_RANGE_VALID_MASK)
7027 return -EINVAL;
7028
7029 if (!user_range->flags)
7030 return -EINVAL;
7031
7032 bitmap_size = BITS_TO_LONGS(user_range->nmsrs) * sizeof(long);
7033 if (!bitmap_size || bitmap_size > KVM_MSR_FILTER_MAX_BITMAP_SIZE)
7034 return -EINVAL;
7035
7036 bitmap = memdup_user((__user u8*)user_range->bitmap, bitmap_size);
7037 if (IS_ERR(bitmap))
7038 return PTR_ERR(bitmap);
7039
7040 msr_filter->ranges[msr_filter->count] = (struct msr_bitmap_range) {
7041 .flags = user_range->flags,
7042 .base = user_range->base,
7043 .nmsrs = user_range->nmsrs,
7044 .bitmap = bitmap,
7045 };
7046
7047 msr_filter->count++;
7048 return 0;
7049 }
7050
7051 static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm,
7052 struct kvm_msr_filter *filter)
7053 {
7054 struct kvm_x86_msr_filter *new_filter, *old_filter;
7055 bool default_allow;
7056 bool empty = true;
7057 int r;
7058 u32 i;
7059
7060 if (filter->flags & ~KVM_MSR_FILTER_VALID_MASK)
7061 return -EINVAL;
7062
7063 for (i = 0; i < ARRAY_SIZE(filter->ranges); i++)
7064 empty &= !filter->ranges[i].nmsrs;
7065
7066 default_allow = !(filter->flags & KVM_MSR_FILTER_DEFAULT_DENY);
7067 if (empty && !default_allow)
7068 return -EINVAL;
7069
7070 new_filter = kvm_alloc_msr_filter(default_allow);
7071 if (!new_filter)
7072 return -ENOMEM;
7073
7074 for (i = 0; i < ARRAY_SIZE(filter->ranges); i++) {
7075 r = kvm_add_msr_filter(new_filter, &filter->ranges[i]);
7076 if (r) {
7077 kvm_free_msr_filter(new_filter);
7078 return r;
7079 }
7080 }
7081
7082 mutex_lock(&kvm->lock);
7083 old_filter = rcu_replace_pointer(kvm->arch.msr_filter, new_filter,
7084 mutex_is_locked(&kvm->lock));
7085 mutex_unlock(&kvm->lock);
7086 synchronize_srcu(&kvm->srcu);
7087
7088 kvm_free_msr_filter(old_filter);
7089
7090 /*
7091 * Recalc MSR intercepts as userspace may want to intercept accesses to
7092 * MSRs that KVM would otherwise pass through to the guest.
7093 */
7094 kvm_make_all_cpus_request(kvm, KVM_REQ_RECALC_INTERCEPTS);
7095
7096 return 0;
7097 }
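
/*
 * Illustrative userspace sketch (not part of this file): a deny-by-default
 * filter that only allows reads of a single MSR, assuming vm_fd is an open
 * VM file descriptor and using 0xc0000080 (EFER) as the example MSR. Note
 * that the bitmap is copied in long-sized units, so size it accordingly;
 * a set bit means "allow".
 *
 *	__u8 allow_read[8] = { 0x01 };		// bit 0 => base + 0
 *	struct kvm_msr_filter filter = {
 *		.flags = KVM_MSR_FILTER_DEFAULT_DENY,
 *		.ranges[0] = {
 *			.flags = KVM_MSR_FILTER_READ,
 *			.nmsrs = 1,
 *			.base = 0xc0000080,
 *			.bitmap = allow_read,
 *		},
 *	};
 *	ioctl(vm_fd, KVM_X86_SET_MSR_FILTER, &filter);
 */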
7098
7099 #ifdef CONFIG_KVM_COMPAT
7100 /* for KVM_X86_SET_MSR_FILTER */
7101 struct kvm_msr_filter_range_compat {
7102 __u32 flags;
7103 __u32 nmsrs;
7104 __u32 base;
7105 __u32 bitmap;
7106 };
7107
7108 struct kvm_msr_filter_compat {
7109 __u32 flags;
7110 struct kvm_msr_filter_range_compat ranges[KVM_MSR_FILTER_MAX_RANGES];
7111 };
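
/*
 * The compat variants exist because the native structures embed a bitmap
 * pointer: 32-bit userspace passes 32-bit pointers, which changes both the
 * range and filter layouts, so the handler below widens each range back
 * into the native struct kvm_msr_filter.
 */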
7112
7113 #define KVM_X86_SET_MSR_FILTER_COMPAT _IOW(KVMIO, 0xc6, struct kvm_msr_filter_compat)
7114
7115 long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
7116 unsigned long arg)
7117 {
7118 void __user *argp = (void __user *)arg;
7119 struct kvm *kvm = filp->private_data;
7120 long r = -ENOTTY;
7121
7122 switch (ioctl) {
7123 case KVM_X86_SET_MSR_FILTER_COMPAT: {
7124 struct kvm_msr_filter __user *user_msr_filter = argp;
7125 struct kvm_msr_filter_compat filter_compat;
7126 struct kvm_msr_filter filter;
7127 int i;
7128
7129 if (copy_from_user(&filter_compat, user_msr_filter,
7130 sizeof(filter_compat)))
7131 return -EFAULT;
7132
7133 filter.flags = filter_compat.flags;
7134 for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) {
7135 struct kvm_msr_filter_range_compat *cr;
7136
7137 cr = &filter_compat.ranges[i];
7138 filter.ranges[i] = (struct kvm_msr_filter_range) {
7139 .flags = cr->flags,
7140 .nmsrs = cr->nmsrs,
7141 .base = cr->base,
7142 .bitmap = (__u8 *)(ulong)cr->bitmap,
7143 };
7144 }
7145
7146 r = kvm_vm_ioctl_set_msr_filter(kvm, &filter);
7147 break;
7148 }
7149 }
7150
7151 return r;
7152 }
7153 #endif
7154
7155 #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
7156 static int kvm_arch_suspend_notifier(struct kvm *kvm)
7157 {
7158 struct kvm_vcpu *vcpu;
7159 unsigned long i;
7160
7161 /*
7162 	 * Ignore the return value; marking the guest paused only "fails" if the
7163 	 * vCPU isn't using kvmclock, and continuing on is correct and desirable.
7164 */
7165 kvm_for_each_vcpu(i, vcpu, kvm)
7166 (void)kvm_set_guest_paused(vcpu);
7167
7168 return NOTIFY_DONE;
7169 }
7170
7171 int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state)
7172 {
7173 switch (state) {
7174 case PM_HIBERNATION_PREPARE:
7175 case PM_SUSPEND_PREPARE:
7176 return kvm_arch_suspend_notifier(kvm);
7177 }
7178
7179 return NOTIFY_DONE;
7180 }
7181 #endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */
7182
7183 static int kvm_vm_ioctl_get_clock(struct kvm *kvm, void __user *argp)
7184 {
7185 struct kvm_clock_data data = { 0 };
7186
7187 get_kvmclock(kvm, &data);
7188 if (copy_to_user(argp, &data, sizeof(data)))
7189 return -EFAULT;
7190
7191 return 0;
7192 }
7193
7194 static int kvm_vm_ioctl_set_clock(struct kvm *kvm, void __user *argp)
7195 {
7196 struct kvm_arch *ka = &kvm->arch;
7197 struct kvm_clock_data data;
7198 u64 now_raw_ns;
7199
7200 if (copy_from_user(&data, argp, sizeof(data)))
7201 return -EFAULT;
7202
7203 /*
7204 * Only KVM_CLOCK_REALTIME is used, but allow passing the
7205 * result of KVM_GET_CLOCK back to KVM_SET_CLOCK.
7206 */
7207 if (data.flags & ~KVM_CLOCK_VALID_FLAGS)
7208 return -EINVAL;
7209
7210 kvm_hv_request_tsc_page_update(kvm);
7211 kvm_start_pvclock_update(kvm);
7212 pvclock_update_vm_gtod_copy(kvm);
7213
7214 /*
7215 	 * This pairs with kvm_guest_time_update(): when the masterclock is
7216 	 * in use, master_kernel_ns + kvmclock_offset is used to set the
7217 	 * unsigned 'system_time', so using get_kvmclock_ns() (which is
7218 	 * slightly ahead) here would risk underflowing the unsigned
7219 	 * 'system_time' when 'data.clock' is very small.
7220 */
7221 if (data.flags & KVM_CLOCK_REALTIME) {
7222 u64 now_real_ns = ktime_get_real_ns();
7223
7224 /*
7225 * Avoid stepping the kvmclock backwards.
7226 */
7227 if (now_real_ns > data.realtime)
7228 data.clock += now_real_ns - data.realtime;
7229 }
7230
7231 if (ka->use_master_clock)
7232 now_raw_ns = ka->master_kernel_ns;
7233 else
7234 now_raw_ns = get_kvmclock_base_ns();
7235 ka->kvmclock_offset = data.clock - now_raw_ns;
7236 kvm_end_pvclock_update(kvm);
7237 return 0;
7238 }
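
/*
 * Illustrative migration sketch (not part of this file): replaying a
 * KVM_GET_CLOCK snapshot taken on the source lets the KVM_CLOCK_REALTIME
 * path above advance the restored clock by the wall-clock time that
 * elapsed in between, assuming vm_fd and a saved struct kvm_clock_data:
 *
 *	struct kvm_clock_data data = saved;	// from KVM_GET_CLOCK
 *	ioctl(vm_fd, KVM_SET_CLOCK, &data);
 */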
7239
7240 long kvm_arch_vcpu_unlocked_ioctl(struct file *filp, unsigned int ioctl,
7241 unsigned long arg)
7242 {
7243 struct kvm_vcpu *vcpu = filp->private_data;
7244 void __user *argp = (void __user *)arg;
7245
7246 if (ioctl == KVM_MEMORY_ENCRYPT_OP &&
7247 kvm_x86_ops.vcpu_mem_enc_unlocked_ioctl)
7248 return kvm_x86_call(vcpu_mem_enc_unlocked_ioctl)(vcpu, argp);
7249
7250 return -ENOIOCTLCMD;
7251 }
7252
7253 int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
7254 {
7255 struct kvm *kvm = filp->private_data;
7256 void __user *argp = (void __user *)arg;
7257 int r = -ENOTTY;
7258
7259 #ifdef CONFIG_KVM_IOAPIC
7260 /*
7261 * This union makes it completely explicit to gcc-3.x
7262 * that these three variables' stack usage should be
7263 * combined, not added together.
7264 */
7265 union {
7266 struct kvm_pit_state ps;
7267 struct kvm_pit_state2 ps2;
7268 struct kvm_pit_config pit_config;
7269 } u;
7270 #endif
7271
7272 switch (ioctl) {
7273 case KVM_SET_TSS_ADDR:
7274 r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
7275 break;
7276 case KVM_SET_IDENTITY_MAP_ADDR: {
7277 u64 ident_addr;
7278
7279 mutex_lock(&kvm->lock);
7280 r = -EINVAL;
7281 if (kvm->created_vcpus)
7282 goto set_identity_unlock;
7283 r = -EFAULT;
7284 if (copy_from_user(&ident_addr, argp, sizeof(ident_addr)))
7285 goto set_identity_unlock;
7286 r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
7287 set_identity_unlock:
7288 mutex_unlock(&kvm->lock);
7289 break;
7290 }
7291 case KVM_SET_NR_MMU_PAGES:
7292 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
7293 break;
7294 #ifdef CONFIG_KVM_IOAPIC
7295 case KVM_CREATE_IRQCHIP: {
7296 mutex_lock(&kvm->lock);
7297
7298 r = -EEXIST;
7299 if (irqchip_in_kernel(kvm))
7300 goto create_irqchip_unlock;
7301
7302 /*
7303 * Disallow an in-kernel I/O APIC if the VM has protected EOIs,
7304 * i.e. if KVM can't intercept EOIs and thus can't properly
7305 * emulate level-triggered interrupts.
7306 */
7307 r = -ENOTTY;
7308 if (kvm->arch.has_protected_eoi)
7309 goto create_irqchip_unlock;
7310
7311 r = -EINVAL;
7312 if (kvm->created_vcpus)
7313 goto create_irqchip_unlock;
7314
7315 r = kvm_pic_init(kvm);
7316 if (r)
7317 goto create_irqchip_unlock;
7318
7319 r = kvm_ioapic_init(kvm);
7320 if (r) {
7321 kvm_pic_destroy(kvm);
7322 goto create_irqchip_unlock;
7323 }
7324
7325 r = kvm_setup_default_ioapic_and_pic_routing(kvm);
7326 if (r) {
7327 kvm_ioapic_destroy(kvm);
7328 kvm_pic_destroy(kvm);
7329 goto create_irqchip_unlock;
7330 }
7331 /* Write kvm->irq_routing before enabling irqchip_in_kernel. */
7332 smp_wmb();
7333 kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL;
7334 kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_ABSENT);
7335 create_irqchip_unlock:
7336 mutex_unlock(&kvm->lock);
7337 break;
7338 }
7339 case KVM_CREATE_PIT:
7340 u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
7341 goto create_pit;
7342 case KVM_CREATE_PIT2:
7343 r = -EFAULT;
7344 if (copy_from_user(&u.pit_config, argp,
7345 sizeof(struct kvm_pit_config)))
7346 goto out;
7347 create_pit:
7348 mutex_lock(&kvm->lock);
7349 r = -EEXIST;
7350 if (kvm->arch.vpit)
7351 goto create_pit_unlock;
7352 r = -ENOENT;
7353 if (!pic_in_kernel(kvm))
7354 goto create_pit_unlock;
7355 r = -ENOMEM;
7356 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
7357 if (kvm->arch.vpit)
7358 r = 0;
7359 create_pit_unlock:
7360 mutex_unlock(&kvm->lock);
7361 break;
7362 case KVM_GET_IRQCHIP: {
7363 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
7364 struct kvm_irqchip *chip;
7365
7366 chip = memdup_user(argp, sizeof(*chip));
7367 if (IS_ERR(chip)) {
7368 r = PTR_ERR(chip);
7369 goto out;
7370 }
7371
7372 r = -ENXIO;
7373 if (!irqchip_full(kvm))
7374 goto get_irqchip_out;
7375 r = kvm_vm_ioctl_get_irqchip(kvm, chip);
7376 if (r)
7377 goto get_irqchip_out;
7378 r = -EFAULT;
7379 if (copy_to_user(argp, chip, sizeof(*chip)))
7380 goto get_irqchip_out;
7381 r = 0;
7382 get_irqchip_out:
7383 kfree(chip);
7384 break;
7385 }
7386 case KVM_SET_IRQCHIP: {
7387 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
7388 struct kvm_irqchip *chip;
7389
7390 chip = memdup_user(argp, sizeof(*chip));
7391 if (IS_ERR(chip)) {
7392 r = PTR_ERR(chip);
7393 goto out;
7394 }
7395
7396 r = -ENXIO;
7397 if (!irqchip_full(kvm))
7398 goto set_irqchip_out;
7399 r = kvm_vm_ioctl_set_irqchip(kvm, chip);
7400 set_irqchip_out:
7401 kfree(chip);
7402 break;
7403 }
7404 case KVM_GET_PIT: {
7405 r = -EFAULT;
7406 if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
7407 goto out;
7408 r = -ENXIO;
7409 if (!kvm->arch.vpit)
7410 goto out;
7411 r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
7412 if (r)
7413 goto out;
7414 r = -EFAULT;
7415 if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
7416 goto out;
7417 r = 0;
7418 break;
7419 }
7420 case KVM_SET_PIT: {
7421 r = -EFAULT;
7422 if (copy_from_user(&u.ps, argp, sizeof(u.ps)))
7423 goto out;
7424 mutex_lock(&kvm->lock);
7425 r = -ENXIO;
7426 if (!kvm->arch.vpit)
7427 goto set_pit_out;
7428 r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
7429 set_pit_out:
7430 mutex_unlock(&kvm->lock);
7431 break;
7432 }
7433 case KVM_GET_PIT2: {
7434 r = -ENXIO;
7435 if (!kvm->arch.vpit)
7436 goto out;
7437 r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
7438 if (r)
7439 goto out;
7440 r = -EFAULT;
7441 if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
7442 goto out;
7443 r = 0;
7444 break;
7445 }
7446 case KVM_SET_PIT2: {
7447 r = -EFAULT;
7448 if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
7449 goto out;
7450 mutex_lock(&kvm->lock);
7451 r = -ENXIO;
7452 if (!kvm->arch.vpit)
7453 goto set_pit2_out;
7454 r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
7455 set_pit2_out:
7456 mutex_unlock(&kvm->lock);
7457 break;
7458 }
7459 case KVM_REINJECT_CONTROL: {
7460 struct kvm_reinject_control control;
7461 r = -EFAULT;
7462 if (copy_from_user(&control, argp, sizeof(control)))
7463 goto out;
7464 r = -ENXIO;
7465 if (!kvm->arch.vpit)
7466 goto out;
7467 r = kvm_vm_ioctl_reinject(kvm, &control);
7468 break;
7469 }
7470 #endif
7471 case KVM_SET_BOOT_CPU_ID:
7472 r = 0;
7473 mutex_lock(&kvm->lock);
7474 if (kvm->created_vcpus)
7475 r = -EBUSY;
7476 else if (arg > KVM_MAX_VCPU_IDS ||
7477 (kvm->arch.max_vcpu_ids && arg > kvm->arch.max_vcpu_ids))
7478 r = -EINVAL;
7479 else
7480 kvm->arch.bsp_vcpu_id = arg;
7481 mutex_unlock(&kvm->lock);
7482 break;
7483 #ifdef CONFIG_KVM_XEN
7484 case KVM_XEN_HVM_CONFIG: {
7485 struct kvm_xen_hvm_config xhc;
7486 r = -EFAULT;
7487 if (copy_from_user(&xhc, argp, sizeof(xhc)))
7488 goto out;
7489 r = kvm_xen_hvm_config(kvm, &xhc);
7490 break;
7491 }
7492 case KVM_XEN_HVM_GET_ATTR: {
7493 struct kvm_xen_hvm_attr xha;
7494
7495 r = -EFAULT;
7496 if (copy_from_user(&xha, argp, sizeof(xha)))
7497 goto out;
7498 r = kvm_xen_hvm_get_attr(kvm, &xha);
7499 if (!r && copy_to_user(argp, &xha, sizeof(xha)))
7500 r = -EFAULT;
7501 break;
7502 }
7503 case KVM_XEN_HVM_SET_ATTR: {
7504 struct kvm_xen_hvm_attr xha;
7505
7506 r = -EFAULT;
7507 if (copy_from_user(&xha, argp, sizeof(xha)))
7508 goto out;
7509 r = kvm_xen_hvm_set_attr(kvm, &xha);
7510 break;
7511 }
7512 case KVM_XEN_HVM_EVTCHN_SEND: {
7513 struct kvm_irq_routing_xen_evtchn uxe;
7514
7515 r = -EFAULT;
7516 if (copy_from_user(&uxe, argp, sizeof(uxe)))
7517 goto out;
7518 r = kvm_xen_hvm_evtchn_send(kvm, &uxe);
7519 break;
7520 }
7521 #endif
7522 case KVM_SET_CLOCK:
7523 r = kvm_vm_ioctl_set_clock(kvm, argp);
7524 break;
7525 case KVM_GET_CLOCK:
7526 r = kvm_vm_ioctl_get_clock(kvm, argp);
7527 break;
7528 case KVM_SET_TSC_KHZ: {
7529 u32 user_tsc_khz;
7530
7531 r = -EINVAL;
7532 user_tsc_khz = (u32)arg;
7533
7534 if (kvm_caps.has_tsc_control &&
7535 user_tsc_khz >= kvm_caps.max_guest_tsc_khz)
7536 goto out;
7537
7538 if (user_tsc_khz == 0)
7539 user_tsc_khz = tsc_khz;
7540
7541 mutex_lock(&kvm->lock);
7542 if (!kvm->created_vcpus) {
7543 WRITE_ONCE(kvm->arch.default_tsc_khz, user_tsc_khz);
7544 r = 0;
7545 }
7546 mutex_unlock(&kvm->lock);
7547 goto out;
7548 }
7549 case KVM_GET_TSC_KHZ: {
7550 r = READ_ONCE(kvm->arch.default_tsc_khz);
7551 goto out;
7552 }
7553 case KVM_MEMORY_ENCRYPT_OP:
7554 r = -ENOTTY;
7555 if (!kvm_x86_ops.mem_enc_ioctl)
7556 goto out;
7557
7558 r = kvm_x86_call(mem_enc_ioctl)(kvm, argp);
7559 break;
7560 case KVM_MEMORY_ENCRYPT_REG_REGION: {
7561 struct kvm_enc_region region;
7562
7563 r = -EFAULT;
7564 if (copy_from_user(&region, argp, sizeof(region)))
7565 goto out;
7566
7567 r = -ENOTTY;
7568 if (!kvm_x86_ops.mem_enc_register_region)
7569 goto out;
7570
7571 r = kvm_x86_call(mem_enc_register_region)(kvm, &region);
7572 break;
7573 }
7574 case KVM_MEMORY_ENCRYPT_UNREG_REGION: {
7575 struct kvm_enc_region region;
7576
7577 r = -EFAULT;
7578 if (copy_from_user(&region, argp, sizeof(region)))
7579 goto out;
7580
7581 r = -ENOTTY;
7582 if (!kvm_x86_ops.mem_enc_unregister_region)
7583 goto out;
7584
7585 r = kvm_x86_call(mem_enc_unregister_region)(kvm, &region);
7586 break;
7587 }
7588 #ifdef CONFIG_KVM_HYPERV
7589 case KVM_HYPERV_EVENTFD: {
7590 struct kvm_hyperv_eventfd hvevfd;
7591
7592 r = -EFAULT;
7593 if (copy_from_user(&hvevfd, argp, sizeof(hvevfd)))
7594 goto out;
7595 r = kvm_vm_ioctl_hv_eventfd(kvm, &hvevfd);
7596 break;
7597 }
7598 #endif
7599 case KVM_SET_PMU_EVENT_FILTER:
7600 r = kvm_vm_ioctl_set_pmu_event_filter(kvm, argp);
7601 break;
7602 case KVM_X86_SET_MSR_FILTER: {
7603 struct kvm_msr_filter __user *user_msr_filter = argp;
7604 struct kvm_msr_filter filter;
7605
7606 if (copy_from_user(&filter, user_msr_filter, sizeof(filter)))
7607 return -EFAULT;
7608
7609 r = kvm_vm_ioctl_set_msr_filter(kvm, &filter);
7610 break;
7611 }
7612 default:
7613 r = -ENOTTY;
7614 }
7615 out:
7616 return r;
7617 }
7618
7619 static void kvm_probe_feature_msr(u32 msr_index)
7620 {
7621 u64 data;
7622
7623 if (kvm_get_feature_msr(NULL, msr_index, &data, true))
7624 return;
7625
7626 msr_based_features[num_msr_based_features++] = msr_index;
7627 }
7628
7629 static void kvm_probe_msr_to_save(u32 msr_index)
7630 {
7631 u32 dummy[2];
7632
7633 if (rdmsr_safe(msr_index, &dummy[0], &dummy[1]))
7634 return;
7635
7636 /*
7637 * Even MSRs that are valid in the host may not be exposed to guests in
7638 * some cases.
7639 */
7640 switch (msr_index) {
7641 case MSR_IA32_BNDCFGS:
7642 if (!kvm_mpx_supported())
7643 return;
7644 break;
7645 case MSR_TSC_AUX:
7646 if (!kvm_cpu_cap_has(X86_FEATURE_RDTSCP) &&
7647 !kvm_cpu_cap_has(X86_FEATURE_RDPID))
7648 return;
7649 break;
7650 case MSR_IA32_UMWAIT_CONTROL:
7651 if (!kvm_cpu_cap_has(X86_FEATURE_WAITPKG))
7652 return;
7653 break;
7654 case MSR_IA32_RTIT_CTL:
7655 case MSR_IA32_RTIT_STATUS:
7656 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT))
7657 return;
7658 break;
7659 case MSR_IA32_RTIT_CR3_MATCH:
7660 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) ||
7661 !intel_pt_validate_hw_cap(PT_CAP_cr3_filtering))
7662 return;
7663 break;
7664 case MSR_IA32_RTIT_OUTPUT_BASE:
7665 case MSR_IA32_RTIT_OUTPUT_MASK:
7666 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) ||
7667 (!intel_pt_validate_hw_cap(PT_CAP_topa_output) &&
7668 !intel_pt_validate_hw_cap(PT_CAP_single_range_output)))
7669 return;
7670 break;
7671 case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
7672 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) ||
7673 (msr_index - MSR_IA32_RTIT_ADDR0_A >=
7674 intel_pt_validate_hw_cap(PT_CAP_num_address_ranges) * 2))
7675 return;
7676 break;
7677 case MSR_ARCH_PERFMON_PERFCTR0 ...
7678 MSR_ARCH_PERFMON_PERFCTR0 + KVM_MAX_NR_GP_COUNTERS - 1:
7679 if (msr_index - MSR_ARCH_PERFMON_PERFCTR0 >=
7680 kvm_pmu_cap.num_counters_gp)
7681 return;
7682 break;
7683 case MSR_ARCH_PERFMON_EVENTSEL0 ...
7684 MSR_ARCH_PERFMON_EVENTSEL0 + KVM_MAX_NR_GP_COUNTERS - 1:
7685 if (msr_index - MSR_ARCH_PERFMON_EVENTSEL0 >=
7686 kvm_pmu_cap.num_counters_gp)
7687 return;
7688 break;
7689 case MSR_ARCH_PERFMON_FIXED_CTR0 ...
7690 MSR_ARCH_PERFMON_FIXED_CTR0 + KVM_MAX_NR_FIXED_COUNTERS - 1:
7691 if (msr_index - MSR_ARCH_PERFMON_FIXED_CTR0 >=
7692 kvm_pmu_cap.num_counters_fixed)
7693 return;
7694 break;
7695 case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
7696 case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
7697 case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
7698 case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET:
7699 if (!kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2))
7700 return;
7701 break;
7702 case MSR_IA32_XFD:
7703 case MSR_IA32_XFD_ERR:
7704 if (!kvm_cpu_cap_has(X86_FEATURE_XFD))
7705 return;
7706 break;
7707 case MSR_IA32_TSX_CTRL:
7708 if (!(kvm_get_arch_capabilities() & ARCH_CAP_TSX_CTRL_MSR))
7709 return;
7710 break;
7711 case MSR_IA32_XSS:
7712 if (!kvm_caps.supported_xss)
7713 return;
7714 break;
7715 case MSR_IA32_U_CET:
7716 case MSR_IA32_S_CET:
7717 if (!kvm_cpu_cap_has(X86_FEATURE_SHSTK) &&
7718 !kvm_cpu_cap_has(X86_FEATURE_IBT))
7719 return;
7720 break;
7721 case MSR_IA32_INT_SSP_TAB:
7722 if (!kvm_cpu_cap_has(X86_FEATURE_LM))
7723 return;
7724 fallthrough;
7725 case MSR_IA32_PL0_SSP ... MSR_IA32_PL3_SSP:
7726 if (!kvm_cpu_cap_has(X86_FEATURE_SHSTK))
7727 return;
7728 break;
7729 default:
7730 break;
7731 }
7732
7733 msrs_to_save[num_msrs_to_save++] = msr_index;
7734 }
7735
7736 static void kvm_init_msr_lists(void)
7737 {
7738 unsigned i;
7739
7740 BUILD_BUG_ON_MSG(KVM_MAX_NR_FIXED_COUNTERS != 3,
7741 "Please update the fixed PMCs in msrs_to_save_pmu[]");
7742
7743 num_msrs_to_save = 0;
7744 num_emulated_msrs = 0;
7745 num_msr_based_features = 0;
7746
7747 for (i = 0; i < ARRAY_SIZE(msrs_to_save_base); i++)
7748 kvm_probe_msr_to_save(msrs_to_save_base[i]);
7749
7750 if (enable_pmu) {
7751 for (i = 0; i < ARRAY_SIZE(msrs_to_save_pmu); i++)
7752 kvm_probe_msr_to_save(msrs_to_save_pmu[i]);
7753 }
7754
7755 for (i = 0; i < ARRAY_SIZE(emulated_msrs_all); i++) {
7756 if (!kvm_x86_call(has_emulated_msr)(NULL,
7757 emulated_msrs_all[i]))
7758 continue;
7759
7760 emulated_msrs[num_emulated_msrs++] = emulated_msrs_all[i];
7761 }
7762
7763 for (i = KVM_FIRST_EMULATED_VMX_MSR; i <= KVM_LAST_EMULATED_VMX_MSR; i++)
7764 kvm_probe_feature_msr(i);
7765
7766 for (i = 0; i < ARRAY_SIZE(msr_based_features_all_except_vmx); i++)
7767 kvm_probe_feature_msr(msr_based_features_all_except_vmx[i]);
7768 }
7769
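/*
 * Write @len bytes to guest MMIO at @addr, preferring the in-kernel local
 * APIC and falling back to the KVM_MMIO_BUS, in chunks of at most 8 bytes
 * (e.g. a 16-byte access is issued as two 8-byte bus writes). Returns the
 * number of bytes handled in-kernel; anything left over must be completed
 * in userspace.
 */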
7770 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
7771 const void *v)
7772 {
7773 int handled = 0;
7774 int n;
7775
7776 do {
7777 n = min(len, 8);
7778 if (!(lapic_in_kernel(vcpu) &&
7779 !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v))
7780 && kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v))
7781 break;
7782 handled += n;
7783 addr += n;
7784 len -= n;
7785 v += n;
7786 } while (len);
7787
7788 return handled;
7789 }
7790
7791 static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
7792 {
7793 int handled = 0;
7794 int n;
7795
7796 do {
7797 n = min(len, 8);
7798 if (!(lapic_in_kernel(vcpu) &&
7799 !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev,
7800 addr, n, v))
7801 && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v))
7802 break;
7803 trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, v);
7804 handled += n;
7805 addr += n;
7806 len -= n;
7807 v += n;
7808 } while (len);
7809
7810 return handled;
7811 }
7812
7813 void kvm_set_segment(struct kvm_vcpu *vcpu,
7814 struct kvm_segment *var, int seg)
7815 {
7816 kvm_x86_call(set_segment)(vcpu, var, seg);
7817 }
7818
7819 void kvm_get_segment(struct kvm_vcpu *vcpu,
7820 struct kvm_segment *var, int seg)
7821 {
7822 kvm_x86_call(get_segment)(vcpu, var, seg);
7823 }
7824
7825 gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access,
7826 struct x86_exception *exception)
7827 {
7828 struct kvm_mmu *mmu = vcpu->arch.mmu;
7829 gpa_t t_gpa;
7830
7831 BUG_ON(!mmu_is_nested(vcpu));
7832
7833 /* NPT walks are always user-walks */
7834 access |= PFERR_USER_MASK;
7835 t_gpa = mmu->gva_to_gpa(vcpu, mmu, gpa, access, exception);
7836
7837 return t_gpa;
7838 }
7839
7840 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
7841 struct x86_exception *exception)
7842 {
7843 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
7844
7845 u64 access = (kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
7846 return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
7847 }
7848 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_gva_to_gpa_read);
7849
7850 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
7851 struct x86_exception *exception)
7852 {
7853 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
7854
7855 u64 access = (kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
7856 access |= PFERR_WRITE_MASK;
7857 return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
7858 }
7859 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_gva_to_gpa_write);
7860
7861 /* Used to access any guest's mapped memory without checking CPL. */
7862 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
7863 struct x86_exception *exception)
7864 {
7865 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
7866
7867 return mmu->gva_to_gpa(vcpu, mmu, gva, 0, exception);
7868 }
7869
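/*
 * Copy @bytes of guest virtual memory at @addr into @val, translating and
 * reading page by page since virtually-contiguous guest memory need not be
 * physically contiguous. A failed translation propagates the fault; a
 * failed read is reported as X86EMUL_IO_NEEDED.
 */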
7870 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
7871 struct kvm_vcpu *vcpu, u64 access,
7872 struct x86_exception *exception)
7873 {
7874 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
7875 void *data = val;
7876 int r = X86EMUL_CONTINUE;
7877
7878 while (bytes) {
7879 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access, exception);
7880 unsigned offset = addr & (PAGE_SIZE-1);
7881 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
7882 int ret;
7883
7884 if (gpa == INVALID_GPA)
7885 return X86EMUL_PROPAGATE_FAULT;
7886 ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, data,
7887 offset, toread);
7888 if (ret < 0) {
7889 r = X86EMUL_IO_NEEDED;
7890 goto out;
7891 }
7892
7893 bytes -= toread;
7894 data += toread;
7895 addr += toread;
7896 }
7897 out:
7898 return r;
7899 }
7900
7901 /* used for instruction fetching */
7902 static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
7903 gva_t addr, void *val, unsigned int bytes,
7904 struct x86_exception *exception)
7905 {
7906 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
7907 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
7908 u64 access = (kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
7909 unsigned offset;
7910 int ret;
7911
7912 /* Inline kvm_read_guest_virt_helper for speed. */
7913 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access|PFERR_FETCH_MASK,
7914 exception);
7915 if (unlikely(gpa == INVALID_GPA))
7916 return X86EMUL_PROPAGATE_FAULT;
7917
7918 offset = addr & (PAGE_SIZE-1);
7919 if (WARN_ON(offset + bytes > PAGE_SIZE))
7920 bytes = (unsigned)PAGE_SIZE - offset;
7921 ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, val,
7922 offset, bytes);
7923 if (unlikely(ret < 0))
7924 return X86EMUL_IO_NEEDED;
7925
7926 return X86EMUL_CONTINUE;
7927 }
7928
7929 int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
7930 gva_t addr, void *val, unsigned int bytes,
7931 struct x86_exception *exception)
7932 {
7933 u64 access = (kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
7934
7935 /*
7936 * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
7937 * is returned, but our callers are not ready for that and they blindly
7938 * call kvm_inject_page_fault. Ensure that they at least do not leak
7939 * uninitialized kernel stack memory into cr2 and error code.
7940 */
7941 memset(exception, 0, sizeof(*exception));
7942 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
7943 exception);
7944 }
7945 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_read_guest_virt);
7946
7947 static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
7948 gva_t addr, void *val, unsigned int bytes,
7949 struct x86_exception *exception, bool system)
7950 {
7951 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
7952 u64 access = 0;
7953
7954 if (system)
7955 access |= PFERR_IMPLICIT_ACCESS;
7956 else if (kvm_x86_call(get_cpl)(vcpu) == 3)
7957 access |= PFERR_USER_MASK;
7958
7959 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception);
7960 }
7961
7962 static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
7963 struct kvm_vcpu *vcpu, u64 access,
7964 struct x86_exception *exception)
7965 {
7966 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
7967 void *data = val;
7968 int r = X86EMUL_CONTINUE;
7969
7970 while (bytes) {
7971 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access, exception);
7972 unsigned offset = addr & (PAGE_SIZE-1);
7973 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
7974 int ret;
7975
7976 if (gpa == INVALID_GPA)
7977 return X86EMUL_PROPAGATE_FAULT;
7978 ret = kvm_vcpu_write_guest(vcpu, gpa, data, towrite);
7979 if (ret < 0) {
7980 r = X86EMUL_IO_NEEDED;
7981 goto out;
7982 }
7983
7984 bytes -= towrite;
7985 data += towrite;
7986 addr += towrite;
7987 }
7988 out:
7989 return r;
7990 }
7991
7992 static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val,
7993 unsigned int bytes, struct x86_exception *exception,
7994 bool system)
7995 {
7996 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
7997 u64 access = PFERR_WRITE_MASK;
7998
7999 if (system)
8000 access |= PFERR_IMPLICIT_ACCESS;
8001 else if (kvm_x86_call(get_cpl)(vcpu) == 3)
8002 access |= PFERR_USER_MASK;
8003
8004 return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
8005 access, exception);
8006 }
8007
8008 int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
8009 unsigned int bytes, struct x86_exception *exception)
8010 {
8011 /* kvm_write_guest_virt_system can pull in tons of pages. */
8012 kvm_request_l1tf_flush_l1d();
8013
8014 return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
8015 PFERR_WRITE_MASK, exception);
8016 }
8017 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_write_guest_virt_system);
8018
8019 static int kvm_check_emulate_insn(struct kvm_vcpu *vcpu, int emul_type,
8020 void *insn, int insn_len)
8021 {
8022 return kvm_x86_call(check_emulate_instruction)(vcpu, emul_type,
8023 insn, insn_len);
8024 }
8025
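/*
 * Handle a #UD intercept. When the force_emulation_prefix testing knob is
 * enabled and the faulting RIP begins with the 5-byte signature "ud2;
 * .ascii \"kvm\"", skip the prefix and emulate the instruction that
 * follows as a forced #UD, which lets tests exercise the emulator on
 * otherwise-valid instructions.
 */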
8026 int handle_ud(struct kvm_vcpu *vcpu)
8027 {
8028 static const char kvm_emulate_prefix[] = { __KVM_EMULATE_PREFIX };
8029 int fep_flags = READ_ONCE(force_emulation_prefix);
8030 int emul_type = EMULTYPE_TRAP_UD;
8031 char sig[5]; /* ud2; .ascii "kvm" */
8032 struct x86_exception e;
8033 int r;
8034
8035 r = kvm_check_emulate_insn(vcpu, emul_type, NULL, 0);
8036 if (r != X86EMUL_CONTINUE)
8037 return 1;
8038
8039 if (fep_flags &&
8040 kvm_read_guest_virt(vcpu, kvm_get_linear_rip(vcpu),
8041 sig, sizeof(sig), &e) == 0 &&
8042 memcmp(sig, kvm_emulate_prefix, sizeof(sig)) == 0) {
8043 if (fep_flags & KVM_FEP_CLEAR_RFLAGS_RF)
8044 kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) & ~X86_EFLAGS_RF);
8045 kvm_rip_write(vcpu, kvm_rip_read(vcpu) + sizeof(sig));
8046 emul_type = EMULTYPE_TRAP_UD_FORCED;
8047 }
8048
8049 return kvm_emulate_instruction(vcpu, emul_type);
8050 }
8051 EXPORT_SYMBOL_FOR_KVM_INTERNAL(handle_ud);
8052
8053 static int vcpu_is_mmio_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
8054 gpa_t gpa, bool write)
8055 {
8056 /* For APIC access vmexit */
8057 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
8058 return 1;
8059
8060 if (vcpu_match_mmio_gpa(vcpu, gpa)) {
8061 trace_vcpu_match_mmio(gva, gpa, write, true);
8062 return 1;
8063 }
8064
8065 return 0;
8066 }
8067
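/*
 * Translate @gva for a read or @write access and classify the result:
 * returns 1 if the GPA should be handled as MMIO (including the APIC
 * access page), 0 if it is backed by a memslot, and -1 if the translation
 * faulted, in which case @exception is filled in.
 */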
8068 static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
8069 gpa_t *gpa, struct x86_exception *exception,
8070 bool write)
8071 {
8072 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
8073 u64 access = ((kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0)
8074 | (write ? PFERR_WRITE_MASK : 0);
8075
8076 	/*
8077 	 * Currently PKRU is only applied to EPT-enabled guests, so there
8078 	 * is no pkey in the EPT page tables for an L1 guest or in the
8079 	 * shadow EPT page tables for an L2 guest.
8080 	 */
8081 if (vcpu_match_mmio_gva(vcpu, gva) && (!is_paging(vcpu) ||
8082 !permission_fault(vcpu, vcpu->arch.walk_mmu,
8083 vcpu->arch.mmio_access, 0, access))) {
8084 *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
8085 (gva & (PAGE_SIZE - 1));
8086 trace_vcpu_match_mmio(gva, *gpa, write, false);
8087 return 1;
8088 }
8089
8090 *gpa = mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
8091
8092 if (*gpa == INVALID_GPA)
8093 return -1;
8094
8095 return vcpu_is_mmio_gpa(vcpu, gva, *gpa, write);
8096 }
8097
8098 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
8099 const void *val, int bytes)
8100 {
8101 int ret;
8102
8103 ret = kvm_vcpu_write_guest(vcpu, gpa, val, bytes);
8104 if (ret < 0)
8105 return 0;
8106 kvm_page_track_write(vcpu, gpa, val, bytes);
8107 return 1;
8108 }
8109
8110 struct read_write_emulator_ops {
8111 int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val,
8112 int bytes);
8113 int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa,
8114 void *val, int bytes);
8115 int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
8116 int bytes, void *val);
8117 int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
8118 void *val, int bytes);
8119 bool write;
8120 };
8121
8122 static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
8123 {
8124 if (vcpu->mmio_read_completed) {
8125 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
8126 vcpu->mmio_fragments[0].gpa, val);
8127 vcpu->mmio_read_completed = 0;
8128 return 1;
8129 }
8130
8131 return 0;
8132 }
8133
8134 static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
8135 void *val, int bytes)
8136 {
8137 return !kvm_vcpu_read_guest(vcpu, gpa, val, bytes);
8138 }
8139
8140 static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
8141 void *val, int bytes)
8142 {
8143 return emulator_write_phys(vcpu, gpa, val, bytes);
8144 }
8145
8146 static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
8147 {
8148 trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, val);
8149 return vcpu_mmio_write(vcpu, gpa, bytes, val);
8150 }
8151
8152 static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
8153 void *val, int bytes)
8154 {
8155 trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, NULL);
8156 return X86EMUL_IO_NEEDED;
8157 }
8158
8159 static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
8160 void *val, int bytes)
8161 {
8162 struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0];
8163
8164 memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
8165 return X86EMUL_CONTINUE;
8166 }
8167
8168 static const struct read_write_emulator_ops read_emultor = {
8169 .read_write_prepare = read_prepare,
8170 .read_write_emulate = read_emulate,
8171 .read_write_mmio = vcpu_mmio_read,
8172 .read_write_exit_mmio = read_exit_mmio,
8173 };
8174
8175 static const struct read_write_emulator_ops write_emultor = {
8176 .read_write_emulate = write_emulate,
8177 .read_write_mmio = write_mmio,
8178 .read_write_exit_mmio = write_exit_mmio,
8179 .write = true,
8180 };
8181
8182 static int emulator_read_write_onepage(unsigned long addr, void *val,
8183 unsigned int bytes,
8184 struct x86_exception *exception,
8185 struct kvm_vcpu *vcpu,
8186 const struct read_write_emulator_ops *ops)
8187 {
8188 gpa_t gpa;
8189 int handled, ret;
8190 bool write = ops->write;
8191 struct kvm_mmio_fragment *frag;
8192 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
8193
8194 	/*
8195 	 * If the exit was due to an NPF we may already have a GPA.
8196 	 * If the GPA is present, use it to avoid the GVA to GPA table walk.
8197 	 * Note, this cannot be used on string operations since a string
8198 	 * operation using REP only has the initial GPA from the NPF that
8199 	 * occurred.
8200 	 */
8201 if (ctxt->gpa_available && emulator_can_use_gpa(ctxt) &&
8202 (addr & ~PAGE_MASK) == (ctxt->gpa_val & ~PAGE_MASK)) {
8203 gpa = ctxt->gpa_val;
8204 ret = vcpu_is_mmio_gpa(vcpu, addr, gpa, write);
8205 } else {
8206 ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
8207 if (ret < 0)
8208 return X86EMUL_PROPAGATE_FAULT;
8209 }
8210
8211 if (!ret && ops->read_write_emulate(vcpu, gpa, val, bytes))
8212 return X86EMUL_CONTINUE;
8213
8214 /*
8215 * Is this MMIO handled locally?
8216 */
8217 handled = ops->read_write_mmio(vcpu, gpa, bytes, val);
8218 if (handled == bytes)
8219 return X86EMUL_CONTINUE;
8220
8221 gpa += handled;
8222 bytes -= handled;
8223 val += handled;
8224
8225 WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS);
8226 frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];
8227 frag->gpa = gpa;
8228 frag->data = val;
8229 frag->len = bytes;
8230 return X86EMUL_CONTINUE;
8231 }
8232
8233 static int emulator_read_write(struct x86_emulate_ctxt *ctxt,
8234 unsigned long addr,
8235 void *val, unsigned int bytes,
8236 struct x86_exception *exception,
8237 const struct read_write_emulator_ops *ops)
8238 {
8239 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
8240 gpa_t gpa;
8241 int rc;
8242
8243 if (ops->read_write_prepare &&
8244 ops->read_write_prepare(vcpu, val, bytes))
8245 return X86EMUL_CONTINUE;
8246
8247 vcpu->mmio_nr_fragments = 0;
8248
8249 /* Crossing a page boundary? */
8250 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
8251 int now;
8252
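		/*
		 * -addr & ~PAGE_MASK is the number of bytes from addr up to
		 * the next page boundary, e.g. 8 for addr == 0x1ff8 with 4K
		 * pages, so the first chunk ends exactly at the boundary.
		 */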
8253 now = -addr & ~PAGE_MASK;
8254 rc = emulator_read_write_onepage(addr, val, now, exception,
8255 vcpu, ops);
8256
8257 if (rc != X86EMUL_CONTINUE)
8258 return rc;
8259 addr += now;
8260 if (ctxt->mode != X86EMUL_MODE_PROT64)
8261 addr = (u32)addr;
8262 val += now;
8263 bytes -= now;
8264 }
8265
8266 rc = emulator_read_write_onepage(addr, val, bytes, exception,
8267 vcpu, ops);
8268 if (rc != X86EMUL_CONTINUE)
8269 return rc;
8270
8271 if (!vcpu->mmio_nr_fragments)
8272 return X86EMUL_CONTINUE;
8273
8274 gpa = vcpu->mmio_fragments[0].gpa;
8275
8276 vcpu->mmio_needed = 1;
8277 vcpu->mmio_cur_fragment = 0;
8278
8279 vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len);
8280 vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write;
8281 vcpu->run->exit_reason = KVM_EXIT_MMIO;
8282 vcpu->run->mmio.phys_addr = gpa;
8283
8284 return ops->read_write_exit_mmio(vcpu, gpa, val, bytes);
8285 }
8286
8287 static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
8288 unsigned long addr,
8289 void *val,
8290 unsigned int bytes,
8291 struct x86_exception *exception)
8292 {
8293 return emulator_read_write(ctxt, addr, val, bytes,
8294 exception, &read_emultor);
8295 }
8296
8297 static int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
8298 unsigned long addr,
8299 const void *val,
8300 unsigned int bytes,
8301 struct x86_exception *exception)
8302 {
8303 return emulator_read_write(ctxt, addr, (void *)val, bytes,
8304 exception, &write_emultor);
8305 }
8306
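/*
 * Width-dispatching wrapper around __try_cmpxchg_user() for the cmpxchg
 * emulation below: @t selects the access size (u8/u16/u32/u64). From the
 * caller's view, 0 means the exchange happened, a positive value means the
 * compare failed (the observed value is written back through @old), and a
 * negative value means the user access faulted.
 */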
8307 #define emulator_try_cmpxchg_user(t, ptr, old, new) \
8308 (__try_cmpxchg_user((t __user *)(ptr), (t *)(old), *(t *)(new), efault ## t))
8309
8310 static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
8311 unsigned long addr,
8312 const void *old,
8313 const void *new,
8314 unsigned int bytes,
8315 struct x86_exception *exception)
8316 {
8317 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
8318 u64 page_line_mask;
8319 unsigned long hva;
8320 gpa_t gpa;
8321 int r;
8322
8323 	/* The guest's cmpxchg8b has to be emulated atomically. */
8324 if (bytes > 8 || (bytes & (bytes - 1)))
8325 goto emul_write;
8326
8327 gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
8328
8329 if (gpa == INVALID_GPA ||
8330 (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
8331 goto emul_write;
8332
8333 /*
8334 * Emulate the atomic as a straight write to avoid #AC if SLD is
8335 * enabled in the host and the access splits a cache line.
8336 */
8337 if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
8338 page_line_mask = ~(cache_line_size() - 1);
8339 else
8340 page_line_mask = PAGE_MASK;
8341
8342 if (((gpa + bytes - 1) & page_line_mask) != (gpa & page_line_mask))
8343 goto emul_write;
8344
8345 hva = kvm_vcpu_gfn_to_hva(vcpu, gpa_to_gfn(gpa));
8346 if (kvm_is_error_hva(hva))
8347 goto emul_write;
8348
8349 hva += offset_in_page(gpa);
8350
8351 switch (bytes) {
8352 case 1:
8353 r = emulator_try_cmpxchg_user(u8, hva, old, new);
8354 break;
8355 case 2:
8356 r = emulator_try_cmpxchg_user(u16, hva, old, new);
8357 break;
8358 case 4:
8359 r = emulator_try_cmpxchg_user(u32, hva, old, new);
8360 break;
8361 case 8:
8362 r = emulator_try_cmpxchg_user(u64, hva, old, new);
8363 break;
8364 default:
8365 BUG();
8366 }
8367
8368 if (r < 0)
8369 return X86EMUL_UNHANDLEABLE;
8370
8371 /*
8372 * Mark the page dirty _before_ checking whether or not the CMPXCHG was
8373 * successful, as the old value is written back on failure. Note, for
8374 * live migration, this is unnecessarily conservative as CMPXCHG writes
8375 * back the original value and the access is atomic, but KVM's ABI is
8376 * that all writes are dirty logged, regardless of the value written.
8377 */
8378 kvm_vcpu_mark_page_dirty(vcpu, gpa_to_gfn(gpa));
8379
8380 if (r)
8381 return X86EMUL_CMPXCHG_FAILED;
8382
8383 kvm_page_track_write(vcpu, gpa, new, bytes);
8384
8385 return X86EMUL_CONTINUE;
8386
8387 emul_write:
8388 pr_warn_once("emulating exchange as write\n");
8389
8390 return emulator_write_emulated(ctxt, addr, new, bytes, exception);
8391 }
8392
8393 static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
8394 unsigned short port, void *data,
8395 unsigned int count, bool in)
8396 {
8397 unsigned i;
8398 int r;
8399
8400 WARN_ON_ONCE(vcpu->arch.pio.count);
8401 for (i = 0; i < count; i++) {
8402 if (in)
8403 r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, port, size, data);
8404 else
8405 r = kvm_io_bus_write(vcpu, KVM_PIO_BUS, port, size, data);
8406
8407 if (r) {
8408 if (i == 0)
8409 goto userspace_io;
8410
8411 /*
8412 * Userspace must have unregistered the device while PIO
8413 * was running. Drop writes / read as 0.
8414 */
8415 if (in)
8416 memset(data, 0, size * (count - i));
8417 break;
8418 }
8419
8420 data += size;
8421 }
8422 return 1;
8423
8424 userspace_io:
8425 vcpu->arch.pio.port = port;
8426 vcpu->arch.pio.in = in;
8427 vcpu->arch.pio.count = count;
8428 vcpu->arch.pio.size = size;
8429
8430 if (in)
8431 memset(vcpu->arch.pio_data, 0, size * count);
8432 else
8433 memcpy(vcpu->arch.pio_data, data, size * count);
8434
8435 vcpu->run->exit_reason = KVM_EXIT_IO;
8436 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
8437 vcpu->run->io.size = size;
8438 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
8439 vcpu->run->io.count = count;
8440 vcpu->run->io.port = port;
8441 return 0;
8442 }
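
/*
 * When emulator_pio_in_out() returns 0, KVM_RUN exits with KVM_EXIT_IO and
 * userspace completes the access against the vCPU's PIO scratch page: the
 * @count elements of @size bytes sit at (char *)run + run->io.data_offset
 * within the vCPU's mmap'd region.
 */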
8443
8444 static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
8445 unsigned short port, void *val, unsigned int count)
8446 {
8447 int r = emulator_pio_in_out(vcpu, size, port, val, count, true);
8448 if (r)
8449 trace_kvm_pio(KVM_PIO_IN, port, size, count, val);
8450
8451 return r;
8452 }
8453
8454 static void complete_emulator_pio_in(struct kvm_vcpu *vcpu, void *val)
8455 {
8456 int size = vcpu->arch.pio.size;
8457 unsigned int count = vcpu->arch.pio.count;
8458 memcpy(val, vcpu->arch.pio_data, size * count);
8459 trace_kvm_pio(KVM_PIO_IN, vcpu->arch.pio.port, size, count, vcpu->arch.pio_data);
8460 vcpu->arch.pio.count = 0;
8461 }
8462
8463 static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
8464 int size, unsigned short port, void *val,
8465 unsigned int count)
8466 {
8467 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
8468 if (vcpu->arch.pio.count) {
8469 /*
8470 * Complete a previous iteration that required userspace I/O.
8471 * Note, @count isn't guaranteed to match pio.count as userspace
8472 * can modify ECX before rerunning the vCPU. Ignore any such
8473 * shenanigans as KVM doesn't support modifying the rep count,
8474 * and the emulator ensures @count doesn't overflow the buffer.
8475 */
8476 complete_emulator_pio_in(vcpu, val);
8477 return 1;
8478 }
8479
8480 return emulator_pio_in(vcpu, size, port, val, count);
8481 }
8482
8483 static int emulator_pio_out(struct kvm_vcpu *vcpu, int size,
8484 unsigned short port, const void *val,
8485 unsigned int count)
8486 {
8487 trace_kvm_pio(KVM_PIO_OUT, port, size, count, val);
8488 return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
8489 }
8490
8491 static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
8492 int size, unsigned short port,
8493 const void *val, unsigned int count)
8494 {
8495 return emulator_pio_out(emul_to_vcpu(ctxt), size, port, val, count);
8496 }
8497
8498 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
8499 {
8500 return kvm_x86_call(get_segment_base)(vcpu, seg);
8501 }
8502
8503 static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
8504 {
8505 kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);
8506 }
8507
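/*
 * Emulate WBINVD without skipping the instruction. When the vendor module
 * intercepts WBINVD, flush caches on every physical CPU this vCPU may have
 * dirtied (tracked in wbinvd_dirty_mask); otherwise flush only the local
 * CPU.
 */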
8508 static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
8509 {
8510 if (!need_emulate_wbinvd(vcpu))
8511 return X86EMUL_CONTINUE;
8512
8513 if (kvm_x86_call(has_wbinvd_exit)()) {
8514 int cpu = get_cpu();
8515
8516 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
8517 wbinvd_on_cpus_mask(vcpu->arch.wbinvd_dirty_mask);
8518 put_cpu();
8519 cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
8520 } else
8521 wbinvd();
8522 return X86EMUL_CONTINUE;
8523 }
8524
8525 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
8526 {
8527 kvm_emulate_wbinvd_noskip(vcpu);
8528 return kvm_skip_emulated_instruction(vcpu);
8529 }
8530 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_wbinvd);
8531
8532
8533
8534 static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
8535 {
8536 kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt));
8537 }
8538
8539 static unsigned long emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr)
8540 {
8541 return kvm_get_dr(emul_to_vcpu(ctxt), dr);
8542 }
8543
8544 static int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
8545 unsigned long value)
8546 {
8547
8548 return kvm_set_dr(emul_to_vcpu(ctxt), dr, value);
8549 }
8550
8551 static u64 mk_cr_64(u64 curr_cr, u32 new_val)
8552 {
8553 return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
8554 }
8555
8556 static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr)
8557 {
8558 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
8559 unsigned long value;
8560
8561 switch (cr) {
8562 case 0:
8563 value = kvm_read_cr0(vcpu);
8564 break;
8565 case 2:
8566 value = vcpu->arch.cr2;
8567 break;
8568 case 3:
8569 value = kvm_read_cr3(vcpu);
8570 break;
8571 case 4:
8572 value = kvm_read_cr4(vcpu);
8573 break;
8574 case 8:
8575 value = kvm_get_cr8(vcpu);
8576 break;
8577 default:
8578 kvm_err("%s: unexpected cr %u\n", __func__, cr);
8579 return 0;
8580 }
8581
8582 return value;
8583 }
8584
8585 static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val)
8586 {
8587 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
8588 int res = 0;
8589
8590 switch (cr) {
8591 case 0:
8592 res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
8593 break;
8594 case 2:
8595 vcpu->arch.cr2 = val;
8596 break;
8597 case 3:
8598 res = kvm_set_cr3(vcpu, val);
8599 break;
8600 case 4:
8601 res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
8602 break;
8603 case 8:
8604 res = kvm_set_cr8(vcpu, val);
8605 break;
8606 default:
8607 kvm_err("%s: unexpected cr %u\n", __func__, cr);
8608 res = -1;
8609 }
8610
8611 return res;
8612 }
8613
8614 static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt)
8615 {
8616 return kvm_x86_call(get_cpl)(emul_to_vcpu(ctxt));
8617 }
8618
8619 static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
8620 {
8621 kvm_x86_call(get_gdt)(emul_to_vcpu(ctxt), dt);
8622 }
8623
8624 static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
8625 {
8626 kvm_x86_call(get_idt)(emul_to_vcpu(ctxt), dt);
8627 }
8628
8629 static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
8630 {
8631 kvm_x86_call(set_gdt)(emul_to_vcpu(ctxt), dt);
8632 }
8633
8634 static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
8635 {
8636 kvm_x86_call(set_idt)(emul_to_vcpu(ctxt), dt);
8637 }
8638
8639 static unsigned long emulator_get_cached_segment_base(
8640 struct x86_emulate_ctxt *ctxt, int seg)
8641 {
8642 return get_segment_base(emul_to_vcpu(ctxt), seg);
8643 }
8644
8645 static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
8646 struct desc_struct *desc, u32 *base3,
8647 int seg)
8648 {
8649 struct kvm_segment var;
8650
8651 kvm_get_segment(emul_to_vcpu(ctxt), &var, seg);
8652 *selector = var.selector;
8653
8654 if (var.unusable) {
8655 memset(desc, 0, sizeof(*desc));
8656 if (base3)
8657 *base3 = 0;
8658 return false;
8659 }
8660
8661 if (var.g)
8662 var.limit >>= 12;
8663 set_desc_limit(desc, var.limit);
8664 set_desc_base(desc, (unsigned long)var.base);
8665 #ifdef CONFIG_X86_64
8666 if (base3)
8667 *base3 = var.base >> 32;
8668 #endif
8669 desc->type = var.type;
8670 desc->s = var.s;
8671 desc->dpl = var.dpl;
8672 desc->p = var.present;
8673 desc->avl = var.avl;
8674 desc->l = var.l;
8675 desc->d = var.db;
8676 desc->g = var.g;
8677
8678 return true;
8679 }
8680
8681 static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector,
8682 struct desc_struct *desc, u32 base3,
8683 int seg)
8684 {
8685 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
8686 struct kvm_segment var;
8687
8688 var.selector = selector;
8689 var.base = get_desc_base(desc);
8690 #ifdef CONFIG_X86_64
8691 var.base |= ((u64)base3) << 32;
8692 #endif
8693 var.limit = get_desc_limit(desc);
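	/*
	 * A set granularity bit scales the limit by 4KiB: e.g. a stored
	 * limit of 0xfffff expands to 0xffffffff (the full 4GiB - 1).
	 */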
8694 if (desc->g)
8695 var.limit = (var.limit << 12) | 0xfff;
8696 var.type = desc->type;
8697 var.dpl = desc->dpl;
8698 var.db = desc->d;
8699 var.s = desc->s;
8700 var.l = desc->l;
8701 var.g = desc->g;
8702 var.avl = desc->avl;
8703 var.present = desc->p;
8704 var.unusable = !var.present;
8705 var.padding = 0;
8706
8707 kvm_set_segment(vcpu, &var, seg);
8708 return;
8709 }
8710
8711 static int emulator_get_msr_with_filter(struct x86_emulate_ctxt *ctxt,
8712 u32 msr_index, u64 *pdata)
8713 {
8714 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
8715 int r;
8716
8717 r = kvm_emulate_msr_read(vcpu, msr_index, pdata);
8718 if (r < 0)
8719 return X86EMUL_UNHANDLEABLE;
8720
8721 if (r) {
8722 if (kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_RDMSR, 0,
8723 complete_emulated_rdmsr, r))
8724 return X86EMUL_IO_NEEDED;
8725
8726 trace_kvm_msr_read_ex(msr_index);
8727 return X86EMUL_PROPAGATE_FAULT;
8728 }
8729
8730 trace_kvm_msr_read(msr_index, *pdata);
8731 return X86EMUL_CONTINUE;
8732 }
8733
8734 static int emulator_set_msr_with_filter(struct x86_emulate_ctxt *ctxt,
8735 u32 msr_index, u64 data)
8736 {
8737 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
8738 int r;
8739
8740 r = kvm_emulate_msr_write(vcpu, msr_index, data);
8741 if (r < 0)
8742 return X86EMUL_UNHANDLEABLE;
8743
8744 if (r) {
8745 if (kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_WRMSR, data,
8746 complete_emulated_msr_access, r))
8747 return X86EMUL_IO_NEEDED;
8748
8749 trace_kvm_msr_write_ex(msr_index, data);
8750 return X86EMUL_PROPAGATE_FAULT;
8751 }
8752
8753 trace_kvm_msr_write(msr_index, data);
8754 return X86EMUL_CONTINUE;
8755 }
8756
8757 static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
8758 u32 msr_index, u64 *pdata)
8759 {
8760 /*
8761 * Treat emulator accesses to the current shadow stack pointer as host-
8762 	 * initiated, as they aren't true MSR accesses (SSP is "just a reg"),
8763 * and this API is used only for implicit accesses, i.e. not RDMSR, and
8764 * so the index is fully KVM-controlled.
8765 */
8766 if (unlikely(msr_index == MSR_KVM_INTERNAL_GUEST_SSP))
8767 return kvm_msr_read(emul_to_vcpu(ctxt), msr_index, pdata);
8768
8769 return __kvm_emulate_msr_read(emul_to_vcpu(ctxt), msr_index, pdata);
8770 }
8771
8772 static int emulator_check_rdpmc_early(struct x86_emulate_ctxt *ctxt, u32 pmc)
8773 {
8774 return kvm_pmu_check_rdpmc_early(emul_to_vcpu(ctxt), pmc);
8775 }
8776
8777 static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
8778 u32 pmc, u64 *pdata)
8779 {
8780 return kvm_pmu_rdpmc(emul_to_vcpu(ctxt), pmc, pdata);
8781 }
8782
8783 static void emulator_halt(struct x86_emulate_ctxt *ctxt)
8784 {
8785 emul_to_vcpu(ctxt)->arch.halt_request = 1;
8786 }
8787
8788 static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
8789 struct x86_instruction_info *info,
8790 enum x86_intercept_stage stage)
8791 {
8792 return kvm_x86_call(check_intercept)(emul_to_vcpu(ctxt), info, stage,
8793 &ctxt->exception);
8794 }
8795
8796 static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
8797 u32 *eax, u32 *ebx, u32 *ecx, u32 *edx,
8798 bool exact_only)
8799 {
8800 return kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx, exact_only);
8801 }
8802
8803 static bool emulator_guest_has_movbe(struct x86_emulate_ctxt *ctxt)
8804 {
8805 return guest_cpu_cap_has(emul_to_vcpu(ctxt), X86_FEATURE_MOVBE);
8806 }
8807
8808 static bool emulator_guest_has_fxsr(struct x86_emulate_ctxt *ctxt)
8809 {
8810 return guest_cpu_cap_has(emul_to_vcpu(ctxt), X86_FEATURE_FXSR);
8811 }
8812
8813 static bool emulator_guest_has_rdpid(struct x86_emulate_ctxt *ctxt)
8814 {
8815 return guest_cpu_cap_has(emul_to_vcpu(ctxt), X86_FEATURE_RDPID);
8816 }
8817
8818 static bool emulator_guest_cpuid_is_intel_compatible(struct x86_emulate_ctxt *ctxt)
8819 {
8820 return guest_cpuid_is_intel_compatible(emul_to_vcpu(ctxt));
8821 }
8822
8823 static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg)
8824 {
8825 return kvm_register_read_raw(emul_to_vcpu(ctxt), reg);
8826 }
8827
8828 static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val)
8829 {
8830 kvm_register_write_raw(emul_to_vcpu(ctxt), reg, val);
8831 }
8832
8833 static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked)
8834 {
8835 kvm_x86_call(set_nmi_mask)(emul_to_vcpu(ctxt), masked);
8836 }
8837
8838 static bool emulator_is_smm(struct x86_emulate_ctxt *ctxt)
8839 {
8840 return is_smm(emul_to_vcpu(ctxt));
8841 }
8842
8843 #ifndef CONFIG_KVM_SMM
8844 static int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
8845 {
8846 WARN_ON_ONCE(1);
8847 return X86EMUL_UNHANDLEABLE;
8848 }
8849 #endif
8850
8851 static void emulator_triple_fault(struct x86_emulate_ctxt *ctxt)
8852 {
8853 kvm_make_request(KVM_REQ_TRIPLE_FAULT, emul_to_vcpu(ctxt));
8854 }
8855
8856 static int emulator_get_xcr(struct x86_emulate_ctxt *ctxt, u32 index, u64 *xcr)
8857 {
8858 if (index != XCR_XFEATURE_ENABLED_MASK)
8859 return 1;
8860 *xcr = emul_to_vcpu(ctxt)->arch.xcr0;
8861 return 0;
8862 }
8863
8864 static int emulator_set_xcr(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr)
8865 {
8866 return __kvm_set_xcr(emul_to_vcpu(ctxt), index, xcr);
8867 }
8868
8869 static void emulator_vm_bugged(struct x86_emulate_ctxt *ctxt)
8870 {
8871 struct kvm *kvm = emul_to_vcpu(ctxt)->kvm;
8872
8873 if (!kvm->vm_bugged)
8874 kvm_vm_bugged(kvm);
8875 }
8876
8877 static gva_t emulator_get_untagged_addr(struct x86_emulate_ctxt *ctxt,
8878 gva_t addr, unsigned int flags)
8879 {
8880 if (!kvm_x86_ops.get_untagged_addr)
8881 return addr;
8882
8883 return kvm_x86_call(get_untagged_addr)(emul_to_vcpu(ctxt),
8884 addr, flags);
8885 }
8886
8887 static bool emulator_is_canonical_addr(struct x86_emulate_ctxt *ctxt,
8888 gva_t addr, unsigned int flags)
8889 {
8890 return !is_noncanonical_address(addr, emul_to_vcpu(ctxt), flags);
8891 }
8892
8893 static const struct x86_emulate_ops emulate_ops = {
8894 .vm_bugged = emulator_vm_bugged,
8895 .read_gpr = emulator_read_gpr,
8896 .write_gpr = emulator_write_gpr,
8897 .read_std = emulator_read_std,
8898 .write_std = emulator_write_std,
8899 .fetch = kvm_fetch_guest_virt,
8900 .read_emulated = emulator_read_emulated,
8901 .write_emulated = emulator_write_emulated,
8902 .cmpxchg_emulated = emulator_cmpxchg_emulated,
8903 .invlpg = emulator_invlpg,
8904 .pio_in_emulated = emulator_pio_in_emulated,
8905 .pio_out_emulated = emulator_pio_out_emulated,
8906 .get_segment = emulator_get_segment,
8907 .set_segment = emulator_set_segment,
8908 .get_cached_segment_base = emulator_get_cached_segment_base,
8909 .get_gdt = emulator_get_gdt,
8910 .get_idt = emulator_get_idt,
8911 .set_gdt = emulator_set_gdt,
8912 .set_idt = emulator_set_idt,
8913 .get_cr = emulator_get_cr,
8914 .set_cr = emulator_set_cr,
8915 .cpl = emulator_get_cpl,
8916 .get_dr = emulator_get_dr,
8917 .set_dr = emulator_set_dr,
8918 .set_msr_with_filter = emulator_set_msr_with_filter,
8919 .get_msr_with_filter = emulator_get_msr_with_filter,
8920 .get_msr = emulator_get_msr,
8921 .check_rdpmc_early = emulator_check_rdpmc_early,
8922 .read_pmc = emulator_read_pmc,
8923 .halt = emulator_halt,
8924 .wbinvd = emulator_wbinvd,
8925 .fix_hypercall = emulator_fix_hypercall,
8926 .intercept = emulator_intercept,
8927 .get_cpuid = emulator_get_cpuid,
8928 .guest_has_movbe = emulator_guest_has_movbe,
8929 .guest_has_fxsr = emulator_guest_has_fxsr,
8930 .guest_has_rdpid = emulator_guest_has_rdpid,
8931 .guest_cpuid_is_intel_compatible = emulator_guest_cpuid_is_intel_compatible,
8932 .set_nmi_mask = emulator_set_nmi_mask,
8933 .is_smm = emulator_is_smm,
8934 .leave_smm = emulator_leave_smm,
8935 .triple_fault = emulator_triple_fault,
8936 .get_xcr = emulator_get_xcr,
8937 .set_xcr = emulator_set_xcr,
8938 .get_untagged_addr = emulator_get_untagged_addr,
8939 .is_canonical_addr = emulator_is_canonical_addr,
8940 };
8941
8942 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
8943 {
8944 u32 int_shadow = kvm_x86_call(get_interrupt_shadow)(vcpu);
8945 /*
8946 	 * An "sti; sti" sequence only disables interrupts for the first
8947 	 * instruction. So, if the last instruction, be it emulated or
8948 	 * not, left the system with the INT_STI flag enabled, it
8949 	 * means that the last instruction was an STI. We should not
8950 	 * leave the flag on in this case. The same goes for MOV SS.
8951 */
8952 if (int_shadow & mask)
8953 mask = 0;
8954 if (unlikely(int_shadow || mask)) {
8955 kvm_x86_call(set_interrupt_shadow)(vcpu, mask);
8956 if (!mask)
8957 kvm_make_request(KVM_REQ_EVENT, vcpu);
8958 }
8959 }
8960
8961 static void inject_emulated_exception(struct kvm_vcpu *vcpu)
8962 {
8963 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
8964
8965 if (ctxt->exception.vector == PF_VECTOR)
8966 kvm_inject_emulated_page_fault(vcpu, &ctxt->exception);
8967 else if (ctxt->exception.error_code_valid)
8968 kvm_queue_exception_e(vcpu, ctxt->exception.vector,
8969 ctxt->exception.error_code);
8970 else
8971 kvm_queue_exception(vcpu, ctxt->exception.vector);
8972 }
8973
8974 static struct x86_emulate_ctxt *alloc_emulate_ctxt(struct kvm_vcpu *vcpu)
8975 {
8976 struct x86_emulate_ctxt *ctxt;
8977
8978 ctxt = kmem_cache_zalloc(x86_emulator_cache, GFP_KERNEL_ACCOUNT);
8979 if (!ctxt) {
8980 pr_err("failed to allocate vcpu's emulator\n");
8981 return NULL;
8982 }
8983
8984 ctxt->vcpu = vcpu;
8985 ctxt->ops = &emulate_ops;
8986 vcpu->arch.emulate_ctxt = ctxt;
8987
8988 return ctxt;
8989 }
8990
8991 static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
8992 {
8993 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
8994 int cs_db, cs_l;
8995
8996 kvm_x86_call(get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
8997
8998 ctxt->gpa_available = false;
8999 ctxt->eflags = kvm_get_rflags(vcpu);
9000 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
9001
9002 ctxt->eip = kvm_rip_read(vcpu);
9003 ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
9004 (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 :
9005 (cs_l && is_long_mode(vcpu)) ? X86EMUL_MODE_PROT64 :
9006 cs_db ? X86EMUL_MODE_PROT32 :
9007 X86EMUL_MODE_PROT16;
9008 ctxt->interruptibility = 0;
9009 ctxt->have_exception = false;
9010 ctxt->exception.vector = -1;
9011 ctxt->perm_ok = false;
9012
9013 init_decode_cache(ctxt);
9014 vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
9015 }
9016
9017 void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
9018 {
9019 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
9020 int ret;
9021
9022 init_emulate_ctxt(vcpu);
9023
9024 ctxt->op_bytes = 2;
9025 ctxt->ad_bytes = 2;
9026 ctxt->_eip = ctxt->eip + inc_eip;
9027 ret = emulate_int_real(ctxt, irq);
9028
9029 if (ret != X86EMUL_CONTINUE) {
9030 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
9031 } else {
9032 ctxt->eip = ctxt->_eip;
9033 kvm_rip_write(vcpu, ctxt->eip);
9034 kvm_set_rflags(vcpu, ctxt->eflags);
9035 }
9036 }
9037 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_inject_realmode_interrupt);
9038
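/*
 * Build a KVM_EXIT_INTERNAL_ERROR / KVM_INTERNAL_ERROR_EMULATION exit: pack
 * the flags, optional instruction bytes, the vendor-provided exit info, and
 * any caller-provided data into kvm_run.emulation_failure.
 */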
9039 static void prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data,
9040 u8 ndata, u8 *insn_bytes, u8 insn_size)
9041 {
9042 struct kvm_run *run = vcpu->run;
9043 u64 info[5];
9044 u8 info_start;
9045
9046 /*
9047 * Zero the whole array used to retrieve the exit info, as casting to
9048 * u32 for select entries will leave some chunks uninitialized.
9049 */
9050 memset(&info, 0, sizeof(info));
9051
9052 kvm_x86_call(get_exit_info)(vcpu, (u32 *)&info[0], &info[1], &info[2],
9053 (u32 *)&info[3], (u32 *)&info[4]);
9054
9055 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
9056 run->emulation_failure.suberror = KVM_INTERNAL_ERROR_EMULATION;
9057
9058 /*
9059 * There's currently space for 13 entries, but 5 are used for the exit
9060 * reason and info. Restrict to 4 to reduce the maintenance burden
9061 * when expanding kvm_run.emulation_failure in the future.
9062 */
9063 if (WARN_ON_ONCE(ndata > 4))
9064 ndata = 4;
9065
9066 /* Always include the flags as a 'data' entry. */
9067 info_start = 1;
9068 run->emulation_failure.flags = 0;
9069
9070 if (insn_size) {
9071 BUILD_BUG_ON((sizeof(run->emulation_failure.insn_size) +
9072 sizeof(run->emulation_failure.insn_bytes) != 16));
9073 info_start += 2;
9074 run->emulation_failure.flags |=
9075 KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES;
9076 run->emulation_failure.insn_size = insn_size;
9077 memset(run->emulation_failure.insn_bytes, 0x90,
9078 sizeof(run->emulation_failure.insn_bytes));
9079 memcpy(run->emulation_failure.insn_bytes, insn_bytes, insn_size);
9080 }
9081
9082 memcpy(&run->internal.data[info_start], info, sizeof(info));
9083 memcpy(&run->internal.data[info_start + ARRAY_SIZE(info)], data,
9084 ndata * sizeof(data[0]));
9085
9086 run->emulation_failure.ndata = info_start + ARRAY_SIZE(info) + ndata;
9087 }
9088
9089 static void prepare_emulation_ctxt_failure_exit(struct kvm_vcpu *vcpu)
9090 {
9091 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
9092
9093 prepare_emulation_failure_exit(vcpu, NULL, 0, ctxt->fetch.data,
9094 ctxt->fetch.end - ctxt->fetch.data);
9095 }
9096
9097 void __kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data,
9098 u8 ndata)
9099 {
9100 prepare_emulation_failure_exit(vcpu, data, ndata, NULL, 0);
9101 }
9102 EXPORT_SYMBOL_FOR_KVM_INTERNAL(__kvm_prepare_emulation_failure_exit);
9103
9104 void kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu)
9105 {
9106 __kvm_prepare_emulation_failure_exit(vcpu, NULL, 0);
9107 }
9108 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_prepare_emulation_failure_exit);
9109
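/*
 * Report an exit that KVM can't handle because it occurred while the CPU was
 * delivering an event, by filling kvm_run.internal with the exit info, the
 * faulting GPA, and the last physical CPU the vCPU entered the guest on.
 */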
9110 void kvm_prepare_event_vectoring_exit(struct kvm_vcpu *vcpu, gpa_t gpa)
9111 {
9112 u32 reason, intr_info, error_code;
9113 struct kvm_run *run = vcpu->run;
9114 u64 info1, info2;
9115 int ndata = 0;
9116
9117 kvm_x86_call(get_exit_info)(vcpu, &reason, &info1, &info2,
9118 &intr_info, &error_code);
9119
9120 run->internal.data[ndata++] = info2;
9121 run->internal.data[ndata++] = reason;
9122 run->internal.data[ndata++] = info1;
9123 run->internal.data[ndata++] = gpa;
9124 run->internal.data[ndata++] = vcpu->arch.last_vmentry_cpu;
9125
9126 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
9127 run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
9128 run->internal.ndata = ndata;
9129 }
9130 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_prepare_event_vectoring_exit);
9131
9132 void kvm_prepare_unexpected_reason_exit(struct kvm_vcpu *vcpu, u64 exit_reason)
9133 {
9134 vcpu_unimpl(vcpu, "unexpected exit reason 0x%llx\n", exit_reason);
9135
9136 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
9137 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
9138 vcpu->run->internal.ndata = 2;
9139 vcpu->run->internal.data[0] = exit_reason;
9140 vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
9141 }
9142 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_prepare_unexpected_reason_exit);
9143
9144 static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type)
9145 {
9146 struct kvm *kvm = vcpu->kvm;
9147
9148 ++vcpu->stat.insn_emulation_fail;
9149 trace_kvm_emulate_insn_failed(vcpu);
9150
9151 if (emulation_type & EMULTYPE_VMWARE_GP) {
9152 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
9153 return 1;
9154 }
9155
9156 if (kvm->arch.exit_on_emulation_error ||
9157 (emulation_type & EMULTYPE_SKIP)) {
9158 prepare_emulation_ctxt_failure_exit(vcpu);
9159 return 0;
9160 }
9161
9162 kvm_queue_exception(vcpu, UD_VECTOR);
9163
9164 if (!is_guest_mode(vcpu) && kvm_x86_call(get_cpl)(vcpu) == 0) {
9165 prepare_emulation_ctxt_failure_exit(vcpu);
9166 return 0;
9167 }
9168
9169 return 1;
9170 }
9171
9172 static bool kvm_unprotect_and_retry_on_failure(struct kvm_vcpu *vcpu,
9173 gpa_t cr2_or_gpa,
9174 int emulation_type)
9175 {
9176 if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF))
9177 return false;
9178
9179 /*
9180 * If the failed instruction faulted on an access to page tables that
9181 * are used to translate any part of the instruction, KVM can't resolve
9182 * the issue by unprotecting the gfn, as zapping the shadow page will
9183 * result in the instruction taking a !PRESENT page fault and thus put
9184 * the vCPU into an infinite loop of page faults. E.g. KVM will create
9185 * a SPTE and write-protect the gfn to resolve the !PRESENT fault, and
9186 * then zap the SPTE to unprotect the gfn, and then do it all over
9187 * again. Report the error to userspace.
9188 */
9189 if (emulation_type & EMULTYPE_WRITE_PF_TO_SP)
9190 return false;
9191
9192 /*
9193 * If emulation may have been triggered by a write to a shadowed page
9194 * table, unprotect the gfn (zap any relevant SPTEs) and re-enter the
9195 * guest to let the CPU re-execute the instruction in the hope that the
9196 * CPU can cleanly execute the instruction that KVM failed to emulate.
9197 */
9198 __kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa, true);
9199
9200 /*
9201 * Retry even if _this_ vCPU didn't unprotect the gfn, as it's possible
9202 * all SPTEs were already zapped by a different task. The alternative
9203 * is to report the error to userspace and likely terminate the guest,
9204 * and the last_retry_{eip,addr} checks will prevent retrying the page
9205 * fault indefinitely, i.e. there's nothing to lose by retrying.
9206 */
9207 return true;
9208 }
9209
9210 static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
9211 static int complete_emulated_pio(struct kvm_vcpu *vcpu);
9212
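/*
 * Check @addr against the four hardware breakpoints described by @dr7 and
 * @db[]: two enable bits and a 4-bit R/W+LEN field per breakpoint.  Returns
 * a DR6-style bitmask of the breakpoints that match @addr and @type.
 */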
9213 static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
9214 unsigned long *db)
9215 {
9216 u32 dr6 = 0;
9217 int i;
9218 u32 enable, rwlen;
9219
9220 enable = dr7;
9221 rwlen = dr7 >> 16;
9222 for (i = 0; i < 4; i++, enable >>= 2, rwlen >>= 4)
9223 if ((enable & 3) && (rwlen & 15) == type && db[i] == addr)
9224 dr6 |= (1 << i);
9225 return dr6;
9226 }
9227
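/*
 * Deliver a single-step #DB: exit to userspace with KVM_EXIT_DEBUG if
 * userspace is single-stepping the guest, otherwise queue a #DB with DR6.BS
 * set for injection into the guest.
 */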
9228 static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu)
9229 {
9230 struct kvm_run *kvm_run = vcpu->run;
9231
9232 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
9233 kvm_run->debug.arch.dr6 = DR6_BS | DR6_ACTIVE_LOW;
9234 kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu);
9235 kvm_run->debug.arch.exception = DB_VECTOR;
9236 kvm_run->exit_reason = KVM_EXIT_DEBUG;
9237 return 0;
9238 }
9239 kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BS);
9240 return 1;
9241 }
9242
9243 int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
9244 {
9245 unsigned long rflags = kvm_x86_call(get_rflags)(vcpu);
9246 int r;
9247
9248 r = kvm_x86_call(skip_emulated_instruction)(vcpu);
9249 if (unlikely(!r))
9250 return 0;
9251
9252 kvm_pmu_instruction_retired(vcpu);
9253
9254 /*
9255 * rflags is the old, "raw" value of the flags. The new value has
9256 * not been saved yet.
9257 *
9258 * This is correct even for TF set by the guest, because "the
9259 * processor will not generate this exception after the instruction
9260 * that sets the TF flag".
9261 */
9262 if (unlikely(rflags & X86_EFLAGS_TF))
9263 r = kvm_vcpu_do_singlestep(vcpu);
9264 return r;
9265 }
9266 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_skip_emulated_instruction);
9267
9268 static bool kvm_is_code_breakpoint_inhibited(struct kvm_vcpu *vcpu)
9269 {
9270 if (kvm_get_rflags(vcpu) & X86_EFLAGS_RF)
9271 return true;
9272
9273 /*
9274 * Intel compatible CPUs inhibit code #DBs when MOV/POP SS blocking is
9275 * active, but AMD compatible CPUs do not.
9276 */
9277 if (!guest_cpuid_is_intel_compatible(vcpu))
9278 return false;
9279
9280 return kvm_x86_call(get_interrupt_shadow)(vcpu) & KVM_X86_SHADOW_INT_MOV_SS;
9281 }
9282
9283 static bool kvm_vcpu_check_code_breakpoint(struct kvm_vcpu *vcpu,
9284 int emulation_type, int *r)
9285 {
9286 WARN_ON_ONCE(emulation_type & EMULTYPE_NO_DECODE);
9287
9288 /*
9289 * Do not check for code breakpoints if hardware has already done the
9290 * checks, as inferred from the emulation type. On NO_DECODE and SKIP,
9291 * the instruction has passed all exception checks, and all intercepted
9292 * exceptions that trigger emulation have lower priority than code
9293 * breakpoints, i.e. the fact that the intercepted exception occurred
9294 * means any code breakpoints have already been serviced.
9295 *
9296 * Note, KVM needs to check for code #DBs on EMULTYPE_TRAP_UD_FORCED as
9297 * hardware has checked the RIP of the magic prefix, but not the RIP of
9298 * the instruction being emulated. The intent of forced emulation is
9299 * to behave as if KVM intercepted the instruction without an exception
9300 * and without a prefix.
9301 */
9302 if (emulation_type & (EMULTYPE_NO_DECODE | EMULTYPE_SKIP |
9303 EMULTYPE_TRAP_UD | EMULTYPE_VMWARE_GP | EMULTYPE_PF))
9304 return false;
9305
9306 if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) &&
9307 (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) {
9308 struct kvm_run *kvm_run = vcpu->run;
9309 unsigned long eip = kvm_get_linear_rip(vcpu);
9310 u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
9311 vcpu->arch.guest_debug_dr7,
9312 vcpu->arch.eff_db);
9313
9314 if (dr6 != 0) {
9315 kvm_run->debug.arch.dr6 = dr6 | DR6_ACTIVE_LOW;
9316 kvm_run->debug.arch.pc = eip;
9317 kvm_run->debug.arch.exception = DB_VECTOR;
9318 kvm_run->exit_reason = KVM_EXIT_DEBUG;
9319 *r = 0;
9320 return true;
9321 }
9322 }
9323
9324 if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) &&
9325 !kvm_is_code_breakpoint_inhibited(vcpu)) {
9326 unsigned long eip = kvm_get_linear_rip(vcpu);
9327 u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
9328 vcpu->arch.dr7,
9329 vcpu->arch.db);
9330
9331 if (dr6 != 0) {
9332 kvm_queue_exception_p(vcpu, DB_VECTOR, dr6);
9333 *r = 1;
9334 return true;
9335 }
9336 }
9337
9338 return false;
9339 }
9340
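/*
 * The VMware backdoor is limited to port I/O (IN, OUT, INS, OUTS) and RDPMC;
 * emulating anything else under EMULTYPE_VMWARE_GP results in a #GP being
 * injected into the guest (see x86_emulate_instruction()).
 */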
9341 static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt)
9342 {
9343 switch (ctxt->opcode_len) {
9344 case 1:
9345 switch (ctxt->b) {
9346 case 0xe4: /* IN */
9347 case 0xe5:
9348 case 0xec:
9349 case 0xed:
9350 case 0xe6: /* OUT */
9351 case 0xe7:
9352 case 0xee:
9353 case 0xef:
9354 case 0x6c: /* INS */
9355 case 0x6d:
9356 case 0x6e: /* OUTS */
9357 case 0x6f:
9358 return true;
9359 }
9360 break;
9361 case 2:
9362 switch (ctxt->b) {
9363 case 0x33: /* RDPMC */
9364 return true;
9365 }
9366 break;
9367 }
9368
9369 return false;
9370 }
9371
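/*
 * Check that the decoded instruction is the software interrupt (INT3, INT n,
 * or INTO) matching the vector encoded in the emulation type, i.e. that KVM
 * is skipping the instruction that actually triggered the intercept.
 */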
9372 static bool is_soft_int_instruction(struct x86_emulate_ctxt *ctxt,
9373 int emulation_type)
9374 {
9375 u8 vector = EMULTYPE_GET_SOFT_INT_VECTOR(emulation_type);
9376
9377 switch (ctxt->b) {
9378 case 0xcc:
9379 return vector == BP_VECTOR;
9380 case 0xcd:
9381 return vector == ctxt->src.val;
9382 case 0xce:
9383 return vector == OF_VECTOR;
9384 default:
9385 return false;
9386 }
9387 }
9388
9389 /*
9390 * Decode an instruction for emulation. The caller is responsible for handling
9391 * code breakpoints. Note, manually detecting code breakpoints is unnecessary
9392 * (and wrong) when emulating on an intercepted fault-like exception[*], as
9393 * code breakpoints have higher priority and thus have already been done by
9394 * hardware.
9395 *
9396 * [*] Except #MC, which is higher priority, but KVM should never emulate in
9397 * response to a machine check.
9398 */
9399 int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
9400 void *insn, int insn_len)
9401 {
9402 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
9403 int r;
9404
9405 init_emulate_ctxt(vcpu);
9406
9407 r = x86_decode_insn(ctxt, insn, insn_len, emulation_type);
9408
9409 trace_kvm_emulate_insn_start(vcpu);
9410 ++vcpu->stat.insn_emulation;
9411
9412 return r;
9413 }
9414 EXPORT_SYMBOL_FOR_KVM_INTERNAL(x86_decode_emulated_instruction);
9415
9416 int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
9417 int emulation_type, void *insn, int insn_len)
9418 {
9419 int r;
9420 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
9421 bool writeback = true;
9422
9423 if ((emulation_type & EMULTYPE_ALLOW_RETRY_PF) &&
9424 (WARN_ON_ONCE(is_guest_mode(vcpu)) ||
9425 WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF))))
9426 emulation_type &= ~EMULTYPE_ALLOW_RETRY_PF;
9427
9428 r = kvm_check_emulate_insn(vcpu, emulation_type, insn, insn_len);
9429 if (r != X86EMUL_CONTINUE) {
9430 if (r == X86EMUL_RETRY_INSTR || r == X86EMUL_PROPAGATE_FAULT)
9431 return 1;
9432
9433 if (kvm_unprotect_and_retry_on_failure(vcpu, cr2_or_gpa,
9434 emulation_type))
9435 return 1;
9436
9437 if (r == X86EMUL_UNHANDLEABLE_VECTORING) {
9438 kvm_prepare_event_vectoring_exit(vcpu, cr2_or_gpa);
9439 return 0;
9440 }
9441
9442 WARN_ON_ONCE(r != X86EMUL_UNHANDLEABLE);
9443 return handle_emulation_failure(vcpu, emulation_type);
9444 }
9445
9446 kvm_request_l1tf_flush_l1d();
9447
9448 if (!(emulation_type & EMULTYPE_NO_DECODE)) {
9449 kvm_clear_exception_queue(vcpu);
9450
9451 /*
9452 * Return immediately if RIP hits a code breakpoint, such #DBs
9453 * are fault-like and are higher priority than any faults on
9454 * the code fetch itself.
9455 */
9456 if (kvm_vcpu_check_code_breakpoint(vcpu, emulation_type, &r))
9457 return r;
9458
9459 r = x86_decode_emulated_instruction(vcpu, emulation_type,
9460 insn, insn_len);
9461 if (r != EMULATION_OK) {
9462 if ((emulation_type & EMULTYPE_TRAP_UD) ||
9463 (emulation_type & EMULTYPE_TRAP_UD_FORCED)) {
9464 kvm_queue_exception(vcpu, UD_VECTOR);
9465 return 1;
9466 }
9467 if (kvm_unprotect_and_retry_on_failure(vcpu, cr2_or_gpa,
9468 emulation_type))
9469 return 1;
9470
9471 if (ctxt->have_exception &&
9472 !(emulation_type & EMULTYPE_SKIP)) {
9473 /*
9474 * #UD should result in just EMULATION_FAILED, and trap-like
9475 * exception should not be encountered during decode.
9476 */
9477 WARN_ON_ONCE(ctxt->exception.vector == UD_VECTOR ||
9478 exception_type(ctxt->exception.vector) == EXCPT_TRAP);
9479 inject_emulated_exception(vcpu);
9480 return 1;
9481 }
9482 return handle_emulation_failure(vcpu, emulation_type);
9483 }
9484 }
9485
9486 if ((emulation_type & EMULTYPE_VMWARE_GP) &&
9487 !is_vmware_backdoor_opcode(ctxt)) {
9488 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
9489 return 1;
9490 }
9491
9492 /*
9493 * EMULTYPE_SKIP without EMULTYPE_COMPLETE_USER_EXIT is intended for
9494 * use *only* by vendor callbacks for kvm_skip_emulated_instruction().
9495 * The caller is responsible for updating interruptibility state and
9496 * injecting single-step #DBs.
9497 */
9498 if (emulation_type & EMULTYPE_SKIP) {
9499 if (emulation_type & EMULTYPE_SKIP_SOFT_INT &&
9500 !is_soft_int_instruction(ctxt, emulation_type))
9501 return 0;
9502
9503 if (ctxt->mode != X86EMUL_MODE_PROT64)
9504 ctxt->eip = (u32)ctxt->_eip;
9505 else
9506 ctxt->eip = ctxt->_eip;
9507
9508 if (emulation_type & EMULTYPE_COMPLETE_USER_EXIT) {
9509 r = 1;
9510 goto writeback;
9511 }
9512
9513 kvm_rip_write(vcpu, ctxt->eip);
9514 if (ctxt->eflags & X86_EFLAGS_RF)
9515 kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF);
9516 return 1;
9517 }
9518
9519 /*
9520 * If emulation was caused by a write-protection #PF on a non-page_table
9521 * writing instruction, try to unprotect the gfn, i.e. zap shadow pages,
9522 * and retry the instruction, as the vCPU is likely no longer using the
9523 * gfn as a page table.
9524 */
9525 if ((emulation_type & EMULTYPE_ALLOW_RETRY_PF) &&
9526 !x86_page_table_writing_insn(ctxt) &&
9527 kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa))
9528 return 1;
9529
9530 	/* This is needed for the VMware backdoor interface to work since it
9531 	   changes register values during the I/O operation. */
9532 if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
9533 vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
9534 emulator_invalidate_register_cache(ctxt);
9535 }
9536
9537 restart:
9538 if (emulation_type & EMULTYPE_PF) {
9539 /* Save the faulting GPA (cr2) in the address field */
9540 ctxt->exception.address = cr2_or_gpa;
9541
9542 /* With shadow page tables, cr2 contains a GVA or nGPA. */
9543 if (vcpu->arch.mmu->root_role.direct) {
9544 ctxt->gpa_available = true;
9545 ctxt->gpa_val = cr2_or_gpa;
9546 }
9547 } else {
9548 /* Sanitize the address out of an abundance of paranoia. */
9549 ctxt->exception.address = 0;
9550 }
9551
9552 /*
9553 * Check L1's instruction intercepts when emulating instructions for
9554 * L2, unless KVM is re-emulating a previously decoded instruction,
9555 * e.g. to complete userspace I/O, in which case KVM has already
9556 * checked the intercepts.
9557 */
9558 r = x86_emulate_insn(ctxt, is_guest_mode(vcpu) &&
9559 !(emulation_type & EMULTYPE_NO_DECODE));
9560
9561 if (r == EMULATION_INTERCEPTED)
9562 return 1;
9563
9564 if (r == EMULATION_FAILED) {
9565 if (kvm_unprotect_and_retry_on_failure(vcpu, cr2_or_gpa,
9566 emulation_type))
9567 return 1;
9568
9569 return handle_emulation_failure(vcpu, emulation_type);
9570 }
9571
9572 if (ctxt->have_exception) {
9573 WARN_ON_ONCE(vcpu->mmio_needed && !vcpu->mmio_is_write);
9574 vcpu->mmio_needed = false;
9575 r = 1;
9576 inject_emulated_exception(vcpu);
9577 } else if (vcpu->arch.pio.count) {
9578 if (!vcpu->arch.pio.in) {
9579 /* FIXME: return into emulator if single-stepping. */
9580 vcpu->arch.pio.count = 0;
9581 } else {
9582 writeback = false;
9583 vcpu->arch.complete_userspace_io = complete_emulated_pio;
9584 }
9585 r = 0;
9586 } else if (vcpu->mmio_needed) {
9587 ++vcpu->stat.mmio_exits;
9588
9589 if (!vcpu->mmio_is_write)
9590 writeback = false;
9591 r = 0;
9592 vcpu->arch.complete_userspace_io = complete_emulated_mmio;
9593 } else if (vcpu->arch.complete_userspace_io) {
9594 writeback = false;
9595 r = 0;
9596 } else if (r == EMULATION_RESTART)
9597 goto restart;
9598 else
9599 r = 1;
9600
9601 writeback:
9602 if (writeback) {
9603 unsigned long rflags = kvm_x86_call(get_rflags)(vcpu);
9604 toggle_interruptibility(vcpu, ctxt->interruptibility);
9605 vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
9606
9607 /*
9608 * Note, EXCPT_DB is assumed to be fault-like as the emulator
9609 * only supports code breakpoints and general detect #DB, both
9610 * of which are fault-like.
9611 */
9612 if (!ctxt->have_exception ||
9613 exception_type(ctxt->exception.vector) == EXCPT_TRAP) {
9614 kvm_pmu_instruction_retired(vcpu);
9615 if (ctxt->is_branch)
9616 kvm_pmu_branch_retired(vcpu);
9617 kvm_rip_write(vcpu, ctxt->eip);
9618 if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
9619 r = kvm_vcpu_do_singlestep(vcpu);
9620 kvm_x86_call(update_emulated_instruction)(vcpu);
9621 __kvm_set_rflags(vcpu, ctxt->eflags);
9622 }
9623
9624 /*
9625 * For STI, interrupts are shadowed; so KVM_REQ_EVENT will
9626 * do nothing, and it will be requested again as soon as
9627 * the shadow expires. But we still need to check here,
9628 * because POPF has no interrupt shadow.
9629 */
9630 if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF))
9631 kvm_make_request(KVM_REQ_EVENT, vcpu);
9632 } else
9633 vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
9634
9635 return r;
9636 }
9637
9638 int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type)
9639 {
9640 return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
9641 }
9642 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_instruction);
9643
9644 int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
9645 void *insn, int insn_len)
9646 {
9647 return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len);
9648 }
9649 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_instruction_from_buffer);
9650
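/*
 * Completion for the "OUT 0x7e" quirk: RIP was already advanced before
 * exiting to userspace (see kvm_fast_pio_out()), so just drop the pending
 * PIO count.
 */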
9651 static int complete_fast_pio_out_port_0x7e(struct kvm_vcpu *vcpu)
9652 {
9653 vcpu->arch.pio.count = 0;
9654 return 1;
9655 }
9656
9657 static int complete_fast_pio_out(struct kvm_vcpu *vcpu)
9658 {
9659 vcpu->arch.pio.count = 0;
9660
9661 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.cui_linear_rip)))
9662 return 1;
9663
9664 return kvm_skip_emulated_instruction(vcpu);
9665 }
9666
9667 static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
9668 unsigned short port)
9669 {
9670 unsigned long val = kvm_rax_read(vcpu);
9671 int ret = emulator_pio_out(vcpu, size, port, &val, 1);
9672
9673 if (ret)
9674 return ret;
9675
9676 /*
9677 * Workaround userspace that relies on old KVM behavior of %rip being
9678 * incremented prior to exiting to userspace to handle "OUT 0x7e".
9679 */
9680 if (port == 0x7e &&
9681 kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_OUT_7E_INC_RIP)) {
9682 vcpu->arch.complete_userspace_io =
9683 complete_fast_pio_out_port_0x7e;
9684 kvm_skip_emulated_instruction(vcpu);
9685 } else {
9686 vcpu->arch.cui_linear_rip = kvm_get_linear_rip(vcpu);
9687 vcpu->arch.complete_userspace_io = complete_fast_pio_out;
9688 }
9689 return 0;
9690 }
9691
9692 static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
9693 {
9694 unsigned long val;
9695
9696 /* We should only ever be called with arch.pio.count equal to 1 */
9697 BUG_ON(vcpu->arch.pio.count != 1);
9698
9699 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.cui_linear_rip))) {
9700 vcpu->arch.pio.count = 0;
9701 return 1;
9702 }
9703
9704 /* For size less than 4 we merge, else we zero extend */
9705 val = (vcpu->arch.pio.size < 4) ? kvm_rax_read(vcpu) : 0;
9706
9707 complete_emulator_pio_in(vcpu, &val);
9708 kvm_rax_write(vcpu, val);
9709
9710 return kvm_skip_emulated_instruction(vcpu);
9711 }
9712
9713 static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
9714 unsigned short port)
9715 {
9716 unsigned long val;
9717 int ret;
9718
9719 /* For size less than 4 we merge, else we zero extend */
9720 val = (size < 4) ? kvm_rax_read(vcpu) : 0;
9721
9722 ret = emulator_pio_in(vcpu, size, port, &val, 1);
9723 if (ret) {
9724 kvm_rax_write(vcpu, val);
9725 return ret;
9726 }
9727
9728 vcpu->arch.cui_linear_rip = kvm_get_linear_rip(vcpu);
9729 vcpu->arch.complete_userspace_io = complete_fast_pio_in;
9730
9731 return 0;
9732 }
9733
9734 int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in)
9735 {
9736 int ret;
9737
9738 if (in)
9739 ret = kvm_fast_pio_in(vcpu, size, port);
9740 else
9741 ret = kvm_fast_pio_out(vcpu, size, port);
9742 return ret && kvm_skip_emulated_instruction(vcpu);
9743 }
9744 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_fast_pio);
9745
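/* Invalidate the cached TSC frequency when a CPU is taken down. */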
9746 static int kvmclock_cpu_down_prep(unsigned int cpu)
9747 {
9748 __this_cpu_write(cpu_tsc_khz, 0);
9749 return 0;
9750 }
9751
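/*
 * Refresh this CPU's cached TSC frequency: use the cpufreq notifier data if
 * provided, otherwise query cpufreq directly, falling back to tsc_khz.
 */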
9752 static void tsc_khz_changed(void *data)
9753 {
9754 struct cpufreq_freqs *freq = data;
9755 unsigned long khz;
9756
9757 WARN_ON_ONCE(boot_cpu_has(X86_FEATURE_CONSTANT_TSC));
9758
9759 if (data)
9760 khz = freq->new;
9761 else
9762 khz = cpufreq_quick_get(raw_smp_processor_id());
9763 if (!khz)
9764 khz = tsc_khz;
9765 __this_cpu_write(cpu_tsc_khz, khz);
9766 }
9767
9768 #ifdef CONFIG_X86_64
9769 static void kvm_hyperv_tsc_notifier(void)
9770 {
9771 struct kvm *kvm;
9772 int cpu;
9773
9774 mutex_lock(&kvm_lock);
9775 list_for_each_entry(kvm, &vm_list, vm_list)
9776 kvm_make_mclock_inprogress_request(kvm);
9777
9778 /* no guest entries from this point */
9779 hyperv_stop_tsc_emulation();
9780
9781 /* TSC frequency always matches when on Hyper-V */
9782 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
9783 for_each_present_cpu(cpu)
9784 per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
9785 }
9786 kvm_caps.max_guest_tsc_khz = tsc_khz;
9787
9788 list_for_each_entry(kvm, &vm_list, vm_list) {
9789 __kvm_start_pvclock_update(kvm);
9790 pvclock_update_vm_gtod_copy(kvm);
9791 kvm_end_pvclock_update(kvm);
9792 }
9793
9794 mutex_unlock(&kvm_lock);
9795 }
9796 #endif
9797
9798 static void __kvmclock_cpufreq_notifier(struct cpufreq_freqs *freq, int cpu)
9799 {
9800 struct kvm *kvm;
9801 struct kvm_vcpu *vcpu;
9802 int send_ipi = 0;
9803 unsigned long i;
9804
9805 /*
9806 * We allow guests to temporarily run on slowing clocks,
9807 * provided we notify them after, or to run on accelerating
9808 * clocks, provided we notify them before. Thus time never
9809 * goes backwards.
9810 *
9811 * However, we have a problem. We can't atomically update
9812 * the frequency of a given CPU from this function; it is
9813 * merely a notifier, which can be called from any CPU.
9814 * Changing the TSC frequency at arbitrary points in time
9815 * requires a recomputation of local variables related to
9816 * the TSC for each VCPU. We must flag these local variables
9817 * to be updated and be sure the update takes place with the
9818 * new frequency before any guests proceed.
9819 *
9820 * Unfortunately, the combination of hotplug CPU and frequency
9821 * change creates an intractable locking scenario; the order
9822 * of when these callouts happen is undefined with respect to
9823 * CPU hotplug, and they can race with each other. As such,
9824 * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is
9825 * undefined; you can actually have a CPU frequency change take
9826 * place in between the computation of X and the setting of the
9827 * variable. To protect against this problem, all updates of
9828 * the per_cpu tsc_khz variable are done in an interrupt
9829 * protected IPI, and all callers wishing to update the value
9830 * must wait for a synchronous IPI to complete (which is trivial
9831 * if the caller is on the CPU already). This establishes the
9832 * necessary total order on variable updates.
9833 *
9834 * Note that because a guest time update may take place
9835 * anytime after the setting of the VCPU's request bit, the
9836 * correct TSC value must be set before the request. However,
9837 * to ensure the update actually makes it to any guest which
9838 * starts running in hardware virtualization between the set
9839 * and the acquisition of the spinlock, we must also ping the
9840 * CPU after setting the request bit.
9841 *
9842 */
9843
9844 smp_call_function_single(cpu, tsc_khz_changed, freq, 1);
9845
9846 mutex_lock(&kvm_lock);
9847 list_for_each_entry(kvm, &vm_list, vm_list) {
9848 kvm_for_each_vcpu(i, vcpu, kvm) {
9849 if (vcpu->cpu != cpu)
9850 continue;
9851 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
9852 if (vcpu->cpu != raw_smp_processor_id())
9853 send_ipi = 1;
9854 }
9855 }
9856 mutex_unlock(&kvm_lock);
9857
9858 if (freq->old < freq->new && send_ipi) {
9859 /*
9860 		 * We upscale the frequency. We must make sure the guest
9861 		 * doesn't see old kvmclock values while running with
9862 		 * the new frequency, otherwise we risk the guest seeing
9863 		 * time go backwards.
9864 *
9865 * In case we update the frequency for another cpu
9866 * (which might be in guest context) send an interrupt
9867 * to kick the cpu out of guest context. Next time
9868 * guest context is entered kvmclock will be updated,
9869 * so the guest will not see stale values.
9870 */
9871 smp_call_function_single(cpu, tsc_khz_changed, freq, 1);
9872 }
9873 }
9874
9875 static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
9876 void *data)
9877 {
9878 struct cpufreq_freqs *freq = data;
9879 int cpu;
9880
9881 if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
9882 return 0;
9883 if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
9884 return 0;
9885
9886 for_each_cpu(cpu, freq->policy->cpus)
9887 __kvmclock_cpufreq_notifier(freq, cpu);
9888
9889 return 0;
9890 }
9891
9892 static struct notifier_block kvmclock_cpufreq_notifier_block = {
9893 .notifier_call = kvmclock_cpufreq_notifier
9894 };
9895
9896 static int kvmclock_cpu_online(unsigned int cpu)
9897 {
9898 tsc_khz_changed(NULL);
9899 return 0;
9900 }
9901
9902 static void kvm_timer_init(void)
9903 {
9904 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
9905 max_tsc_khz = tsc_khz;
9906
9907 if (IS_ENABLED(CONFIG_CPU_FREQ)) {
9908 struct cpufreq_policy *policy;
9909 int cpu;
9910
9911 cpu = get_cpu();
9912 policy = cpufreq_cpu_get(cpu);
9913 if (policy) {
9914 if (policy->cpuinfo.max_freq)
9915 max_tsc_khz = policy->cpuinfo.max_freq;
9916 cpufreq_cpu_put(policy);
9917 }
9918 put_cpu();
9919 }
9920 cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
9921 CPUFREQ_TRANSITION_NOTIFIER);
9922
9923 cpuhp_setup_state(CPUHP_AP_X86_KVM_CLK_ONLINE, "x86/kvm/clk:online",
9924 kvmclock_cpu_online, kvmclock_cpu_down_prep);
9925 }
9926 }
9927
9928 #ifdef CONFIG_X86_64
9929 static void pvclock_gtod_update_fn(struct work_struct *work)
9930 {
9931 struct kvm *kvm;
9932 struct kvm_vcpu *vcpu;
9933 unsigned long i;
9934
9935 mutex_lock(&kvm_lock);
9936 list_for_each_entry(kvm, &vm_list, vm_list)
9937 kvm_for_each_vcpu(i, vcpu, kvm)
9938 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
9939 atomic_set(&kvm_guest_has_master_clock, 0);
9940 mutex_unlock(&kvm_lock);
9941 }
9942
9943 static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);
9944
9945 /*
9946 * Indirection to move queue_work() out of the tk_core.seq write held
9947 * region to prevent possible deadlocks against time accessors which
9948 * are invoked with work related locks held.
9949 */
9950 static void pvclock_irq_work_fn(struct irq_work *w)
9951 {
9952 queue_work(system_long_wq, &pvclock_gtod_work);
9953 }
9954
9955 static DEFINE_IRQ_WORK(pvclock_irq_work, pvclock_irq_work_fn);
9956
9957 /*
9958 * Notification about pvclock gtod data update.
9959 */
9960 static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused,
9961 void *priv)
9962 {
9963 struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
9964 struct timekeeper *tk = priv;
9965
9966 update_pvclock_gtod(tk);
9967
9968 /*
9969 * Disable master clock if host does not trust, or does not use,
9970 * TSC based clocksource. Delegate queue_work() to irq_work as
9971 * this is invoked with tk_core.seq write held.
9972 */
9973 if (!gtod_is_based_on_tsc(gtod->clock.vclock_mode) &&
9974 atomic_read(&kvm_guest_has_master_clock) != 0)
9975 irq_work_queue(&pvclock_irq_work);
9976 return 0;
9977 }
9978
9979 static struct notifier_block pvclock_gtod_notifier = {
9980 .notifier_call = pvclock_gtod_notify,
9981 };
9982 #endif
9983
9984 void kvm_setup_xss_caps(void)
9985 {
9986 if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES))
9987 kvm_caps.supported_xss = 0;
9988
9989 if (!kvm_cpu_cap_has(X86_FEATURE_SHSTK) &&
9990 !kvm_cpu_cap_has(X86_FEATURE_IBT))
9991 kvm_caps.supported_xss &= ~XFEATURE_MASK_CET_ALL;
9992
9993 if ((kvm_caps.supported_xss & XFEATURE_MASK_CET_ALL) != XFEATURE_MASK_CET_ALL) {
9994 kvm_cpu_cap_clear(X86_FEATURE_SHSTK);
9995 kvm_cpu_cap_clear(X86_FEATURE_IBT);
9996 kvm_caps.supported_xss &= ~XFEATURE_MASK_CET_ALL;
9997 }
9998 }
9999 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_setup_xss_caps);
10000
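/*
 * Copy the vendor module's runtime ops into kvm_x86_ops and update the
 * kvm_x86_<op> static calls.  Mandatory ops trigger a WARN if missing;
 * optional "RET0" ops fall back to __static_call_return0.
 */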
10001 static inline void kvm_ops_update(struct kvm_x86_init_ops *ops)
10002 {
10003 memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops));
10004
10005 #define __KVM_X86_OP(func) \
10006 static_call_update(kvm_x86_##func, kvm_x86_ops.func);
10007 #define KVM_X86_OP(func) \
10008 WARN_ON(!kvm_x86_ops.func); __KVM_X86_OP(func)
10009 #define KVM_X86_OP_OPTIONAL __KVM_X86_OP
10010 #define KVM_X86_OP_OPTIONAL_RET0(func) \
10011 static_call_update(kvm_x86_##func, (void *)kvm_x86_ops.func ? : \
10012 (void *)__static_call_return0);
10013 #include <asm/kvm-x86-ops.h>
10014 #undef __KVM_X86_OP
10015
10016 kvm_pmu_ops_update(ops->pmu_ops);
10017 }
10018
10019 static int kvm_x86_check_processor_compatibility(void)
10020 {
10021 int cpu = smp_processor_id();
10022 struct cpuinfo_x86 *c = &cpu_data(cpu);
10023
10024 /*
10025 * Compatibility checks are done when loading KVM and when enabling
10026 * hardware, e.g. during CPU hotplug, to ensure all online CPUs are
10027 * compatible, i.e. KVM should never perform a compatibility check on
10028 * an offline CPU.
10029 */
10030 WARN_ON(!cpu_online(cpu));
10031
10032 if (__cr4_reserved_bits(cpu_has, c) !=
10033 __cr4_reserved_bits(cpu_has, &boot_cpu_data))
10034 return -EIO;
10035
10036 return kvm_x86_call(check_processor_compatibility)();
10037 }
10038
10039 static void kvm_x86_check_cpu_compat(void *ret)
10040 {
10041 *(int *)ret = kvm_x86_check_processor_compatibility();
10042 }
10043
10044 int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
10045 {
10046 u64 host_pat;
10047 int r, cpu;
10048
10049 guard(mutex)(&vendor_module_lock);
10050
10051 if (kvm_x86_ops.enable_virtualization_cpu) {
10052 pr_err("already loaded vendor module '%s'\n", kvm_x86_ops.name);
10053 return -EEXIST;
10054 }
10055
10056 /*
10057 * KVM explicitly assumes that the guest has an FPU and
10058 * FXSAVE/FXRSTOR. For example, the KVM_GET_FPU explicitly casts the
10059 * vCPU's FPU state as a fxregs_state struct.
10060 */
10061 if (!boot_cpu_has(X86_FEATURE_FPU) || !boot_cpu_has(X86_FEATURE_FXSR)) {
10062 pr_err("inadequate fpu\n");
10063 return -EOPNOTSUPP;
10064 }
10065
10066 if (IS_ENABLED(CONFIG_PREEMPT_RT) && !boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
10067 pr_err("RT requires X86_FEATURE_CONSTANT_TSC\n");
10068 return -EOPNOTSUPP;
10069 }
10070
10071 /*
10072 * KVM assumes that PAT entry '0' encodes WB memtype and simply zeroes
10073 * the PAT bits in SPTEs. Bail if PAT[0] is programmed to something
10074 * other than WB. Note, EPT doesn't utilize the PAT, but don't bother
10075 * with an exception. PAT[0] is set to WB on RESET and also by the
10076 * kernel, i.e. failure indicates a kernel bug or broken firmware.
10077 */
10078 if (rdmsrq_safe(MSR_IA32_CR_PAT, &host_pat) ||
10079 (host_pat & GENMASK(2, 0)) != 6) {
10080 pr_err("host PAT[0] is not WB\n");
10081 return -EIO;
10082 }
10083
10084 if (boot_cpu_has(X86_FEATURE_SHSTK) || boot_cpu_has(X86_FEATURE_IBT)) {
10085 rdmsrq(MSR_IA32_S_CET, kvm_host.s_cet);
10086 /*
10087 * Linux doesn't yet support supervisor shadow stacks (SSS), so
10088 * KVM doesn't save/restore the associated MSRs, i.e. KVM may
10089 * clobber the host values. Yell and refuse to load if SSS is
10090 * unexpectedly enabled, e.g. to avoid crashing the host.
10091 */
10092 if (WARN_ON_ONCE(kvm_host.s_cet & CET_SHSTK_EN))
10093 return -EIO;
10094 }
10095
10096 memset(&kvm_caps, 0, sizeof(kvm_caps));
10097
10098 x86_emulator_cache = kvm_alloc_emulator_cache();
10099 if (!x86_emulator_cache) {
10100 pr_err("failed to allocate cache for x86 emulator\n");
10101 return -ENOMEM;
10102 }
10103
10104 r = kvm_mmu_vendor_module_init();
10105 if (r)
10106 goto out_free_x86_emulator_cache;
10107
10108 kvm_caps.supported_vm_types = BIT(KVM_X86_DEFAULT_VM);
10109 kvm_caps.supported_mce_cap = MCG_CTL_P | MCG_SER_P;
10110
10111 if (boot_cpu_has(X86_FEATURE_XSAVE)) {
10112 kvm_host.xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
10113 kvm_caps.supported_xcr0 = kvm_host.xcr0 & KVM_SUPPORTED_XCR0;
10114 }
10115
10116 if (boot_cpu_has(X86_FEATURE_XSAVES)) {
10117 rdmsrq(MSR_IA32_XSS, kvm_host.xss);
10118 kvm_caps.supported_xss = kvm_host.xss & KVM_SUPPORTED_XSS;
10119 }
10120
10121 kvm_caps.supported_quirks = KVM_X86_VALID_QUIRKS;
10122 kvm_caps.inapplicable_quirks = KVM_X86_CONDITIONAL_QUIRKS;
10123
10124 rdmsrq_safe(MSR_EFER, &kvm_host.efer);
10125
10126 kvm_init_pmu_capability(ops->pmu_ops);
10127
10128 if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
10129 rdmsrq(MSR_IA32_ARCH_CAPABILITIES, kvm_host.arch_capabilities);
10130
10131 WARN_ON_ONCE(kvm_nr_uret_msrs);
10132
10133 r = ops->hardware_setup();
10134 if (r != 0)
10135 goto out_mmu_exit;
10136
10137 enable_device_posted_irqs &= enable_apicv &&
10138 irq_remapping_cap(IRQ_POSTING_CAP);
10139
10140 kvm_ops_update(ops);
10141
10142 for_each_online_cpu(cpu) {
10143 smp_call_function_single(cpu, kvm_x86_check_cpu_compat, &r, 1);
10144 if (r < 0)
10145 goto out_unwind_ops;
10146 }
10147
10148 /*
10149 * Point of no return! DO NOT add error paths below this point unless
10150 * absolutely necessary, as most operations from this point forward
10151 * require unwinding.
10152 */
10153 kvm_timer_init();
10154
10155 if (pi_inject_timer == -1)
10156 pi_inject_timer = housekeeping_enabled(HK_TYPE_TIMER);
10157 #ifdef CONFIG_X86_64
10158 pvclock_gtod_register_notifier(&pvclock_gtod_notifier);
10159
10160 if (hypervisor_is_type(X86_HYPER_MS_HYPERV))
10161 set_hv_tscchange_cb(kvm_hyperv_tsc_notifier);
10162 #endif
10163
10164 __kvm_register_perf_callbacks(ops->handle_intel_pt_intr,
10165 enable_mediated_pmu ? kvm_handle_guest_mediated_pmi : NULL);
10166
10167 if (IS_ENABLED(CONFIG_KVM_SW_PROTECTED_VM) && tdp_mmu_enabled)
10168 kvm_caps.supported_vm_types |= BIT(KVM_X86_SW_PROTECTED_VM);
10169
10170 /* KVM always ignores guest PAT for shadow paging. */
10171 if (!tdp_enabled)
10172 kvm_caps.supported_quirks &= ~KVM_X86_QUIRK_IGNORE_GUEST_PAT;
10173
10174 if (kvm_caps.has_tsc_control) {
10175 /*
10176 * Make sure the user can only configure tsc_khz values that
10177 * fit into a signed integer.
10178 * A min value is not calculated because it will always
10179 * be 1 on all machines.
10180 */
10181 u64 max = min(0x7fffffffULL,
10182 __scale_tsc(kvm_caps.max_tsc_scaling_ratio, tsc_khz));
10183 kvm_caps.max_guest_tsc_khz = max;
10184 }
10185 kvm_caps.default_tsc_scaling_ratio = 1ULL << kvm_caps.tsc_scaling_ratio_frac_bits;
10186 kvm_init_msr_lists();
10187 return 0;
10188
10189 out_unwind_ops:
10190 kvm_x86_ops.enable_virtualization_cpu = NULL;
10191 kvm_x86_call(hardware_unsetup)();
10192 out_mmu_exit:
10193 kvm_destroy_user_return_msrs();
10194 kvm_mmu_vendor_module_exit();
10195 out_free_x86_emulator_cache:
10196 kmem_cache_destroy(x86_emulator_cache);
10197 return r;
10198 }
10199 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_x86_vendor_init);
10200
10201 void kvm_x86_vendor_exit(void)
10202 {
10203 kvm_unregister_perf_callbacks();
10204
10205 #ifdef CONFIG_X86_64
10206 if (hypervisor_is_type(X86_HYPER_MS_HYPERV))
10207 clear_hv_tscchange_cb();
10208 #endif
10209 kvm_lapic_exit();
10210
10211 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
10212 cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
10213 CPUFREQ_TRANSITION_NOTIFIER);
10214 cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE);
10215 }
10216 #ifdef CONFIG_X86_64
10217 pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
10218 irq_work_sync(&pvclock_irq_work);
10219 cancel_work_sync(&pvclock_gtod_work);
10220 #endif
10221 kvm_x86_call(hardware_unsetup)();
10222 kvm_destroy_user_return_msrs();
10223 kvm_mmu_vendor_module_exit();
10224 kmem_cache_destroy(x86_emulator_cache);
10225 #ifdef CONFIG_KVM_XEN
10226 static_key_deferred_flush(&kvm_xen_enabled);
10227 WARN_ON(static_branch_unlikely(&kvm_xen_enabled.key));
10228 #endif
10229 mutex_lock(&vendor_module_lock);
10230 kvm_x86_ops.enable_virtualization_cpu = NULL;
10231 mutex_unlock(&vendor_module_lock);
10232 }
10233 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_x86_vendor_exit);
10234
10235 #ifdef CONFIG_X86_64
10236 static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,
10237 unsigned long clock_type)
10238 {
10239 struct kvm_clock_pairing clock_pairing;
10240 struct timespec64 ts;
10241 u64 cycle;
10242 int ret;
10243
10244 if (clock_type != KVM_CLOCK_PAIRING_WALLCLOCK)
10245 return -KVM_EOPNOTSUPP;
10246
10247 /*
10248 	 * When the TSC is in permanent catchup mode, guests won't be able to
10249 	 * use the pvclock_read_retry loop to get a consistent view of pvclock.
10250 */
10251 if (vcpu->arch.tsc_always_catchup)
10252 return -KVM_EOPNOTSUPP;
10253
10254 if (!kvm_get_walltime_and_clockread(&ts, &cycle))
10255 return -KVM_EOPNOTSUPP;
10256
10257 clock_pairing.sec = ts.tv_sec;
10258 clock_pairing.nsec = ts.tv_nsec;
10259 clock_pairing.tsc = kvm_read_l1_tsc(vcpu, cycle);
10260 clock_pairing.flags = 0;
10261 memset(&clock_pairing.pad, 0, sizeof(clock_pairing.pad));
10262
10263 ret = 0;
10264 if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing,
10265 sizeof(struct kvm_clock_pairing)))
10266 ret = -KVM_EFAULT;
10267
10268 return ret;
10269 }
10270 #endif
10271
10272 /*
10273 * kvm_pv_kick_cpu_op: Kick a vcpu.
10274 *
10275 * @apicid - apicid of vcpu to be kicked.
10276 */
10277 static void kvm_pv_kick_cpu_op(struct kvm *kvm, int apicid)
10278 {
10279 /*
10280 * All other fields are unused for APIC_DM_REMRD, but may be consumed by
10281 * common code, e.g. for tracing. Defer initialization to the compiler.
10282 */
10283 struct kvm_lapic_irq lapic_irq = {
10284 .delivery_mode = APIC_DM_REMRD,
10285 .dest_mode = APIC_DEST_PHYSICAL,
10286 .shorthand = APIC_DEST_NOSHORT,
10287 .dest_id = apicid,
10288 };
10289
10290 kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq);
10291 }
10292
10293 bool kvm_apicv_activated(struct kvm *kvm)
10294 {
10295 return (READ_ONCE(kvm->arch.apicv_inhibit_reasons) == 0);
10296 }
10297 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_apicv_activated);
10298
10299 bool kvm_vcpu_apicv_activated(struct kvm_vcpu *vcpu)
10300 {
10301 ulong vm_reasons = READ_ONCE(vcpu->kvm->arch.apicv_inhibit_reasons);
10302 ulong vcpu_reasons =
10303 kvm_x86_call(vcpu_get_apicv_inhibit_reasons)(vcpu);
10304
10305 return (vm_reasons | vcpu_reasons) == 0;
10306 }
10307 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_apicv_activated);
10308
10309 static void set_or_clear_apicv_inhibit(unsigned long *inhibits,
10310 enum kvm_apicv_inhibit reason, bool set)
10311 {
10312 const struct trace_print_flags apicv_inhibits[] = { APICV_INHIBIT_REASONS };
10313
10314 BUILD_BUG_ON(ARRAY_SIZE(apicv_inhibits) != NR_APICV_INHIBIT_REASONS);
10315
10316 if (set)
10317 __set_bit(reason, inhibits);
10318 else
10319 __clear_bit(reason, inhibits);
10320
10321 trace_kvm_apicv_inhibit_changed(reason, set, *inhibits);
10322 }
10323
10324 static void kvm_apicv_init(struct kvm *kvm)
10325 {
10326 enum kvm_apicv_inhibit reason = enable_apicv ? APICV_INHIBIT_REASON_ABSENT :
10327 APICV_INHIBIT_REASON_DISABLED;
10328
10329 set_or_clear_apicv_inhibit(&kvm->arch.apicv_inhibit_reasons, reason, true);
10330
10331 init_rwsem(&kvm->arch.apicv_update_lock);
10332 }
10333
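/*
 * Directed yield for the PV kick/yield hypercalls: resolve the destination
 * physical APIC ID to a vCPU and yield to it if the target is ready.
 */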
10334 static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id)
10335 {
10336 struct kvm_vcpu *target = NULL;
10337 struct kvm_apic_map *map;
10338
10339 vcpu->stat.directed_yield_attempted++;
10340
10341 if (single_task_running())
10342 goto no_yield;
10343
10344 rcu_read_lock();
10345 map = rcu_dereference(vcpu->kvm->arch.apic_map);
10346
10347 if (likely(map) && dest_id <= map->max_apic_id) {
10348 dest_id = array_index_nospec(dest_id, map->max_apic_id + 1);
10349 if (map->phys_map[dest_id])
10350 target = map->phys_map[dest_id]->vcpu;
10351 }
10352
10353 rcu_read_unlock();
10354
10355 if (!target || !READ_ONCE(target->ready))
10356 goto no_yield;
10357
10358 /* Ignore requests to yield to self */
10359 if (vcpu == target)
10360 goto no_yield;
10361
10362 if (kvm_vcpu_yield_to(target) <= 0)
10363 goto no_yield;
10364
10365 vcpu->stat.directed_yield_successful++;
10366
10367 no_yield:
10368 return;
10369 }
10370
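/*
 * Completion callback for hypercalls handled in userspace: propagate the
 * userspace-provided return value to the guest's RAX (truncated to 32 bits
 * for non-64-bit hypercalls) and skip the hypercall instruction.
 */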
10371 static int complete_hypercall_exit(struct kvm_vcpu *vcpu)
10372 {
10373 u64 ret = vcpu->run->hypercall.ret;
10374
10375 if (!is_64_bit_hypercall(vcpu))
10376 ret = (u32)ret;
10377 kvm_rax_write(vcpu, ret);
10378 return kvm_skip_emulated_instruction(vcpu);
10379 }
10380
10381 int ____kvm_emulate_hypercall(struct kvm_vcpu *vcpu, int cpl,
10382 int (*complete_hypercall)(struct kvm_vcpu *))
10383 {
10384 unsigned long ret;
10385 unsigned long nr = kvm_rax_read(vcpu);
10386 unsigned long a0 = kvm_rbx_read(vcpu);
10387 unsigned long a1 = kvm_rcx_read(vcpu);
10388 unsigned long a2 = kvm_rdx_read(vcpu);
10389 unsigned long a3 = kvm_rsi_read(vcpu);
10390 int op_64_bit = is_64_bit_hypercall(vcpu);
10391
10392 ++vcpu->stat.hypercalls;
10393
10394 trace_kvm_hypercall(nr, a0, a1, a2, a3);
10395
10396 if (!op_64_bit) {
10397 nr &= 0xFFFFFFFF;
10398 a0 &= 0xFFFFFFFF;
10399 a1 &= 0xFFFFFFFF;
10400 a2 &= 0xFFFFFFFF;
10401 a3 &= 0xFFFFFFFF;
10402 }
10403
10404 if (cpl) {
10405 ret = -KVM_EPERM;
10406 goto out;
10407 }
10408
10409 ret = -KVM_ENOSYS;
10410
10411 switch (nr) {
10412 case KVM_HC_VAPIC_POLL_IRQ:
10413 ret = 0;
10414 break;
10415 case KVM_HC_KICK_CPU:
10416 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_UNHALT))
10417 break;
10418
10419 kvm_pv_kick_cpu_op(vcpu->kvm, a1);
10420 kvm_sched_yield(vcpu, a1);
10421 ret = 0;
10422 break;
10423 #ifdef CONFIG_X86_64
10424 case KVM_HC_CLOCK_PAIRING:
10425 ret = kvm_pv_clock_pairing(vcpu, a0, a1);
10426 break;
10427 #endif
10428 case KVM_HC_SEND_IPI:
10429 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_SEND_IPI))
10430 break;
10431
10432 ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit);
10433 break;
10434 case KVM_HC_SCHED_YIELD:
10435 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_SCHED_YIELD))
10436 break;
10437
10438 kvm_sched_yield(vcpu, a0);
10439 ret = 0;
10440 break;
10441 case KVM_HC_MAP_GPA_RANGE: {
10442 u64 gpa = a0, npages = a1, attrs = a2;
10443
10444 ret = -KVM_ENOSYS;
10445 if (!user_exit_on_hypercall(vcpu->kvm, KVM_HC_MAP_GPA_RANGE))
10446 break;
10447
10448 if (!PAGE_ALIGNED(gpa) || !npages ||
10449 gpa_to_gfn(gpa) + npages <= gpa_to_gfn(gpa)) {
10450 ret = -KVM_EINVAL;
10451 break;
10452 }
10453
10454 vcpu->run->exit_reason = KVM_EXIT_HYPERCALL;
10455 vcpu->run->hypercall.nr = KVM_HC_MAP_GPA_RANGE;
10456 /*
10457 * In principle this should have been -KVM_ENOSYS, but userspace (QEMU <=9.2)
10458 * assumed that vcpu->run->hypercall.ret is never changed by KVM and thus that
10459 * it was always zero on KVM_EXIT_HYPERCALL. Since KVM is now overwriting
10460 * vcpu->run->hypercall.ret, ensuring that it is zero to not break QEMU.
10461 */
10462 vcpu->run->hypercall.ret = 0;
10463 vcpu->run->hypercall.args[0] = gpa;
10464 vcpu->run->hypercall.args[1] = npages;
10465 vcpu->run->hypercall.args[2] = attrs;
10466 vcpu->run->hypercall.flags = 0;
10467 if (op_64_bit)
10468 vcpu->run->hypercall.flags |= KVM_EXIT_HYPERCALL_LONG_MODE;
10469
10470 WARN_ON_ONCE(vcpu->run->hypercall.flags & KVM_EXIT_HYPERCALL_MBZ);
10471 vcpu->arch.complete_userspace_io = complete_hypercall;
10472 return 0;
10473 }
10474 default:
10475 ret = -KVM_ENOSYS;
10476 break;
10477 }
10478
10479 out:
10480 vcpu->run->hypercall.ret = ret;
10481 return 1;
10482 }
10483 EXPORT_SYMBOL_FOR_KVM_INTERNAL(____kvm_emulate_hypercall);
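
/*
 * Illustrative sketch of the guest-side calling convention consumed above,
 * per the KVM hypercall ABI: the hypercall number goes in RAX and up to four
 * arguments in RBX, RCX, RDX and RSI; the return value comes back in RAX.
 * Assuming an Intel guest (VMCALL; AMD guests use VMMCALL), a minimal
 * hand-rolled zero-argument hypercall might look like:
 *
 *	static inline long kvm_hypercall0_sketch(unsigned int nr)
 *	{
 *		long ret;
 *
 *		asm volatile("vmcall" : "=a"(ret) : "a"(nr) : "memory");
 *		return ret;
 *	}
 *
 * This is a sketch for orientation only; real guests use the helpers in
 * arch/x86/include/asm/kvm_para.h.
 */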

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
{
	if (kvm_xen_hypercall_enabled(vcpu->kvm))
		return kvm_xen_hypercall(vcpu);

	if (kvm_hv_hypercall_enabled(vcpu))
		return kvm_hv_hypercall(vcpu);

	return __kvm_emulate_hypercall(vcpu, kvm_x86_call(get_cpl)(vcpu),
				       complete_hypercall_exit);
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_hypercall);

static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	char instruction[3];
	unsigned long rip = kvm_rip_read(vcpu);

	/*
	 * If the quirk is disabled, synthesize a #UD and let the guest pick up
	 * the pieces.
	 */
	if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_FIX_HYPERCALL_INSN)) {
		ctxt->exception.error_code_valid = false;
		ctxt->exception.vector = UD_VECTOR;
		ctxt->have_exception = true;
		return X86EMUL_PROPAGATE_FAULT;
	}

	kvm_x86_call(patch_hypercall)(vcpu, instruction);

	return emulator_write_emulated(ctxt, rip, instruction, 3,
				       &ctxt->exception);
}
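
/*
 * For context (a fact about the ISA, not about this function's callers):
 * VMCALL and VMMCALL are both three-byte instructions, 0F 01 C1 and 0F 01 D9
 * respectively, which is why instruction[3] above is sufficient.
 * patch_hypercall() fills in the encoding native to the host vendor, so a
 * guest image built with the "wrong" vendor's hypercall instruction gets it
 * rewritten in place when the quirk is enabled.
 */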

static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
{
	return vcpu->run->request_interrupt_window &&
		likely(!pic_in_kernel(vcpu->kvm));
}

/* Called within kvm->srcu read side. */
static void post_kvm_run_save(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;

	kvm_run->if_flag = kvm_x86_call(get_if_flag)(vcpu);
	kvm_run->cr8 = kvm_get_cr8(vcpu);
	kvm_run->apic_base = vcpu->arch.apic_base;

	kvm_run->ready_for_interrupt_injection =
		pic_in_kernel(vcpu->kvm) ||
		kvm_vcpu_ready_for_interrupt_injection(vcpu);

	if (is_smm(vcpu))
		kvm_run->flags |= KVM_RUN_X86_SMM;
	if (is_guest_mode(vcpu))
		kvm_run->flags |= KVM_RUN_X86_GUEST_MODE;
}

static void update_cr8_intercept(struct kvm_vcpu *vcpu)
{
	int max_irr, tpr;

	if (!kvm_x86_ops.update_cr8_intercept)
		return;

	if (!lapic_in_kernel(vcpu))
		return;

	if (vcpu->arch.apic->apicv_active)
		return;

	if (!vcpu->arch.apic->vapic_addr)
		max_irr = kvm_lapic_find_highest_irr(vcpu);
	else
		max_irr = -1;

	if (max_irr != -1)
		max_irr >>= 4;

	tpr = kvm_lapic_get_cr8(vcpu);

	kvm_x86_call(update_cr8_intercept)(vcpu, tpr, max_irr);
}


int kvm_check_nested_events(struct kvm_vcpu *vcpu)
{
	if (kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
		kvm_x86_ops.nested_ops->triple_fault(vcpu);
		return 1;
	}

	return kvm_x86_ops.nested_ops->check_events(vcpu);
}

static void kvm_inject_exception(struct kvm_vcpu *vcpu)
{
	/*
	 * Suppress the error code if the vCPU is in Real Mode, as Real Mode
	 * exceptions don't report error codes.  The presence of an error code
	 * is carried with the exception and only stripped when the exception
	 * is injected, as intercepted #PF VM-Exits for AMD's Paged Real Mode
	 * do report an error code despite the CPU being in Real Mode.
	 */
	vcpu->arch.exception.has_error_code &= is_protmode(vcpu);

	trace_kvm_inj_exception(vcpu->arch.exception.vector,
				vcpu->arch.exception.has_error_code,
				vcpu->arch.exception.error_code,
				vcpu->arch.exception.injected);

	kvm_x86_call(inject_exception)(vcpu);
}

/*
 * Check for any event (interrupt or exception) that is ready to be injected,
 * and if there is at least one event, inject the event with the highest
 * priority.  This handles both "pending" events, i.e. events that have never
 * been injected into the guest, and "injected" events, i.e. events that were
 * injected as part of a previous VM-Enter, but weren't successfully delivered
 * and need to be re-injected.
 *
 * Note, this is not guaranteed to be invoked on a guest instruction boundary,
 * i.e. doesn't guarantee that there's an event window in the guest.  KVM must
 * be able to inject exceptions in the "middle" of an instruction, and so must
 * also be able to re-inject NMIs and IRQs in the middle of an instruction.
 * I.e. for exceptions and re-injected events, NOT invoking this on instruction
 * boundaries is necessary and correct.
 *
 * For simplicity, KVM uses a single path to inject all events (except events
 * that are injected directly from L1 to L2) and doesn't explicitly track
 * instruction boundaries for asynchronous events.  However, because VM-Exits
 * that can occur during instruction execution typically result in KVM skipping
 * the instruction or injecting an exception, e.g. instruction and exception
 * intercepts, and because pending exceptions have higher priority than pending
 * interrupts, KVM still honors instruction boundaries in most scenarios.
 *
 * But, if a VM-Exit occurs during instruction execution, and KVM does NOT skip
 * the instruction or inject an exception, then KVM can incorrectly inject a
 * new asynchronous event if the event became pending after the CPU fetched the
 * instruction (in the guest).  E.g. if a page fault (#PF, #NPF, EPT violation)
 * occurs and is resolved by KVM, a coincident NMI, SMI, IRQ, etc... can be
 * injected on the restarted instruction instead of being deferred until the
 * instruction completes.
 *
 * In practice, this virtualization hole is unlikely to be observed by the
 * guest, and even less likely to cause functional problems.  To detect the
 * hole, the guest would have to trigger an event on a side effect of an early
 * phase of instruction execution, e.g. on the instruction fetch from memory.
 * And for it to be a functional problem, the guest would need to depend on the
 * ordering between that side effect, the instruction completing, _and_ the
 * delivery of the asynchronous event.
 */
static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu,
				       bool *req_immediate_exit)
{
	bool can_inject;
	int r;

	/*
	 * Process nested events first, as nested VM-Exit supersedes event
	 * re-injection.  If there's an event queued for re-injection, it will
	 * be saved into the appropriate vmc{b,s}12 fields on nested VM-Exit.
	 */
	if (is_guest_mode(vcpu))
		r = kvm_check_nested_events(vcpu);
	else
		r = 0;

	/*
	 * Re-inject exceptions and events *especially* if immediate entry+exit
	 * to/from L2 is needed, as any event that has already been injected
	 * into L2 needs to complete its lifecycle before injecting a new event.
	 *
	 * Don't re-inject an NMI or interrupt if there is a pending exception.
	 * This collision arises if an exception occurred while vectoring the
	 * injected event, KVM intercepted said exception, and KVM ultimately
	 * determined the fault belongs to the guest and queues the exception
	 * for injection back into the guest.
	 *
	 * "Injected" interrupts can also collide with pending exceptions if
	 * userspace ignores the "ready for injection" flag and blindly queues
	 * an interrupt.  In that case, prioritizing the exception is correct,
	 * as the exception "occurred" before the exit to userspace.  Trap-like
	 * exceptions, e.g. most #DBs, have higher priority than interrupts.
	 * And while fault-like exceptions, e.g. #GP and #PF, are the lowest
	 * priority, they're only generated (pended) during instruction
	 * execution, and interrupts are recognized at instruction boundaries.
	 * Thus a pending fault-like exception means the fault occurred on the
	 * *previous* instruction and must be serviced prior to recognizing any
	 * new events in order to fully complete the previous instruction.
	 */
	if (vcpu->arch.exception.injected)
		kvm_inject_exception(vcpu);
	else if (kvm_is_exception_pending(vcpu))
		; /* see above */
	else if (vcpu->arch.nmi_injected)
		kvm_x86_call(inject_nmi)(vcpu);
	else if (vcpu->arch.interrupt.injected)
		kvm_x86_call(inject_irq)(vcpu, true);

	/*
	 * Exceptions that morph to VM-Exits are handled above, and pending
	 * exceptions on top of injected exceptions that do not VM-Exit should
	 * either morph to #DF or, sadly, override the injected exception.
	 */
	WARN_ON_ONCE(vcpu->arch.exception.injected &&
		     vcpu->arch.exception.pending);

	/*
	 * Bail if immediate entry+exit to/from the guest is needed to complete
	 * nested VM-Enter or event re-injection so that a different pending
	 * event can be serviced (or if KVM needs to exit to userspace).
	 *
	 * Otherwise, continue processing events even if VM-Exit occurred.  The
	 * VM-Exit will have cleared exceptions that were meant for L2, but
	 * there may now be events that can be injected into L1.
	 */
	if (r < 0)
		goto out;

	/*
	 * A pending exception VM-Exit should either result in nested VM-Exit
	 * or force an immediate re-entry and exit to/from L2, and exception
	 * VM-Exits cannot be injected (flag should _never_ be set).
	 */
	WARN_ON_ONCE(vcpu->arch.exception_vmexit.injected ||
		     vcpu->arch.exception_vmexit.pending);

	/*
	 * New events, other than exceptions, cannot be injected if KVM needs
	 * to re-inject a previous event.  See above comments on re-injecting
	 * for why pending exceptions get priority.
	 */
	can_inject = !kvm_event_needs_reinjection(vcpu);

	if (vcpu->arch.exception.pending) {
		/*
		 * Fault-class exceptions, except #DBs, set RF=1 in the RFLAGS
		 * value pushed on the stack.  Trap-like exceptions and all #DBs
		 * leave RF as-is (KVM follows Intel's behavior in this regard;
		 * AMD states that code breakpoint #DBs explicitly clear RF).
		 *
		 * Note, most versions of Intel's SDM and AMD's APM incorrectly
		 * describe the behavior of General Detect #DBs, which are
		 * fault-like.  They do _not_ set RF, a la code breakpoints.
		 */
		if (exception_type(vcpu->arch.exception.vector) == EXCPT_FAULT)
			__kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) |
					       X86_EFLAGS_RF);

		if (vcpu->arch.exception.vector == DB_VECTOR) {
			kvm_deliver_exception_payload(vcpu, &vcpu->arch.exception);
			if (vcpu->arch.dr7 & DR7_GD) {
				vcpu->arch.dr7 &= ~DR7_GD;
				kvm_update_dr7(vcpu);
			}
		}

		kvm_inject_exception(vcpu);

		vcpu->arch.exception.pending = false;
		vcpu->arch.exception.injected = true;

		can_inject = false;
	}

	/* Don't inject interrupts if the user asked to avoid doing so */
	if (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ)
		return 0;

	/*
	 * Finally, inject interrupt events.  If an event cannot be injected
	 * due to architectural conditions (e.g. IF=0) a window-open exit
	 * will re-request KVM_REQ_EVENT.  Sometimes however an event is
	 * pending and can architecturally be injected, but we cannot do it
	 * right now: an interrupt could have arrived just now and we have to
	 * inject it as a vmexit, or there could already be an event in the
	 * queue, which is indicated by can_inject.  In that case we request
	 * an immediate exit in order to make progress and get back here for
	 * another iteration.  The kvm_x86_ops hooks communicate this by
	 * returning -EBUSY.
	 */
#ifdef CONFIG_KVM_SMM
	if (vcpu->arch.smi_pending) {
		r = can_inject ? kvm_x86_call(smi_allowed)(vcpu, true) :
				 -EBUSY;
		if (r < 0)
			goto out;
		if (r) {
			vcpu->arch.smi_pending = false;
			++vcpu->arch.smi_count;
			enter_smm(vcpu);
			can_inject = false;
		} else
			kvm_x86_call(enable_smi_window)(vcpu);
	}
#endif

	if (vcpu->arch.nmi_pending) {
		r = can_inject ? kvm_x86_call(nmi_allowed)(vcpu, true) :
				 -EBUSY;
		if (r < 0)
			goto out;
		if (r) {
			--vcpu->arch.nmi_pending;
			vcpu->arch.nmi_injected = true;
			kvm_x86_call(inject_nmi)(vcpu);
			can_inject = false;
			WARN_ON(kvm_x86_call(nmi_allowed)(vcpu, true) < 0);
		}
		if (vcpu->arch.nmi_pending)
			kvm_x86_call(enable_nmi_window)(vcpu);
	}

	if (kvm_cpu_has_injectable_intr(vcpu)) {
		r = can_inject ? kvm_x86_call(interrupt_allowed)(vcpu, true) :
				 -EBUSY;
		if (r < 0)
			goto out;
		if (r) {
			int irq = kvm_cpu_get_interrupt(vcpu);

			if (!WARN_ON_ONCE(irq == -1)) {
				kvm_queue_interrupt(vcpu, irq, false);
				kvm_x86_call(inject_irq)(vcpu, false);
				WARN_ON(kvm_x86_call(interrupt_allowed)(vcpu, true) < 0);
			}
		}
		if (kvm_cpu_has_injectable_intr(vcpu))
			kvm_x86_call(enable_irq_window)(vcpu);
	}

	if (is_guest_mode(vcpu) &&
	    kvm_x86_ops.nested_ops->has_events &&
	    kvm_x86_ops.nested_ops->has_events(vcpu, true))
		*req_immediate_exit = true;

	/*
	 * KVM must never queue a new exception while injecting an event; KVM
	 * is done emulating and should only propagate the to-be-injected event
	 * to the VMCS/VMCB.  Queueing a new exception can put the vCPU into an
	 * infinite loop as KVM will bail from VM-Enter to inject the pending
	 * exception and start the cycle all over.
	 *
	 * Exempt triple faults as they have special handling and won't put the
	 * vCPU into an infinite loop.  Triple fault can be queued when running
	 * VMX without unrestricted guest, as that requires KVM to emulate Real
	 * Mode events (see kvm_inject_realmode_interrupt()).
	 */
	WARN_ON_ONCE(vcpu->arch.exception.pending ||
		     vcpu->arch.exception_vmexit.pending);
	return 0;

out:
	if (r == -EBUSY) {
		*req_immediate_exit = true;
		r = 0;
	}
	return r;
}
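
/*
 * Summary of the injection priority implemented above, for orientation (this
 * restates the code, it is not an additional contract): nested VM-Exit
 * processing comes first, then re-injection of an already-injected exception,
 * NMI or IRQ, then a newly pending exception, and only then new asynchronous
 * events in SMI -> NMI -> IRQ order, with each successful injection clearing
 * can_inject so that at most one new event is injected per call.
 */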

static void process_nmi(struct kvm_vcpu *vcpu)
{
	unsigned int limit;

	/*
	 * x86 is limited to one NMI pending, but because KVM can't react to
	 * incoming NMIs as quickly as bare metal, e.g. if the vCPU is
	 * scheduled out, KVM needs to play nice with two queued NMIs showing
	 * up at the same time.  To handle this scenario, allow two NMIs to be
	 * (temporarily) pending so long as NMIs are not blocked and KVM is not
	 * waiting for a previous NMI injection to complete (which effectively
	 * blocks NMIs).  KVM will immediately inject one of the two NMIs, and
	 * will request an NMI window to handle the second NMI.
	 */
	if (kvm_x86_call(get_nmi_mask)(vcpu) || vcpu->arch.nmi_injected)
		limit = 1;
	else
		limit = 2;

	/*
	 * Adjust the limit to account for pending virtual NMIs, which aren't
	 * tracked in vcpu->arch.nmi_pending.
	 */
	if (kvm_x86_call(is_vnmi_pending)(vcpu))
		limit--;

	vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0);
	vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit);

	if (vcpu->arch.nmi_pending &&
	    (kvm_x86_call(set_vnmi_pending)(vcpu)))
		vcpu->arch.nmi_pending--;

	if (vcpu->arch.nmi_pending)
		kvm_make_request(KVM_REQ_EVENT, vcpu);
}

/* Return total number of NMIs pending injection to the VM */
int kvm_get_nr_pending_nmis(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.nmi_pending +
	       kvm_x86_call(is_vnmi_pending)(vcpu);
}

void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
				       unsigned long *vcpu_bitmap)
{
	kvm_make_vcpus_request_mask(kvm, KVM_REQ_SCAN_IOAPIC, vcpu_bitmap);
}

void kvm_make_scan_ioapic_request(struct kvm *kvm)
{
	kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC);
}

void __kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	bool activate;

	if (!lapic_in_kernel(vcpu))
		return;

	down_read(&vcpu->kvm->arch.apicv_update_lock);
	preempt_disable();

	/* Do not activate APICV when APIC is disabled */
	activate = kvm_vcpu_apicv_activated(vcpu) &&
		   (kvm_get_apic_mode(vcpu) != LAPIC_MODE_DISABLED);

	if (apic->apicv_active == activate)
		goto out;

	apic->apicv_active = activate;
	kvm_apic_update_apicv(vcpu);
	kvm_x86_call(refresh_apicv_exec_ctrl)(vcpu);

	/*
	 * When APICv gets disabled, we may still have injected interrupts
	 * pending.  At the same time, KVM_REQ_EVENT may not be set as APICv
	 * was still active when the interrupt got accepted.  Make sure
	 * kvm_check_and_inject_events() is called to check for that.
	 */
	if (!apic->apicv_active)
		kvm_make_request(KVM_REQ_EVENT, vcpu);

out:
	preempt_enable();
	up_read(&vcpu->kvm->arch.apicv_update_lock);
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(__kvm_vcpu_update_apicv);

static void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
{
	if (!lapic_in_kernel(vcpu))
		return;

	/*
	 * Due to sharing page tables across vCPUs, the xAPIC memslot must be
	 * deleted if any vCPU has xAPIC virtualization and x2APIC enabled,
	 * and hardware doesn't support x2APIC virtualization.  E.g. some AMD
	 * CPUs support AVIC but not x2APIC.  KVM still allows enabling AVIC in
	 * this case so that KVM can use the AVIC doorbell to inject interrupts
	 * to running vCPUs, but KVM must not create SPTEs for the APIC base as
	 * the vCPU would incorrectly be able to access the vAPIC page via MMIO
	 * despite being in x2APIC mode.  For simplicity, inhibiting the APIC
	 * access page is sticky.
	 */
	if (apic_x2apic_mode(vcpu->arch.apic) &&
	    kvm_x86_ops.allow_apicv_in_x2apic_without_x2apic_virtualization)
		kvm_inhibit_apic_access_page(vcpu);

	__kvm_vcpu_update_apicv(vcpu);
}

void __kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
				      enum kvm_apicv_inhibit reason, bool set)
{
	unsigned long old, new;

	lockdep_assert_held_write(&kvm->arch.apicv_update_lock);

	if (!(kvm_x86_ops.required_apicv_inhibits & BIT(reason)))
		return;

	old = new = kvm->arch.apicv_inhibit_reasons;

	set_or_clear_apicv_inhibit(&new, reason, set);

	if (!!old != !!new) {
		/*
		 * Kick all vCPUs before setting apicv_inhibit_reasons to avoid
		 * false positives in the sanity check WARN in vcpu_enter_guest().
		 * This task will wait for all vCPUs to ack the kick IRQ before
		 * updating apicv_inhibit_reasons, and all other vCPUs will
		 * block on acquiring apicv_update_lock so that vCPUs can't
		 * redo vcpu_enter_guest() without seeing the new inhibit state.
		 *
		 * Note, holding apicv_update_lock and taking it in the read
		 * side (handling the request) also prevents other vCPUs from
		 * servicing the request with a stale apicv_inhibit_reasons.
		 */
		kvm_make_all_cpus_request(kvm, KVM_REQ_APICV_UPDATE);
		kvm->arch.apicv_inhibit_reasons = new;
		if (new) {
			unsigned long gfn = gpa_to_gfn(APIC_DEFAULT_PHYS_BASE);
			int idx = srcu_read_lock(&kvm->srcu);

			kvm_zap_gfn_range(kvm, gfn, gfn + 1);
			srcu_read_unlock(&kvm->srcu, idx);
		}
	} else {
		kvm->arch.apicv_inhibit_reasons = new;
	}
}

void kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
				    enum kvm_apicv_inhibit reason, bool set)
{
	if (!enable_apicv)
		return;

	down_write(&kvm->arch.apicv_update_lock);
	__kvm_set_or_clear_apicv_inhibit(kvm, reason, set);
	up_write(&kvm->arch.apicv_update_lock);
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_or_clear_apicv_inhibit);
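
/*
 * Illustrative sketch of how this API is meant to be used (the reason value
 * is one of the existing APICV_INHIBIT_REASON_* enumerators; whether a given
 * reason applies at all is vendor-specific via required_apicv_inhibits).
 * E.g. the in-kernel PIT inhibits APICv while reinjection is active, roughly:
 *
 *	// hypothetical caller: inhibit while PIT reinjection is in use
 *	kvm_set_or_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PIT_REINJ,
 *				       reinject_enabled);
 *
 * Callers never touch apicv_inhibit_reasons directly; the wrapper takes
 * apicv_update_lock for write and kicks vCPUs as described above.
 */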

static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
{
	if (!kvm_apic_present(vcpu))
		return;

	bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256);
	vcpu->arch.highest_stale_pending_ioapic_eoi = -1;

	kvm_x86_call(sync_pir_to_irr)(vcpu);

	if (irqchip_split(vcpu->kvm))
		kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
#ifdef CONFIG_KVM_IOAPIC
	else if (ioapic_in_kernel(vcpu->kvm))
		kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
#endif

	if (is_guest_mode(vcpu))
		vcpu->arch.load_eoi_exitmap_pending = true;
	else
		kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
}

static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
{
	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
		return;

#ifdef CONFIG_KVM_HYPERV
	if (to_hv_vcpu(vcpu)) {
		u64 eoi_exit_bitmap[4];

		bitmap_or((ulong *)eoi_exit_bitmap,
			  vcpu->arch.ioapic_handled_vectors,
			  to_hv_synic(vcpu)->vec_bitmap, 256);
		kvm_x86_call(load_eoi_exitmap)(vcpu, eoi_exit_bitmap);
		return;
	}
#endif
	kvm_x86_call(load_eoi_exitmap)(
		vcpu, (u64 *)vcpu->arch.ioapic_handled_vectors);
}

void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
{
	kvm_x86_call(guest_memory_reclaimed)(kvm);
}

static void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
{
	if (!lapic_in_kernel(vcpu))
		return;

	kvm_x86_call(set_apic_access_page_addr)(vcpu);
}

/*
 * Called within kvm->srcu read side.
 * Returns 1 to let vcpu_run() continue the guest execution loop without
 * exiting to the userspace.  Otherwise, the value will be returned to the
 * userspace.
 */
static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
{
	int r;
	bool req_int_win =
		dm_request_for_irq_injection(vcpu) &&
		kvm_cpu_accept_dm_intr(vcpu);
	fastpath_t exit_fastpath;
	u64 run_flags, debug_ctl;

	bool req_immediate_exit = false;

	if (kvm_request_pending(vcpu)) {
		if (kvm_check_request(KVM_REQ_VM_DEAD, vcpu)) {
			r = -EIO;
			goto out;
		}

		if (kvm_dirty_ring_check_request(vcpu)) {
			r = 0;
			goto out;
		}

		if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
			if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
				r = 0;
				goto out;
			}
		}
		if (kvm_check_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu))
			kvm_mmu_free_obsolete_roots(vcpu);
		if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
			__kvm_migrate_timers(vcpu);
		if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu))
			kvm_update_masterclock(vcpu->kvm);
		if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu))
			kvm_gen_kvmclock_update(vcpu);
		if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
			r = kvm_guest_time_update(vcpu);
			if (unlikely(r))
				goto out;
		}
		if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
			kvm_mmu_sync_roots(vcpu);
		if (kvm_check_request(KVM_REQ_LOAD_MMU_PGD, vcpu))
			kvm_mmu_load_pgd(vcpu);

		/*
		 * Note, the order matters here, as flushing "all" TLB entries
		 * also flushes the "current" TLB entries, i.e. servicing the
		 * flush "all" will clear any request to flush "current".
		 */
		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
			kvm_vcpu_flush_tlb_all(vcpu);

		kvm_service_local_tlb_flush_requests(vcpu);

		/*
		 * Fall back to a "full" guest flush if Hyper-V's precise
		 * flushing fails.  Note, Hyper-V's flushing is per-vCPU, but
		 * the flushes are considered "remote" and not "local" because
		 * the requests can be initiated from other vCPUs.
		 */
#ifdef CONFIG_KVM_HYPERV
		if (kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu) &&
		    kvm_hv_vcpu_flush_tlb(vcpu))
			kvm_vcpu_flush_tlb_guest(vcpu);
#endif

		if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
			vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
			r = 0;
			goto out;
		}
		if (kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
			if (is_guest_mode(vcpu))
				kvm_x86_ops.nested_ops->triple_fault(vcpu);

			if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
				vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
				vcpu->mmio_needed = 0;
				r = 0;
				goto out;
			}
		}
		if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
			/* Page is swapped out.  Do synthetic halt */
			vcpu->arch.apf.halted = true;
			r = 1;
			goto out;
		}
		if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
			record_steal_time(vcpu);
		if (kvm_check_request(KVM_REQ_PMU, vcpu))
			kvm_pmu_handle_event(vcpu);
		if (kvm_check_request(KVM_REQ_PMI, vcpu))
			kvm_pmu_deliver_pmi(vcpu);
#ifdef CONFIG_KVM_SMM
		if (kvm_check_request(KVM_REQ_SMI, vcpu))
			process_smi(vcpu);
#endif
		if (kvm_check_request(KVM_REQ_NMI, vcpu))
			process_nmi(vcpu);
		if (kvm_check_request(KVM_REQ_IOAPIC_EOI_EXIT, vcpu)) {
			BUG_ON(vcpu->arch.pending_ioapic_eoi > 255);
			if (test_bit(vcpu->arch.pending_ioapic_eoi,
				     vcpu->arch.ioapic_handled_vectors)) {
				vcpu->run->exit_reason = KVM_EXIT_IOAPIC_EOI;
				vcpu->run->eoi.vector =
						vcpu->arch.pending_ioapic_eoi;
				r = 0;
				goto out;
			}
		}
		if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu))
			vcpu_scan_ioapic(vcpu);
		if (kvm_check_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu))
			vcpu_load_eoi_exitmap(vcpu);
		if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
			kvm_vcpu_reload_apic_access_page(vcpu);
#ifdef CONFIG_KVM_HYPERV
		if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) {
			vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
			vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH;
			vcpu->run->system_event.ndata = 0;
			r = 0;
			goto out;
		}
		if (kvm_check_request(KVM_REQ_HV_RESET, vcpu)) {
			vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
			vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET;
			vcpu->run->system_event.ndata = 0;
			r = 0;
			goto out;
		}
		if (kvm_check_request(KVM_REQ_HV_EXIT, vcpu)) {
			struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

			vcpu->run->exit_reason = KVM_EXIT_HYPERV;
			vcpu->run->hyperv = hv_vcpu->exit;
			r = 0;
			goto out;
		}

		/*
		 * KVM_REQ_HV_STIMER has to be processed after
		 * KVM_REQ_CLOCK_UPDATE, because Hyper-V SynIC timers
		 * depend on the guest clock being up-to-date
		 */
		if (kvm_check_request(KVM_REQ_HV_STIMER, vcpu))
			kvm_hv_process_stimers(vcpu);
#endif
		if (kvm_check_request(KVM_REQ_APICV_UPDATE, vcpu))
			kvm_vcpu_update_apicv(vcpu);
		if (kvm_check_request(KVM_REQ_APF_READY, vcpu))
			kvm_check_async_pf_completion(vcpu);

		if (kvm_check_request(KVM_REQ_RECALC_INTERCEPTS, vcpu))
			kvm_x86_call(recalc_intercepts)(vcpu);

		if (kvm_check_request(KVM_REQ_UPDATE_CPU_DIRTY_LOGGING, vcpu))
			kvm_x86_call(update_cpu_dirty_logging)(vcpu);

		if (kvm_check_request(KVM_REQ_UPDATE_PROTECTED_GUEST_STATE, vcpu)) {
			kvm_vcpu_reset(vcpu, true);
			if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE) {
				r = 1;
				goto out;
			}
		}
	}

	if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win ||
	    kvm_xen_has_interrupt(vcpu)) {
		++vcpu->stat.req_event;
		r = kvm_apic_accept_events(vcpu);
		if (r < 0) {
			r = 0;
			goto out;
		}
		if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
			r = 1;
			goto out;
		}

		r = kvm_check_and_inject_events(vcpu, &req_immediate_exit);
		if (r < 0) {
			r = 0;
			goto out;
		}
		if (req_int_win)
			kvm_x86_call(enable_irq_window)(vcpu);

		if (kvm_lapic_enabled(vcpu)) {
			update_cr8_intercept(vcpu);
			kvm_lapic_sync_to_vapic(vcpu);
		}
	}

	r = kvm_mmu_reload(vcpu);
	if (unlikely(r)) {
		goto cancel_injection;
	}

	preempt_disable();

	kvm_x86_call(prepare_switch_to_guest)(vcpu);

	/*
	 * Disable IRQs before setting IN_GUEST_MODE.  Posted interrupt IPIs
	 * are then delayed until after guest entry, which ensures that they
	 * result in virtual interrupt delivery.
	 */
	local_irq_disable();

	/* Store vcpu->apicv_active before vcpu->mode. */
	smp_store_release(&vcpu->mode, IN_GUEST_MODE);

	kvm_vcpu_srcu_read_unlock(vcpu);

	/*
	 * 1) We should set ->mode before checking ->requests.  Please see
	 * the comment in kvm_vcpu_exiting_guest_mode().
	 *
	 * 2) For APICv, we should set ->mode before checking PID.ON.  This
	 * pairs with the memory barrier implicit in pi_test_and_set_on
	 * (see vmx_deliver_posted_interrupt).
	 *
	 * 3) This also orders the write to mode from any reads to the page
	 * tables done while the VCPU is running.  Please see the comment
	 * in kvm_flush_remote_tlbs.
	 */
	smp_mb__after_srcu_read_unlock();

	/*
	 * Process pending posted interrupts to handle the case where the
	 * notification IRQ arrived in the host, or was never sent (because the
	 * target vCPU wasn't running).  Do this regardless of the vCPU's APICv
	 * status, KVM doesn't update assigned devices when APICv is inhibited,
	 * i.e. they can post interrupts even if APICv is temporarily disabled.
	 */
	if (kvm_lapic_enabled(vcpu))
		kvm_x86_call(sync_pir_to_irr)(vcpu);

	if (kvm_vcpu_exit_request(vcpu)) {
		vcpu->mode = OUTSIDE_GUEST_MODE;
		smp_wmb();
		local_irq_enable();
		preempt_enable();
		kvm_vcpu_srcu_read_lock(vcpu);
		r = 1;
		goto cancel_injection;
	}

	run_flags = 0;
	if (req_immediate_exit) {
		run_flags |= KVM_RUN_FORCE_IMMEDIATE_EXIT;
		kvm_make_request(KVM_REQ_EVENT, vcpu);
	}

	fpregs_assert_state_consistent();
	if (test_thread_flag(TIF_NEED_FPU_LOAD))
		switch_fpu_return();

	if (vcpu->arch.guest_fpu.xfd_err)
		wrmsrq(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err);

	kvm_load_xfeatures(vcpu, true);

	if (unlikely(vcpu->arch.switch_db_regs &&
		     !(vcpu->arch.switch_db_regs & KVM_DEBUGREG_AUTO_SWITCH))) {
		set_debugreg(DR7_FIXED_1, 7);
		set_debugreg(vcpu->arch.eff_db[0], 0);
		set_debugreg(vcpu->arch.eff_db[1], 1);
		set_debugreg(vcpu->arch.eff_db[2], 2);
		set_debugreg(vcpu->arch.eff_db[3], 3);
		/* When KVM_DEBUGREG_WONT_EXIT, dr6 is accessible in guest. */
		if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
			run_flags |= KVM_RUN_LOAD_GUEST_DR6;
	} else if (unlikely(hw_breakpoint_active())) {
		set_debugreg(DR7_FIXED_1, 7);
	}

	/*
	 * Refresh the host DEBUGCTL snapshot after disabling IRQs, as DEBUGCTL
	 * can be modified in IRQ context, e.g. via SMP function calls.  Inform
	 * vendor code if any host-owned bits were changed, e.g. so that the
	 * value loaded into hardware while running the guest can be updated.
	 */
	debug_ctl = get_debugctlmsr();
	if ((debug_ctl ^ vcpu->arch.host_debugctl) & kvm_x86_ops.HOST_OWNED_DEBUGCTL &&
	    !vcpu->arch.guest_state_protected)
		run_flags |= KVM_RUN_LOAD_DEBUGCTL;
	vcpu->arch.host_debugctl = debug_ctl;

	kvm_mediated_pmu_load(vcpu);

	guest_timing_enter_irqoff();

	/*
	 * Swap PKRU with hardware breakpoints disabled to minimize the number
	 * of flows where non-KVM code can run with guest state loaded.
	 */
	kvm_load_guest_pkru(vcpu);

	for (;;) {
		/*
		 * Assert that vCPU vs. VM APICv state is consistent.  An APICv
		 * update must kick and wait for all vCPUs before toggling the
		 * per-VM state, and responding vCPUs must wait for the update
		 * to complete before servicing KVM_REQ_APICV_UPDATE.
		 */
		WARN_ON_ONCE((kvm_vcpu_apicv_activated(vcpu) != kvm_vcpu_apicv_active(vcpu)) &&
			     (kvm_get_apic_mode(vcpu) != LAPIC_MODE_DISABLED));

		exit_fastpath = kvm_x86_call(vcpu_run)(vcpu, run_flags);
		if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
			break;

		if (kvm_lapic_enabled(vcpu))
			kvm_x86_call(sync_pir_to_irr)(vcpu);

		if (unlikely(kvm_vcpu_exit_request(vcpu))) {
			exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
			break;
		}

		run_flags = 0;

		/* Note, VM-Exits that go down the "slow" path are accounted below. */
		++vcpu->stat.exits;
	}

	kvm_load_host_pkru(vcpu);

	kvm_mediated_pmu_put(vcpu);

	/*
	 * Do this here before restoring debug registers on the host.  And
	 * since we do this before handling the vmexit, a DR access vmexit
	 * can (a) read the correct value of the debug registers, (b) set
	 * KVM_DEBUGREG_WONT_EXIT again.
	 */
	if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) {
		WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP);
		WARN_ON(vcpu->arch.switch_db_regs & KVM_DEBUGREG_AUTO_SWITCH);
		kvm_x86_call(sync_dirty_debug_regs)(vcpu);
		kvm_update_dr0123(vcpu);
		kvm_update_dr7(vcpu);
	}

	/*
	 * If the guest has used debug registers, at least dr7
	 * will be disabled while returning to the host.
	 * If we don't have active breakpoints in the host, we don't
	 * care about the messed up debug address registers.  But if
	 * we have some of them active, restore the old state.
	 */
	if (hw_breakpoint_active())
		hw_breakpoint_restore();

	vcpu->arch.last_vmentry_cpu = vcpu->cpu;
	vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());

	vcpu->mode = OUTSIDE_GUEST_MODE;
	smp_wmb();

	kvm_load_xfeatures(vcpu, false);

	/*
	 * Sync xfd before calling handle_exit_irqoff() which may
	 * rely on the fact that guest_fpu::xfd is up-to-date (e.g.
	 * in #NM irqoff handler).
	 */
	if (vcpu->arch.xfd_no_write_intercept)
		fpu_sync_guest_vmexit_xfd_state();

	kvm_x86_call(handle_exit_irqoff)(vcpu);

	if (vcpu->arch.guest_fpu.xfd_err)
		wrmsrq(MSR_IA32_XFD_ERR, 0);

	/*
	 * Mark this CPU as needing a branch predictor flush before running
	 * userspace.  Must be done before enabling preemption to ensure it
	 * gets set for the CPU that actually ran the guest, and not the CPU
	 * that it may migrate to.
	 */
	if (cpu_feature_enabled(X86_FEATURE_IBPB_EXIT_TO_USER))
		this_cpu_write(x86_ibpb_exit_to_user, true);

	/*
	 * Consume any pending interrupts, including the possible source of
	 * VM-Exit on SVM and any ticks that occur between VM-Exit and now.
	 * An instruction is required after local_irq_enable() to fully unblock
	 * interrupts on processors that implement an interrupt shadow, the
	 * stat.exits increment will do nicely.
	 */
	kvm_before_interrupt(vcpu, KVM_HANDLING_IRQ);
	local_irq_enable();
	++vcpu->stat.exits;
	local_irq_disable();
	kvm_after_interrupt(vcpu);

	/*
	 * Wait until after servicing IRQs to account guest time so that any
	 * ticks that occurred while running the guest are properly accounted
	 * to the guest.  Waiting until IRQs are enabled degrades the accuracy
	 * of accounting via context tracking, but the loss of accuracy is
	 * acceptable for all known use cases.
	 */
	guest_timing_exit_irqoff();

	local_irq_enable();
	preempt_enable();

	kvm_vcpu_srcu_read_lock(vcpu);

	/*
	 * Call this to ensure WC buffers in guest are evicted after each VM
	 * Exit, so that the evicted WC writes can be snooped across all cpus
	 */
	smp_mb__after_srcu_read_lock();

	/*
	 * Profile KVM exit RIPs:
	 */
	if (unlikely(prof_on == KVM_PROFILING &&
		     !vcpu->arch.guest_state_protected)) {
		unsigned long rip = kvm_rip_read(vcpu);
		profile_hit(KVM_PROFILING, (void *)rip);
	}

	if (unlikely(vcpu->arch.tsc_always_catchup))
		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);

	if (vcpu->arch.apic_attention)
		kvm_lapic_sync_from_vapic(vcpu);

	if (unlikely(exit_fastpath == EXIT_FASTPATH_EXIT_USERSPACE))
		return 0;

	r = kvm_x86_call(handle_exit)(vcpu, exit_fastpath);
	return r;

cancel_injection:
	if (req_immediate_exit)
		kvm_make_request(KVM_REQ_EVENT, vcpu);
	kvm_x86_call(cancel_injection)(vcpu);
	if (unlikely(vcpu->arch.apic_attention))
		kvm_lapic_sync_from_vapic(vcpu);
out:
	return r;
}

static bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
		!vcpu->arch.apf.halted);
}

bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
{
	if (!list_empty_careful(&vcpu->async_pf.done))
		return true;

	if (kvm_apic_has_pending_init_or_sipi(vcpu) &&
	    kvm_apic_init_sipi_allowed(vcpu))
		return true;

	if (kvm_is_exception_pending(vcpu))
		return true;

	if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
	    (vcpu->arch.nmi_pending &&
	     kvm_x86_call(nmi_allowed)(vcpu, false)))
		return true;

#ifdef CONFIG_KVM_SMM
	if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
	    (vcpu->arch.smi_pending &&
	     kvm_x86_call(smi_allowed)(vcpu, false)))
		return true;
#endif

	if (kvm_test_request(KVM_REQ_PMI, vcpu))
		return true;

	if (kvm_test_request(KVM_REQ_UPDATE_PROTECTED_GUEST_STATE, vcpu))
		return true;

	if (kvm_arch_interrupt_allowed(vcpu) && kvm_cpu_has_interrupt(vcpu))
		return true;

	if (kvm_hv_has_stimer_pending(vcpu))
		return true;

	if (is_guest_mode(vcpu) &&
	    kvm_x86_ops.nested_ops->has_events &&
	    kvm_x86_ops.nested_ops->has_events(vcpu, false))
		return true;

	if (kvm_xen_has_pending_events(vcpu))
		return true;

	return false;
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_has_events);

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_running(vcpu) || vcpu->arch.pv.pv_unhalted ||
	       kvm_vcpu_has_events(vcpu);
}

/* Called within kvm->srcu read side. */
static inline int vcpu_block(struct kvm_vcpu *vcpu)
{
	bool hv_timer;

	if (!kvm_arch_vcpu_runnable(vcpu)) {
		/*
		 * Switch to the software timer before halt-polling/blocking as
		 * the guest's timer may be a break event for the vCPU, and the
		 * hypervisor timer runs only when the CPU is in guest mode.
		 * Switch before halt-polling so that KVM recognizes an expired
		 * timer before blocking.
		 */
		hv_timer = kvm_lapic_hv_timer_in_use(vcpu);
		if (hv_timer)
			kvm_lapic_switch_to_sw_timer(vcpu);

		kvm_vcpu_srcu_read_unlock(vcpu);
		if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
			kvm_vcpu_halt(vcpu);
		else
			kvm_vcpu_block(vcpu);
		kvm_vcpu_srcu_read_lock(vcpu);

		if (hv_timer)
			kvm_lapic_switch_to_hv_timer(vcpu);

		/*
		 * If the vCPU is not runnable, a signal or another host event
		 * of some kind is pending; service it without changing the
		 * vCPU's activity state.
		 */
		if (!kvm_arch_vcpu_runnable(vcpu))
			return 1;
	}

	/*
	 * Evaluate nested events before exiting the halted state.  This allows
	 * the halt state to be recorded properly in the VMCS12's activity
	 * state field (AMD does not have a similar field and a VM-Exit always
	 * causes a spurious wakeup from HLT).
	 */
	if (is_guest_mode(vcpu)) {
		int r = kvm_check_nested_events(vcpu);

		if (r < 0 && r != -EBUSY)
			return 0;
	}

	if (kvm_apic_accept_events(vcpu) < 0)
		return 0;
	switch (vcpu->arch.mp_state) {
	case KVM_MP_STATE_HALTED:
	case KVM_MP_STATE_AP_RESET_HOLD:
		kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE);
		fallthrough;
	case KVM_MP_STATE_RUNNABLE:
		vcpu->arch.apf.halted = false;
		break;
	case KVM_MP_STATE_INIT_RECEIVED:
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
	return 1;
}

/* Called within kvm->srcu read side. */
static int vcpu_run(struct kvm_vcpu *vcpu)
{
	int r;

	vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;

	for (;;) {
		/*
		 * If another guest vCPU requests a PV TLB flush in the middle
		 * of instruction emulation, the rest of the emulation could
		 * use a stale page translation.  Assume that any code after
		 * this point can start executing an instruction.
		 */
		vcpu->arch.at_instruction_boundary = false;
		if (kvm_vcpu_running(vcpu)) {
			r = vcpu_enter_guest(vcpu);
		} else {
			r = vcpu_block(vcpu);
		}

		if (r <= 0)
			break;

		kvm_clear_request(KVM_REQ_UNBLOCK, vcpu);
		if (kvm_xen_has_pending_events(vcpu))
			kvm_xen_inject_pending_events(vcpu);

		if (kvm_cpu_has_pending_timer(vcpu))
			kvm_inject_pending_timer_irqs(vcpu);

		if (dm_request_for_irq_injection(vcpu) &&
		    kvm_vcpu_ready_for_interrupt_injection(vcpu)) {
			r = 0;
			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
			++vcpu->stat.request_irq_exits;
			break;
		}

		if (__xfer_to_guest_mode_work_pending()) {
			kvm_vcpu_srcu_read_unlock(vcpu);
			r = kvm_xfer_to_guest_mode_handle_work(vcpu);
			kvm_vcpu_srcu_read_lock(vcpu);
			if (r)
				return r;
		}
	}

	return r;
}

static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason)
{
	/*
	 * The vCPU has halted, e.g. executed HLT.  Update the run state if the
	 * local APIC is in-kernel, the run loop will detect the non-runnable
	 * state and halt the vCPU.  Exit to userspace if the local APIC is
	 * managed by userspace, in which case userspace is responsible for
	 * handling wake events.
	 */
	++vcpu->stat.halt_exits;
	if (lapic_in_kernel(vcpu)) {
		if (kvm_vcpu_has_events(vcpu) || vcpu->arch.pv.pv_unhalted)
			state = KVM_MP_STATE_RUNNABLE;
		kvm_set_mp_state(vcpu, state);
		return 1;
	} else {
		vcpu->run->exit_reason = reason;
		return 0;
	}
}

int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu)
{
	return __kvm_emulate_halt(vcpu, KVM_MP_STATE_HALTED, KVM_EXIT_HLT);
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_halt_noskip);

int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
	int ret = kvm_skip_emulated_instruction(vcpu);
	/*
	 * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered
	 * KVM_EXIT_DEBUG here.
	 */
	return kvm_emulate_halt_noskip(vcpu) && ret;
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_halt);

fastpath_t handle_fastpath_hlt(struct kvm_vcpu *vcpu)
{
	if (!kvm_pmu_is_fastpath_emulation_allowed(vcpu))
		return EXIT_FASTPATH_NONE;

	if (!kvm_emulate_halt(vcpu))
		return EXIT_FASTPATH_EXIT_USERSPACE;

	if (kvm_vcpu_running(vcpu))
		return EXIT_FASTPATH_REENTER_GUEST;

	return EXIT_FASTPATH_EXIT_HANDLED;
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(handle_fastpath_hlt);
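
/*
 * For orientation (derived from how vcpu_enter_guest() consumes these values
 * above, not an additional contract): EXIT_FASTPATH_REENTER_GUEST loops
 * straight back into the guest without running the full exit handler,
 * EXIT_FASTPATH_EXIT_USERSPACE forces a return to userspace, and
 * EXIT_FASTPATH_EXIT_HANDLED falls through to the normal handle_exit() path
 * with the exit already taken care of.
 */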

int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu)
{
	int ret = kvm_skip_emulated_instruction(vcpu);

	return __kvm_emulate_halt(vcpu, KVM_MP_STATE_AP_RESET_HOLD,
				  KVM_EXIT_AP_RESET_HOLD) && ret;
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_ap_reset_hold);

bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_apicv_active(vcpu) &&
	       kvm_x86_call(dy_apicv_has_pending_interrupt)(vcpu);
}

bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.preempted_in_kernel;
}

bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
{
	if (READ_ONCE(vcpu->arch.pv.pv_unhalted))
		return true;

	if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
#ifdef CONFIG_KVM_SMM
	    kvm_test_request(KVM_REQ_SMI, vcpu) ||
#endif
	    kvm_test_request(KVM_REQ_EVENT, vcpu))
		return true;

	return kvm_arch_dy_has_pending_interrupt(vcpu);
}

static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
{
	return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
}

static int complete_emulated_pio(struct kvm_vcpu *vcpu)
{
	BUG_ON(!vcpu->arch.pio.count);

	return complete_emulated_io(vcpu);
}

/*
 * Implements the following, as a state machine:
 *
 * read:
 *   for each fragment
 *     for each mmio piece in the fragment
 *       write gpa, len
 *       exit
 *       copy data
 *   execute insn
 *
 * write:
 *   for each fragment
 *     for each mmio piece in the fragment
 *       write gpa, len
 *       copy data
 *       exit
 */
static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	struct kvm_mmio_fragment *frag;
	unsigned len;

	BUG_ON(!vcpu->mmio_needed);

	/* Complete previous fragment */
	frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment];
	len = min(8u, frag->len);
	if (!vcpu->mmio_is_write)
		memcpy(frag->data, run->mmio.data, len);

	if (frag->len <= 8) {
		/* Switch to the next fragment. */
		frag++;
		vcpu->mmio_cur_fragment++;
	} else {
		/* Go forward to the next mmio piece. */
		frag->data += len;
		frag->gpa += len;
		frag->len -= len;
	}

	if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) {
		vcpu->mmio_needed = 0;

		/* FIXME: return into emulator if single-stepping. */
		if (vcpu->mmio_is_write)
			return 1;
		vcpu->mmio_read_completed = 1;
		return complete_emulated_io(vcpu);
	}

	run->exit_reason = KVM_EXIT_MMIO;
	run->mmio.phys_addr = frag->gpa;
	if (vcpu->mmio_is_write)
		memcpy(run->mmio.data, frag->data, min(8u, frag->len));
	run->mmio.len = min(8u, frag->len);
	run->mmio.is_write = vcpu->mmio_is_write;
	vcpu->arch.complete_userspace_io = complete_emulated_mmio;
	return 0;
}
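
/*
 * Worked example of the state machine above (derived from the code, with a
 * hypothetical access for illustration): a 16-byte emulated MMIO read is one
 * fragment with frag->len == 16, split into two 8-byte pieces.  The first
 * KVM_EXIT_MMIO reports gpa/len for bytes 0-7; on re-entry the data is
 * copied and, since frag->len (16) > 8, gpa/data advance by 8 and len drops
 * to 8.  The second exit covers bytes 8-15; on re-entry frag->len <= 8, so
 * the fragment is retired, mmio_read_completed is set, and the instruction
 * is re-executed via complete_emulated_io().
 */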

/* Swap (qemu) user FPU context for the guest FPU context. */
static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (KVM_BUG_ON(vcpu->arch.guest_fpu.fpstate->in_use, vcpu->kvm))
		return;

	/* Exclude PKRU, it's restored separately immediately after VM-Exit. */
	fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, true);
	trace_kvm_fpu(1);
}

/* When vcpu_run ends, restore user space FPU context. */
static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (KVM_BUG_ON(!vcpu->arch.guest_fpu.fpstate->in_use, vcpu->kvm))
		return;

	fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, false);
	++vcpu->stat.fpu_reload;
	trace_kvm_fpu(0);
}

static int kvm_x86_vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	/*
	 * SIPI_RECEIVED is obsolete; KVM leaves the vCPU in Wait-For-SIPI and
	 * tracks the pending SIPI separately.  SIPI_RECEIVED is still accepted
	 * by KVM_SET_VCPU_EVENTS for backwards compatibility, but should be
	 * converted to INIT_RECEIVED.
	 */
	if (WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED))
		return -EINVAL;

	/*
	 * Disallow running the vCPU if userspace forced it into an impossible
	 * MP_STATE, e.g. if the vCPU is in WFS but SIPI is blocked.
	 */
	if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED &&
	    !kvm_apic_init_sipi_allowed(vcpu))
		return -EINVAL;

	return kvm_x86_call(vcpu_pre_run)(vcpu);
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	struct kvm_queued_exception *ex = &vcpu->arch.exception;
	struct kvm_run *kvm_run = vcpu->run;
	u64 sync_valid_fields;
	int r;

	r = kvm_mmu_post_init_vm(vcpu->kvm);
	if (r)
		return r;

	vcpu_load(vcpu);
	kvm_sigset_activate(vcpu);
	kvm_run->flags = 0;
	kvm_load_guest_fpu(vcpu);

	kvm_vcpu_srcu_read_lock(vcpu);
	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
		if (!vcpu->wants_to_run) {
			r = -EINTR;
			goto out;
		}

		/*
		 * Don't bother switching APIC timer emulation from the
		 * hypervisor timer to the software timer, the only way for the
		 * APIC timer to be active is if userspace stuffed vCPU state,
		 * i.e. put the vCPU into a nonsensical state.  Only an INIT
		 * will transition the vCPU out of UNINITIALIZED (without more
		 * state stuffing from userspace), which will reset the local
		 * APIC and thus cancel the timer or drop the IRQ (if the timer
		 * already expired).
		 */
		kvm_vcpu_srcu_read_unlock(vcpu);
		kvm_vcpu_block(vcpu);
		kvm_vcpu_srcu_read_lock(vcpu);

		if (kvm_apic_accept_events(vcpu) < 0) {
			r = 0;
			goto out;
		}
		r = -EAGAIN;
		if (signal_pending(current)) {
			r = -EINTR;
			kvm_run->exit_reason = KVM_EXIT_INTR;
			++vcpu->stat.signal_exits;
		}
		goto out;
	}

	sync_valid_fields = kvm_sync_valid_fields(vcpu->kvm);
	if ((kvm_run->kvm_valid_regs & ~sync_valid_fields) ||
	    (kvm_run->kvm_dirty_regs & ~sync_valid_fields)) {
		r = -EINVAL;
		goto out;
	}

	if (kvm_run->kvm_dirty_regs) {
		r = sync_regs(vcpu);
		if (r != 0)
			goto out;
	}

	/* re-sync apic's tpr */
	if (!lapic_in_kernel(vcpu)) {
		if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
			r = -EINVAL;
			goto out;
		}
	}

	/*
	 * If userspace set a pending exception and L2 is active, convert it to
	 * a pending VM-Exit if L1 wants to intercept the exception.
	 */
	if (vcpu->arch.exception_from_userspace && is_guest_mode(vcpu) &&
	    kvm_x86_ops.nested_ops->is_exception_vmexit(vcpu, ex->vector,
							ex->error_code)) {
		kvm_queue_exception_vmexit(vcpu, ex->vector,
					   ex->has_error_code, ex->error_code,
					   ex->has_payload, ex->payload);
		ex->injected = false;
		ex->pending = false;
	}
	vcpu->arch.exception_from_userspace = false;

	if (unlikely(vcpu->arch.complete_userspace_io)) {
		int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io;
		vcpu->arch.complete_userspace_io = NULL;
		r = cui(vcpu);
		if (r <= 0)
			goto out;
	} else {
		WARN_ON_ONCE(vcpu->arch.pio.count);
		WARN_ON_ONCE(vcpu->mmio_needed);
	}

	if (!vcpu->wants_to_run) {
		r = -EINTR;
		goto out;
	}

	r = kvm_x86_vcpu_pre_run(vcpu);
	if (r <= 0)
		goto out;

	r = vcpu_run(vcpu);

out:
	kvm_put_guest_fpu(vcpu);
	if (kvm_run->kvm_valid_regs && likely(!vcpu->arch.guest_state_protected))
		store_regs(vcpu);
	post_kvm_run_save(vcpu);
	kvm_vcpu_srcu_read_unlock(vcpu);

	kvm_sigset_deactivate(vcpu);
	vcpu_put(vcpu);
	return r;
}
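
/*
 * Illustrative sketch of the userspace side of the contract implemented
 * above (the local variable names are hypothetical; the ioctls and exit
 * reasons are part of the documented KVM API).  Userspace drives the vCPU in
 * a loop, re-invoking KVM_RUN after handling each exit; pending PIO/MMIO
 * completions are picked up via complete_userspace_io on the next entry:
 *
 *	struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *	for (;;) {
 *		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
 *			break;			// -EINTR, -EAGAIN, etc.
 *		switch (run->exit_reason) {
 *		case KVM_EXIT_IO:		// emulate the port access
 *		case KVM_EXIT_MMIO:		// emulate the memory access
 *		case KVM_EXIT_HLT:		// vCPU halted, wait for event
 *		default:
 *			break;
 *		}
 *	}
 *
 * where mmap_size would come from the KVM_GET_VCPU_MMAP_SIZE ioctl.
 */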

static void __get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	if (vcpu->arch.emulate_regs_need_sync_to_vcpu) {
		/*
		 * We are here if userspace calls get_regs() in the middle of
		 * instruction emulation.  Register state needs to be copied
		 * back from the emulation context to the vcpu.  Userspace
		 * shouldn't usually do that, but some badly designed PV
		 * devices (the vmware backdoor interface) need this to work.
		 */
		emulator_writeback_register_cache(vcpu->arch.emulate_ctxt);
		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
	}
	regs->rax = kvm_rax_read(vcpu);
	regs->rbx = kvm_rbx_read(vcpu);
	regs->rcx = kvm_rcx_read(vcpu);
	regs->rdx = kvm_rdx_read(vcpu);
	regs->rsi = kvm_rsi_read(vcpu);
	regs->rdi = kvm_rdi_read(vcpu);
	regs->rsp = kvm_rsp_read(vcpu);
	regs->rbp = kvm_rbp_read(vcpu);
#ifdef CONFIG_X86_64
	regs->r8 = kvm_r8_read(vcpu);
	regs->r9 = kvm_r9_read(vcpu);
	regs->r10 = kvm_r10_read(vcpu);
	regs->r11 = kvm_r11_read(vcpu);
	regs->r12 = kvm_r12_read(vcpu);
	regs->r13 = kvm_r13_read(vcpu);
	regs->r14 = kvm_r14_read(vcpu);
	regs->r15 = kvm_r15_read(vcpu);
#endif

	regs->rip = kvm_rip_read(vcpu);
	regs->rflags = kvm_get_rflags(vcpu);
}
12074
12075 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
12076 {
12077 if (vcpu->kvm->arch.has_protected_state &&
12078 vcpu->arch.guest_state_protected)
12079 return -EINVAL;
12080
12081 vcpu_load(vcpu);
12082 __get_regs(vcpu, regs);
12083 vcpu_put(vcpu);
12084 return 0;
12085 }
12086
12087 static void __set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
12088 {
12089 vcpu->arch.emulate_regs_need_sync_from_vcpu = true;
12090 vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
12091
12092 kvm_rax_write(vcpu, regs->rax);
12093 kvm_rbx_write(vcpu, regs->rbx);
12094 kvm_rcx_write(vcpu, regs->rcx);
12095 kvm_rdx_write(vcpu, regs->rdx);
12096 kvm_rsi_write(vcpu, regs->rsi);
12097 kvm_rdi_write(vcpu, regs->rdi);
12098 kvm_rsp_write(vcpu, regs->rsp);
12099 kvm_rbp_write(vcpu, regs->rbp);
12100 #ifdef CONFIG_X86_64
12101 kvm_r8_write(vcpu, regs->r8);
12102 kvm_r9_write(vcpu, regs->r9);
12103 kvm_r10_write(vcpu, regs->r10);
12104 kvm_r11_write(vcpu, regs->r11);
12105 kvm_r12_write(vcpu, regs->r12);
12106 kvm_r13_write(vcpu, regs->r13);
12107 kvm_r14_write(vcpu, regs->r14);
12108 kvm_r15_write(vcpu, regs->r15);
12109 #endif
12110
12111 kvm_rip_write(vcpu, regs->rip);
12112 kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED);
12113
12114 vcpu->arch.exception.pending = false;
12115 vcpu->arch.exception_vmexit.pending = false;
12116
12117 kvm_make_request(KVM_REQ_EVENT, vcpu);
12118 }
12119
12120 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
12121 {
12122 if (vcpu->kvm->arch.has_protected_state &&
12123 vcpu->arch.guest_state_protected)
12124 return -EINVAL;
12125
12126 vcpu_load(vcpu);
12127 __set_regs(vcpu, regs);
12128 vcpu_put(vcpu);
12129 return 0;
12130 }
12131
12132 static void __get_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
12133 {
12134 struct desc_ptr dt;
12135
12136 if (vcpu->arch.guest_state_protected)
12137 goto skip_protected_regs;
12138
12139 kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
12140 kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
12141 kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
12142 kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
12143 kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
12144 kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
12145
12146 kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
12147 kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
12148
12149 kvm_x86_call(get_idt)(vcpu, &dt);
12150 sregs->idt.limit = dt.size;
12151 sregs->idt.base = dt.address;
12152 kvm_x86_call(get_gdt)(vcpu, &dt);
12153 sregs->gdt.limit = dt.size;
12154 sregs->gdt.base = dt.address;
12155
12156 sregs->cr2 = vcpu->arch.cr2;
12157 sregs->cr3 = kvm_read_cr3(vcpu);
12158
12159 skip_protected_regs:
12160 sregs->cr0 = kvm_read_cr0(vcpu);
12161 sregs->cr4 = kvm_read_cr4(vcpu);
12162 sregs->cr8 = kvm_get_cr8(vcpu);
12163 sregs->efer = vcpu->arch.efer;
12164 sregs->apic_base = vcpu->arch.apic_base;
12165 }
12166
12167 static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
12168 {
12169 __get_sregs_common(vcpu, sregs);
12170
12171 if (vcpu->arch.guest_state_protected)
12172 return;
12173
12174 if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft)
12175 set_bit(vcpu->arch.interrupt.nr,
12176 (unsigned long *)sregs->interrupt_bitmap);
12177 }
12178
12179 static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2)
12180 {
12181 int i;
12182
12183 __get_sregs_common(vcpu, (struct kvm_sregs *)sregs2);
12184
12185 if (vcpu->arch.guest_state_protected)
12186 return;
12187
12188 if (is_pae_paging(vcpu)) {
12189 kvm_vcpu_srcu_read_lock(vcpu);
12190 for (i = 0 ; i < 4 ; i++)
12191 sregs2->pdptrs[i] = kvm_pdptr_read(vcpu, i);
12192 sregs2->flags |= KVM_SREGS2_FLAGS_PDPTRS_VALID;
12193 kvm_vcpu_srcu_read_unlock(vcpu);
12194 }
12195 }
12196
12197 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
12198 struct kvm_sregs *sregs)
12199 {
12200 if (vcpu->kvm->arch.has_protected_state &&
12201 vcpu->arch.guest_state_protected)
12202 return -EINVAL;
12203
12204 vcpu_load(vcpu);
12205 __get_sregs(vcpu, sregs);
12206 vcpu_put(vcpu);
12207 return 0;
12208 }
12209
12210 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
12211 struct kvm_mp_state *mp_state)
12212 {
12213 int r;
12214
12215 vcpu_load(vcpu);
12216 kvm_vcpu_srcu_read_lock(vcpu);
12217
12218 r = kvm_apic_accept_events(vcpu);
12219 if (r < 0)
12220 goto out;
12221 r = 0;
12222
12223 if ((vcpu->arch.mp_state == KVM_MP_STATE_HALTED ||
12224 vcpu->arch.mp_state == KVM_MP_STATE_AP_RESET_HOLD) &&
12225 vcpu->arch.pv.pv_unhalted)
12226 mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
12227 else
12228 mp_state->mp_state = vcpu->arch.mp_state;
12229
12230 out:
12231 kvm_vcpu_srcu_read_unlock(vcpu);
12232 vcpu_put(vcpu);
12233 return r;
12234 }
12235
12236 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
12237 struct kvm_mp_state *mp_state)
12238 {
12239 int ret = -EINVAL;
12240
12241 vcpu_load(vcpu);
12242
12243 switch (mp_state->mp_state) {
12244 case KVM_MP_STATE_UNINITIALIZED:
12245 case KVM_MP_STATE_HALTED:
12246 case KVM_MP_STATE_AP_RESET_HOLD:
12247 case KVM_MP_STATE_INIT_RECEIVED:
12248 case KVM_MP_STATE_SIPI_RECEIVED:
12249 if (!lapic_in_kernel(vcpu))
12250 goto out;
12251 break;
12252
12253 case KVM_MP_STATE_RUNNABLE:
12254 break;
12255
12256 default:
12257 goto out;
12258 }
12259
12260 /*
12261 * SIPI_RECEIVED is obsolete and no longer used internally; KVM instead
12262 * leaves the vCPU in INIT_RECEIVED (Wait-For-SIPI) and pends the SIPI.
12263 * Translate SIPI_RECEIVED as appropriate for backwards compatibility.
12264 */
12265 if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
12266 mp_state->mp_state = KVM_MP_STATE_INIT_RECEIVED;
12267 set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);
12268 }
12269
12270 kvm_set_mp_state(vcpu, mp_state->mp_state);
12271 kvm_make_request(KVM_REQ_EVENT, vcpu);
12272
12273 ret = 0;
12274 out:
12275 vcpu_put(vcpu);
12276 return ret;
12277 }
12278
12279 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
12280 int reason, bool has_error_code, u32 error_code)
12281 {
12282 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
12283 int ret;
12284
12285 if (kvm_is_cr4_bit_set(vcpu, X86_CR4_CET)) {
12286 u64 u_cet, s_cet;
12287
12288 /*
12289 * Check both User and Supervisor on task switches as inter-
12290 * privilege level task switches are impacted by CET at both
12291 * the current privilege level and the new privilege level, and
12292 * that information is not known at this time. The expectation
12293 * is that the guest won't require emulation of task switches
12294 * while using IBT or Shadow Stacks.
12295 */
12296 if (__kvm_emulate_msr_read(vcpu, MSR_IA32_U_CET, &u_cet) ||
12297 __kvm_emulate_msr_read(vcpu, MSR_IA32_S_CET, &s_cet))
12298 goto unhandled_task_switch;
12299
12300 if ((u_cet | s_cet) & (CET_ENDBR_EN | CET_SHSTK_EN))
12301 goto unhandled_task_switch;
12302 }
12303
12304 init_emulate_ctxt(vcpu);
12305
12306 ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason,
12307 has_error_code, error_code);
12308
12309 /*
12310 * Report an error to userspace if MMIO is needed, as KVM doesn't support
12311 * MMIO during a task switch (or any other complex operation).
12312 */
12313 if (ret || vcpu->mmio_needed)
12314 goto unhandled_task_switch;
12315
12316 kvm_rip_write(vcpu, ctxt->eip);
12317 kvm_set_rflags(vcpu, ctxt->eflags);
12318 return 1;
12319
12320 unhandled_task_switch:
12321 vcpu->mmio_needed = false;
12322 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
12323 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
12324 vcpu->run->internal.ndata = 0;
12325 return 0;
12326 }
12327 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_task_switch);
12328
12329 static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
12330 {
12331 if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) {
12332 /*
12333 * When EFER.LME and CR0.PG are set, the processor is in
12334 * 64-bit mode (though maybe in a 32-bit code segment).
12335 * CR4.PAE and EFER.LMA must be set.
12336 */
12337 if (!(sregs->cr4 & X86_CR4_PAE) || !(sregs->efer & EFER_LMA))
12338 return false;
12339 if (!kvm_vcpu_is_legal_cr3(vcpu, sregs->cr3))
12340 return false;
12341 } else {
12342 /*
12343 * Not in 64-bit mode: EFER.LMA is clear and the code
12344 * segment cannot be 64-bit.
12345 */
12346 if (sregs->efer & EFER_LMA || sregs->cs.l)
12347 return false;
12348 }
12349
12350 return kvm_is_valid_cr4(vcpu, sregs->cr4) &&
12351 kvm_is_valid_cr0(vcpu, sregs->cr0);
12352 }
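/*
 * Illustrative example of state the check above rejects (an assumed
 * userspace-provided combination): sregs with efer = EFER_LME | EFER_LMA and
 * cr0.PG = 1 but cr4.PAE = 0 is invalid, as hardware cannot enter long mode
 * without PAE paging.
 */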
12353
12354 static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs,
12355 int *mmu_reset_needed, bool update_pdptrs)
12356 {
12357 int idx;
12358 struct desc_ptr dt;
12359
12360 if (!kvm_is_valid_sregs(vcpu, sregs))
12361 return -EINVAL;
12362
12363 if (kvm_apic_set_base(vcpu, sregs->apic_base, true))
12364 return -EINVAL;
12365
12366 if (vcpu->arch.guest_state_protected)
12367 return 0;
12368
12369 dt.size = sregs->idt.limit;
12370 dt.address = sregs->idt.base;
12371 kvm_x86_call(set_idt)(vcpu, &dt);
12372 dt.size = sregs->gdt.limit;
12373 dt.address = sregs->gdt.base;
12374 kvm_x86_call(set_gdt)(vcpu, &dt);
12375
12376 vcpu->arch.cr2 = sregs->cr2;
12377 *mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
12378 vcpu->arch.cr3 = sregs->cr3;
12379 kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
12380 kvm_x86_call(post_set_cr3)(vcpu, sregs->cr3);
12381
12382 kvm_set_cr8(vcpu, sregs->cr8);
12383
12384 *mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
12385 kvm_x86_call(set_efer)(vcpu, sregs->efer);
12386
12387 *mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
12388 kvm_x86_call(set_cr0)(vcpu, sregs->cr0);
12389
12390 *mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
12391 kvm_x86_call(set_cr4)(vcpu, sregs->cr4);
12392
12393 if (update_pdptrs) {
12394 idx = srcu_read_lock(&vcpu->kvm->srcu);
12395 if (is_pae_paging(vcpu)) {
12396 load_pdptrs(vcpu, kvm_read_cr3(vcpu));
12397 *mmu_reset_needed = 1;
12398 }
12399 srcu_read_unlock(&vcpu->kvm->srcu, idx);
12400 }
12401
12402 kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
12403 kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
12404 kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
12405 kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
12406 kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
12407 kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
12408
12409 kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
12410 kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
12411
12412 update_cr8_intercept(vcpu);
12413
12414 /* Older userspace won't unhalt the vcpu on reset. */
12415 if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
12416 sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
12417 !is_protmode(vcpu))
12418 kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE);
12419
12420 return 0;
12421 }
12422
12423 static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
12424 {
12425 int pending_vec, max_bits;
12426 int mmu_reset_needed = 0;
12427 int ret = __set_sregs_common(vcpu, sregs, &mmu_reset_needed, true);
12428
12429 if (ret)
12430 return ret;
12431
12432 if (mmu_reset_needed) {
12433 kvm_mmu_reset_context(vcpu);
12434 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
12435 }
12436
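	/*
	 * Restore at most one pending interrupt from the legacy
	 * interrupt_bitmap; KVM tracks only a single pending/injected
	 * interrupt, so only the lowest set vector is re-queued.
	 */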
12437 max_bits = KVM_NR_INTERRUPTS;
12438 pending_vec = find_first_bit(
12439 (const unsigned long *)sregs->interrupt_bitmap, max_bits);
12440
12441 if (pending_vec < max_bits) {
12442 kvm_queue_interrupt(vcpu, pending_vec, false);
12443 pr_debug("Set back pending irq %d\n", pending_vec);
12444 kvm_make_request(KVM_REQ_EVENT, vcpu);
12445 }
12446 return 0;
12447 }
12448
12449 static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2)
12450 {
12451 int mmu_reset_needed = 0;
12452 bool valid_pdptrs = sregs2->flags & KVM_SREGS2_FLAGS_PDPTRS_VALID;
12453 bool pae = (sregs2->cr0 & X86_CR0_PG) && (sregs2->cr4 & X86_CR4_PAE) &&
12454 !(sregs2->efer & EFER_LMA);
12455 int i, ret;
12456
12457 if (sregs2->flags & ~KVM_SREGS2_FLAGS_PDPTRS_VALID)
12458 return -EINVAL;
12459
12460 if (valid_pdptrs && (!pae || vcpu->arch.guest_state_protected))
12461 return -EINVAL;
12462
12463 ret = __set_sregs_common(vcpu, (struct kvm_sregs *)sregs2,
12464 &mmu_reset_needed, !valid_pdptrs);
12465 if (ret)
12466 return ret;
12467
12468 if (valid_pdptrs) {
12469 for (i = 0; i < 4 ; i++)
12470 kvm_pdptr_write(vcpu, i, sregs2->pdptrs[i]);
12471
12472 kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
12473 mmu_reset_needed = 1;
12474 vcpu->arch.pdptrs_from_userspace = true;
12475 }
12476 if (mmu_reset_needed) {
12477 kvm_mmu_reset_context(vcpu);
12478 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
12479 }
12480 return 0;
12481 }
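/*
 * Note on the PDPTR handling above: KVM_SET_SREGS2 with
 * KVM_SREGS2_FLAGS_PDPTRS_VALID lets userspace restore the exact PDPTRs
 * snapshotted by KVM_GET_SREGS2 instead of having KVM re-read them from
 * guest memory via CR3, which matters if guest memory has changed since the
 * PDPTRs were loaded (e.g. across live migration).
 */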
12482
12483 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
12484 struct kvm_sregs *sregs)
12485 {
12486 int ret;
12487
12488 if (vcpu->kvm->arch.has_protected_state &&
12489 vcpu->arch.guest_state_protected)
12490 return -EINVAL;
12491
12492 vcpu_load(vcpu);
12493 ret = __set_sregs(vcpu, sregs);
12494 vcpu_put(vcpu);
12495 return ret;
12496 }
12497
12498 static void kvm_arch_vcpu_guestdbg_update_apicv_inhibit(struct kvm *kvm)
12499 {
12500 bool set = false;
12501 struct kvm_vcpu *vcpu;
12502 unsigned long i;
12503
12504 if (!enable_apicv)
12505 return;
12506
12507 down_write(&kvm->arch.apicv_update_lock);
12508
12509 kvm_for_each_vcpu(i, vcpu, kvm) {
12510 if (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ) {
12511 set = true;
12512 break;
12513 }
12514 }
12515 __kvm_set_or_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_BLOCKIRQ, set);
12516 up_write(&kvm->arch.apicv_update_lock);
12517 }
12518
12519 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
12520 struct kvm_guest_debug *dbg)
12521 {
12522 unsigned long rflags;
12523 int i, r;
12524
12525 if (vcpu->arch.guest_state_protected)
12526 return -EINVAL;
12527
12528 vcpu_load(vcpu);
12529
12530 if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
12531 r = -EBUSY;
12532 if (kvm_is_exception_pending(vcpu))
12533 goto out;
12534 if (dbg->control & KVM_GUESTDBG_INJECT_DB)
12535 kvm_queue_exception(vcpu, DB_VECTOR);
12536 else
12537 kvm_queue_exception(vcpu, BP_VECTOR);
12538 }
12539
12540 /*
12541 * Read rflags as long as potentially injected trace flags are still
12542 * filtered out.
12543 */
12544 rflags = kvm_get_rflags(vcpu);
12545
12546 vcpu->guest_debug = dbg->control;
12547 if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
12548 vcpu->guest_debug = 0;
12549
12550 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
12551 for (i = 0; i < KVM_NR_DB_REGS; ++i)
12552 vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
12553 vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7];
12554 } else {
12555 for (i = 0; i < KVM_NR_DB_REGS; i++)
12556 vcpu->arch.eff_db[i] = vcpu->arch.db[i];
12557 }
12558 kvm_update_dr7(vcpu);
12559
12560 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
12561 vcpu->arch.singlestep_rip = kvm_get_linear_rip(vcpu);
12562
12563 /*
12564 * Trigger an rflags update that will inject or remove the trace
12565 * flags.
12566 */
12567 kvm_set_rflags(vcpu, rflags);
12568
12569 kvm_x86_call(update_exception_bitmap)(vcpu);
12570
12571 kvm_arch_vcpu_guestdbg_update_apicv_inhibit(vcpu->kvm);
12572
12573 r = 0;
12574
12575 out:
12576 vcpu_put(vcpu);
12577 return r;
12578 }
12579
12580 /*
12581 * Translate a guest virtual address to a guest physical address.
12582 */
12583 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
12584 struct kvm_translation *tr)
12585 {
12586 unsigned long vaddr = tr->linear_address;
12587 gpa_t gpa;
12588 int idx;
12589
12590 vcpu_load(vcpu);
12591
12592 idx = srcu_read_lock(&vcpu->kvm->srcu);
12593 gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
12594 srcu_read_unlock(&vcpu->kvm->srcu, idx);
12595 tr->physical_address = gpa;
12596 tr->valid = gpa != INVALID_GPA;
12597 tr->writeable = 1;
12598 tr->usermode = 0;
12599
12600 vcpu_put(vcpu);
12601 return 0;
12602 }
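/*
 * Illustrative userspace usage (a sketch, assuming a vCPU fd):
 *
 *	struct kvm_translation tr = { .linear_address = gva };
 *	ioctl(vcpu_fd, KVM_TRANSLATE, &tr);
 *
 * tr.valid and tr.physical_address then reflect the walk performed above.
 */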
12603
12604 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
12605 {
12606 struct fxregs_state *fxsave;
12607
12608 if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
12609 return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0;
12610
12611 vcpu_load(vcpu);
12612
12613 fxsave = &vcpu->arch.guest_fpu.fpstate->regs.fxsave;
12614 memcpy(fpu->fpr, fxsave->st_space, 128);
12615 fpu->fcw = fxsave->cwd;
12616 fpu->fsw = fxsave->swd;
12617 fpu->ftwx = fxsave->twd;
12618 fpu->last_opcode = fxsave->fop;
12619 fpu->last_ip = fxsave->rip;
12620 fpu->last_dp = fxsave->rdp;
12621 memcpy(fpu->xmm, fxsave->xmm_space, sizeof(fxsave->xmm_space));
12622
12623 vcpu_put(vcpu);
12624 return 0;
12625 }
12626
12627 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
12628 {
12629 struct fxregs_state *fxsave;
12630
12631 if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
12632 return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0;
12633
12634 vcpu_load(vcpu);
12635
12636 fxsave = &vcpu->arch.guest_fpu.fpstate->regs.fxsave;
12637
12638 memcpy(fxsave->st_space, fpu->fpr, 128);
12639 fxsave->cwd = fpu->fcw;
12640 fxsave->swd = fpu->fsw;
12641 fxsave->twd = fpu->ftwx;
12642 fxsave->fop = fpu->last_opcode;
12643 fxsave->rip = fpu->last_ip;
12644 fxsave->rdp = fpu->last_dp;
12645 memcpy(fxsave->xmm_space, fpu->xmm, sizeof(fxsave->xmm_space));
12646
12647 vcpu_put(vcpu);
12648 return 0;
12649 }
12650
12651 static void store_regs(struct kvm_vcpu *vcpu)
12652 {
12653 BUILD_BUG_ON(sizeof(struct kvm_sync_regs) > SYNC_REGS_SIZE_BYTES);
12654
12655 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_REGS)
12656 __get_regs(vcpu, &vcpu->run->s.regs.regs);
12657
12658 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_SREGS)
12659 __get_sregs(vcpu, &vcpu->run->s.regs.sregs);
12660
12661 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_EVENTS)
12662 kvm_vcpu_ioctl_x86_get_vcpu_events(
12663 vcpu, &vcpu->run->s.regs.events);
12664 }
12665
12666 static int sync_regs(struct kvm_vcpu *vcpu)
12667 {
12668 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_REGS) {
12669 __set_regs(vcpu, &vcpu->run->s.regs.regs);
12670 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_REGS;
12671 }
12672
12673 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_SREGS) {
12674 struct kvm_sregs sregs = vcpu->run->s.regs.sregs;
12675
12676 if (__set_sregs(vcpu, &sregs))
12677 return -EINVAL;
12678
12679 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_SREGS;
12680 }
12681
12682 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_EVENTS) {
12683 struct kvm_vcpu_events events = vcpu->run->s.regs.events;
12684
12685 if (kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events))
12686 return -EINVAL;
12687
12688 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_EVENTS;
12689 }
12690
12691 return 0;
12692 }
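/*
 * Together, store_regs() and sync_regs() implement the KVM_CAP_SYNC_REGS
 * protocol: kvm_dirty_regs bits push state from run->s.regs into the vCPU
 * before entry, and kvm_valid_regs bits copy state back out on exit, sparing
 * userspace separate KVM_GET/SET_{REGS,SREGS} ioctls.
 */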
12693
12694 #define PERF_MEDIATED_PMU_MSG \
12695 "Failed to enable mediated vPMU, try disabling system wide perf events and nmi_watchdog.\n"
12696
12697 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
12698 {
12699 int r;
12700
12701 if (kvm_check_tsc_unstable() && kvm->created_vcpus)
12702 pr_warn_once("SMP vm created on host with unstable TSC; "
12703 "guest TSC will not be reliable\n");
12704
12705 if (!kvm->arch.max_vcpu_ids)
12706 kvm->arch.max_vcpu_ids = KVM_MAX_VCPU_IDS;
12707
12708 if (id >= kvm->arch.max_vcpu_ids)
12709 return -EINVAL;
12710
12711 /*
12712 * Note, any actions done by .vcpu_precreate() must be idempotent with
12713 * respect to creating multiple vCPUs, and therefore are not undone if
12714 * creating a vCPU fails (including failure during pre-create).
12715 */
12716 r = kvm_x86_call(vcpu_precreate)(kvm);
12717 if (r)
12718 return r;
12719
12720 if (enable_mediated_pmu && kvm->arch.enable_pmu &&
12721 !kvm->arch.created_mediated_pmu) {
12722 if (irqchip_in_kernel(kvm)) {
12723 r = perf_create_mediated_pmu();
12724 if (r) {
12725 pr_warn_ratelimited(PERF_MEDIATED_PMU_MSG);
12726 return r;
12727 }
12728 kvm->arch.created_mediated_pmu = true;
12729 } else {
12730 kvm->arch.enable_pmu = false;
12731 }
12732 }
12733 return 0;
12734 }
12735
12736 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
12737 {
12738 struct page *page;
12739 int r;
12740
12741 vcpu->arch.last_vmentry_cpu = -1;
12742 vcpu->arch.regs_avail = ~0;
12743 vcpu->arch.regs_dirty = ~0;
12744
12745 kvm_gpc_init(&vcpu->arch.pv_time, vcpu->kvm);
12746
12747 if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
12748 kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE);
12749 else
12750 kvm_set_mp_state(vcpu, KVM_MP_STATE_UNINITIALIZED);
12751
12752 r = kvm_mmu_create(vcpu);
12753 if (r < 0)
12754 return r;
12755
12756 r = kvm_create_lapic(vcpu);
12757 if (r < 0)
12758 goto fail_mmu_destroy;
12759
12760 r = -ENOMEM;
12761
12762 page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
12763 if (!page)
12764 goto fail_free_lapic;
12765 vcpu->arch.pio_data = page_address(page);
12766
12767 vcpu->arch.mce_banks = kcalloc(KVM_MAX_MCE_BANKS * 4, sizeof(u64),
12768 GFP_KERNEL_ACCOUNT);
12769 vcpu->arch.mci_ctl2_banks = kcalloc(KVM_MAX_MCE_BANKS, sizeof(u64),
12770 GFP_KERNEL_ACCOUNT);
12771 if (!vcpu->arch.mce_banks || !vcpu->arch.mci_ctl2_banks)
12772 goto fail_free_mce_banks;
12773 vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
12774
12775 if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask,
12776 GFP_KERNEL_ACCOUNT))
12777 goto fail_free_mce_banks;
12778
12779 if (!alloc_emulate_ctxt(vcpu))
12780 goto free_wbinvd_dirty_mask;
12781
12782 if (!fpu_alloc_guest_fpstate(&vcpu->arch.guest_fpu)) {
12783 pr_err("failed to allocate vcpu's fpu\n");
12784 goto free_emulate_ctxt;
12785 }
12786
12787 kvm_async_pf_hash_reset(vcpu);
12788
12789 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_STUFF_FEATURE_MSRS)) {
12790 vcpu->arch.arch_capabilities = kvm_get_arch_capabilities();
12791 vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT;
12792 vcpu->arch.perf_capabilities = kvm_caps.supported_perf_cap;
12793 }
12794 kvm_pmu_init(vcpu);
12795
12796 vcpu->arch.pending_external_vector = -1;
12797 vcpu->arch.preempted_in_kernel = false;
12798
12799 #if IS_ENABLED(CONFIG_HYPERV)
12800 vcpu->arch.hv_root_tdp = INVALID_PAGE;
12801 #endif
12802
12803 r = kvm_x86_call(vcpu_create)(vcpu);
12804 if (r)
12805 goto free_guest_fpu;
12806
12807 kvm_xen_init_vcpu(vcpu);
12808 vcpu_load(vcpu);
12809 kvm_vcpu_after_set_cpuid(vcpu);
12810 kvm_set_tsc_khz(vcpu, vcpu->kvm->arch.default_tsc_khz);
12811 kvm_vcpu_reset(vcpu, false);
12812 kvm_init_mmu(vcpu);
12813 vcpu_put(vcpu);
12814 return 0;
12815
12816 free_guest_fpu:
12817 fpu_free_guest_fpstate(&vcpu->arch.guest_fpu);
12818 free_emulate_ctxt:
12819 kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt);
12820 free_wbinvd_dirty_mask:
12821 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
12822 fail_free_mce_banks:
12823 kfree(vcpu->arch.mce_banks);
12824 kfree(vcpu->arch.mci_ctl2_banks);
12825 free_page((unsigned long)vcpu->arch.pio_data);
12826 fail_free_lapic:
12827 kvm_free_lapic(vcpu);
12828 fail_mmu_destroy:
12829 kvm_mmu_destroy(vcpu);
12830 return r;
12831 }
12832
12833 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
12834 {
12835 if (mutex_lock_killable(&vcpu->mutex))
12836 return;
12837 vcpu_load(vcpu);
12838 kvm_synchronize_tsc(vcpu, NULL);
12839 vcpu_put(vcpu);
12840
12841 /* poll control enabled by default */
12842 vcpu->arch.msr_kvm_poll_control = 1;
12843
12844 mutex_unlock(&vcpu->mutex);
12845 }
12846
12847 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
12848 {
12849 int idx, cpu;
12850
12851 kvm_clear_async_pf_completion_queue(vcpu);
12852 kvm_mmu_unload(vcpu);
12853
12854 kvmclock_reset(vcpu);
12855
12856 for_each_possible_cpu(cpu)
12857 cmpxchg(per_cpu_ptr(&last_vcpu, cpu), vcpu, NULL);
12858
12859 kvm_x86_call(vcpu_free)(vcpu);
12860
12861 kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt);
12862 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
12863 fpu_free_guest_fpstate(&vcpu->arch.guest_fpu);
12864
12865 kvm_xen_destroy_vcpu(vcpu);
12866 kvm_hv_vcpu_uninit(vcpu);
12867 kvm_pmu_destroy(vcpu);
12868 kfree(vcpu->arch.mce_banks);
12869 kfree(vcpu->arch.mci_ctl2_banks);
12870 kvm_free_lapic(vcpu);
12871 idx = srcu_read_lock(&vcpu->kvm->srcu);
12872 kvm_mmu_destroy(vcpu);
12873 srcu_read_unlock(&vcpu->kvm->srcu, idx);
12874 free_page((unsigned long)vcpu->arch.pio_data);
12875 kvfree(vcpu->arch.cpuid_entries);
12876 }
12877
12878 static void kvm_xstate_reset(struct kvm_vcpu *vcpu, bool init_event)
12879 {
12880 struct fpstate *fpstate = vcpu->arch.guest_fpu.fpstate;
12881 u64 xfeatures_mask;
12882 bool fpu_in_use;
12883 int i;
12884
12885 /*
12886 * Guest FPU state is zero-allocated and so doesn't need to be manually
12887 * cleared on RESET, i.e. during vCPU creation.
12888 */
12889 if (!init_event || !fpstate)
12890 return;
12891
12892 /*
12893 * On INIT, only select XSTATE components are zeroed, most components
12894 * are unchanged. Currently, the only components that are zeroed and
12895 * supported by KVM are MPX and CET related.
12896 */
12897 xfeatures_mask = (kvm_caps.supported_xcr0 | kvm_caps.supported_xss) &
12898 (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR |
12899 XFEATURE_MASK_CET_ALL);
12900 if (!xfeatures_mask)
12901 return;
12902
12903 BUILD_BUG_ON(sizeof(xfeatures_mask) * BITS_PER_BYTE <= XFEATURE_MAX);
12904
12905 /*
12906 * Unload guest FPU state (if necessary) before zeroing XSTATE fields
12907 * as the kernel can only modify the state when it's resident in memory,
12908 * i.e. when it's not loaded into hardware.
12909 *
12910 * WARN if the vCPU's desire to run, i.e. whether or not it's in KVM_RUN,
12911 * doesn't match the loaded/in-use state of the FPU, as KVM_RUN is the
12912 * only path that can trigger INIT emulation _and_ loads FPU state, and
12913 * KVM_RUN should _always_ load FPU state.
12914 */
12915 WARN_ON_ONCE(vcpu->wants_to_run != fpstate->in_use);
12916 fpu_in_use = fpstate->in_use;
12917 if (fpu_in_use)
12918 kvm_put_guest_fpu(vcpu);
12919 for_each_set_bit(i, (unsigned long *)&xfeatures_mask, XFEATURE_MAX)
12920 fpstate_clear_xstate_component(fpstate, i);
12921 if (fpu_in_use)
12922 kvm_load_guest_fpu(vcpu);
12923 }
12924
12925 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
12926 {
12927 struct kvm_cpuid_entry2 *cpuid_0x1;
12928 unsigned long old_cr0 = kvm_read_cr0(vcpu);
12929 unsigned long new_cr0;
12930
12931 /*
12932 * Several of the "set" flows, e.g. ->set_cr0(), read other registers
12933 * to handle side effects. RESET emulation hits those flows and relies
12934 * on emulated/virtualized registers, including those that are loaded
12935 * into hardware, to be zeroed at vCPU creation. Use CRs as a sentinel
12936 * to detect improper or missing initialization.
12937 */
12938 WARN_ON_ONCE(!init_event &&
12939 (old_cr0 || kvm_read_cr3(vcpu) || kvm_read_cr4(vcpu)));
12940
12941 /*
12942 * SVM doesn't unconditionally VM-Exit on INIT and SHUTDOWN, thus it's
12943 * possible to INIT the vCPU while L2 is active. Force the vCPU back
12944 * into L1 as EFER.SVME is cleared on INIT (along with all other EFER
12945 * bits), i.e. virtualization is disabled.
12946 */
12947 if (is_guest_mode(vcpu))
12948 kvm_leave_nested(vcpu);
12949
12950 kvm_lapic_reset(vcpu, init_event);
12951
12952 WARN_ON_ONCE(is_guest_mode(vcpu) || is_smm(vcpu));
12953 vcpu->arch.hflags = 0;
12954
12955 vcpu->arch.smi_pending = 0;
12956 vcpu->arch.smi_count = 0;
12957 atomic_set(&vcpu->arch.nmi_queued, 0);
12958 vcpu->arch.nmi_pending = 0;
12959 vcpu->arch.nmi_injected = false;
12960 kvm_clear_interrupt_queue(vcpu);
12961 kvm_clear_exception_queue(vcpu);
12962
12963 memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
12964 kvm_update_dr0123(vcpu);
12965 vcpu->arch.dr6 = DR6_ACTIVE_LOW;
12966 vcpu->arch.dr7 = DR7_FIXED_1;
12967 kvm_update_dr7(vcpu);
12968
12969 vcpu->arch.cr2 = 0;
12970
12971 kvm_make_request(KVM_REQ_EVENT, vcpu);
12972 vcpu->arch.apf.msr_en_val = 0;
12973 vcpu->arch.apf.msr_int_val = 0;
12974 vcpu->arch.st.msr_val = 0;
12975
12976 kvmclock_reset(vcpu);
12977
12978 kvm_clear_async_pf_completion_queue(vcpu);
12979 kvm_async_pf_hash_reset(vcpu);
12980 vcpu->arch.apf.halted = false;
12981
12982 kvm_xstate_reset(vcpu, init_event);
12983
12984 if (!init_event) {
12985 vcpu->arch.smbase = 0x30000;
12986
12987 vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT;
12988
12989 vcpu->arch.msr_misc_features_enables = 0;
12990 vcpu->arch.ia32_misc_enable_msr = MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL |
12991 MSR_IA32_MISC_ENABLE_BTS_UNAVAIL;
12992
12993 __kvm_set_xcr(vcpu, 0, XFEATURE_MASK_FP);
12994 kvm_msr_write(vcpu, MSR_IA32_XSS, 0);
12995 }
12996
12997 /* All GPRs except RDX (handled below) are zeroed on RESET/INIT. */
12998 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
12999 kvm_register_mark_dirty(vcpu, VCPU_REGS_RSP);
13000
13001 /*
13002 * Fall back to KVM's default Family/Model/Stepping of 0x600 (P6/Athlon)
13003 * if no CPUID match is found. Note, it's impossible to get a match at
13004 * RESET since KVM emulates RESET before exposing the vCPU to userspace,
13005 * i.e. it's impossible for kvm_find_cpuid_entry() to find a valid entry
13006 * on RESET. But, go through the motions in case that's ever remedied.
13007 */
13008 cpuid_0x1 = kvm_find_cpuid_entry(vcpu, 1);
13009 kvm_rdx_write(vcpu, cpuid_0x1 ? cpuid_0x1->eax : 0x600);
13010
13011 kvm_x86_call(vcpu_reset)(vcpu, init_event);
13012
13013 kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
13014 kvm_rip_write(vcpu, 0xfff0);
13015
13016 vcpu->arch.cr3 = 0;
13017 kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
13018
13019 /*
13020 * CR0.CD/NW are set on RESET, preserved on INIT. Note, some versions
13021 * of Intel's SDM list CD/NW as being set on INIT, but they contradict
13022 * (or qualify) that with a footnote stating that CD/NW are preserved.
13023 */
13024 new_cr0 = X86_CR0_ET;
13025 if (init_event)
13026 new_cr0 |= (old_cr0 & (X86_CR0_NW | X86_CR0_CD));
13027 else
13028 new_cr0 |= X86_CR0_NW | X86_CR0_CD;
13029
13030 kvm_x86_call(set_cr0)(vcpu, new_cr0);
13031 kvm_x86_call(set_cr4)(vcpu, 0);
13032 kvm_x86_call(set_efer)(vcpu, 0);
13033 kvm_x86_call(update_exception_bitmap)(vcpu);
13034
13035 /*
13036 * On the standard CR0/CR4/EFER modification paths, there are several
13037 * complex conditions determining whether the MMU has to be reset and/or
13038 * which PCIDs have to be flushed. However, CR0.WP and the paging-related
13039 * bits in CR4 and EFER are irrelevant if CR0.PG was '0'; and a reset+flush
13040 * is needed anyway if CR0.PG was '1' (which can only happen for INIT, as
13041 * CR0 will be '0' prior to RESET). So we only need to check CR0.PG here.
13042 */
13043 if (old_cr0 & X86_CR0_PG) {
13044 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
13045 kvm_mmu_reset_context(vcpu);
13046 }
13047
13048 /*
13049 * Intel's SDM states that all TLB entries are flushed on INIT. AMD's
13050 * APM states the TLBs are untouched by INIT, but it also states that
13051 * the TLBs are flushed on "External initialization of the processor."
13052 * Flush the guest TLB regardless of vendor, there is no meaningful
13053 * benefit in relying on the guest to flush the TLB immediately after
13054 * INIT. A spurious TLB flush is benign and likely negligible from a
13055 * performance perspective.
13056 */
13057 if (init_event)
13058 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
13059 }
13060 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_reset);
13061
13062 void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
13063 {
13064 struct kvm_segment cs;
13065
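	/*
	 * A SIPI with vector V starts the AP in real mode at CS:IP =
	 * (V << 8):0, e.g. vector 0x10 yields CS.selector 0x1000 and
	 * CS.base 0x10000, so execution begins at physical 0x10000.
	 */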
13066 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
13067 cs.selector = vector << 8;
13068 cs.base = vector << 12;
13069 kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
13070 kvm_rip_write(vcpu, 0);
13071 }
13072 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_deliver_sipi_vector);
13073
13074 void kvm_arch_enable_virtualization(void)
13075 {
13076 cpu_emergency_register_virt_callback(kvm_x86_ops.emergency_disable_virtualization_cpu);
13077 }
13078
13079 void kvm_arch_disable_virtualization(void)
13080 {
13081 cpu_emergency_unregister_virt_callback(kvm_x86_ops.emergency_disable_virtualization_cpu);
13082 }
13083
13084 int kvm_arch_enable_virtualization_cpu(void)
13085 {
13086 struct kvm *kvm;
13087 struct kvm_vcpu *vcpu;
13088 unsigned long i;
13089 int ret;
13090 u64 local_tsc;
13091 u64 max_tsc = 0;
13092 bool stable, backwards_tsc = false;
13093
13094 kvm_user_return_msr_cpu_online();
13095
13096 ret = kvm_x86_check_processor_compatibility();
13097 if (ret)
13098 return ret;
13099
13100 ret = kvm_x86_call(enable_virtualization_cpu)();
13101 if (ret != 0)
13102 return ret;
13103
13104 local_tsc = rdtsc();
13105 stable = !kvm_check_tsc_unstable();
13106 list_for_each_entry(kvm, &vm_list, vm_list) {
13107 kvm_for_each_vcpu(i, vcpu, kvm) {
13108 if (!stable && vcpu->cpu == smp_processor_id())
13109 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
13110 if (stable && vcpu->arch.last_host_tsc > local_tsc) {
13111 backwards_tsc = true;
13112 if (vcpu->arch.last_host_tsc > max_tsc)
13113 max_tsc = vcpu->arch.last_host_tsc;
13114 }
13115 }
13116 }
13117
13118 /*
13119 * Sometimes, even reliable TSCs go backwards. This happens on
13120 * platforms that reset TSC during suspend or hibernate actions, but
13121 * maintain synchronization. We must compensate. Fortunately, we can
13122 * detect that condition here, which happens early in CPU bringup,
13123 * before any KVM threads can be running. Unfortunately, we can't
13124 * bring the TSCs fully up to date with real time, as we aren't yet far
13125 * enough into CPU bringup that we know how much real time has actually
13126 * elapsed; our helper function, ktime_get_boottime_ns(), will be using boot
13127 * variables that haven't been updated yet.
13128 *
13129 * So we simply find the maximum observed TSC above, then record the
13130 * adjustment to TSC in each VCPU. When the VCPU later gets loaded,
13131 * the adjustment will be applied. Note that we accumulate
13132 * adjustments, in case multiple suspend cycles happen before some VCPU
13133 * gets a chance to run again. In the event that no KVM threads get a
13134 * chance to run, we will miss the entire elapsed period, as we'll have
13135 * reset last_host_tsc, so VCPUs will not have the TSC adjusted and may
13136 * lose cycle time. This isn't too big a deal, since the loss will be
13137 * uniform across all VCPUs (not to mention the scenario is extremely
13138 * unlikely). It is possible that a second hibernate recovery happens
13139 * much faster than a first, causing the observed TSC here to be
13140 * smaller; this would require additional padding adjustment, which is
13141 * why we set last_host_tsc to the local tsc observed here.
13142 *
13143 * N.B. - this code below runs only on platforms with reliable TSC,
13144 * as that is the only way backwards_tsc is set above. Also note
13145 * that this runs for ALL vcpus, which is not a bug; all VCPUs should
13146 * have the same delta_cyc adjustment applied if backwards_tsc
13147 * is detected. Note further, this adjustment is only done once,
13148 * as we reset last_host_tsc on all VCPUs to stop this from being
13149 * called multiple times (one for each physical CPU bringup).
13150 *
13151 * Platforms with unreliable TSCs don't have to deal with this, they
13152 * will be compensated by the logic in vcpu_load, which sets the TSC to
13153 * catchup mode. This will catchup all VCPUs to real time, but cannot
13154 * guarantee that they stay in perfect synchronization.
13155 */
13156 if (backwards_tsc) {
13157 u64 delta_cyc = max_tsc - local_tsc;
13158 list_for_each_entry(kvm, &vm_list, vm_list) {
13159 kvm->arch.backwards_tsc_observed = true;
13160 kvm_for_each_vcpu(i, vcpu, kvm) {
13161 vcpu->arch.tsc_offset_adjustment += delta_cyc;
13162 vcpu->arch.last_host_tsc = local_tsc;
13163 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
13164 }
13165
13166 /*
13167 * We have to disable TSC offset matching; if you were
13168 * booting a VM while issuing an S4 host suspend, you
13169 * may have a problem. Solving this issue is left as an
13170 * exercise to the reader.
13171 */
13172 kvm->arch.last_tsc_nsec = 0;
13173 kvm->arch.last_tsc_write = 0;
13174 }
13175
13176 }
13177 return 0;
13178 }
13179
13180 void kvm_arch_disable_virtualization_cpu(void)
13181 {
13182 kvm_x86_call(disable_virtualization_cpu)();
13183
13184 /*
13185 * Leave the user-return notifiers as-is when disabling virtualization
13186 * for reboot, i.e. when disabling via IPI function call, and instead
13187 * pin kvm.ko (if it's a module) to defend against use-after-free (in
13188 * the *very* unlikely scenario module unload is racing with reboot).
13189 * On a forced reboot, tasks aren't frozen before shutdown, and so KVM
13190 * could be actively modifying user-return MSR state when the IPI to
13191 * disable virtualization arrives. Handle the extreme edge case here
13192 * instead of trying to account for it in the normal flows.
13193 */
13194 if (in_task() || WARN_ON_ONCE(!kvm_rebooting))
13195 drop_user_return_notifiers();
13196 else
13197 __module_get(THIS_MODULE);
13198 }
13199
13200 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
13201 {
13202 return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id;
13203 }
13204 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_is_reset_bsp);
13205
13206 bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
13207 {
13208 return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0;
13209 }
13210
13211 void kvm_arch_free_vm(struct kvm *kvm)
13212 {
13213 #if IS_ENABLED(CONFIG_HYPERV)
13214 kfree(kvm->arch.hv_pa_pg);
13215 #endif
13216 __kvm_arch_free_vm(kvm);
13217 }
13218
13219
13220 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
13221 {
13222 int ret;
13223 unsigned long flags;
13224
13225 if (!kvm_is_vm_type_supported(type))
13226 return -EINVAL;
13227
13228 kvm->arch.vm_type = type;
13229 kvm->arch.has_private_mem =
13230 (type == KVM_X86_SW_PROTECTED_VM);
13231 /* Decided by the vendor code for other VM types. */
13232 kvm->arch.pre_fault_allowed =
13233 type == KVM_X86_DEFAULT_VM || type == KVM_X86_SW_PROTECTED_VM;
13234 kvm->arch.disabled_quirks = kvm_caps.inapplicable_quirks & kvm_caps.supported_quirks;
13235
13236 ret = kvm_page_track_init(kvm);
13237 if (ret)
13238 goto out;
13239
13240 ret = kvm_mmu_init_vm(kvm);
13241 if (ret)
13242 goto out_cleanup_page_track;
13243
13244 ret = kvm_x86_call(vm_init)(kvm);
13245 if (ret)
13246 goto out_uninit_mmu;
13247
13248 atomic_set(&kvm->arch.noncoherent_dma_count, 0);
13249
13250 raw_spin_lock_init(&kvm->arch.tsc_write_lock);
13251 mutex_init(&kvm->arch.apic_map_lock);
13252 seqcount_raw_spinlock_init(&kvm->arch.pvclock_sc, &kvm->arch.tsc_write_lock);
13253 kvm->arch.kvmclock_offset = -get_kvmclock_base_ns();
13254
13255 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
13256 pvclock_update_vm_gtod_copy(kvm);
13257 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
13258
13259 kvm->arch.default_tsc_khz = max_tsc_khz ? : tsc_khz;
13260 kvm->arch.apic_bus_cycle_ns = APIC_BUS_CYCLE_NS_DEFAULT;
13261 kvm->arch.guest_can_read_msr_platform_info = true;
13262 kvm->arch.enable_pmu = enable_pmu;
13263
13264 #if IS_ENABLED(CONFIG_HYPERV)
13265 spin_lock_init(&kvm->arch.hv_root_tdp_lock);
13266 kvm->arch.hv_root_tdp = INVALID_PAGE;
13267 #endif
13268
13269 kvm_apicv_init(kvm);
13270 kvm_hv_init_vm(kvm);
13271 kvm_xen_init_vm(kvm);
13272
13273 if (ignore_msrs && !report_ignored_msrs) {
13274 pr_warn_once("Running KVM with ignore_msrs=1 and report_ignored_msrs=0 is not\n"
13275 "a supported configuration. Lying to the guest about the existence of MSRs\n"
13276 "may cause the guest operating system to hang or produce errors. If a guest\n"
13277 "does not run without ignore_msrs=1, please report it to kvm@vger.kernel.org.\n");
13278 }
13279
13280 once_init(&kvm->arch.nx_once);
13281 return 0;
13282
13283 out_uninit_mmu:
13284 kvm_mmu_uninit_vm(kvm);
13285 out_cleanup_page_track:
13286 kvm_page_track_cleanup(kvm);
13287 out:
13288 return ret;
13289 }
13290
13291 /**
13292 * __x86_set_memory_region: Setup KVM internal memory slot
13293 *
13294 * @kvm: the kvm pointer to the VM.
13295 * @id: the slot ID to setup.
13296 * @gpa: the GPA to install the slot (unused when @size == 0).
13297 * @size: the size of the slot. Set to zero to uninstall a slot.
13298 *
13299 * This function helps to setup a KVM internal memory slot. Specify
13300 * @size > 0 to install a new slot, while @size == 0 to uninstall a
13301 * slot. The return code can be one of the following:
13302 *
13303 * HVA: on success (uninstall will return a bogus HVA)
13304 * -errno: on error
13305 *
13306 * The caller should always use IS_ERR() to check the return value
13307 * before use. Note, the KVM internal memory slots are guaranteed to
13308 * remain valid and unchanged until the VM is destroyed, i.e., the
13309 * GPA->HVA translation will not change. However, the HVA is a user
13310 * address, i.e. its accessibility is not guaranteed, and must be
13311 * accessed via __copy_{to,from}_user().
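 *
 * Illustrative call (a sketch; vendor code installs internal slots this way,
 * e.g. the TSS slot):
 *
 *	void __user *hva;
 *
 *	hva = __x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr,
 *				      PAGE_SIZE * 3);
 *	if (IS_ERR(hva))
 *		return PTR_ERR(hva);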
13312 */
13313 void __user * __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
13314 u32 size)
13315 {
13316 int i, r;
13317 unsigned long hva, old_npages;
13318 struct kvm_memslots *slots = kvm_memslots(kvm);
13319 struct kvm_memory_slot *slot;
13320
13321 lockdep_assert_held(&kvm->slots_lock);
13322
13323 if (WARN_ON(id >= KVM_MEM_SLOTS_NUM))
13324 return ERR_PTR_USR(-EINVAL);
13325
13326 slot = id_to_memslot(slots, id);
13327 if (size) {
13328 if (slot && slot->npages)
13329 return ERR_PTR_USR(-EEXIST);
13330
13331 /*
13332 * MAP_SHARED to prevent internal slot pages from being moved
13333 * by fork()/COW.
13334 */
13335 hva = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE,
13336 MAP_SHARED | MAP_ANONYMOUS, 0);
13337 if (IS_ERR_VALUE(hva))
13338 return (void __user *)hva;
13339 } else {
13340 if (!slot || !slot->npages)
13341 return NULL;
13342
13343 old_npages = slot->npages;
13344 hva = slot->userspace_addr;
13345 }
13346
13347 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
13348 struct kvm_userspace_memory_region2 m;
13349
13350 m.slot = id | (i << 16);
13351 m.flags = 0;
13352 m.guest_phys_addr = gpa;
13353 m.userspace_addr = hva;
13354 m.memory_size = size;
13355 r = kvm_set_internal_memslot(kvm, &m);
13356 if (r < 0)
13357 return ERR_PTR_USR(r);
13358 }
13359
13360 if (!size)
13361 vm_munmap(hva, old_npages * PAGE_SIZE);
13362
13363 return (void __user *)hva;
13364 }
13365 EXPORT_SYMBOL_FOR_KVM_INTERNAL(__x86_set_memory_region);
13366
13367 void kvm_arch_pre_destroy_vm(struct kvm *kvm)
13368 {
13369 /*
13370 * Stop all background workers and kthreads before destroying vCPUs, as
13371 * iterating over vCPUs in a different task while vCPUs are being freed
13372 * is unsafe, i.e. will lead to use-after-free. The PIT also needs to
13373 * be stopped before IRQ routing is freed.
13374 */
13375 #ifdef CONFIG_KVM_IOAPIC
13376 kvm_free_pit(kvm);
13377 #endif
13378
13379 kvm_mmu_pre_destroy_vm(kvm);
13380 kvm_x86_call(vm_pre_destroy)(kvm);
13381 }
13382
13383 void kvm_arch_destroy_vm(struct kvm *kvm)
13384 {
13385 if (current->mm == kvm->mm) {
13386 /*
13387 * Free memory regions allocated on behalf of userspace,
13388 * unless the memory map has changed due to process exit
13389 * or fd copying.
13390 */
13391 mutex_lock(&kvm->slots_lock);
13392 __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
13393 0, 0);
13394 __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
13395 0, 0);
13396 __x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
13397 mutex_unlock(&kvm->slots_lock);
13398 }
13399 if (kvm->arch.created_mediated_pmu)
13400 perf_release_mediated_pmu();
13401 kvm_destroy_vcpus(kvm);
13402 kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1));
13403 #ifdef CONFIG_KVM_IOAPIC
13404 kvm_pic_destroy(kvm);
13405 kvm_ioapic_destroy(kvm);
13406 #endif
13407 kvfree(rcu_dereference_check(kvm->arch.apic_map, 1));
13408 kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1));
13409 kvm_mmu_uninit_vm(kvm);
13410 kvm_page_track_cleanup(kvm);
13411 kvm_xen_destroy_vm(kvm);
13412 kvm_hv_destroy_vm(kvm);
13413 kvm_x86_call(vm_destroy)(kvm);
13414 }
13415
13416 static void memslot_rmap_free(struct kvm_memory_slot *slot)
13417 {
13418 int i;
13419
13420 for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
13421 vfree(slot->arch.rmap[i]);
13422 slot->arch.rmap[i] = NULL;
13423 }
13424 }
13425
13426 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
13427 {
13428 int i;
13429
13430 memslot_rmap_free(slot);
13431
13432 for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) {
13433 vfree(slot->arch.lpage_info[i - 1]);
13434 slot->arch.lpage_info[i - 1] = NULL;
13435 }
13436
13437 kvm_page_track_free_memslot(slot);
13438 }
13439
13440 int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages)
13441 {
13442 const int sz = sizeof(*slot->arch.rmap[0]);
13443 int i;
13444
13445 for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
13446 int level = i + 1;
13447 int lpages = __kvm_mmu_slot_lpages(slot, npages, level);
13448
13449 if (slot->arch.rmap[i])
13450 continue;
13451
13452 slot->arch.rmap[i] = __vcalloc(lpages, sz, GFP_KERNEL_ACCOUNT);
13453 if (!slot->arch.rmap[i]) {
13454 memslot_rmap_free(slot);
13455 return -ENOMEM;
13456 }
13457 }
13458
13459 return 0;
13460 }
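/*
 * Sizing example for the allocation above (assuming a naturally aligned
 * 1GiB slot): __kvm_mmu_slot_lpages() yields 262144, 512, and 1 rmap
 * entries for the 4KiB, 2MiB, and 1GiB levels respectively.
 */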
13461
13462 static int kvm_alloc_memslot_metadata(struct kvm *kvm,
13463 struct kvm_memory_slot *slot)
13464 {
13465 unsigned long npages = slot->npages;
13466 int i, r;
13467
13468 /*
13469 * Clear out the previous array pointers for the KVM_MR_MOVE case. The
13470 * old arrays will be freed by kvm_set_memory_region() if installing
13471 * the new memslot is successful.
13472 */
13473 memset(&slot->arch, 0, sizeof(slot->arch));
13474
13475 if (kvm_memslots_have_rmaps(kvm)) {
13476 r = memslot_rmap_alloc(slot, npages);
13477 if (r)
13478 return r;
13479 }
13480
13481 for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) {
13482 struct kvm_lpage_info *linfo;
13483 unsigned long ugfn;
13484 int lpages;
13485 int level = i + 1;
13486
13487 lpages = __kvm_mmu_slot_lpages(slot, npages, level);
13488
13489 linfo = __vcalloc(lpages, sizeof(*linfo), GFP_KERNEL_ACCOUNT);
13490 if (!linfo)
13491 goto out_free;
13492
13493 slot->arch.lpage_info[i - 1] = linfo;
13494
13495 if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
13496 linfo[0].disallow_lpage = 1;
13497 if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
13498 linfo[lpages - 1].disallow_lpage = 1;
13499 ugfn = slot->userspace_addr >> PAGE_SHIFT;
13500 /*
13501 * If the gfn and userspace address are not aligned wrt each
13502 * other, disable large page support for this slot.
13503 */
13504 if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1)) {
13505 unsigned long j;
13506
13507 for (j = 0; j < lpages; ++j)
13508 linfo[j].disallow_lpage = 1;
13509 }
13510 }
13511
13512 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
13513 kvm_mmu_init_memslot_memory_attributes(kvm, slot);
13514 #endif
13515
13516 if (kvm_page_track_create_memslot(kvm, slot, npages))
13517 goto out_free;
13518
13519 return 0;
13520
13521 out_free:
13522 memslot_rmap_free(slot);
13523
13524 for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) {
13525 vfree(slot->arch.lpage_info[i - 1]);
13526 slot->arch.lpage_info[i - 1] = NULL;
13527 }
13528 return -ENOMEM;
13529 }
13530
13531 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
13532 {
13533 struct kvm_vcpu *vcpu;
13534 unsigned long i;
13535
13536 /*
13537 * memslots->generation has been incremented, thus the MMIO
13538 * generation may have reached its maximum value.
13539 */
13540 kvm_mmu_invalidate_mmio_sptes(kvm, gen);
13541
13542 /* Force re-initialization of steal_time cache */
13543 kvm_for_each_vcpu(i, vcpu, kvm)
13544 kvm_vcpu_kick(vcpu);
13545 }
13546
13547 int kvm_arch_prepare_memory_region(struct kvm *kvm,
13548 const struct kvm_memory_slot *old,
13549 struct kvm_memory_slot *new,
13550 enum kvm_mr_change change)
13551 {
13552 /*
13553 * KVM doesn't support moving memslots when there are external page
13554 * trackers attached to the VM, i.e. if KVMGT is in use.
13555 */
13556 if (change == KVM_MR_MOVE && kvm_page_track_has_external_user(kvm))
13557 return -EINVAL;
13558
13559 if (change == KVM_MR_CREATE || change == KVM_MR_MOVE) {
13560 if ((new->base_gfn + new->npages - 1) > kvm_mmu_max_gfn())
13561 return -EINVAL;
13562
13563 if (kvm_is_gfn_alias(kvm, new->base_gfn + new->npages - 1))
13564 return -EINVAL;
13565
13566 return kvm_alloc_memslot_metadata(kvm, new);
13567 }
13568
13569 if (change == KVM_MR_FLAGS_ONLY)
13570 memcpy(&new->arch, &old->arch, sizeof(old->arch));
13571 else if (WARN_ON_ONCE(change != KVM_MR_DELETE))
13572 return -EIO;
13573
13574 return 0;
13575 }
13576
13577
13578 static void kvm_mmu_update_cpu_dirty_logging(struct kvm *kvm, bool enable)
13579 {
13580 int nr_slots;
13581
13582 if (!kvm->arch.cpu_dirty_log_size)
13583 return;
13584
13585 nr_slots = atomic_read(&kvm->nr_memslots_dirty_logging);
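	/*
	 * Kick vCPUs only on the 0->1 and 1->0 transitions in the number of
	 * dirty-logged memslots; intermediate transitions don't change
	 * whether hardware CPU dirty logging (e.g. PML) must be active.
	 */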
13586 if ((enable && nr_slots == 1) || !nr_slots)
13587 kvm_make_all_cpus_request(kvm, KVM_REQ_UPDATE_CPU_DIRTY_LOGGING);
13588 }
13589
13590 static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
13591 struct kvm_memory_slot *old,
13592 const struct kvm_memory_slot *new,
13593 enum kvm_mr_change change)
13594 {
13595 u32 old_flags = old ? old->flags : 0;
13596 u32 new_flags = new ? new->flags : 0;
13597 bool log_dirty_pages = new_flags & KVM_MEM_LOG_DIRTY_PAGES;
13598
13599 /*
13600 * Update CPU dirty logging if dirty logging is being toggled. This
13601 * applies to all operations.
13602 */
13603 if ((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES)
13604 kvm_mmu_update_cpu_dirty_logging(kvm, log_dirty_pages);
13605
13606 /*
13607 * Nothing more to do for RO slots (which can't be dirtied and can't be
13608 * made writable) or CREATE/MOVE/DELETE of a slot.
13609 *
13610 * For a memslot with dirty logging disabled:
13611 * CREATE: No dirty mappings will already exist.
13612 * MOVE/DELETE: The old mappings will already have been cleaned up by
13613 * kvm_arch_flush_shadow_memslot()
13614 *
13615 * For a memslot with dirty logging enabled:
13616 * CREATE: No shadow pages exist, thus nothing to write-protect
13617 * and no dirty bits to clear.
13618 * MOVE/DELETE: The old mappings will already have been cleaned up by
13619 * kvm_arch_flush_shadow_memslot().
13620 */
13621 if ((change != KVM_MR_FLAGS_ONLY) || (new_flags & KVM_MEM_READONLY))
13622 return;
13623
13624 /*
13625 * READONLY and non-flags changes were filtered out above, and the only
13626 * other flag is LOG_DIRTY_PAGES, i.e. something is wrong if dirty
13627 * logging isn't being toggled on or off.
13628 */
13629 if (WARN_ON_ONCE(!((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES)))
13630 return;
13631
13632 if (!log_dirty_pages) {
13633 /*
13634 * Recover huge page mappings in the slot now that dirty logging
13635 * is disabled, i.e. now that KVM does not have to track guest
13636 * writes at 4KiB granularity.
13637 *
13638 * Dirty logging might be disabled by userspace if an ongoing VM
13639 * live migration is cancelled and the VM must continue running
13640 * on the source.
13641 */
13642 kvm_mmu_recover_huge_pages(kvm, new);
13643 } else {
13644 /*
13645 * Initially-all-set does not require write protecting any page,
13646 * because they're all assumed to be dirty.
13647 */
13648 if (kvm_dirty_log_manual_protect_and_init_set(kvm))
13649 return;
13650
13651 if (READ_ONCE(eager_page_split))
13652 kvm_mmu_slot_try_split_huge_pages(kvm, new, PG_LEVEL_4K);
13653
13654 if (kvm->arch.cpu_dirty_log_size) {
13655 kvm_mmu_slot_leaf_clear_dirty(kvm, new);
13656 kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_2M);
13657 } else {
13658 kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_4K);
13659 }
13660
13661 /*
13662 * Unconditionally flush the TLBs after enabling dirty logging.
13663 * A flush is almost always going to be necessary (see below),
13664 * and unconditionally flushing allows the helpers to omit
13665 * the subtly complex checks when removing write access.
13666 *
13667 * Do the flush outside of mmu_lock to reduce the amount of
13668 * time mmu_lock is held. Flushing after dropping mmu_lock is
13669 * safe as KVM only needs to guarantee the slot is fully
13670 * write-protected before returning to userspace, i.e. before
13671 * userspace can consume the dirty status.
13672 *
13673 * Flushing outside of mmu_lock requires KVM to be careful when
13674 * making decisions based on writable status of an SPTE, e.g. a
13675 * !writable SPTE doesn't guarantee a CPU can't perform writes.
13676 *
13677 * Specifically, KVM also write-protects guest page tables to
13678 * monitor changes when using shadow paging, and must guarantee
13679 * no CPUs can write to those page before mmu_lock is dropped.
13680 * Because CPUs may have stale TLB entries at this point, a
13681 * !writable SPTE doesn't guarantee CPUs can't perform writes.
13682 *
13683 * KVM also allows making SPTES writable outside of mmu_lock,
13684 * e.g. to allow dirty logging without taking mmu_lock.
13685 *
13686 * To handle these scenarios, KVM uses a separate software-only
13687 * bit (MMU-writable) to track if a SPTE is !writable due to
13688 * a guest page table being write-protected (KVM clears the
13689 * MMU-writable flag when write-protecting for shadow paging).
13690 *
13691 * The use of MMU-writable is also the primary motivation for
13692 * the unconditional flush. Because KVM must guarantee that a
13693 * CPU doesn't contain stale, writable TLB entries for a
13694 * !MMU-writable SPTE, KVM must flush if it encounters any
13695 * MMU-writable SPTE regardless of whether the actual hardware
13696 * writable bit was set. I.e. KVM is almost guaranteed to need
13697 * to flush, while unconditionally flushing allows the "remove
13698 * write access" helpers to ignore MMU-writable entirely.
13699 *
13700 * See is_writable_pte() for more details (the case involving
13701 * access-tracked SPTEs is particularly relevant).
13702 */
13703 kvm_flush_remote_tlbs_memslot(kvm, new);
13704 }
13705 }
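
/*
 * The KVM_MR_FLAGS_ONLY handling above is typically driven by live migration
 * in userspace.  Illustrative sketch of the VMM side (not KVM code; vm_fd,
 * host_mem and mem_size are placeholder names):
 *
 *	struct kvm_userspace_memory_region region = {
 *		.slot            = 0,
 *		.flags           = KVM_MEM_LOG_DIRTY_PAGES,
 *		.guest_phys_addr = 0,
 *		.memory_size     = mem_size,
 *		.userspace_addr  = (__u64)host_mem,
 *	};
 *
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);  // enable dirty logging
 *	...                                                  // KVM_GET_DIRTY_LOG rounds
 *	region.flags = 0;
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);   // migration cancelled
 */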
13706
13707 void kvm_arch_commit_memory_region(struct kvm *kvm,
13708 struct kvm_memory_slot *old,
13709 const struct kvm_memory_slot *new,
13710 enum kvm_mr_change change)
13711 {
13712 if (change == KVM_MR_DELETE)
13713 kvm_page_track_delete_slot(kvm, old);
13714
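/*
 * Unless userspace has pinned the limit via KVM_SET_NR_MMU_PAGES, recompute
 * the cap on shadow MMU pages as a fixed fraction of the total number of
 * memslot pages (see KVM_MEMSLOT_PAGES_TO_MMU_PAGES_RATIO), with a floor of
 * KVM_MIN_ALLOC_MMU_PAGES.
 */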
13715 if (!kvm->arch.n_requested_mmu_pages &&
13716 (change == KVM_MR_CREATE || change == KVM_MR_DELETE)) {
13717 unsigned long nr_mmu_pages;
13718
13719 nr_mmu_pages = kvm->nr_memslot_pages / KVM_MEMSLOT_PAGES_TO_MMU_PAGES_RATIO;
13720 nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES);
13721 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
13722 }
13723
13724 kvm_mmu_slot_apply_flags(kvm, old, new, change);
13725
13726 /* Free the arrays associated with the old memslot. */
13727 if (change == KVM_MR_MOVE)
13728 kvm_arch_free_memslot(kvm, old);
13729 }
13730
13731 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
13732 {
13733 WARN_ON_ONCE(!kvm_arch_pmi_in_guest(vcpu));
13734
13735 if (vcpu->arch.guest_state_protected)
13736 return true;
13737
13738 return kvm_x86_call(get_cpl)(vcpu) == 0;
13739 }
13740
13741 unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
13742 {
13743 WARN_ON_ONCE(!kvm_arch_pmi_in_guest(vcpu));
13744
13745 if (vcpu->arch.guest_state_protected)
13746 return 0;
13747
13748 return kvm_rip_read(vcpu);
13749 }
13750
13751 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
13752 {
13753 return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
13754 }
13755
13756 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
13757 {
13758 return kvm_x86_call(interrupt_allowed)(vcpu, false);
13759 }
13760
13761 unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu)
13762 {
13763 /* Can't read the RIP when guest state is protected, just return 0 */
13764 if (vcpu->arch.guest_state_protected)
13765 return 0;
13766
13767 if (is_64_bit_mode(vcpu))
13768 return kvm_rip_read(vcpu);
13769 return (u32)(get_segment_base(vcpu, VCPU_SREG_CS) +
13770 kvm_rip_read(vcpu));
13771 }
13772 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_get_linear_rip);
13773
13774 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
13775 {
13776 return kvm_get_linear_rip(vcpu) == linear_rip;
13777 }
13778 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_is_linear_rip);
13779
13780 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
13781 {
13782 unsigned long rflags;
13783
13784 rflags = kvm_x86_call(get_rflags)(vcpu);
13785 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
13786 rflags &= ~X86_EFLAGS_TF;
13787 return rflags;
13788 }
13789 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_get_rflags);
13790
13791 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
13792 {
13793 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
13794 kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
13795 rflags |= X86_EFLAGS_TF;
13796 kvm_x86_call(set_rflags)(vcpu, rflags);
13797 }
13798
13799 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
13800 {
13801 __kvm_set_rflags(vcpu, rflags);
13802 kvm_make_request(KVM_REQ_EVENT, vcpu);
13803 }
13804 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_rflags);
13805
13806 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
13807 {
13808 BUILD_BUG_ON(!is_power_of_2(ASYNC_PF_PER_VCPU));
13809
13810 return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
13811 }
13812
13813 static inline u32 kvm_async_pf_next_probe(u32 key)
13814 {
13815 return (key + 1) & (ASYNC_PF_PER_VCPU - 1);
13816 }
13817
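/*
 * Outstanding "page not present" async #PF tokens are tracked per-vCPU in
 * vcpu->arch.apf.gfns[], a small power-of-2 open-addressed hash table.
 * Empty slots hold ~0; collisions are resolved by linear probing via
 * kvm_async_pf_next_probe().
 */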
13818 static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
13819 {
13820 u32 key = kvm_async_pf_hash_fn(gfn);
13821
13822 while (vcpu->arch.apf.gfns[key] != ~0)
13823 key = kvm_async_pf_next_probe(key);
13824
13825 vcpu->arch.apf.gfns[key] = gfn;
13826 }
13827
13828 static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
13829 {
13830 int i;
13831 u32 key = kvm_async_pf_hash_fn(gfn);
13832
13833 for (i = 0; i < ASYNC_PF_PER_VCPU &&
13834 (vcpu->arch.apf.gfns[key] != gfn &&
13835 vcpu->arch.apf.gfns[key] != ~0); i++)
13836 key = kvm_async_pf_next_probe(key);
13837
13838 return key;
13839 }
13840
13841 bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
13842 {
13843 return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
13844 }
13845
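/*
 * Deletion uses backward-shift rehashing (essentially Knuth's deletion
 * algorithm for linear probing) instead of tombstones: after emptying slot i,
 * walk the probe chain and pull back any entry whose home slot k does not lie
 * cyclically in ]i, j], so later lookups never stop early at a hole in the
 * middle of a chain.
 */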
13846 static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
13847 {
13848 u32 i, j, k;
13849
13850 i = j = kvm_async_pf_gfn_slot(vcpu, gfn);
13851
13852 if (WARN_ON_ONCE(vcpu->arch.apf.gfns[i] != gfn))
13853 return;
13854
13855 while (true) {
13856 vcpu->arch.apf.gfns[i] = ~0;
13857 do {
13858 j = kvm_async_pf_next_probe(j);
13859 if (vcpu->arch.apf.gfns[j] == ~0)
13860 return;
13861 k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]);
13862 /*
13863 * k lies cyclically in ]i,j]
13864 * | i.k.j |
13865 * |....j i.k.| or |.k..j i...|
13866 */
13867 } while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
13868 vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j];
13869 i = j;
13870 }
13871 }
13872
13873 static inline int apf_put_user_notpresent(struct kvm_vcpu *vcpu)
13874 {
13875 u32 reason = KVM_PV_REASON_PAGE_NOT_PRESENT;
13876
13877 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &reason,
13878 sizeof(reason));
13879 }
13880
13881 static inline int apf_put_user_ready(struct kvm_vcpu *vcpu, u32 token)
13882 {
13883 unsigned int offset = offsetof(struct kvm_vcpu_pv_apf_data, token);
13884
13885 return kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data,
13886 &token, offset, sizeof(token));
13887 }
13888
13889 static inline bool apf_pageready_slot_free(struct kvm_vcpu *vcpu)
13890 {
13891 unsigned int offset = offsetof(struct kvm_vcpu_pv_apf_data, token);
13892 u32 val;
13893
13894 if (kvm_read_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data,
13895 &val, offset, sizeof(val)))
13896 return false;
13897
13898 return !val;
13899 }
13900
13901 static bool kvm_can_deliver_async_pf(struct kvm_vcpu *vcpu)
13902 {
13904 if (!kvm_pv_async_pf_enabled(vcpu))
13905 return false;
13906
13907 if (!vcpu->arch.apf.send_always &&
13908 (vcpu->arch.guest_state_protected || !kvm_x86_call(get_cpl)(vcpu)))
13909 return false;
13910
13911 if (is_guest_mode(vcpu)) {
13912 /*
13913 * L1 needs to opt into the special #PF vmexits that are
13914 * used to deliver async page faults.
13915 */
13916 return vcpu->arch.apf.delivery_as_pf_vmexit;
13917 } else {
13918 /*
13919 * Play it safe in case the guest temporarily disables paging.
13920 * The real mode IDT in particular is unlikely to have a #PF
13921 * exception handler set up.
13922 */
13923 return is_paging(vcpu);
13924 }
13925 }
13926
13927 bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
13928 {
13929 if (unlikely(!lapic_in_kernel(vcpu) ||
13930 kvm_event_needs_reinjection(vcpu) ||
13931 kvm_is_exception_pending(vcpu)))
13932 return false;
13933
13934 if (kvm_hlt_in_guest(vcpu->kvm) && !kvm_can_deliver_async_pf(vcpu))
13935 return false;
13936
13937 /*
13938 * If interrupts are off we cannot even use an artificial
13939 * halt state.
13940 */
13941 return kvm_arch_interrupt_allowed(vcpu);
13942 }
13943
13944 bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
13945 struct kvm_async_pf *work)
13946 {
13947 struct x86_exception fault;
13948
13949 trace_kvm_async_pf_not_present(work->arch.token, work->cr2_or_gpa);
13950 kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
13951
13952 if (kvm_can_deliver_async_pf(vcpu) &&
13953 !apf_put_user_notpresent(vcpu)) {
13954 fault.vector = PF_VECTOR;
13955 fault.error_code_valid = true;
13956 fault.error_code = 0;
13957 fault.nested_page_fault = false;
13958 fault.address = work->arch.token;
13959 fault.async_page_fault = true;
13960 kvm_inject_page_fault(vcpu, &fault);
13961 return true;
13962 } else {
13963 /*
13964 * It is not possible to deliver a paravirtualized asynchronous
13965 * page fault, but putting the guest in an artificial halt state
13966 * can be beneficial nevertheless: if an interrupt arrives, we
13967 * can deliver it timely and perhaps the guest will schedule
13968 * another process. When the instruction that triggered a page
13969 * fault is retried, hopefully the page will be ready in the host.
13970 */
13971 kvm_make_request(KVM_REQ_APF_HALT, vcpu);
13972 return false;
13973 }
13974 }
13975
13976 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
13977 struct kvm_async_pf *work)
13978 {
13979 struct kvm_lapic_irq irq = {
13980 .delivery_mode = APIC_DM_FIXED,
13981 .vector = vcpu->arch.apf.vec
13982 };
13983
13984 if (work->wakeup_all)
13985 work->arch.token = ~0; /* broadcast wakeup */
13986 else
13987 kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
13988 trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa);
13989
13990 if ((work->wakeup_all || work->notpresent_injected) &&
13991 kvm_pv_async_pf_enabled(vcpu) &&
13992 !apf_put_user_ready(vcpu, work->arch.token)) {
13993 WRITE_ONCE(vcpu->arch.apf.pageready_pending, true);
13994 kvm_apic_set_irq(vcpu, &irq, NULL);
13995 }
13996
13997 vcpu->arch.apf.halted = false;
13998 kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE);
13999 }
14000
14001 void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu)
14002 {
14003 kvm_make_request(KVM_REQ_APF_READY, vcpu);
14004
14005 /* Pairs with smp_store_mb() in kvm_set_msr_common(). */
14006 smp_mb__after_atomic();
14007
14008 if (!READ_ONCE(vcpu->arch.apf.pageready_pending))
14009 kvm_vcpu_kick(vcpu);
14010 }
14011
14012 bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
14013 {
14014 if (!kvm_pv_async_pf_enabled(vcpu))
14015 return true;
14016 else
14017 return kvm_lapic_enabled(vcpu) && apf_pageready_slot_free(vcpu);
14018 }
14019
14020 static void kvm_noncoherent_dma_assignment_start_or_stop(struct kvm *kvm)
14021 {
14022 /*
14023 * Non-coherent DMA assignment and de-assignment may affect whether or
14024 * not KVM honors guest PAT, and thus may cause changes in EPT SPTEs
14025 * due to toggling the "ignore PAT" bit. Zap all SPTEs when the first
14026 * (or last) non-coherent device is (un)registered so that new SPTEs
14027 * with the correct "ignore guest PAT" setting are created.
14028 *
14029 * If KVM always honors guest PAT, however, there is nothing to do.
14030 */
14031 if (kvm_check_has_quirk(kvm, KVM_X86_QUIRK_IGNORE_GUEST_PAT))
14032 kvm_zap_gfn_range(kvm, gpa_to_gfn(0), gpa_to_gfn(~0ULL));
14033 }
14034
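/*
 * Only the 0 <-> 1 transitions of the per-VM count matter: attaching or
 * detaching additional non-coherent devices doesn't change whether KVM honors
 * guest PAT, so the zap above is done only for the first registration and the
 * last deregistration.
 */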
14035 void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
14036 {
14037 if (atomic_inc_return(&kvm->arch.noncoherent_dma_count) == 1)
14038 kvm_noncoherent_dma_assignment_start_or_stop(kvm);
14039 }
14040
14041 void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
14042 {
14043 if (!atomic_dec_return(&kvm->arch.noncoherent_dma_count))
14044 kvm_noncoherent_dma_assignment_start_or_stop(kvm);
14045 }
14046
14047 bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
14048 {
14049 return atomic_read(&kvm->arch.noncoherent_dma_count);
14050 }
14051 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_arch_has_noncoherent_dma);
14052
14053 bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
14054 {
14055 return (vcpu->arch.msr_kvm_poll_control & 1) == 0;
14056 }
14057
14058 #ifdef CONFIG_KVM_GUEST_MEMFD
14059 /*
14060 * KVM doesn't yet support initializing guest_memfd memory as shared for VMs
14061 * with private memory (the private vs. shared tracking needs to be moved into
14062 * guest_memfd).
14063 */
14064 bool kvm_arch_supports_gmem_init_shared(struct kvm *kvm)
14065 {
14066 return !kvm_arch_has_private_mem(kvm);
14067 }
14068
14069 #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
14070 int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order)
14071 {
14072 return kvm_x86_call(gmem_prepare)(kvm, pfn, gfn, max_order);
14073 }
14074 #endif
14075
14076 #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
14077 void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end)
14078 {
14079 kvm_x86_call(gmem_invalidate)(start, end);
14080 }
14081 #endif
14082 #endif
14083
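/*
 * Probe whether the host CPU accepts @value in IA32_SPEC_CTRL by writing it
 * with a fault-safe MSR access (and restoring the old value), all with IRQs
 * disabled.  Returns 0 if the write is accepted, non-zero otherwise.
 *
 * Illustrative caller-side sketch, mirroring how vendor MSR handlers
 * typically filter a guest WRMSR (not a verbatim copy of that code):
 *
 *	if (kvm_spec_ctrl_test_value(data))
 *		return 1;	// reject the write; the caller injects #GP
 */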
14084 int kvm_spec_ctrl_test_value(u64 value)
14085 {
14086 /*
14087 * Test that setting IA32_SPEC_CTRL to the given value
14088 * is allowed by the host processor.
14089 */
14090
14091 u64 saved_value;
14092 unsigned long flags;
14093 int ret = 0;
14094
14095 local_irq_save(flags);
14096
14097 if (rdmsrq_safe(MSR_IA32_SPEC_CTRL, &saved_value))
14098 ret = 1;
14099 else if (wrmsrq_safe(MSR_IA32_SPEC_CTRL, value))
14100 ret = 1;
14101 else
14102 wrmsrq(MSR_IA32_SPEC_CTRL, saved_value);
14103
14104 local_irq_restore(flags);
14105
14106 return ret;
14107 }
14108 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_spec_ctrl_test_value);
14109
14110 void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code)
14111 {
14112 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
14113 struct x86_exception fault;
14114 u64 access = error_code &
14115 (PFERR_WRITE_MASK | PFERR_FETCH_MASK | PFERR_USER_MASK);
14116
14117 if (!(error_code & PFERR_PRESENT_MASK) ||
14118 mmu->gva_to_gpa(vcpu, mmu, gva, access, &fault) != INVALID_GPA) {
14119 /*
14120 * If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page
14121 * tables probably do not match the TLB. Just proceed
14122 * with the error code that the processor gave.
14123 */
14124 fault.vector = PF_VECTOR;
14125 fault.error_code_valid = true;
14126 fault.error_code = error_code;
14127 fault.nested_page_fault = false;
14128 fault.address = gva;
14129 fault.async_page_fault = false;
14130 }
14131 vcpu->arch.walk_mmu->inject_page_fault(vcpu, &fault);
14132 }
14133 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_fixup_and_inject_pf_error);
14134
14135 /*
14136 * Handles kvm_read/write_guest_virt*() result and either injects #PF or returns
14137 * KVM_EXIT_INTERNAL_ERROR for cases not currently handled by KVM. Return value
14138 * indicates whether exit to userspace is needed.
14139 */
14140 int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
14141 struct x86_exception *e)
14142 {
14143 if (r == X86EMUL_PROPAGATE_FAULT) {
14144 if (KVM_BUG_ON(!e, vcpu->kvm))
14145 return -EIO;
14146
14147 kvm_inject_emulated_page_fault(vcpu, e);
14148 return 1;
14149 }
14150
14151 /*
14152 * In case kvm_read/write_guest_virt*() failed with X86EMUL_IO_NEEDED
14153 * while handling a VMX instruction, KVM could have handled the request
14154 * correctly by exiting to userspace and performing I/O, but there
14155 * doesn't seem to be a real use case for such requests, so just return
14156 * KVM_EXIT_INTERNAL_ERROR for now.
14157 */
14158 kvm_prepare_emulation_failure_exit(vcpu);
14159
14160 return 0;
14161 }
14162 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_handle_memory_failure);
14163
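/*
 * Emulate INVPCID on behalf of vendor code.  Per the SDM, the 128-bit memory
 * operand carries the PCID in bits 11:0 of the low qword (bits 63:12
 * reserved) and the linear address in the high qword; @type comes from the
 * register operand and selects individual-address, single-context,
 * all-including-globals, or all-non-global invalidation.
 */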
14164 int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
14165 {
14166 bool pcid_enabled;
14167 struct x86_exception e;
14168 struct {
14169 u64 pcid;
14170 u64 gla;
14171 } operand;
14172 int r;
14173
14174 r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
14175 if (r != X86EMUL_CONTINUE)
14176 return kvm_handle_memory_failure(vcpu, r, &e);
14177
14178 if (operand.pcid >> 12 != 0) {
14179 kvm_inject_gp(vcpu, 0);
14180 return 1;
14181 }
14182
14183 pcid_enabled = kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE);
14184
14185 switch (type) {
14186 case INVPCID_TYPE_INDIV_ADDR:
14187 /*
14188 * LAM doesn't apply to addresses that are inputs to TLB
14189 * invalidation.
14190 */
14191 if ((!pcid_enabled && (operand.pcid != 0)) ||
14192 is_noncanonical_invlpg_address(operand.gla, vcpu)) {
14193 kvm_inject_gp(vcpu, 0);
14194 return 1;
14195 }
14196 kvm_mmu_invpcid_gva(vcpu, operand.gla, operand.pcid);
14197 return kvm_skip_emulated_instruction(vcpu);
14198
14199 case INVPCID_TYPE_SINGLE_CTXT:
14200 if (!pcid_enabled && (operand.pcid != 0)) {
14201 kvm_inject_gp(vcpu, 0);
14202 return 1;
14203 }
14204
14205 /*
14206 * When ERAPS is supported, invalidating a specific PCID clears
14207 * the RAP (Return Address Predictor).
14208 */
14209 if (guest_cpu_cap_has(vcpu, X86_FEATURE_ERAPS))
14210 kvm_register_is_dirty(vcpu, VCPU_EXREG_ERAPS);
14211
14212 kvm_invalidate_pcid(vcpu, operand.pcid);
14213 return kvm_skip_emulated_instruction(vcpu);
14214
14215 case INVPCID_TYPE_ALL_NON_GLOBAL:
14216 /*
14217 * Currently, KVM doesn't mark global entries in the shadow
14218 * page tables, so a non-global flush just degenerates to a
14219 * global flush. If needed, we could optimize this later by
14220 * keeping track of global entries in shadow page tables.
14221 */
14222
14223 fallthrough;
14224 case INVPCID_TYPE_ALL_INCL_GLOBAL:
14225 /*
14226 * Don't bother marking VCPU_EXREG_ERAPS dirty, SVM will take
14227 * care of doing so when emulating the full guest TLB flush
14228 * (the RAP is cleared on all implicit TLB flushes).
14229 */
14230 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
14231 return kvm_skip_emulated_instruction(vcpu);
14232
14233 default:
14234 kvm_inject_gp(vcpu, 0);
14235 return 1;
14236 }
14237 }
14238 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_handle_invpcid);
14239
14240 static int complete_sev_es_emulated_mmio(struct kvm_vcpu *vcpu)
14241 {
14242 struct kvm_run *run = vcpu->run;
14243 struct kvm_mmio_fragment *frag;
14244 unsigned int len;
14245
14246 BUG_ON(!vcpu->mmio_needed);
14247
14248 /* Complete previous fragment */
14249 frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment];
14250 len = min(8u, frag->len);
14251 if (!vcpu->mmio_is_write)
14252 memcpy(frag->data, run->mmio.data, len);
14253
14254 if (frag->len <= 8) {
14255 /* Switch to the next fragment. */
14256 frag++;
14257 vcpu->mmio_cur_fragment++;
14258 } else {
14259 /* Go forward to the next mmio piece. */
14260 frag->data += len;
14261 frag->gpa += len;
14262 frag->len -= len;
14263 }
14264
14265 if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) {
14266 vcpu->mmio_needed = 0;
14267
14268 // VMG change, at this point, we're always done
14269 // RIP has already been advanced
14270 return 1;
14271 }
14272
14273 // More MMIO is needed
14274 run->mmio.phys_addr = frag->gpa;
14275 run->mmio.len = min(8u, frag->len);
14276 run->mmio.is_write = vcpu->mmio_is_write;
14277 if (run->mmio.is_write)
14278 memcpy(run->mmio.data, frag->data, min(8u, frag->len));
14279 run->exit_reason = KVM_EXIT_MMIO;
14280
14281 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;
14282
14283 return 0;
14284 }
14285
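/*
 * SEV-ES MMIO: the guest's #VC handler passes data through the GHCB shared
 * buffer rather than having KVM decode the faulting instruction, so reads and
 * writes are emulated straight from that buffer.  Anything not claimed by an
 * in-kernel device is split into fragments of at most 8 bytes, each bounced
 * to userspace via KVM_EXIT_MMIO and resumed by
 * complete_sev_es_emulated_mmio() on re-entry.
 */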
14286 int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes,
14287 void *data)
14288 {
14289 int handled;
14290 struct kvm_mmio_fragment *frag;
14291
14292 if (!data)
14293 return -EINVAL;
14294
14295 handled = write_emultor.read_write_mmio(vcpu, gpa, bytes, data);
14296 if (handled == bytes)
14297 return 1;
14298
14299 bytes -= handled;
14300 gpa += handled;
14301 data += handled;
14302
14303 /* TODO: Check if the number of frags needs to be incremented. */
14304 frag = vcpu->mmio_fragments;
14305 vcpu->mmio_nr_fragments = 1;
14306 frag->len = bytes;
14307 frag->gpa = gpa;
14308 frag->data = data;
14309
14310 vcpu->mmio_needed = 1;
14311 vcpu->mmio_cur_fragment = 0;
14312
14313 vcpu->run->mmio.phys_addr = gpa;
14314 vcpu->run->mmio.len = min(8u, frag->len);
14315 vcpu->run->mmio.is_write = 1;
14316 memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
14317 vcpu->run->exit_reason = KVM_EXIT_MMIO;
14318
14319 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;
14320
14321 return 0;
14322 }
14323 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_sev_es_mmio_write);
14324
14325 int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes,
14326 void *data)
14327 {
14328 int handled;
14329 struct kvm_mmio_fragment *frag;
14330
14331 if (!data)
14332 return -EINVAL;
14333
14334 handled = read_emultor.read_write_mmio(vcpu, gpa, bytes, data);
14335 if (handled == bytes)
14336 return 1;
14337
14338 bytes -= handled;
14339 gpa += handled;
14340 data += handled;
14341
14342 /* TODO: Check if the number of frags needs to be incremented. */
14343 frag = vcpu->mmio_fragments;
14344 vcpu->mmio_nr_fragments = 1;
14345 frag->len = bytes;
14346 frag->gpa = gpa;
14347 frag->data = data;
14348
14349 vcpu->mmio_needed = 1;
14350 vcpu->mmio_cur_fragment = 0;
14351
14352 vcpu->run->mmio.phys_addr = gpa;
14353 vcpu->run->mmio.len = min(8u, frag->len);
14354 vcpu->run->mmio.is_write = 0;
14355 vcpu->run->exit_reason = KVM_EXIT_MMIO;
14356
14357 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;
14358
14359 return 0;
14360 }
14361 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_sev_es_mmio_read);
14362
14363 static void advance_sev_es_emulated_pio(struct kvm_vcpu *vcpu, unsigned count, int size)
14364 {
14365 vcpu->arch.sev_pio_count -= count;
14366 vcpu->arch.sev_pio_data += count * size;
14367 }
14368
14369 static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
14370 unsigned int port);
14371
14372 static int complete_sev_es_emulated_outs(struct kvm_vcpu *vcpu)
14373 {
14374 int size = vcpu->arch.pio.size;
14375 int port = vcpu->arch.pio.port;
14376
14377 vcpu->arch.pio.count = 0;
14378 if (vcpu->arch.sev_pio_count)
14379 return kvm_sev_es_outs(vcpu, size, port);
14380 return 1;
14381 }
14382
14383 static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
14384 unsigned int port)
14385 {
14386 for (;;) {
14387 unsigned int count =
14388 min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count);
14389 int ret = emulator_pio_out(vcpu, size, port, vcpu->arch.sev_pio_data, count);
14390
14391 /* memcpy done already by emulator_pio_out. */
14392 advance_sev_es_emulated_pio(vcpu, count, size);
14393 if (!ret)
14394 break;
14395
14396 /* Emulation done by the kernel. */
14397 if (!vcpu->arch.sev_pio_count)
14398 return 1;
14399 }
14400
14401 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_outs;
14402 return 0;
14403 }
14404
14405 static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
14406 unsigned int port);
14407
14408 static int complete_sev_es_emulated_ins(struct kvm_vcpu *vcpu)
14409 {
14410 unsigned count = vcpu->arch.pio.count;
14411 int size = vcpu->arch.pio.size;
14412 int port = vcpu->arch.pio.port;
14413
14414 complete_emulator_pio_in(vcpu, vcpu->arch.sev_pio_data);
14415 advance_sev_es_emulated_pio(vcpu, count, size);
14416 if (vcpu->arch.sev_pio_count)
14417 return kvm_sev_es_ins(vcpu, size, port);
14418 return 1;
14419 }
14420
14421 static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
14422 unsigned int port)
14423 {
14424 for (;;) {
14425 unsigned int count =
14426 min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count);
14427 if (!emulator_pio_in(vcpu, size, port, vcpu->arch.sev_pio_data, count))
14428 break;
14429
14430 /* Emulation done by the kernel. */
14431 advance_sev_es_emulated_pio(vcpu, count, size);
14432 if (!vcpu->arch.sev_pio_count)
14433 return 1;
14434 }
14435
14436 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins;
14437 return 0;
14438 }
14439
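/*
 * Entry point for SEV-ES string I/O (INS/OUTS via VMGEXIT).  The data lives
 * in the GHCB shared buffer; kvm_sev_es_ins()/kvm_sev_es_outs() above work
 * through it in chunks of at most PAGE_SIZE / size elements, exiting to
 * userspace whenever in-kernel PIO emulation can't complete a chunk.
 */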
14440 int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
14441 unsigned int port, void *data, unsigned int count,
14442 int in)
14443 {
14444 vcpu->arch.sev_pio_data = data;
14445 vcpu->arch.sev_pio_count = count;
14446 return in ? kvm_sev_es_ins(vcpu, size, port)
14447 : kvm_sev_es_outs(vcpu, size, port);
14448 }
14449 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_sev_es_string_io);
14450
14451 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_entry);
14452 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
14453 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_mmio);
14454 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
14455 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
14456 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
14457 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
14458 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
14459 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmenter);
14460 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
14461 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
14462 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
14463 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmenter_failed);
14464 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
14465 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
14466 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
14467 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_write_tsc_offset);
14468 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ple_window_update);
14469 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pml_full);
14470 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_unaccelerated_access);
14471 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_incomplete_ipi);
14472 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_ga_log);
14473 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_kick_vcpu_slowpath);
14474 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_doorbell);
14475 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_apicv_accept_irq);
14476 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_enter);
14477 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_exit);
14478 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_enter);
14479 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_exit);
14480 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_rmp_fault);
14481
14482 static int __init kvm_x86_init(void)
14483 {
14484 kvm_init_xstate_sizes();
14485
14486 kvm_mmu_x86_module_init();
14487 mitigate_smt_rsb &= boot_cpu_has_bug(X86_BUG_SMT_RSB) && cpu_smt_possible();
14488 return 0;
14489 }
14490 module_init(kvm_x86_init);
14491
14492 static void __exit kvm_x86_exit(void)
14493 {
14494 WARN_ON_ONCE(static_branch_unlikely(&kvm_has_noapic_vcpu));
14495 }
14496 module_exit(kvm_x86_exit);
14497