1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Kernel-based Virtual Machine driver for Linux
4 *
5 * derived from drivers/kvm/kvm_main.c
6 *
7 * Copyright (C) 2006 Qumranet, Inc.
8 * Copyright (C) 2008 Qumranet, Inc.
9 * Copyright IBM Corporation, 2008
10 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
11 *
12 * Authors:
13 * Avi Kivity <avi@qumranet.com>
14 * Yaniv Kamay <yaniv@qumranet.com>
15 * Amit Shah <amit.shah@qumranet.com>
16 * Ben-Ami Yassour <benami@il.ibm.com>
17 */
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20 #include <linux/kvm_host.h>
21 #include "irq.h"
22 #include "ioapic.h"
23 #include "mmu.h"
24 #include "i8254.h"
25 #include "tss.h"
26 #include "kvm_cache_regs.h"
27 #include "kvm_emulate.h"
28 #include "mmu/page_track.h"
29 #include "x86.h"
30 #include "cpuid.h"
31 #include "pmu.h"
32 #include "hyperv.h"
33 #include "lapic.h"
34 #include "xen.h"
35 #include "smm.h"
36
37 #include <linux/clocksource.h>
38 #include <linux/interrupt.h>
39 #include <linux/kvm.h>
40 #include <linux/fs.h>
41 #include <linux/vmalloc.h>
42 #include <linux/export.h>
43 #include <linux/moduleparam.h>
44 #include <linux/mman.h>
45 #include <linux/highmem.h>
46 #include <linux/iommu.h>
47 #include <linux/cpufreq.h>
48 #include <linux/user-return-notifier.h>
49 #include <linux/srcu.h>
50 #include <linux/slab.h>
51 #include <linux/perf_event.h>
52 #include <linux/uaccess.h>
53 #include <linux/hash.h>
54 #include <linux/pci.h>
55 #include <linux/timekeeper_internal.h>
56 #include <linux/pvclock_gtod.h>
57 #include <linux/kvm_irqfd.h>
58 #include <linux/irqbypass.h>
59 #include <linux/sched/stat.h>
60 #include <linux/sched/isolation.h>
61 #include <linux/mem_encrypt.h>
62 #include <linux/suspend.h>
63 #include <linux/smp.h>
64
65 #include <trace/events/ipi.h>
66 #include <trace/events/kvm.h>
67
68 #include <asm/debugreg.h>
69 #include <asm/msr.h>
70 #include <asm/desc.h>
71 #include <asm/mce.h>
72 #include <asm/pkru.h>
73 #include <linux/kernel_stat.h>
74 #include <asm/fpu/api.h>
75 #include <asm/fpu/xcr.h>
76 #include <asm/fpu/xstate.h>
77 #include <asm/pvclock.h>
78 #include <asm/div64.h>
79 #include <asm/irq_remapping.h>
80 #include <asm/mshyperv.h>
81 #include <asm/hypervisor.h>
82 #include <asm/tlbflush.h>
83 #include <asm/intel_pt.h>
84 #include <asm/emulate_prefix.h>
85 #include <asm/sgx.h>
86 #include <asm/virt.h>
87
88 #include <clocksource/hyperv_timer.h>
89
90 #define CREATE_TRACE_POINTS
91 #include "trace.h"
92
93 #define MAX_IO_MSRS 256
94
95 /*
96 * Note, kvm_caps fields should *never* have default values, all fields must be
97 * recomputed from scratch during vendor module load, e.g. to account for a
98 * vendor module being reloaded with different module parameters.
99 */
100 struct kvm_caps kvm_caps __read_mostly;
101 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_caps);
102
103 struct kvm_host_values kvm_host __read_mostly;
104 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_host);
105
106 #define ERR_PTR_USR(e) ((void __user *)ERR_PTR(e))
107
108 #define emul_to_vcpu(ctxt) \
109 ((struct kvm_vcpu *)(ctxt)->vcpu)
110
111 /* EFER defaults:
112 * - enable SYSCALL by default because it's emulated by KVM
113 * - enable LME and LMA by default on 64-bit KVM
114 */
115 #ifdef CONFIG_X86_64
116 static
117 u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
118 #else
119 static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
120 #endif
121
122 #define KVM_EXIT_HYPERCALL_VALID_MASK (1 << KVM_HC_MAP_GPA_RANGE)
123
124 #define KVM_CAP_PMU_VALID_MASK KVM_PMU_CAP_DISABLE
125
126 #define KVM_X2APIC_API_VALID_FLAGS (KVM_X2APIC_API_USE_32BIT_IDS | \
127 KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK | \
128 KVM_X2APIC_ENABLE_SUPPRESS_EOI_BROADCAST | \
129 KVM_X2APIC_DISABLE_SUPPRESS_EOI_BROADCAST)
130
131 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
132 static void process_nmi(struct kvm_vcpu *vcpu);
133 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
134 static void store_regs(struct kvm_vcpu *vcpu);
135 static int sync_regs(struct kvm_vcpu *vcpu);
136 static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu);
137
138 static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
139 static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
140
141 static DEFINE_MUTEX(vendor_module_lock);
142 static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
143 static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
144
145 struct kvm_x86_ops kvm_x86_ops __read_mostly;
146
147 #define KVM_X86_OP(func) \
148 DEFINE_STATIC_CALL_NULL(kvm_x86_##func, \
149 *(((struct kvm_x86_ops *)0)->func));
150 #define KVM_X86_OP_OPTIONAL KVM_X86_OP
151 #define KVM_X86_OP_OPTIONAL_RET0 KVM_X86_OP
152 #include <asm/kvm-x86-ops.h>
153 EXPORT_STATIC_CALL_GPL(kvm_x86_get_cs_db_l_bits);
154 EXPORT_STATIC_CALL_GPL(kvm_x86_cache_reg);
155
156 static bool __read_mostly ignore_msrs = 0;
157 module_param(ignore_msrs, bool, 0644);
158
159 bool __read_mostly report_ignored_msrs = true;
160 module_param(report_ignored_msrs, bool, 0644);
161 EXPORT_SYMBOL_FOR_KVM_INTERNAL(report_ignored_msrs);
162
163 unsigned int min_timer_period_us = 200;
164 module_param(min_timer_period_us, uint, 0644);
165
166 /* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
167 static u32 __read_mostly tsc_tolerance_ppm = 250;
168 module_param(tsc_tolerance_ppm, uint, 0644);
169
170 bool __read_mostly enable_vmware_backdoor = false;
171 module_param(enable_vmware_backdoor, bool, 0444);
172 EXPORT_SYMBOL_FOR_KVM_INTERNAL(enable_vmware_backdoor);
173
174 /*
175 * Flags to manipulate forced emulation behavior (any non-zero value will
176 * enable forced emulation).
177 */
178 #define KVM_FEP_CLEAR_RFLAGS_RF BIT(1)
179 static int __read_mostly force_emulation_prefix;
180 module_param(force_emulation_prefix, int, 0644);
181
182 int __read_mostly pi_inject_timer = -1;
183 module_param(pi_inject_timer, bint, 0644);
184
185 /* Enable/disable PMU virtualization */
186 bool __read_mostly enable_pmu = true;
187 EXPORT_SYMBOL_FOR_KVM_INTERNAL(enable_pmu);
188 module_param(enable_pmu, bool, 0444);
189
190 /* Enable/disable mediated PMU virtualization. */
191 bool __read_mostly enable_mediated_pmu;
192 EXPORT_SYMBOL_FOR_KVM_INTERNAL(enable_mediated_pmu);
193
194 bool __read_mostly eager_page_split = true;
195 module_param(eager_page_split, bool, 0644);
196
197 /* Enable/disable SMT_RSB bug mitigation */
198 static bool __read_mostly mitigate_smt_rsb;
199 module_param(mitigate_smt_rsb, bool, 0444);
200
201 /*
202 * Restoring the host value for MSRs that are only consumed when running in
203 * usermode, e.g. SYSCALL MSRs and TSC_AUX, can be deferred until the CPU
204 * returns to userspace, i.e. the kernel can run with the guest's value.
205 */
206 #define KVM_MAX_NR_USER_RETURN_MSRS 16
207
208 struct kvm_user_return_msrs {
209 struct user_return_notifier urn;
210 bool registered;
211 struct kvm_user_return_msr_values {
212 u64 host;
213 u64 curr;
214 } values[KVM_MAX_NR_USER_RETURN_MSRS];
215 };
216
217 u32 __read_mostly kvm_nr_uret_msrs;
218 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_nr_uret_msrs);
219 static u32 __read_mostly kvm_uret_msrs_list[KVM_MAX_NR_USER_RETURN_MSRS];
220 static DEFINE_PER_CPU(struct kvm_user_return_msrs, user_return_msrs);
221
222 #define KVM_SUPPORTED_XCR0 (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
223 | XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
224 | XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \
225 | XFEATURE_MASK_PKRU | XFEATURE_MASK_XTILE)
226
227 #define XFEATURE_MASK_CET_ALL (XFEATURE_MASK_CET_USER | XFEATURE_MASK_CET_KERNEL)
228 /*
229 * Note, KVM supports exposing PT to the guest, but does not support context
230 * switching PT via XSTATE (KVM's PT virtualization relies on perf; swapping
231 * PT via guest XSTATE would clobber perf state), i.e. KVM doesn't support
232 * IA32_XSS[bit 8] (guests can/must use RDMSR/WRMSR to save/restore PT MSRs).
233 */
234 #define KVM_SUPPORTED_XSS (XFEATURE_MASK_CET_ALL)
235
236 bool __read_mostly allow_smaller_maxphyaddr = 0;
237 EXPORT_SYMBOL_FOR_KVM_INTERNAL(allow_smaller_maxphyaddr);
238
239 bool __read_mostly enable_apicv = true;
240 EXPORT_SYMBOL_FOR_KVM_INTERNAL(enable_apicv);
241
242 bool __read_mostly enable_ipiv = true;
243 EXPORT_SYMBOL_FOR_KVM_INTERNAL(enable_ipiv);
244
245 bool __read_mostly enable_device_posted_irqs = true;
246 EXPORT_SYMBOL_FOR_KVM_INTERNAL(enable_device_posted_irqs);
247
248 const struct kvm_stats_desc kvm_vm_stats_desc[] = {
249 KVM_GENERIC_VM_STATS(),
250 STATS_DESC_COUNTER(VM, mmu_shadow_zapped),
251 STATS_DESC_COUNTER(VM, mmu_pte_write),
252 STATS_DESC_COUNTER(VM, mmu_pde_zapped),
253 STATS_DESC_COUNTER(VM, mmu_flooded),
254 STATS_DESC_COUNTER(VM, mmu_recycled),
255 STATS_DESC_COUNTER(VM, mmu_cache_miss),
256 STATS_DESC_ICOUNTER(VM, mmu_unsync),
257 STATS_DESC_ICOUNTER(VM, pages_4k),
258 STATS_DESC_ICOUNTER(VM, pages_2m),
259 STATS_DESC_ICOUNTER(VM, pages_1g),
260 STATS_DESC_ICOUNTER(VM, nx_lpage_splits),
261 STATS_DESC_PCOUNTER(VM, max_mmu_rmap_size),
262 STATS_DESC_PCOUNTER(VM, max_mmu_page_hash_collisions)
263 };
264
265 const struct kvm_stats_header kvm_vm_stats_header = {
266 .name_size = KVM_STATS_NAME_SIZE,
267 .num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
268 .id_offset = sizeof(struct kvm_stats_header),
269 .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
270 .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
271 sizeof(kvm_vm_stats_desc),
272 };
273
274 const struct kvm_stats_desc kvm_vcpu_stats_desc[] = {
275 KVM_GENERIC_VCPU_STATS(),
276 STATS_DESC_COUNTER(VCPU, pf_taken),
277 STATS_DESC_COUNTER(VCPU, pf_fixed),
278 STATS_DESC_COUNTER(VCPU, pf_emulate),
279 STATS_DESC_COUNTER(VCPU, pf_spurious),
280 STATS_DESC_COUNTER(VCPU, pf_fast),
281 STATS_DESC_COUNTER(VCPU, pf_mmio_spte_created),
282 STATS_DESC_COUNTER(VCPU, pf_guest),
283 STATS_DESC_COUNTER(VCPU, tlb_flush),
284 STATS_DESC_COUNTER(VCPU, invlpg),
285 STATS_DESC_COUNTER(VCPU, exits),
286 STATS_DESC_COUNTER(VCPU, io_exits),
287 STATS_DESC_COUNTER(VCPU, mmio_exits),
288 STATS_DESC_COUNTER(VCPU, signal_exits),
289 STATS_DESC_COUNTER(VCPU, irq_window_exits),
290 STATS_DESC_COUNTER(VCPU, nmi_window_exits),
291 STATS_DESC_COUNTER(VCPU, l1d_flush),
292 STATS_DESC_COUNTER(VCPU, halt_exits),
293 STATS_DESC_COUNTER(VCPU, request_irq_exits),
294 STATS_DESC_COUNTER(VCPU, irq_exits),
295 STATS_DESC_COUNTER(VCPU, host_state_reload),
296 STATS_DESC_COUNTER(VCPU, fpu_reload),
297 STATS_DESC_COUNTER(VCPU, insn_emulation),
298 STATS_DESC_COUNTER(VCPU, insn_emulation_fail),
299 STATS_DESC_COUNTER(VCPU, hypercalls),
300 STATS_DESC_COUNTER(VCPU, irq_injections),
301 STATS_DESC_COUNTER(VCPU, nmi_injections),
302 STATS_DESC_COUNTER(VCPU, req_event),
303 STATS_DESC_COUNTER(VCPU, nested_run),
304 STATS_DESC_COUNTER(VCPU, directed_yield_attempted),
305 STATS_DESC_COUNTER(VCPU, directed_yield_successful),
306 STATS_DESC_COUNTER(VCPU, preemption_reported),
307 STATS_DESC_COUNTER(VCPU, preemption_other),
308 STATS_DESC_IBOOLEAN(VCPU, guest_mode),
309 STATS_DESC_COUNTER(VCPU, notify_window_exits),
310 };
311
312 const struct kvm_stats_header kvm_vcpu_stats_header = {
313 .name_size = KVM_STATS_NAME_SIZE,
314 .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
315 .id_offset = sizeof(struct kvm_stats_header),
316 .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
317 .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
318 sizeof(kvm_vcpu_stats_desc),
319 };
320
321 static struct kmem_cache *x86_emulator_cache;
322
323 /*
324 * The three MSR lists (msrs_to_save, emulated_msrs, msr_based_features) track
325 * the set of MSRs that KVM exposes to userspace through KVM_GET_MSRS,
326 * KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST. msrs_to_save holds MSRs that
327 * require host support, i.e. should be probed via RDMSR. emulated_msrs holds
328 * MSRs that KVM emulates without strictly requiring host support.
329 * msr_based_features holds MSRs that enumerate features, i.e. are effectively
330 * CPUID leafs. Note, msr_based_features isn't mutually exclusive with
331 * msrs_to_save and emulated_msrs.
332 */
333
334 static const u32 msrs_to_save_base[] = {
335 MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
336 MSR_STAR,
337 #ifdef CONFIG_X86_64
338 MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
339 #endif
340 MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
341 MSR_IA32_FEAT_CTL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
342 MSR_IA32_SPEC_CTRL, MSR_IA32_TSX_CTRL,
343 MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH,
344 MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK,
345 MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B,
346 MSR_IA32_RTIT_ADDR1_A, MSR_IA32_RTIT_ADDR1_B,
347 MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B,
348 MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B,
349 MSR_IA32_UMWAIT_CONTROL,
350
351 MSR_IA32_XFD, MSR_IA32_XFD_ERR, MSR_IA32_XSS,
352
353 MSR_IA32_U_CET, MSR_IA32_S_CET,
354 MSR_IA32_PL0_SSP, MSR_IA32_PL1_SSP, MSR_IA32_PL2_SSP,
355 MSR_IA32_PL3_SSP, MSR_IA32_INT_SSP_TAB,
356 MSR_IA32_DEBUGCTLMSR,
357 MSR_IA32_LASTBRANCHFROMIP, MSR_IA32_LASTBRANCHTOIP,
358 MSR_IA32_LASTINTFROMIP, MSR_IA32_LASTINTTOIP,
359 };
360
361 static const u32 msrs_to_save_pmu[] = {
362 MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1,
363 MSR_ARCH_PERFMON_FIXED_CTR0 + 2,
364 MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS,
365 MSR_CORE_PERF_GLOBAL_CTRL,
366 MSR_IA32_PEBS_ENABLE, MSR_IA32_DS_AREA, MSR_PEBS_DATA_CFG,
367
368 /* This part of MSRs should match KVM_MAX_NR_INTEL_GP_COUNTERS. */
369 MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1,
370 MSR_ARCH_PERFMON_PERFCTR0 + 2, MSR_ARCH_PERFMON_PERFCTR0 + 3,
371 MSR_ARCH_PERFMON_PERFCTR0 + 4, MSR_ARCH_PERFMON_PERFCTR0 + 5,
372 MSR_ARCH_PERFMON_PERFCTR0 + 6, MSR_ARCH_PERFMON_PERFCTR0 + 7,
373 MSR_ARCH_PERFMON_EVENTSEL0, MSR_ARCH_PERFMON_EVENTSEL1,
374 MSR_ARCH_PERFMON_EVENTSEL0 + 2, MSR_ARCH_PERFMON_EVENTSEL0 + 3,
375 MSR_ARCH_PERFMON_EVENTSEL0 + 4, MSR_ARCH_PERFMON_EVENTSEL0 + 5,
376 MSR_ARCH_PERFMON_EVENTSEL0 + 6, MSR_ARCH_PERFMON_EVENTSEL0 + 7,
377
378 MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3,
379 MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3,
380
381 /* This part of MSRs should match KVM_MAX_NR_AMD_GP_COUNTERS. */
382 MSR_F15H_PERF_CTL0, MSR_F15H_PERF_CTL1, MSR_F15H_PERF_CTL2,
383 MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5,
384 MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2,
385 MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5,
386
387 MSR_AMD64_PERF_CNTR_GLOBAL_CTL,
388 MSR_AMD64_PERF_CNTR_GLOBAL_STATUS,
389 MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,
390 MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET,
391 };
392
393 static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_base) +
394 ARRAY_SIZE(msrs_to_save_pmu)];
395 static unsigned num_msrs_to_save;
396
397 static const u32 emulated_msrs_all[] = {
398 MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
399 MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
400
401 #ifdef CONFIG_KVM_HYPERV
402 HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
403 HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
404 HV_X64_MSR_TSC_FREQUENCY, HV_X64_MSR_APIC_FREQUENCY,
405 HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2,
406 HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL,
407 HV_X64_MSR_RESET,
408 HV_X64_MSR_VP_INDEX,
409 HV_X64_MSR_VP_RUNTIME,
410 HV_X64_MSR_SCONTROL,
411 HV_X64_MSR_STIMER0_CONFIG,
412 HV_X64_MSR_VP_ASSIST_PAGE,
413 HV_X64_MSR_REENLIGHTENMENT_CONTROL, HV_X64_MSR_TSC_EMULATION_CONTROL,
414 HV_X64_MSR_TSC_EMULATION_STATUS, HV_X64_MSR_TSC_INVARIANT_CONTROL,
415 HV_X64_MSR_SYNDBG_OPTIONS,
416 HV_X64_MSR_SYNDBG_CONTROL, HV_X64_MSR_SYNDBG_STATUS,
417 HV_X64_MSR_SYNDBG_SEND_BUFFER, HV_X64_MSR_SYNDBG_RECV_BUFFER,
418 HV_X64_MSR_SYNDBG_PENDING_BUFFER,
419 #endif
420
421 MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
422 MSR_KVM_PV_EOI_EN, MSR_KVM_ASYNC_PF_INT, MSR_KVM_ASYNC_PF_ACK,
423
424 MSR_IA32_TSC_ADJUST,
425 MSR_IA32_TSC_DEADLINE,
426 MSR_IA32_ARCH_CAPABILITIES,
427 MSR_IA32_PERF_CAPABILITIES,
428 MSR_IA32_MISC_ENABLE,
429 MSR_IA32_MCG_STATUS,
430 MSR_IA32_MCG_CTL,
431 MSR_IA32_MCG_EXT_CTL,
432 MSR_IA32_SMBASE,
433 MSR_SMI_COUNT,
434 MSR_PLATFORM_INFO,
435 MSR_MISC_FEATURES_ENABLES,
436 MSR_AMD64_VIRT_SPEC_CTRL,
437 MSR_AMD64_TSC_RATIO,
438 MSR_IA32_POWER_CTL,
439 MSR_IA32_UCODE_REV,
440
441 /*
442 * KVM always supports the "true" VMX control MSRs, even if the host
443 * does not. The VMX MSRs as a whole are considered "emulated" as KVM
444 * doesn't strictly require them to exist in the host (ignoring that
445 * KVM would refuse to load in the first place if the core set of MSRs
446 * aren't supported).
447 */
448 MSR_IA32_VMX_BASIC,
449 MSR_IA32_VMX_TRUE_PINBASED_CTLS,
450 MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
451 MSR_IA32_VMX_TRUE_EXIT_CTLS,
452 MSR_IA32_VMX_TRUE_ENTRY_CTLS,
453 MSR_IA32_VMX_MISC,
454 MSR_IA32_VMX_CR0_FIXED0,
455 MSR_IA32_VMX_CR4_FIXED0,
456 MSR_IA32_VMX_VMCS_ENUM,
457 MSR_IA32_VMX_PROCBASED_CTLS2,
458 MSR_IA32_VMX_EPT_VPID_CAP,
459 MSR_IA32_VMX_VMFUNC,
460
461 MSR_K7_HWCR,
462 MSR_KVM_POLL_CONTROL,
463 };
464
465 static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)];
466 static unsigned num_emulated_msrs;
467
468 /*
469 * List of MSRs that control the existence of MSR-based features, i.e. MSRs
470 * that are effectively CPUID leafs. VMX MSRs are also included in the set of
471 * feature MSRs, but are handled separately to allow expedited lookups.
472 */
473 static const u32 msr_based_features_all_except_vmx[] = {
474 MSR_AMD64_DE_CFG,
475 MSR_IA32_UCODE_REV,
476 MSR_IA32_ARCH_CAPABILITIES,
477 MSR_IA32_PERF_CAPABILITIES,
478 MSR_PLATFORM_INFO,
479 };
480
481 static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all_except_vmx) +
482 (KVM_LAST_EMULATED_VMX_MSR - KVM_FIRST_EMULATED_VMX_MSR + 1)];
483 static unsigned int num_msr_based_features;
484
485 /*
486 * All feature MSRs except uCode revID, which tracks the currently loaded uCode
487 * patch, are immutable once the vCPU model is defined.
488 */
489 static bool kvm_is_immutable_feature_msr(u32 msr)
490 {
491 int i;
492
493 if (msr >= KVM_FIRST_EMULATED_VMX_MSR && msr <= KVM_LAST_EMULATED_VMX_MSR)
494 return true;
495
496 for (i = 0; i < ARRAY_SIZE(msr_based_features_all_except_vmx); i++) {
497 if (msr == msr_based_features_all_except_vmx[i])
498 return msr != MSR_IA32_UCODE_REV;
499 }
500
501 return false;
502 }
503
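/*
 * Return true if KVM reports @msr_index to userspace via
 * KVM_GET_MSR_INDEX_LIST, i.e. if the MSR is in msrs_to_save or
 * emulated_msrs.
 */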
504 static bool kvm_is_advertised_msr(u32 msr_index)
505 {
506 unsigned int i;
507
508 for (i = 0; i < num_msrs_to_save; i++) {
509 if (msrs_to_save[i] == msr_index)
510 return true;
511 }
512
513 for (i = 0; i < num_emulated_msrs; i++) {
514 if (emulated_msrs[i] == msr_index)
515 return true;
516 }
517
518 return false;
519 }
520
521 typedef int (*msr_access_t)(struct kvm_vcpu *vcpu, u32 index, u64 *data,
522 bool host_initiated);
523
524 static __always_inline int kvm_do_msr_access(struct kvm_vcpu *vcpu, u32 msr,
525 u64 *data, bool host_initiated,
526 enum kvm_msr_access rw,
527 msr_access_t msr_access_fn)
528 {
529 const char *op = rw == MSR_TYPE_W ? "wrmsr" : "rdmsr";
530 int ret;
531
532 BUILD_BUG_ON(rw != MSR_TYPE_R && rw != MSR_TYPE_W);
533
534 /*
535 * Zero the data on read failures to avoid leaking stack data to the
536 * guest and/or userspace, e.g. if the failure is ignored below.
537 */
538 ret = msr_access_fn(vcpu, msr, data, host_initiated);
539 if (ret && rw == MSR_TYPE_R)
540 *data = 0;
541
542 if (ret != KVM_MSR_RET_UNSUPPORTED)
543 return ret;
544
545 /*
546 * Userspace is allowed to read MSRs, and write '0' to MSRs, that KVM
547 * advertises to userspace, even if an MSR isn't fully supported.
548 * Simply check that @data is '0', which covers both the write '0' case
549 * and all reads (in which case @data is zeroed on failure; see above).
550 */
551 if (host_initiated && !*data && kvm_is_advertised_msr(msr))
552 return 0;
553
554 if (!ignore_msrs) {
555 kvm_debug_ratelimited("unhandled %s: 0x%x data 0x%llx\n",
556 op, msr, *data);
557 return ret;
558 }
559
560 if (report_ignored_msrs)
561 kvm_pr_unimpl("ignored %s: 0x%x data 0x%llx\n", op, msr, *data);
562
563 return 0;
564 }
565
566 static struct kmem_cache *kvm_alloc_emulator_cache(void)
567 {
568 unsigned int useroffset = offsetof(struct x86_emulate_ctxt, src);
569 unsigned int size = sizeof(struct x86_emulate_ctxt);
570
571 return kmem_cache_create_usercopy("x86_emulator", size,
572 __alignof__(struct x86_emulate_ctxt),
573 SLAB_ACCOUNT, useroffset,
574 size - useroffset, NULL);
575 }
576
577 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);
578
579 static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
580 {
581 int i;
582 for (i = 0; i < ASYNC_PF_PER_VCPU; i++)
583 vcpu->arch.apf.gfns[i] = ~0;
584 }
585
586 static void kvm_destroy_user_return_msrs(void)
587 {
588 int cpu;
589
590 for_each_possible_cpu(cpu)
591 WARN_ON_ONCE(per_cpu(user_return_msrs, cpu).registered);
592
593 kvm_nr_uret_msrs = 0;
594 }
595
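/*
 * Invoked via the user-return notifier when the CPU returns to userspace.
 * Restore the host value of every user-return MSR whose current (guest)
 * value differs from the host value, then unregister the notifier.
 */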
596 static void kvm_on_user_return(struct user_return_notifier *urn)
597 {
598 unsigned slot;
599 struct kvm_user_return_msrs *msrs
600 = container_of(urn, struct kvm_user_return_msrs, urn);
601 struct kvm_user_return_msr_values *values;
602
603 msrs->registered = false;
604 user_return_notifier_unregister(urn);
605
606 for (slot = 0; slot < kvm_nr_uret_msrs; ++slot) {
607 values = &msrs->values[slot];
608 if (values->host != values->curr) {
609 wrmsrq(kvm_uret_msrs_list[slot], values->host);
610 values->curr = values->host;
611 }
612 }
613 }
614
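/*
 * Verify that an MSR can be safely saved/restored on this CPU by reading it
 * and writing the value back.  Preemption is disabled so the RDMSR/WRMSR
 * pair targets the same CPU.
 */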
615 static int kvm_probe_user_return_msr(u32 msr)
616 {
617 u64 val;
618 int ret;
619
620 preempt_disable();
621 ret = rdmsrq_safe(msr, &val);
622 if (ret)
623 goto out;
624 ret = wrmsrq_safe(msr, val);
625 out:
626 preempt_enable();
627 return ret;
628 }
629
630 int kvm_add_user_return_msr(u32 msr)
631 {
632 BUG_ON(kvm_nr_uret_msrs >= KVM_MAX_NR_USER_RETURN_MSRS);
633
634 if (kvm_probe_user_return_msr(msr))
635 return -1;
636
637 kvm_uret_msrs_list[kvm_nr_uret_msrs] = msr;
638 return kvm_nr_uret_msrs++;
639 }
640 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_add_user_return_msr);
641
642 int kvm_find_user_return_msr(u32 msr)
643 {
644 int i;
645
646 for (i = 0; i < kvm_nr_uret_msrs; ++i) {
647 if (kvm_uret_msrs_list[i] == msr)
648 return i;
649 }
650 return -1;
651 }
652 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_find_user_return_msr);
653
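/*
 * Snapshot the host values of all registered user-return MSRs when a CPU
 * comes online so they can be restored after running a guest on that CPU.
 */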
654 static void kvm_user_return_msr_cpu_online(void)
655 {
656 struct kvm_user_return_msrs *msrs = this_cpu_ptr(&user_return_msrs);
657 u64 value;
658 int i;
659
660 for (i = 0; i < kvm_nr_uret_msrs; ++i) {
661 rdmsrq_safe(kvm_uret_msrs_list[i], &value);
662 msrs->values[i].host = value;
663 msrs->values[i].curr = value;
664 }
665 }
666
667 static void kvm_user_return_register_notifier(struct kvm_user_return_msrs *msrs)
668 {
669 if (!msrs->registered) {
670 msrs->urn.on_user_return = kvm_on_user_return;
671 user_return_notifier_register(&msrs->urn);
672 msrs->registered = true;
673 }
674 }
675
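/*
 * Update the per-CPU shadow value of a user-return MSR and write it to
 * hardware, lazily registering the user-return notifier so the host value
 * is restored when the CPU returns to userspace.  Only bits set in @mask
 * are taken from @value; the remaining bits keep the host value.
 *
 * Illustrative usage sketch (not taken from this file; vendor modules wire
 * this up in their own setup/vcpu-load paths, and "guest_tsc_aux" below is a
 * hypothetical variable):
 *
 *	// once, during hardware/module setup
 *	int slot = kvm_add_user_return_msr(MSR_TSC_AUX);
 *
 *	// when loading guest state on a CPU
 *	kvm_set_user_return_msr(slot, guest_tsc_aux, -1ull);
 */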
676 int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask)
677 {
678 struct kvm_user_return_msrs *msrs = this_cpu_ptr(&user_return_msrs);
679 int err;
680
681 value = (value & mask) | (msrs->values[slot].host & ~mask);
682 if (value == msrs->values[slot].curr)
683 return 0;
684 err = wrmsrq_safe(kvm_uret_msrs_list[slot], value);
685 if (err)
686 return 1;
687
688 msrs->values[slot].curr = value;
689 kvm_user_return_register_notifier(msrs);
690 return 0;
691 }
692 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_user_return_msr);
693
694 u64 kvm_get_user_return_msr(unsigned int slot)
695 {
696 return this_cpu_ptr(&user_return_msrs)->values[slot].curr;
697 }
698 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_get_user_return_msr);
699
700 static void drop_user_return_notifiers(void)
701 {
702 struct kvm_user_return_msrs *msrs = this_cpu_ptr(&user_return_msrs);
703
704 if (msrs->registered)
705 kvm_on_user_return(&msrs->urn);
706 }
707
708 /*
709 * Handle a fault on a hardware virtualization (VMX or SVM) instruction.
710 *
711 * Hardware virtualization extension instructions may fault if a reboot turns
712 * off virtualization while processes are running. Usually after catching the
713 * fault we just panic; during reboot instead the instruction is ignored.
714 */
715 noinstr void kvm_spurious_fault(void)
716 {
717 /* Fault while not rebooting. We want the trace. */
718 BUG_ON(!virt_rebooting);
719 }
720 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_spurious_fault);
721
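/*
 * Exception classes used to apply the SDM's double-fault rules when a second
 * exception arrives while one is already pending: benign, contributory
 * (#DE, #TS, #NP, #SS, #GP), or page fault.
 */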
722 #define EXCPT_BENIGN 0
723 #define EXCPT_CONTRIBUTORY 1
724 #define EXCPT_PF 2
725
726 static int exception_class(int vector)
727 {
728 switch (vector) {
729 case PF_VECTOR:
730 return EXCPT_PF;
731 case DE_VECTOR:
732 case TS_VECTOR:
733 case NP_VECTOR:
734 case SS_VECTOR:
735 case GP_VECTOR:
736 return EXCPT_CONTRIBUTORY;
737 default:
738 break;
739 }
740 return EXCPT_BENIGN;
741 }
742
743 #define EXCPT_FAULT 0
744 #define EXCPT_TRAP 1
745 #define EXCPT_ABORT 2
746 #define EXCPT_INTERRUPT 3
747 #define EXCPT_DB 4
748
749 static int exception_type(int vector)
750 {
751 unsigned int mask;
752
753 if (WARN_ON(vector > 31 || vector == NMI_VECTOR))
754 return EXCPT_INTERRUPT;
755
756 mask = 1 << vector;
757
758 /*
759 * #DBs can be trap-like or fault-like, the caller must check other CPU
760 * state, e.g. DR6, to determine whether a #DB is a trap or fault.
761 */
762 if (mask & (1 << DB_VECTOR))
763 return EXCPT_DB;
764
765 if (mask & ((1 << BP_VECTOR) | (1 << OF_VECTOR)))
766 return EXCPT_TRAP;
767
768 if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR)))
769 return EXCPT_ABORT;
770
771 /* Reserved exceptions will result in fault */
772 return EXCPT_FAULT;
773 }
774
775 void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu,
776 struct kvm_queued_exception *ex)
777 {
778 if (!ex->has_payload)
779 return;
780
781 switch (ex->vector) {
782 case DB_VECTOR:
783 /*
784 * "Certain debug exceptions may clear bit 0-3. The
785 * remaining contents of the DR6 register are never
786 * cleared by the processor".
787 */
788 vcpu->arch.dr6 &= ~DR_TRAP_BITS;
789 /*
790 * In order to reflect the #DB exception payload in guest
791 * dr6, three components need to be considered: active low
792 * bit, FIXED_1 bits and active high bits (e.g. DR6_BD,
793 * DR6_BS and DR6_BT)
794 * DR6_ACTIVE_LOW contains the FIXED_1 and active low bits.
795 * In the target guest dr6:
796 * FIXED_1 bits should always be set.
797 * Active low bits should be cleared if 1-setting in payload.
798 * Active high bits should be set if 1-setting in payload.
799 *
800 * Note, the payload is compatible with the pending debug
801 * exceptions/exit qualification under VMX, that active_low bits
802 * are active high in payload.
803 * So they need to be flipped for DR6.
804 */
805 vcpu->arch.dr6 |= DR6_ACTIVE_LOW;
806 vcpu->arch.dr6 |= ex->payload;
807 vcpu->arch.dr6 ^= ex->payload & DR6_ACTIVE_LOW;
808
809 /*
810 * The #DB payload is defined as compatible with the 'pending
811 * debug exceptions' field under VMX, not DR6. While bit 12 is
812 * defined in the 'pending debug exceptions' field (enabled
813 * breakpoint), it is reserved and must be zero in DR6.
814 */
815 vcpu->arch.dr6 &= ~BIT(12);
816 break;
817 case PF_VECTOR:
818 vcpu->arch.cr2 = ex->payload;
819 break;
820 }
821
822 ex->has_payload = false;
823 ex->payload = 0;
824 }
825 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_deliver_exception_payload);
826
827 static void kvm_queue_exception_vmexit(struct kvm_vcpu *vcpu, unsigned int vector,
828 bool has_error_code, u32 error_code,
829 bool has_payload, unsigned long payload)
830 {
831 struct kvm_queued_exception *ex = &vcpu->arch.exception_vmexit;
832
833 ex->vector = vector;
834 ex->injected = false;
835 ex->pending = true;
836 ex->has_error_code = has_error_code;
837 ex->error_code = error_code;
838 ex->has_payload = has_payload;
839 ex->payload = payload;
840 }
841
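/*
 * Queue an exception for delivery to the guest.  If an exception is already
 * pending or injected, apply the architectural merging rules: a contributory
 * exception following a contributory exception (or any non-benign exception
 * following a #PF) escalates to #DF, and anything following a #DF escalates
 * to a triple fault, i.e. shutdown.
 */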
842 static void kvm_multiple_exception(struct kvm_vcpu *vcpu, unsigned int nr,
843 bool has_error, u32 error_code,
844 bool has_payload, unsigned long payload)
845 {
846 u32 prev_nr;
847 int class1, class2;
848
849 kvm_make_request(KVM_REQ_EVENT, vcpu);
850
851 /*
852 * If the exception is destined for L2, morph it to a VM-Exit if L1
853 * wants to intercept the exception.
854 */
855 if (is_guest_mode(vcpu) &&
856 kvm_x86_ops.nested_ops->is_exception_vmexit(vcpu, nr, error_code)) {
857 kvm_queue_exception_vmexit(vcpu, nr, has_error, error_code,
858 has_payload, payload);
859 return;
860 }
861
862 if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) {
863 queue:
864 vcpu->arch.exception.pending = true;
865 vcpu->arch.exception.injected = false;
866
867 vcpu->arch.exception.has_error_code = has_error;
868 vcpu->arch.exception.vector = nr;
869 vcpu->arch.exception.error_code = error_code;
870 vcpu->arch.exception.has_payload = has_payload;
871 vcpu->arch.exception.payload = payload;
872 return;
873 }
874
875 /* An exception is already pending or injected; apply the merging rules. */
876 prev_nr = vcpu->arch.exception.vector;
877 if (prev_nr == DF_VECTOR) {
878 /* triple fault -> shutdown */
879 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
880 return;
881 }
882 class1 = exception_class(prev_nr);
883 class2 = exception_class(nr);
884 if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY) ||
885 (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
886 /*
887 * Synthesize #DF. Clear the previously injected or pending
888 * exception so as not to incorrectly trigger shutdown.
889 */
890 vcpu->arch.exception.injected = false;
891 vcpu->arch.exception.pending = false;
892
893 kvm_queue_exception_e(vcpu, DF_VECTOR, 0);
894 } else {
895 /* replace previous exception with a new one in the hope
896 that instruction re-execution will regenerate the lost
897 exception */
898 goto queue;
899 }
900 }
901
902 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
903 {
904 kvm_multiple_exception(vcpu, nr, false, 0, false, 0);
905 }
906 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_queue_exception);
907
908
909 void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr,
910 unsigned long payload)
911 {
912 kvm_multiple_exception(vcpu, nr, false, 0, true, payload);
913 }
914 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_queue_exception_p);
915
916 static void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr,
917 u32 error_code, unsigned long payload)
918 {
919 kvm_multiple_exception(vcpu, nr, true, error_code, true, payload);
920 }
921
922 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned int nr,
923 bool has_error_code, u32 error_code)
924 {
925
926 /*
927 * On VM-Entry, an exception can be pending if and only if event
928 * injection was blocked by nested_run_pending. In that case, however,
929 * vcpu_enter_guest() requests an immediate exit, and the guest
930 * shouldn't proceed far enough to need reinjection.
931 */
932 WARN_ON_ONCE(kvm_is_exception_pending(vcpu));
933
934 /*
935 * Do not check for interception when injecting an event for L2, as the
936 * exception was checked for intercept when it was originally queued, and
937 * re-checking is incorrect if _L1_ injected the exception, in which
938 * case it's exempt from interception.
939 */
940 kvm_make_request(KVM_REQ_EVENT, vcpu);
941
942 vcpu->arch.exception.injected = true;
943 vcpu->arch.exception.has_error_code = has_error_code;
944 vcpu->arch.exception.vector = nr;
945 vcpu->arch.exception.error_code = error_code;
946 vcpu->arch.exception.has_payload = false;
947 vcpu->arch.exception.payload = 0;
948 }
949 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_requeue_exception);
950
951 int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
952 {
953 if (err)
954 kvm_inject_gp(vcpu, 0);
955 else
956 return kvm_skip_emulated_instruction(vcpu);
957
958 return 1;
959 }
960 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_complete_insn_gp);
961
962 static int complete_emulated_insn_gp(struct kvm_vcpu *vcpu, int err)
963 {
964 if (err) {
965 kvm_inject_gp(vcpu, 0);
966 return 1;
967 }
968
969 return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE | EMULTYPE_SKIP |
970 EMULTYPE_COMPLETE_USER_EXIT);
971 }
972
973 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
974 {
975 ++vcpu->stat.pf_guest;
976
977 /*
978 * Async #PF in L2 is always forwarded to L1 as a VM-Exit regardless of
979 * whether or not L1 wants to intercept "regular" #PF.
980 */
981 if (is_guest_mode(vcpu) && fault->async_page_fault)
982 kvm_queue_exception_vmexit(vcpu, PF_VECTOR,
983 true, fault->error_code,
984 true, fault->address);
985 else
986 kvm_queue_exception_e_p(vcpu, PF_VECTOR, fault->error_code,
987 fault->address);
988 }
989
990 void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
991 struct x86_exception *fault)
992 {
993 struct kvm_mmu *fault_mmu;
994 WARN_ON_ONCE(fault->vector != PF_VECTOR);
995
996 fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu :
997 vcpu->arch.walk_mmu;
998
999 /*
1000 * Invalidate the TLB entry for the faulting address, if it exists,
1001 * else the access will fault indefinitely (and to emulate hardware).
1002 */
1003 if ((fault->error_code & PFERR_PRESENT_MASK) &&
1004 !(fault->error_code & PFERR_RSVD_MASK))
1005 kvm_mmu_invalidate_addr(vcpu, fault_mmu, fault->address,
1006 KVM_MMU_ROOT_CURRENT);
1007
1008 fault_mmu->inject_page_fault(vcpu, fault);
1009 }
1010 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_inject_emulated_page_fault);
1011
1012 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
1013 {
1014 atomic_inc(&vcpu->arch.nmi_queued);
1015 kvm_make_request(KVM_REQ_NMI, vcpu);
1016 }
1017
1018 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
1019 {
1020 kvm_multiple_exception(vcpu, nr, true, error_code, false, 0);
1021 }
1022 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_queue_exception_e);
1023
1024 /*
1025 * Checks if cpl <= required_cpl; if true, return true. Otherwise queue
1026 * a #GP and return false.
1027 */
1028 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
1029 {
1030 if (kvm_x86_call(get_cpl)(vcpu) <= required_cpl)
1031 return true;
1032 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
1033 return false;
1034 }
1035
1036 bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
1037 {
1038 if ((dr != 4 && dr != 5) || !kvm_is_cr4_bit_set(vcpu, X86_CR4_DE))
1039 return true;
1040
1041 kvm_queue_exception(vcpu, UD_VECTOR);
1042 return false;
1043 }
1044 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_require_dr);
1045
1046 static bool kvm_pv_async_pf_enabled(struct kvm_vcpu *vcpu)
1047 {
1048 u64 mask = KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT;
1049
1050 return (vcpu->arch.apf.msr_en_val & mask) == mask;
1051 }
1052
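/*
 * Reserved bits in a PAE PDPTE: the vCPU's reserved physical address bits
 * plus bits 1-2 and 5-8, which are always reserved in PDPTEs.
 */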
1053 static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu)
1054 {
1055 return vcpu->arch.reserved_gpa_bits | rsvd_bits(5, 8) | rsvd_bits(1, 2);
1056 }
1057
1058 /*
1059 * Load the pae pdptrs. Return 1 if they are all valid, 0 otherwise.
1060 */
1061 int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
1062 {
1063 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
1064 gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
1065 gpa_t real_gpa;
1066 int i;
1067 int ret;
1068 u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];
1069
1070 /*
1071 * If the MMU is nested, CR3 holds an L2 GPA and needs to be translated
1072 * to an L1 GPA.
1073 */
1074 real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(pdpt_gfn),
1075 PFERR_USER_MASK | PFERR_WRITE_MASK, NULL);
1076 if (real_gpa == INVALID_GPA)
1077 return 0;
1078
1079 /* Note the offset, PDPTRs are 32 byte aligned when using PAE paging. */
1080 ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(real_gpa), pdpte,
1081 cr3 & GENMASK(11, 5), sizeof(pdpte));
1082 if (ret < 0)
1083 return 0;
1084
1085 for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
1086 if ((pdpte[i] & PT_PRESENT_MASK) &&
1087 (pdpte[i] & pdptr_rsvd_bits(vcpu))) {
1088 return 0;
1089 }
1090 }
1091
1092 /*
1093 * Marking VCPU_EXREG_PDPTR dirty doesn't work for !tdp_enabled.
1094 * Shadow page roots need to be reconstructed instead.
1095 */
1096 if (!tdp_enabled && memcmp(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs)))
1097 kvm_mmu_free_roots(vcpu->kvm, mmu, KVM_MMU_ROOT_CURRENT);
1098
1099 memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
1100 kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
1101 kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
1102 vcpu->arch.pdptrs_from_userspace = false;
1103
1104 return 1;
1105 }
1106 EXPORT_SYMBOL_FOR_KVM_INTERNAL(load_pdptrs);
1107
1108 static bool kvm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1109 {
1110 #ifdef CONFIG_X86_64
1111 if (cr0 & 0xffffffff00000000UL)
1112 return false;
1113 #endif
1114
1115 if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
1116 return false;
1117
1118 if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
1119 return false;
1120
1121 return kvm_x86_call(is_valid_cr0)(vcpu, cr0);
1122 }
1123
1124 void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0)
1125 {
1126 /*
1127 * CR0.WP is incorporated into the MMU role, but only for non-nested,
1128 * indirect shadow MMUs. If paging is disabled, no updates are needed
1129 * as there are no permission bits to emulate. If TDP is enabled, the
1130 * MMU's metadata needs to be updated, e.g. so that emulating guest
1131 * translations does the right thing, but there's no need to unload the
1132 * root as CR0.WP doesn't affect SPTEs.
1133 */
1134 if ((cr0 ^ old_cr0) == X86_CR0_WP) {
1135 if (!(cr0 & X86_CR0_PG))
1136 return;
1137
1138 if (tdp_enabled) {
1139 kvm_init_mmu(vcpu);
1140 return;
1141 }
1142 }
1143
1144 if ((cr0 ^ old_cr0) & X86_CR0_PG) {
1145 /*
1146 * Clearing CR0.PG is defined to flush the TLB from the guest's
1147 * perspective.
1148 */
1149 if (!(cr0 & X86_CR0_PG))
1150 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
1151 /*
1152 * Check for async #PF completion events when enabling paging,
1153 * as the vCPU may have previously encountered async #PFs (it's
1154 * entirely legal for the guest to toggle paging on/off without
1155 * waiting for the async #PF queue to drain).
1156 */
1157 else if (kvm_pv_async_pf_enabled(vcpu))
1158 kvm_make_request(KVM_REQ_APF_READY, vcpu);
1159 }
1160
1161 if ((cr0 ^ old_cr0) & KVM_MMU_CR0_ROLE_BITS)
1162 kvm_mmu_reset_context(vcpu);
1163 }
1164 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_post_set_cr0);
1165
1166 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1167 {
1168 unsigned long old_cr0 = kvm_read_cr0(vcpu);
1169
1170 if (!kvm_is_valid_cr0(vcpu, cr0))
1171 return 1;
1172
1173 cr0 |= X86_CR0_ET;
1174
1175 /* Writes to CR0 reserved bits are ignored, even on Intel. */
1176 cr0 &= ~CR0_RESERVED_BITS;
1177
1178 #ifdef CONFIG_X86_64
1179 if ((vcpu->arch.efer & EFER_LME) && !is_paging(vcpu) &&
1180 (cr0 & X86_CR0_PG)) {
1181 int cs_db, cs_l;
1182
1183 if (!is_pae(vcpu))
1184 return 1;
1185 kvm_x86_call(get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
1186 if (cs_l)
1187 return 1;
1188 }
1189 #endif
1190 if (!(vcpu->arch.efer & EFER_LME) && (cr0 & X86_CR0_PG) &&
1191 is_pae(vcpu) && ((cr0 ^ old_cr0) & X86_CR0_PDPTR_BITS) &&
1192 !load_pdptrs(vcpu, kvm_read_cr3(vcpu)))
1193 return 1;
1194
1195 if (!(cr0 & X86_CR0_PG) &&
1196 (is_64_bit_mode(vcpu) || kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)))
1197 return 1;
1198
1199 if (!(cr0 & X86_CR0_WP) && kvm_is_cr4_bit_set(vcpu, X86_CR4_CET))
1200 return 1;
1201
1202 kvm_x86_call(set_cr0)(vcpu, cr0);
1203
1204 kvm_post_set_cr0(vcpu, old_cr0, cr0);
1205
1206 return 0;
1207 }
1208 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_cr0);
1209
1210 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
1211 {
1212 (void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
1213 }
1214 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_lmsw);
1215
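/*
 * Switch XCR0, and IA32_XSS if XSAVES is exposed to the guest, between host
 * and guest values.  Skipped for protected guests and when the guest hasn't
 * enabled CR4.OSXSAVE.
 */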
1216 static void kvm_load_xfeatures(struct kvm_vcpu *vcpu, bool load_guest)
1217 {
1218 if (vcpu->arch.guest_state_protected)
1219 return;
1220
1221 if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE))
1222 return;
1223
1224 if (vcpu->arch.xcr0 != kvm_host.xcr0)
1225 xsetbv(XCR_XFEATURE_ENABLED_MASK,
1226 load_guest ? vcpu->arch.xcr0 : kvm_host.xcr0);
1227
1228 if (guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVES) &&
1229 vcpu->arch.ia32_xss != kvm_host.xss)
1230 wrmsrq(MSR_IA32_XSS, load_guest ? vcpu->arch.ia32_xss : kvm_host.xss);
1231 }
1232
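/*
 * Load the guest's PKRU if the host supports protection keys, the guest can
 * use them (XCR0.PKRU or CR4.PKE is set), and the value differs from the
 * host's.
 */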
1233 static void kvm_load_guest_pkru(struct kvm_vcpu *vcpu)
1234 {
1235 if (vcpu->arch.guest_state_protected)
1236 return;
1237
1238 if (cpu_feature_enabled(X86_FEATURE_PKU) &&
1239 vcpu->arch.pkru != vcpu->arch.host_pkru &&
1240 ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
1241 kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE)))
1242 wrpkru(vcpu->arch.pkru);
1243 }
1244
1245 static void kvm_load_host_pkru(struct kvm_vcpu *vcpu)
1246 {
1247 if (vcpu->arch.guest_state_protected)
1248 return;
1249
1250 if (cpu_feature_enabled(X86_FEATURE_PKU) &&
1251 ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
1252 kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE))) {
1253 vcpu->arch.pkru = rdpkru();
1254 if (vcpu->arch.pkru != vcpu->arch.host_pkru)
1255 wrpkru(vcpu->arch.host_pkru);
1256 }
1257 }
1258
1259 #ifdef CONFIG_X86_64
1260 static inline u64 kvm_guest_supported_xfd(struct kvm_vcpu *vcpu)
1261 {
1262 return vcpu->arch.guest_supported_xcr0 & XFEATURE_MASK_USER_DYNAMIC;
1263 }
1264 #endif
1265
1266 int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
1267 {
1268 u64 xcr0 = xcr;
1269 u64 old_xcr0 = vcpu->arch.xcr0;
1270 u64 valid_bits;
1271
1272 /* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now */
1273 if (index != XCR_XFEATURE_ENABLED_MASK)
1274 return 1;
1275 if (!(xcr0 & XFEATURE_MASK_FP))
1276 return 1;
1277 if ((xcr0 & XFEATURE_MASK_YMM) && !(xcr0 & XFEATURE_MASK_SSE))
1278 return 1;
1279
1280 /*
1281 * Do not allow the guest to set bits that we do not support
1282 * saving. However, xcr0 bit 0 is always set, even if the
1283 * emulated CPU does not support XSAVE (see kvm_vcpu_reset()).
1284 */
1285 valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP;
1286 if (xcr0 & ~valid_bits)
1287 return 1;
1288
1289 if ((!(xcr0 & XFEATURE_MASK_BNDREGS)) !=
1290 (!(xcr0 & XFEATURE_MASK_BNDCSR)))
1291 return 1;
1292
1293 if (xcr0 & XFEATURE_MASK_AVX512) {
1294 if (!(xcr0 & XFEATURE_MASK_YMM))
1295 return 1;
1296 if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
1297 return 1;
1298 }
1299
1300 if ((xcr0 & XFEATURE_MASK_XTILE) &&
1301 ((xcr0 & XFEATURE_MASK_XTILE) != XFEATURE_MASK_XTILE))
1302 return 1;
1303
1304 vcpu->arch.xcr0 = xcr0;
1305
1306 if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
1307 vcpu->arch.cpuid_dynamic_bits_dirty = true;
1308 return 0;
1309 }
1310 EXPORT_SYMBOL_FOR_KVM_INTERNAL(__kvm_set_xcr);
1311
1312 int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu)
1313 {
1314 /* Note, #UD due to CR4.OSXSAVE=0 has priority over the intercept. */
1315 if (kvm_x86_call(get_cpl)(vcpu) != 0 ||
1316 __kvm_set_xcr(vcpu, kvm_rcx_read(vcpu), kvm_read_edx_eax(vcpu))) {
1317 kvm_inject_gp(vcpu, 0);
1318 return 1;
1319 }
1320
1321 return kvm_skip_emulated_instruction(vcpu);
1322 }
1323 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_xsetbv);
1324
1325 static bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1326 {
1327 return __kvm_is_valid_cr4(vcpu, cr4) &&
1328 kvm_x86_call(is_valid_cr4)(vcpu, cr4);
1329 }
1330
1331 void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4)
1332 {
1333 if ((cr4 ^ old_cr4) & KVM_MMU_CR4_ROLE_BITS)
1334 kvm_mmu_reset_context(vcpu);
1335
1336 /*
1337 * If CR4.PCIDE is changed 0 -> 1, there is no need to flush the TLB
1338 * according to the SDM; however, stale prev_roots could be reused
1339 * incorrectly in the future after a MOV to CR3 with NOFLUSH=1, so we
1340 * free them all. This is *not* a superset of KVM_REQ_TLB_FLUSH_GUEST
1341 * or KVM_REQ_TLB_FLUSH_CURRENT, because the hardware TLB is not flushed,
1342 * so fall through.
1343 */
1344 if (!tdp_enabled &&
1345 (cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE))
1346 kvm_mmu_unload(vcpu);
1347
1348 /*
1349 * The TLB has to be flushed for all PCIDs if any of the following
1350 * (architecturally required) changes happen:
1351 * - CR4.PCIDE is changed from 1 to 0
1352 * - CR4.PGE is toggled
1353 *
1354 * This is a superset of KVM_REQ_TLB_FLUSH_CURRENT.
1355 */
1356 if (((cr4 ^ old_cr4) & X86_CR4_PGE) ||
1357 (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
1358 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
1359
1360 /*
1361 * The TLB has to be flushed for the current PCID if any of the
1362 * following (architecturally required) changes happen:
1363 * - CR4.SMEP is changed from 0 to 1
1364 * - CR4.PAE is toggled
1365 */
1366 else if (((cr4 ^ old_cr4) & X86_CR4_PAE) ||
1367 ((cr4 & X86_CR4_SMEP) && !(old_cr4 & X86_CR4_SMEP)))
1368 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
1369
1370 }
1371 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_post_set_cr4);
1372
1373 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1374 {
1375 unsigned long old_cr4 = kvm_read_cr4(vcpu);
1376
1377 if (!kvm_is_valid_cr4(vcpu, cr4))
1378 return 1;
1379
1380 if (is_long_mode(vcpu)) {
1381 if (!(cr4 & X86_CR4_PAE))
1382 return 1;
1383 if ((cr4 ^ old_cr4) & X86_CR4_LA57)
1384 return 1;
1385 } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
1386 && ((cr4 ^ old_cr4) & X86_CR4_PDPTR_BITS)
1387 && !load_pdptrs(vcpu, kvm_read_cr3(vcpu)))
1388 return 1;
1389
1390 if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
1391 /* PCID cannot be enabled when CR3[11:0] != 000H or EFER.LMA = 0 */
1392 if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
1393 return 1;
1394 }
1395
1396 if ((cr4 & X86_CR4_CET) && !kvm_is_cr0_bit_set(vcpu, X86_CR0_WP))
1397 return 1;
1398
1399 kvm_x86_call(set_cr4)(vcpu, cr4);
1400
1401 kvm_post_set_cr4(vcpu, old_cr4, cr4);
1402
1403 return 0;
1404 }
1405 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_cr4);
1406
1407 static void kvm_invalidate_pcid(struct kvm_vcpu *vcpu, unsigned long pcid)
1408 {
1409 struct kvm_mmu *mmu = vcpu->arch.mmu;
1410 unsigned long roots_to_free = 0;
1411 int i;
1412
1413 /*
1414 * MOV CR3 and INVPCID are usually not intercepted when using TDP, but
1415 * this is reachable when running EPT=1 and unrestricted_guest=0, and
1416 * also via the emulator. KVM's TDP page tables are not in the scope of
1417 * the invalidation, but the guest's TLB entries need to be flushed as
1418 * the CPU may have cached entries in its TLB for the target PCID.
1419 */
1420 if (unlikely(tdp_enabled)) {
1421 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
1422 return;
1423 }
1424
1425 /*
1426 * If neither the current CR3 nor any of the prev_roots use the given
1427 * PCID, then nothing needs to be done here because a resync will
1428 * happen anyway before switching to any other CR3.
1429 */
1430 if (kvm_get_active_pcid(vcpu) == pcid) {
1431 kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
1432 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
1433 }
1434
1435 /*
1436 * If PCID is disabled, there is no need to free prev_roots even if the
1437 * PCIDs for them are also 0, because MOV to CR3 always flushes the TLB
1438 * with PCIDE=0.
1439 */
1440 if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE))
1441 return;
1442
1443 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
1444 if (kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd) == pcid)
1445 roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
1446
1447 kvm_mmu_free_roots(vcpu->kvm, mmu, roots_to_free);
1448 }
1449
1450 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
1451 {
1452 bool skip_tlb_flush = false;
1453 unsigned long pcid = 0;
1454 #ifdef CONFIG_X86_64
1455 if (kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)) {
1456 skip_tlb_flush = cr3 & X86_CR3_PCID_NOFLUSH;
1457 cr3 &= ~X86_CR3_PCID_NOFLUSH;
1458 pcid = cr3 & X86_CR3_PCID_MASK;
1459 }
1460 #endif
1461
1462 /* PDPTRs are always reloaded for PAE paging. */
1463 if (cr3 == kvm_read_cr3(vcpu) && !is_pae_paging(vcpu))
1464 goto handle_tlb_flush;
1465
1466 /*
1467 * Do not condition the GPA check on long mode, this helper is used to
1468 * stuff CR3, e.g. for RSM emulation, and there is no guarantee that
1469 * the current vCPU mode is accurate.
1470 */
1471 if (!kvm_vcpu_is_legal_cr3(vcpu, cr3))
1472 return 1;
1473
1474 if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, cr3))
1475 return 1;
1476
1477 if (cr3 != kvm_read_cr3(vcpu))
1478 kvm_mmu_new_pgd(vcpu, cr3);
1479
1480 vcpu->arch.cr3 = cr3;
1481 kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
1482 /* Do not call post_set_cr3, we do not get here for confidential guests. */
1483
1484 handle_tlb_flush:
1485 /*
1486 * A load of CR3 that flushes the TLB flushes only the current PCID,
1487 * even if PCID is disabled, in which case PCID=0 is flushed. It's a
1488 * moot point in the end because _disabling_ PCID will flush all PCIDs,
1489 * and it's impossible to use a non-zero PCID when PCID is disabled,
1490 * i.e. only PCID=0 can be relevant.
1491 */
1492 if (!skip_tlb_flush)
1493 kvm_invalidate_pcid(vcpu, pcid);
1494
1495 return 0;
1496 }
1497 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_cr3);
1498
1499 int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
1500 {
1501 if (cr8 & CR8_RESERVED_BITS)
1502 return 1;
1503 if (lapic_in_kernel(vcpu))
1504 kvm_lapic_set_tpr(vcpu, cr8);
1505 else
1506 vcpu->arch.cr8 = cr8;
1507 return 0;
1508 }
1509 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_cr8);
1510
1511 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
1512 {
1513 if (lapic_in_kernel(vcpu))
1514 return kvm_lapic_get_cr8(vcpu);
1515 else
1516 return vcpu->arch.cr8;
1517 }
1518 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_get_cr8);
1519
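/*
 * Propagate the guest's DR0-DR3 values to the effective debug registers,
 * unless userspace is using hardware breakpoints to debug the guest.
 */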
1520 static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
1521 {
1522 int i;
1523
1524 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
1525 for (i = 0; i < KVM_NR_DB_REGS; i++)
1526 vcpu->arch.eff_db[i] = vcpu->arch.db[i];
1527 }
1528 }
1529
1530 void kvm_update_dr7(struct kvm_vcpu *vcpu)
1531 {
1532 unsigned long dr7;
1533
1534 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
1535 dr7 = vcpu->arch.guest_debug_dr7;
1536 else
1537 dr7 = vcpu->arch.dr7;
1538 kvm_x86_call(set_dr7)(vcpu, dr7);
1539 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED;
1540 if (dr7 & DR7_BP_EN_MASK)
1541 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED;
1542 }
1543 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_update_dr7);
1544
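/*
 * Compute the DR6 bits that are fixed to '1' for this vCPU: DR6_FIXED_1,
 * plus the RTM and bus lock detect bits when the corresponding features are
 * *not* exposed to the guest.
 */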
1545 static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
1546 {
1547 u64 fixed = DR6_FIXED_1;
1548
1549 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_RTM))
1550 fixed |= DR6_RTM;
1551
1552 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT))
1553 fixed |= DR6_BUS_LOCK;
1554 return fixed;
1555 }
1556
1557 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
1558 {
1559 size_t size = ARRAY_SIZE(vcpu->arch.db);
1560
1561 switch (dr) {
1562 case 0 ... 3:
1563 vcpu->arch.db[array_index_nospec(dr, size)] = val;
1564 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
1565 vcpu->arch.eff_db[dr] = val;
1566 break;
1567 case 4:
1568 case 6:
1569 if (!kvm_dr6_valid(val))
1570 return 1; /* #GP */
1571 vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu);
1572 break;
1573 case 5:
1574 default: /* 7 */
1575 if (!kvm_dr7_valid(val))
1576 return 1; /* #GP */
1577 vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
1578 kvm_update_dr7(vcpu);
1579 break;
1580 }
1581
1582 return 0;
1583 }
1584 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_dr);
1585
1586 unsigned long kvm_get_dr(struct kvm_vcpu *vcpu, int dr)
1587 {
1588 size_t size = ARRAY_SIZE(vcpu->arch.db);
1589
1590 switch (dr) {
1591 case 0 ... 3:
1592 return vcpu->arch.db[array_index_nospec(dr, size)];
1593 case 4:
1594 case 6:
1595 return vcpu->arch.dr6;
1596 case 5:
1597 default: /* 7 */
1598 return vcpu->arch.dr7;
1599 }
1600 }
1601 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_get_dr);
1602
1603 int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu)
1604 {
1605 u32 pmc = kvm_rcx_read(vcpu);
1606 u64 data;
1607
1608 if (kvm_pmu_rdpmc(vcpu, pmc, &data)) {
1609 kvm_inject_gp(vcpu, 0);
1610 return 1;
1611 }
1612
1613 kvm_rax_write(vcpu, (u32)data);
1614 kvm_rdx_write(vcpu, data >> 32);
1615 return kvm_skip_emulated_instruction(vcpu);
1616 }
1617 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_rdpmc);
1618
1619 /*
1620 * Some IA32_ARCH_CAPABILITIES bits have dependencies on MSRs that KVM
1621 * does not yet virtualize. These include:
1622 * 10 - MISC_PACKAGE_CTRLS
1623 * 11 - ENERGY_FILTERING_CTL
1624 * 12 - DOITM
1625 * 18 - FB_CLEAR_CTRL
1626 * 21 - XAPIC_DISABLE_STATUS
1627 * 23 - OVERCLOCKING_STATUS
1628 */
1629
1630 #define KVM_SUPPORTED_ARCH_CAP \
1631 (ARCH_CAP_RDCL_NO | ARCH_CAP_IBRS_ALL | ARCH_CAP_RSBA | \
1632 ARCH_CAP_SKIP_VMENTRY_L1DFLUSH | ARCH_CAP_SSB_NO | ARCH_CAP_MDS_NO | \
1633 ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \
1634 ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \
1635 ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO | ARCH_CAP_GDS_NO | \
1636 ARCH_CAP_RFDS_NO | ARCH_CAP_RFDS_CLEAR | ARCH_CAP_BHI_NO | ARCH_CAP_ITS_NO)
1637
1638 static u64 kvm_get_arch_capabilities(void)
1639 {
1640 u64 data = kvm_host.arch_capabilities & KVM_SUPPORTED_ARCH_CAP;
1641
1642 /*
1643 * If nx_huge_pages is enabled, KVM's shadow paging will ensure that
1644 * the nested hypervisor runs with NX huge pages. If it is not,
1645 * L1 is anyway vulnerable to ITLB_MULTIHIT exploits from other
1646 * L1 guests, so it need not worry about its own (L2) guests.
1647 */
1648 data |= ARCH_CAP_PSCHANGE_MC_NO;
1649
1650 /*
1651 * If we're doing cache flushes (either "always" or "cond")
1652 * we will do one whenever the guest does a vmlaunch/vmresume.
1653 * If an outer hypervisor is doing the cache flush for us
1654 * (ARCH_CAP_SKIP_VMENTRY_L1DFLUSH), we can safely pass that
1655 * capability to the guest too, and if EPT is disabled we're not
1656 * vulnerable. Overall, only VMENTER_L1D_FLUSH_NEVER will
1657 * require a nested hypervisor to do a flush of its own.
1658 */
1659 if (l1tf_vmx_mitigation != VMENTER_L1D_FLUSH_NEVER)
1660 data |= ARCH_CAP_SKIP_VMENTRY_L1DFLUSH;
1661
1662 if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
1663 data |= ARCH_CAP_RDCL_NO;
1664 if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
1665 data |= ARCH_CAP_SSB_NO;
1666 if (!boot_cpu_has_bug(X86_BUG_MDS))
1667 data |= ARCH_CAP_MDS_NO;
1668 if (!boot_cpu_has_bug(X86_BUG_RFDS))
1669 data |= ARCH_CAP_RFDS_NO;
1670 if (!boot_cpu_has_bug(X86_BUG_ITS))
1671 data |= ARCH_CAP_ITS_NO;
1672
1673 if (!boot_cpu_has(X86_FEATURE_RTM)) {
1674 /*
1675 * If RTM=0 because the kernel has disabled TSX, the host might
1676 * have TAA_NO or TSX_CTRL. Clear TAA_NO (the guest sees RTM=0
1677 * and therefore knows that there cannot be TAA) but keep
1678 * TSX_CTRL: some buggy userspaces leave it set on tsx=on hosts,
1679 * and we want to allow migrating those guests to tsx=off hosts.
1680 */
1681 data &= ~ARCH_CAP_TAA_NO;
1682 } else if (!boot_cpu_has_bug(X86_BUG_TAA)) {
1683 data |= ARCH_CAP_TAA_NO;
1684 } else {
1685 /*
1686 * Nothing to do here; we emulate TSX_CTRL if present on the
1687 * host so the guest can choose between disabling TSX or
1688 * using VERW to clear CPU buffers.
1689 */
1690 }
1691
1692 if (!boot_cpu_has_bug(X86_BUG_GDS) || gds_ucode_mitigated())
1693 data |= ARCH_CAP_GDS_NO;
1694
1695 return data;
1696 }
1697
1698 static int kvm_get_feature_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
1699 bool host_initiated)
1700 {
1701 WARN_ON_ONCE(!host_initiated);
1702
1703 switch (index) {
1704 case MSR_IA32_ARCH_CAPABILITIES:
1705 *data = kvm_get_arch_capabilities();
1706 break;
1707 case MSR_IA32_PERF_CAPABILITIES:
1708 *data = kvm_caps.supported_perf_cap;
1709 break;
1710 case MSR_PLATFORM_INFO:
1711 *data = MSR_PLATFORM_INFO_CPUID_FAULT;
1712 break;
1713 case MSR_IA32_UCODE_REV:
1714 rdmsrq_safe(index, data);
1715 break;
1716 default:
1717 return kvm_x86_call(get_feature_msr)(index, data);
1718 }
1719 return 0;
1720 }
1721
1722 static int do_get_feature_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
1723 {
1724 return kvm_do_msr_access(vcpu, index, data, true, MSR_TYPE_R,
1725 kvm_get_feature_msr);
1726 }
1727
1728 static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
1729 {
1730 if (efer & EFER_AUTOIBRS && !guest_cpu_cap_has(vcpu, X86_FEATURE_AUTOIBRS))
1731 return false;
1732
1733 if (efer & EFER_FFXSR && !guest_cpu_cap_has(vcpu, X86_FEATURE_FXSR_OPT))
1734 return false;
1735
1736 if (efer & EFER_SVME && !guest_cpu_cap_has(vcpu, X86_FEATURE_SVM))
1737 return false;
1738
1739 if (efer & (EFER_LME | EFER_LMA) &&
1740 !guest_cpu_cap_has(vcpu, X86_FEATURE_LM))
1741 return false;
1742
1743 if (efer & EFER_NX && !guest_cpu_cap_has(vcpu, X86_FEATURE_NX))
1744 return false;
1745
1746 return true;
1747
1748 }
1749 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
1750 {
1751 if (efer & efer_reserved_bits)
1752 return false;
1753
1754 return __kvm_valid_efer(vcpu, efer);
1755 }
1756 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_valid_efer);
1757
1758 static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1759 {
1760 u64 old_efer = vcpu->arch.efer;
1761 u64 efer = msr_info->data;
1762 int r;
1763
1764 if (efer & efer_reserved_bits)
1765 return 1;
1766
1767 if (!msr_info->host_initiated) {
1768 if (!__kvm_valid_efer(vcpu, efer))
1769 return 1;
1770
1771 if (is_paging(vcpu) &&
1772 (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
1773 return 1;
1774 }
1775
1776 efer &= ~EFER_LMA;
1777 efer |= vcpu->arch.efer & EFER_LMA;
1778
1779 r = kvm_x86_call(set_efer)(vcpu, efer);
1780 if (r) {
1781 WARN_ON(r > 0);
1782 return r;
1783 }
1784
1785 if ((efer ^ old_efer) & KVM_MMU_EFER_ROLE_BITS)
1786 kvm_mmu_reset_context(vcpu);
1787
1788 if (!static_cpu_has(X86_FEATURE_XSAVES) &&
1789 (efer & EFER_SVME))
1790 kvm_hv_xsaves_xsavec_maybe_warn(vcpu);
1791
1792 return 0;
1793 }
1794
1795 void kvm_enable_efer_bits(u64 mask)
1796 {
1797 efer_reserved_bits &= ~mask;
1798 }
1799 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_enable_efer_bits);
1800
1801 bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type)
1802 {
1803 struct kvm_x86_msr_filter *msr_filter;
1804 struct msr_bitmap_range *ranges;
1805 struct kvm *kvm = vcpu->kvm;
1806 bool allowed;
1807 int idx;
1808 u32 i;
1809
1810 /* x2APIC MSRs do not support filtering. */
1811 if (index >= 0x800 && index <= 0x8ff)
1812 return true;
1813
1814 idx = srcu_read_lock(&kvm->srcu);
1815
1816 msr_filter = srcu_dereference(kvm->arch.msr_filter, &kvm->srcu);
1817 if (!msr_filter) {
1818 allowed = true;
1819 goto out;
1820 }
1821
1822 allowed = msr_filter->default_allow;
1823 ranges = msr_filter->ranges;
1824
1825 for (i = 0; i < msr_filter->count; i++) {
1826 u32 start = ranges[i].base;
1827 u32 end = start + ranges[i].nmsrs;
1828 u32 flags = ranges[i].flags;
1829 unsigned long *bitmap = ranges[i].bitmap;
1830
1831 if ((index >= start) && (index < end) && (flags & type)) {
1832 allowed = test_bit(index - start, bitmap);
1833 break;
1834 }
1835 }
1836
1837 out:
1838 srcu_read_unlock(&kvm->srcu, idx);
1839
1840 return allowed;
1841 }
1842 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_msr_allowed);
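
/*
 * For illustration, using the fields walked above: a userspace filter
 * range with base = 0xc0000080 (MSR_EFER), nmsrs = 1 and
 * flags = KVM_MSR_FILTER_WRITE matches a WRMSR to EFER, and bit 0 of the
 * range's bitmap then decides the verdict (set = allowed, clear =
 * filtered), overriding the filter's default_allow policy for that MSR.
 */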
1843
1844 /*
1845 * Write @data into the MSR specified by @index. Select MSR specific fault
1846 * checks are bypassed if @host_initiated is %true.
1847 * Returns 0 on success, non-0 otherwise.
1848 * Assumes vcpu_load() was already called.
1849 */
1850 static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
1851 bool host_initiated)
1852 {
1853 struct msr_data msr;
1854
1855 switch (index) {
1856 case MSR_FS_BASE:
1857 case MSR_GS_BASE:
1858 case MSR_KERNEL_GS_BASE:
1859 case MSR_CSTAR:
1860 case MSR_LSTAR:
1861 if (is_noncanonical_msr_address(data, vcpu))
1862 return 1;
1863 break;
1864 case MSR_IA32_SYSENTER_EIP:
1865 case MSR_IA32_SYSENTER_ESP:
1866 /*
1867 * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
1868 * non-canonical address is written on Intel but not on
1869 * AMD (which ignores the top 32-bits, because it does
1870 * not implement 64-bit SYSENTER).
1871 *
1872 * 64-bit code should hence be able to write a non-canonical
1873 * value on AMD. Making the address canonical ensures that
1874 * vmentry does not fail on Intel after writing a non-canonical
1875 * value, and that something deterministic happens if the guest
1876 * invokes 64-bit SYSENTER.
1877 */
1878 data = __canonical_address(data, max_host_virt_addr_bits());
1879 break;
1880 case MSR_TSC_AUX:
1881 if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX))
1882 return 1;
1883
1884 if (!host_initiated &&
1885 !guest_cpu_cap_has(vcpu, X86_FEATURE_RDTSCP) &&
1886 !guest_cpu_cap_has(vcpu, X86_FEATURE_RDPID))
1887 return 1;
1888
1889 /*
1890 * Per Intel's SDM, bits 63:32 are reserved, but AMD's APM has
1891 * incomplete and conflicting architectural behavior. Current
1892 * AMD CPUs completely ignore bits 63:32, i.e. they aren't
1893 * reserved and always read as zeros. Enforce Intel's reserved
1894 * bits check if the guest CPU is Intel compatible, otherwise
1895 * clear the bits. This ensures cross-vendor migration will
1896 * provide consistent behavior for the guest.
1897 */
1898 if (guest_cpuid_is_intel_compatible(vcpu) && (data >> 32) != 0)
1899 return 1;
1900
1901 data = (u32)data;
1902 break;
1903 case MSR_IA32_U_CET:
1904 case MSR_IA32_S_CET:
1905 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK) &&
1906 !guest_cpu_cap_has(vcpu, X86_FEATURE_IBT))
1907 return KVM_MSR_RET_UNSUPPORTED;
1908 if (!kvm_is_valid_u_s_cet(vcpu, data))
1909 return 1;
1910 break;
1911 case MSR_KVM_INTERNAL_GUEST_SSP:
1912 if (!host_initiated)
1913 return 1;
1914 fallthrough;
1915 /*
1916 * Note that the MSR emulation here is flawed when a vCPU
1917 * doesn't support the Intel 64 architecture. The expected
1918 * architectural behavior in this case is that the upper 32
1919 * bits do not exist and should always read '0'. However,
1920 * because the actual hardware on which the virtual CPU is
1921 * running does support Intel 64, XRSTORS/XSAVES in the
1922 * guest could observe behavior that violates the
1923 * architecture. Intercepting XRSTORS/XSAVES for this
1924 * special case isn't deemed worthwhile.
1925 */
1926 case MSR_IA32_PL0_SSP ... MSR_IA32_INT_SSP_TAB:
1927 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK))
1928 return KVM_MSR_RET_UNSUPPORTED;
1929 /*
1930 * MSR_IA32_INT_SSP_TAB is not present on processors that do
1931 * not support Intel 64 architecture.
1932 */
1933 if (index == MSR_IA32_INT_SSP_TAB && !guest_cpu_cap_has(vcpu, X86_FEATURE_LM))
1934 return KVM_MSR_RET_UNSUPPORTED;
1935 if (is_noncanonical_msr_address(data, vcpu))
1936 return 1;
1937 /* All SSP MSRs except MSR_IA32_INT_SSP_TAB must be 4-byte aligned */
1938 if (index != MSR_IA32_INT_SSP_TAB && !IS_ALIGNED(data, 4))
1939 return 1;
1940 break;
1941 }
1942
1943 msr.data = data;
1944 msr.index = index;
1945 msr.host_initiated = host_initiated;
1946
1947 return kvm_x86_call(set_msr)(vcpu, &msr);
1948 }
1949
1950 static int _kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
1951 bool host_initiated)
1952 {
1953 return __kvm_set_msr(vcpu, index, *data, host_initiated);
1954 }
1955
1956 static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu,
1957 u32 index, u64 data, bool host_initiated)
1958 {
1959 return kvm_do_msr_access(vcpu, index, &data, host_initiated, MSR_TYPE_W,
1960 _kvm_set_msr);
1961 }
1962
1963 /*
1964 * Read the MSR specified by @index into @data. Select MSR specific fault
1965 * checks are bypassed if @host_initiated is %true.
1966 * Returns 0 on success, non-0 otherwise.
1967 * Assumes vcpu_load() was already called.
1968 */
1969 static int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
1970 bool host_initiated)
1971 {
1972 struct msr_data msr;
1973 int ret;
1974
1975 switch (index) {
1976 case MSR_TSC_AUX:
1977 if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX))
1978 return 1;
1979
1980 if (!host_initiated &&
1981 !guest_cpu_cap_has(vcpu, X86_FEATURE_RDTSCP) &&
1982 !guest_cpu_cap_has(vcpu, X86_FEATURE_RDPID))
1983 return 1;
1984 break;
1985 case MSR_IA32_U_CET:
1986 case MSR_IA32_S_CET:
1987 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK) &&
1988 !guest_cpu_cap_has(vcpu, X86_FEATURE_IBT))
1989 return KVM_MSR_RET_UNSUPPORTED;
1990 break;
1991 case MSR_KVM_INTERNAL_GUEST_SSP:
1992 if (!host_initiated)
1993 return 1;
1994 fallthrough;
1995 case MSR_IA32_PL0_SSP ... MSR_IA32_INT_SSP_TAB:
1996 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK))
1997 return KVM_MSR_RET_UNSUPPORTED;
1998 break;
1999 }
2000
2001 msr.index = index;
2002 msr.host_initiated = host_initiated;
2003
2004 ret = kvm_x86_call(get_msr)(vcpu, &msr);
2005 if (!ret)
2006 *data = msr.data;
2007 return ret;
2008 }
2009
2010 int kvm_msr_write(struct kvm_vcpu *vcpu, u32 index, u64 data)
2011 {
2012 return __kvm_set_msr(vcpu, index, data, true);
2013 }
2014
2015 int kvm_msr_read(struct kvm_vcpu *vcpu, u32 index, u64 *data)
2016 {
2017 return __kvm_get_msr(vcpu, index, data, true);
2018 }
2019
2020 static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu,
2021 u32 index, u64 *data, bool host_initiated)
2022 {
2023 return kvm_do_msr_access(vcpu, index, data, host_initiated, MSR_TYPE_R,
2024 __kvm_get_msr);
2025 }
2026
2027 int __kvm_emulate_msr_read(struct kvm_vcpu *vcpu, u32 index, u64 *data)
2028 {
2029 return kvm_get_msr_ignored_check(vcpu, index, data, false);
2030 }
2031 EXPORT_SYMBOL_FOR_KVM_INTERNAL(__kvm_emulate_msr_read);
2032
2033 int __kvm_emulate_msr_write(struct kvm_vcpu *vcpu, u32 index, u64 data)
2034 {
2035 return kvm_set_msr_ignored_check(vcpu, index, data, false);
2036 }
2037 EXPORT_SYMBOL_FOR_KVM_INTERNAL(__kvm_emulate_msr_write);
2038
2039 int kvm_emulate_msr_read(struct kvm_vcpu *vcpu, u32 index, u64 *data)
2040 {
2041 if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ))
2042 return KVM_MSR_RET_FILTERED;
2043
2044 return __kvm_emulate_msr_read(vcpu, index, data);
2045 }
2046 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_msr_read);
2047
2048 int kvm_emulate_msr_write(struct kvm_vcpu *vcpu, u32 index, u64 data)
2049 {
2050 if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE))
2051 return KVM_MSR_RET_FILTERED;
2052
2053 return __kvm_emulate_msr_write(vcpu, index, data);
2054 }
2055 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_msr_write);
2056
2057
2058 static void complete_userspace_rdmsr(struct kvm_vcpu *vcpu)
2059 {
2060 if (!vcpu->run->msr.error) {
2061 kvm_rax_write(vcpu, (u32)vcpu->run->msr.data);
2062 kvm_rdx_write(vcpu, vcpu->run->msr.data >> 32);
2063 }
2064 }
2065
2066 static int complete_emulated_msr_access(struct kvm_vcpu *vcpu)
2067 {
2068 return complete_emulated_insn_gp(vcpu, vcpu->run->msr.error);
2069 }
2070
2071 static int complete_emulated_rdmsr(struct kvm_vcpu *vcpu)
2072 {
2073 complete_userspace_rdmsr(vcpu);
2074 return complete_emulated_msr_access(vcpu);
2075 }
2076
2077 static int complete_fast_msr_access(struct kvm_vcpu *vcpu)
2078 {
2079 return kvm_x86_call(complete_emulated_msr)(vcpu, vcpu->run->msr.error);
2080 }
2081
2082 static int complete_fast_rdmsr(struct kvm_vcpu *vcpu)
2083 {
2084 complete_userspace_rdmsr(vcpu);
2085 return complete_fast_msr_access(vcpu);
2086 }
2087
2088 static int complete_fast_rdmsr_imm(struct kvm_vcpu *vcpu)
2089 {
2090 if (!vcpu->run->msr.error)
2091 kvm_register_write(vcpu, vcpu->arch.cui_rdmsr_imm_reg,
2092 vcpu->run->msr.data);
2093
2094 return complete_fast_msr_access(vcpu);
2095 }
2096
2097 static u64 kvm_msr_reason(int r)
2098 {
2099 switch (r) {
2100 case KVM_MSR_RET_UNSUPPORTED:
2101 return KVM_MSR_EXIT_REASON_UNKNOWN;
2102 case KVM_MSR_RET_FILTERED:
2103 return KVM_MSR_EXIT_REASON_FILTER;
2104 default:
2105 return KVM_MSR_EXIT_REASON_INVAL;
2106 }
2107 }
2108
2109 static int kvm_msr_user_space(struct kvm_vcpu *vcpu, u32 index,
2110 u32 exit_reason, u64 data,
2111 int (*completion)(struct kvm_vcpu *vcpu),
2112 int r)
2113 {
2114 u64 msr_reason = kvm_msr_reason(r);
2115
2116 /* Check if the user wanted to know about this MSR fault */
2117 if (!(vcpu->kvm->arch.user_space_msr_mask & msr_reason))
2118 return 0;
2119
2120 vcpu->run->exit_reason = exit_reason;
2121 vcpu->run->msr.error = 0;
2122 memset(vcpu->run->msr.pad, 0, sizeof(vcpu->run->msr.pad));
2123 vcpu->run->msr.reason = msr_reason;
2124 vcpu->run->msr.index = index;
2125 vcpu->run->msr.data = data;
2126 vcpu->arch.complete_userspace_io = completion;
2127
2128 return 1;
2129 }
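
/*
 * A VMM that enabled KVM_CAP_X86_USER_SPACE_MSR typically completes such
 * an exit along these lines (minimal sketch; emulate_rdmsr() is a
 * hypothetical userspace helper):
 *
 *	if (run->exit_reason == KVM_EXIT_X86_RDMSR) {
 *		run->msr.data  = emulate_rdmsr(run->msr.index);
 *		run->msr.error = 0;	// or 1 to have KVM inject #GP
 *	}
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 *
 * On re-entry the completion callback stashed above (e.g.
 * complete_fast_rdmsr()) copies msr.data into RAX/RDX and skips the
 * instruction, or injects #GP when msr.error is set.
 */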
2130
2131 static int __kvm_emulate_rdmsr(struct kvm_vcpu *vcpu, u32 msr, int reg,
2132 int (*complete_rdmsr)(struct kvm_vcpu *))
2133 {
2134 u64 data;
2135 int r;
2136
2137 r = kvm_emulate_msr_read(vcpu, msr, &data);
2138
2139 if (!r) {
2140 trace_kvm_msr_read(msr, data);
2141
2142 if (reg < 0) {
2143 kvm_rax_write(vcpu, data & -1u);
2144 kvm_rdx_write(vcpu, (data >> 32) & -1u);
2145 } else {
2146 kvm_register_write(vcpu, reg, data);
2147 }
2148 } else {
2149 /* MSR read failed? See if we should ask user space */
2150 if (kvm_msr_user_space(vcpu, msr, KVM_EXIT_X86_RDMSR, 0,
2151 complete_rdmsr, r))
2152 return 0;
2153 trace_kvm_msr_read_ex(msr);
2154 }
2155
2156 return kvm_x86_call(complete_emulated_msr)(vcpu, r);
2157 }
2158
2159 int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu)
2160 {
2161 return __kvm_emulate_rdmsr(vcpu, kvm_rcx_read(vcpu), -1,
2162 complete_fast_rdmsr);
2163 }
2164 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_rdmsr);
2165
2166 int kvm_emulate_rdmsr_imm(struct kvm_vcpu *vcpu, u32 msr, int reg)
2167 {
2168 vcpu->arch.cui_rdmsr_imm_reg = reg;
2169
2170 return __kvm_emulate_rdmsr(vcpu, msr, reg, complete_fast_rdmsr_imm);
2171 }
2172 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_rdmsr_imm);
2173
2174 static int __kvm_emulate_wrmsr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
2175 {
2176 int r;
2177
2178 r = kvm_emulate_msr_write(vcpu, msr, data);
2179 if (!r) {
2180 trace_kvm_msr_write(msr, data);
2181 } else {
2182 /* MSR write failed? See if we should ask user space */
2183 if (kvm_msr_user_space(vcpu, msr, KVM_EXIT_X86_WRMSR, data,
2184 complete_fast_msr_access, r))
2185 return 0;
2186 /* Signal all other negative errors to userspace */
2187 if (r < 0)
2188 return r;
2189 trace_kvm_msr_write_ex(msr, data);
2190 }
2191
2192 return kvm_x86_call(complete_emulated_msr)(vcpu, r);
2193 }
2194
2195 int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
2196 {
2197 return __kvm_emulate_wrmsr(vcpu, kvm_rcx_read(vcpu),
2198 kvm_read_edx_eax(vcpu));
2199 }
2200 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_wrmsr);
2201
2202 int kvm_emulate_wrmsr_imm(struct kvm_vcpu *vcpu, u32 msr, int reg)
2203 {
2204 return __kvm_emulate_wrmsr(vcpu, msr, kvm_register_read(vcpu, reg));
2205 }
2206 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_wrmsr_imm);
2207
2208 int kvm_emulate_as_nop(struct kvm_vcpu *vcpu)
2209 {
2210 return kvm_skip_emulated_instruction(vcpu);
2211 }
2212
2213 int kvm_emulate_invd(struct kvm_vcpu *vcpu)
2214 {
2215 /* Treat an INVD instruction as a NOP and just skip it. */
2216 return kvm_emulate_as_nop(vcpu);
2217 }
2218 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_invd);
2219
2220 fastpath_t handle_fastpath_invd(struct kvm_vcpu *vcpu)
2221 {
2222 if (!kvm_pmu_is_fastpath_emulation_allowed(vcpu))
2223 return EXIT_FASTPATH_NONE;
2224
2225 if (!kvm_emulate_invd(vcpu))
2226 return EXIT_FASTPATH_EXIT_USERSPACE;
2227
2228 return EXIT_FASTPATH_REENTER_GUEST;
2229 }
2230 EXPORT_SYMBOL_FOR_KVM_INTERNAL(handle_fastpath_invd);
2231
2232 int kvm_handle_invalid_op(struct kvm_vcpu *vcpu)
2233 {
2234 kvm_queue_exception(vcpu, UD_VECTOR);
2235 return 1;
2236 }
2237 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_handle_invalid_op);
2238
2239
2240 static int kvm_emulate_monitor_mwait(struct kvm_vcpu *vcpu, const char *insn)
2241 {
2242 bool enabled;
2243
2244 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS))
2245 goto emulate_as_nop;
2246
2247 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT))
2248 enabled = guest_cpu_cap_has(vcpu, X86_FEATURE_MWAIT);
2249 else
2250 enabled = vcpu->arch.ia32_misc_enable_msr & MSR_IA32_MISC_ENABLE_MWAIT;
2251
2252 if (!enabled)
2253 return kvm_handle_invalid_op(vcpu);
2254
2255 emulate_as_nop:
2256 pr_warn_once("%s instruction emulated as NOP!\n", insn);
2257 return kvm_emulate_as_nop(vcpu);
2258 }
2259 int kvm_emulate_mwait(struct kvm_vcpu *vcpu)
2260 {
2261 return kvm_emulate_monitor_mwait(vcpu, "MWAIT");
2262 }
2263 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_mwait);
2264
2265 int kvm_emulate_monitor(struct kvm_vcpu *vcpu)
2266 {
2267 return kvm_emulate_monitor_mwait(vcpu, "MONITOR");
2268 }
2269 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_monitor);
2270
2271 static inline bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
2272 {
2273 xfer_to_guest_mode_prepare();
2274
2275 return READ_ONCE(vcpu->mode) == EXITING_GUEST_MODE ||
2276 kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending();
2277 }
2278
2279 static fastpath_t __handle_fastpath_wrmsr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
2280 {
2281 if (!kvm_pmu_is_fastpath_emulation_allowed(vcpu))
2282 return EXIT_FASTPATH_NONE;
2283
2284 switch (msr) {
2285 case APIC_BASE_MSR + (APIC_ICR >> 4):
2286 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(vcpu->arch.apic) ||
2287 kvm_x2apic_icr_write_fast(vcpu->arch.apic, data))
2288 return EXIT_FASTPATH_NONE;
2289 break;
2290 case MSR_IA32_TSC_DEADLINE:
2291 kvm_set_lapic_tscdeadline_msr(vcpu, data);
2292 break;
2293 default:
2294 return EXIT_FASTPATH_NONE;
2295 }
2296
2297 trace_kvm_msr_write(msr, data);
2298
2299 if (!kvm_skip_emulated_instruction(vcpu))
2300 return EXIT_FASTPATH_EXIT_USERSPACE;
2301
2302 return EXIT_FASTPATH_REENTER_GUEST;
2303 }
2304
2305 fastpath_t handle_fastpath_wrmsr(struct kvm_vcpu *vcpu)
2306 {
2307 return __handle_fastpath_wrmsr(vcpu, kvm_rcx_read(vcpu),
2308 kvm_read_edx_eax(vcpu));
2309 }
2310 EXPORT_SYMBOL_FOR_KVM_INTERNAL(handle_fastpath_wrmsr);
2311
2312 fastpath_t handle_fastpath_wrmsr_imm(struct kvm_vcpu *vcpu, u32 msr, int reg)
2313 {
2314 return __handle_fastpath_wrmsr(vcpu, msr, kvm_register_read(vcpu, reg));
2315 }
2316 EXPORT_SYMBOL_FOR_KVM_INTERNAL(handle_fastpath_wrmsr_imm);
2317
2318 /*
2319 * Adapt set_msr() to msr_io()'s calling convention
2320 */
2321 static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
2322 {
2323 return kvm_get_msr_ignored_check(vcpu, index, data, true);
2324 }
2325
2326 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
2327 {
2328 u64 val;
2329
2330 /*
2331 * Reject writes to immutable feature MSRs if the vCPU model is frozen,
2332 * as KVM doesn't support modifying the guest vCPU model on the fly,
2333 * e.g. changing the VMX capabilities MSRs while L2 is active is
2334 * nonsensical. Allow writes of the same value, e.g. so that userspace
2335 * can blindly stuff all MSRs when emulating RESET.
2336 */
2337 if (!kvm_can_set_cpuid_and_feature_msrs(vcpu) &&
2338 kvm_is_immutable_feature_msr(index) &&
2339 (do_get_msr(vcpu, index, &val) || *data != val))
2340 return -EINVAL;
2341
2342 return kvm_set_msr_ignored_check(vcpu, index, *data, true);
2343 }
2344
2345 #ifdef CONFIG_X86_64
2346 struct pvclock_clock {
2347 int vclock_mode;
2348 u64 cycle_last;
2349 u64 mask;
2350 u32 mult;
2351 u32 shift;
2352 u64 base_cycles;
2353 u64 offset;
2354 };
2355
2356 struct pvclock_gtod_data {
2357 seqcount_t seq;
2358
2359 struct pvclock_clock clock; /* extract of a clocksource struct */
2360 struct pvclock_clock raw_clock; /* extract of a clocksource struct */
2361
2362 ktime_t offs_boot;
2363 u64 wall_time_sec;
2364 };
2365
2366 static struct pvclock_gtod_data pvclock_gtod_data;
2367
2368 static void update_pvclock_gtod(struct timekeeper *tk)
2369 {
2370 struct pvclock_gtod_data *vdata = &pvclock_gtod_data;
2371
2372 write_seqcount_begin(&vdata->seq);
2373
2374 /* copy pvclock gtod data */
2375 vdata->clock.vclock_mode = tk->tkr_mono.clock->vdso_clock_mode;
2376 vdata->clock.cycle_last = tk->tkr_mono.cycle_last;
2377 vdata->clock.mask = tk->tkr_mono.mask;
2378 vdata->clock.mult = tk->tkr_mono.mult;
2379 vdata->clock.shift = tk->tkr_mono.shift;
2380 vdata->clock.base_cycles = tk->tkr_mono.xtime_nsec;
2381 vdata->clock.offset = tk->tkr_mono.base;
2382
2383 vdata->raw_clock.vclock_mode = tk->tkr_raw.clock->vdso_clock_mode;
2384 vdata->raw_clock.cycle_last = tk->tkr_raw.cycle_last;
2385 vdata->raw_clock.mask = tk->tkr_raw.mask;
2386 vdata->raw_clock.mult = tk->tkr_raw.mult;
2387 vdata->raw_clock.shift = tk->tkr_raw.shift;
2388 vdata->raw_clock.base_cycles = tk->tkr_raw.xtime_nsec;
2389 vdata->raw_clock.offset = tk->tkr_raw.base;
2390
2391 vdata->wall_time_sec = tk->xtime_sec;
2392
2393 vdata->offs_boot = tk->offs_boot;
2394
2395 write_seqcount_end(&vdata->seq);
2396 }
2397
2398 static s64 get_kvmclock_base_ns(void)
2399 {
2400 /* Count up from boot time, but with the frequency of the raw clock. */
2401 return ktime_to_ns(ktime_add(ktime_get_raw(), pvclock_gtod_data.offs_boot));
2402 }
2403 #else
2404 static s64 get_kvmclock_base_ns(void)
2405 {
2406 /* Master clock not used, so we can just use CLOCK_BOOTTIME. */
2407 return ktime_get_boottime_ns();
2408 }
2409 #endif
2410
2411 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock, int sec_hi_ofs)
2412 {
2413 int version;
2414 int r;
2415 struct pvclock_wall_clock wc;
2416 u32 wc_sec_hi;
2417 u64 wall_nsec;
2418
2419 if (!wall_clock)
2420 return;
2421
2422 r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
2423 if (r)
2424 return;
2425
2426 if (version & 1)
2427 ++version; /* first time write, random junk */
2428
2429 ++version;
2430
2431 if (kvm_write_guest(kvm, wall_clock, &version, sizeof(version)))
2432 return;
2433
2434 wall_nsec = kvm_get_wall_clock_epoch(kvm);
2435
2436 wc.nsec = do_div(wall_nsec, NSEC_PER_SEC);
2437 wc.sec = (u32)wall_nsec; /* overflow in 2106 guest time */
2438 wc.version = version;
2439
2440 kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));
2441
2442 if (sec_hi_ofs) {
2443 wc_sec_hi = wall_nsec >> 32;
2444 kvm_write_guest(kvm, wall_clock + sec_hi_ofs,
2445 &wc_sec_hi, sizeof(wc_sec_hi));
2446 }
2447
2448 version++;
2449 kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
2450 }
2451
2452 static void kvm_write_system_time(struct kvm_vcpu *vcpu, gpa_t system_time,
2453 bool old_msr, bool host_initiated)
2454 {
2455 struct kvm_arch *ka = &vcpu->kvm->arch;
2456
2457 if (vcpu->vcpu_id == 0 && !host_initiated) {
2458 if (ka->boot_vcpu_runs_old_kvmclock != old_msr)
2459 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
2460
2461 ka->boot_vcpu_runs_old_kvmclock = old_msr;
2462 }
2463
2464 vcpu->arch.time = system_time;
2465 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
2466
2467 /* Only activate the pvclock mapping if the guest set the enable bit (bit 0). */
2468 if (system_time & 1)
2469 kvm_gpc_activate(&vcpu->arch.pv_time, system_time & ~1ULL,
2470 sizeof(struct pvclock_vcpu_time_info));
2471 else
2472 kvm_gpc_deactivate(&vcpu->arch.pv_time);
2473
2474 return;
2475 }
2476
2477 static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
2478 {
2479 do_shl32_div32(dividend, divisor);
2480 return dividend;
2481 }
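
/*
 * I.e. div_frac(a, b) returns the 32-bit fixed-point fraction
 * (a << 32) / b, e.g. div_frac(1, 4) == 0x40000000.  The caller,
 * kvm_get_time_scale(), guarantees a < b so the quotient fits in 32 bits.
 */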
2482
2483 static void kvm_get_time_scale(uint64_t scaled_hz, uint64_t base_hz,
2484 s8 *pshift, u32 *pmultiplier)
2485 {
2486 uint64_t scaled64;
2487 int32_t shift = 0;
2488 uint64_t tps64;
2489 uint32_t tps32;
2490
2491 tps64 = base_hz;
2492 scaled64 = scaled_hz;
2493 while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) {
2494 tps64 >>= 1;
2495 shift--;
2496 }
2497
2498 tps32 = (uint32_t)tps64;
2499 while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) {
2500 if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
2501 scaled64 >>= 1;
2502 else
2503 tps32 <<= 1;
2504 shift++;
2505 }
2506
2507 *pshift = shift;
2508 *pmultiplier = div_frac(scaled64, tps32);
2509 }
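
/*
 * Worked example: kvm_get_time_scale(NSEC_PER_SEC, 2500000 * 1000LL, ...)
 * for a 2.5 GHz clock halves tps64 once (2.5e9 > 2 * 1e9), leaving
 * shift = -1, and then computes *pmultiplier = (1e9 << 32) / 1.25e9,
 * roughly 0.8 * 2^32.  A consumer such as pvclock_scale_delta() then
 * evaluates ((delta >> 1) * mult) >> 32 = delta * 0.4, i.e. 0.4 ns per
 * 2.5 GHz cycle, as expected.
 */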
2510
2511 #ifdef CONFIG_X86_64
2512 static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
2513 #endif
2514
2515 static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
2516 static unsigned long max_tsc_khz;
2517
2518 static u32 adjust_tsc_khz(u32 khz, s32 ppm)
2519 {
2520 u64 v = (u64)khz * (1000000 + ppm);
2521 do_div(v, 1000000);
2522 return v;
2523 }
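
/*
 * E.g. with the default tsc_tolerance_ppm of 250, a 2500000 kHz host TSC
 * gives a tolerated window of adjust_tsc_khz(2500000, -250) = 2499375 kHz
 * up to adjust_tsc_khz(2500000, 250) = 2500625 kHz; requested guest TSC
 * frequencies inside that window run unscaled (see kvm_set_tsc_khz()).
 */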
2524
2525 static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier);
2526
2527 static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
2528 {
2529 u64 ratio;
2530
2531 /* Guest TSC same frequency as host TSC? */
2532 if (!scale) {
2533 kvm_vcpu_write_tsc_multiplier(vcpu, kvm_caps.default_tsc_scaling_ratio);
2534 return 0;
2535 }
2536
2537 /* TSC scaling supported? */
2538 if (!kvm_caps.has_tsc_control) {
2539 if (user_tsc_khz > tsc_khz) {
2540 vcpu->arch.tsc_catchup = 1;
2541 vcpu->arch.tsc_always_catchup = 1;
2542 return 0;
2543 } else {
2544 pr_warn_ratelimited("user requested TSC rate below hardware speed\n");
2545 return -1;
2546 }
2547 }
2548
2549 /* TSC scaling required - calculate ratio */
2550 ratio = mul_u64_u32_div(1ULL << kvm_caps.tsc_scaling_ratio_frac_bits,
2551 user_tsc_khz, tsc_khz);
2552
2553 if (ratio == 0 || ratio >= kvm_caps.max_tsc_scaling_ratio) {
2554 pr_warn_ratelimited("Invalid TSC scaling ratio - virtual-tsc-khz=%u\n",
2555 user_tsc_khz);
2556 return -1;
2557 }
2558
2559 kvm_vcpu_write_tsc_multiplier(vcpu, ratio);
2560 return 0;
2561 }
2562
2563 static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
2564 {
2565 u32 thresh_lo, thresh_hi;
2566 int use_scaling = 0;
2567
2568 /* tsc_khz can be zero if TSC calibration fails */
2569 if (user_tsc_khz == 0) {
2570 /* set tsc_scaling_ratio to a safe value */
2571 kvm_vcpu_write_tsc_multiplier(vcpu, kvm_caps.default_tsc_scaling_ratio);
2572 return -1;
2573 }
2574
2575 /* Compute a scale to convert nanoseconds in TSC cycles */
2576 kvm_get_time_scale(user_tsc_khz * 1000LL, NSEC_PER_SEC,
2577 &vcpu->arch.virtual_tsc_shift,
2578 &vcpu->arch.virtual_tsc_mult);
2579 vcpu->arch.virtual_tsc_khz = user_tsc_khz;
2580
2581 /*
2582 * Compute the variation in TSC rate which is acceptable
2583 * within the range of tolerance and decide if the
2584 * rate being applied is within those bounds of the hardware
2585 * rate. If so, no scaling or compensation need be done.
2586 */
2587 thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm);
2588 thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm);
2589 if (user_tsc_khz < thresh_lo || user_tsc_khz > thresh_hi) {
2590 pr_debug("requested TSC rate %u falls outside tolerance [%u,%u]\n",
2591 user_tsc_khz, thresh_lo, thresh_hi);
2592 use_scaling = 1;
2593 }
2594 return set_tsc_khz(vcpu, user_tsc_khz, use_scaling);
2595 }
2596
2597 static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
2598 {
2599 u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec,
2600 vcpu->arch.virtual_tsc_mult,
2601 vcpu->arch.virtual_tsc_shift);
2602 tsc += vcpu->arch.this_tsc_write;
2603 return tsc;
2604 }
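
/*
 * In catchup mode this linearly extrapolates the guest TSC from the last
 * synchronizing write: e.g. with virtual_tsc_khz = 2000000 (2 GHz), one
 * millisecond of kernel_ns elapsed since this_tsc_nsec adds
 * 2,000,000 cycles on top of this_tsc_write.
 */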
2605
2606 #ifdef CONFIG_X86_64
2607 static inline bool gtod_is_based_on_tsc(int mode)
2608 {
2609 return mode == VDSO_CLOCKMODE_TSC || mode == VDSO_CLOCKMODE_HVCLOCK;
2610 }
2611 #endif
2612
2613 static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu, bool new_generation)
2614 {
2615 #ifdef CONFIG_X86_64
2616 struct kvm_arch *ka = &vcpu->kvm->arch;
2617 struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
2618
2619 /*
2620 * To use the masterclock, the host clocksource must be based on TSC
2621 * and all vCPUs must have matching TSCs. Note, the count for matching
2622 * vCPUs doesn't include the reference vCPU, hence "+1".
2623 */
2624 bool use_master_clock = (ka->nr_vcpus_matched_tsc + 1 ==
2625 atomic_read(&vcpu->kvm->online_vcpus)) &&
2626 gtod_is_based_on_tsc(gtod->clock.vclock_mode);
2627
2628 /*
2629 * Request a masterclock update if the masterclock needs to be toggled
2630 * on/off, or when starting a new generation and the masterclock is
2631 * enabled (compute_guest_tsc() requires the masterclock snapshot to be
2632 * taken _after_ the new generation is created).
2633 */
2634 if ((ka->use_master_clock && new_generation) ||
2635 (ka->use_master_clock != use_master_clock))
2636 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
2637
2638 trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc,
2639 atomic_read(&vcpu->kvm->online_vcpus),
2640 ka->use_master_clock, gtod->clock.vclock_mode);
2641 #endif
2642 }
2643
2644 /*
2645 * Multiply tsc by a fixed point number represented by ratio.
2646 *
2647 * The most significant 64-N bits (mult) of ratio represent the
2648 * integral part of the fixed point number; the remaining N bits
2649 * (frac) represent the fractional part, i.e. ratio represents a fixed
2650 * point number (mult + frac * 2^(-N)).
2651 *
2652 * N equals kvm_caps.tsc_scaling_ratio_frac_bits.
2653 */
2654 static inline u64 __scale_tsc(u64 ratio, u64 tsc)
2655 {
2656 return mul_u64_u64_shr(tsc, ratio, kvm_caps.tsc_scaling_ratio_frac_bits);
2657 }
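
/*
 * Worked example, assuming 48 fractional bits (the VMX TSC-multiplier
 * format): running a 1.5 GHz guest on a 3 GHz host uses
 * ratio = (1500000ULL << 48) / 3000000 = 1ULL << 47, so
 * __scale_tsc(1ULL << 47, 6000) = mul_u64_u64_shr(6000, 1ULL << 47, 48) = 3000.
 */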
2658
2659 u64 kvm_scale_tsc(u64 tsc, u64 ratio)
2660 {
2661 u64 _tsc = tsc;
2662
2663 if (ratio != kvm_caps.default_tsc_scaling_ratio)
2664 _tsc = __scale_tsc(ratio, tsc);
2665
2666 return _tsc;
2667 }
2668
2669 static u64 kvm_compute_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
2670 {
2671 u64 tsc;
2672
2673 tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio);
2674
2675 return target_tsc - tsc;
2676 }
2677
2678 u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
2679 {
2680 return vcpu->arch.l1_tsc_offset +
2681 kvm_scale_tsc(host_tsc, vcpu->arch.l1_tsc_scaling_ratio);
2682 }
2683 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_read_l1_tsc);
2684
2685 u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier)
2686 {
2687 u64 nested_offset;
2688
2689 if (l2_multiplier == kvm_caps.default_tsc_scaling_ratio)
2690 nested_offset = l1_offset;
2691 else
2692 nested_offset = mul_s64_u64_shr((s64) l1_offset, l2_multiplier,
2693 kvm_caps.tsc_scaling_ratio_frac_bits);
2694
2695 nested_offset += l2_offset;
2696 return nested_offset;
2697 }
2698 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_calc_nested_tsc_offset);
2699
2700 u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier)
2701 {
2702 if (l2_multiplier != kvm_caps.default_tsc_scaling_ratio)
2703 return mul_u64_u64_shr(l1_multiplier, l2_multiplier,
2704 kvm_caps.tsc_scaling_ratio_frac_bits);
2705
2706 return l1_multiplier;
2707 }
2708 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_calc_nested_tsc_multiplier);
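
/*
 * The two helpers above follow from composing the per-level scaling
 * (N = kvm_caps.tsc_scaling_ratio_frac_bits):
 *
 *   L2 TSC = (host_tsc * mult_l1 >> N + off_l1) * mult_l2 >> N + off_l2
 *          =  host_tsc * ((mult_l1 * mult_l2) >> N) >> N
 *             + ((off_l1 * mult_l2) >> N) + off_l2
 *
 * so the effective multiplier is (mult_l1 * mult_l2) >> N and the
 * effective offset is ((off_l1 * mult_l2) >> N) + off_l2, with the
 * default (1.0) ratio short-circuited in both helpers.
 */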
2709
2710 static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 l1_offset)
2711 {
2712 if (vcpu->arch.guest_tsc_protected)
2713 return;
2714
2715 trace_kvm_write_tsc_offset(vcpu->vcpu_id,
2716 vcpu->arch.l1_tsc_offset,
2717 l1_offset);
2718
2719 vcpu->arch.l1_tsc_offset = l1_offset;
2720
2721 /*
2722 * If we are here because L1 chose not to trap WRMSR to TSC then
2723 * according to the spec this should set L1's TSC (as opposed to
2724 * setting L1's offset for L2).
2725 */
2726 if (is_guest_mode(vcpu))
2727 vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
2728 l1_offset,
2729 kvm_x86_call(get_l2_tsc_offset)(vcpu),
2730 kvm_x86_call(get_l2_tsc_multiplier)(vcpu));
2731 else
2732 vcpu->arch.tsc_offset = l1_offset;
2733
2734 kvm_x86_call(write_tsc_offset)(vcpu);
2735 }
2736
2737 static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier)
2738 {
2739 vcpu->arch.l1_tsc_scaling_ratio = l1_multiplier;
2740
2741 /* Userspace is changing the multiplier while L2 is active */
2742 if (is_guest_mode(vcpu))
2743 vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier(
2744 l1_multiplier,
2745 kvm_x86_call(get_l2_tsc_multiplier)(vcpu));
2746 else
2747 vcpu->arch.tsc_scaling_ratio = l1_multiplier;
2748
2749 if (kvm_caps.has_tsc_control)
2750 kvm_x86_call(write_tsc_multiplier)(vcpu);
2751 }
2752
2753 static inline bool kvm_check_tsc_unstable(void)
2754 {
2755 #ifdef CONFIG_X86_64
2756 /*
2757 * The TSC is marked unstable when running on Hyper-V, but the
2758 * 'TSC page' clocksource is still good.
2759 */
2760 if (pvclock_gtod_data.clock.vclock_mode == VDSO_CLOCKMODE_HVCLOCK)
2761 return false;
2762 #endif
2763 return check_tsc_unstable();
2764 }
2765
2766 /*
2767 * Infers attempts to synchronize the guest's tsc from host writes. Sets the
2768 * offset for the vcpu and tracks the TSC matching generation that the vcpu
2769 * participates in.
2770 */
2771 static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc,
2772 u64 ns, bool matched, bool user_set_tsc)
2773 {
2774 struct kvm *kvm = vcpu->kvm;
2775
2776 lockdep_assert_held(&kvm->arch.tsc_write_lock);
2777
2778 if (vcpu->arch.guest_tsc_protected)
2779 return;
2780
2781 if (user_set_tsc)
2782 vcpu->kvm->arch.user_set_tsc = true;
2783
2784 /*
2785 * We also track the most recent recorded KHZ, write, and time to
2786 * allow the matching interval to be extended at each write.
2787 */
2788 kvm->arch.last_tsc_nsec = ns;
2789 kvm->arch.last_tsc_write = tsc;
2790 kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
2791 kvm->arch.last_tsc_offset = offset;
2792
2793 vcpu->arch.last_guest_tsc = tsc;
2794
2795 kvm_vcpu_write_tsc_offset(vcpu, offset);
2796
2797 if (!matched) {
2798 /*
2799 * We split periods of matched TSC writes into generations.
2800 * For each generation, we track the original measured
2801 * nanosecond time, offset, and write, so if TSCs are in
2802 * sync, we can match exact offset, and if not, we can match
2803 * exact software computation in compute_guest_tsc()
2804 *
2805 * These values are tracked in kvm->arch.cur_xxx variables.
2806 */
2807 kvm->arch.cur_tsc_generation++;
2808 kvm->arch.cur_tsc_nsec = ns;
2809 kvm->arch.cur_tsc_write = tsc;
2810 kvm->arch.cur_tsc_offset = offset;
2811 kvm->arch.nr_vcpus_matched_tsc = 0;
2812 } else if (vcpu->arch.this_tsc_generation != kvm->arch.cur_tsc_generation) {
2813 kvm->arch.nr_vcpus_matched_tsc++;
2814 }
2815
2816 /* Keep track of which generation this VCPU has synchronized to */
2817 vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
2818 vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
2819 vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
2820
2821 kvm_track_tsc_matching(vcpu, !matched);
2822 }
2823
2824 static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 *user_value)
2825 {
2826 u64 data = user_value ? *user_value : 0;
2827 struct kvm *kvm = vcpu->kvm;
2828 u64 offset, ns, elapsed;
2829 unsigned long flags;
2830 bool matched = false;
2831 bool synchronizing = false;
2832
2833 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
2834 offset = kvm_compute_l1_tsc_offset(vcpu, data);
2835 ns = get_kvmclock_base_ns();
2836 elapsed = ns - kvm->arch.last_tsc_nsec;
2837
2838 if (vcpu->arch.virtual_tsc_khz) {
2839 if (data == 0) {
2840 /*
2841 * Force synchronization when creating a vCPU, or when
2842 * userspace explicitly writes a zero value.
2843 */
2844 synchronizing = true;
2845 } else if (kvm->arch.user_set_tsc) {
2846 u64 tsc_exp = kvm->arch.last_tsc_write +
2847 nsec_to_cycles(vcpu, elapsed);
2848 u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL;
2849 /*
2850 * Here lies UAPI baggage: when a user-initiated TSC write has
2851 * a small delta (1 second) of virtual cycle time against the
2852 * previously set vCPU, we assume that they were intended to be
2853 * in sync and the delta was only due to the racy nature of the
2854 * legacy API.
2855 *
2856 * This trick falls down when restoring a guest which genuinely
2857 * has been running for less time than the 1 second of imprecision
2858 * which we allow for in the legacy API. In this case, the first
2859 * value written by userspace (on any vCPU) should not be subject
2860 * to this 'correction' to make it sync up with values that only
2861 * come from the kernel's default vCPU creation. Make the 1-second
2862 * slop hack only trigger if the user_set_tsc flag is already set.
2863 */
2864 synchronizing = data < tsc_exp + tsc_hz &&
2865 data + tsc_hz > tsc_exp;
2866 }
2867 }
2868
2869
2870 /*
2871 * For a reliable TSC, we can match TSC offsets, and for an unstable
2872 * TSC, we add elapsed time in this computation. We could let the
2873 * compensation code attempt to catch up if we fall behind, but
2874 * it's better to try to match offsets from the beginning.
2875 */
2876 if (synchronizing &&
2877 vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
2878 if (!kvm_check_tsc_unstable()) {
2879 offset = kvm->arch.cur_tsc_offset;
2880 } else {
2881 u64 delta = nsec_to_cycles(vcpu, elapsed);
2882 data += delta;
2883 offset = kvm_compute_l1_tsc_offset(vcpu, data);
2884 }
2885 matched = true;
2886 }
2887
2888 __kvm_synchronize_tsc(vcpu, offset, data, ns, matched, !!user_value);
2889 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
2890 }
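
/*
 * Concretely, once userspace has written the TSC at least once
 * (user_set_tsc), a subsequent non-zero write is treated as "synchronizing"
 * when it lands within one second's worth of cycles of the extrapolated
 * value, e.g. within +/- 3,000,000,000 cycles of tsc_exp for a
 * virtual_tsc_khz of 3000000 (3 GHz).
 */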
2891
2892 static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
2893 s64 adjustment)
2894 {
2895 u64 tsc_offset = vcpu->arch.l1_tsc_offset;
2896 kvm_vcpu_write_tsc_offset(vcpu, tsc_offset + adjustment);
2897 }
2898
2899 static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
2900 {
2901 if (vcpu->arch.l1_tsc_scaling_ratio != kvm_caps.default_tsc_scaling_ratio)
2902 WARN_ON(adjustment < 0);
2903 adjustment = kvm_scale_tsc((u64) adjustment,
2904 vcpu->arch.l1_tsc_scaling_ratio);
2905 adjust_tsc_offset_guest(vcpu, adjustment);
2906 }
2907
2908 #ifdef CONFIG_X86_64
2909
2910 static u64 read_tsc(void)
2911 {
2912 u64 ret = (u64)rdtsc_ordered();
2913 u64 last = pvclock_gtod_data.clock.cycle_last;
2914
2915 if (likely(ret >= last))
2916 return ret;
2917
2918 /*
2919 * GCC likes to generate cmov here, but this branch is extremely
2920 * predictable (it's just a function of time and the likely is
2921 * very likely) and there's a data dependence, so force GCC
2922 * to generate a branch instead. I don't barrier() because
2923 * we don't actually need a barrier, and if this function
2924 * ever gets inlined it will generate worse code.
2925 */
2926 asm volatile ("");
2927 return last;
2928 }
2929
2930 static inline u64 vgettsc(struct pvclock_clock *clock, u64 *tsc_timestamp,
2931 int *mode)
2932 {
2933 u64 tsc_pg_val;
2934 long v;
2935
2936 switch (clock->vclock_mode) {
2937 case VDSO_CLOCKMODE_HVCLOCK:
2938 if (hv_read_tsc_page_tsc(hv_get_tsc_page(),
2939 tsc_timestamp, &tsc_pg_val)) {
2940 /* TSC page valid */
2941 *mode = VDSO_CLOCKMODE_HVCLOCK;
2942 v = (tsc_pg_val - clock->cycle_last) &
2943 clock->mask;
2944 } else {
2945 /* TSC page invalid */
2946 *mode = VDSO_CLOCKMODE_NONE;
2947 }
2948 break;
2949 case VDSO_CLOCKMODE_TSC:
2950 *mode = VDSO_CLOCKMODE_TSC;
2951 *tsc_timestamp = read_tsc();
2952 v = (*tsc_timestamp - clock->cycle_last) &
2953 clock->mask;
2954 break;
2955 default:
2956 *mode = VDSO_CLOCKMODE_NONE;
2957 }
2958
2959 if (*mode == VDSO_CLOCKMODE_NONE)
2960 *tsc_timestamp = v = 0;
2961
2962 return v * clock->mult;
2963 }
2964
2965 /*
2966 * As with get_kvmclock_base_ns(), this counts from boot time, at the
2967 * frequency of CLOCK_MONOTONIC_RAW (hence adding gtos->offs_boot).
2968 */
2969 static int do_kvmclock_base(s64 *t, u64 *tsc_timestamp)
2970 {
2971 struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
2972 unsigned long seq;
2973 int mode;
2974 u64 ns;
2975
2976 do {
2977 seq = read_seqcount_begin(&gtod->seq);
2978 ns = gtod->raw_clock.base_cycles;
2979 ns += vgettsc(&gtod->raw_clock, tsc_timestamp, &mode);
2980 ns >>= gtod->raw_clock.shift;
2981 ns += ktime_to_ns(ktime_add(gtod->raw_clock.offset, gtod->offs_boot));
2982 } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
2983 *t = ns;
2984
2985 return mode;
2986 }
2987
2988 /*
2989 * This calculates CLOCK_MONOTONIC at the time of the TSC snapshot, with
2990 * no boot time offset.
2991 */
2992 static int do_monotonic(s64 *t, u64 *tsc_timestamp)
2993 {
2994 struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
2995 unsigned long seq;
2996 int mode;
2997 u64 ns;
2998
2999 do {
3000 seq = read_seqcount_begin(&gtod->seq);
3001 ns = gtod->clock.base_cycles;
3002 ns += vgettsc(&gtod->clock, tsc_timestamp, &mode);
3003 ns >>= gtod->clock.shift;
3004 ns += ktime_to_ns(gtod->clock.offset);
3005 } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
3006 *t = ns;
3007
3008 return mode;
3009 }
3010
3011 static int do_realtime(struct timespec64 *ts, u64 *tsc_timestamp)
3012 {
3013 struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
3014 unsigned long seq;
3015 int mode;
3016 u64 ns;
3017
3018 do {
3019 seq = read_seqcount_begin(&gtod->seq);
3020 ts->tv_sec = gtod->wall_time_sec;
3021 ns = gtod->clock.base_cycles;
3022 ns += vgettsc(&gtod->clock, tsc_timestamp, &mode);
3023 ns >>= gtod->clock.shift;
3024 } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
3025
3026 ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
3027 ts->tv_nsec = ns;
3028
3029 return mode;
3030 }
3031
3032 /*
3033 * Calculates the kvmclock_base_ns (CLOCK_MONOTONIC_RAW + boot time) and
3034 * reports the TSC value from which it did so. Returns true if host is
3035 * using TSC based clocksource.
3036 */
3037 static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp)
3038 {
3039 /* checked again under seqlock below */
3040 if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode))
3041 return false;
3042
3043 return gtod_is_based_on_tsc(do_kvmclock_base(kernel_ns,
3044 tsc_timestamp));
3045 }
3046
3047 /*
3048 * Calculates CLOCK_MONOTONIC and reports the TSC value from which it did
3049 * so. Returns true if host is using TSC based clocksource.
3050 */
3051 bool kvm_get_monotonic_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp)
3052 {
3053 /* checked again under seqlock below */
3054 if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode))
3055 return false;
3056
3057 return gtod_is_based_on_tsc(do_monotonic(kernel_ns,
3058 tsc_timestamp));
3059 }
3060
3061 /*
3062 * Calculates CLOCK_REALTIME and reports the TSC value from which it did
3063 * so. Returns true if host is using TSC based clocksource.
3064 *
3065 * DO NOT USE this for anything related to migration. You want CLOCK_TAI
3066 * for that.
3067 */
3068 static bool kvm_get_walltime_and_clockread(struct timespec64 *ts,
3069 u64 *tsc_timestamp)
3070 {
3071 /* checked again under seqlock below */
3072 if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode))
3073 return false;
3074
3075 return gtod_is_based_on_tsc(do_realtime(ts, tsc_timestamp));
3076 }
3077 #endif
3078
3079 /*
3080 *
3081 * Assuming a stable TSC across physical CPUs, and a stable TSC
3082 * across virtual CPUs, the following condition is possible.
3083 * Each numbered line represents an event visible to both
3084 * CPUs at the next numbered event.
3085 *
3086 * "timespecX" represents host monotonic time. "tscX" represents
3087 * RDTSC value.
3088 *
3089 * VCPU0 on CPU0 | VCPU1 on CPU1
3090 *
3091 * 1. read timespec0,tsc0
3092 * 2. | timespec1 = timespec0 + N
3093 * | tsc1 = tsc0 + M
3094 * 3. transition to guest | transition to guest
3095 * 4. ret0 = timespec0 + (rdtsc - tsc0) |
3096 * 5. | ret1 = timespec1 + (rdtsc - tsc1)
3097 * | ret1 = timespec0 + N + (rdtsc - (tsc0 + M))
3098 *
3099 * Since ret0 update is visible to VCPU1 at time 5, to obey monotonicity:
3100 *
3101 * - ret0 < ret1
3102 * - timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M))
3103 * ...
3104 * - 0 < N - M => M < N
3105 *
3106 * That is, when timespec0 != timespec1, M < N. Unfortunately that is not
3107 * always the case (the difference between two distinct xtime instances
3108 * might be smaller than the difference between corresponding TSC reads,
3109 * when updating guest vcpus' pvclock areas).
3110 *
3111 * To avoid that problem, do not allow visibility of distinct
3112 * system_timestamp/tsc_timestamp values simultaneously: use a master
3113 * copy of host monotonic time values. Update that master copy
3114 * in lockstep.
3115 *
3116 * Rely on synchronization of host TSCs and guest TSCs for monotonicity.
3117 *
3118 */
3119
3120 static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
3121 {
3122 #ifdef CONFIG_X86_64
3123 struct kvm_arch *ka = &kvm->arch;
3124 int vclock_mode;
3125 bool host_tsc_clocksource, vcpus_matched;
3126
3127 lockdep_assert_held(&kvm->arch.tsc_write_lock);
3128 vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
3129 atomic_read(&kvm->online_vcpus));
3130
3131 /*
3132 * If the host uses the TSC clocksource, then pass the TSC through
3133 * to the guest as stable.
3134 */
3135 host_tsc_clocksource = kvm_get_time_and_clockread(
3136 &ka->master_kernel_ns,
3137 &ka->master_cycle_now);
3138
3139 ka->use_master_clock = host_tsc_clocksource && vcpus_matched
3140 && !ka->backwards_tsc_observed
3141 && !ka->boot_vcpu_runs_old_kvmclock;
3142
3143 if (ka->use_master_clock)
3144 atomic_set(&kvm_guest_has_master_clock, 1);
3145
3146 vclock_mode = pvclock_gtod_data.clock.vclock_mode;
3147 trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode,
3148 vcpus_matched);
3149 #endif
3150 }
3151
3152 static void kvm_make_mclock_inprogress_request(struct kvm *kvm)
3153 {
3154 kvm_make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
3155 }
3156
3157 static void __kvm_start_pvclock_update(struct kvm *kvm)
3158 {
3159 raw_spin_lock_irq(&kvm->arch.tsc_write_lock);
3160 write_seqcount_begin(&kvm->arch.pvclock_sc);
3161 }
3162
3163 static void kvm_start_pvclock_update(struct kvm *kvm)
3164 {
3165 kvm_make_mclock_inprogress_request(kvm);
3166
3167 /* no guest entries from this point */
3168 __kvm_start_pvclock_update(kvm);
3169 }
3170
3171 static void kvm_end_pvclock_update(struct kvm *kvm)
3172 {
3173 struct kvm_arch *ka = &kvm->arch;
3174 struct kvm_vcpu *vcpu;
3175 unsigned long i;
3176
3177 write_seqcount_end(&ka->pvclock_sc);
3178 raw_spin_unlock_irq(&ka->tsc_write_lock);
3179 kvm_for_each_vcpu(i, vcpu, kvm)
3180 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
3181
3182 /* guest entries allowed */
3183 kvm_for_each_vcpu(i, vcpu, kvm)
3184 kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
3185 }
3186
3187 static void kvm_update_masterclock(struct kvm *kvm)
3188 {
3189 kvm_hv_request_tsc_page_update(kvm);
3190 kvm_start_pvclock_update(kvm);
3191 pvclock_update_vm_gtod_copy(kvm);
3192 kvm_end_pvclock_update(kvm);
3193 }
3194
3195 /*
3196 * Use the kernel's tsc_khz directly if the TSC is constant, otherwise use KVM's
3197 * per-CPU value (which may be zero if a CPU is going offline). Note, tsc_khz
3198 * can change during boot even if the TSC is constant, as it's possible for KVM
3199 * to be loaded before TSC calibration completes. Ideally, KVM would get a
3200 * notification when calibration completes, but practically speaking calibration
3201 * will complete before userspace is alive enough to create VMs.
3202 */
3203 static unsigned long get_cpu_tsc_khz(void)
3204 {
3205 if (static_cpu_has(X86_FEATURE_CONSTANT_TSC))
3206 return tsc_khz;
3207 else
3208 return __this_cpu_read(cpu_tsc_khz);
3209 }
3210
3211 /* Called within read_seqcount_begin/retry for kvm->pvclock_sc. */
3212 static void __get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data)
3213 {
3214 struct kvm_arch *ka = &kvm->arch;
3215 struct pvclock_vcpu_time_info hv_clock;
3216
3217 /* both __this_cpu_read() and rdtsc() should be on the same cpu */
3218 get_cpu();
3219
3220 data->flags = 0;
3221 if (ka->use_master_clock &&
3222 (static_cpu_has(X86_FEATURE_CONSTANT_TSC) || __this_cpu_read(cpu_tsc_khz))) {
3223 #ifdef CONFIG_X86_64
3224 struct timespec64 ts;
3225
3226 if (kvm_get_walltime_and_clockread(&ts, &data->host_tsc)) {
3227 data->realtime = ts.tv_nsec + NSEC_PER_SEC * ts.tv_sec;
3228 data->flags |= KVM_CLOCK_REALTIME | KVM_CLOCK_HOST_TSC;
3229 } else
3230 #endif
3231 data->host_tsc = rdtsc();
3232
3233 data->flags |= KVM_CLOCK_TSC_STABLE;
3234 hv_clock.tsc_timestamp = ka->master_cycle_now;
3235 hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
3236 kvm_get_time_scale(NSEC_PER_SEC, get_cpu_tsc_khz() * 1000LL,
3237 &hv_clock.tsc_shift,
3238 &hv_clock.tsc_to_system_mul);
3239 data->clock = __pvclock_read_cycles(&hv_clock, data->host_tsc);
3240 } else {
3241 data->clock = get_kvmclock_base_ns() + ka->kvmclock_offset;
3242 }
3243
3244 put_cpu();
3245 }
3246
3247 static void get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data)
3248 {
3249 struct kvm_arch *ka = &kvm->arch;
3250 unsigned seq;
3251
3252 do {
3253 seq = read_seqcount_begin(&ka->pvclock_sc);
3254 __get_kvmclock(kvm, data);
3255 } while (read_seqcount_retry(&ka->pvclock_sc, seq));
3256 }
3257
3258 u64 get_kvmclock_ns(struct kvm *kvm)
3259 {
3260 struct kvm_clock_data data;
3261
3262 get_kvmclock(kvm, &data);
3263 return data.clock;
3264 }
3265
3266 static void kvm_setup_guest_pvclock(struct pvclock_vcpu_time_info *ref_hv_clock,
3267 struct kvm_vcpu *vcpu,
3268 struct gfn_to_pfn_cache *gpc,
3269 unsigned int offset)
3270 {
3271 struct pvclock_vcpu_time_info *guest_hv_clock;
3272 struct pvclock_vcpu_time_info hv_clock;
3273 unsigned long flags;
3274
3275 memcpy(&hv_clock, ref_hv_clock, sizeof(hv_clock));
3276
3277 read_lock_irqsave(&gpc->lock, flags);
3278 while (!kvm_gpc_check(gpc, offset + sizeof(*guest_hv_clock))) {
3279 read_unlock_irqrestore(&gpc->lock, flags);
3280
3281 if (kvm_gpc_refresh(gpc, offset + sizeof(*guest_hv_clock)))
3282 return;
3283
3284 read_lock_irqsave(&gpc->lock, flags);
3285 }
3286
3287 guest_hv_clock = (void *)(gpc->khva + offset);
3288
3289 /*
3290 * This VCPU is paused, but it's legal for a guest to read another
3291 * VCPU's kvmclock, so we really have to follow the specification where
3292 * it says that version is odd if data is being modified, and even after
3293 * it is consistent.
3294 */
3295
3296 guest_hv_clock->version = hv_clock.version = (guest_hv_clock->version + 1) | 1;
3297 smp_wmb();
3298
3299 /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
3300 hv_clock.flags |= (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
3301
3302 memcpy(guest_hv_clock, &hv_clock, sizeof(*guest_hv_clock));
3303
3304 smp_wmb();
3305
3306 guest_hv_clock->version = ++hv_clock.version;
3307
3308 kvm_gpc_mark_dirty_in_slot(gpc);
3309 read_unlock_irqrestore(&gpc->lock, flags);
3310
3311 trace_kvm_pvclock_update(vcpu->vcpu_id, &hv_clock);
3312 }
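
/*
 * The version dance above pairs with the guest-side read protocol, which
 * in rough, illustrative form (pvti being the guest's mapping of this
 * structure) looks like:
 *
 *	do {
 *		version = pvti->version;
 *		smp_rmb();
 *		// snapshot tsc_timestamp, system_time, tsc_to_system_mul, ...
 *		smp_rmb();
 *	} while ((version & 1) || version != pvti->version);
 *
 * An odd version, or a version change across the reads, forces a retry,
 * which is why the update above bumps the version to an odd value before
 * touching the payload and back to an even value afterwards.
 */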
3313
3314 int kvm_guest_time_update(struct kvm_vcpu *v)
3315 {
3316 struct pvclock_vcpu_time_info hv_clock = {};
3317 unsigned long flags, tgt_tsc_khz;
3318 unsigned seq;
3319 struct kvm_vcpu_arch *vcpu = &v->arch;
3320 struct kvm_arch *ka = &v->kvm->arch;
3321 s64 kernel_ns;
3322 u64 tsc_timestamp, host_tsc;
3323 bool use_master_clock;
3324
3325 kernel_ns = 0;
3326 host_tsc = 0;
3327
3328 /*
3329 * If the host uses TSC clock, then passthrough TSC as stable
3330 * to the guest.
3331 */
3332 do {
3333 seq = read_seqcount_begin(&ka->pvclock_sc);
3334 use_master_clock = ka->use_master_clock;
3335 if (use_master_clock) {
3336 host_tsc = ka->master_cycle_now;
3337 kernel_ns = ka->master_kernel_ns;
3338 }
3339 } while (read_seqcount_retry(&ka->pvclock_sc, seq));
3340
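/*
 * When the masterclock is in use, host_tsc and kernel_ns above hold the
 * shared masterclock snapshot; otherwise they are resampled on this CPU
 * further below.
 */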
3341 /* Keep irq disabled to prevent changes to the clock */
3342 local_irq_save(flags);
3343 tgt_tsc_khz = get_cpu_tsc_khz();
3344 if (unlikely(tgt_tsc_khz == 0)) {
3345 local_irq_restore(flags);
3346 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
3347 return 1;
3348 }
3349 if (!use_master_clock) {
3350 host_tsc = rdtsc();
3351 kernel_ns = get_kvmclock_base_ns();
3352 }
3353
3354 tsc_timestamp = kvm_read_l1_tsc(v, host_tsc);
3355
3356 /*
3357 * We may have to catch up the TSC to match elapsed wall clock
3358 * time for two reasons, even if kvmclock is used.
3359 * 1) CPU could have been running below the maximum TSC rate
3360 * 2) Broken TSC compensation resets the base at each VCPU
3361 * entry to avoid unknown leaps of TSC even when running
3362 * again on the same CPU. This may cause apparent elapsed
3363 * time to disappear, and the guest to stand still or run
3364 * very slowly.
3365 */
3366 if (vcpu->tsc_catchup) {
3367 u64 tsc = compute_guest_tsc(v, kernel_ns);
3368 if (tsc > tsc_timestamp) {
3369 adjust_tsc_offset_guest(v, tsc - tsc_timestamp);
3370 tsc_timestamp = tsc;
3371 }
3372 }
3373
3374 local_irq_restore(flags);
3375
3376 /* With all the info we got, fill in the values */
3377
3378 if (kvm_caps.has_tsc_control) {
3379 tgt_tsc_khz = kvm_scale_tsc(tgt_tsc_khz,
3380 v->arch.l1_tsc_scaling_ratio);
3381 tgt_tsc_khz = tgt_tsc_khz ? : 1;
3382 }
3383
3384 if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) {
3385 kvm_get_time_scale(NSEC_PER_SEC, tgt_tsc_khz * 1000LL,
3386 &vcpu->pvclock_tsc_shift,
3387 &vcpu->pvclock_tsc_mul);
3388 vcpu->hw_tsc_khz = tgt_tsc_khz;
3389 }
3390
3391 hv_clock.tsc_shift = vcpu->pvclock_tsc_shift;
3392 hv_clock.tsc_to_system_mul = vcpu->pvclock_tsc_mul;
3393 hv_clock.tsc_timestamp = tsc_timestamp;
3394 hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
3395 vcpu->last_guest_tsc = tsc_timestamp;
3396
3397 /* If the host uses TSC clocksource, then it is stable */
3398 hv_clock.flags = 0;
3399 if (use_master_clock)
3400 hv_clock.flags |= PVCLOCK_TSC_STABLE_BIT;
3401
3402 if (vcpu->pv_time.active) {
3403 /*
3404 * GUEST_STOPPED is only supported by kvmclock, and KVM's
3405 * historic behavior is to only process the request if kvmclock
3406 * is active/enabled.
3407 */
3408 if (vcpu->pvclock_set_guest_stopped_request) {
3409 hv_clock.flags |= PVCLOCK_GUEST_STOPPED;
3410 vcpu->pvclock_set_guest_stopped_request = false;
3411 }
3412 kvm_setup_guest_pvclock(&hv_clock, v, &vcpu->pv_time, 0);
3413
3414 hv_clock.flags &= ~PVCLOCK_GUEST_STOPPED;
3415 }
3416
3417 kvm_hv_setup_tsc_page(v->kvm, &hv_clock);
3418
3419 #ifdef CONFIG_KVM_XEN
3420 /*
3421 * For Xen guests we may need to override PVCLOCK_TSC_STABLE_BIT, as Xen will
3422 * not set this bit unless it is explicitly told to use the TSC as its clocksource.
3423 * This default behaviour led to bugs in some guest kernels which cause
3424 * problems if they observe PVCLOCK_TSC_STABLE_BIT in the pvclock flags.
3425 *
3426 * Note! Clear TSC_STABLE only for Xen clocks, i.e. the order matters!
3427 */
3428 if (ka->xen.hvm_config.flags & KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE)
3429 hv_clock.flags &= ~PVCLOCK_TSC_STABLE_BIT;
3430
3431 if (vcpu->xen.vcpu_info_cache.active)
3432 kvm_setup_guest_pvclock(&hv_clock, v, &vcpu->xen.vcpu_info_cache,
3433 offsetof(struct compat_vcpu_info, time));
3434 if (vcpu->xen.vcpu_time_info_cache.active)
3435 kvm_setup_guest_pvclock(&hv_clock, v, &vcpu->xen.vcpu_time_info_cache, 0);
3436 #endif
3437 return 0;
3438 }
3439
3440 /*
3441 * The pvclock_wall_clock ABI tells the guest the wall clock time at
3442 * which it started (i.e. its epoch, when its kvmclock was zero).
3443 *
3444 * In fact those clocks are subtly different; wall clock frequency is
3445 * adjusted by NTP and has leap seconds, while the kvmclock is a
3446 * simple function of the TSC without any such adjustment.
3447 *
3448 * Perhaps the ABI should have exposed CLOCK_TAI and a ratio between
3449 * that and kvmclock, but even that would be subject to change over
3450 * time.
3451 *
3452 * Attempt to calculate the epoch at a given moment using the *same*
3453 * TSC reading via kvm_get_walltime_and_clockread() to obtain both
3454 * wallclock and kvmclock times, and subtracting one from the other.
3455 *
3456 * Fall back to using their values at slightly different moments by
3457 * calling ktime_get_real_ns() and get_kvmclock_ns() separately.
3458 */
3459 uint64_t kvm_get_wall_clock_epoch(struct kvm *kvm)
3460 {
3461 #ifdef CONFIG_X86_64
3462 struct pvclock_vcpu_time_info hv_clock;
3463 struct kvm_arch *ka = &kvm->arch;
3464 unsigned long seq, local_tsc_khz;
3465 struct timespec64 ts;
3466 uint64_t host_tsc;
3467
3468 do {
3469 seq = read_seqcount_begin(&ka->pvclock_sc);
3470
3471 local_tsc_khz = 0;
3472 if (!ka->use_master_clock)
3473 break;
3474
3475 /*
3476 * The TSC read and the call to get_cpu_tsc_khz() must happen
3477 * on the same CPU.
3478 */
3479 get_cpu();
3480
3481 local_tsc_khz = get_cpu_tsc_khz();
3482
3483 if (local_tsc_khz &&
3484 !kvm_get_walltime_and_clockread(&ts, &host_tsc))
3485 local_tsc_khz = 0; /* Fall back to old method */
3486
3487 put_cpu();
3488
3489 /*
3490 * These values must be snapshotted within the seqcount loop.
3491 * After that, it's just mathematics which can happen on any
3492 * CPU at any time.
3493 */
3494 hv_clock.tsc_timestamp = ka->master_cycle_now;
3495 hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
3496
3497 } while (read_seqcount_retry(&ka->pvclock_sc, seq));
3498
3499 /*
3500 * If the conditions were right, and obtaining the wallclock+TSC was
3501 * successful, calculate the KVM clock at the corresponding time and
3502 * subtract one from the other to get the guest's epoch in nanoseconds
3503 * since 1970-01-01.
3504 */
3505 if (local_tsc_khz) {
3506 kvm_get_time_scale(NSEC_PER_SEC, local_tsc_khz * NSEC_PER_USEC,
3507 &hv_clock.tsc_shift,
3508 &hv_clock.tsc_to_system_mul);
3509 return ts.tv_nsec + NSEC_PER_SEC * ts.tv_sec -
3510 __pvclock_read_cycles(&hv_clock, host_tsc);
3511 }
3512 #endif
3513 return ktime_get_real_ns() - get_kvmclock_ns(kvm);
3514 }
3515
3516 /*
3517 * kvmclock updates which are isolated to a given vcpu, such as
3518 * vcpu->cpu migration, should not allow system_timestamp from
3519 * the rest of the vcpus to remain static.
3520 *
3521 * So in those cases, request a kvmclock update for all vcpus.
3522 * The worst case for a remote vcpu to update its kvmclock
3523 * is then bounded by maximum nohz sleep latency.
3524 */
3525 static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
3526 {
3527 unsigned long i;
3528 struct kvm_vcpu *vcpu;
3529 struct kvm *kvm = v->kvm;
3530
3531 kvm_for_each_vcpu(i, vcpu, kvm) {
3532 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
3533 kvm_vcpu_kick(vcpu);
3534 }
3535 }
3536
3537 /* These helpers are safe iff @msr is known to be an MCx bank MSR, i.e. one of the four consecutive CTL/STATUS/ADDR/MISC MSRs of a bank (hence the "& 3" checks). */
3538 static bool is_mci_control_msr(u32 msr)
3539 {
3540 return (msr & 3) == 0;
3541 }
3542 static bool is_mci_status_msr(u32 msr)
3543 {
3544 return (msr & 3) == 1;
3545 }
3546
3547 /*
3548 * On AMD, HWCR[McStatusWrEn] controls whether setting MCi_STATUS results in #GP.
3549 */
3550 static bool can_set_mci_status(struct kvm_vcpu *vcpu)
3551 {
3552 /* McStatusWrEn enabled? */
3553 if (guest_cpuid_is_amd_compatible(vcpu))
3554 return !!(vcpu->arch.msr_hwcr & BIT_ULL(18));
3555
3556 return false;
3557 }
3558
3559 static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3560 {
3561 u64 mcg_cap = vcpu->arch.mcg_cap;
3562 unsigned bank_num = mcg_cap & 0xff;
3563 u32 msr = msr_info->index;
3564 u64 data = msr_info->data;
3565 u32 offset, last_msr;
3566
3567 switch (msr) {
3568 case MSR_IA32_MCG_STATUS:
3569 vcpu->arch.mcg_status = data;
3570 break;
3571 case MSR_IA32_MCG_CTL:
3572 if (!(mcg_cap & MCG_CTL_P) &&
3573 (data || !msr_info->host_initiated))
3574 return 1;
3575 if (data != 0 && data != ~(u64)0)
3576 return 1;
3577 vcpu->arch.mcg_ctl = data;
3578 break;
3579 case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1:
3580 last_msr = MSR_IA32_MCx_CTL2(bank_num) - 1;
3581 if (msr > last_msr)
3582 return 1;
3583
3584 if (!(mcg_cap & MCG_CMCI_P) && (data || !msr_info->host_initiated))
3585 return 1;
3586 /* An attempt to write a 1 to a reserved bit raises #GP */
3587 if (data & ~(MCI_CTL2_CMCI_EN | MCI_CTL2_CMCI_THRESHOLD_MASK))
3588 return 1;
3589 offset = array_index_nospec(msr - MSR_IA32_MC0_CTL2,
3590 last_msr + 1 - MSR_IA32_MC0_CTL2);
3591 vcpu->arch.mci_ctl2_banks[offset] = data;
3592 break;
3593 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
3594 last_msr = MSR_IA32_MCx_CTL(bank_num) - 1;
3595 if (msr > last_msr)
3596 return 1;
3597
3598 /*
3599 * Only 0 or all 1s can be written to IA32_MCi_CTL, all other
3600 * values are architecturally undefined. But, some Linux
3601 * kernels clear bit 10 in bank 4 to workaround a BIOS/GART TLB
3602 * issue on AMD K8s, allow bit 10 to be clear when setting all
3603 * other bits in order to avoid an uncaught #GP in the guest.
3604 *
3605 * UNIXWARE clears bit 0 of MC1_CTL to ignore correctable,
3606 * single-bit ECC data errors.
3607 */
3608 if (is_mci_control_msr(msr) &&
3609 data != 0 && (data | (1 << 10) | 1) != ~(u64)0)
3610 return 1;
3611
3612 /*
3613 * All CPUs allow writing 0 to MCi_STATUS MSRs to clear the MSR.
3614 * AMD-based CPUs allow non-zero values, but if and only if
3615 * HWCR[McStatusWrEn] is set.
3616 */
3617 if (!msr_info->host_initiated && is_mci_status_msr(msr) &&
3618 data != 0 && !can_set_mci_status(vcpu))
3619 return 1;
3620
3621 offset = array_index_nospec(msr - MSR_IA32_MC0_CTL,
3622 last_msr + 1 - MSR_IA32_MC0_CTL);
3623 vcpu->arch.mce_banks[offset] = data;
3624 break;
3625 default:
3626 return 1;
3627 }
3628 return 0;
3629 }
3630
3631 static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
3632 {
3633 gpa_t gpa = data & ~0x3f;
3634
3635 /* Bits 4:5 are reserved, Should be zero */
3636 if (data & 0x30)
3637 return 1;
3638
3639 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_VMEXIT) &&
3640 (data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT))
3641 return 1;
3642
3643 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT) &&
3644 (data & KVM_ASYNC_PF_DELIVERY_AS_INT))
3645 return 1;
3646
3647 if (!lapic_in_kernel(vcpu))
3648 return data ? 1 : 0;
3649
3650 vcpu->arch.apf.msr_en_val = data;
3651
3652 if (!kvm_pv_async_pf_enabled(vcpu)) {
3653 kvm_clear_async_pf_completion_queue(vcpu);
3654 kvm_async_pf_hash_reset(vcpu);
3655 return 0;
3656 }
3657
3658 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
3659 sizeof(u64)))
3660 return 1;
3661
3662 vcpu->arch.apf.send_always = (data & KVM_ASYNC_PF_SEND_ALWAYS);
3663 vcpu->arch.apf.delivery_as_pf_vmexit = data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;
3664
3665 kvm_async_pf_wakeup_all(vcpu);
3666
3667 return 0;
3668 }
3669
3670 static int kvm_pv_enable_async_pf_int(struct kvm_vcpu *vcpu, u64 data)
3671 {
3672 /* Bits 8-63 are reserved */
3673 if (data >> 8)
3674 return 1;
3675
3676 if (!lapic_in_kernel(vcpu))
3677 return 1;
3678
3679 vcpu->arch.apf.msr_int_val = data;
3680
3681 vcpu->arch.apf.vec = data & KVM_ASYNC_PF_VEC_MASK;
3682
3683 return 0;
3684 }
3685
3686 static void kvmclock_reset(struct kvm_vcpu *vcpu)
3687 {
3688 kvm_gpc_deactivate(&vcpu->arch.pv_time);
3689 vcpu->arch.time = 0;
3690 }
3691
3692 static void kvm_vcpu_flush_tlb_all(struct kvm_vcpu *vcpu)
3693 {
3694 ++vcpu->stat.tlb_flush;
3695 kvm_x86_call(flush_tlb_all)(vcpu);
3696
3697 /* Flushing all ASIDs flushes the current ASID... */
3698 kvm_clear_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
3699 }
3700
3701 static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
3702 {
3703 ++vcpu->stat.tlb_flush;
3704
3705 if (!tdp_enabled) {
3706 /*
3707 * A TLB flush on behalf of the guest is equivalent to
3708 * INVPCID(all), toggling CR4.PGE, etc., which requires
3709 * a forced sync of the shadow page tables. Ensure all the
3710 * roots are synced and the guest TLB in hardware is clean.
3711 */
3712 kvm_mmu_sync_roots(vcpu);
3713 kvm_mmu_sync_prev_roots(vcpu);
3714 }
3715
3716 kvm_x86_call(flush_tlb_guest)(vcpu);
3717
3718 /*
3719 * Flushing all "guest" TLB is always a superset of Hyper-V's fine
3720 * grained flushing.
3721 */
3722 kvm_hv_vcpu_purge_flush_tlb(vcpu);
3723 }
3724
3725
3726 static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu)
3727 {
3728 ++vcpu->stat.tlb_flush;
3729 kvm_x86_call(flush_tlb_current)(vcpu);
3730 }
3731
3732 /*
3733 * Service "local" TLB flush requests, which are specific to the current MMU
3734 * context. In addition to the generic event handling in vcpu_enter_guest(),
3735 * TLB flushes that are targeted at an MMU context also need to be serviced
3736 * prior to nested VM-Enter/VM-Exit.
3737 */
3738 void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu)
3739 {
3740 if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
3741 kvm_vcpu_flush_tlb_current(vcpu);
3742
3743 if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu))
3744 kvm_vcpu_flush_tlb_guest(vcpu);
3745 }
3746 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_service_local_tlb_flush_requests);
3747
3748 static void record_steal_time(struct kvm_vcpu *vcpu)
3749 {
3750 struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache;
3751 struct kvm_steal_time __user *st;
3752 struct kvm_memslots *slots;
3753 gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS;
3754 u64 steal;
3755 u32 version;
3756
3757 if (kvm_xen_msr_enabled(vcpu->kvm)) {
3758 kvm_xen_runstate_set_running(vcpu);
3759 return;
3760 }
3761
3762 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
3763 return;
3764
3765 if (WARN_ON_ONCE(current->mm != vcpu->kvm->mm))
3766 return;
3767
3768 slots = kvm_memslots(vcpu->kvm);
3769
3770 if (unlikely(slots->generation != ghc->generation ||
3771 gpa != ghc->gpa ||
3772 kvm_is_error_hva(ghc->hva) || !ghc->memslot)) {
3773 /* We rely on the fact that it fits in a single page. */
3774 BUILD_BUG_ON((sizeof(*st) - 1) & KVM_STEAL_VALID_BITS);
3775
3776 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st)) ||
3777 kvm_is_error_hva(ghc->hva) || !ghc->memslot)
3778 return;
3779 }
3780
3781 st = (struct kvm_steal_time __user *)ghc->hva;
3782 /*
3783 * Doing a TLB flush here, on the guest's behalf, can avoid
3784 * expensive IPIs.
3785 */
3786 if (guest_pv_has(vcpu, KVM_FEATURE_PV_TLB_FLUSH)) {
3787 u8 st_preempted = 0;
3788 int err = -EFAULT;
3789
3790 if (!user_access_begin(st, sizeof(*st)))
3791 return;
3792
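/*
 * Atomically read and clear st->preempted: the guest may have set
 * KVM_VCPU_FLUSH_TLB in it to request a TLB flush on its behalf, and the
 * xchg ensures that request is consumed exactly once.
 */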
3793 asm volatile("1: xchgb %0, %2\n"
3794 "xor %1, %1\n"
3795 "2:\n"
3796 _ASM_EXTABLE_UA(1b, 2b)
3797 : "+q" (st_preempted),
3798 "+&r" (err),
3799 "+m" (st->preempted));
3800 if (err)
3801 goto out;
3802
3803 user_access_end();
3804
3805 vcpu->arch.st.preempted = 0;
3806
3807 trace_kvm_pv_tlb_flush(vcpu->vcpu_id,
3808 st_preempted & KVM_VCPU_FLUSH_TLB);
3809 if (st_preempted & KVM_VCPU_FLUSH_TLB)
3810 kvm_vcpu_flush_tlb_guest(vcpu);
3811
3812 if (!user_access_begin(st, sizeof(*st)))
3813 goto dirty;
3814 } else {
3815 if (!user_access_begin(st, sizeof(*st)))
3816 return;
3817
3818 unsafe_put_user(0, &st->preempted, out);
3819 vcpu->arch.st.preempted = 0;
3820 }
3821
3822 unsafe_get_user(version, &st->version, out);
3823 if (version & 1)
3824 version += 1; /* first time write, random junk */
3825
3826 version += 1;
3827 unsafe_put_user(version, &st->version, out);
3828
3829 smp_wmb();
3830
3831 unsafe_get_user(steal, &st->steal, out);
3832 steal += current->sched_info.run_delay -
3833 vcpu->arch.st.last_steal;
3834 vcpu->arch.st.last_steal = current->sched_info.run_delay;
3835 unsafe_put_user(steal, &st->steal, out);
3836
3837 version += 1;
3838 unsafe_put_user(version, &st->version, out);
3839
3840 out:
3841 user_access_end();
3842 dirty:
3843 mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
3844 }
3845
3846 /*
3847 * Returns true if the MSR in question is managed via XSTATE, i.e. is context
3848 * switched with the rest of guest FPU state.
3849 *
3850 * Note, S_CET is _not_ saved/restored via XSAVES/XRSTORS.
3851 */
3852 static bool is_xstate_managed_msr(struct kvm_vcpu *vcpu, u32 msr)
3853 {
3854 if (!vcpu)
3855 return false;
3856
3857 switch (msr) {
3858 case MSR_IA32_U_CET:
3859 return guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK) ||
3860 guest_cpu_cap_has(vcpu, X86_FEATURE_IBT);
3861 case MSR_IA32_PL0_SSP ... MSR_IA32_PL3_SSP:
3862 return guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK);
3863 default:
3864 return false;
3865 }
3866 }
3867
3868 /*
3869 * Lock (and if necessary, re-load) the guest FPU, i.e. XSTATE, and access an
3870 * MSR that is managed via XSTATE. Note, the caller is responsible for doing
3871 * the initial FPU load, this helper only ensures that guest state is resident
3872 * in hardware (the kernel can load its FPU state in IRQ context).
3873 *
3874 * Note, loading guest values for U_CET and PL[0-3]_SSP while executing in the
3875 * kernel is safe, as U_CET is specific to userspace, and PL[0-3]_SSP are only
3876 * consumed when transitioning to lower privilege levels, i.e. are effectively
3877 * only consumed by userspace as well.
3878 */
3879 static __always_inline void kvm_access_xstate_msr(struct kvm_vcpu *vcpu,
3880 struct msr_data *msr_info,
3881 int access)
3882 {
3883 BUILD_BUG_ON(access != MSR_TYPE_R && access != MSR_TYPE_W);
3884
3885 KVM_BUG_ON(!is_xstate_managed_msr(vcpu, msr_info->index), vcpu->kvm);
3886 KVM_BUG_ON(!vcpu->arch.guest_fpu.fpstate->in_use, vcpu->kvm);
3887
3888 kvm_fpu_get();
3889 if (access == MSR_TYPE_R)
3890 rdmsrq(msr_info->index, msr_info->data);
3891 else
3892 wrmsrq(msr_info->index, msr_info->data);
3893 kvm_fpu_put();
3894 }
3895
3896 static void kvm_set_xstate_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3897 {
3898 kvm_access_xstate_msr(vcpu, msr_info, MSR_TYPE_W);
3899 }
3900
3901 static void kvm_get_xstate_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3902 {
3903 kvm_access_xstate_msr(vcpu, msr_info, MSR_TYPE_R);
3904 }
3905
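/*
 * Emulate a WRMSR for MSRs whose handling is common to all vendor modules.
 * Returns 0 on success and a non-zero value if the write should fail, e.g.
 * reserved bits are set or the MSR is not exposed to the guest.
 */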
3906 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3907 {
3908 u32 msr = msr_info->index;
3909 u64 data = msr_info->data;
3910
3911 /*
3912 * Do not allow host-initiated writes to trigger the Xen hypercall
3913 * page setup; it could incur locking paths which are not expected
3914 * if userspace sets the MSR in an unusual location.
3915 */
3916 if (kvm_xen_is_hypercall_page_msr(vcpu->kvm, msr) &&
3917 !msr_info->host_initiated)
3918 return kvm_xen_write_hypercall_page(vcpu, data);
3919
3920 switch (msr) {
3921 case MSR_AMD64_NB_CFG:
3922 case MSR_IA32_UCODE_WRITE:
3923 case MSR_VM_HSAVE_PA:
3924 case MSR_AMD64_PATCH_LOADER:
3925 case MSR_AMD64_BU_CFG2:
3926 case MSR_AMD64_DC_CFG:
3927 case MSR_AMD64_TW_CFG:
3928 case MSR_F15H_EX_CFG:
3929 break;
3930
3931 case MSR_IA32_UCODE_REV:
3932 if (msr_info->host_initiated)
3933 vcpu->arch.microcode_version = data;
3934 break;
3935 case MSR_IA32_ARCH_CAPABILITIES:
3936 if (!msr_info->host_initiated ||
3937 !guest_cpu_cap_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
3938 return KVM_MSR_RET_UNSUPPORTED;
3939 vcpu->arch.arch_capabilities = data;
3940 break;
3941 case MSR_IA32_PERF_CAPABILITIES:
3942 if (!msr_info->host_initiated ||
3943 !guest_cpu_cap_has(vcpu, X86_FEATURE_PDCM))
3944 return KVM_MSR_RET_UNSUPPORTED;
3945
3946 if (data & ~kvm_caps.supported_perf_cap)
3947 return 1;
3948
3949 /*
3950 * Note, this is not just a performance optimization! KVM
3951 * disallows changing feature MSRs after the vCPU has run; PMU
3952 * refresh will bug the VM if called after the vCPU has run.
3953 */
3954 if (vcpu->arch.perf_capabilities == data)
3955 break;
3956
3957 vcpu->arch.perf_capabilities = data;
3958 kvm_pmu_refresh(vcpu);
3959 kvm_make_request(KVM_REQ_RECALC_INTERCEPTS, vcpu);
3960 break;
3961 case MSR_IA32_PRED_CMD: {
3962 u64 reserved_bits = ~(PRED_CMD_IBPB | PRED_CMD_SBPB);
3963
3964 if (!msr_info->host_initiated) {
3965 if ((!guest_has_pred_cmd_msr(vcpu)))
3966 return 1;
3967
3968 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
3969 !guest_cpu_cap_has(vcpu, X86_FEATURE_AMD_IBPB))
3970 reserved_bits |= PRED_CMD_IBPB;
3971
3972 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SBPB))
3973 reserved_bits |= PRED_CMD_SBPB;
3974 }
3975
3976 if (!boot_cpu_has(X86_FEATURE_IBPB))
3977 reserved_bits |= PRED_CMD_IBPB;
3978
3979 if (!boot_cpu_has(X86_FEATURE_SBPB))
3980 reserved_bits |= PRED_CMD_SBPB;
3981
3982 if (data & reserved_bits)
3983 return 1;
3984
3985 if (!data)
3986 break;
3987
3988 wrmsrq(MSR_IA32_PRED_CMD, data);
3989 break;
3990 }
3991 case MSR_IA32_FLUSH_CMD:
3992 if (!msr_info->host_initiated &&
3993 !guest_cpu_cap_has(vcpu, X86_FEATURE_FLUSH_L1D))
3994 return 1;
3995
3996 if (!boot_cpu_has(X86_FEATURE_FLUSH_L1D) || (data & ~L1D_FLUSH))
3997 return 1;
3998 if (!data)
3999 break;
4000
4001 wrmsrq(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
4002 break;
4003 case MSR_EFER:
4004 return set_efer(vcpu, msr_info);
4005 case MSR_K7_HWCR:
4006 data &= ~(u64)0x40; /* ignore flush filter disable */
4007 data &= ~(u64)0x100; /* ignore ignne emulation enable */
4008 data &= ~(u64)0x8; /* ignore TLB cache disable */
4009
4010 /*
4011 * Allow McStatusWrEn and TscFreqSel. (Linux guests from v3.2
4012 * through at least v6.6 whine if TscFreqSel is clear,
4013 * depending on F/M/S.)
4014 */
4015 if (data & ~(BIT_ULL(18) | BIT_ULL(24))) {
4016 kvm_pr_unimpl_wrmsr(vcpu, msr, data);
4017 return 1;
4018 }
4019 vcpu->arch.msr_hwcr = data;
4020 break;
4021 case MSR_FAM10H_MMIO_CONF_BASE:
4022 if (data != 0) {
4023 kvm_pr_unimpl_wrmsr(vcpu, msr, data);
4024 return 1;
4025 }
4026 break;
4027 case MSR_IA32_CR_PAT:
4028 if (!kvm_pat_valid(data))
4029 return 1;
4030
4031 vcpu->arch.pat = data;
4032 break;
4033 case MTRRphysBase_MSR(0) ... MSR_MTRRfix4K_F8000:
4034 case MSR_MTRRdefType:
4035 return kvm_mtrr_set_msr(vcpu, msr, data);
4036 case MSR_IA32_APICBASE:
4037 return kvm_apic_set_base(vcpu, data, msr_info->host_initiated);
4038 case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
4039 return kvm_x2apic_msr_write(vcpu, msr, data);
4040 case MSR_IA32_TSC_DEADLINE:
4041 kvm_set_lapic_tscdeadline_msr(vcpu, data);
4042 break;
4043 case MSR_IA32_TSC_ADJUST:
4044 if (guest_cpu_cap_has(vcpu, X86_FEATURE_TSC_ADJUST)) {
4045 if (!msr_info->host_initiated) {
4046 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
4047 adjust_tsc_offset_guest(vcpu, adj);
4048 /* Before returning to the guest, tsc_timestamp must be adjusted
4049 * as well, otherwise the guest's per-CPU pvclock time could jump.
4050 */
4051 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
4052 }
4053 vcpu->arch.ia32_tsc_adjust_msr = data;
4054 }
4055 break;
4056 case MSR_IA32_MISC_ENABLE: {
4057 u64 old_val = vcpu->arch.ia32_misc_enable_msr;
4058
4059 if (!msr_info->host_initiated) {
4060 /* RO bits */
4061 if ((old_val ^ data) & MSR_IA32_MISC_ENABLE_PMU_RO_MASK)
4062 return 1;
4063
4064 /* R bits, i.e. writes are ignored, but don't fault. */
4065 data = data & ~MSR_IA32_MISC_ENABLE_EMON;
4066 data |= old_val & MSR_IA32_MISC_ENABLE_EMON;
4067 }
4068
4069 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT) &&
4070 ((old_val ^ data) & MSR_IA32_MISC_ENABLE_MWAIT)) {
4071 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_XMM3))
4072 return 1;
4073 vcpu->arch.ia32_misc_enable_msr = data;
4074 vcpu->arch.cpuid_dynamic_bits_dirty = true;
4075 } else {
4076 vcpu->arch.ia32_misc_enable_msr = data;
4077 }
4078 break;
4079 }
4080 case MSR_IA32_SMBASE:
4081 if (!IS_ENABLED(CONFIG_KVM_SMM) || !msr_info->host_initiated)
4082 return 1;
4083 vcpu->arch.smbase = data;
4084 break;
4085 case MSR_IA32_POWER_CTL:
4086 vcpu->arch.msr_ia32_power_ctl = data;
4087 break;
4088 case MSR_IA32_TSC:
4089 if (msr_info->host_initiated) {
4090 kvm_synchronize_tsc(vcpu, &data);
4091 } else if (!vcpu->arch.guest_tsc_protected) {
4092 u64 adj = kvm_compute_l1_tsc_offset(vcpu, data) - vcpu->arch.l1_tsc_offset;
4093 adjust_tsc_offset_guest(vcpu, adj);
4094 vcpu->arch.ia32_tsc_adjust_msr += adj;
4095 }
4096 break;
4097 case MSR_IA32_XSS:
4098 if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
4099 return KVM_MSR_RET_UNSUPPORTED;
4100
4101 if (data & ~vcpu->arch.guest_supported_xss)
4102 return 1;
4103 if (vcpu->arch.ia32_xss == data)
4104 break;
4105 vcpu->arch.ia32_xss = data;
4106 vcpu->arch.cpuid_dynamic_bits_dirty = true;
4107 break;
4108 case MSR_SMI_COUNT:
4109 if (!msr_info->host_initiated)
4110 return 1;
4111 vcpu->arch.smi_count = data;
4112 break;
4113 case MSR_KVM_WALL_CLOCK_NEW:
4114 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
4115 return KVM_MSR_RET_UNSUPPORTED;
4116
4117 vcpu->kvm->arch.wall_clock = data;
4118 kvm_write_wall_clock(vcpu->kvm, data, 0);
4119 break;
4120 case MSR_KVM_WALL_CLOCK:
4121 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
4122 return KVM_MSR_RET_UNSUPPORTED;
4123
4124 vcpu->kvm->arch.wall_clock = data;
4125 kvm_write_wall_clock(vcpu->kvm, data, 0);
4126 break;
4127 case MSR_KVM_SYSTEM_TIME_NEW:
4128 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
4129 return KVM_MSR_RET_UNSUPPORTED;
4130
4131 kvm_write_system_time(vcpu, data, false, msr_info->host_initiated);
4132 break;
4133 case MSR_KVM_SYSTEM_TIME:
4134 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
4135 return KVM_MSR_RET_UNSUPPORTED;
4136
4137 kvm_write_system_time(vcpu, data, true, msr_info->host_initiated);
4138 break;
4139 case MSR_KVM_ASYNC_PF_EN:
4140 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF))
4141 return KVM_MSR_RET_UNSUPPORTED;
4142
4143 if (kvm_pv_enable_async_pf(vcpu, data))
4144 return 1;
4145 break;
4146 case MSR_KVM_ASYNC_PF_INT:
4147 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
4148 return KVM_MSR_RET_UNSUPPORTED;
4149
4150 if (kvm_pv_enable_async_pf_int(vcpu, data))
4151 return 1;
4152 break;
4153 case MSR_KVM_ASYNC_PF_ACK:
4154 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
4155 return KVM_MSR_RET_UNSUPPORTED;
4156 if (data & 0x1) {
4157 /*
4158 * Pairs with the smp_mb__after_atomic() in
4159 * kvm_arch_async_page_present_queued().
4160 */
4161 smp_store_mb(vcpu->arch.apf.pageready_pending, false);
4162
4163 kvm_check_async_pf_completion(vcpu);
4164 }
4165 break;
4166 case MSR_KVM_STEAL_TIME:
4167 if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME))
4168 return KVM_MSR_RET_UNSUPPORTED;
4169
4170 if (unlikely(!sched_info_on()))
4171 return 1;
4172
4173 if (data & KVM_STEAL_RESERVED_MASK)
4174 return 1;
4175
4176 vcpu->arch.st.msr_val = data;
4177
4178 if (!(data & KVM_MSR_ENABLED))
4179 break;
4180
4181 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
4182
4183 break;
4184 case MSR_KVM_PV_EOI_EN:
4185 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI))
4186 return KVM_MSR_RET_UNSUPPORTED;
4187
4188 if (kvm_lapic_set_pv_eoi(vcpu, data, sizeof(u8)))
4189 return 1;
4190 break;
4191
4192 case MSR_KVM_POLL_CONTROL:
4193 if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL))
4194 return KVM_MSR_RET_UNSUPPORTED;
4195
4196 /* only enable bit supported */
4197 if (data & (-1ULL << 1))
4198 return 1;
4199
4200 vcpu->arch.msr_kvm_poll_control = data;
4201 break;
4202
4203 case MSR_IA32_MCG_CTL:
4204 case MSR_IA32_MCG_STATUS:
4205 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
4206 case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1:
4207 return set_msr_mce(vcpu, msr_info);
4208
4209 case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
4210 case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
4211 case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
4212 case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
4213 if (kvm_pmu_is_valid_msr(vcpu, msr))
4214 return kvm_pmu_set_msr(vcpu, msr_info);
4215
4216 if (data)
4217 kvm_pr_unimpl_wrmsr(vcpu, msr, data);
4218 break;
4219 case MSR_K7_CLK_CTL:
4220 /*
4221 * Ignore all writes to this no longer documented MSR.
4222 * Writes are only relevant for old K7 processors,
4223 * all pre-dating SVM, but a recommended workaround from
4224 * AMD for these chips. It is possible to specify the
4225 * affected processor models on the command line, hence
4226 * the need to ignore the workaround.
4227 */
4228 break;
4229 #ifdef CONFIG_KVM_HYPERV
4230 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
4231 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
4232 case HV_X64_MSR_SYNDBG_OPTIONS:
4233 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
4234 case HV_X64_MSR_CRASH_CTL:
4235 case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
4236 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
4237 case HV_X64_MSR_TSC_EMULATION_CONTROL:
4238 case HV_X64_MSR_TSC_EMULATION_STATUS:
4239 case HV_X64_MSR_TSC_INVARIANT_CONTROL:
4240 return kvm_hv_set_msr_common(vcpu, msr, data,
4241 msr_info->host_initiated);
4242 #endif
4243 case MSR_IA32_BBL_CR_CTL3:
4244 /* Drop writes to this legacy MSR -- see rdmsr
4245 * counterpart for further detail.
4246 */
4247 kvm_pr_unimpl_wrmsr(vcpu, msr, data);
4248 break;
4249 case MSR_AMD64_OSVW_ID_LENGTH:
4250 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_OSVW))
4251 return 1;
4252 vcpu->arch.osvw.length = data;
4253 break;
4254 case MSR_AMD64_OSVW_STATUS:
4255 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_OSVW))
4256 return 1;
4257 vcpu->arch.osvw.status = data;
4258 break;
4259 case MSR_PLATFORM_INFO:
4260 if (!msr_info->host_initiated)
4261 return 1;
4262 vcpu->arch.msr_platform_info = data;
4263 break;
4264 case MSR_MISC_FEATURES_ENABLES:
4265 if (data & ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT ||
4266 (data & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
4267 !supports_cpuid_fault(vcpu)))
4268 return 1;
4269 vcpu->arch.msr_misc_features_enables = data;
4270 break;
4271 #ifdef CONFIG_X86_64
4272 case MSR_IA32_XFD:
4273 if (!msr_info->host_initiated &&
4274 !guest_cpu_cap_has(vcpu, X86_FEATURE_XFD))
4275 return 1;
4276
4277 if (data & ~kvm_guest_supported_xfd(vcpu))
4278 return 1;
4279
4280 fpu_update_guest_xfd(&vcpu->arch.guest_fpu, data);
4281 break;
4282 case MSR_IA32_XFD_ERR:
4283 if (!msr_info->host_initiated &&
4284 !guest_cpu_cap_has(vcpu, X86_FEATURE_XFD))
4285 return 1;
4286
4287 if (data & ~kvm_guest_supported_xfd(vcpu))
4288 return 1;
4289
4290 vcpu->arch.guest_fpu.xfd_err = data;
4291 break;
4292 #endif
4293 case MSR_IA32_U_CET:
4294 case MSR_IA32_PL0_SSP ... MSR_IA32_PL3_SSP:
4295 kvm_set_xstate_msr(vcpu, msr_info);
4296 break;
4297 default:
4298 if (kvm_pmu_is_valid_msr(vcpu, msr))
4299 return kvm_pmu_set_msr(vcpu, msr_info);
4300
4301 return KVM_MSR_RET_UNSUPPORTED;
4302 }
4303 return 0;
4304 }
4305 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_msr_common);
4306
4307 static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
4308 {
4309 u64 data;
4310 u64 mcg_cap = vcpu->arch.mcg_cap;
4311 unsigned bank_num = mcg_cap & 0xff;
4312 u32 offset, last_msr;
4313
4314 switch (msr) {
4315 case MSR_IA32_P5_MC_ADDR:
4316 case MSR_IA32_P5_MC_TYPE:
4317 data = 0;
4318 break;
4319 case MSR_IA32_MCG_CAP:
4320 data = vcpu->arch.mcg_cap;
4321 break;
4322 case MSR_IA32_MCG_CTL:
4323 if (!(mcg_cap & MCG_CTL_P) && !host)
4324 return 1;
4325 data = vcpu->arch.mcg_ctl;
4326 break;
4327 case MSR_IA32_MCG_STATUS:
4328 data = vcpu->arch.mcg_status;
4329 break;
4330 case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1:
4331 last_msr = MSR_IA32_MCx_CTL2(bank_num) - 1;
4332 if (msr > last_msr)
4333 return 1;
4334
4335 if (!(mcg_cap & MCG_CMCI_P) && !host)
4336 return 1;
4337 offset = array_index_nospec(msr - MSR_IA32_MC0_CTL2,
4338 last_msr + 1 - MSR_IA32_MC0_CTL2);
4339 data = vcpu->arch.mci_ctl2_banks[offset];
4340 break;
4341 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
4342 last_msr = MSR_IA32_MCx_CTL(bank_num) - 1;
4343 if (msr > last_msr)
4344 return 1;
4345
4346 offset = array_index_nospec(msr - MSR_IA32_MC0_CTL,
4347 last_msr + 1 - MSR_IA32_MC0_CTL);
4348 data = vcpu->arch.mce_banks[offset];
4349 break;
4350 default:
4351 return 1;
4352 }
4353 *pdata = data;
4354 return 0;
4355 }
4356
4357 int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
4358 {
4359 switch (msr_info->index) {
4360 case MSR_IA32_PLATFORM_ID:
4361 case MSR_IA32_EBL_CR_POWERON:
4362 case MSR_IA32_LASTBRANCHFROMIP:
4363 case MSR_IA32_LASTBRANCHTOIP:
4364 case MSR_IA32_LASTINTFROMIP:
4365 case MSR_IA32_LASTINTTOIP:
4366 case MSR_AMD64_SYSCFG:
4367 case MSR_K8_TSEG_ADDR:
4368 case MSR_K8_TSEG_MASK:
4369 case MSR_VM_HSAVE_PA:
4370 case MSR_K8_INT_PENDING_MSG:
4371 case MSR_AMD64_NB_CFG:
4372 case MSR_FAM10H_MMIO_CONF_BASE:
4373 case MSR_AMD64_BU_CFG2:
4374 case MSR_IA32_PERF_CTL:
4375 case MSR_AMD64_DC_CFG:
4376 case MSR_AMD64_TW_CFG:
4377 case MSR_F15H_EX_CFG:
4378 /*
4379 * Intel Sandy Bridge CPUs must support the RAPL (running average power
4380 * limit) MSRs. Just return 0, as we do not want to expose the host
4381 * data here. Do not conditionalize this on CPUID, as KVM does not do
4382 * so for existing CPU-specific MSRs.
4383 */
4384 case MSR_RAPL_POWER_UNIT:
4385 case MSR_PP0_ENERGY_STATUS: /* Power plane 0 (core) */
4386 case MSR_PP1_ENERGY_STATUS: /* Power plane 1 (graphics uncore) */
4387 case MSR_PKG_ENERGY_STATUS: /* Total package */
4388 case MSR_DRAM_ENERGY_STATUS: /* DRAM controller */
4389 msr_info->data = 0;
4390 break;
4391 case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
4392 case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
4393 case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
4394 case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
4395 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
4396 return kvm_pmu_get_msr(vcpu, msr_info);
4397 msr_info->data = 0;
4398 break;
4399 case MSR_IA32_UCODE_REV:
4400 msr_info->data = vcpu->arch.microcode_version;
4401 break;
4402 case MSR_IA32_ARCH_CAPABILITIES:
4403 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
4404 return KVM_MSR_RET_UNSUPPORTED;
4405 msr_info->data = vcpu->arch.arch_capabilities;
4406 break;
4407 case MSR_IA32_PERF_CAPABILITIES:
4408 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_PDCM))
4409 return KVM_MSR_RET_UNSUPPORTED;
4410 msr_info->data = vcpu->arch.perf_capabilities;
4411 break;
4412 case MSR_IA32_POWER_CTL:
4413 msr_info->data = vcpu->arch.msr_ia32_power_ctl;
4414 break;
4415 case MSR_IA32_TSC: {
4416 /*
4417 * Intel SDM states that MSR_IA32_TSC read adds the TSC offset
4418 * even when not intercepted. AMD manual doesn't explicitly
4419 * state this but appears to behave the same.
4420 *
4421 * On userspace reads and writes, however, we unconditionally
4422 * return L1's TSC value to ensure backwards-compatible
4423 * behavior for migration.
4424 */
4425 u64 offset, ratio;
4426
4427 if (msr_info->host_initiated) {
4428 offset = vcpu->arch.l1_tsc_offset;
4429 ratio = vcpu->arch.l1_tsc_scaling_ratio;
4430 } else {
4431 offset = vcpu->arch.tsc_offset;
4432 ratio = vcpu->arch.tsc_scaling_ratio;
4433 }
4434
4435 msr_info->data = kvm_scale_tsc(rdtsc(), ratio) + offset;
4436 break;
4437 }
4438 case MSR_IA32_CR_PAT:
4439 msr_info->data = vcpu->arch.pat;
4440 break;
4441 case MSR_MTRRcap:
4442 case MTRRphysBase_MSR(0) ... MSR_MTRRfix4K_F8000:
4443 case MSR_MTRRdefType:
4444 return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data);
4445 case 0xcd: /* fsb frequency */
4446 msr_info->data = 3;
4447 break;
4448 /*
4449 * MSR_EBC_FREQUENCY_ID
4450 * Conservative value valid for even the basic CPU models.
4451 * Models 0,1: 000 in bits 23:21 indicating a bus speed of
4452 * 100MHz, model 2 000 in bits 18:16 indicating 100MHz,
4453 * and 266MHz for model 3, or 4. Set Core Clock
4454 * Frequency to System Bus Frequency Ratio to 1 (bits
4455 * 31:24) even though these are only valid for CPU
4456 * models > 2, however guests may end up dividing or
4457 * multiplying by zero otherwise.
4458 */
4459 case MSR_EBC_FREQUENCY_ID:
4460 msr_info->data = 1 << 24;
4461 break;
4462 case MSR_IA32_APICBASE:
4463 msr_info->data = vcpu->arch.apic_base;
4464 break;
4465 case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
4466 return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data);
4467 case MSR_IA32_TSC_DEADLINE:
4468 msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu);
4469 break;
4470 case MSR_IA32_TSC_ADJUST:
4471 msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr;
4472 break;
4473 case MSR_IA32_MISC_ENABLE:
4474 msr_info->data = vcpu->arch.ia32_misc_enable_msr;
4475 break;
4476 case MSR_IA32_SMBASE:
4477 if (!IS_ENABLED(CONFIG_KVM_SMM) || !msr_info->host_initiated)
4478 return 1;
4479 msr_info->data = vcpu->arch.smbase;
4480 break;
4481 case MSR_SMI_COUNT:
4482 msr_info->data = vcpu->arch.smi_count;
4483 break;
4484 case MSR_IA32_PERF_STATUS:
4485 /* TSC increment by tick */
4486 msr_info->data = 1000ULL;
4487 /* CPU multiplier */
4488 msr_info->data |= (((uint64_t)4ULL) << 40);
4489 break;
4490 case MSR_EFER:
4491 msr_info->data = vcpu->arch.efer;
4492 break;
4493 case MSR_KVM_WALL_CLOCK:
4494 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
4495 return KVM_MSR_RET_UNSUPPORTED;
4496
4497 msr_info->data = vcpu->kvm->arch.wall_clock;
4498 break;
4499 case MSR_KVM_WALL_CLOCK_NEW:
4500 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
4501 return KVM_MSR_RET_UNSUPPORTED;
4502
4503 msr_info->data = vcpu->kvm->arch.wall_clock;
4504 break;
4505 case MSR_KVM_SYSTEM_TIME:
4506 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
4507 return KVM_MSR_RET_UNSUPPORTED;
4508
4509 msr_info->data = vcpu->arch.time;
4510 break;
4511 case MSR_KVM_SYSTEM_TIME_NEW:
4512 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
4513 return KVM_MSR_RET_UNSUPPORTED;
4514
4515 msr_info->data = vcpu->arch.time;
4516 break;
4517 case MSR_KVM_ASYNC_PF_EN:
4518 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF))
4519 return KVM_MSR_RET_UNSUPPORTED;
4520
4521 msr_info->data = vcpu->arch.apf.msr_en_val;
4522 break;
4523 case MSR_KVM_ASYNC_PF_INT:
4524 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
4525 return KVM_MSR_RET_UNSUPPORTED;
4526
4527 msr_info->data = vcpu->arch.apf.msr_int_val;
4528 break;
4529 case MSR_KVM_ASYNC_PF_ACK:
4530 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
4531 return KVM_MSR_RET_UNSUPPORTED;
4532
4533 msr_info->data = 0;
4534 break;
4535 case MSR_KVM_STEAL_TIME:
4536 if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME))
4537 return KVM_MSR_RET_UNSUPPORTED;
4538
4539 msr_info->data = vcpu->arch.st.msr_val;
4540 break;
4541 case MSR_KVM_PV_EOI_EN:
4542 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI))
4543 return KVM_MSR_RET_UNSUPPORTED;
4544
4545 msr_info->data = vcpu->arch.pv_eoi.msr_val;
4546 break;
4547 case MSR_KVM_POLL_CONTROL:
4548 if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL))
4549 return KVM_MSR_RET_UNSUPPORTED;
4550
4551 msr_info->data = vcpu->arch.msr_kvm_poll_control;
4552 break;
4553 case MSR_IA32_P5_MC_ADDR:
4554 case MSR_IA32_P5_MC_TYPE:
4555 case MSR_IA32_MCG_CAP:
4556 case MSR_IA32_MCG_CTL:
4557 case MSR_IA32_MCG_STATUS:
4558 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
4559 case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1:
4560 return get_msr_mce(vcpu, msr_info->index, &msr_info->data,
4561 msr_info->host_initiated);
4562 case MSR_IA32_XSS:
4563 if (!msr_info->host_initiated &&
4564 !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
4565 return 1;
4566 msr_info->data = vcpu->arch.ia32_xss;
4567 break;
4568 case MSR_K7_CLK_CTL:
4569 /*
4570 * Provide expected ramp-up count for K7. All other
4571 * are set to zero, indicating minimum divisors for
4572 * every field.
4573 *
4574 * This prevents guest kernels on AMD host with CPU
4575 * type 6, model 8 and higher from exploding due to
4576 * the rdmsr failing.
4577 */
4578 msr_info->data = 0x20000000;
4579 break;
4580 #ifdef CONFIG_KVM_HYPERV
4581 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
4582 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
4583 case HV_X64_MSR_SYNDBG_OPTIONS:
4584 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
4585 case HV_X64_MSR_CRASH_CTL:
4586 case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
4587 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
4588 case HV_X64_MSR_TSC_EMULATION_CONTROL:
4589 case HV_X64_MSR_TSC_EMULATION_STATUS:
4590 case HV_X64_MSR_TSC_INVARIANT_CONTROL:
4591 return kvm_hv_get_msr_common(vcpu,
4592 msr_info->index, &msr_info->data,
4593 msr_info->host_initiated);
4594 #endif
4595 case MSR_IA32_BBL_CR_CTL3:
4596 /* This legacy MSR exists but isn't fully documented in current
4597 * silicon. It is however accessed by winxp in very narrow
4598 * scenarios where it sets bit #19, itself documented as
4599 * a "reserved" bit. Best effort attempt to source coherent
4600 * read data here should the balance of the register be
4601 * interpreted by the guest:
4602 *
4603 * L2 cache control register 3: 64GB range, 256KB size,
4604 * enabled, latency 0x1, configured
4605 */
4606 msr_info->data = 0xbe702111;
4607 break;
4608 case MSR_AMD64_OSVW_ID_LENGTH:
4609 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_OSVW))
4610 return 1;
4611 msr_info->data = vcpu->arch.osvw.length;
4612 break;
4613 case MSR_AMD64_OSVW_STATUS:
4614 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_OSVW))
4615 return 1;
4616 msr_info->data = vcpu->arch.osvw.status;
4617 break;
4618 case MSR_PLATFORM_INFO:
4619 if (!msr_info->host_initiated &&
4620 !vcpu->kvm->arch.guest_can_read_msr_platform_info)
4621 return 1;
4622 msr_info->data = vcpu->arch.msr_platform_info;
4623 break;
4624 case MSR_MISC_FEATURES_ENABLES:
4625 msr_info->data = vcpu->arch.msr_misc_features_enables;
4626 break;
4627 case MSR_K7_HWCR:
4628 msr_info->data = vcpu->arch.msr_hwcr;
4629 break;
4630 #ifdef CONFIG_X86_64
4631 case MSR_IA32_XFD:
4632 if (!msr_info->host_initiated &&
4633 !guest_cpu_cap_has(vcpu, X86_FEATURE_XFD))
4634 return 1;
4635
4636 msr_info->data = vcpu->arch.guest_fpu.fpstate->xfd;
4637 break;
4638 case MSR_IA32_XFD_ERR:
4639 if (!msr_info->host_initiated &&
4640 !guest_cpu_cap_has(vcpu, X86_FEATURE_XFD))
4641 return 1;
4642
4643 msr_info->data = vcpu->arch.guest_fpu.xfd_err;
4644 break;
4645 #endif
4646 case MSR_IA32_U_CET:
4647 case MSR_IA32_PL0_SSP ... MSR_IA32_PL3_SSP:
4648 kvm_get_xstate_msr(vcpu, msr_info);
4649 break;
4650 default:
4651 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
4652 return kvm_pmu_get_msr(vcpu, msr_info);
4653
4654 return KVM_MSR_RET_UNSUPPORTED;
4655 }
4656 return 0;
4657 }
4658 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_get_msr_common);
4659
4660 /*
4661 * Read or write a bunch of msrs. All parameters are kernel addresses.
4662 *
4663 * @return number of msrs set successfully.
4664 */
4665 static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
4666 struct kvm_msr_entry *entries,
4667 int (*do_msr)(struct kvm_vcpu *vcpu,
4668 unsigned index, u64 *data))
4669 {
4670 bool fpu_loaded = false;
4671 int i;
4672
4673 for (i = 0; i < msrs->nmsrs; ++i) {
4674 /*
4675 * If userspace is accessing one or more XSTATE-managed MSRs,
4676 * temporarily load the guest's FPU state so that the guest's
4677 * MSR value(s) is resident in hardware and thus can be accessed
4678 * via RDMSR/WRMSR.
4679 */
4680 if (!fpu_loaded && is_xstate_managed_msr(vcpu, entries[i].index)) {
4681 kvm_load_guest_fpu(vcpu);
4682 fpu_loaded = true;
4683 }
4684 if (do_msr(vcpu, entries[i].index, &entries[i].data))
4685 break;
4686 }
4687 if (fpu_loaded)
4688 kvm_put_guest_fpu(vcpu);
4689
4690 return i;
4691 }
4692
4693 /*
4694 * Read or write a bunch of msrs. Parameters are user addresses.
4695 *
4696 * @return number of msrs set successfully.
4697 */
4698 static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
4699 int (*do_msr)(struct kvm_vcpu *vcpu,
4700 unsigned index, u64 *data),
4701 int writeback)
4702 {
4703 struct kvm_msrs msrs;
4704 struct kvm_msr_entry *entries;
4705 unsigned size;
4706 int r;
4707
4708 r = -EFAULT;
4709 if (copy_from_user(&msrs, user_msrs, sizeof(msrs)))
4710 goto out;
4711
4712 r = -E2BIG;
4713 if (msrs.nmsrs >= MAX_IO_MSRS)
4714 goto out;
4715
4716 size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
4717 entries = memdup_user(user_msrs->entries, size);
4718 if (IS_ERR(entries)) {
4719 r = PTR_ERR(entries);
4720 goto out;
4721 }
4722
4723 r = __msr_io(vcpu, &msrs, entries, do_msr);
4724
4725 if (writeback && copy_to_user(user_msrs->entries, entries, size))
4726 r = -EFAULT;
4727
4728 kfree(entries);
4729 out:
4730 return r;
4731 }
4732
4733 static inline bool kvm_can_mwait_in_guest(void)
4734 {
4735 return boot_cpu_has(X86_FEATURE_MWAIT) &&
4736 !boot_cpu_has_bug(X86_BUG_MONITOR) &&
4737 boot_cpu_has(X86_FEATURE_ARAT);
4738 }
4739
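/*
 * Build the mask of exits that userspace may disable via
 * KVM_CAP_X86_DISABLE_EXITS.  HLT/MWAIT/C-state exits are only offered when
 * mitigate_smt_rsb is off, and MWAIT additionally requires that MWAIT is
 * usable in the guest (see kvm_can_mwait_in_guest()).
 */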
4740 static u64 kvm_get_allowed_disable_exits(void)
4741 {
4742 u64 r = KVM_X86_DISABLE_EXITS_PAUSE;
4743
4744 if (boot_cpu_has(X86_FEATURE_APERFMPERF))
4745 r |= KVM_X86_DISABLE_EXITS_APERFMPERF;
4746
4747 if (!mitigate_smt_rsb) {
4748 r |= KVM_X86_DISABLE_EXITS_HLT |
4749 KVM_X86_DISABLE_EXITS_CSTATE;
4750
4751 if (kvm_can_mwait_in_guest())
4752 r |= KVM_X86_DISABLE_EXITS_MWAIT;
4753 }
4754 return r;
4755 }
4756
4757 #ifdef CONFIG_KVM_HYPERV
4758 static int kvm_ioctl_get_supported_hv_cpuid(struct kvm_vcpu *vcpu,
4759 struct kvm_cpuid2 __user *cpuid_arg)
4760 {
4761 struct kvm_cpuid2 cpuid;
4762 int r;
4763
4764 r = -EFAULT;
4765 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
4766 return r;
4767
4768 r = kvm_get_hv_cpuid(vcpu, &cpuid, cpuid_arg->entries);
4769 if (r)
4770 return r;
4771
4772 r = -EFAULT;
4773 if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid)))
4774 return r;
4775
4776 return 0;
4777 }
4778 #endif
4779
4780 static bool kvm_is_vm_type_supported(unsigned long type)
4781 {
4782 return type < 32 && (kvm_caps.supported_vm_types & BIT(type));
4783 }
4784
4785 static inline u64 kvm_sync_valid_fields(struct kvm *kvm)
4786 {
4787 return kvm && kvm->arch.has_protected_state ? 0 : KVM_SYNC_X86_VALID_FIELDS;
4788 }
4789
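/*
 * KVM_CHECK_EXTENSION handler: for simple capabilities r is 0/1, while other
 * capabilities return a feature mask or an implementation limit
 * (e.g. KVM_CAP_MAX_VCPUS).
 */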
4790 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
4791 {
4792 int r = 0;
4793
4794 switch (ext) {
4795 case KVM_CAP_IRQCHIP:
4796 case KVM_CAP_HLT:
4797 case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
4798 case KVM_CAP_SET_TSS_ADDR:
4799 case KVM_CAP_EXT_CPUID:
4800 case KVM_CAP_EXT_EMUL_CPUID:
4801 case KVM_CAP_CLOCKSOURCE:
4802 #ifdef CONFIG_KVM_IOAPIC
4803 case KVM_CAP_PIT:
4804 case KVM_CAP_PIT2:
4805 case KVM_CAP_PIT_STATE2:
4806 case KVM_CAP_REINJECT_CONTROL:
4807 #endif
4808 case KVM_CAP_NOP_IO_DELAY:
4809 case KVM_CAP_MP_STATE:
4810 case KVM_CAP_USER_NMI:
4811 case KVM_CAP_IRQ_INJECT_STATUS:
4812 case KVM_CAP_IOEVENTFD:
4813 case KVM_CAP_IOEVENTFD_NO_LENGTH:
4814
4815 case KVM_CAP_SET_IDENTITY_MAP_ADDR:
4816 case KVM_CAP_VCPU_EVENTS:
4817 #ifdef CONFIG_KVM_HYPERV
4818 case KVM_CAP_HYPERV:
4819 case KVM_CAP_HYPERV_VAPIC:
4820 case KVM_CAP_HYPERV_SPIN:
4821 case KVM_CAP_HYPERV_TIME:
4822 case KVM_CAP_HYPERV_SYNIC:
4823 case KVM_CAP_HYPERV_SYNIC2:
4824 case KVM_CAP_HYPERV_VP_INDEX:
4825 case KVM_CAP_HYPERV_EVENTFD:
4826 case KVM_CAP_HYPERV_TLBFLUSH:
4827 case KVM_CAP_HYPERV_SEND_IPI:
4828 case KVM_CAP_HYPERV_CPUID:
4829 case KVM_CAP_HYPERV_ENFORCE_CPUID:
4830 case KVM_CAP_SYS_HYPERV_CPUID:
4831 #endif
4832 case KVM_CAP_PCI_SEGMENT:
4833 case KVM_CAP_DEBUGREGS:
4834 case KVM_CAP_X86_ROBUST_SINGLESTEP:
4835 case KVM_CAP_XSAVE:
4836 case KVM_CAP_ASYNC_PF:
4837 case KVM_CAP_ASYNC_PF_INT:
4838 case KVM_CAP_GET_TSC_KHZ:
4839 case KVM_CAP_KVMCLOCK_CTRL:
4840 case KVM_CAP_IOAPIC_POLARITY_IGNORED:
4841 case KVM_CAP_TSC_DEADLINE_TIMER:
4842 case KVM_CAP_DISABLE_QUIRKS:
4843 case KVM_CAP_SET_BOOT_CPU_ID:
4844 case KVM_CAP_SPLIT_IRQCHIP:
4845 case KVM_CAP_IMMEDIATE_EXIT:
4846 case KVM_CAP_PMU_EVENT_FILTER:
4847 case KVM_CAP_PMU_EVENT_MASKED_EVENTS:
4848 case KVM_CAP_GET_MSR_FEATURES:
4849 case KVM_CAP_MSR_PLATFORM_INFO:
4850 case KVM_CAP_EXCEPTION_PAYLOAD:
4851 case KVM_CAP_X86_TRIPLE_FAULT_EVENT:
4852 case KVM_CAP_SET_GUEST_DEBUG:
4853 case KVM_CAP_LAST_CPU:
4854 case KVM_CAP_X86_USER_SPACE_MSR:
4855 case KVM_CAP_X86_MSR_FILTER:
4856 case KVM_CAP_ENFORCE_PV_FEATURE_CPUID:
4857 #ifdef CONFIG_X86_SGX_KVM
4858 case KVM_CAP_SGX_ATTRIBUTE:
4859 #endif
4860 case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM:
4861 case KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM:
4862 case KVM_CAP_SREGS2:
4863 case KVM_CAP_EXIT_ON_EMULATION_FAILURE:
4864 case KVM_CAP_VCPU_ATTRIBUTES:
4865 case KVM_CAP_SYS_ATTRIBUTES:
4866 case KVM_CAP_VAPIC:
4867 case KVM_CAP_ENABLE_CAP:
4868 case KVM_CAP_VM_DISABLE_NX_HUGE_PAGES:
4869 case KVM_CAP_IRQFD_RESAMPLE:
4870 case KVM_CAP_MEMORY_FAULT_INFO:
4871 case KVM_CAP_X86_GUEST_MODE:
4872 case KVM_CAP_ONE_REG:
4873 r = 1;
4874 break;
4875 case KVM_CAP_PRE_FAULT_MEMORY:
4876 r = tdp_enabled;
4877 break;
4878 case KVM_CAP_X86_APIC_BUS_CYCLES_NS:
4879 r = APIC_BUS_CYCLE_NS_DEFAULT;
4880 break;
4881 case KVM_CAP_EXIT_HYPERCALL:
4882 r = KVM_EXIT_HYPERCALL_VALID_MASK;
4883 break;
4884 case KVM_CAP_SET_GUEST_DEBUG2:
4885 return KVM_GUESTDBG_VALID_MASK;
4886 #ifdef CONFIG_KVM_XEN
4887 case KVM_CAP_XEN_HVM:
4888 r = KVM_XEN_HVM_CONFIG_HYPERCALL_MSR |
4889 KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL |
4890 KVM_XEN_HVM_CONFIG_SHARED_INFO |
4891 KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL |
4892 KVM_XEN_HVM_CONFIG_EVTCHN_SEND |
4893 KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE |
4894 KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA;
4895 if (sched_info_on())
4896 r |= KVM_XEN_HVM_CONFIG_RUNSTATE |
4897 KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG;
4898 break;
4899 #endif
4900 case KVM_CAP_SYNC_REGS:
4901 r = kvm_sync_valid_fields(kvm);
4902 break;
4903 case KVM_CAP_ADJUST_CLOCK:
4904 r = KVM_CLOCK_VALID_FLAGS;
4905 break;
4906 case KVM_CAP_X86_DISABLE_EXITS:
4907 r = kvm_get_allowed_disable_exits();
4908 break;
4909 case KVM_CAP_X86_SMM:
4910 if (!IS_ENABLED(CONFIG_KVM_SMM))
4911 break;
4912
4913 /* SMBASE is usually relocated above 1M on modern chipsets,
4914 * and SMM handlers might indeed rely on 4G segment limits,
4915 * so do not report SMM to be available if real mode is
4916 * emulated via vm86 mode. Still, do not go to great lengths
4917 * to avoid userspace's usage of the feature, because it is a
4918 * fringe case that is not enabled except via specific settings
4919 * of the module parameters.
4920 */
4921 r = kvm_x86_call(has_emulated_msr)(kvm, MSR_IA32_SMBASE);
4922 break;
4923 case KVM_CAP_NR_VCPUS:
4924 r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
4925 break;
4926 case KVM_CAP_MAX_VCPUS:
4927 r = KVM_MAX_VCPUS;
4928 if (kvm)
4929 r = kvm->max_vcpus;
4930 break;
4931 case KVM_CAP_MAX_VCPU_ID:
4932 r = KVM_MAX_VCPU_IDS;
4933 break;
4934 case KVM_CAP_PV_MMU: /* obsolete */
4935 r = 0;
4936 break;
4937 case KVM_CAP_MCE:
4938 r = KVM_MAX_MCE_BANKS;
4939 break;
4940 case KVM_CAP_XCRS:
4941 r = boot_cpu_has(X86_FEATURE_XSAVE);
4942 break;
4943 case KVM_CAP_TSC_CONTROL:
4944 case KVM_CAP_VM_TSC_CONTROL:
4945 r = kvm_caps.has_tsc_control;
4946 break;
4947 case KVM_CAP_X2APIC_API:
4948 r = KVM_X2APIC_API_VALID_FLAGS;
4949 if (kvm && !irqchip_split(kvm))
4950 r &= ~KVM_X2APIC_ENABLE_SUPPRESS_EOI_BROADCAST;
4951 break;
4952 case KVM_CAP_NESTED_STATE:
4953 r = kvm_x86_ops.nested_ops->get_state ?
4954 kvm_x86_ops.nested_ops->get_state(NULL, NULL, 0) : 0;
4955 break;
4956 #ifdef CONFIG_KVM_HYPERV
4957 case KVM_CAP_HYPERV_DIRECT_TLBFLUSH:
4958 r = kvm_x86_ops.enable_l2_tlb_flush != NULL;
4959 break;
4960 case KVM_CAP_HYPERV_ENLIGHTENED_VMCS:
4961 r = kvm_x86_ops.nested_ops->enable_evmcs != NULL;
4962 break;
4963 #endif
4964 case KVM_CAP_SMALLER_MAXPHYADDR:
4965 r = (int) allow_smaller_maxphyaddr;
4966 break;
4967 case KVM_CAP_STEAL_TIME:
4968 r = sched_info_on();
4969 break;
4970 case KVM_CAP_X86_BUS_LOCK_EXIT:
4971 if (kvm_caps.has_bus_lock_exit)
4972 r = KVM_BUS_LOCK_DETECTION_OFF |
4973 KVM_BUS_LOCK_DETECTION_EXIT;
4974 else
4975 r = 0;
4976 break;
4977 case KVM_CAP_XSAVE2: {
4978 r = xstate_required_size(kvm_get_filtered_xcr0(), false);
4979 if (r < sizeof(struct kvm_xsave))
4980 r = sizeof(struct kvm_xsave);
4981 break;
4982 }
4983 case KVM_CAP_PMU_CAPABILITY:
4984 r = enable_pmu ? KVM_CAP_PMU_VALID_MASK : 0;
4985 break;
4986 case KVM_CAP_DISABLE_QUIRKS2:
4987 r = kvm_caps.supported_quirks;
4988 break;
4989 case KVM_CAP_X86_NOTIFY_VMEXIT:
4990 r = kvm_caps.has_notify_vmexit;
4991 break;
4992 case KVM_CAP_VM_TYPES:
4993 r = kvm_caps.supported_vm_types;
4994 break;
4995 case KVM_CAP_READONLY_MEM:
4996 r = kvm ? kvm_arch_has_readonly_mem(kvm) : 1;
4997 break;
4998 default:
4999 break;
5000 }
5001 return r;
5002 }
5003
5004 static int __kvm_x86_dev_get_attr(struct kvm_device_attr *attr, u64 *val)
5005 {
5006 if (attr->group) {
5007 if (kvm_x86_ops.dev_get_attr)
5008 return kvm_x86_call(dev_get_attr)(attr->group, attr->attr, val);
5009 return -ENXIO;
5010 }
5011
5012 switch (attr->attr) {
5013 case KVM_X86_XCOMP_GUEST_SUPP:
5014 *val = kvm_caps.supported_xcr0;
5015 return 0;
5016 default:
5017 return -ENXIO;
5018 }
5019 }
5020
5021 static int kvm_x86_dev_get_attr(struct kvm_device_attr *attr)
5022 {
5023 u64 __user *uaddr = u64_to_user_ptr(attr->addr);
5024 int r;
5025 u64 val;
5026
5027 r = __kvm_x86_dev_get_attr(attr, &val);
5028 if (r < 0)
5029 return r;
5030
5031 if (put_user(val, uaddr))
5032 return -EFAULT;
5033
5034 return 0;
5035 }
5036
5037 static int kvm_x86_dev_has_attr(struct kvm_device_attr *attr)
5038 {
5039 u64 val;
5040
5041 return __kvm_x86_dev_get_attr(attr, &val);
5042 }
5043
5044 long kvm_arch_dev_ioctl(struct file *filp,
5045 unsigned int ioctl, unsigned long arg)
5046 {
5047 void __user *argp = (void __user *)arg;
5048 long r;
5049
5050 switch (ioctl) {
5051 case KVM_GET_MSR_INDEX_LIST: {
5052 struct kvm_msr_list __user *user_msr_list = argp;
5053 struct kvm_msr_list msr_list;
5054 unsigned n;
5055
5056 r = -EFAULT;
5057 if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list)))
5058 goto out;
5059 n = msr_list.nmsrs;
5060 msr_list.nmsrs = num_msrs_to_save + num_emulated_msrs;
5061 if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list)))
5062 goto out;
5063 r = -E2BIG;
5064 if (n < msr_list.nmsrs)
5065 goto out;
5066 r = -EFAULT;
5067 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
5068 num_msrs_to_save * sizeof(u32)))
5069 goto out;
5070 if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
5071 &emulated_msrs,
5072 num_emulated_msrs * sizeof(u32)))
5073 goto out;
5074 r = 0;
5075 break;
5076 }
5077 case KVM_GET_SUPPORTED_CPUID:
5078 case KVM_GET_EMULATED_CPUID: {
5079 struct kvm_cpuid2 __user *cpuid_arg = argp;
5080 struct kvm_cpuid2 cpuid;
5081
5082 r = -EFAULT;
5083 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
5084 goto out;
5085
5086 r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries,
5087 ioctl);
5088 if (r)
5089 goto out;
5090
5091 r = -EFAULT;
5092 if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid)))
5093 goto out;
5094 r = 0;
5095 break;
5096 }
5097 case KVM_X86_GET_MCE_CAP_SUPPORTED:
5098 r = -EFAULT;
5099 if (copy_to_user(argp, &kvm_caps.supported_mce_cap,
5100 sizeof(kvm_caps.supported_mce_cap)))
5101 goto out;
5102 r = 0;
5103 break;
5104 case KVM_GET_MSR_FEATURE_INDEX_LIST: {
5105 struct kvm_msr_list __user *user_msr_list = argp;
5106 struct kvm_msr_list msr_list;
5107 unsigned int n;
5108
5109 r = -EFAULT;
5110 if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list)))
5111 goto out;
5112 n = msr_list.nmsrs;
5113 msr_list.nmsrs = num_msr_based_features;
5114 if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list)))
5115 goto out;
5116 r = -E2BIG;
5117 if (n < msr_list.nmsrs)
5118 goto out;
5119 r = -EFAULT;
5120 if (copy_to_user(user_msr_list->indices, &msr_based_features,
5121 num_msr_based_features * sizeof(u32)))
5122 goto out;
5123 r = 0;
5124 break;
5125 }
5126 case KVM_GET_MSRS:
5127 r = msr_io(NULL, argp, do_get_feature_msr, 1);
5128 break;
5129 #ifdef CONFIG_KVM_HYPERV
5130 case KVM_GET_SUPPORTED_HV_CPUID:
5131 r = kvm_ioctl_get_supported_hv_cpuid(NULL, argp);
5132 break;
5133 #endif
5134 case KVM_GET_DEVICE_ATTR: {
5135 struct kvm_device_attr attr;
5136 r = -EFAULT;
5137 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
5138 break;
5139 r = kvm_x86_dev_get_attr(&attr);
5140 break;
5141 }
5142 case KVM_HAS_DEVICE_ATTR: {
5143 struct kvm_device_attr attr;
5144 r = -EFAULT;
5145 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
5146 break;
5147 r = kvm_x86_dev_has_attr(&attr);
5148 break;
5149 }
5150 default:
5151 r = -EINVAL;
5152 break;
5153 }
5154 out:
5155 return r;
5156 }
5157
5158 static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
5159 {
5160 return kvm_arch_has_noncoherent_dma(vcpu->kvm);
5161 }
5162
5163 static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu);
5164
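/*
 * Called when a vCPU is (re)loaded onto physical CPU @cpu: request an L1D
 * flush, emit an IBPB when a different vCPU last ran on this CPU, let the
 * vendor module load its state, and resync TSC/kvmclock bookkeeping if the
 * vCPU migrated or the host TSC is unstable.
 */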
5165 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
5166 {
5167 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
5168
5169 kvm_request_l1tf_flush_l1d();
5170
5171 if (vcpu->scheduled_out && pmu->version && pmu->event_count) {
5172 pmu->need_cleanup = true;
5173 kvm_make_request(KVM_REQ_PMU, vcpu);
5174 }
5175
5176 /* Handle the case where WBINVD may be executed by the guest */
5177 if (need_emulate_wbinvd(vcpu)) {
5178 if (kvm_x86_call(has_wbinvd_exit)())
5179 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
5180 else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
5181 wbinvd_on_cpu(vcpu->cpu);
5182 }
5183
5184 kvm_x86_call(vcpu_load)(vcpu, cpu);
5185
5186 if (vcpu != per_cpu(last_vcpu, cpu)) {
5187 /*
5188 * Flush the branch predictor when switching vCPUs on the same
5189 * physical CPU, as each vCPU needs its own branch prediction
5190 * domain. No IBPB is needed when switching between L1 and L2
5191 * on the same vCPU unless IBRS is advertised to the vCPU; that
5192 * is handled on the nested VM-Exit path.
5193 */
5194 if (static_branch_likely(&switch_vcpu_ibpb))
5195 indirect_branch_prediction_barrier();
5196 per_cpu(last_vcpu, cpu) = vcpu;
5197 }
5198
5199 /* Save host pkru register if supported */
5200 vcpu->arch.host_pkru = read_pkru();
5201
5202 /* Apply any externally detected TSC adjustments (due to suspend) */
5203 if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
5204 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
5205 vcpu->arch.tsc_offset_adjustment = 0;
5206 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
5207 }
5208
5209 if (unlikely(vcpu->cpu != cpu) || kvm_check_tsc_unstable()) {
5210 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
5211 rdtsc() - vcpu->arch.last_host_tsc;
5212 if (tsc_delta < 0)
5213 mark_tsc_unstable("KVM discovered backwards TSC");
5214
5215 if (kvm_check_tsc_unstable()) {
5216 u64 offset = kvm_compute_l1_tsc_offset(vcpu,
5217 vcpu->arch.last_guest_tsc);
5218 kvm_vcpu_write_tsc_offset(vcpu, offset);
5219 if (!vcpu->arch.guest_tsc_protected)
5220 vcpu->arch.tsc_catchup = 1;
5221 }
5222
5223 if (kvm_lapic_hv_timer_in_use(vcpu))
5224 kvm_lapic_restart_hv_timer(vcpu);
5225
5226 /*
5227 * On a host with synchronized TSC, there is no need to update
5228 * kvmclock on vcpu->cpu migration
5229 */
5230 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1)
5231 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
5232 if (vcpu->cpu != cpu)
5233 kvm_make_request(KVM_REQ_MIGRATE_TIMER, vcpu);
5234 vcpu->cpu = cpu;
5235 }
5236
5237 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
5238 }
5239
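/*
 * Record KVM_VCPU_PREEMPTED in the guest's steal-time area so the guest can
 * tell that this vCPU was scheduled out (e.g. for paravirt spinlock and
 * yield decisions).  A nofault copy is used because this may run from the
 * preemption notifier, where faulting in the page is not allowed.
 */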
5240 static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
5241 {
5242 struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache;
5243 struct kvm_steal_time __user *st;
5244 struct kvm_memslots *slots;
5245 static const u8 preempted = KVM_VCPU_PREEMPTED;
5246 gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS;
5247
5248 /*
5249 * The vCPU can be marked preempted if and only if the VM-Exit was on
5250 * an instruction boundary and will not trigger guest emulation of any
5251 * kind (see vcpu_run). Vendor specific code controls (conservatively)
5252 * when this is true, for example allowing the vCPU to be marked
5253 * preempted if and only if the VM-Exit was due to a host interrupt.
5254 */
5255 if (!vcpu->arch.at_instruction_boundary) {
5256 vcpu->stat.preemption_other++;
5257 return;
5258 }
5259
5260 vcpu->stat.preemption_reported++;
5261 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
5262 return;
5263
5264 if (vcpu->arch.st.preempted)
5265 return;
5266
5267 /* This happens on process exit */
5268 if (unlikely(current->mm != vcpu->kvm->mm))
5269 return;
5270
5271 slots = kvm_memslots(vcpu->kvm);
5272
5273 if (unlikely(slots->generation != ghc->generation ||
5274 gpa != ghc->gpa ||
5275 kvm_is_error_hva(ghc->hva) || !ghc->memslot))
5276 return;
5277
5278 st = (struct kvm_steal_time __user *)ghc->hva;
5279 BUILD_BUG_ON(sizeof(st->preempted) != sizeof(preempted));
5280
5281 if (!copy_to_user_nofault(&st->preempted, &preempted, sizeof(preempted)))
5282 vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
5283
5284 mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
5285 }
5286
5287 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
5288 {
5289 int idx;
5290
5291 if (vcpu->preempted) {
5292 /*
5293 * Assume protected guests are in-kernel. Inefficient yielding
5294 * due to false positives is preferable to never yielding due
5295 * to false negatives.
5296 */
5297 vcpu->arch.preempted_in_kernel = vcpu->arch.guest_state_protected ||
5298 !kvm_x86_call(get_cpl_no_cache)(vcpu);
5299
5300 /*
5301 * Take the srcu lock as memslots will be accessed to check the gfn
5302 * cache generation against the memslots generation.
5303 */
5304 idx = srcu_read_lock(&vcpu->kvm->srcu);
5305 if (kvm_xen_msr_enabled(vcpu->kvm))
5306 kvm_xen_runstate_set_preempted(vcpu);
5307 else
5308 kvm_steal_time_set_preempted(vcpu);
5309 srcu_read_unlock(&vcpu->kvm->srcu, idx);
5310 }
5311
5312 kvm_x86_call(vcpu_put)(vcpu);
5313 vcpu->arch.last_host_tsc = rdtsc();
5314 }
5315
5316 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
5317 struct kvm_lapic_state *s)
5318 {
5319 if (vcpu->arch.apic->guest_apic_protected)
5320 return -EINVAL;
5321
5322 kvm_x86_call(sync_pir_to_irr)(vcpu);
5323
5324 return kvm_apic_get_state(vcpu, s);
5325 }
5326
5327 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
5328 struct kvm_lapic_state *s)
5329 {
5330 int r;
5331
5332 if (vcpu->arch.apic->guest_apic_protected)
5333 return -EINVAL;
5334
5335 r = kvm_apic_set_state(vcpu, s);
5336 if (r)
5337 return r;
5338 update_cr8_intercept(vcpu);
5339
5340 return 0;
5341 }
5342
5343 static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
5344 {
5345 /*
5346 * We can accept userspace's request for interrupt injection
5347 * as long as we have a place to store the interrupt number.
5348 * The actual injection will happen when the CPU is able to
5349 * deliver the interrupt.
5350 */
5351 if (kvm_cpu_has_extint(vcpu))
5352 return false;
5353
5354 /* Acknowledging ExtINT does not happen if LINT0 is masked. */
5355 return (!lapic_in_kernel(vcpu) ||
5356 kvm_apic_accept_pic_intr(vcpu));
5357 }
5358
5359 static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
5360 {
5361 /*
5362 * Do not cause an interrupt window exit if an exception
5363 * is pending or an event needs reinjection; userspace
5364 * might want to inject the interrupt manually using KVM_SET_REGS
5365 * or KVM_SET_SREGS. For that to work, we must be at an
5366 * instruction boundary and with no events half-injected.
5367 */
5368 return (kvm_arch_interrupt_allowed(vcpu) &&
5369 kvm_cpu_accept_dm_intr(vcpu) &&
5370 !kvm_event_needs_reinjection(vcpu) &&
5371 !kvm_is_exception_pending(vcpu));
5372 }
5373
5374 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
5375 struct kvm_interrupt *irq)
5376 {
5377 if (irq->irq >= KVM_NR_INTERRUPTS)
5378 return -EINVAL;
5379
5380 if (!irqchip_in_kernel(vcpu->kvm)) {
5381 kvm_queue_interrupt(vcpu, irq->irq, false);
5382 kvm_make_request(KVM_REQ_EVENT, vcpu);
5383 return 0;
5384 }
5385
5386 /*
5387 * With in-kernel LAPIC, we only use this to inject EXTINT, so
5388 * fail for in-kernel 8259.
5389 */
5390 if (pic_in_kernel(vcpu->kvm))
5391 return -ENXIO;
5392
5393 if (vcpu->arch.pending_external_vector != -1)
5394 return -EEXIST;
5395
5396 vcpu->arch.pending_external_vector = irq->irq;
5397 kvm_make_request(KVM_REQ_EVENT, vcpu);
5398 return 0;
5399 }
5400
5401 static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
5402 {
5403 kvm_inject_nmi(vcpu);
5404
5405 return 0;
5406 }
5407
5408 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
5409 struct kvm_tpr_access_ctl *tac)
5410 {
5411 if (tac->flags)
5412 return -EINVAL;
5413 vcpu->arch.tpr_access_reporting = !!tac->enabled;
5414 return 0;
5415 }
5416
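/*
 * KVM_X86_SETUP_MCE: configure the emulated machine-check architecture for
 * this vCPU.  The low byte of @mcg_cap is the bank count, and the requested
 * capabilities must be a subset of what KVM reports as supported.
 */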
5417 static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
5418 u64 mcg_cap)
5419 {
5420 int r;
5421 unsigned bank_num = mcg_cap & 0xff, bank;
5422
5423 r = -EINVAL;
5424 if (!bank_num || bank_num > KVM_MAX_MCE_BANKS)
5425 goto out;
5426 if (mcg_cap & ~(kvm_caps.supported_mce_cap | 0xff | 0xff0000))
5427 goto out;
5428 r = 0;
5429 vcpu->arch.mcg_cap = mcg_cap;
5430 /* Init IA32_MCG_CTL to all 1s */
5431 if (mcg_cap & MCG_CTL_P)
5432 vcpu->arch.mcg_ctl = ~(u64)0;
5433 /* Init IA32_MCi_CTL to all 1s, IA32_MCi_CTL2 to all 0s */
5434 for (bank = 0; bank < bank_num; bank++) {
5435 vcpu->arch.mce_banks[bank*4] = ~(u64)0;
5436 if (mcg_cap & MCG_CMCI_P)
5437 vcpu->arch.mci_ctl2_banks[bank] = 0;
5438 }
5439
5440 kvm_apic_after_set_mcg_cap(vcpu);
5441
5442 kvm_x86_call(setup_mce)(vcpu);
5443 out:
5444 return r;
5445 }
5446
5447 /*
5448 * Validate this is an UCNA (uncorrectable no action) error by checking the
5449 * MCG_STATUS and MCi_STATUS registers:
5450 * - none of the bits for Machine Check Exceptions are set
5451 * - both the VAL (valid) and UC (uncorrectable) bits are set
5452 * MCI_STATUS_PCC - Processor Context Corrupted
5453 * MCI_STATUS_S - Signaled as a Machine Check Exception
5454 * MCI_STATUS_AR - Software recoverable Action Required
5455 */
5456 static bool is_ucna(struct kvm_x86_mce *mce)
5457 {
5458 return !mce->mcg_status &&
5459 !(mce->status & (MCI_STATUS_PCC | MCI_STATUS_S | MCI_STATUS_AR)) &&
5460 (mce->status & MCI_STATUS_VAL) &&
5461 (mce->status & MCI_STATUS_UC);
5462 }
5463
5464 static int kvm_vcpu_x86_set_ucna(struct kvm_vcpu *vcpu, struct kvm_x86_mce *mce, u64* banks)
5465 {
5466 u64 mcg_cap = vcpu->arch.mcg_cap;
5467
5468 banks[1] = mce->status;
5469 banks[2] = mce->addr;
5470 banks[3] = mce->misc;
5471 vcpu->arch.mcg_status = mce->mcg_status;
5472
5473 if (!(mcg_cap & MCG_CMCI_P) ||
5474 !(vcpu->arch.mci_ctl2_banks[mce->bank] & MCI_CTL2_CMCI_EN))
5475 return 0;
5476
5477 if (lapic_in_kernel(vcpu))
5478 kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTCMCI);
5479
5480 return 0;
5481 }
5482
5483 static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
5484 struct kvm_x86_mce *mce)
5485 {
5486 u64 mcg_cap = vcpu->arch.mcg_cap;
5487 unsigned bank_num = mcg_cap & 0xff;
5488 u64 *banks = vcpu->arch.mce_banks;
5489
5490 if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
5491 return -EINVAL;
5492
5493 banks += array_index_nospec(4 * mce->bank, 4 * bank_num);
5494
5495 if (is_ucna(mce))
5496 return kvm_vcpu_x86_set_ucna(vcpu, mce, banks);
5497
5498 /*
5499 * if IA32_MCG_CTL is not all 1s, the uncorrected error
5500 * reporting is disabled
5501 */
5502 if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
5503 vcpu->arch.mcg_ctl != ~(u64)0)
5504 return 0;
5505 /*
5506 * if IA32_MCi_CTL is not all 1s, the uncorrected error
5507 * reporting is disabled for the bank
5508 */
5509 if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
5510 return 0;
5511 if (mce->status & MCI_STATUS_UC) {
5512 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
5513 !kvm_is_cr4_bit_set(vcpu, X86_CR4_MCE)) {
5514 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
5515 return 0;
5516 }
5517 if (banks[1] & MCI_STATUS_VAL)
5518 mce->status |= MCI_STATUS_OVER;
5519 banks[2] = mce->addr;
5520 banks[3] = mce->misc;
5521 vcpu->arch.mcg_status = mce->mcg_status;
5522 banks[1] = mce->status;
5523 kvm_queue_exception(vcpu, MC_VECTOR);
5524 } else if (!(banks[1] & MCI_STATUS_VAL)
5525 || !(banks[1] & MCI_STATUS_UC)) {
5526 if (banks[1] & MCI_STATUS_VAL)
5527 mce->status |= MCI_STATUS_OVER;
5528 banks[2] = mce->addr;
5529 banks[3] = mce->misc;
5530 banks[1] = mce->status;
5531 } else
5532 banks[1] |= MCI_STATUS_OVER;
5533 return 0;
5534 }
5535
5536 static struct kvm_queued_exception *kvm_get_exception_to_save(struct kvm_vcpu *vcpu)
5537 {
5538 /*
5539 * KVM's ABI only allows for one exception to be migrated. Luckily,
5540 * the only time there can be two queued exceptions is if there's a
5541 * non-exiting _injected_ exception, and a pending exiting exception.
5542 * In that case, ignore the VM-Exiting exception as it's an extension
5543 * of the injected exception.
5544 */
5545 if (vcpu->arch.exception_vmexit.pending &&
5546 !vcpu->arch.exception.pending &&
5547 !vcpu->arch.exception.injected)
5548 return &vcpu->arch.exception_vmexit;
5549
5550 return &vcpu->arch.exception;
5551 }
5552
5553 static void kvm_handle_exception_payload_quirk(struct kvm_vcpu *vcpu)
5554 {
5555 struct kvm_queued_exception *ex = kvm_get_exception_to_save(vcpu);
5556
5557 /*
5558 * If KVM_CAP_EXCEPTION_PAYLOAD is disabled, then (prematurely) deliver
5559 * the pending exception payload when userspace saves *any* vCPU state
5560 * that interacts with exception payloads to avoid breaking userspace.
5561 *
5562 * Architecturally, KVM must not deliver an exception payload until the
5563 * exception is actually injected, e.g. to avoid losing pending #DB
5564 * information (which VMX tracks in the VMCS), and to avoid clobbering
5565 * state if the exception is never injected for whatever reason. But
5566 * if KVM_CAP_EXCEPTION_PAYLOAD isn't enabled, then userspace may or
5567 * may not propagate the payload across save+restore, and so KVM can't
5568 * safely defer delivery of the payload.
5569 */
5570 if (!vcpu->kvm->arch.exception_payload_enabled &&
5571 ex->pending && ex->has_payload)
5572 kvm_deliver_exception_payload(vcpu, ex);
5573 }
5574
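/*
 * KVM_GET_VCPU_EVENTS: snapshot pending/injected exception, interrupt, NMI,
 * SMI and triple-fault state so that userspace can save it and restore it
 * later via KVM_SET_VCPU_EVENTS, e.g. across live migration.
 */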
5575 static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
5576 struct kvm_vcpu_events *events)
5577 {
5578 struct kvm_queued_exception *ex = kvm_get_exception_to_save(vcpu);
5579
5580 process_nmi(vcpu);
5581
5582 #ifdef CONFIG_KVM_SMM
5583 if (kvm_check_request(KVM_REQ_SMI, vcpu))
5584 process_smi(vcpu);
5585 #endif
5586
5587 kvm_handle_exception_payload_quirk(vcpu);
5588
5589 memset(events, 0, sizeof(*events));
5590
5591 /*
5592 * The API doesn't provide the instruction length for software
5593 * exceptions, so don't report them. As long as the guest RIP
5594 * isn't advanced, we should expect to encounter the exception
5595 * again.
5596 */
5597 if (!kvm_exception_is_soft(ex->vector)) {
5598 events->exception.injected = ex->injected;
5599 events->exception.pending = ex->pending;
5600 /*
5601 * For ABI compatibility, deliberately conflate
5602 * pending and injected exceptions when
5603 * KVM_CAP_EXCEPTION_PAYLOAD isn't enabled.
5604 */
5605 if (!vcpu->kvm->arch.exception_payload_enabled)
5606 events->exception.injected |= ex->pending;
5607 }
5608 events->exception.nr = ex->vector;
5609 events->exception.has_error_code = ex->has_error_code;
5610 events->exception.error_code = ex->error_code;
5611 events->exception_has_payload = ex->has_payload;
5612 events->exception_payload = ex->payload;
5613
5614 events->interrupt.injected =
5615 vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft;
5616 events->interrupt.nr = vcpu->arch.interrupt.nr;
5617 events->interrupt.shadow = kvm_x86_call(get_interrupt_shadow)(vcpu);
5618
5619 events->nmi.injected = vcpu->arch.nmi_injected;
5620 events->nmi.pending = kvm_get_nr_pending_nmis(vcpu);
5621 events->nmi.masked = kvm_x86_call(get_nmi_mask)(vcpu);
5622
5623 /* events->sipi_vector is never valid when reporting to user space */
5624
5625 #ifdef CONFIG_KVM_SMM
5626 events->smi.smm = is_smm(vcpu);
5627 events->smi.pending = vcpu->arch.smi_pending;
5628 events->smi.smm_inside_nmi =
5629 !!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK);
5630 #endif
5631 events->smi.latched_init = kvm_lapic_latched_init(vcpu);
5632
5633 events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
5634 | KVM_VCPUEVENT_VALID_SHADOW
5635 | KVM_VCPUEVENT_VALID_SMM);
5636 if (vcpu->kvm->arch.exception_payload_enabled)
5637 events->flags |= KVM_VCPUEVENT_VALID_PAYLOAD;
5638 if (vcpu->kvm->arch.triple_fault_event) {
5639 events->triple_fault.pending = kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu);
5640 events->flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT;
5641 }
5642 }
5643
5644 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
5645 struct kvm_vcpu_events *events)
5646 {
5647 if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
5648 | KVM_VCPUEVENT_VALID_SIPI_VECTOR
5649 | KVM_VCPUEVENT_VALID_SHADOW
5650 | KVM_VCPUEVENT_VALID_SMM
5651 | KVM_VCPUEVENT_VALID_PAYLOAD
5652 | KVM_VCPUEVENT_VALID_TRIPLE_FAULT))
5653 return -EINVAL;
5654
5655 if (events->flags & KVM_VCPUEVENT_VALID_PAYLOAD) {
5656 if (!vcpu->kvm->arch.exception_payload_enabled)
5657 return -EINVAL;
5658 if (events->exception.pending)
5659 events->exception.injected = 0;
5660 else
5661 events->exception_has_payload = 0;
5662 } else {
5663 events->exception.pending = 0;
5664 events->exception_has_payload = 0;
5665 }
5666
5667 if ((events->exception.injected || events->exception.pending) &&
5668 (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR))
5669 return -EINVAL;
5670
5671 process_nmi(vcpu);
5672
5673 /*
5674 * Flag that userspace is stuffing an exception, the next KVM_RUN will
5675 * morph the exception to a VM-Exit if appropriate. Do this only for
5676 * pending exceptions, already-injected exceptions are not subject to
5677 * interception. Note, userspace that conflates pending and injected
5678 * is hosed, and will incorrectly convert an injected exception into a
5679 * pending exception, which in turn may cause a spurious VM-Exit.
5680 */
5681 vcpu->arch.exception_from_userspace = events->exception.pending;
5682
5683 vcpu->arch.exception_vmexit.pending = false;
5684
5685 vcpu->arch.exception.injected = events->exception.injected;
5686 vcpu->arch.exception.pending = events->exception.pending;
5687 vcpu->arch.exception.vector = events->exception.nr;
5688 vcpu->arch.exception.has_error_code = events->exception.has_error_code;
5689 vcpu->arch.exception.error_code = events->exception.error_code;
5690 vcpu->arch.exception.has_payload = events->exception_has_payload;
5691 vcpu->arch.exception.payload = events->exception_payload;
5692
5693 vcpu->arch.interrupt.injected = events->interrupt.injected;
5694 vcpu->arch.interrupt.nr = events->interrupt.nr;
5695 vcpu->arch.interrupt.soft = events->interrupt.soft;
5696 if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
5697 kvm_x86_call(set_interrupt_shadow)(vcpu,
5698 events->interrupt.shadow);
5699
5700 vcpu->arch.nmi_injected = events->nmi.injected;
5701 if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) {
5702 vcpu->arch.nmi_pending = 0;
5703 atomic_set(&vcpu->arch.nmi_queued, events->nmi.pending);
5704 if (events->nmi.pending)
5705 kvm_make_request(KVM_REQ_NMI, vcpu);
5706 }
5707 kvm_x86_call(set_nmi_mask)(vcpu, events->nmi.masked);
5708
5709 if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR &&
5710 lapic_in_kernel(vcpu))
5711 vcpu->arch.apic->sipi_vector = events->sipi_vector;
5712
5713 if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
5714 #ifdef CONFIG_KVM_SMM
5715 if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) {
5716 kvm_leave_nested(vcpu);
5717 kvm_smm_changed(vcpu, events->smi.smm);
5718 }
5719
5720 vcpu->arch.smi_pending = events->smi.pending;
5721
5722 if (events->smi.smm) {
5723 if (events->smi.smm_inside_nmi)
5724 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
5725 else
5726 vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK;
5727 }
5728
5729 #else
5730 if (events->smi.smm || events->smi.pending ||
5731 events->smi.smm_inside_nmi)
5732 return -EINVAL;
5733 #endif
5734
5735 if (lapic_in_kernel(vcpu)) {
5736 if (events->smi.latched_init)
5737 set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
5738 else
5739 clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
5740 }
5741 }
5742
5743 if (events->flags & KVM_VCPUEVENT_VALID_TRIPLE_FAULT) {
5744 if (!vcpu->kvm->arch.triple_fault_event)
5745 return -EINVAL;
5746 if (events->triple_fault.pending)
5747 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
5748 else
5749 kvm_clear_request(KVM_REQ_TRIPLE_FAULT, vcpu);
5750 }
5751
5752 kvm_make_request(KVM_REQ_EVENT, vcpu);
5753
5754 return 0;
5755 }
5756
5757 static int kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
5758 struct kvm_debugregs *dbgregs)
5759 {
5760 unsigned int i;
5761
5762 if (vcpu->kvm->arch.has_protected_state &&
5763 vcpu->arch.guest_state_protected)
5764 return -EINVAL;
5765
5766 kvm_handle_exception_payload_quirk(vcpu);
5767
5768 memset(dbgregs, 0, sizeof(*dbgregs));
5769
5770 BUILD_BUG_ON(ARRAY_SIZE(vcpu->arch.db) != ARRAY_SIZE(dbgregs->db));
5771 for (i = 0; i < ARRAY_SIZE(vcpu->arch.db); i++)
5772 dbgregs->db[i] = vcpu->arch.db[i];
5773
5774 dbgregs->dr6 = vcpu->arch.dr6;
5775 dbgregs->dr7 = vcpu->arch.dr7;
5776 return 0;
5777 }
5778
5779 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
5780 struct kvm_debugregs *dbgregs)
5781 {
5782 unsigned int i;
5783
5784 if (vcpu->kvm->arch.has_protected_state &&
5785 vcpu->arch.guest_state_protected)
5786 return -EINVAL;
5787
5788 if (dbgregs->flags)
5789 return -EINVAL;
5790
5791 if (!kvm_dr6_valid(dbgregs->dr6))
5792 return -EINVAL;
5793 if (!kvm_dr7_valid(dbgregs->dr7))
5794 return -EINVAL;
5795
5796 for (i = 0; i < ARRAY_SIZE(vcpu->arch.db); i++)
5797 vcpu->arch.db[i] = dbgregs->db[i];
5798
5799 kvm_update_dr0123(vcpu);
5800 vcpu->arch.dr6 = dbgregs->dr6;
5801 vcpu->arch.dr7 = dbgregs->dr7;
5802 kvm_update_dr7(vcpu);
5803
5804 return 0;
5805 }
5806
5807
5808 static int kvm_vcpu_ioctl_x86_get_xsave2(struct kvm_vcpu *vcpu,
5809 u8 *state, unsigned int size)
5810 {
5811 /*
5812 * Only copy state for features that are enabled for the guest. The
5813 * state itself isn't problematic, but setting bits in the header for
5814 * features that are supported in *this* host but not exposed to the
5815 * guest can result in KVM_SET_XSAVE failing when live migrating to a
5816 * compatible host without the features that are NOT exposed to the
5817 * guest.
5818 *
5819 * FP+SSE can always be saved/restored via KVM_{G,S}ET_XSAVE, even if
5820 * XSAVE/XCR0 are not exposed to the guest, and even if XSAVE isn't
5821 * supported by the host.
5822 */
5823 u64 supported_xcr0 = vcpu->arch.guest_supported_xcr0 |
5824 XFEATURE_MASK_FPSSE;
5825
5826 if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
5827 return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0;
5828
5829 fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu, state, size,
5830 supported_xcr0, vcpu->arch.pkru);
5831 return 0;
5832 }
5833
5834 static int kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
5835 struct kvm_xsave *guest_xsave)
5836 {
5837 return kvm_vcpu_ioctl_x86_get_xsave2(vcpu, (void *)guest_xsave->region,
5838 sizeof(guest_xsave->region));
5839 }
5840
5841 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
5842 struct kvm_xsave *guest_xsave)
5843 {
5844 union fpregs_state *xstate = (union fpregs_state *)guest_xsave->region;
5845
5846 if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
5847 return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0;
5848
5849 /*
5850 * For backwards compatibility, do not expect disabled features to be in
5851 * their initial state. XSTATE_BV[i] must still be cleared whenever
5852 * XFD[i]=1, or XRSTOR would cause a #NM.
5853 */
5854 xstate->xsave.header.xfeatures &= ~vcpu->arch.guest_fpu.fpstate->xfd;
5855
5856 return fpu_copy_uabi_to_guest_fpstate(&vcpu->arch.guest_fpu,
5857 guest_xsave->region,
5858 kvm_caps.supported_xcr0,
5859 &vcpu->arch.pkru);
5860 }
5861
5862 static int kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
5863 struct kvm_xcrs *guest_xcrs)
5864 {
5865 if (vcpu->kvm->arch.has_protected_state &&
5866 vcpu->arch.guest_state_protected)
5867 return -EINVAL;
5868
5869 if (!boot_cpu_has(X86_FEATURE_XSAVE)) {
5870 guest_xcrs->nr_xcrs = 0;
5871 return 0;
5872 }
5873
5874 guest_xcrs->nr_xcrs = 1;
5875 guest_xcrs->flags = 0;
5876 guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK;
5877 guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
5878 return 0;
5879 }
5880
5881 static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
5882 struct kvm_xcrs *guest_xcrs)
5883 {
5884 int i, r = 0;
5885
5886 if (vcpu->kvm->arch.has_protected_state &&
5887 vcpu->arch.guest_state_protected)
5888 return -EINVAL;
5889
5890 if (!boot_cpu_has(X86_FEATURE_XSAVE))
5891 return -EINVAL;
5892
5893 if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags)
5894 return -EINVAL;
5895
5896 for (i = 0; i < guest_xcrs->nr_xcrs; i++)
5897 /* Only support XCR0 currently */
5898 if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) {
5899 r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
5900 guest_xcrs->xcrs[i].value);
5901 break;
5902 }
5903 if (r)
5904 r = -EINVAL;
5905 return r;
5906 }
5907
5908 /*
5909 * kvm_set_guest_paused() indicates to the guest kernel that it has been
5910 * stopped by the hypervisor. This function will be called from the host only.
5911 * EINVAL is returned when the host attempts to set the flag for a guest that
5912 * does not support pv clocks.
5913 */
5914 static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
5915 {
5916 if (!vcpu->arch.pv_time.active)
5917 return -EINVAL;
5918 vcpu->arch.pvclock_set_guest_stopped_request = true;
5919 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
5920 return 0;
5921 }
5922
5923 static int kvm_arch_tsc_has_attr(struct kvm_vcpu *vcpu,
5924 struct kvm_device_attr *attr)
5925 {
5926 int r;
5927
5928 switch (attr->attr) {
5929 case KVM_VCPU_TSC_OFFSET:
5930 r = 0;
5931 break;
5932 default:
5933 r = -ENXIO;
5934 }
5935
5936 return r;
5937 }
5938
5939 static int kvm_arch_tsc_get_attr(struct kvm_vcpu *vcpu,
5940 struct kvm_device_attr *attr)
5941 {
5942 u64 __user *uaddr = u64_to_user_ptr(attr->addr);
5943 int r;
5944
5945 switch (attr->attr) {
5946 case KVM_VCPU_TSC_OFFSET:
5947 r = -EFAULT;
5948 if (put_user(vcpu->arch.l1_tsc_offset, uaddr))
5949 break;
5950 r = 0;
5951 break;
5952 default:
5953 r = -ENXIO;
5954 }
5955
5956 return r;
5957 }
5958
5959 static int kvm_arch_tsc_set_attr(struct kvm_vcpu *vcpu,
5960 struct kvm_device_attr *attr)
5961 {
5962 u64 __user *uaddr = u64_to_user_ptr(attr->addr);
5963 struct kvm *kvm = vcpu->kvm;
5964 int r;
5965
5966 switch (attr->attr) {
5967 case KVM_VCPU_TSC_OFFSET: {
5968 u64 offset, tsc, ns;
5969 unsigned long flags;
5970 bool matched;
5971
5972 r = -EFAULT;
5973 if (get_user(offset, uaddr))
5974 break;
5975
5976 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
5977
5978 matched = (vcpu->arch.virtual_tsc_khz &&
5979 kvm->arch.last_tsc_khz == vcpu->arch.virtual_tsc_khz &&
5980 kvm->arch.last_tsc_offset == offset);
5981
5982 tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio) + offset;
5983 ns = get_kvmclock_base_ns();
5984
5985 __kvm_synchronize_tsc(vcpu, offset, tsc, ns, matched, true);
5986 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
5987
5988 r = 0;
5989 break;
5990 }
5991 default:
5992 r = -ENXIO;
5993 }
5994
5995 return r;
5996 }
5997
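/*
 * vCPU-scoped device attributes; the only group handled here is
 * KVM_VCPU_TSC_CTRL, which exposes the L1 TSC offset.  Rough sketch of the
 * userspace side, illustrative only (vcpu_fd and error handling assumed):
 *
 *	__u64 offset;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_VCPU_TSC_CTRL,
 *		.attr  = KVM_VCPU_TSC_OFFSET,
 *		.addr  = (__u64)&offset,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_DEVICE_ATTR, &attr);
 */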
5998 static int kvm_vcpu_ioctl_device_attr(struct kvm_vcpu *vcpu,
5999 unsigned int ioctl,
6000 void __user *argp)
6001 {
6002 struct kvm_device_attr attr;
6003 int r;
6004
6005 if (copy_from_user(&attr, argp, sizeof(attr)))
6006 return -EFAULT;
6007
6008 if (attr.group != KVM_VCPU_TSC_CTRL)
6009 return -ENXIO;
6010
6011 switch (ioctl) {
6012 case KVM_HAS_DEVICE_ATTR:
6013 r = kvm_arch_tsc_has_attr(vcpu, &attr);
6014 break;
6015 case KVM_GET_DEVICE_ATTR:
6016 r = kvm_arch_tsc_get_attr(vcpu, &attr);
6017 break;
6018 case KVM_SET_DEVICE_ATTR:
6019 r = kvm_arch_tsc_set_attr(vcpu, &attr);
6020 break;
6021 }
6022
6023 return r;
6024 }
6025
6026 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
6027 struct kvm_enable_cap *cap)
6028 {
6029 if (cap->flags)
6030 return -EINVAL;
6031
6032 switch (cap->cap) {
6033 #ifdef CONFIG_KVM_HYPERV
6034 case KVM_CAP_HYPERV_SYNIC2:
6035 if (cap->args[0])
6036 return -EINVAL;
6037 fallthrough;
6038
6039 case KVM_CAP_HYPERV_SYNIC:
6040 if (!irqchip_in_kernel(vcpu->kvm))
6041 return -EINVAL;
6042 return kvm_hv_activate_synic(vcpu, cap->cap ==
6043 KVM_CAP_HYPERV_SYNIC2);
6044 case KVM_CAP_HYPERV_ENLIGHTENED_VMCS:
6045 {
6046 int r;
6047 uint16_t vmcs_version;
6048 void __user *user_ptr;
6049
6050 if (!kvm_x86_ops.nested_ops->enable_evmcs)
6051 return -ENOTTY;
6052 r = kvm_x86_ops.nested_ops->enable_evmcs(vcpu, &vmcs_version);
6053 if (!r) {
6054 user_ptr = (void __user *)(uintptr_t)cap->args[0];
6055 if (copy_to_user(user_ptr, &vmcs_version,
6056 sizeof(vmcs_version)))
6057 r = -EFAULT;
6058 }
6059 return r;
6060 }
6061 case KVM_CAP_HYPERV_DIRECT_TLBFLUSH:
6062 if (!kvm_x86_ops.enable_l2_tlb_flush)
6063 return -ENOTTY;
6064
6065 return kvm_x86_call(enable_l2_tlb_flush)(vcpu);
6066
6067 case KVM_CAP_HYPERV_ENFORCE_CPUID:
6068 return kvm_hv_set_enforce_cpuid(vcpu, cap->args[0]);
6069 #endif
6070
6071 case KVM_CAP_ENFORCE_PV_FEATURE_CPUID:
6072 vcpu->arch.pv_cpuid.enforce = cap->args[0];
6073 return 0;
6074 default:
6075 return -EINVAL;
6076 }
6077 }
6078
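/*
 * Decomposed view of the 64-bit register id used by KVM_{GET,SET}_ONE_REG on
 * x86: the low 32 bits hold the register index, followed by the register
 * type, reserved bits that must be zero, the size encoding, and the
 * architecture byte (checked against KVM_REG_X86 before this is consulted).
 */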
6079 struct kvm_x86_reg_id {
6080 __u32 index;
6081 __u8 type;
6082 __u8 rsvd1;
6083 __u8 rsvd2:4;
6084 __u8 size:4;
6085 __u8 x86;
6086 };
6087
6088 static int kvm_translate_kvm_reg(struct kvm_vcpu *vcpu,
6089 struct kvm_x86_reg_id *reg)
6090 {
6091 switch (reg->index) {
6092 case KVM_REG_GUEST_SSP:
6093 /*
6094 * FIXME: If host-initiated accesses are ever exempted from
6095 * ignore_msrs (in kvm_do_msr_access()), drop this manual check
6096 * and rely on KVM's standard checks to reject accesses to regs
6097 * that don't exist.
6098 */
6099 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK))
6100 return -EINVAL;
6101
6102 reg->type = KVM_X86_REG_TYPE_MSR;
6103 reg->index = MSR_KVM_INTERNAL_GUEST_SSP;
6104 break;
6105 default:
6106 return -EINVAL;
6107 }
6108 return 0;
6109 }
6110
6111 static int kvm_get_one_msr(struct kvm_vcpu *vcpu, u32 msr, u64 __user *user_val)
6112 {
6113 u64 val;
6114
6115 if (do_get_msr(vcpu, msr, &val))
6116 return -EINVAL;
6117
6118 if (put_user(val, user_val))
6119 return -EFAULT;
6120
6121 return 0;
6122 }
6123
6124 static int kvm_set_one_msr(struct kvm_vcpu *vcpu, u32 msr, u64 __user *user_val)
6125 {
6126 u64 val;
6127
6128 if (get_user(val, user_val))
6129 return -EFAULT;
6130
6131 if (do_set_msr(vcpu, msr, &val))
6132 return -EINVAL;
6133
6134 return 0;
6135 }
6136
6137 static int kvm_get_set_one_reg(struct kvm_vcpu *vcpu, unsigned int ioctl,
6138 void __user *argp)
6139 {
6140 struct kvm_one_reg one_reg;
6141 struct kvm_x86_reg_id *reg;
6142 u64 __user *user_val;
6143 bool load_fpu;
6144 int r;
6145
6146 if (copy_from_user(&one_reg, argp, sizeof(one_reg)))
6147 return -EFAULT;
6148
6149 if ((one_reg.id & KVM_REG_ARCH_MASK) != KVM_REG_X86)
6150 return -EINVAL;
6151
6152 reg = (struct kvm_x86_reg_id *)&one_reg.id;
6153 if (reg->rsvd1 || reg->rsvd2)
6154 return -EINVAL;
6155
6156 if (reg->type == KVM_X86_REG_TYPE_KVM) {
6157 r = kvm_translate_kvm_reg(vcpu, reg);
6158 if (r)
6159 return r;
6160 }
6161
6162 if (reg->type != KVM_X86_REG_TYPE_MSR)
6163 return -EINVAL;
6164
6165 if ((one_reg.id & KVM_REG_SIZE_MASK) != KVM_REG_SIZE_U64)
6166 return -EINVAL;
6167
6168 guard(srcu)(&vcpu->kvm->srcu);
6169
6170 load_fpu = is_xstate_managed_msr(vcpu, reg->index);
6171 if (load_fpu)
6172 kvm_load_guest_fpu(vcpu);
6173
6174 user_val = u64_to_user_ptr(one_reg.addr);
6175 if (ioctl == KVM_GET_ONE_REG)
6176 r = kvm_get_one_msr(vcpu, reg->index, user_val);
6177 else
6178 r = kvm_set_one_msr(vcpu, reg->index, user_val);
6179
6180 if (load_fpu)
6181 kvm_put_guest_fpu(vcpu);
6182 return r;
6183 }
6184
6185 static int kvm_get_reg_list(struct kvm_vcpu *vcpu,
6186 struct kvm_reg_list __user *user_list)
6187 {
6188 u64 nr_regs = guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK) ? 1 : 0;
6189 u64 user_nr_regs;
6190
6191 if (get_user(user_nr_regs, &user_list->n))
6192 return -EFAULT;
6193
6194 if (put_user(nr_regs, &user_list->n))
6195 return -EFAULT;
6196
6197 if (user_nr_regs < nr_regs)
6198 return -E2BIG;
6199
6200 if (nr_regs &&
6201 put_user(KVM_X86_REG_KVM(KVM_REG_GUEST_SSP), &user_list->reg[0]))
6202 return -EFAULT;
6203
6204 return 0;
6205 }
6206
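/*
 * Arch-specific handler for ioctls issued on a vCPU fd; common KVM code
 * falls back to this for anything it does not handle itself.  The vCPU is
 * loaded for the duration of the call.
 */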
6207 long kvm_arch_vcpu_ioctl(struct file *filp,
6208 unsigned int ioctl, unsigned long arg)
6209 {
6210 struct kvm_vcpu *vcpu = filp->private_data;
6211 void __user *argp = (void __user *)arg;
6212 int r;
6213 union {
6214 struct kvm_sregs2 *sregs2;
6215 struct kvm_lapic_state *lapic;
6216 struct kvm_xsave *xsave;
6217 struct kvm_xcrs *xcrs;
6218 void *buffer;
6219 } u;
6220
6221 vcpu_load(vcpu);
6222
6223 u.buffer = NULL;
6224 switch (ioctl) {
6225 case KVM_GET_LAPIC: {
6226 r = -EINVAL;
6227 if (!lapic_in_kernel(vcpu))
6228 goto out;
6229 u.lapic = kzalloc_obj(struct kvm_lapic_state);
6230
6231 r = -ENOMEM;
6232 if (!u.lapic)
6233 goto out;
6234 r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic);
6235 if (r)
6236 goto out;
6237 r = -EFAULT;
6238 if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state)))
6239 goto out;
6240 r = 0;
6241 break;
6242 }
6243 case KVM_SET_LAPIC: {
6244 r = -EINVAL;
6245 if (!lapic_in_kernel(vcpu))
6246 goto out;
6247 u.lapic = memdup_user(argp, sizeof(*u.lapic));
6248 if (IS_ERR(u.lapic)) {
6249 r = PTR_ERR(u.lapic);
6250 goto out_nofree;
6251 }
6252
6253 r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic);
6254 break;
6255 }
6256 case KVM_INTERRUPT: {
6257 struct kvm_interrupt irq;
6258
6259 r = -EFAULT;
6260 if (copy_from_user(&irq, argp, sizeof(irq)))
6261 goto out;
6262 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
6263 break;
6264 }
6265 case KVM_NMI: {
6266 r = kvm_vcpu_ioctl_nmi(vcpu);
6267 break;
6268 }
6269 case KVM_SMI: {
6270 r = kvm_inject_smi(vcpu);
6271 break;
6272 }
6273 case KVM_SET_CPUID: {
6274 struct kvm_cpuid __user *cpuid_arg = argp;
6275 struct kvm_cpuid cpuid;
6276
6277 r = -EFAULT;
6278 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
6279 goto out;
6280 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
6281 break;
6282 }
6283 case KVM_SET_CPUID2: {
6284 struct kvm_cpuid2 __user *cpuid_arg = argp;
6285 struct kvm_cpuid2 cpuid;
6286
6287 r = -EFAULT;
6288 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
6289 goto out;
6290 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
6291 cpuid_arg->entries);
6292 break;
6293 }
6294 case KVM_GET_CPUID2: {
6295 struct kvm_cpuid2 __user *cpuid_arg = argp;
6296 struct kvm_cpuid2 cpuid;
6297
6298 r = -EFAULT;
6299 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
6300 goto out;
6301 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
6302 cpuid_arg->entries);
6303 if (r)
6304 goto out;
6305 r = -EFAULT;
6306 if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid)))
6307 goto out;
6308 r = 0;
6309 break;
6310 }
6311 case KVM_GET_MSRS: {
6312 int idx = srcu_read_lock(&vcpu->kvm->srcu);
6313 r = msr_io(vcpu, argp, do_get_msr, 1);
6314 srcu_read_unlock(&vcpu->kvm->srcu, idx);
6315 break;
6316 }
6317 case KVM_SET_MSRS: {
6318 int idx = srcu_read_lock(&vcpu->kvm->srcu);
6319 r = msr_io(vcpu, argp, do_set_msr, 0);
6320 srcu_read_unlock(&vcpu->kvm->srcu, idx);
6321 break;
6322 }
6323 case KVM_GET_ONE_REG:
6324 case KVM_SET_ONE_REG:
6325 r = kvm_get_set_one_reg(vcpu, ioctl, argp);
6326 break;
6327 case KVM_GET_REG_LIST:
6328 r = kvm_get_reg_list(vcpu, argp);
6329 break;
6330 case KVM_TPR_ACCESS_REPORTING: {
6331 struct kvm_tpr_access_ctl tac;
6332
6333 r = -EFAULT;
6334 if (copy_from_user(&tac, argp, sizeof(tac)))
6335 goto out;
6336 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
6337 if (r)
6338 goto out;
6339 r = -EFAULT;
6340 if (copy_to_user(argp, &tac, sizeof(tac)))
6341 goto out;
6342 r = 0;
6343 break;
6344 };
6345 case KVM_SET_VAPIC_ADDR: {
6346 struct kvm_vapic_addr va;
6347 int idx;
6348
6349 r = -EINVAL;
6350 if (!lapic_in_kernel(vcpu))
6351 goto out;
6352 r = -EFAULT;
6353 if (copy_from_user(&va, argp, sizeof(va)))
6354 goto out;
6355 idx = srcu_read_lock(&vcpu->kvm->srcu);
6356 r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
6357 srcu_read_unlock(&vcpu->kvm->srcu, idx);
6358 break;
6359 }
6360 case KVM_X86_SETUP_MCE: {
6361 u64 mcg_cap;
6362
6363 r = -EFAULT;
6364 if (copy_from_user(&mcg_cap, argp, sizeof(mcg_cap)))
6365 goto out;
6366 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
6367 break;
6368 }
6369 case KVM_X86_SET_MCE: {
6370 struct kvm_x86_mce mce;
6371
6372 r = -EFAULT;
6373 if (copy_from_user(&mce, argp, sizeof(mce)))
6374 goto out;
6375 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
6376 break;
6377 }
6378 case KVM_GET_VCPU_EVENTS: {
6379 struct kvm_vcpu_events events;
6380
6381 kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);
6382
6383 r = -EFAULT;
6384 if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
6385 break;
6386 r = 0;
6387 break;
6388 }
6389 case KVM_SET_VCPU_EVENTS: {
6390 struct kvm_vcpu_events events;
6391
6392 r = -EFAULT;
6393 if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
6394 break;
6395
6396 kvm_vcpu_srcu_read_lock(vcpu);
6397 r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
6398 kvm_vcpu_srcu_read_unlock(vcpu);
6399 break;
6400 }
6401 case KVM_GET_DEBUGREGS: {
6402 struct kvm_debugregs dbgregs;
6403
6404 r = kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
6405 if (r < 0)
6406 break;
6407
6408 r = -EFAULT;
6409 if (copy_to_user(argp, &dbgregs,
6410 sizeof(struct kvm_debugregs)))
6411 break;
6412 r = 0;
6413 break;
6414 }
6415 case KVM_SET_DEBUGREGS: {
6416 struct kvm_debugregs dbgregs;
6417
6418 r = -EFAULT;
6419 if (copy_from_user(&dbgregs, argp,
6420 sizeof(struct kvm_debugregs)))
6421 break;
6422
6423 r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
6424 break;
6425 }
6426 case KVM_GET_XSAVE: {
6427 r = -EINVAL;
6428 if (vcpu->arch.guest_fpu.uabi_size > sizeof(struct kvm_xsave))
6429 break;
6430
6431 u.xsave = kzalloc_obj(struct kvm_xsave);
6432 r = -ENOMEM;
6433 if (!u.xsave)
6434 break;
6435
6436 r = kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave);
6437 if (r < 0)
6438 break;
6439
6440 r = -EFAULT;
6441 if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave)))
6442 break;
6443 r = 0;
6444 break;
6445 }
6446 case KVM_SET_XSAVE: {
6447 int size = vcpu->arch.guest_fpu.uabi_size;
6448
6449 u.xsave = memdup_user(argp, size);
6450 if (IS_ERR(u.xsave)) {
6451 r = PTR_ERR(u.xsave);
6452 goto out_nofree;
6453 }
6454
6455 r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
6456 break;
6457 }
6458
6459 case KVM_GET_XSAVE2: {
6460 int size = vcpu->arch.guest_fpu.uabi_size;
6461
6462 u.xsave = kzalloc(size, GFP_KERNEL);
6463 r = -ENOMEM;
6464 if (!u.xsave)
6465 break;
6466
6467 r = kvm_vcpu_ioctl_x86_get_xsave2(vcpu, u.buffer, size);
6468 if (r < 0)
6469 break;
6470
6471 r = -EFAULT;
6472 if (copy_to_user(argp, u.xsave, size))
6473 break;
6474
6475 r = 0;
6476 break;
6477 }
6478
6479 case KVM_GET_XCRS: {
6480 u.xcrs = kzalloc_obj(struct kvm_xcrs);
6481 r = -ENOMEM;
6482 if (!u.xcrs)
6483 break;
6484
6485 r = kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs);
6486 if (r < 0)
6487 break;
6488
6489 r = -EFAULT;
6490 if (copy_to_user(argp, u.xcrs,
6491 sizeof(struct kvm_xcrs)))
6492 break;
6493 r = 0;
6494 break;
6495 }
6496 case KVM_SET_XCRS: {
6497 u.xcrs = memdup_user(argp, sizeof(*u.xcrs));
6498 if (IS_ERR(u.xcrs)) {
6499 r = PTR_ERR(u.xcrs);
6500 goto out_nofree;
6501 }
6502
6503 r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);
6504 break;
6505 }
6506 case KVM_SET_TSC_KHZ: {
6507 u32 user_tsc_khz;
6508
6509 r = -EINVAL;
6510
6511 if (vcpu->arch.guest_tsc_protected)
6512 goto out;
6513
6514 user_tsc_khz = (u32)arg;
6515
6516 if (kvm_caps.has_tsc_control &&
6517 user_tsc_khz >= kvm_caps.max_guest_tsc_khz)
6518 goto out;
6519
6520 if (user_tsc_khz == 0)
6521 user_tsc_khz = tsc_khz;
6522
6523 if (!kvm_set_tsc_khz(vcpu, user_tsc_khz))
6524 r = 0;
6525
6526 goto out;
6527 }
6528 case KVM_GET_TSC_KHZ: {
6529 r = vcpu->arch.virtual_tsc_khz;
6530 goto out;
6531 }
6532 case KVM_KVMCLOCK_CTRL: {
6533 r = kvm_set_guest_paused(vcpu);
6534 goto out;
6535 }
6536 case KVM_ENABLE_CAP: {
6537 struct kvm_enable_cap cap;
6538
6539 r = -EFAULT;
6540 if (copy_from_user(&cap, argp, sizeof(cap)))
6541 goto out;
6542 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
6543 break;
6544 }
6545 case KVM_GET_NESTED_STATE: {
6546 struct kvm_nested_state __user *user_kvm_nested_state = argp;
6547 u32 user_data_size;
6548
6549 r = -EINVAL;
6550 if (!kvm_x86_ops.nested_ops->get_state)
6551 break;
6552
6553 BUILD_BUG_ON(sizeof(user_data_size) != sizeof(user_kvm_nested_state->size));
6554 r = -EFAULT;
6555 if (get_user(user_data_size, &user_kvm_nested_state->size))
6556 break;
6557
6558 r = kvm_x86_ops.nested_ops->get_state(vcpu, user_kvm_nested_state,
6559 user_data_size);
6560 if (r < 0)
6561 break;
6562
6563 if (r > user_data_size) {
6564 if (put_user(r, &user_kvm_nested_state->size))
6565 r = -EFAULT;
6566 else
6567 r = -E2BIG;
6568 break;
6569 }
6570
6571 r = 0;
6572 break;
6573 }
6574 case KVM_SET_NESTED_STATE: {
6575 struct kvm_nested_state __user *user_kvm_nested_state = argp;
6576 struct kvm_nested_state kvm_state;
6577 int idx;
6578
6579 r = -EINVAL;
6580 if (!kvm_x86_ops.nested_ops->set_state)
6581 break;
6582
6583 r = -EFAULT;
6584 if (copy_from_user(&kvm_state, user_kvm_nested_state, sizeof(kvm_state)))
6585 break;
6586
6587 r = -EINVAL;
6588 if (kvm_state.size < sizeof(kvm_state))
6589 break;
6590
6591 if (kvm_state.flags &
6592 ~(KVM_STATE_NESTED_RUN_PENDING | KVM_STATE_NESTED_GUEST_MODE
6593 | KVM_STATE_NESTED_EVMCS | KVM_STATE_NESTED_MTF_PENDING
6594 | KVM_STATE_NESTED_GIF_SET))
6595 break;
6596
6597 /* nested_run_pending implies guest_mode. */
6598 if ((kvm_state.flags & KVM_STATE_NESTED_RUN_PENDING)
6599 && !(kvm_state.flags & KVM_STATE_NESTED_GUEST_MODE))
6600 break;
6601
6602 idx = srcu_read_lock(&vcpu->kvm->srcu);
6603 r = kvm_x86_ops.nested_ops->set_state(vcpu, user_kvm_nested_state, &kvm_state);
6604 srcu_read_unlock(&vcpu->kvm->srcu, idx);
6605 break;
6606 }
6607 #ifdef CONFIG_KVM_HYPERV
6608 case KVM_GET_SUPPORTED_HV_CPUID:
6609 r = kvm_ioctl_get_supported_hv_cpuid(vcpu, argp);
6610 break;
6611 #endif
6612 #ifdef CONFIG_KVM_XEN
6613 case KVM_XEN_VCPU_GET_ATTR: {
6614 struct kvm_xen_vcpu_attr xva;
6615
6616 r = -EFAULT;
6617 if (copy_from_user(&xva, argp, sizeof(xva)))
6618 goto out;
6619 r = kvm_xen_vcpu_get_attr(vcpu, &xva);
6620 if (!r && copy_to_user(argp, &xva, sizeof(xva)))
6621 r = -EFAULT;
6622 break;
6623 }
6624 case KVM_XEN_VCPU_SET_ATTR: {
6625 struct kvm_xen_vcpu_attr xva;
6626
6627 r = -EFAULT;
6628 if (copy_from_user(&xva, argp, sizeof(xva)))
6629 goto out;
6630 r = kvm_xen_vcpu_set_attr(vcpu, &xva);
6631 break;
6632 }
6633 #endif
6634 case KVM_GET_SREGS2: {
6635 r = -EINVAL;
6636 if (vcpu->kvm->arch.has_protected_state &&
6637 vcpu->arch.guest_state_protected)
6638 goto out;
6639
6640 u.sregs2 = kzalloc_obj(struct kvm_sregs2);
6641 r = -ENOMEM;
6642 if (!u.sregs2)
6643 goto out;
6644 __get_sregs2(vcpu, u.sregs2);
6645 r = -EFAULT;
6646 if (copy_to_user(argp, u.sregs2, sizeof(struct kvm_sregs2)))
6647 goto out;
6648 r = 0;
6649 break;
6650 }
6651 case KVM_SET_SREGS2: {
6652 r = -EINVAL;
6653 if (vcpu->kvm->arch.has_protected_state &&
6654 vcpu->arch.guest_state_protected)
6655 goto out;
6656
6657 u.sregs2 = memdup_user(argp, sizeof(struct kvm_sregs2));
6658 if (IS_ERR(u.sregs2)) {
6659 r = PTR_ERR(u.sregs2);
6660 u.sregs2 = NULL;
6661 goto out;
6662 }
6663 r = __set_sregs2(vcpu, u.sregs2);
6664 break;
6665 }
6666 case KVM_HAS_DEVICE_ATTR:
6667 case KVM_GET_DEVICE_ATTR:
6668 case KVM_SET_DEVICE_ATTR:
6669 r = kvm_vcpu_ioctl_device_attr(vcpu, ioctl, argp);
6670 break;
6671 case KVM_MEMORY_ENCRYPT_OP:
6672 r = -ENOTTY;
6673 if (!kvm_x86_ops.vcpu_mem_enc_ioctl)
6674 goto out;
6675 r = kvm_x86_ops.vcpu_mem_enc_ioctl(vcpu, argp);
6676 break;
6677 default:
6678 r = -EINVAL;
6679 }
6680 out:
6681 kfree(u.buffer);
6682 out_nofree:
6683 vcpu_put(vcpu);
6684 return r;
6685 }
6686
6687 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
6688 {
6689 return VM_FAULT_SIGBUS;
6690 }
6691
6692 static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
6693 {
6694 int ret;
6695
6696 if (addr > (unsigned int)(-3 * PAGE_SIZE))
6697 return -EINVAL;
6698 ret = kvm_x86_call(set_tss_addr)(kvm, addr);
6699 return ret;
6700 }
6701
6702 static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
6703 u64 ident_addr)
6704 {
6705 return kvm_x86_call(set_identity_map_addr)(kvm, ident_addr);
6706 }
6707
6708 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
6709 unsigned long kvm_nr_mmu_pages)
6710 {
6711 if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
6712 return -EINVAL;
6713
6714 mutex_lock(&kvm->slots_lock);
6715
6716 kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
6717 kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
6718
6719 mutex_unlock(&kvm->slots_lock);
6720 return 0;
6721 }
6722
6723 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
6724 {
6725
6726 /*
6727 * Flush all CPUs' dirty log buffers to the dirty_bitmap. Called
6728 * before reporting dirty_bitmap to userspace. KVM flushes the buffers
6729 * on all VM-Exits, thus we only need to kick running vCPUs to force a
6730 * VM-Exit.
6731 */
6732 struct kvm_vcpu *vcpu;
6733 unsigned long i;
6734
6735 if (!kvm->arch.cpu_dirty_log_size)
6736 return;
6737
6738 kvm_for_each_vcpu(i, vcpu, kvm)
6739 kvm_vcpu_kick(vcpu);
6740 }
6741
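/*
 * Handle KVM_ENABLE_CAP on a VM fd.  Note that most capabilities which
 * change vCPU-visible behavior can only be enabled before any vCPUs have
 * been created.
 */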
6742 int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
6743 struct kvm_enable_cap *cap)
6744 {
6745 int r;
6746
6747 if (cap->flags)
6748 return -EINVAL;
6749
6750 switch (cap->cap) {
6751 case KVM_CAP_DISABLE_QUIRKS2:
6752 r = -EINVAL;
6753 if (cap->args[0] & ~kvm_caps.supported_quirks)
6754 break;
6755 fallthrough;
6756 case KVM_CAP_DISABLE_QUIRKS:
6757 kvm->arch.disabled_quirks |= cap->args[0] & kvm_caps.supported_quirks;
6758 r = 0;
6759 break;
6760 case KVM_CAP_SPLIT_IRQCHIP: {
6761 mutex_lock(&kvm->lock);
6762 r = -EINVAL;
6763 if (cap->args[0] > KVM_MAX_IRQ_ROUTES)
6764 goto split_irqchip_unlock;
6765 r = -EEXIST;
6766 if (irqchip_in_kernel(kvm))
6767 goto split_irqchip_unlock;
6768 if (kvm->created_vcpus)
6769 goto split_irqchip_unlock;
6770 /* Pairs with irqchip_in_kernel. */
6771 smp_wmb();
6772 kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT;
6773 kvm->arch.nr_reserved_ioapic_pins = cap->args[0];
6774 kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_ABSENT);
6775 r = 0;
6776 split_irqchip_unlock:
6777 mutex_unlock(&kvm->lock);
6778 break;
6779 }
6780 case KVM_CAP_X2APIC_API:
6781 r = -EINVAL;
6782 if (cap->args[0] & ~KVM_X2APIC_API_VALID_FLAGS)
6783 break;
6784
6785 if ((cap->args[0] & KVM_X2APIC_ENABLE_SUPPRESS_EOI_BROADCAST) &&
6786 (cap->args[0] & KVM_X2APIC_DISABLE_SUPPRESS_EOI_BROADCAST))
6787 break;
6788
6789 if ((cap->args[0] & KVM_X2APIC_ENABLE_SUPPRESS_EOI_BROADCAST) &&
6790 !irqchip_split(kvm))
6791 break;
6792
6793 if (cap->args[0] & KVM_X2APIC_API_USE_32BIT_IDS)
6794 kvm->arch.x2apic_format = true;
6795 if (cap->args[0] & KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)
6796 kvm->arch.x2apic_broadcast_quirk_disabled = true;
6797
6798 if (cap->args[0] & KVM_X2APIC_ENABLE_SUPPRESS_EOI_BROADCAST)
6799 kvm->arch.suppress_eoi_broadcast_mode = KVM_SUPPRESS_EOI_BROADCAST_ENABLED;
6800 if (cap->args[0] & KVM_X2APIC_DISABLE_SUPPRESS_EOI_BROADCAST)
6801 kvm->arch.suppress_eoi_broadcast_mode = KVM_SUPPRESS_EOI_BROADCAST_DISABLED;
6802
6803 r = 0;
6804 break;
6805 case KVM_CAP_X86_DISABLE_EXITS:
6806 r = -EINVAL;
6807 if (cap->args[0] & ~kvm_get_allowed_disable_exits())
6808 break;
6809
6810 mutex_lock(&kvm->lock);
6811 if (kvm->created_vcpus)
6812 goto disable_exits_unlock;
6813
6814 #define SMT_RSB_MSG "This processor is affected by the Cross-Thread Return Predictions vulnerability. " \
6815 "KVM_CAP_X86_DISABLE_EXITS should only be used with SMT disabled or trusted guests."
6816
6817 if (!mitigate_smt_rsb && boot_cpu_has_bug(X86_BUG_SMT_RSB) &&
6818 cpu_smt_possible() &&
6819 (cap->args[0] & ~(KVM_X86_DISABLE_EXITS_PAUSE |
6820 KVM_X86_DISABLE_EXITS_APERFMPERF)))
6821 pr_warn_once(SMT_RSB_MSG);
6822
6823 kvm_disable_exits(kvm, cap->args[0]);
6824 r = 0;
6825 disable_exits_unlock:
6826 mutex_unlock(&kvm->lock);
6827 break;
6828 case KVM_CAP_MSR_PLATFORM_INFO:
6829 kvm->arch.guest_can_read_msr_platform_info = cap->args[0];
6830 r = 0;
6831 break;
6832 case KVM_CAP_EXCEPTION_PAYLOAD:
6833 kvm->arch.exception_payload_enabled = cap->args[0];
6834 r = 0;
6835 break;
6836 case KVM_CAP_X86_TRIPLE_FAULT_EVENT:
6837 kvm->arch.triple_fault_event = cap->args[0];
6838 r = 0;
6839 break;
6840 case KVM_CAP_X86_USER_SPACE_MSR:
6841 r = -EINVAL;
6842 if (cap->args[0] & ~KVM_MSR_EXIT_REASON_VALID_MASK)
6843 break;
6844 kvm->arch.user_space_msr_mask = cap->args[0];
6845 r = 0;
6846 break;
6847 case KVM_CAP_X86_BUS_LOCK_EXIT:
6848 r = -EINVAL;
6849 if (cap->args[0] & ~KVM_BUS_LOCK_DETECTION_VALID_MODE)
6850 break;
6851
6852 if ((cap->args[0] & KVM_BUS_LOCK_DETECTION_OFF) &&
6853 (cap->args[0] & KVM_BUS_LOCK_DETECTION_EXIT))
6854 break;
6855
6856 if (kvm_caps.has_bus_lock_exit &&
6857 cap->args[0] & KVM_BUS_LOCK_DETECTION_EXIT)
6858 kvm->arch.bus_lock_detection_enabled = true;
6859 r = 0;
6860 break;
6861 #ifdef CONFIG_X86_SGX_KVM
6862 case KVM_CAP_SGX_ATTRIBUTE: {
6863 unsigned long allowed_attributes = 0;
6864
6865 r = sgx_set_attribute(&allowed_attributes, cap->args[0]);
6866 if (r)
6867 break;
6868
6869 /* KVM only supports the PROVISIONKEY privileged attribute. */
6870 if ((allowed_attributes & SGX_ATTR_PROVISIONKEY) &&
6871 !(allowed_attributes & ~SGX_ATTR_PROVISIONKEY))
6872 kvm->arch.sgx_provisioning_allowed = true;
6873 else
6874 r = -EINVAL;
6875 break;
6876 }
6877 #endif
6878 case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM:
6879 r = -EINVAL;
6880 if (!kvm_x86_ops.vm_copy_enc_context_from)
6881 break;
6882
6883 r = kvm_x86_call(vm_copy_enc_context_from)(kvm, cap->args[0]);
6884 break;
6885 case KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM:
6886 r = -EINVAL;
6887 if (!kvm_x86_ops.vm_move_enc_context_from)
6888 break;
6889
6890 r = kvm_x86_call(vm_move_enc_context_from)(kvm, cap->args[0]);
6891 break;
6892 case KVM_CAP_EXIT_HYPERCALL:
6893 if (cap->args[0] & ~KVM_EXIT_HYPERCALL_VALID_MASK) {
6894 r = -EINVAL;
6895 break;
6896 }
6897 kvm->arch.hypercall_exit_enabled = cap->args[0];
6898 r = 0;
6899 break;
6900 case KVM_CAP_EXIT_ON_EMULATION_FAILURE:
6901 r = -EINVAL;
6902 if (cap->args[0] & ~1)
6903 break;
6904 kvm->arch.exit_on_emulation_error = cap->args[0];
6905 r = 0;
6906 break;
6907 case KVM_CAP_PMU_CAPABILITY:
6908 r = -EINVAL;
6909 if (!enable_pmu || (cap->args[0] & ~KVM_CAP_PMU_VALID_MASK))
6910 break;
6911
6912 mutex_lock(&kvm->lock);
6913 if (!kvm->created_vcpus && !kvm->arch.created_mediated_pmu) {
6914 kvm->arch.enable_pmu = !(cap->args[0] & KVM_PMU_CAP_DISABLE);
6915 r = 0;
6916 }
6917 mutex_unlock(&kvm->lock);
6918 break;
6919 case KVM_CAP_MAX_VCPU_ID:
6920 r = -EINVAL;
6921 if (cap->args[0] > KVM_MAX_VCPU_IDS)
6922 break;
6923
6924 mutex_lock(&kvm->lock);
6925 if (kvm->arch.bsp_vcpu_id > cap->args[0]) {
6926 ;
6927 } else if (kvm->arch.max_vcpu_ids == cap->args[0]) {
6928 r = 0;
6929 } else if (!kvm->arch.max_vcpu_ids) {
6930 kvm->arch.max_vcpu_ids = cap->args[0];
6931 r = 0;
6932 }
6933 mutex_unlock(&kvm->lock);
6934 break;
6935 case KVM_CAP_X86_NOTIFY_VMEXIT:
6936 r = -EINVAL;
6937 if ((u32)cap->args[0] & ~KVM_X86_NOTIFY_VMEXIT_VALID_BITS)
6938 break;
6939 if (!kvm_caps.has_notify_vmexit)
6940 break;
6941 if (!((u32)cap->args[0] & KVM_X86_NOTIFY_VMEXIT_ENABLED))
6942 break;
6943 mutex_lock(&kvm->lock);
6944 if (!kvm->created_vcpus) {
6945 kvm->arch.notify_window = cap->args[0] >> 32;
6946 kvm->arch.notify_vmexit_flags = (u32)cap->args[0];
6947 r = 0;
6948 }
6949 mutex_unlock(&kvm->lock);
6950 break;
6951 case KVM_CAP_VM_DISABLE_NX_HUGE_PAGES:
6952 r = -EINVAL;
6953
6954 /*
6955 * Since the risk of disabling NX hugepages is a guest crashing
6956 * the system, ensure the userspace process has permission to
6957 * reboot the system.
6958 *
6959 * Note that unlike the reboot() syscall, the process must have
6960 * this capability in the root namespace because exposing
6961 * /dev/kvm into a container does not limit the scope of the
6962 * iTLB multihit bug to that container. In other words,
6963 * this must use capable(), not ns_capable().
6964 */
6965 if (!capable(CAP_SYS_BOOT)) {
6966 r = -EPERM;
6967 break;
6968 }
6969
6970 if (cap->args[0])
6971 break;
6972
6973 mutex_lock(&kvm->lock);
6974 if (!kvm->created_vcpus) {
6975 kvm->arch.disable_nx_huge_pages = true;
6976 r = 0;
6977 }
6978 mutex_unlock(&kvm->lock);
6979 break;
6980 case KVM_CAP_X86_APIC_BUS_CYCLES_NS: {
6981 u64 bus_cycle_ns = cap->args[0];
6982 u64 unused;
6983
6984 /*
6985 * Guard against overflow in tmict_to_ns(). 128 is the highest
6986 * divide value that can be programmed in APIC_TDCR.
6987 */
6988 r = -EINVAL;
6989 if (!bus_cycle_ns ||
6990 check_mul_overflow((u64)U32_MAX * 128, bus_cycle_ns, &unused))
6991 break;
6992
6993 r = 0;
6994 mutex_lock(&kvm->lock);
6995 if (!irqchip_in_kernel(kvm))
6996 r = -ENXIO;
6997 else if (kvm->created_vcpus)
6998 r = -EINVAL;
6999 else
7000 kvm->arch.apic_bus_cycle_ns = bus_cycle_ns;
7001 mutex_unlock(&kvm->lock);
7002 break;
7003 }
7004 default:
7005 r = -EINVAL;
7006 break;
7007 }
7008 return r;
7009 }
7010
7011 static struct kvm_x86_msr_filter *kvm_alloc_msr_filter(bool default_allow)
7012 {
7013 struct kvm_x86_msr_filter *msr_filter;
7014
7015 msr_filter = kzalloc_obj(*msr_filter, GFP_KERNEL_ACCOUNT);
7016 if (!msr_filter)
7017 return NULL;
7018
7019 msr_filter->default_allow = default_allow;
7020 return msr_filter;
7021 }
7022
7023 static void kvm_free_msr_filter(struct kvm_x86_msr_filter *msr_filter)
7024 {
7025 u32 i;
7026
7027 if (!msr_filter)
7028 return;
7029
7030 for (i = 0; i < msr_filter->count; i++)
7031 kfree(msr_filter->ranges[i].bitmap);
7032
7033 kfree(msr_filter);
7034 }
7035
7036 static int kvm_add_msr_filter(struct kvm_x86_msr_filter *msr_filter,
7037 struct kvm_msr_filter_range *user_range)
7038 {
7039 unsigned long *bitmap;
7040 size_t bitmap_size;
7041
7042 if (!user_range->nmsrs)
7043 return 0;
7044
7045 if (user_range->flags & ~KVM_MSR_FILTER_RANGE_VALID_MASK)
7046 return -EINVAL;
7047
7048 if (!user_range->flags)
7049 return -EINVAL;
7050
7051 bitmap_size = BITS_TO_LONGS(user_range->nmsrs) * sizeof(long);
7052 if (!bitmap_size || bitmap_size > KVM_MSR_FILTER_MAX_BITMAP_SIZE)
7053 return -EINVAL;
7054
7055 bitmap = memdup_user((__user u8*)user_range->bitmap, bitmap_size);
7056 if (IS_ERR(bitmap))
7057 return PTR_ERR(bitmap);
7058
7059 msr_filter->ranges[msr_filter->count] = (struct msr_bitmap_range) {
7060 .flags = user_range->flags,
7061 .base = user_range->base,
7062 .nmsrs = user_range->nmsrs,
7063 .bitmap = bitmap,
7064 };
7065
7066 msr_filter->count++;
7067 return 0;
7068 }
7069
7070 static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm,
7071 struct kvm_msr_filter *filter)
7072 {
7073 struct kvm_x86_msr_filter *new_filter, *old_filter;
7074 bool default_allow;
7075 bool empty = true;
7076 int r;
7077 u32 i;
7078
7079 if (filter->flags & ~KVM_MSR_FILTER_VALID_MASK)
7080 return -EINVAL;
7081
7082 for (i = 0; i < ARRAY_SIZE(filter->ranges); i++)
7083 empty &= !filter->ranges[i].nmsrs;
7084
7085 default_allow = !(filter->flags & KVM_MSR_FILTER_DEFAULT_DENY);
7086 if (empty && !default_allow)
7087 return -EINVAL;
7088
7089 new_filter = kvm_alloc_msr_filter(default_allow);
7090 if (!new_filter)
7091 return -ENOMEM;
7092
7093 for (i = 0; i < ARRAY_SIZE(filter->ranges); i++) {
7094 r = kvm_add_msr_filter(new_filter, &filter->ranges[i]);
7095 if (r) {
7096 kvm_free_msr_filter(new_filter);
7097 return r;
7098 }
7099 }
7100
7101 mutex_lock(&kvm->lock);
7102 old_filter = rcu_replace_pointer(kvm->arch.msr_filter, new_filter,
7103 mutex_is_locked(&kvm->lock));
7104 mutex_unlock(&kvm->lock);
7105 synchronize_srcu(&kvm->srcu);
7106
7107 kvm_free_msr_filter(old_filter);
7108
7109 /*
7110 * Recalc MSR intercepts as userspace may want to intercept accesses to
7111 * MSRs that KVM would otherwise pass through to the guest.
7112 */
7113 kvm_make_all_cpus_request(kvm, KVM_REQ_RECALC_INTERCEPTS);
7114
7115 return 0;
7116 }
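/*
 * Illustrative userspace sketch (not taken from this file; the vm_fd and the
 * chosen MSR are hypothetical): a VMM could install a deny-by-default filter
 * that still allows reads of a single MSR range, e.g.:
 *
 *	struct kvm_msr_filter filter = {
 *		.flags = KVM_MSR_FILTER_DEFAULT_DENY,
 *		.ranges[0] = {
 *			.flags  = KVM_MSR_FILTER_READ,
 *			.base   = MSR_OF_INTEREST,	// illustrative only
 *			.nmsrs  = 1,
 *			.bitmap = bitmap,		// one bit per MSR; set bit = allowed
 *		},
 *	};
 *	ioctl(vm_fd, KVM_X86_SET_MSR_FILTER, &filter);
 *
 * Accesses denied by the filter are either injected as #GP or forwarded to
 * userspace, depending on how KVM_CAP_X86_USER_SPACE_MSR was configured.
 */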
7117
7118 #ifdef CONFIG_KVM_COMPAT
7119 /* for KVM_X86_SET_MSR_FILTER */
7120 struct kvm_msr_filter_range_compat {
7121 __u32 flags;
7122 __u32 nmsrs;
7123 __u32 base;
7124 __u32 bitmap;
7125 };
7126
7127 struct kvm_msr_filter_compat {
7128 __u32 flags;
7129 struct kvm_msr_filter_range_compat ranges[KVM_MSR_FILTER_MAX_RANGES];
7130 };
7131
7132 #define KVM_X86_SET_MSR_FILTER_COMPAT _IOW(KVMIO, 0xc6, struct kvm_msr_filter_compat)
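/*
 * The compat path exists because 32-bit userspace lays out
 * struct kvm_msr_filter_range with a 32-bit bitmap pointer, so the structure
 * size (and hence the ioctl number) differs from the native 64-bit layout.
 * kvm_arch_vm_compat_ioctl() widens each range into the native struct and
 * then reuses kvm_vm_ioctl_set_msr_filter().
 */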
7133
7134 long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
7135 unsigned long arg)
7136 {
7137 void __user *argp = (void __user *)arg;
7138 struct kvm *kvm = filp->private_data;
7139 long r = -ENOTTY;
7140
7141 switch (ioctl) {
7142 case KVM_X86_SET_MSR_FILTER_COMPAT: {
7143 struct kvm_msr_filter __user *user_msr_filter = argp;
7144 struct kvm_msr_filter_compat filter_compat;
7145 struct kvm_msr_filter filter;
7146 int i;
7147
7148 if (copy_from_user(&filter_compat, user_msr_filter,
7149 sizeof(filter_compat)))
7150 return -EFAULT;
7151
7152 filter.flags = filter_compat.flags;
7153 for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) {
7154 struct kvm_msr_filter_range_compat *cr;
7155
7156 cr = &filter_compat.ranges[i];
7157 filter.ranges[i] = (struct kvm_msr_filter_range) {
7158 .flags = cr->flags,
7159 .nmsrs = cr->nmsrs,
7160 .base = cr->base,
7161 .bitmap = (__u8 *)(ulong)cr->bitmap,
7162 };
7163 }
7164
7165 r = kvm_vm_ioctl_set_msr_filter(kvm, &filter);
7166 break;
7167 }
7168 }
7169
7170 return r;
7171 }
7172 #endif
7173
7174 #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
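/*
 * kvm_set_guest_paused() sets PVCLOCK_GUEST_STOPPED in the vCPU's pvclock
 * area so that, after resume, a kvmclock-using guest can account for the
 * host-side pause (e.g. avoid spurious soft-lockup reports).
 */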
7175 static int kvm_arch_suspend_notifier(struct kvm *kvm)
7176 {
7177 struct kvm_vcpu *vcpu;
7178 unsigned long i;
7179
7180 /*
7181 * Ignore the return, marking the guest paused only "fails" if the vCPU
7182 * isn't using kvmclock; continuing on is correct and desirable.
7183 */
7184 kvm_for_each_vcpu(i, vcpu, kvm)
7185 (void)kvm_set_guest_paused(vcpu);
7186
7187 return NOTIFY_DONE;
7188 }
7189
7190 int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state)
7191 {
7192 switch (state) {
7193 case PM_HIBERNATION_PREPARE:
7194 case PM_SUSPEND_PREPARE:
7195 return kvm_arch_suspend_notifier(kvm);
7196 }
7197
7198 return NOTIFY_DONE;
7199 }
7200 #endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */
7201
7202 static int kvm_vm_ioctl_get_clock(struct kvm *kvm, void __user *argp)
7203 {
7204 struct kvm_clock_data data = { 0 };
7205
7206 get_kvmclock(kvm, &data);
7207 if (copy_to_user(argp, &data, sizeof(data)))
7208 return -EFAULT;
7209
7210 return 0;
7211 }
7212
7213 static int kvm_vm_ioctl_set_clock(struct kvm *kvm, void __user *argp)
7214 {
7215 struct kvm_arch *ka = &kvm->arch;
7216 struct kvm_clock_data data;
7217 u64 now_raw_ns;
7218
7219 if (copy_from_user(&data, argp, sizeof(data)))
7220 return -EFAULT;
7221
7222 /*
7223 * Only KVM_CLOCK_REALTIME is used, but allow passing the
7224 * result of KVM_GET_CLOCK back to KVM_SET_CLOCK.
7225 */
7226 if (data.flags & ~KVM_CLOCK_VALID_FLAGS)
7227 return -EINVAL;
7228
7229 kvm_hv_request_tsc_page_update(kvm);
7230 kvm_start_pvclock_update(kvm);
7231 pvclock_update_vm_gtod_copy(kvm);
7232
7233 /*
7234 * This pairs with kvm_guest_time_update(): when masterclock is
7235 * in use, we use master_kernel_ns + kvmclock_offset to set
7236 * unsigned 'system_time' so if we use get_kvmclock_ns() (which
7237 * is slightly ahead) here we risk going negative on unsigned
7238 * 'system_time' when 'data.clock' is very small.
7239 */
7240 if (data.flags & KVM_CLOCK_REALTIME) {
7241 u64 now_real_ns = ktime_get_real_ns();
7242
7243 /*
7244 * Avoid stepping the kvmclock backwards.
7245 */
7246 if (now_real_ns > data.realtime)
7247 data.clock += now_real_ns - data.realtime;
7248 }
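/*
 * A rough sketch of the math that follows: a later kvmclock read returns
 * approximately now_raw_ns' + kvmclock_offset, so setting kvmclock_offset to
 * data.clock - now_raw_ns below makes the clock read data.clock at the moment
 * of this ioctl and advance normally from there.
 */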
7249
7250 if (ka->use_master_clock)
7251 now_raw_ns = ka->master_kernel_ns;
7252 else
7253 now_raw_ns = get_kvmclock_base_ns();
7254 ka->kvmclock_offset = data.clock - now_raw_ns;
7255 kvm_end_pvclock_update(kvm);
7256 return 0;
7257 }
7258
7259 long kvm_arch_vcpu_unlocked_ioctl(struct file *filp, unsigned int ioctl,
7260 unsigned long arg)
7261 {
7262 struct kvm_vcpu *vcpu = filp->private_data;
7263 void __user *argp = (void __user *)arg;
7264
7265 if (ioctl == KVM_MEMORY_ENCRYPT_OP &&
7266 kvm_x86_ops.vcpu_mem_enc_unlocked_ioctl)
7267 return kvm_x86_call(vcpu_mem_enc_unlocked_ioctl)(vcpu, argp);
7268
7269 return -ENOIOCTLCMD;
7270 }
7271
7272 int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
7273 {
7274 struct kvm *kvm = filp->private_data;
7275 void __user *argp = (void __user *)arg;
7276 int r = -ENOTTY;
7277
7278 #ifdef CONFIG_KVM_IOAPIC
7279 /*
7280 * This union makes it completely explicit to gcc-3.x
7281 * that these three variables' stack usage should be
7282 * combined, not added together.
7283 */
7284 union {
7285 struct kvm_pit_state ps;
7286 struct kvm_pit_state2 ps2;
7287 struct kvm_pit_config pit_config;
7288 } u;
7289 #endif
7290
7291 switch (ioctl) {
7292 case KVM_SET_TSS_ADDR:
7293 r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
7294 break;
7295 case KVM_SET_IDENTITY_MAP_ADDR: {
7296 u64 ident_addr;
7297
7298 mutex_lock(&kvm->lock);
7299 r = -EINVAL;
7300 if (kvm->created_vcpus)
7301 goto set_identity_unlock;
7302 r = -EFAULT;
7303 if (copy_from_user(&ident_addr, argp, sizeof(ident_addr)))
7304 goto set_identity_unlock;
7305 r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
7306 set_identity_unlock:
7307 mutex_unlock(&kvm->lock);
7308 break;
7309 }
7310 case KVM_SET_NR_MMU_PAGES:
7311 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
7312 break;
7313 #ifdef CONFIG_KVM_IOAPIC
7314 case KVM_CREATE_IRQCHIP: {
7315 mutex_lock(&kvm->lock);
7316
7317 r = -EEXIST;
7318 if (irqchip_in_kernel(kvm))
7319 goto create_irqchip_unlock;
7320
7321 /*
7322 * Disallow an in-kernel I/O APIC if the VM has protected EOIs,
7323 * i.e. if KVM can't intercept EOIs and thus can't properly
7324 * emulate level-triggered interrupts.
7325 */
7326 r = -ENOTTY;
7327 if (kvm->arch.has_protected_eoi)
7328 goto create_irqchip_unlock;
7329
7330 r = -EINVAL;
7331 if (kvm->created_vcpus)
7332 goto create_irqchip_unlock;
7333
7334 r = kvm_pic_init(kvm);
7335 if (r)
7336 goto create_irqchip_unlock;
7337
7338 r = kvm_ioapic_init(kvm);
7339 if (r) {
7340 kvm_pic_destroy(kvm);
7341 goto create_irqchip_unlock;
7342 }
7343
7344 r = kvm_setup_default_ioapic_and_pic_routing(kvm);
7345 if (r) {
7346 kvm_ioapic_destroy(kvm);
7347 kvm_pic_destroy(kvm);
7348 goto create_irqchip_unlock;
7349 }
7350 /* Write kvm->irq_routing before enabling irqchip_in_kernel. */
7351 smp_wmb();
7352 kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL;
7353 kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_ABSENT);
7354 create_irqchip_unlock:
7355 mutex_unlock(&kvm->lock);
7356 break;
7357 }
7358 case KVM_CREATE_PIT:
7359 u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
7360 goto create_pit;
7361 case KVM_CREATE_PIT2:
7362 r = -EFAULT;
7363 if (copy_from_user(&u.pit_config, argp,
7364 sizeof(struct kvm_pit_config)))
7365 goto out;
7366 create_pit:
7367 mutex_lock(&kvm->lock);
7368 r = -EEXIST;
7369 if (kvm->arch.vpit)
7370 goto create_pit_unlock;
7371 r = -ENOENT;
7372 if (!pic_in_kernel(kvm))
7373 goto create_pit_unlock;
7374 r = -ENOMEM;
7375 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
7376 if (kvm->arch.vpit)
7377 r = 0;
7378 create_pit_unlock:
7379 mutex_unlock(&kvm->lock);
7380 break;
7381 case KVM_GET_IRQCHIP: {
7382 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
7383 struct kvm_irqchip *chip;
7384
7385 chip = memdup_user(argp, sizeof(*chip));
7386 if (IS_ERR(chip)) {
7387 r = PTR_ERR(chip);
7388 goto out;
7389 }
7390
7391 r = -ENXIO;
7392 if (!irqchip_full(kvm))
7393 goto get_irqchip_out;
7394 r = kvm_vm_ioctl_get_irqchip(kvm, chip);
7395 if (r)
7396 goto get_irqchip_out;
7397 r = -EFAULT;
7398 if (copy_to_user(argp, chip, sizeof(*chip)))
7399 goto get_irqchip_out;
7400 r = 0;
7401 get_irqchip_out:
7402 kfree(chip);
7403 break;
7404 }
7405 case KVM_SET_IRQCHIP: {
7406 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
7407 struct kvm_irqchip *chip;
7408
7409 chip = memdup_user(argp, sizeof(*chip));
7410 if (IS_ERR(chip)) {
7411 r = PTR_ERR(chip);
7412 goto out;
7413 }
7414
7415 r = -ENXIO;
7416 if (!irqchip_full(kvm))
7417 goto set_irqchip_out;
7418 r = kvm_vm_ioctl_set_irqchip(kvm, chip);
7419 set_irqchip_out:
7420 kfree(chip);
7421 break;
7422 }
7423 case KVM_GET_PIT: {
7424 r = -EFAULT;
7425 if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
7426 goto out;
7427 r = -ENXIO;
7428 if (!kvm->arch.vpit)
7429 goto out;
7430 r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
7431 if (r)
7432 goto out;
7433 r = -EFAULT;
7434 if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
7435 goto out;
7436 r = 0;
7437 break;
7438 }
7439 case KVM_SET_PIT: {
7440 r = -EFAULT;
7441 if (copy_from_user(&u.ps, argp, sizeof(u.ps)))
7442 goto out;
7443 mutex_lock(&kvm->lock);
7444 r = -ENXIO;
7445 if (!kvm->arch.vpit)
7446 goto set_pit_out;
7447 r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
7448 set_pit_out:
7449 mutex_unlock(&kvm->lock);
7450 break;
7451 }
7452 case KVM_GET_PIT2: {
7453 r = -ENXIO;
7454 if (!kvm->arch.vpit)
7455 goto out;
7456 r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
7457 if (r)
7458 goto out;
7459 r = -EFAULT;
7460 if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
7461 goto out;
7462 r = 0;
7463 break;
7464 }
7465 case KVM_SET_PIT2: {
7466 r = -EFAULT;
7467 if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
7468 goto out;
7469 mutex_lock(&kvm->lock);
7470 r = -ENXIO;
7471 if (!kvm->arch.vpit)
7472 goto set_pit2_out;
7473 r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
7474 set_pit2_out:
7475 mutex_unlock(&kvm->lock);
7476 break;
7477 }
7478 case KVM_REINJECT_CONTROL: {
7479 struct kvm_reinject_control control;
7480 r = -EFAULT;
7481 if (copy_from_user(&control, argp, sizeof(control)))
7482 goto out;
7483 r = -ENXIO;
7484 if (!kvm->arch.vpit)
7485 goto out;
7486 r = kvm_vm_ioctl_reinject(kvm, &control);
7487 break;
7488 }
7489 #endif
7490 case KVM_SET_BOOT_CPU_ID:
7491 r = 0;
7492 mutex_lock(&kvm->lock);
7493 if (kvm->created_vcpus)
7494 r = -EBUSY;
7495 else if (arg > KVM_MAX_VCPU_IDS ||
7496 (kvm->arch.max_vcpu_ids && arg > kvm->arch.max_vcpu_ids))
7497 r = -EINVAL;
7498 else
7499 kvm->arch.bsp_vcpu_id = arg;
7500 mutex_unlock(&kvm->lock);
7501 break;
7502 #ifdef CONFIG_KVM_XEN
7503 case KVM_XEN_HVM_CONFIG: {
7504 struct kvm_xen_hvm_config xhc;
7505 r = -EFAULT;
7506 if (copy_from_user(&xhc, argp, sizeof(xhc)))
7507 goto out;
7508 r = kvm_xen_hvm_config(kvm, &xhc);
7509 break;
7510 }
7511 case KVM_XEN_HVM_GET_ATTR: {
7512 struct kvm_xen_hvm_attr xha;
7513
7514 r = -EFAULT;
7515 if (copy_from_user(&xha, argp, sizeof(xha)))
7516 goto out;
7517 r = kvm_xen_hvm_get_attr(kvm, &xha);
7518 if (!r && copy_to_user(argp, &xha, sizeof(xha)))
7519 r = -EFAULT;
7520 break;
7521 }
7522 case KVM_XEN_HVM_SET_ATTR: {
7523 struct kvm_xen_hvm_attr xha;
7524
7525 r = -EFAULT;
7526 if (copy_from_user(&xha, argp, sizeof(xha)))
7527 goto out;
7528 r = kvm_xen_hvm_set_attr(kvm, &xha);
7529 break;
7530 }
7531 case KVM_XEN_HVM_EVTCHN_SEND: {
7532 struct kvm_irq_routing_xen_evtchn uxe;
7533
7534 r = -EFAULT;
7535 if (copy_from_user(&uxe, argp, sizeof(uxe)))
7536 goto out;
7537 r = kvm_xen_hvm_evtchn_send(kvm, &uxe);
7538 break;
7539 }
7540 #endif
7541 case KVM_SET_CLOCK:
7542 r = kvm_vm_ioctl_set_clock(kvm, argp);
7543 break;
7544 case KVM_GET_CLOCK:
7545 r = kvm_vm_ioctl_get_clock(kvm, argp);
7546 break;
7547 case KVM_SET_TSC_KHZ: {
7548 u32 user_tsc_khz;
7549
7550 r = -EINVAL;
7551 user_tsc_khz = (u32)arg;
7552
7553 if (kvm_caps.has_tsc_control &&
7554 user_tsc_khz >= kvm_caps.max_guest_tsc_khz)
7555 goto out;
7556
7557 if (user_tsc_khz == 0)
7558 user_tsc_khz = tsc_khz;
7559
7560 mutex_lock(&kvm->lock);
7561 if (!kvm->created_vcpus) {
7562 WRITE_ONCE(kvm->arch.default_tsc_khz, user_tsc_khz);
7563 r = 0;
7564 }
7565 mutex_unlock(&kvm->lock);
7566 goto out;
7567 }
7568 case KVM_GET_TSC_KHZ: {
7569 r = READ_ONCE(kvm->arch.default_tsc_khz);
7570 goto out;
7571 }
7572 case KVM_MEMORY_ENCRYPT_OP:
7573 r = -ENOTTY;
7574 if (!kvm_x86_ops.mem_enc_ioctl)
7575 goto out;
7576
7577 r = kvm_x86_call(mem_enc_ioctl)(kvm, argp);
7578 break;
7579 case KVM_MEMORY_ENCRYPT_REG_REGION: {
7580 struct kvm_enc_region region;
7581
7582 r = -EFAULT;
7583 if (copy_from_user(&region, argp, sizeof(region)))
7584 goto out;
7585
7586 r = -ENOTTY;
7587 if (!kvm_x86_ops.mem_enc_register_region)
7588 goto out;
7589
7590 r = kvm_x86_call(mem_enc_register_region)(kvm, &region);
7591 break;
7592 }
7593 case KVM_MEMORY_ENCRYPT_UNREG_REGION: {
7594 struct kvm_enc_region region;
7595
7596 r = -EFAULT;
7597 if (copy_from_user(&region, argp, sizeof(region)))
7598 goto out;
7599
7600 r = -ENOTTY;
7601 if (!kvm_x86_ops.mem_enc_unregister_region)
7602 goto out;
7603
7604 r = kvm_x86_call(mem_enc_unregister_region)(kvm, &region);
7605 break;
7606 }
7607 #ifdef CONFIG_KVM_HYPERV
7608 case KVM_HYPERV_EVENTFD: {
7609 struct kvm_hyperv_eventfd hvevfd;
7610
7611 r = -EFAULT;
7612 if (copy_from_user(&hvevfd, argp, sizeof(hvevfd)))
7613 goto out;
7614 r = kvm_vm_ioctl_hv_eventfd(kvm, &hvevfd);
7615 break;
7616 }
7617 #endif
7618 case KVM_SET_PMU_EVENT_FILTER:
7619 r = kvm_vm_ioctl_set_pmu_event_filter(kvm, argp);
7620 break;
7621 case KVM_X86_SET_MSR_FILTER: {
7622 struct kvm_msr_filter __user *user_msr_filter = argp;
7623 struct kvm_msr_filter filter;
7624
7625 if (copy_from_user(&filter, user_msr_filter, sizeof(filter)))
7626 return -EFAULT;
7627
7628 r = kvm_vm_ioctl_set_msr_filter(kvm, &filter);
7629 break;
7630 }
7631 default:
7632 r = -ENOTTY;
7633 }
7634 out:
7635 return r;
7636 }
7637
7638 static void kvm_probe_feature_msr(u32 msr_index)
7639 {
7640 u64 data;
7641
7642 if (kvm_get_feature_msr(NULL, msr_index, &data, true))
7643 return;
7644
7645 msr_based_features[num_msr_based_features++] = msr_index;
7646 }
7647
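/*
 * An MSR makes it into msrs_to_save[] only if (a) reading it on the host
 * works (rdmsr_safe() doesn't fault) and (b) the checks below agree that KVM
 * can actually expose it, based on KVM's CPU caps and related capabilities.
 */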
7648 static void kvm_probe_msr_to_save(u32 msr_index)
7649 {
7650 u32 dummy[2];
7651
7652 if (rdmsr_safe(msr_index, &dummy[0], &dummy[1]))
7653 return;
7654
7655 /*
7656 * Even MSRs that are valid in the host may not be exposed to guests in
7657 * some cases.
7658 */
7659 switch (msr_index) {
7660 case MSR_IA32_BNDCFGS:
7661 if (!kvm_mpx_supported())
7662 return;
7663 break;
7664 case MSR_TSC_AUX:
7665 if (!kvm_cpu_cap_has(X86_FEATURE_RDTSCP) &&
7666 !kvm_cpu_cap_has(X86_FEATURE_RDPID))
7667 return;
7668 break;
7669 case MSR_IA32_UMWAIT_CONTROL:
7670 if (!kvm_cpu_cap_has(X86_FEATURE_WAITPKG))
7671 return;
7672 break;
7673 case MSR_IA32_RTIT_CTL:
7674 case MSR_IA32_RTIT_STATUS:
7675 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT))
7676 return;
7677 break;
7678 case MSR_IA32_RTIT_CR3_MATCH:
7679 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) ||
7680 !intel_pt_validate_hw_cap(PT_CAP_cr3_filtering))
7681 return;
7682 break;
7683 case MSR_IA32_RTIT_OUTPUT_BASE:
7684 case MSR_IA32_RTIT_OUTPUT_MASK:
7685 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) ||
7686 (!intel_pt_validate_hw_cap(PT_CAP_topa_output) &&
7687 !intel_pt_validate_hw_cap(PT_CAP_single_range_output)))
7688 return;
7689 break;
7690 case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
7691 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) ||
7692 (msr_index - MSR_IA32_RTIT_ADDR0_A >=
7693 intel_pt_validate_hw_cap(PT_CAP_num_address_ranges) * 2))
7694 return;
7695 break;
7696 case MSR_ARCH_PERFMON_PERFCTR0 ...
7697 MSR_ARCH_PERFMON_PERFCTR0 + KVM_MAX_NR_GP_COUNTERS - 1:
7698 if (msr_index - MSR_ARCH_PERFMON_PERFCTR0 >=
7699 kvm_pmu_cap.num_counters_gp)
7700 return;
7701 break;
7702 case MSR_ARCH_PERFMON_EVENTSEL0 ...
7703 MSR_ARCH_PERFMON_EVENTSEL0 + KVM_MAX_NR_GP_COUNTERS - 1:
7704 if (msr_index - MSR_ARCH_PERFMON_EVENTSEL0 >=
7705 kvm_pmu_cap.num_counters_gp)
7706 return;
7707 break;
7708 case MSR_ARCH_PERFMON_FIXED_CTR0 ...
7709 MSR_ARCH_PERFMON_FIXED_CTR0 + KVM_MAX_NR_FIXED_COUNTERS - 1:
7710 if (msr_index - MSR_ARCH_PERFMON_FIXED_CTR0 >=
7711 kvm_pmu_cap.num_counters_fixed)
7712 return;
7713 break;
7714 case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
7715 case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
7716 case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
7717 case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET:
7718 if (!kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2))
7719 return;
7720 break;
7721 case MSR_IA32_XFD:
7722 case MSR_IA32_XFD_ERR:
7723 if (!kvm_cpu_cap_has(X86_FEATURE_XFD))
7724 return;
7725 break;
7726 case MSR_IA32_TSX_CTRL:
7727 if (!(kvm_get_arch_capabilities() & ARCH_CAP_TSX_CTRL_MSR))
7728 return;
7729 break;
7730 case MSR_IA32_XSS:
7731 if (!kvm_caps.supported_xss)
7732 return;
7733 break;
7734 case MSR_IA32_U_CET:
7735 case MSR_IA32_S_CET:
7736 if (!kvm_cpu_cap_has(X86_FEATURE_SHSTK) &&
7737 !kvm_cpu_cap_has(X86_FEATURE_IBT))
7738 return;
7739 break;
7740 case MSR_IA32_INT_SSP_TAB:
7741 if (!kvm_cpu_cap_has(X86_FEATURE_LM))
7742 return;
7743 fallthrough;
7744 case MSR_IA32_PL0_SSP ... MSR_IA32_PL3_SSP:
7745 if (!kvm_cpu_cap_has(X86_FEATURE_SHSTK))
7746 return;
7747 break;
7748 default:
7749 break;
7750 }
7751
7752 msrs_to_save[num_msrs_to_save++] = msr_index;
7753 }
7754
7755 static void kvm_init_msr_lists(void)
7756 {
7757 unsigned i;
7758
7759 BUILD_BUG_ON_MSG(KVM_MAX_NR_FIXED_COUNTERS != 3,
7760 "Please update the fixed PMCs in msrs_to_save_pmu[]");
7761
7762 num_msrs_to_save = 0;
7763 num_emulated_msrs = 0;
7764 num_msr_based_features = 0;
7765
7766 for (i = 0; i < ARRAY_SIZE(msrs_to_save_base); i++)
7767 kvm_probe_msr_to_save(msrs_to_save_base[i]);
7768
7769 if (enable_pmu) {
7770 for (i = 0; i < ARRAY_SIZE(msrs_to_save_pmu); i++)
7771 kvm_probe_msr_to_save(msrs_to_save_pmu[i]);
7772 }
7773
7774 for (i = 0; i < ARRAY_SIZE(emulated_msrs_all); i++) {
7775 if (!kvm_x86_call(has_emulated_msr)(NULL,
7776 emulated_msrs_all[i]))
7777 continue;
7778
7779 emulated_msrs[num_emulated_msrs++] = emulated_msrs_all[i];
7780 }
7781
7782 for (i = KVM_FIRST_EMULATED_VMX_MSR; i <= KVM_LAST_EMULATED_VMX_MSR; i++)
7783 kvm_probe_feature_msr(i);
7784
7785 for (i = 0; i < ARRAY_SIZE(msr_based_features_all_except_vmx); i++)
7786 kvm_probe_feature_msr(msr_based_features_all_except_vmx[i]);
7787 }
7788
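/*
 * Write up to 'len' bytes of emulated MMIO in chunks of at most 8 bytes.
 * Each chunk is offered to the in-kernel local APIC first and then to the
 * KVM_MMIO_BUS; the return value is the number of bytes handled in-kernel,
 * which may be less than 'len' if a chunk must be completed by userspace.
 */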
7789 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
7790 void *__v)
7791 {
7792 const void *v = __v;
7793 int handled = 0;
7794 int n;
7795
7796 trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, addr, __v);
7797
7798 do {
7799 n = min(len, 8);
7800 if (!(lapic_in_kernel(vcpu) &&
7801 !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v))
7802 && kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v))
7803 break;
7804 handled += n;
7805 addr += n;
7806 len -= n;
7807 v += n;
7808 } while (len);
7809
7810 return handled;
7811 }
7812
7813 static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
7814 {
7815 int handled = 0;
7816 int n;
7817
7818 do {
7819 n = min(len, 8);
7820 if (!(lapic_in_kernel(vcpu) &&
7821 !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev,
7822 addr, n, v))
7823 && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v))
7824 break;
7825 trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, v);
7826 handled += n;
7827 addr += n;
7828 len -= n;
7829 v += n;
7830 } while (len);
7831
7832 if (len)
7833 trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len, addr, NULL);
7834
7835 return handled;
7836 }
7837
7838 void kvm_set_segment(struct kvm_vcpu *vcpu,
7839 struct kvm_segment *var, int seg)
7840 {
7841 kvm_x86_call(set_segment)(vcpu, var, seg);
7842 }
7843
7844 void kvm_get_segment(struct kvm_vcpu *vcpu,
7845 struct kvm_segment *var, int seg)
7846 {
7847 kvm_x86_call(get_segment)(vcpu, var, seg);
7848 }
7849
7850 gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access,
7851 struct x86_exception *exception)
7852 {
7853 struct kvm_mmu *mmu = vcpu->arch.mmu;
7854 gpa_t t_gpa;
7855
7856 BUG_ON(!mmu_is_nested(vcpu));
7857
7858 /* NPT walks are always user-walks */
7859 access |= PFERR_USER_MASK;
7860 t_gpa = mmu->gva_to_gpa(vcpu, mmu, gpa, access, exception);
7861
7862 return t_gpa;
7863 }
7864
7865 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
7866 struct x86_exception *exception)
7867 {
7868 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
7869
7870 u64 access = (kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
7871 return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
7872 }
7873 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_gva_to_gpa_read);
7874
7875 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
7876 struct x86_exception *exception)
7877 {
7878 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
7879
7880 u64 access = (kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
7881 access |= PFERR_WRITE_MASK;
7882 return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
7883 }
7884 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_gva_to_gpa_write);
7885
7886 /* used to access any guest's mapped memory without checking CPL */
7887 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
7888 struct x86_exception *exception)
7889 {
7890 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
7891
7892 return mmu->gva_to_gpa(vcpu, mmu, gva, 0, exception);
7893 }
7894
7895 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
7896 struct kvm_vcpu *vcpu, u64 access,
7897 struct x86_exception *exception)
7898 {
7899 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
7900 void *data = val;
7901 int r = X86EMUL_CONTINUE;
7902
7903 while (bytes) {
7904 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access, exception);
7905 unsigned offset = addr & (PAGE_SIZE-1);
7906 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
7907 int ret;
7908
7909 if (gpa == INVALID_GPA)
7910 return X86EMUL_PROPAGATE_FAULT;
7911 ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, data,
7912 offset, toread);
7913 if (ret < 0) {
7914 r = X86EMUL_IO_NEEDED;
7915 goto out;
7916 }
7917
7918 bytes -= toread;
7919 data += toread;
7920 addr += toread;
7921 }
7922 out:
7923 return r;
7924 }
7925
7926 /* used for instruction fetching */
7927 static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
7928 gva_t addr, void *val, unsigned int bytes,
7929 struct x86_exception *exception)
7930 {
7931 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
7932 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
7933 u64 access = (kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
7934 unsigned offset;
7935 int ret;
7936
7937 /* Inline kvm_read_guest_virt_helper for speed. */
7938 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access|PFERR_FETCH_MASK,
7939 exception);
7940 if (unlikely(gpa == INVALID_GPA))
7941 return X86EMUL_PROPAGATE_FAULT;
7942
7943 offset = addr & (PAGE_SIZE-1);
7944 if (WARN_ON(offset + bytes > PAGE_SIZE))
7945 bytes = (unsigned)PAGE_SIZE - offset;
7946 ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, val,
7947 offset, bytes);
7948 if (unlikely(ret < 0))
7949 return X86EMUL_IO_NEEDED;
7950
7951 return X86EMUL_CONTINUE;
7952 }
7953
7954 int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
7955 gva_t addr, void *val, unsigned int bytes,
7956 struct x86_exception *exception)
7957 {
7958 u64 access = (kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
7959
7960 /*
7961 * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
7962 * is returned, but our callers are not ready for that and they blindly
7963 * call kvm_inject_page_fault. Ensure that they at least do not leak
7964 * uninitialized kernel stack memory into cr2 and error code.
7965 */
7966 memset(exception, 0, sizeof(*exception));
7967 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
7968 exception);
7969 }
7970 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_read_guest_virt);
7971
7972 static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
7973 gva_t addr, void *val, unsigned int bytes,
7974 struct x86_exception *exception, bool system)
7975 {
7976 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
7977 u64 access = 0;
7978
7979 if (system)
7980 access |= PFERR_IMPLICIT_ACCESS;
7981 else if (kvm_x86_call(get_cpl)(vcpu) == 3)
7982 access |= PFERR_USER_MASK;
7983
7984 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception);
7985 }
7986
7987 static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
7988 struct kvm_vcpu *vcpu, u64 access,
7989 struct x86_exception *exception)
7990 {
7991 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
7992 void *data = val;
7993 int r = X86EMUL_CONTINUE;
7994
7995 while (bytes) {
7996 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access, exception);
7997 unsigned offset = addr & (PAGE_SIZE-1);
7998 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
7999 int ret;
8000
8001 if (gpa == INVALID_GPA)
8002 return X86EMUL_PROPAGATE_FAULT;
8003 ret = kvm_vcpu_write_guest(vcpu, gpa, data, towrite);
8004 if (ret < 0) {
8005 r = X86EMUL_IO_NEEDED;
8006 goto out;
8007 }
8008
8009 bytes -= towrite;
8010 data += towrite;
8011 addr += towrite;
8012 }
8013 out:
8014 return r;
8015 }
8016
8017 static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val,
8018 unsigned int bytes, struct x86_exception *exception,
8019 bool system)
8020 {
8021 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
8022 u64 access = PFERR_WRITE_MASK;
8023
8024 if (system)
8025 access |= PFERR_IMPLICIT_ACCESS;
8026 else if (kvm_x86_call(get_cpl)(vcpu) == 3)
8027 access |= PFERR_USER_MASK;
8028
8029 return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
8030 access, exception);
8031 }
8032
8033 int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
8034 unsigned int bytes, struct x86_exception *exception)
8035 {
8036 /* kvm_write_guest_virt_system can pull in tons of pages. */
8037 kvm_request_l1tf_flush_l1d();
8038
8039 return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
8040 PFERR_WRITE_MASK, exception);
8041 }
8042 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_write_guest_virt_system);
8043
8044 static int kvm_check_emulate_insn(struct kvm_vcpu *vcpu, int emul_type,
8045 void *insn, int insn_len)
8046 {
8047 return kvm_x86_call(check_emulate_instruction)(vcpu, emul_type,
8048 insn, insn_len);
8049 }
8050
8051 int handle_ud(struct kvm_vcpu *vcpu)
8052 {
8053 static const char kvm_emulate_prefix[] = { __KVM_EMULATE_PREFIX };
8054 int fep_flags = READ_ONCE(force_emulation_prefix);
8055 int emul_type = EMULTYPE_TRAP_UD;
8056 char sig[5]; /* ud2; .ascii "kvm" */
8057 struct x86_exception e;
8058 int r;
8059
8060 r = kvm_check_emulate_insn(vcpu, emul_type, NULL, 0);
8061 if (r != X86EMUL_CONTINUE)
8062 return 1;
8063
8064 if (fep_flags &&
8065 kvm_read_guest_virt(vcpu, kvm_get_linear_rip(vcpu),
8066 sig, sizeof(sig), &e) == 0 &&
8067 memcmp(sig, kvm_emulate_prefix, sizeof(sig)) == 0) {
8068 if (fep_flags & KVM_FEP_CLEAR_RFLAGS_RF)
8069 kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) & ~X86_EFLAGS_RF);
8070 kvm_rip_write(vcpu, kvm_rip_read(vcpu) + sizeof(sig));
8071 emul_type = EMULTYPE_TRAP_UD_FORCED;
8072 }
8073
8074 return kvm_emulate_instruction(vcpu, emul_type);
8075 }
8076 EXPORT_SYMBOL_FOR_KVM_INTERNAL(handle_ud);
8077
8078 static int vcpu_is_mmio_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
8079 gpa_t gpa, bool write)
8080 {
8081 /* For APIC access vmexit */
8082 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
8083 return 1;
8084
8085 if (vcpu_match_mmio_gpa(vcpu, gpa)) {
8086 trace_vcpu_match_mmio(gva, gpa, write, true);
8087 return 1;
8088 }
8089
8090 return 0;
8091 }
8092
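/*
 * Translate a GVA for an emulated access.  Returns -1 if the walk faults
 * (with *exception filled in), 0 if the GPA is backed by regular guest
 * memory, and 1 if the access should be treated as MMIO.
 */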
8093 static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
8094 gpa_t *gpa, struct x86_exception *exception,
8095 bool write)
8096 {
8097 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
8098 u64 access = ((kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0)
8099 | (write ? PFERR_WRITE_MASK : 0);
8100
8101 /*
8102 * currently PKRU is only applied to ept enabled guest so
8103 * there is no pkey in EPT page table for L1 guest or EPT
8104 * shadow page table for L2 guest.
8105 */
8106 if (vcpu_match_mmio_gva(vcpu, gva) && (!is_paging(vcpu) ||
8107 !permission_fault(vcpu, vcpu->arch.walk_mmu,
8108 vcpu->arch.mmio_access, 0, access))) {
8109 *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
8110 (gva & (PAGE_SIZE - 1));
8111 trace_vcpu_match_mmio(gva, *gpa, write, false);
8112 return 1;
8113 }
8114
8115 *gpa = mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
8116
8117 if (*gpa == INVALID_GPA)
8118 return -1;
8119
8120 return vcpu_is_mmio_gpa(vcpu, gva, *gpa, write);
8121 }
8122
8123 struct read_write_emulator_ops {
8124 int (*read_write_guest)(struct kvm_vcpu *vcpu, gpa_t gpa,
8125 void *val, int bytes);
8126 int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
8127 int bytes, void *val);
8128 bool write;
8129 };
8130
8131 static int emulator_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa,
8132 void *val, int bytes)
8133 {
8134 return !kvm_vcpu_read_guest(vcpu, gpa, val, bytes);
8135 }
8136
8137 static int emulator_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa,
8138 void *val, int bytes)
8139 {
8140 int ret;
8141
8142 ret = kvm_vcpu_write_guest(vcpu, gpa, val, bytes);
8143 if (ret < 0)
8144 return 0;
8145 kvm_page_track_write(vcpu, gpa, val, bytes);
8146 return 1;
8147 }
8148
8149 static int emulator_read_write_onepage(unsigned long addr, void *val,
8150 unsigned int bytes,
8151 struct x86_exception *exception,
8152 struct kvm_vcpu *vcpu,
8153 const struct read_write_emulator_ops *ops)
8154 {
8155 gpa_t gpa;
8156 int handled, ret;
8157 bool write = ops->write;
8158 struct kvm_mmio_fragment *frag;
8159 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
8160
8161 /*
8162 * If the exit was due to a NPF we may already have a GPA.
8163 * If the GPA is present, use it to avoid the GVA to GPA table walk.
8164 * Note, this cannot be used on string operations, since a string
8165 * operation using REP only has the initial GPA from the page where
8166 * the NPF occurred.
8167 */
8168 if (ctxt->gpa_available && emulator_can_use_gpa(ctxt) &&
8169 (addr & ~PAGE_MASK) == (ctxt->gpa_val & ~PAGE_MASK)) {
8170 gpa = ctxt->gpa_val;
8171 ret = vcpu_is_mmio_gpa(vcpu, addr, gpa, write);
8172 } else {
8173 ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
8174 if (ret < 0)
8175 return X86EMUL_PROPAGATE_FAULT;
8176 }
8177
8178 /*
8179 * If the memory is not _known_ to be emulated MMIO, attempt to access
8180 * guest memory. If accessing guest memory fails, e.g. because there's
8181 * no memslot, then handle the access as MMIO. Note, treating the
8182 * access as emulated MMIO is technically wrong if there is a memslot,
8183 * i.e. if accessing host user memory failed, but this has been KVM's
8184 * historical ABI for decades.
8185 */
8186 if (!ret && ops->read_write_guest(vcpu, gpa, val, bytes))
8187 return X86EMUL_CONTINUE;
8188
8189 /*
8190 * Attempt to handle emulated MMIO within the kernel, e.g. for accesses
8191 * to an in-kernel local or I/O APIC, or to an ioeventfd range attached
8192 * to MMIO bus. If the access isn't fully resolved, insert an MMIO
8193 * fragment with the relevant details.
8194 */
8195 handled = ops->read_write_mmio(vcpu, gpa, bytes, val);
8196 if (handled == bytes)
8197 return X86EMUL_CONTINUE;
8198
8199 gpa += handled;
8200 bytes -= handled;
8201 val += handled;
8202
8203 WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS);
8204 frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];
8205 frag->gpa = gpa;
8206 if (write && bytes <= 8u) {
8207 frag->val = 0;
8208 frag->data = &frag->val;
8209 memcpy(&frag->val, val, bytes);
8210 } else {
8211 frag->data = val;
8212 }
8213 frag->len = bytes;
8214
8215 /*
8216 * Continue emulating, even though KVM needs to (eventually) do an MMIO
8217 * exit to userspace. If the access splits multiple pages, then KVM
8218 * needs to exit to userspace only after emulating both parts of the
8219 * access.
8220 */
8221 return X86EMUL_CONTINUE;
8222 }
8223
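/*
 * Common MMIO emulation path.  The first pass handles whatever it can
 * in-kernel and queues kvm_mmio_fragments for anything that needs userspace;
 * reads then return X86EMUL_IO_NEEDED so the vCPU exits with KVM_EXIT_MMIO.
 * Once userspace completes the exit, the instruction is re-emulated and the
 * read is satisfied from the cached fragment data (see the
 * vcpu->mmio_read_completed handling below).
 */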
8224 static int emulator_read_write(struct x86_emulate_ctxt *ctxt,
8225 unsigned long addr,
8226 void *val, unsigned int bytes,
8227 struct x86_exception *exception,
8228 const struct read_write_emulator_ops *ops)
8229 {
8230 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
8231 int rc;
8232
8233 if (WARN_ON_ONCE((bytes > 8u || !ops->write) && object_is_on_stack(val)))
8234 return X86EMUL_UNHANDLEABLE;
8235
8236 /*
8237 * If the read was already completed via a userspace MMIO exit, there's
8238 * nothing left to do except trace the MMIO read. When completing MMIO
8239 * reads, KVM re-emulates the instruction to propagate the value into
8240 * the correct destination, e.g. into the correct register, but the
8241 * value itself has already been copied to the read cache.
8242 *
8243 * Note! This is *tightly* coupled to read_emulated() satisfying reads
8244 * from the emulator's mem_read cache, so that the MMIO fragment data
8245 * is copied to the correct chunk of the correct operand.
8246 */
8247 if (!ops->write && vcpu->mmio_read_completed) {
8248 /*
8249 * For simplicity, trace the entire MMIO read in one shot, even
8250 * though the GPA might be incorrect if there are two fragments
8251 * that aren't contiguous in the GPA space.
8252 */
8253 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
8254 vcpu->mmio_fragments[0].gpa, val);
8255 vcpu->mmio_read_completed = 0;
8256 return X86EMUL_CONTINUE;
8257 }
8258
8259 vcpu->mmio_nr_fragments = 0;
8260
8261 /* Crossing a page boundary? */
8262 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
8263 int now;
8264
8265 now = -addr & ~PAGE_MASK;
8266 rc = emulator_read_write_onepage(addr, val, now, exception,
8267 vcpu, ops);
8268
8269 if (rc != X86EMUL_CONTINUE)
8270 return rc;
8271 addr += now;
8272 if (ctxt->mode != X86EMUL_MODE_PROT64)
8273 addr = (u32)addr;
8274 val += now;
8275 bytes -= now;
8276 }
8277
8278 rc = emulator_read_write_onepage(addr, val, bytes, exception,
8279 vcpu, ops);
8280 if (rc != X86EMUL_CONTINUE)
8281 return rc;
8282
8283 if (!vcpu->mmio_nr_fragments)
8284 return X86EMUL_CONTINUE;
8285
8286 vcpu->mmio_needed = 1;
8287 vcpu->mmio_cur_fragment = 0;
8288 vcpu->mmio_is_write = ops->write;
8289
8290 kvm_prepare_emulated_mmio_exit(vcpu, &vcpu->mmio_fragments[0]);
8291
8292 /*
8293 * For MMIO reads, stop emulating and immediately exit to userspace, as
8294 * KVM needs the value to correctly emulate the instruction. For MMIO
8295 * writes, continue emulating as the write to MMIO is a side effect for
8296 * all intents and purposes. KVM will still exit to userspace, but
8297 * after completing emulation (see the check on vcpu->mmio_needed in
8298 * x86_emulate_instruction()).
8299 */
8300 return ops->write ? X86EMUL_CONTINUE : X86EMUL_IO_NEEDED;
8301 }
8302
8303 static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
8304 unsigned long addr,
8305 void *val,
8306 unsigned int bytes,
8307 struct x86_exception *exception)
8308 {
8309 static const struct read_write_emulator_ops ops = {
8310 .read_write_guest = emulator_read_guest,
8311 .read_write_mmio = vcpu_mmio_read,
8312 .write = false,
8313 };
8314
8315 return emulator_read_write(ctxt, addr, val, bytes, exception, &ops);
8316 }
8317
8318 static int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
8319 unsigned long addr,
8320 const void *val,
8321 unsigned int bytes,
8322 struct x86_exception *exception)
8323 {
8324 static const struct read_write_emulator_ops ops = {
8325 .read_write_guest = emulator_write_guest,
8326 .read_write_mmio = vcpu_mmio_write,
8327 .write = true,
8328 };
8329
8330 return emulator_read_write(ctxt, addr, (void *)val, bytes, exception, &ops);
8331 }
8332
8333 #define emulator_try_cmpxchg_user(t, ptr, old, new) \
8334 (__try_cmpxchg_user((t __user *)(ptr), (t *)(old), *(t *)(new), efault ## t))
8335
8336 static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
8337 unsigned long addr,
8338 const void *old,
8339 const void *new,
8340 unsigned int bytes,
8341 struct x86_exception *exception)
8342 {
8343 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
8344 u64 page_line_mask;
8345 unsigned long hva;
8346 gpa_t gpa;
8347 int r;
8348
8349 /* a guest's cmpxchg8b has to be emulated atomically */
8350 if (bytes > 8 || (bytes & (bytes - 1)))
8351 goto emul_write;
8352
8353 gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
8354
8355 if (gpa == INVALID_GPA ||
8356 (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
8357 goto emul_write;
8358
8359 /*
8360 * Emulate the atomic as a straight write to avoid #AC if SLD is
8361 * enabled in the host and the access splits a cache line.
8362 */
8363 if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
8364 page_line_mask = ~(cache_line_size() - 1);
8365 else
8366 page_line_mask = PAGE_MASK;
8367
8368 if (((gpa + bytes - 1) & page_line_mask) != (gpa & page_line_mask))
8369 goto emul_write;
8370
8371 hva = kvm_vcpu_gfn_to_hva(vcpu, gpa_to_gfn(gpa));
8372 if (kvm_is_error_hva(hva))
8373 goto emul_write;
8374
8375 hva += offset_in_page(gpa);
8376
8377 switch (bytes) {
8378 case 1:
8379 r = emulator_try_cmpxchg_user(u8, hva, old, new);
8380 break;
8381 case 2:
8382 r = emulator_try_cmpxchg_user(u16, hva, old, new);
8383 break;
8384 case 4:
8385 r = emulator_try_cmpxchg_user(u32, hva, old, new);
8386 break;
8387 case 8:
8388 r = emulator_try_cmpxchg_user(u64, hva, old, new);
8389 break;
8390 default:
8391 BUG();
8392 }
8393
8394 if (r < 0)
8395 return X86EMUL_UNHANDLEABLE;
8396
8397 /*
8398 * Mark the page dirty _before_ checking whether or not the CMPXCHG was
8399 * successful, as the old value is written back on failure. Note, for
8400 * live migration, this is unnecessarily conservative as CMPXCHG writes
8401 * back the original value and the access is atomic, but KVM's ABI is
8402 * that all writes are dirty logged, regardless of the value written.
8403 */
8404 kvm_vcpu_mark_page_dirty(vcpu, gpa_to_gfn(gpa));
8405
8406 if (r)
8407 return X86EMUL_CMPXCHG_FAILED;
8408
8409 kvm_page_track_write(vcpu, gpa, new, bytes);
8410
8411 return X86EMUL_CONTINUE;
8412
8413 emul_write:
8414 pr_warn_once("emulating exchange as write\n");
8415
8416 return emulator_write_emulated(ctxt, addr, new, bytes, exception);
8417 }
8418
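/*
 * Returns 1 if all 'count' I/O operations were completed against in-kernel
 * devices, 0 if an exit to userspace (KVM_EXIT_IO) has been set up, with the
 * data staged in vcpu->arch.pio_data.
 */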
8419 static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
8420 unsigned short port, void *data,
8421 unsigned int count, bool in)
8422 {
8423 unsigned i;
8424 int r;
8425
8426 WARN_ON_ONCE(vcpu->arch.pio.count);
8427 for (i = 0; i < count; i++) {
8428 if (in)
8429 r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, port, size, data);
8430 else
8431 r = kvm_io_bus_write(vcpu, KVM_PIO_BUS, port, size, data);
8432
8433 if (r) {
8434 if (i == 0)
8435 goto userspace_io;
8436
8437 /*
8438 * Userspace must have unregistered the device while PIO
8439 * was running. Drop writes / read as 0.
8440 */
8441 if (in)
8442 memset(data, 0, size * (count - i));
8443 break;
8444 }
8445
8446 data += size;
8447 }
8448 return 1;
8449
8450 userspace_io:
8451 vcpu->arch.pio.port = port;
8452 vcpu->arch.pio.in = in;
8453 vcpu->arch.pio.count = count;
8454 vcpu->arch.pio.size = size;
8455
8456 if (in)
8457 memset(vcpu->arch.pio_data, 0, size * count);
8458 else
8459 memcpy(vcpu->arch.pio_data, data, size * count);
8460
8461 vcpu->run->exit_reason = KVM_EXIT_IO;
8462 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
8463 vcpu->run->io.size = size;
8464 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
8465 vcpu->run->io.count = count;
8466 vcpu->run->io.port = port;
8467 return 0;
8468 }
8469
8470 static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
8471 unsigned short port, void *val, unsigned int count)
8472 {
8473 int r = emulator_pio_in_out(vcpu, size, port, val, count, true);
8474 if (r)
8475 trace_kvm_pio(KVM_PIO_IN, port, size, count, val);
8476
8477 return r;
8478 }
8479
8480 static void complete_emulator_pio_in(struct kvm_vcpu *vcpu, void *val)
8481 {
8482 int size = vcpu->arch.pio.size;
8483 unsigned int count = vcpu->arch.pio.count;
8484 memcpy(val, vcpu->arch.pio_data, size * count);
8485 trace_kvm_pio(KVM_PIO_IN, vcpu->arch.pio.port, size, count, vcpu->arch.pio_data);
8486 vcpu->arch.pio.count = 0;
8487 }
8488
8489 static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
8490 int size, unsigned short port, void *val,
8491 unsigned int count)
8492 {
8493 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
8494 if (vcpu->arch.pio.count) {
8495 /*
8496 * Complete a previous iteration that required userspace I/O.
8497 * Note, @count isn't guaranteed to match pio.count as userspace
8498 * can modify ECX before rerunning the vCPU. Ignore any such
8499 * shenanigans as KVM doesn't support modifying the rep count,
8500 * and the emulator ensures @count doesn't overflow the buffer.
8501 */
8502 complete_emulator_pio_in(vcpu, val);
8503 return 1;
8504 }
8505
8506 return emulator_pio_in(vcpu, size, port, val, count);
8507 }
8508
8509 static int emulator_pio_out(struct kvm_vcpu *vcpu, int size,
8510 unsigned short port, const void *val,
8511 unsigned int count)
8512 {
8513 trace_kvm_pio(KVM_PIO_OUT, port, size, count, val);
8514 return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
8515 }
8516
8517 static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
8518 int size, unsigned short port,
8519 const void *val, unsigned int count)
8520 {
8521 return emulator_pio_out(emul_to_vcpu(ctxt), size, port, val, count);
8522 }
8523
8524 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
8525 {
8526 return kvm_x86_call(get_segment_base)(vcpu, seg);
8527 }
8528
8529 static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
8530 {
8531 kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);
8532 }
8533
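/*
 * When WBINVD exiting is available, flush caches on every physical CPU
 * recorded in vcpu->arch.wbinvd_dirty_mask (CPUs this vCPU may have dirtied),
 * plus the current one; otherwise simply execute WBINVD on the local CPU.
 */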
8534 static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
8535 {
8536 if (!need_emulate_wbinvd(vcpu))
8537 return X86EMUL_CONTINUE;
8538
8539 if (kvm_x86_call(has_wbinvd_exit)()) {
8540 int cpu = get_cpu();
8541
8542 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
8543 wbinvd_on_cpus_mask(vcpu->arch.wbinvd_dirty_mask);
8544 put_cpu();
8545 cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
8546 } else
8547 wbinvd();
8548 return X86EMUL_CONTINUE;
8549 }
8550
8551 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
8552 {
8553 kvm_emulate_wbinvd_noskip(vcpu);
8554 return kvm_skip_emulated_instruction(vcpu);
8555 }
8556 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_wbinvd);
8557
8558
8559
8560 static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
8561 {
8562 kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt));
8563 }
8564
8565 static unsigned long emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr)
8566 {
8567 return kvm_get_dr(emul_to_vcpu(ctxt), dr);
8568 }
8569
8570 static int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
8571 unsigned long value)
8572 {
8573
8574 return kvm_set_dr(emul_to_vcpu(ctxt), dr, value);
8575 }
8576
8577 static u64 mk_cr_64(u64 curr_cr, u32 new_val)
8578 {
8579 return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
8580 }
8581
8582 static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr)
8583 {
8584 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
8585 unsigned long value;
8586
8587 switch (cr) {
8588 case 0:
8589 value = kvm_read_cr0(vcpu);
8590 break;
8591 case 2:
8592 value = vcpu->arch.cr2;
8593 break;
8594 case 3:
8595 value = kvm_read_cr3(vcpu);
8596 break;
8597 case 4:
8598 value = kvm_read_cr4(vcpu);
8599 break;
8600 case 8:
8601 value = kvm_get_cr8(vcpu);
8602 break;
8603 default:
8604 kvm_err("%s: unexpected cr %u\n", __func__, cr);
8605 return 0;
8606 }
8607
8608 return value;
8609 }
8610
8611 static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val)
8612 {
8613 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
8614 int res = 0;
8615
8616 switch (cr) {
8617 case 0:
8618 res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
8619 break;
8620 case 2:
8621 vcpu->arch.cr2 = val;
8622 break;
8623 case 3:
8624 res = kvm_set_cr3(vcpu, val);
8625 break;
8626 case 4:
8627 res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
8628 break;
8629 case 8:
8630 res = kvm_set_cr8(vcpu, val);
8631 break;
8632 default:
8633 kvm_err("%s: unexpected cr %u\n", __func__, cr);
8634 res = -1;
8635 }
8636
8637 return res;
8638 }
8639
8640 static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt)
8641 {
8642 return kvm_x86_call(get_cpl)(emul_to_vcpu(ctxt));
8643 }
8644
8645 static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
8646 {
8647 kvm_x86_call(get_gdt)(emul_to_vcpu(ctxt), dt);
8648 }
8649
8650 static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
8651 {
8652 kvm_x86_call(get_idt)(emul_to_vcpu(ctxt), dt);
8653 }
8654
8655 static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
8656 {
8657 kvm_x86_call(set_gdt)(emul_to_vcpu(ctxt), dt);
8658 }
8659
8660 static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
8661 {
8662 kvm_x86_call(set_idt)(emul_to_vcpu(ctxt), dt);
8663 }
8664
8665 static unsigned long emulator_get_cached_segment_base(
8666 struct x86_emulate_ctxt *ctxt, int seg)
8667 {
8668 return get_segment_base(emul_to_vcpu(ctxt), seg);
8669 }
8670
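/*
 * Convert a struct kvm_segment into the emulator's struct desc_struct.  Note
 * the granularity handling: when the G bit is set the cached limit is scaled
 * back down to 4K units here, and emulator_set_segment() below applies the
 * inverse ((limit << 12) | 0xfff).
 */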
8671 static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
8672 struct desc_struct *desc, u32 *base3,
8673 int seg)
8674 {
8675 struct kvm_segment var;
8676
8677 kvm_get_segment(emul_to_vcpu(ctxt), &var, seg);
8678 *selector = var.selector;
8679
8680 if (var.unusable) {
8681 memset(desc, 0, sizeof(*desc));
8682 if (base3)
8683 *base3 = 0;
8684 return false;
8685 }
8686
8687 if (var.g)
8688 var.limit >>= 12;
8689 set_desc_limit(desc, var.limit);
8690 set_desc_base(desc, (unsigned long)var.base);
8691 #ifdef CONFIG_X86_64
8692 if (base3)
8693 *base3 = var.base >> 32;
8694 #endif
8695 desc->type = var.type;
8696 desc->s = var.s;
8697 desc->dpl = var.dpl;
8698 desc->p = var.present;
8699 desc->avl = var.avl;
8700 desc->l = var.l;
8701 desc->d = var.db;
8702 desc->g = var.g;
8703
8704 return true;
8705 }
8706
8707 static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector,
8708 struct desc_struct *desc, u32 base3,
8709 int seg)
8710 {
8711 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
8712 struct kvm_segment var;
8713
8714 var.selector = selector;
8715 var.base = get_desc_base(desc);
8716 #ifdef CONFIG_X86_64
8717 var.base |= ((u64)base3) << 32;
8718 #endif
8719 var.limit = get_desc_limit(desc);
8720 if (desc->g)
8721 var.limit = (var.limit << 12) | 0xfff;
8722 var.type = desc->type;
8723 var.dpl = desc->dpl;
8724 var.db = desc->d;
8725 var.s = desc->s;
8726 var.l = desc->l;
8727 var.g = desc->g;
8728 var.avl = desc->avl;
8729 var.present = desc->p;
8730 var.unusable = !var.present;
8731 var.padding = 0;
8732
8733 kvm_set_segment(vcpu, &var, seg);
8734 return;
8735 }
8736
8737 static int emulator_get_msr_with_filter(struct x86_emulate_ctxt *ctxt,
8738 u32 msr_index, u64 *pdata)
8739 {
8740 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
8741 int r;
8742
8743 r = kvm_emulate_msr_read(vcpu, msr_index, pdata);
8744 if (r < 0)
8745 return X86EMUL_UNHANDLEABLE;
8746
8747 if (r) {
8748 if (kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_RDMSR, 0,
8749 complete_emulated_rdmsr, r))
8750 return X86EMUL_IO_NEEDED;
8751
8752 trace_kvm_msr_read_ex(msr_index);
8753 return X86EMUL_PROPAGATE_FAULT;
8754 }
8755
8756 trace_kvm_msr_read(msr_index, *pdata);
8757 return X86EMUL_CONTINUE;
8758 }
8759
8760 static int emulator_set_msr_with_filter(struct x86_emulate_ctxt *ctxt,
8761 u32 msr_index, u64 data)
8762 {
8763 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
8764 int r;
8765
8766 r = kvm_emulate_msr_write(vcpu, msr_index, data);
8767 if (r < 0)
8768 return X86EMUL_UNHANDLEABLE;
8769
8770 if (r) {
8771 if (kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_WRMSR, data,
8772 complete_emulated_msr_access, r))
8773 return X86EMUL_IO_NEEDED;
8774
8775 trace_kvm_msr_write_ex(msr_index, data);
8776 return X86EMUL_PROPAGATE_FAULT;
8777 }
8778
8779 trace_kvm_msr_write(msr_index, data);
8780 return X86EMUL_CONTINUE;
8781 }
8782
8783 static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
8784 u32 msr_index, u64 *pdata)
8785 {
8786 /*
8787 * Treat emulator accesses to the current shadow stack pointer as host-
8788 * initiated, as they aren't true MSR accesses (SSP is just a register),
8789 * and this API is used only for implicit accesses, i.e. not RDMSR, and
8790 * so the index is fully KVM-controlled.
8791 */
8792 if (unlikely(msr_index == MSR_KVM_INTERNAL_GUEST_SSP))
8793 return kvm_msr_read(emul_to_vcpu(ctxt), msr_index, pdata);
8794
8795 return __kvm_emulate_msr_read(emul_to_vcpu(ctxt), msr_index, pdata);
8796 }
8797
8798 static int emulator_check_rdpmc_early(struct x86_emulate_ctxt *ctxt, u32 pmc)
8799 {
8800 return kvm_pmu_check_rdpmc_early(emul_to_vcpu(ctxt), pmc);
8801 }
8802
8803 static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
8804 u32 pmc, u64 *pdata)
8805 {
8806 return kvm_pmu_rdpmc(emul_to_vcpu(ctxt), pmc, pdata);
8807 }
8808
8809 static void emulator_halt(struct x86_emulate_ctxt *ctxt)
8810 {
8811 emul_to_vcpu(ctxt)->arch.halt_request = 1;
8812 }
8813
8814 static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
8815 struct x86_instruction_info *info,
8816 enum x86_intercept_stage stage)
8817 {
8818 return kvm_x86_call(check_intercept)(emul_to_vcpu(ctxt), info, stage,
8819 &ctxt->exception);
8820 }
8821
8822 static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
8823 u32 *eax, u32 *ebx, u32 *ecx, u32 *edx,
8824 bool exact_only)
8825 {
8826 return kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx, exact_only);
8827 }
8828
8829 static bool emulator_guest_has_movbe(struct x86_emulate_ctxt *ctxt)
8830 {
8831 return guest_cpu_cap_has(emul_to_vcpu(ctxt), X86_FEATURE_MOVBE);
8832 }
8833
8834 static bool emulator_guest_has_fxsr(struct x86_emulate_ctxt *ctxt)
8835 {
8836 return guest_cpu_cap_has(emul_to_vcpu(ctxt), X86_FEATURE_FXSR);
8837 }
8838
8839 static bool emulator_guest_has_rdpid(struct x86_emulate_ctxt *ctxt)
8840 {
8841 return guest_cpu_cap_has(emul_to_vcpu(ctxt), X86_FEATURE_RDPID);
8842 }
8843
8844 static bool emulator_guest_cpuid_is_intel_compatible(struct x86_emulate_ctxt *ctxt)
8845 {
8846 return guest_cpuid_is_intel_compatible(emul_to_vcpu(ctxt));
8847 }
8848
8849 static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg)
8850 {
8851 return kvm_register_read_raw(emul_to_vcpu(ctxt), reg);
8852 }
8853
8854 static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val)
8855 {
8856 kvm_register_write_raw(emul_to_vcpu(ctxt), reg, val);
8857 }
8858
8859 static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked)
8860 {
8861 kvm_x86_call(set_nmi_mask)(emul_to_vcpu(ctxt), masked);
8862 }
8863
8864 static bool emulator_is_smm(struct x86_emulate_ctxt *ctxt)
8865 {
8866 return is_smm(emul_to_vcpu(ctxt));
8867 }
8868
8869 #ifndef CONFIG_KVM_SMM
8870 static int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
8871 {
8872 WARN_ON_ONCE(1);
8873 return X86EMUL_UNHANDLEABLE;
8874 }
8875 #endif
8876
8877 static void emulator_triple_fault(struct x86_emulate_ctxt *ctxt)
8878 {
8879 kvm_make_request(KVM_REQ_TRIPLE_FAULT, emul_to_vcpu(ctxt));
8880 }
8881
8882 static int emulator_get_xcr(struct x86_emulate_ctxt *ctxt, u32 index, u64 *xcr)
8883 {
8884 if (index != XCR_XFEATURE_ENABLED_MASK)
8885 return 1;
8886 *xcr = emul_to_vcpu(ctxt)->arch.xcr0;
8887 return 0;
8888 }
8889
8890 static int emulator_set_xcr(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr)
8891 {
8892 return __kvm_set_xcr(emul_to_vcpu(ctxt), index, xcr);
8893 }
8894
8895 static void emulator_vm_bugged(struct x86_emulate_ctxt *ctxt)
8896 {
8897 struct kvm *kvm = emul_to_vcpu(ctxt)->kvm;
8898
8899 if (!kvm->vm_bugged)
8900 kvm_vm_bugged(kvm);
8901 }
8902
8903 static gva_t emulator_get_untagged_addr(struct x86_emulate_ctxt *ctxt,
8904 gva_t addr, unsigned int flags)
8905 {
8906 if (!kvm_x86_ops.get_untagged_addr)
8907 return addr;
8908
8909 return kvm_x86_call(get_untagged_addr)(emul_to_vcpu(ctxt),
8910 addr, flags);
8911 }
8912
8913 static bool emulator_is_canonical_addr(struct x86_emulate_ctxt *ctxt,
8914 gva_t addr, unsigned int flags)
8915 {
8916 return !is_noncanonical_address(addr, emul_to_vcpu(ctxt), flags);
8917 }
8918
8919 static bool emulator_page_address_valid(struct x86_emulate_ctxt *ctxt, gpa_t gpa)
8920 {
8921 return page_address_valid(emul_to_vcpu(ctxt), gpa);
8922 }
8923
8924 static const struct x86_emulate_ops emulate_ops = {
8925 .vm_bugged = emulator_vm_bugged,
8926 .read_gpr = emulator_read_gpr,
8927 .write_gpr = emulator_write_gpr,
8928 .read_std = emulator_read_std,
8929 .write_std = emulator_write_std,
8930 .fetch = kvm_fetch_guest_virt,
8931 .read_emulated = emulator_read_emulated,
8932 .write_emulated = emulator_write_emulated,
8933 .cmpxchg_emulated = emulator_cmpxchg_emulated,
8934 .invlpg = emulator_invlpg,
8935 .pio_in_emulated = emulator_pio_in_emulated,
8936 .pio_out_emulated = emulator_pio_out_emulated,
8937 .get_segment = emulator_get_segment,
8938 .set_segment = emulator_set_segment,
8939 .get_cached_segment_base = emulator_get_cached_segment_base,
8940 .get_gdt = emulator_get_gdt,
8941 .get_idt = emulator_get_idt,
8942 .set_gdt = emulator_set_gdt,
8943 .set_idt = emulator_set_idt,
8944 .get_cr = emulator_get_cr,
8945 .set_cr = emulator_set_cr,
8946 .cpl = emulator_get_cpl,
8947 .get_dr = emulator_get_dr,
8948 .set_dr = emulator_set_dr,
8949 .set_msr_with_filter = emulator_set_msr_with_filter,
8950 .get_msr_with_filter = emulator_get_msr_with_filter,
8951 .get_msr = emulator_get_msr,
8952 .check_rdpmc_early = emulator_check_rdpmc_early,
8953 .read_pmc = emulator_read_pmc,
8954 .halt = emulator_halt,
8955 .wbinvd = emulator_wbinvd,
8956 .fix_hypercall = emulator_fix_hypercall,
8957 .intercept = emulator_intercept,
8958 .get_cpuid = emulator_get_cpuid,
8959 .guest_has_movbe = emulator_guest_has_movbe,
8960 .guest_has_fxsr = emulator_guest_has_fxsr,
8961 .guest_has_rdpid = emulator_guest_has_rdpid,
8962 .guest_cpuid_is_intel_compatible = emulator_guest_cpuid_is_intel_compatible,
8963 .set_nmi_mask = emulator_set_nmi_mask,
8964 .is_smm = emulator_is_smm,
8965 .leave_smm = emulator_leave_smm,
8966 .triple_fault = emulator_triple_fault,
8967 .get_xcr = emulator_get_xcr,
8968 .set_xcr = emulator_set_xcr,
8969 .get_untagged_addr = emulator_get_untagged_addr,
8970 .is_canonical_addr = emulator_is_canonical_addr,
8971 .page_address_valid = emulator_page_address_valid,
8972 };
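
/*
 * A minimal sketch of how the emulator consumes this table: emulate.c never
 * touches vCPU state directly, only via ctxt->ops, e.g. (local names below
 * are hypothetical):
 *
 *	struct desc_struct desc;
 *	u32 base3;
 *	u16 sel;
 *
 *	if (ctxt->ops->get_segment(ctxt, &sel, &desc, &base3, VCPU_SREG_CS))
 *		cs_base = get_desc_base(&desc);
 *
 * which lands in emulator_get_segment() above and, from there, in the vendor
 * module via kvm_get_segment().
 */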
8973
8974 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
8975 {
8976 u32 int_shadow = kvm_x86_call(get_interrupt_shadow)(vcpu);
8977 /*
8978 * An "sti; sti" sequence only disables interrupts for the first
8979 * instruction. So, if the last instruction, be it emulated or
8980 * not, left the system with the INT_STI flag enabled, it
8981 * means that the last instruction was an sti. We should not
8982 * leave the flag on in this case. The same goes for mov ss.
8983 */
8984 if (int_shadow & mask)
8985 mask = 0;
8986 if (unlikely(int_shadow || mask)) {
8987 kvm_x86_call(set_interrupt_shadow)(vcpu, mask);
8988 if (!mask)
8989 kvm_make_request(KVM_REQ_EVENT, vcpu);
8990 }
8991 }
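
/*
 * For reference, a guest sequence that creates the MOV SS shadow handled
 * above (illustrative only):
 *
 *	mov	ss, ax		; inhibits interrupts (and, on Intel, code
 *	mov	esp, ebp	; #DBs) until this second instruction retires
 *
 * If KVM emulates the first instruction, the emulator reports the shadow via
 * ctxt->interruptibility; toggle_interruptibility() then propagates it to the
 * vendor module, and raises KVM_REQ_EVENT once the shadow is dropped so that
 * any interrupt blocked by it is re-evaluated.
 */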
8992
8993 static void inject_emulated_exception(struct kvm_vcpu *vcpu)
8994 {
8995 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
8996
8997 if (ctxt->exception.vector == PF_VECTOR)
8998 kvm_inject_emulated_page_fault(vcpu, &ctxt->exception);
8999 else if (ctxt->exception.error_code_valid)
9000 kvm_queue_exception_e(vcpu, ctxt->exception.vector,
9001 ctxt->exception.error_code);
9002 else
9003 kvm_queue_exception(vcpu, ctxt->exception.vector);
9004 }
9005
9006 static struct x86_emulate_ctxt *alloc_emulate_ctxt(struct kvm_vcpu *vcpu)
9007 {
9008 struct x86_emulate_ctxt *ctxt;
9009
9010 ctxt = kmem_cache_zalloc(x86_emulator_cache, GFP_KERNEL_ACCOUNT);
9011 if (!ctxt) {
9012 pr_err("failed to allocate vcpu's emulator\n");
9013 return NULL;
9014 }
9015
9016 ctxt->vcpu = vcpu;
9017 ctxt->ops = &emulate_ops;
9018 vcpu->arch.emulate_ctxt = ctxt;
9019
9020 return ctxt;
9021 }
9022
9023 static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
9024 {
9025 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
9026 int cs_db, cs_l;
9027
9028 kvm_x86_call(get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
9029
9030 ctxt->gpa_available = false;
9031 ctxt->eflags = kvm_get_rflags(vcpu);
9032 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
9033
9034 ctxt->eip = kvm_rip_read(vcpu);
9035 ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
9036 (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 :
9037 (cs_l && is_long_mode(vcpu)) ? X86EMUL_MODE_PROT64 :
9038 cs_db ? X86EMUL_MODE_PROT32 :
9039 X86EMUL_MODE_PROT16;
9040 ctxt->interruptibility = 0;
9041 ctxt->have_exception = false;
9042 ctxt->exception.vector = -1;
9043 ctxt->perm_ok = false;
9044
9045 init_decode_cache(ctxt);
9046 vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
9047 }
9048
9049 void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
9050 {
9051 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
9052 int ret;
9053
9054 init_emulate_ctxt(vcpu);
9055
9056 ctxt->op_bytes = 2;
9057 ctxt->ad_bytes = 2;
9058 ctxt->_eip = ctxt->eip + inc_eip;
9059 ret = emulate_int_real(ctxt, irq);
9060
9061 if (ret != X86EMUL_CONTINUE) {
9062 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
9063 } else {
9064 ctxt->eip = ctxt->_eip;
9065 kvm_rip_write(vcpu, ctxt->eip);
9066 kvm_set_rflags(vcpu, ctxt->eflags);
9067 }
9068 }
9069 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_inject_realmode_interrupt);
9070
9071 static void prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data,
9072 u8 ndata, u8 *insn_bytes, u8 insn_size)
9073 {
9074 struct kvm_run *run = vcpu->run;
9075 u64 info[5];
9076 u8 info_start;
9077
9078 /*
9079 * Zero the whole array used to retrieve the exit info, as casting to
9080 * u32 for select entries will leave some chunks uninitialized.
9081 */
9082 memset(&info, 0, sizeof(info));
9083
9084 kvm_x86_call(get_exit_info)(vcpu, (u32 *)&info[0], &info[1], &info[2],
9085 (u32 *)&info[3], (u32 *)&info[4]);
9086
9087 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
9088 run->emulation_failure.suberror = KVM_INTERNAL_ERROR_EMULATION;
9089
9090 /*
9091 * There's currently space for 13 entries, but 5 are used for the exit
9092 * reason and info. Restrict to 4 to reduce the maintenance burden
9093 * when expanding kvm_run.emulation_failure in the future.
9094 */
9095 if (WARN_ON_ONCE(ndata > 4))
9096 ndata = 4;
9097
9098 /* Always include the flags as a 'data' entry. */
9099 info_start = 1;
9100 run->emulation_failure.flags = 0;
9101
9102 if (insn_size) {
9103 BUILD_BUG_ON((sizeof(run->emulation_failure.insn_size) +
9104 sizeof(run->emulation_failure.insn_bytes) != 16));
9105 info_start += 2;
9106 run->emulation_failure.flags |=
9107 KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES;
9108 run->emulation_failure.insn_size = insn_size;
9109 memset(run->emulation_failure.insn_bytes, 0x90,
9110 sizeof(run->emulation_failure.insn_bytes));
9111 memcpy(run->emulation_failure.insn_bytes, insn_bytes, insn_size);
9112 }
9113
9114 memcpy(&run->internal.data[info_start], info, sizeof(info));
9115 memcpy(&run->internal.data[info_start + ARRAY_SIZE(info)], data,
9116 ndata * sizeof(data[0]));
9117
9118 run->emulation_failure.ndata = info_start + ARRAY_SIZE(info) + ndata;
9119 }
9120
9121 static void prepare_emulation_ctxt_failure_exit(struct kvm_vcpu *vcpu)
9122 {
9123 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
9124
9125 prepare_emulation_failure_exit(vcpu, NULL, 0, ctxt->fetch.data,
9126 ctxt->fetch.end - ctxt->fetch.data);
9127 }
9128
9129 void __kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data,
9130 u8 ndata)
9131 {
9132 prepare_emulation_failure_exit(vcpu, data, ndata, NULL, 0);
9133 }
9134 EXPORT_SYMBOL_FOR_KVM_INTERNAL(__kvm_prepare_emulation_failure_exit);
9135
9136 void kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu)
9137 {
9138 __kvm_prepare_emulation_failure_exit(vcpu, NULL, 0);
9139 }
9140 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_prepare_emulation_failure_exit);
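
/*
 * Userspace sees the result as KVM_EXIT_INTERNAL_ERROR.  A hedged sketch of
 * how a VMM might consume it (dump_insn() is a hypothetical helper):
 *
 *	if (run->exit_reason == KVM_EXIT_INTERNAL_ERROR &&
 *	    run->emulation_failure.suberror == KVM_INTERNAL_ERROR_EMULATION &&
 *	    (run->emulation_failure.flags &
 *	     KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES))
 *		dump_insn(run->emulation_failure.insn_bytes,
 *			  run->emulation_failure.insn_size);
 *
 * The ndata/flags fields let the layout grow without breaking old userspace.
 */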
9141
9142 void kvm_prepare_event_vectoring_exit(struct kvm_vcpu *vcpu, gpa_t gpa)
9143 {
9144 u32 reason, intr_info, error_code;
9145 struct kvm_run *run = vcpu->run;
9146 u64 info1, info2;
9147 int ndata = 0;
9148
9149 kvm_x86_call(get_exit_info)(vcpu, &reason, &info1, &info2,
9150 &intr_info, &error_code);
9151
9152 run->internal.data[ndata++] = info2;
9153 run->internal.data[ndata++] = reason;
9154 run->internal.data[ndata++] = info1;
9155 run->internal.data[ndata++] = gpa;
9156 run->internal.data[ndata++] = vcpu->arch.last_vmentry_cpu;
9157
9158 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
9159 run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
9160 run->internal.ndata = ndata;
9161 }
9162 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_prepare_event_vectoring_exit);
9163
9164 void kvm_prepare_unexpected_reason_exit(struct kvm_vcpu *vcpu, u64 exit_reason)
9165 {
9166 vcpu_unimpl(vcpu, "unexpected exit reason 0x%llx\n", exit_reason);
9167
9168 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
9169 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
9170 vcpu->run->internal.ndata = 2;
9171 vcpu->run->internal.data[0] = exit_reason;
9172 vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
9173 }
9174 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_prepare_unexpected_reason_exit);
9175
9176 static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type)
9177 {
9178 struct kvm *kvm = vcpu->kvm;
9179
9180 ++vcpu->stat.insn_emulation_fail;
9181 trace_kvm_emulate_insn_failed(vcpu);
9182
9183 if (emulation_type & EMULTYPE_VMWARE_GP) {
9184 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
9185 return 1;
9186 }
9187
9188 if (kvm->arch.exit_on_emulation_error ||
9189 (emulation_type & EMULTYPE_SKIP)) {
9190 prepare_emulation_ctxt_failure_exit(vcpu);
9191 return 0;
9192 }
9193
9194 kvm_queue_exception(vcpu, UD_VECTOR);
9195
9196 if (!is_guest_mode(vcpu) && kvm_x86_call(get_cpl)(vcpu) == 0) {
9197 prepare_emulation_ctxt_failure_exit(vcpu);
9198 return 0;
9199 }
9200
9201 return 1;
9202 }
9203
9204 static bool kvm_unprotect_and_retry_on_failure(struct kvm_vcpu *vcpu,
9205 gpa_t cr2_or_gpa,
9206 int emulation_type)
9207 {
9208 if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF))
9209 return false;
9210
9211 /*
9212 * If the failed instruction faulted on an access to page tables that
9213 * are used to translate any part of the instruction, KVM can't resolve
9214 * the issue by unprotecting the gfn, as zapping the shadow page will
9215 * result in the instruction taking a !PRESENT page fault and thus put
9216 * the vCPU into an infinite loop of page faults. E.g. KVM will create
9217 * a SPTE and write-protect the gfn to resolve the !PRESENT fault, and
9218 * then zap the SPTE to unprotect the gfn, and then do it all over
9219 * again. Report the error to userspace.
9220 */
9221 if (emulation_type & EMULTYPE_WRITE_PF_TO_SP)
9222 return false;
9223
9224 /*
9225 * If emulation may have been triggered by a write to a shadowed page
9226 * table, unprotect the gfn (zap any relevant SPTEs) and re-enter the
9227 * guest to let the CPU re-execute the instruction in the hope that the
9228 * CPU can cleanly execute the instruction that KVM failed to emulate.
9229 */
9230 __kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa, true);
9231
9232 /*
9233 * Retry even if _this_ vCPU didn't unprotect the gfn, as it's possible
9234 * all SPTEs were already zapped by a different task. The alternative
9235 * is to report the error to userspace and likely terminate the guest,
9236 * and the last_retry_{eip,addr} checks will prevent retrying the page
9237 * fault indefinitely, i.e. there's nothing to lose by retrying.
9238 */
9239 return true;
9240 }
9241
9242 static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
9243 static int complete_emulated_pio(struct kvm_vcpu *vcpu);
9244
9245 static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
9246 unsigned long *db)
9247 {
9248 u32 dr6 = 0;
9249 int i;
9250 u32 enable, rwlen;
9251
9252 enable = dr7;
9253 rwlen = dr7 >> 16;
9254 for (i = 0; i < 4; i++, enable >>= 2, rwlen >>= 4)
9255 if ((enable & 3) && (rwlen & 15) == type && db[i] == addr)
9256 dr6 |= (1 << i);
9257 return dr6;
9258 }
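
/*
 * DR7 layout recap for the loop above (architectural, shown for a single
 * execute breakpoint armed in DR0):
 *
 *	dr7 = 0x1;			// L0 set: DR0 locally enabled
 *	enable = dr7;			// (enable & 3) != 0 for i == 0
 *	rwlen = dr7 >> 16;		// R/W0 = 0 (execute), LEN0 = 0
 *	if ((rwlen & 15) == 0 && db[0] == addr)
 *		dr6 |= BIT(0);		// report the hit as DR6.B0
 *
 * Each subsequent iteration shifts to the enable pair (2 bits) and R/W+LEN
 * nibble (4 bits) of the next debug register.
 */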
9259
9260 static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu)
9261 {
9262 struct kvm_run *kvm_run = vcpu->run;
9263
9264 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
9265 kvm_run->debug.arch.dr6 = DR6_BS | DR6_ACTIVE_LOW;
9266 kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu);
9267 kvm_run->debug.arch.exception = DB_VECTOR;
9268 kvm_run->exit_reason = KVM_EXIT_DEBUG;
9269 return 0;
9270 }
9271 kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BS);
9272 return 1;
9273 }
9274
9275 int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
9276 {
9277 unsigned long rflags = kvm_x86_call(get_rflags)(vcpu);
9278 int r;
9279
9280 r = kvm_x86_call(skip_emulated_instruction)(vcpu);
9281 if (unlikely(!r))
9282 return 0;
9283
9284 kvm_pmu_instruction_retired(vcpu);
9285
9286 /*
9287 * rflags is the old, "raw" value of the flags. The new value has
9288 * not been saved yet.
9289 *
9290 * This is correct even for TF set by the guest, because "the
9291 * processor will not generate this exception after the instruction
9292 * that sets the TF flag".
9293 */
9294 if (unlikely(rflags & X86_EFLAGS_TF))
9295 r = kvm_vcpu_do_singlestep(vcpu);
9296 return r;
9297 }
9298 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_skip_emulated_instruction);
9299
9300 static bool kvm_is_code_breakpoint_inhibited(struct kvm_vcpu *vcpu)
9301 {
9302 if (kvm_get_rflags(vcpu) & X86_EFLAGS_RF)
9303 return true;
9304
9305 /*
9306 * Intel compatible CPUs inhibit code #DBs when MOV/POP SS blocking is
9307 * active, but AMD compatible CPUs do not.
9308 */
9309 if (!guest_cpuid_is_intel_compatible(vcpu))
9310 return false;
9311
9312 return kvm_x86_call(get_interrupt_shadow)(vcpu) & KVM_X86_SHADOW_INT_MOV_SS;
9313 }
9314
9315 static bool kvm_vcpu_check_code_breakpoint(struct kvm_vcpu *vcpu,
9316 int emulation_type, int *r)
9317 {
9318 WARN_ON_ONCE(emulation_type & EMULTYPE_NO_DECODE);
9319
9320 /*
9321 * Do not check for code breakpoints if hardware has already done the
9322 * checks, as inferred from the emulation type. On NO_DECODE and SKIP,
9323 * the instruction has passed all exception checks, and all intercepted
9324 * exceptions that trigger emulation have lower priority than code
9325 * breakpoints, i.e. the fact that the intercepted exception occurred
9326 * means any code breakpoints have already been serviced.
9327 *
9328 * Note, KVM needs to check for code #DBs on EMULTYPE_TRAP_UD_FORCED as
9329 * hardware has checked the RIP of the magic prefix, but not the RIP of
9330 * the instruction being emulated. The intent of forced emulation is
9331 * to behave as if KVM intercepted the instruction without an exception
9332 * and without a prefix.
9333 */
9334 if (emulation_type & (EMULTYPE_NO_DECODE | EMULTYPE_SKIP |
9335 EMULTYPE_TRAP_UD | EMULTYPE_VMWARE_GP | EMULTYPE_PF))
9336 return false;
9337
9338 if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) &&
9339 (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) {
9340 struct kvm_run *kvm_run = vcpu->run;
9341 unsigned long eip = kvm_get_linear_rip(vcpu);
9342 u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
9343 vcpu->arch.guest_debug_dr7,
9344 vcpu->arch.eff_db);
9345
9346 if (dr6 != 0) {
9347 kvm_run->debug.arch.dr6 = dr6 | DR6_ACTIVE_LOW;
9348 kvm_run->debug.arch.pc = eip;
9349 kvm_run->debug.arch.exception = DB_VECTOR;
9350 kvm_run->exit_reason = KVM_EXIT_DEBUG;
9351 *r = 0;
9352 return true;
9353 }
9354 }
9355
9356 if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) &&
9357 !kvm_is_code_breakpoint_inhibited(vcpu)) {
9358 unsigned long eip = kvm_get_linear_rip(vcpu);
9359 u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
9360 vcpu->arch.dr7,
9361 vcpu->arch.db);
9362
9363 if (dr6 != 0) {
9364 kvm_queue_exception_p(vcpu, DB_VECTOR, dr6);
9365 *r = 1;
9366 return true;
9367 }
9368 }
9369
9370 return false;
9371 }
9372
9373 static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt)
9374 {
9375 switch (ctxt->opcode_len) {
9376 case 1:
9377 switch (ctxt->b) {
9378 case 0xe4: /* IN */
9379 case 0xe5:
9380 case 0xec:
9381 case 0xed:
9382 case 0xe6: /* OUT */
9383 case 0xe7:
9384 case 0xee:
9385 case 0xef:
9386 case 0x6c: /* INS */
9387 case 0x6d:
9388 case 0x6e: /* OUTS */
9389 case 0x6f:
9390 return true;
9391 }
9392 break;
9393 case 2:
9394 switch (ctxt->b) {
9395 case 0x33: /* RDPMC */
9396 return true;
9397 }
9398 break;
9399 }
9400
9401 return false;
9402 }
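
/*
 * Background, for illustration only: the VMware backdoor is driven from the
 * guest with I/O (and RDPMC) instructions on a magic port, e.g.
 *
 *	mov	eax, 0x564D5868		; 'VMXh' magic value
 *	mov	dx, 0x5658		; backdoor port
 *	in	eax, dx			; #GP(0) when CPL3 I/O is disallowed
 *
 * With VMware backdoor emulation enabled, the resulting #GP is intercepted
 * and re-emulated with EMULTYPE_VMWARE_GP; only the opcodes listed above are
 * eligible, anything else has the #GP reflected back into the guest (see
 * x86_emulate_instruction()).
 */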
9403
9404 static bool is_soft_int_instruction(struct x86_emulate_ctxt *ctxt,
9405 int emulation_type)
9406 {
9407 u8 vector = EMULTYPE_GET_SOFT_INT_VECTOR(emulation_type);
9408
9409 switch (ctxt->b) {
9410 case 0xcc:
9411 return vector == BP_VECTOR;
9412 case 0xcd:
9413 return vector == ctxt->src.val;
9414 case 0xce:
9415 return vector == OF_VECTOR;
9416 default:
9417 return false;
9418 }
9419 }
9420
9421 /*
9422 * Decode an instruction for emulation. The caller is responsible for handling
9423 * code breakpoints. Note, manually detecting code breakpoints is unnecessary
9424 * (and wrong) when emulating on an intercepted fault-like exception[*], as
9425 * code breakpoints have higher priority and thus have already been done by
9426 * hardware.
9427 *
9428 * [*] Except #MC, which is higher priority, but KVM should never emulate in
9429 * response to a machine check.
9430 */
9431 int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
9432 void *insn, int insn_len)
9433 {
9434 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
9435 int r;
9436
9437 init_emulate_ctxt(vcpu);
9438
9439 r = x86_decode_insn(ctxt, insn, insn_len, emulation_type);
9440
9441 trace_kvm_emulate_insn_start(vcpu);
9442 ++vcpu->stat.insn_emulation;
9443
9444 return r;
9445 }
9446 EXPORT_SYMBOL_FOR_KVM_INTERNAL(x86_decode_emulated_instruction);
9447
9448 int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
9449 int emulation_type, void *insn, int insn_len)
9450 {
9451 int r;
9452 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
9453 bool writeback = true;
9454
9455 if ((emulation_type & EMULTYPE_ALLOW_RETRY_PF) &&
9456 (WARN_ON_ONCE(is_guest_mode(vcpu)) ||
9457 WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF))))
9458 emulation_type &= ~EMULTYPE_ALLOW_RETRY_PF;
9459
9460 r = kvm_check_emulate_insn(vcpu, emulation_type, insn, insn_len);
9461 if (r != X86EMUL_CONTINUE) {
9462 if (r == X86EMUL_RETRY_INSTR || r == X86EMUL_PROPAGATE_FAULT)
9463 return 1;
9464
9465 if (kvm_unprotect_and_retry_on_failure(vcpu, cr2_or_gpa,
9466 emulation_type))
9467 return 1;
9468
9469 if (r == X86EMUL_UNHANDLEABLE_VECTORING) {
9470 kvm_prepare_event_vectoring_exit(vcpu, cr2_or_gpa);
9471 return 0;
9472 }
9473
9474 WARN_ON_ONCE(r != X86EMUL_UNHANDLEABLE);
9475 return handle_emulation_failure(vcpu, emulation_type);
9476 }
9477
9478 kvm_request_l1tf_flush_l1d();
9479
9480 if (!(emulation_type & EMULTYPE_NO_DECODE)) {
9481 kvm_clear_exception_queue(vcpu);
9482
9483 /*
9484 * Return immediately if RIP hits a code breakpoint, such #DBs
9485 * are fault-like and are higher priority than any faults on
9486 * the code fetch itself.
9487 */
9488 if (kvm_vcpu_check_code_breakpoint(vcpu, emulation_type, &r))
9489 return r;
9490
9491 r = x86_decode_emulated_instruction(vcpu, emulation_type,
9492 insn, insn_len);
9493 if (r != EMULATION_OK) {
9494 if ((emulation_type & EMULTYPE_TRAP_UD) ||
9495 (emulation_type & EMULTYPE_TRAP_UD_FORCED)) {
9496 kvm_queue_exception(vcpu, UD_VECTOR);
9497 return 1;
9498 }
9499 if (kvm_unprotect_and_retry_on_failure(vcpu, cr2_or_gpa,
9500 emulation_type))
9501 return 1;
9502
9503 if (ctxt->have_exception &&
9504 !(emulation_type & EMULTYPE_SKIP)) {
9505 /*
9506 * #UD should result in just EMULATION_FAILED, and trap-like
9507 * exceptions should not be encountered during decode.
9508 */
9509 WARN_ON_ONCE(ctxt->exception.vector == UD_VECTOR ||
9510 exception_type(ctxt->exception.vector) == EXCPT_TRAP);
9511 inject_emulated_exception(vcpu);
9512 return 1;
9513 }
9514 return handle_emulation_failure(vcpu, emulation_type);
9515 }
9516 }
9517
9518 if ((emulation_type & EMULTYPE_VMWARE_GP) &&
9519 !is_vmware_backdoor_opcode(ctxt)) {
9520 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
9521 return 1;
9522 }
9523
9524 /*
9525 * EMULTYPE_SKIP without EMULTYPE_COMPLETE_USER_EXIT is intended for
9526 * use *only* by vendor callbacks for kvm_skip_emulated_instruction().
9527 * The caller is responsible for updating interruptibility state and
9528 * injecting single-step #DBs.
9529 */
9530 if (emulation_type & EMULTYPE_SKIP) {
9531 if (emulation_type & EMULTYPE_SKIP_SOFT_INT &&
9532 !is_soft_int_instruction(ctxt, emulation_type))
9533 return 0;
9534
9535 if (ctxt->mode != X86EMUL_MODE_PROT64)
9536 ctxt->eip = (u32)ctxt->_eip;
9537 else
9538 ctxt->eip = ctxt->_eip;
9539
9540 if (emulation_type & EMULTYPE_COMPLETE_USER_EXIT) {
9541 r = 1;
9542 goto writeback;
9543 }
9544
9545 kvm_rip_write(vcpu, ctxt->eip);
9546 if (ctxt->eflags & X86_EFLAGS_RF)
9547 kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF);
9548 return 1;
9549 }
9550
9551 /*
9552 * If emulation was caused by a write-protection #PF on a non-page_table
9553 * writing instruction, try to unprotect the gfn, i.e. zap shadow pages,
9554 * and retry the instruction, as the vCPU is likely no longer using the
9555 * gfn as a page table.
9556 */
9557 if ((emulation_type & EMULTYPE_ALLOW_RETRY_PF) &&
9558 !x86_page_table_writing_insn(ctxt) &&
9559 kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa))
9560 return 1;
9561
9562 /* This is needed for the VMware backdoor interface to work, since it
9563 changes register values during the I/O operation. */
9564 if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
9565 vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
9566 emulator_invalidate_register_cache(ctxt);
9567 }
9568
9569 restart:
9570 if (emulation_type & EMULTYPE_PF) {
9571 /* Save the faulting GPA (cr2) in the address field */
9572 ctxt->exception.address = cr2_or_gpa;
9573
9574 /* With shadow page tables, cr2 contains a GVA or nGPA. */
9575 if (vcpu->arch.mmu->root_role.direct) {
9576 ctxt->gpa_available = true;
9577 ctxt->gpa_val = cr2_or_gpa;
9578 }
9579 } else {
9580 /* Sanitize the address out of an abundance of paranoia. */
9581 ctxt->exception.address = 0;
9582 }
9583
9584 /*
9585 * Check L1's instruction intercepts when emulating instructions for
9586 * L2, unless KVM is re-emulating a previously decoded instruction,
9587 * e.g. to complete userspace I/O, in which case KVM has already
9588 * checked the intercepts.
9589 */
9590 r = x86_emulate_insn(ctxt, is_guest_mode(vcpu) &&
9591 !(emulation_type & EMULTYPE_NO_DECODE));
9592
9593 if (r == EMULATION_INTERCEPTED)
9594 return 1;
9595
9596 if (r == EMULATION_FAILED) {
9597 if (kvm_unprotect_and_retry_on_failure(vcpu, cr2_or_gpa,
9598 emulation_type))
9599 return 1;
9600
9601 return handle_emulation_failure(vcpu, emulation_type);
9602 }
9603
9604 if (ctxt->have_exception) {
9605 WARN_ON_ONCE(vcpu->mmio_needed && !vcpu->mmio_is_write);
9606 vcpu->mmio_needed = false;
9607 r = 1;
9608 inject_emulated_exception(vcpu);
9609 } else if (vcpu->arch.pio.count) {
9610 if (!vcpu->arch.pio.in) {
9611 /* FIXME: return into emulator if single-stepping. */
9612 vcpu->arch.pio.count = 0;
9613 } else {
9614 writeback = false;
9615 vcpu->arch.complete_userspace_io = complete_emulated_pio;
9616 }
9617 r = 0;
9618 } else if (vcpu->mmio_needed) {
9619 ++vcpu->stat.mmio_exits;
9620
9621 if (!vcpu->mmio_is_write)
9622 writeback = false;
9623 r = 0;
9624 vcpu->arch.complete_userspace_io = complete_emulated_mmio;
9625 } else if (vcpu->arch.complete_userspace_io) {
9626 writeback = false;
9627 r = 0;
9628 } else if (r == EMULATION_RESTART)
9629 goto restart;
9630 else
9631 r = 1;
9632
9633 writeback:
9634 if (writeback) {
9635 unsigned long rflags = kvm_x86_call(get_rflags)(vcpu);
9636 toggle_interruptibility(vcpu, ctxt->interruptibility);
9637 vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
9638
9639 /*
9640 * Note, EXCPT_DB is assumed to be fault-like as the emulator
9641 * only supports code breakpoints and general detect #DB, both
9642 * of which are fault-like.
9643 */
9644 if (!ctxt->have_exception ||
9645 exception_type(ctxt->exception.vector) == EXCPT_TRAP) {
9646 kvm_pmu_instruction_retired(vcpu);
9647 if (ctxt->is_branch)
9648 kvm_pmu_branch_retired(vcpu);
9649 kvm_rip_write(vcpu, ctxt->eip);
9650 if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
9651 r = kvm_vcpu_do_singlestep(vcpu);
9652 kvm_x86_call(update_emulated_instruction)(vcpu);
9653 __kvm_set_rflags(vcpu, ctxt->eflags);
9654 }
9655
9656 /*
9657 * For STI, interrupts are shadowed; so KVM_REQ_EVENT will
9658 * do nothing, and it will be requested again as soon as
9659 * the shadow expires. But we still need to check here,
9660 * because POPF has no interrupt shadow.
9661 */
9662 if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF))
9663 kvm_make_request(KVM_REQ_EVENT, vcpu);
9664 } else
9665 vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
9666
9667 return r;
9668 }
9669
9670 int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type)
9671 {
9672 return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
9673 }
9674 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_instruction);
9675
9676 int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
9677 void *insn, int insn_len)
9678 {
9679 return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len);
9680 }
9681 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_instruction_from_buffer);
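
/*
 * Typical callers, sketched for orientation (vendor code, not this file): an
 * intercepted #GP that may be the VMware backdoor is re-emulated with
 *
 *	return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP);
 *
 * while MMU-triggered emulation passes EMULTYPE_PF (plus
 * EMULTYPE_ALLOW_RETRY_PF when unprotect-and-retry is safe).  Callers that
 * already hold the instruction bytes, e.g. from a hardware decode assist, use
 * kvm_emulate_instruction_from_buffer() instead.
 */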
9682
9683 static int complete_fast_pio_out_port_0x7e(struct kvm_vcpu *vcpu)
9684 {
9685 vcpu->arch.pio.count = 0;
9686 return 1;
9687 }
9688
9689 static int complete_fast_pio_out(struct kvm_vcpu *vcpu)
9690 {
9691 vcpu->arch.pio.count = 0;
9692
9693 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.cui_linear_rip)))
9694 return 1;
9695
9696 return kvm_skip_emulated_instruction(vcpu);
9697 }
9698
9699 static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
9700 unsigned short port)
9701 {
9702 unsigned long val = kvm_rax_read(vcpu);
9703 int ret = emulator_pio_out(vcpu, size, port, &val, 1);
9704
9705 if (ret)
9706 return ret;
9707
9708 /*
9709 * Work around userspace that relies on the old KVM behavior of %rip being
9710 * incremented prior to exiting to userspace to handle "OUT 0x7e".
9711 */
9712 if (port == 0x7e &&
9713 kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_OUT_7E_INC_RIP)) {
9714 vcpu->arch.complete_userspace_io =
9715 complete_fast_pio_out_port_0x7e;
9716 kvm_skip_emulated_instruction(vcpu);
9717 } else {
9718 vcpu->arch.cui_linear_rip = kvm_get_linear_rip(vcpu);
9719 vcpu->arch.complete_userspace_io = complete_fast_pio_out;
9720 }
9721 return 0;
9722 }
9723
9724 static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
9725 {
9726 unsigned long val;
9727
9728 /* We should only ever be called with arch.pio.count equal to 1 */
9729 if (KVM_BUG_ON(vcpu->arch.pio.count != 1, vcpu->kvm))
9730 return -EIO;
9731
9732 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.cui_linear_rip))) {
9733 vcpu->arch.pio.count = 0;
9734 return 1;
9735 }
9736
9737 /* For size less than 4 we merge, else we zero extend */
9738 val = (vcpu->arch.pio.size < 4) ? kvm_rax_read(vcpu) : 0;
9739
9740 complete_emulator_pio_in(vcpu, &val);
9741 kvm_rax_write(vcpu, val);
9742
9743 return kvm_skip_emulated_instruction(vcpu);
9744 }
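
/*
 * The "merge vs. zero extend" above mirrors architectural RAX handling for
 * IN (shown only for illustration):
 *
 *	in	al, dx		; size 1: RAX[63:8] preserved, AL merged
 *	in	eax, dx		; size 4: RAX[63:32] cleared (zero extended)
 *
 * hence kvm_rax_read() seeds the value only when vcpu->arch.pio.size < 4.
 */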
9745
9746 static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
9747 unsigned short port)
9748 {
9749 unsigned long val;
9750 int ret;
9751
9752 /* For size less than 4 we merge, else we zero extend */
9753 val = (size < 4) ? kvm_rax_read(vcpu) : 0;
9754
9755 ret = emulator_pio_in(vcpu, size, port, &val, 1);
9756 if (ret) {
9757 kvm_rax_write(vcpu, val);
9758 return ret;
9759 }
9760
9761 vcpu->arch.cui_linear_rip = kvm_get_linear_rip(vcpu);
9762 vcpu->arch.complete_userspace_io = complete_fast_pio_in;
9763
9764 return 0;
9765 }
9766
9767 int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in)
9768 {
9769 int ret;
9770
9771 if (in)
9772 ret = kvm_fast_pio_in(vcpu, size, port);
9773 else
9774 ret = kvm_fast_pio_out(vcpu, size, port);
9775 return ret && kvm_skip_emulated_instruction(vcpu);
9776 }
9777 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_fast_pio);
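
/*
 * Sketch of the userspace round trip (handle_port_io() is a hypothetical VMM
 * helper): kvm_fast_pio() returning 0 produces a KVM_EXIT_IO exit; userspace
 * services the port and calls KVM_RUN again, at which point the
 * complete_fast_pio_{in,out}() callback stashed in
 * vcpu->arch.complete_userspace_io finishes the instruction:
 *
 *	case KVM_EXIT_IO:
 *		handle_port_io(run->io.port, run->io.direction,
 *			       (u8 *)run + run->io.data_offset,
 *			       run->io.size, run->io.count);
 *		break;	// then ioctl(vcpu_fd, KVM_RUN, 0) again
 */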
9778
9779 static int kvmclock_cpu_down_prep(unsigned int cpu)
9780 {
9781 __this_cpu_write(cpu_tsc_khz, 0);
9782 return 0;
9783 }
9784
9785 static void tsc_khz_changed(void *data)
9786 {
9787 struct cpufreq_freqs *freq = data;
9788 unsigned long khz;
9789
9790 WARN_ON_ONCE(boot_cpu_has(X86_FEATURE_CONSTANT_TSC));
9791
9792 if (data)
9793 khz = freq->new;
9794 else
9795 khz = cpufreq_quick_get(raw_smp_processor_id());
9796 if (!khz)
9797 khz = tsc_khz;
9798 __this_cpu_write(cpu_tsc_khz, khz);
9799 }
9800
9801 #ifdef CONFIG_X86_64
9802 static void kvm_hyperv_tsc_notifier(void)
9803 {
9804 struct kvm *kvm;
9805 int cpu;
9806
9807 mutex_lock(&kvm_lock);
9808 list_for_each_entry(kvm, &vm_list, vm_list)
9809 kvm_make_mclock_inprogress_request(kvm);
9810
9811 /* no guest entries from this point */
9812 hyperv_stop_tsc_emulation();
9813
9814 /* TSC frequency always matches when on Hyper-V */
9815 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
9816 for_each_present_cpu(cpu)
9817 per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
9818 }
9819 kvm_caps.max_guest_tsc_khz = tsc_khz;
9820
9821 list_for_each_entry(kvm, &vm_list, vm_list) {
9822 __kvm_start_pvclock_update(kvm);
9823 pvclock_update_vm_gtod_copy(kvm);
9824 kvm_end_pvclock_update(kvm);
9825 }
9826
9827 mutex_unlock(&kvm_lock);
9828 }
9829 #endif
9830
9831 static void __kvmclock_cpufreq_notifier(struct cpufreq_freqs *freq, int cpu)
9832 {
9833 struct kvm *kvm;
9834 struct kvm_vcpu *vcpu;
9835 int send_ipi = 0;
9836 unsigned long i;
9837
9838 /*
9839 * We allow guests to temporarily run on slowing clocks,
9840 * provided we notify them after, or to run on accelerating
9841 * clocks, provided we notify them before. Thus time never
9842 * goes backwards.
9843 *
9844 * However, we have a problem. We can't atomically update
9845 * the frequency of a given CPU from this function; it is
9846 * merely a notifier, which can be called from any CPU.
9847 * Changing the TSC frequency at arbitrary points in time
9848 * requires a recomputation of local variables related to
9849 * the TSC for each VCPU. We must flag these local variables
9850 * to be updated and be sure the update takes place with the
9851 * new frequency before any guests proceed.
9852 *
9853 * Unfortunately, the combination of hotplug CPU and frequency
9854 * change creates an intractable locking scenario; the order
9855 * of when these callouts happen is undefined with respect to
9856 * CPU hotplug, and they can race with each other. As such,
9857 * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is
9858 * undefined; you can actually have a CPU frequency change take
9859 * place in between the computation of X and the setting of the
9860 * variable. To protect against this problem, all updates of
9861 * the per_cpu tsc_khz variable are done in an interrupt
9862 * protected IPI, and all callers wishing to update the value
9863 * must wait for a synchronous IPI to complete (which is trivial
9864 * if the caller is on the CPU already). This establishes the
9865 * necessary total order on variable updates.
9866 *
9867 * Note that because a guest time update may take place
9868 * anytime after the setting of the VCPU's request bit, the
9869 * correct TSC value must be set before the request. However,
9870 * to ensure the update actually makes it to any guest which
9871 * starts running in hardware virtualization between the set
9872 * and the acquisition of the spinlock, we must also ping the
9873 * CPU after setting the request bit.
9874 *
9875 */
9876
9877 smp_call_function_single(cpu, tsc_khz_changed, freq, 1);
9878
9879 mutex_lock(&kvm_lock);
9880 list_for_each_entry(kvm, &vm_list, vm_list) {
9881 kvm_for_each_vcpu(i, vcpu, kvm) {
9882 if (vcpu->cpu != cpu)
9883 continue;
9884 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
9885 if (vcpu->cpu != raw_smp_processor_id())
9886 send_ipi = 1;
9887 }
9888 }
9889 mutex_unlock(&kvm_lock);
9890
9891 if (freq->old < freq->new && send_ipi) {
9892 /*
9893 * We are raising the frequency. We must make sure the guest
9894 * doesn't see old kvmclock values while running with
9895 * the new frequency, otherwise we risk the guest seeing
9896 * time go backwards.
9897 *
9898 * If we are updating the frequency for another CPU
9899 * (which might be in guest context), send an interrupt
9900 * to kick the CPU out of guest context. Next time
9901 * guest context is entered kvmclock will be updated,
9902 * so the guest will not see stale values.
9903 */
9904 smp_call_function_single(cpu, tsc_khz_changed, freq, 1);
9905 }
9906 }
9907
9908 static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
9909 void *data)
9910 {
9911 struct cpufreq_freqs *freq = data;
9912 int cpu;
9913
9914 if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
9915 return 0;
9916 if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
9917 return 0;
9918
9919 for_each_cpu(cpu, freq->policy->cpus)
9920 __kvmclock_cpufreq_notifier(freq, cpu);
9921
9922 return 0;
9923 }
9924
9925 static struct notifier_block kvmclock_cpufreq_notifier_block = {
9926 .notifier_call = kvmclock_cpufreq_notifier
9927 };
9928
9929 static int kvmclock_cpu_online(unsigned int cpu)
9930 {
9931 tsc_khz_changed(NULL);
9932 return 0;
9933 }
9934
9935 static void kvm_timer_init(void)
9936 {
9937 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
9938 max_tsc_khz = tsc_khz;
9939
9940 if (IS_ENABLED(CONFIG_CPU_FREQ)) {
9941 struct cpufreq_policy *policy;
9942 int cpu;
9943
9944 cpu = get_cpu();
9945 policy = cpufreq_cpu_get(cpu);
9946 if (policy) {
9947 if (policy->cpuinfo.max_freq)
9948 max_tsc_khz = policy->cpuinfo.max_freq;
9949 cpufreq_cpu_put(policy);
9950 }
9951 put_cpu();
9952 }
9953 cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
9954 CPUFREQ_TRANSITION_NOTIFIER);
9955
9956 cpuhp_setup_state(CPUHP_AP_X86_KVM_CLK_ONLINE, "x86/kvm/clk:online",
9957 kvmclock_cpu_online, kvmclock_cpu_down_prep);
9958 }
9959 }
9960
9961 #ifdef CONFIG_X86_64
9962 static void pvclock_gtod_update_fn(struct work_struct *work)
9963 {
9964 struct kvm *kvm;
9965 struct kvm_vcpu *vcpu;
9966 unsigned long i;
9967
9968 mutex_lock(&kvm_lock);
9969 list_for_each_entry(kvm, &vm_list, vm_list)
9970 kvm_for_each_vcpu(i, vcpu, kvm)
9971 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
9972 atomic_set(&kvm_guest_has_master_clock, 0);
9973 mutex_unlock(&kvm_lock);
9974 }
9975
9976 static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);
9977
9978 /*
9979 * Indirection to move queue_work() out of the tk_core.seq write held
9980 * region to prevent possible deadlocks against time accessors which
9981 * are invoked with work related locks held.
9982 */
9983 static void pvclock_irq_work_fn(struct irq_work *w)
9984 {
9985 queue_work(system_long_wq, &pvclock_gtod_work);
9986 }
9987
9988 static DEFINE_IRQ_WORK(pvclock_irq_work, pvclock_irq_work_fn);
9989
9990 /*
9991 * Notification about pvclock gtod data update.
9992 */
9993 static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused,
9994 void *priv)
9995 {
9996 struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
9997 struct timekeeper *tk = priv;
9998
9999 update_pvclock_gtod(tk);
10000
10001 /*
10002 * Disable the master clock if the host does not trust, or does not use,
10003 * a TSC-based clocksource. Delegate queue_work() to irq_work as
10004 * this is invoked with tk_core.seq write held.
10005 */
10006 if (!gtod_is_based_on_tsc(gtod->clock.vclock_mode) &&
10007 atomic_read(&kvm_guest_has_master_clock) != 0)
10008 irq_work_queue(&pvclock_irq_work);
10009 return 0;
10010 }
10011
10012 static struct notifier_block pvclock_gtod_notifier = {
10013 .notifier_call = pvclock_gtod_notify,
10014 };
10015 #endif
10016
10017 void kvm_setup_xss_caps(void)
10018 {
10019 if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES))
10020 kvm_caps.supported_xss = 0;
10021
10022 if (!kvm_cpu_cap_has(X86_FEATURE_SHSTK) &&
10023 !kvm_cpu_cap_has(X86_FEATURE_IBT))
10024 kvm_caps.supported_xss &= ~XFEATURE_MASK_CET_ALL;
10025
10026 if ((kvm_caps.supported_xss & XFEATURE_MASK_CET_ALL) != XFEATURE_MASK_CET_ALL) {
10027 kvm_cpu_cap_clear(X86_FEATURE_SHSTK);
10028 kvm_cpu_cap_clear(X86_FEATURE_IBT);
10029 kvm_caps.supported_xss &= ~XFEATURE_MASK_CET_ALL;
10030 }
10031 }
10032 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_setup_xss_caps);
10033
10034 static void kvm_setup_efer_caps(void)
10035 {
10036 if (kvm_cpu_cap_has(X86_FEATURE_NX))
10037 kvm_enable_efer_bits(EFER_NX);
10038
10039 if (kvm_cpu_cap_has(X86_FEATURE_FXSR_OPT))
10040 kvm_enable_efer_bits(EFER_FFXSR);
10041
10042 if (kvm_cpu_cap_has(X86_FEATURE_AUTOIBRS))
10043 kvm_enable_efer_bits(EFER_AUTOIBRS);
10044 }
10045
10046 static inline void kvm_ops_update(struct kvm_x86_init_ops *ops)
10047 {
10048 memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops));
10049
10050 #define __KVM_X86_OP(func) \
10051 static_call_update(kvm_x86_##func, kvm_x86_ops.func);
10052 #define KVM_X86_OP(func) \
10053 WARN_ON(!kvm_x86_ops.func); __KVM_X86_OP(func)
10054 #define KVM_X86_OP_OPTIONAL __KVM_X86_OP
10055 #define KVM_X86_OP_OPTIONAL_RET0(func) \
10056 static_call_update(kvm_x86_##func, (void *)kvm_x86_ops.func ? : \
10057 (void *)__static_call_return0);
10058 #include <asm/kvm-x86-ops.h>
10059 #undef __KVM_X86_OP
10060
10061 kvm_pmu_ops_update(ops->pmu_ops);
10062 }
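
/*
 * After this update, the kvm_x86_call() wrappers used throughout this file,
 * e.g.
 *
 *	kvm_x86_call(get_cpl)(vcpu);
 *
 * resolve through the static calls patched above, i.e. roughly
 * static_call(kvm_x86_get_cpl)(vcpu), jumping straight into the vendor
 * module instead of bouncing through a function pointer in kvm_x86_ops.
 */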
10063
10064 static int kvm_x86_check_processor_compatibility(void)
10065 {
10066 int cpu = smp_processor_id();
10067 struct cpuinfo_x86 *c = &cpu_data(cpu);
10068
10069 /*
10070 * Compatibility checks are done when loading KVM and when enabling
10071 * hardware, e.g. during CPU hotplug, to ensure all online CPUs are
10072 * compatible, i.e. KVM should never perform a compatibility check on
10073 * an offline CPU.
10074 */
10075 WARN_ON(!cpu_online(cpu));
10076
10077 if (__cr4_reserved_bits(cpu_has, c) !=
10078 __cr4_reserved_bits(cpu_has, &boot_cpu_data))
10079 return -EIO;
10080
10081 return kvm_x86_call(check_processor_compatibility)();
10082 }
10083
10084 static void kvm_x86_check_cpu_compat(void *ret)
10085 {
10086 *(int *)ret = kvm_x86_check_processor_compatibility();
10087 }
10088
10089 int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
10090 {
10091 u64 host_pat;
10092 int r, cpu;
10093
10094 guard(mutex)(&vendor_module_lock);
10095
10096 if (kvm_x86_ops.enable_virtualization_cpu) {
10097 pr_err("already loaded vendor module '%s'\n", kvm_x86_ops.name);
10098 return -EEXIST;
10099 }
10100
10101 /*
10102 * KVM explicitly assumes that the guest has an FPU and
10103 * FXSAVE/FXRSTOR. For example, the KVM_GET_FPU ioctl explicitly casts the
10104 * vCPU's FPU state to a struct fxregs_state.
10105 */
10106 if (!boot_cpu_has(X86_FEATURE_FPU) || !boot_cpu_has(X86_FEATURE_FXSR)) {
10107 pr_err("inadequate fpu\n");
10108 return -EOPNOTSUPP;
10109 }
10110
10111 if (IS_ENABLED(CONFIG_PREEMPT_RT) && !boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
10112 pr_err("RT requires X86_FEATURE_CONSTANT_TSC\n");
10113 return -EOPNOTSUPP;
10114 }
10115
10116 /*
10117 * KVM assumes that PAT entry '0' encodes WB memtype and simply zeroes
10118 * the PAT bits in SPTEs. Bail if PAT[0] is programmed to something
10119 * other than WB. Note, EPT doesn't utilize the PAT, but don't bother
10120 * with an exception. PAT[0] is set to WB on RESET and also by the
10121 * kernel, i.e. failure indicates a kernel bug or broken firmware.
10122 */
10123 if (rdmsrq_safe(MSR_IA32_CR_PAT, &host_pat) ||
10124 (host_pat & GENMASK(2, 0)) != 6) {
10125 pr_err("host PAT[0] is not WB\n");
10126 return -EIO;
10127 }
10128
10129 if (boot_cpu_has(X86_FEATURE_SHSTK) || boot_cpu_has(X86_FEATURE_IBT)) {
10130 rdmsrq(MSR_IA32_S_CET, kvm_host.s_cet);
10131 /*
10132 * Linux doesn't yet support supervisor shadow stacks (SSS), so
10133 * KVM doesn't save/restore the associated MSRs, i.e. KVM may
10134 * clobber the host values. Yell and refuse to load if SSS is
10135 * unexpectedly enabled, e.g. to avoid crashing the host.
10136 */
10137 if (WARN_ON_ONCE(kvm_host.s_cet & CET_SHSTK_EN))
10138 return -EIO;
10139 }
10140
10141 memset(&kvm_caps, 0, sizeof(kvm_caps));
10142
10143 x86_emulator_cache = kvm_alloc_emulator_cache();
10144 if (!x86_emulator_cache) {
10145 pr_err("failed to allocate cache for x86 emulator\n");
10146 return -ENOMEM;
10147 }
10148
10149 r = kvm_mmu_vendor_module_init();
10150 if (r)
10151 goto out_free_x86_emulator_cache;
10152
10153 kvm_caps.supported_vm_types = BIT(KVM_X86_DEFAULT_VM);
10154 kvm_caps.supported_mce_cap = MCG_CTL_P | MCG_SER_P;
10155
10156 if (boot_cpu_has(X86_FEATURE_XSAVE)) {
10157 kvm_host.xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
10158 kvm_caps.supported_xcr0 = kvm_host.xcr0 & KVM_SUPPORTED_XCR0;
10159 }
10160
10161 if (boot_cpu_has(X86_FEATURE_XSAVES)) {
10162 rdmsrq(MSR_IA32_XSS, kvm_host.xss);
10163 kvm_caps.supported_xss = kvm_host.xss & KVM_SUPPORTED_XSS;
10164 }
10165
10166 kvm_caps.supported_quirks = KVM_X86_VALID_QUIRKS;
10167 kvm_caps.inapplicable_quirks = KVM_X86_CONDITIONAL_QUIRKS;
10168
10169 rdmsrq_safe(MSR_EFER, &kvm_host.efer);
10170
10171 kvm_init_pmu_capability(ops->pmu_ops);
10172
10173 if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
10174 rdmsrq(MSR_IA32_ARCH_CAPABILITIES, kvm_host.arch_capabilities);
10175
10176 WARN_ON_ONCE(kvm_nr_uret_msrs);
10177
10178 r = ops->hardware_setup();
10179 if (r != 0)
10180 goto out_mmu_exit;
10181
10182 kvm_setup_efer_caps();
10183
10184 enable_device_posted_irqs &= enable_apicv &&
10185 irq_remapping_cap(IRQ_POSTING_CAP);
10186
10187 kvm_ops_update(ops);
10188
10189 for_each_online_cpu(cpu) {
10190 smp_call_function_single(cpu, kvm_x86_check_cpu_compat, &r, 1);
10191 if (r < 0)
10192 goto out_unwind_ops;
10193 }
10194
10195 /*
10196 * Point of no return! DO NOT add error paths below this point unless
10197 * absolutely necessary, as most operations from this point forward
10198 * require unwinding.
10199 */
10200 kvm_timer_init();
10201
10202 if (pi_inject_timer == -1)
10203 pi_inject_timer = housekeeping_enabled(HK_TYPE_TIMER);
10204 #ifdef CONFIG_X86_64
10205 pvclock_gtod_register_notifier(&pvclock_gtod_notifier);
10206
10207 if (hypervisor_is_type(X86_HYPER_MS_HYPERV))
10208 set_hv_tscchange_cb(kvm_hyperv_tsc_notifier);
10209 #endif
10210
10211 __kvm_register_perf_callbacks(ops->handle_intel_pt_intr,
10212 enable_mediated_pmu ? kvm_handle_guest_mediated_pmi : NULL);
10213
10214 if (IS_ENABLED(CONFIG_KVM_SW_PROTECTED_VM) && tdp_mmu_enabled)
10215 kvm_caps.supported_vm_types |= BIT(KVM_X86_SW_PROTECTED_VM);
10216
10217 /* KVM always ignores guest PAT for shadow paging. */
10218 if (!tdp_enabled)
10219 kvm_caps.supported_quirks &= ~KVM_X86_QUIRK_IGNORE_GUEST_PAT;
10220
10221 if (kvm_caps.has_tsc_control) {
10222 /*
10223 * Make sure the user can only configure tsc_khz values that
10224 * fit into a signed integer.
10225 * A min value is not calculated because it will always
10226 * be 1 on all machines.
10227 */
10228 u64 max = min(0x7fffffffULL,
10229 __scale_tsc(kvm_caps.max_tsc_scaling_ratio, tsc_khz));
10230 kvm_caps.max_guest_tsc_khz = max;
10231 }
10232 kvm_caps.default_tsc_scaling_ratio = 1ULL << kvm_caps.tsc_scaling_ratio_frac_bits;
10233 kvm_init_msr_lists();
10234 return 0;
10235
10236 out_unwind_ops:
10237 kvm_x86_ops.enable_virtualization_cpu = NULL;
10238 kvm_x86_call(hardware_unsetup)();
10239 out_mmu_exit:
10240 kvm_destroy_user_return_msrs();
10241 kvm_mmu_vendor_module_exit();
10242 out_free_x86_emulator_cache:
10243 kmem_cache_destroy(x86_emulator_cache);
10244 return r;
10245 }
10246 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_x86_vendor_init);
10247
10248 void kvm_x86_vendor_exit(void)
10249 {
10250 kvm_unregister_perf_callbacks();
10251
10252 #ifdef CONFIG_X86_64
10253 if (hypervisor_is_type(X86_HYPER_MS_HYPERV))
10254 clear_hv_tscchange_cb();
10255 #endif
10256 kvm_lapic_exit();
10257
10258 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
10259 cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
10260 CPUFREQ_TRANSITION_NOTIFIER);
10261 cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE);
10262 }
10263 #ifdef CONFIG_X86_64
10264 pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
10265 irq_work_sync(&pvclock_irq_work);
10266 cancel_work_sync(&pvclock_gtod_work);
10267 #endif
10268 kvm_x86_call(hardware_unsetup)();
10269 kvm_destroy_user_return_msrs();
10270 kvm_mmu_vendor_module_exit();
10271 kmem_cache_destroy(x86_emulator_cache);
10272 #ifdef CONFIG_KVM_XEN
10273 static_key_deferred_flush(&kvm_xen_enabled);
10274 WARN_ON(static_branch_unlikely(&kvm_xen_enabled.key));
10275 #endif
10276 mutex_lock(&vendor_module_lock);
10277 kvm_x86_ops.enable_virtualization_cpu = NULL;
10278 mutex_unlock(&vendor_module_lock);
10279 }
10280 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_x86_vendor_exit);
10281
10282 #ifdef CONFIG_X86_64
10283 static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,
10284 unsigned long clock_type)
10285 {
10286 struct kvm_clock_pairing clock_pairing;
10287 struct timespec64 ts;
10288 u64 cycle;
10289 int ret;
10290
10291 if (clock_type != KVM_CLOCK_PAIRING_WALLCLOCK)
10292 return -KVM_EOPNOTSUPP;
10293
10294 /*
10295 * When the TSC is in permanent catchup mode, guests won't be able to use
10296 * the pvclock_read_retry loop to get a consistent view of pvclock.
10297 */
10298 if (vcpu->arch.tsc_always_catchup)
10299 return -KVM_EOPNOTSUPP;
10300
10301 if (!kvm_get_walltime_and_clockread(&ts, &cycle))
10302 return -KVM_EOPNOTSUPP;
10303
10304 clock_pairing.sec = ts.tv_sec;
10305 clock_pairing.nsec = ts.tv_nsec;
10306 clock_pairing.tsc = kvm_read_l1_tsc(vcpu, cycle);
10307 clock_pairing.flags = 0;
10308 memset(&clock_pairing.pad, 0, sizeof(clock_pairing.pad));
10309
10310 ret = 0;
10311 if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing,
10312 sizeof(struct kvm_clock_pairing)))
10313 ret = -KVM_EFAULT;
10314
10315 return ret;
10316 }
10317 #endif
10318
10319 /*
10320 * kvm_pv_kick_cpu_op: Kick a vcpu.
10321 *
10322 * @apicid - apicid of vcpu to be kicked.
10323 */
10324 static void kvm_pv_kick_cpu_op(struct kvm *kvm, int apicid)
10325 {
10326 /*
10327 * All other fields are unused for APIC_DM_REMRD, but may be consumed by
10328 * common code, e.g. for tracing. Defer initialization to the compiler.
10329 */
10330 struct kvm_lapic_irq lapic_irq = {
10331 .delivery_mode = APIC_DM_REMRD,
10332 .dest_mode = APIC_DEST_PHYSICAL,
10333 .shorthand = APIC_DEST_NOSHORT,
10334 .dest_id = apicid,
10335 };
10336
10337 kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq);
10338 }
10339
10340 bool kvm_apicv_activated(struct kvm *kvm)
10341 {
10342 return (READ_ONCE(kvm->arch.apicv_inhibit_reasons) == 0);
10343 }
10344 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_apicv_activated);
10345
10346 bool kvm_vcpu_apicv_activated(struct kvm_vcpu *vcpu)
10347 {
10348 ulong vm_reasons = READ_ONCE(vcpu->kvm->arch.apicv_inhibit_reasons);
10349 ulong vcpu_reasons =
10350 kvm_x86_call(vcpu_get_apicv_inhibit_reasons)(vcpu);
10351
10352 return (vm_reasons | vcpu_reasons) == 0;
10353 }
10354 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_apicv_activated);
10355
10356 static void set_or_clear_apicv_inhibit(unsigned long *inhibits,
10357 enum kvm_apicv_inhibit reason, bool set)
10358 {
10359 const struct trace_print_flags apicv_inhibits[] = { APICV_INHIBIT_REASONS };
10360
10361 BUILD_BUG_ON(ARRAY_SIZE(apicv_inhibits) != NR_APICV_INHIBIT_REASONS);
10362
10363 if (set)
10364 __set_bit(reason, inhibits);
10365 else
10366 __clear_bit(reason, inhibits);
10367
10368 trace_kvm_apicv_inhibit_changed(reason, set, *inhibits);
10369 }
10370
10371 static void kvm_apicv_init(struct kvm *kvm)
10372 {
10373 enum kvm_apicv_inhibit reason = enable_apicv ? APICV_INHIBIT_REASON_ABSENT :
10374 APICV_INHIBIT_REASON_DISABLED;
10375
10376 set_or_clear_apicv_inhibit(&kvm->arch.apicv_inhibit_reasons, reason, true);
10377
10378 init_rwsem(&kvm->arch.apicv_update_lock);
10379 }
10380
10381 static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id)
10382 {
10383 struct kvm_vcpu *target = NULL;
10384 struct kvm_apic_map *map;
10385
10386 vcpu->stat.directed_yield_attempted++;
10387
10388 if (single_task_running())
10389 goto no_yield;
10390
10391 rcu_read_lock();
10392 map = rcu_dereference(vcpu->kvm->arch.apic_map);
10393
10394 if (likely(map) && dest_id <= map->max_apic_id) {
10395 dest_id = array_index_nospec(dest_id, map->max_apic_id + 1);
10396 if (map->phys_map[dest_id])
10397 target = map->phys_map[dest_id]->vcpu;
10398 }
10399
10400 rcu_read_unlock();
10401
10402 if (!target || !READ_ONCE(target->ready))
10403 goto no_yield;
10404
10405 /* Ignore requests to yield to self */
10406 if (vcpu == target)
10407 goto no_yield;
10408
10409 if (kvm_vcpu_yield_to(target) <= 0)
10410 goto no_yield;
10411
10412 vcpu->stat.directed_yield_successful++;
10413
10414 no_yield:
10415 return;
10416 }
10417
10418 static int complete_hypercall_exit(struct kvm_vcpu *vcpu)
10419 {
10420 u64 ret = vcpu->run->hypercall.ret;
10421
10422 if (!is_64_bit_hypercall(vcpu))
10423 ret = (u32)ret;
10424 kvm_rax_write(vcpu, ret);
10425 return kvm_skip_emulated_instruction(vcpu);
10426 }
10427
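/*
 * Common handling for guest hypercalls.  Per KVM's hypercall ABI, the
 * hypercall number is passed in RAX and up to four arguments in RBX, RCX,
 * RDX and RSI; the result is written back to RAX when the hypercall
 * completes (see complete_hypercall_exit()).  When the hypercall is not made
 * from 64-bit mode, the inputs and the result are truncated to 32 bits.
 */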
10428 int ____kvm_emulate_hypercall(struct kvm_vcpu *vcpu, int cpl,
10429 int (*complete_hypercall)(struct kvm_vcpu *))
10430 {
10431 unsigned long ret;
10432 unsigned long nr = kvm_rax_read(vcpu);
10433 unsigned long a0 = kvm_rbx_read(vcpu);
10434 unsigned long a1 = kvm_rcx_read(vcpu);
10435 unsigned long a2 = kvm_rdx_read(vcpu);
10436 unsigned long a3 = kvm_rsi_read(vcpu);
10437 int op_64_bit = is_64_bit_hypercall(vcpu);
10438
10439 ++vcpu->stat.hypercalls;
10440
10441 trace_kvm_hypercall(nr, a0, a1, a2, a3);
10442
10443 if (!op_64_bit) {
10444 nr &= 0xFFFFFFFF;
10445 a0 &= 0xFFFFFFFF;
10446 a1 &= 0xFFFFFFFF;
10447 a2 &= 0xFFFFFFFF;
10448 a3 &= 0xFFFFFFFF;
10449 }
10450
10451 if (cpl) {
10452 ret = -KVM_EPERM;
10453 goto out;
10454 }
10455
10456 ret = -KVM_ENOSYS;
10457
10458 switch (nr) {
10459 case KVM_HC_VAPIC_POLL_IRQ:
10460 ret = 0;
10461 break;
10462 case KVM_HC_KICK_CPU:
10463 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_UNHALT))
10464 break;
10465
10466 kvm_pv_kick_cpu_op(vcpu->kvm, a1);
10467 kvm_sched_yield(vcpu, a1);
10468 ret = 0;
10469 break;
10470 #ifdef CONFIG_X86_64
10471 case KVM_HC_CLOCK_PAIRING:
10472 ret = kvm_pv_clock_pairing(vcpu, a0, a1);
10473 break;
10474 #endif
10475 case KVM_HC_SEND_IPI:
10476 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_SEND_IPI))
10477 break;
10478
10479 ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit);
10480 break;
10481 case KVM_HC_SCHED_YIELD:
10482 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_SCHED_YIELD))
10483 break;
10484
10485 kvm_sched_yield(vcpu, a0);
10486 ret = 0;
10487 break;
10488 case KVM_HC_MAP_GPA_RANGE: {
10489 u64 gpa = a0, npages = a1, attrs = a2;
10490
10491 ret = -KVM_ENOSYS;
10492 if (!user_exit_on_hypercall(vcpu->kvm, KVM_HC_MAP_GPA_RANGE))
10493 break;
10494
10495 if (!PAGE_ALIGNED(gpa) || !npages ||
10496 gpa_to_gfn(gpa) + npages <= gpa_to_gfn(gpa)) {
10497 ret = -KVM_EINVAL;
10498 break;
10499 }
10500
10501 vcpu->run->exit_reason = KVM_EXIT_HYPERCALL;
10502 vcpu->run->hypercall.nr = KVM_HC_MAP_GPA_RANGE;
10503 /*
10504 * In principle this should have been -KVM_ENOSYS, but userspace (QEMU <=9.2)
10505 * assumed that vcpu->run->hypercall.ret is never changed by KVM and thus that
10506 * it was always zero on KVM_EXIT_HYPERCALL. Since KVM is now overwriting
10507 * vcpu->run->hypercall.ret, ensure that it is zero so as not to break QEMU.
10508 */
10509 vcpu->run->hypercall.ret = 0;
10510 vcpu->run->hypercall.args[0] = gpa;
10511 vcpu->run->hypercall.args[1] = npages;
10512 vcpu->run->hypercall.args[2] = attrs;
10513 vcpu->run->hypercall.flags = 0;
10514 if (op_64_bit)
10515 vcpu->run->hypercall.flags |= KVM_EXIT_HYPERCALL_LONG_MODE;
10516
10517 WARN_ON_ONCE(vcpu->run->hypercall.flags & KVM_EXIT_HYPERCALL_MBZ);
10518 vcpu->arch.complete_userspace_io = complete_hypercall;
10519 return 0;
10520 }
10521 default:
10522 ret = -KVM_ENOSYS;
10523 break;
10524 }
10525
10526 out:
10527 vcpu->run->hypercall.ret = ret;
10528 return 1;
10529 }
10530 EXPORT_SYMBOL_FOR_KVM_INTERNAL(____kvm_emulate_hypercall);
10531
10532 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
10533 {
10534 if (kvm_xen_hypercall_enabled(vcpu->kvm))
10535 return kvm_xen_hypercall(vcpu);
10536
10537 if (kvm_hv_hypercall_enabled(vcpu))
10538 return kvm_hv_hypercall(vcpu);
10539
10540 return __kvm_emulate_hypercall(vcpu, kvm_x86_call(get_cpl)(vcpu),
10541 complete_hypercall_exit);
10542 }
10543 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_hypercall);
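/*
 * For illustration only (this is not KVM code): a guest typically issues the
 * hypercalls handled above via VMCALL (Intel) or VMMCALL (AMD), roughly along
 * the lines of the sketch below, with the hypercall number in RAX and
 * arguments in RBX, RCX, etc.:
 *
 *	static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
 *					  unsigned long p2)
 *	{
 *		long ret;
 *
 *		asm volatile("vmcall"
 *			     : "=a"(ret)
 *			     : "a"(nr), "b"(p1), "c"(p2)
 *			     : "memory");
 *		return ret;
 *	}
 */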
10544
10545 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
10546 {
10547 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
10548 char instruction[3];
10549 unsigned long rip = kvm_rip_read(vcpu);
10550
10551 /*
10552 * If the quirk is disabled, synthesize a #UD and let the guest pick up
10553 * the pieces.
10554 */
10555 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_FIX_HYPERCALL_INSN)) {
10556 ctxt->exception.error_code_valid = false;
10557 ctxt->exception.vector = UD_VECTOR;
10558 ctxt->have_exception = true;
10559 return X86EMUL_PROPAGATE_FAULT;
10560 }
10561
10562 kvm_x86_call(patch_hypercall)(vcpu, instruction);
10563
10564 return emulator_write_emulated(ctxt, rip, instruction, 3,
10565 &ctxt->exception);
10566 }
10567
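/*
 * Returns true if userspace asked (via kvm_run->request_interrupt_window) to
 * be notified when the guest is ready to accept an interrupt, and interrupt
 * injection is handled by userspace, i.e. there is no in-kernel PIC.
 */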
10568 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
10569 {
10570 return vcpu->run->request_interrupt_window &&
10571 likely(!pic_in_kernel(vcpu->kvm));
10572 }
10573
10574 /* Called within kvm->srcu read side. */
10575 static void post_kvm_run_save(struct kvm_vcpu *vcpu)
10576 {
10577 struct kvm_run *kvm_run = vcpu->run;
10578
10579 kvm_run->if_flag = kvm_x86_call(get_if_flag)(vcpu);
10580 kvm_run->cr8 = kvm_get_cr8(vcpu);
10581 kvm_run->apic_base = vcpu->arch.apic_base;
10582
10583 kvm_run->ready_for_interrupt_injection =
10584 pic_in_kernel(vcpu->kvm) ||
10585 kvm_vcpu_ready_for_interrupt_injection(vcpu);
10586
10587 if (is_smm(vcpu))
10588 kvm_run->flags |= KVM_RUN_X86_SMM;
10589 if (is_guest_mode(vcpu))
10590 kvm_run->flags |= KVM_RUN_X86_GUEST_MODE;
10591 }
10592
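/*
 * Pass the current TPR and the priority of the highest pending interrupt
 * (or -1 if a vAPIC page is in use) to vendor code so that it can adjust how
 * CR8/TPR accesses are intercepted.  Only relevant when the local APIC is
 * in-kernel and APICv is not active.
 */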
10593 static void update_cr8_intercept(struct kvm_vcpu *vcpu)
10594 {
10595 int max_irr, tpr;
10596
10597 if (!kvm_x86_ops.update_cr8_intercept)
10598 return;
10599
10600 if (!lapic_in_kernel(vcpu))
10601 return;
10602
10603 if (vcpu->arch.apic->apicv_active)
10604 return;
10605
10606 if (!vcpu->arch.apic->vapic_addr)
10607 max_irr = kvm_lapic_find_highest_irr(vcpu);
10608 else
10609 max_irr = -1;
10610
10611 if (max_irr != -1)
10612 max_irr >>= 4;
10613
10614 tpr = kvm_lapic_get_cr8(vcpu);
10615
10616 kvm_x86_call(update_cr8_intercept)(vcpu, tpr, max_irr);
10617 }
10618
10619
10620 int kvm_check_nested_events(struct kvm_vcpu *vcpu)
10621 {
10622 if (kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
10623 kvm_x86_ops.nested_ops->triple_fault(vcpu);
10624 return 1;
10625 }
10626
10627 return kvm_x86_ops.nested_ops->check_events(vcpu);
10628 }
10629
10630 static void kvm_inject_exception(struct kvm_vcpu *vcpu)
10631 {
10632 /*
10633 * Suppress the error code if the vCPU is in Real Mode, as Real Mode
10634 * exceptions don't report error codes. The presence of an error code
10635 * is carried with the exception and only stripped when the exception
10636 * is injected, as intercepted #PF VM-Exits for AMD's Paged Real Mode do
10637 * report an error code despite the CPU being in Real Mode.
10638 */
10639 vcpu->arch.exception.has_error_code &= is_protmode(vcpu);
10640
10641 trace_kvm_inj_exception(vcpu->arch.exception.vector,
10642 vcpu->arch.exception.has_error_code,
10643 vcpu->arch.exception.error_code,
10644 vcpu->arch.exception.injected);
10645
10646 kvm_x86_call(inject_exception)(vcpu);
10647 }
10648
10649 /*
10650 * Check for any event (interrupt or exception) that is ready to be injected,
10651 * and if there is at least one event, inject the event with the highest
10652 * priority. This handles both "pending" events, i.e. events that have never
10653 * been injected into the guest, and "injected" events, i.e. events that were
10654 * injected as part of a previous VM-Enter, but weren't successfully delivered
10655 * and need to be re-injected.
10656 *
10657 * Note, this is not guaranteed to be invoked on a guest instruction boundary,
10658 * i.e. doesn't guarantee that there's an event window in the guest. KVM must
10659 * be able to inject exceptions in the "middle" of an instruction, and so must
10660 * also be able to re-inject NMIs and IRQs in the middle of an instruction.
10661 * I.e. for exceptions and re-injected events, NOT invoking this on instruction
10662 * boundaries is necessary and correct.
10663 *
10664 * For simplicity, KVM uses a single path to inject all events (except events
10665 * that are injected directly from L1 to L2) and doesn't explicitly track
10666 * instruction boundaries for asynchronous events. However, because VM-Exits
10667 * that can occur during instruction execution typically result in KVM skipping
10668 * the instruction or injecting an exception, e.g. instruction and exception
10669 * intercepts, and because pending exceptions have higher priority than pending
10670 * interrupts, KVM still honors instruction boundaries in most scenarios.
10671 *
10672 * But, if a VM-Exit occurs during instruction execution, and KVM does NOT skip
10673 * the instruction or inject an exception, then KVM can incorrectly inject a new
10674 * asynchronous event if the event became pending after the CPU fetched the
10675 * instruction (in the guest). E.g. if a page fault (#PF, #NPF, EPT violation)
10676 * occurs and is resolved by KVM, a coincident NMI, SMI, IRQ, etc... can be
10677 * injected on the restarted instruction instead of being deferred until the
10678 * instruction completes.
10679 *
10680 * In practice, this virtualization hole is unlikely to be observed by the
10681 * guest, and even less likely to cause functional problems. To detect the
10682 * hole, the guest would have to trigger an event on a side effect of an early
10683 * phase of instruction execution, e.g. on the instruction fetch from memory.
10684 * And for it to be a functional problem, the guest would need to depend on the
10685 * ordering between that side effect, the instruction completing, _and_ the
10686 * delivery of the asynchronous event.
10687 */
10688 static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu,
10689 bool *req_immediate_exit)
10690 {
10691 bool can_inject;
10692 int r;
10693
10694 /*
10695 * Process nested events first, as nested VM-Exit supersedes event
10696 * re-injection. If there's an event queued for re-injection, it will
10697 * be saved into the appropriate vmc{b,s}12 fields on nested VM-Exit.
10698 */
10699 if (is_guest_mode(vcpu))
10700 r = kvm_check_nested_events(vcpu);
10701 else
10702 r = 0;
10703
10704 /*
10705 * Re-inject exceptions and events *especially* if immediate entry+exit
10706 * to/from L2 is needed, as any event that has already been injected
10707 * into L2 needs to complete its lifecycle before injecting a new event.
10708 *
10709 * Don't re-inject an NMI or interrupt if there is a pending exception.
10710 * This collision arises if an exception occurred while vectoring the
10711 * injected event, KVM intercepted said exception, and KVM ultimately
10712 * determined the fault belongs to the guest and queues the exception
10713 * for injection back into the guest.
10714 *
10715 * "Injected" interrupts can also collide with pending exceptions if
10716 * userspace ignores the "ready for injection" flag and blindly queues
10717 * an interrupt. In that case, prioritizing the exception is correct,
10718 * as the exception "occurred" before the exit to userspace. Trap-like
10719 * exceptions, e.g. most #DBs, have higher priority than interrupts.
10720 * And while fault-like exceptions, e.g. #GP and #PF, are the lowest
10721 * priority, they're only generated (pended) during instruction
10722 * execution, and interrupts are recognized at instruction boundaries.
10723 * Thus a pending fault-like exception means the fault occurred on the
10724 * *previous* instruction and must be serviced prior to recognizing any
10725 * new events in order to fully complete the previous instruction.
10726 */
10727 if (vcpu->arch.exception.injected)
10728 kvm_inject_exception(vcpu);
10729 else if (kvm_is_exception_pending(vcpu))
10730 ; /* see above */
10731 else if (vcpu->arch.nmi_injected)
10732 kvm_x86_call(inject_nmi)(vcpu);
10733 else if (vcpu->arch.interrupt.injected)
10734 kvm_x86_call(inject_irq)(vcpu, true);
10735
10736 /*
10737 * Exceptions that morph to VM-Exits are handled above, and pending
10738 * exceptions on top of injected exceptions that do not VM-Exit should
10739 * either morph to #DF or, sadly, override the injected exception.
10740 */
10741 WARN_ON_ONCE(vcpu->arch.exception.injected &&
10742 vcpu->arch.exception.pending);
10743
10744 /*
10745 * Bail if immediate entry+exit to/from the guest is needed to complete
10746 * nested VM-Enter or event re-injection so that a different pending
10747 * event can be serviced (or if KVM needs to exit to userspace).
10748 *
10749 * Otherwise, continue processing events even if VM-Exit occurred. The
10750 * VM-Exit will have cleared exceptions that were meant for L2, but
10751 * there may now be events that can be injected into L1.
10752 */
10753 if (r < 0)
10754 goto out;
10755
10756 /*
10757 * A pending exception VM-Exit should either result in nested VM-Exit
10758 * or force an immediate re-entry and exit to/from L2, and exception
10759 * VM-Exits cannot be injected (flag should _never_ be set).
10760 */
10761 WARN_ON_ONCE(vcpu->arch.exception_vmexit.injected ||
10762 vcpu->arch.exception_vmexit.pending);
10763
10764 /*
10765 * New events, other than exceptions, cannot be injected if KVM needs
10766 * to re-inject a previous event. See above comments on re-injecting
10767 * for why pending exceptions get priority.
10768 */
10769 can_inject = !kvm_event_needs_reinjection(vcpu);
10770
10771 if (vcpu->arch.exception.pending) {
10772 /*
10773 * Fault-class exceptions, except #DBs, set RF=1 in the RFLAGS
10774 * value pushed on the stack. Trap-like exceptions and all #DBs
10775 * leave RF as-is (KVM follows Intel's behavior in this regard;
10776 * AMD states that code breakpoint #DBs explicitly clear RF=0).
10777 *
10778 * Note, most versions of Intel's SDM and AMD's APM incorrectly
10779 * describe the behavior of General Detect #DBs, which are
10780 * fault-like. They do _not_ set RF, a la code breakpoints.
10781 */
10782 if (exception_type(vcpu->arch.exception.vector) == EXCPT_FAULT)
10783 __kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) |
10784 X86_EFLAGS_RF);
10785
10786 if (vcpu->arch.exception.vector == DB_VECTOR &&
10787 vcpu->arch.dr7 & DR7_GD) {
10788 vcpu->arch.dr7 &= ~DR7_GD;
10789 kvm_update_dr7(vcpu);
10790 }
10791
10792 kvm_inject_exception(vcpu);
10793
10794 vcpu->arch.exception.pending = false;
10795 vcpu->arch.exception.injected = true;
10796
10797 can_inject = false;
10798 }
10799
10800 /* Don't inject interrupts if the user asked to avoid doing so */
10801 if (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ)
10802 return 0;
10803
10804 /*
10805 * Finally, inject interrupt events. If an event cannot be injected
10806 * due to architectural conditions (e.g. IF=0) a window-open exit
10807 * will re-request KVM_REQ_EVENT. Sometimes however an event is pending
10808 * and can architecturally be injected, but we cannot do it right now:
10809 * an interrupt could have arrived just now and we have to inject it
10810 * as a vmexit, or there could already be an event in the queue, which is
10811 * indicated by can_inject. In that case we request an immediate exit
10812 * in order to make progress and get back here for another iteration.
10813 * The kvm_x86_ops hooks communicate this by returning -EBUSY.
10814 */
10815 #ifdef CONFIG_KVM_SMM
10816 if (vcpu->arch.smi_pending) {
10817 r = can_inject ? kvm_x86_call(smi_allowed)(vcpu, true) :
10818 -EBUSY;
10819 if (r < 0)
10820 goto out;
10821 if (r) {
10822 vcpu->arch.smi_pending = false;
10823 ++vcpu->arch.smi_count;
10824 enter_smm(vcpu);
10825 can_inject = false;
10826 } else
10827 kvm_x86_call(enable_smi_window)(vcpu);
10828 }
10829 #endif
10830
10831 if (vcpu->arch.nmi_pending) {
10832 r = can_inject ? kvm_x86_call(nmi_allowed)(vcpu, true) :
10833 -EBUSY;
10834 if (r < 0)
10835 goto out;
10836 if (r) {
10837 --vcpu->arch.nmi_pending;
10838 vcpu->arch.nmi_injected = true;
10839 kvm_x86_call(inject_nmi)(vcpu);
10840 can_inject = false;
10841 WARN_ON(kvm_x86_call(nmi_allowed)(vcpu, true) < 0);
10842 }
10843 if (vcpu->arch.nmi_pending)
10844 kvm_x86_call(enable_nmi_window)(vcpu);
10845 }
10846
10847 if (kvm_cpu_has_injectable_intr(vcpu)) {
10848 r = can_inject ? kvm_x86_call(interrupt_allowed)(vcpu, true) :
10849 -EBUSY;
10850 if (r < 0)
10851 goto out;
10852 if (r) {
10853 int irq = kvm_cpu_get_interrupt(vcpu);
10854
10855 if (!WARN_ON_ONCE(irq == -1)) {
10856 kvm_queue_interrupt(vcpu, irq, false);
10857 kvm_x86_call(inject_irq)(vcpu, false);
10858 WARN_ON(kvm_x86_call(interrupt_allowed)(vcpu, true) < 0);
10859 }
10860 }
10861 if (kvm_cpu_has_injectable_intr(vcpu))
10862 kvm_x86_call(enable_irq_window)(vcpu);
10863 }
10864
10865 if (is_guest_mode(vcpu) &&
10866 kvm_x86_ops.nested_ops->has_events &&
10867 kvm_x86_ops.nested_ops->has_events(vcpu, true))
10868 *req_immediate_exit = true;
10869
10870 /*
10871 * KVM must never queue a new exception while injecting an event; KVM
10872 * is done emulating and should only propagate the to-be-injected event
10873 * to the VMCS/VMCB. Queueing a new exception can put the vCPU into an
10874 * infinite loop as KVM will bail from VM-Enter to inject the pending
10875 * exception and start the cycle all over.
10876 *
10877 * Exempt triple faults as they have special handling and won't put the
10878 * vCPU into an infinite loop. Triple fault can be queued when running
10879 * VMX without unrestricted guest, as that requires KVM to emulate Real
10880 * Mode events (see kvm_inject_realmode_interrupt()).
10881 */
10882 WARN_ON_ONCE(vcpu->arch.exception.pending ||
10883 vcpu->arch.exception_vmexit.pending);
10884 return 0;
10885
10886 out:
10887 if (r == -EBUSY) {
10888 *req_immediate_exit = true;
10889 r = 0;
10890 }
10891 return r;
10892 }
10893
10894 static void process_nmi(struct kvm_vcpu *vcpu)
10895 {
10896 unsigned int limit;
10897
10898 /*
10899 * x86 is limited to one NMI pending, but because KVM can't react to
10900 * incoming NMIs as quickly as bare metal, e.g. if the vCPU is
10901 * scheduled out, KVM needs to play nice with two queued NMIs showing
10902 * up at the same time. To handle this scenario, allow two NMIs to be
10903 * (temporarily) pending so long as NMIs are not blocked and KVM is not
10904 * waiting for a previous NMI injection to complete (which effectively
10905 * blocks NMIs). KVM will immediately inject one of the two NMIs, and
10906 * will request an NMI window to handle the second NMI.
10907 */
10908 if (kvm_x86_call(get_nmi_mask)(vcpu) || vcpu->arch.nmi_injected)
10909 limit = 1;
10910 else
10911 limit = 2;
10912
10913 /*
10914 * Adjust the limit to account for pending virtual NMIs, which aren't
10915 * tracked in vcpu->arch.nmi_pending.
10916 */
10917 if (kvm_x86_call(is_vnmi_pending)(vcpu))
10918 limit--;
10919
10920 vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0);
10921 vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit);
10922
10923 if (vcpu->arch.nmi_pending &&
10924 (kvm_x86_call(set_vnmi_pending)(vcpu)))
10925 vcpu->arch.nmi_pending--;
10926
10927 if (vcpu->arch.nmi_pending)
10928 kvm_make_request(KVM_REQ_EVENT, vcpu);
10929 }
10930
10931 /* Return total number of NMIs pending injection to the VM */
10932 int kvm_get_nr_pending_nmis(struct kvm_vcpu *vcpu)
10933 {
10934 return vcpu->arch.nmi_pending +
10935 kvm_x86_call(is_vnmi_pending)(vcpu);
10936 }
10937
10938 void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
10939 unsigned long *vcpu_bitmap)
10940 {
10941 kvm_make_vcpus_request_mask(kvm, KVM_REQ_SCAN_IOAPIC, vcpu_bitmap);
10942 }
10943
10944 void kvm_make_scan_ioapic_request(struct kvm *kvm)
10945 {
10946 kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC);
10947 }
10948
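/*
 * Recompute whether APIC virtualization should be active for this vCPU based
 * on the VM-wide and vCPU-specific inhibit reasons (and whether the APIC is
 * enabled at all), and have vendor code refresh its APICv/AVIC controls if
 * the state changes.
 */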
10949 void __kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
10950 {
10951 struct kvm_lapic *apic = vcpu->arch.apic;
10952 bool activate;
10953
10954 if (!lapic_in_kernel(vcpu))
10955 return;
10956
10957 down_read(&vcpu->kvm->arch.apicv_update_lock);
10958 preempt_disable();
10959
10960 /* Do not activate APICV when APIC is disabled */
10961 activate = kvm_vcpu_apicv_activated(vcpu) &&
10962 (kvm_get_apic_mode(vcpu) != LAPIC_MODE_DISABLED);
10963
10964 if (apic->apicv_active == activate)
10965 goto out;
10966
10967 apic->apicv_active = activate;
10968 kvm_apic_update_apicv(vcpu);
10969 kvm_x86_call(refresh_apicv_exec_ctrl)(vcpu);
10970
10971 /*
10972 * When APICv gets disabled, we may still have injected interrupts
10973 * pending. At the same time, KVM_REQ_EVENT may not be set as APICv was
10974 * still active when the interrupt got accepted. Make sure
10975 * kvm_check_and_inject_events() is called to check for that.
10976 */
10977 if (!apic->apicv_active)
10978 kvm_make_request(KVM_REQ_EVENT, vcpu);
10979
10980 out:
10981 preempt_enable();
10982 up_read(&vcpu->kvm->arch.apicv_update_lock);
10983 }
10984 EXPORT_SYMBOL_FOR_KVM_INTERNAL(__kvm_vcpu_update_apicv);
10985
10986 static void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
10987 {
10988 if (!lapic_in_kernel(vcpu))
10989 return;
10990
10991 /*
10992 * Due to sharing page tables across vCPUs, the xAPIC memslot must be
10993 * deleted if any vCPU has xAPIC virtualization and x2APIC enabled, but
10994 * hardware doesn't support x2APIC virtualization. E.g. some AMD
10995 * CPUs support AVIC but not x2APIC. KVM still allows enabling AVIC in
10996 * this case so that KVM can use the AVIC doorbell to inject interrupts
10997 * to running vCPUs, but KVM must not create SPTEs for the APIC base as
10998 * the vCPU would incorrectly be able to access the vAPIC page via MMIO
10999 * despite being in x2APIC mode. For simplicity, inhibiting the APIC
11000 * access page is sticky.
11001 */
11002 if (apic_x2apic_mode(vcpu->arch.apic) &&
11003 kvm_x86_ops.allow_apicv_in_x2apic_without_x2apic_virtualization)
11004 kvm_inhibit_apic_access_page(vcpu);
11005
11006 __kvm_vcpu_update_apicv(vcpu);
11007 }
11008
11009 void __kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
11010 enum kvm_apicv_inhibit reason, bool set)
11011 {
11012 unsigned long old, new;
11013
11014 lockdep_assert_held_write(&kvm->arch.apicv_update_lock);
11015
11016 if (!(kvm_x86_ops.required_apicv_inhibits & BIT(reason)))
11017 return;
11018
11019 old = new = kvm->arch.apicv_inhibit_reasons;
11020
11021 if (reason != APICV_INHIBIT_REASON_IRQWIN)
11022 set_or_clear_apicv_inhibit(&new, reason, set);
11023
11024 set_or_clear_apicv_inhibit(&new, APICV_INHIBIT_REASON_IRQWIN,
11025 atomic_read(&kvm->arch.apicv_nr_irq_window_req));
11026
11027 if (!!old != !!new) {
11028 /*
11029 * Kick all vCPUs before setting apicv_inhibit_reasons to avoid
11030 * false positives in the sanity check WARN in vcpu_enter_guest().
11031 * This task will wait for all vCPUs to ack the kick IRQ before
11032 * updating apicv_inhibit_reasons, and all other vCPUs will
11033 * block on acquiring apicv_update_lock so that vCPUs can't
11034 * redo vcpu_enter_guest() without seeing the new inhibit state.
11035 *
11036 * Note, holding apicv_update_lock and taking it in the read
11037 * side (handling the request) also prevents other vCPUs from
11038 * servicing the request with a stale apicv_inhibit_reasons.
11039 */
11040 kvm_make_all_cpus_request(kvm, KVM_REQ_APICV_UPDATE);
11041 kvm->arch.apicv_inhibit_reasons = new;
11042 if (new) {
11043 unsigned long gfn = gpa_to_gfn(APIC_DEFAULT_PHYS_BASE);
11044 int idx = srcu_read_lock(&kvm->srcu);
11045
11046 kvm_zap_gfn_range(kvm, gfn, gfn+1);
11047 srcu_read_unlock(&kvm->srcu, idx);
11048 }
11049 } else {
11050 kvm->arch.apicv_inhibit_reasons = new;
11051 }
11052 }
11053
11054 void kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
11055 enum kvm_apicv_inhibit reason, bool set)
11056 {
11057 if (!enable_apicv)
11058 return;
11059
11060 down_write(&kvm->arch.apicv_update_lock);
11061 __kvm_set_or_clear_apicv_inhibit(kvm, reason, set);
11062 up_write(&kvm->arch.apicv_update_lock);
11063 }
11064 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_or_clear_apicv_inhibit);
11065
11066 void kvm_inc_or_dec_irq_window_inhibit(struct kvm *kvm, bool inc)
11067 {
11068 int add = inc ? 1 : -1;
11069
11070 if (!enable_apicv)
11071 return;
11072
11073 /*
11074 * IRQ windows are requested either because of ExtINT injections, or
11075 * because APICv is already disabled/inhibited for another reason.
11076 * While ExtINT injections are rare and should not happen while the
11077 * vCPU is running its actual workload, it's worth avoiding thrashing
11078 * if the IRQ window is being requested because APICv is already
11079 * inhibited. So, toggle the actual inhibit (which requires taking
11080 * the lock for write) if and only if there's no other inhibit.
11081 * kvm_set_or_clear_apicv_inhibit() always evaluates the IRQ window
11082 * count; thus the IRQ window inhibit call _will_ be lazily updated on
11083 * the next call, if it ever happens.
11084 */
11085 if (READ_ONCE(kvm->arch.apicv_inhibit_reasons) & ~BIT(APICV_INHIBIT_REASON_IRQWIN)) {
11086 guard(rwsem_read)(&kvm->arch.apicv_update_lock);
11087 if (READ_ONCE(kvm->arch.apicv_inhibit_reasons) & ~BIT(APICV_INHIBIT_REASON_IRQWIN)) {
11088 atomic_add(add, &kvm->arch.apicv_nr_irq_window_req);
11089 return;
11090 }
11091 }
11092
11093 /*
11094 * Strictly speaking, the lock is only needed if going 0->1 or 1->0,
11095 * a la atomic_dec_and_mutex_lock. However, ExtINTs are rare and
11096 * only target a single CPU, so that is the common case; do not
11097 * bother eliding the down_write()/up_write() pair.
11098 */
11099 guard(rwsem_write)(&kvm->arch.apicv_update_lock);
11100 if (atomic_add_return(add, &kvm->arch.apicv_nr_irq_window_req) == inc)
11101 __kvm_set_or_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_IRQWIN, inc);
11102 }
11103 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_inc_or_dec_irq_window_inhibit);
11104
11105 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
11106 {
11107 if (!kvm_apic_present(vcpu))
11108 return;
11109
11110 bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256);
11111 vcpu->arch.highest_stale_pending_ioapic_eoi = -1;
11112
11113 kvm_x86_call(sync_pir_to_irr)(vcpu);
11114
11115 if (irqchip_split(vcpu->kvm))
11116 kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
11117 #ifdef CONFIG_KVM_IOAPIC
11118 else if (ioapic_in_kernel(vcpu->kvm))
11119 kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
11120 #endif
11121
11122 if (is_guest_mode(vcpu))
11123 vcpu->arch.load_eoi_exitmap_pending = true;
11124 else
11125 kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
11126 }
11127
11128 static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
11129 {
11130 if (!kvm_apic_hw_enabled(vcpu->arch.apic))
11131 return;
11132
11133 #ifdef CONFIG_KVM_HYPERV
11134 if (to_hv_vcpu(vcpu)) {
11135 u64 eoi_exit_bitmap[4];
11136
11137 bitmap_or((ulong *)eoi_exit_bitmap,
11138 vcpu->arch.ioapic_handled_vectors,
11139 to_hv_synic(vcpu)->vec_bitmap, 256);
11140 kvm_x86_call(load_eoi_exitmap)(vcpu, eoi_exit_bitmap);
11141 return;
11142 }
11143 #endif
11144 kvm_x86_call(load_eoi_exitmap)(
11145 vcpu, (u64 *)vcpu->arch.ioapic_handled_vectors);
11146 }
11147
11148 void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
11149 {
11150 kvm_x86_call(guest_memory_reclaimed)(kvm);
11151 }
11152
11153 static void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
11154 {
11155 if (!lapic_in_kernel(vcpu))
11156 return;
11157
11158 kvm_x86_call(set_apic_access_page_addr)(vcpu);
11159 }
11160
11161 /*
11162 * Called within kvm->srcu read side.
11163 * Returns 1 to let vcpu_run() continue the guest execution loop without
11164 * exiting to the userspace. Otherwise, the value will be returned to the
11165 * userspace.
11166 */
11167 static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
11168 {
11169 int r;
11170 bool req_int_win =
11171 dm_request_for_irq_injection(vcpu) &&
11172 kvm_cpu_accept_dm_intr(vcpu);
11173 fastpath_t exit_fastpath;
11174 u64 run_flags, debug_ctl;
11175
11176 bool req_immediate_exit = false;
11177
11178 if (kvm_request_pending(vcpu)) {
11179 if (kvm_check_request(KVM_REQ_VM_DEAD, vcpu)) {
11180 r = -EIO;
11181 goto out;
11182 }
11183
11184 if (kvm_dirty_ring_check_request(vcpu)) {
11185 r = 0;
11186 goto out;
11187 }
11188
11189 if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
11190 if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
11191 r = 0;
11192 goto out;
11193 }
11194 }
11195 if (kvm_check_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu))
11196 kvm_mmu_free_obsolete_roots(vcpu);
11197 if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
11198 __kvm_migrate_timers(vcpu);
11199 if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu))
11200 kvm_update_masterclock(vcpu->kvm);
11201 if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu))
11202 kvm_gen_kvmclock_update(vcpu);
11203 if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
11204 r = kvm_guest_time_update(vcpu);
11205 if (unlikely(r))
11206 goto out;
11207 }
11208 if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
11209 kvm_mmu_sync_roots(vcpu);
11210 if (kvm_check_request(KVM_REQ_LOAD_MMU_PGD, vcpu))
11211 kvm_mmu_load_pgd(vcpu);
11212
11213 /*
11214 * Note, the order matters here, as flushing "all" TLB entries
11215 * also flushes the "current" TLB entries, i.e. servicing the
11216 * flush "all" will clear any request to flush "current".
11217 */
11218 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
11219 kvm_vcpu_flush_tlb_all(vcpu);
11220
11221 kvm_service_local_tlb_flush_requests(vcpu);
11222
11223 /*
11224 * Fall back to a "full" guest flush if Hyper-V's precise
11225 * flushing fails. Note, Hyper-V's flushing is per-vCPU, but
11226 * the flushes are considered "remote" and not "local" because
11227 * the requests can be initiated from other vCPUs.
11228 */
11229 #ifdef CONFIG_KVM_HYPERV
11230 if (kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu) &&
11231 kvm_hv_vcpu_flush_tlb(vcpu))
11232 kvm_vcpu_flush_tlb_guest(vcpu);
11233 #endif
11234
11235 if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
11236 vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
11237 r = 0;
11238 goto out;
11239 }
11240 if (kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
11241 if (is_guest_mode(vcpu))
11242 kvm_x86_ops.nested_ops->triple_fault(vcpu);
11243
11244 if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
11245 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
11246 vcpu->mmio_needed = 0;
11247 r = 0;
11248 goto out;
11249 }
11250 }
11251 if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
11252 /* Page is swapped out. Do synthetic halt */
11253 vcpu->arch.apf.halted = true;
11254 r = 1;
11255 goto out;
11256 }
11257 if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
11258 record_steal_time(vcpu);
11259 if (kvm_check_request(KVM_REQ_PMU, vcpu))
11260 kvm_pmu_handle_event(vcpu);
11261 if (kvm_check_request(KVM_REQ_PMI, vcpu))
11262 kvm_pmu_deliver_pmi(vcpu);
11263 #ifdef CONFIG_KVM_SMM
11264 if (kvm_check_request(KVM_REQ_SMI, vcpu))
11265 process_smi(vcpu);
11266 #endif
11267 if (kvm_check_request(KVM_REQ_NMI, vcpu))
11268 process_nmi(vcpu);
11269 if (kvm_check_request(KVM_REQ_IOAPIC_EOI_EXIT, vcpu)) {
11270 BUG_ON(vcpu->arch.pending_ioapic_eoi > 255);
11271 if (test_bit(vcpu->arch.pending_ioapic_eoi,
11272 vcpu->arch.ioapic_handled_vectors)) {
11273 vcpu->run->exit_reason = KVM_EXIT_IOAPIC_EOI;
11274 vcpu->run->eoi.vector =
11275 vcpu->arch.pending_ioapic_eoi;
11276 r = 0;
11277 goto out;
11278 }
11279 }
11280 if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu))
11281 vcpu_scan_ioapic(vcpu);
11282 if (kvm_check_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu))
11283 vcpu_load_eoi_exitmap(vcpu);
11284 if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
11285 kvm_vcpu_reload_apic_access_page(vcpu);
11286 #ifdef CONFIG_KVM_HYPERV
11287 if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) {
11288 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
11289 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH;
11290 vcpu->run->system_event.ndata = 0;
11291 r = 0;
11292 goto out;
11293 }
11294 if (kvm_check_request(KVM_REQ_HV_RESET, vcpu)) {
11295 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
11296 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET;
11297 vcpu->run->system_event.ndata = 0;
11298 r = 0;
11299 goto out;
11300 }
11301 if (kvm_check_request(KVM_REQ_HV_EXIT, vcpu)) {
11302 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
11303
11304 vcpu->run->exit_reason = KVM_EXIT_HYPERV;
11305 vcpu->run->hyperv = hv_vcpu->exit;
11306 r = 0;
11307 goto out;
11308 }
11309
11310 /*
11311 * KVM_REQ_HV_STIMER has to be processed after
11312 * KVM_REQ_CLOCK_UPDATE, because Hyper-V SynIC timers
11313 * depend on the guest clock being up-to-date
11314 */
11315 if (kvm_check_request(KVM_REQ_HV_STIMER, vcpu))
11316 kvm_hv_process_stimers(vcpu);
11317 #endif
11318 if (kvm_check_request(KVM_REQ_APICV_UPDATE, vcpu))
11319 kvm_vcpu_update_apicv(vcpu);
11320 if (kvm_check_request(KVM_REQ_APF_READY, vcpu))
11321 kvm_check_async_pf_completion(vcpu);
11322
11323 if (kvm_check_request(KVM_REQ_RECALC_INTERCEPTS, vcpu))
11324 kvm_x86_call(recalc_intercepts)(vcpu);
11325
11326 if (kvm_check_request(KVM_REQ_UPDATE_CPU_DIRTY_LOGGING, vcpu))
11327 kvm_x86_call(update_cpu_dirty_logging)(vcpu);
11328
11329 if (kvm_check_request(KVM_REQ_UPDATE_PROTECTED_GUEST_STATE, vcpu)) {
11330 kvm_vcpu_reset(vcpu, true);
11331 if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE) {
11332 r = 1;
11333 goto out;
11334 }
11335 }
11336 }
11337
11338 if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win ||
11339 kvm_xen_has_interrupt(vcpu)) {
11340 ++vcpu->stat.req_event;
11341 r = kvm_apic_accept_events(vcpu);
11342 if (r < 0) {
11343 r = 0;
11344 goto out;
11345 }
11346 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
11347 r = 1;
11348 goto out;
11349 }
11350
11351 r = kvm_check_and_inject_events(vcpu, &req_immediate_exit);
11352 if (r < 0) {
11353 r = 0;
11354 goto out;
11355 }
11356 if (req_int_win)
11357 kvm_x86_call(enable_irq_window)(vcpu);
11358
11359 if (kvm_lapic_enabled(vcpu)) {
11360 update_cr8_intercept(vcpu);
11361 kvm_lapic_sync_to_vapic(vcpu);
11362 }
11363 }
11364
11365 r = kvm_mmu_reload(vcpu);
11366 if (unlikely(r)) {
11367 goto cancel_injection;
11368 }
11369
11370 preempt_disable();
11371
11372 kvm_x86_call(prepare_switch_to_guest)(vcpu);
11373
11374 /*
11375 * Disable IRQs before setting IN_GUEST_MODE. Posted interrupt
11376 * IPI are then delayed after guest entry, which ensures that they
11377 * result in virtual interrupt delivery.
11378 */
11379 local_irq_disable();
11380
11381 /* Store vcpu->apicv_active before vcpu->mode. */
11382 smp_store_release(&vcpu->mode, IN_GUEST_MODE);
11383
11384 kvm_vcpu_srcu_read_unlock(vcpu);
11385
11386 /*
11387 * 1) We should set ->mode before checking ->requests. Please see
11388 * the comment in kvm_vcpu_exiting_guest_mode().
11389 *
11390 * 2) For APICv, we should set ->mode before checking PID.ON. This
11391 * pairs with the memory barrier implicit in pi_test_and_set_on
11392 * (see vmx_deliver_posted_interrupt).
11393 *
11394 * 3) This also orders the write to mode from any reads to the page
11395 * tables done while the VCPU is running. Please see the comment
11396 * in kvm_flush_remote_tlbs.
11397 */
11398 smp_mb__after_srcu_read_unlock();
11399
11400 /*
11401 * Process pending posted interrupts to handle the case where the
11402 * notification IRQ arrived in the host, or was never sent (because the
11403 * target vCPU wasn't running). Do this regardless of the vCPU's APICv
11404 * status, KVM doesn't update assigned devices when APICv is inhibited,
11405 * i.e. they can post interrupts even if APICv is temporarily disabled.
11406 */
11407 if (kvm_lapic_enabled(vcpu))
11408 kvm_x86_call(sync_pir_to_irr)(vcpu);
11409
11410 if (kvm_vcpu_exit_request(vcpu)) {
11411 vcpu->mode = OUTSIDE_GUEST_MODE;
11412 smp_wmb();
11413 local_irq_enable();
11414 preempt_enable();
11415 kvm_vcpu_srcu_read_lock(vcpu);
11416 r = 1;
11417 goto cancel_injection;
11418 }
11419
11420 run_flags = 0;
11421 if (req_immediate_exit) {
11422 run_flags |= KVM_RUN_FORCE_IMMEDIATE_EXIT;
11423 kvm_make_request(KVM_REQ_EVENT, vcpu);
11424 }
11425
11426 fpregs_assert_state_consistent();
11427 if (test_thread_flag(TIF_NEED_FPU_LOAD))
11428 switch_fpu_return();
11429
11430 if (vcpu->arch.guest_fpu.xfd_err)
11431 wrmsrq(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err);
11432
11433 kvm_load_xfeatures(vcpu, true);
11434
11435 if (unlikely(vcpu->arch.switch_db_regs &&
11436 !(vcpu->arch.switch_db_regs & KVM_DEBUGREG_AUTO_SWITCH))) {
11437 set_debugreg(DR7_FIXED_1, 7);
11438 set_debugreg(vcpu->arch.eff_db[0], 0);
11439 set_debugreg(vcpu->arch.eff_db[1], 1);
11440 set_debugreg(vcpu->arch.eff_db[2], 2);
11441 set_debugreg(vcpu->arch.eff_db[3], 3);
11442 /* When KVM_DEBUGREG_WONT_EXIT, dr6 is accessible in guest. */
11443 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
11444 run_flags |= KVM_RUN_LOAD_GUEST_DR6;
11445 } else if (unlikely(hw_breakpoint_active())) {
11446 set_debugreg(DR7_FIXED_1, 7);
11447 }
11448
11449 /*
11450 * Refresh the host DEBUGCTL snapshot after disabling IRQs, as DEBUGCTL
11451 * can be modified in IRQ context, e.g. via SMP function calls. Inform
11452 * vendor code if any host-owned bits were changed, e.g. so that the
11453 * value loaded into hardware while running the guest can be updated.
11454 */
11455 debug_ctl = get_debugctlmsr();
11456 if ((debug_ctl ^ vcpu->arch.host_debugctl) & kvm_x86_ops.HOST_OWNED_DEBUGCTL &&
11457 !vcpu->arch.guest_state_protected)
11458 run_flags |= KVM_RUN_LOAD_DEBUGCTL;
11459 vcpu->arch.host_debugctl = debug_ctl;
11460
11461 kvm_mediated_pmu_load(vcpu);
11462
11463 guest_timing_enter_irqoff();
11464
11465 /*
11466 * Swap PKRU with hardware breakpoints disabled to minimize the number
11467 * of flows where non-KVM code can run with guest state loaded.
11468 */
11469 kvm_load_guest_pkru(vcpu);
11470
11471 for (;;) {
11472 /*
11473 * Assert that vCPU vs. VM APICv state is consistent. An APICv
11474 * update must kick and wait for all vCPUs before toggling the
11475 * per-VM state, and responding vCPUs must wait for the update
11476 * to complete before servicing KVM_REQ_APICV_UPDATE.
11477 */
11478 WARN_ON_ONCE((kvm_vcpu_apicv_activated(vcpu) != kvm_vcpu_apicv_active(vcpu)) &&
11479 (kvm_get_apic_mode(vcpu) != LAPIC_MODE_DISABLED));
11480
11481 exit_fastpath = kvm_x86_call(vcpu_run)(vcpu, run_flags);
11482 if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
11483 break;
11484
11485 if (kvm_lapic_enabled(vcpu))
11486 kvm_x86_call(sync_pir_to_irr)(vcpu);
11487
11488 if (unlikely(kvm_vcpu_exit_request(vcpu))) {
11489 exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
11490 break;
11491 }
11492
11493 run_flags = 0;
11494
11495 /* Note, VM-Exits that go down the "slow" path are accounted below. */
11496 ++vcpu->stat.exits;
11497 }
11498
11499 kvm_load_host_pkru(vcpu);
11500
11501 kvm_mediated_pmu_put(vcpu);
11502
11503 /*
11504 * Do this here before restoring debug registers on the host. And
11505 * since we do this before handling the vmexit, a DR access vmexit
11506 * can (a) read the correct value of the debug registers, (b) set
11507 * KVM_DEBUGREG_WONT_EXIT again.
11508 */
11509 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) {
11510 WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP);
11511 WARN_ON(vcpu->arch.switch_db_regs & KVM_DEBUGREG_AUTO_SWITCH);
11512 kvm_x86_call(sync_dirty_debug_regs)(vcpu);
11513 kvm_update_dr0123(vcpu);
11514 kvm_update_dr7(vcpu);
11515 }
11516
11517 /*
11518 * If the guest has used debug registers, at least dr7
11519 * will be disabled while returning to the host.
11520 * If we don't have active breakpoints in the host, we don't
11521 * care about the messed up debug address registers. But if
11522 * we have some of them active, restore the old state.
11523 */
11524 if (hw_breakpoint_active())
11525 hw_breakpoint_restore();
11526
11527 vcpu->arch.last_vmentry_cpu = vcpu->cpu;
11528 vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
11529
11530 vcpu->mode = OUTSIDE_GUEST_MODE;
11531 smp_wmb();
11532
11533 kvm_load_xfeatures(vcpu, false);
11534
11535 /*
11536 * Sync xfd before calling handle_exit_irqoff() which may
11537 * rely on the fact that guest_fpu::xfd is up-to-date (e.g.
11538 * in #NM irqoff handler).
11539 */
11540 if (vcpu->arch.xfd_no_write_intercept)
11541 fpu_sync_guest_vmexit_xfd_state();
11542
11543 kvm_x86_call(handle_exit_irqoff)(vcpu);
11544
11545 if (vcpu->arch.guest_fpu.xfd_err)
11546 wrmsrq(MSR_IA32_XFD_ERR, 0);
11547
11548 /*
11549 * Mark this CPU as needing a branch predictor flush before running
11550 * userspace. Must be done before enabling preemption to ensure it gets
11551 * set for the CPU that actually ran the guest, and not the CPU that it
11552 * may migrate to.
11553 */
11554 if (cpu_feature_enabled(X86_FEATURE_IBPB_EXIT_TO_USER))
11555 this_cpu_write(x86_ibpb_exit_to_user, true);
11556
11557 /*
11558 * Consume any pending interrupts, including the possible source of
11559 * VM-Exit on SVM and any ticks that occur between VM-Exit and now.
11560 * An instruction is required after local_irq_enable() to fully unblock
11561 * interrupts on processors that implement an interrupt shadow, the
11562 * stat.exits increment will do nicely.
11563 */
11564 kvm_before_interrupt(vcpu, KVM_HANDLING_IRQ);
11565 local_irq_enable();
11566 ++vcpu->stat.exits;
11567 local_irq_disable();
11568 kvm_after_interrupt(vcpu);
11569
11570 /*
11571 * Wait until after servicing IRQs to account guest time so that any
11572 * ticks that occurred while running the guest are properly accounted
11573 * to the guest. Waiting until IRQs are enabled degrades the accuracy
11574 * of accounting via context tracking, but the loss of accuracy is
11575 * acceptable for all known use cases.
11576 */
11577 guest_timing_exit_irqoff();
11578
11579 local_irq_enable();
11580 preempt_enable();
11581
11582 kvm_vcpu_srcu_read_lock(vcpu);
11583
11584 /*
11585 * Call this to ensure WC buffers in guest are evicted after each VM
11586 * Exit, so that the evicted WC writes can be snooped across all cpus
11587 */
11588 smp_mb__after_srcu_read_lock();
11589
11590 /*
11591 * Profile KVM exit RIPs:
11592 */
11593 if (unlikely(prof_on == KVM_PROFILING &&
11594 !vcpu->arch.guest_state_protected)) {
11595 unsigned long rip = kvm_rip_read(vcpu);
11596 profile_hit(KVM_PROFILING, (void *)rip);
11597 }
11598
11599 if (unlikely(vcpu->arch.tsc_always_catchup))
11600 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
11601
11602 if (vcpu->arch.apic_attention)
11603 kvm_lapic_sync_from_vapic(vcpu);
11604
11605 if (unlikely(exit_fastpath == EXIT_FASTPATH_EXIT_USERSPACE))
11606 return 0;
11607
11608 r = kvm_x86_call(handle_exit)(vcpu, exit_fastpath);
11609 return r;
11610
11611 cancel_injection:
11612 if (req_immediate_exit)
11613 kvm_make_request(KVM_REQ_EVENT, vcpu);
11614 kvm_x86_call(cancel_injection)(vcpu);
11615 if (unlikely(vcpu->arch.apic_attention))
11616 kvm_lapic_sync_from_vapic(vcpu);
11617 out:
11618 return r;
11619 }
11620
11621 static bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
11622 {
11623 return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
11624 !vcpu->arch.apf.halted);
11625 }
11626
11627 bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
11628 {
11629 if (!list_empty_careful(&vcpu->async_pf.done))
11630 return true;
11631
11632 if (kvm_apic_has_pending_init_or_sipi(vcpu) &&
11633 kvm_apic_init_sipi_allowed(vcpu))
11634 return true;
11635
11636 if (kvm_is_exception_pending(vcpu))
11637 return true;
11638
11639 if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
11640 (vcpu->arch.nmi_pending &&
11641 kvm_x86_call(nmi_allowed)(vcpu, false)))
11642 return true;
11643
11644 #ifdef CONFIG_KVM_SMM
11645 if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
11646 (vcpu->arch.smi_pending &&
11647 kvm_x86_call(smi_allowed)(vcpu, false)))
11648 return true;
11649 #endif
11650
11651 if (kvm_test_request(KVM_REQ_PMI, vcpu))
11652 return true;
11653
11654 if (kvm_test_request(KVM_REQ_UPDATE_PROTECTED_GUEST_STATE, vcpu))
11655 return true;
11656
11657 if (kvm_arch_interrupt_allowed(vcpu) && kvm_cpu_has_interrupt(vcpu))
11658 return true;
11659
11660 if (kvm_hv_has_stimer_pending(vcpu))
11661 return true;
11662
11663 if (is_guest_mode(vcpu) &&
11664 kvm_x86_ops.nested_ops->has_events &&
11665 kvm_x86_ops.nested_ops->has_events(vcpu, false))
11666 return true;
11667
11668 if (kvm_xen_has_pending_events(vcpu))
11669 return true;
11670
11671 return false;
11672 }
11673 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_has_events);
11674
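/*
 * Returns true if the vCPU should be scheduled in: it is RUNNABLE and not in
 * a synthetic async-#PF halt, it has been unhalted by a PV kick, or it has a
 * pending event that would wake it.
 */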
11675 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
11676 {
11677 return kvm_vcpu_running(vcpu) || vcpu->arch.pv.pv_unhalted ||
11678 kvm_vcpu_has_events(vcpu);
11679 }
11680
11681 /* Called within kvm->srcu read side. */
11682 static inline int vcpu_block(struct kvm_vcpu *vcpu)
11683 {
11684 bool hv_timer;
11685
11686 if (!kvm_arch_vcpu_runnable(vcpu)) {
11687 /*
11688 * Switch to the software timer before halt-polling/blocking as
11689 * the guest's timer may be a break event for the vCPU, and the
11690 * hypervisor timer runs only when the CPU is in guest mode.
11691 * Switch before halt-polling so that KVM recognizes an expired
11692 * timer before blocking.
11693 */
11694 hv_timer = kvm_lapic_hv_timer_in_use(vcpu);
11695 if (hv_timer)
11696 kvm_lapic_switch_to_sw_timer(vcpu);
11697
11698 kvm_vcpu_srcu_read_unlock(vcpu);
11699 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
11700 kvm_vcpu_halt(vcpu);
11701 else
11702 kvm_vcpu_block(vcpu);
11703 kvm_vcpu_srcu_read_lock(vcpu);
11704
11705 if (hv_timer)
11706 kvm_lapic_switch_to_hv_timer(vcpu);
11707
11708 /*
11709 * If the vCPU is not runnable, a signal or another host event
11710 * of some kind is pending; service it without changing the
11711 * vCPU's activity state.
11712 */
11713 if (!kvm_arch_vcpu_runnable(vcpu))
11714 return 1;
11715 }
11716
11717 /*
11718 * Evaluate nested events before exiting the halted state. This allows
11719 * the halt state to be recorded properly in the VMCS12's activity
11720 * state field (AMD does not have a similar field and a VM-Exit always
11721 * causes a spurious wakeup from HLT).
11722 */
11723 if (is_guest_mode(vcpu)) {
11724 int r = kvm_check_nested_events(vcpu);
11725
11726 if (r < 0 && r != -EBUSY)
11727 return 0;
11728 }
11729
11730 if (kvm_apic_accept_events(vcpu) < 0)
11731 return 0;
11732 switch(vcpu->arch.mp_state) {
11733 case KVM_MP_STATE_HALTED:
11734 case KVM_MP_STATE_AP_RESET_HOLD:
11735 kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE);
11736 fallthrough;
11737 case KVM_MP_STATE_RUNNABLE:
11738 vcpu->arch.apf.halted = false;
11739 break;
11740 case KVM_MP_STATE_INIT_RECEIVED:
11741 break;
11742 default:
11743 WARN_ON_ONCE(1);
11744 break;
11745 }
11746 return 1;
11747 }
11748
11749 /* Called within kvm->srcu read side. */
11750 static int vcpu_run(struct kvm_vcpu *vcpu)
11751 {
11752 int r;
11753
11754 vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
11755
11756 for (;;) {
11757 /*
11758 * If another guest vCPU requests a PV TLB flush in the middle
11759 * of instruction emulation, the rest of the emulation could
11760 * use a stale page translation. Assume that any code after
11761 * this point can start executing an instruction.
11762 */
11763 vcpu->arch.at_instruction_boundary = false;
11764 if (kvm_vcpu_running(vcpu)) {
11765 r = vcpu_enter_guest(vcpu);
11766 } else {
11767 r = vcpu_block(vcpu);
11768 }
11769
11770 if (r <= 0)
11771 break;
11772
11773 kvm_clear_request(KVM_REQ_UNBLOCK, vcpu);
11774 if (kvm_xen_has_pending_events(vcpu))
11775 kvm_xen_inject_pending_events(vcpu);
11776
11777 if (kvm_cpu_has_pending_timer(vcpu))
11778 kvm_inject_pending_timer_irqs(vcpu);
11779
11780 if (dm_request_for_irq_injection(vcpu) &&
11781 kvm_vcpu_ready_for_interrupt_injection(vcpu)) {
11782 r = 0;
11783 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
11784 ++vcpu->stat.request_irq_exits;
11785 break;
11786 }
11787
11788 if (__xfer_to_guest_mode_work_pending()) {
11789 kvm_vcpu_srcu_read_unlock(vcpu);
11790 r = kvm_xfer_to_guest_mode_handle_work(vcpu);
11791 kvm_vcpu_srcu_read_lock(vcpu);
11792 if (r)
11793 return r;
11794 }
11795 }
11796
11797 return r;
11798 }
11799
11800 static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason)
11801 {
11802 /*
11803 * The vCPU has halted, e.g. executed HLT. Update the run state if the
11804 * local APIC is in-kernel, the run loop will detect the non-runnable
11805 * state and halt the vCPU. Exit to userspace if the local APIC is
11806 * managed by userspace, in which case userspace is responsible for
11807 * handling wake events.
11808 */
11809 ++vcpu->stat.halt_exits;
11810 if (lapic_in_kernel(vcpu)) {
11811 if (kvm_vcpu_has_events(vcpu) || vcpu->arch.pv.pv_unhalted)
11812 state = KVM_MP_STATE_RUNNABLE;
11813 kvm_set_mp_state(vcpu, state);
11814 return 1;
11815 } else {
11816 vcpu->run->exit_reason = reason;
11817 return 0;
11818 }
11819 }
11820
11821 int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu)
11822 {
11823 return __kvm_emulate_halt(vcpu, KVM_MP_STATE_HALTED, KVM_EXIT_HLT);
11824 }
11825 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_halt_noskip);
11826
11827 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
11828 {
11829 int ret = kvm_skip_emulated_instruction(vcpu);
11830 /*
11831 * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered
11832 * KVM_EXIT_DEBUG here.
11833 */
11834 return kvm_emulate_halt_noskip(vcpu) && ret;
11835 }
11836 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_halt);
11837
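/*
 * Fastpath handling of HLT VM-Exits: emulate the HLT without going through
 * the full exit handler, and re-enter the guest immediately if the vCPU is
 * still runnable, e.g. if a wake event arrived before the halt completed.
 */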
11838 fastpath_t handle_fastpath_hlt(struct kvm_vcpu *vcpu)
11839 {
11840 if (!kvm_pmu_is_fastpath_emulation_allowed(vcpu))
11841 return EXIT_FASTPATH_NONE;
11842
11843 if (!kvm_emulate_halt(vcpu))
11844 return EXIT_FASTPATH_EXIT_USERSPACE;
11845
11846 if (kvm_vcpu_running(vcpu))
11847 return EXIT_FASTPATH_REENTER_GUEST;
11848
11849 return EXIT_FASTPATH_EXIT_HANDLED;
11850 }
11851 EXPORT_SYMBOL_FOR_KVM_INTERNAL(handle_fastpath_hlt);
11852
11853 int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu)
11854 {
11855 int ret = kvm_skip_emulated_instruction(vcpu);
11856
11857 return __kvm_emulate_halt(vcpu, KVM_MP_STATE_AP_RESET_HOLD,
11858 KVM_EXIT_AP_RESET_HOLD) && ret;
11859 }
11860 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_ap_reset_hold);
11861
11862 bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
11863 {
11864 return kvm_vcpu_apicv_active(vcpu) &&
11865 kvm_x86_call(dy_apicv_has_pending_interrupt)(vcpu);
11866 }
11867
11868 bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
11869 {
11870 return vcpu->arch.preempted_in_kernel;
11871 }
11872
11873 bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
11874 {
11875 if (READ_ONCE(vcpu->arch.pv.pv_unhalted))
11876 return true;
11877
11878 if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
11879 #ifdef CONFIG_KVM_SMM
11880 kvm_test_request(KVM_REQ_SMI, vcpu) ||
11881 #endif
11882 kvm_test_request(KVM_REQ_EVENT, vcpu))
11883 return true;
11884
11885 return kvm_arch_dy_has_pending_interrupt(vcpu);
11886 }
11887
11888 static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
11889 {
11890 return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
11891 }
11892
11893 static int complete_emulated_pio(struct kvm_vcpu *vcpu)
11894 {
11895 if (KVM_BUG_ON(!vcpu->arch.pio.count, vcpu->kvm))
11896 return -EIO;
11897
11898 return complete_emulated_io(vcpu);
11899 }
11900
11901 /*
11902 * Implements the following, as a state machine:
11903 *
11904 * read:
11905 * for each fragment
11906 * for each mmio piece in the fragment
11907 * write gpa, len
11908 * exit
11909 * copy data
11910 * execute insn
11911 *
11912 * write:
11913 * for each fragment
11914 * for each mmio piece in the fragment
11915 * write gpa, len
11916 * copy data
11917 * exit
11918 */
11919 static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
11920 {
11921 struct kvm_run *run = vcpu->run;
11922 struct kvm_mmio_fragment *frag;
11923 unsigned len;
11924
11925 if (KVM_BUG_ON(!vcpu->mmio_needed, vcpu->kvm))
11926 return -EIO;
11927
11928 /* Complete previous fragment */
11929 frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment];
11930 len = min(8u, frag->len);
11931 if (!vcpu->mmio_is_write)
11932 memcpy(frag->data, run->mmio.data, len);
11933
11934 if (frag->len <= 8) {
11935 /* Switch to the next fragment. */
11936 frag++;
11937 vcpu->mmio_cur_fragment++;
11938 } else {
11939 if (WARN_ON_ONCE(frag->data == &frag->val))
11940 return -EIO;
11941
11942 /* Go forward to the next mmio piece. */
11943 frag->data += len;
11944 frag->gpa += len;
11945 frag->len -= len;
11946 }
11947
11948 if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) {
11949 vcpu->mmio_needed = 0;
11950
11951 /* FIXME: return into emulator if single-stepping. */
11952 if (vcpu->mmio_is_write)
11953 return 1;
11954 vcpu->mmio_read_completed = 1;
11955 return complete_emulated_io(vcpu);
11956 }
11957
11958 kvm_prepare_emulated_mmio_exit(vcpu, frag);
11959 vcpu->arch.complete_userspace_io = complete_emulated_mmio;
11960 return 0;
11961 }
11962
11963 /* Swap (qemu) user FPU context for the guest FPU context. */
11964 static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
11965 {
11966 if (KVM_BUG_ON(vcpu->arch.guest_fpu.fpstate->in_use, vcpu->kvm))
11967 return;
11968
11969 /* Exclude PKRU, it's restored separately immediately after VM-Exit. */
11970 fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, true);
11971 trace_kvm_fpu(1);
11972 }
11973
11974 /* When vcpu_run ends, restore user space FPU context. */
11975 static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
11976 {
11977 if (KVM_BUG_ON(!vcpu->arch.guest_fpu.fpstate->in_use, vcpu->kvm))
11978 return;
11979
11980 fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, false);
11981 ++vcpu->stat.fpu_reload;
11982 trace_kvm_fpu(0);
11983 }
11984
11985 static int kvm_x86_vcpu_pre_run(struct kvm_vcpu *vcpu)
11986 {
11987 /*
11988 * Userspace may have modified vCPU state, mark nested_run_pending as
11989 * "untrusted" to avoid triggering false-positive WARNs.
11990 */
11991 if (vcpu->arch.nested_run_pending == KVM_NESTED_RUN_PENDING)
11992 vcpu->arch.nested_run_pending = KVM_NESTED_RUN_PENDING_UNTRUSTED;
11993
11994 /*
11995 * SIPI_RECEIVED is obsolete; KVM leaves the vCPU in Wait-For-SIPI and
11996 * tracks the pending SIPI separately. SIPI_RECEIVED is still accepted
11997 * by KVM_SET_VCPU_EVENTS for backwards compatibility, but should be
11998 * converted to INIT_RECEIVED.
11999 */
12000 if (WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED))
12001 return -EINVAL;
12002
12003 /*
12004 * Disallow running the vCPU if userspace forced it into an impossible
12005 * MP_STATE, e.g. if the vCPU is in WFS but SIPI is blocked.
12006 */
12007 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED &&
12008 !kvm_apic_init_sipi_allowed(vcpu))
12009 return -EINVAL;
12010
12011 return kvm_x86_call(vcpu_pre_run)(vcpu);
12012 }
12013
12014 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
12015 {
12016 struct kvm_queued_exception *ex = &vcpu->arch.exception;
12017 struct kvm_run *kvm_run = vcpu->run;
12018 u64 sync_valid_fields;
12019 int r;
12020
12021 r = kvm_mmu_post_init_vm(vcpu->kvm);
12022 if (r)
12023 return r;
12024
12025 vcpu_load(vcpu);
12026 kvm_sigset_activate(vcpu);
12027 kvm_run->flags = 0;
12028 kvm_load_guest_fpu(vcpu);
12029
12030 kvm_vcpu_srcu_read_lock(vcpu);
12031 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
12032 if (!vcpu->wants_to_run) {
12033 r = -EINTR;
12034 goto out;
12035 }
12036
12037 /*
12038 * Don't bother switching APIC timer emulation from the
12039 * hypervisor timer to the software timer; the only way for the
12040 * APIC timer to be active is if userspace stuffed vCPU state,
12041 * i.e. put the vCPU into a nonsensical state. Only an INIT
12042 * will transition the vCPU out of UNINITIALIZED (without more
12043 * state stuffing from userspace), which will reset the local
12044 * APIC and thus cancel the timer or drop the IRQ (if the timer
12045 * already expired).
12046 */
12047 kvm_vcpu_srcu_read_unlock(vcpu);
12048 kvm_vcpu_block(vcpu);
12049 kvm_vcpu_srcu_read_lock(vcpu);
12050
12051 if (kvm_apic_accept_events(vcpu) < 0) {
12052 r = 0;
12053 goto out;
12054 }
12055 r = -EAGAIN;
12056 if (signal_pending(current)) {
12057 r = -EINTR;
12058 kvm_run->exit_reason = KVM_EXIT_INTR;
12059 ++vcpu->stat.signal_exits;
12060 }
12061 goto out;
12062 }
12063
12064 sync_valid_fields = kvm_sync_valid_fields(vcpu->kvm);
12065 if ((kvm_run->kvm_valid_regs & ~sync_valid_fields) ||
12066 (kvm_run->kvm_dirty_regs & ~sync_valid_fields)) {
12067 r = -EINVAL;
12068 goto out;
12069 }
12070
12071 if (kvm_run->kvm_dirty_regs) {
12072 r = sync_regs(vcpu);
12073 if (r != 0)
12074 goto out;
12075 }
12076
12077 /* re-sync apic's tpr */
12078 if (!lapic_in_kernel(vcpu)) {
12079 if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
12080 r = -EINVAL;
12081 goto out;
12082 }
12083 }
12084
12085 /*
12086 * If userspace set a pending exception and L2 is active, convert it to
12087 * a pending VM-Exit if L1 wants to intercept the exception.
12088 */
12089 if (vcpu->arch.exception_from_userspace && is_guest_mode(vcpu) &&
12090 kvm_x86_ops.nested_ops->is_exception_vmexit(vcpu, ex->vector,
12091 ex->error_code)) {
12092 kvm_queue_exception_vmexit(vcpu, ex->vector,
12093 ex->has_error_code, ex->error_code,
12094 ex->has_payload, ex->payload);
12095 ex->injected = false;
12096 ex->pending = false;
12097 }
12098 vcpu->arch.exception_from_userspace = false;
12099
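/*
 * A completion callback returns > 0 to resume the guest, 0 to bounce the
 * exit back to userspace, and < 0 on error, i.e. it follows the same
 * return convention as vcpu_run().
 */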
12100 if (unlikely(vcpu->arch.complete_userspace_io)) {
12101 int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io;
12102 vcpu->arch.complete_userspace_io = NULL;
12103 r = cui(vcpu);
12104 if (r <= 0)
12105 goto out;
12106 } else {
12107 WARN_ON_ONCE(vcpu->arch.pio.count);
12108 WARN_ON_ONCE(vcpu->mmio_needed);
12109 }
12110
12111 if (!vcpu->wants_to_run) {
12112 r = -EINTR;
12113 goto out;
12114 }
12115
12116 r = kvm_x86_vcpu_pre_run(vcpu);
12117 if (r <= 0)
12118 goto out;
12119
12120 r = vcpu_run(vcpu);
12121
12122 out:
12123 kvm_put_guest_fpu(vcpu);
12124 if (kvm_run->kvm_valid_regs && likely(!vcpu->arch.guest_state_protected))
12125 store_regs(vcpu);
12126 post_kvm_run_save(vcpu);
12127 kvm_vcpu_srcu_read_unlock(vcpu);
12128
12129 kvm_sigset_deactivate(vcpu);
12130 vcpu_put(vcpu);
12131 return r;
12132 }
12133
12134 static void __get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
12135 {
12136 if (vcpu->arch.emulate_regs_need_sync_to_vcpu) {
12137 /*
12138 * We are here if userspace calls get_regs() in the middle of
12139 * instruction emulation. Register state needs to be copied
12140 * back from the emulation context to the vcpu. Userspace
12141 * shouldn't usually do that, but some badly designed PV devices
12142 * (vmware backdoor interface) need this to work.
12143 */
12144 emulator_writeback_register_cache(vcpu->arch.emulate_ctxt);
12145 vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
12146 }
12147 regs->rax = kvm_rax_read(vcpu);
12148 regs->rbx = kvm_rbx_read(vcpu);
12149 regs->rcx = kvm_rcx_read(vcpu);
12150 regs->rdx = kvm_rdx_read(vcpu);
12151 regs->rsi = kvm_rsi_read(vcpu);
12152 regs->rdi = kvm_rdi_read(vcpu);
12153 regs->rsp = kvm_rsp_read(vcpu);
12154 regs->rbp = kvm_rbp_read(vcpu);
12155 #ifdef CONFIG_X86_64
12156 regs->r8 = kvm_r8_read(vcpu);
12157 regs->r9 = kvm_r9_read(vcpu);
12158 regs->r10 = kvm_r10_read(vcpu);
12159 regs->r11 = kvm_r11_read(vcpu);
12160 regs->r12 = kvm_r12_read(vcpu);
12161 regs->r13 = kvm_r13_read(vcpu);
12162 regs->r14 = kvm_r14_read(vcpu);
12163 regs->r15 = kvm_r15_read(vcpu);
12164 #endif
12165
12166 regs->rip = kvm_rip_read(vcpu);
12167 regs->rflags = kvm_get_rflags(vcpu);
12168 }
12169
12170 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
12171 {
12172 if (vcpu->kvm->arch.has_protected_state &&
12173 vcpu->arch.guest_state_protected)
12174 return -EINVAL;
12175
12176 vcpu_load(vcpu);
12177 __get_regs(vcpu, regs);
12178 vcpu_put(vcpu);
12179 return 0;
12180 }
12181
12182 static void __set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
12183 {
12184 vcpu->arch.emulate_regs_need_sync_from_vcpu = true;
12185 vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
12186
12187 kvm_rax_write(vcpu, regs->rax);
12188 kvm_rbx_write(vcpu, regs->rbx);
12189 kvm_rcx_write(vcpu, regs->rcx);
12190 kvm_rdx_write(vcpu, regs->rdx);
12191 kvm_rsi_write(vcpu, regs->rsi);
12192 kvm_rdi_write(vcpu, regs->rdi);
12193 kvm_rsp_write(vcpu, regs->rsp);
12194 kvm_rbp_write(vcpu, regs->rbp);
12195 #ifdef CONFIG_X86_64
12196 kvm_r8_write(vcpu, regs->r8);
12197 kvm_r9_write(vcpu, regs->r9);
12198 kvm_r10_write(vcpu, regs->r10);
12199 kvm_r11_write(vcpu, regs->r11);
12200 kvm_r12_write(vcpu, regs->r12);
12201 kvm_r13_write(vcpu, regs->r13);
12202 kvm_r14_write(vcpu, regs->r14);
12203 kvm_r15_write(vcpu, regs->r15);
12204 #endif
12205
12206 kvm_rip_write(vcpu, regs->rip);
12207 kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED);
12208
12209 vcpu->arch.exception.pending = false;
12210 vcpu->arch.exception_vmexit.pending = false;
12211
12212 kvm_make_request(KVM_REQ_EVENT, vcpu);
12213 }
12214
12215 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
12216 {
12217 if (vcpu->kvm->arch.has_protected_state &&
12218 vcpu->arch.guest_state_protected)
12219 return -EINVAL;
12220
12221 vcpu_load(vcpu);
12222 __set_regs(vcpu, regs);
12223 vcpu_put(vcpu);
12224 return 0;
12225 }
12226
12227 static void __get_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
12228 {
12229 struct desc_ptr dt;
12230
12231 if (vcpu->arch.guest_state_protected)
12232 goto skip_protected_regs;
12233
12234 kvm_handle_exception_payload_quirk(vcpu);
12235
12236 kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
12237 kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
12238 kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
12239 kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
12240 kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
12241 kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
12242
12243 kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
12244 kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
12245
12246 kvm_x86_call(get_idt)(vcpu, &dt);
12247 sregs->idt.limit = dt.size;
12248 sregs->idt.base = dt.address;
12249 kvm_x86_call(get_gdt)(vcpu, &dt);
12250 sregs->gdt.limit = dt.size;
12251 sregs->gdt.base = dt.address;
12252
12253 sregs->cr2 = vcpu->arch.cr2;
12254 sregs->cr3 = kvm_read_cr3(vcpu);
12255
12256 skip_protected_regs:
12257 sregs->cr0 = kvm_read_cr0(vcpu);
12258 sregs->cr4 = kvm_read_cr4(vcpu);
12259 sregs->cr8 = kvm_get_cr8(vcpu);
12260 sregs->efer = vcpu->arch.efer;
12261 sregs->apic_base = vcpu->arch.apic_base;
12262 }
12263
12264 static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
12265 {
12266 __get_sregs_common(vcpu, sregs);
12267
12268 if (vcpu->arch.guest_state_protected)
12269 return;
12270
12271 if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft)
12272 set_bit(vcpu->arch.interrupt.nr,
12273 (unsigned long *)sregs->interrupt_bitmap);
12274 }
12275
12276 static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2)
12277 {
12278 int i;
12279
12280 __get_sregs_common(vcpu, (struct kvm_sregs *)sregs2);
12281
12282 if (vcpu->arch.guest_state_protected)
12283 return;
12284
12285 if (is_pae_paging(vcpu)) {
12286 kvm_vcpu_srcu_read_lock(vcpu);
12287 for (i = 0 ; i < 4 ; i++)
12288 sregs2->pdptrs[i] = kvm_pdptr_read(vcpu, i);
12289 sregs2->flags |= KVM_SREGS2_FLAGS_PDPTRS_VALID;
12290 kvm_vcpu_srcu_read_unlock(vcpu);
12291 }
12292 }
12293
12294 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
12295 struct kvm_sregs *sregs)
12296 {
12297 if (vcpu->kvm->arch.has_protected_state &&
12298 vcpu->arch.guest_state_protected)
12299 return -EINVAL;
12300
12301 vcpu_load(vcpu);
12302 __get_sregs(vcpu, sregs);
12303 vcpu_put(vcpu);
12304 return 0;
12305 }
12306
12307 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
12308 struct kvm_mp_state *mp_state)
12309 {
12310 int r;
12311
12312 vcpu_load(vcpu);
12313 kvm_vcpu_srcu_read_lock(vcpu);
12314
12315 r = kvm_apic_accept_events(vcpu);
12316 if (r < 0)
12317 goto out;
12318 r = 0;
12319
12320 if ((vcpu->arch.mp_state == KVM_MP_STATE_HALTED ||
12321 vcpu->arch.mp_state == KVM_MP_STATE_AP_RESET_HOLD) &&
12322 vcpu->arch.pv.pv_unhalted)
12323 mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
12324 else
12325 mp_state->mp_state = vcpu->arch.mp_state;
12326
12327 out:
12328 kvm_vcpu_srcu_read_unlock(vcpu);
12329 vcpu_put(vcpu);
12330 return r;
12331 }
12332
12333 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
12334 struct kvm_mp_state *mp_state)
12335 {
12336 int ret = -EINVAL;
12337
12338 vcpu_load(vcpu);
12339
12340 switch (mp_state->mp_state) {
12341 case KVM_MP_STATE_UNINITIALIZED:
12342 case KVM_MP_STATE_HALTED:
12343 case KVM_MP_STATE_AP_RESET_HOLD:
12344 case KVM_MP_STATE_INIT_RECEIVED:
12345 case KVM_MP_STATE_SIPI_RECEIVED:
12346 if (!lapic_in_kernel(vcpu))
12347 goto out;
12348 break;
12349
12350 case KVM_MP_STATE_RUNNABLE:
12351 break;
12352
12353 default:
12354 goto out;
12355 }
12356
12357 /*
12358 * SIPI_RECEIVED is obsolete and no longer used internally; KVM instead
12359 * leaves the vCPU in INIT_RECEIVED (Wait-For-SIPI) and pends the SIPI.
12360 * Translate SIPI_RECEIVED as appropriate for backwards compatibility.
12361 */
12362 if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
12363 mp_state->mp_state = KVM_MP_STATE_INIT_RECEIVED;
12364 set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);
12365 }
12366
12367 kvm_set_mp_state(vcpu, mp_state->mp_state);
12368 kvm_make_request(KVM_REQ_EVENT, vcpu);
12369
12370 ret = 0;
12371 out:
12372 vcpu_put(vcpu);
12373 return ret;
12374 }
12375
12376 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
12377 int reason, bool has_error_code, u32 error_code)
12378 {
12379 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
12380 int ret;
12381
12382 if (kvm_is_cr4_bit_set(vcpu, X86_CR4_CET)) {
12383 u64 u_cet, s_cet;
12384
12385 /*
12386 * Check both User and Supervisor on task switches as inter-
12387 * privilege level task switches are impacted by CET at both
12388 * the current privilege level and the new privilege level, and
12389 * that information is not known at this time. The expectation
12390 * is that the guest won't require emulation of task switches
12391 * while using IBT or Shadow Stacks.
12392 */
12393 if (__kvm_emulate_msr_read(vcpu, MSR_IA32_U_CET, &u_cet) ||
12394 __kvm_emulate_msr_read(vcpu, MSR_IA32_S_CET, &s_cet))
12395 goto unhandled_task_switch;
12396
12397 if ((u_cet | s_cet) & (CET_ENDBR_EN | CET_SHSTK_EN))
12398 goto unhandled_task_switch;
12399 }
12400
12401 init_emulate_ctxt(vcpu);
12402
12403 ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason,
12404 has_error_code, error_code);
12405
12406 /*
12407 * Report an error to userspace if MMIO is needed, as KVM doesn't support
12408 * MMIO during a task switch (or any other complex operation).
12409 */
12410 if (ret || vcpu->mmio_needed)
12411 goto unhandled_task_switch;
12412
12413 kvm_rip_write(vcpu, ctxt->eip);
12414 kvm_set_rflags(vcpu, ctxt->eflags);
12415 return 1;
12416
12417 unhandled_task_switch:
12418 vcpu->mmio_needed = false;
12419 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
12420 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
12421 vcpu->run->internal.ndata = 0;
12422 return 0;
12423 }
12424 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_task_switch);
12425
12426 static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
12427 {
12428 if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) {
12429 /*
12430 * When EFER.LME and CR0.PG are set, the processor is in
12431 * 64-bit mode (though maybe in a 32-bit code segment).
12432 * CR4.PAE and EFER.LMA must be set.
12433 */
12434 if (!(sregs->cr4 & X86_CR4_PAE) || !(sregs->efer & EFER_LMA))
12435 return false;
12436 if (!kvm_vcpu_is_legal_cr3(vcpu, sregs->cr3))
12437 return false;
12438 } else {
12439 /*
12440 * Not in 64-bit mode: EFER.LMA is clear and the code
12441 * segment cannot be 64-bit.
12442 */
12443 if (sregs->efer & EFER_LMA || sregs->cs.l)
12444 return false;
12445 }
12446
12447 return kvm_is_valid_cr4(vcpu, sregs->cr4) &&
12448 kvm_is_valid_cr0(vcpu, sregs->cr0);
12449 }
12450
12451 static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs,
12452 int *mmu_reset_needed, bool update_pdptrs)
12453 {
12454 int idx;
12455 struct desc_ptr dt;
12456
12457 if (!kvm_is_valid_sregs(vcpu, sregs))
12458 return -EINVAL;
12459
12460 if (kvm_apic_set_base(vcpu, sregs->apic_base, true))
12461 return -EINVAL;
12462
12463 if (vcpu->arch.guest_state_protected)
12464 return 0;
12465
12466 dt.size = sregs->idt.limit;
12467 dt.address = sregs->idt.base;
12468 kvm_x86_call(set_idt)(vcpu, &dt);
12469 dt.size = sregs->gdt.limit;
12470 dt.address = sregs->gdt.base;
12471 kvm_x86_call(set_gdt)(vcpu, &dt);
12472
12473 vcpu->arch.cr2 = sregs->cr2;
12474 *mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
12475 vcpu->arch.cr3 = sregs->cr3;
12476 kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
12477 kvm_x86_call(post_set_cr3)(vcpu, sregs->cr3);
12478
12479 kvm_set_cr8(vcpu, sregs->cr8);
12480
12481 *mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
12482 kvm_x86_call(set_efer)(vcpu, sregs->efer);
12483
12484 *mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
12485 kvm_x86_call(set_cr0)(vcpu, sregs->cr0);
12486
12487 *mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
12488 kvm_x86_call(set_cr4)(vcpu, sregs->cr4);
12489
12490 if (update_pdptrs) {
12491 idx = srcu_read_lock(&vcpu->kvm->srcu);
12492 if (is_pae_paging(vcpu)) {
12493 load_pdptrs(vcpu, kvm_read_cr3(vcpu));
12494 *mmu_reset_needed = 1;
12495 }
12496 srcu_read_unlock(&vcpu->kvm->srcu, idx);
12497 }
12498
12499 kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
12500 kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
12501 kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
12502 kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
12503 kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
12504 kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
12505
12506 kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
12507 kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
12508
12509 update_cr8_intercept(vcpu);
12510
12511 /* Older userspace won't unhalt the vcpu on reset. */
12512 if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
12513 sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
12514 !is_protmode(vcpu))
12515 kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE);
12516
12517 return 0;
12518 }
12519
12520 static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
12521 {
12522 int pending_vec, max_bits;
12523 int mmu_reset_needed = 0;
12524 int ret = __set_sregs_common(vcpu, sregs, &mmu_reset_needed, true);
12525
12526 if (ret)
12527 return ret;
12528
12529 if (mmu_reset_needed) {
12530 kvm_mmu_reset_context(vcpu);
12531 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
12532 }
12533
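/*
 * Re-inject the interrupt recorded in the legacy interrupt_bitmap, if any;
 * __get_sregs() saves an injected-but-not-yet-delivered external interrupt
 * there.
 */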
12534 max_bits = KVM_NR_INTERRUPTS;
12535 pending_vec = find_first_bit(
12536 (const unsigned long *)sregs->interrupt_bitmap, max_bits);
12537
12538 if (pending_vec < max_bits) {
12539 kvm_queue_interrupt(vcpu, pending_vec, false);
12540 pr_debug("Set back pending irq %d\n", pending_vec);
12541 kvm_make_request(KVM_REQ_EVENT, vcpu);
12542 }
12543 return 0;
12544 }
12545
12546 static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2)
12547 {
12548 int mmu_reset_needed = 0;
12549 bool valid_pdptrs = sregs2->flags & KVM_SREGS2_FLAGS_PDPTRS_VALID;
12550 bool pae = (sregs2->cr0 & X86_CR0_PG) && (sregs2->cr4 & X86_CR4_PAE) &&
12551 !(sregs2->efer & EFER_LMA);
12552 int i, ret;
12553
12554 if (sregs2->flags & ~KVM_SREGS2_FLAGS_PDPTRS_VALID)
12555 return -EINVAL;
12556
12557 if (valid_pdptrs && (!pae || vcpu->arch.guest_state_protected))
12558 return -EINVAL;
12559
12560 ret = __set_sregs_common(vcpu, (struct kvm_sregs *)sregs2,
12561 &mmu_reset_needed, !valid_pdptrs);
12562 if (ret)
12563 return ret;
12564
12565 if (valid_pdptrs) {
12566 for (i = 0; i < 4 ; i++)
12567 kvm_pdptr_write(vcpu, i, sregs2->pdptrs[i]);
12568
12569 kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
12570 mmu_reset_needed = 1;
12571 vcpu->arch.pdptrs_from_userspace = true;
12572 }
12573 if (mmu_reset_needed) {
12574 kvm_mmu_reset_context(vcpu);
12575 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
12576 }
12577 return 0;
12578 }
12579
12580 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
12581 struct kvm_sregs *sregs)
12582 {
12583 int ret;
12584
12585 if (vcpu->kvm->arch.has_protected_state &&
12586 vcpu->arch.guest_state_protected)
12587 return -EINVAL;
12588
12589 vcpu_load(vcpu);
12590 ret = __set_sregs(vcpu, sregs);
12591 vcpu_put(vcpu);
12592 return ret;
12593 }
12594
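/*
 * Inhibit APIC virtualization while any vCPU has IRQs blocked via
 * KVM_GUESTDBG_BLOCKIRQ, and drop the inhibit once no vCPU needs it.
 */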
12595 static void kvm_arch_vcpu_guestdbg_update_apicv_inhibit(struct kvm *kvm)
12596 {
12597 bool set = false;
12598 struct kvm_vcpu *vcpu;
12599 unsigned long i;
12600
12601 if (!enable_apicv)
12602 return;
12603
12604 down_write(&kvm->arch.apicv_update_lock);
12605
12606 kvm_for_each_vcpu(i, vcpu, kvm) {
12607 if (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ) {
12608 set = true;
12609 break;
12610 }
12611 }
12612 __kvm_set_or_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_BLOCKIRQ, set);
12613 up_write(&kvm->arch.apicv_update_lock);
12614 }
12615
12616 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
12617 struct kvm_guest_debug *dbg)
12618 {
12619 unsigned long rflags;
12620 int i, r;
12621
12622 if (vcpu->arch.guest_state_protected)
12623 return -EINVAL;
12624
12625 vcpu_load(vcpu);
12626
12627 if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
12628 r = -EBUSY;
12629 if (kvm_is_exception_pending(vcpu) || vcpu->arch.exception.injected)
12630 goto out;
12631 if (dbg->control & KVM_GUESTDBG_INJECT_DB)
12632 kvm_queue_exception(vcpu, DB_VECTOR);
12633 else
12634 kvm_queue_exception(vcpu, BP_VECTOR);
12635 }
12636
12637 /*
12638 * Read rflags as long as potentially injected trace flags are still
12639 * filtered out.
12640 */
12641 rflags = kvm_get_rflags(vcpu);
12642
12643 vcpu->guest_debug = dbg->control;
12644 if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
12645 vcpu->guest_debug = 0;
12646
12647 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
12648 for (i = 0; i < KVM_NR_DB_REGS; ++i)
12649 vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
12650 vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7];
12651 } else {
12652 for (i = 0; i < KVM_NR_DB_REGS; i++)
12653 vcpu->arch.eff_db[i] = vcpu->arch.db[i];
12654 }
12655 kvm_update_dr7(vcpu);
12656
12657 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
12658 vcpu->arch.singlestep_rip = kvm_get_linear_rip(vcpu);
12659
12660 /*
12661 * Trigger an rflags update that will inject or remove the trace
12662 * flags.
12663 */
12664 kvm_set_rflags(vcpu, rflags);
12665
12666 kvm_x86_call(update_exception_bitmap)(vcpu);
12667
12668 kvm_arch_vcpu_guestdbg_update_apicv_inhibit(vcpu->kvm);
12669
12670 r = 0;
12671
12672 out:
12673 vcpu_put(vcpu);
12674 return r;
12675 }
12676
12677 /*
12678 * Translate a guest virtual address to a guest physical address.
12679 */
12680 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
12681 struct kvm_translation *tr)
12682 {
12683 unsigned long vaddr = tr->linear_address;
12684 gpa_t gpa;
12685 int idx;
12686
12687 vcpu_load(vcpu);
12688
12689 idx = srcu_read_lock(&vcpu->kvm->srcu);
12690 gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
12691 srcu_read_unlock(&vcpu->kvm->srcu, idx);
12692 tr->physical_address = gpa;
12693 tr->valid = gpa != INVALID_GPA;
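/* Access rights aren't tracked for KVM_TRANSLATE; report fixed values. */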
12694 tr->writeable = 1;
12695 tr->usermode = 0;
12696
12697 vcpu_put(vcpu);
12698 return 0;
12699 }
12700
12701 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
12702 {
12703 struct fxregs_state *fxsave;
12704
12705 if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
12706 return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0;
12707
12708 vcpu_load(vcpu);
12709
12710 fxsave = &vcpu->arch.guest_fpu.fpstate->regs.fxsave;
12711 memcpy(fpu->fpr, fxsave->st_space, 128);
12712 fpu->fcw = fxsave->cwd;
12713 fpu->fsw = fxsave->swd;
12714 fpu->ftwx = fxsave->twd;
12715 fpu->last_opcode = fxsave->fop;
12716 fpu->last_ip = fxsave->rip;
12717 fpu->last_dp = fxsave->rdp;
12718 memcpy(fpu->xmm, fxsave->xmm_space, sizeof(fxsave->xmm_space));
12719
12720 vcpu_put(vcpu);
12721 return 0;
12722 }
12723
12724 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
12725 {
12726 struct fxregs_state *fxsave;
12727
12728 if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
12729 return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0;
12730
12731 vcpu_load(vcpu);
12732
12733 fxsave = &vcpu->arch.guest_fpu.fpstate->regs.fxsave;
12734
12735 memcpy(fxsave->st_space, fpu->fpr, 128);
12736 fxsave->cwd = fpu->fcw;
12737 fxsave->swd = fpu->fsw;
12738 fxsave->twd = fpu->ftwx;
12739 fxsave->fop = fpu->last_opcode;
12740 fxsave->rip = fpu->last_ip;
12741 fxsave->rdp = fpu->last_dp;
12742 memcpy(fxsave->xmm_space, fpu->xmm, sizeof(fxsave->xmm_space));
12743
12744 vcpu_put(vcpu);
12745 return 0;
12746 }
12747
12748 static void store_regs(struct kvm_vcpu *vcpu)
12749 {
12750 BUILD_BUG_ON(sizeof(struct kvm_sync_regs) > SYNC_REGS_SIZE_BYTES);
12751
12752 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_REGS)
12753 __get_regs(vcpu, &vcpu->run->s.regs.regs);
12754
12755 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_SREGS)
12756 __get_sregs(vcpu, &vcpu->run->s.regs.sregs);
12757
12758 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_EVENTS)
12759 kvm_vcpu_ioctl_x86_get_vcpu_events(
12760 vcpu, &vcpu->run->s.regs.events);
12761 }
12762
12763 static int sync_regs(struct kvm_vcpu *vcpu)
12764 {
12765 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_REGS) {
12766 __set_regs(vcpu, &vcpu->run->s.regs.regs);
12767 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_REGS;
12768 }
12769
12770 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_SREGS) {
12771 struct kvm_sregs sregs = vcpu->run->s.regs.sregs;
12772
12773 if (__set_sregs(vcpu, &sregs))
12774 return -EINVAL;
12775
12776 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_SREGS;
12777 }
12778
12779 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_EVENTS) {
12780 struct kvm_vcpu_events events = vcpu->run->s.regs.events;
12781
12782 if (kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events))
12783 return -EINVAL;
12784
12785 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_EVENTS;
12786 }
12787
12788 return 0;
12789 }
12790
12791 #define PERF_MEDIATED_PMU_MSG \
12792 "Failed to enable mediated vPMU, try disabling system wide perf events and nmi_watchdog.\n"
12793
12794 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
12795 {
12796 int r;
12797
12798 if (kvm_check_tsc_unstable() && kvm->created_vcpus)
12799 pr_warn_once("SMP vm created on host with unstable TSC; "
12800 "guest TSC will not be reliable\n");
12801
12802 if (!kvm->arch.max_vcpu_ids)
12803 kvm->arch.max_vcpu_ids = KVM_MAX_VCPU_IDS;
12804
12805 if (id >= kvm->arch.max_vcpu_ids)
12806 return -EINVAL;
12807
12808 /*
12809 * Note, any actions done by .vcpu_create() must be idempotent with
12810 * respect to creating multiple vCPUs, and therefore are not undone if
12811 * creating a vCPU fails (including failure during pre-create).
12812 */
12813 r = kvm_x86_call(vcpu_precreate)(kvm);
12814 if (r)
12815 return r;
12816
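/*
 * Note: the mediated vPMU is only set up when the local APIC is in-kernel;
 * with a userspace irqchip the vPMU is silently disabled for the VM rather
 * than failing vCPU creation.
 */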
12817 if (enable_mediated_pmu && kvm->arch.enable_pmu &&
12818 !kvm->arch.created_mediated_pmu) {
12819 if (irqchip_in_kernel(kvm)) {
12820 r = perf_create_mediated_pmu();
12821 if (r) {
12822 pr_warn_ratelimited(PERF_MEDIATED_PMU_MSG);
12823 return r;
12824 }
12825 kvm->arch.created_mediated_pmu = true;
12826 } else {
12827 kvm->arch.enable_pmu = false;
12828 }
12829 }
12830 return 0;
12831 }
12832
12833 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
12834 {
12835 struct page *page;
12836 int r;
12837
12838 vcpu->arch.last_vmentry_cpu = -1;
12839 vcpu->arch.regs_avail = ~0;
12840 vcpu->arch.regs_dirty = ~0;
12841
12842 kvm_gpc_init(&vcpu->arch.pv_time, vcpu->kvm);
12843
12844 if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
12845 kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE);
12846 else
12847 kvm_set_mp_state(vcpu, KVM_MP_STATE_UNINITIALIZED);
12848
12849 r = kvm_mmu_create(vcpu);
12850 if (r < 0)
12851 return r;
12852
12853 r = kvm_create_lapic(vcpu);
12854 if (r < 0)
12855 goto fail_mmu_destroy;
12856
12857 r = -ENOMEM;
12858
12859 page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
12860 if (!page)
12861 goto fail_free_lapic;
12862 vcpu->arch.pio_data = page_address(page);
12863
12864 vcpu->arch.mce_banks = kcalloc(KVM_MAX_MCE_BANKS * 4, sizeof(u64),
12865 GFP_KERNEL_ACCOUNT);
12866 vcpu->arch.mci_ctl2_banks = kcalloc(KVM_MAX_MCE_BANKS, sizeof(u64),
12867 GFP_KERNEL_ACCOUNT);
12868 if (!vcpu->arch.mce_banks || !vcpu->arch.mci_ctl2_banks)
12869 goto fail_free_mce_banks;
12870 vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
12871
12872 if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask,
12873 GFP_KERNEL_ACCOUNT))
12874 goto fail_free_mce_banks;
12875
12876 if (!alloc_emulate_ctxt(vcpu))
12877 goto free_wbinvd_dirty_mask;
12878
12879 if (!fpu_alloc_guest_fpstate(&vcpu->arch.guest_fpu)) {
12880 pr_err("failed to allocate vcpu's fpu\n");
12881 goto free_emulate_ctxt;
12882 }
12883
12884 kvm_async_pf_hash_reset(vcpu);
12885
12886 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_STUFF_FEATURE_MSRS)) {
12887 vcpu->arch.arch_capabilities = kvm_get_arch_capabilities();
12888 vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT;
12889 vcpu->arch.perf_capabilities = kvm_caps.supported_perf_cap;
12890 }
12891 kvm_pmu_init(vcpu);
12892
12893 vcpu->arch.pending_external_vector = -1;
12894 vcpu->arch.preempted_in_kernel = false;
12895
12896 #if IS_ENABLED(CONFIG_HYPERV)
12897 vcpu->arch.hv_root_tdp = INVALID_PAGE;
12898 #endif
12899
12900 r = kvm_x86_call(vcpu_create)(vcpu);
12901 if (r)
12902 goto free_guest_fpu;
12903
12904 kvm_xen_init_vcpu(vcpu);
12905 vcpu_load(vcpu);
12906 kvm_vcpu_after_set_cpuid(vcpu);
12907 kvm_set_tsc_khz(vcpu, vcpu->kvm->arch.default_tsc_khz);
12908 kvm_vcpu_reset(vcpu, false);
12909 kvm_init_mmu(vcpu);
12910 vcpu_put(vcpu);
12911 return 0;
12912
12913 free_guest_fpu:
12914 fpu_free_guest_fpstate(&vcpu->arch.guest_fpu);
12915 free_emulate_ctxt:
12916 kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt);
12917 free_wbinvd_dirty_mask:
12918 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
12919 fail_free_mce_banks:
12920 kfree(vcpu->arch.mce_banks);
12921 kfree(vcpu->arch.mci_ctl2_banks);
12922 free_page((unsigned long)vcpu->arch.pio_data);
12923 fail_free_lapic:
12924 kvm_free_lapic(vcpu);
12925 fail_mmu_destroy:
12926 kvm_mmu_destroy(vcpu);
12927 return r;
12928 }
12929
12930 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
12931 {
12932 if (mutex_lock_killable(&vcpu->mutex))
12933 return;
12934 vcpu_load(vcpu);
12935 kvm_synchronize_tsc(vcpu, NULL);
12936 vcpu_put(vcpu);
12937
12938 /* poll control enabled by default */
12939 vcpu->arch.msr_kvm_poll_control = 1;
12940
12941 mutex_unlock(&vcpu->mutex);
12942 }
12943
12944 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
12945 {
12946 int idx, cpu;
12947
12948 kvm_clear_async_pf_completion_queue(vcpu);
12949 kvm_mmu_unload(vcpu);
12950
12951 kvmclock_reset(vcpu);
12952
12953 for_each_possible_cpu(cpu)
12954 cmpxchg(per_cpu_ptr(&last_vcpu, cpu), vcpu, NULL);
12955
12956 kvm_x86_call(vcpu_free)(vcpu);
12957
12958 kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt);
12959 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
12960 fpu_free_guest_fpstate(&vcpu->arch.guest_fpu);
12961
12962 kvm_xen_destroy_vcpu(vcpu);
12963 kvm_hv_vcpu_uninit(vcpu);
12964 kvm_pmu_destroy(vcpu);
12965 kfree(vcpu->arch.mce_banks);
12966 kfree(vcpu->arch.mci_ctl2_banks);
12967 kvm_free_lapic(vcpu);
12968 idx = srcu_read_lock(&vcpu->kvm->srcu);
12969 kvm_mmu_destroy(vcpu);
12970 srcu_read_unlock(&vcpu->kvm->srcu, idx);
12971 free_page((unsigned long)vcpu->arch.pio_data);
12972 kvfree(vcpu->arch.cpuid_entries);
12973 }
12974
12975 static void kvm_xstate_reset(struct kvm_vcpu *vcpu, bool init_event)
12976 {
12977 struct fpstate *fpstate = vcpu->arch.guest_fpu.fpstate;
12978 u64 xfeatures_mask;
12979 bool fpu_in_use;
12980 int i;
12981
12982 /*
12983 * Guest FPU state is zero allocated and so doesn't need to be manually
12984 * cleared on RESET, i.e. during vCPU creation.
12985 */
12986 if (!init_event || !fpstate)
12987 return;
12988
12989 /*
12990 * On INIT, only select XSTATE components are zeroed, most components
12991 * are unchanged. Currently, the only components that are zeroed and
12992 * supported by KVM are MPX and CET related.
12993 */
12994 xfeatures_mask = (kvm_caps.supported_xcr0 | kvm_caps.supported_xss) &
12995 (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR |
12996 XFEATURE_MASK_CET_ALL);
12997 if (!xfeatures_mask)
12998 return;
12999
13000 BUILD_BUG_ON(sizeof(xfeatures_mask) * BITS_PER_BYTE <= XFEATURE_MAX);
13001
13002 /*
13003 * Unload guest FPU state (if necessary) before zeroing XSTATE fields
13004 * as the kernel can only modify the state when it's resident in memory,
13005 * i.e. when it's not loaded into hardware.
13006 *
13007 * WARN if the vCPU's desire to run, i.e. whether or not it's in KVM_RUN,
13008 * doesn't match the loaded/in-use state of the FPU, as KVM_RUN is the
13009 * only path that can trigger INIT emulation _and_ loads FPU state, and
13010 * KVM_RUN should _always_ load FPU state.
13011 */
13012 WARN_ON_ONCE(vcpu->wants_to_run != fpstate->in_use);
13013 fpu_in_use = fpstate->in_use;
13014 if (fpu_in_use)
13015 kvm_put_guest_fpu(vcpu);
13016 for_each_set_bit(i, (unsigned long *)&xfeatures_mask, XFEATURE_MAX)
13017 fpstate_clear_xstate_component(fpstate, i);
13018 if (fpu_in_use)
13019 kvm_load_guest_fpu(vcpu);
13020 }
13021
13022 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
13023 {
13024 struct kvm_cpuid_entry2 *cpuid_0x1;
13025 unsigned long old_cr0 = kvm_read_cr0(vcpu);
13026 unsigned long new_cr0;
13027
13028 /*
13029 * Several of the "set" flows, e.g. ->set_cr0(), read other registers
13030 * to handle side effects. RESET emulation hits those flows and relies
13031 * on emulated/virtualized registers, including those that are loaded
13032 * into hardware, to be zeroed at vCPU creation. Use CRs as a sentinel
13033 * to detect improper or missing initialization.
13034 */
13035 WARN_ON_ONCE(!init_event &&
13036 (old_cr0 || kvm_read_cr3(vcpu) || kvm_read_cr4(vcpu)));
13037
13038 /*
13039 * SVM doesn't unconditionally VM-Exit on INIT and SHUTDOWN, thus it's
13040 * possible to INIT the vCPU while L2 is active. Force the vCPU back
13041 * into L1 as EFER.SVME is cleared on INIT (along with all other EFER
13042 * bits), i.e. virtualization is disabled.
13043 */
13044 if (is_guest_mode(vcpu))
13045 kvm_leave_nested(vcpu);
13046
13047 kvm_lapic_reset(vcpu, init_event);
13048
13049 WARN_ON_ONCE(is_guest_mode(vcpu) || is_smm(vcpu));
13050 vcpu->arch.hflags = 0;
13051
13052 vcpu->arch.smi_pending = 0;
13053 vcpu->arch.smi_count = 0;
13054 atomic_set(&vcpu->arch.nmi_queued, 0);
13055 vcpu->arch.nmi_pending = 0;
13056 vcpu->arch.nmi_injected = false;
13057 kvm_clear_interrupt_queue(vcpu);
13058 kvm_clear_exception_queue(vcpu);
13059
13060 memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
13061 kvm_update_dr0123(vcpu);
13062 vcpu->arch.dr6 = DR6_ACTIVE_LOW;
13063 vcpu->arch.dr7 = DR7_FIXED_1;
13064 kvm_update_dr7(vcpu);
13065
13066 vcpu->arch.cr2 = 0;
13067
13068 kvm_make_request(KVM_REQ_EVENT, vcpu);
13069 vcpu->arch.apf.msr_en_val = 0;
13070 vcpu->arch.apf.msr_int_val = 0;
13071 vcpu->arch.st.msr_val = 0;
13072
13073 kvmclock_reset(vcpu);
13074
13075 kvm_clear_async_pf_completion_queue(vcpu);
13076 kvm_async_pf_hash_reset(vcpu);
13077 vcpu->arch.apf.halted = false;
13078
13079 kvm_xstate_reset(vcpu, init_event);
13080
13081 if (!init_event) {
13082 vcpu->arch.smbase = 0x30000;
13083
13084 vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT;
13085
13086 vcpu->arch.msr_misc_features_enables = 0;
13087 vcpu->arch.ia32_misc_enable_msr = MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL |
13088 MSR_IA32_MISC_ENABLE_BTS_UNAVAIL;
13089
13090 __kvm_set_xcr(vcpu, 0, XFEATURE_MASK_FP);
13091 kvm_msr_write(vcpu, MSR_IA32_XSS, 0);
13092 }
13093
13094 /* All GPRs except RDX (handled below) are zeroed on RESET/INIT. */
13095 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
13096 kvm_register_mark_dirty(vcpu, VCPU_REGS_RSP);
13097
13098 /*
13099 * Fall back to KVM's default Family/Model/Stepping of 0x600 (P6/Athlon)
13100 * if no CPUID match is found. Note, it's impossible to get a match at
13101 * RESET since KVM emulates RESET before exposing the vCPU to userspace,
13102 * i.e. it's impossible for kvm_find_cpuid_entry() to find a valid entry
13103 * on RESET. But, go through the motions in case that's ever remedied.
13104 */
13105 cpuid_0x1 = kvm_find_cpuid_entry(vcpu, 1);
13106 kvm_rdx_write(vcpu, cpuid_0x1 ? cpuid_0x1->eax : 0x600);
13107
13108 kvm_x86_call(vcpu_reset)(vcpu, init_event);
13109
13110 kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
13111 kvm_rip_write(vcpu, 0xfff0);
13112
13113 vcpu->arch.cr3 = 0;
13114 kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
13115
13116 /*
13117 * CR0.CD/NW are set on RESET, preserved on INIT. Note, some versions
13118 * of Intel's SDM list CD/NW as being set on INIT, but they contradict
13119 * (or qualify) that with a footnote stating that CD/NW are preserved.
13120 */
13121 new_cr0 = X86_CR0_ET;
13122 if (init_event)
13123 new_cr0 |= (old_cr0 & (X86_CR0_NW | X86_CR0_CD));
13124 else
13125 new_cr0 |= X86_CR0_NW | X86_CR0_CD;
13126
13127 kvm_x86_call(set_cr0)(vcpu, new_cr0);
13128 kvm_x86_call(set_cr4)(vcpu, 0);
13129 kvm_x86_call(set_efer)(vcpu, 0);
13130 kvm_x86_call(update_exception_bitmap)(vcpu);
13131
13132 /*
13133 * On the standard CR0/CR4/EFER modification paths, there are several
13134 * complex conditions determining whether the MMU has to be reset and/or
13135 * which PCIDs have to be flushed. However, CR0.WP and the paging-related
13136 * bits in CR4 and EFER are irrelevant if CR0.PG was '0'; and a reset+flush
13137 * is needed anyway if CR0.PG was '1' (which can only happen for INIT, as
13138 * CR0 will be '0' prior to RESET). So we only need to check CR0.PG here.
13139 */
13140 if (old_cr0 & X86_CR0_PG) {
13141 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
13142 kvm_mmu_reset_context(vcpu);
13143 }
13144
13145 /*
13146 * Intel's SDM states that all TLB entries are flushed on INIT. AMD's
13147 * APM states the TLBs are untouched by INIT, but it also states that
13148 * the TLBs are flushed on "External initialization of the processor."
13149 * Flush the guest TLB regardless of vendor, there is no meaningful
13150 * benefit in relying on the guest to flush the TLB immediately after
13151 * INIT. A spurious TLB flush is benign and likely negligible from a
13152 * performance perspective.
13153 */
13154 if (init_event)
13155 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
13156 }
13157 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_reset);
13158
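/*
 * A SIPI starts the AP in real mode at CS:IP = (vector << 8):0000, i.e.
 * CS.base = vector << 12 and RIP = 0.
 */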
13159 void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
13160 {
13161 struct kvm_segment cs;
13162
13163 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
13164 cs.selector = vector << 8;
13165 cs.base = vector << 12;
13166 kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
13167 kvm_rip_write(vcpu, 0);
13168 }
13169 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_deliver_sipi_vector);
13170
13171 void kvm_arch_enable_virtualization(void)
13172 {
13173 x86_virt_register_emergency_callback(kvm_x86_ops.emergency_disable_virtualization_cpu);
13174 }
13175
13176 void kvm_arch_disable_virtualization(void)
13177 {
13178 x86_virt_unregister_emergency_callback(kvm_x86_ops.emergency_disable_virtualization_cpu);
13179 }
13180
13181 int kvm_arch_enable_virtualization_cpu(void)
13182 {
13183 struct kvm *kvm;
13184 struct kvm_vcpu *vcpu;
13185 unsigned long i;
13186 int ret;
13187 u64 local_tsc;
13188 u64 max_tsc = 0;
13189 bool stable, backwards_tsc = false;
13190
13191 kvm_user_return_msr_cpu_online();
13192
13193 ret = kvm_x86_check_processor_compatibility();
13194 if (ret)
13195 return ret;
13196
13197 ret = kvm_x86_call(enable_virtualization_cpu)();
13198 if (ret != 0)
13199 return ret;
13200
13201 local_tsc = rdtsc();
13202 stable = !kvm_check_tsc_unstable();
13203 list_for_each_entry(kvm, &vm_list, vm_list) {
13204 kvm_for_each_vcpu(i, vcpu, kvm) {
13205 if (!stable && vcpu->cpu == smp_processor_id())
13206 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
13207 if (stable && vcpu->arch.last_host_tsc > local_tsc) {
13208 backwards_tsc = true;
13209 if (vcpu->arch.last_host_tsc > max_tsc)
13210 max_tsc = vcpu->arch.last_host_tsc;
13211 }
13212 }
13213 }
13214
13215 /*
13216 * Sometimes, even reliable TSCs go backwards. This happens on
13217 * platforms that reset TSC during suspend or hibernate actions, but
13218 * maintain synchronization. We must compensate. Fortunately, we can
13219 * detect that condition here, which happens early in CPU bringup,
13220 * before any KVM threads can be running. Unfortunately, we can't
13221 * bring the TSCs fully up to date with real time, as we aren't yet far
13222 * enough into CPU bringup that we know how much real time has actually
13223 * elapsed; our helper function, ktime_get_boottime_ns() will be using boot
13224 * variables that haven't been updated yet.
13225 *
13226 * So we simply find the maximum observed TSC above, then record the
13227 * adjustment to TSC in each VCPU. When the VCPU later gets loaded,
13228 * the adjustment will be applied. Note that we accumulate
13229 * adjustments, in case multiple suspend cycles happen before some VCPU
13230 * gets a chance to run again. In the event that no KVM threads get a
13231 * chance to run, we will miss the entire elapsed period, as we'll have
13232 * reset last_host_tsc, so VCPUs will not have the TSC adjusted and may
13233 * lose cycle time. This isn't too big a deal, since the loss will be
13234 * uniform across all VCPUs (not to mention the scenario is extremely
13235 * unlikely). It is possible that a second hibernate recovery happens
13236 * much faster than a first, causing the observed TSC here to be
13237 * smaller; this would require additional padding adjustment, which is
13238 * why we set last_host_tsc to the local tsc observed here.
13239 *
13240 * N.B. - this code below runs only on platforms with reliable TSC,
13241 * as that is the only way backwards_tsc is set above. Also note
13242 * that this runs for ALL vcpus, which is not a bug; all VCPUs should
13243 * have the same delta_cyc adjustment applied if backwards_tsc
13244 * is detected. Note further, this adjustment is only done once,
13245 * as we reset last_host_tsc on all VCPUs to stop this from being
13246 * called multiple times (one for each physical CPU bringup).
13247 *
13248 * Platforms with unreliable TSCs don't have to deal with this, they
13249 * will be compensated by the logic in vcpu_load, which sets the TSC to
13250 * catchup mode. This will catchup all VCPUs to real time, but cannot
13251 * guarantee that they stay in perfect synchronization.
13252 */
13253 if (backwards_tsc) {
13254 u64 delta_cyc = max_tsc - local_tsc;
13255 list_for_each_entry(kvm, &vm_list, vm_list) {
13256 kvm->arch.backwards_tsc_observed = true;
13257 kvm_for_each_vcpu(i, vcpu, kvm) {
13258 vcpu->arch.tsc_offset_adjustment += delta_cyc;
13259 vcpu->arch.last_host_tsc = local_tsc;
13260 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
13261 }
13262
13263 /*
13264 * We have to disable TSC offset matching; if you were
13265 * booting a VM while issuing an S4 host suspend,
13266 * you may have some problems. Solving this issue is
13267 * left as an exercise to the reader.
13268 */
13269 kvm->arch.last_tsc_nsec = 0;
13270 kvm->arch.last_tsc_write = 0;
13271 }
13272
13273 }
13274 return 0;
13275 }
13276
13277 void kvm_arch_shutdown(void)
13278 {
13279 /*
13280 * Set virt_rebooting to indicate that KVM has asynchronously disabled
13281 * hardware virtualization, i.e. that errors and/or exceptions on SVM
13282 * and VMX instructions are expected and should be ignored.
13283 */
13284 virt_rebooting = true;
13285
13286 /*
13287 * Ensure virt_rebooting is visible before IPIs are sent to other CPUs
13288 * to disable virtualization. Effectively pairs with the reception of
13289 * the IPI (virt_rebooting is read in task/exception context, but only
13290 * _needs_ to be read as %true after the IPI function callback disables
13291 * virtualization).
13292 */
13293 smp_wmb();
13294 }
13295
13296 void kvm_arch_disable_virtualization_cpu(void)
13297 {
13298 kvm_x86_call(disable_virtualization_cpu)();
13299
13300 /*
13301 * Leave the user-return notifiers as-is when disabling virtualization
13302 * for reboot, i.e. when disabling via IPI function call, and instead
13303 * pin kvm.ko (if it's a module) to defend against use-after-free (in
13304 * the *very* unlikely scenario module unload is racing with reboot).
13305 * On a forced reboot, tasks aren't frozen before shutdown, and so KVM
13306 * could be actively modifying user-return MSR state when the IPI to
13307 * disable virtualization arrives. Handle the extreme edge case here
13308 * instead of trying to account for it in the normal flows.
13309 */
13310 if (in_task() || WARN_ON_ONCE(!virt_rebooting))
13311 drop_user_return_notifiers();
13312 else
13313 __module_get(THIS_MODULE);
13314 }
13315
13316 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
13317 {
13318 return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id;
13319 }
13320 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_is_reset_bsp);
13321
13322 bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
13323 {
13324 return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0;
13325 }
13326
13327 void kvm_arch_free_vm(struct kvm *kvm)
13328 {
13329 #if IS_ENABLED(CONFIG_HYPERV)
13330 kfree(kvm->arch.hv_pa_pg);
13331 #endif
13332 __kvm_arch_free_vm(kvm);
13333 }
13334
13335
13336 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
13337 {
13338 int ret;
13339 unsigned long flags;
13340
13341 if (!kvm_is_vm_type_supported(type))
13342 return -EINVAL;
13343
13344 kvm->arch.vm_type = type;
13345 kvm->arch.has_private_mem =
13346 (type == KVM_X86_SW_PROTECTED_VM);
13347 /* Decided by the vendor code for other VM types. */
13348 kvm->arch.pre_fault_allowed =
13349 type == KVM_X86_DEFAULT_VM || type == KVM_X86_SW_PROTECTED_VM;
13350 kvm->arch.disabled_quirks = kvm_caps.inapplicable_quirks & kvm_caps.supported_quirks;
13351
13352 ret = kvm_page_track_init(kvm);
13353 if (ret)
13354 goto out;
13355
13356 ret = kvm_mmu_init_vm(kvm);
13357 if (ret)
13358 goto out_cleanup_page_track;
13359
13360 ret = kvm_x86_call(vm_init)(kvm);
13361 if (ret)
13362 goto out_uninit_mmu;
13363
13364 atomic_set(&kvm->arch.noncoherent_dma_count, 0);
13365
13366 raw_spin_lock_init(&kvm->arch.tsc_write_lock);
13367 mutex_init(&kvm->arch.apic_map_lock);
13368 seqcount_raw_spinlock_init(&kvm->arch.pvclock_sc, &kvm->arch.tsc_write_lock);
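/* Bias the kvmclock offset so that kvmclock reads '0' at VM creation time. */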
13369 kvm->arch.kvmclock_offset = -get_kvmclock_base_ns();
13370
13371 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
13372 pvclock_update_vm_gtod_copy(kvm);
13373 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
13374
13375 kvm->arch.default_tsc_khz = max_tsc_khz ? : tsc_khz;
13376 kvm->arch.apic_bus_cycle_ns = APIC_BUS_CYCLE_NS_DEFAULT;
13377 kvm->arch.guest_can_read_msr_platform_info = true;
13378 kvm->arch.enable_pmu = enable_pmu;
13379
13380 #if IS_ENABLED(CONFIG_HYPERV)
13381 spin_lock_init(&kvm->arch.hv_root_tdp_lock);
13382 kvm->arch.hv_root_tdp = INVALID_PAGE;
13383 #endif
13384
13385 kvm_apicv_init(kvm);
13386 kvm_hv_init_vm(kvm);
13387 kvm_xen_init_vm(kvm);
13388
13389 if (ignore_msrs && !report_ignored_msrs) {
13390 pr_warn_once("Running KVM with ignore_msrs=1 and report_ignored_msrs=0 is not a\n"
13391 "a supported configuration. Lying to the guest about the existence of MSRs\n"
13392 "may cause the guest operating system to hang or produce errors. If a guest\n"
13393 "does not run without ignore_msrs=1, please report it to kvm@vger.kernel.org.\n");
13394 }
13395
13396 once_init(&kvm->arch.nx_once);
13397 return 0;
13398
13399 out_uninit_mmu:
13400 kvm_mmu_uninit_vm(kvm);
13401 out_cleanup_page_track:
13402 kvm_page_track_cleanup(kvm);
13403 out:
13404 return ret;
13405 }
13406
13407 /**
13408 * __x86_set_memory_region: Setup KVM internal memory slot
13409 *
13410 * @kvm: the kvm pointer to the VM.
13411 * @id: the slot ID to setup.
13412 * @gpa: the GPA to install the slot (unused when @size == 0).
13413 * @size: the size of the slot. Set to zero to uninstall a slot.
13414 *
13415 * This function helps to setup a KVM internal memory slot. Specify
13416 * @size > 0 to install a new slot, while @size == 0 to uninstall a
13417 * slot. The return code can be one of the following:
13418 *
13419 * HVA: on success (uninstall will return a bogus HVA)
13420 * -errno: on error
13421 *
13422 * The caller should always use IS_ERR() to check the return value
13423 * before use. Note, the KVM internal memory slots are guaranteed to
13424 * remain valid and unchanged until the VM is destroyed, i.e., the
13425 * GPA->HVA translation will not change. However, the HVA is a user
13426 * address, i.e. its accessibility is not guaranteed, and must be
13427 * accessed via __copy_{to,from}_user().
13428 */
13429 void __user * __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
13430 u32 size)
13431 {
13432 int i, r;
13433 unsigned long hva, old_npages;
13434 struct kvm_memslots *slots = kvm_memslots(kvm);
13435 struct kvm_memory_slot *slot;
13436
13437 lockdep_assert_held(&kvm->slots_lock);
13438
13439 if (WARN_ON(id >= KVM_MEM_SLOTS_NUM))
13440 return ERR_PTR_USR(-EINVAL);
13441
13442 slot = id_to_memslot(slots, id);
13443 if (size) {
13444 if (slot && slot->npages)
13445 return ERR_PTR_USR(-EEXIST);
13446
13447 /*
13448 * MAP_SHARED to prevent internal slot pages from being moved
13449 * by fork()/COW.
13450 */
13451 hva = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE,
13452 MAP_SHARED | MAP_ANONYMOUS, 0);
13453 if (IS_ERR_VALUE(hva))
13454 return (void __user *)hva;
13455 } else {
13456 if (!slot || !slot->npages)
13457 return NULL;
13458
13459 old_npages = slot->npages;
13460 hva = slot->userspace_addr;
13461 }
13462
13463 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
13464 struct kvm_userspace_memory_region2 m;
13465
13466 m.slot = id | (i << 16);
13467 m.flags = 0;
13468 m.guest_phys_addr = gpa;
13469 m.userspace_addr = hva;
13470 m.memory_size = size;
13471 r = kvm_set_internal_memslot(kvm, &m);
13472 if (r < 0)
13473 return ERR_PTR_USR(r);
13474 }
13475
13476 if (!size)
13477 vm_munmap(hva, old_npages * PAGE_SIZE);
13478
13479 return (void __user *)hva;
13480 }
13481 EXPORT_SYMBOL_FOR_KVM_INTERNAL(__x86_set_memory_region);
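/*
 * Illustrative usage (a sketch; VMX's TSS setup is one such caller):
 * installing a 3-page internal slot looks like
 *	__x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr, PAGE_SIZE * 3);
 * and passing size == 0 for the same id later removes the slot again.
 */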
13482
13483 void kvm_arch_pre_destroy_vm(struct kvm *kvm)
13484 {
13485 /*
13486 * Stop all background workers and kthreads before destroying vCPUs, as
13487 * iterating over vCPUs in a different task while vCPUs are being freed
13488 * is unsafe, i.e. will lead to use-after-free. The PIT also needs to
13489 * be stopped before IRQ routing is freed.
13490 */
13491 #ifdef CONFIG_KVM_IOAPIC
13492 kvm_free_pit(kvm);
13493 #endif
13494
13495 kvm_mmu_pre_destroy_vm(kvm);
13496 kvm_x86_call(vm_pre_destroy)(kvm);
13497 }
13498
13499 void kvm_arch_destroy_vm(struct kvm *kvm)
13500 {
13501 if (current->mm == kvm->mm) {
13502 /*
13503 * Free memory regions allocated on behalf of userspace,
13504 * unless the memory map has changed due to process exit
13505 * or fd copying.
13506 */
13507 mutex_lock(&kvm->slots_lock);
13508 __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
13509 0, 0);
13510 __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
13511 0, 0);
13512 __x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
13513 mutex_unlock(&kvm->slots_lock);
13514 }
13515 if (kvm->arch.created_mediated_pmu)
13516 perf_release_mediated_pmu();
13517 kvm_destroy_vcpus(kvm);
13518 kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1));
13519 #ifdef CONFIG_KVM_IOAPIC
13520 kvm_pic_destroy(kvm);
13521 kvm_ioapic_destroy(kvm);
13522 #endif
13523 kvfree(rcu_dereference_check(kvm->arch.apic_map, 1));
13524 kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1));
13525 kvm_mmu_uninit_vm(kvm);
13526 kvm_page_track_cleanup(kvm);
13527 kvm_xen_destroy_vm(kvm);
13528 kvm_hv_destroy_vm(kvm);
13529 kvm_x86_call(vm_destroy)(kvm);
13530 }
13531
13532 static void memslot_rmap_free(struct kvm_memory_slot *slot)
13533 {
13534 int i;
13535
13536 for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
13537 vfree(slot->arch.rmap[i]);
13538 slot->arch.rmap[i] = NULL;
13539 }
13540 }
13541
13542 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
13543 {
13544 int i;
13545
13546 memslot_rmap_free(slot);
13547
13548 for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) {
13549 vfree(slot->arch.lpage_info[i - 1]);
13550 slot->arch.lpage_info[i - 1] = NULL;
13551 }
13552
13553 kvm_page_track_free_memslot(slot);
13554 }
13555
13556 int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages)
13557 {
13558 const int sz = sizeof(*slot->arch.rmap[0]);
13559 int i;
13560
13561 for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
13562 int level = i + 1;
13563 int lpages = __kvm_mmu_slot_lpages(slot, npages, level);
13564
13565 if (slot->arch.rmap[i])
13566 continue;
13567
13568 slot->arch.rmap[i] = __vcalloc(lpages, sz, GFP_KERNEL_ACCOUNT);
13569 if (!slot->arch.rmap[i]) {
13570 memslot_rmap_free(slot);
13571 return -ENOMEM;
13572 }
13573 }
13574
13575 return 0;
13576 }
13577
13578 static int kvm_alloc_memslot_metadata(struct kvm *kvm,
13579 struct kvm_memory_slot *slot)
13580 {
13581 unsigned long npages = slot->npages;
13582 int i, r;
13583
13584 /*
13585 * Clear out the previous array pointers for the KVM_MR_MOVE case. The
13586 * old arrays will be freed by kvm_set_memory_region() if installing
13587 * the new memslot is successful.
13588 */
13589 memset(&slot->arch, 0, sizeof(slot->arch));
13590
13591 if (kvm_memslots_have_rmaps(kvm)) {
13592 r = memslot_rmap_alloc(slot, npages);
13593 if (r)
13594 return r;
13595 }
13596
13597 for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) {
13598 struct kvm_lpage_info *linfo;
13599 unsigned long ugfn;
13600 int lpages;
13601 int level = i + 1;
13602
13603 lpages = __kvm_mmu_slot_lpages(slot, npages, level);
13604
13605 linfo = __vcalloc(lpages, sizeof(*linfo), GFP_KERNEL_ACCOUNT);
13606 if (!linfo)
13607 goto out_free;
13608
13609 slot->arch.lpage_info[i - 1] = linfo;
13610
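/*
 * Disallow huge pages at the head/tail of the slot if the slot's start or
 * end GFN isn't aligned to this level's huge page size.
 */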
13611 if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
13612 linfo[0].disallow_lpage = 1;
13613 if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
13614 linfo[lpages - 1].disallow_lpage = 1;
13615 ugfn = slot->userspace_addr >> PAGE_SHIFT;
13616 /*
13617 * If the gfn and userspace address are not aligned wrt each
13618 * other, disable large page support for this slot.
13619 */
13620 if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1)) {
13621 unsigned long j;
13622
13623 for (j = 0; j < lpages; ++j)
13624 linfo[j].disallow_lpage = 1;
13625 }
13626 }
13627
13628 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
13629 kvm_mmu_init_memslot_memory_attributes(kvm, slot);
13630 #endif
13631
13632 if (kvm_page_track_create_memslot(kvm, slot, npages))
13633 goto out_free;
13634
13635 return 0;
13636
13637 out_free:
13638 memslot_rmap_free(slot);
13639
13640 for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) {
13641 vfree(slot->arch.lpage_info[i - 1]);
13642 slot->arch.lpage_info[i - 1] = NULL;
13643 }
13644 return -ENOMEM;
13645 }
13646
13647 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
13648 {
13649 struct kvm_vcpu *vcpu;
13650 unsigned long i;
13651
13652 /*
13653 * memslots->generation has been incremented.
13654 * mmio generation may have reached its maximum value.
13655 */
13656 kvm_mmu_invalidate_mmio_sptes(kvm, gen);
13657
13658 /* Force re-initialization of steal_time cache */
13659 kvm_for_each_vcpu(i, vcpu, kvm)
13660 kvm_vcpu_kick(vcpu);
13661 }
13662
13663 int kvm_arch_prepare_memory_region(struct kvm *kvm,
13664 const struct kvm_memory_slot *old,
13665 struct kvm_memory_slot *new,
13666 enum kvm_mr_change change)
13667 {
13668 /*
13669 * KVM doesn't support moving memslots when there are external page
13670 * trackers attached to the VM, i.e. if KVMGT is in use.
13671 */
13672 if (change == KVM_MR_MOVE && kvm_page_track_has_external_user(kvm))
13673 return -EINVAL;
13674
13675 if (change == KVM_MR_CREATE || change == KVM_MR_MOVE) {
13676 if ((new->base_gfn + new->npages - 1) > kvm_mmu_max_gfn())
13677 return -EINVAL;
13678
13679 if (kvm_is_gfn_alias(kvm, new->base_gfn + new->npages - 1))
13680 return -EINVAL;
13681
13682 return kvm_alloc_memslot_metadata(kvm, new);
13683 }
13684
13685 if (change == KVM_MR_FLAGS_ONLY)
13686 memcpy(&new->arch, &old->arch, sizeof(old->arch));
13687 else if (WARN_ON_ONCE(change != KVM_MR_DELETE))
13688 return -EIO;
13689
13690 return 0;
13691 }
13692
13693
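/*
 * Request a CPU dirty-logging (e.g. PML) reconfiguration only when the number
 * of dirty-logged memslots transitions between zero and non-zero; intermediate
 * counts don't change whether CPU dirty logging needs to be active.
 */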
static void kvm_mmu_update_cpu_dirty_logging(struct kvm *kvm, bool enable)
{
	int nr_slots;

	if (!kvm->arch.cpu_dirty_log_size)
		return;

	nr_slots = atomic_read(&kvm->nr_memslots_dirty_logging);
	if ((enable && nr_slots == 1) || !nr_slots)
		kvm_make_all_cpus_request(kvm, KVM_REQ_UPDATE_CPU_DIRTY_LOGGING);
}

static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
				     struct kvm_memory_slot *old,
				     const struct kvm_memory_slot *new,
				     enum kvm_mr_change change)
{
	u32 old_flags = old ? old->flags : 0;
	u32 new_flags = new ? new->flags : 0;
	bool log_dirty_pages = new_flags & KVM_MEM_LOG_DIRTY_PAGES;

	/*
	 * Update CPU dirty logging if dirty logging is being toggled. This
	 * applies to all operations.
	 */
	if ((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES)
		kvm_mmu_update_cpu_dirty_logging(kvm, log_dirty_pages);

	/*
	 * Nothing more to do for RO slots (which can't be dirtied and can't be
	 * made writable) or CREATE/MOVE/DELETE of a slot.
	 *
	 * For a memslot with dirty logging disabled:
	 * CREATE:      No dirty mappings will already exist.
	 * MOVE/DELETE: The old mappings will already have been cleaned up by
	 *		kvm_arch_flush_shadow_memslot()
	 *
	 * For a memslot with dirty logging enabled:
	 * CREATE:      No shadow pages exist, thus nothing to write-protect
	 *		and no dirty bits to clear.
	 * MOVE/DELETE: The old mappings will already have been cleaned up by
	 *		kvm_arch_flush_shadow_memslot().
	 */
	if ((change != KVM_MR_FLAGS_ONLY) || (new_flags & KVM_MEM_READONLY))
		return;

	/*
	 * READONLY and non-flags changes were filtered out above, and the only
	 * other flag is LOG_DIRTY_PAGES, i.e. something is wrong if dirty
	 * logging isn't being toggled on or off.
	 */
	if (WARN_ON_ONCE(!((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES)))
		return;

	if (!log_dirty_pages) {
		/*
		 * Recover huge page mappings in the slot now that dirty logging
		 * is disabled, i.e. now that KVM does not have to track guest
		 * writes at 4KiB granularity.
		 *
		 * Dirty logging might be disabled by userspace if an ongoing VM
		 * live migration is cancelled and the VM must continue running
		 * on the source.
		 */
		kvm_mmu_recover_huge_pages(kvm, new);
	} else {
		/*
		 * Initially-all-set does not require write protecting any page,
		 * because they're all assumed to be dirty.
		 */
		if (kvm_dirty_log_manual_protect_and_init_set(kvm))
			return;

		if (READ_ONCE(eager_page_split))
			kvm_mmu_slot_try_split_huge_pages(kvm, new, PG_LEVEL_4K);

		if (kvm->arch.cpu_dirty_log_size) {
			kvm_mmu_slot_leaf_clear_dirty(kvm, new);
			kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_2M);
		} else {
			kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_4K);
		}

		/*
		 * Unconditionally flush the TLBs after enabling dirty logging.
		 * A flush is almost always going to be necessary (see below),
		 * and unconditionally flushing allows the helpers to omit
		 * the subtly complex checks when removing write access.
		 *
		 * Do the flush outside of mmu_lock to reduce the amount of
		 * time mmu_lock is held.  Flushing after dropping mmu_lock is
		 * safe as KVM only needs to guarantee the slot is fully
		 * write-protected before returning to userspace, i.e. before
		 * userspace can consume the dirty status.
		 *
		 * Flushing outside of mmu_lock requires KVM to be careful when
		 * making decisions based on writable status of an SPTE, e.g. a
		 * !writable SPTE doesn't guarantee a CPU can't perform writes.
		 *
		 * Specifically, KVM also write-protects guest page tables to
		 * monitor changes when using shadow paging, and must guarantee
		 * no CPUs can write to those pages before mmu_lock is dropped.
		 * Because CPUs may have stale TLB entries at this point, a
		 * !writable SPTE doesn't guarantee CPUs can't perform writes.
		 *
		 * KVM also allows making SPTES writable outside of mmu_lock,
		 * e.g. to allow dirty logging without taking mmu_lock.
		 *
		 * To handle these scenarios, KVM uses a separate software-only
		 * bit (MMU-writable) to track if a SPTE is !writable due to
		 * a guest page table being write-protected (KVM clears the
		 * MMU-writable flag when write-protecting for shadow paging).
		 *
		 * The use of MMU-writable is also the primary motivation for
		 * the unconditional flush.  Because KVM must guarantee that a
		 * CPU doesn't contain stale, writable TLB entries for a
		 * !MMU-writable SPTE, KVM must flush if it encounters any
		 * MMU-writable SPTE regardless of whether the actual hardware
		 * writable bit was set.  I.e. KVM is almost guaranteed to need
		 * to flush, while unconditionally flushing allows the "remove
		 * write access" helpers to ignore MMU-writable entirely.
		 *
		 * See is_writable_pte() for more details (the case involving
		 * access-tracked SPTEs is particularly relevant).
		 */
		kvm_flush_remote_tlbs_memslot(kvm, new);
	}
}

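/*
 * On memslot creation or deletion, recompute the MMU page limit from the new
 * total of memslot pages, unless userspace has explicitly requested a fixed
 * number of MMU pages.
 */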
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	if (change == KVM_MR_DELETE)
		kvm_page_track_delete_slot(kvm, old);

	if (!kvm->arch.n_requested_mmu_pages &&
	    (change == KVM_MR_CREATE || change == KVM_MR_DELETE)) {
		unsigned long nr_mmu_pages;

		nr_mmu_pages = kvm->nr_memslot_pages / KVM_MEMSLOT_PAGES_TO_MMU_PAGES_RATIO;
		nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES);
		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
	}

	kvm_mmu_slot_apply_flags(kvm, old, new, change);

	/* Free the arrays associated with the old memslot. */
	if (change == KVM_MR_MOVE)
		kvm_arch_free_memslot(kvm, old);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(!kvm_arch_pmi_in_guest(vcpu));

	if (vcpu->arch.guest_state_protected)
		return true;

	return kvm_x86_call(get_cpl)(vcpu) == 0;
}

unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(!kvm_arch_pmi_in_guest(vcpu));

	if (vcpu->arch.guest_state_protected)
		return 0;

	return kvm_rip_read(vcpu);
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
{
	return kvm_x86_call(interrupt_allowed)(vcpu, false);
}

unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu)
{
	/* Can't read the RIP when guest state is protected, just return 0 */
	if (vcpu->arch.guest_state_protected)
		return 0;

	if (is_64_bit_mode(vcpu))
		return kvm_rip_read(vcpu);
	return (u32)(get_segment_base(vcpu, VCPU_SREG_CS) +
		     kvm_rip_read(vcpu));
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_get_linear_rip);

bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
{
	return kvm_get_linear_rip(vcpu) == linear_rip;
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_is_linear_rip);

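/*
 * When userspace single-stepping is active, KVM owns X86_EFLAGS_TF: hide it
 * from readers below, and re-inject it on writes as long as the saved
 * singlestep RIP still matches.
 */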
unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
{
	unsigned long rflags;

	rflags = kvm_x86_call(get_rflags)(vcpu);
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		rflags &= ~X86_EFLAGS_TF;
	return rflags;
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_get_rflags);

static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
	    kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
		rflags |= X86_EFLAGS_TF;
	kvm_x86_call(set_rflags)(vcpu, rflags);
}

void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	__kvm_set_rflags(vcpu, rflags);
	kvm_make_request(KVM_REQ_EVENT, vcpu);
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_rflags);

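/*
 * The async #PF gfn table below is a small open-addressed hash table with
 * linear probing; ~0 marks an empty slot.  Because probing is linear,
 * kvm_del_async_pf_gfn() has to back-shift colliding entries instead of
 * simply clearing a slot.
 */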
static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
{
	BUILD_BUG_ON(!is_power_of_2(ASYNC_PF_PER_VCPU));

	return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
}

static inline u32 kvm_async_pf_next_probe(u32 key)
{
	return (key + 1) & (ASYNC_PF_PER_VCPU - 1);
}

static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	u32 key = kvm_async_pf_hash_fn(gfn);

	while (vcpu->arch.apf.gfns[key] != ~0)
		key = kvm_async_pf_next_probe(key);

	vcpu->arch.apf.gfns[key] = gfn;
}

static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	int i;
	u32 key = kvm_async_pf_hash_fn(gfn);

	for (i = 0; i < ASYNC_PF_PER_VCPU &&
		     (vcpu->arch.apf.gfns[key] != gfn &&
		      vcpu->arch.apf.gfns[key] != ~0); i++)
		key = kvm_async_pf_next_probe(key);

	return key;
}

bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
}

static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	u32 i, j, k;

	i = j = kvm_async_pf_gfn_slot(vcpu, gfn);

	if (WARN_ON_ONCE(vcpu->arch.apf.gfns[i] != gfn))
		return;

	while (true) {
		vcpu->arch.apf.gfns[i] = ~0;
		do {
			j = kvm_async_pf_next_probe(j);
			if (vcpu->arch.apf.gfns[j] == ~0)
				return;
			k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]);
			/*
			 * k lies cyclically in ]i,j]
			 * |    i.k.j |
			 * |....j i.k.| or  |.k..j i...|
			 */
		} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
		vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j];
		i = j;
	}
}

static inline int apf_put_user_notpresent(struct kvm_vcpu *vcpu)
{
	u32 reason = KVM_PV_REASON_PAGE_NOT_PRESENT;

	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &reason,
				      sizeof(reason));
}

static inline int apf_put_user_ready(struct kvm_vcpu *vcpu, u32 token)
{
	unsigned int offset = offsetof(struct kvm_vcpu_pv_apf_data, token);

	return kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data,
					     &token, offset, sizeof(token));
}

static inline bool apf_pageready_slot_free(struct kvm_vcpu *vcpu)
{
	unsigned int offset = offsetof(struct kvm_vcpu_pv_apf_data, token);
	u32 val;

	if (kvm_read_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data,
					 &val, offset, sizeof(val)))
		return false;

	return !val;
}

static bool kvm_can_deliver_async_pf(struct kvm_vcpu *vcpu)
{
	if (!kvm_pv_async_pf_enabled(vcpu))
		return false;

	if (!vcpu->arch.apf.send_always &&
	    (vcpu->arch.guest_state_protected || !kvm_x86_call(get_cpl)(vcpu)))
		return false;

	if (is_guest_mode(vcpu)) {
		/*
		 * L1 needs to opt into the special #PF vmexits that are
		 * used to deliver async page faults.
		 */
		return vcpu->arch.apf.delivery_as_pf_vmexit;
	} else {
		/*
		 * Play it safe in case the guest temporarily disables paging.
		 * The real mode IDT in particular is unlikely to have a #PF
		 * exception setup.
		 */
		return is_paging(vcpu);
	}
}

bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
{
	if (unlikely(!lapic_in_kernel(vcpu) ||
		     kvm_event_needs_reinjection(vcpu) ||
		     kvm_is_exception_pending(vcpu)))
		return false;

	if (kvm_hlt_in_guest(vcpu->kvm) && !kvm_can_deliver_async_pf(vcpu))
		return false;

	/*
	 * If interrupts are off we cannot even use an artificial
	 * halt state.
	 */
	return kvm_arch_interrupt_allowed(vcpu);
}

bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	struct x86_exception fault;

	trace_kvm_async_pf_not_present(work->arch.token, work->cr2_or_gpa);
	kvm_add_async_pf_gfn(vcpu, work->arch.gfn);

	if (kvm_can_deliver_async_pf(vcpu) &&
	    !apf_put_user_notpresent(vcpu)) {
		fault.vector = PF_VECTOR;
		fault.error_code_valid = true;
		fault.error_code = 0;
		fault.nested_page_fault = false;
		fault.address = work->arch.token;
		fault.async_page_fault = true;
		kvm_inject_page_fault(vcpu, &fault);
		return true;
	} else {
		/*
		 * It is not possible to deliver a paravirtualized asynchronous
		 * page fault, but putting the guest in an artificial halt state
		 * can be beneficial nevertheless: if an interrupt arrives, we
		 * can deliver it timely and perhaps the guest will schedule
		 * another process.  When the instruction that triggered a page
		 * fault is retried, hopefully the page will be ready in the host.
		 */
		kvm_make_request(KVM_REQ_APF_HALT, vcpu);
		return false;
	}
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	struct kvm_lapic_irq irq = {
		.delivery_mode = APIC_DM_FIXED,
		.vector = vcpu->arch.apf.vec
	};

	if (work->wakeup_all)
		work->arch.token = ~0; /* broadcast wakeup */
	else
		kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
	trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa);

	if ((work->wakeup_all || work->notpresent_injected) &&
	    kvm_pv_async_pf_enabled(vcpu) &&
	    !apf_put_user_ready(vcpu, work->arch.token)) {
		WRITE_ONCE(vcpu->arch.apf.pageready_pending, true);
		kvm_apic_set_irq(vcpu, &irq, NULL);
	}

	vcpu->arch.apf.halted = false;
	kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE);
}

void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu)
{
	kvm_make_request(KVM_REQ_APF_READY, vcpu);

	/* Pairs with smp_store_mb() in kvm_set_msr_common(). */
	smp_mb__after_atomic();

	if (!READ_ONCE(vcpu->arch.apf.pageready_pending))
		kvm_vcpu_kick(vcpu);
}

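/*
 * A "page ready" event can be dequeued only if the guest can actually receive
 * it, i.e. the PV async #PF mechanism is disabled entirely, or the local APIC
 * is enabled and the token slot in the shared APF data is free.
 */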
bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
{
	if (!kvm_pv_async_pf_enabled(vcpu))
		return true;
	else
		return kvm_lapic_enabled(vcpu) && apf_pageready_slot_free(vcpu);
}

static void kvm_noncoherent_dma_assignment_start_or_stop(struct kvm *kvm)
{
	/*
	 * Non-coherent DMA assignment and de-assignment may affect whether or
	 * not KVM honors guest PAT, and thus may cause changes in EPT SPTEs
	 * due to toggling the "ignore PAT" bit.  Zap all SPTEs when the first
	 * (or last) non-coherent device is (un)registered so that new SPTEs
	 * with the correct "ignore guest PAT" setting are created.
	 *
	 * If KVM always honors guest PAT, however, there is nothing to do.
	 */
	if (kvm_check_has_quirk(kvm, KVM_X86_QUIRK_IGNORE_GUEST_PAT))
		kvm_zap_gfn_range(kvm, gpa_to_gfn(0), gpa_to_gfn(~0ULL));
}

void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
	if (atomic_inc_return(&kvm->arch.noncoherent_dma_count) == 1)
		kvm_noncoherent_dma_assignment_start_or_stop(kvm);
}

void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
	if (!atomic_dec_return(&kvm->arch.noncoherent_dma_count))
		kvm_noncoherent_dma_assignment_start_or_stop(kvm);
}

bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
	return atomic_read(&kvm->arch.noncoherent_dma_count);
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_arch_has_noncoherent_dma);

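/*
 * Skip halt-polling when the guest has disabled it, i.e. when bit 0 of
 * MSR_KVM_POLL_CONTROL is clear.
 */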
bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.msr_kvm_poll_control & 1) == 0;
}

#ifdef CONFIG_KVM_GUEST_MEMFD
/*
 * KVM doesn't yet support initializing guest_memfd memory as shared for VMs
 * with private memory (the private vs. shared tracking needs to be moved into
 * guest_memfd).
 */
bool kvm_arch_supports_gmem_init_shared(struct kvm *kvm)
{
	return !kvm_arch_has_private_mem(kvm);
}

#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order)
{
	return kvm_x86_call(gmem_prepare)(kvm, pfn, gfn, max_order);
}
#endif

#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end)
{
	kvm_x86_call(gmem_invalidate)(start, end);
}
#endif
#endif

int kvm_spec_ctrl_test_value(u64 value)
{
	/*
	 * test that setting IA32_SPEC_CTRL to given value
	 * is allowed by the host processor
	 */

	u64 saved_value;
	unsigned long flags;
	int ret = 0;

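	/*
	 * Run the probe with IRQs disabled so nothing can clobber
	 * MSR_IA32_SPEC_CTRL between the test write and the restore of the
	 * saved value.
	 */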
	local_irq_save(flags);

	if (rdmsrq_safe(MSR_IA32_SPEC_CTRL, &saved_value))
		ret = 1;
	else if (wrmsrq_safe(MSR_IA32_SPEC_CTRL, value))
		ret = 1;
	else
		wrmsrq(MSR_IA32_SPEC_CTRL, saved_value);

	local_irq_restore(flags);

	return ret;
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_spec_ctrl_test_value);

void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code)
{
	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
	struct x86_exception fault;
	u64 access = error_code &
		(PFERR_WRITE_MASK | PFERR_FETCH_MASK | PFERR_USER_MASK);

	if (!(error_code & PFERR_PRESENT_MASK) ||
	    mmu->gva_to_gpa(vcpu, mmu, gva, access, &fault) != INVALID_GPA) {
		/*
		 * If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page
		 * tables probably do not match the TLB.  Just proceed
		 * with the error code that the processor gave.
		 */
		fault.vector = PF_VECTOR;
		fault.error_code_valid = true;
		fault.error_code = error_code;
		fault.nested_page_fault = false;
		fault.address = gva;
		fault.async_page_fault = false;
	}
	vcpu->arch.walk_mmu->inject_page_fault(vcpu, &fault);
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_fixup_and_inject_pf_error);

/*
 * Handles kvm_read/write_guest_virt*() result and either injects #PF or returns
 * KVM_EXIT_INTERNAL_ERROR for cases not currently handled by KVM. Return value
 * indicates whether exit to userspace is needed.
 */
int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
			      struct x86_exception *e)
{
	if (r == X86EMUL_PROPAGATE_FAULT) {
		if (KVM_BUG_ON(!e, vcpu->kvm))
			return -EIO;

		kvm_inject_emulated_page_fault(vcpu, e);
		return 1;
	}

	/*
	 * In case kvm_read/write_guest_virt*() failed with X86EMUL_IO_NEEDED
	 * while handling a VMX instruction KVM could've handled the request
	 * correctly by exiting to userspace and performing I/O but there
	 * doesn't seem to be a real use-case behind such requests, just return
	 * KVM_EXIT_INTERNAL_ERROR for now.
	 */
	kvm_prepare_emulation_failure_exit(vcpu);

	return 0;
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_handle_memory_failure);

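/*
 * Common INVPCID emulation: read the descriptor from guest memory, validate
 * the PCID reserved bits, and dispatch on the INVPCID type.
 */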
int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
{
	bool pcid_enabled;
	struct x86_exception e;
	struct {
		u64 pcid;
		u64 gla;
	} operand;
	int r;

	r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
	if (r != X86EMUL_CONTINUE)
		return kvm_handle_memory_failure(vcpu, r, &e);

	if (operand.pcid >> 12 != 0) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	pcid_enabled = kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE);

	switch (type) {
	case INVPCID_TYPE_INDIV_ADDR:
		/*
		 * LAM doesn't apply to addresses that are inputs to TLB
		 * invalidation.
		 */
		if ((!pcid_enabled && (operand.pcid != 0)) ||
		    is_noncanonical_invlpg_address(operand.gla, vcpu)) {
			kvm_inject_gp(vcpu, 0);
			return 1;
		}
		kvm_mmu_invpcid_gva(vcpu, operand.gla, operand.pcid);
		return kvm_skip_emulated_instruction(vcpu);

	case INVPCID_TYPE_SINGLE_CTXT:
		if (!pcid_enabled && (operand.pcid != 0)) {
			kvm_inject_gp(vcpu, 0);
			return 1;
		}

		/*
		 * When ERAPS is supported, invalidating a specific PCID clears
		 * the RAP (Return Address Predictor).
		 */
		if (guest_cpu_cap_has(vcpu, X86_FEATURE_ERAPS))
			kvm_register_is_dirty(vcpu, VCPU_EXREG_ERAPS);

		kvm_invalidate_pcid(vcpu, operand.pcid);
		return kvm_skip_emulated_instruction(vcpu);

	case INVPCID_TYPE_ALL_NON_GLOBAL:
		/*
		 * Currently, KVM doesn't mark global entries in the shadow
		 * page tables, so a non-global flush just degenerates to a
		 * global flush. If needed, we could optimize this later by
		 * keeping track of global entries in shadow page tables.
		 */

		fallthrough;
	case INVPCID_TYPE_ALL_INCL_GLOBAL:
		/*
		 * Don't bother marking VCPU_EXREG_ERAPS dirty, SVM will take
		 * care of doing so when emulating the full guest TLB flush
		 * (the RAP is cleared on all implicit TLB flushes).
		 */
		kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
		return kvm_skip_emulated_instruction(vcpu);

	default:
		kvm_inject_gp(vcpu, 0);
		return 1;
	}
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_handle_invpcid);

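/*
 * SEV-ES MMIO is bounced to userspace in chunks of at most 8 bytes: each exit
 * covers one chunk of the current fragment, and complete_sev_es_emulated_mmio()
 * re-arms itself as the completion callback until all fragments are consumed.
 */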
static int complete_sev_es_emulated_mmio(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	struct kvm_mmio_fragment *frag;
	unsigned int len;

	if (KVM_BUG_ON(!vcpu->mmio_needed, vcpu->kvm))
		return -EIO;

	/* Complete previous fragment */
	frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment];
	len = min(8u, frag->len);
	if (!vcpu->mmio_is_write)
		memcpy(frag->data, run->mmio.data, len);

	if (frag->len <= 8) {
		/* Switch to the next fragment. */
		frag++;
		vcpu->mmio_cur_fragment++;
	} else {
		/* Go forward to the next mmio piece. */
		frag->data += len;
		frag->gpa += len;
		frag->len -= len;
	}

	if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) {
		vcpu->mmio_needed = 0;

		/*
		 * All done, as frag->data always points at the GHCB scratch
		 * area and VMGEXIT is trap-like (RIP is advanced by hardware).
		 */
		return 1;
	}

	/* More MMIO is needed. */
	kvm_prepare_emulated_mmio_exit(vcpu, frag);
	vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;
	return 0;
}

int kvm_sev_es_mmio(struct kvm_vcpu *vcpu, bool is_write, gpa_t gpa,
		    unsigned int bytes, void *data)
{
	struct kvm_mmio_fragment *frag;
	int handled;

	if (!data || WARN_ON_ONCE(object_is_on_stack(data)))
		return -EINVAL;

	if (is_write)
		handled = vcpu_mmio_write(vcpu, gpa, bytes, data);
	else
		handled = vcpu_mmio_read(vcpu, gpa, bytes, data);
	if (handled == bytes)
		return 1;

	bytes -= handled;
	gpa += handled;
	data += handled;

	/*
	 * TODO: Determine whether or not userspace plays nice with MMIO
	 * requests that split a page boundary.
	 */
	frag = vcpu->mmio_fragments;
	frag->len = bytes;
	frag->gpa = gpa;
	frag->data = data;

	vcpu->mmio_needed = 1;
	vcpu->mmio_cur_fragment = 0;
	vcpu->mmio_nr_fragments = 1;
	vcpu->mmio_is_write = is_write;

	kvm_prepare_emulated_mmio_exit(vcpu, frag);
	vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;
	return 0;
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_sev_es_mmio);

static void advance_sev_es_emulated_pio(struct kvm_vcpu *vcpu, unsigned count, int size)
{
	vcpu->arch.sev_pio_count -= count;
	vcpu->arch.sev_pio_data += count * size;
}

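/*
 * SEV-ES string PIO is emulated in chunks of at most PAGE_SIZE / size elements
 * per pass, bouncing the data through the shared sev_pio_data buffer.  When a
 * chunk requires a userspace exit, the matching complete_* callback below
 * resumes the loop once userspace has finished the I/O.
 */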
static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
			   unsigned int port);

static int complete_sev_es_emulated_outs(struct kvm_vcpu *vcpu)
{
	int size = vcpu->arch.pio.size;
	int port = vcpu->arch.pio.port;

	vcpu->arch.pio.count = 0;
	if (vcpu->arch.sev_pio_count)
		return kvm_sev_es_outs(vcpu, size, port);
	return 1;
}

static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
			   unsigned int port)
{
	for (;;) {
		unsigned int count =
			min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count);
		int ret = emulator_pio_out(vcpu, size, port, vcpu->arch.sev_pio_data, count);

		/* memcpy done already by emulator_pio_out. */
		advance_sev_es_emulated_pio(vcpu, count, size);
		if (!ret)
			break;

		/* Emulation done by the kernel. */
		if (!vcpu->arch.sev_pio_count)
			return 1;
	}

	vcpu->arch.complete_userspace_io = complete_sev_es_emulated_outs;
	return 0;
}

static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
			  unsigned int port);

static int complete_sev_es_emulated_ins(struct kvm_vcpu *vcpu)
{
	unsigned count = vcpu->arch.pio.count;
	int size = vcpu->arch.pio.size;
	int port = vcpu->arch.pio.port;

	complete_emulator_pio_in(vcpu, vcpu->arch.sev_pio_data);
	advance_sev_es_emulated_pio(vcpu, count, size);
	if (vcpu->arch.sev_pio_count)
		return kvm_sev_es_ins(vcpu, size, port);
	return 1;
}

static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
			  unsigned int port)
{
	for (;;) {
		unsigned int count =
			min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count);
		if (!emulator_pio_in(vcpu, size, port, vcpu->arch.sev_pio_data, count))
			break;

		/* Emulation done by the kernel. */
		advance_sev_es_emulated_pio(vcpu, count, size);
		if (!vcpu->arch.sev_pio_count)
			return 1;
	}

	vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins;
	return 0;
}

int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
			 unsigned int port, void *data, unsigned int count,
			 int in)
{
	vcpu->arch.sev_pio_data = data;
	vcpu->arch.sev_pio_count = count;
	return in ? kvm_sev_es_ins(vcpu, size, port)
		  : kvm_sev_es_outs(vcpu, size, port);
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_sev_es_string_io);

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_entry);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_mmio);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmenter);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmenter_failed);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_write_tsc_offset);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ple_window_update);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pml_full);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_unaccelerated_access);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_incomplete_ipi);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_ga_log);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_kick_vcpu_slowpath);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_doorbell);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_apicv_accept_irq);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_enter);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_enter);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_rmp_fault);

static int __init kvm_x86_init(void)
{
	kvm_init_xstate_sizes();

	kvm_mmu_x86_module_init();
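	/*
	 * Keep the SMT RSB mitigation enabled only on CPUs with the
	 * X86_BUG_SMT_RSB erratum and only if SMT is actually possible.
	 */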
	mitigate_smt_rsb &= boot_cpu_has_bug(X86_BUG_SMT_RSB) && cpu_smt_possible();
	return 0;
}
module_init(kvm_x86_init);

static void __exit kvm_x86_exit(void)
{
	WARN_ON_ONCE(static_branch_unlikely(&kvm_has_noapic_vcpu));
}
module_exit(kvm_x86_exit);