xref: /linux/arch/x86/kvm/x86.c (revision e73c14ebc36bf2ba28784ff05133283003aaafdb)
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * derived from drivers/kvm/kvm_main.c
5  *
6  * Copyright (C) 2006 Qumranet, Inc.
7  * Copyright (C) 2008 Qumranet, Inc.
8  * Copyright IBM Corporation, 2008
9  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
10  *
11  * Authors:
12  *   Avi Kivity   <avi@qumranet.com>
13  *   Yaniv Kamay  <yaniv@qumranet.com>
14  *   Amit Shah    <amit.shah@qumranet.com>
15  *   Ben-Ami Yassour <benami@il.ibm.com>
16  *
17  * This work is licensed under the terms of the GNU GPL, version 2.  See
18  * the COPYING file in the top-level directory.
19  *
20  */
21 
22 #include <linux/kvm_host.h>
23 #include "irq.h"
24 #include "mmu.h"
25 #include "i8254.h"
26 #include "tss.h"
27 #include "kvm_cache_regs.h"
28 #include "x86.h"
29 #include "cpuid.h"
30 #include "assigned-dev.h"
31 #include "pmu.h"
32 #include "hyperv.h"
33 
34 #include <linux/clocksource.h>
35 #include <linux/interrupt.h>
36 #include <linux/kvm.h>
37 #include <linux/fs.h>
38 #include <linux/vmalloc.h>
39 #include <linux/export.h>
40 #include <linux/moduleparam.h>
41 #include <linux/mman.h>
42 #include <linux/highmem.h>
43 #include <linux/iommu.h>
44 #include <linux/intel-iommu.h>
45 #include <linux/cpufreq.h>
46 #include <linux/user-return-notifier.h>
47 #include <linux/srcu.h>
48 #include <linux/slab.h>
49 #include <linux/perf_event.h>
50 #include <linux/uaccess.h>
51 #include <linux/hash.h>
52 #include <linux/pci.h>
53 #include <linux/timekeeper_internal.h>
54 #include <linux/pvclock_gtod.h>
55 #include <linux/kvm_irqfd.h>
56 #include <linux/irqbypass.h>
57 #include <trace/events/kvm.h>
58 
59 #include <asm/debugreg.h>
60 #include <asm/msr.h>
61 #include <asm/desc.h>
62 #include <asm/mce.h>
63 #include <linux/kernel_stat.h>
64 #include <asm/fpu/internal.h> /* Ugh! */
65 #include <asm/pvclock.h>
66 #include <asm/div64.h>
67 #include <asm/irq_remapping.h>
68 
69 #define CREATE_TRACE_POINTS
70 #include "trace.h"
71 
72 #define MAX_IO_MSRS 256
73 #define KVM_MAX_MCE_BANKS 32
74 u64 __read_mostly kvm_mce_cap_supported = MCG_CTL_P | MCG_SER_P;
75 EXPORT_SYMBOL_GPL(kvm_mce_cap_supported);
76 
77 #define emul_to_vcpu(ctxt) \
78 	container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt)
79 
80 /* EFER defaults:
81  * - enable syscall per default because its emulated by KVM
82  * - enable LME and LMA per default on 64 bit KVM
83  */
84 #ifdef CONFIG_X86_64
85 static
86 u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
87 #else
88 static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
89 #endif
90 
91 #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
92 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
93 
94 #define KVM_X2APIC_API_VALID_FLAGS (KVM_X2APIC_API_USE_32BIT_IDS | \
95                                     KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)
96 
97 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
98 static void process_nmi(struct kvm_vcpu *vcpu);
99 static void enter_smm(struct kvm_vcpu *vcpu);
100 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
101 
102 struct kvm_x86_ops *kvm_x86_ops __read_mostly;
103 EXPORT_SYMBOL_GPL(kvm_x86_ops);
104 
105 static bool __read_mostly ignore_msrs = 0;
106 module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);
107 
108 unsigned int min_timer_period_us = 500;
109 module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
110 
111 static bool __read_mostly kvmclock_periodic_sync = true;
112 module_param(kvmclock_periodic_sync, bool, S_IRUGO);
113 
114 bool __read_mostly kvm_has_tsc_control;
115 EXPORT_SYMBOL_GPL(kvm_has_tsc_control);
116 u32  __read_mostly kvm_max_guest_tsc_khz;
117 EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);
118 u8   __read_mostly kvm_tsc_scaling_ratio_frac_bits;
119 EXPORT_SYMBOL_GPL(kvm_tsc_scaling_ratio_frac_bits);
120 u64  __read_mostly kvm_max_tsc_scaling_ratio;
121 EXPORT_SYMBOL_GPL(kvm_max_tsc_scaling_ratio);
122 u64 __read_mostly kvm_default_tsc_scaling_ratio;
123 EXPORT_SYMBOL_GPL(kvm_default_tsc_scaling_ratio);
124 
125 /* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
126 static u32 __read_mostly tsc_tolerance_ppm = 250;
127 module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);
128 
129 /* lapic timer advance (tscdeadline mode only) in nanoseconds */
130 unsigned int __read_mostly lapic_timer_advance_ns = 0;
131 module_param(lapic_timer_advance_ns, uint, S_IRUGO | S_IWUSR);
132 
133 static bool __read_mostly vector_hashing = true;
134 module_param(vector_hashing, bool, S_IRUGO);
135 
136 static bool __read_mostly backwards_tsc_observed = false;
137 
138 #define KVM_NR_SHARED_MSRS 16
139 
140 struct kvm_shared_msrs_global {
141 	int nr;
142 	u32 msrs[KVM_NR_SHARED_MSRS];
143 };
144 
145 struct kvm_shared_msrs {
146 	struct user_return_notifier urn;
147 	bool registered;
148 	struct kvm_shared_msr_values {
149 		u64 host;
150 		u64 curr;
151 	} values[KVM_NR_SHARED_MSRS];
152 };
153 
154 static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
155 static struct kvm_shared_msrs __percpu *shared_msrs;
156 
157 struct kvm_stats_debugfs_item debugfs_entries[] = {
158 	{ "pf_fixed", VCPU_STAT(pf_fixed) },
159 	{ "pf_guest", VCPU_STAT(pf_guest) },
160 	{ "tlb_flush", VCPU_STAT(tlb_flush) },
161 	{ "invlpg", VCPU_STAT(invlpg) },
162 	{ "exits", VCPU_STAT(exits) },
163 	{ "io_exits", VCPU_STAT(io_exits) },
164 	{ "mmio_exits", VCPU_STAT(mmio_exits) },
165 	{ "signal_exits", VCPU_STAT(signal_exits) },
166 	{ "irq_window", VCPU_STAT(irq_window_exits) },
167 	{ "nmi_window", VCPU_STAT(nmi_window_exits) },
168 	{ "halt_exits", VCPU_STAT(halt_exits) },
169 	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
170 	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
171 	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
172 	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
173 	{ "hypercalls", VCPU_STAT(hypercalls) },
174 	{ "request_irq", VCPU_STAT(request_irq_exits) },
175 	{ "irq_exits", VCPU_STAT(irq_exits) },
176 	{ "host_state_reload", VCPU_STAT(host_state_reload) },
177 	{ "efer_reload", VCPU_STAT(efer_reload) },
178 	{ "fpu_reload", VCPU_STAT(fpu_reload) },
179 	{ "insn_emulation", VCPU_STAT(insn_emulation) },
180 	{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
181 	{ "irq_injections", VCPU_STAT(irq_injections) },
182 	{ "nmi_injections", VCPU_STAT(nmi_injections) },
183 	{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
184 	{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
185 	{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
186 	{ "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
187 	{ "mmu_flooded", VM_STAT(mmu_flooded) },
188 	{ "mmu_recycled", VM_STAT(mmu_recycled) },
189 	{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
190 	{ "mmu_unsync", VM_STAT(mmu_unsync) },
191 	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
192 	{ "largepages", VM_STAT(lpages) },
193 	{ NULL }
194 };
195 
196 u64 __read_mostly host_xcr0;
197 
198 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);
199 
200 static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
201 {
202 	int i;
203 	for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++)
204 		vcpu->arch.apf.gfns[i] = ~0;
205 }
206 
207 static void kvm_on_user_return(struct user_return_notifier *urn)
208 {
209 	unsigned slot;
210 	struct kvm_shared_msrs *locals
211 		= container_of(urn, struct kvm_shared_msrs, urn);
212 	struct kvm_shared_msr_values *values;
213 	unsigned long flags;
214 
215 	/*
216 	 * Disabling irqs at this point since the following code could be
217 	 * interrupted and executed through kvm_arch_hardware_disable()
218 	 */
219 	local_irq_save(flags);
220 	if (locals->registered) {
221 		locals->registered = false;
222 		user_return_notifier_unregister(urn);
223 	}
224 	local_irq_restore(flags);
225 	for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
226 		values = &locals->values[slot];
227 		if (values->host != values->curr) {
228 			wrmsrl(shared_msrs_global.msrs[slot], values->host);
229 			values->curr = values->host;
230 		}
231 	}
232 }
233 
234 static void shared_msr_update(unsigned slot, u32 msr)
235 {
236 	u64 value;
237 	unsigned int cpu = smp_processor_id();
238 	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
239 
240 	/* only read, and nobody should modify it at this time,
241 	 * so don't need lock */
242 	if (slot >= shared_msrs_global.nr) {
243 		printk(KERN_ERR "kvm: invalid MSR slot!");
244 		return;
245 	}
246 	rdmsrl_safe(msr, &value);
247 	smsr->values[slot].host = value;
248 	smsr->values[slot].curr = value;
249 }
250 
251 void kvm_define_shared_msr(unsigned slot, u32 msr)
252 {
253 	BUG_ON(slot >= KVM_NR_SHARED_MSRS);
254 	shared_msrs_global.msrs[slot] = msr;
255 	if (slot >= shared_msrs_global.nr)
256 		shared_msrs_global.nr = slot + 1;
257 }
258 EXPORT_SYMBOL_GPL(kvm_define_shared_msr);
259 
260 static void kvm_shared_msr_cpu_online(void)
261 {
262 	unsigned i;
263 
264 	for (i = 0; i < shared_msrs_global.nr; ++i)
265 		shared_msr_update(i, shared_msrs_global.msrs[i]);
266 }
267 
268 int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
269 {
270 	unsigned int cpu = smp_processor_id();
271 	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
272 	int err;
273 
274 	if (((value ^ smsr->values[slot].curr) & mask) == 0)
275 		return 0;
276 	smsr->values[slot].curr = value;
277 	err = wrmsrl_safe(shared_msrs_global.msrs[slot], value);
278 	if (err)
279 		return 1;
280 
281 	if (!smsr->registered) {
282 		smsr->urn.on_user_return = kvm_on_user_return;
283 		user_return_notifier_register(&smsr->urn);
284 		smsr->registered = true;
285 	}
286 	return 0;
287 }
288 EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
289 
290 static void drop_user_return_notifiers(void)
291 {
292 	unsigned int cpu = smp_processor_id();
293 	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
294 
295 	if (smsr->registered)
296 		kvm_on_user_return(&smsr->urn);
297 }
298 
299 u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
300 {
301 	return vcpu->arch.apic_base;
302 }
303 EXPORT_SYMBOL_GPL(kvm_get_apic_base);
304 
305 int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
306 {
307 	u64 old_state = vcpu->arch.apic_base &
308 		(MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
309 	u64 new_state = msr_info->data &
310 		(MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
311 	u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) |
312 		0x2ff | (guest_cpuid_has_x2apic(vcpu) ? 0 : X2APIC_ENABLE);
313 
314 	if (!msr_info->host_initiated &&
315 	    ((msr_info->data & reserved_bits) != 0 ||
316 	     new_state == X2APIC_ENABLE ||
317 	     (new_state == MSR_IA32_APICBASE_ENABLE &&
318 	      old_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) ||
319 	     (new_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE) &&
320 	      old_state == 0)))
321 		return 1;
322 
323 	kvm_lapic_set_base(vcpu, msr_info->data);
324 	return 0;
325 }
326 EXPORT_SYMBOL_GPL(kvm_set_apic_base);
327 
328 asmlinkage __visible void kvm_spurious_fault(void)
329 {
330 	/* Fault while not rebooting.  We want the trace. */
331 	BUG();
332 }
333 EXPORT_SYMBOL_GPL(kvm_spurious_fault);
334 
335 #define EXCPT_BENIGN		0
336 #define EXCPT_CONTRIBUTORY	1
337 #define EXCPT_PF		2
338 
339 static int exception_class(int vector)
340 {
341 	switch (vector) {
342 	case PF_VECTOR:
343 		return EXCPT_PF;
344 	case DE_VECTOR:
345 	case TS_VECTOR:
346 	case NP_VECTOR:
347 	case SS_VECTOR:
348 	case GP_VECTOR:
349 		return EXCPT_CONTRIBUTORY;
350 	default:
351 		break;
352 	}
353 	return EXCPT_BENIGN;
354 }
355 
356 #define EXCPT_FAULT		0
357 #define EXCPT_TRAP		1
358 #define EXCPT_ABORT		2
359 #define EXCPT_INTERRUPT		3
360 
361 static int exception_type(int vector)
362 {
363 	unsigned int mask;
364 
365 	if (WARN_ON(vector > 31 || vector == NMI_VECTOR))
366 		return EXCPT_INTERRUPT;
367 
368 	mask = 1 << vector;
369 
370 	/* #DB is trap, as instruction watchpoints are handled elsewhere */
371 	if (mask & ((1 << DB_VECTOR) | (1 << BP_VECTOR) | (1 << OF_VECTOR)))
372 		return EXCPT_TRAP;
373 
374 	if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR)))
375 		return EXCPT_ABORT;
376 
377 	/* Reserved exceptions will result in fault */
378 	return EXCPT_FAULT;
379 }
380 
381 static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
382 		unsigned nr, bool has_error, u32 error_code,
383 		bool reinject)
384 {
385 	u32 prev_nr;
386 	int class1, class2;
387 
388 	kvm_make_request(KVM_REQ_EVENT, vcpu);
389 
390 	if (!vcpu->arch.exception.pending) {
391 	queue:
392 		if (has_error && !is_protmode(vcpu))
393 			has_error = false;
394 		vcpu->arch.exception.pending = true;
395 		vcpu->arch.exception.has_error_code = has_error;
396 		vcpu->arch.exception.nr = nr;
397 		vcpu->arch.exception.error_code = error_code;
398 		vcpu->arch.exception.reinject = reinject;
399 		return;
400 	}
401 
402 	/* to check exception */
403 	prev_nr = vcpu->arch.exception.nr;
404 	if (prev_nr == DF_VECTOR) {
405 		/* triple fault -> shutdown */
406 		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
407 		return;
408 	}
409 	class1 = exception_class(prev_nr);
410 	class2 = exception_class(nr);
411 	if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
412 		|| (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
413 		/* generate double fault per SDM Table 5-5 */
414 		vcpu->arch.exception.pending = true;
415 		vcpu->arch.exception.has_error_code = true;
416 		vcpu->arch.exception.nr = DF_VECTOR;
417 		vcpu->arch.exception.error_code = 0;
418 	} else
419 		/* replace previous exception with a new one in a hope
420 		   that instruction re-execution will regenerate lost
421 		   exception */
422 		goto queue;
423 }
424 
425 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
426 {
427 	kvm_multiple_exception(vcpu, nr, false, 0, false);
428 }
429 EXPORT_SYMBOL_GPL(kvm_queue_exception);
430 
431 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
432 {
433 	kvm_multiple_exception(vcpu, nr, false, 0, true);
434 }
435 EXPORT_SYMBOL_GPL(kvm_requeue_exception);
436 
437 int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
438 {
439 	if (err)
440 		kvm_inject_gp(vcpu, 0);
441 	else
442 		return kvm_skip_emulated_instruction(vcpu);
443 
444 	return 1;
445 }
446 EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
447 
448 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
449 {
450 	++vcpu->stat.pf_guest;
451 	vcpu->arch.cr2 = fault->address;
452 	kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
453 }
454 EXPORT_SYMBOL_GPL(kvm_inject_page_fault);
455 
456 static bool kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
457 {
458 	if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
459 		vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
460 	else
461 		vcpu->arch.mmu.inject_page_fault(vcpu, fault);
462 
463 	return fault->nested_page_fault;
464 }
465 
466 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
467 {
468 	atomic_inc(&vcpu->arch.nmi_queued);
469 	kvm_make_request(KVM_REQ_NMI, vcpu);
470 }
471 EXPORT_SYMBOL_GPL(kvm_inject_nmi);
472 
473 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
474 {
475 	kvm_multiple_exception(vcpu, nr, true, error_code, false);
476 }
477 EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
478 
479 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
480 {
481 	kvm_multiple_exception(vcpu, nr, true, error_code, true);
482 }
483 EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);
484 
485 /*
486  * Checks if cpl <= required_cpl; if true, return true.  Otherwise queue
487  * a #GP and return false.
488  */
489 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
490 {
491 	if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
492 		return true;
493 	kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
494 	return false;
495 }
496 EXPORT_SYMBOL_GPL(kvm_require_cpl);
497 
498 bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
499 {
500 	if ((dr != 4 && dr != 5) || !kvm_read_cr4_bits(vcpu, X86_CR4_DE))
501 		return true;
502 
503 	kvm_queue_exception(vcpu, UD_VECTOR);
504 	return false;
505 }
506 EXPORT_SYMBOL_GPL(kvm_require_dr);
507 
508 /*
509  * This function will be used to read from the physical memory of the currently
510  * running guest. The difference to kvm_vcpu_read_guest_page is that this function
511  * can read from guest physical or from the guest's guest physical memory.
512  */
513 int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
514 			    gfn_t ngfn, void *data, int offset, int len,
515 			    u32 access)
516 {
517 	struct x86_exception exception;
518 	gfn_t real_gfn;
519 	gpa_t ngpa;
520 
521 	ngpa     = gfn_to_gpa(ngfn);
522 	real_gfn = mmu->translate_gpa(vcpu, ngpa, access, &exception);
523 	if (real_gfn == UNMAPPED_GVA)
524 		return -EFAULT;
525 
526 	real_gfn = gpa_to_gfn(real_gfn);
527 
528 	return kvm_vcpu_read_guest_page(vcpu, real_gfn, data, offset, len);
529 }
530 EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);
531 
532 static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
533 			       void *data, int offset, int len, u32 access)
534 {
535 	return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
536 				       data, offset, len, access);
537 }
538 
539 /*
540  * Load the pae pdptrs.  Return true is they are all valid.
541  */
542 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
543 {
544 	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
545 	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
546 	int i;
547 	int ret;
548 	u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];
549 
550 	ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
551 				      offset * sizeof(u64), sizeof(pdpte),
552 				      PFERR_USER_MASK|PFERR_WRITE_MASK);
553 	if (ret < 0) {
554 		ret = 0;
555 		goto out;
556 	}
557 	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
558 		if ((pdpte[i] & PT_PRESENT_MASK) &&
559 		    (pdpte[i] &
560 		     vcpu->arch.mmu.guest_rsvd_check.rsvd_bits_mask[0][2])) {
561 			ret = 0;
562 			goto out;
563 		}
564 	}
565 	ret = 1;
566 
567 	memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
568 	__set_bit(VCPU_EXREG_PDPTR,
569 		  (unsigned long *)&vcpu->arch.regs_avail);
570 	__set_bit(VCPU_EXREG_PDPTR,
571 		  (unsigned long *)&vcpu->arch.regs_dirty);
572 out:
573 
574 	return ret;
575 }
576 EXPORT_SYMBOL_GPL(load_pdptrs);
577 
578 bool pdptrs_changed(struct kvm_vcpu *vcpu)
579 {
580 	u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
581 	bool changed = true;
582 	int offset;
583 	gfn_t gfn;
584 	int r;
585 
586 	if (is_long_mode(vcpu) || !is_pae(vcpu))
587 		return false;
588 
589 	if (!test_bit(VCPU_EXREG_PDPTR,
590 		      (unsigned long *)&vcpu->arch.regs_avail))
591 		return true;
592 
593 	gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT;
594 	offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1);
595 	r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
596 				       PFERR_USER_MASK | PFERR_WRITE_MASK);
597 	if (r < 0)
598 		goto out;
599 	changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
600 out:
601 
602 	return changed;
603 }
604 EXPORT_SYMBOL_GPL(pdptrs_changed);
605 
606 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
607 {
608 	unsigned long old_cr0 = kvm_read_cr0(vcpu);
609 	unsigned long update_bits = X86_CR0_PG | X86_CR0_WP;
610 
611 	cr0 |= X86_CR0_ET;
612 
613 #ifdef CONFIG_X86_64
614 	if (cr0 & 0xffffffff00000000UL)
615 		return 1;
616 #endif
617 
618 	cr0 &= ~CR0_RESERVED_BITS;
619 
620 	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
621 		return 1;
622 
623 	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
624 		return 1;
625 
626 	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
627 #ifdef CONFIG_X86_64
628 		if ((vcpu->arch.efer & EFER_LME)) {
629 			int cs_db, cs_l;
630 
631 			if (!is_pae(vcpu))
632 				return 1;
633 			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
634 			if (cs_l)
635 				return 1;
636 		} else
637 #endif
638 		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
639 						 kvm_read_cr3(vcpu)))
640 			return 1;
641 	}
642 
643 	if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
644 		return 1;
645 
646 	kvm_x86_ops->set_cr0(vcpu, cr0);
647 
648 	if ((cr0 ^ old_cr0) & X86_CR0_PG) {
649 		kvm_clear_async_pf_completion_queue(vcpu);
650 		kvm_async_pf_hash_reset(vcpu);
651 	}
652 
653 	if ((cr0 ^ old_cr0) & update_bits)
654 		kvm_mmu_reset_context(vcpu);
655 
656 	if (((cr0 ^ old_cr0) & X86_CR0_CD) &&
657 	    kvm_arch_has_noncoherent_dma(vcpu->kvm) &&
658 	    !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
659 		kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL);
660 
661 	return 0;
662 }
663 EXPORT_SYMBOL_GPL(kvm_set_cr0);
664 
665 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
666 {
667 	(void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
668 }
669 EXPORT_SYMBOL_GPL(kvm_lmsw);
670 
671 static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
672 {
673 	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
674 			!vcpu->guest_xcr0_loaded) {
675 		/* kvm_set_xcr() also depends on this */
676 		xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
677 		vcpu->guest_xcr0_loaded = 1;
678 	}
679 }
680 
681 static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
682 {
683 	if (vcpu->guest_xcr0_loaded) {
684 		if (vcpu->arch.xcr0 != host_xcr0)
685 			xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
686 		vcpu->guest_xcr0_loaded = 0;
687 	}
688 }
689 
690 static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
691 {
692 	u64 xcr0 = xcr;
693 	u64 old_xcr0 = vcpu->arch.xcr0;
694 	u64 valid_bits;
695 
696 	/* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now  */
697 	if (index != XCR_XFEATURE_ENABLED_MASK)
698 		return 1;
699 	if (!(xcr0 & XFEATURE_MASK_FP))
700 		return 1;
701 	if ((xcr0 & XFEATURE_MASK_YMM) && !(xcr0 & XFEATURE_MASK_SSE))
702 		return 1;
703 
704 	/*
705 	 * Do not allow the guest to set bits that we do not support
706 	 * saving.  However, xcr0 bit 0 is always set, even if the
707 	 * emulated CPU does not support XSAVE (see fx_init).
708 	 */
709 	valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP;
710 	if (xcr0 & ~valid_bits)
711 		return 1;
712 
713 	if ((!(xcr0 & XFEATURE_MASK_BNDREGS)) !=
714 	    (!(xcr0 & XFEATURE_MASK_BNDCSR)))
715 		return 1;
716 
717 	if (xcr0 & XFEATURE_MASK_AVX512) {
718 		if (!(xcr0 & XFEATURE_MASK_YMM))
719 			return 1;
720 		if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
721 			return 1;
722 	}
723 	vcpu->arch.xcr0 = xcr0;
724 
725 	if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
726 		kvm_update_cpuid(vcpu);
727 	return 0;
728 }
729 
730 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
731 {
732 	if (kvm_x86_ops->get_cpl(vcpu) != 0 ||
733 	    __kvm_set_xcr(vcpu, index, xcr)) {
734 		kvm_inject_gp(vcpu, 0);
735 		return 1;
736 	}
737 	return 0;
738 }
739 EXPORT_SYMBOL_GPL(kvm_set_xcr);
740 
741 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
742 {
743 	unsigned long old_cr4 = kvm_read_cr4(vcpu);
744 	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
745 				   X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
746 
747 	if (cr4 & CR4_RESERVED_BITS)
748 		return 1;
749 
750 	if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
751 		return 1;
752 
753 	if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP))
754 		return 1;
755 
756 	if (!guest_cpuid_has_smap(vcpu) && (cr4 & X86_CR4_SMAP))
757 		return 1;
758 
759 	if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_FSGSBASE))
760 		return 1;
761 
762 	if (!guest_cpuid_has_pku(vcpu) && (cr4 & X86_CR4_PKE))
763 		return 1;
764 
765 	if (is_long_mode(vcpu)) {
766 		if (!(cr4 & X86_CR4_PAE))
767 			return 1;
768 	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
769 		   && ((cr4 ^ old_cr4) & pdptr_bits)
770 		   && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
771 				   kvm_read_cr3(vcpu)))
772 		return 1;
773 
774 	if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
775 		if (!guest_cpuid_has_pcid(vcpu))
776 			return 1;
777 
778 		/* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */
779 		if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
780 			return 1;
781 	}
782 
783 	if (kvm_x86_ops->set_cr4(vcpu, cr4))
784 		return 1;
785 
786 	if (((cr4 ^ old_cr4) & pdptr_bits) ||
787 	    (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
788 		kvm_mmu_reset_context(vcpu);
789 
790 	if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
791 		kvm_update_cpuid(vcpu);
792 
793 	return 0;
794 }
795 EXPORT_SYMBOL_GPL(kvm_set_cr4);
796 
797 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
798 {
799 #ifdef CONFIG_X86_64
800 	cr3 &= ~CR3_PCID_INVD;
801 #endif
802 
803 	if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
804 		kvm_mmu_sync_roots(vcpu);
805 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
806 		return 0;
807 	}
808 
809 	if (is_long_mode(vcpu)) {
810 		if (cr3 & CR3_L_MODE_RESERVED_BITS)
811 			return 1;
812 	} else if (is_pae(vcpu) && is_paging(vcpu) &&
813 		   !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
814 		return 1;
815 
816 	vcpu->arch.cr3 = cr3;
817 	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
818 	kvm_mmu_new_cr3(vcpu);
819 	return 0;
820 }
821 EXPORT_SYMBOL_GPL(kvm_set_cr3);
822 
823 int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
824 {
825 	if (cr8 & CR8_RESERVED_BITS)
826 		return 1;
827 	if (lapic_in_kernel(vcpu))
828 		kvm_lapic_set_tpr(vcpu, cr8);
829 	else
830 		vcpu->arch.cr8 = cr8;
831 	return 0;
832 }
833 EXPORT_SYMBOL_GPL(kvm_set_cr8);
834 
835 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
836 {
837 	if (lapic_in_kernel(vcpu))
838 		return kvm_lapic_get_cr8(vcpu);
839 	else
840 		return vcpu->arch.cr8;
841 }
842 EXPORT_SYMBOL_GPL(kvm_get_cr8);
843 
844 static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
845 {
846 	int i;
847 
848 	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
849 		for (i = 0; i < KVM_NR_DB_REGS; i++)
850 			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
851 		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD;
852 	}
853 }
854 
855 static void kvm_update_dr6(struct kvm_vcpu *vcpu)
856 {
857 	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
858 		kvm_x86_ops->set_dr6(vcpu, vcpu->arch.dr6);
859 }
860 
861 static void kvm_update_dr7(struct kvm_vcpu *vcpu)
862 {
863 	unsigned long dr7;
864 
865 	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
866 		dr7 = vcpu->arch.guest_debug_dr7;
867 	else
868 		dr7 = vcpu->arch.dr7;
869 	kvm_x86_ops->set_dr7(vcpu, dr7);
870 	vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED;
871 	if (dr7 & DR7_BP_EN_MASK)
872 		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED;
873 }
874 
875 static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
876 {
877 	u64 fixed = DR6_FIXED_1;
878 
879 	if (!guest_cpuid_has_rtm(vcpu))
880 		fixed |= DR6_RTM;
881 	return fixed;
882 }
883 
884 static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
885 {
886 	switch (dr) {
887 	case 0 ... 3:
888 		vcpu->arch.db[dr] = val;
889 		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
890 			vcpu->arch.eff_db[dr] = val;
891 		break;
892 	case 4:
893 		/* fall through */
894 	case 6:
895 		if (val & 0xffffffff00000000ULL)
896 			return -1; /* #GP */
897 		vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu);
898 		kvm_update_dr6(vcpu);
899 		break;
900 	case 5:
901 		/* fall through */
902 	default: /* 7 */
903 		if (val & 0xffffffff00000000ULL)
904 			return -1; /* #GP */
905 		vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
906 		kvm_update_dr7(vcpu);
907 		break;
908 	}
909 
910 	return 0;
911 }
912 
913 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
914 {
915 	if (__kvm_set_dr(vcpu, dr, val)) {
916 		kvm_inject_gp(vcpu, 0);
917 		return 1;
918 	}
919 	return 0;
920 }
921 EXPORT_SYMBOL_GPL(kvm_set_dr);
922 
923 int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
924 {
925 	switch (dr) {
926 	case 0 ... 3:
927 		*val = vcpu->arch.db[dr];
928 		break;
929 	case 4:
930 		/* fall through */
931 	case 6:
932 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
933 			*val = vcpu->arch.dr6;
934 		else
935 			*val = kvm_x86_ops->get_dr6(vcpu);
936 		break;
937 	case 5:
938 		/* fall through */
939 	default: /* 7 */
940 		*val = vcpu->arch.dr7;
941 		break;
942 	}
943 	return 0;
944 }
945 EXPORT_SYMBOL_GPL(kvm_get_dr);
946 
947 bool kvm_rdpmc(struct kvm_vcpu *vcpu)
948 {
949 	u32 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
950 	u64 data;
951 	int err;
952 
953 	err = kvm_pmu_rdpmc(vcpu, ecx, &data);
954 	if (err)
955 		return err;
956 	kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data);
957 	kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32);
958 	return err;
959 }
960 EXPORT_SYMBOL_GPL(kvm_rdpmc);
961 
962 /*
963  * List of msr numbers which we expose to userspace through KVM_GET_MSRS
964  * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
965  *
966  * This list is modified at module load time to reflect the
967  * capabilities of the host cpu. This capabilities test skips MSRs that are
968  * kvm-specific. Those are put in emulated_msrs; filtering of emulated_msrs
969  * may depend on host virtualization features rather than host cpu features.
970  */
971 
972 static u32 msrs_to_save[] = {
973 	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
974 	MSR_STAR,
975 #ifdef CONFIG_X86_64
976 	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
977 #endif
978 	MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
979 	MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
980 };
981 
982 static unsigned num_msrs_to_save;
983 
984 static u32 emulated_msrs[] = {
985 	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
986 	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
987 	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
988 	HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
989 	HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2,
990 	HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL,
991 	HV_X64_MSR_RESET,
992 	HV_X64_MSR_VP_INDEX,
993 	HV_X64_MSR_VP_RUNTIME,
994 	HV_X64_MSR_SCONTROL,
995 	HV_X64_MSR_STIMER0_CONFIG,
996 	HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
997 	MSR_KVM_PV_EOI_EN,
998 
999 	MSR_IA32_TSC_ADJUST,
1000 	MSR_IA32_TSCDEADLINE,
1001 	MSR_IA32_MISC_ENABLE,
1002 	MSR_IA32_MCG_STATUS,
1003 	MSR_IA32_MCG_CTL,
1004 	MSR_IA32_MCG_EXT_CTL,
1005 	MSR_IA32_SMBASE,
1006 };
1007 
1008 static unsigned num_emulated_msrs;
1009 
1010 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
1011 {
1012 	if (efer & efer_reserved_bits)
1013 		return false;
1014 
1015 	if (efer & EFER_FFXSR) {
1016 		struct kvm_cpuid_entry2 *feat;
1017 
1018 		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
1019 		if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
1020 			return false;
1021 	}
1022 
1023 	if (efer & EFER_SVME) {
1024 		struct kvm_cpuid_entry2 *feat;
1025 
1026 		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
1027 		if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
1028 			return false;
1029 	}
1030 
1031 	return true;
1032 }
1033 EXPORT_SYMBOL_GPL(kvm_valid_efer);
1034 
1035 static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
1036 {
1037 	u64 old_efer = vcpu->arch.efer;
1038 
1039 	if (!kvm_valid_efer(vcpu, efer))
1040 		return 1;
1041 
1042 	if (is_paging(vcpu)
1043 	    && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
1044 		return 1;
1045 
1046 	efer &= ~EFER_LMA;
1047 	efer |= vcpu->arch.efer & EFER_LMA;
1048 
1049 	kvm_x86_ops->set_efer(vcpu, efer);
1050 
1051 	/* Update reserved bits */
1052 	if ((efer ^ old_efer) & EFER_NX)
1053 		kvm_mmu_reset_context(vcpu);
1054 
1055 	return 0;
1056 }
1057 
1058 void kvm_enable_efer_bits(u64 mask)
1059 {
1060        efer_reserved_bits &= ~mask;
1061 }
1062 EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
1063 
1064 /*
1065  * Writes msr value into into the appropriate "register".
1066  * Returns 0 on success, non-0 otherwise.
1067  * Assumes vcpu_load() was already called.
1068  */
1069 int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
1070 {
1071 	switch (msr->index) {
1072 	case MSR_FS_BASE:
1073 	case MSR_GS_BASE:
1074 	case MSR_KERNEL_GS_BASE:
1075 	case MSR_CSTAR:
1076 	case MSR_LSTAR:
1077 		if (is_noncanonical_address(msr->data))
1078 			return 1;
1079 		break;
1080 	case MSR_IA32_SYSENTER_EIP:
1081 	case MSR_IA32_SYSENTER_ESP:
1082 		/*
1083 		 * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
1084 		 * non-canonical address is written on Intel but not on
1085 		 * AMD (which ignores the top 32-bits, because it does
1086 		 * not implement 64-bit SYSENTER).
1087 		 *
1088 		 * 64-bit code should hence be able to write a non-canonical
1089 		 * value on AMD.  Making the address canonical ensures that
1090 		 * vmentry does not fail on Intel after writing a non-canonical
1091 		 * value, and that something deterministic happens if the guest
1092 		 * invokes 64-bit SYSENTER.
1093 		 */
1094 		msr->data = get_canonical(msr->data);
1095 	}
1096 	return kvm_x86_ops->set_msr(vcpu, msr);
1097 }
1098 EXPORT_SYMBOL_GPL(kvm_set_msr);
1099 
1100 /*
1101  * Adapt set_msr() to msr_io()'s calling convention
1102  */
1103 static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
1104 {
1105 	struct msr_data msr;
1106 	int r;
1107 
1108 	msr.index = index;
1109 	msr.host_initiated = true;
1110 	r = kvm_get_msr(vcpu, &msr);
1111 	if (r)
1112 		return r;
1113 
1114 	*data = msr.data;
1115 	return 0;
1116 }
1117 
1118 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
1119 {
1120 	struct msr_data msr;
1121 
1122 	msr.data = *data;
1123 	msr.index = index;
1124 	msr.host_initiated = true;
1125 	return kvm_set_msr(vcpu, &msr);
1126 }
1127 
1128 #ifdef CONFIG_X86_64
1129 struct pvclock_gtod_data {
1130 	seqcount_t	seq;
1131 
1132 	struct { /* extract of a clocksource struct */
1133 		int vclock_mode;
1134 		u64	cycle_last;
1135 		u64	mask;
1136 		u32	mult;
1137 		u32	shift;
1138 	} clock;
1139 
1140 	u64		boot_ns;
1141 	u64		nsec_base;
1142 };
1143 
1144 static struct pvclock_gtod_data pvclock_gtod_data;
1145 
1146 static void update_pvclock_gtod(struct timekeeper *tk)
1147 {
1148 	struct pvclock_gtod_data *vdata = &pvclock_gtod_data;
1149 	u64 boot_ns;
1150 
1151 	boot_ns = ktime_to_ns(ktime_add(tk->tkr_mono.base, tk->offs_boot));
1152 
1153 	write_seqcount_begin(&vdata->seq);
1154 
1155 	/* copy pvclock gtod data */
1156 	vdata->clock.vclock_mode	= tk->tkr_mono.clock->archdata.vclock_mode;
1157 	vdata->clock.cycle_last		= tk->tkr_mono.cycle_last;
1158 	vdata->clock.mask		= tk->tkr_mono.mask;
1159 	vdata->clock.mult		= tk->tkr_mono.mult;
1160 	vdata->clock.shift		= tk->tkr_mono.shift;
1161 
1162 	vdata->boot_ns			= boot_ns;
1163 	vdata->nsec_base		= tk->tkr_mono.xtime_nsec;
1164 
1165 	write_seqcount_end(&vdata->seq);
1166 }
1167 #endif
1168 
1169 void kvm_set_pending_timer(struct kvm_vcpu *vcpu)
1170 {
1171 	/*
1172 	 * Note: KVM_REQ_PENDING_TIMER is implicitly checked in
1173 	 * vcpu_enter_guest.  This function is only called from
1174 	 * the physical CPU that is running vcpu.
1175 	 */
1176 	kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
1177 }
1178 
1179 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
1180 {
1181 	int version;
1182 	int r;
1183 	struct pvclock_wall_clock wc;
1184 	struct timespec64 boot;
1185 
1186 	if (!wall_clock)
1187 		return;
1188 
1189 	r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
1190 	if (r)
1191 		return;
1192 
1193 	if (version & 1)
1194 		++version;  /* first time write, random junk */
1195 
1196 	++version;
1197 
1198 	if (kvm_write_guest(kvm, wall_clock, &version, sizeof(version)))
1199 		return;
1200 
1201 	/*
1202 	 * The guest calculates current wall clock time by adding
1203 	 * system time (updated by kvm_guest_time_update below) to the
1204 	 * wall clock specified here.  guest system time equals host
1205 	 * system time for us, thus we must fill in host boot time here.
1206 	 */
1207 	getboottime64(&boot);
1208 
1209 	if (kvm->arch.kvmclock_offset) {
1210 		struct timespec64 ts = ns_to_timespec64(kvm->arch.kvmclock_offset);
1211 		boot = timespec64_sub(boot, ts);
1212 	}
1213 	wc.sec = (u32)boot.tv_sec; /* overflow in 2106 guest time */
1214 	wc.nsec = boot.tv_nsec;
1215 	wc.version = version;
1216 
1217 	kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));
1218 
1219 	version++;
1220 	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
1221 }
1222 
1223 static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
1224 {
1225 	do_shl32_div32(dividend, divisor);
1226 	return dividend;
1227 }
1228 
1229 static void kvm_get_time_scale(uint64_t scaled_hz, uint64_t base_hz,
1230 			       s8 *pshift, u32 *pmultiplier)
1231 {
1232 	uint64_t scaled64;
1233 	int32_t  shift = 0;
1234 	uint64_t tps64;
1235 	uint32_t tps32;
1236 
1237 	tps64 = base_hz;
1238 	scaled64 = scaled_hz;
1239 	while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) {
1240 		tps64 >>= 1;
1241 		shift--;
1242 	}
1243 
1244 	tps32 = (uint32_t)tps64;
1245 	while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) {
1246 		if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
1247 			scaled64 >>= 1;
1248 		else
1249 			tps32 <<= 1;
1250 		shift++;
1251 	}
1252 
1253 	*pshift = shift;
1254 	*pmultiplier = div_frac(scaled64, tps32);
1255 
1256 	pr_debug("%s: base_hz %llu => %llu, shift %d, mul %u\n",
1257 		 __func__, base_hz, scaled_hz, shift, *pmultiplier);
1258 }
1259 
1260 #ifdef CONFIG_X86_64
1261 static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
1262 #endif
1263 
1264 static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
1265 static unsigned long max_tsc_khz;
1266 
1267 static u32 adjust_tsc_khz(u32 khz, s32 ppm)
1268 {
1269 	u64 v = (u64)khz * (1000000 + ppm);
1270 	do_div(v, 1000000);
1271 	return v;
1272 }
1273 
1274 static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
1275 {
1276 	u64 ratio;
1277 
1278 	/* Guest TSC same frequency as host TSC? */
1279 	if (!scale) {
1280 		vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
1281 		return 0;
1282 	}
1283 
1284 	/* TSC scaling supported? */
1285 	if (!kvm_has_tsc_control) {
1286 		if (user_tsc_khz > tsc_khz) {
1287 			vcpu->arch.tsc_catchup = 1;
1288 			vcpu->arch.tsc_always_catchup = 1;
1289 			return 0;
1290 		} else {
1291 			WARN(1, "user requested TSC rate below hardware speed\n");
1292 			return -1;
1293 		}
1294 	}
1295 
1296 	/* TSC scaling required  - calculate ratio */
1297 	ratio = mul_u64_u32_div(1ULL << kvm_tsc_scaling_ratio_frac_bits,
1298 				user_tsc_khz, tsc_khz);
1299 
1300 	if (ratio == 0 || ratio >= kvm_max_tsc_scaling_ratio) {
1301 		WARN_ONCE(1, "Invalid TSC scaling ratio - virtual-tsc-khz=%u\n",
1302 			  user_tsc_khz);
1303 		return -1;
1304 	}
1305 
1306 	vcpu->arch.tsc_scaling_ratio = ratio;
1307 	return 0;
1308 }
1309 
1310 static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
1311 {
1312 	u32 thresh_lo, thresh_hi;
1313 	int use_scaling = 0;
1314 
1315 	/* tsc_khz can be zero if TSC calibration fails */
1316 	if (user_tsc_khz == 0) {
1317 		/* set tsc_scaling_ratio to a safe value */
1318 		vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
1319 		return -1;
1320 	}
1321 
1322 	/* Compute a scale to convert nanoseconds in TSC cycles */
1323 	kvm_get_time_scale(user_tsc_khz * 1000LL, NSEC_PER_SEC,
1324 			   &vcpu->arch.virtual_tsc_shift,
1325 			   &vcpu->arch.virtual_tsc_mult);
1326 	vcpu->arch.virtual_tsc_khz = user_tsc_khz;
1327 
1328 	/*
1329 	 * Compute the variation in TSC rate which is acceptable
1330 	 * within the range of tolerance and decide if the
1331 	 * rate being applied is within that bounds of the hardware
1332 	 * rate.  If so, no scaling or compensation need be done.
1333 	 */
1334 	thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm);
1335 	thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm);
1336 	if (user_tsc_khz < thresh_lo || user_tsc_khz > thresh_hi) {
1337 		pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", user_tsc_khz, thresh_lo, thresh_hi);
1338 		use_scaling = 1;
1339 	}
1340 	return set_tsc_khz(vcpu, user_tsc_khz, use_scaling);
1341 }
1342 
1343 static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
1344 {
1345 	u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec,
1346 				      vcpu->arch.virtual_tsc_mult,
1347 				      vcpu->arch.virtual_tsc_shift);
1348 	tsc += vcpu->arch.this_tsc_write;
1349 	return tsc;
1350 }
1351 
1352 static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
1353 {
1354 #ifdef CONFIG_X86_64
1355 	bool vcpus_matched;
1356 	struct kvm_arch *ka = &vcpu->kvm->arch;
1357 	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
1358 
1359 	vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
1360 			 atomic_read(&vcpu->kvm->online_vcpus));
1361 
1362 	/*
1363 	 * Once the masterclock is enabled, always perform request in
1364 	 * order to update it.
1365 	 *
1366 	 * In order to enable masterclock, the host clocksource must be TSC
1367 	 * and the vcpus need to have matched TSCs.  When that happens,
1368 	 * perform request to enable masterclock.
1369 	 */
1370 	if (ka->use_master_clock ||
1371 	    (gtod->clock.vclock_mode == VCLOCK_TSC && vcpus_matched))
1372 		kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
1373 
1374 	trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc,
1375 			    atomic_read(&vcpu->kvm->online_vcpus),
1376 		            ka->use_master_clock, gtod->clock.vclock_mode);
1377 #endif
1378 }
1379 
1380 static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset)
1381 {
1382 	u64 curr_offset = vcpu->arch.tsc_offset;
1383 	vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset;
1384 }
1385 
1386 /*
1387  * Multiply tsc by a fixed point number represented by ratio.
1388  *
1389  * The most significant 64-N bits (mult) of ratio represent the
1390  * integral part of the fixed point number; the remaining N bits
1391  * (frac) represent the fractional part, ie. ratio represents a fixed
1392  * point number (mult + frac * 2^(-N)).
1393  *
1394  * N equals to kvm_tsc_scaling_ratio_frac_bits.
1395  */
1396 static inline u64 __scale_tsc(u64 ratio, u64 tsc)
1397 {
1398 	return mul_u64_u64_shr(tsc, ratio, kvm_tsc_scaling_ratio_frac_bits);
1399 }
1400 
1401 u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
1402 {
1403 	u64 _tsc = tsc;
1404 	u64 ratio = vcpu->arch.tsc_scaling_ratio;
1405 
1406 	if (ratio != kvm_default_tsc_scaling_ratio)
1407 		_tsc = __scale_tsc(ratio, tsc);
1408 
1409 	return _tsc;
1410 }
1411 EXPORT_SYMBOL_GPL(kvm_scale_tsc);
1412 
1413 static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
1414 {
1415 	u64 tsc;
1416 
1417 	tsc = kvm_scale_tsc(vcpu, rdtsc());
1418 
1419 	return target_tsc - tsc;
1420 }
1421 
1422 u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
1423 {
1424 	return vcpu->arch.tsc_offset + kvm_scale_tsc(vcpu, host_tsc);
1425 }
1426 EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
1427 
1428 static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
1429 {
1430 	kvm_x86_ops->write_tsc_offset(vcpu, offset);
1431 	vcpu->arch.tsc_offset = offset;
1432 }
1433 
1434 void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
1435 {
1436 	struct kvm *kvm = vcpu->kvm;
1437 	u64 offset, ns, elapsed;
1438 	unsigned long flags;
1439 	s64 usdiff;
1440 	bool matched;
1441 	bool already_matched;
1442 	u64 data = msr->data;
1443 
1444 	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
1445 	offset = kvm_compute_tsc_offset(vcpu, data);
1446 	ns = ktime_get_boot_ns();
1447 	elapsed = ns - kvm->arch.last_tsc_nsec;
1448 
1449 	if (vcpu->arch.virtual_tsc_khz) {
1450 		int faulted = 0;
1451 
1452 		/* n.b - signed multiplication and division required */
1453 		usdiff = data - kvm->arch.last_tsc_write;
1454 #ifdef CONFIG_X86_64
1455 		usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz;
1456 #else
1457 		/* do_div() only does unsigned */
1458 		asm("1: idivl %[divisor]\n"
1459 		    "2: xor %%edx, %%edx\n"
1460 		    "   movl $0, %[faulted]\n"
1461 		    "3:\n"
1462 		    ".section .fixup,\"ax\"\n"
1463 		    "4: movl $1, %[faulted]\n"
1464 		    "   jmp  3b\n"
1465 		    ".previous\n"
1466 
1467 		_ASM_EXTABLE(1b, 4b)
1468 
1469 		: "=A"(usdiff), [faulted] "=r" (faulted)
1470 		: "A"(usdiff * 1000), [divisor] "rm"(vcpu->arch.virtual_tsc_khz));
1471 
1472 #endif
1473 		do_div(elapsed, 1000);
1474 		usdiff -= elapsed;
1475 		if (usdiff < 0)
1476 			usdiff = -usdiff;
1477 
1478 		/* idivl overflow => difference is larger than USEC_PER_SEC */
1479 		if (faulted)
1480 			usdiff = USEC_PER_SEC;
1481 	} else
1482 		usdiff = USEC_PER_SEC; /* disable TSC match window below */
1483 
1484 	/*
1485 	 * Special case: TSC write with a small delta (1 second) of virtual
1486 	 * cycle time against real time is interpreted as an attempt to
1487 	 * synchronize the CPU.
1488          *
1489 	 * For a reliable TSC, we can match TSC offsets, and for an unstable
1490 	 * TSC, we add elapsed time in this computation.  We could let the
1491 	 * compensation code attempt to catch up if we fall behind, but
1492 	 * it's better to try to match offsets from the beginning.
1493          */
1494 	if (usdiff < USEC_PER_SEC &&
1495 	    vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
1496 		if (!check_tsc_unstable()) {
1497 			offset = kvm->arch.cur_tsc_offset;
1498 			pr_debug("kvm: matched tsc offset for %llu\n", data);
1499 		} else {
1500 			u64 delta = nsec_to_cycles(vcpu, elapsed);
1501 			data += delta;
1502 			offset = kvm_compute_tsc_offset(vcpu, data);
1503 			pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
1504 		}
1505 		matched = true;
1506 		already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation);
1507 	} else {
1508 		/*
1509 		 * We split periods of matched TSC writes into generations.
1510 		 * For each generation, we track the original measured
1511 		 * nanosecond time, offset, and write, so if TSCs are in
1512 		 * sync, we can match exact offset, and if not, we can match
1513 		 * exact software computation in compute_guest_tsc()
1514 		 *
1515 		 * These values are tracked in kvm->arch.cur_xxx variables.
1516 		 */
1517 		kvm->arch.cur_tsc_generation++;
1518 		kvm->arch.cur_tsc_nsec = ns;
1519 		kvm->arch.cur_tsc_write = data;
1520 		kvm->arch.cur_tsc_offset = offset;
1521 		matched = false;
1522 		pr_debug("kvm: new tsc generation %llu, clock %llu\n",
1523 			 kvm->arch.cur_tsc_generation, data);
1524 	}
1525 
1526 	/*
1527 	 * We also track th most recent recorded KHZ, write and time to
1528 	 * allow the matching interval to be extended at each write.
1529 	 */
1530 	kvm->arch.last_tsc_nsec = ns;
1531 	kvm->arch.last_tsc_write = data;
1532 	kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
1533 
1534 	vcpu->arch.last_guest_tsc = data;
1535 
1536 	/* Keep track of which generation this VCPU has synchronized to */
1537 	vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
1538 	vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
1539 	vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
1540 
1541 	if (guest_cpuid_has_tsc_adjust(vcpu) && !msr->host_initiated)
1542 		update_ia32_tsc_adjust_msr(vcpu, offset);
1543 	kvm_vcpu_write_tsc_offset(vcpu, offset);
1544 	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
1545 
1546 	spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
1547 	if (!matched) {
1548 		kvm->arch.nr_vcpus_matched_tsc = 0;
1549 	} else if (!already_matched) {
1550 		kvm->arch.nr_vcpus_matched_tsc++;
1551 	}
1552 
1553 	kvm_track_tsc_matching(vcpu);
1554 	spin_unlock(&kvm->arch.pvclock_gtod_sync_lock);
1555 }
1556 
1557 EXPORT_SYMBOL_GPL(kvm_write_tsc);
1558 
1559 static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
1560 					   s64 adjustment)
1561 {
1562 	kvm_vcpu_write_tsc_offset(vcpu, vcpu->arch.tsc_offset + adjustment);
1563 }
1564 
1565 static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
1566 {
1567 	if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio)
1568 		WARN_ON(adjustment < 0);
1569 	adjustment = kvm_scale_tsc(vcpu, (u64) adjustment);
1570 	adjust_tsc_offset_guest(vcpu, adjustment);
1571 }
1572 
1573 #ifdef CONFIG_X86_64
1574 
1575 static u64 read_tsc(void)
1576 {
1577 	u64 ret = (u64)rdtsc_ordered();
1578 	u64 last = pvclock_gtod_data.clock.cycle_last;
1579 
1580 	if (likely(ret >= last))
1581 		return ret;
1582 
1583 	/*
1584 	 * GCC likes to generate cmov here, but this branch is extremely
1585 	 * predictable (it's just a function of time and the likely is
1586 	 * very likely) and there's a data dependence, so force GCC
1587 	 * to generate a branch instead.  I don't barrier() because
1588 	 * we don't actually need a barrier, and if this function
1589 	 * ever gets inlined it will generate worse code.
1590 	 */
1591 	asm volatile ("");
1592 	return last;
1593 }
1594 
1595 static inline u64 vgettsc(u64 *cycle_now)
1596 {
1597 	long v;
1598 	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
1599 
1600 	*cycle_now = read_tsc();
1601 
1602 	v = (*cycle_now - gtod->clock.cycle_last) & gtod->clock.mask;
1603 	return v * gtod->clock.mult;
1604 }
1605 
1606 static int do_monotonic_boot(s64 *t, u64 *cycle_now)
1607 {
1608 	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
1609 	unsigned long seq;
1610 	int mode;
1611 	u64 ns;
1612 
1613 	do {
1614 		seq = read_seqcount_begin(&gtod->seq);
1615 		mode = gtod->clock.vclock_mode;
1616 		ns = gtod->nsec_base;
1617 		ns += vgettsc(cycle_now);
1618 		ns >>= gtod->clock.shift;
1619 		ns += gtod->boot_ns;
1620 	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
1621 	*t = ns;
1622 
1623 	return mode;
1624 }
1625 
1626 /* returns true if host is using tsc clocksource */
1627 static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *cycle_now)
1628 {
1629 	/* checked again under seqlock below */
1630 	if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC)
1631 		return false;
1632 
1633 	return do_monotonic_boot(kernel_ns, cycle_now) == VCLOCK_TSC;
1634 }
1635 #endif
1636 
1637 /*
1638  *
1639  * Assuming a stable TSC across physical CPUS, and a stable TSC
1640  * across virtual CPUs, the following condition is possible.
1641  * Each numbered line represents an event visible to both
1642  * CPUs at the next numbered event.
1643  *
1644  * "timespecX" represents host monotonic time. "tscX" represents
1645  * RDTSC value.
1646  *
1647  * 		VCPU0 on CPU0		|	VCPU1 on CPU1
1648  *
1649  * 1.  read timespec0,tsc0
1650  * 2.					| timespec1 = timespec0 + N
1651  * 					| tsc1 = tsc0 + M
1652  * 3. transition to guest		| transition to guest
1653  * 4. ret0 = timespec0 + (rdtsc - tsc0) |
1654  * 5.				        | ret1 = timespec1 + (rdtsc - tsc1)
1655  * 				        | ret1 = timespec0 + N + (rdtsc - (tsc0 + M))
1656  *
1657  * Since ret0 update is visible to VCPU1 at time 5, to obey monotonicity:
1658  *
1659  * 	- ret0 < ret1
1660  *	- timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M))
1661  *		...
1662  *	- 0 < N - M => M < N
1663  *
1664  * That is, when timespec0 != timespec1, M < N. Unfortunately that is not
1665  * always the case (the difference between two distinct xtime instances
1666  * might be smaller then the difference between corresponding TSC reads,
1667  * when updating guest vcpus pvclock areas).
1668  *
1669  * To avoid that problem, do not allow visibility of distinct
1670  * system_timestamp/tsc_timestamp values simultaneously: use a master
1671  * copy of host monotonic time values. Update that master copy
1672  * in lockstep.
1673  *
1674  * Rely on synchronization of host TSCs and guest TSCs for monotonicity.
1675  *
1676  */
1677 
1678 static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
1679 {
1680 #ifdef CONFIG_X86_64
1681 	struct kvm_arch *ka = &kvm->arch;
1682 	int vclock_mode;
1683 	bool host_tsc_clocksource, vcpus_matched;
1684 
1685 	vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
1686 			atomic_read(&kvm->online_vcpus));
1687 
1688 	/*
1689 	 * If the host uses TSC clock, then passthrough TSC as stable
1690 	 * to the guest.
1691 	 */
1692 	host_tsc_clocksource = kvm_get_time_and_clockread(
1693 					&ka->master_kernel_ns,
1694 					&ka->master_cycle_now);
1695 
1696 	ka->use_master_clock = host_tsc_clocksource && vcpus_matched
1697 				&& !backwards_tsc_observed
1698 				&& !ka->boot_vcpu_runs_old_kvmclock;
1699 
1700 	if (ka->use_master_clock)
1701 		atomic_set(&kvm_guest_has_master_clock, 1);
1702 
1703 	vclock_mode = pvclock_gtod_data.clock.vclock_mode;
1704 	trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode,
1705 					vcpus_matched);
1706 #endif
1707 }
1708 
1709 void kvm_make_mclock_inprogress_request(struct kvm *kvm)
1710 {
1711 	kvm_make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
1712 }
1713 
1714 static void kvm_gen_update_masterclock(struct kvm *kvm)
1715 {
1716 #ifdef CONFIG_X86_64
1717 	int i;
1718 	struct kvm_vcpu *vcpu;
1719 	struct kvm_arch *ka = &kvm->arch;
1720 
1721 	spin_lock(&ka->pvclock_gtod_sync_lock);
1722 	kvm_make_mclock_inprogress_request(kvm);
1723 	/* no guest entries from this point */
1724 	pvclock_update_vm_gtod_copy(kvm);
1725 
1726 	kvm_for_each_vcpu(i, vcpu, kvm)
1727 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
1728 
1729 	/* guest entries allowed */
1730 	kvm_for_each_vcpu(i, vcpu, kvm)
1731 		clear_bit(KVM_REQ_MCLOCK_INPROGRESS, &vcpu->requests);
1732 
1733 	spin_unlock(&ka->pvclock_gtod_sync_lock);
1734 #endif
1735 }
1736 
1737 static u64 __get_kvmclock_ns(struct kvm *kvm)
1738 {
1739 	struct kvm_arch *ka = &kvm->arch;
1740 	struct pvclock_vcpu_time_info hv_clock;
1741 
1742 	spin_lock(&ka->pvclock_gtod_sync_lock);
1743 	if (!ka->use_master_clock) {
1744 		spin_unlock(&ka->pvclock_gtod_sync_lock);
1745 		return ktime_get_boot_ns() + ka->kvmclock_offset;
1746 	}
1747 
1748 	hv_clock.tsc_timestamp = ka->master_cycle_now;
1749 	hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
1750 	spin_unlock(&ka->pvclock_gtod_sync_lock);
1751 
1752 	kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
1753 			   &hv_clock.tsc_shift,
1754 			   &hv_clock.tsc_to_system_mul);
1755 	return __pvclock_read_cycles(&hv_clock, rdtsc());
1756 }
1757 
1758 u64 get_kvmclock_ns(struct kvm *kvm)
1759 {
1760 	unsigned long flags;
1761 	s64 ns;
1762 
1763 	local_irq_save(flags);
1764 	ns = __get_kvmclock_ns(kvm);
1765 	local_irq_restore(flags);
1766 
1767 	return ns;
1768 }
1769 
1770 static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
1771 {
1772 	struct kvm_vcpu_arch *vcpu = &v->arch;
1773 	struct pvclock_vcpu_time_info guest_hv_clock;
1774 
1775 	if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
1776 		&guest_hv_clock, sizeof(guest_hv_clock))))
1777 		return;
1778 
1779 	/* This VCPU is paused, but it's legal for a guest to read another
1780 	 * VCPU's kvmclock, so we really have to follow the specification where
1781 	 * it says that version is odd if data is being modified, and even after
1782 	 * it is consistent.
1783 	 *
1784 	 * Version field updates must be kept separate.  This is because
1785 	 * kvm_write_guest_cached might use a "rep movs" instruction, and
1786 	 * writes within a string instruction are weakly ordered.  So there
1787 	 * are three writes overall.
1788 	 *
1789 	 * As a small optimization, only write the version field in the first
1790 	 * and third write.  The vcpu->pv_time cache is still valid, because the
1791 	 * version field is the first in the struct.
1792 	 */
1793 	BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
1794 
1795 	vcpu->hv_clock.version = guest_hv_clock.version + 1;
1796 	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
1797 				&vcpu->hv_clock,
1798 				sizeof(vcpu->hv_clock.version));
1799 
1800 	smp_wmb();
1801 
1802 	/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
1803 	vcpu->hv_clock.flags |= (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
1804 
1805 	if (vcpu->pvclock_set_guest_stopped_request) {
1806 		vcpu->hv_clock.flags |= PVCLOCK_GUEST_STOPPED;
1807 		vcpu->pvclock_set_guest_stopped_request = false;
1808 	}
1809 
1810 	trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
1811 
1812 	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
1813 				&vcpu->hv_clock,
1814 				sizeof(vcpu->hv_clock));
1815 
1816 	smp_wmb();
1817 
1818 	vcpu->hv_clock.version++;
1819 	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
1820 				&vcpu->hv_clock,
1821 				sizeof(vcpu->hv_clock.version));
1822 }
1823 
1824 static int kvm_guest_time_update(struct kvm_vcpu *v)
1825 {
1826 	unsigned long flags, tgt_tsc_khz;
1827 	struct kvm_vcpu_arch *vcpu = &v->arch;
1828 	struct kvm_arch *ka = &v->kvm->arch;
1829 	s64 kernel_ns;
1830 	u64 tsc_timestamp, host_tsc;
1831 	u8 pvclock_flags;
1832 	bool use_master_clock;
1833 
1834 	kernel_ns = 0;
1835 	host_tsc = 0;
1836 
1837 	/*
1838 	 * If the host uses TSC clock, then passthrough TSC as stable
1839 	 * to the guest.
1840 	 */
1841 	spin_lock(&ka->pvclock_gtod_sync_lock);
1842 	use_master_clock = ka->use_master_clock;
1843 	if (use_master_clock) {
1844 		host_tsc = ka->master_cycle_now;
1845 		kernel_ns = ka->master_kernel_ns;
1846 	}
1847 	spin_unlock(&ka->pvclock_gtod_sync_lock);
1848 
1849 	/* Keep irq disabled to prevent changes to the clock */
1850 	local_irq_save(flags);
1851 	tgt_tsc_khz = __this_cpu_read(cpu_tsc_khz);
1852 	if (unlikely(tgt_tsc_khz == 0)) {
1853 		local_irq_restore(flags);
1854 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
1855 		return 1;
1856 	}
1857 	if (!use_master_clock) {
1858 		host_tsc = rdtsc();
1859 		kernel_ns = ktime_get_boot_ns();
1860 	}
1861 
1862 	tsc_timestamp = kvm_read_l1_tsc(v, host_tsc);
1863 
1864 	/*
1865 	 * We may have to catch up the TSC to match elapsed wall clock
1866 	 * time for two reasons, even if kvmclock is used.
1867 	 *   1) CPU could have been running below the maximum TSC rate
1868 	 *   2) Broken TSC compensation resets the base at each VCPU
1869 	 *      entry to avoid unknown leaps of TSC even when running
1870 	 *      again on the same CPU.  This may cause apparent elapsed
1871 	 *      time to disappear, and the guest to stand still or run
1872 	 *	very slowly.
1873 	 */
1874 	if (vcpu->tsc_catchup) {
1875 		u64 tsc = compute_guest_tsc(v, kernel_ns);
1876 		if (tsc > tsc_timestamp) {
1877 			adjust_tsc_offset_guest(v, tsc - tsc_timestamp);
1878 			tsc_timestamp = tsc;
1879 		}
1880 	}
1881 
1882 	local_irq_restore(flags);
1883 
1884 	/* With all the info we got, fill in the values */
1885 
1886 	if (kvm_has_tsc_control)
1887 		tgt_tsc_khz = kvm_scale_tsc(v, tgt_tsc_khz);
1888 
1889 	if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) {
1890 		kvm_get_time_scale(NSEC_PER_SEC, tgt_tsc_khz * 1000LL,
1891 				   &vcpu->hv_clock.tsc_shift,
1892 				   &vcpu->hv_clock.tsc_to_system_mul);
1893 		vcpu->hw_tsc_khz = tgt_tsc_khz;
1894 	}
1895 
1896 	vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
1897 	vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
1898 	vcpu->last_guest_tsc = tsc_timestamp;
1899 
1900 	/* If the host uses TSC clocksource, then it is stable */
1901 	pvclock_flags = 0;
1902 	if (use_master_clock)
1903 		pvclock_flags |= PVCLOCK_TSC_STABLE_BIT;
1904 
1905 	vcpu->hv_clock.flags = pvclock_flags;
1906 
1907 	if (vcpu->pv_time_enabled)
1908 		kvm_setup_pvclock_page(v);
1909 	if (v == kvm_get_vcpu(v->kvm, 0))
1910 		kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
1911 	return 0;
1912 }
1913 
1914 /*
1915  * kvmclock updates which are isolated to a given vcpu, such as
1916  * vcpu->cpu migration, should not allow system_timestamp from
1917  * the rest of the vcpus to remain static. Otherwise ntp frequency
1918  * correction applies to one vcpu's system_timestamp but not
1919  * the others.
1920  *
1921  * So in those cases, request a kvmclock update for all vcpus.
1922  * We need to rate-limit these requests though, as they can
1923  * considerably slow guests that have a large number of vcpus.
1924  * The time for a remote vcpu to update its kvmclock is bound
1925  * by the delay we use to rate-limit the updates.
1926  */
1927 
1928 #define KVMCLOCK_UPDATE_DELAY msecs_to_jiffies(100)
1929 
1930 static void kvmclock_update_fn(struct work_struct *work)
1931 {
1932 	int i;
1933 	struct delayed_work *dwork = to_delayed_work(work);
1934 	struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
1935 					   kvmclock_update_work);
1936 	struct kvm *kvm = container_of(ka, struct kvm, arch);
1937 	struct kvm_vcpu *vcpu;
1938 
1939 	kvm_for_each_vcpu(i, vcpu, kvm) {
1940 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
1941 		kvm_vcpu_kick(vcpu);
1942 	}
1943 }
1944 
1945 static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
1946 {
1947 	struct kvm *kvm = v->kvm;
1948 
1949 	kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
1950 	schedule_delayed_work(&kvm->arch.kvmclock_update_work,
1951 					KVMCLOCK_UPDATE_DELAY);
1952 }
1953 
1954 #define KVMCLOCK_SYNC_PERIOD (300 * HZ)
1955 
1956 static void kvmclock_sync_fn(struct work_struct *work)
1957 {
1958 	struct delayed_work *dwork = to_delayed_work(work);
1959 	struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
1960 					   kvmclock_sync_work);
1961 	struct kvm *kvm = container_of(ka, struct kvm, arch);
1962 
1963 	if (!kvmclock_periodic_sync)
1964 		return;
1965 
1966 	schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0);
1967 	schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
1968 					KVMCLOCK_SYNC_PERIOD);
1969 }
1970 
1971 static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1972 {
1973 	u64 mcg_cap = vcpu->arch.mcg_cap;
1974 	unsigned bank_num = mcg_cap & 0xff;
1975 
1976 	switch (msr) {
1977 	case MSR_IA32_MCG_STATUS:
1978 		vcpu->arch.mcg_status = data;
1979 		break;
1980 	case MSR_IA32_MCG_CTL:
1981 		if (!(mcg_cap & MCG_CTL_P))
1982 			return 1;
1983 		if (data != 0 && data != ~(u64)0)
1984 			return -1;
1985 		vcpu->arch.mcg_ctl = data;
1986 		break;
1987 	default:
1988 		if (msr >= MSR_IA32_MC0_CTL &&
1989 		    msr < MSR_IA32_MCx_CTL(bank_num)) {
1990 			u32 offset = msr - MSR_IA32_MC0_CTL;
1991 			/* only 0 or all 1s can be written to IA32_MCi_CTL
1992 			 * some Linux kernels though clear bit 10 in bank 4 to
1993 			 * workaround a BIOS/GART TBL issue on AMD K8s, ignore
1994 			 * this to avoid an uncatched #GP in the guest
1995 			 */
1996 			if ((offset & 0x3) == 0 &&
1997 			    data != 0 && (data | (1 << 10)) != ~(u64)0)
1998 				return -1;
1999 			vcpu->arch.mce_banks[offset] = data;
2000 			break;
2001 		}
2002 		return 1;
2003 	}
2004 	return 0;
2005 }
2006 
2007 static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
2008 {
2009 	struct kvm *kvm = vcpu->kvm;
2010 	int lm = is_long_mode(vcpu);
2011 	u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
2012 		: (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
2013 	u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
2014 		: kvm->arch.xen_hvm_config.blob_size_32;
2015 	u32 page_num = data & ~PAGE_MASK;
2016 	u64 page_addr = data & PAGE_MASK;
2017 	u8 *page;
2018 	int r;
2019 
2020 	r = -E2BIG;
2021 	if (page_num >= blob_size)
2022 		goto out;
2023 	r = -ENOMEM;
2024 	page = memdup_user(blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE);
2025 	if (IS_ERR(page)) {
2026 		r = PTR_ERR(page);
2027 		goto out;
2028 	}
2029 	if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE))
2030 		goto out_free;
2031 	r = 0;
2032 out_free:
2033 	kfree(page);
2034 out:
2035 	return r;
2036 }
2037 
2038 static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
2039 {
2040 	gpa_t gpa = data & ~0x3f;
2041 
2042 	/* Bits 2:5 are reserved, Should be zero */
2043 	if (data & 0x3c)
2044 		return 1;
2045 
2046 	vcpu->arch.apf.msr_val = data;
2047 
2048 	if (!(data & KVM_ASYNC_PF_ENABLED)) {
2049 		kvm_clear_async_pf_completion_queue(vcpu);
2050 		kvm_async_pf_hash_reset(vcpu);
2051 		return 0;
2052 	}
2053 
2054 	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
2055 					sizeof(u32)))
2056 		return 1;
2057 
2058 	vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
2059 	kvm_async_pf_wakeup_all(vcpu);
2060 	return 0;
2061 }
2062 
2063 static void kvmclock_reset(struct kvm_vcpu *vcpu)
2064 {
2065 	vcpu->arch.pv_time_enabled = false;
2066 }
2067 
2068 static void record_steal_time(struct kvm_vcpu *vcpu)
2069 {
2070 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
2071 		return;
2072 
2073 	if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
2074 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
2075 		return;
2076 
2077 	vcpu->arch.st.steal.preempted = 0;
2078 
2079 	if (vcpu->arch.st.steal.version & 1)
2080 		vcpu->arch.st.steal.version += 1;  /* first time write, random junk */
2081 
2082 	vcpu->arch.st.steal.version += 1;
2083 
2084 	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
2085 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
2086 
2087 	smp_wmb();
2088 
2089 	vcpu->arch.st.steal.steal += current->sched_info.run_delay -
2090 		vcpu->arch.st.last_steal;
2091 	vcpu->arch.st.last_steal = current->sched_info.run_delay;
2092 
2093 	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
2094 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
2095 
2096 	smp_wmb();
2097 
2098 	vcpu->arch.st.steal.version += 1;
2099 
2100 	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
2101 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
2102 }
2103 
2104 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2105 {
2106 	bool pr = false;
2107 	u32 msr = msr_info->index;
2108 	u64 data = msr_info->data;
2109 
2110 	switch (msr) {
2111 	case MSR_AMD64_NB_CFG:
2112 	case MSR_IA32_UCODE_REV:
2113 	case MSR_IA32_UCODE_WRITE:
2114 	case MSR_VM_HSAVE_PA:
2115 	case MSR_AMD64_PATCH_LOADER:
2116 	case MSR_AMD64_BU_CFG2:
2117 		break;
2118 
2119 	case MSR_EFER:
2120 		return set_efer(vcpu, data);
2121 	case MSR_K7_HWCR:
2122 		data &= ~(u64)0x40;	/* ignore flush filter disable */
2123 		data &= ~(u64)0x100;	/* ignore ignne emulation enable */
2124 		data &= ~(u64)0x8;	/* ignore TLB cache disable */
2125 		data &= ~(u64)0x40000;  /* ignore Mc status write enable */
2126 		if (data != 0) {
2127 			vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
2128 				    data);
2129 			return 1;
2130 		}
2131 		break;
2132 	case MSR_FAM10H_MMIO_CONF_BASE:
2133 		if (data != 0) {
2134 			vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
2135 				    "0x%llx\n", data);
2136 			return 1;
2137 		}
2138 		break;
2139 	case MSR_IA32_DEBUGCTLMSR:
2140 		if (!data) {
2141 			/* We support the non-activated case already */
2142 			break;
2143 		} else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
2144 			/* Values other than LBR and BTF are vendor-specific,
2145 			   thus reserved and should throw a #GP */
2146 			return 1;
2147 		}
2148 		vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
2149 			    __func__, data);
2150 		break;
2151 	case 0x200 ... 0x2ff:
2152 		return kvm_mtrr_set_msr(vcpu, msr, data);
2153 	case MSR_IA32_APICBASE:
2154 		return kvm_set_apic_base(vcpu, msr_info);
2155 	case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
2156 		return kvm_x2apic_msr_write(vcpu, msr, data);
2157 	case MSR_IA32_TSCDEADLINE:
2158 		kvm_set_lapic_tscdeadline_msr(vcpu, data);
2159 		break;
2160 	case MSR_IA32_TSC_ADJUST:
2161 		if (guest_cpuid_has_tsc_adjust(vcpu)) {
2162 			if (!msr_info->host_initiated) {
2163 				s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
2164 				adjust_tsc_offset_guest(vcpu, adj);
2165 			}
2166 			vcpu->arch.ia32_tsc_adjust_msr = data;
2167 		}
2168 		break;
2169 	case MSR_IA32_MISC_ENABLE:
2170 		vcpu->arch.ia32_misc_enable_msr = data;
2171 		break;
2172 	case MSR_IA32_SMBASE:
2173 		if (!msr_info->host_initiated)
2174 			return 1;
2175 		vcpu->arch.smbase = data;
2176 		break;
2177 	case MSR_KVM_WALL_CLOCK_NEW:
2178 	case MSR_KVM_WALL_CLOCK:
2179 		vcpu->kvm->arch.wall_clock = data;
2180 		kvm_write_wall_clock(vcpu->kvm, data);
2181 		break;
2182 	case MSR_KVM_SYSTEM_TIME_NEW:
2183 	case MSR_KVM_SYSTEM_TIME: {
2184 		struct kvm_arch *ka = &vcpu->kvm->arch;
2185 
2186 		kvmclock_reset(vcpu);
2187 
2188 		if (vcpu->vcpu_id == 0 && !msr_info->host_initiated) {
2189 			bool tmp = (msr == MSR_KVM_SYSTEM_TIME);
2190 
2191 			if (ka->boot_vcpu_runs_old_kvmclock != tmp)
2192 				set_bit(KVM_REQ_MASTERCLOCK_UPDATE,
2193 					&vcpu->requests);
2194 
2195 			ka->boot_vcpu_runs_old_kvmclock = tmp;
2196 		}
2197 
2198 		vcpu->arch.time = data;
2199 		kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
2200 
2201 		/* we verify if the enable bit is set... */
2202 		if (!(data & 1))
2203 			break;
2204 
2205 		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
2206 		     &vcpu->arch.pv_time, data & ~1ULL,
2207 		     sizeof(struct pvclock_vcpu_time_info)))
2208 			vcpu->arch.pv_time_enabled = false;
2209 		else
2210 			vcpu->arch.pv_time_enabled = true;
2211 
2212 		break;
2213 	}
2214 	case MSR_KVM_ASYNC_PF_EN:
2215 		if (kvm_pv_enable_async_pf(vcpu, data))
2216 			return 1;
2217 		break;
2218 	case MSR_KVM_STEAL_TIME:
2219 
2220 		if (unlikely(!sched_info_on()))
2221 			return 1;
2222 
2223 		if (data & KVM_STEAL_RESERVED_MASK)
2224 			return 1;
2225 
2226 		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
2227 						data & KVM_STEAL_VALID_BITS,
2228 						sizeof(struct kvm_steal_time)))
2229 			return 1;
2230 
2231 		vcpu->arch.st.msr_val = data;
2232 
2233 		if (!(data & KVM_MSR_ENABLED))
2234 			break;
2235 
2236 		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
2237 
2238 		break;
2239 	case MSR_KVM_PV_EOI_EN:
2240 		if (kvm_lapic_enable_pv_eoi(vcpu, data))
2241 			return 1;
2242 		break;
2243 
2244 	case MSR_IA32_MCG_CTL:
2245 	case MSR_IA32_MCG_STATUS:
2246 	case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
2247 		return set_msr_mce(vcpu, msr, data);
2248 
2249 	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
2250 	case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
2251 		pr = true; /* fall through */
2252 	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
2253 	case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
2254 		if (kvm_pmu_is_valid_msr(vcpu, msr))
2255 			return kvm_pmu_set_msr(vcpu, msr_info);
2256 
2257 		if (pr || data != 0)
2258 			vcpu_unimpl(vcpu, "disabled perfctr wrmsr: "
2259 				    "0x%x data 0x%llx\n", msr, data);
2260 		break;
2261 	case MSR_K7_CLK_CTL:
2262 		/*
2263 		 * Ignore all writes to this no longer documented MSR.
2264 		 * Writes are only relevant for old K7 processors,
2265 		 * all pre-dating SVM, but a recommended workaround from
2266 		 * AMD for these chips. It is possible to specify the
2267 		 * affected processor models on the command line, hence
2268 		 * the need to ignore the workaround.
2269 		 */
2270 		break;
2271 	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
2272 	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
2273 	case HV_X64_MSR_CRASH_CTL:
2274 	case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
2275 		return kvm_hv_set_msr_common(vcpu, msr, data,
2276 					     msr_info->host_initiated);
2277 	case MSR_IA32_BBL_CR_CTL3:
2278 		/* Drop writes to this legacy MSR -- see rdmsr
2279 		 * counterpart for further detail.
2280 		 */
2281 		vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n", msr, data);
2282 		break;
2283 	case MSR_AMD64_OSVW_ID_LENGTH:
2284 		if (!guest_cpuid_has_osvw(vcpu))
2285 			return 1;
2286 		vcpu->arch.osvw.length = data;
2287 		break;
2288 	case MSR_AMD64_OSVW_STATUS:
2289 		if (!guest_cpuid_has_osvw(vcpu))
2290 			return 1;
2291 		vcpu->arch.osvw.status = data;
2292 		break;
2293 	default:
2294 		if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
2295 			return xen_hvm_config(vcpu, data);
2296 		if (kvm_pmu_is_valid_msr(vcpu, msr))
2297 			return kvm_pmu_set_msr(vcpu, msr_info);
2298 		if (!ignore_msrs) {
2299 			vcpu_debug_ratelimited(vcpu, "unhandled wrmsr: 0x%x data 0x%llx\n",
2300 				    msr, data);
2301 			return 1;
2302 		} else {
2303 			vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n",
2304 				    msr, data);
2305 			break;
2306 		}
2307 	}
2308 	return 0;
2309 }
2310 EXPORT_SYMBOL_GPL(kvm_set_msr_common);
2311 
2312 
2313 /*
2314  * Reads an msr value (of 'msr_index') into 'pdata'.
2315  * Returns 0 on success, non-0 otherwise.
2316  * Assumes vcpu_load() was already called.
2317  */
2318 int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
2319 {
2320 	return kvm_x86_ops->get_msr(vcpu, msr);
2321 }
2322 EXPORT_SYMBOL_GPL(kvm_get_msr);
2323 
2324 static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
2325 {
2326 	u64 data;
2327 	u64 mcg_cap = vcpu->arch.mcg_cap;
2328 	unsigned bank_num = mcg_cap & 0xff;
2329 
2330 	switch (msr) {
2331 	case MSR_IA32_P5_MC_ADDR:
2332 	case MSR_IA32_P5_MC_TYPE:
2333 		data = 0;
2334 		break;
2335 	case MSR_IA32_MCG_CAP:
2336 		data = vcpu->arch.mcg_cap;
2337 		break;
2338 	case MSR_IA32_MCG_CTL:
2339 		if (!(mcg_cap & MCG_CTL_P))
2340 			return 1;
2341 		data = vcpu->arch.mcg_ctl;
2342 		break;
2343 	case MSR_IA32_MCG_STATUS:
2344 		data = vcpu->arch.mcg_status;
2345 		break;
2346 	default:
2347 		if (msr >= MSR_IA32_MC0_CTL &&
2348 		    msr < MSR_IA32_MCx_CTL(bank_num)) {
2349 			u32 offset = msr - MSR_IA32_MC0_CTL;
2350 			data = vcpu->arch.mce_banks[offset];
2351 			break;
2352 		}
2353 		return 1;
2354 	}
2355 	*pdata = data;
2356 	return 0;
2357 }
2358 
2359 int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2360 {
2361 	switch (msr_info->index) {
2362 	case MSR_IA32_PLATFORM_ID:
2363 	case MSR_IA32_EBL_CR_POWERON:
2364 	case MSR_IA32_DEBUGCTLMSR:
2365 	case MSR_IA32_LASTBRANCHFROMIP:
2366 	case MSR_IA32_LASTBRANCHTOIP:
2367 	case MSR_IA32_LASTINTFROMIP:
2368 	case MSR_IA32_LASTINTTOIP:
2369 	case MSR_K8_SYSCFG:
2370 	case MSR_K8_TSEG_ADDR:
2371 	case MSR_K8_TSEG_MASK:
2372 	case MSR_K7_HWCR:
2373 	case MSR_VM_HSAVE_PA:
2374 	case MSR_K8_INT_PENDING_MSG:
2375 	case MSR_AMD64_NB_CFG:
2376 	case MSR_FAM10H_MMIO_CONF_BASE:
2377 	case MSR_AMD64_BU_CFG2:
2378 	case MSR_IA32_PERF_CTL:
2379 		msr_info->data = 0;
2380 		break;
2381 	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
2382 	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
2383 	case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
2384 	case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
2385 		if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
2386 			return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
2387 		msr_info->data = 0;
2388 		break;
2389 	case MSR_IA32_UCODE_REV:
2390 		msr_info->data = 0x100000000ULL;
2391 		break;
2392 	case MSR_MTRRcap:
2393 	case 0x200 ... 0x2ff:
2394 		return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data);
2395 	case 0xcd: /* fsb frequency */
2396 		msr_info->data = 3;
2397 		break;
2398 		/*
2399 		 * MSR_EBC_FREQUENCY_ID
2400 		 * Conservative value valid for even the basic CPU models.
2401 		 * Models 0,1: 000 in bits 23:21 indicating a bus speed of
2402 		 * 100MHz, model 2 000 in bits 18:16 indicating 100MHz,
2403 		 * and 266MHz for model 3, or 4. Set Core Clock
2404 		 * Frequency to System Bus Frequency Ratio to 1 (bits
2405 		 * 31:24) even though these are only valid for CPU
2406 		 * models > 2, however guests may end up dividing or
2407 		 * multiplying by zero otherwise.
2408 		 */
2409 	case MSR_EBC_FREQUENCY_ID:
2410 		msr_info->data = 1 << 24;
2411 		break;
2412 	case MSR_IA32_APICBASE:
2413 		msr_info->data = kvm_get_apic_base(vcpu);
2414 		break;
2415 	case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
2416 		return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data);
2417 		break;
2418 	case MSR_IA32_TSCDEADLINE:
2419 		msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu);
2420 		break;
2421 	case MSR_IA32_TSC_ADJUST:
2422 		msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr;
2423 		break;
2424 	case MSR_IA32_MISC_ENABLE:
2425 		msr_info->data = vcpu->arch.ia32_misc_enable_msr;
2426 		break;
2427 	case MSR_IA32_SMBASE:
2428 		if (!msr_info->host_initiated)
2429 			return 1;
2430 		msr_info->data = vcpu->arch.smbase;
2431 		break;
2432 	case MSR_IA32_PERF_STATUS:
2433 		/* TSC increment by tick */
2434 		msr_info->data = 1000ULL;
2435 		/* CPU multiplier */
2436 		msr_info->data |= (((uint64_t)4ULL) << 40);
2437 		break;
2438 	case MSR_EFER:
2439 		msr_info->data = vcpu->arch.efer;
2440 		break;
2441 	case MSR_KVM_WALL_CLOCK:
2442 	case MSR_KVM_WALL_CLOCK_NEW:
2443 		msr_info->data = vcpu->kvm->arch.wall_clock;
2444 		break;
2445 	case MSR_KVM_SYSTEM_TIME:
2446 	case MSR_KVM_SYSTEM_TIME_NEW:
2447 		msr_info->data = vcpu->arch.time;
2448 		break;
2449 	case MSR_KVM_ASYNC_PF_EN:
2450 		msr_info->data = vcpu->arch.apf.msr_val;
2451 		break;
2452 	case MSR_KVM_STEAL_TIME:
2453 		msr_info->data = vcpu->arch.st.msr_val;
2454 		break;
2455 	case MSR_KVM_PV_EOI_EN:
2456 		msr_info->data = vcpu->arch.pv_eoi.msr_val;
2457 		break;
2458 	case MSR_IA32_P5_MC_ADDR:
2459 	case MSR_IA32_P5_MC_TYPE:
2460 	case MSR_IA32_MCG_CAP:
2461 	case MSR_IA32_MCG_CTL:
2462 	case MSR_IA32_MCG_STATUS:
2463 	case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
2464 		return get_msr_mce(vcpu, msr_info->index, &msr_info->data);
2465 	case MSR_K7_CLK_CTL:
2466 		/*
2467 		 * Provide expected ramp-up count for K7. All other
2468 		 * are set to zero, indicating minimum divisors for
2469 		 * every field.
2470 		 *
2471 		 * This prevents guest kernels on AMD host with CPU
2472 		 * type 6, model 8 and higher from exploding due to
2473 		 * the rdmsr failing.
2474 		 */
2475 		msr_info->data = 0x20000000;
2476 		break;
2477 	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
2478 	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
2479 	case HV_X64_MSR_CRASH_CTL:
2480 	case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
2481 		return kvm_hv_get_msr_common(vcpu,
2482 					     msr_info->index, &msr_info->data);
2483 		break;
2484 	case MSR_IA32_BBL_CR_CTL3:
2485 		/* This legacy MSR exists but isn't fully documented in current
2486 		 * silicon.  It is however accessed by winxp in very narrow
2487 		 * scenarios where it sets bit #19, itself documented as
2488 		 * a "reserved" bit.  Best effort attempt to source coherent
2489 		 * read data here should the balance of the register be
2490 		 * interpreted by the guest:
2491 		 *
2492 		 * L2 cache control register 3: 64GB range, 256KB size,
2493 		 * enabled, latency 0x1, configured
2494 		 */
2495 		msr_info->data = 0xbe702111;
2496 		break;
2497 	case MSR_AMD64_OSVW_ID_LENGTH:
2498 		if (!guest_cpuid_has_osvw(vcpu))
2499 			return 1;
2500 		msr_info->data = vcpu->arch.osvw.length;
2501 		break;
2502 	case MSR_AMD64_OSVW_STATUS:
2503 		if (!guest_cpuid_has_osvw(vcpu))
2504 			return 1;
2505 		msr_info->data = vcpu->arch.osvw.status;
2506 		break;
2507 	default:
2508 		if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
2509 			return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
2510 		if (!ignore_msrs) {
2511 			vcpu_debug_ratelimited(vcpu, "unhandled rdmsr: 0x%x\n",
2512 					       msr_info->index);
2513 			return 1;
2514 		} else {
2515 			vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr_info->index);
2516 			msr_info->data = 0;
2517 		}
2518 		break;
2519 	}
2520 	return 0;
2521 }
2522 EXPORT_SYMBOL_GPL(kvm_get_msr_common);
2523 
2524 /*
2525  * Read or write a bunch of msrs. All parameters are kernel addresses.
2526  *
2527  * @return number of msrs set successfully.
2528  */
2529 static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
2530 		    struct kvm_msr_entry *entries,
2531 		    int (*do_msr)(struct kvm_vcpu *vcpu,
2532 				  unsigned index, u64 *data))
2533 {
2534 	int i, idx;
2535 
2536 	idx = srcu_read_lock(&vcpu->kvm->srcu);
2537 	for (i = 0; i < msrs->nmsrs; ++i)
2538 		if (do_msr(vcpu, entries[i].index, &entries[i].data))
2539 			break;
2540 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
2541 
2542 	return i;
2543 }
2544 
2545 /*
2546  * Read or write a bunch of msrs. Parameters are user addresses.
2547  *
2548  * @return number of msrs set successfully.
2549  */
2550 static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
2551 		  int (*do_msr)(struct kvm_vcpu *vcpu,
2552 				unsigned index, u64 *data),
2553 		  int writeback)
2554 {
2555 	struct kvm_msrs msrs;
2556 	struct kvm_msr_entry *entries;
2557 	int r, n;
2558 	unsigned size;
2559 
2560 	r = -EFAULT;
2561 	if (copy_from_user(&msrs, user_msrs, sizeof msrs))
2562 		goto out;
2563 
2564 	r = -E2BIG;
2565 	if (msrs.nmsrs >= MAX_IO_MSRS)
2566 		goto out;
2567 
2568 	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
2569 	entries = memdup_user(user_msrs->entries, size);
2570 	if (IS_ERR(entries)) {
2571 		r = PTR_ERR(entries);
2572 		goto out;
2573 	}
2574 
2575 	r = n = __msr_io(vcpu, &msrs, entries, do_msr);
2576 	if (r < 0)
2577 		goto out_free;
2578 
2579 	r = -EFAULT;
2580 	if (writeback && copy_to_user(user_msrs->entries, entries, size))
2581 		goto out_free;
2582 
2583 	r = n;
2584 
2585 out_free:
2586 	kfree(entries);
2587 out:
2588 	return r;
2589 }
2590 
2591 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
2592 {
2593 	int r;
2594 
2595 	switch (ext) {
2596 	case KVM_CAP_IRQCHIP:
2597 	case KVM_CAP_HLT:
2598 	case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
2599 	case KVM_CAP_SET_TSS_ADDR:
2600 	case KVM_CAP_EXT_CPUID:
2601 	case KVM_CAP_EXT_EMUL_CPUID:
2602 	case KVM_CAP_CLOCKSOURCE:
2603 	case KVM_CAP_PIT:
2604 	case KVM_CAP_NOP_IO_DELAY:
2605 	case KVM_CAP_MP_STATE:
2606 	case KVM_CAP_SYNC_MMU:
2607 	case KVM_CAP_USER_NMI:
2608 	case KVM_CAP_REINJECT_CONTROL:
2609 	case KVM_CAP_IRQ_INJECT_STATUS:
2610 	case KVM_CAP_IOEVENTFD:
2611 	case KVM_CAP_IOEVENTFD_NO_LENGTH:
2612 	case KVM_CAP_PIT2:
2613 	case KVM_CAP_PIT_STATE2:
2614 	case KVM_CAP_SET_IDENTITY_MAP_ADDR:
2615 	case KVM_CAP_XEN_HVM:
2616 	case KVM_CAP_VCPU_EVENTS:
2617 	case KVM_CAP_HYPERV:
2618 	case KVM_CAP_HYPERV_VAPIC:
2619 	case KVM_CAP_HYPERV_SPIN:
2620 	case KVM_CAP_HYPERV_SYNIC:
2621 	case KVM_CAP_PCI_SEGMENT:
2622 	case KVM_CAP_DEBUGREGS:
2623 	case KVM_CAP_X86_ROBUST_SINGLESTEP:
2624 	case KVM_CAP_XSAVE:
2625 	case KVM_CAP_ASYNC_PF:
2626 	case KVM_CAP_GET_TSC_KHZ:
2627 	case KVM_CAP_KVMCLOCK_CTRL:
2628 	case KVM_CAP_READONLY_MEM:
2629 	case KVM_CAP_HYPERV_TIME:
2630 	case KVM_CAP_IOAPIC_POLARITY_IGNORED:
2631 	case KVM_CAP_TSC_DEADLINE_TIMER:
2632 	case KVM_CAP_ENABLE_CAP_VM:
2633 	case KVM_CAP_DISABLE_QUIRKS:
2634 	case KVM_CAP_SET_BOOT_CPU_ID:
2635  	case KVM_CAP_SPLIT_IRQCHIP:
2636 #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
2637 	case KVM_CAP_ASSIGN_DEV_IRQ:
2638 	case KVM_CAP_PCI_2_3:
2639 #endif
2640 		r = 1;
2641 		break;
2642 	case KVM_CAP_ADJUST_CLOCK:
2643 		r = KVM_CLOCK_TSC_STABLE;
2644 		break;
2645 	case KVM_CAP_X86_SMM:
2646 		/* SMBASE is usually relocated above 1M on modern chipsets,
2647 		 * and SMM handlers might indeed rely on 4G segment limits,
2648 		 * so do not report SMM to be available if real mode is
2649 		 * emulated via vm86 mode.  Still, do not go to great lengths
2650 		 * to avoid userspace's usage of the feature, because it is a
2651 		 * fringe case that is not enabled except via specific settings
2652 		 * of the module parameters.
2653 		 */
2654 		r = kvm_x86_ops->cpu_has_high_real_mode_segbase();
2655 		break;
2656 	case KVM_CAP_COALESCED_MMIO:
2657 		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
2658 		break;
2659 	case KVM_CAP_VAPIC:
2660 		r = !kvm_x86_ops->cpu_has_accelerated_tpr();
2661 		break;
2662 	case KVM_CAP_NR_VCPUS:
2663 		r = KVM_SOFT_MAX_VCPUS;
2664 		break;
2665 	case KVM_CAP_MAX_VCPUS:
2666 		r = KVM_MAX_VCPUS;
2667 		break;
2668 	case KVM_CAP_NR_MEMSLOTS:
2669 		r = KVM_USER_MEM_SLOTS;
2670 		break;
2671 	case KVM_CAP_PV_MMU:	/* obsolete */
2672 		r = 0;
2673 		break;
2674 #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
2675 	case KVM_CAP_IOMMU:
2676 		r = iommu_present(&pci_bus_type);
2677 		break;
2678 #endif
2679 	case KVM_CAP_MCE:
2680 		r = KVM_MAX_MCE_BANKS;
2681 		break;
2682 	case KVM_CAP_XCRS:
2683 		r = boot_cpu_has(X86_FEATURE_XSAVE);
2684 		break;
2685 	case KVM_CAP_TSC_CONTROL:
2686 		r = kvm_has_tsc_control;
2687 		break;
2688 	case KVM_CAP_X2APIC_API:
2689 		r = KVM_X2APIC_API_VALID_FLAGS;
2690 		break;
2691 	default:
2692 		r = 0;
2693 		break;
2694 	}
2695 	return r;
2696 
2697 }
2698 
2699 long kvm_arch_dev_ioctl(struct file *filp,
2700 			unsigned int ioctl, unsigned long arg)
2701 {
2702 	void __user *argp = (void __user *)arg;
2703 	long r;
2704 
2705 	switch (ioctl) {
2706 	case KVM_GET_MSR_INDEX_LIST: {
2707 		struct kvm_msr_list __user *user_msr_list = argp;
2708 		struct kvm_msr_list msr_list;
2709 		unsigned n;
2710 
2711 		r = -EFAULT;
2712 		if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
2713 			goto out;
2714 		n = msr_list.nmsrs;
2715 		msr_list.nmsrs = num_msrs_to_save + num_emulated_msrs;
2716 		if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
2717 			goto out;
2718 		r = -E2BIG;
2719 		if (n < msr_list.nmsrs)
2720 			goto out;
2721 		r = -EFAULT;
2722 		if (copy_to_user(user_msr_list->indices, &msrs_to_save,
2723 				 num_msrs_to_save * sizeof(u32)))
2724 			goto out;
2725 		if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
2726 				 &emulated_msrs,
2727 				 num_emulated_msrs * sizeof(u32)))
2728 			goto out;
2729 		r = 0;
2730 		break;
2731 	}
2732 	case KVM_GET_SUPPORTED_CPUID:
2733 	case KVM_GET_EMULATED_CPUID: {
2734 		struct kvm_cpuid2 __user *cpuid_arg = argp;
2735 		struct kvm_cpuid2 cpuid;
2736 
2737 		r = -EFAULT;
2738 		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2739 			goto out;
2740 
2741 		r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries,
2742 					    ioctl);
2743 		if (r)
2744 			goto out;
2745 
2746 		r = -EFAULT;
2747 		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
2748 			goto out;
2749 		r = 0;
2750 		break;
2751 	}
2752 	case KVM_X86_GET_MCE_CAP_SUPPORTED: {
2753 		r = -EFAULT;
2754 		if (copy_to_user(argp, &kvm_mce_cap_supported,
2755 				 sizeof(kvm_mce_cap_supported)))
2756 			goto out;
2757 		r = 0;
2758 		break;
2759 	}
2760 	default:
2761 		r = -EINVAL;
2762 	}
2763 out:
2764 	return r;
2765 }
2766 
2767 static void wbinvd_ipi(void *garbage)
2768 {
2769 	wbinvd();
2770 }
2771 
2772 static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
2773 {
2774 	return kvm_arch_has_noncoherent_dma(vcpu->kvm);
2775 }
2776 
2777 static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
2778 {
2779 	set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
2780 }
2781 
2782 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2783 {
2784 	/* Address WBINVD may be executed by guest */
2785 	if (need_emulate_wbinvd(vcpu)) {
2786 		if (kvm_x86_ops->has_wbinvd_exit())
2787 			cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
2788 		else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
2789 			smp_call_function_single(vcpu->cpu,
2790 					wbinvd_ipi, NULL, 1);
2791 	}
2792 
2793 	kvm_x86_ops->vcpu_load(vcpu, cpu);
2794 
2795 	/* Apply any externally detected TSC adjustments (due to suspend) */
2796 	if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
2797 		adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
2798 		vcpu->arch.tsc_offset_adjustment = 0;
2799 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
2800 	}
2801 
2802 	if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {
2803 		s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
2804 				rdtsc() - vcpu->arch.last_host_tsc;
2805 		if (tsc_delta < 0)
2806 			mark_tsc_unstable("KVM discovered backwards TSC");
2807 
2808 		if (check_tsc_unstable()) {
2809 			u64 offset = kvm_compute_tsc_offset(vcpu,
2810 						vcpu->arch.last_guest_tsc);
2811 			kvm_vcpu_write_tsc_offset(vcpu, offset);
2812 			vcpu->arch.tsc_catchup = 1;
2813 		}
2814 		if (kvm_lapic_hv_timer_in_use(vcpu) &&
2815 				kvm_x86_ops->set_hv_timer(vcpu,
2816 					kvm_get_lapic_target_expiration_tsc(vcpu)))
2817 			kvm_lapic_switch_to_sw_timer(vcpu);
2818 		/*
2819 		 * On a host with synchronized TSC, there is no need to update
2820 		 * kvmclock on vcpu->cpu migration
2821 		 */
2822 		if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1)
2823 			kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
2824 		if (vcpu->cpu != cpu)
2825 			kvm_migrate_timers(vcpu);
2826 		vcpu->cpu = cpu;
2827 	}
2828 
2829 	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
2830 }
2831 
2832 static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
2833 {
2834 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
2835 		return;
2836 
2837 	vcpu->arch.st.steal.preempted = 1;
2838 
2839 	kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
2840 			&vcpu->arch.st.steal.preempted,
2841 			offsetof(struct kvm_steal_time, preempted),
2842 			sizeof(vcpu->arch.st.steal.preempted));
2843 }
2844 
2845 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
2846 {
2847 	int idx;
2848 	/*
2849 	 * Disable page faults because we're in atomic context here.
2850 	 * kvm_write_guest_offset_cached() would call might_fault()
2851 	 * that relies on pagefault_disable() to tell if there's a
2852 	 * bug. NOTE: the write to guest memory may not go through if
2853 	 * during postcopy live migration or if there's heavy guest
2854 	 * paging.
2855 	 */
2856 	pagefault_disable();
2857 	/*
2858 	 * kvm_memslots() will be called by
2859 	 * kvm_write_guest_offset_cached() so take the srcu lock.
2860 	 */
2861 	idx = srcu_read_lock(&vcpu->kvm->srcu);
2862 	kvm_steal_time_set_preempted(vcpu);
2863 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
2864 	pagefault_enable();
2865 	kvm_x86_ops->vcpu_put(vcpu);
2866 	kvm_put_guest_fpu(vcpu);
2867 	vcpu->arch.last_host_tsc = rdtsc();
2868 }
2869 
2870 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
2871 				    struct kvm_lapic_state *s)
2872 {
2873 	if (vcpu->arch.apicv_active)
2874 		kvm_x86_ops->sync_pir_to_irr(vcpu);
2875 
2876 	return kvm_apic_get_state(vcpu, s);
2877 }
2878 
2879 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
2880 				    struct kvm_lapic_state *s)
2881 {
2882 	int r;
2883 
2884 	r = kvm_apic_set_state(vcpu, s);
2885 	if (r)
2886 		return r;
2887 	update_cr8_intercept(vcpu);
2888 
2889 	return 0;
2890 }
2891 
2892 static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
2893 {
2894 	return (!lapic_in_kernel(vcpu) ||
2895 		kvm_apic_accept_pic_intr(vcpu));
2896 }
2897 
2898 /*
2899  * if userspace requested an interrupt window, check that the
2900  * interrupt window is open.
2901  *
2902  * No need to exit to userspace if we already have an interrupt queued.
2903  */
2904 static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
2905 {
2906 	return kvm_arch_interrupt_allowed(vcpu) &&
2907 		!kvm_cpu_has_interrupt(vcpu) &&
2908 		!kvm_event_needs_reinjection(vcpu) &&
2909 		kvm_cpu_accept_dm_intr(vcpu);
2910 }
2911 
2912 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
2913 				    struct kvm_interrupt *irq)
2914 {
2915 	if (irq->irq >= KVM_NR_INTERRUPTS)
2916 		return -EINVAL;
2917 
2918 	if (!irqchip_in_kernel(vcpu->kvm)) {
2919 		kvm_queue_interrupt(vcpu, irq->irq, false);
2920 		kvm_make_request(KVM_REQ_EVENT, vcpu);
2921 		return 0;
2922 	}
2923 
2924 	/*
2925 	 * With in-kernel LAPIC, we only use this to inject EXTINT, so
2926 	 * fail for in-kernel 8259.
2927 	 */
2928 	if (pic_in_kernel(vcpu->kvm))
2929 		return -ENXIO;
2930 
2931 	if (vcpu->arch.pending_external_vector != -1)
2932 		return -EEXIST;
2933 
2934 	vcpu->arch.pending_external_vector = irq->irq;
2935 	kvm_make_request(KVM_REQ_EVENT, vcpu);
2936 	return 0;
2937 }
2938 
2939 static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
2940 {
2941 	kvm_inject_nmi(vcpu);
2942 
2943 	return 0;
2944 }
2945 
2946 static int kvm_vcpu_ioctl_smi(struct kvm_vcpu *vcpu)
2947 {
2948 	kvm_make_request(KVM_REQ_SMI, vcpu);
2949 
2950 	return 0;
2951 }
2952 
2953 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
2954 					   struct kvm_tpr_access_ctl *tac)
2955 {
2956 	if (tac->flags)
2957 		return -EINVAL;
2958 	vcpu->arch.tpr_access_reporting = !!tac->enabled;
2959 	return 0;
2960 }
2961 
2962 static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
2963 					u64 mcg_cap)
2964 {
2965 	int r;
2966 	unsigned bank_num = mcg_cap & 0xff, bank;
2967 
2968 	r = -EINVAL;
2969 	if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
2970 		goto out;
2971 	if (mcg_cap & ~(kvm_mce_cap_supported | 0xff | 0xff0000))
2972 		goto out;
2973 	r = 0;
2974 	vcpu->arch.mcg_cap = mcg_cap;
2975 	/* Init IA32_MCG_CTL to all 1s */
2976 	if (mcg_cap & MCG_CTL_P)
2977 		vcpu->arch.mcg_ctl = ~(u64)0;
2978 	/* Init IA32_MCi_CTL to all 1s */
2979 	for (bank = 0; bank < bank_num; bank++)
2980 		vcpu->arch.mce_banks[bank*4] = ~(u64)0;
2981 
2982 	if (kvm_x86_ops->setup_mce)
2983 		kvm_x86_ops->setup_mce(vcpu);
2984 out:
2985 	return r;
2986 }
2987 
2988 static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
2989 				      struct kvm_x86_mce *mce)
2990 {
2991 	u64 mcg_cap = vcpu->arch.mcg_cap;
2992 	unsigned bank_num = mcg_cap & 0xff;
2993 	u64 *banks = vcpu->arch.mce_banks;
2994 
2995 	if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
2996 		return -EINVAL;
2997 	/*
2998 	 * if IA32_MCG_CTL is not all 1s, the uncorrected error
2999 	 * reporting is disabled
3000 	 */
3001 	if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
3002 	    vcpu->arch.mcg_ctl != ~(u64)0)
3003 		return 0;
3004 	banks += 4 * mce->bank;
3005 	/*
3006 	 * if IA32_MCi_CTL is not all 1s, the uncorrected error
3007 	 * reporting is disabled for the bank
3008 	 */
3009 	if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
3010 		return 0;
3011 	if (mce->status & MCI_STATUS_UC) {
3012 		if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
3013 		    !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
3014 			kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
3015 			return 0;
3016 		}
3017 		if (banks[1] & MCI_STATUS_VAL)
3018 			mce->status |= MCI_STATUS_OVER;
3019 		banks[2] = mce->addr;
3020 		banks[3] = mce->misc;
3021 		vcpu->arch.mcg_status = mce->mcg_status;
3022 		banks[1] = mce->status;
3023 		kvm_queue_exception(vcpu, MC_VECTOR);
3024 	} else if (!(banks[1] & MCI_STATUS_VAL)
3025 		   || !(banks[1] & MCI_STATUS_UC)) {
3026 		if (banks[1] & MCI_STATUS_VAL)
3027 			mce->status |= MCI_STATUS_OVER;
3028 		banks[2] = mce->addr;
3029 		banks[3] = mce->misc;
3030 		banks[1] = mce->status;
3031 	} else
3032 		banks[1] |= MCI_STATUS_OVER;
3033 	return 0;
3034 }
3035 
3036 static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
3037 					       struct kvm_vcpu_events *events)
3038 {
3039 	process_nmi(vcpu);
3040 	events->exception.injected =
3041 		vcpu->arch.exception.pending &&
3042 		!kvm_exception_is_soft(vcpu->arch.exception.nr);
3043 	events->exception.nr = vcpu->arch.exception.nr;
3044 	events->exception.has_error_code = vcpu->arch.exception.has_error_code;
3045 	events->exception.pad = 0;
3046 	events->exception.error_code = vcpu->arch.exception.error_code;
3047 
3048 	events->interrupt.injected =
3049 		vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
3050 	events->interrupt.nr = vcpu->arch.interrupt.nr;
3051 	events->interrupt.soft = 0;
3052 	events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu);
3053 
3054 	events->nmi.injected = vcpu->arch.nmi_injected;
3055 	events->nmi.pending = vcpu->arch.nmi_pending != 0;
3056 	events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
3057 	events->nmi.pad = 0;
3058 
3059 	events->sipi_vector = 0; /* never valid when reporting to user space */
3060 
3061 	events->smi.smm = is_smm(vcpu);
3062 	events->smi.pending = vcpu->arch.smi_pending;
3063 	events->smi.smm_inside_nmi =
3064 		!!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK);
3065 	events->smi.latched_init = kvm_lapic_latched_init(vcpu);
3066 
3067 	events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
3068 			 | KVM_VCPUEVENT_VALID_SHADOW
3069 			 | KVM_VCPUEVENT_VALID_SMM);
3070 	memset(&events->reserved, 0, sizeof(events->reserved));
3071 }
3072 
3073 static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags);
3074 
3075 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
3076 					      struct kvm_vcpu_events *events)
3077 {
3078 	if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
3079 			      | KVM_VCPUEVENT_VALID_SIPI_VECTOR
3080 			      | KVM_VCPUEVENT_VALID_SHADOW
3081 			      | KVM_VCPUEVENT_VALID_SMM))
3082 		return -EINVAL;
3083 
3084 	if (events->exception.injected &&
3085 	    (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR))
3086 		return -EINVAL;
3087 
3088 	process_nmi(vcpu);
3089 	vcpu->arch.exception.pending = events->exception.injected;
3090 	vcpu->arch.exception.nr = events->exception.nr;
3091 	vcpu->arch.exception.has_error_code = events->exception.has_error_code;
3092 	vcpu->arch.exception.error_code = events->exception.error_code;
3093 
3094 	vcpu->arch.interrupt.pending = events->interrupt.injected;
3095 	vcpu->arch.interrupt.nr = events->interrupt.nr;
3096 	vcpu->arch.interrupt.soft = events->interrupt.soft;
3097 	if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
3098 		kvm_x86_ops->set_interrupt_shadow(vcpu,
3099 						  events->interrupt.shadow);
3100 
3101 	vcpu->arch.nmi_injected = events->nmi.injected;
3102 	if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
3103 		vcpu->arch.nmi_pending = events->nmi.pending;
3104 	kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
3105 
3106 	if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR &&
3107 	    lapic_in_kernel(vcpu))
3108 		vcpu->arch.apic->sipi_vector = events->sipi_vector;
3109 
3110 	if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
3111 		u32 hflags = vcpu->arch.hflags;
3112 		if (events->smi.smm)
3113 			hflags |= HF_SMM_MASK;
3114 		else
3115 			hflags &= ~HF_SMM_MASK;
3116 		kvm_set_hflags(vcpu, hflags);
3117 
3118 		vcpu->arch.smi_pending = events->smi.pending;
3119 		if (events->smi.smm_inside_nmi)
3120 			vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
3121 		else
3122 			vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK;
3123 		if (lapic_in_kernel(vcpu)) {
3124 			if (events->smi.latched_init)
3125 				set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
3126 			else
3127 				clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
3128 		}
3129 	}
3130 
3131 	kvm_make_request(KVM_REQ_EVENT, vcpu);
3132 
3133 	return 0;
3134 }
3135 
3136 static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
3137 					     struct kvm_debugregs *dbgregs)
3138 {
3139 	unsigned long val;
3140 
3141 	memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
3142 	kvm_get_dr(vcpu, 6, &val);
3143 	dbgregs->dr6 = val;
3144 	dbgregs->dr7 = vcpu->arch.dr7;
3145 	dbgregs->flags = 0;
3146 	memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
3147 }
3148 
3149 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
3150 					    struct kvm_debugregs *dbgregs)
3151 {
3152 	if (dbgregs->flags)
3153 		return -EINVAL;
3154 
3155 	if (dbgregs->dr6 & ~0xffffffffull)
3156 		return -EINVAL;
3157 	if (dbgregs->dr7 & ~0xffffffffull)
3158 		return -EINVAL;
3159 
3160 	memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
3161 	kvm_update_dr0123(vcpu);
3162 	vcpu->arch.dr6 = dbgregs->dr6;
3163 	kvm_update_dr6(vcpu);
3164 	vcpu->arch.dr7 = dbgregs->dr7;
3165 	kvm_update_dr7(vcpu);
3166 
3167 	return 0;
3168 }
3169 
3170 #define XSTATE_COMPACTION_ENABLED (1ULL << 63)
3171 
3172 static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
3173 {
3174 	struct xregs_state *xsave = &vcpu->arch.guest_fpu.state.xsave;
3175 	u64 xstate_bv = xsave->header.xfeatures;
3176 	u64 valid;
3177 
3178 	/*
3179 	 * Copy legacy XSAVE area, to avoid complications with CPUID
3180 	 * leaves 0 and 1 in the loop below.
3181 	 */
3182 	memcpy(dest, xsave, XSAVE_HDR_OFFSET);
3183 
3184 	/* Set XSTATE_BV */
3185 	*(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv;
3186 
3187 	/*
3188 	 * Copy each region from the possibly compacted offset to the
3189 	 * non-compacted offset.
3190 	 */
3191 	valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
3192 	while (valid) {
3193 		u64 feature = valid & -valid;
3194 		int index = fls64(feature) - 1;
3195 		void *src = get_xsave_addr(xsave, feature);
3196 
3197 		if (src) {
3198 			u32 size, offset, ecx, edx;
3199 			cpuid_count(XSTATE_CPUID, index,
3200 				    &size, &offset, &ecx, &edx);
3201 			memcpy(dest + offset, src, size);
3202 		}
3203 
3204 		valid -= feature;
3205 	}
3206 }
3207 
3208 static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
3209 {
3210 	struct xregs_state *xsave = &vcpu->arch.guest_fpu.state.xsave;
3211 	u64 xstate_bv = *(u64 *)(src + XSAVE_HDR_OFFSET);
3212 	u64 valid;
3213 
3214 	/*
3215 	 * Copy legacy XSAVE area, to avoid complications with CPUID
3216 	 * leaves 0 and 1 in the loop below.
3217 	 */
3218 	memcpy(xsave, src, XSAVE_HDR_OFFSET);
3219 
3220 	/* Set XSTATE_BV and possibly XCOMP_BV.  */
3221 	xsave->header.xfeatures = xstate_bv;
3222 	if (boot_cpu_has(X86_FEATURE_XSAVES))
3223 		xsave->header.xcomp_bv = host_xcr0 | XSTATE_COMPACTION_ENABLED;
3224 
3225 	/*
3226 	 * Copy each region from the non-compacted offset to the
3227 	 * possibly compacted offset.
3228 	 */
3229 	valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
3230 	while (valid) {
3231 		u64 feature = valid & -valid;
3232 		int index = fls64(feature) - 1;
3233 		void *dest = get_xsave_addr(xsave, feature);
3234 
3235 		if (dest) {
3236 			u32 size, offset, ecx, edx;
3237 			cpuid_count(XSTATE_CPUID, index,
3238 				    &size, &offset, &ecx, &edx);
3239 			memcpy(dest, src + offset, size);
3240 		}
3241 
3242 		valid -= feature;
3243 	}
3244 }
3245 
3246 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
3247 					 struct kvm_xsave *guest_xsave)
3248 {
3249 	if (boot_cpu_has(X86_FEATURE_XSAVE)) {
3250 		memset(guest_xsave, 0, sizeof(struct kvm_xsave));
3251 		fill_xsave((u8 *) guest_xsave->region, vcpu);
3252 	} else {
3253 		memcpy(guest_xsave->region,
3254 			&vcpu->arch.guest_fpu.state.fxsave,
3255 			sizeof(struct fxregs_state));
3256 		*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
3257 			XFEATURE_MASK_FPSSE;
3258 	}
3259 }
3260 
3261 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
3262 					struct kvm_xsave *guest_xsave)
3263 {
3264 	u64 xstate_bv =
3265 		*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
3266 
3267 	if (boot_cpu_has(X86_FEATURE_XSAVE)) {
3268 		/*
3269 		 * Here we allow setting states that are not present in
3270 		 * CPUID leaf 0xD, index 0, EDX:EAX.  This is for compatibility
3271 		 * with old userspace.
3272 		 */
3273 		if (xstate_bv & ~kvm_supported_xcr0())
3274 			return -EINVAL;
3275 		load_xsave(vcpu, (u8 *)guest_xsave->region);
3276 	} else {
3277 		if (xstate_bv & ~XFEATURE_MASK_FPSSE)
3278 			return -EINVAL;
3279 		memcpy(&vcpu->arch.guest_fpu.state.fxsave,
3280 			guest_xsave->region, sizeof(struct fxregs_state));
3281 	}
3282 	return 0;
3283 }
3284 
3285 static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
3286 					struct kvm_xcrs *guest_xcrs)
3287 {
3288 	if (!boot_cpu_has(X86_FEATURE_XSAVE)) {
3289 		guest_xcrs->nr_xcrs = 0;
3290 		return;
3291 	}
3292 
3293 	guest_xcrs->nr_xcrs = 1;
3294 	guest_xcrs->flags = 0;
3295 	guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK;
3296 	guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
3297 }
3298 
3299 static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
3300 				       struct kvm_xcrs *guest_xcrs)
3301 {
3302 	int i, r = 0;
3303 
3304 	if (!boot_cpu_has(X86_FEATURE_XSAVE))
3305 		return -EINVAL;
3306 
3307 	if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags)
3308 		return -EINVAL;
3309 
3310 	for (i = 0; i < guest_xcrs->nr_xcrs; i++)
3311 		/* Only support XCR0 currently */
3312 		if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) {
3313 			r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
3314 				guest_xcrs->xcrs[i].value);
3315 			break;
3316 		}
3317 	if (r)
3318 		r = -EINVAL;
3319 	return r;
3320 }
3321 
3322 /*
3323  * kvm_set_guest_paused() indicates to the guest kernel that it has been
3324  * stopped by the hypervisor.  This function will be called from the host only.
3325  * EINVAL is returned when the host attempts to set the flag for a guest that
3326  * does not support pv clocks.
3327  */
3328 static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
3329 {
3330 	if (!vcpu->arch.pv_time_enabled)
3331 		return -EINVAL;
3332 	vcpu->arch.pvclock_set_guest_stopped_request = true;
3333 	kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
3334 	return 0;
3335 }
3336 
3337 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
3338 				     struct kvm_enable_cap *cap)
3339 {
3340 	if (cap->flags)
3341 		return -EINVAL;
3342 
3343 	switch (cap->cap) {
3344 	case KVM_CAP_HYPERV_SYNIC:
3345 		return kvm_hv_activate_synic(vcpu);
3346 	default:
3347 		return -EINVAL;
3348 	}
3349 }
3350 
3351 long kvm_arch_vcpu_ioctl(struct file *filp,
3352 			 unsigned int ioctl, unsigned long arg)
3353 {
3354 	struct kvm_vcpu *vcpu = filp->private_data;
3355 	void __user *argp = (void __user *)arg;
3356 	int r;
3357 	union {
3358 		struct kvm_lapic_state *lapic;
3359 		struct kvm_xsave *xsave;
3360 		struct kvm_xcrs *xcrs;
3361 		void *buffer;
3362 	} u;
3363 
3364 	u.buffer = NULL;
3365 	switch (ioctl) {
3366 	case KVM_GET_LAPIC: {
3367 		r = -EINVAL;
3368 		if (!lapic_in_kernel(vcpu))
3369 			goto out;
3370 		u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
3371 
3372 		r = -ENOMEM;
3373 		if (!u.lapic)
3374 			goto out;
3375 		r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic);
3376 		if (r)
3377 			goto out;
3378 		r = -EFAULT;
3379 		if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state)))
3380 			goto out;
3381 		r = 0;
3382 		break;
3383 	}
3384 	case KVM_SET_LAPIC: {
3385 		r = -EINVAL;
3386 		if (!lapic_in_kernel(vcpu))
3387 			goto out;
3388 		u.lapic = memdup_user(argp, sizeof(*u.lapic));
3389 		if (IS_ERR(u.lapic))
3390 			return PTR_ERR(u.lapic);
3391 
3392 		r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic);
3393 		break;
3394 	}
3395 	case KVM_INTERRUPT: {
3396 		struct kvm_interrupt irq;
3397 
3398 		r = -EFAULT;
3399 		if (copy_from_user(&irq, argp, sizeof irq))
3400 			goto out;
3401 		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
3402 		break;
3403 	}
3404 	case KVM_NMI: {
3405 		r = kvm_vcpu_ioctl_nmi(vcpu);
3406 		break;
3407 	}
3408 	case KVM_SMI: {
3409 		r = kvm_vcpu_ioctl_smi(vcpu);
3410 		break;
3411 	}
3412 	case KVM_SET_CPUID: {
3413 		struct kvm_cpuid __user *cpuid_arg = argp;
3414 		struct kvm_cpuid cpuid;
3415 
3416 		r = -EFAULT;
3417 		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
3418 			goto out;
3419 		r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
3420 		break;
3421 	}
3422 	case KVM_SET_CPUID2: {
3423 		struct kvm_cpuid2 __user *cpuid_arg = argp;
3424 		struct kvm_cpuid2 cpuid;
3425 
3426 		r = -EFAULT;
3427 		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
3428 			goto out;
3429 		r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
3430 					      cpuid_arg->entries);
3431 		break;
3432 	}
3433 	case KVM_GET_CPUID2: {
3434 		struct kvm_cpuid2 __user *cpuid_arg = argp;
3435 		struct kvm_cpuid2 cpuid;
3436 
3437 		r = -EFAULT;
3438 		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
3439 			goto out;
3440 		r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
3441 					      cpuid_arg->entries);
3442 		if (r)
3443 			goto out;
3444 		r = -EFAULT;
3445 		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
3446 			goto out;
3447 		r = 0;
3448 		break;
3449 	}
3450 	case KVM_GET_MSRS:
3451 		r = msr_io(vcpu, argp, do_get_msr, 1);
3452 		break;
3453 	case KVM_SET_MSRS:
3454 		r = msr_io(vcpu, argp, do_set_msr, 0);
3455 		break;
3456 	case KVM_TPR_ACCESS_REPORTING: {
3457 		struct kvm_tpr_access_ctl tac;
3458 
3459 		r = -EFAULT;
3460 		if (copy_from_user(&tac, argp, sizeof tac))
3461 			goto out;
3462 		r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
3463 		if (r)
3464 			goto out;
3465 		r = -EFAULT;
3466 		if (copy_to_user(argp, &tac, sizeof tac))
3467 			goto out;
3468 		r = 0;
3469 		break;
3470 	};
3471 	case KVM_SET_VAPIC_ADDR: {
3472 		struct kvm_vapic_addr va;
3473 		int idx;
3474 
3475 		r = -EINVAL;
3476 		if (!lapic_in_kernel(vcpu))
3477 			goto out;
3478 		r = -EFAULT;
3479 		if (copy_from_user(&va, argp, sizeof va))
3480 			goto out;
3481 		idx = srcu_read_lock(&vcpu->kvm->srcu);
3482 		r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
3483 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
3484 		break;
3485 	}
3486 	case KVM_X86_SETUP_MCE: {
3487 		u64 mcg_cap;
3488 
3489 		r = -EFAULT;
3490 		if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
3491 			goto out;
3492 		r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
3493 		break;
3494 	}
3495 	case KVM_X86_SET_MCE: {
3496 		struct kvm_x86_mce mce;
3497 
3498 		r = -EFAULT;
3499 		if (copy_from_user(&mce, argp, sizeof mce))
3500 			goto out;
3501 		r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
3502 		break;
3503 	}
3504 	case KVM_GET_VCPU_EVENTS: {
3505 		struct kvm_vcpu_events events;
3506 
3507 		kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);
3508 
3509 		r = -EFAULT;
3510 		if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
3511 			break;
3512 		r = 0;
3513 		break;
3514 	}
3515 	case KVM_SET_VCPU_EVENTS: {
3516 		struct kvm_vcpu_events events;
3517 
3518 		r = -EFAULT;
3519 		if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
3520 			break;
3521 
3522 		r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
3523 		break;
3524 	}
3525 	case KVM_GET_DEBUGREGS: {
3526 		struct kvm_debugregs dbgregs;
3527 
3528 		kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
3529 
3530 		r = -EFAULT;
3531 		if (copy_to_user(argp, &dbgregs,
3532 				 sizeof(struct kvm_debugregs)))
3533 			break;
3534 		r = 0;
3535 		break;
3536 	}
3537 	case KVM_SET_DEBUGREGS: {
3538 		struct kvm_debugregs dbgregs;
3539 
3540 		r = -EFAULT;
3541 		if (copy_from_user(&dbgregs, argp,
3542 				   sizeof(struct kvm_debugregs)))
3543 			break;
3544 
3545 		r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
3546 		break;
3547 	}
3548 	case KVM_GET_XSAVE: {
3549 		u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
3550 		r = -ENOMEM;
3551 		if (!u.xsave)
3552 			break;
3553 
3554 		kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave);
3555 
3556 		r = -EFAULT;
3557 		if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave)))
3558 			break;
3559 		r = 0;
3560 		break;
3561 	}
3562 	case KVM_SET_XSAVE: {
3563 		u.xsave = memdup_user(argp, sizeof(*u.xsave));
3564 		if (IS_ERR(u.xsave))
3565 			return PTR_ERR(u.xsave);
3566 
3567 		r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
3568 		break;
3569 	}
3570 	case KVM_GET_XCRS: {
3571 		u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
3572 		r = -ENOMEM;
3573 		if (!u.xcrs)
3574 			break;
3575 
3576 		kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs);
3577 
3578 		r = -EFAULT;
3579 		if (copy_to_user(argp, u.xcrs,
3580 				 sizeof(struct kvm_xcrs)))
3581 			break;
3582 		r = 0;
3583 		break;
3584 	}
3585 	case KVM_SET_XCRS: {
3586 		u.xcrs = memdup_user(argp, sizeof(*u.xcrs));
3587 		if (IS_ERR(u.xcrs))
3588 			return PTR_ERR(u.xcrs);
3589 
3590 		r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);
3591 		break;
3592 	}
3593 	case KVM_SET_TSC_KHZ: {
3594 		u32 user_tsc_khz;
3595 
3596 		r = -EINVAL;
3597 		user_tsc_khz = (u32)arg;
3598 
3599 		if (user_tsc_khz >= kvm_max_guest_tsc_khz)
3600 			goto out;
3601 
3602 		if (user_tsc_khz == 0)
3603 			user_tsc_khz = tsc_khz;
3604 
3605 		if (!kvm_set_tsc_khz(vcpu, user_tsc_khz))
3606 			r = 0;
3607 
3608 		goto out;
3609 	}
3610 	case KVM_GET_TSC_KHZ: {
3611 		r = vcpu->arch.virtual_tsc_khz;
3612 		goto out;
3613 	}
3614 	case KVM_KVMCLOCK_CTRL: {
3615 		r = kvm_set_guest_paused(vcpu);
3616 		goto out;
3617 	}
3618 	case KVM_ENABLE_CAP: {
3619 		struct kvm_enable_cap cap;
3620 
3621 		r = -EFAULT;
3622 		if (copy_from_user(&cap, argp, sizeof(cap)))
3623 			goto out;
3624 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
3625 		break;
3626 	}
3627 	default:
3628 		r = -EINVAL;
3629 	}
3630 out:
3631 	kfree(u.buffer);
3632 	return r;
3633 }
3634 
3635 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
3636 {
3637 	return VM_FAULT_SIGBUS;
3638 }
3639 
3640 static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
3641 {
3642 	int ret;
3643 
3644 	if (addr > (unsigned int)(-3 * PAGE_SIZE))
3645 		return -EINVAL;
3646 	ret = kvm_x86_ops->set_tss_addr(kvm, addr);
3647 	return ret;
3648 }
3649 
3650 static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
3651 					      u64 ident_addr)
3652 {
3653 	kvm->arch.ept_identity_map_addr = ident_addr;
3654 	return 0;
3655 }
3656 
3657 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
3658 					  u32 kvm_nr_mmu_pages)
3659 {
3660 	if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
3661 		return -EINVAL;
3662 
3663 	mutex_lock(&kvm->slots_lock);
3664 
3665 	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
3666 	kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
3667 
3668 	mutex_unlock(&kvm->slots_lock);
3669 	return 0;
3670 }
3671 
3672 static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
3673 {
3674 	return kvm->arch.n_max_mmu_pages;
3675 }
3676 
3677 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
3678 {
3679 	int r;
3680 
3681 	r = 0;
3682 	switch (chip->chip_id) {
3683 	case KVM_IRQCHIP_PIC_MASTER:
3684 		memcpy(&chip->chip.pic,
3685 			&pic_irqchip(kvm)->pics[0],
3686 			sizeof(struct kvm_pic_state));
3687 		break;
3688 	case KVM_IRQCHIP_PIC_SLAVE:
3689 		memcpy(&chip->chip.pic,
3690 			&pic_irqchip(kvm)->pics[1],
3691 			sizeof(struct kvm_pic_state));
3692 		break;
3693 	case KVM_IRQCHIP_IOAPIC:
3694 		r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
3695 		break;
3696 	default:
3697 		r = -EINVAL;
3698 		break;
3699 	}
3700 	return r;
3701 }
3702 
3703 static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
3704 {
3705 	int r;
3706 
3707 	r = 0;
3708 	switch (chip->chip_id) {
3709 	case KVM_IRQCHIP_PIC_MASTER:
3710 		spin_lock(&pic_irqchip(kvm)->lock);
3711 		memcpy(&pic_irqchip(kvm)->pics[0],
3712 			&chip->chip.pic,
3713 			sizeof(struct kvm_pic_state));
3714 		spin_unlock(&pic_irqchip(kvm)->lock);
3715 		break;
3716 	case KVM_IRQCHIP_PIC_SLAVE:
3717 		spin_lock(&pic_irqchip(kvm)->lock);
3718 		memcpy(&pic_irqchip(kvm)->pics[1],
3719 			&chip->chip.pic,
3720 			sizeof(struct kvm_pic_state));
3721 		spin_unlock(&pic_irqchip(kvm)->lock);
3722 		break;
3723 	case KVM_IRQCHIP_IOAPIC:
3724 		r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
3725 		break;
3726 	default:
3727 		r = -EINVAL;
3728 		break;
3729 	}
3730 	kvm_pic_update_irq(pic_irqchip(kvm));
3731 	return r;
3732 }
3733 
3734 static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
3735 {
3736 	struct kvm_kpit_state *kps = &kvm->arch.vpit->pit_state;
3737 
3738 	BUILD_BUG_ON(sizeof(*ps) != sizeof(kps->channels));
3739 
3740 	mutex_lock(&kps->lock);
3741 	memcpy(ps, &kps->channels, sizeof(*ps));
3742 	mutex_unlock(&kps->lock);
3743 	return 0;
3744 }
3745 
3746 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
3747 {
3748 	int i;
3749 	struct kvm_pit *pit = kvm->arch.vpit;
3750 
3751 	mutex_lock(&pit->pit_state.lock);
3752 	memcpy(&pit->pit_state.channels, ps, sizeof(*ps));
3753 	for (i = 0; i < 3; i++)
3754 		kvm_pit_load_count(pit, i, ps->channels[i].count, 0);
3755 	mutex_unlock(&pit->pit_state.lock);
3756 	return 0;
3757 }
3758 
3759 static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
3760 {
3761 	mutex_lock(&kvm->arch.vpit->pit_state.lock);
3762 	memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
3763 		sizeof(ps->channels));
3764 	ps->flags = kvm->arch.vpit->pit_state.flags;
3765 	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3766 	memset(&ps->reserved, 0, sizeof(ps->reserved));
3767 	return 0;
3768 }
3769 
3770 static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
3771 {
3772 	int start = 0;
3773 	int i;
3774 	u32 prev_legacy, cur_legacy;
3775 	struct kvm_pit *pit = kvm->arch.vpit;
3776 
3777 	mutex_lock(&pit->pit_state.lock);
3778 	prev_legacy = pit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
3779 	cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
3780 	if (!prev_legacy && cur_legacy)
3781 		start = 1;
3782 	memcpy(&pit->pit_state.channels, &ps->channels,
3783 	       sizeof(pit->pit_state.channels));
3784 	pit->pit_state.flags = ps->flags;
3785 	for (i = 0; i < 3; i++)
3786 		kvm_pit_load_count(pit, i, pit->pit_state.channels[i].count,
3787 				   start && i == 0);
3788 	mutex_unlock(&pit->pit_state.lock);
3789 	return 0;
3790 }
3791 
3792 static int kvm_vm_ioctl_reinject(struct kvm *kvm,
3793 				 struct kvm_reinject_control *control)
3794 {
3795 	struct kvm_pit *pit = kvm->arch.vpit;
3796 
3797 	if (!pit)
3798 		return -ENXIO;
3799 
3800 	/* pit->pit_state.lock was overloaded to prevent userspace from getting
3801 	 * an inconsistent state after running multiple KVM_REINJECT_CONTROL
3802 	 * ioctls in parallel.  Use a separate lock if that ioctl isn't rare.
3803 	 */
3804 	mutex_lock(&pit->pit_state.lock);
3805 	kvm_pit_set_reinject(pit, control->pit_reinject);
3806 	mutex_unlock(&pit->pit_state.lock);
3807 
3808 	return 0;
3809 }
3810 
3811 /**
3812  * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
3813  * @kvm: kvm instance
3814  * @log: slot id and address to which we copy the log
3815  *
3816  * Steps 1-4 below provide general overview of dirty page logging. See
3817  * kvm_get_dirty_log_protect() function description for additional details.
3818  *
3819  * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
3820  * always flush the TLB (step 4) even if previous step failed  and the dirty
3821  * bitmap may be corrupt. Regardless of previous outcome the KVM logging API
3822  * does not preclude user space subsequent dirty log read. Flushing TLB ensures
3823  * writes will be marked dirty for next log read.
3824  *
3825  *   1. Take a snapshot of the bit and clear it if needed.
3826  *   2. Write protect the corresponding page.
3827  *   3. Copy the snapshot to the userspace.
3828  *   4. Flush TLB's if needed.
3829  */
3830 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
3831 {
3832 	bool is_dirty = false;
3833 	int r;
3834 
3835 	mutex_lock(&kvm->slots_lock);
3836 
3837 	/*
3838 	 * Flush potentially hardware-cached dirty pages to dirty_bitmap.
3839 	 */
3840 	if (kvm_x86_ops->flush_log_dirty)
3841 		kvm_x86_ops->flush_log_dirty(kvm);
3842 
3843 	r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);
3844 
3845 	/*
3846 	 * All the TLBs can be flushed out of mmu lock, see the comments in
3847 	 * kvm_mmu_slot_remove_write_access().
3848 	 */
3849 	lockdep_assert_held(&kvm->slots_lock);
3850 	if (is_dirty)
3851 		kvm_flush_remote_tlbs(kvm);
3852 
3853 	mutex_unlock(&kvm->slots_lock);
3854 	return r;
3855 }
3856 
3857 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
3858 			bool line_status)
3859 {
3860 	if (!irqchip_in_kernel(kvm))
3861 		return -ENXIO;
3862 
3863 	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
3864 					irq_event->irq, irq_event->level,
3865 					line_status);
3866 	return 0;
3867 }
3868 
3869 static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
3870 				   struct kvm_enable_cap *cap)
3871 {
3872 	int r;
3873 
3874 	if (cap->flags)
3875 		return -EINVAL;
3876 
3877 	switch (cap->cap) {
3878 	case KVM_CAP_DISABLE_QUIRKS:
3879 		kvm->arch.disabled_quirks = cap->args[0];
3880 		r = 0;
3881 		break;
3882 	case KVM_CAP_SPLIT_IRQCHIP: {
3883 		mutex_lock(&kvm->lock);
3884 		r = -EINVAL;
3885 		if (cap->args[0] > MAX_NR_RESERVED_IOAPIC_PINS)
3886 			goto split_irqchip_unlock;
3887 		r = -EEXIST;
3888 		if (irqchip_in_kernel(kvm))
3889 			goto split_irqchip_unlock;
3890 		if (kvm->created_vcpus)
3891 			goto split_irqchip_unlock;
3892 		r = kvm_setup_empty_irq_routing(kvm);
3893 		if (r)
3894 			goto split_irqchip_unlock;
3895 		/* Pairs with irqchip_in_kernel. */
3896 		smp_wmb();
3897 		kvm->arch.irqchip_split = true;
3898 		kvm->arch.nr_reserved_ioapic_pins = cap->args[0];
3899 		r = 0;
3900 split_irqchip_unlock:
3901 		mutex_unlock(&kvm->lock);
3902 		break;
3903 	}
3904 	case KVM_CAP_X2APIC_API:
3905 		r = -EINVAL;
3906 		if (cap->args[0] & ~KVM_X2APIC_API_VALID_FLAGS)
3907 			break;
3908 
3909 		if (cap->args[0] & KVM_X2APIC_API_USE_32BIT_IDS)
3910 			kvm->arch.x2apic_format = true;
3911 		if (cap->args[0] & KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)
3912 			kvm->arch.x2apic_broadcast_quirk_disabled = true;
3913 
3914 		r = 0;
3915 		break;
3916 	default:
3917 		r = -EINVAL;
3918 		break;
3919 	}
3920 	return r;
3921 }
3922 
3923 long kvm_arch_vm_ioctl(struct file *filp,
3924 		       unsigned int ioctl, unsigned long arg)
3925 {
3926 	struct kvm *kvm = filp->private_data;
3927 	void __user *argp = (void __user *)arg;
3928 	int r = -ENOTTY;
3929 	/*
3930 	 * This union makes it completely explicit to gcc-3.x
3931 	 * that these two variables' stack usage should be
3932 	 * combined, not added together.
3933 	 */
3934 	union {
3935 		struct kvm_pit_state ps;
3936 		struct kvm_pit_state2 ps2;
3937 		struct kvm_pit_config pit_config;
3938 	} u;
3939 
3940 	switch (ioctl) {
3941 	case KVM_SET_TSS_ADDR:
3942 		r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
3943 		break;
3944 	case KVM_SET_IDENTITY_MAP_ADDR: {
3945 		u64 ident_addr;
3946 
3947 		r = -EFAULT;
3948 		if (copy_from_user(&ident_addr, argp, sizeof ident_addr))
3949 			goto out;
3950 		r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
3951 		break;
3952 	}
3953 	case KVM_SET_NR_MMU_PAGES:
3954 		r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
3955 		break;
3956 	case KVM_GET_NR_MMU_PAGES:
3957 		r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
3958 		break;
3959 	case KVM_CREATE_IRQCHIP: {
3960 		struct kvm_pic *vpic;
3961 
3962 		mutex_lock(&kvm->lock);
3963 		r = -EEXIST;
3964 		if (kvm->arch.vpic)
3965 			goto create_irqchip_unlock;
3966 		r = -EINVAL;
3967 		if (kvm->created_vcpus)
3968 			goto create_irqchip_unlock;
3969 		r = -ENOMEM;
3970 		vpic = kvm_create_pic(kvm);
3971 		if (vpic) {
3972 			r = kvm_ioapic_init(kvm);
3973 			if (r) {
3974 				mutex_lock(&kvm->slots_lock);
3975 				kvm_destroy_pic(vpic);
3976 				mutex_unlock(&kvm->slots_lock);
3977 				goto create_irqchip_unlock;
3978 			}
3979 		} else
3980 			goto create_irqchip_unlock;
3981 		r = kvm_setup_default_irq_routing(kvm);
3982 		if (r) {
3983 			mutex_lock(&kvm->slots_lock);
3984 			mutex_lock(&kvm->irq_lock);
3985 			kvm_ioapic_destroy(kvm);
3986 			kvm_destroy_pic(vpic);
3987 			mutex_unlock(&kvm->irq_lock);
3988 			mutex_unlock(&kvm->slots_lock);
3989 			goto create_irqchip_unlock;
3990 		}
3991 		/* Write kvm->irq_routing before kvm->arch.vpic.  */
3992 		smp_wmb();
3993 		kvm->arch.vpic = vpic;
3994 	create_irqchip_unlock:
3995 		mutex_unlock(&kvm->lock);
3996 		break;
3997 	}
3998 	case KVM_CREATE_PIT:
3999 		u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
4000 		goto create_pit;
4001 	case KVM_CREATE_PIT2:
4002 		r = -EFAULT;
4003 		if (copy_from_user(&u.pit_config, argp,
4004 				   sizeof(struct kvm_pit_config)))
4005 			goto out;
4006 	create_pit:
4007 		mutex_lock(&kvm->lock);
4008 		r = -EEXIST;
4009 		if (kvm->arch.vpit)
4010 			goto create_pit_unlock;
4011 		r = -ENOMEM;
4012 		kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
4013 		if (kvm->arch.vpit)
4014 			r = 0;
4015 	create_pit_unlock:
4016 		mutex_unlock(&kvm->lock);
4017 		break;
4018 	case KVM_GET_IRQCHIP: {
4019 		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
4020 		struct kvm_irqchip *chip;
4021 
4022 		chip = memdup_user(argp, sizeof(*chip));
4023 		if (IS_ERR(chip)) {
4024 			r = PTR_ERR(chip);
4025 			goto out;
4026 		}
4027 
4028 		r = -ENXIO;
4029 		if (!irqchip_in_kernel(kvm) || irqchip_split(kvm))
4030 			goto get_irqchip_out;
4031 		r = kvm_vm_ioctl_get_irqchip(kvm, chip);
4032 		if (r)
4033 			goto get_irqchip_out;
4034 		r = -EFAULT;
4035 		if (copy_to_user(argp, chip, sizeof *chip))
4036 			goto get_irqchip_out;
4037 		r = 0;
4038 	get_irqchip_out:
4039 		kfree(chip);
4040 		break;
4041 	}
4042 	case KVM_SET_IRQCHIP: {
4043 		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
4044 		struct kvm_irqchip *chip;
4045 
4046 		chip = memdup_user(argp, sizeof(*chip));
4047 		if (IS_ERR(chip)) {
4048 			r = PTR_ERR(chip);
4049 			goto out;
4050 		}
4051 
4052 		r = -ENXIO;
4053 		if (!irqchip_in_kernel(kvm) || irqchip_split(kvm))
4054 			goto set_irqchip_out;
4055 		r = kvm_vm_ioctl_set_irqchip(kvm, chip);
4056 		if (r)
4057 			goto set_irqchip_out;
4058 		r = 0;
4059 	set_irqchip_out:
4060 		kfree(chip);
4061 		break;
4062 	}
4063 	case KVM_GET_PIT: {
4064 		r = -EFAULT;
4065 		if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
4066 			goto out;
4067 		r = -ENXIO;
4068 		if (!kvm->arch.vpit)
4069 			goto out;
4070 		r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
4071 		if (r)
4072 			goto out;
4073 		r = -EFAULT;
4074 		if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
4075 			goto out;
4076 		r = 0;
4077 		break;
4078 	}
4079 	case KVM_SET_PIT: {
4080 		r = -EFAULT;
4081 		if (copy_from_user(&u.ps, argp, sizeof u.ps))
4082 			goto out;
4083 		r = -ENXIO;
4084 		if (!kvm->arch.vpit)
4085 			goto out;
4086 		r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
4087 		break;
4088 	}
4089 	case KVM_GET_PIT2: {
4090 		r = -ENXIO;
4091 		if (!kvm->arch.vpit)
4092 			goto out;
4093 		r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
4094 		if (r)
4095 			goto out;
4096 		r = -EFAULT;
4097 		if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
4098 			goto out;
4099 		r = 0;
4100 		break;
4101 	}
4102 	case KVM_SET_PIT2: {
4103 		r = -EFAULT;
4104 		if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
4105 			goto out;
4106 		r = -ENXIO;
4107 		if (!kvm->arch.vpit)
4108 			goto out;
4109 		r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
4110 		break;
4111 	}
4112 	case KVM_REINJECT_CONTROL: {
4113 		struct kvm_reinject_control control;
4114 		r =  -EFAULT;
4115 		if (copy_from_user(&control, argp, sizeof(control)))
4116 			goto out;
4117 		r = kvm_vm_ioctl_reinject(kvm, &control);
4118 		break;
4119 	}
4120 	case KVM_SET_BOOT_CPU_ID:
4121 		r = 0;
4122 		mutex_lock(&kvm->lock);
4123 		if (kvm->created_vcpus)
4124 			r = -EBUSY;
4125 		else
4126 			kvm->arch.bsp_vcpu_id = arg;
4127 		mutex_unlock(&kvm->lock);
4128 		break;
4129 	case KVM_XEN_HVM_CONFIG: {
4130 		r = -EFAULT;
4131 		if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
4132 				   sizeof(struct kvm_xen_hvm_config)))
4133 			goto out;
4134 		r = -EINVAL;
4135 		if (kvm->arch.xen_hvm_config.flags)
4136 			goto out;
4137 		r = 0;
4138 		break;
4139 	}
4140 	case KVM_SET_CLOCK: {
4141 		struct kvm_clock_data user_ns;
4142 		u64 now_ns;
4143 
4144 		r = -EFAULT;
4145 		if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
4146 			goto out;
4147 
4148 		r = -EINVAL;
4149 		if (user_ns.flags)
4150 			goto out;
4151 
4152 		r = 0;
4153 		local_irq_disable();
4154 		now_ns = __get_kvmclock_ns(kvm);
4155 		kvm->arch.kvmclock_offset += user_ns.clock - now_ns;
4156 		local_irq_enable();
4157 		kvm_gen_update_masterclock(kvm);
4158 		break;
4159 	}
4160 	case KVM_GET_CLOCK: {
4161 		struct kvm_clock_data user_ns;
4162 		u64 now_ns;
4163 
4164 		local_irq_disable();
4165 		now_ns = __get_kvmclock_ns(kvm);
4166 		user_ns.clock = now_ns;
4167 		user_ns.flags = kvm->arch.use_master_clock ? KVM_CLOCK_TSC_STABLE : 0;
4168 		local_irq_enable();
4169 		memset(&user_ns.pad, 0, sizeof(user_ns.pad));
4170 
4171 		r = -EFAULT;
4172 		if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
4173 			goto out;
4174 		r = 0;
4175 		break;
4176 	}
4177 	case KVM_ENABLE_CAP: {
4178 		struct kvm_enable_cap cap;
4179 
4180 		r = -EFAULT;
4181 		if (copy_from_user(&cap, argp, sizeof(cap)))
4182 			goto out;
4183 		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
4184 		break;
4185 	}
4186 	default:
4187 		r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
4188 	}
4189 out:
4190 	return r;
4191 }
4192 
4193 static void kvm_init_msr_list(void)
4194 {
4195 	u32 dummy[2];
4196 	unsigned i, j;
4197 
4198 	for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
4199 		if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
4200 			continue;
4201 
4202 		/*
4203 		 * Even MSRs that are valid in the host may not be exposed
4204 		 * to the guests in some cases.
4205 		 */
4206 		switch (msrs_to_save[i]) {
4207 		case MSR_IA32_BNDCFGS:
4208 			if (!kvm_x86_ops->mpx_supported())
4209 				continue;
4210 			break;
4211 		case MSR_TSC_AUX:
4212 			if (!kvm_x86_ops->rdtscp_supported())
4213 				continue;
4214 			break;
4215 		default:
4216 			break;
4217 		}
4218 
4219 		if (j < i)
4220 			msrs_to_save[j] = msrs_to_save[i];
4221 		j++;
4222 	}
4223 	num_msrs_to_save = j;
4224 
4225 	for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) {
4226 		switch (emulated_msrs[i]) {
4227 		case MSR_IA32_SMBASE:
4228 			if (!kvm_x86_ops->cpu_has_high_real_mode_segbase())
4229 				continue;
4230 			break;
4231 		default:
4232 			break;
4233 		}
4234 
4235 		if (j < i)
4236 			emulated_msrs[j] = emulated_msrs[i];
4237 		j++;
4238 	}
4239 	num_emulated_msrs = j;
4240 }
4241 
4242 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
4243 			   const void *v)
4244 {
4245 	int handled = 0;
4246 	int n;
4247 
4248 	do {
4249 		n = min(len, 8);
4250 		if (!(lapic_in_kernel(vcpu) &&
4251 		      !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v))
4252 		    && kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v))
4253 			break;
4254 		handled += n;
4255 		addr += n;
4256 		len -= n;
4257 		v += n;
4258 	} while (len);
4259 
4260 	return handled;
4261 }
4262 
4263 static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
4264 {
4265 	int handled = 0;
4266 	int n;
4267 
4268 	do {
4269 		n = min(len, 8);
4270 		if (!(lapic_in_kernel(vcpu) &&
4271 		      !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev,
4272 					 addr, n, v))
4273 		    && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v))
4274 			break;
4275 		trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v);
4276 		handled += n;
4277 		addr += n;
4278 		len -= n;
4279 		v += n;
4280 	} while (len);
4281 
4282 	return handled;
4283 }
4284 
4285 static void kvm_set_segment(struct kvm_vcpu *vcpu,
4286 			struct kvm_segment *var, int seg)
4287 {
4288 	kvm_x86_ops->set_segment(vcpu, var, seg);
4289 }
4290 
4291 void kvm_get_segment(struct kvm_vcpu *vcpu,
4292 		     struct kvm_segment *var, int seg)
4293 {
4294 	kvm_x86_ops->get_segment(vcpu, var, seg);
4295 }
4296 
4297 gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
4298 			   struct x86_exception *exception)
4299 {
4300 	gpa_t t_gpa;
4301 
4302 	BUG_ON(!mmu_is_nested(vcpu));
4303 
4304 	/* NPT walks are always user-walks */
4305 	access |= PFERR_USER_MASK;
4306 	t_gpa  = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, exception);
4307 
4308 	return t_gpa;
4309 }
4310 
4311 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
4312 			      struct x86_exception *exception)
4313 {
4314 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
4315 	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
4316 }
4317 
4318  gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
4319 				struct x86_exception *exception)
4320 {
4321 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
4322 	access |= PFERR_FETCH_MASK;
4323 	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
4324 }
4325 
4326 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
4327 			       struct x86_exception *exception)
4328 {
4329 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
4330 	access |= PFERR_WRITE_MASK;
4331 	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
4332 }
4333 
4334 /* uses this to access any guest's mapped memory without checking CPL */
4335 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
4336 				struct x86_exception *exception)
4337 {
4338 	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception);
4339 }
4340 
4341 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
4342 				      struct kvm_vcpu *vcpu, u32 access,
4343 				      struct x86_exception *exception)
4344 {
4345 	void *data = val;
4346 	int r = X86EMUL_CONTINUE;
4347 
4348 	while (bytes) {
4349 		gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
4350 							    exception);
4351 		unsigned offset = addr & (PAGE_SIZE-1);
4352 		unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
4353 		int ret;
4354 
4355 		if (gpa == UNMAPPED_GVA)
4356 			return X86EMUL_PROPAGATE_FAULT;
4357 		ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, data,
4358 					       offset, toread);
4359 		if (ret < 0) {
4360 			r = X86EMUL_IO_NEEDED;
4361 			goto out;
4362 		}
4363 
4364 		bytes -= toread;
4365 		data += toread;
4366 		addr += toread;
4367 	}
4368 out:
4369 	return r;
4370 }
4371 
4372 /* used for instruction fetching */
4373 static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
4374 				gva_t addr, void *val, unsigned int bytes,
4375 				struct x86_exception *exception)
4376 {
4377 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4378 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
4379 	unsigned offset;
4380 	int ret;
4381 
4382 	/* Inline kvm_read_guest_virt_helper for speed.  */
4383 	gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access|PFERR_FETCH_MASK,
4384 						    exception);
4385 	if (unlikely(gpa == UNMAPPED_GVA))
4386 		return X86EMUL_PROPAGATE_FAULT;
4387 
4388 	offset = addr & (PAGE_SIZE-1);
4389 	if (WARN_ON(offset + bytes > PAGE_SIZE))
4390 		bytes = (unsigned)PAGE_SIZE - offset;
4391 	ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, val,
4392 				       offset, bytes);
4393 	if (unlikely(ret < 0))
4394 		return X86EMUL_IO_NEEDED;
4395 
4396 	return X86EMUL_CONTINUE;
4397 }
4398 
4399 int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
4400 			       gva_t addr, void *val, unsigned int bytes,
4401 			       struct x86_exception *exception)
4402 {
4403 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4404 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
4405 
4406 	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
4407 					  exception);
4408 }
4409 EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
4410 
4411 static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
4412 				      gva_t addr, void *val, unsigned int bytes,
4413 				      struct x86_exception *exception)
4414 {
4415 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4416 	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
4417 }
4418 
4419 static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
4420 		unsigned long addr, void *val, unsigned int bytes)
4421 {
4422 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4423 	int r = kvm_vcpu_read_guest(vcpu, addr, val, bytes);
4424 
4425 	return r < 0 ? X86EMUL_IO_NEEDED : X86EMUL_CONTINUE;
4426 }
4427 
4428 int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
4429 				       gva_t addr, void *val,
4430 				       unsigned int bytes,
4431 				       struct x86_exception *exception)
4432 {
4433 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4434 	void *data = val;
4435 	int r = X86EMUL_CONTINUE;
4436 
4437 	while (bytes) {
4438 		gpa_t gpa =  vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
4439 							     PFERR_WRITE_MASK,
4440 							     exception);
4441 		unsigned offset = addr & (PAGE_SIZE-1);
4442 		unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
4443 		int ret;
4444 
4445 		if (gpa == UNMAPPED_GVA)
4446 			return X86EMUL_PROPAGATE_FAULT;
4447 		ret = kvm_vcpu_write_guest(vcpu, gpa, data, towrite);
4448 		if (ret < 0) {
4449 			r = X86EMUL_IO_NEEDED;
4450 			goto out;
4451 		}
4452 
4453 		bytes -= towrite;
4454 		data += towrite;
4455 		addr += towrite;
4456 	}
4457 out:
4458 	return r;
4459 }
4460 EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
4461 
4462 static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
4463 				gpa_t *gpa, struct x86_exception *exception,
4464 				bool write)
4465 {
4466 	u32 access = ((kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0)
4467 		| (write ? PFERR_WRITE_MASK : 0);
4468 
4469 	/*
4470 	 * currently PKRU is only applied to ept enabled guest so
4471 	 * there is no pkey in EPT page table for L1 guest or EPT
4472 	 * shadow page table for L2 guest.
4473 	 */
4474 	if (vcpu_match_mmio_gva(vcpu, gva)
4475 	    && !permission_fault(vcpu, vcpu->arch.walk_mmu,
4476 				 vcpu->arch.access, 0, access)) {
4477 		*gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
4478 					(gva & (PAGE_SIZE - 1));
4479 		trace_vcpu_match_mmio(gva, *gpa, write, false);
4480 		return 1;
4481 	}
4482 
4483 	*gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
4484 
4485 	if (*gpa == UNMAPPED_GVA)
4486 		return -1;
4487 
4488 	/* For APIC access vmexit */
4489 	if ((*gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
4490 		return 1;
4491 
4492 	if (vcpu_match_mmio_gpa(vcpu, *gpa)) {
4493 		trace_vcpu_match_mmio(gva, *gpa, write, true);
4494 		return 1;
4495 	}
4496 
4497 	return 0;
4498 }
4499 
4500 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
4501 			const void *val, int bytes)
4502 {
4503 	int ret;
4504 
4505 	ret = kvm_vcpu_write_guest(vcpu, gpa, val, bytes);
4506 	if (ret < 0)
4507 		return 0;
4508 	kvm_page_track_write(vcpu, gpa, val, bytes);
4509 	return 1;
4510 }
4511 
4512 struct read_write_emulator_ops {
4513 	int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val,
4514 				  int bytes);
4515 	int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa,
4516 				  void *val, int bytes);
4517 	int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
4518 			       int bytes, void *val);
4519 	int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
4520 				    void *val, int bytes);
4521 	bool write;
4522 };
4523 
4524 static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
4525 {
4526 	if (vcpu->mmio_read_completed) {
4527 		trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
4528 			       vcpu->mmio_fragments[0].gpa, *(u64 *)val);
4529 		vcpu->mmio_read_completed = 0;
4530 		return 1;
4531 	}
4532 
4533 	return 0;
4534 }
4535 
4536 static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
4537 			void *val, int bytes)
4538 {
4539 	return !kvm_vcpu_read_guest(vcpu, gpa, val, bytes);
4540 }
4541 
4542 static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
4543 			 void *val, int bytes)
4544 {
4545 	return emulator_write_phys(vcpu, gpa, val, bytes);
4546 }
4547 
4548 static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
4549 {
4550 	trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
4551 	return vcpu_mmio_write(vcpu, gpa, bytes, val);
4552 }
4553 
4554 static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
4555 			  void *val, int bytes)
4556 {
4557 	trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
4558 	return X86EMUL_IO_NEEDED;
4559 }
4560 
4561 static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
4562 			   void *val, int bytes)
4563 {
4564 	struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0];
4565 
4566 	memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
4567 	return X86EMUL_CONTINUE;
4568 }
4569 
4570 static const struct read_write_emulator_ops read_emultor = {
4571 	.read_write_prepare = read_prepare,
4572 	.read_write_emulate = read_emulate,
4573 	.read_write_mmio = vcpu_mmio_read,
4574 	.read_write_exit_mmio = read_exit_mmio,
4575 };
4576 
4577 static const struct read_write_emulator_ops write_emultor = {
4578 	.read_write_emulate = write_emulate,
4579 	.read_write_mmio = write_mmio,
4580 	.read_write_exit_mmio = write_exit_mmio,
4581 	.write = true,
4582 };
4583 
4584 static int emulator_read_write_onepage(unsigned long addr, void *val,
4585 				       unsigned int bytes,
4586 				       struct x86_exception *exception,
4587 				       struct kvm_vcpu *vcpu,
4588 				       const struct read_write_emulator_ops *ops)
4589 {
4590 	gpa_t gpa;
4591 	int handled, ret;
4592 	bool write = ops->write;
4593 	struct kvm_mmio_fragment *frag;
4594 
4595 	ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
4596 
4597 	if (ret < 0)
4598 		return X86EMUL_PROPAGATE_FAULT;
4599 
4600 	/* For APIC access vmexit */
4601 	if (ret)
4602 		goto mmio;
4603 
4604 	if (ops->read_write_emulate(vcpu, gpa, val, bytes))
4605 		return X86EMUL_CONTINUE;
4606 
4607 mmio:
4608 	/*
4609 	 * Is this MMIO handled locally?
4610 	 */
4611 	handled = ops->read_write_mmio(vcpu, gpa, bytes, val);
4612 	if (handled == bytes)
4613 		return X86EMUL_CONTINUE;
4614 
4615 	gpa += handled;
4616 	bytes -= handled;
4617 	val += handled;
4618 
4619 	WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS);
4620 	frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];
4621 	frag->gpa = gpa;
4622 	frag->data = val;
4623 	frag->len = bytes;
4624 	return X86EMUL_CONTINUE;
4625 }
4626 
4627 static int emulator_read_write(struct x86_emulate_ctxt *ctxt,
4628 			unsigned long addr,
4629 			void *val, unsigned int bytes,
4630 			struct x86_exception *exception,
4631 			const struct read_write_emulator_ops *ops)
4632 {
4633 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4634 	gpa_t gpa;
4635 	int rc;
4636 
4637 	if (ops->read_write_prepare &&
4638 		  ops->read_write_prepare(vcpu, val, bytes))
4639 		return X86EMUL_CONTINUE;
4640 
4641 	vcpu->mmio_nr_fragments = 0;
4642 
4643 	/* Crossing a page boundary? */
4644 	if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
4645 		int now;
4646 
4647 		now = -addr & ~PAGE_MASK;
4648 		rc = emulator_read_write_onepage(addr, val, now, exception,
4649 						 vcpu, ops);
4650 
4651 		if (rc != X86EMUL_CONTINUE)
4652 			return rc;
4653 		addr += now;
4654 		if (ctxt->mode != X86EMUL_MODE_PROT64)
4655 			addr = (u32)addr;
4656 		val += now;
4657 		bytes -= now;
4658 	}
4659 
4660 	rc = emulator_read_write_onepage(addr, val, bytes, exception,
4661 					 vcpu, ops);
4662 	if (rc != X86EMUL_CONTINUE)
4663 		return rc;
4664 
4665 	if (!vcpu->mmio_nr_fragments)
4666 		return rc;
4667 
4668 	gpa = vcpu->mmio_fragments[0].gpa;
4669 
4670 	vcpu->mmio_needed = 1;
4671 	vcpu->mmio_cur_fragment = 0;
4672 
4673 	vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len);
4674 	vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write;
4675 	vcpu->run->exit_reason = KVM_EXIT_MMIO;
4676 	vcpu->run->mmio.phys_addr = gpa;
4677 
4678 	return ops->read_write_exit_mmio(vcpu, gpa, val, bytes);
4679 }
4680 
4681 static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
4682 				  unsigned long addr,
4683 				  void *val,
4684 				  unsigned int bytes,
4685 				  struct x86_exception *exception)
4686 {
4687 	return emulator_read_write(ctxt, addr, val, bytes,
4688 				   exception, &read_emultor);
4689 }
4690 
4691 static int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
4692 			    unsigned long addr,
4693 			    const void *val,
4694 			    unsigned int bytes,
4695 			    struct x86_exception *exception)
4696 {
4697 	return emulator_read_write(ctxt, addr, (void *)val, bytes,
4698 				   exception, &write_emultor);
4699 }
4700 
4701 #define CMPXCHG_TYPE(t, ptr, old, new) \
4702 	(cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old))
4703 
4704 #ifdef CONFIG_X86_64
4705 #  define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new)
4706 #else
4707 #  define CMPXCHG64(ptr, old, new) \
4708 	(cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old))
4709 #endif
4710 
4711 static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
4712 				     unsigned long addr,
4713 				     const void *old,
4714 				     const void *new,
4715 				     unsigned int bytes,
4716 				     struct x86_exception *exception)
4717 {
4718 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4719 	gpa_t gpa;
4720 	struct page *page;
4721 	char *kaddr;
4722 	bool exchanged;
4723 
4724 	/* guests cmpxchg8b have to be emulated atomically */
4725 	if (bytes > 8 || (bytes & (bytes - 1)))
4726 		goto emul_write;
4727 
4728 	gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
4729 
4730 	if (gpa == UNMAPPED_GVA ||
4731 	    (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
4732 		goto emul_write;
4733 
4734 	if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
4735 		goto emul_write;
4736 
4737 	page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
4738 	if (is_error_page(page))
4739 		goto emul_write;
4740 
4741 	kaddr = kmap_atomic(page);
4742 	kaddr += offset_in_page(gpa);
4743 	switch (bytes) {
4744 	case 1:
4745 		exchanged = CMPXCHG_TYPE(u8, kaddr, old, new);
4746 		break;
4747 	case 2:
4748 		exchanged = CMPXCHG_TYPE(u16, kaddr, old, new);
4749 		break;
4750 	case 4:
4751 		exchanged = CMPXCHG_TYPE(u32, kaddr, old, new);
4752 		break;
4753 	case 8:
4754 		exchanged = CMPXCHG64(kaddr, old, new);
4755 		break;
4756 	default:
4757 		BUG();
4758 	}
4759 	kunmap_atomic(kaddr);
4760 	kvm_release_page_dirty(page);
4761 
4762 	if (!exchanged)
4763 		return X86EMUL_CMPXCHG_FAILED;
4764 
4765 	kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
4766 	kvm_page_track_write(vcpu, gpa, new, bytes);
4767 
4768 	return X86EMUL_CONTINUE;
4769 
4770 emul_write:
4771 	printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
4772 
4773 	return emulator_write_emulated(ctxt, addr, new, bytes, exception);
4774 }
4775 
4776 static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
4777 {
4778 	/* TODO: String I/O for in kernel device */
4779 	int r;
4780 
4781 	if (vcpu->arch.pio.in)
4782 		r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port,
4783 				    vcpu->arch.pio.size, pd);
4784 	else
4785 		r = kvm_io_bus_write(vcpu, KVM_PIO_BUS,
4786 				     vcpu->arch.pio.port, vcpu->arch.pio.size,
4787 				     pd);
4788 	return r;
4789 }
4790 
4791 static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
4792 			       unsigned short port, void *val,
4793 			       unsigned int count, bool in)
4794 {
4795 	vcpu->arch.pio.port = port;
4796 	vcpu->arch.pio.in = in;
4797 	vcpu->arch.pio.count  = count;
4798 	vcpu->arch.pio.size = size;
4799 
4800 	if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
4801 		vcpu->arch.pio.count = 0;
4802 		return 1;
4803 	}
4804 
4805 	vcpu->run->exit_reason = KVM_EXIT_IO;
4806 	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
4807 	vcpu->run->io.size = size;
4808 	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
4809 	vcpu->run->io.count = count;
4810 	vcpu->run->io.port = port;
4811 
4812 	return 0;
4813 }
4814 
4815 static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
4816 				    int size, unsigned short port, void *val,
4817 				    unsigned int count)
4818 {
4819 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4820 	int ret;
4821 
4822 	if (vcpu->arch.pio.count)
4823 		goto data_avail;
4824 
4825 	ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
4826 	if (ret) {
4827 data_avail:
4828 		memcpy(val, vcpu->arch.pio_data, size * count);
4829 		trace_kvm_pio(KVM_PIO_IN, port, size, count, vcpu->arch.pio_data);
4830 		vcpu->arch.pio.count = 0;
4831 		return 1;
4832 	}
4833 
4834 	return 0;
4835 }
4836 
4837 static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
4838 				     int size, unsigned short port,
4839 				     const void *val, unsigned int count)
4840 {
4841 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4842 
4843 	memcpy(vcpu->arch.pio_data, val, size * count);
4844 	trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data);
4845 	return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
4846 }
4847 
4848 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
4849 {
4850 	return kvm_x86_ops->get_segment_base(vcpu, seg);
4851 }
4852 
4853 static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
4854 {
4855 	kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);
4856 }
4857 
4858 static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
4859 {
4860 	if (!need_emulate_wbinvd(vcpu))
4861 		return X86EMUL_CONTINUE;
4862 
4863 	if (kvm_x86_ops->has_wbinvd_exit()) {
4864 		int cpu = get_cpu();
4865 
4866 		cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
4867 		smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
4868 				wbinvd_ipi, NULL, 1);
4869 		put_cpu();
4870 		cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
4871 	} else
4872 		wbinvd();
4873 	return X86EMUL_CONTINUE;
4874 }
4875 
4876 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
4877 {
4878 	kvm_emulate_wbinvd_noskip(vcpu);
4879 	return kvm_skip_emulated_instruction(vcpu);
4880 }
4881 EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
4882 
4883 
4884 
4885 static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
4886 {
4887 	kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt));
4888 }
4889 
4890 static int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
4891 			   unsigned long *dest)
4892 {
4893 	return kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);
4894 }
4895 
4896 static int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
4897 			   unsigned long value)
4898 {
4899 
4900 	return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value);
4901 }
4902 
4903 static u64 mk_cr_64(u64 curr_cr, u32 new_val)
4904 {
4905 	return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
4906 }
4907 
4908 static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr)
4909 {
4910 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4911 	unsigned long value;
4912 
4913 	switch (cr) {
4914 	case 0:
4915 		value = kvm_read_cr0(vcpu);
4916 		break;
4917 	case 2:
4918 		value = vcpu->arch.cr2;
4919 		break;
4920 	case 3:
4921 		value = kvm_read_cr3(vcpu);
4922 		break;
4923 	case 4:
4924 		value = kvm_read_cr4(vcpu);
4925 		break;
4926 	case 8:
4927 		value = kvm_get_cr8(vcpu);
4928 		break;
4929 	default:
4930 		kvm_err("%s: unexpected cr %u\n", __func__, cr);
4931 		return 0;
4932 	}
4933 
4934 	return value;
4935 }
4936 
4937 static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val)
4938 {
4939 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4940 	int res = 0;
4941 
4942 	switch (cr) {
4943 	case 0:
4944 		res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
4945 		break;
4946 	case 2:
4947 		vcpu->arch.cr2 = val;
4948 		break;
4949 	case 3:
4950 		res = kvm_set_cr3(vcpu, val);
4951 		break;
4952 	case 4:
4953 		res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
4954 		break;
4955 	case 8:
4956 		res = kvm_set_cr8(vcpu, val);
4957 		break;
4958 	default:
4959 		kvm_err("%s: unexpected cr %u\n", __func__, cr);
4960 		res = -1;
4961 	}
4962 
4963 	return res;
4964 }
4965 
4966 static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt)
4967 {
4968 	return kvm_x86_ops->get_cpl(emul_to_vcpu(ctxt));
4969 }
4970 
4971 static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
4972 {
4973 	kvm_x86_ops->get_gdt(emul_to_vcpu(ctxt), dt);
4974 }
4975 
4976 static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
4977 {
4978 	kvm_x86_ops->get_idt(emul_to_vcpu(ctxt), dt);
4979 }
4980 
4981 static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
4982 {
4983 	kvm_x86_ops->set_gdt(emul_to_vcpu(ctxt), dt);
4984 }
4985 
4986 static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
4987 {
4988 	kvm_x86_ops->set_idt(emul_to_vcpu(ctxt), dt);
4989 }
4990 
4991 static unsigned long emulator_get_cached_segment_base(
4992 	struct x86_emulate_ctxt *ctxt, int seg)
4993 {
4994 	return get_segment_base(emul_to_vcpu(ctxt), seg);
4995 }
4996 
4997 static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
4998 				 struct desc_struct *desc, u32 *base3,
4999 				 int seg)
5000 {
5001 	struct kvm_segment var;
5002 
5003 	kvm_get_segment(emul_to_vcpu(ctxt), &var, seg);
5004 	*selector = var.selector;
5005 
5006 	if (var.unusable) {
5007 		memset(desc, 0, sizeof(*desc));
5008 		return false;
5009 	}
5010 
5011 	if (var.g)
5012 		var.limit >>= 12;
5013 	set_desc_limit(desc, var.limit);
5014 	set_desc_base(desc, (unsigned long)var.base);
5015 #ifdef CONFIG_X86_64
5016 	if (base3)
5017 		*base3 = var.base >> 32;
5018 #endif
5019 	desc->type = var.type;
5020 	desc->s = var.s;
5021 	desc->dpl = var.dpl;
5022 	desc->p = var.present;
5023 	desc->avl = var.avl;
5024 	desc->l = var.l;
5025 	desc->d = var.db;
5026 	desc->g = var.g;
5027 
5028 	return true;
5029 }
5030 
5031 static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector,
5032 				 struct desc_struct *desc, u32 base3,
5033 				 int seg)
5034 {
5035 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
5036 	struct kvm_segment var;
5037 
5038 	var.selector = selector;
5039 	var.base = get_desc_base(desc);
5040 #ifdef CONFIG_X86_64
5041 	var.base |= ((u64)base3) << 32;
5042 #endif
5043 	var.limit = get_desc_limit(desc);
5044 	if (desc->g)
5045 		var.limit = (var.limit << 12) | 0xfff;
5046 	var.type = desc->type;
5047 	var.dpl = desc->dpl;
5048 	var.db = desc->d;
5049 	var.s = desc->s;
5050 	var.l = desc->l;
5051 	var.g = desc->g;
5052 	var.avl = desc->avl;
5053 	var.present = desc->p;
5054 	var.unusable = !var.present;
5055 	var.padding = 0;
5056 
5057 	kvm_set_segment(vcpu, &var, seg);
5058 	return;
5059 }
5060 
5061 static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
5062 			    u32 msr_index, u64 *pdata)
5063 {
5064 	struct msr_data msr;
5065 	int r;
5066 
5067 	msr.index = msr_index;
5068 	msr.host_initiated = false;
5069 	r = kvm_get_msr(emul_to_vcpu(ctxt), &msr);
5070 	if (r)
5071 		return r;
5072 
5073 	*pdata = msr.data;
5074 	return 0;
5075 }
5076 
5077 static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
5078 			    u32 msr_index, u64 data)
5079 {
5080 	struct msr_data msr;
5081 
5082 	msr.data = data;
5083 	msr.index = msr_index;
5084 	msr.host_initiated = false;
5085 	return kvm_set_msr(emul_to_vcpu(ctxt), &msr);
5086 }
5087 
5088 static u64 emulator_get_smbase(struct x86_emulate_ctxt *ctxt)
5089 {
5090 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
5091 
5092 	return vcpu->arch.smbase;
5093 }
5094 
5095 static void emulator_set_smbase(struct x86_emulate_ctxt *ctxt, u64 smbase)
5096 {
5097 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
5098 
5099 	vcpu->arch.smbase = smbase;
5100 }
5101 
5102 static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt,
5103 			      u32 pmc)
5104 {
5105 	return kvm_pmu_is_valid_msr_idx(emul_to_vcpu(ctxt), pmc);
5106 }
5107 
5108 static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
5109 			     u32 pmc, u64 *pdata)
5110 {
5111 	return kvm_pmu_rdpmc(emul_to_vcpu(ctxt), pmc, pdata);
5112 }
5113 
5114 static void emulator_halt(struct x86_emulate_ctxt *ctxt)
5115 {
5116 	emul_to_vcpu(ctxt)->arch.halt_request = 1;
5117 }
5118 
5119 static void emulator_get_fpu(struct x86_emulate_ctxt *ctxt)
5120 {
5121 	preempt_disable();
5122 	kvm_load_guest_fpu(emul_to_vcpu(ctxt));
5123 }
5124 
5125 static void emulator_put_fpu(struct x86_emulate_ctxt *ctxt)
5126 {
5127 	preempt_enable();
5128 }
5129 
5130 static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
5131 			      struct x86_instruction_info *info,
5132 			      enum x86_intercept_stage stage)
5133 {
5134 	return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage);
5135 }
5136 
5137 static void emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
5138 			       u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
5139 {
5140 	kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx);
5141 }
5142 
5143 static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg)
5144 {
5145 	return kvm_register_read(emul_to_vcpu(ctxt), reg);
5146 }
5147 
5148 static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val)
5149 {
5150 	kvm_register_write(emul_to_vcpu(ctxt), reg, val);
5151 }
5152 
5153 static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked)
5154 {
5155 	kvm_x86_ops->set_nmi_mask(emul_to_vcpu(ctxt), masked);
5156 }
5157 
5158 static const struct x86_emulate_ops emulate_ops = {
5159 	.read_gpr            = emulator_read_gpr,
5160 	.write_gpr           = emulator_write_gpr,
5161 	.read_std            = kvm_read_guest_virt_system,
5162 	.write_std           = kvm_write_guest_virt_system,
5163 	.read_phys           = kvm_read_guest_phys_system,
5164 	.fetch               = kvm_fetch_guest_virt,
5165 	.read_emulated       = emulator_read_emulated,
5166 	.write_emulated      = emulator_write_emulated,
5167 	.cmpxchg_emulated    = emulator_cmpxchg_emulated,
5168 	.invlpg              = emulator_invlpg,
5169 	.pio_in_emulated     = emulator_pio_in_emulated,
5170 	.pio_out_emulated    = emulator_pio_out_emulated,
5171 	.get_segment         = emulator_get_segment,
5172 	.set_segment         = emulator_set_segment,
5173 	.get_cached_segment_base = emulator_get_cached_segment_base,
5174 	.get_gdt             = emulator_get_gdt,
5175 	.get_idt	     = emulator_get_idt,
5176 	.set_gdt             = emulator_set_gdt,
5177 	.set_idt	     = emulator_set_idt,
5178 	.get_cr              = emulator_get_cr,
5179 	.set_cr              = emulator_set_cr,
5180 	.cpl                 = emulator_get_cpl,
5181 	.get_dr              = emulator_get_dr,
5182 	.set_dr              = emulator_set_dr,
5183 	.get_smbase          = emulator_get_smbase,
5184 	.set_smbase          = emulator_set_smbase,
5185 	.set_msr             = emulator_set_msr,
5186 	.get_msr             = emulator_get_msr,
5187 	.check_pmc	     = emulator_check_pmc,
5188 	.read_pmc            = emulator_read_pmc,
5189 	.halt                = emulator_halt,
5190 	.wbinvd              = emulator_wbinvd,
5191 	.fix_hypercall       = emulator_fix_hypercall,
5192 	.get_fpu             = emulator_get_fpu,
5193 	.put_fpu             = emulator_put_fpu,
5194 	.intercept           = emulator_intercept,
5195 	.get_cpuid           = emulator_get_cpuid,
5196 	.set_nmi_mask        = emulator_set_nmi_mask,
5197 };
5198 
5199 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
5200 {
5201 	u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu);
5202 	/*
5203 	 * an sti; sti; sequence only disable interrupts for the first
5204 	 * instruction. So, if the last instruction, be it emulated or
5205 	 * not, left the system with the INT_STI flag enabled, it
5206 	 * means that the last instruction is an sti. We should not
5207 	 * leave the flag on in this case. The same goes for mov ss
5208 	 */
5209 	if (int_shadow & mask)
5210 		mask = 0;
5211 	if (unlikely(int_shadow || mask)) {
5212 		kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
5213 		if (!mask)
5214 			kvm_make_request(KVM_REQ_EVENT, vcpu);
5215 	}
5216 }
5217 
5218 static bool inject_emulated_exception(struct kvm_vcpu *vcpu)
5219 {
5220 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
5221 	if (ctxt->exception.vector == PF_VECTOR)
5222 		return kvm_propagate_fault(vcpu, &ctxt->exception);
5223 
5224 	if (ctxt->exception.error_code_valid)
5225 		kvm_queue_exception_e(vcpu, ctxt->exception.vector,
5226 				      ctxt->exception.error_code);
5227 	else
5228 		kvm_queue_exception(vcpu, ctxt->exception.vector);
5229 	return false;
5230 }
5231 
5232 static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
5233 {
5234 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
5235 	int cs_db, cs_l;
5236 
5237 	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
5238 
5239 	ctxt->eflags = kvm_get_rflags(vcpu);
5240 	ctxt->eip = kvm_rip_read(vcpu);
5241 	ctxt->mode = (!is_protmode(vcpu))		? X86EMUL_MODE_REAL :
5242 		     (ctxt->eflags & X86_EFLAGS_VM)	? X86EMUL_MODE_VM86 :
5243 		     (cs_l && is_long_mode(vcpu))	? X86EMUL_MODE_PROT64 :
5244 		     cs_db				? X86EMUL_MODE_PROT32 :
5245 							  X86EMUL_MODE_PROT16;
5246 	BUILD_BUG_ON(HF_GUEST_MASK != X86EMUL_GUEST_MASK);
5247 	BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK);
5248 	BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK);
5249 	ctxt->emul_flags = vcpu->arch.hflags;
5250 
5251 	init_decode_cache(ctxt);
5252 	vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
5253 }
5254 
5255 int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
5256 {
5257 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
5258 	int ret;
5259 
5260 	init_emulate_ctxt(vcpu);
5261 
5262 	ctxt->op_bytes = 2;
5263 	ctxt->ad_bytes = 2;
5264 	ctxt->_eip = ctxt->eip + inc_eip;
5265 	ret = emulate_int_real(ctxt, irq);
5266 
5267 	if (ret != X86EMUL_CONTINUE)
5268 		return EMULATE_FAIL;
5269 
5270 	ctxt->eip = ctxt->_eip;
5271 	kvm_rip_write(vcpu, ctxt->eip);
5272 	kvm_set_rflags(vcpu, ctxt->eflags);
5273 
5274 	if (irq == NMI_VECTOR)
5275 		vcpu->arch.nmi_pending = 0;
5276 	else
5277 		vcpu->arch.interrupt.pending = false;
5278 
5279 	return EMULATE_DONE;
5280 }
5281 EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt);
5282 
5283 static int handle_emulation_failure(struct kvm_vcpu *vcpu)
5284 {
5285 	int r = EMULATE_DONE;
5286 
5287 	++vcpu->stat.insn_emulation_fail;
5288 	trace_kvm_emulate_insn_failed(vcpu);
5289 	if (!is_guest_mode(vcpu) && kvm_x86_ops->get_cpl(vcpu) == 0) {
5290 		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
5291 		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
5292 		vcpu->run->internal.ndata = 0;
5293 		r = EMULATE_FAIL;
5294 	}
5295 	kvm_queue_exception(vcpu, UD_VECTOR);
5296 
5297 	return r;
5298 }
5299 
5300 static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
5301 				  bool write_fault_to_shadow_pgtable,
5302 				  int emulation_type)
5303 {
5304 	gpa_t gpa = cr2;
5305 	kvm_pfn_t pfn;
5306 
5307 	if (emulation_type & EMULTYPE_NO_REEXECUTE)
5308 		return false;
5309 
5310 	if (!vcpu->arch.mmu.direct_map) {
5311 		/*
5312 		 * Write permission should be allowed since only
5313 		 * write access need to be emulated.
5314 		 */
5315 		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
5316 
5317 		/*
5318 		 * If the mapping is invalid in guest, let cpu retry
5319 		 * it to generate fault.
5320 		 */
5321 		if (gpa == UNMAPPED_GVA)
5322 			return true;
5323 	}
5324 
5325 	/*
5326 	 * Do not retry the unhandleable instruction if it faults on the
5327 	 * readonly host memory, otherwise it will goto a infinite loop:
5328 	 * retry instruction -> write #PF -> emulation fail -> retry
5329 	 * instruction -> ...
5330 	 */
5331 	pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));
5332 
5333 	/*
5334 	 * If the instruction failed on the error pfn, it can not be fixed,
5335 	 * report the error to userspace.
5336 	 */
5337 	if (is_error_noslot_pfn(pfn))
5338 		return false;
5339 
5340 	kvm_release_pfn_clean(pfn);
5341 
5342 	/* The instructions are well-emulated on direct mmu. */
5343 	if (vcpu->arch.mmu.direct_map) {
5344 		unsigned int indirect_shadow_pages;
5345 
5346 		spin_lock(&vcpu->kvm->mmu_lock);
5347 		indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
5348 		spin_unlock(&vcpu->kvm->mmu_lock);
5349 
5350 		if (indirect_shadow_pages)
5351 			kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
5352 
5353 		return true;
5354 	}
5355 
5356 	/*
5357 	 * if emulation was due to access to shadowed page table
5358 	 * and it failed try to unshadow page and re-enter the
5359 	 * guest to let CPU execute the instruction.
5360 	 */
5361 	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
5362 
5363 	/*
5364 	 * If the access faults on its page table, it can not
5365 	 * be fixed by unprotecting shadow page and it should
5366 	 * be reported to userspace.
5367 	 */
5368 	return !write_fault_to_shadow_pgtable;
5369 }
5370 
5371 static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
5372 			      unsigned long cr2,  int emulation_type)
5373 {
5374 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
5375 	unsigned long last_retry_eip, last_retry_addr, gpa = cr2;
5376 
5377 	last_retry_eip = vcpu->arch.last_retry_eip;
5378 	last_retry_addr = vcpu->arch.last_retry_addr;
5379 
5380 	/*
5381 	 * If the emulation is caused by #PF and it is non-page_table
5382 	 * writing instruction, it means the VM-EXIT is caused by shadow
5383 	 * page protected, we can zap the shadow page and retry this
5384 	 * instruction directly.
5385 	 *
5386 	 * Note: if the guest uses a non-page-table modifying instruction
5387 	 * on the PDE that points to the instruction, then we will unmap
5388 	 * the instruction and go to an infinite loop. So, we cache the
5389 	 * last retried eip and the last fault address, if we meet the eip
5390 	 * and the address again, we can break out of the potential infinite
5391 	 * loop.
5392 	 */
5393 	vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
5394 
5395 	if (!(emulation_type & EMULTYPE_RETRY))
5396 		return false;
5397 
5398 	if (x86_page_table_writing_insn(ctxt))
5399 		return false;
5400 
5401 	if (ctxt->eip == last_retry_eip && last_retry_addr == cr2)
5402 		return false;
5403 
5404 	vcpu->arch.last_retry_eip = ctxt->eip;
5405 	vcpu->arch.last_retry_addr = cr2;
5406 
5407 	if (!vcpu->arch.mmu.direct_map)
5408 		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
5409 
5410 	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
5411 
5412 	return true;
5413 }
5414 
5415 static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
5416 static int complete_emulated_pio(struct kvm_vcpu *vcpu);
5417 
5418 static void kvm_smm_changed(struct kvm_vcpu *vcpu)
5419 {
5420 	if (!(vcpu->arch.hflags & HF_SMM_MASK)) {
5421 		/* This is a good place to trace that we are exiting SMM.  */
5422 		trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, false);
5423 
5424 		/* Process a latched INIT or SMI, if any.  */
5425 		kvm_make_request(KVM_REQ_EVENT, vcpu);
5426 	}
5427 
5428 	kvm_mmu_reset_context(vcpu);
5429 }
5430 
5431 static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags)
5432 {
5433 	unsigned changed = vcpu->arch.hflags ^ emul_flags;
5434 
5435 	vcpu->arch.hflags = emul_flags;
5436 
5437 	if (changed & HF_SMM_MASK)
5438 		kvm_smm_changed(vcpu);
5439 }
5440 
5441 static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
5442 				unsigned long *db)
5443 {
5444 	u32 dr6 = 0;
5445 	int i;
5446 	u32 enable, rwlen;
5447 
5448 	enable = dr7;
5449 	rwlen = dr7 >> 16;
5450 	for (i = 0; i < 4; i++, enable >>= 2, rwlen >>= 4)
5451 		if ((enable & 3) && (rwlen & 15) == type && db[i] == addr)
5452 			dr6 |= (1 << i);
5453 	return dr6;
5454 }
5455 
5456 static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r)
5457 {
5458 	struct kvm_run *kvm_run = vcpu->run;
5459 
5460 	/*
5461 	 * rflags is the old, "raw" value of the flags.  The new value has
5462 	 * not been saved yet.
5463 	 *
5464 	 * This is correct even for TF set by the guest, because "the
5465 	 * processor will not generate this exception after the instruction
5466 	 * that sets the TF flag".
5467 	 */
5468 	if (unlikely(rflags & X86_EFLAGS_TF)) {
5469 		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
5470 			kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 |
5471 						  DR6_RTM;
5472 			kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
5473 			kvm_run->debug.arch.exception = DB_VECTOR;
5474 			kvm_run->exit_reason = KVM_EXIT_DEBUG;
5475 			*r = EMULATE_USER_EXIT;
5476 		} else {
5477 			/*
5478 			 * "Certain debug exceptions may clear bit 0-3.  The
5479 			 * remaining contents of the DR6 register are never
5480 			 * cleared by the processor".
5481 			 */
5482 			vcpu->arch.dr6 &= ~15;
5483 			vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
5484 			kvm_queue_exception(vcpu, DB_VECTOR);
5485 		}
5486 	}
5487 }
5488 
5489 int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
5490 {
5491 	unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
5492 	int r = EMULATE_DONE;
5493 
5494 	kvm_x86_ops->skip_emulated_instruction(vcpu);
5495 	kvm_vcpu_check_singlestep(vcpu, rflags, &r);
5496 	return r == EMULATE_DONE;
5497 }
5498 EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);
5499 
5500 static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
5501 {
5502 	if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) &&
5503 	    (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) {
5504 		struct kvm_run *kvm_run = vcpu->run;
5505 		unsigned long eip = kvm_get_linear_rip(vcpu);
5506 		u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
5507 					   vcpu->arch.guest_debug_dr7,
5508 					   vcpu->arch.eff_db);
5509 
5510 		if (dr6 != 0) {
5511 			kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM;
5512 			kvm_run->debug.arch.pc = eip;
5513 			kvm_run->debug.arch.exception = DB_VECTOR;
5514 			kvm_run->exit_reason = KVM_EXIT_DEBUG;
5515 			*r = EMULATE_USER_EXIT;
5516 			return true;
5517 		}
5518 	}
5519 
5520 	if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) &&
5521 	    !(kvm_get_rflags(vcpu) & X86_EFLAGS_RF)) {
5522 		unsigned long eip = kvm_get_linear_rip(vcpu);
5523 		u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
5524 					   vcpu->arch.dr7,
5525 					   vcpu->arch.db);
5526 
5527 		if (dr6 != 0) {
5528 			vcpu->arch.dr6 &= ~15;
5529 			vcpu->arch.dr6 |= dr6 | DR6_RTM;
5530 			kvm_queue_exception(vcpu, DB_VECTOR);
5531 			*r = EMULATE_DONE;
5532 			return true;
5533 		}
5534 	}
5535 
5536 	return false;
5537 }
5538 
5539 int x86_emulate_instruction(struct kvm_vcpu *vcpu,
5540 			    unsigned long cr2,
5541 			    int emulation_type,
5542 			    void *insn,
5543 			    int insn_len)
5544 {
5545 	int r;
5546 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
5547 	bool writeback = true;
5548 	bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable;
5549 
5550 	/*
5551 	 * Clear write_fault_to_shadow_pgtable here to ensure it is
5552 	 * never reused.
5553 	 */
5554 	vcpu->arch.write_fault_to_shadow_pgtable = false;
5555 	kvm_clear_exception_queue(vcpu);
5556 
5557 	if (!(emulation_type & EMULTYPE_NO_DECODE)) {
5558 		init_emulate_ctxt(vcpu);
5559 
5560 		/*
5561 		 * We will reenter on the same instruction since
5562 		 * we do not set complete_userspace_io.  This does not
5563 		 * handle watchpoints yet, those would be handled in
5564 		 * the emulate_ops.
5565 		 */
5566 		if (kvm_vcpu_check_breakpoint(vcpu, &r))
5567 			return r;
5568 
5569 		ctxt->interruptibility = 0;
5570 		ctxt->have_exception = false;
5571 		ctxt->exception.vector = -1;
5572 		ctxt->perm_ok = false;
5573 
5574 		ctxt->ud = emulation_type & EMULTYPE_TRAP_UD;
5575 
5576 		r = x86_decode_insn(ctxt, insn, insn_len);
5577 
5578 		trace_kvm_emulate_insn_start(vcpu);
5579 		++vcpu->stat.insn_emulation;
5580 		if (r != EMULATION_OK)  {
5581 			if (emulation_type & EMULTYPE_TRAP_UD)
5582 				return EMULATE_FAIL;
5583 			if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
5584 						emulation_type))
5585 				return EMULATE_DONE;
5586 			if (emulation_type & EMULTYPE_SKIP)
5587 				return EMULATE_FAIL;
5588 			return handle_emulation_failure(vcpu);
5589 		}
5590 	}
5591 
5592 	if (emulation_type & EMULTYPE_SKIP) {
5593 		kvm_rip_write(vcpu, ctxt->_eip);
5594 		if (ctxt->eflags & X86_EFLAGS_RF)
5595 			kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF);
5596 		return EMULATE_DONE;
5597 	}
5598 
5599 	if (retry_instruction(ctxt, cr2, emulation_type))
5600 		return EMULATE_DONE;
5601 
5602 	/* this is needed for vmware backdoor interface to work since it
5603 	   changes registers values  during IO operation */
5604 	if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
5605 		vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
5606 		emulator_invalidate_register_cache(ctxt);
5607 	}
5608 
5609 restart:
5610 	r = x86_emulate_insn(ctxt);
5611 
5612 	if (r == EMULATION_INTERCEPTED)
5613 		return EMULATE_DONE;
5614 
5615 	if (r == EMULATION_FAILED) {
5616 		if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
5617 					emulation_type))
5618 			return EMULATE_DONE;
5619 
5620 		return handle_emulation_failure(vcpu);
5621 	}
5622 
5623 	if (ctxt->have_exception) {
5624 		r = EMULATE_DONE;
5625 		if (inject_emulated_exception(vcpu))
5626 			return r;
5627 	} else if (vcpu->arch.pio.count) {
5628 		if (!vcpu->arch.pio.in) {
5629 			/* FIXME: return into emulator if single-stepping.  */
5630 			vcpu->arch.pio.count = 0;
5631 		} else {
5632 			writeback = false;
5633 			vcpu->arch.complete_userspace_io = complete_emulated_pio;
5634 		}
5635 		r = EMULATE_USER_EXIT;
5636 	} else if (vcpu->mmio_needed) {
5637 		if (!vcpu->mmio_is_write)
5638 			writeback = false;
5639 		r = EMULATE_USER_EXIT;
5640 		vcpu->arch.complete_userspace_io = complete_emulated_mmio;
5641 	} else if (r == EMULATION_RESTART)
5642 		goto restart;
5643 	else
5644 		r = EMULATE_DONE;
5645 
5646 	if (writeback) {
5647 		unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
5648 		toggle_interruptibility(vcpu, ctxt->interruptibility);
5649 		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
5650 		if (vcpu->arch.hflags != ctxt->emul_flags)
5651 			kvm_set_hflags(vcpu, ctxt->emul_flags);
5652 		kvm_rip_write(vcpu, ctxt->eip);
5653 		if (r == EMULATE_DONE)
5654 			kvm_vcpu_check_singlestep(vcpu, rflags, &r);
5655 		if (!ctxt->have_exception ||
5656 		    exception_type(ctxt->exception.vector) == EXCPT_TRAP)
5657 			__kvm_set_rflags(vcpu, ctxt->eflags);
5658 
5659 		/*
5660 		 * For STI, interrupts are shadowed; so KVM_REQ_EVENT will
5661 		 * do nothing, and it will be requested again as soon as
5662 		 * the shadow expires.  But we still need to check here,
5663 		 * because POPF has no interrupt shadow.
5664 		 */
5665 		if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF))
5666 			kvm_make_request(KVM_REQ_EVENT, vcpu);
5667 	} else
5668 		vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
5669 
5670 	return r;
5671 }
5672 EXPORT_SYMBOL_GPL(x86_emulate_instruction);
5673 
5674 int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
5675 {
5676 	unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
5677 	int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
5678 					    size, port, &val, 1);
5679 	/* do not return to emulator after return from userspace */
5680 	vcpu->arch.pio.count = 0;
5681 	return ret;
5682 }
5683 EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
5684 
5685 static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
5686 {
5687 	unsigned long val;
5688 
5689 	/* We should only ever be called with arch.pio.count equal to 1 */
5690 	BUG_ON(vcpu->arch.pio.count != 1);
5691 
5692 	/* For size less than 4 we merge, else we zero extend */
5693 	val = (vcpu->arch.pio.size < 4) ? kvm_register_read(vcpu, VCPU_REGS_RAX)
5694 					: 0;
5695 
5696 	/*
5697 	 * Since vcpu->arch.pio.count == 1 let emulator_pio_in_emulated perform
5698 	 * the copy and tracing
5699 	 */
5700 	emulator_pio_in_emulated(&vcpu->arch.emulate_ctxt, vcpu->arch.pio.size,
5701 				 vcpu->arch.pio.port, &val, 1);
5702 	kvm_register_write(vcpu, VCPU_REGS_RAX, val);
5703 
5704 	return 1;
5705 }
5706 
5707 int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size, unsigned short port)
5708 {
5709 	unsigned long val;
5710 	int ret;
5711 
5712 	/* For size less than 4 we merge, else we zero extend */
5713 	val = (size < 4) ? kvm_register_read(vcpu, VCPU_REGS_RAX) : 0;
5714 
5715 	ret = emulator_pio_in_emulated(&vcpu->arch.emulate_ctxt, size, port,
5716 				       &val, 1);
5717 	if (ret) {
5718 		kvm_register_write(vcpu, VCPU_REGS_RAX, val);
5719 		return ret;
5720 	}
5721 
5722 	vcpu->arch.complete_userspace_io = complete_fast_pio_in;
5723 
5724 	return 0;
5725 }
5726 EXPORT_SYMBOL_GPL(kvm_fast_pio_in);
5727 
5728 static int kvmclock_cpu_down_prep(unsigned int cpu)
5729 {
5730 	__this_cpu_write(cpu_tsc_khz, 0);
5731 	return 0;
5732 }
5733 
5734 static void tsc_khz_changed(void *data)
5735 {
5736 	struct cpufreq_freqs *freq = data;
5737 	unsigned long khz = 0;
5738 
5739 	if (data)
5740 		khz = freq->new;
5741 	else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
5742 		khz = cpufreq_quick_get(raw_smp_processor_id());
5743 	if (!khz)
5744 		khz = tsc_khz;
5745 	__this_cpu_write(cpu_tsc_khz, khz);
5746 }
5747 
5748 static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
5749 				     void *data)
5750 {
5751 	struct cpufreq_freqs *freq = data;
5752 	struct kvm *kvm;
5753 	struct kvm_vcpu *vcpu;
5754 	int i, send_ipi = 0;
5755 
5756 	/*
5757 	 * We allow guests to temporarily run on slowing clocks,
5758 	 * provided we notify them after, or to run on accelerating
5759 	 * clocks, provided we notify them before.  Thus time never
5760 	 * goes backwards.
5761 	 *
5762 	 * However, we have a problem.  We can't atomically update
5763 	 * the frequency of a given CPU from this function; it is
5764 	 * merely a notifier, which can be called from any CPU.
5765 	 * Changing the TSC frequency at arbitrary points in time
5766 	 * requires a recomputation of local variables related to
5767 	 * the TSC for each VCPU.  We must flag these local variables
5768 	 * to be updated and be sure the update takes place with the
5769 	 * new frequency before any guests proceed.
5770 	 *
5771 	 * Unfortunately, the combination of hotplug CPU and frequency
5772 	 * change creates an intractable locking scenario; the order
5773 	 * of when these callouts happen is undefined with respect to
5774 	 * CPU hotplug, and they can race with each other.  As such,
5775 	 * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is
5776 	 * undefined; you can actually have a CPU frequency change take
5777 	 * place in between the computation of X and the setting of the
5778 	 * variable.  To protect against this problem, all updates of
5779 	 * the per_cpu tsc_khz variable are done in an interrupt
5780 	 * protected IPI, and all callers wishing to update the value
5781 	 * must wait for a synchronous IPI to complete (which is trivial
5782 	 * if the caller is on the CPU already).  This establishes the
5783 	 * necessary total order on variable updates.
5784 	 *
5785 	 * Note that because a guest time update may take place
5786 	 * anytime after the setting of the VCPU's request bit, the
5787 	 * correct TSC value must be set before the request.  However,
5788 	 * to ensure the update actually makes it to any guest which
5789 	 * starts running in hardware virtualization between the set
5790 	 * and the acquisition of the spinlock, we must also ping the
5791 	 * CPU after setting the request bit.
5792 	 *
5793 	 */
5794 
5795 	if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
5796 		return 0;
5797 	if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
5798 		return 0;
5799 
5800 	smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
5801 
5802 	spin_lock(&kvm_lock);
5803 	list_for_each_entry(kvm, &vm_list, vm_list) {
5804 		kvm_for_each_vcpu(i, vcpu, kvm) {
5805 			if (vcpu->cpu != freq->cpu)
5806 				continue;
5807 			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
5808 			if (vcpu->cpu != smp_processor_id())
5809 				send_ipi = 1;
5810 		}
5811 	}
5812 	spin_unlock(&kvm_lock);
5813 
5814 	if (freq->old < freq->new && send_ipi) {
5815 		/*
5816 		 * We upscale the frequency.  Must make the guest
5817 		 * doesn't see old kvmclock values while running with
5818 		 * the new frequency, otherwise we risk the guest sees
5819 		 * time go backwards.
5820 		 *
5821 		 * In case we update the frequency for another cpu
5822 		 * (which might be in guest context) send an interrupt
5823 		 * to kick the cpu out of guest context.  Next time
5824 		 * guest context is entered kvmclock will be updated,
5825 		 * so the guest will not see stale values.
5826 		 */
5827 		smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
5828 	}
5829 	return 0;
5830 }
5831 
5832 static struct notifier_block kvmclock_cpufreq_notifier_block = {
5833 	.notifier_call  = kvmclock_cpufreq_notifier
5834 };
5835 
5836 static int kvmclock_cpu_online(unsigned int cpu)
5837 {
5838 	tsc_khz_changed(NULL);
5839 	return 0;
5840 }
5841 
5842 static void kvm_timer_init(void)
5843 {
5844 	max_tsc_khz = tsc_khz;
5845 
5846 	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
5847 #ifdef CONFIG_CPU_FREQ
5848 		struct cpufreq_policy policy;
5849 		int cpu;
5850 
5851 		memset(&policy, 0, sizeof(policy));
5852 		cpu = get_cpu();
5853 		cpufreq_get_policy(&policy, cpu);
5854 		if (policy.cpuinfo.max_freq)
5855 			max_tsc_khz = policy.cpuinfo.max_freq;
5856 		put_cpu();
5857 #endif
5858 		cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
5859 					  CPUFREQ_TRANSITION_NOTIFIER);
5860 	}
5861 	pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz);
5862 
5863 	cpuhp_setup_state(CPUHP_AP_X86_KVM_CLK_ONLINE, "x86/kvm/clk:online",
5864 			  kvmclock_cpu_online, kvmclock_cpu_down_prep);
5865 }
5866 
5867 static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
5868 
5869 int kvm_is_in_guest(void)
5870 {
5871 	return __this_cpu_read(current_vcpu) != NULL;
5872 }
5873 
5874 static int kvm_is_user_mode(void)
5875 {
5876 	int user_mode = 3;
5877 
5878 	if (__this_cpu_read(current_vcpu))
5879 		user_mode = kvm_x86_ops->get_cpl(__this_cpu_read(current_vcpu));
5880 
5881 	return user_mode != 0;
5882 }
5883 
5884 static unsigned long kvm_get_guest_ip(void)
5885 {
5886 	unsigned long ip = 0;
5887 
5888 	if (__this_cpu_read(current_vcpu))
5889 		ip = kvm_rip_read(__this_cpu_read(current_vcpu));
5890 
5891 	return ip;
5892 }
5893 
5894 static struct perf_guest_info_callbacks kvm_guest_cbs = {
5895 	.is_in_guest		= kvm_is_in_guest,
5896 	.is_user_mode		= kvm_is_user_mode,
5897 	.get_guest_ip		= kvm_get_guest_ip,
5898 };
5899 
5900 void kvm_before_handle_nmi(struct kvm_vcpu *vcpu)
5901 {
5902 	__this_cpu_write(current_vcpu, vcpu);
5903 }
5904 EXPORT_SYMBOL_GPL(kvm_before_handle_nmi);
5905 
5906 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
5907 {
5908 	__this_cpu_write(current_vcpu, NULL);
5909 }
5910 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
5911 
5912 static void kvm_set_mmio_spte_mask(void)
5913 {
5914 	u64 mask;
5915 	int maxphyaddr = boot_cpu_data.x86_phys_bits;
5916 
5917 	/*
5918 	 * Set the reserved bits and the present bit of an paging-structure
5919 	 * entry to generate page fault with PFER.RSV = 1.
5920 	 */
5921 	 /* Mask the reserved physical address bits. */
5922 	mask = rsvd_bits(maxphyaddr, 51);
5923 
5924 	/* Bit 62 is always reserved for 32bit host. */
5925 	mask |= 0x3ull << 62;
5926 
5927 	/* Set the present bit. */
5928 	mask |= 1ull;
5929 
5930 #ifdef CONFIG_X86_64
5931 	/*
5932 	 * If reserved bit is not supported, clear the present bit to disable
5933 	 * mmio page fault.
5934 	 */
5935 	if (maxphyaddr == 52)
5936 		mask &= ~1ull;
5937 #endif
5938 
5939 	kvm_mmu_set_mmio_spte_mask(mask);
5940 }
5941 
5942 #ifdef CONFIG_X86_64
5943 static void pvclock_gtod_update_fn(struct work_struct *work)
5944 {
5945 	struct kvm *kvm;
5946 
5947 	struct kvm_vcpu *vcpu;
5948 	int i;
5949 
5950 	spin_lock(&kvm_lock);
5951 	list_for_each_entry(kvm, &vm_list, vm_list)
5952 		kvm_for_each_vcpu(i, vcpu, kvm)
5953 			kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
5954 	atomic_set(&kvm_guest_has_master_clock, 0);
5955 	spin_unlock(&kvm_lock);
5956 }
5957 
5958 static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);
5959 
5960 /*
5961  * Notification about pvclock gtod data update.
5962  */
5963 static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused,
5964 			       void *priv)
5965 {
5966 	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
5967 	struct timekeeper *tk = priv;
5968 
5969 	update_pvclock_gtod(tk);
5970 
5971 	/* disable master clock if host does not trust, or does not
5972 	 * use, TSC clocksource
5973 	 */
5974 	if (gtod->clock.vclock_mode != VCLOCK_TSC &&
5975 	    atomic_read(&kvm_guest_has_master_clock) != 0)
5976 		queue_work(system_long_wq, &pvclock_gtod_work);
5977 
5978 	return 0;
5979 }
5980 
5981 static struct notifier_block pvclock_gtod_notifier = {
5982 	.notifier_call = pvclock_gtod_notify,
5983 };
5984 #endif
5985 
5986 int kvm_arch_init(void *opaque)
5987 {
5988 	int r;
5989 	struct kvm_x86_ops *ops = opaque;
5990 
5991 	if (kvm_x86_ops) {
5992 		printk(KERN_ERR "kvm: already loaded the other module\n");
5993 		r = -EEXIST;
5994 		goto out;
5995 	}
5996 
5997 	if (!ops->cpu_has_kvm_support()) {
5998 		printk(KERN_ERR "kvm: no hardware support\n");
5999 		r = -EOPNOTSUPP;
6000 		goto out;
6001 	}
6002 	if (ops->disabled_by_bios()) {
6003 		printk(KERN_ERR "kvm: disabled by bios\n");
6004 		r = -EOPNOTSUPP;
6005 		goto out;
6006 	}
6007 
6008 	r = -ENOMEM;
6009 	shared_msrs = alloc_percpu(struct kvm_shared_msrs);
6010 	if (!shared_msrs) {
6011 		printk(KERN_ERR "kvm: failed to allocate percpu kvm_shared_msrs\n");
6012 		goto out;
6013 	}
6014 
6015 	r = kvm_mmu_module_init();
6016 	if (r)
6017 		goto out_free_percpu;
6018 
6019 	kvm_set_mmio_spte_mask();
6020 
6021 	kvm_x86_ops = ops;
6022 
6023 	kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
6024 			PT_DIRTY_MASK, PT64_NX_MASK, 0,
6025 			PT_PRESENT_MASK);
6026 	kvm_timer_init();
6027 
6028 	perf_register_guest_info_callbacks(&kvm_guest_cbs);
6029 
6030 	if (boot_cpu_has(X86_FEATURE_XSAVE))
6031 		host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
6032 
6033 	kvm_lapic_init();
6034 #ifdef CONFIG_X86_64
6035 	pvclock_gtod_register_notifier(&pvclock_gtod_notifier);
6036 #endif
6037 
6038 	return 0;
6039 
6040 out_free_percpu:
6041 	free_percpu(shared_msrs);
6042 out:
6043 	return r;
6044 }
6045 
6046 void kvm_arch_exit(void)
6047 {
6048 	perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
6049 
6050 	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
6051 		cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
6052 					    CPUFREQ_TRANSITION_NOTIFIER);
6053 	cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE);
6054 #ifdef CONFIG_X86_64
6055 	pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
6056 #endif
6057 	kvm_x86_ops = NULL;
6058 	kvm_mmu_module_exit();
6059 	free_percpu(shared_msrs);
6060 }
6061 
6062 int kvm_vcpu_halt(struct kvm_vcpu *vcpu)
6063 {
6064 	++vcpu->stat.halt_exits;
6065 	if (lapic_in_kernel(vcpu)) {
6066 		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
6067 		return 1;
6068 	} else {
6069 		vcpu->run->exit_reason = KVM_EXIT_HLT;
6070 		return 0;
6071 	}
6072 }
6073 EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
6074 
6075 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
6076 {
6077 	int ret = kvm_skip_emulated_instruction(vcpu);
6078 	/*
6079 	 * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered
6080 	 * KVM_EXIT_DEBUG here.
6081 	 */
6082 	return kvm_vcpu_halt(vcpu) && ret;
6083 }
6084 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
6085 
6086 /*
6087  * kvm_pv_kick_cpu_op:  Kick a vcpu.
6088  *
6089  * @apicid - apicid of vcpu to be kicked.
6090  */
6091 static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid)
6092 {
6093 	struct kvm_lapic_irq lapic_irq;
6094 
6095 	lapic_irq.shorthand = 0;
6096 	lapic_irq.dest_mode = 0;
6097 	lapic_irq.dest_id = apicid;
6098 	lapic_irq.msi_redir_hint = false;
6099 
6100 	lapic_irq.delivery_mode = APIC_DM_REMRD;
6101 	kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL);
6102 }
6103 
6104 void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu)
6105 {
6106 	vcpu->arch.apicv_active = false;
6107 	kvm_x86_ops->refresh_apicv_exec_ctrl(vcpu);
6108 }
6109 
6110 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
6111 {
6112 	unsigned long nr, a0, a1, a2, a3, ret;
6113 	int op_64_bit, r;
6114 
6115 	r = kvm_skip_emulated_instruction(vcpu);
6116 
6117 	if (kvm_hv_hypercall_enabled(vcpu->kvm))
6118 		return kvm_hv_hypercall(vcpu);
6119 
6120 	nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
6121 	a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
6122 	a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
6123 	a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
6124 	a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
6125 
6126 	trace_kvm_hypercall(nr, a0, a1, a2, a3);
6127 
6128 	op_64_bit = is_64_bit_mode(vcpu);
6129 	if (!op_64_bit) {
6130 		nr &= 0xFFFFFFFF;
6131 		a0 &= 0xFFFFFFFF;
6132 		a1 &= 0xFFFFFFFF;
6133 		a2 &= 0xFFFFFFFF;
6134 		a3 &= 0xFFFFFFFF;
6135 	}
6136 
6137 	if (kvm_x86_ops->get_cpl(vcpu) != 0) {
6138 		ret = -KVM_EPERM;
6139 		goto out;
6140 	}
6141 
6142 	switch (nr) {
6143 	case KVM_HC_VAPIC_POLL_IRQ:
6144 		ret = 0;
6145 		break;
6146 	case KVM_HC_KICK_CPU:
6147 		kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1);
6148 		ret = 0;
6149 		break;
6150 	default:
6151 		ret = -KVM_ENOSYS;
6152 		break;
6153 	}
6154 out:
6155 	if (!op_64_bit)
6156 		ret = (u32)ret;
6157 	kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
6158 	++vcpu->stat.hypercalls;
6159 	return r;
6160 }
6161 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
6162 
6163 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
6164 {
6165 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
6166 	char instruction[3];
6167 	unsigned long rip = kvm_rip_read(vcpu);
6168 
6169 	kvm_x86_ops->patch_hypercall(vcpu, instruction);
6170 
6171 	return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
6172 }
6173 
6174 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
6175 {
6176 	return vcpu->run->request_interrupt_window &&
6177 		likely(!pic_in_kernel(vcpu->kvm));
6178 }
6179 
6180 static void post_kvm_run_save(struct kvm_vcpu *vcpu)
6181 {
6182 	struct kvm_run *kvm_run = vcpu->run;
6183 
6184 	kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
6185 	kvm_run->flags = is_smm(vcpu) ? KVM_RUN_X86_SMM : 0;
6186 	kvm_run->cr8 = kvm_get_cr8(vcpu);
6187 	kvm_run->apic_base = kvm_get_apic_base(vcpu);
6188 	kvm_run->ready_for_interrupt_injection =
6189 		pic_in_kernel(vcpu->kvm) ||
6190 		kvm_vcpu_ready_for_interrupt_injection(vcpu);
6191 }
6192 
6193 static void update_cr8_intercept(struct kvm_vcpu *vcpu)
6194 {
6195 	int max_irr, tpr;
6196 
6197 	if (!kvm_x86_ops->update_cr8_intercept)
6198 		return;
6199 
6200 	if (!lapic_in_kernel(vcpu))
6201 		return;
6202 
6203 	if (vcpu->arch.apicv_active)
6204 		return;
6205 
6206 	if (!vcpu->arch.apic->vapic_addr)
6207 		max_irr = kvm_lapic_find_highest_irr(vcpu);
6208 	else
6209 		max_irr = -1;
6210 
6211 	if (max_irr != -1)
6212 		max_irr >>= 4;
6213 
6214 	tpr = kvm_lapic_get_cr8(vcpu);
6215 
6216 	kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
6217 }
6218 
6219 static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
6220 {
6221 	int r;
6222 
6223 	/* try to reinject previous events if any */
6224 	if (vcpu->arch.exception.pending) {
6225 		trace_kvm_inj_exception(vcpu->arch.exception.nr,
6226 					vcpu->arch.exception.has_error_code,
6227 					vcpu->arch.exception.error_code);
6228 
6229 		if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT)
6230 			__kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) |
6231 					     X86_EFLAGS_RF);
6232 
6233 		if (vcpu->arch.exception.nr == DB_VECTOR &&
6234 		    (vcpu->arch.dr7 & DR7_GD)) {
6235 			vcpu->arch.dr7 &= ~DR7_GD;
6236 			kvm_update_dr7(vcpu);
6237 		}
6238 
6239 		kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
6240 					  vcpu->arch.exception.has_error_code,
6241 					  vcpu->arch.exception.error_code,
6242 					  vcpu->arch.exception.reinject);
6243 		return 0;
6244 	}
6245 
6246 	if (vcpu->arch.nmi_injected) {
6247 		kvm_x86_ops->set_nmi(vcpu);
6248 		return 0;
6249 	}
6250 
6251 	if (vcpu->arch.interrupt.pending) {
6252 		kvm_x86_ops->set_irq(vcpu);
6253 		return 0;
6254 	}
6255 
6256 	if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
6257 		r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
6258 		if (r != 0)
6259 			return r;
6260 	}
6261 
6262 	/* try to inject new event if pending */
6263 	if (vcpu->arch.smi_pending && !is_smm(vcpu)) {
6264 		vcpu->arch.smi_pending = false;
6265 		enter_smm(vcpu);
6266 	} else if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
6267 		--vcpu->arch.nmi_pending;
6268 		vcpu->arch.nmi_injected = true;
6269 		kvm_x86_ops->set_nmi(vcpu);
6270 	} else if (kvm_cpu_has_injectable_intr(vcpu)) {
6271 		/*
6272 		 * Because interrupts can be injected asynchronously, we are
6273 		 * calling check_nested_events again here to avoid a race condition.
6274 		 * See https://lkml.org/lkml/2014/7/2/60 for discussion about this
6275 		 * proposal and current concerns.  Perhaps we should be setting
6276 		 * KVM_REQ_EVENT only on certain events and not unconditionally?
6277 		 */
6278 		if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
6279 			r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
6280 			if (r != 0)
6281 				return r;
6282 		}
6283 		if (kvm_x86_ops->interrupt_allowed(vcpu)) {
6284 			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
6285 					    false);
6286 			kvm_x86_ops->set_irq(vcpu);
6287 		}
6288 	}
6289 
6290 	return 0;
6291 }
6292 
6293 static void process_nmi(struct kvm_vcpu *vcpu)
6294 {
6295 	unsigned limit = 2;
6296 
6297 	/*
6298 	 * x86 is limited to one NMI running, and one NMI pending after it.
6299 	 * If an NMI is already in progress, limit further NMIs to just one.
6300 	 * Otherwise, allow two (and we'll inject the first one immediately).
6301 	 */
6302 	if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected)
6303 		limit = 1;
6304 
6305 	vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0);
6306 	vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit);
6307 	kvm_make_request(KVM_REQ_EVENT, vcpu);
6308 }
6309 
6310 #define put_smstate(type, buf, offset, val)			  \
6311 	*(type *)((buf) + (offset) - 0x7e00) = val
6312 
6313 static u32 enter_smm_get_segment_flags(struct kvm_segment *seg)
6314 {
6315 	u32 flags = 0;
6316 	flags |= seg->g       << 23;
6317 	flags |= seg->db      << 22;
6318 	flags |= seg->l       << 21;
6319 	flags |= seg->avl     << 20;
6320 	flags |= seg->present << 15;
6321 	flags |= seg->dpl     << 13;
6322 	flags |= seg->s       << 12;
6323 	flags |= seg->type    << 8;
6324 	return flags;
6325 }
6326 
6327 static void enter_smm_save_seg_32(struct kvm_vcpu *vcpu, char *buf, int n)
6328 {
6329 	struct kvm_segment seg;
6330 	int offset;
6331 
6332 	kvm_get_segment(vcpu, &seg, n);
6333 	put_smstate(u32, buf, 0x7fa8 + n * 4, seg.selector);
6334 
6335 	if (n < 3)
6336 		offset = 0x7f84 + n * 12;
6337 	else
6338 		offset = 0x7f2c + (n - 3) * 12;
6339 
6340 	put_smstate(u32, buf, offset + 8, seg.base);
6341 	put_smstate(u32, buf, offset + 4, seg.limit);
6342 	put_smstate(u32, buf, offset, enter_smm_get_segment_flags(&seg));
6343 }
6344 
6345 #ifdef CONFIG_X86_64
6346 static void enter_smm_save_seg_64(struct kvm_vcpu *vcpu, char *buf, int n)
6347 {
6348 	struct kvm_segment seg;
6349 	int offset;
6350 	u16 flags;
6351 
6352 	kvm_get_segment(vcpu, &seg, n);
6353 	offset = 0x7e00 + n * 16;
6354 
6355 	flags = enter_smm_get_segment_flags(&seg) >> 8;
6356 	put_smstate(u16, buf, offset, seg.selector);
6357 	put_smstate(u16, buf, offset + 2, flags);
6358 	put_smstate(u32, buf, offset + 4, seg.limit);
6359 	put_smstate(u64, buf, offset + 8, seg.base);
6360 }
6361 #endif
6362 
6363 static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf)
6364 {
6365 	struct desc_ptr dt;
6366 	struct kvm_segment seg;
6367 	unsigned long val;
6368 	int i;
6369 
6370 	put_smstate(u32, buf, 0x7ffc, kvm_read_cr0(vcpu));
6371 	put_smstate(u32, buf, 0x7ff8, kvm_read_cr3(vcpu));
6372 	put_smstate(u32, buf, 0x7ff4, kvm_get_rflags(vcpu));
6373 	put_smstate(u32, buf, 0x7ff0, kvm_rip_read(vcpu));
6374 
6375 	for (i = 0; i < 8; i++)
6376 		put_smstate(u32, buf, 0x7fd0 + i * 4, kvm_register_read(vcpu, i));
6377 
6378 	kvm_get_dr(vcpu, 6, &val);
6379 	put_smstate(u32, buf, 0x7fcc, (u32)val);
6380 	kvm_get_dr(vcpu, 7, &val);
6381 	put_smstate(u32, buf, 0x7fc8, (u32)val);
6382 
6383 	kvm_get_segment(vcpu, &seg, VCPU_SREG_TR);
6384 	put_smstate(u32, buf, 0x7fc4, seg.selector);
6385 	put_smstate(u32, buf, 0x7f64, seg.base);
6386 	put_smstate(u32, buf, 0x7f60, seg.limit);
6387 	put_smstate(u32, buf, 0x7f5c, enter_smm_get_segment_flags(&seg));
6388 
6389 	kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR);
6390 	put_smstate(u32, buf, 0x7fc0, seg.selector);
6391 	put_smstate(u32, buf, 0x7f80, seg.base);
6392 	put_smstate(u32, buf, 0x7f7c, seg.limit);
6393 	put_smstate(u32, buf, 0x7f78, enter_smm_get_segment_flags(&seg));
6394 
6395 	kvm_x86_ops->get_gdt(vcpu, &dt);
6396 	put_smstate(u32, buf, 0x7f74, dt.address);
6397 	put_smstate(u32, buf, 0x7f70, dt.size);
6398 
6399 	kvm_x86_ops->get_idt(vcpu, &dt);
6400 	put_smstate(u32, buf, 0x7f58, dt.address);
6401 	put_smstate(u32, buf, 0x7f54, dt.size);
6402 
6403 	for (i = 0; i < 6; i++)
6404 		enter_smm_save_seg_32(vcpu, buf, i);
6405 
6406 	put_smstate(u32, buf, 0x7f14, kvm_read_cr4(vcpu));
6407 
6408 	/* revision id */
6409 	put_smstate(u32, buf, 0x7efc, 0x00020000);
6410 	put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase);
6411 }
6412 
6413 static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
6414 {
6415 #ifdef CONFIG_X86_64
6416 	struct desc_ptr dt;
6417 	struct kvm_segment seg;
6418 	unsigned long val;
6419 	int i;
6420 
6421 	for (i = 0; i < 16; i++)
6422 		put_smstate(u64, buf, 0x7ff8 - i * 8, kvm_register_read(vcpu, i));
6423 
6424 	put_smstate(u64, buf, 0x7f78, kvm_rip_read(vcpu));
6425 	put_smstate(u32, buf, 0x7f70, kvm_get_rflags(vcpu));
6426 
6427 	kvm_get_dr(vcpu, 6, &val);
6428 	put_smstate(u64, buf, 0x7f68, val);
6429 	kvm_get_dr(vcpu, 7, &val);
6430 	put_smstate(u64, buf, 0x7f60, val);
6431 
6432 	put_smstate(u64, buf, 0x7f58, kvm_read_cr0(vcpu));
6433 	put_smstate(u64, buf, 0x7f50, kvm_read_cr3(vcpu));
6434 	put_smstate(u64, buf, 0x7f48, kvm_read_cr4(vcpu));
6435 
6436 	put_smstate(u32, buf, 0x7f00, vcpu->arch.smbase);
6437 
6438 	/* revision id */
6439 	put_smstate(u32, buf, 0x7efc, 0x00020064);
6440 
6441 	put_smstate(u64, buf, 0x7ed0, vcpu->arch.efer);
6442 
6443 	kvm_get_segment(vcpu, &seg, VCPU_SREG_TR);
6444 	put_smstate(u16, buf, 0x7e90, seg.selector);
6445 	put_smstate(u16, buf, 0x7e92, enter_smm_get_segment_flags(&seg) >> 8);
6446 	put_smstate(u32, buf, 0x7e94, seg.limit);
6447 	put_smstate(u64, buf, 0x7e98, seg.base);
6448 
6449 	kvm_x86_ops->get_idt(vcpu, &dt);
6450 	put_smstate(u32, buf, 0x7e84, dt.size);
6451 	put_smstate(u64, buf, 0x7e88, dt.address);
6452 
6453 	kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR);
6454 	put_smstate(u16, buf, 0x7e70, seg.selector);
6455 	put_smstate(u16, buf, 0x7e72, enter_smm_get_segment_flags(&seg) >> 8);
6456 	put_smstate(u32, buf, 0x7e74, seg.limit);
6457 	put_smstate(u64, buf, 0x7e78, seg.base);
6458 
6459 	kvm_x86_ops->get_gdt(vcpu, &dt);
6460 	put_smstate(u32, buf, 0x7e64, dt.size);
6461 	put_smstate(u64, buf, 0x7e68, dt.address);
6462 
6463 	for (i = 0; i < 6; i++)
6464 		enter_smm_save_seg_64(vcpu, buf, i);
6465 #else
6466 	WARN_ON_ONCE(1);
6467 #endif
6468 }
6469 
6470 static void enter_smm(struct kvm_vcpu *vcpu)
6471 {
6472 	struct kvm_segment cs, ds;
6473 	struct desc_ptr dt;
6474 	char buf[512];
6475 	u32 cr0;
6476 
6477 	trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
6478 	vcpu->arch.hflags |= HF_SMM_MASK;
6479 	memset(buf, 0, 512);
6480 	if (guest_cpuid_has_longmode(vcpu))
6481 		enter_smm_save_state_64(vcpu, buf);
6482 	else
6483 		enter_smm_save_state_32(vcpu, buf);
6484 
6485 	kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));
6486 
6487 	if (kvm_x86_ops->get_nmi_mask(vcpu))
6488 		vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
6489 	else
6490 		kvm_x86_ops->set_nmi_mask(vcpu, true);
6491 
6492 	kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
6493 	kvm_rip_write(vcpu, 0x8000);
6494 
6495 	cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG);
6496 	kvm_x86_ops->set_cr0(vcpu, cr0);
6497 	vcpu->arch.cr0 = cr0;
6498 
6499 	kvm_x86_ops->set_cr4(vcpu, 0);
6500 
6501 	/* Undocumented: IDT limit is set to zero on entry to SMM.  */
6502 	dt.address = dt.size = 0;
6503 	kvm_x86_ops->set_idt(vcpu, &dt);
6504 
6505 	__kvm_set_dr(vcpu, 7, DR7_FIXED_1);
6506 
6507 	cs.selector = (vcpu->arch.smbase >> 4) & 0xffff;
6508 	cs.base = vcpu->arch.smbase;
6509 
6510 	ds.selector = 0;
6511 	ds.base = 0;
6512 
6513 	cs.limit    = ds.limit = 0xffffffff;
6514 	cs.type     = ds.type = 0x3;
6515 	cs.dpl      = ds.dpl = 0;
6516 	cs.db       = ds.db = 0;
6517 	cs.s        = ds.s = 1;
6518 	cs.l        = ds.l = 0;
6519 	cs.g        = ds.g = 1;
6520 	cs.avl      = ds.avl = 0;
6521 	cs.present  = ds.present = 1;
6522 	cs.unusable = ds.unusable = 0;
6523 	cs.padding  = ds.padding = 0;
6524 
6525 	kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
6526 	kvm_set_segment(vcpu, &ds, VCPU_SREG_DS);
6527 	kvm_set_segment(vcpu, &ds, VCPU_SREG_ES);
6528 	kvm_set_segment(vcpu, &ds, VCPU_SREG_FS);
6529 	kvm_set_segment(vcpu, &ds, VCPU_SREG_GS);
6530 	kvm_set_segment(vcpu, &ds, VCPU_SREG_SS);
6531 
6532 	if (guest_cpuid_has_longmode(vcpu))
6533 		kvm_x86_ops->set_efer(vcpu, 0);
6534 
6535 	kvm_update_cpuid(vcpu);
6536 	kvm_mmu_reset_context(vcpu);
6537 }
6538 
6539 static void process_smi(struct kvm_vcpu *vcpu)
6540 {
6541 	vcpu->arch.smi_pending = true;
6542 	kvm_make_request(KVM_REQ_EVENT, vcpu);
6543 }
6544 
6545 void kvm_make_scan_ioapic_request(struct kvm *kvm)
6546 {
6547 	kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC);
6548 }
6549 
6550 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
6551 {
6552 	u64 eoi_exit_bitmap[4];
6553 
6554 	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
6555 		return;
6556 
6557 	bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256);
6558 
6559 	if (irqchip_split(vcpu->kvm))
6560 		kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
6561 	else {
6562 		if (vcpu->arch.apicv_active)
6563 			kvm_x86_ops->sync_pir_to_irr(vcpu);
6564 		kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
6565 	}
6566 	bitmap_or((ulong *)eoi_exit_bitmap, vcpu->arch.ioapic_handled_vectors,
6567 		  vcpu_to_synic(vcpu)->vec_bitmap, 256);
6568 	kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
6569 }
6570 
6571 static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
6572 {
6573 	++vcpu->stat.tlb_flush;
6574 	kvm_x86_ops->tlb_flush(vcpu);
6575 }
6576 
6577 void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
6578 {
6579 	struct page *page = NULL;
6580 
6581 	if (!lapic_in_kernel(vcpu))
6582 		return;
6583 
6584 	if (!kvm_x86_ops->set_apic_access_page_addr)
6585 		return;
6586 
6587 	page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
6588 	if (is_error_page(page))
6589 		return;
6590 	kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page));
6591 
6592 	/*
6593 	 * Do not pin apic access page in memory, the MMU notifier
6594 	 * will call us again if it is migrated or swapped out.
6595 	 */
6596 	put_page(page);
6597 }
6598 EXPORT_SYMBOL_GPL(kvm_vcpu_reload_apic_access_page);
6599 
6600 void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
6601 					   unsigned long address)
6602 {
6603 	/*
6604 	 * The physical address of apic access page is stored in the VMCS.
6605 	 * Update it when it becomes invalid.
6606 	 */
6607 	if (address == gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT))
6608 		kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
6609 }
6610 
6611 /*
6612  * Returns 1 to let vcpu_run() continue the guest execution loop without
6613  * exiting to the userspace.  Otherwise, the value will be returned to the
6614  * userspace.
6615  */
6616 static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
6617 {
6618 	int r;
6619 	bool req_int_win =
6620 		dm_request_for_irq_injection(vcpu) &&
6621 		kvm_cpu_accept_dm_intr(vcpu);
6622 
6623 	bool req_immediate_exit = false;
6624 
6625 	if (vcpu->requests) {
6626 		if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
6627 			kvm_mmu_unload(vcpu);
6628 		if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
6629 			__kvm_migrate_timers(vcpu);
6630 		if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu))
6631 			kvm_gen_update_masterclock(vcpu->kvm);
6632 		if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu))
6633 			kvm_gen_kvmclock_update(vcpu);
6634 		if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
6635 			r = kvm_guest_time_update(vcpu);
6636 			if (unlikely(r))
6637 				goto out;
6638 		}
6639 		if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
6640 			kvm_mmu_sync_roots(vcpu);
6641 		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
6642 			kvm_vcpu_flush_tlb(vcpu);
6643 		if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
6644 			vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
6645 			r = 0;
6646 			goto out;
6647 		}
6648 		if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
6649 			vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
6650 			r = 0;
6651 			goto out;
6652 		}
6653 		if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) {
6654 			vcpu->fpu_active = 0;
6655 			kvm_x86_ops->fpu_deactivate(vcpu);
6656 		}
6657 		if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
6658 			/* Page is swapped out. Do synthetic halt */
6659 			vcpu->arch.apf.halted = true;
6660 			r = 1;
6661 			goto out;
6662 		}
6663 		if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
6664 			record_steal_time(vcpu);
6665 		if (kvm_check_request(KVM_REQ_SMI, vcpu))
6666 			process_smi(vcpu);
6667 		if (kvm_check_request(KVM_REQ_NMI, vcpu))
6668 			process_nmi(vcpu);
6669 		if (kvm_check_request(KVM_REQ_PMU, vcpu))
6670 			kvm_pmu_handle_event(vcpu);
6671 		if (kvm_check_request(KVM_REQ_PMI, vcpu))
6672 			kvm_pmu_deliver_pmi(vcpu);
6673 		if (kvm_check_request(KVM_REQ_IOAPIC_EOI_EXIT, vcpu)) {
6674 			BUG_ON(vcpu->arch.pending_ioapic_eoi > 255);
6675 			if (test_bit(vcpu->arch.pending_ioapic_eoi,
6676 				     vcpu->arch.ioapic_handled_vectors)) {
6677 				vcpu->run->exit_reason = KVM_EXIT_IOAPIC_EOI;
6678 				vcpu->run->eoi.vector =
6679 						vcpu->arch.pending_ioapic_eoi;
6680 				r = 0;
6681 				goto out;
6682 			}
6683 		}
6684 		if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu))
6685 			vcpu_scan_ioapic(vcpu);
6686 		if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
6687 			kvm_vcpu_reload_apic_access_page(vcpu);
6688 		if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) {
6689 			vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
6690 			vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH;
6691 			r = 0;
6692 			goto out;
6693 		}
6694 		if (kvm_check_request(KVM_REQ_HV_RESET, vcpu)) {
6695 			vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
6696 			vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET;
6697 			r = 0;
6698 			goto out;
6699 		}
6700 		if (kvm_check_request(KVM_REQ_HV_EXIT, vcpu)) {
6701 			vcpu->run->exit_reason = KVM_EXIT_HYPERV;
6702 			vcpu->run->hyperv = vcpu->arch.hyperv.exit;
6703 			r = 0;
6704 			goto out;
6705 		}
6706 
6707 		/*
6708 		 * KVM_REQ_HV_STIMER has to be processed after
6709 		 * KVM_REQ_CLOCK_UPDATE, because Hyper-V SynIC timers
6710 		 * depend on the guest clock being up-to-date
6711 		 */
6712 		if (kvm_check_request(KVM_REQ_HV_STIMER, vcpu))
6713 			kvm_hv_process_stimers(vcpu);
6714 	}
6715 
6716 	/*
6717 	 * KVM_REQ_EVENT is not set when posted interrupts are set by
6718 	 * VT-d hardware, so we have to update RVI unconditionally.
6719 	 */
6720 	if (kvm_lapic_enabled(vcpu)) {
6721 		/*
6722 		 * Update architecture specific hints for APIC
6723 		 * virtual interrupt delivery.
6724 		 */
6725 		if (vcpu->arch.apicv_active)
6726 			kvm_x86_ops->hwapic_irr_update(vcpu,
6727 				kvm_lapic_find_highest_irr(vcpu));
6728 	}
6729 
6730 	if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
6731 		kvm_apic_accept_events(vcpu);
6732 		if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
6733 			r = 1;
6734 			goto out;
6735 		}
6736 
6737 		if (inject_pending_event(vcpu, req_int_win) != 0)
6738 			req_immediate_exit = true;
6739 		else {
6740 			/* Enable NMI/IRQ window open exits if needed.
6741 			 *
6742 			 * SMIs have two cases: 1) they can be nested, and
6743 			 * then there is nothing to do here because RSM will
6744 			 * cause a vmexit anyway; 2) or the SMI can be pending
6745 			 * because inject_pending_event has completed the
6746 			 * injection of an IRQ or NMI from the previous vmexit,
6747 			 * and then we request an immediate exit to inject the SMI.
6748 			 */
6749 			if (vcpu->arch.smi_pending && !is_smm(vcpu))
6750 				req_immediate_exit = true;
6751 			if (vcpu->arch.nmi_pending)
6752 				kvm_x86_ops->enable_nmi_window(vcpu);
6753 			if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
6754 				kvm_x86_ops->enable_irq_window(vcpu);
6755 		}
6756 
6757 		if (kvm_lapic_enabled(vcpu)) {
6758 			update_cr8_intercept(vcpu);
6759 			kvm_lapic_sync_to_vapic(vcpu);
6760 		}
6761 	}
6762 
6763 	r = kvm_mmu_reload(vcpu);
6764 	if (unlikely(r)) {
6765 		goto cancel_injection;
6766 	}
6767 
6768 	preempt_disable();
6769 
6770 	kvm_x86_ops->prepare_guest_switch(vcpu);
6771 	if (vcpu->fpu_active)
6772 		kvm_load_guest_fpu(vcpu);
6773 	vcpu->mode = IN_GUEST_MODE;
6774 
6775 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
6776 
6777 	/*
6778 	 * We should set ->mode before check ->requests,
6779 	 * Please see the comment in kvm_make_all_cpus_request.
6780 	 * This also orders the write to mode from any reads
6781 	 * to the page tables done while the VCPU is running.
6782 	 * Please see the comment in kvm_flush_remote_tlbs.
6783 	 */
6784 	smp_mb__after_srcu_read_unlock();
6785 
6786 	local_irq_disable();
6787 
6788 	if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests
6789 	    || need_resched() || signal_pending(current)) {
6790 		vcpu->mode = OUTSIDE_GUEST_MODE;
6791 		smp_wmb();
6792 		local_irq_enable();
6793 		preempt_enable();
6794 		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
6795 		r = 1;
6796 		goto cancel_injection;
6797 	}
6798 
6799 	kvm_load_guest_xcr0(vcpu);
6800 
6801 	if (req_immediate_exit) {
6802 		kvm_make_request(KVM_REQ_EVENT, vcpu);
6803 		smp_send_reschedule(vcpu->cpu);
6804 	}
6805 
6806 	trace_kvm_entry(vcpu->vcpu_id);
6807 	wait_lapic_expire(vcpu);
6808 	guest_enter_irqoff();
6809 
6810 	if (unlikely(vcpu->arch.switch_db_regs)) {
6811 		set_debugreg(0, 7);
6812 		set_debugreg(vcpu->arch.eff_db[0], 0);
6813 		set_debugreg(vcpu->arch.eff_db[1], 1);
6814 		set_debugreg(vcpu->arch.eff_db[2], 2);
6815 		set_debugreg(vcpu->arch.eff_db[3], 3);
6816 		set_debugreg(vcpu->arch.dr6, 6);
6817 		vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
6818 	}
6819 
6820 	kvm_x86_ops->run(vcpu);
6821 
6822 	/*
6823 	 * Do this here before restoring debug registers on the host.  And
6824 	 * since we do this before handling the vmexit, a DR access vmexit
6825 	 * can (a) read the correct value of the debug registers, (b) set
6826 	 * KVM_DEBUGREG_WONT_EXIT again.
6827 	 */
6828 	if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) {
6829 		WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP);
6830 		kvm_x86_ops->sync_dirty_debug_regs(vcpu);
6831 		kvm_update_dr0123(vcpu);
6832 		kvm_update_dr6(vcpu);
6833 		kvm_update_dr7(vcpu);
6834 		vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
6835 	}
6836 
6837 	/*
6838 	 * If the guest has used debug registers, at least dr7
6839 	 * will be disabled while returning to the host.
6840 	 * If we don't have active breakpoints in the host, we don't
6841 	 * care about the messed up debug address registers. But if
6842 	 * we have some of them active, restore the old state.
6843 	 */
6844 	if (hw_breakpoint_active())
6845 		hw_breakpoint_restore();
6846 
6847 	vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
6848 
6849 	vcpu->mode = OUTSIDE_GUEST_MODE;
6850 	smp_wmb();
6851 
6852 	kvm_put_guest_xcr0(vcpu);
6853 
6854 	kvm_x86_ops->handle_external_intr(vcpu);
6855 
6856 	++vcpu->stat.exits;
6857 
6858 	guest_exit_irqoff();
6859 
6860 	local_irq_enable();
6861 	preempt_enable();
6862 
6863 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
6864 
6865 	/*
6866 	 * Profile KVM exit RIPs:
6867 	 */
6868 	if (unlikely(prof_on == KVM_PROFILING)) {
6869 		unsigned long rip = kvm_rip_read(vcpu);
6870 		profile_hit(KVM_PROFILING, (void *)rip);
6871 	}
6872 
6873 	if (unlikely(vcpu->arch.tsc_always_catchup))
6874 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
6875 
6876 	if (vcpu->arch.apic_attention)
6877 		kvm_lapic_sync_from_vapic(vcpu);
6878 
6879 	r = kvm_x86_ops->handle_exit(vcpu);
6880 	return r;
6881 
6882 cancel_injection:
6883 	kvm_x86_ops->cancel_injection(vcpu);
6884 	if (unlikely(vcpu->arch.apic_attention))
6885 		kvm_lapic_sync_from_vapic(vcpu);
6886 out:
6887 	return r;
6888 }
6889 
6890 static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
6891 {
6892 	if (!kvm_arch_vcpu_runnable(vcpu) &&
6893 	    (!kvm_x86_ops->pre_block || kvm_x86_ops->pre_block(vcpu) == 0)) {
6894 		srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
6895 		kvm_vcpu_block(vcpu);
6896 		vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
6897 
6898 		if (kvm_x86_ops->post_block)
6899 			kvm_x86_ops->post_block(vcpu);
6900 
6901 		if (!kvm_check_request(KVM_REQ_UNHALT, vcpu))
6902 			return 1;
6903 	}
6904 
6905 	kvm_apic_accept_events(vcpu);
6906 	switch(vcpu->arch.mp_state) {
6907 	case KVM_MP_STATE_HALTED:
6908 		vcpu->arch.pv.pv_unhalted = false;
6909 		vcpu->arch.mp_state =
6910 			KVM_MP_STATE_RUNNABLE;
6911 	case KVM_MP_STATE_RUNNABLE:
6912 		vcpu->arch.apf.halted = false;
6913 		break;
6914 	case KVM_MP_STATE_INIT_RECEIVED:
6915 		break;
6916 	default:
6917 		return -EINTR;
6918 		break;
6919 	}
6920 	return 1;
6921 }
6922 
6923 static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
6924 {
6925 	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
6926 		!vcpu->arch.apf.halted);
6927 }
6928 
6929 static int vcpu_run(struct kvm_vcpu *vcpu)
6930 {
6931 	int r;
6932 	struct kvm *kvm = vcpu->kvm;
6933 
6934 	vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
6935 
6936 	for (;;) {
6937 		if (kvm_vcpu_running(vcpu)) {
6938 			r = vcpu_enter_guest(vcpu);
6939 		} else {
6940 			r = vcpu_block(kvm, vcpu);
6941 		}
6942 
6943 		if (r <= 0)
6944 			break;
6945 
6946 		clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
6947 		if (kvm_cpu_has_pending_timer(vcpu))
6948 			kvm_inject_pending_timer_irqs(vcpu);
6949 
6950 		if (dm_request_for_irq_injection(vcpu) &&
6951 			kvm_vcpu_ready_for_interrupt_injection(vcpu)) {
6952 			r = 0;
6953 			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
6954 			++vcpu->stat.request_irq_exits;
6955 			break;
6956 		}
6957 
6958 		kvm_check_async_pf_completion(vcpu);
6959 
6960 		if (signal_pending(current)) {
6961 			r = -EINTR;
6962 			vcpu->run->exit_reason = KVM_EXIT_INTR;
6963 			++vcpu->stat.signal_exits;
6964 			break;
6965 		}
6966 		if (need_resched()) {
6967 			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
6968 			cond_resched();
6969 			vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
6970 		}
6971 	}
6972 
6973 	srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
6974 
6975 	return r;
6976 }
6977 
6978 static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
6979 {
6980 	int r;
6981 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
6982 	r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
6983 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
6984 	if (r != EMULATE_DONE)
6985 		return 0;
6986 	return 1;
6987 }
6988 
6989 static int complete_emulated_pio(struct kvm_vcpu *vcpu)
6990 {
6991 	BUG_ON(!vcpu->arch.pio.count);
6992 
6993 	return complete_emulated_io(vcpu);
6994 }
6995 
6996 /*
6997  * Implements the following, as a state machine:
6998  *
6999  * read:
7000  *   for each fragment
7001  *     for each mmio piece in the fragment
7002  *       write gpa, len
7003  *       exit
7004  *       copy data
7005  *   execute insn
7006  *
7007  * write:
7008  *   for each fragment
7009  *     for each mmio piece in the fragment
7010  *       write gpa, len
7011  *       copy data
7012  *       exit
7013  */
7014 static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
7015 {
7016 	struct kvm_run *run = vcpu->run;
7017 	struct kvm_mmio_fragment *frag;
7018 	unsigned len;
7019 
7020 	BUG_ON(!vcpu->mmio_needed);
7021 
7022 	/* Complete previous fragment */
7023 	frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment];
7024 	len = min(8u, frag->len);
7025 	if (!vcpu->mmio_is_write)
7026 		memcpy(frag->data, run->mmio.data, len);
7027 
7028 	if (frag->len <= 8) {
7029 		/* Switch to the next fragment. */
7030 		frag++;
7031 		vcpu->mmio_cur_fragment++;
7032 	} else {
7033 		/* Go forward to the next mmio piece. */
7034 		frag->data += len;
7035 		frag->gpa += len;
7036 		frag->len -= len;
7037 	}
7038 
7039 	if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) {
7040 		vcpu->mmio_needed = 0;
7041 
7042 		/* FIXME: return into emulator if single-stepping.  */
7043 		if (vcpu->mmio_is_write)
7044 			return 1;
7045 		vcpu->mmio_read_completed = 1;
7046 		return complete_emulated_io(vcpu);
7047 	}
7048 
7049 	run->exit_reason = KVM_EXIT_MMIO;
7050 	run->mmio.phys_addr = frag->gpa;
7051 	if (vcpu->mmio_is_write)
7052 		memcpy(run->mmio.data, frag->data, min(8u, frag->len));
7053 	run->mmio.len = min(8u, frag->len);
7054 	run->mmio.is_write = vcpu->mmio_is_write;
7055 	vcpu->arch.complete_userspace_io = complete_emulated_mmio;
7056 	return 0;
7057 }
7058 
7059 
7060 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
7061 {
7062 	struct fpu *fpu = &current->thread.fpu;
7063 	int r;
7064 	sigset_t sigsaved;
7065 
7066 	fpu__activate_curr(fpu);
7067 
7068 	if (vcpu->sigset_active)
7069 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
7070 
7071 	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
7072 		kvm_vcpu_block(vcpu);
7073 		kvm_apic_accept_events(vcpu);
7074 		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
7075 		r = -EAGAIN;
7076 		goto out;
7077 	}
7078 
7079 	/* re-sync apic's tpr */
7080 	if (!lapic_in_kernel(vcpu)) {
7081 		if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
7082 			r = -EINVAL;
7083 			goto out;
7084 		}
7085 	}
7086 
7087 	if (unlikely(vcpu->arch.complete_userspace_io)) {
7088 		int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io;
7089 		vcpu->arch.complete_userspace_io = NULL;
7090 		r = cui(vcpu);
7091 		if (r <= 0)
7092 			goto out;
7093 	} else
7094 		WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed);
7095 
7096 	r = vcpu_run(vcpu);
7097 
7098 out:
7099 	post_kvm_run_save(vcpu);
7100 	if (vcpu->sigset_active)
7101 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
7102 
7103 	return r;
7104 }
7105 
7106 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
7107 {
7108 	if (vcpu->arch.emulate_regs_need_sync_to_vcpu) {
7109 		/*
7110 		 * We are here if userspace calls get_regs() in the middle of
7111 		 * instruction emulation. Registers state needs to be copied
7112 		 * back from emulation context to vcpu. Userspace shouldn't do
7113 		 * that usually, but some bad designed PV devices (vmware
7114 		 * backdoor interface) need this to work
7115 		 */
7116 		emulator_writeback_register_cache(&vcpu->arch.emulate_ctxt);
7117 		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
7118 	}
7119 	regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
7120 	regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
7121 	regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
7122 	regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
7123 	regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
7124 	regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
7125 	regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
7126 	regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
7127 #ifdef CONFIG_X86_64
7128 	regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
7129 	regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
7130 	regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
7131 	regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
7132 	regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
7133 	regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
7134 	regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
7135 	regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
7136 #endif
7137 
7138 	regs->rip = kvm_rip_read(vcpu);
7139 	regs->rflags = kvm_get_rflags(vcpu);
7140 
7141 	return 0;
7142 }
7143 
7144 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
7145 {
7146 	vcpu->arch.emulate_regs_need_sync_from_vcpu = true;
7147 	vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
7148 
7149 	kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
7150 	kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
7151 	kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
7152 	kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
7153 	kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
7154 	kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
7155 	kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
7156 	kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
7157 #ifdef CONFIG_X86_64
7158 	kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
7159 	kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
7160 	kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
7161 	kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
7162 	kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
7163 	kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
7164 	kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
7165 	kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
7166 #endif
7167 
7168 	kvm_rip_write(vcpu, regs->rip);
7169 	kvm_set_rflags(vcpu, regs->rflags);
7170 
7171 	vcpu->arch.exception.pending = false;
7172 
7173 	kvm_make_request(KVM_REQ_EVENT, vcpu);
7174 
7175 	return 0;
7176 }
7177 
7178 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
7179 {
7180 	struct kvm_segment cs;
7181 
7182 	kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
7183 	*db = cs.db;
7184 	*l = cs.l;
7185 }
7186 EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
7187 
7188 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
7189 				  struct kvm_sregs *sregs)
7190 {
7191 	struct desc_ptr dt;
7192 
7193 	kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
7194 	kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
7195 	kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
7196 	kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
7197 	kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
7198 	kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
7199 
7200 	kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
7201 	kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
7202 
7203 	kvm_x86_ops->get_idt(vcpu, &dt);
7204 	sregs->idt.limit = dt.size;
7205 	sregs->idt.base = dt.address;
7206 	kvm_x86_ops->get_gdt(vcpu, &dt);
7207 	sregs->gdt.limit = dt.size;
7208 	sregs->gdt.base = dt.address;
7209 
7210 	sregs->cr0 = kvm_read_cr0(vcpu);
7211 	sregs->cr2 = vcpu->arch.cr2;
7212 	sregs->cr3 = kvm_read_cr3(vcpu);
7213 	sregs->cr4 = kvm_read_cr4(vcpu);
7214 	sregs->cr8 = kvm_get_cr8(vcpu);
7215 	sregs->efer = vcpu->arch.efer;
7216 	sregs->apic_base = kvm_get_apic_base(vcpu);
7217 
7218 	memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
7219 
7220 	if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft)
7221 		set_bit(vcpu->arch.interrupt.nr,
7222 			(unsigned long *)sregs->interrupt_bitmap);
7223 
7224 	return 0;
7225 }
7226 
7227 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
7228 				    struct kvm_mp_state *mp_state)
7229 {
7230 	kvm_apic_accept_events(vcpu);
7231 	if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED &&
7232 					vcpu->arch.pv.pv_unhalted)
7233 		mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
7234 	else
7235 		mp_state->mp_state = vcpu->arch.mp_state;
7236 
7237 	return 0;
7238 }
7239 
7240 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
7241 				    struct kvm_mp_state *mp_state)
7242 {
7243 	if (!lapic_in_kernel(vcpu) &&
7244 	    mp_state->mp_state != KVM_MP_STATE_RUNNABLE)
7245 		return -EINVAL;
7246 
7247 	if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
7248 		vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
7249 		set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);
7250 	} else
7251 		vcpu->arch.mp_state = mp_state->mp_state;
7252 	kvm_make_request(KVM_REQ_EVENT, vcpu);
7253 	return 0;
7254 }
7255 
7256 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
7257 		    int reason, bool has_error_code, u32 error_code)
7258 {
7259 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
7260 	int ret;
7261 
7262 	init_emulate_ctxt(vcpu);
7263 
7264 	ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason,
7265 				   has_error_code, error_code);
7266 
7267 	if (ret)
7268 		return EMULATE_FAIL;
7269 
7270 	kvm_rip_write(vcpu, ctxt->eip);
7271 	kvm_set_rflags(vcpu, ctxt->eflags);
7272 	kvm_make_request(KVM_REQ_EVENT, vcpu);
7273 	return EMULATE_DONE;
7274 }
7275 EXPORT_SYMBOL_GPL(kvm_task_switch);
7276 
7277 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
7278 				  struct kvm_sregs *sregs)
7279 {
7280 	struct msr_data apic_base_msr;
7281 	int mmu_reset_needed = 0;
7282 	int pending_vec, max_bits, idx;
7283 	struct desc_ptr dt;
7284 
7285 	if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE))
7286 		return -EINVAL;
7287 
7288 	dt.size = sregs->idt.limit;
7289 	dt.address = sregs->idt.base;
7290 	kvm_x86_ops->set_idt(vcpu, &dt);
7291 	dt.size = sregs->gdt.limit;
7292 	dt.address = sregs->gdt.base;
7293 	kvm_x86_ops->set_gdt(vcpu, &dt);
7294 
7295 	vcpu->arch.cr2 = sregs->cr2;
7296 	mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
7297 	vcpu->arch.cr3 = sregs->cr3;
7298 	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
7299 
7300 	kvm_set_cr8(vcpu, sregs->cr8);
7301 
7302 	mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
7303 	kvm_x86_ops->set_efer(vcpu, sregs->efer);
7304 	apic_base_msr.data = sregs->apic_base;
7305 	apic_base_msr.host_initiated = true;
7306 	kvm_set_apic_base(vcpu, &apic_base_msr);
7307 
7308 	mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
7309 	kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
7310 	vcpu->arch.cr0 = sregs->cr0;
7311 
7312 	mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
7313 	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
7314 	if (sregs->cr4 & (X86_CR4_OSXSAVE | X86_CR4_PKE))
7315 		kvm_update_cpuid(vcpu);
7316 
7317 	idx = srcu_read_lock(&vcpu->kvm->srcu);
7318 	if (!is_long_mode(vcpu) && is_pae(vcpu)) {
7319 		load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
7320 		mmu_reset_needed = 1;
7321 	}
7322 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
7323 
7324 	if (mmu_reset_needed)
7325 		kvm_mmu_reset_context(vcpu);
7326 
7327 	max_bits = KVM_NR_INTERRUPTS;
7328 	pending_vec = find_first_bit(
7329 		(const unsigned long *)sregs->interrupt_bitmap, max_bits);
7330 	if (pending_vec < max_bits) {
7331 		kvm_queue_interrupt(vcpu, pending_vec, false);
7332 		pr_debug("Set back pending irq %d\n", pending_vec);
7333 	}
7334 
7335 	kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
7336 	kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
7337 	kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
7338 	kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
7339 	kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
7340 	kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
7341 
7342 	kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
7343 	kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
7344 
7345 	update_cr8_intercept(vcpu);
7346 
7347 	/* Older userspace won't unhalt the vcpu on reset. */
7348 	if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
7349 	    sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
7350 	    !is_protmode(vcpu))
7351 		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
7352 
7353 	kvm_make_request(KVM_REQ_EVENT, vcpu);
7354 
7355 	return 0;
7356 }
7357 
7358 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
7359 					struct kvm_guest_debug *dbg)
7360 {
7361 	unsigned long rflags;
7362 	int i, r;
7363 
7364 	if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
7365 		r = -EBUSY;
7366 		if (vcpu->arch.exception.pending)
7367 			goto out;
7368 		if (dbg->control & KVM_GUESTDBG_INJECT_DB)
7369 			kvm_queue_exception(vcpu, DB_VECTOR);
7370 		else
7371 			kvm_queue_exception(vcpu, BP_VECTOR);
7372 	}
7373 
7374 	/*
7375 	 * Read rflags as long as potentially injected trace flags are still
7376 	 * filtered out.
7377 	 */
7378 	rflags = kvm_get_rflags(vcpu);
7379 
7380 	vcpu->guest_debug = dbg->control;
7381 	if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
7382 		vcpu->guest_debug = 0;
7383 
7384 	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
7385 		for (i = 0; i < KVM_NR_DB_REGS; ++i)
7386 			vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
7387 		vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7];
7388 	} else {
7389 		for (i = 0; i < KVM_NR_DB_REGS; i++)
7390 			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
7391 	}
7392 	kvm_update_dr7(vcpu);
7393 
7394 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
7395 		vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
7396 			get_segment_base(vcpu, VCPU_SREG_CS);
7397 
7398 	/*
7399 	 * Trigger an rflags update that will inject or remove the trace
7400 	 * flags.
7401 	 */
7402 	kvm_set_rflags(vcpu, rflags);
7403 
7404 	kvm_x86_ops->update_bp_intercept(vcpu);
7405 
7406 	r = 0;
7407 
7408 out:
7409 
7410 	return r;
7411 }
7412 
7413 /*
7414  * Translate a guest virtual address to a guest physical address.
7415  */
7416 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
7417 				    struct kvm_translation *tr)
7418 {
7419 	unsigned long vaddr = tr->linear_address;
7420 	gpa_t gpa;
7421 	int idx;
7422 
7423 	idx = srcu_read_lock(&vcpu->kvm->srcu);
7424 	gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
7425 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
7426 	tr->physical_address = gpa;
7427 	tr->valid = gpa != UNMAPPED_GVA;
7428 	tr->writeable = 1;
7429 	tr->usermode = 0;
7430 
7431 	return 0;
7432 }
7433 
7434 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
7435 {
7436 	struct fxregs_state *fxsave =
7437 			&vcpu->arch.guest_fpu.state.fxsave;
7438 
7439 	memcpy(fpu->fpr, fxsave->st_space, 128);
7440 	fpu->fcw = fxsave->cwd;
7441 	fpu->fsw = fxsave->swd;
7442 	fpu->ftwx = fxsave->twd;
7443 	fpu->last_opcode = fxsave->fop;
7444 	fpu->last_ip = fxsave->rip;
7445 	fpu->last_dp = fxsave->rdp;
7446 	memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
7447 
7448 	return 0;
7449 }
7450 
7451 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
7452 {
7453 	struct fxregs_state *fxsave =
7454 			&vcpu->arch.guest_fpu.state.fxsave;
7455 
7456 	memcpy(fxsave->st_space, fpu->fpr, 128);
7457 	fxsave->cwd = fpu->fcw;
7458 	fxsave->swd = fpu->fsw;
7459 	fxsave->twd = fpu->ftwx;
7460 	fxsave->fop = fpu->last_opcode;
7461 	fxsave->rip = fpu->last_ip;
7462 	fxsave->rdp = fpu->last_dp;
7463 	memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
7464 
7465 	return 0;
7466 }
7467 
7468 static void fx_init(struct kvm_vcpu *vcpu)
7469 {
7470 	fpstate_init(&vcpu->arch.guest_fpu.state);
7471 	if (boot_cpu_has(X86_FEATURE_XSAVES))
7472 		vcpu->arch.guest_fpu.state.xsave.header.xcomp_bv =
7473 			host_xcr0 | XSTATE_COMPACTION_ENABLED;
7474 
7475 	/*
7476 	 * Ensure guest xcr0 is valid for loading
7477 	 */
7478 	vcpu->arch.xcr0 = XFEATURE_MASK_FP;
7479 
7480 	vcpu->arch.cr0 |= X86_CR0_ET;
7481 }
7482 
7483 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
7484 {
7485 	if (vcpu->guest_fpu_loaded)
7486 		return;
7487 
7488 	/*
7489 	 * Restore all possible states in the guest,
7490 	 * and assume host would use all available bits.
7491 	 * Guest xcr0 would be loaded later.
7492 	 */
7493 	vcpu->guest_fpu_loaded = 1;
7494 	__kernel_fpu_begin();
7495 	__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
7496 	trace_kvm_fpu(1);
7497 }
7498 
7499 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
7500 {
7501 	if (!vcpu->guest_fpu_loaded)
7502 		return;
7503 
7504 	vcpu->guest_fpu_loaded = 0;
7505 	copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
7506 	__kernel_fpu_end();
7507 	++vcpu->stat.fpu_reload;
7508 	trace_kvm_fpu(0);
7509 }
7510 
7511 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
7512 {
7513 	void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
7514 
7515 	kvmclock_reset(vcpu);
7516 
7517 	kvm_x86_ops->vcpu_free(vcpu);
7518 	free_cpumask_var(wbinvd_dirty_mask);
7519 }
7520 
7521 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
7522 						unsigned int id)
7523 {
7524 	struct kvm_vcpu *vcpu;
7525 
7526 	if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
7527 		printk_once(KERN_WARNING
7528 		"kvm: SMP vm created on host with unstable TSC; "
7529 		"guest TSC will not be reliable\n");
7530 
7531 	vcpu = kvm_x86_ops->vcpu_create(kvm, id);
7532 
7533 	return vcpu;
7534 }
7535 
7536 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
7537 {
7538 	int r;
7539 
7540 	kvm_vcpu_mtrr_init(vcpu);
7541 	r = vcpu_load(vcpu);
7542 	if (r)
7543 		return r;
7544 	kvm_vcpu_reset(vcpu, false);
7545 	kvm_mmu_setup(vcpu);
7546 	vcpu_put(vcpu);
7547 	return r;
7548 }
7549 
7550 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
7551 {
7552 	struct msr_data msr;
7553 	struct kvm *kvm = vcpu->kvm;
7554 
7555 	if (vcpu_load(vcpu))
7556 		return;
7557 	msr.data = 0x0;
7558 	msr.index = MSR_IA32_TSC;
7559 	msr.host_initiated = true;
7560 	kvm_write_tsc(vcpu, &msr);
7561 	vcpu_put(vcpu);
7562 
7563 	if (!kvmclock_periodic_sync)
7564 		return;
7565 
7566 	schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
7567 					KVMCLOCK_SYNC_PERIOD);
7568 }
7569 
7570 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
7571 {
7572 	int r;
7573 	vcpu->arch.apf.msr_val = 0;
7574 
7575 	r = vcpu_load(vcpu);
7576 	BUG_ON(r);
7577 	kvm_mmu_unload(vcpu);
7578 	vcpu_put(vcpu);
7579 
7580 	kvm_x86_ops->vcpu_free(vcpu);
7581 }
7582 
7583 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
7584 {
7585 	vcpu->arch.hflags = 0;
7586 
7587 	vcpu->arch.smi_pending = 0;
7588 	atomic_set(&vcpu->arch.nmi_queued, 0);
7589 	vcpu->arch.nmi_pending = 0;
7590 	vcpu->arch.nmi_injected = false;
7591 	kvm_clear_interrupt_queue(vcpu);
7592 	kvm_clear_exception_queue(vcpu);
7593 
7594 	memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
7595 	kvm_update_dr0123(vcpu);
7596 	vcpu->arch.dr6 = DR6_INIT;
7597 	kvm_update_dr6(vcpu);
7598 	vcpu->arch.dr7 = DR7_FIXED_1;
7599 	kvm_update_dr7(vcpu);
7600 
7601 	vcpu->arch.cr2 = 0;
7602 
7603 	kvm_make_request(KVM_REQ_EVENT, vcpu);
7604 	vcpu->arch.apf.msr_val = 0;
7605 	vcpu->arch.st.msr_val = 0;
7606 
7607 	kvmclock_reset(vcpu);
7608 
7609 	kvm_clear_async_pf_completion_queue(vcpu);
7610 	kvm_async_pf_hash_reset(vcpu);
7611 	vcpu->arch.apf.halted = false;
7612 
7613 	if (!init_event) {
7614 		kvm_pmu_reset(vcpu);
7615 		vcpu->arch.smbase = 0x30000;
7616 	}
7617 
7618 	memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
7619 	vcpu->arch.regs_avail = ~0;
7620 	vcpu->arch.regs_dirty = ~0;
7621 
7622 	kvm_x86_ops->vcpu_reset(vcpu, init_event);
7623 }
7624 
7625 void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
7626 {
7627 	struct kvm_segment cs;
7628 
7629 	kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
7630 	cs.selector = vector << 8;
7631 	cs.base = vector << 12;
7632 	kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
7633 	kvm_rip_write(vcpu, 0);
7634 }
7635 
7636 int kvm_arch_hardware_enable(void)
7637 {
7638 	struct kvm *kvm;
7639 	struct kvm_vcpu *vcpu;
7640 	int i;
7641 	int ret;
7642 	u64 local_tsc;
7643 	u64 max_tsc = 0;
7644 	bool stable, backwards_tsc = false;
7645 
7646 	kvm_shared_msr_cpu_online();
7647 	ret = kvm_x86_ops->hardware_enable();
7648 	if (ret != 0)
7649 		return ret;
7650 
7651 	local_tsc = rdtsc();
7652 	stable = !check_tsc_unstable();
7653 	list_for_each_entry(kvm, &vm_list, vm_list) {
7654 		kvm_for_each_vcpu(i, vcpu, kvm) {
7655 			if (!stable && vcpu->cpu == smp_processor_id())
7656 				kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
7657 			if (stable && vcpu->arch.last_host_tsc > local_tsc) {
7658 				backwards_tsc = true;
7659 				if (vcpu->arch.last_host_tsc > max_tsc)
7660 					max_tsc = vcpu->arch.last_host_tsc;
7661 			}
7662 		}
7663 	}
7664 
7665 	/*
7666 	 * Sometimes, even reliable TSCs go backwards.  This happens on
7667 	 * platforms that reset TSC during suspend or hibernate actions, but
7668 	 * maintain synchronization.  We must compensate.  Fortunately, we can
7669 	 * detect that condition here, which happens early in CPU bringup,
7670 	 * before any KVM threads can be running.  Unfortunately, we can't
7671 	 * bring the TSCs fully up to date with real time, as we aren't yet far
7672 	 * enough into CPU bringup that we know how much real time has actually
7673 	 * elapsed; our helper function, ktime_get_boot_ns() will be using boot
7674 	 * variables that haven't been updated yet.
7675 	 *
7676 	 * So we simply find the maximum observed TSC above, then record the
7677 	 * adjustment to TSC in each VCPU.  When the VCPU later gets loaded,
7678 	 * the adjustment will be applied.  Note that we accumulate
7679 	 * adjustments, in case multiple suspend cycles happen before some VCPU
7680 	 * gets a chance to run again.  In the event that no KVM threads get a
7681 	 * chance to run, we will miss the entire elapsed period, as we'll have
7682 	 * reset last_host_tsc, so VCPUs will not have the TSC adjusted and may
7683 	 * loose cycle time.  This isn't too big a deal, since the loss will be
7684 	 * uniform across all VCPUs (not to mention the scenario is extremely
7685 	 * unlikely). It is possible that a second hibernate recovery happens
7686 	 * much faster than a first, causing the observed TSC here to be
7687 	 * smaller; this would require additional padding adjustment, which is
7688 	 * why we set last_host_tsc to the local tsc observed here.
7689 	 *
7690 	 * N.B. - this code below runs only on platforms with reliable TSC,
7691 	 * as that is the only way backwards_tsc is set above.  Also note
7692 	 * that this runs for ALL vcpus, which is not a bug; all VCPUs should
7693 	 * have the same delta_cyc adjustment applied if backwards_tsc
7694 	 * is detected.  Note further, this adjustment is only done once,
7695 	 * as we reset last_host_tsc on all VCPUs to stop this from being
7696 	 * called multiple times (one for each physical CPU bringup).
7697 	 *
7698 	 * Platforms with unreliable TSCs don't have to deal with this, they
7699 	 * will be compensated by the logic in vcpu_load, which sets the TSC to
7700 	 * catchup mode.  This will catchup all VCPUs to real time, but cannot
7701 	 * guarantee that they stay in perfect synchronization.
7702 	 */
7703 	if (backwards_tsc) {
7704 		u64 delta_cyc = max_tsc - local_tsc;
7705 		backwards_tsc_observed = true;
7706 		list_for_each_entry(kvm, &vm_list, vm_list) {
7707 			kvm_for_each_vcpu(i, vcpu, kvm) {
7708 				vcpu->arch.tsc_offset_adjustment += delta_cyc;
7709 				vcpu->arch.last_host_tsc = local_tsc;
7710 				kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
7711 			}
7712 
7713 			/*
7714 			 * We have to disable TSC offset matching.. if you were
7715 			 * booting a VM while issuing an S4 host suspend....
7716 			 * you may have some problem.  Solving this issue is
7717 			 * left as an exercise to the reader.
7718 			 */
7719 			kvm->arch.last_tsc_nsec = 0;
7720 			kvm->arch.last_tsc_write = 0;
7721 		}
7722 
7723 	}
7724 	return 0;
7725 }
7726 
7727 void kvm_arch_hardware_disable(void)
7728 {
7729 	kvm_x86_ops->hardware_disable();
7730 	drop_user_return_notifiers();
7731 }
7732 
7733 int kvm_arch_hardware_setup(void)
7734 {
7735 	int r;
7736 
7737 	r = kvm_x86_ops->hardware_setup();
7738 	if (r != 0)
7739 		return r;
7740 
7741 	if (kvm_has_tsc_control) {
7742 		/*
7743 		 * Make sure the user can only configure tsc_khz values that
7744 		 * fit into a signed integer.
7745 		 * A min value is not calculated needed because it will always
7746 		 * be 1 on all machines.
7747 		 */
7748 		u64 max = min(0x7fffffffULL,
7749 			      __scale_tsc(kvm_max_tsc_scaling_ratio, tsc_khz));
7750 		kvm_max_guest_tsc_khz = max;
7751 
7752 		kvm_default_tsc_scaling_ratio = 1ULL << kvm_tsc_scaling_ratio_frac_bits;
7753 	}
7754 
7755 	kvm_init_msr_list();
7756 	return 0;
7757 }
7758 
7759 void kvm_arch_hardware_unsetup(void)
7760 {
7761 	kvm_x86_ops->hardware_unsetup();
7762 }
7763 
7764 void kvm_arch_check_processor_compat(void *rtn)
7765 {
7766 	kvm_x86_ops->check_processor_compatibility(rtn);
7767 }
7768 
7769 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
7770 {
7771 	return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id;
7772 }
7773 EXPORT_SYMBOL_GPL(kvm_vcpu_is_reset_bsp);
7774 
7775 bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
7776 {
7777 	return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0;
7778 }
7779 
7780 struct static_key kvm_no_apic_vcpu __read_mostly;
7781 EXPORT_SYMBOL_GPL(kvm_no_apic_vcpu);
7782 
7783 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
7784 {
7785 	struct page *page;
7786 	struct kvm *kvm;
7787 	int r;
7788 
7789 	BUG_ON(vcpu->kvm == NULL);
7790 	kvm = vcpu->kvm;
7791 
7792 	vcpu->arch.apicv_active = kvm_x86_ops->get_enable_apicv();
7793 	vcpu->arch.pv.pv_unhalted = false;
7794 	vcpu->arch.emulate_ctxt.ops = &emulate_ops;
7795 	if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_reset_bsp(vcpu))
7796 		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
7797 	else
7798 		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
7799 
7800 	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
7801 	if (!page) {
7802 		r = -ENOMEM;
7803 		goto fail;
7804 	}
7805 	vcpu->arch.pio_data = page_address(page);
7806 
7807 	kvm_set_tsc_khz(vcpu, max_tsc_khz);
7808 
7809 	r = kvm_mmu_create(vcpu);
7810 	if (r < 0)
7811 		goto fail_free_pio_data;
7812 
7813 	if (irqchip_in_kernel(kvm)) {
7814 		r = kvm_create_lapic(vcpu);
7815 		if (r < 0)
7816 			goto fail_mmu_destroy;
7817 	} else
7818 		static_key_slow_inc(&kvm_no_apic_vcpu);
7819 
7820 	vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
7821 				       GFP_KERNEL);
7822 	if (!vcpu->arch.mce_banks) {
7823 		r = -ENOMEM;
7824 		goto fail_free_lapic;
7825 	}
7826 	vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
7827 
7828 	if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL)) {
7829 		r = -ENOMEM;
7830 		goto fail_free_mce_banks;
7831 	}
7832 
7833 	fx_init(vcpu);
7834 
7835 	vcpu->arch.ia32_tsc_adjust_msr = 0x0;
7836 	vcpu->arch.pv_time_enabled = false;
7837 
7838 	vcpu->arch.guest_supported_xcr0 = 0;
7839 	vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
7840 
7841 	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
7842 
7843 	vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT;
7844 
7845 	kvm_async_pf_hash_reset(vcpu);
7846 	kvm_pmu_init(vcpu);
7847 
7848 	vcpu->arch.pending_external_vector = -1;
7849 
7850 	kvm_hv_vcpu_init(vcpu);
7851 
7852 	return 0;
7853 
7854 fail_free_mce_banks:
7855 	kfree(vcpu->arch.mce_banks);
7856 fail_free_lapic:
7857 	kvm_free_lapic(vcpu);
7858 fail_mmu_destroy:
7859 	kvm_mmu_destroy(vcpu);
7860 fail_free_pio_data:
7861 	free_page((unsigned long)vcpu->arch.pio_data);
7862 fail:
7863 	return r;
7864 }
7865 
7866 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
7867 {
7868 	int idx;
7869 
7870 	kvm_hv_vcpu_uninit(vcpu);
7871 	kvm_pmu_destroy(vcpu);
7872 	kfree(vcpu->arch.mce_banks);
7873 	kvm_free_lapic(vcpu);
7874 	idx = srcu_read_lock(&vcpu->kvm->srcu);
7875 	kvm_mmu_destroy(vcpu);
7876 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
7877 	free_page((unsigned long)vcpu->arch.pio_data);
7878 	if (!lapic_in_kernel(vcpu))
7879 		static_key_slow_dec(&kvm_no_apic_vcpu);
7880 }
7881 
7882 void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
7883 {
7884 	kvm_x86_ops->sched_in(vcpu, cpu);
7885 }
7886 
7887 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
7888 {
7889 	if (type)
7890 		return -EINVAL;
7891 
7892 	INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
7893 	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
7894 	INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
7895 	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
7896 	atomic_set(&kvm->arch.noncoherent_dma_count, 0);
7897 
7898 	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
7899 	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
7900 	/* Reserve bit 1 of irq_sources_bitmap for irqfd-resampler */
7901 	set_bit(KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
7902 		&kvm->arch.irq_sources_bitmap);
7903 
7904 	raw_spin_lock_init(&kvm->arch.tsc_write_lock);
7905 	mutex_init(&kvm->arch.apic_map_lock);
7906 	mutex_init(&kvm->arch.hyperv.hv_lock);
7907 	spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);
7908 
7909 	kvm->arch.kvmclock_offset = -ktime_get_boot_ns();
7910 	pvclock_update_vm_gtod_copy(kvm);
7911 
7912 	INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
7913 	INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
7914 
7915 	kvm_page_track_init(kvm);
7916 	kvm_mmu_init_vm(kvm);
7917 
7918 	if (kvm_x86_ops->vm_init)
7919 		return kvm_x86_ops->vm_init(kvm);
7920 
7921 	return 0;
7922 }
7923 
7924 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
7925 {
7926 	int r;
7927 	r = vcpu_load(vcpu);
7928 	BUG_ON(r);
7929 	kvm_mmu_unload(vcpu);
7930 	vcpu_put(vcpu);
7931 }
7932 
7933 static void kvm_free_vcpus(struct kvm *kvm)
7934 {
7935 	unsigned int i;
7936 	struct kvm_vcpu *vcpu;
7937 
7938 	/*
7939 	 * Unpin any mmu pages first.
7940 	 */
7941 	kvm_for_each_vcpu(i, vcpu, kvm) {
7942 		kvm_clear_async_pf_completion_queue(vcpu);
7943 		kvm_unload_vcpu_mmu(vcpu);
7944 	}
7945 	kvm_for_each_vcpu(i, vcpu, kvm)
7946 		kvm_arch_vcpu_free(vcpu);
7947 
7948 	mutex_lock(&kvm->lock);
7949 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
7950 		kvm->vcpus[i] = NULL;
7951 
7952 	atomic_set(&kvm->online_vcpus, 0);
7953 	mutex_unlock(&kvm->lock);
7954 }
7955 
7956 void kvm_arch_sync_events(struct kvm *kvm)
7957 {
7958 	cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work);
7959 	cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work);
7960 	kvm_free_all_assigned_devices(kvm);
7961 	kvm_free_pit(kvm);
7962 }
7963 
7964 int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
7965 {
7966 	int i, r;
7967 	unsigned long hva;
7968 	struct kvm_memslots *slots = kvm_memslots(kvm);
7969 	struct kvm_memory_slot *slot, old;
7970 
7971 	/* Called with kvm->slots_lock held.  */
7972 	if (WARN_ON(id >= KVM_MEM_SLOTS_NUM))
7973 		return -EINVAL;
7974 
7975 	slot = id_to_memslot(slots, id);
7976 	if (size) {
7977 		if (slot->npages)
7978 			return -EEXIST;
7979 
7980 		/*
7981 		 * MAP_SHARED to prevent internal slot pages from being moved
7982 		 * by fork()/COW.
7983 		 */
7984 		hva = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE,
7985 			      MAP_SHARED | MAP_ANONYMOUS, 0);
7986 		if (IS_ERR((void *)hva))
7987 			return PTR_ERR((void *)hva);
7988 	} else {
7989 		if (!slot->npages)
7990 			return 0;
7991 
7992 		hva = 0;
7993 	}
7994 
7995 	old = *slot;
7996 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
7997 		struct kvm_userspace_memory_region m;
7998 
7999 		m.slot = id | (i << 16);
8000 		m.flags = 0;
8001 		m.guest_phys_addr = gpa;
8002 		m.userspace_addr = hva;
8003 		m.memory_size = size;
8004 		r = __kvm_set_memory_region(kvm, &m);
8005 		if (r < 0)
8006 			return r;
8007 	}
8008 
8009 	if (!size) {
8010 		r = vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);
8011 		WARN_ON(r < 0);
8012 	}
8013 
8014 	return 0;
8015 }
8016 EXPORT_SYMBOL_GPL(__x86_set_memory_region);
8017 
8018 int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
8019 {
8020 	int r;
8021 
8022 	mutex_lock(&kvm->slots_lock);
8023 	r = __x86_set_memory_region(kvm, id, gpa, size);
8024 	mutex_unlock(&kvm->slots_lock);
8025 
8026 	return r;
8027 }
8028 EXPORT_SYMBOL_GPL(x86_set_memory_region);
8029 
8030 void kvm_arch_destroy_vm(struct kvm *kvm)
8031 {
8032 	if (current->mm == kvm->mm) {
8033 		/*
8034 		 * Free memory regions allocated on behalf of userspace,
8035 		 * unless the the memory map has changed due to process exit
8036 		 * or fd copying.
8037 		 */
8038 		x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 0, 0);
8039 		x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 0, 0);
8040 		x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
8041 	}
8042 	if (kvm_x86_ops->vm_destroy)
8043 		kvm_x86_ops->vm_destroy(kvm);
8044 	kvm_iommu_unmap_guest(kvm);
8045 	kfree(kvm->arch.vpic);
8046 	kfree(kvm->arch.vioapic);
8047 	kvm_free_vcpus(kvm);
8048 	kvfree(rcu_dereference_check(kvm->arch.apic_map, 1));
8049 	kvm_mmu_uninit_vm(kvm);
8050 }
8051 
8052 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
8053 			   struct kvm_memory_slot *dont)
8054 {
8055 	int i;
8056 
8057 	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
8058 		if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) {
8059 			kvfree(free->arch.rmap[i]);
8060 			free->arch.rmap[i] = NULL;
8061 		}
8062 		if (i == 0)
8063 			continue;
8064 
8065 		if (!dont || free->arch.lpage_info[i - 1] !=
8066 			     dont->arch.lpage_info[i - 1]) {
8067 			kvfree(free->arch.lpage_info[i - 1]);
8068 			free->arch.lpage_info[i - 1] = NULL;
8069 		}
8070 	}
8071 
8072 	kvm_page_track_free_memslot(free, dont);
8073 }
8074 
8075 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
8076 			    unsigned long npages)
8077 {
8078 	int i;
8079 
8080 	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
8081 		struct kvm_lpage_info *linfo;
8082 		unsigned long ugfn;
8083 		int lpages;
8084 		int level = i + 1;
8085 
8086 		lpages = gfn_to_index(slot->base_gfn + npages - 1,
8087 				      slot->base_gfn, level) + 1;
8088 
8089 		slot->arch.rmap[i] =
8090 			kvm_kvzalloc(lpages * sizeof(*slot->arch.rmap[i]));
8091 		if (!slot->arch.rmap[i])
8092 			goto out_free;
8093 		if (i == 0)
8094 			continue;
8095 
8096 		linfo = kvm_kvzalloc(lpages * sizeof(*linfo));
8097 		if (!linfo)
8098 			goto out_free;
8099 
8100 		slot->arch.lpage_info[i - 1] = linfo;
8101 
8102 		if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
8103 			linfo[0].disallow_lpage = 1;
8104 		if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
8105 			linfo[lpages - 1].disallow_lpage = 1;
8106 		ugfn = slot->userspace_addr >> PAGE_SHIFT;
8107 		/*
8108 		 * If the gfn and userspace address are not aligned wrt each
8109 		 * other, or if explicitly asked to, disable large page
8110 		 * support for this slot
8111 		 */
8112 		if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
8113 		    !kvm_largepages_enabled()) {
8114 			unsigned long j;
8115 
8116 			for (j = 0; j < lpages; ++j)
8117 				linfo[j].disallow_lpage = 1;
8118 		}
8119 	}
8120 
8121 	if (kvm_page_track_create_memslot(slot, npages))
8122 		goto out_free;
8123 
8124 	return 0;
8125 
8126 out_free:
8127 	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
8128 		kvfree(slot->arch.rmap[i]);
8129 		slot->arch.rmap[i] = NULL;
8130 		if (i == 0)
8131 			continue;
8132 
8133 		kvfree(slot->arch.lpage_info[i - 1]);
8134 		slot->arch.lpage_info[i - 1] = NULL;
8135 	}
8136 	return -ENOMEM;
8137 }
8138 
8139 void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
8140 {
8141 	/*
8142 	 * memslots->generation has been incremented.
8143 	 * mmio generation may have reached its maximum value.
8144 	 */
8145 	kvm_mmu_invalidate_mmio_sptes(kvm, slots);
8146 }
8147 
8148 int kvm_arch_prepare_memory_region(struct kvm *kvm,
8149 				struct kvm_memory_slot *memslot,
8150 				const struct kvm_userspace_memory_region *mem,
8151 				enum kvm_mr_change change)
8152 {
8153 	return 0;
8154 }
8155 
8156 static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
8157 				     struct kvm_memory_slot *new)
8158 {
8159 	/* Still write protect RO slot */
8160 	if (new->flags & KVM_MEM_READONLY) {
8161 		kvm_mmu_slot_remove_write_access(kvm, new);
8162 		return;
8163 	}
8164 
8165 	/*
8166 	 * Call kvm_x86_ops dirty logging hooks when they are valid.
8167 	 *
8168 	 * kvm_x86_ops->slot_disable_log_dirty is called when:
8169 	 *
8170 	 *  - KVM_MR_CREATE with dirty logging is disabled
8171 	 *  - KVM_MR_FLAGS_ONLY with dirty logging is disabled in new flag
8172 	 *
8173 	 * The reason is, in case of PML, we need to set D-bit for any slots
8174 	 * with dirty logging disabled in order to eliminate unnecessary GPA
8175 	 * logging in PML buffer (and potential PML buffer full VMEXT). This
8176 	 * guarantees leaving PML enabled during guest's lifetime won't have
8177 	 * any additonal overhead from PML when guest is running with dirty
8178 	 * logging disabled for memory slots.
8179 	 *
8180 	 * kvm_x86_ops->slot_enable_log_dirty is called when switching new slot
8181 	 * to dirty logging mode.
8182 	 *
8183 	 * If kvm_x86_ops dirty logging hooks are invalid, use write protect.
8184 	 *
8185 	 * In case of write protect:
8186 	 *
8187 	 * Write protect all pages for dirty logging.
8188 	 *
8189 	 * All the sptes including the large sptes which point to this
8190 	 * slot are set to readonly. We can not create any new large
8191 	 * spte on this slot until the end of the logging.
8192 	 *
8193 	 * See the comments in fast_page_fault().
8194 	 */
8195 	if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
8196 		if (kvm_x86_ops->slot_enable_log_dirty)
8197 			kvm_x86_ops->slot_enable_log_dirty(kvm, new);
8198 		else
8199 			kvm_mmu_slot_remove_write_access(kvm, new);
8200 	} else {
8201 		if (kvm_x86_ops->slot_disable_log_dirty)
8202 			kvm_x86_ops->slot_disable_log_dirty(kvm, new);
8203 	}
8204 }
8205 
8206 void kvm_arch_commit_memory_region(struct kvm *kvm,
8207 				const struct kvm_userspace_memory_region *mem,
8208 				const struct kvm_memory_slot *old,
8209 				const struct kvm_memory_slot *new,
8210 				enum kvm_mr_change change)
8211 {
8212 	int nr_mmu_pages = 0;
8213 
8214 	if (!kvm->arch.n_requested_mmu_pages)
8215 		nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
8216 
8217 	if (nr_mmu_pages)
8218 		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
8219 
8220 	/*
8221 	 * Dirty logging tracks sptes in 4k granularity, meaning that large
8222 	 * sptes have to be split.  If live migration is successful, the guest
8223 	 * in the source machine will be destroyed and large sptes will be
8224 	 * created in the destination. However, if the guest continues to run
8225 	 * in the source machine (for example if live migration fails), small
8226 	 * sptes will remain around and cause bad performance.
8227 	 *
8228 	 * Scan sptes if dirty logging has been stopped, dropping those
8229 	 * which can be collapsed into a single large-page spte.  Later
8230 	 * page faults will create the large-page sptes.
8231 	 */
8232 	if ((change != KVM_MR_DELETE) &&
8233 		(old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
8234 		!(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
8235 		kvm_mmu_zap_collapsible_sptes(kvm, new);
8236 
8237 	/*
8238 	 * Set up write protection and/or dirty logging for the new slot.
8239 	 *
8240 	 * For KVM_MR_DELETE and KVM_MR_MOVE, the shadow pages of old slot have
8241 	 * been zapped so no dirty logging staff is needed for old slot. For
8242 	 * KVM_MR_FLAGS_ONLY, the old slot is essentially the same one as the
8243 	 * new and it's also covered when dealing with the new slot.
8244 	 *
8245 	 * FIXME: const-ify all uses of struct kvm_memory_slot.
8246 	 */
8247 	if (change != KVM_MR_DELETE)
8248 		kvm_mmu_slot_apply_flags(kvm, (struct kvm_memory_slot *) new);
8249 }
8250 
8251 void kvm_arch_flush_shadow_all(struct kvm *kvm)
8252 {
8253 	kvm_mmu_invalidate_zap_all_pages(kvm);
8254 }
8255 
8256 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
8257 				   struct kvm_memory_slot *slot)
8258 {
8259 	kvm_page_track_flush_slot(kvm, slot);
8260 }
8261 
8262 static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
8263 {
8264 	if (!list_empty_careful(&vcpu->async_pf.done))
8265 		return true;
8266 
8267 	if (kvm_apic_has_events(vcpu))
8268 		return true;
8269 
8270 	if (vcpu->arch.pv.pv_unhalted)
8271 		return true;
8272 
8273 	if (atomic_read(&vcpu->arch.nmi_queued))
8274 		return true;
8275 
8276 	if (test_bit(KVM_REQ_SMI, &vcpu->requests))
8277 		return true;
8278 
8279 	if (kvm_arch_interrupt_allowed(vcpu) &&
8280 	    kvm_cpu_has_interrupt(vcpu))
8281 		return true;
8282 
8283 	if (kvm_hv_has_stimer_pending(vcpu))
8284 		return true;
8285 
8286 	return false;
8287 }
8288 
8289 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
8290 {
8291 	if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
8292 		kvm_x86_ops->check_nested_events(vcpu, false);
8293 
8294 	return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
8295 }
8296 
8297 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
8298 {
8299 	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
8300 }
8301 
8302 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
8303 {
8304 	return kvm_x86_ops->interrupt_allowed(vcpu);
8305 }
8306 
8307 unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu)
8308 {
8309 	if (is_64_bit_mode(vcpu))
8310 		return kvm_rip_read(vcpu);
8311 	return (u32)(get_segment_base(vcpu, VCPU_SREG_CS) +
8312 		     kvm_rip_read(vcpu));
8313 }
8314 EXPORT_SYMBOL_GPL(kvm_get_linear_rip);
8315 
8316 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
8317 {
8318 	return kvm_get_linear_rip(vcpu) == linear_rip;
8319 }
8320 EXPORT_SYMBOL_GPL(kvm_is_linear_rip);
8321 
8322 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
8323 {
8324 	unsigned long rflags;
8325 
8326 	rflags = kvm_x86_ops->get_rflags(vcpu);
8327 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
8328 		rflags &= ~X86_EFLAGS_TF;
8329 	return rflags;
8330 }
8331 EXPORT_SYMBOL_GPL(kvm_get_rflags);
8332 
8333 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
8334 {
8335 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
8336 	    kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
8337 		rflags |= X86_EFLAGS_TF;
8338 	kvm_x86_ops->set_rflags(vcpu, rflags);
8339 }
8340 
8341 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
8342 {
8343 	__kvm_set_rflags(vcpu, rflags);
8344 	kvm_make_request(KVM_REQ_EVENT, vcpu);
8345 }
8346 EXPORT_SYMBOL_GPL(kvm_set_rflags);
8347 
8348 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
8349 {
8350 	int r;
8351 
8352 	if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) ||
8353 	      work->wakeup_all)
8354 		return;
8355 
8356 	r = kvm_mmu_reload(vcpu);
8357 	if (unlikely(r))
8358 		return;
8359 
8360 	if (!vcpu->arch.mmu.direct_map &&
8361 	      work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu))
8362 		return;
8363 
8364 	vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
8365 }
8366 
8367 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
8368 {
8369 	return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
8370 }
8371 
8372 static inline u32 kvm_async_pf_next_probe(u32 key)
8373 {
8374 	return (key + 1) & (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1);
8375 }
8376 
8377 static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
8378 {
8379 	u32 key = kvm_async_pf_hash_fn(gfn);
8380 
8381 	while (vcpu->arch.apf.gfns[key] != ~0)
8382 		key = kvm_async_pf_next_probe(key);
8383 
8384 	vcpu->arch.apf.gfns[key] = gfn;
8385 }
8386 
8387 static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
8388 {
8389 	int i;
8390 	u32 key = kvm_async_pf_hash_fn(gfn);
8391 
8392 	for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) &&
8393 		     (vcpu->arch.apf.gfns[key] != gfn &&
8394 		      vcpu->arch.apf.gfns[key] != ~0); i++)
8395 		key = kvm_async_pf_next_probe(key);
8396 
8397 	return key;
8398 }
8399 
8400 bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
8401 {
8402 	return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
8403 }
8404 
8405 static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
8406 {
8407 	u32 i, j, k;
8408 
8409 	i = j = kvm_async_pf_gfn_slot(vcpu, gfn);
8410 	while (true) {
8411 		vcpu->arch.apf.gfns[i] = ~0;
8412 		do {
8413 			j = kvm_async_pf_next_probe(j);
8414 			if (vcpu->arch.apf.gfns[j] == ~0)
8415 				return;
8416 			k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]);
8417 			/*
8418 			 * k lies cyclically in ]i,j]
8419 			 * |    i.k.j |
8420 			 * |....j i.k.| or  |.k..j i...|
8421 			 */
8422 		} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
8423 		vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j];
8424 		i = j;
8425 	}
8426 }
8427 
8428 static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
8429 {
8430 
8431 	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
8432 				      sizeof(val));
8433 }
8434 
8435 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
8436 				     struct kvm_async_pf *work)
8437 {
8438 	struct x86_exception fault;
8439 
8440 	trace_kvm_async_pf_not_present(work->arch.token, work->gva);
8441 	kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
8442 
8443 	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) ||
8444 	    (vcpu->arch.apf.send_user_only &&
8445 	     kvm_x86_ops->get_cpl(vcpu) == 0))
8446 		kvm_make_request(KVM_REQ_APF_HALT, vcpu);
8447 	else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
8448 		fault.vector = PF_VECTOR;
8449 		fault.error_code_valid = true;
8450 		fault.error_code = 0;
8451 		fault.nested_page_fault = false;
8452 		fault.address = work->arch.token;
8453 		kvm_inject_page_fault(vcpu, &fault);
8454 	}
8455 }
8456 
8457 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
8458 				 struct kvm_async_pf *work)
8459 {
8460 	struct x86_exception fault;
8461 
8462 	trace_kvm_async_pf_ready(work->arch.token, work->gva);
8463 	if (work->wakeup_all)
8464 		work->arch.token = ~0; /* broadcast wakeup */
8465 	else
8466 		kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
8467 
8468 	if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
8469 	    !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
8470 		fault.vector = PF_VECTOR;
8471 		fault.error_code_valid = true;
8472 		fault.error_code = 0;
8473 		fault.nested_page_fault = false;
8474 		fault.address = work->arch.token;
8475 		kvm_inject_page_fault(vcpu, &fault);
8476 	}
8477 	vcpu->arch.apf.halted = false;
8478 	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
8479 }
8480 
8481 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
8482 {
8483 	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
8484 		return true;
8485 	else
8486 		return !kvm_event_needs_reinjection(vcpu) &&
8487 			kvm_x86_ops->interrupt_allowed(vcpu);
8488 }
8489 
8490 void kvm_arch_start_assignment(struct kvm *kvm)
8491 {
8492 	atomic_inc(&kvm->arch.assigned_device_count);
8493 }
8494 EXPORT_SYMBOL_GPL(kvm_arch_start_assignment);
8495 
8496 void kvm_arch_end_assignment(struct kvm *kvm)
8497 {
8498 	atomic_dec(&kvm->arch.assigned_device_count);
8499 }
8500 EXPORT_SYMBOL_GPL(kvm_arch_end_assignment);
8501 
8502 bool kvm_arch_has_assigned_device(struct kvm *kvm)
8503 {
8504 	return atomic_read(&kvm->arch.assigned_device_count);
8505 }
8506 EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device);
8507 
8508 void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
8509 {
8510 	atomic_inc(&kvm->arch.noncoherent_dma_count);
8511 }
8512 EXPORT_SYMBOL_GPL(kvm_arch_register_noncoherent_dma);
8513 
8514 void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
8515 {
8516 	atomic_dec(&kvm->arch.noncoherent_dma_count);
8517 }
8518 EXPORT_SYMBOL_GPL(kvm_arch_unregister_noncoherent_dma);
8519 
8520 bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
8521 {
8522 	return atomic_read(&kvm->arch.noncoherent_dma_count);
8523 }
8524 EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma);
8525 
8526 bool kvm_arch_has_irq_bypass(void)
8527 {
8528 	return kvm_x86_ops->update_pi_irte != NULL;
8529 }
8530 
8531 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
8532 				      struct irq_bypass_producer *prod)
8533 {
8534 	struct kvm_kernel_irqfd *irqfd =
8535 		container_of(cons, struct kvm_kernel_irqfd, consumer);
8536 
8537 	irqfd->producer = prod;
8538 
8539 	return kvm_x86_ops->update_pi_irte(irqfd->kvm,
8540 					   prod->irq, irqfd->gsi, 1);
8541 }
8542 
8543 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
8544 				      struct irq_bypass_producer *prod)
8545 {
8546 	int ret;
8547 	struct kvm_kernel_irqfd *irqfd =
8548 		container_of(cons, struct kvm_kernel_irqfd, consumer);
8549 
8550 	WARN_ON(irqfd->producer != prod);
8551 	irqfd->producer = NULL;
8552 
8553 	/*
8554 	 * When producer of consumer is unregistered, we change back to
8555 	 * remapped mode, so we can re-use the current implementation
8556 	 * when the irq is masked/disabled or the consumer side (KVM
8557 	 * int this case doesn't want to receive the interrupts.
8558 	*/
8559 	ret = kvm_x86_ops->update_pi_irte(irqfd->kvm, prod->irq, irqfd->gsi, 0);
8560 	if (ret)
8561 		printk(KERN_INFO "irq bypass consumer (token %p) unregistration"
8562 		       " fails: %d\n", irqfd->consumer.token, ret);
8563 }
8564 
8565 int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
8566 				   uint32_t guest_irq, bool set)
8567 {
8568 	if (!kvm_x86_ops->update_pi_irte)
8569 		return -EINVAL;
8570 
8571 	return kvm_x86_ops->update_pi_irte(kvm, host_irq, guest_irq, set);
8572 }
8573 
8574 bool kvm_vector_hashing_enabled(void)
8575 {
8576 	return vector_hashing;
8577 }
8578 EXPORT_SYMBOL_GPL(kvm_vector_hashing_enabled);
8579 
8580 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
8581 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
8582 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
8583 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
8584 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
8585 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
8586 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun);
8587 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
8588 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
8589 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
8590 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
8591 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
8592 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
8593 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_write_tsc_offset);
8594 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ple_window);
8595 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pml_full);
8596 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pi_irte_update);
8597 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_unaccelerated_access);
8598 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_incomplete_ipi);
8599