xref: /linux/arch/x86/kvm/svm/nested.c (revision 6607aa6f6b68fc9b5955755f1b1be125cf2a9d03)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Kernel-based Virtual Machine driver for Linux
4  *
5  * AMD SVM support
6  *
7  * Copyright (C) 2006 Qumranet, Inc.
8  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
9  *
10  * Authors:
11  *   Yaniv Kamay  <yaniv@qumranet.com>
12  *   Avi Kivity   <avi@qumranet.com>
13  */
14 
15 #define pr_fmt(fmt) "SVM: " fmt
16 
17 #include <linux/kvm_types.h>
18 #include <linux/kvm_host.h>
19 #include <linux/kernel.h>
20 
21 #include <asm/msr-index.h>
22 #include <asm/debugreg.h>
23 
24 #include "kvm_emulate.h"
25 #include "trace.h"
26 #include "mmu.h"
27 #include "x86.h"
28 #include "cpuid.h"
29 #include "lapic.h"
30 #include "svm.h"
31 
32 static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
33 				       struct x86_exception *fault)
34 {
35 	struct vcpu_svm *svm = to_svm(vcpu);
36 
37 	if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
38 		/*
39 		 * TODO: track the cause of the nested page fault, and
40 		 * correctly fill in the high bits of exit_info_1.
41 		 */
42 		svm->vmcb->control.exit_code = SVM_EXIT_NPF;
43 		svm->vmcb->control.exit_code_hi = 0;
44 		svm->vmcb->control.exit_info_1 = (1ULL << 32);
45 		svm->vmcb->control.exit_info_2 = fault->address;
46 	}
47 
48 	svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
49 	svm->vmcb->control.exit_info_1 |= fault->error_code;
50 
51 	nested_svm_vmexit(svm);
52 }
53 
54 static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
55 {
56 	struct vcpu_svm *svm = to_svm(vcpu);
57 	u64 cr3 = svm->nested.ctl.nested_cr3;
58 	u64 pdpte;
59 	int ret;
60 
61 	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(__sme_clr(cr3)), &pdpte,
62 				       offset_in_page(cr3) + index * 8, 8);
63 	if (ret)
64 		return 0;
65 	return pdpte;
66 }
67 
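/*
 * Sketch of the PDPTE lookup math used above (illustrative only; the
 * sketch_ name is not a kernel symbol).  In PAE mode, CR3 holds a
 * 32-byte-aligned pointer to a table of four 8-byte PDPTEs, so the whole
 * table fits in a single page and entry @index lives at cr3 + index * 8,
 * which is exactly gfn(cr3) plus offset_in_page(cr3) + index * 8.
 */
static inline u64 sketch_pdpte_gpa(u64 pae_cr3, int index)
{
	/* bits 4:0 of a PAE CR3 are ignored by hardware */
	return (pae_cr3 & ~0x1fULL) + index * 8;
}
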
68 static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
69 {
70 	struct vcpu_svm *svm = to_svm(vcpu);
71 
72 	return svm->nested.ctl.nested_cr3;
73 }
74 
75 static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
76 {
77 	struct vcpu_svm *svm = to_svm(vcpu);
78 	struct vmcb *hsave = svm->nested.hsave;
79 
80 	WARN_ON(mmu_is_nested(vcpu));
81 
82 	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
83 	kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, hsave->save.cr4, hsave->save.efer,
84 				svm->nested.ctl.nested_cr3);
85 	vcpu->arch.mmu->get_guest_pgd     = nested_svm_get_tdp_cr3;
86 	vcpu->arch.mmu->get_pdptr         = nested_svm_get_tdp_pdptr;
87 	vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
88 	reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
89 	vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;
90 }
91 
92 static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
93 {
94 	vcpu->arch.mmu = &vcpu->arch.root_mmu;
95 	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
96 }
97 
98 void recalc_intercepts(struct vcpu_svm *svm)
99 {
100 	struct vmcb_control_area *c, *h, *g;
101 	unsigned int i;
102 
103 	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
104 
105 	if (!is_guest_mode(&svm->vcpu))
106 		return;
107 
108 	c = &svm->vmcb->control;
109 	h = &svm->nested.hsave->control;
110 	g = &svm->nested.ctl;
111 
112 	for (i = 0; i < MAX_INTERCEPT; i++)
113 		c->intercepts[i] = h->intercepts[i];
114 
115 	if (g->int_ctl & V_INTR_MASKING_MASK) {
116 		/* We only want the cr8 intercept bits of L1 */
117 		vmcb_clr_intercept(c, INTERCEPT_CR8_READ);
118 		vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE);
119 
120 		/*
121 		 * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
122 		 * affect any interrupt we may want to inject; therefore,
123 		 * interrupt window vmexits are irrelevant to L0.
124 		 */
125 		vmcb_clr_intercept(c, INTERCEPT_VINTR);
126 	}
127 
128 	/* We don't want to see VMMCALLs from a nested guest */
129 	vmcb_clr_intercept(c, INTERCEPT_VMMCALL);
130 
131 	for (i = 0; i < MAX_INTERCEPT; i++)
132 		c->intercepts[i] |= g->intercepts[i];
133 }
134 
135 static void copy_vmcb_control_area(struct vmcb_control_area *dst,
136 				   struct vmcb_control_area *from)
137 {
138 	unsigned int i;
139 
140 	for (i = 0; i < MAX_INTERCEPT; i++)
141 		dst->intercepts[i] = from->intercepts[i];
142 
143 	dst->iopm_base_pa         = from->iopm_base_pa;
144 	dst->msrpm_base_pa        = from->msrpm_base_pa;
145 	dst->tsc_offset           = from->tsc_offset;
146 	/* asid not copied, it is handled manually for svm->vmcb.  */
147 	dst->tlb_ctl              = from->tlb_ctl;
148 	dst->int_ctl              = from->int_ctl;
149 	dst->int_vector           = from->int_vector;
150 	dst->int_state            = from->int_state;
151 	dst->exit_code            = from->exit_code;
152 	dst->exit_code_hi         = from->exit_code_hi;
153 	dst->exit_info_1          = from->exit_info_1;
154 	dst->exit_info_2          = from->exit_info_2;
155 	dst->exit_int_info        = from->exit_int_info;
156 	dst->exit_int_info_err    = from->exit_int_info_err;
157 	dst->nested_ctl           = from->nested_ctl;
158 	dst->event_inj            = from->event_inj;
159 	dst->event_inj_err        = from->event_inj_err;
160 	dst->nested_cr3           = from->nested_cr3;
161 	dst->virt_ext             = from->virt_ext;
162 	dst->pause_filter_count   = from->pause_filter_count;
163 	dst->pause_filter_thresh  = from->pause_filter_thresh;
164 }
165 
166 static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
167 {
168 	/*
169 	 * This function merges the MSR permission bitmaps of KVM and the
170 	 * nested VMCB. It is optimized in that it only merges the parts
171 	 * where the KVM MSR permission bitmap may contain zero bits.
172 	 */
173 	int i;
174 
175 	if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
176 		return true;
177 
178 	for (i = 0; i < MSRPM_OFFSETS; i++) {
179 		u32 value, p;
180 		u64 offset;
181 
182 		if (msrpm_offsets[i] == 0xffffffff)
183 			break;
184 
185 		p      = msrpm_offsets[i];
186 		offset = svm->nested.ctl.msrpm_base_pa + (p * 4);
187 
188 		if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
189 			return false;
190 
191 		svm->nested.msrpm[p] = svm->msrpm[p] | value;
192 	}
193 
194 	svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));
195 
196 	return true;
197 }
198 
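/*
 * Merge-semantics sketch for nested_svm_vmrun_msrpm() above (illustrative
 * only; the sketch_ name is not a kernel symbol).  L0 must get an MSR
 * vmexit from L2 whenever either L0 or L1 intercepts the MSR, so each
 * 32-bit chunk of the effective bitmap is the bitwise union of L0's and
 * L1's chunks.
 */
static inline u32 sketch_merge_msrpm_word(u32 l0_bits, u32 l1_bits)
{
	return l0_bits | l1_bits;	/* intercept iff either level asks */
}
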
199 static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
200 {
201 	struct vcpu_svm *svm = to_svm(vcpu);
202 
203 	if (!nested_svm_vmrun_msrpm(svm)) {
204 		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
205 		vcpu->run->internal.suberror =
206 			KVM_INTERNAL_ERROR_EMULATION;
207 		vcpu->run->internal.ndata = 0;
208 		return false;
209 	}
210 
211 	return true;
212 }
213 
214 static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
215 {
216 	if ((vmcb_is_intercept(control, INTERCEPT_VMRUN)) == 0)
217 		return false;
218 
219 	if (control->asid == 0)
220 		return false;
221 
222 	if ((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) &&
223 	    !npt_enabled)
224 		return false;
225 
226 	return true;
227 }
228 
229 static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
230 {
231 	bool vmcb12_lma;
232 
233 	if ((vmcb12->save.efer & EFER_SVME) == 0)
234 		return false;
235 
236 	if (((vmcb12->save.cr0 & X86_CR0_CD) == 0) && (vmcb12->save.cr0 & X86_CR0_NW))
237 		return false;
238 
239 	if (!kvm_dr6_valid(vmcb12->save.dr6) || !kvm_dr7_valid(vmcb12->save.dr7))
240 		return false;
241 
242 	vmcb12_lma = (vmcb12->save.efer & EFER_LME) && (vmcb12->save.cr0 & X86_CR0_PG);
243 
244 	if (!vmcb12_lma) {
245 		if (vmcb12->save.cr4 & X86_CR4_PAE) {
246 			if (vmcb12->save.cr3 & MSR_CR3_LEGACY_PAE_RESERVED_MASK)
247 				return false;
248 		} else {
249 			if (vmcb12->save.cr3 & MSR_CR3_LEGACY_RESERVED_MASK)
250 				return false;
251 		}
252 	} else {
253 		if (!(vmcb12->save.cr4 & X86_CR4_PAE) ||
254 		    !(vmcb12->save.cr0 & X86_CR0_PE) ||
255 		    (vmcb12->save.cr3 & MSR_CR3_LONG_MBZ_MASK))
256 			return false;
257 	}
258 	if (!kvm_is_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
259 		return false;
260 
261 	return nested_vmcb_check_controls(&vmcb12->control);
262 }
263 
264 static void load_nested_vmcb_control(struct vcpu_svm *svm,
265 				     struct vmcb_control_area *control)
266 {
267 	copy_vmcb_control_area(&svm->nested.ctl, control);
268 
269 	/* Copy it here because nested_vmcb_check_controls will check it.  */
270 	svm->nested.ctl.asid           = control->asid;
271 	svm->nested.ctl.msrpm_base_pa &= ~0x0fffULL;
272 	svm->nested.ctl.iopm_base_pa  &= ~0x0fffULL;
273 }
274 
275 /*
276  * Synchronize fields that are written by the processor, so that
277  * they can be copied back into the nested_vmcb.
278  */
279 void sync_nested_vmcb_control(struct vcpu_svm *svm)
280 {
281 	u32 mask;
282 	svm->nested.ctl.event_inj      = svm->vmcb->control.event_inj;
283 	svm->nested.ctl.event_inj_err  = svm->vmcb->control.event_inj_err;
284 
285 	/* Only a few fields of int_ctl are written by the processor.  */
286 	mask = V_IRQ_MASK | V_TPR_MASK;
287 	if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) &&
288 	    svm_is_intercept(svm, INTERCEPT_VINTR)) {
289 		/*
290 		 * In order to request an interrupt window, L0 is usurping
291 		 * svm->vmcb->control.int_ctl and possibly setting V_IRQ
292 		 * even if it was clear in L1's VMCB.  Restoring it would be
293 		 * wrong.  However, in this case V_IRQ will remain true until
294 		 * interrupt_window_interception calls svm_clear_vintr and
295 		 * restores int_ctl.  We can just leave it aside.
296 		 */
297 		mask &= ~V_IRQ_MASK;
298 	}
299 	svm->nested.ctl.int_ctl        &= ~mask;
300 	svm->nested.ctl.int_ctl        |= svm->vmcb->control.int_ctl & mask;
301 }
302 
303 /*
304  * Transfer any event that L0 or L1 wanted to inject into L2 to
305  * EXIT_INT_INFO.
306  */
307 static void nested_vmcb_save_pending_event(struct vcpu_svm *svm,
308 					   struct vmcb *vmcb12)
309 {
310 	struct kvm_vcpu *vcpu = &svm->vcpu;
311 	u32 exit_int_info = 0;
312 	unsigned int nr;
313 
314 	if (vcpu->arch.exception.injected) {
315 		nr = vcpu->arch.exception.nr;
316 		exit_int_info = nr | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;
317 
318 		if (vcpu->arch.exception.has_error_code) {
319 			exit_int_info |= SVM_EVTINJ_VALID_ERR;
320 			vmcb12->control.exit_int_info_err =
321 				vcpu->arch.exception.error_code;
322 		}
323 
324 	} else if (vcpu->arch.nmi_injected) {
325 		exit_int_info = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
326 
327 	} else if (vcpu->arch.interrupt.injected) {
328 		nr = vcpu->arch.interrupt.nr;
329 		exit_int_info = nr | SVM_EVTINJ_VALID;
330 
331 		if (vcpu->arch.interrupt.soft)
332 			exit_int_info |= SVM_EVTINJ_TYPE_SOFT;
333 		else
334 			exit_int_info |= SVM_EVTINJ_TYPE_INTR;
335 	}
336 
337 	vmcb12->control.exit_int_info = exit_int_info;
338 }
339 
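/*
 * Encoding sketch for the EXITINTINFO word built above (illustrative
 * only; the sketch_ name is not a kernel symbol).  The format matches
 * EVENTINJ: vector in bits 7:0, delivery type in bits 10:8, the
 * error-code-valid flag in bit 11 and the valid flag in bit 31, which is
 * what the SVM_EVTINJ_* constants spell out.
 */
static inline u32 sketch_exit_int_info(u8 vector, u32 type, bool has_err)
{
	return vector | type | (has_err ? SVM_EVTINJ_VALID_ERR : 0) |
	       SVM_EVTINJ_VALID;
}
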
340 static inline bool nested_npt_enabled(struct vcpu_svm *svm)
341 {
342 	return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
343 }
344 
345 /*
346  * Load guest's/host's cr3 on nested vmentry or vmexit. @nested_npt is true
347  * if we are emulating VM-Entry into a guest with NPT enabled.
348  */
349 static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
350 			       bool nested_npt)
351 {
352 	if (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 63))
353 		return -EINVAL;
354 
355 	if (!nested_npt && is_pae_paging(vcpu) &&
356 	    (cr3 != kvm_read_cr3(vcpu) || pdptrs_changed(vcpu))) {
357 		if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
358 			return -EINVAL;
359 	}
360 
361 	/*
362 	 * TODO: optimize unconditional TLB flush/MMU sync here and in
363 	 * kvm_init_shadow_npt_mmu().
364 	 */
365 	if (!nested_npt)
366 		kvm_mmu_new_pgd(vcpu, cr3, false, false);
367 
368 	vcpu->arch.cr3 = cr3;
369 	kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
370 
371 	kvm_init_mmu(vcpu, false);
372 
373 	return 0;
374 }
375 
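/*
 * Sketch of the reserved-bit test above (illustrative only; the sketch_
 * name is not a kernel symbol).  rsvd_bits(s, e) builds a mask with bits
 * s..e inclusive set, so the check rejects any CR3 that sets a physical
 * address bit at or above the guest's MAXPHYADDR.
 */
static inline u64 sketch_rsvd_bits(int s, int e)
{
	return ((2ULL << (e - s)) - 1) << s;	/* bits s..e inclusive */
}
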
376 static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
377 {
378 	/* Load the nested guest state */
379 	svm->vmcb->save.es = vmcb12->save.es;
380 	svm->vmcb->save.cs = vmcb12->save.cs;
381 	svm->vmcb->save.ss = vmcb12->save.ss;
382 	svm->vmcb->save.ds = vmcb12->save.ds;
383 	svm->vmcb->save.gdtr = vmcb12->save.gdtr;
384 	svm->vmcb->save.idtr = vmcb12->save.idtr;
385 	kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);
386 	svm_set_efer(&svm->vcpu, vmcb12->save.efer);
387 	svm_set_cr0(&svm->vcpu, vmcb12->save.cr0);
388 	svm_set_cr4(&svm->vcpu, vmcb12->save.cr4);
389 	svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = vmcb12->save.cr2;
390 	kvm_rax_write(&svm->vcpu, vmcb12->save.rax);
391 	kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp);
392 	kvm_rip_write(&svm->vcpu, vmcb12->save.rip);
393 
394 	/* In case we don't even reach vcpu_run, the fields are not updated */
395 	svm->vmcb->save.rax = vmcb12->save.rax;
396 	svm->vmcb->save.rsp = vmcb12->save.rsp;
397 	svm->vmcb->save.rip = vmcb12->save.rip;
398 	svm->vmcb->save.dr7 = vmcb12->save.dr7 | DR7_FIXED_1;
399 	svm->vcpu.arch.dr6  = vmcb12->save.dr6 | DR6_FIXED_1 | DR6_RTM;
400 	svm->vmcb->save.cpl = vmcb12->save.cpl;
401 }
402 
403 static void nested_prepare_vmcb_control(struct vcpu_svm *svm)
404 {
405 	const u32 mask = V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK;
406 
407 	if (nested_npt_enabled(svm))
408 		nested_svm_init_mmu_context(&svm->vcpu);
409 
410 	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
411 		svm->vcpu.arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;
412 
413 	svm->vmcb->control.int_ctl             =
414 		(svm->nested.ctl.int_ctl & ~mask) |
415 		(svm->nested.hsave->control.int_ctl & mask);
416 
417 	svm->vmcb->control.virt_ext            = svm->nested.ctl.virt_ext;
418 	svm->vmcb->control.int_vector          = svm->nested.ctl.int_vector;
419 	svm->vmcb->control.int_state           = svm->nested.ctl.int_state;
420 	svm->vmcb->control.event_inj           = svm->nested.ctl.event_inj;
421 	svm->vmcb->control.event_inj_err       = svm->nested.ctl.event_inj_err;
422 
423 	svm->vmcb->control.pause_filter_count  = svm->nested.ctl.pause_filter_count;
424 	svm->vmcb->control.pause_filter_thresh = svm->nested.ctl.pause_filter_thresh;
425 
426 	/* Enter Guest-Mode */
427 	enter_guest_mode(&svm->vcpu);
428 
429 	/*
430 	 * Merge guest and host intercepts - must be called with vcpu in
431 	 * guest-mode to take effect here
432 	 */
433 	recalc_intercepts(svm);
434 
435 	vmcb_mark_all_dirty(svm->vmcb);
436 }
437 
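/*
 * The int_ctl merge above is the classic bit-select idiom (illustrative
 * sketch only; the sketch_ name is not a kernel symbol): take the @mask
 * bits from L1's saved value and everything else from the nested control
 * block.
 */
static inline u32 sketch_bit_select(u32 nested, u32 host, u32 mask)
{
	return (nested & ~mask) | (host & mask);
}
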
438 int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa,
439 			 struct vmcb *vmcb12)
440 {
441 	int ret;
442 
443 	svm->nested.vmcb12_gpa = vmcb12_gpa;
444 	load_nested_vmcb_control(svm, &vmcb12->control);
445 	nested_prepare_vmcb_save(svm, vmcb12);
446 	nested_prepare_vmcb_control(svm);
447 
448 	ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
449 				  nested_npt_enabled(svm));
450 	if (ret)
451 		return ret;
452 
453 	svm_set_gif(svm, true);
454 
455 	return 0;
456 }
457 
458 int nested_svm_vmrun(struct vcpu_svm *svm)
459 {
460 	int ret;
461 	struct vmcb *vmcb12;
462 	struct vmcb *hsave = svm->nested.hsave;
463 	struct vmcb *vmcb = svm->vmcb;
464 	struct kvm_host_map map;
465 	u64 vmcb12_gpa;
466 
467 	if (is_smm(&svm->vcpu)) {
468 		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
469 		return 1;
470 	}
471 
472 	vmcb12_gpa = svm->vmcb->save.rax;
473 	ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb12_gpa), &map);
474 	if (ret == -EINVAL) {
475 		kvm_inject_gp(&svm->vcpu, 0);
476 		return 1;
477 	} else if (ret) {
478 		return kvm_skip_emulated_instruction(&svm->vcpu);
479 	}
480 
481 	ret = kvm_skip_emulated_instruction(&svm->vcpu);
482 
483 	vmcb12 = map.hva;
484 
485 	if (WARN_ON_ONCE(!svm->nested.initialized))
486 		return -EINVAL;
487 
488 	if (!nested_vmcb_checks(svm, vmcb12)) {
489 		vmcb12->control.exit_code    = SVM_EXIT_ERR;
490 		vmcb12->control.exit_code_hi = 0;
491 		vmcb12->control.exit_info_1  = 0;
492 		vmcb12->control.exit_info_2  = 0;
493 		goto out;
494 	}
495 
496 	trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa,
497 			       vmcb12->save.rip,
498 			       vmcb12->control.int_ctl,
499 			       vmcb12->control.event_inj,
500 			       vmcb12->control.nested_ctl);
501 
502 	trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff,
503 				    vmcb12->control.intercepts[INTERCEPT_CR] >> 16,
504 				    vmcb12->control.intercepts[INTERCEPT_EXCEPTION],
505 				    vmcb12->control.intercepts[INTERCEPT_WORD3],
506 				    vmcb12->control.intercepts[INTERCEPT_WORD4],
507 				    vmcb12->control.intercepts[INTERCEPT_WORD5]);
508 
509 	/* Clear internal status */
510 	kvm_clear_exception_queue(&svm->vcpu);
511 	kvm_clear_interrupt_queue(&svm->vcpu);
512 
513 	/*
514 	 * Save the old vmcb, so we don't need to pick what we save, but can
515 	 * restore everything when a VMEXIT occurs
516 	 */
517 	hsave->save.es     = vmcb->save.es;
518 	hsave->save.cs     = vmcb->save.cs;
519 	hsave->save.ss     = vmcb->save.ss;
520 	hsave->save.ds     = vmcb->save.ds;
521 	hsave->save.gdtr   = vmcb->save.gdtr;
522 	hsave->save.idtr   = vmcb->save.idtr;
523 	hsave->save.efer   = svm->vcpu.arch.efer;
524 	hsave->save.cr0    = kvm_read_cr0(&svm->vcpu);
525 	hsave->save.cr4    = svm->vcpu.arch.cr4;
526 	hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
527 	hsave->save.rip    = kvm_rip_read(&svm->vcpu);
528 	hsave->save.rsp    = vmcb->save.rsp;
529 	hsave->save.rax    = vmcb->save.rax;
530 	if (npt_enabled)
531 		hsave->save.cr3    = vmcb->save.cr3;
532 	else
533 		hsave->save.cr3    = kvm_read_cr3(&svm->vcpu);
534 
535 	copy_vmcb_control_area(&hsave->control, &vmcb->control);
536 
537 	svm->nested.nested_run_pending = 1;
538 
539 	if (enter_svm_guest_mode(svm, vmcb12_gpa, vmcb12))
540 		goto out_exit_err;
541 
542 	if (nested_svm_vmrun_msrpm(svm))
543 		goto out;
544 
545 out_exit_err:
546 	svm->nested.nested_run_pending = 0;
547 
548 	svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
549 	svm->vmcb->control.exit_code_hi = 0;
550 	svm->vmcb->control.exit_info_1  = 0;
551 	svm->vmcb->control.exit_info_2  = 0;
552 
553 	nested_svm_vmexit(svm);
554 
555 out:
556 	kvm_vcpu_unmap(&svm->vcpu, &map, true);
557 
558 	return ret;
559 }
560 
561 void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
562 {
563 	to_vmcb->save.fs = from_vmcb->save.fs;
564 	to_vmcb->save.gs = from_vmcb->save.gs;
565 	to_vmcb->save.tr = from_vmcb->save.tr;
566 	to_vmcb->save.ldtr = from_vmcb->save.ldtr;
567 	to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
568 	to_vmcb->save.star = from_vmcb->save.star;
569 	to_vmcb->save.lstar = from_vmcb->save.lstar;
570 	to_vmcb->save.cstar = from_vmcb->save.cstar;
571 	to_vmcb->save.sfmask = from_vmcb->save.sfmask;
572 	to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
573 	to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
574 	to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
575 }
576 
577 int nested_svm_vmexit(struct vcpu_svm *svm)
578 {
579 	int rc;
580 	struct vmcb *vmcb12;
581 	struct vmcb *hsave = svm->nested.hsave;
582 	struct vmcb *vmcb = svm->vmcb;
583 	struct kvm_host_map map;
584 
585 	rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
586 	if (rc) {
587 		if (rc == -EINVAL)
588 			kvm_inject_gp(&svm->vcpu, 0);
589 		return 1;
590 	}
591 
592 	vmcb12 = map.hva;
593 
594 	/* Exit Guest-Mode */
595 	leave_guest_mode(&svm->vcpu);
596 	svm->nested.vmcb12_gpa = 0;
597 	WARN_ON_ONCE(svm->nested.nested_run_pending);
598 
599 	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, &svm->vcpu);
600 
601 	/* in case we halted in L2 */
602 	svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;
603 
604 	/* Give the current vmcb to the guest */
605 
606 	vmcb12->save.es     = vmcb->save.es;
607 	vmcb12->save.cs     = vmcb->save.cs;
608 	vmcb12->save.ss     = vmcb->save.ss;
609 	vmcb12->save.ds     = vmcb->save.ds;
610 	vmcb12->save.gdtr   = vmcb->save.gdtr;
611 	vmcb12->save.idtr   = vmcb->save.idtr;
612 	vmcb12->save.efer   = svm->vcpu.arch.efer;
613 	vmcb12->save.cr0    = kvm_read_cr0(&svm->vcpu);
614 	vmcb12->save.cr3    = kvm_read_cr3(&svm->vcpu);
615 	vmcb12->save.cr2    = vmcb->save.cr2;
616 	vmcb12->save.cr4    = svm->vcpu.arch.cr4;
617 	vmcb12->save.rflags = kvm_get_rflags(&svm->vcpu);
618 	vmcb12->save.rip    = kvm_rip_read(&svm->vcpu);
619 	vmcb12->save.rsp    = kvm_rsp_read(&svm->vcpu);
620 	vmcb12->save.rax    = kvm_rax_read(&svm->vcpu);
621 	vmcb12->save.dr7    = vmcb->save.dr7;
622 	vmcb12->save.dr6    = svm->vcpu.arch.dr6;
623 	vmcb12->save.cpl    = vmcb->save.cpl;
624 
625 	vmcb12->control.int_state         = vmcb->control.int_state;
626 	vmcb12->control.exit_code         = vmcb->control.exit_code;
627 	vmcb12->control.exit_code_hi      = vmcb->control.exit_code_hi;
628 	vmcb12->control.exit_info_1       = vmcb->control.exit_info_1;
629 	vmcb12->control.exit_info_2       = vmcb->control.exit_info_2;
630 
631 	if (vmcb12->control.exit_code != SVM_EXIT_ERR)
632 		nested_vmcb_save_pending_event(svm, vmcb12);
633 
634 	if (svm->nrips_enabled)
635 		vmcb12->control.next_rip  = vmcb->control.next_rip;
636 
637 	vmcb12->control.int_ctl           = svm->nested.ctl.int_ctl;
638 	vmcb12->control.tlb_ctl           = svm->nested.ctl.tlb_ctl;
639 	vmcb12->control.event_inj         = svm->nested.ctl.event_inj;
640 	vmcb12->control.event_inj_err     = svm->nested.ctl.event_inj_err;
641 
642 	vmcb12->control.pause_filter_count =
643 		svm->vmcb->control.pause_filter_count;
644 	vmcb12->control.pause_filter_thresh =
645 		svm->vmcb->control.pause_filter_thresh;
646 
647 	/* Restore the original control entries */
648 	copy_vmcb_control_area(&vmcb->control, &hsave->control);
649 
650 	/* On vmexit the GIF is set to false */
651 	svm_set_gif(svm, false);
652 
653 	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
654 		svm->vcpu.arch.l1_tsc_offset;
655 
656 	svm->nested.ctl.nested_cr3 = 0;
657 
658 	/* Restore selected save entries */
659 	svm->vmcb->save.es = hsave->save.es;
660 	svm->vmcb->save.cs = hsave->save.cs;
661 	svm->vmcb->save.ss = hsave->save.ss;
662 	svm->vmcb->save.ds = hsave->save.ds;
663 	svm->vmcb->save.gdtr = hsave->save.gdtr;
664 	svm->vmcb->save.idtr = hsave->save.idtr;
665 	kvm_set_rflags(&svm->vcpu, hsave->save.rflags | X86_EFLAGS_FIXED);
667 	svm_set_efer(&svm->vcpu, hsave->save.efer);
668 	svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
669 	svm_set_cr4(&svm->vcpu, hsave->save.cr4);
670 	kvm_rax_write(&svm->vcpu, hsave->save.rax);
671 	kvm_rsp_write(&svm->vcpu, hsave->save.rsp);
672 	kvm_rip_write(&svm->vcpu, hsave->save.rip);
673 	svm->vmcb->save.dr7 = DR7_FIXED_1;
674 	svm->vmcb->save.cpl = 0;
675 	svm->vmcb->control.exit_int_info = 0;
676 
677 	vmcb_mark_all_dirty(svm->vmcb);
678 
679 	trace_kvm_nested_vmexit_inject(vmcb12->control.exit_code,
680 				       vmcb12->control.exit_info_1,
681 				       vmcb12->control.exit_info_2,
682 				       vmcb12->control.exit_int_info,
683 				       vmcb12->control.exit_int_info_err,
684 				       KVM_ISA_SVM);
685 
686 	kvm_vcpu_unmap(&svm->vcpu, &map, true);
687 
688 	nested_svm_uninit_mmu_context(&svm->vcpu);
689 
690 	rc = nested_svm_load_cr3(&svm->vcpu, hsave->save.cr3, false);
691 	if (rc)
692 		return 1;
693 
694 	if (npt_enabled)
695 		svm->vmcb->save.cr3 = hsave->save.cr3;
696 
697 	/*
698 	 * Drop what we picked up for L2 via svm_complete_interrupts() so it
699 	 * doesn't end up in L1.
700 	 */
701 	svm->vcpu.arch.nmi_injected = false;
702 	kvm_clear_exception_queue(&svm->vcpu);
703 	kvm_clear_interrupt_queue(&svm->vcpu);
704 
705 	return 0;
706 }
707 
708 int svm_allocate_nested(struct vcpu_svm *svm)
709 {
710 	struct page *hsave_page;
711 
712 	if (svm->nested.initialized)
713 		return 0;
714 
715 	hsave_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
716 	if (!hsave_page)
717 		return -ENOMEM;
718 	svm->nested.hsave = page_address(hsave_page);
719 
720 	svm->nested.msrpm = svm_vcpu_alloc_msrpm();
721 	if (!svm->nested.msrpm)
722 		goto err_free_hsave;
723 	svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm);
724 
725 	svm->nested.initialized = true;
726 	return 0;
727 
728 err_free_hsave:
729 	__free_page(hsave_page);
730 	return -ENOMEM;
731 }
732 
733 void svm_free_nested(struct vcpu_svm *svm)
734 {
735 	if (!svm->nested.initialized)
736 		return;
737 
738 	svm_vcpu_free_msrpm(svm->nested.msrpm);
739 	svm->nested.msrpm = NULL;
740 
741 	__free_page(virt_to_page(svm->nested.hsave));
742 	svm->nested.hsave = NULL;
743 
744 	svm->nested.initialized = false;
745 }
746 
747 /*
748  * Forcibly leave nested mode in order to be able to reset the VCPU later on.
749  */
750 void svm_leave_nested(struct vcpu_svm *svm)
751 {
752 	if (is_guest_mode(&svm->vcpu)) {
753 		struct vmcb *hsave = svm->nested.hsave;
754 		struct vmcb *vmcb = svm->vmcb;
755 
756 		svm->nested.nested_run_pending = 0;
757 		leave_guest_mode(&svm->vcpu);
758 		copy_vmcb_control_area(&vmcb->control, &hsave->control);
759 		nested_svm_uninit_mmu_context(&svm->vcpu);
760 		vmcb_mark_all_dirty(svm->vmcb);
761 	}
762 
763 	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, &svm->vcpu);
764 }
765 
766 static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
767 {
768 	u32 offset, msr, value;
769 	int write, mask;
770 
771 	if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
772 		return NESTED_EXIT_HOST;
773 
774 	msr    = svm->vcpu.arch.regs[VCPU_REGS_RCX];
775 	offset = svm_msrpm_offset(msr);
776 	write  = svm->vmcb->control.exit_info_1 & 1;
777 	mask   = 1 << ((2 * (msr & 0xf)) + write);
778 
779 	if (offset == MSR_INVALID)
780 		return NESTED_EXIT_DONE;
781 
782 	/* Offset is in 32 bit units but we need it in 8 bit (byte) units */
783 	offset *= 4;
784 
785 	if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
786 		return NESTED_EXIT_DONE;
787 
788 	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
789 }
790 
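/*
 * Worked sketch of the permission-bit math above (illustrative only; the
 * sketch_ name is not a kernel symbol).  Each MSR owns two adjacent bits
 * within its 16-MSR group: read at 2 * (msr % 16), write one bit higher.
 * For example, a write to MSR 0xc0000080 (EFER) tests bit
 * 2 * 0 + 1 = 1 of the 32-bit chunk that svm_msrpm_offset() selects.
 */
static inline u32 sketch_msrpm_bit(u32 msr, int write)
{
	return 1u << (2 * (msr & 0xf) + write);
}
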
791 static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
792 {
793 	unsigned port, size, iopm_len;
794 	u16 val, mask;
795 	u8 start_bit;
796 	u64 gpa;
797 
798 	if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT)))
799 		return NESTED_EXIT_HOST;
800 
801 	port = svm->vmcb->control.exit_info_1 >> 16;
802 	size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
803 		SVM_IOIO_SIZE_SHIFT;
804 	gpa  = svm->nested.ctl.iopm_base_pa + (port / 8);
805 	start_bit = port % 8;
806 	iopm_len = (start_bit + size > 8) ? 2 : 1;
807 	mask = (0xf >> (4 - size)) << start_bit;
808 	val = 0;
809 
810 	if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
811 		return NESTED_EXIT_DONE;
812 
813 	return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
814 }
815 
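/*
 * Worked sketch of the IOPM lookup above (illustrative only; the sketch_
 * name is not a kernel symbol).  Port N maps to bit (N % 8) of IOPM byte
 * (N / 8), and an access of @size bytes tests @size consecutive bits,
 * which may straddle a byte boundary (hence the 1- or 2-byte read).  For
 * example, a 2-byte access to port 0x3f9 tests bits 1 and 2 of byte 0x7f,
 * i.e. mask 0x06.
 */
static inline u16 sketch_iopm_mask(u16 port, unsigned int size)
{
	return (u16)((0xf >> (4 - size)) << (port % 8));
}
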
816 static int nested_svm_intercept(struct vcpu_svm *svm)
817 {
818 	u32 exit_code = svm->vmcb->control.exit_code;
819 	int vmexit = NESTED_EXIT_HOST;
820 
821 	switch (exit_code) {
822 	case SVM_EXIT_MSR:
823 		vmexit = nested_svm_exit_handled_msr(svm);
824 		break;
825 	case SVM_EXIT_IOIO:
826 		vmexit = nested_svm_intercept_ioio(svm);
827 		break;
828 	case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
829 		if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
830 			vmexit = NESTED_EXIT_DONE;
831 		break;
832 	}
833 	case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
834 		if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
835 			vmexit = NESTED_EXIT_DONE;
836 		break;
837 	}
838 	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
839 		/*
840 		 * Host-intercepted exceptions have been checked already in
841 		 * nested_svm_exit_special.  There is nothing to do here,
842 		 * the vmexit is injected by svm_check_nested_events.
843 		 */
844 		vmexit = NESTED_EXIT_DONE;
845 		break;
846 	}
847 	case SVM_EXIT_ERR: {
848 		vmexit = NESTED_EXIT_DONE;
849 		break;
850 	}
851 	default: {
852 		if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
853 			vmexit = NESTED_EXIT_DONE;
854 	}
855 	}
856 
857 	return vmexit;
858 }
859 
860 int nested_svm_exit_handled(struct vcpu_svm *svm)
861 {
862 	int vmexit;
863 
864 	vmexit = nested_svm_intercept(svm);
865 
866 	if (vmexit == NESTED_EXIT_DONE)
867 		nested_svm_vmexit(svm);
868 
869 	return vmexit;
870 }
871 
872 int nested_svm_check_permissions(struct vcpu_svm *svm)
873 {
874 	if (!(svm->vcpu.arch.efer & EFER_SVME) ||
875 	    !is_paging(&svm->vcpu)) {
876 		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
877 		return 1;
878 	}
879 
880 	if (svm->vmcb->save.cpl) {
881 		kvm_inject_gp(&svm->vcpu, 0);
882 		return 1;
883 	}
884 
885 	return 0;
886 }
887 
888 static bool nested_exit_on_exception(struct vcpu_svm *svm)
889 {
890 	unsigned int nr = svm->vcpu.arch.exception.nr;
891 
892 	return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(nr));
893 }
894 
895 static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
896 {
897 	unsigned int nr = svm->vcpu.arch.exception.nr;
898 
899 	svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
900 	svm->vmcb->control.exit_code_hi = 0;
901 
902 	if (svm->vcpu.arch.exception.has_error_code)
903 		svm->vmcb->control.exit_info_1 = svm->vcpu.arch.exception.error_code;
904 
905 	/*
906 	 * EXITINFO2 is undefined for all exception intercepts other
907 	 * than #PF.
908 	 */
909 	if (nr == PF_VECTOR) {
910 		if (svm->vcpu.arch.exception.nested_apf)
911 			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
912 		else if (svm->vcpu.arch.exception.has_payload)
913 			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
914 		else
915 			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
916 	} else if (nr == DB_VECTOR) {
917 		/* See inject_pending_event.  */
918 		kvm_deliver_exception_payload(&svm->vcpu);
919 		if (svm->vcpu.arch.dr7 & DR7_GD) {
920 			svm->vcpu.arch.dr7 &= ~DR7_GD;
921 			kvm_update_dr7(&svm->vcpu);
922 		}
923 	} else
924 		WARN_ON(svm->vcpu.arch.exception.has_payload);
925 
926 	nested_svm_vmexit(svm);
927 }
928 
929 static void nested_svm_smi(struct vcpu_svm *svm)
930 {
931 	svm->vmcb->control.exit_code = SVM_EXIT_SMI;
932 	svm->vmcb->control.exit_info_1 = 0;
933 	svm->vmcb->control.exit_info_2 = 0;
934 
935 	nested_svm_vmexit(svm);
936 }
937 
938 static void nested_svm_nmi(struct vcpu_svm *svm)
939 {
940 	svm->vmcb->control.exit_code = SVM_EXIT_NMI;
941 	svm->vmcb->control.exit_info_1 = 0;
942 	svm->vmcb->control.exit_info_2 = 0;
943 
944 	nested_svm_vmexit(svm);
945 }
946 
947 static void nested_svm_intr(struct vcpu_svm *svm)
948 {
949 	trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
950 
951 	svm->vmcb->control.exit_code   = SVM_EXIT_INTR;
952 	svm->vmcb->control.exit_info_1 = 0;
953 	svm->vmcb->control.exit_info_2 = 0;
954 
955 	nested_svm_vmexit(svm);
956 }
957 
958 static inline bool nested_exit_on_init(struct vcpu_svm *svm)
959 {
960 	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
961 }
962 
963 static void nested_svm_init(struct vcpu_svm *svm)
964 {
965 	svm->vmcb->control.exit_code   = SVM_EXIT_INIT;
966 	svm->vmcb->control.exit_info_1 = 0;
967 	svm->vmcb->control.exit_info_2 = 0;
968 
969 	nested_svm_vmexit(svm);
970 }
971 
972 
973 static int svm_check_nested_events(struct kvm_vcpu *vcpu)
974 {
975 	struct vcpu_svm *svm = to_svm(vcpu);
976 	bool block_nested_events =
977 		kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
978 	struct kvm_lapic *apic = vcpu->arch.apic;
979 
980 	if (lapic_in_kernel(vcpu) &&
981 	    test_bit(KVM_APIC_INIT, &apic->pending_events)) {
982 		if (block_nested_events)
983 			return -EBUSY;
984 		if (!nested_exit_on_init(svm))
985 			return 0;
986 		nested_svm_init(svm);
987 		return 0;
988 	}
989 
990 	if (vcpu->arch.exception.pending) {
991 		if (block_nested_events)
992 			return -EBUSY;
993 		if (!nested_exit_on_exception(svm))
994 			return 0;
995 		nested_svm_inject_exception_vmexit(svm);
996 		return 0;
997 	}
998 
999 	if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
1000 		if (block_nested_events)
1001 			return -EBUSY;
1002 		if (!nested_exit_on_smi(svm))
1003 			return 0;
1004 		nested_svm_smi(svm);
1005 		return 0;
1006 	}
1007 
1008 	if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
1009 		if (block_nested_events)
1010 			return -EBUSY;
1011 		if (!nested_exit_on_nmi(svm))
1012 			return 0;
1013 		nested_svm_nmi(svm);
1014 		return 0;
1015 	}
1016 
1017 	if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
1018 		if (block_nested_events)
1019 			return -EBUSY;
1020 		if (!nested_exit_on_intr(svm))
1021 			return 0;
1022 		nested_svm_intr(svm);
1023 		return 0;
1024 	}
1025 
1026 	return 0;
1027 }
1028 
1029 int nested_svm_exit_special(struct vcpu_svm *svm)
1030 {
1031 	u32 exit_code = svm->vmcb->control.exit_code;
1032 
1033 	switch (exit_code) {
1034 	case SVM_EXIT_INTR:
1035 	case SVM_EXIT_NMI:
1036 	case SVM_EXIT_NPF:
1037 		return NESTED_EXIT_HOST;
1038 	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
1039 		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
1040 
1041 		if (get_host_vmcb(svm)->control.intercepts[INTERCEPT_EXCEPTION] &
1042 				excp_bits)
1043 			return NESTED_EXIT_HOST;
1044 		else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
1045 			 svm->vcpu.arch.apf.host_apf_flags)
1046 			/* Trap async PF even if not shadowing */
1047 			return NESTED_EXIT_HOST;
1048 		break;
1049 	}
1050 	default:
1051 		break;
1052 	}
1053 
1054 	return NESTED_EXIT_CONTINUE;
1055 }
1056 
1057 static int svm_get_nested_state(struct kvm_vcpu *vcpu,
1058 				struct kvm_nested_state __user *user_kvm_nested_state,
1059 				u32 user_data_size)
1060 {
1061 	struct vcpu_svm *svm;
1062 	struct kvm_nested_state kvm_state = {
1063 		.flags = 0,
1064 		.format = KVM_STATE_NESTED_FORMAT_SVM,
1065 		.size = sizeof(kvm_state),
1066 	};
1067 	struct vmcb __user *user_vmcb = (struct vmcb __user *)
1068 		&user_kvm_nested_state->data.svm[0];
1069 
1070 	if (!vcpu)
1071 		return kvm_state.size + KVM_STATE_NESTED_SVM_VMCB_SIZE;
1072 
1073 	svm = to_svm(vcpu);
1074 
1075 	if (user_data_size < kvm_state.size)
1076 		goto out;
1077 
1078 	/* First fill in the header and copy it out.  */
1079 	if (is_guest_mode(vcpu)) {
1080 		kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb12_gpa;
1081 		kvm_state.size += KVM_STATE_NESTED_SVM_VMCB_SIZE;
1082 		kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
1083 
1084 		if (svm->nested.nested_run_pending)
1085 			kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
1086 	}
1087 
1088 	if (gif_set(svm))
1089 		kvm_state.flags |= KVM_STATE_NESTED_GIF_SET;
1090 
1091 	if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
1092 		return -EFAULT;
1093 
1094 	if (!is_guest_mode(vcpu))
1095 		goto out;
1096 
1097 	/*
1098 	 * Copy over the full size of the VMCB rather than just the size
1099 	 * of the structs.
1100 	 */
1101 	if (clear_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE))
1102 		return -EFAULT;
1103 	if (copy_to_user(&user_vmcb->control, &svm->nested.ctl,
1104 			 sizeof(user_vmcb->control)))
1105 		return -EFAULT;
1106 	if (copy_to_user(&user_vmcb->save, &svm->nested.hsave->save,
1107 			 sizeof(user_vmcb->save)))
1108 		return -EFAULT;
1109 
1110 out:
1111 	return kvm_state.size;
1112 }
1113 
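/*
 * Userspace-style sketch of driving the interface that
 * svm_get_nested_state() implements (illustrative only; not part of this
 * file, and the sketch_ name is hypothetical).  The caller is assumed to
 * have fds from the usual KVM_CREATE_VM / KVM_CREATE_VCPU sequence;
 * KVM_CHECK_EXTENSION(KVM_CAP_NESTED_STATE) reports the maximum state
 * size, and the ioctl fails with E2BIG if the buffer announced in .size
 * is too small.
 *
 *	#include <linux/kvm.h>
 *	#include <sys/ioctl.h>
 *	#include <stdlib.h>
 *
 *	static struct kvm_nested_state *sketch_get_nested_state(int kvm_fd,
 *								 int vcpu_fd)
 *	{
 *		int max = ioctl(kvm_fd, KVM_CHECK_EXTENSION,
 *				KVM_CAP_NESTED_STATE);
 *		struct kvm_nested_state *state;
 *
 *		if (max < (int)sizeof(*state))
 *			max = sizeof(*state);
 *		state = calloc(1, max);
 *		if (!state)
 *			return NULL;
 *		state->size = max;
 *		if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, state) < 0) {
 *			free(state);
 *			return NULL;
 *		}
 *		return state;
 *	}
 */
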
1114 static int svm_set_nested_state(struct kvm_vcpu *vcpu,
1115 				struct kvm_nested_state __user *user_kvm_nested_state,
1116 				struct kvm_nested_state *kvm_state)
1117 {
1118 	struct vcpu_svm *svm = to_svm(vcpu);
1119 	struct vmcb *hsave = svm->nested.hsave;
1120 	struct vmcb __user *user_vmcb = (struct vmcb __user *)
1121 		&user_kvm_nested_state->data.svm[0];
1122 	struct vmcb_control_area *ctl;
1123 	struct vmcb_save_area *save;
1124 	int ret;
1125 	u32 cr0;
1126 
1127 	BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) >
1128 		     KVM_STATE_NESTED_SVM_VMCB_SIZE);
1129 
1130 	if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
1131 		return -EINVAL;
1132 
1133 	if (kvm_state->flags & ~(KVM_STATE_NESTED_GUEST_MODE |
1134 				 KVM_STATE_NESTED_RUN_PENDING |
1135 				 KVM_STATE_NESTED_GIF_SET))
1136 		return -EINVAL;
1137 
1138 	/*
1139 	 * If in guest mode, vcpu->arch.efer actually refers to the L2 guest's
1140 	 * EFER.SVME, but EFER.SVME still has to be 1 for VMRUN to succeed.
1141 	 */
1142 	if (!(vcpu->arch.efer & EFER_SVME)) {
1143 		/* GIF=1 and no guest mode are required if SVME=0.  */
1144 		if (kvm_state->flags != KVM_STATE_NESTED_GIF_SET)
1145 			return -EINVAL;
1146 	}
1147 
1148 	/* SMM temporarily disables SVM, so we cannot be in guest mode.  */
1149 	if (is_smm(vcpu) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
1150 		return -EINVAL;
1151 
1152 	if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
1153 		svm_leave_nested(svm);
1154 		svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
1155 		return 0;
1156 	}
1157 
1158 	if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa))
1159 		return -EINVAL;
1160 	if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
1161 		return -EINVAL;
1162 
1163 	ret  = -ENOMEM;
1164 	ctl  = kzalloc(sizeof(*ctl),  GFP_KERNEL);
1165 	save = kzalloc(sizeof(*save), GFP_KERNEL);
1166 	if (!ctl || !save)
1167 		goto out_free;
1168 
1169 	ret = -EFAULT;
1170 	if (copy_from_user(ctl, &user_vmcb->control, sizeof(*ctl)))
1171 		goto out_free;
1172 	if (copy_from_user(save, &user_vmcb->save, sizeof(*save)))
1173 		goto out_free;
1174 
1175 	ret = -EINVAL;
1176 	if (!nested_vmcb_check_controls(ctl))
1177 		goto out_free;
1178 
1179 	/*
1180 	 * Processor state contains L2 state.  Check that it is
1181 	 * valid for guest mode (see nested_vmcb_checks).
1182 	 */
1183 	cr0 = kvm_read_cr0(vcpu);
1184 	if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
1185 		goto out_free;
1186 
1187 	/*
1188 	 * Validate host state saved from before VMRUN (see
1189 	 * nested_svm_check_permissions).
1190 	 * TODO: validate reserved bits for all saved state.
1191 	 */
1192 	if (!(save->cr0 & X86_CR0_PG))
1193 		goto out_free;
1194 
1195 	/*
1196 	 * All checks done, we can enter guest mode.  L1 control fields
1197 	 * come from the nested save state.  Guest state is already
1198 	 * in the registers; the save area of the nested state instead
1199 	 * contains the saved L1 state.
1200 	 */
1201 
1202 	svm->nested.nested_run_pending =
1203 		!!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
1204 
1205 	copy_vmcb_control_area(&hsave->control, &svm->vmcb->control);
1206 	hsave->save = *save;
1207 
1208 	svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;
1209 	load_nested_vmcb_control(svm, ctl);
1210 	nested_prepare_vmcb_control(svm);
1211 
1212 	kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
1213 	ret = 0;
1214 out_free:
1215 	kfree(save);
1216 	kfree(ctl);
1217 
1218 	return ret;
1219 }
1220 
1221 struct kvm_x86_nested_ops svm_nested_ops = {
1222 	.check_events = svm_check_nested_events,
1223 	.get_nested_state_pages = svm_get_nested_state_pages,
1224 	.get_state = svm_get_nested_state,
1225 	.set_state = svm_set_nested_state,
1226 };
1227