xref: /illumos-gate/usr/src/uts/intel/io/vmm/amd/svm.c (revision d35aa001d65a0260475aa7ded6ca159c6ba3fe64)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com)
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice unmodified, this list of conditions, and the following
12  *    disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 /*
30  * This file and its contents are supplied under the terms of the
31  * Common Development and Distribution License ("CDDL"), version 1.0.
32  * You may only use this file in accordance with the terms of version
33  * 1.0 of the CDDL.
34  *
35  * A full copy of the text of the CDDL should have accompanied this
36  * source.  A copy of the CDDL is also available via the Internet at
37  * http://www.illumos.org/license/CDDL.
38  *
39  * Copyright 2018 Joyent, Inc.
40  * Copyright 2023 Oxide Computer Company
41  */
42 
43 #include <sys/cdefs.h>
44 __FBSDID("$FreeBSD$");
45 
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/kernel.h>
49 #include <sys/kmem.h>
50 #include <sys/pcpu.h>
51 #include <sys/proc.h>
52 #include <sys/sysctl.h>
53 
54 #include <sys/x86_archext.h>
55 #include <sys/trap.h>
56 
57 #include <machine/cpufunc.h>
58 #include <machine/psl.h>
59 #include <machine/md_var.h>
60 #include <machine/reg.h>
61 #include <machine/specialreg.h>
62 #include <machine/vmm.h>
63 #include <machine/vmm_dev.h>
64 #include <sys/vmm_instruction_emul.h>
65 #include <sys/vmm_vm.h>
66 #include <sys/vmm_kernel.h>
67 
68 #include "vmm_lapic.h"
69 #include "vmm_stat.h"
70 #include "vmm_ioport.h"
71 #include "vatpic.h"
72 #include "vlapic.h"
73 #include "vlapic_priv.h"
74 
75 #include "vmcb.h"
76 #include "svm.h"
77 #include "svm_softc.h"
78 #include "svm_msr.h"
79 
80 SYSCTL_DECL(_hw_vmm);
81 SYSCTL_NODE(_hw_vmm, OID_AUTO, svm, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
82     NULL);
83 
84 #define	VMCB_CACHE_DEFAULT	(VMCB_CACHE_ASID	|	\
85 				VMCB_CACHE_IOPM		|	\
86 				VMCB_CACHE_I		|	\
87 				VMCB_CACHE_TPR		|	\
88 				VMCB_CACHE_CR2		|	\
89 				VMCB_CACHE_CR		|	\
90 				VMCB_CACHE_DR		|	\
91 				VMCB_CACHE_DT		|	\
92 				VMCB_CACHE_SEG		|	\
93 				VMCB_CACHE_NP)
94 
95 /*
96  * Guardrails for supported guest TSC frequencies.
97  *
98  * A minimum of 0.5 GHz, which should be sufficient for all recent AMD CPUs, and
99  * a maximum ratio of (15 * host frequency), which is sufficient to prevent
 * overflowing frequency calculations and give plenty of bandwidth for future CPU
101  * frequency increases.
102  */
103 #define	AMD_TSC_MIN_FREQ	500000000
104 #define	AMD_TSC_MAX_FREQ_RATIO	15
105 
106 static uint32_t vmcb_clean = VMCB_CACHE_DEFAULT;
107 
108 /* SVM features advertised by CPUID.8000000AH:EDX */
109 static uint32_t svm_feature = 0;
110 
111 static int disable_npf_assist;
112 
113 static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery");
114 static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry");
115 static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window");
116 
117 static int svm_setreg(void *arg, int vcpu, int ident, uint64_t val);
118 static int svm_getreg(void *arg, int vcpu, int ident, uint64_t *val);
119 static void flush_asid(struct svm_softc *sc, int vcpuid);
120 
121 static __inline bool
122 has_flush_by_asid(void)
123 {
124 	return ((svm_feature & CPUID_AMD_EDX_FLUSH_ASID) != 0);
125 }
126 
127 static __inline bool
128 has_lbr_virt(void)
129 {
130 	return ((svm_feature & CPUID_AMD_EDX_LBR_VIRT) != 0);
131 }
132 
133 static __inline bool
134 has_decode_assist(void)
135 {
136 	return ((svm_feature & CPUID_AMD_EDX_DECODE_ASSISTS) != 0);
137 }
138 
139 static __inline bool
140 has_tsc_freq_ctl(void)
141 {
142 	return ((svm_feature & CPUID_AMD_EDX_TSC_RATE_MSR) != 0);
143 }
144 
/*
 * Module-level teardown; a no-op here since the hma registration owns it.
 */
static int
svm_cleanup(void)
{
	return (0);
}
151 
152 static int
153 svm_init(void)
154 {
155 	/* Grab a (bhyve) local copy of the SVM feature bits */
156 	struct cpuid_regs regs = {
157 		.cp_eax = 0x8000000a,
158 	};
159 	(void) cpuid_insn(NULL, &regs);
160 	svm_feature = regs.cp_edx;
161 
162 	/*
163 	 * HMA should have already checked for these features which we refuse to
164 	 * operate without, but no harm in making sure
165 	 */
166 	const uint32_t demand_bits =
167 	    (CPUID_AMD_EDX_NESTED_PAGING | CPUID_AMD_EDX_NRIPS);
168 	VERIFY((svm_feature & demand_bits) == demand_bits);
169 
170 	/* Clear any unexpected bits (set manually) from vmcb_clean */
171 	vmcb_clean &= VMCB_CACHE_DEFAULT;
172 
173 	return (0);
174 }
175 
/*
 * Restore hook after host suspend/resume; nothing to do on illumos.
 */
static void
svm_restore(void)
{
}
181 
182 /* Pentium compatible MSRs */
183 #define	MSR_PENTIUM_START	0
184 #define	MSR_PENTIUM_END		0x1FFF
185 /* AMD 6th generation and Intel compatible MSRs */
186 #define	MSR_AMD6TH_START	0xC0000000UL
187 #define	MSR_AMD6TH_END		0xC0001FFFUL
188 /* AMD 7th and 8th generation compatible MSRs */
189 #define	MSR_AMD7TH_START	0xC0010000UL
190 #define	MSR_AMD7TH_END		0xC0011FFFUL
191 
192 /*
193  * Get the index and bit position for a MSR in permission bitmap.
194  * Two bits are used for each MSR: lower bit for read and higher bit for write.
195  */
196 static int
197 svm_msr_index(uint64_t msr, int *index, int *bit)
198 {
199 	uint32_t base, off;
200 
201 	*index = -1;
202 	*bit = (msr % 4) * 2;
203 	base = 0;
204 
205 	if (msr <= MSR_PENTIUM_END) {
206 		*index = msr / 4;
207 		return (0);
208 	}
209 
210 	base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1);
211 	if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) {
212 		off = (msr - MSR_AMD6TH_START);
213 		*index = (off + base) / 4;
214 		return (0);
215 	}
216 
217 	base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1);
218 	if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) {
219 		off = (msr - MSR_AMD7TH_START);
220 		*index = (off + base) / 4;
221 		return (0);
222 	}
223 
224 	return (EINVAL);
225 }
226 
227 /*
228  * Allow vcpu to read or write the 'msr' without trapping into the hypervisor.
229  */
230 static void
231 svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write)
232 {
233 	int index, bit, error;
234 
235 	error = svm_msr_index(msr, &index, &bit);
236 	KASSERT(error == 0, ("%s: invalid msr %lx", __func__, msr));
237 	KASSERT(index >= 0 && index < SVM_MSR_BITMAP_SIZE,
238 	    ("%s: invalid index %d for msr %lx", __func__, index, msr));
239 	KASSERT(bit >= 0 && bit <= 6, ("%s: invalid bit position %d "
240 	    "msr %lx", __func__, bit, msr));
241 
242 	if (read)
243 		perm_bitmap[index] &= ~(1UL << bit);
244 
245 	if (write)
246 		perm_bitmap[index] &= ~(2UL << bit);
247 }
248 
/*
 * Permit direct guest read and write access to 'msr'.
 */
static void
svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr)
{
	svm_msr_perm(perm_bitmap, msr, true, true);
}
255 
/*
 * Permit direct guest read access to 'msr'; writes still trap.
 */
static void
svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr)
{
	svm_msr_perm(perm_bitmap, msr, true, false);
}
262 
263 static __inline int
264 svm_get_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask)
265 {
266 	struct vmcb_ctrl *ctrl;
267 
268 	KASSERT(idx >= 0 && idx < 5, ("invalid intercept index %d", idx));
269 
270 	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
271 	return (ctrl->intercept[idx] & bitmask ? 1 : 0);
272 }
273 
274 static __inline void
275 svm_set_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask,
276     int enabled)
277 {
278 	struct vmcb_ctrl *ctrl;
279 	uint32_t oldval;
280 
281 	KASSERT(idx >= 0 && idx < 5, ("invalid intercept index %d", idx));
282 
283 	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
284 	oldval = ctrl->intercept[idx];
285 
286 	if (enabled)
287 		ctrl->intercept[idx] |= bitmask;
288 	else
289 		ctrl->intercept[idx] &= ~bitmask;
290 
291 	if (ctrl->intercept[idx] != oldval) {
292 		svm_set_dirty(sc, vcpu, VMCB_CACHE_I);
293 	}
294 }
295 
/*
 * Convenience wrapper: clear 'bitmask' in intercept word 'off'.
 */
static __inline void
svm_disable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
{
	svm_set_intercept(sc, vcpu, off, bitmask, 0);
}
302 
/*
 * Convenience wrapper: set 'bitmask' in intercept word 'off'.
 */
static __inline void
svm_enable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
{
	svm_set_intercept(sc, vcpu, off, bitmask, 1);
}
309 
/*
 * Populate a vCPU's VMCB with its initial state: permission bitmap and
 * nested page table addresses, the full intercept configuration, and the
 * architectural power-on register values (EFER, PAT, DR6/DR7).
 *
 * 'iopm_base_pa'/'msrpm_base_pa' are the physical addresses of the I/O and
 * MSR permission bitmaps; 'np_pml4' is the nested page table root.
 */
static void
vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iopm_base_pa,
    uint64_t msrpm_base_pa, uint64_t np_pml4)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;
	uint32_t mask;
	int n;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	state = svm_get_vmcb_state(sc, vcpu);

	ctrl->iopm_base_pa = iopm_base_pa;
	ctrl->msrpm_base_pa = msrpm_base_pa;

	/* Enable nested paging */
	ctrl->np_ctrl = NP_ENABLE;
	ctrl->n_cr3 = np_pml4;

	/*
	 * Intercept accesses to the control registers that are not shadowed
	 * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8.
	 */
	for (n = 0; n < 16; n++) {
		/* Low 16 bits = read intercepts, high 16 bits = writes. */
		mask = (BIT(n) << 16) | BIT(n);
		if (n == 0 || n == 2 || n == 3 || n == 4 || n == 8)
			svm_disable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
		else
			svm_enable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
	}

	/*
	 * Selectively intercept writes to %cr0.  This triggers on operations
	 * which would change bits other than TS or MP.
	 */
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
	    VMCB_INTCPT_CR0_WRITE);

	/*
	 * Intercept everything when tracing guest exceptions otherwise
	 * just intercept machine check exception.
	 */
	if (vcpu_trace_exceptions(sc->vm, vcpu)) {
		for (n = 0; n < 32; n++) {
			/*
			 * Skip unimplemented vectors in the exception bitmap.
			 */
			if (n == 2 || n == 9) {
				continue;
			}
			svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(n));
		}
	} else {
		svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC));
	}

	/* Intercept various events (for e.g. I/O, MSR and CPUID accesses) */
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
	    VMCB_INTCPT_FERR_FREEZE);

	/* Enable exit-on-hlt by default */
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_HLT);

	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT);

	/* Intercept privileged invalidation instructions. */
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVD);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVLPGA);

	/*
	 * Intercept all virtualization-related instructions.
	 *
	 * From section "Canonicalization and Consistency Checks" in APMv2
	 * the VMRUN intercept bit must be set to pass the consistency check.
	 */
	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMMCALL);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMLOAD);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMSAVE);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_STGI);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_CLGI);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_SKINIT);
	/* WBINVD trapping is optional, controlled per-VM. */
	if (vcpu_trap_wbinvd(sc->vm, vcpu) != 0) {
		svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT,
		    VMCB_INTCPT_WBINVD);
	}

	/*
	 * The ASID will be set to a non-zero value just before VMRUN.
	 */
	ctrl->asid = 0;

	/*
	 * Section 15.21.1, Interrupt Masking in EFLAGS
	 * Section 15.21.2, Virtualizing APIC.TPR
	 *
	 * This must be set for %rflag and %cr8 isolation of guest and host.
	 */
	ctrl->v_intr_ctrl |= V_INTR_MASKING;

	/* Enable Last Branch Record aka LBR-virt (if available) */
	if (has_lbr_virt()) {
		ctrl->misc_ctrl |= LBR_VIRT_ENABLE;
	}

	/* EFER_SVM must always be set when the guest is executing */
	state->efer = EFER_SVM;

	/* Set up the PAT to power-on state */
	state->g_pat = PAT_VALUE(0, PAT_WRITE_BACK)	|
	    PAT_VALUE(1, PAT_WRITE_THROUGH)	|
	    PAT_VALUE(2, PAT_UNCACHED)		|
	    PAT_VALUE(3, PAT_UNCACHEABLE)	|
	    PAT_VALUE(4, PAT_WRITE_BACK)	|
	    PAT_VALUE(5, PAT_WRITE_THROUGH)	|
	    PAT_VALUE(6, PAT_UNCACHED)		|
	    PAT_VALUE(7, PAT_UNCACHEABLE);

	/* Set up DR6/7 to power-on state */
	state->dr6 = DBREG_DR6_RESERVED1;
	state->dr7 = DBREG_DR7_RESERVED1;
}
441 
442 /*
443  * Initialize a virtual machine.
444  */
static void *
svm_vminit(struct vm *vm)
{
	struct svm_softc *svm_sc;
	struct svm_vcpu *vcpu;
	vm_paddr_t msrpm_pa, iopm_pa, pml4_pa;
	int i;
	uint16_t maxcpus;

	/*
	 * NOTE(review): the softc is expected to be page-aligned (asserted
	 * below) — presumably because VMCBs are embedded in it; confirm
	 * against svm_softc's definition.
	 */
	svm_sc = kmem_zalloc(sizeof (*svm_sc), KM_SLEEP);
	VERIFY3U(((uintptr_t)svm_sc & PAGE_MASK),  ==,  0);

	/* The MSR and I/O permission bitmaps must be physically contiguous */
	svm_sc->msr_bitmap = vmm_contig_alloc(SVM_MSR_BITMAP_SIZE);
	if (svm_sc->msr_bitmap == NULL)
		panic("contigmalloc of SVM MSR bitmap failed");
	svm_sc->iopm_bitmap = vmm_contig_alloc(SVM_IO_BITMAP_SIZE);
	if (svm_sc->iopm_bitmap == NULL)
		panic("contigmalloc of SVM IO bitmap failed");

	svm_sc->vm = vm;
	svm_sc->nptp = vmspace_table_root(vm_get_vmspace(vm));

	/*
	 * Intercept read and write accesses to all MSRs.
	 */
	memset(svm_sc->msr_bitmap, 0xFF, SVM_MSR_BITMAP_SIZE);

	/*
	 * Access to the following MSRs is redirected to the VMCB when the
	 * guest is executing. Therefore it is safe to allow the guest to
	 * read/write these MSRs directly without hypervisor involvement.
	 */
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE);

	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT);

	svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC);

	/*
	 * Intercept writes to make sure that the EFER_SVM bit is not cleared.
	 */
	svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER);

	/* Intercept access to all I/O ports. */
	memset(svm_sc->iopm_bitmap, 0xFF, SVM_IO_BITMAP_SIZE);

	/* Set up each vCPU: VMCB location, initial contents, MSR state. */
	iopm_pa = vtophys(svm_sc->iopm_bitmap);
	msrpm_pa = vtophys(svm_sc->msr_bitmap);
	pml4_pa = svm_sc->nptp;
	maxcpus = vm_get_maxcpus(svm_sc->vm);
	for (i = 0; i < maxcpus; i++) {
		vcpu = svm_get_vcpu(svm_sc, i);
		vcpu->nextrip = ~0;
		vcpu->lastcpu = NOCPU;
		vcpu->vmcb_pa = vtophys(&vcpu->vmcb);
		vmcb_init(svm_sc, i, iopm_pa, msrpm_pa, pml4_pa);
		svm_msr_guest_init(svm_sc, i);
	}
	return (svm_sc);
}
514 
515 /*
516  * Collateral for a generic SVM VM-exit.
517  */
518 static void
519 vm_exit_svm(struct vm_exit *vme, uint64_t code, uint64_t info1, uint64_t info2)
520 {
521 
522 	vme->exitcode = VM_EXITCODE_SVM;
523 	vme->u.svm.exitcode = code;
524 	vme->u.svm.exitinfo1 = info1;
525 	vme->u.svm.exitinfo2 = info2;
526 }
527 
528 static enum vm_cpu_mode
529 svm_vcpu_mode(struct vmcb *vmcb)
530 {
531 	struct vmcb_state *state;
532 
533 	state = &vmcb->state;
534 
535 	if (state->efer & EFER_LMA) {
536 		struct vmcb_segment *seg;
537 
538 		/*
539 		 * Section 4.8.1 for APM2, check if Code Segment has
540 		 * Long attribute set in descriptor.
541 		 */
542 		seg = vmcb_segptr(vmcb, VM_REG_GUEST_CS);
543 		if (seg->attrib & VMCB_CS_ATTRIB_L)
544 			return (CPU_MODE_64BIT);
545 		else
546 			return (CPU_MODE_COMPATIBILITY);
547 	} else  if (state->cr0 & CR0_PE) {
548 		return (CPU_MODE_PROTECTED);
549 	} else {
550 		return (CPU_MODE_REAL);
551 	}
552 }
553 
554 static enum vm_paging_mode
555 svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer)
556 {
557 
558 	if ((cr0 & CR0_PG) == 0)
559 		return (PAGING_MODE_FLAT);
560 	if ((cr4 & CR4_PAE) == 0)
561 		return (PAGING_MODE_32);
562 	if (efer & EFER_LME)
563 		return (PAGING_MODE_64);
564 	else
565 		return (PAGING_MODE_PAE);
566 }
567 
568 static void
569 svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging)
570 {
571 	struct vmcb_state *state;
572 
573 	state = &vmcb->state;
574 	paging->cr3 = state->cr3;
575 	paging->cpl = state->cpl;
576 	paging->cpu_mode = svm_vcpu_mode(vmcb);
577 	paging->paging_mode = svm_paging_mode(state->cr0, state->cr4,
578 	    state->efer);
579 }
580 
581 #define	UNHANDLED 0
582 
583 /*
584  * Handle guest I/O intercept.
585  */
static int
svm_handle_inout(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;
	struct vm_inout *inout;
	struct vie *vie;
	uint64_t info1;
	struct vm_guest_paging paging;

	state = svm_get_vmcb_state(svm_sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	inout = &vmexit->u.inout;
	info1 = ctrl->exitinfo1;

	/*
	 * Decode EXITINFO1 as used here: bit 0 = IN, bit 2 = string op,
	 * bit 3 = REP prefix, bits [6:4] = operand size in bytes,
	 * bits [31:16] = port number.
	 */
	inout->bytes = (info1 >> 4) & 0x7;
	inout->flags = 0;
	inout->flags |= (info1 & BIT(0)) ? INOUT_IN : 0;
	inout->flags |= (info1 & BIT(3)) ? INOUT_REP : 0;
	inout->flags |= (info1 & BIT(2)) ? INOUT_STR : 0;
	inout->port = (uint16_t)(info1 >> 16);
	inout->eax = (uint32_t)(state->rax);

	if ((inout->flags & INOUT_STR) != 0) {
		/*
		 * The effective segment number in EXITINFO1[12:10] is populated
		 * only if the processor has the DecodeAssist capability.
		 *
		 * This is not specified explicitly in APMv2 but can be verified
		 * empirically.
		 */
		if (!has_decode_assist()) {
			/*
			 * Without decoding assistance, force the task of
			 * emulating the ins/outs on userspace.
			 */
			vmexit->exitcode = VM_EXITCODE_INST_EMUL;
			bzero(&vmexit->u.inst_emul,
			    sizeof (vmexit->u.inst_emul));
			return (UNHANDLED);
		}

		/*
		 * Bits 7-9 encode the address size of ins/outs operations where
		 * the 1/2/4 values correspond to 16/32/64 bit sizes.
		 */
		inout->addrsize = 2 * ((info1 >> 7) & 0x7);
		VERIFY(inout->addrsize == 2 || inout->addrsize == 4 ||
		    inout->addrsize == 8);

		if (inout->flags & INOUT_IN) {
			/*
			 * For INS instructions, %es (encoded as 0) is the
			 * implied segment for the operation.
			 */
			inout->segment = 0;
		} else {
			/*
			 * Bits 10-12 encode the segment for OUTS.
			 * This value follows the standard x86 segment order.
			 */
			inout->segment = (info1 >> 10) & 0x7;
		}
	}

	/* Hand the decoded operation to the in-kernel in/out emulation. */
	vmexit->exitcode = VM_EXITCODE_INOUT;
	svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &paging);
	vie = vm_vie_ctx(svm_sc->vm, vcpu);
	vie_init_inout(vie, inout, vmexit->inst_length, &paging);

	/* The in/out emulation will handle advancing %rip */
	vmexit->inst_length = 0;

	return (UNHANDLED);
}
661 
662 static int
663 npf_fault_type(uint64_t exitinfo1)
664 {
665 
666 	if (exitinfo1 & VMCB_NPF_INFO1_W)
667 		return (PROT_WRITE);
668 	else if (exitinfo1 & VMCB_NPF_INFO1_ID)
669 		return (PROT_EXEC);
670 	else
671 		return (PROT_READ);
672 }
673 
674 static bool
675 svm_npf_emul_fault(uint64_t exitinfo1)
676 {
677 	if (exitinfo1 & VMCB_NPF_INFO1_ID) {
678 		return (false);
679 	}
680 
681 	if (exitinfo1 & VMCB_NPF_INFO1_GPT) {
682 		return (false);
683 	}
684 
685 	if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) {
686 		return (false);
687 	}
688 
689 	return (true);
690 }
691 
/*
 * Prepare MMIO-emulation state for a nested page fault at 'gpa': record the
 * faulting GPA, %cs details needed by the instruction decoder, and any
 * instruction bytes supplied by decode-assist hardware.
 */
static void
svm_handle_mmio_emul(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit,
    uint64_t gpa)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb *vmcb;
	struct vie *vie;
	struct vm_guest_paging paging;
	struct vmcb_segment *seg;
	char *inst_bytes = NULL;
	uint8_t inst_len = 0;

	vmcb = svm_get_vmcb(svm_sc, vcpu);
	ctrl = &vmcb->ctrl;

	vmexit->exitcode = VM_EXITCODE_MMIO_EMUL;
	vmexit->u.mmio_emul.gpa = gpa;
	vmexit->u.mmio_emul.gla = VIE_INVALID_GLA;
	svm_paging_info(vmcb, &paging);

	/* The decoder needs the %cs base and default-operand-size hint. */
	switch (paging.cpu_mode) {
	case CPU_MODE_REAL:
		seg = vmcb_segptr(vmcb, VM_REG_GUEST_CS);
		vmexit->u.mmio_emul.cs_base = seg->base;
		vmexit->u.mmio_emul.cs_d = 0;
		break;
	case CPU_MODE_PROTECTED:
	case CPU_MODE_COMPATIBILITY:
		seg = vmcb_segptr(vmcb, VM_REG_GUEST_CS);
		vmexit->u.mmio_emul.cs_base = seg->base;

		/*
		 * Section 4.8.1 of APM2, Default Operand Size or D bit.
		 */
		vmexit->u.mmio_emul.cs_d = (seg->attrib & VMCB_CS_ATTRIB_D) ?
		    1 : 0;
		break;
	default:
		/* 64-bit mode: segmentation is flat, no D bit to consult. */
		vmexit->u.mmio_emul.cs_base = 0;
		vmexit->u.mmio_emul.cs_d = 0;
		break;
	}

	/*
	 * Copy the instruction bytes into 'vie' if available.
	 */
	if (has_decode_assist() && !disable_npf_assist) {
		inst_len = ctrl->inst_len;
		inst_bytes = (char *)ctrl->inst_bytes;
	}
	vie = vm_vie_ctx(svm_sc->vm, vcpu);
	vie_init_mmio(vie, inst_bytes, inst_len, &paging, gpa);
}
745 
746 /*
747  * Do not allow CD, NW, or invalid high bits to be asserted in the value of cr0
748  * which is live in the guest.  They are visible via the shadow instead.
749  */
750 #define	SVM_CR0_MASK	~(CR0_CD | CR0_NW | 0xffffffff00000000)
751 
/*
 * Update guest %cr0, maintaining the shadow copy for the bits masked out of
 * the live VMCB value (CD, NW, and the invalid upper 32 bits — see
 * SVM_CR0_MASK) and adjusting TLB state, EFER, and %cr0 intercepts as
 * required.
 *
 * 'guest_write' indicates the store originated from the guest (via
 * interception), in which case dependent CPU state (EFER.LMA) is updated
 * too.
 */
static void
svm_set_cr0(struct svm_softc *svm_sc, int vcpu, uint64_t val, bool guest_write)
{
	struct vmcb_state *state;
	struct svm_regctx *regctx;
	uint64_t masked, old, diff;

	state = svm_get_vmcb_state(svm_sc, vcpu);
	regctx = svm_get_guest_regctx(svm_sc, vcpu);

	/* Reconstruct the full (shadow-merged) previous value for diffing. */
	old = state->cr0 | (regctx->sctx_cr0_shadow & ~SVM_CR0_MASK);
	diff = old ^ val;

	/* No further work needed if register contents remain the same */
	if (diff == 0) {
		return;
	}

	/* Flush the TLB if the paging or write-protect bits are changing */
	if ((diff & CR0_PG) != 0 || (diff & CR0_WP) != 0) {
		flush_asid(svm_sc, vcpu);
	}

	/*
	 * If the change in %cr0 is due to a guest action (via interception)
	 * then other CPU state updates may be required.
	 */
	if (guest_write) {
		if ((diff & CR0_PG) != 0) {
			uint64_t efer = state->efer;

			/* Keep the long-mode state in EFER in sync */
			if ((val & CR0_PG) != 0 && (efer & EFER_LME) != 0) {
				state->efer |= EFER_LMA;
			}
			if ((val & CR0_PG) == 0 && (efer & EFER_LME) != 0) {
				state->efer &= ~EFER_LMA;
			}
		}
	}

	/* Live VMCB gets the masked value; the shadow keeps the full value. */
	masked = val & SVM_CR0_MASK;
	regctx->sctx_cr0_shadow = val;
	state->cr0 = masked;
	svm_set_dirty(svm_sc, vcpu, VMCB_CACHE_CR);

	if ((masked ^ val) != 0) {
		/*
		 * The guest has set bits in %cr0 which we are masking out and
		 * exposing via shadow.
		 *
		 * We must intercept %cr0 reads in order to make the shadowed
		 * view available to the guest.
		 *
		 * Writes to %cr0 must also be intercepted (unconditionally,
		 * unlike the VMCB_INTCPT_CR0_WRITE mechanism) so we can catch
		 * if/when the guest clears those shadowed bits.
		 */
		svm_enable_intercept(svm_sc, vcpu, VMCB_CR_INTCPT,
		    BIT(0) | BIT(16));
	} else {
		/*
		 * When no bits remain in %cr0 which require shadowing, the
		 * unconditional intercept of reads/writes to %cr0 can be
		 * disabled.
		 *
		 * The selective write intercept (VMCB_INTCPT_CR0_WRITE) remains
		 * in place so we can be notified of operations which change
		 * bits other than TS or MP.
		 */
		svm_disable_intercept(svm_sc, vcpu, VMCB_CR_INTCPT,
		    BIT(0) | BIT(16));
	}
	svm_set_dirty(svm_sc, vcpu, VMCB_CACHE_I);
}
827 
828 static void
829 svm_get_cr0(struct svm_softc *svm_sc, int vcpu, uint64_t *val)
830 {
831 	struct vmcb *vmcb;
832 	struct svm_regctx *regctx;
833 
834 	vmcb = svm_get_vmcb(svm_sc, vcpu);
835 	regctx = svm_get_guest_regctx(svm_sc, vcpu);
836 
837 	/*
838 	 * Include the %cr0 bits which exist only in the shadow along with those
839 	 * in the running vCPU state.
840 	 */
841 	*val = vmcb->state.cr0 | (regctx->sctx_cr0_shadow & ~SVM_CR0_MASK);
842 }
843 
844 static void
845 svm_handle_cr0_read(struct svm_softc *svm_sc, int vcpu, enum vm_reg_name reg)
846 {
847 	uint64_t val;
848 	int err __maybe_unused;
849 
850 	svm_get_cr0(svm_sc, vcpu, &val);
851 	err = svm_setreg(svm_sc, vcpu, reg, val);
852 	ASSERT(err == 0);
853 }
854 
855 static void
856 svm_handle_cr0_write(struct svm_softc *svm_sc, int vcpu, enum vm_reg_name reg)
857 {
858 	struct vmcb_state *state;
859 	uint64_t val;
860 	int err __maybe_unused;
861 
862 	state = svm_get_vmcb_state(svm_sc, vcpu);
863 
864 	err = svm_getreg(svm_sc, vcpu, reg, &val);
865 	ASSERT(err == 0);
866 
867 	if ((val & CR0_NW) != 0 && (val & CR0_CD) == 0) {
868 		/* NW without CD is nonsensical */
869 		vm_inject_gp(svm_sc->vm, vcpu);
870 		return;
871 	}
872 	if ((val & CR0_PG) != 0 && (val & CR0_PE) == 0) {
873 		/* PG requires PE */
874 		vm_inject_gp(svm_sc->vm, vcpu);
875 		return;
876 	}
877 	if ((state->cr0 & CR0_PG) == 0 && (val & CR0_PG) != 0) {
878 		/* When enabling paging, PAE must be enabled if LME is. */
879 		if ((state->efer & EFER_LME) != 0 &&
880 		    (state->cr4 & CR4_PAE) == 0) {
881 			vm_inject_gp(svm_sc->vm, vcpu);
882 			return;
883 		}
884 	}
885 
886 	svm_set_cr0(svm_sc, vcpu, val, true);
887 }
888 
889 static void
890 svm_inst_emul_other(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
891 {
892 	struct vie *vie;
893 	struct vm_guest_paging paging;
894 
895 	/* Let the instruction emulation (hopefully in-kernel) handle it */
896 	vmexit->exitcode = VM_EXITCODE_INST_EMUL;
897 	bzero(&vmexit->u.inst_emul, sizeof (vmexit->u.inst_emul));
898 	vie = vm_vie_ctx(svm_sc->vm, vcpu);
899 	svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &paging);
900 	vie_init_other(vie, &paging);
901 
902 	/* The instruction emulation will handle advancing %rip */
903 	vmexit->inst_length = 0;
904 }
905 
906 static void
907 svm_update_virqinfo(struct svm_softc *sc, int vcpu)
908 {
909 	struct vm *vm;
910 	struct vlapic *vlapic;
911 	struct vmcb_ctrl *ctrl;
912 
913 	vm = sc->vm;
914 	vlapic = vm_lapic(vm, vcpu);
915 	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
916 
917 	/* Update %cr8 in the emulated vlapic */
918 	vlapic_set_cr8(vlapic, ctrl->v_tpr);
919 
920 	/* Virtual interrupt injection is not used. */
921 	KASSERT(ctrl->v_intr_vector == 0, ("%s: invalid "
922 	    "v_intr_vector %d", __func__, ctrl->v_intr_vector));
923 }
924 
925 CTASSERT(VMCB_EVENTINJ_TYPE_INTR	== VM_INTINFO_HWINTR);
926 CTASSERT(VMCB_EVENTINJ_TYPE_NMI		== VM_INTINFO_NMI);
927 CTASSERT(VMCB_EVENTINJ_TYPE_EXCEPTION	== VM_INTINFO_HWEXCP);
928 CTASSERT(VMCB_EVENTINJ_TYPE_INTn	== VM_INTINFO_SWINTR);
929 CTASSERT(VMCB_EVENTINJ_EC_VALID		== VM_INTINFO_DEL_ERRCODE);
930 CTASSERT(VMCB_EVENTINJ_VALID		== VM_INTINFO_VALID);
931 
932 /*
933  * Store SVM-specific event injection info for later handling.  This depends on
934  * the bhyve-internal event definitions matching those in the VMCB, as ensured
935  * by the above CTASSERTs.
936  */
937 static void
938 svm_stash_intinfo(struct svm_softc *svm_sc, int vcpu, uint64_t intinfo)
939 {
940 	ASSERT(VMCB_EXITINTINFO_VALID(intinfo));
941 
942 	/*
943 	 * If stashing an NMI pending injection, ensure that it bears the
944 	 * correct vector which exit_intinfo expects.
945 	 */
946 	if (VM_INTINFO_TYPE(intinfo) == VM_INTINFO_NMI) {
947 		intinfo &= ~VM_INTINFO_MASK_VECTOR;
948 		intinfo |= IDT_NMI;
949 	}
950 
951 	VERIFY0(vm_exit_intinfo(svm_sc->vm, vcpu, intinfo));
952 }
953 
954 static void
955 svm_save_exitintinfo(struct svm_softc *svm_sc, int vcpu)
956 {
957 	struct vmcb_ctrl *ctrl  = svm_get_vmcb_ctrl(svm_sc, vcpu);
958 	uint64_t intinfo = ctrl->exitintinfo;
959 
960 	if (VMCB_EXITINTINFO_VALID(intinfo)) {
961 		/*
962 		 * If a #VMEXIT happened during event delivery then record the
963 		 * event that was being delivered.
964 		 */
965 		vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1);
966 
967 		svm_stash_intinfo(svm_sc, vcpu, intinfo);
968 	}
969 }
970 
971 static __inline int
972 vintr_intercept_enabled(struct svm_softc *sc, int vcpu)
973 {
974 
975 	return (svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
976 	    VMCB_INTCPT_VINTR));
977 }
978 
/*
 * Arrange for a #VMEXIT as soon as the guest becomes able to accept an
 * interrupt, by posting a dummy (vector 0) V_IRQ and intercepting VINTR.
 */
static void
svm_enable_intr_window_exiting(struct svm_softc *sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	state = svm_get_vmcb_state(sc, vcpu);

	/* Already armed?  Just verify the expected invariants hold. */
	if ((ctrl->v_irq & V_IRQ) != 0 && ctrl->v_intr_vector == 0) {
		KASSERT(ctrl->v_intr_prio & V_IGN_TPR,
		    ("%s: invalid v_ign_tpr", __func__));
		KASSERT(vintr_intercept_enabled(sc, vcpu),
		    ("%s: vintr intercept should be enabled", __func__));
		return;
	}

	/*
	 * We use V_IRQ in conjunction with the VINTR intercept to trap into the
	 * hypervisor as soon as a virtual interrupt can be delivered.
	 *
	 * Since injected events are not subject to intercept checks we need to
	 * ensure that the V_IRQ is not actually going to be delivered on VM
	 * entry.
	 */
	VERIFY((ctrl->eventinj & VMCB_EVENTINJ_VALID) != 0 ||
	    (state->rflags & PSL_I) == 0 || ctrl->intr_shadow);

	/* V_IGN_TPR makes the dummy V_IRQ ignore the TPR priority check. */
	ctrl->v_irq |= V_IRQ;
	ctrl->v_intr_prio |= V_IGN_TPR;
	ctrl->v_intr_vector = 0;
	svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
}
1013 
1014 static void
1015 svm_disable_intr_window_exiting(struct svm_softc *sc, int vcpu)
1016 {
1017 	struct vmcb_ctrl *ctrl;
1018 
1019 	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
1020 
1021 	if ((ctrl->v_irq & V_IRQ) == 0 && ctrl->v_intr_vector == 0) {
1022 		KASSERT(!vintr_intercept_enabled(sc, vcpu),
1023 		    ("%s: vintr intercept should be disabled", __func__));
1024 		return;
1025 	}
1026 
1027 	ctrl->v_irq &= ~V_IRQ;
1028 	ctrl->v_intr_vector = 0;
1029 	svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
1030 	svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
1031 }
1032 
/*
 * Once an NMI is injected it blocks delivery of further NMIs until the handler
 * executes an IRET. The IRET intercept is enabled when an NMI is injected to
 * track when the vcpu is done handling the NMI.
 */
static int
svm_nmi_blocked(struct svm_softc *sc, int vcpu)
{
	/* NMI blocking is tracked via the presence of the IRET intercept. */
	return (svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
	    VMCB_INTCPT_IRET));
}
1044 
/*
 * End the virtual NMI-blocking window: clear the IRET intercept and raise an
 * interrupt shadow so the pending "iret" can execute before any further NMI
 * is injected.
 */
static void
svm_clear_nmi_blocking(struct svm_softc *sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;

	KASSERT(svm_nmi_blocked(sc, vcpu), ("vNMI already unblocked"));
	/*
	 * When the IRET intercept is cleared the vcpu will attempt to execute
	 * the "iret" when it runs next. However, it is possible to inject
	 * another NMI into the vcpu before the "iret" has actually executed.
	 *
	 * For e.g. if the "iret" encounters a #NPF when accessing the stack
	 * it will trap back into the hypervisor. If an NMI is pending for
	 * the vcpu it will be injected into the guest.
	 *
	 * XXX this needs to be fixed
	 */
	svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);

	/*
	 * Set an interrupt shadow to prevent an NMI from being immediately
	 * injected on the next VMRUN.
	 */
	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	ctrl->intr_shadow = 1;
}
1071 
/*
 * Populate the VMCB EVENTINJ field from a bhyve-format intinfo value so the
 * event is delivered to the guest on the next VMRUN.
 */
static void
svm_inject_event(struct vmcb_ctrl *ctrl, uint64_t info)
{
	ASSERT(VM_INTINFO_PENDING(info));

	uint8_t vector = VM_INTINFO_VECTOR(info);
	uint32_t type = VM_INTINFO_TYPE(info);

	/*
	 * Correct behavior depends on bhyve intinfo event types lining up with
	 * those defined by AMD for event injection in the VMCB.  The CTASSERTs
	 * above svm_save_exitintinfo() ensure it.
	 */
	switch (type) {
	case VM_INTINFO_NMI:
		/* Ensure vector for injected event matches its type (NMI) */
		vector = IDT_NMI;
		break;
	case VM_INTINFO_HWINTR:
	case VM_INTINFO_SWINTR:
		break;
	case VM_INTINFO_HWEXCP:
		if (vector == IDT_NMI) {
			/*
			 * NMIs are expected to be injected with
			 * VMCB_EVENTINJ_TYPE_NMI, rather than as an exception
			 * with the NMI vector.
			 */
			type = VM_INTINFO_NMI;
		}
		/* Hardware exception vectors are confined to 0-31. */
		VERIFY(vector < 32);
		break;
	default:
		/*
		 * Since there is not strong validation for injected event types
		 * at this point, fall back to software interrupt for those we
		 * do not recognize.
		 */
		type = VM_INTINFO_SWINTR;
		break;
	}

	ctrl->eventinj = VMCB_EVENTINJ_VALID | type | vector;
	if (VM_INTINFO_HAS_ERRCODE(info)) {
		/* The error code is carried in bits 63:32 of EVENTINJ. */
		ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID;
		ctrl->eventinj |= (uint64_t)VM_INTINFO_ERRCODE(info) << 32;
	}
}
1120 
/*
 * Inject an NMI into the guest and begin tracking the resulting virtual
 * NMI-blocking window via the IRET intercept.
 */
static void
svm_inject_nmi(struct svm_softc *sc, int vcpu)
{
	struct vmcb_ctrl *ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	ASSERT(!svm_nmi_blocked(sc, vcpu));

	ctrl->eventinj = VMCB_EVENTINJ_VALID | VMCB_EVENTINJ_TYPE_NMI;
	vm_nmi_clear(sc->vm, vcpu);

	/*
	 * Virtual NMI blocking is now in effect.
	 *
	 * Not only does this block a subsequent NMI injection from taking
	 * place, it also configures an intercept on the IRET so we can track
	 * when the next injection can take place.
	 */
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
}
1140 
/*
 * Inject a maskable interrupt with the given vector into the guest.  The
 * EVENTINJ type field is left as zero, which the APM defines as external
 * interrupt injection.
 */
static void
svm_inject_irq(struct svm_softc *sc, int vcpu, int vector)
{
	struct vmcb_ctrl *ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	ASSERT(vector >= 0 && vector <= 255);

	ctrl->eventinj = VMCB_EVENTINJ_VALID | vector;
}
1150 
1151 #define	EFER_MBZ_BITS	0xFFFFFFFFFFFF0200UL
1152 
1153 static vm_msr_result_t
1154 svm_write_efer(struct svm_softc *sc, int vcpu, uint64_t newval)
1155 {
1156 	struct vmcb_state *state = svm_get_vmcb_state(sc, vcpu);
1157 	uint64_t lma;
1158 	int error;
1159 
1160 	newval &= ~0xFE;		/* clear the Read-As-Zero (RAZ) bits */
1161 
1162 	if (newval & EFER_MBZ_BITS) {
1163 		return (VMR_GP);
1164 	}
1165 
1166 	/* APMv2 Table 14-5 "Long-Mode Consistency Checks" */
1167 	const uint64_t changed = state->efer ^ newval;
1168 	if (changed & EFER_LME) {
1169 		if (state->cr0 & CR0_PG) {
1170 			return (VMR_GP);
1171 		}
1172 	}
1173 
1174 	/* EFER.LMA = EFER.LME & CR0.PG */
1175 	if ((newval & EFER_LME) != 0 && (state->cr0 & CR0_PG) != 0) {
1176 		lma = EFER_LMA;
1177 	} else {
1178 		lma = 0;
1179 	}
1180 	if ((newval & EFER_LMA) != lma) {
1181 		return (VMR_GP);
1182 	}
1183 
1184 	if ((newval & EFER_NXE) != 0 &&
1185 	    !vm_cpuid_capability(sc->vm, vcpu, VCC_NO_EXECUTE)) {
1186 		return (VMR_GP);
1187 	}
1188 	if ((newval & EFER_FFXSR) != 0 &&
1189 	    !vm_cpuid_capability(sc->vm, vcpu, VCC_FFXSR)) {
1190 		return (VMR_GP);
1191 	}
1192 	if ((newval & EFER_TCE) != 0 &&
1193 	    !vm_cpuid_capability(sc->vm, vcpu, VCC_TCE)) {
1194 		return (VMR_GP);
1195 	}
1196 
1197 	/*
1198 	 * Until bhyve has proper support for long-mode segment limits, just
1199 	 * toss a #GP at the guest if they attempt to use it.
1200 	 */
1201 	if (newval & EFER_LMSLE) {
1202 		return (VMR_GP);
1203 	}
1204 
1205 	error = svm_setreg(sc, vcpu, VM_REG_GUEST_EFER, newval);
1206 	VERIFY0(error);
1207 	return (VMR_OK);
1208 }
1209 
/*
 * Common handler for RDMSR/WRMSR intercepts.
 *
 * Returns 1 if the access was fully handled in-kernel (including by raising
 * #GP in the guest), or 0 if 'vmexit' was populated so the access can be
 * processed further in userland.
 */
static int
svm_handle_msr(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit,
    bool is_wrmsr)
{
	struct vmcb_state *state = svm_get_vmcb_state(svm_sc, vcpu);
	struct svm_regctx *ctx = svm_get_guest_regctx(svm_sc, vcpu);
	const uint32_t ecx = ctx->sctx_rcx;
	vm_msr_result_t res;
	uint64_t val = 0;

	if (is_wrmsr) {
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_WRMSR, 1);
		/* The WRMSR value arrives split across %edx:%eax. */
		val = ctx->sctx_rdx << 32 | (uint32_t)state->rax;

		if (vlapic_owned_msr(ecx)) {
			struct vlapic *vlapic = vm_lapic(svm_sc->vm, vcpu);

			res = vlapic_wrmsr(vlapic, ecx, val);
		} else if (ecx == MSR_EFER) {
			/* EFER writes require long-mode consistency checks */
			res = svm_write_efer(svm_sc, vcpu, val);
		} else {
			res = svm_wrmsr(svm_sc, vcpu, ecx, val);
		}
	} else {
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_RDMSR, 1);

		if (vlapic_owned_msr(ecx)) {
			struct vlapic *vlapic = vm_lapic(svm_sc->vm, vcpu);

			res = vlapic_rdmsr(vlapic, ecx, &val);
		} else {
			res = svm_rdmsr(svm_sc, vcpu, ecx, &val);
		}
	}

	switch (res) {
	case VMR_OK:
		/* Store rdmsr result in the appropriate registers */
		if (!is_wrmsr) {
			state->rax = (uint32_t)val;
			ctx->sctx_rdx = val >> 32;
		}
		return (1);
	case VMR_GP:
		vm_inject_gp(svm_sc->vm, vcpu);
		return (1);
	case VMR_UNHANLDED:
		vmexit->exitcode = is_wrmsr ?
		    VM_EXITCODE_WRMSR : VM_EXITCODE_RDMSR;
		vmexit->u.msr.code = ecx;
		vmexit->u.msr.wval = val;
		return (0);
	default:
		panic("unexpected msr result %u\n", res);
	}
}
1266 
/*
 * From section "State Saved on Exit" in APMv2: nRIP is saved for all #VMEXITs
 * that are due to instruction intercepts as well as MSR and IOIO intercepts
 * and exceptions caused by INT3, INTO and BOUND instructions.
 *
 * Return 1 if the nRIP is valid and 0 otherwise.
 */
static int
nrip_valid(uint64_t exitcode)
{
	/* Reads and writes of CR0-CR15 and DR0-DR15 (0x00-0x3F) */
	if (exitcode <= 0x3f) {
		return (1);
	}
	/* Exceptions raised by INT3, INTO and BOUND */
	if (exitcode >= 0x43 && exitcode <= 0x45) {
		return (1);
	}
	/* VMEXIT_CR0_SEL_WRITE ... VMEXIT_MSR */
	if (exitcode >= 0x65 && exitcode <= 0x7c) {
		return (1);
	}
	/* VMEXIT_VMRUN ... VMEXIT_XSETBV */
	if (exitcode >= 0x80 && exitcode <= 0x8d) {
		return (1);
	}
	return (0);
}
1292 
/*
 * Handle a #VMEXIT for the given vCPU: translate the VMCB exit code and its
 * info fields into a vm_exit, handling in-kernel whatever can be handled
 * here.  Returns 1 if the exit was fully handled (the guest may be resumed),
 * or 0 if it must be processed further (e.g. by userland).
 */
static int
svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
{
	struct vmcb *vmcb;
	struct vmcb_state *state;
	struct vmcb_ctrl *ctrl;
	struct svm_regctx *ctx;
	uint64_t code, info1, info2;
	int handled;

	ctx = svm_get_guest_regctx(svm_sc, vcpu);
	vmcb = svm_get_vmcb(svm_sc, vcpu);
	state = &vmcb->state;
	ctrl = &vmcb->ctrl;

	handled = 0;
	code = ctrl->exitcode;
	info1 = ctrl->exitinfo1;
	info2 = ctrl->exitinfo2;

	vmexit->exitcode = VM_EXITCODE_BOGUS;
	vmexit->rip = state->rip;
	/* nRIP, when valid, points past the intercepted instruction. */
	vmexit->inst_length = nrip_valid(code) ? ctrl->nrip - state->rip : 0;

	vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_COUNT, 1);

	/*
	 * #VMEXIT(INVALID) needs to be handled early because the VMCB is
	 * in an inconsistent state and can trigger assertions that would
	 * never happen otherwise.
	 */
	if (code == VMCB_EXIT_INVALID) {
		vm_exit_svm(vmexit, code, info1, info2);
		return (0);
	}

	KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event "
	    "injection valid bit is set %lx", __func__, ctrl->eventinj));

	KASSERT(vmexit->inst_length >= 0 && vmexit->inst_length <= 15,
	    ("invalid inst_length %d: code (%lx), info1 (%lx), info2 (%lx)",
	    vmexit->inst_length, code, info1, info2));

	svm_update_virqinfo(svm_sc, vcpu);
	svm_save_exitintinfo(svm_sc, vcpu);

	switch (code) {
	case VMCB_EXIT_CR0_READ:
		if (VMCB_CRx_INFO1_VALID(info1) != 0) {
			svm_handle_cr0_read(svm_sc, vcpu,
			    vie_regnum_map(VMCB_CRx_INFO1_GPR(info1)));
			handled = 1;
		} else {
			/*
			 * If SMSW is used to read the contents of %cr0, then
			 * the VALID bit will not be set in `info1`, since the
			 * handling is different from the mov-to-reg case.
			 *
			 * Punt to the instruction emulation to handle it.
			 */
			svm_inst_emul_other(svm_sc, vcpu, vmexit);
		}
		break;
	case VMCB_EXIT_CR0_WRITE:
	case VMCB_EXIT_CR0_SEL_WRITE:
		if (VMCB_CRx_INFO1_VALID(info1) != 0) {
			svm_handle_cr0_write(svm_sc, vcpu,
			    vie_regnum_map(VMCB_CRx_INFO1_GPR(info1)));
			handled = 1;
		} else {
			/*
			 * Writes to %cr0 without VALID being set in `info1` are
			 * initiated by the LMSW and CLTS instructions.  While
			 * LMSW (like SMSW) sees little use in modern OSes and
			 * bootloaders, CLTS is still used for handling FPU
			 * state transitions.
			 *
			 * Punt to the instruction emulation to handle them.
			 */
			svm_inst_emul_other(svm_sc, vcpu, vmexit);
		}
		break;
	case VMCB_EXIT_IRET:
		/*
		 * Restart execution at "iret" but with the intercept cleared.
		 */
		vmexit->inst_length = 0;
		svm_clear_nmi_blocking(svm_sc, vcpu);
		handled = 1;
		break;
	case VMCB_EXIT_VINTR:	/* interrupt window exiting */
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_VINTR, 1);
		svm_disable_intr_window_exiting(svm_sc, vcpu);
		handled = 1;
		break;
	case VMCB_EXIT_INTR:	/* external interrupt */
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1);
		handled = 1;
		break;
	case VMCB_EXIT_NMI:
	case VMCB_EXIT_SMI:
	case VMCB_EXIT_INIT:
		/*
		 * For external NMI/SMI and physical INIT interrupts, simply
		 * continue execution, as those host events will be handled by
		 * the physical CPU.
		 */
		handled = 1;
		break;
	case VMCB_EXIT_EXCP0 ... VMCB_EXIT_EXCP31: {
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXCEPTION, 1);

		/* Exit codes for exceptions are offset by the vector. */
		const uint8_t idtvec = code - VMCB_EXIT_EXCP0;
		uint32_t errcode = 0;
		bool reflect = true;
		bool errcode_valid = false;

		switch (idtvec) {
		case IDT_MC:
			/* The host will handle the MCE itself. */
			reflect = false;
			vmm_call_trap(T_MCE);
			break;
		case IDT_PF:
			VERIFY0(svm_setreg(svm_sc, vcpu, VM_REG_GUEST_CR2,
			    info2));
			/* fallthru */
		case IDT_NP:
		case IDT_SS:
		case IDT_GP:
		case IDT_AC:
		case IDT_TS:
			errcode_valid = true;
			errcode = info1;
			break;

		case IDT_DF:
			errcode_valid = true;
			break;

		case IDT_BP:
		case IDT_OF:
		case IDT_BR:
			/*
			 * The 'nrip' field is populated for INT3, INTO and
			 * BOUND exceptions and this also implies that
			 * 'inst_length' is non-zero.
			 *
			 * Reset 'inst_length' to zero so the guest %rip at
			 * event injection is identical to what it was when
			 * the exception originally happened.
			 */
			vmexit->inst_length = 0;
			/* fallthru */
		default:
			errcode_valid = false;
			break;
		}
		VERIFY0(vmexit->inst_length);

		if (reflect) {
			/* Reflect the exception back into the guest */
			VERIFY0(vm_inject_exception(svm_sc->vm, vcpu, idtvec,
			    errcode_valid, errcode, false));
		}
		handled = 1;
		break;
		}
	case VMCB_EXIT_MSR:
		handled = svm_handle_msr(svm_sc, vcpu, vmexit, info1 != 0);
		break;
	case VMCB_EXIT_IO:
		handled = svm_handle_inout(svm_sc, vcpu, vmexit);
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1);
		break;
	case VMCB_EXIT_SHUTDOWN:
		(void) vm_suspend(svm_sc->vm, VM_SUSPEND_TRIPLEFAULT);
		handled = 1;
		break;
	case VMCB_EXIT_INVLPGA:
		/* privileged invalidation instructions */
		vm_inject_ud(svm_sc->vm, vcpu);
		handled = 1;
		break;
	case VMCB_EXIT_VMRUN:
	case VMCB_EXIT_VMLOAD:
	case VMCB_EXIT_VMSAVE:
	case VMCB_EXIT_STGI:
	case VMCB_EXIT_CLGI:
	case VMCB_EXIT_SKINIT:
		/* privileged vmm instructions */
		vm_inject_ud(svm_sc->vm, vcpu);
		handled = 1;
		break;
	case VMCB_EXIT_INVD:
	case VMCB_EXIT_WBINVD:
		/* ignore exit */
		handled = 1;
		break;
	case VMCB_EXIT_VMMCALL:
		/* No handlers make use of VMMCALL for now */
		vm_inject_ud(svm_sc->vm, vcpu);
		handled = 1;
		break;
	case VMCB_EXIT_CPUID:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1);
		vcpu_emulate_cpuid(svm_sc->vm, vcpu, &state->rax,
		    &ctx->sctx_rbx, &ctx->sctx_rcx, &ctx->sctx_rdx);
		handled = 1;
		break;
	case VMCB_EXIT_HLT:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1);
		vmexit->exitcode = VM_EXITCODE_HLT;
		vmexit->u.hlt.rflags = state->rflags;
		break;
	case VMCB_EXIT_PAUSE:
		vmexit->exitcode = VM_EXITCODE_PAUSE;
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_PAUSE, 1);
		break;
	case VMCB_EXIT_NPF:
		/* EXITINFO2 contains the faulting guest physical address */
		if (info1 & VMCB_NPF_INFO1_RSV) {
			/* nested fault with reserved bits set */
		} else if (vm_mem_allocated(svm_sc->vm, vcpu, info2)) {
			vmexit->exitcode = VM_EXITCODE_PAGING;
			vmexit->u.paging.gpa = info2;
			vmexit->u.paging.fault_type = npf_fault_type(info1);
			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
		} else if (svm_npf_emul_fault(info1)) {
			svm_handle_mmio_emul(svm_sc, vcpu, vmexit, info2);
			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_MMIO_EMUL, 1);
		}
		break;
	case VMCB_EXIT_MONITOR:
		vmexit->exitcode = VM_EXITCODE_MONITOR;
		break;
	case VMCB_EXIT_MWAIT:
		vmexit->exitcode = VM_EXITCODE_MWAIT;
		break;
	default:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1);
		break;
	}

	DTRACE_PROBE3(vmm__vexit, int, vcpu, uint64_t, vmexit->rip, uint32_t,
	    code);

	if (handled) {
		/* Advance past the intercepted instruction before resuming. */
		vmexit->rip += vmexit->inst_length;
		vmexit->inst_length = 0;
		state->rip = vmexit->rip;
	} else {
		if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
			/*
			 * If this VM exit was not claimed by anybody then
			 * treat it as a generic SVM exit.
			 */
			vm_exit_svm(vmexit, code, info1, info2);
		} else {
			/*
			 * The exitcode and collateral have been populated.
			 * The VM exit will be processed further in userland.
			 */
		}
	}
	return (handled);
}
1560 
1561 /*
1562  * Inject exceptions, NMIs, and ExtINTs.
1563  *
1564  * The logic behind these are complicated and may involve mutex contention, so
1565  * the injection is performed without the protection of host CPU interrupts
1566  * being disabled.  This means a racing notification could be "lost",
1567  * necessitating a later call to svm_inject_recheck() to close that window
1568  * of opportunity.
1569  */
static enum event_inject_state
svm_inject_events(struct svm_softc *sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;
	struct svm_vcpu *vcpustate;
	uint64_t intinfo;
	enum event_inject_state ev_state;

	state = svm_get_vmcb_state(sc, vcpu);
	ctrl  = svm_get_vmcb_ctrl(sc, vcpu);
	vcpustate = svm_get_vcpu(sc, vcpu);
	ev_state = EIS_CAN_INJECT;

	/* Clear any interrupt shadow if guest %rip has changed */
	if (vcpustate->nextrip != state->rip) {
		ctrl->intr_shadow = 0;
	}

	/*
	 * An event is already pending for injection.  This can occur when the
	 * vCPU exits prior to VM entry (like for an AST).
	 */
	if (ctrl->eventinj & VMCB_EVENTINJ_VALID) {
		return (EIS_EV_EXISTING | EIS_REQ_EXIT);
	}

	/*
	 * Inject pending events or exceptions for this vcpu.
	 *
	 * An event might be pending because the previous #VMEXIT happened
	 * during event delivery (i.e. ctrl->exitintinfo).
	 *
	 * An event might also be pending because an exception was injected
	 * by the hypervisor (e.g. #PF during instruction emulation).
	 */
	if (vm_entry_intinfo(sc->vm, vcpu, &intinfo)) {
		svm_inject_event(ctrl, intinfo);
		vmm_stat_incr(sc->vm, vcpu, VCPU_INTINFO_INJECTED, 1);
		ev_state = EIS_EV_INJECTED;
	}

	/* NMI event has priority over interrupts. */
	if (vm_nmi_pending(sc->vm, vcpu) && !svm_nmi_blocked(sc, vcpu)) {
		if (ev_state == EIS_CAN_INJECT) {
			/* Can't inject NMI if vcpu is in an intr_shadow. */
			if (ctrl->intr_shadow) {
				return (EIS_GI_BLOCK);
			}

			svm_inject_nmi(sc, vcpu);
			ev_state = EIS_EV_INJECTED;
		} else {
			return (ev_state | EIS_REQ_EXIT);
		}
	}

	if (vm_extint_pending(sc->vm, vcpu)) {
		int vector;

		if (ev_state != EIS_CAN_INJECT) {
			return (ev_state | EIS_REQ_EXIT);
		}

		/*
		 * If the guest has disabled interrupts or is in an interrupt
		 * shadow then we cannot inject the pending interrupt.
		 */
		if ((state->rflags & PSL_I) == 0 || ctrl->intr_shadow) {
			return (EIS_GI_BLOCK);
		}

		/* Ask the legacy pic for a vector to inject */
		vatpic_pending_intr(sc->vm, &vector);
		KASSERT(vector >= 0 && vector <= 255,
		    ("invalid vector %d from INTR", vector));

		svm_inject_irq(sc, vcpu, vector);
		vm_extint_clear(sc->vm, vcpu);
		/* Injection consumes the ExtINT; ack it at the vATPIC. */
		vatpic_intr_accepted(sc->vm, vector);
		ev_state = EIS_EV_INJECTED;
	}

	return (ev_state);
}
1655 
1656 /*
1657  * Synchronize vLAPIC state and inject any interrupts pending on it.
1658  *
1659  * This is done with host CPU interrupts disabled so notification IPIs will be
1660  * queued on the host APIC and recognized when entering SVM guest context.
1661  */
static enum event_inject_state
svm_inject_vlapic(struct svm_softc *sc, int vcpu, struct vlapic *vlapic,
    enum event_inject_state ev_state)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;
	int vector;
	uint8_t v_tpr;

	state = svm_get_vmcb_state(sc, vcpu);
	ctrl  = svm_get_vmcb_ctrl(sc, vcpu);

	/*
	 * The guest can modify the TPR by writing to %cr8. In guest mode the
	 * CPU reflects this write to V_TPR without hypervisor intervention.
	 *
	 * The guest can also modify the TPR by writing to it via the memory
	 * mapped APIC page. In this case, the write will be emulated by the
	 * hypervisor. For this reason V_TPR must be updated before every
	 * VMRUN.
	 */
	v_tpr = vlapic_get_cr8(vlapic);
	KASSERT(v_tpr <= 15, ("invalid v_tpr %x", v_tpr));
	if (ctrl->v_tpr != v_tpr) {
		ctrl->v_tpr = v_tpr;
		svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
	}

	/* If an event cannot otherwise be injected, we are done for now */
	if (ev_state != EIS_CAN_INJECT) {
		return (ev_state);
	}

	if (!vlapic_pending_intr(vlapic, &vector)) {
		return (EIS_CAN_INJECT);
	}
	KASSERT(vector >= 16 && vector <= 255,
	    ("invalid vector %d from local APIC", vector));

	/*
	 * If the guest has disabled interrupts or is in an interrupt shadow
	 * then we cannot inject the pending interrupt.
	 */
	if ((state->rflags & PSL_I) == 0 || ctrl->intr_shadow) {
		return (EIS_GI_BLOCK);
	}

	svm_inject_irq(sc, vcpu, vector);
	/* Let the vLAPIC record acceptance of the injected vector. */
	vlapic_intr_accepted(vlapic, vector);
	return (EIS_EV_INJECTED);
}
1713 
1714 /*
1715  * Re-check for events to be injected.
1716  *
1717  * Once host CPU interrupts are disabled, check for the presence of any events
1718  * which require injection processing.  If an exit is required upon injection,
1719  * or once the guest becomes interruptable, that will be configured too.
1720  */
static bool
svm_inject_recheck(struct svm_softc *sc, int vcpu,
    enum event_inject_state ev_state)
{
	struct vmcb_ctrl *ctrl;

	ctrl  = svm_get_vmcb_ctrl(sc, vcpu);

	if (ev_state == EIS_CAN_INJECT) {
		/*
		 * An active interrupt shadow would preclude us from injecting
		 * any events picked up during a re-check.
		 */
		if (ctrl->intr_shadow != 0) {
			return (false);
		}

		if (vm_nmi_pending(sc->vm, vcpu) &&
		    !svm_nmi_blocked(sc, vcpu)) {
			/* queued NMI not blocked by NMI-window-exiting */
			return (true);
		}
		if (vm_extint_pending(sc->vm, vcpu)) {
			/* queued ExtINT not blocked by existing injection */
			return (true);
		}
	} else {
		if ((ev_state & EIS_REQ_EXIT) != 0) {
			/*
			 * Use a self-IPI to force an immediate exit after
			 * event injection has occurred.
			 */
			poke_cpu(CPU->cpu_id);
		} else {
			/*
			 * If any event is being injected, an exit immediately
			 * upon becoming interruptable again will allow pending
			 * or newly queued events to be injected in a timely
			 * manner.
			 */
			svm_enable_intr_window_exiting(sc, vcpu);
		}
	}
	/* No further injection handling is required before VM entry. */
	return (false);
}
1766 
1767 
/*
 * Update the ASID and TLB-flush controls in the VMCB prior to VM entry,
 * flagging to the HMA ASID logic whether the NPT generation has changed since
 * this vCPU last ran.
 *
 * NOTE(review): the 'thiscpu' parameter is not referenced in this body;
 * presumably per-CPU ASID bookkeeping lives in hma_svm_asid_update() — verify.
 */
static void
check_asid(struct svm_softc *sc, int vcpuid, uint_t thiscpu, uint64_t nptgen)
{
	struct svm_vcpu *vcpustate = svm_get_vcpu(sc, vcpuid);
	struct vmcb_ctrl *ctrl = svm_get_vmcb_ctrl(sc, vcpuid);
	uint8_t flush;

	flush = hma_svm_asid_update(&vcpustate->hma_asid, has_flush_by_asid(),
	    vcpustate->nptgen != nptgen);

	if (flush != VMCB_TLB_FLUSH_NOTHING) {
		/* The VMCB ASID may have changed; propagate and dirty it. */
		ctrl->asid = vcpustate->hma_asid.hsa_asid;
		svm_set_dirty(sc, vcpuid, VMCB_CACHE_ASID);
	}
	ctrl->tlb_ctrl = flush;
	vcpustate->nptgen = nptgen;
}
1785 
/*
 * Unconditionally queue a TLB flush (and possibly a new ASID) for this vCPU,
 * to be performed at the next VM entry.
 */
static void
flush_asid(struct svm_softc *sc, int vcpuid)
{
	struct svm_vcpu *vcpustate = svm_get_vcpu(sc, vcpuid);
	struct vmcb_ctrl *ctrl = svm_get_vmcb_ctrl(sc, vcpuid);
	uint8_t flush;

	/* The 'true' third argument forces a flush regardless of nptgen. */
	flush = hma_svm_asid_update(&vcpustate->hma_asid, has_flush_by_asid(),
	    true);

	ASSERT(flush != VMCB_TLB_FLUSH_NOTHING);
	ctrl->asid = vcpustate->hma_asid.hsa_asid;
	ctrl->tlb_ctrl = flush;
	svm_set_dirty(sc, vcpuid, VMCB_CACHE_ASID);
	/*
	 * A potential future optimization: We could choose to update the nptgen
	 * associated with the vCPU, since any pending nptgen change requiring a
	 * flush will be satisfied by the one which has just now been queued.
	 */
}
1806 
static __inline void
disable_gintr(void)
{
	/* CLGI clears the global interrupt flag (GIF), per APMv2. */
	__asm __volatile("clgi");
}
1812 
static __inline void
enable_gintr(void)
{
	/* STGI sets the global interrupt flag (GIF), per APMv2. */
	__asm __volatile("stgi");
}
1818 
/*
 * Swap host debug-register state out in favor of the guest's, immediately
 * before VMRUN.  Paired with svm_dr_leave_guest() after exit.
 */
static __inline void
svm_dr_enter_guest(struct svm_regctx *gctx)
{

	/* Save host control debug registers. */
	gctx->host_dr7 = rdr7();
	gctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR);

	/*
	 * Disable debugging in DR7 and DEBUGCTL to avoid triggering
	 * exceptions in the host based on the guest DRx values.  The
	 * guest DR6, DR7, and DEBUGCTL are saved/restored in the
	 * VMCB.
	 */
	load_dr7(0);
	wrmsr(MSR_DEBUGCTLMSR, 0);

	/* Save host debug registers. */
	gctx->host_dr0 = rdr0();
	gctx->host_dr1 = rdr1();
	gctx->host_dr2 = rdr2();
	gctx->host_dr3 = rdr3();
	gctx->host_dr6 = rdr6();

	/* Restore guest debug registers. */
	load_dr0(gctx->sctx_dr0);
	load_dr1(gctx->sctx_dr1);
	load_dr2(gctx->sctx_dr2);
	load_dr3(gctx->sctx_dr3);
}
1849 
/*
 * Capture the guest's debug registers after a #VMEXIT and restore the host
 * values saved by svm_dr_enter_guest().
 */
static __inline void
svm_dr_leave_guest(struct svm_regctx *gctx)
{

	/* Save guest debug registers. */
	gctx->sctx_dr0 = rdr0();
	gctx->sctx_dr1 = rdr1();
	gctx->sctx_dr2 = rdr2();
	gctx->sctx_dr3 = rdr3();

	/*
	 * Restore host debug registers.  Restore DR7 and DEBUGCTL
	 * last.
	 */
	load_dr0(gctx->host_dr0);
	load_dr1(gctx->host_dr1);
	load_dr2(gctx->host_dr2);
	load_dr3(gctx->host_dr3);
	load_dr6(gctx->host_dr6);
	wrmsr(MSR_DEBUGCTLMSR, gctx->host_debugctl);
	load_dr7(gctx->host_dr7);
}
1872 
1873 /*
1874  * Apply the TSC offset for a vCPU, including physical CPU and per-vCPU offsets.
1875  */
1876 static void
1877 svm_apply_tsc_adjust(struct svm_softc *svm_sc, int vcpuid)
1878 {
1879 	const uint64_t offset = vcpu_tsc_offset(svm_sc->vm, vcpuid, true);
1880 	struct vmcb_ctrl *ctrl = svm_get_vmcb_ctrl(svm_sc, vcpuid);
1881 
1882 	if (ctrl->tsc_offset != offset) {
1883 		ctrl->tsc_offset = offset;
1884 		svm_set_dirty(svm_sc, vcpuid, VMCB_CACHE_I);
1885 	}
1886 }
1887 
/*
 * Start vcpu with specified RIP.  Loops, re-entering the guest via VMRUN,
 * for as long as #VMEXITs can be handled in-kernel; returns 0 once the exit
 * must be processed elsewhere (or a bail-out condition is detected).
 */
static int
svm_vmrun(void *arg, int vcpu, uint64_t rip)
{
	struct svm_regctx *gctx;
	struct svm_softc *svm_sc;
	struct svm_vcpu *vcpustate;
	struct vmcb_state *state;
	struct vmcb_ctrl *ctrl;
	struct vm_exit *vmexit;
	struct vlapic *vlapic;
	vm_client_t *vmc;
	struct vm *vm;
	uint64_t vmcb_pa;
	int handled;
	uint16_t ldt_sel;

	svm_sc = arg;
	vm = svm_sc->vm;

	vcpustate = svm_get_vcpu(svm_sc, vcpu);
	state = svm_get_vmcb_state(svm_sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	vmexit = vm_exitinfo(vm, vcpu);
	vlapic = vm_lapic(vm, vcpu);
	vmc = vm_get_vmclient(vm, vcpu);

	gctx = svm_get_guest_regctx(svm_sc, vcpu);
	vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa;

	if (vcpustate->lastcpu != curcpu) {
		/*
		 * Force new ASID allocation by invalidating the generation.
		 */
		vcpustate->hma_asid.hsa_gen = 0;

		/*
		 * Invalidate the VMCB state cache by marking all fields dirty.
		 */
		svm_set_dirty(svm_sc, vcpu, 0xffffffff);

		/*
		 * XXX
		 * Setting 'vcpustate->lastcpu' here is a bit premature because
		 * we may return from this function without actually executing
		 * the VMRUN instruction. This could happen if an AST or yield
		 * condition is pending on the first time through the loop.
		 *
		 * This works for now but any new side-effects of vcpu
		 * migration should take this case into account.
		 */
		vcpustate->lastcpu = curcpu;
		vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1);
	}

	svm_apply_tsc_adjust(svm_sc, vcpu);

	svm_msr_guest_enter(svm_sc, vcpu);

	VERIFY(!vcpustate->loaded && curthread->t_preempt != 0);
	vcpustate->loaded = B_TRUE;

	/* Update Guest RIP */
	state->rip = rip;

	do {
		enum event_inject_state inject_state;
		uint64_t nptgen;

		/*
		 * Initial event injection is complex and may involve mutex
		 * contention, so it must be performed with global interrupts
		 * still enabled.
		 */
		inject_state = svm_inject_events(svm_sc, vcpu);
		handled = 0;

		/*
		 * Disable global interrupts to guarantee atomicity during
		 * loading of guest state. This includes not only the state
		 * loaded by the "vmrun" instruction but also software state
		 * maintained by the hypervisor: suspended and rendezvous
		 * state, NPT generation number, vlapic interrupts etc.
		 */
		disable_gintr();

		/*
		 * Synchronizing and injecting vlapic state is lock-free and is
		 * safe (and prudent) to perform with interrupts disabled.
		 */
		inject_state = svm_inject_vlapic(svm_sc, vcpu, vlapic,
		    inject_state);

		/*
		 * Check for vCPU bail-out conditions.  This must be done after
		 * svm_inject_events() to detect a triple-fault condition.
		 */
		if (vcpu_entry_bailout_checks(vm, vcpu, state->rip)) {
			enable_gintr();
			break;
		}

		if (vcpu_run_state_pending(vm, vcpu)) {
			enable_gintr();
			vm_exit_run_state(vm, vcpu, state->rip);
			break;
		}

		/*
		 * If subsequent activity queued events which require injection
		 * handling, take another lap to handle them.
		 */
		if (svm_inject_recheck(svm_sc, vcpu, inject_state)) {
			enable_gintr();
			handled = 1;
			continue;
		}

		/*
		 * #VMEXIT resumes the host with the guest LDTR, so
		 * save the current LDT selector so it can be restored
		 * after an exit.  The userspace hypervisor probably
		 * doesn't use a LDT, but save and restore it to be
		 * safe.
		 */
		ldt_sel = sldt();

		/*
		 * Check the vmspace and ASID generations to ensure that the
		 * vcpu does not use stale TLB mappings.
		 */
		nptgen = vmc_table_enter(vmc);
		check_asid(svm_sc, vcpu, curcpu, nptgen);

		ctrl->vmcb_clean = vmcb_clean & ~vcpustate->dirty;
		vcpustate->dirty = 0;

		/* Launch Virtual Machine. */
		vcpu_ustate_change(vm, vcpu, VU_RUN);
		svm_dr_enter_guest(gctx);
		svm_launch(vmcb_pa, gctx, get_pcpu());
		svm_dr_leave_guest(gctx);
		vcpu_ustate_change(vm, vcpu, VU_EMU_KERN);

		/* Restore host LDTR. */
		lldt(ldt_sel);

		/* #VMEXIT disables interrupts so re-enable them here. */
		enable_gintr();

		vmc_table_exit(vmc);

		/* Update 'nextrip' */
		vcpustate->nextrip = state->rip;

		/* Handle #VMEXIT and if required return to user space. */
		handled = svm_vmexit(svm_sc, vcpu, vmexit);
	} while (handled);

	svm_msr_guest_exit(svm_sc, vcpu);

	VERIFY(vcpustate->loaded && curthread->t_preempt != 0);
	vcpustate->loaded = B_FALSE;

	return (0);
}
2056 
2057 static void
2058 svm_vmcleanup(void *arg)
2059 {
2060 	struct svm_softc *sc = arg;
2061 
2062 	vmm_contig_free(sc->iopm_bitmap, SVM_IO_BITMAP_SIZE);
2063 	vmm_contig_free(sc->msr_bitmap, SVM_MSR_BITMAP_SIZE);
2064 	kmem_free(sc, sizeof (*sc));
2065 }
2066 
2067 static uint64_t *
2068 swctx_regptr(struct svm_regctx *regctx, int reg)
2069 {
2070 	switch (reg) {
2071 	case VM_REG_GUEST_RBX:
2072 		return (&regctx->sctx_rbx);
2073 	case VM_REG_GUEST_RCX:
2074 		return (&regctx->sctx_rcx);
2075 	case VM_REG_GUEST_RDX:
2076 		return (&regctx->sctx_rdx);
2077 	case VM_REG_GUEST_RDI:
2078 		return (&regctx->sctx_rdi);
2079 	case VM_REG_GUEST_RSI:
2080 		return (&regctx->sctx_rsi);
2081 	case VM_REG_GUEST_RBP:
2082 		return (&regctx->sctx_rbp);
2083 	case VM_REG_GUEST_R8:
2084 		return (&regctx->sctx_r8);
2085 	case VM_REG_GUEST_R9:
2086 		return (&regctx->sctx_r9);
2087 	case VM_REG_GUEST_R10:
2088 		return (&regctx->sctx_r10);
2089 	case VM_REG_GUEST_R11:
2090 		return (&regctx->sctx_r11);
2091 	case VM_REG_GUEST_R12:
2092 		return (&regctx->sctx_r12);
2093 	case VM_REG_GUEST_R13:
2094 		return (&regctx->sctx_r13);
2095 	case VM_REG_GUEST_R14:
2096 		return (&regctx->sctx_r14);
2097 	case VM_REG_GUEST_R15:
2098 		return (&regctx->sctx_r15);
2099 	case VM_REG_GUEST_DR0:
2100 		return (&regctx->sctx_dr0);
2101 	case VM_REG_GUEST_DR1:
2102 		return (&regctx->sctx_dr1);
2103 	case VM_REG_GUEST_DR2:
2104 		return (&regctx->sctx_dr2);
2105 	case VM_REG_GUEST_DR3:
2106 		return (&regctx->sctx_dr3);
2107 	default:
2108 		return (NULL);
2109 	}
2110 }
2111 
2112 static int
2113 svm_getreg(void *arg, int vcpu, int ident, uint64_t *val)
2114 {
2115 	struct svm_softc *sc;
2116 	struct vmcb *vmcb;
2117 	uint64_t *regp;
2118 	uint64_t *fieldp;
2119 	struct vmcb_segment *seg;
2120 
2121 	sc = arg;
2122 	vmcb = svm_get_vmcb(sc, vcpu);
2123 
2124 	regp = swctx_regptr(svm_get_guest_regctx(sc, vcpu), ident);
2125 	if (regp != NULL) {
2126 		*val = *regp;
2127 		return (0);
2128 	}
2129 
2130 	switch (ident) {
2131 	case VM_REG_GUEST_INTR_SHADOW:
2132 		*val = (vmcb->ctrl.intr_shadow != 0) ? 1 : 0;
2133 		break;
2134 
2135 	case VM_REG_GUEST_CR0:
2136 		svm_get_cr0(sc, vcpu, val);
2137 		break;
2138 	case VM_REG_GUEST_CR2:
2139 	case VM_REG_GUEST_CR3:
2140 	case VM_REG_GUEST_CR4:
2141 	case VM_REG_GUEST_DR6:
2142 	case VM_REG_GUEST_DR7:
2143 	case VM_REG_GUEST_EFER:
2144 	case VM_REG_GUEST_RAX:
2145 	case VM_REG_GUEST_RFLAGS:
2146 	case VM_REG_GUEST_RIP:
2147 	case VM_REG_GUEST_RSP:
2148 		fieldp = vmcb_regptr(vmcb, ident, NULL);
2149 		*val = *fieldp;
2150 		break;
2151 
2152 	case VM_REG_GUEST_CS:
2153 	case VM_REG_GUEST_DS:
2154 	case VM_REG_GUEST_ES:
2155 	case VM_REG_GUEST_FS:
2156 	case VM_REG_GUEST_GS:
2157 	case VM_REG_GUEST_SS:
2158 	case VM_REG_GUEST_LDTR:
2159 	case VM_REG_GUEST_TR:
2160 		seg = vmcb_segptr(vmcb, ident);
2161 		*val = seg->selector;
2162 		break;
2163 
2164 	case VM_REG_GUEST_GDTR:
2165 	case VM_REG_GUEST_IDTR:
2166 		/* GDTR and IDTR don't have segment selectors */
2167 		return (EINVAL);
2168 
2169 	case VM_REG_GUEST_PDPTE0:
2170 	case VM_REG_GUEST_PDPTE1:
2171 	case VM_REG_GUEST_PDPTE2:
2172 	case VM_REG_GUEST_PDPTE3:
2173 		/*
2174 		 * Unlike VMX, where the PDPTEs are explicitly cached as part of
2175 		 * several well-defined events related to paging (such as
2176 		 * loading %cr3), SVM walks the PDPEs (their PDPTE) as part of
2177 		 * nested paging lookups.  This makes these registers
2178 		 * effectively irrelevant on SVM.
2179 		 *
2180 		 * Rather than tossing an error, emit zeroed values so casual
2181 		 * consumers do not need to be as careful about that difference.
2182 		 */
2183 		*val = 0;
2184 		break;
2185 
2186 	default:
2187 		return (EINVAL);
2188 	}
2189 
2190 	return (0);
2191 }
2192 
2193 static int
2194 svm_setreg(void *arg, int vcpu, int ident, uint64_t val)
2195 {
2196 	struct svm_softc *sc;
2197 	struct vmcb *vmcb;
2198 	uint64_t *regp;
2199 	uint64_t *fieldp;
2200 	uint32_t dirty;
2201 	struct vmcb_segment *seg;
2202 
2203 	sc = arg;
2204 	vmcb = svm_get_vmcb(sc, vcpu);
2205 
2206 	regp = swctx_regptr(svm_get_guest_regctx(sc, vcpu), ident);
2207 	if (regp != NULL) {
2208 		*regp = val;
2209 		return (0);
2210 	}
2211 
2212 	dirty = VMCB_CACHE_NONE;
2213 	switch (ident) {
2214 	case VM_REG_GUEST_INTR_SHADOW:
2215 		vmcb->ctrl.intr_shadow = (val != 0) ? 1 : 0;
2216 		break;
2217 
2218 	case VM_REG_GUEST_EFER:
2219 		fieldp = vmcb_regptr(vmcb, ident, &dirty);
2220 		/* EFER_SVM must always be set when the guest is executing */
2221 		*fieldp = val | EFER_SVM;
2222 		dirty |= VMCB_CACHE_CR;
2223 		break;
2224 
2225 	case VM_REG_GUEST_CR0:
2226 		svm_set_cr0(sc, vcpu, val, false);
2227 		break;
2228 	case VM_REG_GUEST_CR2:
2229 	case VM_REG_GUEST_CR3:
2230 	case VM_REG_GUEST_CR4:
2231 	case VM_REG_GUEST_DR6:
2232 	case VM_REG_GUEST_DR7:
2233 	case VM_REG_GUEST_RAX:
2234 	case VM_REG_GUEST_RFLAGS:
2235 	case VM_REG_GUEST_RIP:
2236 	case VM_REG_GUEST_RSP:
2237 		fieldp = vmcb_regptr(vmcb, ident, &dirty);
2238 		*fieldp = val;
2239 		break;
2240 
2241 	case VM_REG_GUEST_CS:
2242 	case VM_REG_GUEST_DS:
2243 	case VM_REG_GUEST_ES:
2244 	case VM_REG_GUEST_SS:
2245 	case VM_REG_GUEST_FS:
2246 	case VM_REG_GUEST_GS:
2247 	case VM_REG_GUEST_LDTR:
2248 	case VM_REG_GUEST_TR:
2249 		dirty |= VMCB_CACHE_SEG;
2250 		seg = vmcb_segptr(vmcb, ident);
2251 		seg->selector = (uint16_t)val;
2252 		break;
2253 
2254 	case VM_REG_GUEST_GDTR:
2255 	case VM_REG_GUEST_IDTR:
2256 		/* GDTR and IDTR don't have segment selectors */
2257 		return (EINVAL);
2258 
2259 	case VM_REG_GUEST_PDPTE0:
2260 	case VM_REG_GUEST_PDPTE1:
2261 	case VM_REG_GUEST_PDPTE2:
2262 	case VM_REG_GUEST_PDPTE3:
2263 		/*
2264 		 * PDPEs (AMD's PDPTE) are not cached under SVM, so we can
2265 		 * ignore attempts to set them.  See handler in svm_getreg() for
2266 		 * more details.
2267 		 */
2268 		break;
2269 
2270 	default:
2271 		return (EINVAL);
2272 	}
2273 
2274 	if (dirty != VMCB_CACHE_NONE) {
2275 		svm_set_dirty(sc, vcpu, dirty);
2276 	}
2277 
2278 	/*
2279 	 * XXX deal with CR3 and invalidate TLB entries tagged with the
2280 	 * vcpu's ASID. This needs to be treated differently depending on
2281 	 * whether 'running' is true/false.
2282 	 */
2283 
2284 	return (0);
2285 }
2286 
/*
 * Set the descriptor (base, limit, access) of a guest segment register
 * (vmsetdesc op).  The caller supplies 'access' in the processor-independent
 * (VT-x style) format, which is converted to the VMCB attribute encoding
 * here.  Returns 0 on success, EINVAL for an unsupported register.
 */
static int
svm_setdesc(void *arg, int vcpu, int reg, const struct seg_desc *desc)
{
	struct vmcb *vmcb;
	struct svm_softc *sc;
	struct vmcb_segment *seg;

	sc = arg;
	vmcb = svm_get_vmcb(sc, vcpu);

	switch (reg) {
	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_SS:
	case VM_REG_GUEST_FS:
	case VM_REG_GUEST_GS:
	case VM_REG_GUEST_LDTR:
	case VM_REG_GUEST_TR:
		svm_set_dirty(sc, vcpu, VMCB_CACHE_SEG);
		seg = vmcb_segptr(vmcb, reg);
		/*
		 * Map seg_desc access to VMCB attribute format.
		 *
		 * SVM uses the 'P' bit in the segment attributes to indicate a
		 * NULL segment so clear it if the segment is marked unusable.
		 */
		seg->attrib = VMCB_ACCESS2ATTR(desc->access);
		if (SEG_DESC_UNUSABLE(desc->access)) {
			seg->attrib &= ~0x80;
		}
		/*
		 * Keep CPL synced with the DPL specified for %ss.
		 *
		 * KVM notes that a SYSRET to non-cpl-3 is possible on AMD
		 * (unlike Intel), but accepts such a possible deviation for
		 * what is otherwise unreasonable behavior for a guest OS, since
		 * they do the same synchronization.
		 */
		if (reg == VM_REG_GUEST_SS) {
			vmcb->state.cpl = SEG_DESC_DPL(desc->access);
		}
		break;

	case VM_REG_GUEST_GDTR:
	case VM_REG_GUEST_IDTR:
		/* Descriptor tables carry no access bits: base/limit only. */
		svm_set_dirty(sc, vcpu, VMCB_CACHE_DT);
		seg = vmcb_segptr(vmcb, reg);
		break;

	default:
		return (EINVAL);
	}

	/* Both switch arms above set 'seg'; base and limit are common. */
	ASSERT(seg != NULL);
	seg->base = desc->base;
	seg->limit = desc->limit;

	return (0);
}
2347 
/*
 * Get the descriptor (base, limit, access) of a guest segment register
 * (vmgetdesc op).  Attributes are returned in the processor-independent
 * (VT-x style) 'access' format expected by the machine-independent code.
 * Returns 0 on success, EINVAL for an unsupported register.
 */
static int
svm_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
{
	struct vmcb *vmcb;
	struct svm_softc *sc;
	struct vmcb_segment *seg;

	sc = arg;
	vmcb = svm_get_vmcb(sc, vcpu);

	switch (reg) {
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_FS:
	case VM_REG_GUEST_GS:
	case VM_REG_GUEST_SS:
	case VM_REG_GUEST_LDTR:
		seg = vmcb_segptr(vmcb, reg);
		desc->access = VMCB_ATTR2ACCESS(seg->attrib);
		/*
		 * VT-x uses bit 16 to indicate a segment that has been loaded
		 * with a NULL selector (aka unusable). The 'desc->access'
		 * field is interpreted in the VT-x format by the
		 * processor-independent code.
		 *
		 * SVM uses the 'P' bit to convey the same information so
		 * convert it into the VT-x format. For more details refer to
		 * section "Segment State in the VMCB" in APMv2.
		 */
		if ((desc->access & 0x80) == 0) {
			/* Unusable segment */
			desc->access |= 0x10000;
		}

		/*
		 * Just as CPL (in the VMCB) is kept synced to SS when the
		 * segment is written, so too shall the segment sync from CPL
		 * when it is read.
		 */
		if (reg == VM_REG_GUEST_SS) {
			desc->access &=
			    ~(SEG_DESC_DPL_MASK << SEG_DESC_DPL_SHIFT);
			desc->access |=
			    (vmcb->state.cpl & SEG_DESC_DPL_MASK) <<
			    SEG_DESC_DPL_SHIFT;
		}
		break;

	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_TR:
		/*
		 * %cs and %tr get no unusable-bit translation, unlike the
		 * segments handled above.
		 */
		seg = vmcb_segptr(vmcb, reg);
		desc->access = VMCB_ATTR2ACCESS(seg->attrib);
		break;

	case VM_REG_GUEST_GDTR:
	case VM_REG_GUEST_IDTR:
		seg = vmcb_segptr(vmcb, reg);
		/*
		 * Since there are no access bits associated with the GDTR or
		 * the IDTR, zero out the field to ensure it does not contain
		 * garbage which might confuse the consumer.
		 */
		desc->access = 0;
		break;

	default:
		return (EINVAL);
	}

	/* All switch arms above set 'seg'; base and limit are common. */
	ASSERT(seg != NULL);
	desc->base = seg->base;
	desc->limit = seg->limit;
	return (0);
}
2422 
2423 static int
2424 svm_get_msr(void *arg, int vcpu, uint32_t msr, uint64_t *valp)
2425 {
2426 	struct svm_softc *sc = arg;
2427 	struct vmcb *vmcb = svm_get_vmcb(sc, vcpu);
2428 	const uint64_t *msrp = vmcb_msr_ptr(vmcb, msr, NULL);
2429 
2430 	if (msrp != NULL) {
2431 		*valp = *msrp;
2432 		return (0);
2433 	}
2434 
2435 	return (EINVAL);
2436 }
2437 
2438 static int
2439 svm_set_msr(void *arg, int vcpu, uint32_t msr, uint64_t val)
2440 {
2441 	struct svm_softc *sc = arg;
2442 	struct vmcb *vmcb = svm_get_vmcb(sc, vcpu);
2443 
2444 	uint32_t dirty = 0;
2445 	uint64_t *msrp = vmcb_msr_ptr(vmcb, msr, &dirty);
2446 	if (msrp == NULL) {
2447 		return (EINVAL);
2448 	}
2449 	switch (msr) {
2450 	case MSR_EFER:
2451 		/*
2452 		 * For now, just clone the logic from
2453 		 * svm_setreg():
2454 		 *
2455 		 * EFER_SVM must always be set when the guest is
2456 		 * executing
2457 		 */
2458 		*msrp = val | EFER_SVM;
2459 		break;
2460 	/* TODO: other necessary MSR masking */
2461 	default:
2462 		*msrp = val;
2463 		break;
2464 	}
2465 	if (dirty != 0) {
2466 		svm_set_dirty(sc, vcpu, dirty);
2467 	}
2468 	return (0);
2469 
2470 }
2471 
2472 static int
2473 svm_setcap(void *arg, int vcpu, int type, int val)
2474 {
2475 	struct svm_softc *sc;
2476 	int error;
2477 
2478 	sc = arg;
2479 	error = 0;
2480 	switch (type) {
2481 	case VM_CAP_HALT_EXIT:
2482 		svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
2483 		    VMCB_INTCPT_HLT, val);
2484 		break;
2485 	case VM_CAP_PAUSE_EXIT:
2486 		svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
2487 		    VMCB_INTCPT_PAUSE, val);
2488 		break;
2489 	default:
2490 		error = ENOENT;
2491 		break;
2492 	}
2493 	return (error);
2494 }
2495 
2496 static int
2497 svm_getcap(void *arg, int vcpu, int type, int *retval)
2498 {
2499 	struct svm_softc *sc;
2500 	int error;
2501 
2502 	sc = arg;
2503 	error = 0;
2504 
2505 	switch (type) {
2506 	case VM_CAP_HALT_EXIT:
2507 		*retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
2508 		    VMCB_INTCPT_HLT);
2509 		break;
2510 	case VM_CAP_PAUSE_EXIT:
2511 		*retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
2512 		    VMCB_INTCPT_PAUSE);
2513 		break;
2514 	default:
2515 		error = ENOENT;
2516 		break;
2517 	}
2518 	return (error);
2519 }
2520 
2521 static struct vlapic *
2522 svm_vlapic_init(void *arg, int vcpuid)
2523 {
2524 	struct svm_softc *svm_sc;
2525 	struct vlapic *vlapic;
2526 
2527 	svm_sc = arg;
2528 	vlapic = kmem_zalloc(sizeof (struct vlapic), KM_SLEEP);
2529 	vlapic->vm = svm_sc->vm;
2530 	vlapic->vcpuid = vcpuid;
2531 	vlapic->apic_page = (struct LAPIC *)&svm_sc->apic_page[vcpuid];
2532 
2533 	vlapic_init(vlapic);
2534 
2535 	return (vlapic);
2536 }
2537 
/*
 * Tear down a vCPU's virtual local APIC: run the generic vlapic cleanup
 * before releasing the allocation made in svm_vlapic_init().
 */
static void
svm_vlapic_cleanup(void *arg, struct vlapic *vlapic)
{
	vlapic_cleanup(vlapic);
	kmem_free(vlapic, sizeof (struct vlapic));
}
2544 
/*
 * Quiesce a vCPU (vmpause op): defer any event pending injection in the
 * VMCB and drop the injection-related intercepts so the VMCB holds no
 * transient injection state while the vCPU is paused.
 */
static void
svm_pause(void *arg, int vcpu)
{
	struct svm_softc *sc = arg;
	struct vmcb_ctrl *ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	/*
	 * If an event is pending injection in the VMCB, stash it in
	 * exit_intinfo as if it were deferred by an exit from guest context.
	 */
	const uint64_t intinfo = ctrl->eventinj;
	if ((intinfo & VMCB_EVENTINJ_VALID) != 0) {
		svm_stash_intinfo(sc, vcpu, intinfo);
		ctrl->eventinj = 0;
	}

	/*
	 * Now that no event is pending injection, interrupt-window exiting and
	 * NMI-blocking can be disabled.  If/when this vCPU is made to run
	 * again, those conditions will be reinstated when the now-queued events
	 * are re-injected.
	 */
	svm_disable_intr_window_exiting(sc, vcpu);
	svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
}
2570 
2571 static void
2572 svm_savectx(void *arg, int vcpu)
2573 {
2574 	struct svm_softc *sc = arg;
2575 
2576 	if (sc->vcpu[vcpu].loaded) {
2577 		svm_msr_guest_exit(sc, vcpu);
2578 	}
2579 }
2580 
2581 static void
2582 svm_restorectx(void *arg, int vcpu)
2583 {
2584 	struct svm_softc *sc = arg;
2585 
2586 	if (sc->vcpu[vcpu].loaded) {
2587 		svm_msr_guest_enter(sc, vcpu);
2588 	}
2589 }
2590 
2591 static freqratio_res_t
2592 svm_freq_ratio(uint64_t guest_hz, uint64_t host_hz, uint64_t *mult)
2593 {
2594 	/*
2595 	 * Check whether scaling is needed at all before potentially erroring
2596 	 * out for other reasons.
2597 	 */
2598 	if (guest_hz == host_hz) {
2599 		return (FR_SCALING_NOT_NEEDED);
2600 	}
2601 
2602 	/*
2603 	 * Confirm that scaling is available.
2604 	 */
2605 	if (!has_tsc_freq_ctl()) {
2606 		return (FR_SCALING_NOT_SUPPORTED);
2607 	}
2608 
2609 	/*
2610 	 * Verify the guest_hz is within the supported range.
2611 	 */
2612 	if ((guest_hz < AMD_TSC_MIN_FREQ) ||
2613 	    (guest_hz >= (host_hz * AMD_TSC_MAX_FREQ_RATIO))) {
2614 		return (FR_OUT_OF_RANGE);
2615 	}
2616 
2617 	/* Calculate the multiplier. */
2618 	uint64_t m = vmm_calc_freq_multiplier(guest_hz, host_hz,
2619 	    AMD_TSCM_FRAC_SIZE);
2620 	*mult = m;
2621 
2622 	return (FR_VALID);
2623 }
2624 
/*
 * AMD SVM backend operations table, consumed by the machine-independent
 * vmm code.  Each entry points at the corresponding svm_* implementation
 * in this file.
 */
struct vmm_ops vmm_ops_amd = {
	.init		= svm_init,
	.cleanup	= svm_cleanup,
	.resume		= svm_restore,

	.vminit		= svm_vminit,
	.vmrun		= svm_vmrun,
	.vmcleanup	= svm_vmcleanup,
	.vmgetreg	= svm_getreg,
	.vmsetreg	= svm_setreg,
	.vmgetdesc	= svm_getdesc,
	.vmsetdesc	= svm_setdesc,
	.vmgetcap	= svm_getcap,
	.vmsetcap	= svm_setcap,
	.vlapic_init	= svm_vlapic_init,
	.vlapic_cleanup	= svm_vlapic_cleanup,
	.vmpause	= svm_pause,

	.vmsavectx	= svm_savectx,
	.vmrestorectx	= svm_restorectx,

	.vmgetmsr	= svm_get_msr,
	.vmsetmsr	= svm_set_msr,

	/* TSC frequency-scaling parameters (AMD fixed-point format). */
	.vmfreqratio	= svm_freq_ratio,
	.fr_intsize	= AMD_TSCM_INT_SIZE,
	.fr_fracsize	= AMD_TSCM_FRAC_SIZE,
};
2653