xref: /freebsd/sys/amd64/vmm/intel/vmx.c (revision ec0e626bafb335b30c499d06066997f54b10c092)
/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smp.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/psl.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/segments.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/vmparam.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmm_instruction_emul.h>
#include "vmm_lapic.h"
#include "vmm_host.h"
#include "vmm_ioport.h"
#include "vmm_ktr.h"
#include "vmm_stat.h"
#include "vatpic.h"
#include "vlapic.h"
#include "vlapic_priv.h"

#include "ept.h"
#include "vmx_cpufunc.h"
#include "vmx.h"
#include "vmx_msr.h"
#include "x86.h"
#include "vmx_controls.h"

#define	PINBASED_CTLS_ONE_SETTING					\
	(PINBASED_EXTINT_EXITING	|				\
	 PINBASED_NMI_EXITING		|				\
	 PINBASED_VIRTUAL_NMI)
#define	PINBASED_CTLS_ZERO_SETTING	0

#define PROCBASED_CTLS_WINDOW_SETTING					\
	(PROCBASED_INT_WINDOW_EXITING	|				\
	 PROCBASED_NMI_WINDOW_EXITING)

#define	PROCBASED_CTLS_ONE_SETTING					\
	(PROCBASED_SECONDARY_CONTROLS	|				\
	 PROCBASED_MWAIT_EXITING	|				\
	 PROCBASED_MONITOR_EXITING	|				\
	 PROCBASED_IO_EXITING		|				\
	 PROCBASED_MSR_BITMAPS		|				\
	 PROCBASED_CTLS_WINDOW_SETTING	|				\
	 PROCBASED_CR8_LOAD_EXITING	|				\
	 PROCBASED_CR8_STORE_EXITING)
#define	PROCBASED_CTLS_ZERO_SETTING	\
	(PROCBASED_CR3_LOAD_EXITING |	\
	PROCBASED_CR3_STORE_EXITING |	\
	PROCBASED_IO_BITMAPS)

#define	PROCBASED_CTLS2_ONE_SETTING	PROCBASED2_ENABLE_EPT
#define	PROCBASED_CTLS2_ZERO_SETTING	0

#define	VM_EXIT_CTLS_ONE_SETTING					\
	(VM_EXIT_HOST_LMA			|			\
	VM_EXIT_SAVE_EFER			|			\
	VM_EXIT_LOAD_EFER			|			\
	VM_EXIT_ACKNOWLEDGE_INTERRUPT)

#define	VM_EXIT_CTLS_ZERO_SETTING	VM_EXIT_SAVE_DEBUG_CONTROLS

#define	VM_ENTRY_CTLS_ONE_SETTING	(VM_ENTRY_LOAD_EFER)

#define	VM_ENTRY_CTLS_ZERO_SETTING					\
	(VM_ENTRY_LOAD_DEBUG_CONTROLS		|			\
	VM_ENTRY_INTO_SMM			|			\
	VM_ENTRY_DEACTIVATE_DUAL_MONITOR)

#define	HANDLED		1
#define	UNHANDLED	0

static MALLOC_DEFINE(M_VMX, "vmx", "vmx");
static MALLOC_DEFINE(M_VLAPIC, "vlapic", "vlapic");

SYSCTL_DECL(_hw_vmm);
SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW, NULL, NULL);

int vmxon_enabled[MAXCPU];
static char vmxon_region[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);

static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2;
static uint32_t exit_ctls, entry_ctls;

static uint64_t cr0_ones_mask, cr0_zeros_mask;
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_ones_mask, CTLFLAG_RD,
	     &cr0_ones_mask, 0, NULL);
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_zeros_mask, CTLFLAG_RD,
	     &cr0_zeros_mask, 0, NULL);

static uint64_t cr4_ones_mask, cr4_zeros_mask;
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_ones_mask, CTLFLAG_RD,
	     &cr4_ones_mask, 0, NULL);
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_zeros_mask, CTLFLAG_RD,
	     &cr4_zeros_mask, 0, NULL);

static int vmx_initialized;
SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, initialized, CTLFLAG_RD,
	   &vmx_initialized, 0, "Intel VMX initialized");

/*
 * Optional capabilities
 */
static SYSCTL_NODE(_hw_vmm_vmx, OID_AUTO, cap, CTLFLAG_RW, NULL, NULL);

static int cap_halt_exit;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, halt_exit, CTLFLAG_RD, &cap_halt_exit, 0,
    "HLT triggers a VM-exit");

static int cap_pause_exit;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, pause_exit, CTLFLAG_RD, &cap_pause_exit,
    0, "PAUSE triggers a VM-exit");

static int cap_unrestricted_guest;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, unrestricted_guest, CTLFLAG_RD,
    &cap_unrestricted_guest, 0, "Unrestricted guests");

static int cap_monitor_trap;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, monitor_trap, CTLFLAG_RD,
    &cap_monitor_trap, 0, "Monitor trap flag");

static int cap_invpcid;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, invpcid, CTLFLAG_RD, &cap_invpcid,
    0, "Guests are allowed to use INVPCID");

static int virtual_interrupt_delivery;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, virtual_interrupt_delivery, CTLFLAG_RD,
    &virtual_interrupt_delivery, 0, "APICv virtual interrupt delivery support");

static int posted_interrupts;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, posted_interrupts, CTLFLAG_RD,
    &posted_interrupts, 0, "APICv posted interrupt support");

static int pirvec = -1;
SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, posted_interrupt_vector, CTLFLAG_RD,
    &pirvec, 0, "APICv posted interrupt vector");

static struct unrhdr *vpid_unr;
static u_int vpid_alloc_failed;
SYSCTL_UINT(_hw_vmm_vmx, OID_AUTO, vpid_alloc_failed, CTLFLAG_RD,
	    &vpid_alloc_failed, 0, NULL);

/*
 * Use the last page below 4GB as the APIC access address. This address is
 * occupied by the boot firmware so it is guaranteed that it will not conflict
 * with a page in system memory.
 */
#define	APIC_ACCESS_ADDRESS	0xFFFFF000

static int vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc);
static int vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval);
static int vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val);
static void vmx_inject_pir(struct vlapic *vlapic);

#ifdef KTR
static const char *
exit_reason_to_str(int reason)
{
	static char reasonbuf[32];

	switch (reason) {
	case EXIT_REASON_EXCEPTION:
		return "exception";
	case EXIT_REASON_EXT_INTR:
		return "extint";
	case EXIT_REASON_TRIPLE_FAULT:
		return "triplefault";
	case EXIT_REASON_INIT:
		return "init";
	case EXIT_REASON_SIPI:
		return "sipi";
	case EXIT_REASON_IO_SMI:
		return "iosmi";
	case EXIT_REASON_SMI:
		return "smi";
	case EXIT_REASON_INTR_WINDOW:
		return "intrwindow";
	case EXIT_REASON_NMI_WINDOW:
		return "nmiwindow";
	case EXIT_REASON_TASK_SWITCH:
		return "taskswitch";
	case EXIT_REASON_CPUID:
		return "cpuid";
	case EXIT_REASON_GETSEC:
		return "getsec";
	case EXIT_REASON_HLT:
		return "hlt";
	case EXIT_REASON_INVD:
		return "invd";
	case EXIT_REASON_INVLPG:
		return "invlpg";
	case EXIT_REASON_RDPMC:
		return "rdpmc";
	case EXIT_REASON_RDTSC:
		return "rdtsc";
	case EXIT_REASON_RSM:
		return "rsm";
	case EXIT_REASON_VMCALL:
		return "vmcall";
	case EXIT_REASON_VMCLEAR:
		return "vmclear";
	case EXIT_REASON_VMLAUNCH:
		return "vmlaunch";
	case EXIT_REASON_VMPTRLD:
		return "vmptrld";
	case EXIT_REASON_VMPTRST:
		return "vmptrst";
	case EXIT_REASON_VMREAD:
		return "vmread";
	case EXIT_REASON_VMRESUME:
		return "vmresume";
	case EXIT_REASON_VMWRITE:
		return "vmwrite";
	case EXIT_REASON_VMXOFF:
		return "vmxoff";
	case EXIT_REASON_VMXON:
		return "vmxon";
	case EXIT_REASON_CR_ACCESS:
		return "craccess";
	case EXIT_REASON_DR_ACCESS:
		return "draccess";
	case EXIT_REASON_INOUT:
		return "inout";
	case EXIT_REASON_RDMSR:
		return "rdmsr";
	case EXIT_REASON_WRMSR:
		return "wrmsr";
	case EXIT_REASON_INVAL_VMCS:
		return "invalvmcs";
	case EXIT_REASON_INVAL_MSR:
		return "invalmsr";
	case EXIT_REASON_MWAIT:
		return "mwait";
	case EXIT_REASON_MTF:
		return "mtf";
	case EXIT_REASON_MONITOR:
		return "monitor";
	case EXIT_REASON_PAUSE:
		return "pause";
	case EXIT_REASON_MCE_DURING_ENTRY:
		return "mce-during-entry";
	case EXIT_REASON_TPR:
		return "tpr";
	case EXIT_REASON_APIC_ACCESS:
		return "apic-access";
	case EXIT_REASON_GDTR_IDTR:
		return "gdtridtr";
	case EXIT_REASON_LDTR_TR:
		return "ldtrtr";
	case EXIT_REASON_EPT_FAULT:
		return "eptfault";
	case EXIT_REASON_EPT_MISCONFIG:
		return "eptmisconfig";
	case EXIT_REASON_INVEPT:
		return "invept";
	case EXIT_REASON_RDTSCP:
		return "rdtscp";
	case EXIT_REASON_VMX_PREEMPT:
		return "vmxpreempt";
	case EXIT_REASON_INVVPID:
		return "invvpid";
	case EXIT_REASON_WBINVD:
		return "wbinvd";
	case EXIT_REASON_XSETBV:
		return "xsetbv";
	case EXIT_REASON_APIC_WRITE:
		return "apic-write";
	default:
		snprintf(reasonbuf, sizeof(reasonbuf), "%d", reason);
		return (reasonbuf);
	}
}
#endif	/* KTR */

static int
vmx_allow_x2apic_msrs(struct vmx *vmx)
{
	int i, error;

	error = 0;

	/*
	 * Allow readonly access to the following x2APIC MSRs from the guest.
	 */
	error += guest_msr_ro(vmx, MSR_APIC_ID);
	error += guest_msr_ro(vmx, MSR_APIC_VERSION);
	error += guest_msr_ro(vmx, MSR_APIC_LDR);
	error += guest_msr_ro(vmx, MSR_APIC_SVR);

	for (i = 0; i < 8; i++)
		error += guest_msr_ro(vmx, MSR_APIC_ISR0 + i);

	for (i = 0; i < 8; i++)
		error += guest_msr_ro(vmx, MSR_APIC_TMR0 + i);

	for (i = 0; i < 8; i++)
		error += guest_msr_ro(vmx, MSR_APIC_IRR0 + i);

	error += guest_msr_ro(vmx, MSR_APIC_ESR);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_TIMER);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_THERMAL);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_PCINT);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT0);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT1);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_ERROR);
	error += guest_msr_ro(vmx, MSR_APIC_ICR_TIMER);
	error += guest_msr_ro(vmx, MSR_APIC_DCR_TIMER);
	error += guest_msr_ro(vmx, MSR_APIC_ICR);

	/*
	 * Allow TPR, EOI and SELF_IPI MSRs to be read and written by the guest.
	 *
	 * These registers get special treatment described in the section
	 * "Virtualizing MSR-Based APIC Accesses".
	 */
	error += guest_msr_rw(vmx, MSR_APIC_TPR);
	error += guest_msr_rw(vmx, MSR_APIC_EOI);
	error += guest_msr_rw(vmx, MSR_APIC_SELF_IPI);

	return (error);
}

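/*
 * Coerce a CR0 value into one that is legal in VMX operation: bits in
 * cr0_ones_mask (derived from MSR_VMX_CR0_FIXED0/FIXED1 in vmx_init())
 * must be 1 and bits in cr0_zeros_mask must be 0. For example,
 * vmx_fix_cr0(0) still yields a value with CR0_NE set, since CR0.NE is
 * a fixed-1 bit in VMX operation. vmx_fix_cr4() below applies the same
 * adjustment for CR4.
 */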
u_long
vmx_fix_cr0(u_long cr0)
{

	return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask);
}

u_long
vmx_fix_cr4(u_long cr4)
{

	return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask);
}

static void
vpid_free(int vpid)
{
	if (vpid < 0 || vpid > 0xffff)
		panic("vpid_free: invalid vpid %d", vpid);

	/*
	 * VPIDs [0,VM_MAXCPU] are special and are not allocated from
	 * the unit number allocator.
	 */

	if (vpid > VM_MAXCPU)
		free_unr(vpid_unr, vpid);
}

static void
vpid_alloc(uint16_t *vpid, int num)
{
	int i, x;

	if (num <= 0 || num > VM_MAXCPU)
		panic("invalid number of vpids requested: %d", num);

	/*
	 * If the "enable vpid" execution control is not enabled then the
	 * VPID is required to be 0 for all vcpus.
	 */
	if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) {
		for (i = 0; i < num; i++)
			vpid[i] = 0;
		return;
	}

	/*
	 * Allocate a unique VPID for each vcpu from the unit number allocator.
	 */
	for (i = 0; i < num; i++) {
		x = alloc_unr(vpid_unr);
		if (x == -1)
			break;
		else
			vpid[i] = x;
	}

	if (i < num) {
		atomic_add_int(&vpid_alloc_failed, 1);

		/*
		 * If the unit number allocator does not have enough unique
		 * VPIDs then we need to allocate from the [1,VM_MAXCPU] range.
		 *
		 * These VPIDs are not unique across VMs but this does not
		 * affect correctness because the combined mappings are also
		 * tagged with the EP4TA which is unique for each VM.
		 *
		 * It is still sub-optimal because the invvpid will invalidate
		 * combined mappings for a particular VPID across all EP4TAs.
		 */
		while (i-- > 0)
			vpid_free(vpid[i]);

		for (i = 0; i < num; i++)
			vpid[i] = i + 1;
	}
}

static void
vpid_init(void)
{
	/*
	 * VPID 0 is required when the "enable VPID" execution control is
	 * disabled.
	 *
	 * VPIDs [1,VM_MAXCPU] are used as the "overflow namespace" when the
	 * unit number allocator does not have sufficient unique VPIDs to
	 * satisfy the allocation.
	 *
	 * The remaining VPIDs are managed by the unit number allocator.
	 */
	vpid_unr = new_unrhdr(VM_MAXCPU + 1, 0xffff, NULL);
}

static void
vmx_disable(void *arg __unused)
{
	struct invvpid_desc invvpid_desc = { 0 };
	struct invept_desc invept_desc = { 0 };

	if (vmxon_enabled[curcpu]) {
		/*
		 * See sections 25.3.3.3 and 25.3.3.4 in Intel Vol 3b.
		 *
		 * VMXON or VMXOFF are not required to invalidate any TLB
		 * caching structures, so invalidate all EPT and VPID
		 * contexts explicitly here. This prevents potential
		 * retention of cached information in the TLB between
		 * distinct VMX episodes.
		 */
		invvpid(INVVPID_TYPE_ALL_CONTEXTS, invvpid_desc);
		invept(INVEPT_TYPE_ALL_CONTEXTS, invept_desc);
		vmxoff();
	}
	load_cr4(rcr4() & ~CR4_VMXE);
}

static int
vmx_cleanup(void)
{

	if (pirvec >= 0)
		lapic_ipi_free(pirvec);

	if (vpid_unr != NULL) {
		delete_unrhdr(vpid_unr);
		vpid_unr = NULL;
	}

	smp_rendezvous(NULL, vmx_disable, NULL, NULL);

	return (0);
}

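/*
 * Per-cpu rendezvous callback that enables VMX operation: program the
 * VMX-enable and lock bits in IA32_FEATURE_CONTROL if the BIOS left the
 * MSR unlocked, set CR4.VMXE, then execute VMXON on a per-cpu region
 * seeded with the VMCS revision identifier.
 */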
static void
vmx_enable(void *arg __unused)
{
	int error;
	uint64_t feature_control;

	feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
	if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 0 ||
	    (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
		wrmsr(MSR_IA32_FEATURE_CONTROL,
		    feature_control | IA32_FEATURE_CONTROL_VMX_EN |
		    IA32_FEATURE_CONTROL_LOCK);
	}

	load_cr4(rcr4() | CR4_VMXE);

	*(uint32_t *)vmxon_region[curcpu] = vmx_revision();
	error = vmxon(vmxon_region[curcpu]);
	if (error == 0)
		vmxon_enabled[curcpu] = 1;
}

static void
vmx_restore(void)
{

	if (vmxon_enabled[curcpu])
		vmxon(vmxon_region[curcpu]);
}

static int
vmx_init(int ipinum)
{
	int error, use_tpr_shadow;
	uint64_t basic, fixed0, fixed1, feature_control;
	uint32_t tmp, procbased2_vid_bits;

	/* CPUID.1:ECX[bit 5] must be 1 for processor to support VMX */
	if (!(cpu_feature2 & CPUID2_VMX)) {
		printf("vmx_init: processor does not support VMX operation\n");
		return (ENXIO);
	}

	/*
	 * Verify that MSR_IA32_FEATURE_CONTROL lock and VMXON enable bits
	 * are set (bits 0 and 2 respectively).
	 */
	feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
	if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 1 &&
	    (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
		printf("vmx_init: VMX operation disabled by BIOS\n");
		return (ENXIO);
	}

	/*
	 * Verify capabilities MSR_VMX_BASIC:
	 * - bit 54 indicates support for INS/OUTS decoding
	 */
	basic = rdmsr(MSR_VMX_BASIC);
	if ((basic & (1UL << 54)) == 0) {
		printf("vmx_init: processor does not support desired basic "
		    "capabilities\n");
		return (EINVAL);
	}

	/* Check support for primary processor-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
			       MSR_VMX_TRUE_PROCBASED_CTLS,
			       PROCBASED_CTLS_ONE_SETTING,
			       PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired primary "
		       "processor-based controls\n");
		return (error);
	}

	/* Clear the processor-based ctl bits that are set on demand */
	procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING;

	/* Check support for secondary processor-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
			       MSR_VMX_PROCBASED_CTLS2,
			       PROCBASED_CTLS2_ONE_SETTING,
			       PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2);
	if (error) {
		printf("vmx_init: processor does not support desired secondary "
		       "processor-based controls\n");
		return (error);
	}

	/* Check support for VPID */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
			       PROCBASED2_ENABLE_VPID, 0, &tmp);
	if (error == 0)
		procbased_ctls2 |= PROCBASED2_ENABLE_VPID;

	/* Check support for pin-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
			       MSR_VMX_TRUE_PINBASED_CTLS,
			       PINBASED_CTLS_ONE_SETTING,
			       PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired "
		       "pin-based controls\n");
		return (error);
	}

	/* Check support for VM-exit controls */
	error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS,
			       VM_EXIT_CTLS_ONE_SETTING,
			       VM_EXIT_CTLS_ZERO_SETTING,
			       &exit_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired "
		    "exit controls\n");
		return (error);
	}

	/* Check support for VM-entry controls */
	error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS,
	    VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING,
	    &entry_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired "
		    "entry controls\n");
		return (error);
	}

	/*
	 * Check support for optional features by testing them
	 * as individual bits
	 */
	cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
					MSR_VMX_TRUE_PROCBASED_CTLS,
					PROCBASED_HLT_EXITING, 0,
					&tmp) == 0);

	cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
					MSR_VMX_PROCBASED_CTLS,
					PROCBASED_MTF, 0,
					&tmp) == 0);

	cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
					 MSR_VMX_TRUE_PROCBASED_CTLS,
					 PROCBASED_PAUSE_EXITING, 0,
					 &tmp) == 0);

	cap_unrestricted_guest = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
					MSR_VMX_PROCBASED_CTLS2,
					PROCBASED2_UNRESTRICTED_GUEST, 0,
					&tmp) == 0);

	cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0,
	    &tmp) == 0);

	/*
	 * Check support for virtual interrupt delivery.
	 */
	procbased2_vid_bits = (PROCBASED2_VIRTUALIZE_APIC_ACCESSES |
	    PROCBASED2_VIRTUALIZE_X2APIC_MODE |
	    PROCBASED2_APIC_REGISTER_VIRTUALIZATION |
	    PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY);

	use_tpr_shadow = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_USE_TPR_SHADOW, 0,
	    &tmp) == 0);

	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
	    procbased2_vid_bits, 0, &tmp);
	if (error == 0 && use_tpr_shadow) {
		virtual_interrupt_delivery = 1;
		TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_vid",
		    &virtual_interrupt_delivery);
	}

	if (virtual_interrupt_delivery) {
		procbased_ctls |= PROCBASED_USE_TPR_SHADOW;
		procbased_ctls2 |= procbased2_vid_bits;
		procbased_ctls2 &= ~PROCBASED2_VIRTUALIZE_X2APIC_MODE;

		/*
		 * No need to emulate accesses to %CR8 if virtual
		 * interrupt delivery is enabled.
		 */
		procbased_ctls &= ~PROCBASED_CR8_LOAD_EXITING;
		procbased_ctls &= ~PROCBASED_CR8_STORE_EXITING;

		/*
		 * Check for Posted Interrupts only if Virtual Interrupt
		 * Delivery is enabled.
		 */
		error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
		    MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_POSTED_INTERRUPT, 0,
		    &tmp);
		if (error == 0) {
			pirvec = lapic_ipi_alloc(&IDTVEC(justreturn));
			if (pirvec < 0) {
				if (bootverbose) {
					printf("vmx_init: unable to allocate "
					    "posted interrupt vector\n");
				}
			} else {
				posted_interrupts = 1;
				TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_pir",
				    &posted_interrupts);
			}
		}
	}

	if (posted_interrupts)
		pinbased_ctls |= PINBASED_POSTED_INTERRUPT;

	/* Initialize EPT */
	error = ept_init(ipinum);
	if (error) {
		printf("vmx_init: ept initialization failed (%d)\n", error);
		return (error);
	}

	/*
	 * Stash the cr0 and cr4 bits that must be fixed to 0 or 1
	 */
	fixed0 = rdmsr(MSR_VMX_CR0_FIXED0);
	fixed1 = rdmsr(MSR_VMX_CR0_FIXED1);
	cr0_ones_mask = fixed0 & fixed1;
	cr0_zeros_mask = ~fixed0 & ~fixed1;

	/*
	 * CR0_PE and CR0_PG can be set to zero in VMX non-root operation
	 * if unrestricted guest execution is allowed.
	 */
	if (cap_unrestricted_guest)
		cr0_ones_mask &= ~(CR0_PG | CR0_PE);

	/*
	 * Do not allow the guest to set CR0_NW or CR0_CD.
	 */
	cr0_zeros_mask |= (CR0_NW | CR0_CD);

	fixed0 = rdmsr(MSR_VMX_CR4_FIXED0);
	fixed1 = rdmsr(MSR_VMX_CR4_FIXED1);
	cr4_ones_mask = fixed0 & fixed1;
	cr4_zeros_mask = ~fixed0 & ~fixed1;

	vpid_init();

	vmx_msr_init();

	/* enable VMX operation */
	smp_rendezvous(NULL, vmx_enable, NULL, NULL);

	vmx_initialized = 1;

	return (0);
}

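/*
 * Dispatch a host interrupt that triggered a VM-exit. Because the exit
 * controls include "acknowledge interrupt on exit", the interrupt is
 * acknowledged but never delivered through the IDT, so the handler is
 * invoked manually via its gate descriptor.
 */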
static void
vmx_trigger_hostintr(int vector)
{
	uintptr_t func;
	struct gate_descriptor *gd;

	gd = &idt[vector];

	KASSERT(vector >= 32 && vector <= 255, ("vmx_trigger_hostintr: "
	    "invalid vector %d", vector));
	KASSERT(gd->gd_p == 1, ("gate descriptor for vector %d not present",
	    vector));
	KASSERT(gd->gd_type == SDT_SYSIGT, ("gate descriptor for vector %d "
	    "has invalid type %d", vector, gd->gd_type));
	KASSERT(gd->gd_dpl == SEL_KPL, ("gate descriptor for vector %d "
	    "has invalid dpl %d", vector, gd->gd_dpl));
	KASSERT(gd->gd_selector == GSEL(GCODE_SEL, SEL_KPL), ("gate descriptor "
	    "for vector %d has invalid selector %d", vector, gd->gd_selector));
	KASSERT(gd->gd_ist == 0, ("gate descriptor for vector %d has invalid "
	    "IST %d", vector, gd->gd_ist));

	func = ((long)gd->gd_hioffset << 16 | gd->gd_looffset);
	vmx_call_isr(func);
}

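/*
 * Program the CR0/CR4 guest/host mask and read shadow. Guest reads of
 * bits set in the mask return the shadow value and guest writes to those
 * bits cause a VM-exit, which allows the fixed-0/fixed-1 constraints
 * applied by vmx_fix_cr0/cr4 to be hidden from the guest.
 */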
static int
vmx_setup_cr_shadow(int which, struct vmcs *vmcs, uint32_t initial)
{
	int error, mask_ident, shadow_ident;
	uint64_t mask_value;

	if (which != 0 && which != 4)
		panic("vmx_setup_cr_shadow: unknown cr%d", which);

	if (which == 0) {
		mask_ident = VMCS_CR0_MASK;
		mask_value = cr0_ones_mask | cr0_zeros_mask;
		shadow_ident = VMCS_CR0_SHADOW;
	} else {
		mask_ident = VMCS_CR4_MASK;
		mask_value = cr4_ones_mask | cr4_zeros_mask;
		shadow_ident = VMCS_CR4_SHADOW;
	}

	error = vmcs_setreg(vmcs, 0, VMCS_IDENT(mask_ident), mask_value);
	if (error)
		return (error);

	error = vmcs_setreg(vmcs, 0, VMCS_IDENT(shadow_ident), initial);
	if (error)
		return (error);

	return (0);
}
#define	vmx_setup_cr0_shadow(vmcs,init)	vmx_setup_cr_shadow(0, (vmcs), (init))
#define	vmx_setup_cr4_shadow(vmcs,init)	vmx_setup_cr_shadow(4, (vmcs), (init))

static void *
vmx_vminit(struct vm *vm, pmap_t pmap)
{
	uint16_t vpid[VM_MAXCPU];
	int i, error;
	struct vmx *vmx;
	struct vmcs *vmcs;
	uint32_t exc_bitmap;

	vmx = malloc(sizeof(struct vmx), M_VMX, M_WAITOK | M_ZERO);
	if ((uintptr_t)vmx & PAGE_MASK) {
		panic("malloc of struct vmx not aligned on %d byte boundary",
		      PAGE_SIZE);
	}
	vmx->vm = vm;

	vmx->eptp = eptp(vtophys((vm_offset_t)pmap->pm_pml4));

	/*
	 * Clean up EPTP-tagged guest physical and combined mappings
	 *
	 * VMX transitions are not required to invalidate any guest physical
	 * mappings. So, it may be possible for stale guest physical mappings
	 * to be present in the processor TLBs.
	 *
	 * Combined mappings for this EP4TA are also invalidated for all VPIDs.
	 */
	ept_invalidate_mappings(vmx->eptp);

	msr_bitmap_initialize(vmx->msr_bitmap);

	/*
	 * It is safe to allow direct access to MSR_GSBASE and MSR_FSBASE.
	 * The guest FSBASE and GSBASE are saved and restored during
	 * vm-exit and vm-entry respectively. The host FSBASE and GSBASE are
	 * always restored from the vmcs host state area on vm-exit.
	 *
	 * The SYSENTER_CS/ESP/EIP MSRs are identical to FS/GSBASE in
	 * how they are saved/restored so they can be directly accessed
	 * by the guest.
	 *
	 * MSR_EFER is saved and restored in the guest VMCS area on a
	 * VM exit and entry respectively. It is also restored from the
	 * host VMCS area on a VM exit.
	 *
	 * The TSC MSR is exposed read-only. Writes are disallowed as that
	 * will impact the host TSC.
	 * XXX Writes would be implemented with a wrmsr trap, and
	 * then modifying the TSC offset in the VMCS.
	 */
	if (guest_msr_rw(vmx, MSR_GSBASE) ||
	    guest_msr_rw(vmx, MSR_FSBASE) ||
	    guest_msr_rw(vmx, MSR_SYSENTER_CS_MSR) ||
	    guest_msr_rw(vmx, MSR_SYSENTER_ESP_MSR) ||
	    guest_msr_rw(vmx, MSR_SYSENTER_EIP_MSR) ||
	    guest_msr_rw(vmx, MSR_EFER) ||
	    guest_msr_ro(vmx, MSR_TSC))
		panic("vmx_vminit: error setting guest msr access");

	vpid_alloc(vpid, VM_MAXCPU);

	if (virtual_interrupt_delivery) {
		error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE,
		    APIC_ACCESS_ADDRESS);
		/* XXX this should really return an error to the caller */
		KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error));
	}

	for (i = 0; i < VM_MAXCPU; i++) {
		vmcs = &vmx->vmcs[i];
		vmcs->identifier = vmx_revision();
		error = vmclear(vmcs);
		if (error != 0) {
			panic("vmx_vminit: vmclear error %d on vcpu %d\n",
			      error, i);
		}

		vmx_msr_guest_init(vmx, i);

		error = vmcs_init(vmcs);
		KASSERT(error == 0, ("vmcs_init error %d", error));

		VMPTRLD(vmcs);
		error = 0;
		error += vmwrite(VMCS_HOST_RSP, (u_long)&vmx->ctx[i]);
		error += vmwrite(VMCS_EPTP, vmx->eptp);
		error += vmwrite(VMCS_PIN_BASED_CTLS, pinbased_ctls);
		error += vmwrite(VMCS_PRI_PROC_BASED_CTLS, procbased_ctls);
		error += vmwrite(VMCS_SEC_PROC_BASED_CTLS, procbased_ctls2);
		error += vmwrite(VMCS_EXIT_CTLS, exit_ctls);
		error += vmwrite(VMCS_ENTRY_CTLS, entry_ctls);
		error += vmwrite(VMCS_MSR_BITMAP, vtophys(vmx->msr_bitmap));
		error += vmwrite(VMCS_VPID, vpid[i]);

		/* exception bitmap */
		if (vcpu_trace_exceptions(vm, i))
			exc_bitmap = 0xffffffff;
		else
			exc_bitmap = 1 << IDT_MC;
		error += vmwrite(VMCS_EXCEPTION_BITMAP, exc_bitmap);

		if (virtual_interrupt_delivery) {
			error += vmwrite(VMCS_APIC_ACCESS, APIC_ACCESS_ADDRESS);
			error += vmwrite(VMCS_VIRTUAL_APIC,
			    vtophys(&vmx->apic_page[i]));
			error += vmwrite(VMCS_EOI_EXIT0, 0);
			error += vmwrite(VMCS_EOI_EXIT1, 0);
			error += vmwrite(VMCS_EOI_EXIT2, 0);
			error += vmwrite(VMCS_EOI_EXIT3, 0);
		}
		if (posted_interrupts) {
			error += vmwrite(VMCS_PIR_VECTOR, pirvec);
			error += vmwrite(VMCS_PIR_DESC,
			    vtophys(&vmx->pir_desc[i]));
		}
		VMCLEAR(vmcs);
		KASSERT(error == 0, ("vmx_vminit: error customizing the vmcs"));

		vmx->cap[i].set = 0;
		vmx->cap[i].proc_ctls = procbased_ctls;
		vmx->cap[i].proc_ctls2 = procbased_ctls2;

		vmx->state[i].nextrip = ~0;
		vmx->state[i].lastcpu = NOCPU;
		vmx->state[i].vpid = vpid[i];

		/*
		 * Set up the CR0/4 shadows, and init the read shadow
		 * to the power-on register value from the Intel Sys Arch.
		 *  CR0 - 0x60000010
		 *  CR4 - 0
		 */
		error = vmx_setup_cr0_shadow(vmcs, 0x60000010);
		if (error != 0)
			panic("vmx_setup_cr0_shadow %d", error);

		error = vmx_setup_cr4_shadow(vmcs, 0);
		if (error != 0)
			panic("vmx_setup_cr4_shadow %d", error);

		vmx->ctx[i].pmap = pmap;
	}

	return (vmx);
}

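/*
 * Emulate CPUID by handing the guest's %rax/%rbx/%rcx/%rdx directly to
 * the common x86 emulation code, which overwrites them with the values
 * the guest should see.
 */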
static int
vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx)
{
	int handled, func;

	func = vmxctx->guest_rax;

	handled = x86_emulate_cpuid(vm, vcpu,
				    (uint32_t*)(&vmxctx->guest_rax),
				    (uint32_t*)(&vmxctx->guest_rbx),
				    (uint32_t*)(&vmxctx->guest_rcx),
				    (uint32_t*)(&vmxctx->guest_rdx));
	return (handled);
}

static __inline void
vmx_run_trace(struct vmx *vmx, int vcpu)
{
#ifdef KTR
	VCPU_CTR1(vmx->vm, vcpu, "Resume execution at %#lx", vmcs_guest_rip());
#endif
}

static __inline void
vmx_exit_trace(struct vmx *vmx, int vcpu, uint64_t rip, uint32_t exit_reason,
	       int handled)
{
#ifdef KTR
	VCPU_CTR3(vmx->vm, vcpu, "%s %s vmexit at 0x%0lx",
		 handled ? "handled" : "unhandled",
		 exit_reason_to_str(exit_reason), rip);
#endif
}

static __inline void
vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip)
{
#ifdef KTR
	VCPU_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip);
#endif
}

static VMM_STAT_INTEL(VCPU_INVVPID_SAVED, "Number of vpid invalidations saved");
static VMM_STAT_INTEL(VCPU_INVVPID_DONE, "Number of vpid invalidations done");

/*
 * Invalidate guest mappings identified by its vpid from the TLB.
 */
static __inline void
vmx_invvpid(struct vmx *vmx, int vcpu, pmap_t pmap, int running)
{
	struct vmxstate *vmxstate;
	struct invvpid_desc invvpid_desc;

	vmxstate = &vmx->state[vcpu];
	if (vmxstate->vpid == 0)
		return;

	if (!running) {
		/*
		 * Set the 'lastcpu' to an invalid host cpu.
		 *
		 * This will invalidate TLB entries tagged with the vcpu's
		 * vpid the next time it runs via vmx_set_pcpu_defaults().
		 */
		vmxstate->lastcpu = NOCPU;
		return;
	}

	KASSERT(curthread->td_critnest > 0, ("%s: vcpu %d running outside "
	    "critical section", __func__, vcpu));

	/*
	 * Invalidate all mappings tagged with 'vpid'
	 *
	 * We do this because this vcpu was executing on a different host
	 * cpu when it last ran. We do not track whether it invalidated
	 * mappings associated with its 'vpid' during that run. So we must
	 * assume that the mappings associated with 'vpid' on 'curcpu' are
	 * stale and invalidate them.
	 *
	 * Note that we incur this penalty only when the scheduler chooses to
	 * move the thread associated with this vcpu between host cpus.
	 *
	 * Note also that this will invalidate mappings tagged with 'vpid'
	 * for "all" EP4TAs.
	 */
	if (pmap->pm_eptgen == vmx->eptgen[curcpu]) {
		invvpid_desc._res1 = 0;
		invvpid_desc._res2 = 0;
		invvpid_desc.vpid = vmxstate->vpid;
		invvpid_desc.linear_addr = 0;
		invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc);
		vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_DONE, 1);
	} else {
		/*
		 * The invvpid can be skipped if an invept is going to
		 * be performed before entering the guest. The invept
		 * will invalidate combined mappings tagged with
		 * 'vmx->eptp' for all vpids.
		 */
		vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_SAVED, 1);
	}
}

static void
vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu, pmap_t pmap)
{
	struct vmxstate *vmxstate;

	vmxstate = &vmx->state[vcpu];
	if (vmxstate->lastcpu == curcpu)
		return;

	vmxstate->lastcpu = curcpu;

	vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1);

	vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase());
	vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase());
	vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase());
	vmx_invvpid(vmx, vcpu, pmap, 1);
}

/*
 * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set.
 */
CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0);

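/*
 * The "window exiting" helpers below toggle the interrupt-window and
 * NMI-window execution controls. Setting a window control forces a
 * VM-exit as soon as the guest becomes able to accept the corresponding
 * event, at which point the pending injection can be retried.
 */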
static void __inline
vmx_set_int_window_exiting(struct vmx *vmx, int vcpu)
{

	if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) {
		vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
		VCPU_CTR0(vmx->vm, vcpu, "Enabling interrupt window exiting");
	}
}

static void __inline
vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu)
{

	KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0,
	    ("intr_window_exiting not set: %#x", vmx->cap[vcpu].proc_ctls));
	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;
	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
	VCPU_CTR0(vmx->vm, vcpu, "Disabling interrupt window exiting");
}

static void __inline
vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu)
{

	if ((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) == 0) {
		vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING;
		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
		VCPU_CTR0(vmx->vm, vcpu, "Enabling NMI window exiting");
	}
}

static void __inline
vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu)
{

	KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0,
	    ("nmi_window_exiting not set %#x", vmx->cap[vcpu].proc_ctls));
	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING;
	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
	VCPU_CTR0(vmx->vm, vcpu, "Disabling NMI window exiting");
}

#define	NMI_BLOCKING	(VMCS_INTERRUPTIBILITY_NMI_BLOCKING |		\
			 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
#define	HWINTR_BLOCKING	(VMCS_INTERRUPTIBILITY_STI_BLOCKING |		\
			 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)

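/*
 * Inject an NMI by writing the VM-entry interruption-information field.
 * The caller must ensure that the guest interruptibility-state and any
 * previously written entry-interruption-information do not prohibit the
 * injection; the KASSERTs below enforce this.
 */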
static void
vmx_inject_nmi(struct vmx *vmx, int vcpu)
{
	uint32_t gi, info;

	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	KASSERT((gi & NMI_BLOCKING) == 0, ("vmx_inject_nmi: invalid guest "
	    "interruptibility-state %#x", gi));

	info = vmcs_read(VMCS_ENTRY_INTR_INFO);
	KASSERT((info & VMCS_INTR_VALID) == 0, ("vmx_inject_nmi: invalid "
	    "VM-entry interruption information %#x", info));

	/*
	 * Inject the virtual NMI. The vector must be the NMI IDT entry
	 * or the VMCS entry check will fail.
	 */
	info = IDT_NMI | VMCS_INTR_T_NMI | VMCS_INTR_VALID;
	vmcs_write(VMCS_ENTRY_INTR_INFO, info);

	VCPU_CTR0(vmx->vm, vcpu, "Injecting vNMI");

	/* Clear the request */
	vm_nmi_clear(vmx->vm, vcpu);
}

static void
vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic,
    uint64_t guestrip)
{
	int vector, need_nmi_exiting, extint_pending;
	uint64_t rflags, entryinfo;
	uint32_t gi, info;

	if (vmx->state[vcpu].nextrip != guestrip) {
		gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
		if (gi & HWINTR_BLOCKING) {
			VCPU_CTR2(vmx->vm, vcpu, "Guest interrupt blocking "
			    "cleared due to rip change: %#lx/%#lx",
			    vmx->state[vcpu].nextrip, guestrip);
			gi &= ~HWINTR_BLOCKING;
			vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
		}
	}

	if (vm_entry_intinfo(vmx->vm, vcpu, &entryinfo)) {
		KASSERT((entryinfo & VMCS_INTR_VALID) != 0, ("%s: entry "
		    "intinfo is not valid: %#lx", __func__, entryinfo));

		info = vmcs_read(VMCS_ENTRY_INTR_INFO);
		KASSERT((info & VMCS_INTR_VALID) == 0, ("%s: cannot inject "
		     "pending exception: %#lx/%#x", __func__, entryinfo, info));

		info = entryinfo;
		vector = info & 0xff;
		if (vector == IDT_BP || vector == IDT_OF) {
			/*
			 * VT-x requires #BP and #OF to be injected as software
			 * exceptions.
			 */
			info &= ~VMCS_INTR_T_MASK;
			info |= VMCS_INTR_T_SWEXCEPTION;
		}

		if (info & VMCS_INTR_DEL_ERRCODE)
			vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, entryinfo >> 32);

		vmcs_write(VMCS_ENTRY_INTR_INFO, info);
	}

	if (vm_nmi_pending(vmx->vm, vcpu)) {
		/*
		 * If there are no conditions blocking NMI injection then
		 * inject it directly here otherwise enable "NMI window
		 * exiting" to inject it as soon as we can.
		 *
		 * We also check for STI_BLOCKING because some implementations
		 * don't allow NMI injection in this case. If we are running
		 * on a processor that doesn't have this restriction it will
		 * immediately exit and the NMI will be injected in the
		 * "NMI window exiting" handler.
		 */
		need_nmi_exiting = 1;
		gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
		if ((gi & (HWINTR_BLOCKING | NMI_BLOCKING)) == 0) {
			info = vmcs_read(VMCS_ENTRY_INTR_INFO);
			if ((info & VMCS_INTR_VALID) == 0) {
				vmx_inject_nmi(vmx, vcpu);
				need_nmi_exiting = 0;
			} else {
				VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI "
				    "due to VM-entry intr info %#x", info);
			}
		} else {
			VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI due to "
			    "Guest Interruptibility-state %#x", gi);
		}

		if (need_nmi_exiting)
			vmx_set_nmi_window_exiting(vmx, vcpu);
	}

	extint_pending = vm_extint_pending(vmx->vm, vcpu);

	if (!extint_pending && virtual_interrupt_delivery) {
		vmx_inject_pir(vlapic);
		return;
	}

	/*
	 * If interrupt-window exiting is already in effect then don't bother
	 * checking for pending interrupts. This is just an optimization and
	 * not needed for correctness.
	 */
	if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0) {
		VCPU_CTR0(vmx->vm, vcpu, "Skip interrupt injection due to "
		    "pending int_window_exiting");
		return;
	}

	if (!extint_pending) {
		/* Ask the local apic for a vector to inject */
		if (!vlapic_pending_intr(vlapic, &vector))
			return;

		/*
		 * From the Intel SDM, Volume 3, Section "Maskable
		 * Hardware Interrupts":
		 * - maskable interrupt vectors [16,255] can be delivered
		 *   through the local APIC.
		 */
		KASSERT(vector >= 16 && vector <= 255,
		    ("invalid vector %d from local APIC", vector));
	} else {
		/* Ask the legacy pic for a vector to inject */
		vatpic_pending_intr(vmx->vm, &vector);

		/*
		 * From the Intel SDM, Volume 3, Section "Maskable
		 * Hardware Interrupts":
		 * - maskable interrupt vectors [0,255] can be delivered
		 *   through the INTR pin.
		 */
		KASSERT(vector >= 0 && vector <= 255,
		    ("invalid vector %d from INTR", vector));
	}

	/* Check RFLAGS.IF and the interruptibility state of the guest */
	rflags = vmcs_read(VMCS_GUEST_RFLAGS);
	if ((rflags & PSL_I) == 0) {
		VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
		    "rflags %#lx", vector, rflags);
		goto cantinject;
	}

	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	if (gi & HWINTR_BLOCKING) {
		VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
		    "Guest Interruptibility-state %#x", vector, gi);
		goto cantinject;
	}

	info = vmcs_read(VMCS_ENTRY_INTR_INFO);
	if (info & VMCS_INTR_VALID) {
		/*
		 * This is expected and could happen for multiple reasons:
		 * - A vectoring VM-entry was aborted due to astpending
		 * - A VM-exit happened during event injection.
		 * - An exception was injected above.
		 * - An NMI was injected above or after "NMI window exiting"
		 */
		VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
		    "VM-entry intr info %#x", vector, info);
		goto cantinject;
	}

	/* Inject the interrupt */
	info = VMCS_INTR_T_HWINTR | VMCS_INTR_VALID;
	info |= vector;
	vmcs_write(VMCS_ENTRY_INTR_INFO, info);

	if (!extint_pending) {
		/* Update the Local APIC ISR */
		vlapic_intr_accepted(vlapic, vector);
	} else {
		vm_extint_clear(vmx->vm, vcpu);
		vatpic_intr_accepted(vmx->vm, vector);

		/*
		 * After we accepted the current ExtINT the PIC may
		 * have posted another one.  If that is the case, set
		 * the Interrupt Window Exiting execution control so
		 * we can inject that one too.
		 *
		 * Also, interrupt window exiting allows us to inject any
		 * pending APIC vector that was preempted by the ExtINT
		 * as soon as possible. This applies both for the software
		 * emulated vlapic and the hardware assisted virtual APIC.
		 */
		vmx_set_int_window_exiting(vmx, vcpu);
	}

	VCPU_CTR1(vmx->vm, vcpu, "Injecting hwintr at vector %d", vector);

	return;

cantinject:
	/*
	 * Set the Interrupt Window Exiting execution control so we can inject
	 * the interrupt as soon as the blocking condition goes away.
	 */
	vmx_set_int_window_exiting(vmx, vcpu);
}

/*
 * If the Virtual NMIs execution control is '1' then the logical processor
 * tracks virtual-NMI blocking in the Guest Interruptibility-state field of
 * the VMCS. An IRET instruction in VMX non-root operation will remove any
 * virtual-NMI blocking.
 *
 * This unblocking occurs even if the IRET causes a fault. In this case the
 * hypervisor needs to restore virtual-NMI blocking before resuming the guest.
 */
static void
vmx_restore_nmi_blocking(struct vmx *vmx, int vcpuid)
{
	uint32_t gi;

	VCPU_CTR0(vmx->vm, vcpuid, "Restore Virtual-NMI blocking");
	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
	vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
}

static void
vmx_clear_nmi_blocking(struct vmx *vmx, int vcpuid)
{
	uint32_t gi;

	VCPU_CTR0(vmx->vm, vcpuid, "Clear Virtual-NMI blocking");
	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
	vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
}

static void
vmx_assert_nmi_blocking(struct vmx *vmx, int vcpuid)
{
	uint32_t gi;

	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	KASSERT(gi & VMCS_INTERRUPTIBILITY_NMI_BLOCKING,
	    ("NMI blocking is not in effect %#x", gi));
}

static int
vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
{
	struct vmxctx *vmxctx;
	uint64_t xcrval;
	const struct xsave_limits *limits;

	vmxctx = &vmx->ctx[vcpu];
	limits = vmm_get_xsave_limits();

	/*
	 * Note that the processor raises a GP# fault on its own if
	 * xsetbv is executed for CPL != 0, so we do not have to
	 * emulate that fault here.
	 */

	/* Only xcr0 is supported. */
	if (vmxctx->guest_rcx != 0) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/* We only handle xcr0 if both the host and guest have XSAVE enabled. */
	if (!limits->xsave_enabled || !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) {
		vm_inject_ud(vmx->vm, vcpu);
		return (HANDLED);
	}

	xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff);
	if ((xcrval & ~limits->xcr0_allowed) != 0) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	if (!(xcrval & XFEATURE_ENABLED_X87)) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/* AVX (YMM_Hi128) requires SSE. */
	if (xcrval & XFEATURE_ENABLED_AVX &&
	    (xcrval & XFEATURE_AVX) != XFEATURE_AVX) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/*
	 * AVX512 requires base AVX (YMM_Hi128) as well as OpMask,
	 * ZMM_Hi256, and Hi16_ZMM.
	 */
	if (xcrval & XFEATURE_AVX512 &&
	    (xcrval & (XFEATURE_AVX512 | XFEATURE_AVX)) !=
	    (XFEATURE_AVX512 | XFEATURE_AVX)) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/*
	 * Intel MPX requires both bound register state flags to be
	 * set.
	 */
	if (((xcrval & XFEATURE_ENABLED_BNDREGS) != 0) !=
	    ((xcrval & XFEATURE_ENABLED_BNDCSR) != 0)) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/*
	 * This runs "inside" vmrun() with the guest's FPU state, so
	 * modifying xcr0 directly modifies the guest's xcr0, not the
	 * host's.
	 */
	load_xcr(0, xcrval);
	return (HANDLED);
}

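/*
 * Map the 4-bit register encoding found in an exit qualification to the
 * corresponding guest register. Most GPRs live in the software-managed
 * vmxctx; %rsp (encoding 4) is guest state kept in the VMCS.
 */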
static uint64_t
vmx_get_guest_reg(struct vmx *vmx, int vcpu, int ident)
{
	const struct vmxctx *vmxctx;

	vmxctx = &vmx->ctx[vcpu];

	switch (ident) {
	case 0:
		return (vmxctx->guest_rax);
	case 1:
		return (vmxctx->guest_rcx);
	case 2:
		return (vmxctx->guest_rdx);
	case 3:
		return (vmxctx->guest_rbx);
	case 4:
		return (vmcs_read(VMCS_GUEST_RSP));
	case 5:
		return (vmxctx->guest_rbp);
	case 6:
		return (vmxctx->guest_rsi);
	case 7:
		return (vmxctx->guest_rdi);
	case 8:
		return (vmxctx->guest_r8);
	case 9:
		return (vmxctx->guest_r9);
	case 10:
		return (vmxctx->guest_r10);
	case 11:
		return (vmxctx->guest_r11);
	case 12:
		return (vmxctx->guest_r12);
	case 13:
		return (vmxctx->guest_r13);
	case 14:
		return (vmxctx->guest_r14);
	case 15:
		return (vmxctx->guest_r15);
	default:
		panic("invalid vmx register %d", ident);
	}
}

static void
vmx_set_guest_reg(struct vmx *vmx, int vcpu, int ident, uint64_t regval)
{
	struct vmxctx *vmxctx;

	vmxctx = &vmx->ctx[vcpu];

	switch (ident) {
	case 0:
		vmxctx->guest_rax = regval;
		break;
	case 1:
		vmxctx->guest_rcx = regval;
		break;
	case 2:
		vmxctx->guest_rdx = regval;
		break;
	case 3:
		vmxctx->guest_rbx = regval;
		break;
	case 4:
		vmcs_write(VMCS_GUEST_RSP, regval);
		break;
	case 5:
		vmxctx->guest_rbp = regval;
		break;
	case 6:
		vmxctx->guest_rsi = regval;
		break;
	case 7:
		vmxctx->guest_rdi = regval;
		break;
	case 8:
		vmxctx->guest_r8 = regval;
		break;
	case 9:
		vmxctx->guest_r9 = regval;
		break;
	case 10:
		vmxctx->guest_r10 = regval;
		break;
	case 11:
		vmxctx->guest_r11 = regval;
		break;
	case 12:
		vmxctx->guest_r12 = regval;
		break;
	case 13:
		vmxctx->guest_r13 = regval;
		break;
	case 14:
		vmxctx->guest_r14 = regval;
		break;
	case 15:
		vmxctx->guest_r15 = regval;
		break;
	default:
		panic("invalid vmx register %d", ident);
	}
}

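/*
 * Emulate a guest "mov to %cr0": record the guest's value in the read
 * shadow, install the fixed-up value in the real guest CR0, and keep
 * EFER.LMA and the "IA-32e mode guest" entry control consistent when
 * paging is being enabled with EFER.LME set.
 */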
static int
vmx_emulate_cr0_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	uint64_t crval, regval;

	/* We only handle mov to %cr0 at this time */
	if ((exitqual & 0xf0) != 0x00)
		return (UNHANDLED);

	regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);

	vmcs_write(VMCS_CR0_SHADOW, regval);

	crval = regval | cr0_ones_mask;
	crval &= ~cr0_zeros_mask;
	vmcs_write(VMCS_GUEST_CR0, crval);

	if (regval & CR0_PG) {
		uint64_t efer, entry_ctls;

		/*
		 * If CR0.PG is 1 and EFER.LME is 1 then EFER.LMA and
		 * the "IA-32e mode guest" bit in VM-entry control must be
		 * equal.
		 */
		efer = vmcs_read(VMCS_GUEST_IA32_EFER);
		if (efer & EFER_LME) {
			efer |= EFER_LMA;
			vmcs_write(VMCS_GUEST_IA32_EFER, efer);
			entry_ctls = vmcs_read(VMCS_ENTRY_CTLS);
			entry_ctls |= VM_ENTRY_GUEST_LMA;
			vmcs_write(VMCS_ENTRY_CTLS, entry_ctls);
		}
	}

	return (HANDLED);
}

static int
vmx_emulate_cr4_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	uint64_t crval, regval;

	/* We only handle mov to %cr4 at this time */
	if ((exitqual & 0xf0) != 0x00)
		return (UNHANDLED);

	regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);

	vmcs_write(VMCS_CR4_SHADOW, regval);

	crval = regval | cr4_ones_mask;
	crval &= ~cr4_zeros_mask;
	vmcs_write(VMCS_GUEST_CR4, crval);

	return (HANDLED);
}

static int
vmx_emulate_cr8_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	struct vlapic *vlapic;
	uint64_t cr8;
	int regnum;

	/* We only handle mov %cr8 to/from a register at this time. */
	if ((exitqual & 0xe0) != 0x00) {
		return (UNHANDLED);
	}

	vlapic = vm_lapic(vmx->vm, vcpu);
	regnum = (exitqual >> 8) & 0xf;
	if (exitqual & 0x10) {
		cr8 = vlapic_get_cr8(vlapic);
		vmx_set_guest_reg(vmx, vcpu, regnum, cr8);
	} else {
		cr8 = vmx_get_guest_reg(vmx, vcpu, regnum);
		vlapic_set_cr8(vlapic, cr8);
	}

	return (HANDLED);
}

/*
 * From section "Guest Register State" in the Intel SDM: CPL = SS.DPL
 */
static int
vmx_cpl(void)
{
	uint32_t ssar;

	ssar = vmcs_read(VMCS_GUEST_SS_ACCESS_RIGHTS);
	return ((ssar >> 5) & 0x3);
}

static enum vm_cpu_mode
vmx_cpu_mode(void)
{
	uint32_t csar;

	if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LMA) {
		csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
		if (csar & 0x2000)
			return (CPU_MODE_64BIT);	/* CS.L = 1 */
		else
			return (CPU_MODE_COMPATIBILITY);
	} else if (vmcs_read(VMCS_GUEST_CR0) & CR0_PE) {
		return (CPU_MODE_PROTECTED);
	} else {
		return (CPU_MODE_REAL);
	}
}

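/*
 * Derive the guest paging mode from CR0.PG, CR4.PAE and EFER.LME: no
 * paging at all is treated as "flat", paging without PAE is 32-bit, and
 * PAE paging is either long mode or PAE depending on EFER.LME.
 */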
static enum vm_paging_mode
vmx_paging_mode(void)
{

	if (!(vmcs_read(VMCS_GUEST_CR0) & CR0_PG))
		return (PAGING_MODE_FLAT);
	if (!(vmcs_read(VMCS_GUEST_CR4) & CR4_PAE))
		return (PAGING_MODE_32);
	if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LME)
		return (PAGING_MODE_64);
	else
		return (PAGING_MODE_PAE);
}

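/*
 * For string I/O instructions the index register depends on the
 * direction: INS writes guest memory through %rdi while OUTS reads it
 * through %rsi.
 */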
1702 static uint64_t
1703 inout_str_index(struct vmx *vmx, int vcpuid, int in)
1704 {
1705 	uint64_t val;
1706 	int error;
1707 	enum vm_reg_name reg;
1708 
1709 	reg = in ? VM_REG_GUEST_RDI : VM_REG_GUEST_RSI;
1710 	error = vmx_getreg(vmx, vcpuid, reg, &val);
1711 	KASSERT(error == 0, ("%s: vmx_getreg error %d", __func__, error));
1712 	return (val);
1713 }
1714 
1715 static uint64_t
1716 inout_str_count(struct vmx *vmx, int vcpuid, int rep)
1717 {
1718 	uint64_t val;
1719 	int error;
1720 
1721 	if (rep) {
1722 		error = vmx_getreg(vmx, vcpuid, VM_REG_GUEST_RCX, &val);
1723 		KASSERT(!error, ("%s: vmx_getreg error %d", __func__, error));
1724 	} else {
1725 		val = 1;
1726 	}
1727 	return (val);
1728 }
1729 
1730 static int
1731 inout_str_addrsize(uint32_t inst_info)
1732 {
1733 	uint32_t size;
1734 
1735 	size = (inst_info >> 7) & 0x7;
1736 	switch (size) {
1737 	case 0:
1738 		return (2);	/* 16 bit */
1739 	case 1:
1740 		return (4);	/* 32 bit */
1741 	case 2:
1742 		return (8);	/* 64 bit */
1743 	default:
1744 		panic("%s: invalid size encoding %d", __func__, size);
1745 	}
1746 }
1747 
1748 static void
1749 inout_str_seginfo(struct vmx *vmx, int vcpuid, uint32_t inst_info, int in,
1750     struct vm_inout_str *vis)
1751 {
1752 	int error, s;
1753 
1754 	if (in) {
1755 		vis->seg_name = VM_REG_GUEST_ES;
1756 	} else {
1757 		s = (inst_info >> 15) & 0x7;
1758 		vis->seg_name = vm_segment_name(s);
1759 	}
1760 
1761 	error = vmx_getdesc(vmx, vcpuid, vis->seg_name, &vis->seg_desc);
1762 	KASSERT(error == 0, ("%s: vmx_getdesc error %d", __func__, error));
1763 }
1764 
1765 static void
1766 vmx_paging_info(struct vm_guest_paging *paging)
1767 {
1768 	paging->cr3 = vmcs_guest_cr3();
1769 	paging->cpl = vmx_cpl();
1770 	paging->cpu_mode = vmx_cpu_mode();
1771 	paging->paging_mode = vmx_paging_mode();
1772 }
1773 
1774 static void
1775 vmexit_inst_emul(struct vm_exit *vmexit, uint64_t gpa, uint64_t gla)
1776 {
1777 	struct vm_guest_paging *paging;
1778 	uint32_t csar;
1779 
1780 	paging = &vmexit->u.inst_emul.paging;
1781 
1782 	vmexit->exitcode = VM_EXITCODE_INST_EMUL;
1783 	vmexit->u.inst_emul.gpa = gpa;
1784 	vmexit->u.inst_emul.gla = gla;
1785 	vmx_paging_info(paging);
1786 	switch (paging->cpu_mode) {
1787 	case CPU_MODE_PROTECTED:
1788 	case CPU_MODE_COMPATIBILITY:
1789 		csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
1790 		vmexit->u.inst_emul.cs_d = SEG_DESC_DEF32(csar);
1791 		break;
1792 	default:
1793 		vmexit->u.inst_emul.cs_d = 0;
1794 		break;
1795 	}
1796 	vie_init(&vmexit->u.inst_emul.vie, NULL, 0);
1797 }
1798 
1799 static int
1800 ept_fault_type(uint64_t ept_qual)
1801 {
1802 	int fault_type;
1803 
1804 	if (ept_qual & EPT_VIOLATION_DATA_WRITE)
1805 		fault_type = VM_PROT_WRITE;
1806 	else if (ept_qual & EPT_VIOLATION_INST_FETCH)
1807 		fault_type = VM_PROT_EXECUTE;
1808 	else
1809 		fault_type= VM_PROT_READ;
1810 
1811 	return (fault_type);
1812 }
1813 
1814 static boolean_t
1815 ept_emulation_fault(uint64_t ept_qual)
1816 {
1817 	int read, write;
1818 
1819 	/* EPT fault on an instruction fetch doesn't make sense here */
1820 	if (ept_qual & EPT_VIOLATION_INST_FETCH)
1821 		return (FALSE);
1822 
1823 	/* EPT fault must be a read fault or a write fault */
1824 	read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
1825 	write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
1826 	if ((read | write) == 0)
1827 		return (FALSE);
1828 
1829 	/*
1830 	 * The EPT violation must have been caused by accessing a
1831 	 * guest-physical address that is a translation of a guest-linear
1832 	 * address.
1833 	 */
1834 	if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
1835 	    (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
1836 		return (FALSE);
1837 	}
1838 
1839 	return (TRUE);
1840 }
1841 
1842 static __inline int
1843 apic_access_virtualization(struct vmx *vmx, int vcpuid)
1844 {
1845 	uint32_t proc_ctls2;
1846 
1847 	proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
1848 	return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) ? 1 : 0);
1849 }
1850 
1851 static __inline int
1852 x2apic_virtualization(struct vmx *vmx, int vcpuid)
1853 {
1854 	uint32_t proc_ctls2;
1855 
1856 	proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
1857 	return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_X2APIC_MODE) ? 1 : 0);
1858 }
1859 
1860 static int
1861 vmx_handle_apic_write(struct vmx *vmx, int vcpuid, struct vlapic *vlapic,
1862     uint64_t qual)
1863 {
1864 	int error, handled, offset;
1865 	uint32_t *apic_regs, vector;
1866 	bool retu;
1867 
1868 	handled = HANDLED;
1869 	offset = APIC_WRITE_OFFSET(qual);
1870 
1871 	if (!apic_access_virtualization(vmx, vcpuid)) {
1872 		/*
1873 		 * In general there should not be any APIC write VM-exits
1874 		 * unless APIC-access virtualization is enabled.
1875 		 *
1876 		 * However, self-IPI virtualization can legitimately trigger
1877 		 * an APIC-write VM-exit, so treat it specially.
1878 		 */
1879 		if (x2apic_virtualization(vmx, vcpuid) &&
1880 		    offset == APIC_OFFSET_SELF_IPI) {
1881 			apic_regs = (uint32_t *)(vlapic->apic_page);
1882 			vector = apic_regs[APIC_OFFSET_SELF_IPI / 4];
1883 			vlapic_self_ipi_handler(vlapic, vector);
1884 			return (HANDLED);
1885 		} else
1886 			return (UNHANDLED);
1887 	}
1888 
1889 	switch (offset) {
1890 	case APIC_OFFSET_ID:
1891 		vlapic_id_write_handler(vlapic);
1892 		break;
1893 	case APIC_OFFSET_LDR:
1894 		vlapic_ldr_write_handler(vlapic);
1895 		break;
1896 	case APIC_OFFSET_DFR:
1897 		vlapic_dfr_write_handler(vlapic);
1898 		break;
1899 	case APIC_OFFSET_SVR:
1900 		vlapic_svr_write_handler(vlapic);
1901 		break;
1902 	case APIC_OFFSET_ESR:
1903 		vlapic_esr_write_handler(vlapic);
1904 		break;
1905 	case APIC_OFFSET_ICR_LOW:
1906 		retu = false;
1907 		error = vlapic_icrlo_write_handler(vlapic, &retu);
1908 		if (error != 0 || retu)
1909 			handled = UNHANDLED;
1910 		break;
1911 	case APIC_OFFSET_CMCI_LVT:
1912 	case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT:
1913 		vlapic_lvt_write_handler(vlapic, offset);
1914 		break;
1915 	case APIC_OFFSET_TIMER_ICR:
1916 		vlapic_icrtmr_write_handler(vlapic);
1917 		break;
1918 	case APIC_OFFSET_TIMER_DCR:
1919 		vlapic_dcr_write_handler(vlapic);
1920 		break;
1921 	default:
1922 		handled = UNHANDLED;
1923 		break;
1924 	}
1925 	return (handled);
1926 }
1927 
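/*
 * When APIC-access virtualization is enabled, an EPT fault on the 4KB
 * page at the default APIC base is an access to the APIC-access page
 * rather than to ordinary MMIO space.
 */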
1928 static bool
1929 apic_access_fault(struct vmx *vmx, int vcpuid, uint64_t gpa)
1930 {
1931 
1932 	if (apic_access_virtualization(vmx, vcpuid) &&
1933 	    (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE))
1934 		return (true);
1935 	else
1936 		return (false);
1937 }
1938 
1939 static int
1940 vmx_handle_apic_access(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
1941 {
1942 	uint64_t qual;
1943 	int access_type, offset, allowed;
1944 
1945 	if (!apic_access_virtualization(vmx, vcpuid))
1946 		return (UNHANDLED);
1947 
1948 	qual = vmexit->u.vmx.exit_qualification;
1949 	access_type = APIC_ACCESS_TYPE(qual);
1950 	offset = APIC_ACCESS_OFFSET(qual);
1951 
1952 	allowed = 0;
1953 	if (access_type == 0) {
1954 		/*
1955 		 * Read data access to the following registers is expected.
1956 		 */
1957 		switch (offset) {
1958 		case APIC_OFFSET_APR:
1959 		case APIC_OFFSET_PPR:
1960 		case APIC_OFFSET_RRR:
1961 		case APIC_OFFSET_CMCI_LVT:
1962 		case APIC_OFFSET_TIMER_CCR:
1963 			allowed = 1;
1964 			break;
1965 		default:
1966 			break;
1967 		}
1968 	} else if (access_type == 1) {
1969 		/*
1970 		 * Write data access to the following registers is expected.
1971 		 */
1972 		switch (offset) {
1973 		case APIC_OFFSET_VER:
1974 		case APIC_OFFSET_APR:
1975 		case APIC_OFFSET_PPR:
1976 		case APIC_OFFSET_RRR:
1977 		case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7:
1978 		case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7:
1979 		case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7:
1980 		case APIC_OFFSET_CMCI_LVT:
1981 		case APIC_OFFSET_TIMER_CCR:
1982 			allowed = 1;
1983 			break;
1984 		default:
1985 			break;
1986 		}
1987 	}
1988 
1989 	if (allowed) {
1990 		vmexit_inst_emul(vmexit, DEFAULT_APIC_BASE + offset,
1991 		    VIE_INVALID_GLA);
1992 	}
1993 
1994 	/*
1995 	 * Regardless of whether the APIC-access is allowed this handler
1996 	 * always returns UNHANDLED:
1997 	 * - if the access is allowed then it is handled by emulating the
1998 	 *   instruction that caused the VM-exit (outside the critical section)
1999 	 * - if the access is not allowed then it will be converted to an
2000 	 *   exitcode of VM_EXITCODE_VMX and will be dealt with in userland.
2001 	 */
2002 	return (UNHANDLED);
2003 }
2004 
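/*
 * Bits 31:30 of the task-switch exit qualification encode the source of
 * the task switch: a CALL instruction, an IRET, a JMP or a task gate in
 * the IDT.
 */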
2005 static enum task_switch_reason
2006 vmx_task_switch_reason(uint64_t qual)
2007 {
2008 	int reason;
2009 
2010 	reason = (qual >> 30) & 0x3;
2011 	switch (reason) {
2012 	case 0:
2013 		return (TSR_CALL);
2014 	case 1:
2015 		return (TSR_IRET);
2016 	case 2:
2017 		return (TSR_JMP);
2018 	case 3:
2019 		return (TSR_IDT_GATE);
2020 	default:
2021 		panic("%s: invalid reason %d", __func__, reason);
2022 	}
2023 }
2024 
2025 static int
2026 emulate_wrmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t val, bool *retu)
2027 {
2028 	int error;
2029 
2030 	if (lapic_msr(num))
2031 		error = lapic_wrmsr(vmx->vm, vcpuid, num, val, retu);
2032 	else
2033 		error = vmx_wrmsr(vmx, vcpuid, num, val, retu);
2034 
2035 	return (error);
2036 }
2037 
2038 static int
2039 emulate_rdmsr(struct vmx *vmx, int vcpuid, u_int num, bool *retu)
2040 {
2041 	struct vmxctx *vmxctx;
2042 	uint64_t result;
2043 	uint32_t eax, edx;
2044 	int error;
2045 
2046 	if (lapic_msr(num))
2047 		error = lapic_rdmsr(vmx->vm, vcpuid, num, &result, retu);
2048 	else
2049 		error = vmx_rdmsr(vmx, vcpuid, num, &result, retu);
2050 
2051 	if (error == 0) {
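		/* RDMSR returns its 64-bit result in EDX:EAX */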
2052 		eax = result;
2053 		vmxctx = &vmx->ctx[vcpuid];
2054 		error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RAX, eax);
2055 		KASSERT(error == 0, ("vmxctx_setreg(rax) error %d", error));
2056 
2057 		edx = result >> 32;
2058 		error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RDX, edx);
2059 		KASSERT(error == 0, ("vmxctx_setreg(rdx) error %d", error));
2060 	}
2061 
2062 	return (error);
2063 }
2064 
2065 static int
2066 vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
2067 {
2068 	int error, errcode, errcode_valid, handled, in;
2069 	struct vmxctx *vmxctx;
2070 	struct vlapic *vlapic;
2071 	struct vm_inout_str *vis;
2072 	struct vm_task_switch *ts;
2073 	uint32_t eax, ecx, edx, idtvec_info, idtvec_err, intr_info, inst_info;
2074 	uint32_t intr_type, intr_vec, reason;
2075 	uint64_t exitintinfo, qual, gpa;
2076 	bool retu;
2077 
2078 	CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0);
2079 	CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0);
2080 
2081 	handled = UNHANDLED;
2082 	vmxctx = &vmx->ctx[vcpu];
2083 
2084 	qual = vmexit->u.vmx.exit_qualification;
2085 	reason = vmexit->u.vmx.exit_reason;
2086 	vmexit->exitcode = VM_EXITCODE_BOGUS;
2087 
2088 	vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1);
2089 
2090 	/*
2091 	 * VM-entry failures during or after loading guest state.
2092 	 *
2093 	 * These VM-exits are uncommon but must be handled specially
2094 	 * as most VM-exit fields are not populated as usual.
2095 	 */
2096 	if (__predict_false(reason == EXIT_REASON_MCE_DURING_ENTRY)) {
2097 		VCPU_CTR0(vmx->vm, vcpu, "Handling MCE during VM-entry");
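		/* Vector directly to the host machine-check handler */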
2098 		__asm __volatile("int $18");
2099 		return (1);
2100 	}
2101 
2102 	/*
2103 	 * VM exits that can be triggered during event delivery need to
2104 	 * be handled specially by re-injecting the event if the IDT
2105 	 * vectoring information field's valid bit is set.
2106 	 *
2107 	 * See "Information for VM Exits During Event Delivery" in Intel SDM
2108 	 * for details.
2109 	 */
2110 	idtvec_info = vmcs_idt_vectoring_info();
2111 	if (idtvec_info & VMCS_IDT_VEC_VALID) {
2112 		idtvec_info &= ~(1 << 12); /* clear undefined bit */
2113 		exitintinfo = idtvec_info;
2114 		if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
2115 			idtvec_err = vmcs_idt_vectoring_err();
2116 			exitintinfo |= (uint64_t)idtvec_err << 32;
2117 		}
2118 		error = vm_exit_intinfo(vmx->vm, vcpu, exitintinfo);
2119 		KASSERT(error == 0, ("%s: vm_exit_intinfo error %d",
2120 		    __func__, error));
2121 
2122 		/*
2123 		 * If 'virtual NMIs' are being used and the VM-exit
2124 		 * happened while injecting an NMI during the previous
2125 		 * VM-entry, then clear "blocking by NMI" in the
2126 		 * Guest Interruptibility-State so the NMI can be
2127 		 * reinjected on the subsequent VM-entry.
2128 		 *
2129 		 * However, if the NMI was being delivered through a task
2130 		 * gate, then the new task must start execution with NMIs
2131 		 * blocked so don't clear NMI blocking in this case.
2132 		 */
2133 		intr_type = idtvec_info & VMCS_INTR_T_MASK;
2134 		if (intr_type == VMCS_INTR_T_NMI) {
2135 			if (reason != EXIT_REASON_TASK_SWITCH)
2136 				vmx_clear_nmi_blocking(vmx, vcpu);
2137 			else
2138 				vmx_assert_nmi_blocking(vmx, vcpu);
2139 		}
2140 
2141 		/*
2142 		 * Update VM-entry instruction length if the event being
2143 		 * delivered was a software interrupt or software exception.
2144 		 */
2145 		if (intr_type == VMCS_INTR_T_SWINTR ||
2146 		    intr_type == VMCS_INTR_T_PRIV_SWEXCEPTION ||
2147 		    intr_type == VMCS_INTR_T_SWEXCEPTION) {
2148 			vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);
2149 		}
2150 	}
2151 
2152 	switch (reason) {
2153 	case EXIT_REASON_TASK_SWITCH:
2154 		ts = &vmexit->u.task_switch;
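		/* Bits 15:0 of the qualification hold the new TSS selector */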
2155 		ts->tsssel = qual & 0xffff;
2156 		ts->reason = vmx_task_switch_reason(qual);
2157 		ts->ext = 0;
2158 		ts->errcode_valid = 0;
2159 		vmx_paging_info(&ts->paging);
2160 		/*
2161 		 * If the task switch was due to a CALL, JMP, IRET, software
2162 		 * interrupt (INT n) or software exception (INT3, INTO),
2163 		 * then the saved %rip references the instruction that caused
2164 		 * the task switch. The instruction length field in the VMCS
2165 		 * is valid in this case.
2166 		 *
2167 		 * In all other cases (e.g., NMI, hardware exception) the
2168 		 * saved %rip is one that would have been saved in the old TSS
2169 		 * had the task switch completed normally so the instruction
2170 		 * length field is not needed in this case and is explicitly
2171 		 * set to 0.
2172 		 */
2173 		if (ts->reason == TSR_IDT_GATE) {
2174 			KASSERT(idtvec_info & VMCS_IDT_VEC_VALID,
2175 			    ("invalid idtvec_info %#x for IDT task switch",
2176 			    idtvec_info));
2177 			intr_type = idtvec_info & VMCS_INTR_T_MASK;
2178 			if (intr_type != VMCS_INTR_T_SWINTR &&
2179 			    intr_type != VMCS_INTR_T_SWEXCEPTION &&
2180 			    intr_type != VMCS_INTR_T_PRIV_SWEXCEPTION) {
2181 				/* Task switch triggered by external event */
2182 				ts->ext = 1;
2183 				vmexit->inst_length = 0;
2184 				if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
2185 					ts->errcode_valid = 1;
2186 					ts->errcode = vmcs_idt_vectoring_err();
2187 				}
2188 			}
2189 		}
2190 		vmexit->exitcode = VM_EXITCODE_TASK_SWITCH;
2191 		VCPU_CTR4(vmx->vm, vcpu, "task switch reason %d, tss 0x%04x, "
2192 		    "%s errcode 0x%016lx", ts->reason, ts->tsssel,
2193 		    ts->ext ? "external" : "internal",
2194 		    ((uint64_t)ts->errcode << 32) | ts->errcode_valid);
2195 		break;
2196 	case EXIT_REASON_CR_ACCESS:
2197 		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1);
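		/* Bits 3:0 of the qualification identify the control register */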
2198 		switch (qual & 0xf) {
2199 		case 0:
2200 			handled = vmx_emulate_cr0_access(vmx, vcpu, qual);
2201 			break;
2202 		case 4:
2203 			handled = vmx_emulate_cr4_access(vmx, vcpu, qual);
2204 			break;
2205 		case 8:
2206 			handled = vmx_emulate_cr8_access(vmx, vcpu, qual);
2207 			break;
2208 		}
2209 		break;
2210 	case EXIT_REASON_RDMSR:
2211 		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1);
2212 		retu = false;
2213 		ecx = vmxctx->guest_rcx;
2214 		VCPU_CTR1(vmx->vm, vcpu, "rdmsr 0x%08x", ecx);
2215 		error = emulate_rdmsr(vmx, vcpu, ecx, &retu);
2216 		if (error) {
2217 			vmexit->exitcode = VM_EXITCODE_RDMSR;
2218 			vmexit->u.msr.code = ecx;
2219 		} else if (!retu) {
2220 			handled = HANDLED;
2221 		} else {
2222 			/* Return to userspace with a valid exitcode */
2223 			KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
2224 			    ("emulate_rdmsr retu with bogus exitcode"));
2225 		}
2226 		break;
2227 	case EXIT_REASON_WRMSR:
2228 		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_WRMSR, 1);
2229 		retu = false;
2230 		eax = vmxctx->guest_rax;
2231 		ecx = vmxctx->guest_rcx;
2232 		edx = vmxctx->guest_rdx;
2233 		VCPU_CTR2(vmx->vm, vcpu, "wrmsr 0x%08x value 0x%016lx",
2234 		    ecx, (uint64_t)edx << 32 | eax);
2235 		error = emulate_wrmsr(vmx, vcpu, ecx,
2236 		    (uint64_t)edx << 32 | eax, &retu);
2237 		if (error) {
2238 			vmexit->exitcode = VM_EXITCODE_WRMSR;
2239 			vmexit->u.msr.code = ecx;
2240 			vmexit->u.msr.wval = (uint64_t)edx << 32 | eax;
2241 		} else if (!retu) {
2242 			handled = HANDLED;
2243 		} else {
2244 			/* Return to userspace with a valid exitcode */
2245 			KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
2246 			    ("emulate_wrmsr retu with bogus exitcode"));
2247 		}
2248 		break;
2249 	case EXIT_REASON_HLT:
2250 		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1);
2251 		vmexit->exitcode = VM_EXITCODE_HLT;
2252 		vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS);
2253 		break;
2254 	case EXIT_REASON_MTF:
2255 		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1);
2256 		vmexit->exitcode = VM_EXITCODE_MTRAP;
2257 		vmexit->inst_length = 0;
2258 		break;
2259 	case EXIT_REASON_PAUSE:
2260 		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1);
2261 		vmexit->exitcode = VM_EXITCODE_PAUSE;
2262 		break;
2263 	case EXIT_REASON_INTR_WINDOW:
2264 		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1);
2265 		vmx_clear_int_window_exiting(vmx, vcpu);
2266 		return (1);
2267 	case EXIT_REASON_EXT_INTR:
2268 		/*
2269 		 * External interrupts serve only to cause VM exits and allow
2270 		 * the host interrupt handler to run.
2271 		 *
2272 		 * If this external interrupt triggers a virtual interrupt
2273 		 * to a VM, then that state will be recorded by the
2274 		 * host interrupt handler in the VM's softc. We will inject
2275 		 * this virtual interrupt during the subsequent VM enter.
2276 		 */
2277 		intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2278 
2279 		/*
2280 		 * XXX: Ignore this exit if VMCS_INTR_VALID is not set.
2281 		 * This appears to be a bug in VMware Fusion?
2282 		 */
2283 		if (!(intr_info & VMCS_INTR_VALID))
2284 			return (1);
2285 		KASSERT((intr_info & VMCS_INTR_VALID) != 0 &&
2286 		    (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR,
2287 		    ("VM exit interruption info invalid: %#x", intr_info));
2288 		vmx_trigger_hostintr(intr_info & 0xff);
2289 
2290 		/*
2291 		 * This is special. We want to treat this as a 'handled'
2292 		 * VM-exit but not increment the instruction pointer.
2293 		 */
2294 		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1);
2295 		return (1);
2296 	case EXIT_REASON_NMI_WINDOW:
2297 		/* Exit to allow the pending virtual NMI to be injected */
2298 		if (vm_nmi_pending(vmx->vm, vcpu))
2299 			vmx_inject_nmi(vmx, vcpu);
2300 		vmx_clear_nmi_window_exiting(vmx, vcpu);
2301 		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1);
2302 		return (1);
2303 	case EXIT_REASON_INOUT:
2304 		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1);
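		/*
		 * Decode the I/O exit qualification: bits 2:0 hold the
		 * access size minus one, bit 3 the direction (1 = in),
		 * bit 4 the string-instruction flag, bit 5 the REP prefix
		 * and bits 31:16 the port number.
		 */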
2305 		vmexit->exitcode = VM_EXITCODE_INOUT;
2306 		vmexit->u.inout.bytes = (qual & 0x7) + 1;
2307 		vmexit->u.inout.in = in = (qual & 0x8) ? 1 : 0;
2308 		vmexit->u.inout.string = (qual & 0x10) ? 1 : 0;
2309 		vmexit->u.inout.rep = (qual & 0x20) ? 1 : 0;
2310 		vmexit->u.inout.port = (uint16_t)(qual >> 16);
2311 		vmexit->u.inout.eax = (uint32_t)(vmxctx->guest_rax);
2312 		if (vmexit->u.inout.string) {
2313 			inst_info = vmcs_read(VMCS_EXIT_INSTRUCTION_INFO);
2314 			vmexit->exitcode = VM_EXITCODE_INOUT_STR;
2315 			vis = &vmexit->u.inout_str;
2316 			vmx_paging_info(&vis->paging);
2317 			vis->rflags = vmcs_read(VMCS_GUEST_RFLAGS);
2318 			vis->cr0 = vmcs_read(VMCS_GUEST_CR0);
2319 			vis->index = inout_str_index(vmx, vcpu, in);
2320 			vis->count = inout_str_count(vmx, vcpu, vis->inout.rep);
2321 			vis->addrsize = inout_str_addrsize(inst_info);
2322 			inout_str_seginfo(vmx, vcpu, inst_info, in, vis);
2323 		}
2324 		break;
2325 	case EXIT_REASON_CPUID:
2326 		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1);
2327 		handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx);
2328 		break;
2329 	case EXIT_REASON_EXCEPTION:
2330 		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXCEPTION, 1);
2331 		intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2332 		KASSERT((intr_info & VMCS_INTR_VALID) != 0,
2333 		    ("VM exit interruption info invalid: %#x", intr_info));
2334 
2335 		intr_vec = intr_info & 0xff;
2336 		intr_type = intr_info & VMCS_INTR_T_MASK;
2337 
2338 		/*
2339 		 * If Virtual NMIs control is 1 and the VM-exit is due to a
2340 		 * fault encountered during the execution of IRET then we must
2341 		 * restore the state of "virtual-NMI blocking" before resuming
2342 		 * the guest.
2343 		 *
2344 		 * See "Resuming Guest Software after Handling an Exception".
2345 		 * See "Information for VM Exits Due to Vectored Events".
2346 		 */
2347 		if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
2348 		    (intr_vec != IDT_DF) &&
2349 		    (intr_info & EXIT_QUAL_NMIUDTI) != 0)
2350 			vmx_restore_nmi_blocking(vmx, vcpu);
2351 
2352 		/*
2353 		 * The NMI has already been handled in vmx_exit_handle_nmi().
2354 		 */
2355 		if (intr_type == VMCS_INTR_T_NMI)
2356 			return (1);
2357 
2358 		/*
2359 		 * Call the machine check handler by hand. Also don't reflect
2360 		 * the machine check back into the guest.
2361 		 */
2362 		if (intr_vec == IDT_MC) {
2363 			VCPU_CTR0(vmx->vm, vcpu, "Vectoring to MCE handler");
2364 			__asm __volatile("int $18");
2365 			return (1);
2366 		}
2367 
2368 		if (intr_vec == IDT_PF) {
2369 			error = vmxctx_setreg(vmxctx, VM_REG_GUEST_CR2, qual);
2370 			KASSERT(error == 0, ("%s: vmxctx_setreg(cr2) error %d",
2371 			    __func__, error));
2372 		}
2373 
2374 		/*
2375 		 * Software exceptions exhibit trap-like behavior. This in
2376 		 * turn requires populating the VM-entry instruction length
2377 		 * so that the %rip in the trap frame is past the INT3/INTO
2378 		 * instruction.
2379 		 */
2380 		if (intr_type == VMCS_INTR_T_SWEXCEPTION)
2381 			vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);
2382 
2383 		/* Reflect all other exceptions back into the guest */
2384 		errcode_valid = errcode = 0;
2385 		if (intr_info & VMCS_INTR_DEL_ERRCODE) {
2386 			errcode_valid = 1;
2387 			errcode = vmcs_read(VMCS_EXIT_INTR_ERRCODE);
2388 		}
2389 		VCPU_CTR2(vmx->vm, vcpu, "Reflecting exception %d/%#x into "
2390 		    "the guest", intr_vec, errcode);
2391 		error = vm_inject_exception(vmx->vm, vcpu, intr_vec,
2392 		    errcode_valid, errcode, 0);
2393 		KASSERT(error == 0, ("%s: vm_inject_exception error %d",
2394 		    __func__, error));
2395 		return (1);
2396 
2397 	case EXIT_REASON_EPT_FAULT:
2398 		/*
2399 		 * If 'gpa' lies within the address space allocated to guest
2400 		 * memory then this must be a nested page fault; otherwise it
2401 		 * must be an instruction that accesses MMIO space.
2402 		 */
2403 		gpa = vmcs_gpa();
2404 		if (vm_mem_allocated(vmx->vm, gpa) ||
2405 		    apic_access_fault(vmx, vcpu, gpa)) {
2406 			vmexit->exitcode = VM_EXITCODE_PAGING;
2407 			vmexit->inst_length = 0;
2408 			vmexit->u.paging.gpa = gpa;
2409 			vmexit->u.paging.fault_type = ept_fault_type(qual);
2410 			vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
2411 		} else if (ept_emulation_fault(qual)) {
2412 			vmexit_inst_emul(vmexit, gpa, vmcs_gla());
2413 			vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INST_EMUL, 1);
2414 		}
2415 		/*
2416 		 * If Virtual NMIs control is 1 and the VM-exit is due to an
2417 		 * EPT fault during the execution of IRET then we must restore
2418 		 * the state of "virtual-NMI blocking" before resuming.
2419 		 *
2420 		 * See description of "NMI unblocking due to IRET" in
2421 		 * "Exit Qualification for EPT Violations".
2422 		 */
2423 		if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
2424 		    (qual & EXIT_QUAL_NMIUDTI) != 0)
2425 			vmx_restore_nmi_blocking(vmx, vcpu);
2426 		break;
2427 	case EXIT_REASON_VIRTUALIZED_EOI:
2428 		vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI;
2429 		vmexit->u.ioapic_eoi.vector = qual & 0xFF;
2430 		vmexit->inst_length = 0;	/* trap-like */
2431 		break;
2432 	case EXIT_REASON_APIC_ACCESS:
2433 		handled = vmx_handle_apic_access(vmx, vcpu, vmexit);
2434 		break;
2435 	case EXIT_REASON_APIC_WRITE:
2436 		/*
2437 		 * APIC-write VM exit is trap-like so the %rip is already
2438 		 * pointing to the next instruction.
2439 		 */
2440 		vmexit->inst_length = 0;
2441 		vlapic = vm_lapic(vmx->vm, vcpu);
2442 		handled = vmx_handle_apic_write(vmx, vcpu, vlapic, qual);
2443 		break;
2444 	case EXIT_REASON_XSETBV:
2445 		handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit);
2446 		break;
2447 	case EXIT_REASON_MONITOR:
2448 		vmexit->exitcode = VM_EXITCODE_MONITOR;
2449 		break;
2450 	case EXIT_REASON_MWAIT:
2451 		vmexit->exitcode = VM_EXITCODE_MWAIT;
2452 		break;
2453 	default:
2454 		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1);
2455 		break;
2456 	}
2457 
2458 	if (handled) {
2459 		/*
2460 		 * It is possible that control is returned to userland
2461 		 * even though we were able to handle the VM exit in the
2462 		 * kernel.
2463 		 *
2464 		 * In such a case we want to make sure that the userland
2465 		 * restarts guest execution at the instruction *after*
2466 		 * the one we just processed. Therefore we update the
2467 		 * guest rip in the VMCS and in 'vmexit'.
2468 		 */
2469 		vmexit->rip += vmexit->inst_length;
2470 		vmexit->inst_length = 0;
2471 		vmcs_write(VMCS_GUEST_RIP, vmexit->rip);
2472 	} else {
2473 		if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
2474 			/*
2475 			 * If this VM exit was not claimed by anybody then
2476 			 * treat it as a generic VMX exit.
2477 			 */
2478 			vmexit->exitcode = VM_EXITCODE_VMX;
2479 			vmexit->u.vmx.status = VM_SUCCESS;
2480 			vmexit->u.vmx.inst_type = 0;
2481 			vmexit->u.vmx.inst_error = 0;
2482 		} else {
2483 			/*
2484 			 * The exitcode and collateral have been populated.
2485 			 * The VM exit will be processed further in userland.
2486 			 */
2487 		}
2488 	}
2489 	return (handled);
2490 }
2491 
2492 static __inline void
2493 vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit)
2494 {
2495 
2496 	KASSERT(vmxctx->inst_fail_status != VM_SUCCESS,
2497 	    ("vmx_exit_inst_error: invalid inst_fail_status %d",
2498 	    vmxctx->inst_fail_status));
2499 
2500 	vmexit->inst_length = 0;
2501 	vmexit->exitcode = VM_EXITCODE_VMX;
2502 	vmexit->u.vmx.status = vmxctx->inst_fail_status;
2503 	vmexit->u.vmx.inst_error = vmcs_instruction_error();
2504 	vmexit->u.vmx.exit_reason = ~0;
2505 	vmexit->u.vmx.exit_qualification = ~0;
2506 
2507 	switch (rc) {
2508 	case VMX_VMRESUME_ERROR:
2509 	case VMX_VMLAUNCH_ERROR:
2510 	case VMX_INVEPT_ERROR:
2511 		vmexit->u.vmx.inst_type = rc;
2512 		break;
2513 	default:
2514 		panic("vm_exit_inst_error: vmx_enter_guest returned %d", rc);
2515 	}
2516 }
2517 
2518 /*
2519  * If the NMI-exiting VM execution control is set to '1' then an NMI in
2520  * non-root operation causes a VM-exit. NMI blocking is in effect, so it is
2521  * sufficient to simply vector to the NMI handler via a software interrupt.
2522  * However, this must be done before maskable interrupts are enabled
2523  * otherwise the "iret" issued by an interrupt handler will incorrectly
2524  * clear NMI blocking.
2525  */
2526 static __inline void
2527 vmx_exit_handle_nmi(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
2528 {
2529 	uint32_t intr_info;
2530 
2531 	KASSERT((read_rflags() & PSL_I) == 0, ("interrupts enabled"));
2532 
2533 	if (vmexit->u.vmx.exit_reason != EXIT_REASON_EXCEPTION)
2534 		return;
2535 
2536 	intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2537 	KASSERT((intr_info & VMCS_INTR_VALID) != 0,
2538 	    ("VM exit interruption info invalid: %#x", intr_info));
2539 
2540 	if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) {
2541 		KASSERT((intr_info & 0xff) == IDT_NMI, ("VM exit due "
2542 		    "to NMI has invalid vector: %#x", intr_info));
2543 		VCPU_CTR0(vmx->vm, vcpuid, "Vectoring to NMI handler");
2544 		__asm __volatile("int $2");
2545 	}
2546 }
2547 
2548 static int
2549 vmx_run(void *arg, int vcpu, register_t rip, pmap_t pmap,
2550     void *rendezvous_cookie, void *suspend_cookie)
2551 {
2552 	int rc, handled, launched;
2553 	struct vmx *vmx;
2554 	struct vm *vm;
2555 	struct vmxctx *vmxctx;
2556 	struct vmcs *vmcs;
2557 	struct vm_exit *vmexit;
2558 	struct vlapic *vlapic;
2559 	uint32_t exit_reason;
2560 
2561 	vmx = arg;
2562 	vm = vmx->vm;
2563 	vmcs = &vmx->vmcs[vcpu];
2564 	vmxctx = &vmx->ctx[vcpu];
2565 	vlapic = vm_lapic(vm, vcpu);
2566 	vmexit = vm_exitinfo(vm, vcpu);
2567 	launched = 0;
2568 
2569 	KASSERT(vmxctx->pmap == pmap,
2570 	    ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap));
2571 
2572 	vmx_msr_guest_enter(vmx, vcpu);
2573 
2574 	VMPTRLD(vmcs);
2575 
2576 	/*
2577 	 * XXX
2578 	 * We do this every time because we may set up the virtual machine
2579 	 * from a different process than the one that actually runs it.
2580 	 *
2581 	 * If the life of a virtual machine was spent entirely in the context
2582 	 * of a single process we could do this once in vmx_vminit().
2583 	 */
2584 	vmcs_write(VMCS_HOST_CR3, rcr3());
2585 
2586 	vmcs_write(VMCS_GUEST_RIP, rip);
2587 	vmx_set_pcpu_defaults(vmx, vcpu, pmap);
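	/*
	 * Loop for as long as VM exits can be handled entirely in the
	 * kernel; any exit that requires userland attention breaks out.
	 */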
2588 	do {
2589 		KASSERT(vmcs_guest_rip() == rip, ("%s: vmcs guest rip mismatch "
2590 		    "%#lx/%#lx", __func__, vmcs_guest_rip(), rip));
2591 
2592 		handled = UNHANDLED;
2593 		/*
2594 		 * Interrupts are disabled from this point on until the
2595 		 * guest starts executing. This is done for the following
2596 		 * reasons:
2597 		 *
2598 		 * If an AST is asserted on this thread after the check below,
2599 		 * then the IPI_AST notification will not be lost, because it
2600 		 * will cause a VM exit due to external interrupt as soon as
2601 		 * the guest state is loaded.
2602 		 *
2603 		 * A posted interrupt after 'vmx_inject_interrupts()' will
2604 		 * not be "lost" because it will be held pending in the host
2605 		 * APIC because interrupts are disabled. The pending interrupt
2606 		 * will be recognized as soon as the guest state is loaded.
2607 		 *
2608 		 * The same reasoning applies to the IPI generated by
2609 		 * pmap_invalidate_ept().
2610 		 */
2611 		disable_intr();
2612 		vmx_inject_interrupts(vmx, vcpu, vlapic, rip);
2613 
2614 		/*
2615 		 * Check for vcpu suspension after injecting events because
2616 		 * vmx_inject_interrupts() can suspend the vcpu due to a
2617 		 * triple fault.
2618 		 */
2619 		if (vcpu_suspended(suspend_cookie)) {
2620 			enable_intr();
2621 			vm_exit_suspended(vmx->vm, vcpu, rip);
2622 			break;
2623 		}
2624 
2625 		if (vcpu_rendezvous_pending(rendezvous_cookie)) {
2626 			enable_intr();
2627 			vm_exit_rendezvous(vmx->vm, vcpu, rip);
2628 			break;
2629 		}
2630 
2631 		if (vcpu_should_yield(vm, vcpu)) {
2632 			enable_intr();
2633 			vm_exit_astpending(vmx->vm, vcpu, rip);
2634 			vmx_astpending_trace(vmx, vcpu, rip);
2635 			handled = HANDLED;
2636 			break;
2637 		}
2638 
2639 		vmx_run_trace(vmx, vcpu);
2640 		rc = vmx_enter_guest(vmxctx, vmx, launched);
2641 
2642 		/* Collect some information for VM exit processing */
2643 		vmexit->rip = rip = vmcs_guest_rip();
2644 		vmexit->inst_length = vmexit_instruction_length();
2645 		vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason();
2646 		vmexit->u.vmx.exit_qualification = vmcs_exit_qualification();
2647 
2648 		/* Update 'nextrip' */
2649 		vmx->state[vcpu].nextrip = rip;
2650 
2651 		if (rc == VMX_GUEST_VMEXIT) {
2652 			vmx_exit_handle_nmi(vmx, vcpu, vmexit);
2653 			enable_intr();
2654 			handled = vmx_exit_process(vmx, vcpu, vmexit);
2655 		} else {
2656 			enable_intr();
2657 			vmx_exit_inst_error(vmxctx, rc, vmexit);
2658 		}
2659 		launched = 1;
2660 		vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled);
2661 		rip = vmexit->rip;
2662 	} while (handled);
2663 
2664 	/*
2665 	 * If a VM exit has been handled then the exitcode must be BOGUS.
2666 	 * If a VM exit is not handled then the exitcode must not be BOGUS.
2667 	 */
2668 	if ((handled && vmexit->exitcode != VM_EXITCODE_BOGUS) ||
2669 	    (!handled && vmexit->exitcode == VM_EXITCODE_BOGUS)) {
2670 		panic("Mismatch between handled (%d) and exitcode (%d)",
2671 		      handled, vmexit->exitcode);
2672 	}
2673 
2674 	if (!handled)
2675 		vmm_stat_incr(vm, vcpu, VMEXIT_USERSPACE, 1);
2676 
2677 	VCPU_CTR1(vm, vcpu, "returning from vmx_run: exitcode %d",
2678 	    vmexit->exitcode);
2679 
2680 	VMCLEAR(vmcs);
2681 	vmx_msr_guest_exit(vmx, vcpu);
2682 
2683 	return (0);
2684 }
2685 
2686 static void
2687 vmx_vmcleanup(void *arg)
2688 {
2689 	int i;
2690 	struct vmx *vmx = arg;
2691 
2692 	if (apic_access_virtualization(vmx, 0))
2693 		vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
2694 
2695 	for (i = 0; i < VM_MAXCPU; i++)
2696 		vpid_free(vmx->state[i].vpid);
2697 
2698 	free(vmx, M_VMX);
2699 
2700 	return;
2701 }
2702 
2703 static register_t *
2704 vmxctx_regptr(struct vmxctx *vmxctx, int reg)
2705 {
2706 
2707 	switch (reg) {
2708 	case VM_REG_GUEST_RAX:
2709 		return (&vmxctx->guest_rax);
2710 	case VM_REG_GUEST_RBX:
2711 		return (&vmxctx->guest_rbx);
2712 	case VM_REG_GUEST_RCX:
2713 		return (&vmxctx->guest_rcx);
2714 	case VM_REG_GUEST_RDX:
2715 		return (&vmxctx->guest_rdx);
2716 	case VM_REG_GUEST_RSI:
2717 		return (&vmxctx->guest_rsi);
2718 	case VM_REG_GUEST_RDI:
2719 		return (&vmxctx->guest_rdi);
2720 	case VM_REG_GUEST_RBP:
2721 		return (&vmxctx->guest_rbp);
2722 	case VM_REG_GUEST_R8:
2723 		return (&vmxctx->guest_r8);
2724 	case VM_REG_GUEST_R9:
2725 		return (&vmxctx->guest_r9);
2726 	case VM_REG_GUEST_R10:
2727 		return (&vmxctx->guest_r10);
2728 	case VM_REG_GUEST_R11:
2729 		return (&vmxctx->guest_r11);
2730 	case VM_REG_GUEST_R12:
2731 		return (&vmxctx->guest_r12);
2732 	case VM_REG_GUEST_R13:
2733 		return (&vmxctx->guest_r13);
2734 	case VM_REG_GUEST_R14:
2735 		return (&vmxctx->guest_r14);
2736 	case VM_REG_GUEST_R15:
2737 		return (&vmxctx->guest_r15);
2738 	case VM_REG_GUEST_CR2:
2739 		return (&vmxctx->guest_cr2);
2740 	default:
2741 		break;
2742 	}
2743 	return (NULL);
2744 }
2745 
2746 static int
2747 vmxctx_getreg(struct vmxctx *vmxctx, int reg, uint64_t *retval)
2748 {
2749 	register_t *regp;
2750 
2751 	if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
2752 		*retval = *regp;
2753 		return (0);
2754 	} else
2755 		return (EINVAL);
2756 }
2757 
2758 static int
2759 vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val)
2760 {
2761 	register_t *regp;
2762 
2763 	if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
2764 		*regp = val;
2765 		return (0);
2766 	} else
2767 		return (EINVAL);
2768 }
2769 
2770 static int
2771 vmx_get_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t *retval)
2772 {
2773 	uint64_t gi;
2774 	int error;
2775 
2776 	error = vmcs_getreg(&vmx->vmcs[vcpu], running,
2777 	    VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY), &gi);
2778 	*retval = (gi & HWINTR_BLOCKING) ? 1 : 0;
2779 	return (error);
2780 }
2781 
2782 static int
2783 vmx_modify_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t val)
2784 {
2785 	struct vmcs *vmcs;
2786 	uint64_t gi;
2787 	int error, ident;
2788 
2789 	/*
2790 	 * Forcing the vcpu into an interrupt shadow is not supported.
2791 	 */
2792 	if (val) {
2793 		error = EINVAL;
2794 		goto done;
2795 	}
2796 
2797 	vmcs = &vmx->vmcs[vcpu];
2798 	ident = VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY);
2799 	error = vmcs_getreg(vmcs, running, ident, &gi);
2800 	if (error == 0) {
2801 		gi &= ~HWINTR_BLOCKING;
2802 		error = vmcs_setreg(vmcs, running, ident, gi);
2803 	}
2804 done:
2805 	VCPU_CTR2(vmx->vm, vcpu, "Setting intr_shadow to %#lx %s", val,
2806 	    error ? "failed" : "succeeded");
2807 	return (error);
2808 }
2809 
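/*
 * Map a guest control register to its VMCS read shadow, or -1 if the
 * register has no shadow. Guest reads of CR0/CR4 bits owned by the host
 * (per the guest/host masks) return the shadow value.
 */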
2810 static int
2811 vmx_shadow_reg(int reg)
2812 {
2813 	int shreg;
2814 
2815 	shreg = -1;
2816 
2817 	switch (reg) {
2818 	case VM_REG_GUEST_CR0:
2819 		shreg = VMCS_CR0_SHADOW;
2820 		break;
2821 	case VM_REG_GUEST_CR4:
2822 		shreg = VMCS_CR4_SHADOW;
2823 		break;
2824 	default:
2825 		break;
2826 	}
2827 
2828 	return (shreg);
2829 }
2830 
2831 static int
2832 vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval)
2833 {
2834 	int running, hostcpu;
2835 	struct vmx *vmx = arg;
2836 
2837 	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
2838 	if (running && hostcpu != curcpu)
2839 		panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu);
2840 
2841 	if (reg == VM_REG_GUEST_INTR_SHADOW)
2842 		return (vmx_get_intr_shadow(vmx, vcpu, running, retval));
2843 
2844 	if (vmxctx_getreg(&vmx->ctx[vcpu], reg, retval) == 0)
2845 		return (0);
2846 
2847 	return (vmcs_getreg(&vmx->vmcs[vcpu], running, reg, retval));
2848 }
2849 
2850 static int
2851 vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
2852 {
2853 	int error, hostcpu, running, shadow;
2854 	uint64_t ctls;
2855 	pmap_t pmap;
2856 	struct vmx *vmx = arg;
2857 
2858 	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
2859 	if (running && hostcpu != curcpu)
2860 		panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu);
2861 
2862 	if (reg == VM_REG_GUEST_INTR_SHADOW)
2863 		return (vmx_modify_intr_shadow(vmx, vcpu, running, val));
2864 
2865 	if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0)
2866 		return (0);
2867 
2868 	error = vmcs_setreg(&vmx->vmcs[vcpu], running, reg, val);
2869 
2870 	if (error == 0) {
2871 		/*
2872 		 * If the "load EFER" VM-entry control is 1 then the
2873 		 * value of EFER.LMA must be identical to the "IA-32e mode
2874 		 * guest" bit in the VM-entry controls.
2875 		 */
2876 		if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0 &&
2877 		    (reg == VM_REG_GUEST_EFER)) {
2878 			vmcs_getreg(&vmx->vmcs[vcpu], running,
2879 				    VMCS_IDENT(VMCS_ENTRY_CTLS), &ctls);
2880 			if (val & EFER_LMA)
2881 				ctls |= VM_ENTRY_GUEST_LMA;
2882 			else
2883 				ctls &= ~VM_ENTRY_GUEST_LMA;
2884 			vmcs_setreg(&vmx->vmcs[vcpu], running,
2885 				    VMCS_IDENT(VMCS_ENTRY_CTLS), ctls);
2886 		}
2887 
2888 		shadow = vmx_shadow_reg(reg);
2889 		if (shadow > 0) {
2890 			/*
2891 			 * Store the unmodified value in the shadow
2892 			 */
2893 			error = vmcs_setreg(&vmx->vmcs[vcpu], running,
2894 				    VMCS_IDENT(shadow), val);
2895 		}
2896 
2897 		if (reg == VM_REG_GUEST_CR3) {
2898 			/*
2899 			 * Invalidate the guest vcpu's TLB mappings to emulate
2900 			 * the behavior of updating %cr3.
2901 			 *
2902 			 * XXX the processor retains global mappings when %cr3
2903 			 * is updated but vmx_invvpid() does not.
2904 			 */
2905 			pmap = vmx->ctx[vcpu].pmap;
2906 			vmx_invvpid(vmx, vcpu, pmap, running);
2907 		}
2908 	}
2909 
2910 	return (error);
2911 }
2912 
2913 static int
2914 vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
2915 {
2916 	int hostcpu, running;
2917 	struct vmx *vmx = arg;
2918 
2919 	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
2920 	if (running && hostcpu != curcpu)
2921 		panic("vmx_getdesc: %s%d is running", vm_name(vmx->vm), vcpu);
2922 
2923 	return (vmcs_getdesc(&vmx->vmcs[vcpu], running, reg, desc));
2924 }
2925 
2926 static int
2927 vmx_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
2928 {
2929 	int hostcpu, running;
2930 	struct vmx *vmx = arg;
2931 
2932 	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
2933 	if (running && hostcpu != curcpu)
2934 		panic("vmx_setdesc: %s%d is running", vm_name(vmx->vm), vcpu);
2935 
2936 	return (vmcs_setdesc(&vmx->vmcs[vcpu], running, reg, desc));
2937 }
2938 
2939 static int
2940 vmx_getcap(void *arg, int vcpu, int type, int *retval)
2941 {
2942 	struct vmx *vmx = arg;
2943 	int vcap;
2944 	int ret;
2945 
2946 	ret = ENOENT;
2947 
2948 	vcap = vmx->cap[vcpu].set;
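	/* 'set' is a bitmask of enabled capabilities indexed by VM_CAP_* */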
2949 
2950 	switch (type) {
2951 	case VM_CAP_HALT_EXIT:
2952 		if (cap_halt_exit)
2953 			ret = 0;
2954 		break;
2955 	case VM_CAP_PAUSE_EXIT:
2956 		if (cap_pause_exit)
2957 			ret = 0;
2958 		break;
2959 	case VM_CAP_MTRAP_EXIT:
2960 		if (cap_monitor_trap)
2961 			ret = 0;
2962 		break;
2963 	case VM_CAP_UNRESTRICTED_GUEST:
2964 		if (cap_unrestricted_guest)
2965 			ret = 0;
2966 		break;
2967 	case VM_CAP_ENABLE_INVPCID:
2968 		if (cap_invpcid)
2969 			ret = 0;
2970 		break;
2971 	default:
2972 		break;
2973 	}
2974 
2975 	if (ret == 0)
2976 		*retval = (vcap & (1 << type)) ? 1 : 0;
2977 
2978 	return (ret);
2979 }
2980 
2981 static int
2982 vmx_setcap(void *arg, int vcpu, int type, int val)
2983 {
2984 	struct vmx *vmx = arg;
2985 	struct vmcs *vmcs = &vmx->vmcs[vcpu];
2986 	uint32_t baseval;
2987 	uint32_t *pptr;
2988 	int error;
2989 	int flag;
2990 	int reg;
2991 	int retval;
2992 
2993 	retval = ENOENT;
2994 	pptr = NULL;
2995 
2996 	switch (type) {
2997 	case VM_CAP_HALT_EXIT:
2998 		if (cap_halt_exit) {
2999 			retval = 0;
3000 			pptr = &vmx->cap[vcpu].proc_ctls;
3001 			baseval = *pptr;
3002 			flag = PROCBASED_HLT_EXITING;
3003 			reg = VMCS_PRI_PROC_BASED_CTLS;
3004 		}
3005 		break;
3006 	case VM_CAP_MTRAP_EXIT:
3007 		if (cap_monitor_trap) {
3008 			retval = 0;
3009 			pptr = &vmx->cap[vcpu].proc_ctls;
3010 			baseval = *pptr;
3011 			flag = PROCBASED_MTF;
3012 			reg = VMCS_PRI_PROC_BASED_CTLS;
3013 		}
3014 		break;
3015 	case VM_CAP_PAUSE_EXIT:
3016 		if (cap_pause_exit) {
3017 			retval = 0;
3018 			pptr = &vmx->cap[vcpu].proc_ctls;
3019 			baseval = *pptr;
3020 			flag = PROCBASED_PAUSE_EXITING;
3021 			reg = VMCS_PRI_PROC_BASED_CTLS;
3022 		}
3023 		break;
3024 	case VM_CAP_UNRESTRICTED_GUEST:
3025 		if (cap_unrestricted_guest) {
3026 			retval = 0;
3027 			pptr = &vmx->cap[vcpu].proc_ctls2;
3028 			baseval = *pptr;
3029 			flag = PROCBASED2_UNRESTRICTED_GUEST;
3030 			reg = VMCS_SEC_PROC_BASED_CTLS;
3031 		}
3032 		break;
3033 	case VM_CAP_ENABLE_INVPCID:
3034 		if (cap_invpcid) {
3035 			retval = 0;
3036 			pptr = &vmx->cap[vcpu].proc_ctls2;
3037 			baseval = *pptr;
3038 			flag = PROCBASED2_ENABLE_INVPCID;
3039 			reg = VMCS_SEC_PROC_BASED_CTLS;
3040 		}
3041 		break;
3042 	default:
3043 		break;
3044 	}
3045 
3046 	if (retval == 0) {
3047 		if (val) {
3048 			baseval |= flag;
3049 		} else {
3050 			baseval &= ~flag;
3051 		}
3052 		VMPTRLD(vmcs);
3053 		error = vmwrite(reg, baseval);
3054 		VMCLEAR(vmcs);
3055 
3056 		if (error) {
3057 			retval = error;
3058 		} else {
3059 			/*
3060 			 * Update the optional stored flags and record the
3061 			 * new setting.
3062 			 */
3063 			if (pptr != NULL) {
3064 				*pptr = baseval;
3065 			}
3066 
3067 			if (val) {
3068 				vmx->cap[vcpu].set |= (1 << type);
3069 			} else {
3070 				vmx->cap[vcpu].set &= ~(1 << type);
3071 			}
3072 		}
3073 	}
3074 
3075 	return (retval);
3076 }
3077 
3078 struct vlapic_vtx {
3079 	struct vlapic	vlapic;
3080 	struct pir_desc	*pir_desc;
3081 	struct vmx	*vmx;
3082 };
3083 
3084 #define	VMX_CTR_PIR(vm, vcpuid, pir_desc, notify, vector, level, msg)	\
3085 do {									\
3086 	VCPU_CTR2(vm, vcpuid, msg " assert %s-triggered vector %d",	\
3087 	    level ? "level" : "edge", vector);				\
3088 	VCPU_CTR1(vm, vcpuid, msg " pir0 0x%016lx", pir_desc->pir[0]);	\
3089 	VCPU_CTR1(vm, vcpuid, msg " pir1 0x%016lx", pir_desc->pir[1]);	\
3090 	VCPU_CTR1(vm, vcpuid, msg " pir2 0x%016lx", pir_desc->pir[2]);	\
3091 	VCPU_CTR1(vm, vcpuid, msg " pir3 0x%016lx", pir_desc->pir[3]);	\
3092 	VCPU_CTR1(vm, vcpuid, msg " notify: %s", notify ? "yes" : "no");\
3093 } while (0)
3094 
3095 /*
3096  * vlapic->ops handlers that utilize the APICv hardware assist described in
3097  * Chapter 29 of the Intel SDM.
3098  */
3099 static int
3100 vmx_set_intr_ready(struct vlapic *vlapic, int vector, bool level)
3101 {
3102 	struct vlapic_vtx *vlapic_vtx;
3103 	struct pir_desc *pir_desc;
3104 	uint64_t mask;
3105 	int idx, notify;
3106 
3107 	vlapic_vtx = (struct vlapic_vtx *)vlapic;
3108 	pir_desc = vlapic_vtx->pir_desc;
3109 
3110 	/*
3111 	 * Keep track of interrupt requests in the PIR descriptor. This is
3112 	 * because the virtual APIC page pointed to by the VMCS cannot be
3113 	 * modified if the vcpu is running.
3114 	 */
3115 	idx = vector / 64;
3116 	mask = 1UL << (vector % 64);
3117 	atomic_set_long(&pir_desc->pir[idx], mask);
3118 	notify = atomic_cmpset_long(&pir_desc->pending, 0, 1);
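	/*
	 * A notification is needed only on a 0->1 transition of the
	 * 'pending' bit; vectors posted while it is already set will be
	 * picked up by the notification that is still outstanding.
	 */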
3119 
3120 	VMX_CTR_PIR(vlapic->vm, vlapic->vcpuid, pir_desc, notify, vector,
3121 	    level, "vmx_set_intr_ready");
3122 	return (notify);
3123 }
3124 
3125 static int
3126 vmx_pending_intr(struct vlapic *vlapic, int *vecptr)
3127 {
3128 	struct vlapic_vtx *vlapic_vtx;
3129 	struct pir_desc *pir_desc;
3130 	struct LAPIC *lapic;
3131 	uint64_t pending, pirval;
3132 	uint32_t ppr, vpr;
3133 	int i;
3134 
3135 	/*
3136 	 * This function is only expected to be called from the 'HLT' exit
3137 	 * handler, which does not care about the vector that is pending.
3138 	 */
3139 	KASSERT(vecptr == NULL, ("vmx_pending_intr: vecptr must be NULL"));
3140 
3141 	vlapic_vtx = (struct vlapic_vtx *)vlapic;
3142 	pir_desc = vlapic_vtx->pir_desc;
3143 
3144 	pending = atomic_load_acq_long(&pir_desc->pending);
3145 	if (!pending)
3146 		return (0);	/* common case */
3147 
3148 	/*
3149 	 * If there is an interrupt pending then it will be recognized only
3150 	 * if its priority is greater than the processor priority.
3151 	 *
3152 	 * Special case: if the processor priority is zero then any pending
3153 	 * interrupt will be recognized.
3154 	 */
3155 	lapic = vlapic->apic_page;
3156 	ppr = lapic->ppr & 0xf0;
3157 	if (ppr == 0)
3158 		return (1);
3159 
3160 	VCPU_CTR1(vlapic->vm, vlapic->vcpuid, "HLT with non-zero PPR %d",
3161 	    lapic->ppr);
3162 
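	/*
	 * Scan the PIR from the highest 64-bit group down: the first
	 * non-zero word contains the highest-priority pending vector,
	 * whose priority class (upper nibble) is compared to the PPR.
	 */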
3163 	for (i = 3; i >= 0; i--) {
3164 		pirval = pir_desc->pir[i];
3165 		if (pirval != 0) {
3166 			vpr = (i * 64 + flsl(pirval) - 1) & 0xf0;
3167 			return (vpr > ppr);
3168 		}
3169 	}
3170 	return (0);
3171 }
3172 
3173 static void
3174 vmx_intr_accepted(struct vlapic *vlapic, int vector)
3175 {
3176 
3177 	panic("vmx_intr_accepted: not expected to be called");
3178 }
3179 
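/*
 * Level-triggered vectors have their bit set in the EOI-exit bitmap so
 * that the guest's EOI for them triggers a virtualized-EOI VM exit,
 * which is surfaced above as VM_EXITCODE_IOAPIC_EOI.
 */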
3180 static void
3181 vmx_set_tmr(struct vlapic *vlapic, int vector, bool level)
3182 {
3183 	struct vlapic_vtx *vlapic_vtx;
3184 	struct vmx *vmx;
3185 	struct vmcs *vmcs;
3186 	uint64_t mask, val;
3187 
3188 	KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d", vector));
3189 	KASSERT(!vcpu_is_running(vlapic->vm, vlapic->vcpuid, NULL),
3190 	    ("vmx_set_tmr: vcpu cannot be running"));
3191 
3192 	vlapic_vtx = (struct vlapic_vtx *)vlapic;
3193 	vmx = vlapic_vtx->vmx;
3194 	vmcs = &vmx->vmcs[vlapic->vcpuid];
3195 	mask = 1UL << (vector % 64);
3196 
3197 	VMPTRLD(vmcs);
3198 	val = vmcs_read(VMCS_EOI_EXIT(vector));
3199 	if (level)
3200 		val |= mask;
3201 	else
3202 		val &= ~mask;
3203 	vmcs_write(VMCS_EOI_EXIT(vector), val);
3204 	VMCLEAR(vmcs);
3205 }
3206 
3207 static void
3208 vmx_enable_x2apic_mode(struct vlapic *vlapic)
3209 {
3210 	struct vmx *vmx;
3211 	struct vmcs *vmcs;
3212 	uint32_t proc_ctls2;
3213 	int vcpuid, error;
3214 
3215 	vcpuid = vlapic->vcpuid;
3216 	vmx = ((struct vlapic_vtx *)vlapic)->vmx;
3217 	vmcs = &vmx->vmcs[vcpuid];
3218 
3219 	proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
3220 	KASSERT((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) != 0,
3221 	    ("%s: invalid proc_ctls2 %#x", __func__, proc_ctls2));
3222 
3223 	proc_ctls2 &= ~PROCBASED2_VIRTUALIZE_APIC_ACCESSES;
3224 	proc_ctls2 |= PROCBASED2_VIRTUALIZE_X2APIC_MODE;
3225 	vmx->cap[vcpuid].proc_ctls2 = proc_ctls2;
3226 
3227 	VMPTRLD(vmcs);
3228 	vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc_ctls2);
3229 	VMCLEAR(vmcs);
3230 
3231 	if (vlapic->vcpuid == 0) {
3232 		/*
3233 		 * The nested page table mappings are shared by all vcpus
3234 		 * so unmap the APIC access page just once.
3235 		 */
3236 		error = vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
3237 		KASSERT(error == 0, ("%s: vm_unmap_mmio error %d",
3238 		    __func__, error));
3239 
3240 		/*
3241 		 * The MSR bitmap is shared by all vcpus so modify it only
3242 		 * once in the context of vcpu 0.
3243 		 */
3244 		error = vmx_allow_x2apic_msrs(vmx);
3245 		KASSERT(error == 0, ("%s: vmx_allow_x2apic_msrs error %d",
3246 		    __func__, error));
3247 	}
3248 }
3249 
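/*
 * Notify a vcpu running on another host cpu that a posted interrupt is
 * pending by sending it the posted-interrupt notification IPI.
 */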
3250 static void
3251 vmx_post_intr(struct vlapic *vlapic, int hostcpu)
3252 {
3253 
3254 	ipi_cpu(hostcpu, pirvec);
3255 }
3256 
3257 /*
3258  * Transfer the pending interrupts in the PIR descriptor to the IRR
3259  * in the virtual APIC page.
3260  */
3261 static void
3262 vmx_inject_pir(struct vlapic *vlapic)
3263 {
3264 	struct vlapic_vtx *vlapic_vtx;
3265 	struct pir_desc *pir_desc;
3266 	struct LAPIC *lapic;
3267 	uint64_t val, pirval;
3268 	int rvi, pirbase = -1;
3269 	uint16_t intr_status_old, intr_status_new;
3270 
3271 	vlapic_vtx = (struct vlapic_vtx *)vlapic;
3272 	pir_desc = vlapic_vtx->pir_desc;
3273 	if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) {
3274 		VCPU_CTR0(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: "
3275 		    "no posted interrupt pending");
3276 		return;
3277 	}
3278 
3279 	pirval = 0;
3280 	pirbase = -1;
3281 	lapic = vlapic->apic_page;
3282 
3283 	val = atomic_readandclear_long(&pir_desc->pir[0]);
3284 	if (val != 0) {
3285 		lapic->irr0 |= val;
3286 		lapic->irr1 |= val >> 32;
3287 		pirbase = 0;
3288 		pirval = val;
3289 	}
3290 
3291 	val = atomic_readandclear_long(&pir_desc->pir[1]);
3292 	if (val != 0) {
3293 		lapic->irr2 |= val;
3294 		lapic->irr3 |= val >> 32;
3295 		pirbase = 64;
3296 		pirval = val;
3297 	}
3298 
3299 	val = atomic_readandclear_long(&pir_desc->pir[2]);
3300 	if (val != 0) {
3301 		lapic->irr4 |= val;
3302 		lapic->irr5 |= val >> 32;
3303 		pirbase = 128;
3304 		pirval = val;
3305 	}
3306 
3307 	val = atomic_readandclear_long(&pir_desc->pir[3]);
3308 	if (val != 0) {
3309 		lapic->irr6 |= val;
3310 		lapic->irr7 |= val >> 32;
3311 		pirbase = 192;
3312 		pirval = val;
3313 	}
3314 
3315 	VLAPIC_CTR_IRR(vlapic, "vmx_inject_pir");
3316 
3317 	/*
3318 	 * Update RVI so the processor can evaluate pending virtual
3319 	 * interrupts on VM-entry.
3320 	 *
3321 	 * It is possible for pirval to be 0 here, even though the
3322 	 * pending bit has been set. The scenario is:
3323 	 * CPU-Y is sending a posted interrupt to CPU-X, which
3324 	 * is running a guest and processing posted interrupts in h/w.
3325 	 * CPU-X will eventually exit and the state seen in s/w is
3326 	 * the pending bit set, but no PIR bits set.
3327 	 *
3328 	 *      CPU-X                      CPU-Y
3329 	 *   (vm running)                (host running)
3330 	 *   rx posted interrupt
3331 	 *   CLEAR pending bit
3332 	 *				 SET PIR bit
3333 	 *   READ/CLEAR PIR bits
3334 	 *				 SET pending bit
3335 	 *   (vm exit)
3336 	 *   pending bit set, PIR 0
3337 	 */
3338 	if (pirval != 0) {
3339 		rvi = pirbase + flsl(pirval) - 1;
3340 		intr_status_old = vmcs_read(VMCS_GUEST_INTR_STATUS);
3341 		intr_status_new = (intr_status_old & 0xFF00) | rvi;
3342 		if (intr_status_new > intr_status_old) {
3343 			vmcs_write(VMCS_GUEST_INTR_STATUS, intr_status_new);
3344 			VCPU_CTR2(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: "
3345 			    "guest_intr_status changed from 0x%04x to 0x%04x",
3346 			    intr_status_old, intr_status_new);
3347 		}
3348 	}
3349 }
3350 
3351 static struct vlapic *
3352 vmx_vlapic_init(void *arg, int vcpuid)
3353 {
3354 	struct vmx *vmx;
3355 	struct vlapic *vlapic;
3356 	struct vlapic_vtx *vlapic_vtx;
3357 
3358 	vmx = arg;
3359 
3360 	vlapic = malloc(sizeof(struct vlapic_vtx), M_VLAPIC, M_WAITOK | M_ZERO);
3361 	vlapic->vm = vmx->vm;
3362 	vlapic->vcpuid = vcpuid;
3363 	vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid];
3364 
3365 	vlapic_vtx = (struct vlapic_vtx *)vlapic;
3366 	vlapic_vtx->pir_desc = &vmx->pir_desc[vcpuid];
3367 	vlapic_vtx->vmx = vmx;
3368 
3369 	if (virtual_interrupt_delivery) {
3370 		vlapic->ops.set_intr_ready = vmx_set_intr_ready;
3371 		vlapic->ops.pending_intr = vmx_pending_intr;
3372 		vlapic->ops.intr_accepted = vmx_intr_accepted;
3373 		vlapic->ops.set_tmr = vmx_set_tmr;
3374 		vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode;
3375 	}
3376 
3377 	if (posted_interrupts)
3378 		vlapic->ops.post_intr = vmx_post_intr;
3379 
3380 	vlapic_init(vlapic);
3381 
3382 	return (vlapic);
3383 }
3384 
3385 static void
3386 vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic)
3387 {
3388 
3389 	vlapic_cleanup(vlapic);
3390 	free(vlapic, M_VLAPIC);
3391 }
3392 
3393 struct vmm_ops vmm_ops_intel = {
3394 	vmx_init,
3395 	vmx_cleanup,
3396 	vmx_restore,
3397 	vmx_vminit,
3398 	vmx_run,
3399 	vmx_vmcleanup,
3400 	vmx_getreg,
3401 	vmx_setreg,
3402 	vmx_getdesc,
3403 	vmx_setdesc,
3404 	vmx_getcap,
3405 	vmx_setcap,
3406 	ept_vmspace_alloc,
3407 	ept_vmspace_free,
3408 	vmx_vlapic_init,
3409 	vmx_vlapic_cleanup,
3410 };
3411