1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com)
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
12 * disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /*
30 * This file and its contents are supplied under the terms of the
31 * Common Development and Distribution License ("CDDL"), version 1.0.
32 * You may only use this file in accordance with the terms of version
33 * 1.0 of the CDDL.
34 *
35 * A full copy of the text of the CDDL should have accompanied this
36 * source. A copy of the CDDL is also available via the Internet at
37 * http://www.illumos.org/license/CDDL.
38 *
39 * Copyright 2018 Joyent, Inc.
40 * Copyright 2023 Oxide Computer Company
41 */
42
43 #include <sys/cdefs.h>
44
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/kernel.h>
48 #include <sys/kmem.h>
49 #include <sys/pcpu.h>
50 #include <sys/proc.h>
51 #include <sys/sysctl.h>
52
53 #include <sys/x86_archext.h>
54 #include <sys/trap.h>
55
56 #include <machine/cpufunc.h>
57 #include <machine/psl.h>
58 #include <machine/md_var.h>
59 #include <machine/reg.h>
60 #include <machine/specialreg.h>
61 #include <machine/vmm.h>
62 #include <machine/vmm_dev.h>
63 #include <sys/vmm_instruction_emul.h>
64 #include <sys/vmm_vm.h>
65 #include <sys/vmm_kernel.h>
66
67 #include "vmm_lapic.h"
68 #include "vmm_stat.h"
69 #include "vmm_ioport.h"
70 #include "vatpic.h"
71 #include "vlapic.h"
72 #include "vlapic_priv.h"
73
74 #include "vmcb.h"
75 #include "svm.h"
76 #include "svm_softc.h"
77 #include "svm_msr.h"
78
79 SYSCTL_DECL(_hw_vmm);
80 SYSCTL_NODE(_hw_vmm, OID_AUTO, svm, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
81 NULL);
82
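/*
 * VMCB fields which, by default, the CPU is permitted to treat as "clean"
 * (cached) across VMRUNs; svm_set_dirty() is used elsewhere in this file to
 * drop a field from the clean set whenever its in-memory copy is modified.
 */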
83 #define VMCB_CACHE_DEFAULT (VMCB_CACHE_ASID | \
84 VMCB_CACHE_IOPM | \
85 VMCB_CACHE_I | \
86 VMCB_CACHE_TPR | \
87 VMCB_CACHE_CR2 | \
88 VMCB_CACHE_CR | \
89 VMCB_CACHE_DR | \
90 VMCB_CACHE_DT | \
91 VMCB_CACHE_SEG | \
92 VMCB_CACHE_NP)
93
94 /*
95 * Guardrails for supported guest TSC frequencies.
96 *
97 * A minimum of 0.5 GHz, which should be sufficient for all recent AMD CPUs, and
98 * a maximum ratio of (15 * host frequency), which is sufficient to prevent
99 * overflowing frequency calculations and to give plenty of headroom for future
100 * CPU frequency increases.
101 */
102 #define AMD_TSC_MIN_FREQ 500000000
103 #define AMD_TSC_MAX_FREQ_RATIO 15
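/*
 * For example, with a 2.0 GHz host TSC these guardrails permit guest TSC
 * frequencies between 500 MHz and 30 GHz.
 */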
104
105 static uint32_t vmcb_clean = VMCB_CACHE_DEFAULT;
106
107 /* SVM features advertised by CPUID.8000000AH:EDX */
108 static uint32_t svm_feature = 0;
109
110 static int disable_npf_assist;
111
112 static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery");
113 static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry");
114 static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window");
115
116 static int svm_setreg(void *arg, int vcpu, int ident, uint64_t val);
117 static int svm_getreg(void *arg, int vcpu, int ident, uint64_t *val);
118 static void flush_asid(struct svm_softc *sc, int vcpuid);
119
120 static __inline bool
121 has_flush_by_asid(void)
122 {
123 return ((svm_feature & CPUID_AMD_EDX_FLUSH_ASID) != 0);
124 }
125
126 static __inline bool
127 has_lbr_virt(void)
128 {
129 return ((svm_feature & CPUID_AMD_EDX_LBR_VIRT) != 0);
130 }
131
132 static __inline bool
133 has_decode_assist(void)
134 {
135 return ((svm_feature & CPUID_AMD_EDX_DECODE_ASSISTS) != 0);
136 }
137
138 static __inline bool
139 has_tsc_freq_ctl(void)
140 {
141 return ((svm_feature & CPUID_AMD_EDX_TSC_RATE_MSR) != 0);
142 }
143
144 static int
145 svm_cleanup(void)
146 {
147 /* This is taken care of by the hma registration */
148 return (0);
149 }
150
151 static int
152 svm_init(void)
153 {
154 /* Grab a (bhyve) local copy of the SVM feature bits */
155 struct cpuid_regs regs = {
156 .cp_eax = 0x8000000a,
157 };
158 (void) cpuid_insn(NULL, &regs);
159 svm_feature = regs.cp_edx;
160
161 /*
162 * HMA should have already checked for these features, which we refuse to
163 * operate without, but there is no harm in making sure.
164 */
165 const uint32_t demand_bits =
166 (CPUID_AMD_EDX_NESTED_PAGING | CPUID_AMD_EDX_NRIPS);
167 VERIFY((svm_feature & demand_bits) == demand_bits);
168
169 /* Clear any unexpected bits (set manually) from vmcb_clean */
170 vmcb_clean &= VMCB_CACHE_DEFAULT;
171
172 return (0);
173 }
174
175 static void
176 svm_restore(void)
177 {
178 /* No-op on illumos */
179 }
180
181 /* Pentium compatible MSRs */
182 #define MSR_PENTIUM_START 0
183 #define MSR_PENTIUM_END 0x1FFF
184 /* AMD 6th generation and Intel compatible MSRs */
185 #define MSR_AMD6TH_START 0xC0000000UL
186 #define MSR_AMD6TH_END 0xC0001FFFUL
187 /* AMD 7th and 8th generation compatible MSRs */
188 #define MSR_AMD7TH_START 0xC0010000UL
189 #define MSR_AMD7TH_END 0xC0011FFFUL
190
191 /*
192 * Get the index and bit position for an MSR in the permission bitmap.
193 * Two bits are used for each MSR: lower bit for read and higher bit for write.
194 */
195 static int
196 svm_msr_index(uint64_t msr, int *index, int *bit)
197 {
198 uint32_t base, off;
199
200 *index = -1;
201 *bit = (msr % 4) * 2;
202 base = 0;
203
204 if (msr <= MSR_PENTIUM_END) {
205 *index = msr / 4;
206 return (0);
207 }
208
209 base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1);
210 if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) {
211 off = (msr - MSR_AMD6TH_START);
212 *index = (off + base) / 4;
213 return (0);
214 }
215
216 base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1);
217 if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) {
218 off = (msr - MSR_AMD7TH_START);
219 *index = (off + base) / 4;
220 return (0);
221 }
222
223 return (EINVAL);
224 }
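/*
 * Worked example: MSR_EFER (0xC0000080) falls in the AMD 6th-generation
 * range, so off = 0x80 and base = 0x2000, yielding *index = 0x820 and
 * *bit = 0: bit 0 of that bitmap byte covers reads and bit 1 covers writes.
 */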
225
226 /*
227 * Allow vcpu to read or write the 'msr' without trapping into the hypervisor.
228 */
229 static void
230 svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write)
231 {
232 int index, bit, error;
233
234 error = svm_msr_index(msr, &index, &bit);
235 KASSERT(error == 0, ("%s: invalid msr %lx", __func__, msr));
236 KASSERT(index >= 0 && index < SVM_MSR_BITMAP_SIZE,
237 ("%s: invalid index %d for msr %lx", __func__, index, msr));
238 KASSERT(bit >= 0 && bit <= 6, ("%s: invalid bit position %d "
239 "msr %lx", __func__, bit, msr));
240
241 if (read)
242 perm_bitmap[index] &= ~(1UL << bit);
243
244 if (write)
245 perm_bitmap[index] &= ~(2UL << bit);
246 }
247
248 static void
249 svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr)
250 {
251
252 svm_msr_perm(perm_bitmap, msr, true, true);
253 }
254
255 static void
256 svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr)
257 {
258
259 svm_msr_perm(perm_bitmap, msr, true, false);
260 }
261
262 static __inline int
263 svm_get_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask)
264 {
265 struct vmcb_ctrl *ctrl;
266
267 KASSERT(idx >= 0 && idx < 5, ("invalid intercept index %d", idx));
268
269 ctrl = svm_get_vmcb_ctrl(sc, vcpu);
270 return (ctrl->intercept[idx] & bitmask ? 1 : 0);
271 }
272
273 static __inline void
274 svm_set_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask,
275 int enabled)
276 {
277 struct vmcb_ctrl *ctrl;
278 uint32_t oldval;
279
280 KASSERT(idx >= 0 && idx < 5, ("invalid intercept index %d", idx));
281
282 ctrl = svm_get_vmcb_ctrl(sc, vcpu);
283 oldval = ctrl->intercept[idx];
284
285 if (enabled)
286 ctrl->intercept[idx] |= bitmask;
287 else
288 ctrl->intercept[idx] &= ~bitmask;
289
290 if (ctrl->intercept[idx] != oldval) {
291 svm_set_dirty(sc, vcpu, VMCB_CACHE_I);
292 }
293 }
294
295 static __inline void
296 svm_disable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
297 {
298
299 svm_set_intercept(sc, vcpu, off, bitmask, 0);
300 }
301
302 static __inline void
303 svm_enable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
304 {
305
306 svm_set_intercept(sc, vcpu, off, bitmask, 1);
307 }
308
309 static void
310 vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iopm_base_pa,
311 uint64_t msrpm_base_pa, uint64_t np_pml4)
312 {
313 struct vmcb_ctrl *ctrl;
314 struct vmcb_state *state;
315 uint32_t mask;
316 int n;
317
318 ctrl = svm_get_vmcb_ctrl(sc, vcpu);
319 state = svm_get_vmcb_state(sc, vcpu);
320
321 ctrl->iopm_base_pa = iopm_base_pa;
322 ctrl->msrpm_base_pa = msrpm_base_pa;
323
324 /* Enable nested paging */
325 ctrl->np_ctrl = NP_ENABLE;
326 ctrl->n_cr3 = np_pml4;
327
328 /*
329 * Intercept accesses to the control registers that are not shadowed
330 * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8.
331 */
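/*
 * In the CR intercept word, BIT(n) is the read intercept for CRn and
 * BIT(n) << 16 is the corresponding write intercept, so the mask below
 * toggles both at once.
 */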
332 for (n = 0; n < 16; n++) {
333 mask = (BIT(n) << 16) | BIT(n);
334 if (n == 0 || n == 2 || n == 3 || n == 4 || n == 8)
335 svm_disable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
336 else
337 svm_enable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
338 }
339
340 /*
341 * Selectively intercept writes to %cr0. This triggers on operations
342 * which would change bits other than TS or MP.
343 */
344 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
345 VMCB_INTCPT_CR0_WRITE);
346
347 /*
348 * Intercept everything when tracing guest exceptions, otherwise
349 * just intercept the machine check exception.
350 */
351 if (vcpu_trace_exceptions(sc->vm, vcpu)) {
352 for (n = 0; n < 32; n++) {
353 /*
354 * Skip unimplemented vectors in the exception bitmap.
355 */
356 if (n == 2 || n == 9) {
357 continue;
358 }
359 svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(n));
360 }
361 } else {
362 svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC));
363 }
364
365 /* Intercept various events (e.g. I/O, MSR and CPUID accesses) */
366 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO);
367 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR);
368 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID);
369 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR);
370 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT);
371 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI);
372 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI);
373 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN);
374 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
375 VMCB_INTCPT_FERR_FREEZE);
376
377 /* Enable exit-on-hlt by default */
378 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_HLT);
379
380 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR);
381 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT);
382
383 /* Intercept privileged invalidation instructions. */
384 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVD);
385 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVLPGA);
386
387 /*
388 * Intercept all virtualization-related instructions.
389 *
390 * From the section "Canonicalization and Consistency Checks" in APMv2,
391 * the VMRUN intercept bit must be set to pass the consistency check.
392 */
393 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN);
394 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMMCALL);
395 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMLOAD);
396 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMSAVE);
397 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_STGI);
398 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_CLGI);
399 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_SKINIT);
400 if (vcpu_trap_wbinvd(sc->vm, vcpu) != 0) {
401 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT,
402 VMCB_INTCPT_WBINVD);
403 }
404
405 /*
406 * The ASID will be set to a non-zero value just before VMRUN.
407 */
408 ctrl->asid = 0;
409
410 /*
411 * Section 15.21.1, Interrupt Masking in EFLAGS
412 * Section 15.21.2, Virtualizing APIC.TPR
413 *
414 * This must be set for %rflags and %cr8 isolation of guest and host.
415 */
416 ctrl->v_intr_ctrl |= V_INTR_MASKING;
417
418 /* Enable Last Branch Record aka LBR-virt (if available) */
419 if (has_lbr_virt()) {
420 ctrl->misc_ctrl |= LBR_VIRT_ENABLE;
421 }
422
423 /* EFER_SVM must always be set when the guest is executing */
424 state->efer = EFER_SVM;
425
426 /* Set up the PAT to power-on state */
427 state->g_pat = PAT_VALUE(0, PAT_WRITE_BACK) |
428 PAT_VALUE(1, PAT_WRITE_THROUGH) |
429 PAT_VALUE(2, PAT_UNCACHED) |
430 PAT_VALUE(3, PAT_UNCACHEABLE) |
431 PAT_VALUE(4, PAT_WRITE_BACK) |
432 PAT_VALUE(5, PAT_WRITE_THROUGH) |
433 PAT_VALUE(6, PAT_UNCACHED) |
434 PAT_VALUE(7, PAT_UNCACHEABLE);
435
436 /* Set up DR6/7 to power-on state */
437 state->dr6 = DBREG_DR6_RESERVED1;
438 state->dr7 = DBREG_DR7_RESERVED1;
439 }
440
441 /*
442 * Initialize a virtual machine.
443 */
444 static void *
445 svm_vminit(struct vm *vm)
446 {
447 struct svm_softc *svm_sc;
448 struct svm_vcpu *vcpu;
449 vm_paddr_t msrpm_pa, iopm_pa, pml4_pa;
450 int i;
451 uint16_t maxcpus;
452
453 svm_sc = kmem_zalloc(sizeof (*svm_sc), KM_SLEEP);
454 VERIFY3U(((uintptr_t)svm_sc & PAGE_MASK), ==, 0);
455
456 svm_sc->msr_bitmap = vmm_contig_alloc(SVM_MSR_BITMAP_SIZE);
457 if (svm_sc->msr_bitmap == NULL)
458 panic("contigmalloc of SVM MSR bitmap failed");
459 svm_sc->iopm_bitmap = vmm_contig_alloc(SVM_IO_BITMAP_SIZE);
460 if (svm_sc->iopm_bitmap == NULL)
461 panic("contigmalloc of SVM IO bitmap failed");
462
463 svm_sc->vm = vm;
464 svm_sc->nptp = vmspace_table_root(vm_get_vmspace(vm));
465
466 /*
467 * Intercept read and write accesses to all MSRs.
468 */
469 memset(svm_sc->msr_bitmap, 0xFF, SVM_MSR_BITMAP_SIZE);
470
471 /*
472 * Access to the following MSRs is redirected to the VMCB when the
473 * guest is executing. Therefore it is safe to allow the guest to
474 * read/write these MSRs directly without hypervisor involvement.
475 */
476 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE);
477 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE);
478 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE);
479
480 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR);
481 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR);
482 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR);
483 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK);
484 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR);
485 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR);
486 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR);
487 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT);
488
489 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC);
490
491 /*
492 * Intercept writes to make sure that the EFER_SVM bit is not cleared.
493 */
494 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER);
495
496 /* Intercept access to all I/O ports. */
497 memset(svm_sc->iopm_bitmap, 0xFF, SVM_IO_BITMAP_SIZE);
498
499 iopm_pa = vtophys(svm_sc->iopm_bitmap);
500 msrpm_pa = vtophys(svm_sc->msr_bitmap);
501 pml4_pa = svm_sc->nptp;
502 maxcpus = vm_get_maxcpus(svm_sc->vm);
503 for (i = 0; i < maxcpus; i++) {
504 vcpu = svm_get_vcpu(svm_sc, i);
505 vcpu->nextrip = ~0;
506 vcpu->lastcpu = NOCPU;
507 vcpu->vmcb_pa = vtophys(&vcpu->vmcb);
508 vmcb_init(svm_sc, i, iopm_pa, msrpm_pa, pml4_pa);
509 svm_msr_guest_init(svm_sc, i);
510 }
511 return (svm_sc);
512 }
513
514 /*
515 * Collateral for a generic SVM VM-exit.
516 */
517 static void
518 vm_exit_svm(struct vm_exit *vme, uint64_t code, uint64_t info1, uint64_t info2)
519 {
520
521 vme->exitcode = VM_EXITCODE_SVM;
522 vme->u.svm.exitcode = code;
523 vme->u.svm.exitinfo1 = info1;
524 vme->u.svm.exitinfo2 = info2;
525 }
526
527 static enum vm_cpu_mode
528 svm_vcpu_mode(struct vmcb *vmcb)
529 {
530 struct vmcb_state *state;
531
532 state = &vmcb->state;
533
534 if (state->efer & EFER_LMA) {
535 struct vmcb_segment *seg;
536
537 /*
538 * Per section 4.8.1 of APM2, check if the Code Segment has the
539 * Long attribute set in its descriptor.
540 */
541 seg = vmcb_segptr(vmcb, VM_REG_GUEST_CS);
542 if (seg->attrib & VMCB_CS_ATTRIB_L)
543 return (CPU_MODE_64BIT);
544 else
545 return (CPU_MODE_COMPATIBILITY);
546 } else if (state->cr0 & CR0_PE) {
547 return (CPU_MODE_PROTECTED);
548 } else {
549 return (CPU_MODE_REAL);
550 }
551 }
552
553 static enum vm_paging_mode
554 svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer)
555 {
556
557 if ((cr0 & CR0_PG) == 0)
558 return (PAGING_MODE_FLAT);
559 if ((cr4 & CR4_PAE) == 0)
560 return (PAGING_MODE_32);
561 if (efer & EFER_LME)
562 return (PAGING_MODE_64);
563 else
564 return (PAGING_MODE_PAE);
565 }
566
567 static void
568 svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging)
569 {
570 struct vmcb_state *state;
571
572 state = &vmcb->state;
573 paging->cr3 = state->cr3;
574 paging->cpl = state->cpl;
575 paging->cpu_mode = svm_vcpu_mode(vmcb);
576 paging->paging_mode = svm_paging_mode(state->cr0, state->cr4,
577 state->efer);
578 }
579
580 #define UNHANDLED 0
581
582 /*
583 * Handle guest I/O intercept.
584 */
585 static int
586 svm_handle_inout(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
587 {
588 struct vmcb_ctrl *ctrl;
589 struct vmcb_state *state;
590 struct vm_inout *inout;
591 struct vie *vie;
592 uint64_t info1;
593 struct vm_guest_paging paging;
594
595 state = svm_get_vmcb_state(svm_sc, vcpu);
596 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
597 inout = &vmexit->u.inout;
598 info1 = ctrl->exitinfo1;
599
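/*
 * Decode the IN/OUT details from EXITINFO1: bit 0 is the direction (IN),
 * bit 2 marks a string operation, bit 3 a REP prefix, bits 6:4 hold the
 * operand size in bytes, and bits 31:16 hold the port number.
 */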
600 inout->bytes = (info1 >> 4) & 0x7;
601 inout->flags = 0;
602 inout->flags |= (info1 & BIT(0)) ? INOUT_IN : 0;
603 inout->flags |= (info1 & BIT(3)) ? INOUT_REP : 0;
604 inout->flags |= (info1 & BIT(2)) ? INOUT_STR : 0;
605 inout->port = (uint16_t)(info1 >> 16);
606 inout->eax = (uint32_t)(state->rax);
607
608 /*
609 * We'll always need paging and vie info, even if we bail out early
610 * due to missing DecodeAssist.
611 */
612 svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &paging);
613 vie = vm_vie_ctx(svm_sc->vm, vcpu);
614
615 if ((inout->flags & INOUT_STR) != 0) {
616 /*
617 * The effective segment number in EXITINFO1[12:10] is populated
618 * only if the processor has the DecodeAssist capability.
619 *
620 * This is not specified explicitly in APMv2 but can be verified
621 * empirically.
622 */
623 if (!has_decode_assist()) {
624 /*
625 * Without decoding assistance, force the task of
626 * emulating the ins/outs on userspace.
627 */
628 vmexit->exitcode = VM_EXITCODE_INST_EMUL;
629 bzero(&vmexit->u.inst_emul,
630 sizeof (vmexit->u.inst_emul));
631 vie_init_other(vie, &paging);
632 return (UNHANDLED);
633 }
634
635 /*
636 * Bits 7-9 encode the address size of ins/outs operations where
637 * the 1/2/4 values correspond to 16/32/64 bit sizes.
638 */
639 inout->addrsize = 2 * ((info1 >> 7) & 0x7);
640 VERIFY(inout->addrsize == 2 || inout->addrsize == 4 ||
641 inout->addrsize == 8);
642
643 if (inout->flags & INOUT_IN) {
644 /*
645 * For INS instructions, %es (encoded as 0) is the
646 * implied segment for the operation.
647 */
648 inout->segment = 0;
649 } else {
650 /*
651 * Bits 10-12 encode the segment for OUTS.
652 * This value follows the standard x86 segment order.
653 */
654 inout->segment = (info1 >> 10) & 0x7;
655 }
656 }
657
658 vmexit->exitcode = VM_EXITCODE_INOUT;
659 vie_init_inout(vie, inout, vmexit->inst_length, &paging);
660
661 /* The in/out emulation will handle advancing %rip */
662 vmexit->inst_length = 0;
663
664 return (UNHANDLED);
665 }
666
667 static int
668 npf_fault_type(uint64_t exitinfo1)
669 {
670
671 if (exitinfo1 & VMCB_NPF_INFO1_W)
672 return (PROT_WRITE);
673 else if (exitinfo1 & VMCB_NPF_INFO1_ID)
674 return (PROT_EXEC);
675 else
676 return (PROT_READ);
677 }
678
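/*
 * A nested page fault is a candidate for MMIO emulation only if it was not
 * an instruction fetch, did not occur during the guest page table walk, and
 * reported a valid final guest physical address.
 */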
679 static bool
680 svm_npf_emul_fault(uint64_t exitinfo1)
681 {
682 if (exitinfo1 & VMCB_NPF_INFO1_ID) {
683 return (false);
684 }
685
686 if (exitinfo1 & VMCB_NPF_INFO1_GPT) {
687 return (false);
688 }
689
690 if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) {
691 return (false);
692 }
693
694 return (true);
695 }
696
697 static void
698 svm_handle_mmio_emul(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit,
699 uint64_t gpa)
700 {
701 struct vmcb_ctrl *ctrl;
702 struct vmcb *vmcb;
703 struct vie *vie;
704 struct vm_guest_paging paging;
705 struct vmcb_segment *seg;
706 char *inst_bytes = NULL;
707 uint8_t inst_len = 0;
708
709 vmcb = svm_get_vmcb(svm_sc, vcpu);
710 ctrl = &vmcb->ctrl;
711
712 vmexit->exitcode = VM_EXITCODE_MMIO_EMUL;
713 vmexit->u.mmio_emul.gpa = gpa;
714 vmexit->u.mmio_emul.gla = VIE_INVALID_GLA;
715 svm_paging_info(vmcb, &paging);
716
717 switch (paging.cpu_mode) {
718 case CPU_MODE_REAL:
719 seg = vmcb_segptr(vmcb, VM_REG_GUEST_CS);
720 vmexit->u.mmio_emul.cs_base = seg->base;
721 vmexit->u.mmio_emul.cs_d = 0;
722 break;
723 case CPU_MODE_PROTECTED:
724 case CPU_MODE_COMPATIBILITY:
725 seg = vmcb_segptr(vmcb, VM_REG_GUEST_CS);
726 vmexit->u.mmio_emul.cs_base = seg->base;
727
728 /*
729 * Section 4.8.1 of APM2, Default Operand Size or D bit.
730 */
731 vmexit->u.mmio_emul.cs_d = (seg->attrib & VMCB_CS_ATTRIB_D) ?
732 1 : 0;
733 break;
734 default:
735 vmexit->u.mmio_emul.cs_base = 0;
736 vmexit->u.mmio_emul.cs_d = 0;
737 break;
738 }
739
740 /*
741 * Copy the instruction bytes into 'vie' if available.
742 */
743 if (has_decode_assist() && !disable_npf_assist) {
744 inst_len = ctrl->inst_len;
745 inst_bytes = (char *)ctrl->inst_bytes;
746 }
747 vie = vm_vie_ctx(svm_sc->vm, vcpu);
748 vie_init_mmio(vie, inst_bytes, inst_len, &paging, gpa);
749 }
750
751 /*
752 * Do not allow CD, NW, or invalid high bits to be asserted in the value of cr0
753 * which is live in the guest. They are visible via the shadow instead.
754 */
755 #define SVM_CR0_MASK ~(CR0_CD | CR0_NW | 0xffffffff00000000)
756
757 static void
758 svm_set_cr0(struct svm_softc *svm_sc, int vcpu, uint64_t val, bool guest_write)
759 {
760 struct vmcb_state *state;
761 struct svm_regctx *regctx;
762 uint64_t masked, old, diff;
763
764 state = svm_get_vmcb_state(svm_sc, vcpu);
765 regctx = svm_get_guest_regctx(svm_sc, vcpu);
766
767 old = state->cr0 | (regctx->sctx_cr0_shadow & ~SVM_CR0_MASK);
768 diff = old ^ val;
769
770 /* No further work needed if register contents remain the same */
771 if (diff == 0) {
772 return;
773 }
774
775 /* Flush the TLB if the paging or write-protect bits are changing */
776 if ((diff & CR0_PG) != 0 || (diff & CR0_WP) != 0) {
777 flush_asid(svm_sc, vcpu);
778 }
779
780 /*
781 * If the change in %cr0 is due to a guest action (via interception)
782 * then other CPU state updates may be required.
783 */
784 if (guest_write) {
785 if ((diff & CR0_PG) != 0) {
786 uint64_t efer = state->efer;
787
788 /* Keep the long-mode state in EFER in sync */
789 if ((val & CR0_PG) != 0 && (efer & EFER_LME) != 0) {
790 state->efer |= EFER_LMA;
791 }
792 if ((val & CR0_PG) == 0 && (efer & EFER_LME) != 0) {
793 state->efer &= ~EFER_LMA;
794 }
795 }
796 }
797
798 masked = val & SVM_CR0_MASK;
799 regctx->sctx_cr0_shadow = val;
800 state->cr0 = masked;
801 svm_set_dirty(svm_sc, vcpu, VMCB_CACHE_CR);
802
803 if ((masked ^ val) != 0) {
804 /*
805 * The guest has set bits in %cr0 which we are masking out and
806 * exposing via shadow.
807 *
808 * We must intercept %cr0 reads in order to make the shadowed
809 * view available to the guest.
810 *
811 * Writes to %cr0 must also be intercepted (unconditionally,
812 * unlike the VMCB_INTCPT_CR0_WRITE mechanism) so we can catch
813 * if/when the guest clears those shadowed bits.
814 */
815 svm_enable_intercept(svm_sc, vcpu, VMCB_CR_INTCPT,
816 BIT(0) | BIT(16));
817 } else {
818 /*
819 * When no bits remain in %cr0 which require shadowing, the
820 * unconditional intercept of reads/writes to %cr0 can be
821 * disabled.
822 *
823 * The selective write intercept (VMCB_INTCPT_CR0_WRITE) remains
824 * in place so we can be notified of operations which change
825 * bits other than TS or MP.
826 */
827 svm_disable_intercept(svm_sc, vcpu, VMCB_CR_INTCPT,
828 BIT(0) | BIT(16));
829 }
830 svm_set_dirty(svm_sc, vcpu, VMCB_CACHE_I);
831 }
832
833 static void
834 svm_get_cr0(struct svm_softc *svm_sc, int vcpu, uint64_t *val)
835 {
836 struct vmcb *vmcb;
837 struct svm_regctx *regctx;
838
839 vmcb = svm_get_vmcb(svm_sc, vcpu);
840 regctx = svm_get_guest_regctx(svm_sc, vcpu);
841
842 /*
843 * Include the %cr0 bits which exist only in the shadow along with those
844 * in the running vCPU state.
845 */
846 *val = vmcb->state.cr0 | (regctx->sctx_cr0_shadow & ~SVM_CR0_MASK);
847 }
848
849 static void
850 svm_handle_cr0_read(struct svm_softc *svm_sc, int vcpu, enum vm_reg_name reg)
851 {
852 uint64_t val;
853 int err __maybe_unused;
854
855 svm_get_cr0(svm_sc, vcpu, &val);
856 err = svm_setreg(svm_sc, vcpu, reg, val);
857 ASSERT(err == 0);
858 }
859
860 static void
861 svm_handle_cr0_write(struct svm_softc *svm_sc, int vcpu, enum vm_reg_name reg)
862 {
863 struct vmcb_state *state;
864 uint64_t val;
865 int err __maybe_unused;
866
867 state = svm_get_vmcb_state(svm_sc, vcpu);
868
869 err = svm_getreg(svm_sc, vcpu, reg, &val);
870 ASSERT(err == 0);
871
872 if ((val & CR0_NW) != 0 && (val & CR0_CD) == 0) {
873 /* NW without CD is nonsensical */
874 vm_inject_gp(svm_sc->vm, vcpu);
875 return;
876 }
877 if ((val & CR0_PG) != 0 && (val & CR0_PE) == 0) {
878 /* PG requires PE */
879 vm_inject_gp(svm_sc->vm, vcpu);
880 return;
881 }
882 if ((state->cr0 & CR0_PG) == 0 && (val & CR0_PG) != 0) {
883 /* When enabling paging, PAE must be enabled if LME is. */
884 if ((state->efer & EFER_LME) != 0 &&
885 (state->cr4 & CR4_PAE) == 0) {
886 vm_inject_gp(svm_sc->vm, vcpu);
887 return;
888 }
889 }
890
891 svm_set_cr0(svm_sc, vcpu, val, true);
892 }
893
894 static void
895 svm_inst_emul_other(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
896 {
897 struct vie *vie;
898 struct vm_guest_paging paging;
899
900 /* Let the instruction emulation (hopefully in-kernel) handle it */
901 vmexit->exitcode = VM_EXITCODE_INST_EMUL;
902 bzero(&vmexit->u.inst_emul, sizeof (vmexit->u.inst_emul));
903 vie = vm_vie_ctx(svm_sc->vm, vcpu);
904 svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &paging);
905 vie_init_other(vie, &paging);
906
907 /* The instruction emulation will handle advancing %rip */
908 vmexit->inst_length = 0;
909 }
910
911 static void
912 svm_update_virqinfo(struct svm_softc *sc, int vcpu)
913 {
914 struct vm *vm;
915 struct vlapic *vlapic;
916 struct vmcb_ctrl *ctrl;
917
918 vm = sc->vm;
919 vlapic = vm_lapic(vm, vcpu);
920 ctrl = svm_get_vmcb_ctrl(sc, vcpu);
921
922 /* Update %cr8 in the emulated vlapic */
923 vlapic_set_cr8(vlapic, ctrl->v_tpr);
924
925 /* Virtual interrupt injection is not used. */
926 KASSERT(ctrl->v_intr_vector == 0, ("%s: invalid "
927 "v_intr_vector %d", __func__, ctrl->v_intr_vector));
928 }
929
930 CTASSERT(VMCB_EVENTINJ_TYPE_INTR == VM_INTINFO_HWINTR);
931 CTASSERT(VMCB_EVENTINJ_TYPE_NMI == VM_INTINFO_NMI);
932 CTASSERT(VMCB_EVENTINJ_TYPE_EXCEPTION == VM_INTINFO_HWEXCP);
933 CTASSERT(VMCB_EVENTINJ_TYPE_INTn == VM_INTINFO_SWINTR);
934 CTASSERT(VMCB_EVENTINJ_EC_VALID == VM_INTINFO_DEL_ERRCODE);
935 CTASSERT(VMCB_EVENTINJ_VALID == VM_INTINFO_VALID);
936
937 /*
938 * Store SVM-specific event injection info for later handling. This depends on
939 * the bhyve-internal event definitions matching those in the VMCB, as ensured
940 * by the above CTASSERTs.
941 */
942 static void
943 svm_stash_intinfo(struct svm_softc *svm_sc, int vcpu, uint64_t intinfo)
944 {
945 ASSERT(VMCB_EXITINTINFO_VALID(intinfo));
946
947 /*
948 * If stashing a pending NMI injection, ensure that it bears the
949 * correct vector, which exit_intinfo expects.
950 */
951 if (VM_INTINFO_TYPE(intinfo) == VM_INTINFO_NMI) {
952 intinfo &= ~VM_INTINFO_MASK_VECTOR;
953 intinfo |= IDT_NMI;
954 }
955
956 VERIFY0(vm_exit_intinfo(svm_sc->vm, vcpu, intinfo));
957 }
958
959 static void
960 svm_save_exitintinfo(struct svm_softc *svm_sc, int vcpu)
961 {
962 struct vmcb_ctrl *ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
963 uint64_t intinfo = ctrl->exitintinfo;
964
965 if (VMCB_EXITINTINFO_VALID(intinfo)) {
966 /*
967 * If a #VMEXIT happened during event delivery then record the
968 * event that was being delivered.
969 */
970 vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1);
971
972 svm_stash_intinfo(svm_sc, vcpu, intinfo);
973 }
974 }
975
976 static __inline int
977 vintr_intercept_enabled(struct svm_softc *sc, int vcpu)
978 {
979
980 return (svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
981 VMCB_INTCPT_VINTR));
982 }
983
984 static void
985 svm_enable_intr_window_exiting(struct svm_softc *sc, int vcpu)
986 {
987 struct vmcb_ctrl *ctrl;
988 struct vmcb_state *state;
989
990 ctrl = svm_get_vmcb_ctrl(sc, vcpu);
991 state = svm_get_vmcb_state(sc, vcpu);
992
993 if ((ctrl->v_irq & V_IRQ) != 0 && ctrl->v_intr_vector == 0) {
994 KASSERT(ctrl->v_intr_prio & V_IGN_TPR,
995 ("%s: invalid v_ign_tpr", __func__));
996 KASSERT(vintr_intercept_enabled(sc, vcpu),
997 ("%s: vintr intercept should be enabled", __func__));
998 return;
999 }
1000
1001 /*
1002 * We use V_IRQ in conjunction with the VINTR intercept to trap into the
1003 * hypervisor as soon as a virtual interrupt can be delivered.
1004 *
1005 * Since injected events are not subject to intercept checks we need to
1006 * ensure that the V_IRQ is not actually going to be delivered on VM
1007 * entry.
1008 */
1009 VERIFY((ctrl->eventinj & VMCB_EVENTINJ_VALID) != 0 ||
1010 (state->rflags & PSL_I) == 0 || ctrl->intr_shadow);
1011
1012 ctrl->v_irq |= V_IRQ;
1013 ctrl->v_intr_prio |= V_IGN_TPR;
1014 ctrl->v_intr_vector = 0;
1015 svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
1016 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
1017 }
1018
1019 static void
1020 svm_disable_intr_window_exiting(struct svm_softc *sc, int vcpu)
1021 {
1022 struct vmcb_ctrl *ctrl;
1023
1024 ctrl = svm_get_vmcb_ctrl(sc, vcpu);
1025
1026 if ((ctrl->v_irq & V_IRQ) == 0 && ctrl->v_intr_vector == 0) {
1027 KASSERT(!vintr_intercept_enabled(sc, vcpu),
1028 ("%s: vintr intercept should be disabled", __func__));
1029 return;
1030 }
1031
1032 ctrl->v_irq &= ~V_IRQ;
1033 ctrl->v_intr_vector = 0;
1034 svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
1035 svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
1036 }
1037
1038 /*
1039 * Once an NMI is injected it blocks delivery of further NMIs until the handler
1040 * executes an IRET. The IRET intercept is enabled when an NMI is injected
1041 * to track when the vcpu is done handling the NMI.
1042 */
1043 static int
1044 svm_nmi_blocked(struct svm_softc *sc, int vcpu)
1045 {
1046 return (svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
1047 VMCB_INTCPT_IRET));
1048 }
1049
1050 static void
1051 svm_clear_nmi_blocking(struct svm_softc *sc, int vcpu)
1052 {
1053 struct vmcb_ctrl *ctrl;
1054
1055 KASSERT(svm_nmi_blocked(sc, vcpu), ("vNMI already unblocked"));
1056 /*
1057 * When the IRET intercept is cleared the vcpu will attempt to execute
1058 * the "iret" when it runs next. However, it is possible to inject
1059 * another NMI into the vcpu before the "iret" has actually executed.
1060 *
1061 * For example, if the "iret" encounters a #NPF when accessing the stack
1062 * it will trap back into the hypervisor. If an NMI is pending for
1063 * the vcpu it will be injected into the guest.
1064 *
1065 * XXX this needs to be fixed
1066 */
1067 svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
1068
1069 /*
1070 * Set an interrupt shadow to prevent an NMI from being immediately
1071 * injected on the next VMRUN.
1072 */
1073 ctrl = svm_get_vmcb_ctrl(sc, vcpu);
1074 ctrl->intr_shadow = 1;
1075 }
1076
1077 static void
1078 svm_inject_event(struct vmcb_ctrl *ctrl, uint64_t info)
1079 {
1080 ASSERT(VM_INTINFO_PENDING(info));
1081
1082 uint8_t vector = VM_INTINFO_VECTOR(info);
1083 uint32_t type = VM_INTINFO_TYPE(info);
1084
1085 /*
1086 * Correct behavior depends on bhyve intinfo event types lining up with
1087 * those defined by AMD for event injection in the VMCB. The CTASSERTs
1088 * above svm_save_exitintinfo() ensure it.
1089 */
1090 switch (type) {
1091 case VM_INTINFO_NMI:
1092 /* Ensure vector for injected event matches its type (NMI) */
1093 vector = IDT_NMI;
1094 break;
1095 case VM_INTINFO_HWINTR:
1096 case VM_INTINFO_SWINTR:
1097 break;
1098 case VM_INTINFO_HWEXCP:
1099 if (vector == IDT_NMI) {
1100 /*
1101 * NMIs are expected to be injected with
1102 * VMCB_EVENTINJ_TYPE_NMI, rather than as an exception
1103 * with the NMI vector.
1104 */
1105 type = VM_INTINFO_NMI;
1106 }
1107 VERIFY(vector < 32);
1108 break;
1109 default:
1110 /*
1111 * Since there is no strong validation of injected event types
1112 * at this point, fall back to software interrupt for those we
1113 * do not recognize.
1114 */
1115 type = VM_INTINFO_SWINTR;
1116 break;
1117 }
1118
1119 ctrl->eventinj = VMCB_EVENTINJ_VALID | type | vector;
1120 if (VM_INTINFO_HAS_ERRCODE(info)) {
1121 ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID;
1122 ctrl->eventinj |= (uint64_t)VM_INTINFO_ERRCODE(info) << 32;
1123 }
1124 }
1125
1126 static void
1127 svm_inject_nmi(struct svm_softc *sc, int vcpu)
1128 {
1129 struct vmcb_ctrl *ctrl = svm_get_vmcb_ctrl(sc, vcpu);
1130
1131 ASSERT(!svm_nmi_blocked(sc, vcpu));
1132
1133 ctrl->eventinj = VMCB_EVENTINJ_VALID | VMCB_EVENTINJ_TYPE_NMI;
1134 vm_nmi_clear(sc->vm, vcpu);
1135
1136 /*
1137 * Virtual NMI blocking is now in effect.
1138 *
1139 * Not only does this block a subsequent NMI injection from taking
1140 * place, it also configures an intercept on the IRET so we can track
1141 * when the next injection can take place.
1142 */
1143 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
1144 }
1145
1146 static void
1147 svm_inject_irq(struct svm_softc *sc, int vcpu, int vector)
1148 {
1149 struct vmcb_ctrl *ctrl = svm_get_vmcb_ctrl(sc, vcpu);
1150
1151 ASSERT(vector >= 0 && vector <= 255);
1152
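/*
 * The event type field is left as zero here, which encodes an external
 * interrupt injection in the VMCB.
 */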
1153 ctrl->eventinj = VMCB_EVENTINJ_VALID | vector;
1154 }
1155
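/* EFER bits which must be written as zero: bit 9 and bits 63:16. */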
1156 #define EFER_MBZ_BITS 0xFFFFFFFFFFFF0200UL
1157
1158 static vm_msr_result_t
1159 svm_write_efer(struct svm_softc *sc, int vcpu, uint64_t newval)
1160 {
1161 struct vmcb_state *state = svm_get_vmcb_state(sc, vcpu);
1162 uint64_t lma;
1163 int error;
1164
1165 newval &= ~0xFE; /* clear the Read-As-Zero (RAZ) bits */
1166
1167 if (newval & EFER_MBZ_BITS) {
1168 return (VMR_GP);
1169 }
1170
1171 /* APMv2 Table 14-5 "Long-Mode Consistency Checks" */
1172 const uint64_t changed = state->efer ^ newval;
1173 if (changed & EFER_LME) {
1174 if (state->cr0 & CR0_PG) {
1175 return (VMR_GP);
1176 }
1177 }
1178
1179 /* EFER.LMA = EFER.LME & CR0.PG */
1180 if ((newval & EFER_LME) != 0 && (state->cr0 & CR0_PG) != 0) {
1181 lma = EFER_LMA;
1182 } else {
1183 lma = 0;
1184 }
1185 if ((newval & EFER_LMA) != lma) {
1186 return (VMR_GP);
1187 }
1188
1189 if ((newval & EFER_NXE) != 0 &&
1190 !vm_cpuid_capability(sc->vm, vcpu, VCC_NO_EXECUTE)) {
1191 return (VMR_GP);
1192 }
1193 if ((newval & EFER_FFXSR) != 0 &&
1194 !vm_cpuid_capability(sc->vm, vcpu, VCC_FFXSR)) {
1195 return (VMR_GP);
1196 }
1197 if ((newval & EFER_TCE) != 0 &&
1198 !vm_cpuid_capability(sc->vm, vcpu, VCC_TCE)) {
1199 return (VMR_GP);
1200 }
1201
1202 /*
1203 * Until bhyve has proper support for long-mode segment limits, just
1204 * toss a #GP at the guest if they attempt to use it.
1205 */
1206 if (newval & EFER_LMSLE) {
1207 return (VMR_GP);
1208 }
1209
1210 error = svm_setreg(sc, vcpu, VM_REG_GUEST_EFER, newval);
1211 VERIFY0(error);
1212 return (VMR_OK);
1213 }
1214
1215 static int
1216 svm_handle_msr(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit,
1217 bool is_wrmsr)
1218 {
1219 struct vmcb_state *state = svm_get_vmcb_state(svm_sc, vcpu);
1220 struct svm_regctx *ctx = svm_get_guest_regctx(svm_sc, vcpu);
1221 const uint32_t ecx = ctx->sctx_rcx;
1222 vm_msr_result_t res;
1223 uint64_t val = 0;
1224
1225 if (is_wrmsr) {
1226 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_WRMSR, 1);
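/* WRMSR operands arrive in EDX:EAX; reassemble them into a 64-bit value. */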
1227 val = ctx->sctx_rdx << 32 | (uint32_t)state->rax;
1228
1229 if (vlapic_owned_msr(ecx)) {
1230 struct vlapic *vlapic = vm_lapic(svm_sc->vm, vcpu);
1231
1232 res = vlapic_wrmsr(vlapic, ecx, val);
1233 } else if (ecx == MSR_EFER) {
1234 res = svm_write_efer(svm_sc, vcpu, val);
1235 } else {
1236 res = svm_wrmsr(svm_sc, vcpu, ecx, val);
1237 }
1238 } else {
1239 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_RDMSR, 1);
1240
1241 if (vlapic_owned_msr(ecx)) {
1242 struct vlapic *vlapic = vm_lapic(svm_sc->vm, vcpu);
1243
1244 res = vlapic_rdmsr(vlapic, ecx, &val);
1245 } else {
1246 res = svm_rdmsr(svm_sc, vcpu, ecx, &val);
1247 }
1248 }
1249
1250 switch (res) {
1251 case VMR_OK:
1252 /* Store rdmsr result in the appropriate registers */
1253 if (!is_wrmsr) {
1254 state->rax = (uint32_t)val;
1255 ctx->sctx_rdx = val >> 32;
1256 }
1257 return (1);
1258 case VMR_GP:
1259 vm_inject_gp(svm_sc->vm, vcpu);
1260 return (1);
1261 case VMR_UNHANLDED:
1262 vmexit->exitcode = is_wrmsr ?
1263 VM_EXITCODE_WRMSR : VM_EXITCODE_RDMSR;
1264 vmexit->u.msr.code = ecx;
1265 vmexit->u.msr.wval = val;
1266 return (0);
1267 default:
1268 panic("unexpected msr result %u\n", res);
1269 }
1270 }
1271
1272 /*
1273 * From section "State Saved on Exit" in APMv2: nRIP is saved for all #VMEXITs
1274 * that are due to instruction intercepts as well as MSR and IOIO intercepts
1275 * and exceptions caused by INT3, INTO and BOUND instructions.
1276 *
1277 * Return 1 if the nRIP is valid and 0 otherwise.
1278 */
1279 static int
1280 nrip_valid(uint64_t exitcode)
1281 {
1282 switch (exitcode) {
1283 case 0x00 ... 0x0F: /* read of CR0 through CR15 */
1284 case 0x10 ... 0x1F: /* write of CR0 through CR15 */
1285 case 0x20 ... 0x2F: /* read of DR0 through DR15 */
1286 case 0x30 ... 0x3F: /* write of DR0 through DR15 */
1287 case 0x43: /* INT3 */
1288 case 0x44: /* INTO */
1289 case 0x45: /* BOUND */
1290 case 0x65 ... 0x7C: /* VMEXIT_CR0_SEL_WRITE ... VMEXIT_MSR */
1291 case 0x80 ... 0x8D: /* VMEXIT_VMRUN ... VMEXIT_XSETBV */
1292 return (1);
1293 default:
1294 return (0);
1295 }
1296 }
1297
1298 static int
1299 svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
1300 {
1301 struct vmcb *vmcb;
1302 struct vmcb_state *state;
1303 struct vmcb_ctrl *ctrl;
1304 struct svm_regctx *ctx;
1305 uint64_t code, info1, info2;
1306 int handled;
1307
1308 ctx = svm_get_guest_regctx(svm_sc, vcpu);
1309 vmcb = svm_get_vmcb(svm_sc, vcpu);
1310 state = &vmcb->state;
1311 ctrl = &vmcb->ctrl;
1312
1313 handled = 0;
1314 code = ctrl->exitcode;
1315 info1 = ctrl->exitinfo1;
1316 info2 = ctrl->exitinfo2;
1317
1318 vmexit->exitcode = VM_EXITCODE_BOGUS;
1319 vmexit->rip = state->rip;
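/*
 * When nRIP is valid it points just past the intercepted instruction, so
 * the difference from the saved %rip gives the instruction length.
 */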
1320 vmexit->inst_length = nrip_valid(code) ? ctrl->nrip - state->rip : 0;
1321
1322 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_COUNT, 1);
1323
1324 /*
1325 * #VMEXIT(INVALID) needs to be handled early because the VMCB is
1326 * in an inconsistent state and can trigger assertions that would
1327 * never happen otherwise.
1328 */
1329 if (code == VMCB_EXIT_INVALID) {
1330 vm_exit_svm(vmexit, code, info1, info2);
1331 return (0);
1332 }
1333
1334 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event "
1335 "injection valid bit is set %lx", __func__, ctrl->eventinj));
1336
1337 KASSERT(vmexit->inst_length >= 0 && vmexit->inst_length <= 15,
1338 ("invalid inst_length %d: code (%lx), info1 (%lx), info2 (%lx)",
1339 vmexit->inst_length, code, info1, info2));
1340
1341 svm_update_virqinfo(svm_sc, vcpu);
1342 svm_save_exitintinfo(svm_sc, vcpu);
1343
1344 switch (code) {
1345 case VMCB_EXIT_CR0_READ:
1346 if (VMCB_CRx_INFO1_VALID(info1) != 0) {
1347 svm_handle_cr0_read(svm_sc, vcpu,
1348 vie_regnum_map(VMCB_CRx_INFO1_GPR(info1)));
1349 handled = 1;
1350 } else {
1351 /*
1352 * If SMSW is used to read the contents of %cr0, then
1353 * the VALID bit will not be set in `info1`, since the
1354 * handling is different from the mov-to-reg case.
1355 *
1356 * Punt to the instruction emulation to handle it.
1357 */
1358 svm_inst_emul_other(svm_sc, vcpu, vmexit);
1359 }
1360 break;
1361 case VMCB_EXIT_CR0_WRITE:
1362 case VMCB_EXIT_CR0_SEL_WRITE:
1363 if (VMCB_CRx_INFO1_VALID(info1) != 0) {
1364 svm_handle_cr0_write(svm_sc, vcpu,
1365 vie_regnum_map(VMCB_CRx_INFO1_GPR(info1)));
1366 handled = 1;
1367 } else {
1368 /*
1369 * Writes to %cr0 without VALID being set in `info1` are
1370 * initiated by the LMSW and CLTS instructions. While
1371 * LMSW (like SMSW) sees little use in modern OSes and
1372 * bootloaders, CLTS is still used for handling FPU
1373 * state transitions.
1374 *
1375 * Punt to the instruction emulation to handle them.
1376 */
1377 svm_inst_emul_other(svm_sc, vcpu, vmexit);
1378 }
1379 break;
1380 case VMCB_EXIT_IRET:
1381 /*
1382 * Restart execution at "iret" but with the intercept cleared.
1383 */
1384 vmexit->inst_length = 0;
1385 svm_clear_nmi_blocking(svm_sc, vcpu);
1386 handled = 1;
1387 break;
1388 case VMCB_EXIT_VINTR: /* interrupt window exiting */
1389 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_VINTR, 1);
1390 svm_disable_intr_window_exiting(svm_sc, vcpu);
1391 handled = 1;
1392 break;
1393 case VMCB_EXIT_INTR: /* external interrupt */
1394 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1);
1395 handled = 1;
1396 break;
1397 case VMCB_EXIT_NMI:
1398 case VMCB_EXIT_SMI:
1399 case VMCB_EXIT_INIT:
1400 /*
1401 * For external NMI/SMI and physical INIT interrupts, simply
1402 * continue execution, as those host events will be handled by
1403 * the physical CPU.
1404 */
1405 handled = 1;
1406 break;
1407 case VMCB_EXIT_EXCP0 ... VMCB_EXIT_EXCP31: {
1408 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXCEPTION, 1);
1409
1410 const uint8_t idtvec = code - VMCB_EXIT_EXCP0;
1411 uint32_t errcode = 0;
1412 bool reflect = true;
1413 bool errcode_valid = false;
1414
1415 switch (idtvec) {
1416 case IDT_MC:
1417 /* The host will handle the MCE itself. */
1418 reflect = false;
1419 vmm_call_trap(T_MCE);
1420 break;
1421 case IDT_PF:
1422 VERIFY0(svm_setreg(svm_sc, vcpu, VM_REG_GUEST_CR2,
1423 info2));
1424 /* fallthru */
1425 case IDT_NP:
1426 case IDT_SS:
1427 case IDT_GP:
1428 case IDT_AC:
1429 case IDT_TS:
1430 errcode_valid = true;
1431 errcode = info1;
1432 break;
1433
1434 case IDT_DF:
1435 errcode_valid = true;
1436 break;
1437
1438 case IDT_BP:
1439 case IDT_OF:
1440 case IDT_BR:
1441 /*
1442 * The 'nrip' field is populated for INT3, INTO and
1443 * BOUND exceptions and this also implies that
1444 * 'inst_length' is non-zero.
1445 *
1446 * Reset 'inst_length' to zero so the guest %rip at
1447 * event injection is identical to what it was when
1448 * the exception originally happened.
1449 */
1450 vmexit->inst_length = 0;
1451 /* fallthru */
1452 default:
1453 errcode_valid = false;
1454 break;
1455 }
1456 VERIFY0(vmexit->inst_length);
1457
1458 if (reflect) {
1459 /* Reflect the exception back into the guest */
1460 VERIFY0(vm_inject_exception(svm_sc->vm, vcpu, idtvec,
1461 errcode_valid, errcode, false));
1462 }
1463 handled = 1;
1464 break;
1465 }
1466 case VMCB_EXIT_MSR:
1467 handled = svm_handle_msr(svm_sc, vcpu, vmexit, info1 != 0);
1468 break;
1469 case VMCB_EXIT_IO:
1470 handled = svm_handle_inout(svm_sc, vcpu, vmexit);
1471 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1);
1472 break;
1473 case VMCB_EXIT_SHUTDOWN:
1474 (void) vm_suspend(svm_sc->vm, VM_SUSPEND_TRIPLEFAULT, vcpu);
1475 handled = 1;
1476 break;
1477 case VMCB_EXIT_INVLPGA:
1478 /* privileged invalidation instructions */
1479 vm_inject_ud(svm_sc->vm, vcpu);
1480 handled = 1;
1481 break;
1482 case VMCB_EXIT_VMRUN:
1483 case VMCB_EXIT_VMLOAD:
1484 case VMCB_EXIT_VMSAVE:
1485 case VMCB_EXIT_STGI:
1486 case VMCB_EXIT_CLGI:
1487 case VMCB_EXIT_SKINIT:
1488 /* privileged vmm instructions */
1489 vm_inject_ud(svm_sc->vm, vcpu);
1490 handled = 1;
1491 break;
1492 case VMCB_EXIT_INVD:
1493 case VMCB_EXIT_WBINVD:
1494 /* ignore exit */
1495 handled = 1;
1496 break;
1497 case VMCB_EXIT_VMMCALL:
1498 /* No handlers make use of VMMCALL for now */
1499 vm_inject_ud(svm_sc->vm, vcpu);
1500 handled = 1;
1501 break;
1502 case VMCB_EXIT_CPUID:
1503 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1);
1504 vcpu_emulate_cpuid(svm_sc->vm, vcpu, &state->rax,
1505 &ctx->sctx_rbx, &ctx->sctx_rcx, &ctx->sctx_rdx);
1506 handled = 1;
1507 break;
1508 case VMCB_EXIT_HLT:
1509 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1);
1510 vmexit->exitcode = VM_EXITCODE_HLT;
1511 vmexit->u.hlt.rflags = state->rflags;
1512 break;
1513 case VMCB_EXIT_PAUSE:
1514 vmexit->exitcode = VM_EXITCODE_PAUSE;
1515 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_PAUSE, 1);
1516 break;
1517 case VMCB_EXIT_NPF:
1518 /* EXITINFO2 contains the faulting guest physical address */
1519 if (info1 & VMCB_NPF_INFO1_RSV) {
1520 /* nested fault with reserved bits set */
1521 } else if (vm_mem_allocated(svm_sc->vm, vcpu, info2)) {
1522 vmexit->exitcode = VM_EXITCODE_PAGING;
1523 vmexit->u.paging.gpa = info2;
1524 vmexit->u.paging.fault_type = npf_fault_type(info1);
1525 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
1526 } else if (svm_npf_emul_fault(info1)) {
1527 svm_handle_mmio_emul(svm_sc, vcpu, vmexit, info2);
1528 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_MMIO_EMUL, 1);
1529 }
1530 break;
1531 case VMCB_EXIT_MONITOR:
1532 vmexit->exitcode = VM_EXITCODE_MONITOR;
1533 break;
1534 case VMCB_EXIT_MWAIT:
1535 vmexit->exitcode = VM_EXITCODE_MWAIT;
1536 break;
1537 default:
1538 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1);
1539 break;
1540 }
1541
1542 DTRACE_PROBE3(vmm__vexit, int, vcpu, uint64_t, vmexit->rip, uint32_t,
1543 code);
1544
1545 if (handled) {
1546 vmexit->rip += vmexit->inst_length;
1547 vmexit->inst_length = 0;
1548 state->rip = vmexit->rip;
1549 } else {
1550 if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
1551 /*
1552 * If this VM exit was not claimed by anybody then
1553 * treat it as a generic SVM exit.
1554 */
1555 vm_exit_svm(vmexit, code, info1, info2);
1556 } else {
1557 /*
1558 * The exitcode and collateral have been populated.
1559 * The VM exit will be processed further in userland.
1560 */
1561 }
1562 }
1563 return (handled);
1564 }
1565
1566 /*
1567 * Inject exceptions, NMIs, and ExtINTs.
1568 *
1569 * The logic behind these are complicated and may involve mutex contention, so
1570 * the injection is performed without the protection of host CPU interrupts
1571 * being disabled. This means a racing notification could be "lost",
1572 * necessitating a later call to svm_inject_recheck() to close that window
1573 * of opportunity.
1574 */
1575 static enum event_inject_state
1576 svm_inject_events(struct svm_softc *sc, int vcpu)
1577 {
1578 struct vmcb_ctrl *ctrl;
1579 struct vmcb_state *state;
1580 struct svm_vcpu *vcpustate;
1581 uint64_t intinfo;
1582 enum event_inject_state ev_state;
1583
1584 state = svm_get_vmcb_state(sc, vcpu);
1585 ctrl = svm_get_vmcb_ctrl(sc, vcpu);
1586 vcpustate = svm_get_vcpu(sc, vcpu);
1587 ev_state = EIS_CAN_INJECT;
1588
1589 /* Clear any interrupt shadow if guest %rip has changed */
1590 if (vcpustate->nextrip != state->rip) {
1591 ctrl->intr_shadow = 0;
1592 }
1593
1594 /*
1595 * An event is already pending for injection. This can occur when the
1596 * vCPU exits prior to VM entry (like for an AST).
1597 */
1598 if (ctrl->eventinj & VMCB_EVENTINJ_VALID) {
1599 return (EIS_EV_EXISTING | EIS_REQ_EXIT);
1600 }
1601
1602 /*
1603 * Inject pending events or exceptions for this vcpu.
1604 *
1605 * An event might be pending because the previous #VMEXIT happened
1606 * during event delivery (i.e. ctrl->exitintinfo).
1607 *
1608 * An event might also be pending because an exception was injected
1609 * by the hypervisor (e.g. #PF during instruction emulation).
1610 */
1611 if (vm_entry_intinfo(sc->vm, vcpu, &intinfo)) {
1612 svm_inject_event(ctrl, intinfo);
1613 vmm_stat_incr(sc->vm, vcpu, VCPU_INTINFO_INJECTED, 1);
1614 ev_state = EIS_EV_INJECTED;
1615 }
1616
1617 /* NMI event has priority over interrupts. */
1618 if (vm_nmi_pending(sc->vm, vcpu) && !svm_nmi_blocked(sc, vcpu)) {
1619 if (ev_state == EIS_CAN_INJECT) {
1620 /* Can't inject NMI if vcpu is in an intr_shadow. */
1621 if (ctrl->intr_shadow) {
1622 return (EIS_GI_BLOCK);
1623 }
1624
1625 svm_inject_nmi(sc, vcpu);
1626 ev_state = EIS_EV_INJECTED;
1627 } else {
1628 return (ev_state | EIS_REQ_EXIT);
1629 }
1630 }
1631
1632 if (vm_extint_pending(sc->vm, vcpu)) {
1633 int vector;
1634
1635 if (ev_state != EIS_CAN_INJECT) {
1636 return (ev_state | EIS_REQ_EXIT);
1637 }
1638
1639 /*
1640 * If the guest has disabled interrupts or is in an interrupt
1641 * shadow then we cannot inject the pending interrupt.
1642 */
1643 if ((state->rflags & PSL_I) == 0 || ctrl->intr_shadow) {
1644 return (EIS_GI_BLOCK);
1645 }
1646
1647 /* Ask the legacy pic for a vector to inject */
1648 vatpic_pending_intr(sc->vm, &vector);
1649 KASSERT(vector >= 0 && vector <= 255,
1650 ("invalid vector %d from INTR", vector));
1651
1652 svm_inject_irq(sc, vcpu, vector);
1653 vm_extint_clear(sc->vm, vcpu);
1654 vatpic_intr_accepted(sc->vm, vector);
1655 ev_state = EIS_EV_INJECTED;
1656 }
1657
1658 return (ev_state);
1659 }
1660
1661 /*
1662 * Synchronize vLAPIC state and inject any interrupts pending on it.
1663 *
1664 * This is done with host CPU interrupts disabled so notification IPIs will be
1665 * queued on the host APIC and recognized when entering SVM guest context.
1666 */
1667 static enum event_inject_state
1668 svm_inject_vlapic(struct svm_softc *sc, int vcpu, struct vlapic *vlapic,
1669 enum event_inject_state ev_state)
1670 {
1671 struct vmcb_ctrl *ctrl;
1672 struct vmcb_state *state;
1673 int vector;
1674 uint8_t v_tpr;
1675
1676 state = svm_get_vmcb_state(sc, vcpu);
1677 ctrl = svm_get_vmcb_ctrl(sc, vcpu);
1678
1679 /*
1680 * The guest can modify the TPR by writing to %cr8. In guest mode the
1681 * CPU reflects this write to V_TPR without hypervisor intervention.
1682 *
1683 * The guest can also modify the TPR by writing to it via the memory
1684 * mapped APIC page. In this case, the write will be emulated by the
1685 * hypervisor. For this reason V_TPR must be updated before every
1686 * VMRUN.
1687 */
1688 v_tpr = vlapic_get_cr8(vlapic);
1689 KASSERT(v_tpr <= 15, ("invalid v_tpr %x", v_tpr));
1690 if (ctrl->v_tpr != v_tpr) {
1691 ctrl->v_tpr = v_tpr;
1692 svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
1693 }
1694
1695 /* If an event cannot otherwise be injected, we are done for now */
1696 if (ev_state != EIS_CAN_INJECT) {
1697 return (ev_state);
1698 }
1699
1700 if (!vlapic_pending_intr(vlapic, &vector)) {
1701 return (EIS_CAN_INJECT);
1702 }
1703 KASSERT(vector >= 16 && vector <= 255,
1704 ("invalid vector %d from local APIC", vector));
1705
1706 /*
1707 * If the guest has disabled interrupts or is in an interrupt shadow
1708 * then we cannot inject the pending interrupt.
1709 */
1710 if ((state->rflags & PSL_I) == 0 || ctrl->intr_shadow) {
1711 return (EIS_GI_BLOCK);
1712 }
1713
1714 svm_inject_irq(sc, vcpu, vector);
1715 vlapic_intr_accepted(vlapic, vector);
1716 return (EIS_EV_INJECTED);
1717 }
1718
1719 /*
1720 * Re-check for events to be injected.
1721 *
1722 * Once host CPU interrupts are disabled, check for the presence of any events
1723 * which require injection processing. If an exit is required upon injection,
1724 * or once the guest becomes interruptible, that will be configured too.
1725 */
1726 static bool
1727 svm_inject_recheck(struct svm_softc *sc, int vcpu,
1728 enum event_inject_state ev_state)
1729 {
1730 struct vmcb_ctrl *ctrl;
1731
1732 ctrl = svm_get_vmcb_ctrl(sc, vcpu);
1733
1734 if (ev_state == EIS_CAN_INJECT) {
1735 /*
1736 * An active interrupt shadow would preclude us from injecting
1737 * any events picked up during a re-check.
1738 */
1739 if (ctrl->intr_shadow != 0) {
1740 return (false);
1741 }
1742
1743 if (vm_nmi_pending(sc->vm, vcpu) &&
1744 !svm_nmi_blocked(sc, vcpu)) {
1745 /* queued NMI not blocked by NMI-window-exiting */
1746 return (true);
1747 }
1748 if (vm_extint_pending(sc->vm, vcpu)) {
1749 /* queued ExtINT not blocked by existing injection */
1750 return (true);
1751 }
1752 } else {
1753 if ((ev_state & EIS_REQ_EXIT) != 0) {
1754 /*
1755 * Use a self-IPI to force an immediate exit after
1756 * event injection has occurred.
1757 */
1758 poke_cpu(CPU->cpu_id);
1759 } else {
1760 /*
1761 * If any event is being injected, an exit immediately
1762 * upon becoming interruptible again will allow pending
1763 * or newly queued events to be injected in a timely
1764 * manner.
1765 */
1766 svm_enable_intr_window_exiting(sc, vcpu);
1767 }
1768 }
1769 return (false);
1770 }
1771
1772
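/*
 * Refresh this vCPU's ASID prior to VMRUN, asking HMA for a TLB flush when
 * the nested page table generation has changed since the vCPU last ran (or
 * when HMA otherwise deems one necessary).
 */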
1773 static void
1774 check_asid(struct svm_softc *sc, int vcpuid, uint_t thiscpu, uint64_t nptgen)
1775 {
1776 struct svm_vcpu *vcpustate = svm_get_vcpu(sc, vcpuid);
1777 struct vmcb_ctrl *ctrl = svm_get_vmcb_ctrl(sc, vcpuid);
1778 uint8_t flush;
1779
1780 flush = hma_svm_asid_update(&vcpustate->hma_asid, has_flush_by_asid(),
1781 vcpustate->nptgen != nptgen);
1782
1783 if (flush != VMCB_TLB_FLUSH_NOTHING) {
1784 ctrl->asid = vcpustate->hma_asid.hsa_asid;
1785 svm_set_dirty(sc, vcpuid, VMCB_CACHE_ASID);
1786 }
1787 ctrl->tlb_ctrl = flush;
1788 vcpustate->nptgen = nptgen;
1789 }
1790
1791 static void
1792 flush_asid(struct svm_softc *sc, int vcpuid)
1793 {
1794 struct svm_vcpu *vcpustate = svm_get_vcpu(sc, vcpuid);
1795 struct vmcb_ctrl *ctrl = svm_get_vmcb_ctrl(sc, vcpuid);
1796 uint8_t flush;
1797
1798 flush = hma_svm_asid_update(&vcpustate->hma_asid, has_flush_by_asid(),
1799 true);
1800
1801 ASSERT(flush != VMCB_TLB_FLUSH_NOTHING);
1802 ctrl->asid = vcpustate->hma_asid.hsa_asid;
1803 ctrl->tlb_ctrl = flush;
1804 svm_set_dirty(sc, vcpuid, VMCB_CACHE_ASID);
1805 /*
1806 * A potential future optimization: we could update the nptgen associated
1807 * with the vCPU here, since any pending nptgen change requiring a flush
1808 * will be satisfied by the flush which has just been queued.
1809 */
1810 }
1811
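/*
 * CLGI/STGI manipulate the global interrupt flag (GIF), masking and unmasking
 * physical interrupts on the host CPU around guest entry.
 */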
1812 static __inline void
1813 disable_gintr(void)
1814 {
1815 __asm __volatile("clgi");
1816 }
1817
1818 static __inline void
1819 enable_gintr(void)
1820 {
1821 __asm __volatile("stgi");
1822 }
1823
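/* Swap host debug register state for the guest's prior to VMRUN. */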
1824 static __inline void
1825 svm_dr_enter_guest(struct svm_regctx *gctx)
1826 {
1827
1828 /* Save host control debug registers. */
1829 gctx->host_dr7 = rdr7();
1830 gctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR);
1831
1832 /*
1833 * Disable debugging in DR7 and DEBUGCTL to avoid triggering
1834 * exceptions in the host based on the guest DRx values. The
1835 * guest DR6, DR7, and DEBUGCTL are saved/restored in the
1836 * VMCB.
1837 */
1838 load_dr7(0);
1839 wrmsr(MSR_DEBUGCTLMSR, 0);
1840
1841 /* Save host debug registers. */
1842 gctx->host_dr0 = rdr0();
1843 gctx->host_dr1 = rdr1();
1844 gctx->host_dr2 = rdr2();
1845 gctx->host_dr3 = rdr3();
1846 gctx->host_dr6 = rdr6();
1847
1848 /* Restore guest debug registers. */
1849 load_dr0(gctx->sctx_dr0);
1850 load_dr1(gctx->sctx_dr1);
1851 load_dr2(gctx->sctx_dr2);
1852 load_dr3(gctx->sctx_dr3);
1853 }
1854
1855 static __inline void
1856 svm_dr_leave_guest(struct svm_regctx *gctx)
1857 {
1858
1859 /* Save guest debug registers. */
1860 gctx->sctx_dr0 = rdr0();
1861 gctx->sctx_dr1 = rdr1();
1862 gctx->sctx_dr2 = rdr2();
1863 gctx->sctx_dr3 = rdr3();
1864
1865 /*
1866 * Restore host debug registers. Restore DR7 and DEBUGCTL
1867 * last.
1868 */
1869 load_dr0(gctx->host_dr0);
1870 load_dr1(gctx->host_dr1);
1871 load_dr2(gctx->host_dr2);
1872 load_dr3(gctx->host_dr3);
1873 load_dr6(gctx->host_dr6);
1874 wrmsr(MSR_DEBUGCTLMSR, gctx->host_debugctl);
1875 load_dr7(gctx->host_dr7);
1876 }
1877
1878 /*
1879 * Apply the TSC offset for a vCPU, including physical CPU and per-vCPU offsets.
1880 */
1881 static void
1882 svm_apply_tsc_adjust(struct svm_softc *svm_sc, int vcpuid)
1883 {
1884 const uint64_t offset = vcpu_tsc_offset(svm_sc->vm, vcpuid, true);
1885 struct vmcb_ctrl *ctrl = svm_get_vmcb_ctrl(svm_sc, vcpuid);
1886
1887 if (ctrl->tsc_offset != offset) {
1888 ctrl->tsc_offset = offset;
1889 svm_set_dirty(svm_sc, vcpuid, VMCB_CACHE_I);
1890 }
1891 }
1892
1893 /*
1894 * Start vcpu with specified RIP.
1895 */
1896 static int
1897 svm_vmrun(void *arg, int vcpu, uint64_t rip)
1898 {
1899 struct svm_regctx *gctx;
1900 struct svm_softc *svm_sc;
1901 struct svm_vcpu *vcpustate;
1902 struct vmcb_state *state;
1903 struct vmcb_ctrl *ctrl;
1904 struct vm_exit *vmexit;
1905 struct vlapic *vlapic;
1906 vm_client_t *vmc;
1907 struct vm *vm;
1908 uint64_t vmcb_pa;
1909 int handled;
1910 uint16_t ldt_sel;
1911
1912 svm_sc = arg;
1913 vm = svm_sc->vm;
1914
1915 vcpustate = svm_get_vcpu(svm_sc, vcpu);
1916 state = svm_get_vmcb_state(svm_sc, vcpu);
1917 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
1918 vmexit = vm_exitinfo(vm, vcpu);
1919 vlapic = vm_lapic(vm, vcpu);
1920 vmc = vm_get_vmclient(vm, vcpu);
1921
1922 gctx = svm_get_guest_regctx(svm_sc, vcpu);
1923 vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa;
1924
1925 if (vcpustate->lastcpu != curcpu) {
1926 /*
1927 * Force new ASID allocation by invalidating the generation.
1928 */
1929 vcpustate->hma_asid.hsa_gen = 0;
1930
1931 /*
1932 * Invalidate the VMCB state cache by marking all fields dirty.
1933 */
1934 svm_set_dirty(svm_sc, vcpu, 0xffffffff);
1935
1936 /*
1937 * XXX
1938 * Setting 'vcpustate->lastcpu' here is a bit premature because
1939 * we may return from this function without actually executing
1940 * the VMRUN instruction. This could happen if an AST or yield
1941 * condition is pending on the first time through the loop.
1942 *
1943 * This works for now but any new side-effects of vcpu
1944 * migration should take this case into account.
1945 */
1946 vcpustate->lastcpu = curcpu;
1947 vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1);
1948 }
1949
1950 svm_apply_tsc_adjust(svm_sc, vcpu);
1951
1952 svm_msr_guest_enter(svm_sc, vcpu);
1953
1954 VERIFY(!vcpustate->loaded && curthread->t_preempt != 0);
1955 vcpustate->loaded = B_TRUE;
1956
1957 /* Update Guest RIP */
1958 state->rip = rip;
1959
1960 do {
1961 enum event_inject_state inject_state;
1962 uint64_t nptgen;
1963
1964 /*
1965 * Initial event injection is complex and may involve mutex
1966 * contention, so it must be performed with global interrupts
1967 * still enabled.
1968 */
1969 inject_state = svm_inject_events(svm_sc, vcpu);
1970 handled = 0;
1971
1972 /*
1973 * Disable global interrupts to guarantee atomicity during
1974 * loading of guest state. This includes not only the state
1975 * loaded by the "vmrun" instruction but also software state
1976 * maintained by the hypervisor: suspended and rendezvous
1977 * state, NPT generation number, vlapic interrupts etc.
1978 */
1979 disable_gintr();
1980
1981 /*
1982 * Synchronizing and injecting vlapic state is lock-free and is
1983 * safe (and prudent) to perform with interrupts disabled.
1984 */
1985 inject_state = svm_inject_vlapic(svm_sc, vcpu, vlapic,
1986 inject_state);
1987
1988 /*
1989 * Check for vCPU bail-out conditions. This must be done after
1990 * svm_inject_events() to detect a triple-fault condition.
1991 */
1992 if (vcpu_entry_bailout_checks(vm, vcpu, state->rip)) {
1993 enable_gintr();
1994 break;
1995 }
1996
1997 if (vcpu_run_state_pending(vm, vcpu)) {
1998 enable_gintr();
1999 vm_exit_run_state(vm, vcpu, state->rip);
2000 break;
2001 }
2002
2003 /*
2004 * If subsequent activity queued events which require injection
2005 * handling, take another lap to handle them.
2006 */
2007 if (svm_inject_recheck(svm_sc, vcpu, inject_state)) {
2008 enable_gintr();
2009 handled = 1;
2010 continue;
2011 }
2012
2013 /*
2014 * #VMEXIT resumes the host with the guest LDTR, so
2015 * save the current LDT selector so it can be restored
2016 * after an exit. The userspace hypervisor probably
2017 * doesn't use an LDT, but save and restore it to be
2018 * safe.
2019 */
2020 ldt_sel = sldt();
2021
2022 /*
2023 * Check the vmspace and ASID generations to ensure that the
2024 * vcpu does not use stale TLB mappings.
2025 */
2026 nptgen = vmc_table_enter(vmc);
2027 check_asid(svm_sc, vcpu, curcpu, nptgen);
2028
2029 ctrl->vmcb_clean = vmcb_clean & ~vcpustate->dirty;
2030 vcpustate->dirty = 0;
2031
2032 /* Launch Virtual Machine. */
2033 vcpu_ustate_change(vm, vcpu, VU_RUN);
2034 svm_dr_enter_guest(gctx);
2035 svm_launch(vmcb_pa, gctx, get_pcpu());
2036 svm_dr_leave_guest(gctx);
2037 vcpu_ustate_change(vm, vcpu, VU_EMU_KERN);
2038
2039 /* Restore host LDTR. */
2040 lldt(ldt_sel);
2041
2042 /* #VMEXIT disables interrupts so re-enable them here. */
2043 enable_gintr();
2044
2045 vmc_table_exit(vmc);
2046
2047 /* Update 'nextrip' */
2048 vcpustate->nextrip = state->rip;
2049
2050 /* Handle #VMEXIT and if required return to user space. */
2051 handled = svm_vmexit(svm_sc, vcpu, vmexit);
2052 } while (handled);
2053
2054 svm_msr_guest_exit(svm_sc, vcpu);
2055
2056 VERIFY(vcpustate->loaded && curthread->t_preempt != 0);
2057 vcpustate->loaded = B_FALSE;
2058
2059 return (0);
2060 }
2061
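/* Free the I/O and MSR intercept bitmaps along with the per-VM softc. */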
2062 static void
2063 svm_vmcleanup(void *arg)
2064 {
2065 struct svm_softc *sc = arg;
2066
2067 vmm_contig_free(sc->iopm_bitmap, SVM_IO_BITMAP_SIZE);
2068 vmm_contig_free(sc->msr_bitmap, SVM_MSR_BITMAP_SIZE);
2069 kmem_free(sc, sizeof (*sc));
2070 }
2071
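/*
 * Translate a VM_REG_GUEST_* identifier into a pointer within the
 * software-maintained register context, or NULL if the register is instead
 * held in the VMCB.
 */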
2072 static uint64_t *
2073 swctx_regptr(struct svm_regctx *regctx, int reg)
2074 {
2075 switch (reg) {
2076 case VM_REG_GUEST_RBX:
2077 return (&regctx->sctx_rbx);
2078 case VM_REG_GUEST_RCX:
2079 return (&regctx->sctx_rcx);
2080 case VM_REG_GUEST_RDX:
2081 return (&regctx->sctx_rdx);
2082 case VM_REG_GUEST_RDI:
2083 return (&regctx->sctx_rdi);
2084 case VM_REG_GUEST_RSI:
2085 return (&regctx->sctx_rsi);
2086 case VM_REG_GUEST_RBP:
2087 return (&regctx->sctx_rbp);
2088 case VM_REG_GUEST_R8:
2089 return (&regctx->sctx_r8);
2090 case VM_REG_GUEST_R9:
2091 return (&regctx->sctx_r9);
2092 case VM_REG_GUEST_R10:
2093 return (&regctx->sctx_r10);
2094 case VM_REG_GUEST_R11:
2095 return (&regctx->sctx_r11);
2096 case VM_REG_GUEST_R12:
2097 return (&regctx->sctx_r12);
2098 case VM_REG_GUEST_R13:
2099 return (&regctx->sctx_r13);
2100 case VM_REG_GUEST_R14:
2101 return (&regctx->sctx_r14);
2102 case VM_REG_GUEST_R15:
2103 return (&regctx->sctx_r15);
2104 case VM_REG_GUEST_DR0:
2105 return (&regctx->sctx_dr0);
2106 case VM_REG_GUEST_DR1:
2107 return (&regctx->sctx_dr1);
2108 case VM_REG_GUEST_DR2:
2109 return (&regctx->sctx_dr2);
2110 case VM_REG_GUEST_DR3:
2111 return (&regctx->sctx_dr3);
2112 default:
2113 return (NULL);
2114 }
2115 }
2116
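/*
 * Fetch the value of a guest register, consulting the software register
 * context first and falling back to the VMCB for state maintained by
 * hardware.
 */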
2117 static int
2118 svm_getreg(void *arg, int vcpu, int ident, uint64_t *val)
2119 {
2120 struct svm_softc *sc;
2121 struct vmcb *vmcb;
2122 uint64_t *regp;
2123 uint64_t *fieldp;
2124 struct vmcb_segment *seg;
2125
2126 sc = arg;
2127 vmcb = svm_get_vmcb(sc, vcpu);
2128
2129 regp = swctx_regptr(svm_get_guest_regctx(sc, vcpu), ident);
2130 if (regp != NULL) {
2131 *val = *regp;
2132 return (0);
2133 }
2134
2135 switch (ident) {
2136 case VM_REG_GUEST_INTR_SHADOW:
2137 *val = (vmcb->ctrl.intr_shadow != 0) ? 1 : 0;
2138 break;
2139
2140 case VM_REG_GUEST_CR0:
2141 svm_get_cr0(sc, vcpu, val);
2142 break;
2143 case VM_REG_GUEST_CR2:
2144 case VM_REG_GUEST_CR3:
2145 case VM_REG_GUEST_CR4:
2146 case VM_REG_GUEST_DR6:
2147 case VM_REG_GUEST_DR7:
2148 case VM_REG_GUEST_EFER:
2149 case VM_REG_GUEST_RAX:
2150 case VM_REG_GUEST_RFLAGS:
2151 case VM_REG_GUEST_RIP:
2152 case VM_REG_GUEST_RSP:
2153 fieldp = vmcb_regptr(vmcb, ident, NULL);
2154 *val = *fieldp;
2155 break;
2156
2157 case VM_REG_GUEST_CS:
2158 case VM_REG_GUEST_DS:
2159 case VM_REG_GUEST_ES:
2160 case VM_REG_GUEST_FS:
2161 case VM_REG_GUEST_GS:
2162 case VM_REG_GUEST_SS:
2163 case VM_REG_GUEST_LDTR:
2164 case VM_REG_GUEST_TR:
2165 seg = vmcb_segptr(vmcb, ident);
2166 *val = seg->selector;
2167 break;
2168
2169 case VM_REG_GUEST_GDTR:
2170 case VM_REG_GUEST_IDTR:
2171 /* GDTR and IDTR don't have segment selectors */
2172 return (EINVAL);
2173
2174 case VM_REG_GUEST_PDPTE0:
2175 case VM_REG_GUEST_PDPTE1:
2176 case VM_REG_GUEST_PDPTE2:
2177 case VM_REG_GUEST_PDPTE3:
2178 /*
2179 * Unlike VMX, where the PDPTEs are explicitly cached as part of
2180 * several well-defined events related to paging (such as
2181 * loading %cr3), SVM walks the PDPEs (AMD's name for the PDPTEs) as
2182 * part of nested paging lookups. This makes these registers
2183 * effectively irrelevant on SVM.
2184 *
2185 * Rather than tossing an error, emit zeroed values so casual
2186 * consumers do not need to be as careful about that difference.
2187 */
2188 *val = 0;
2189 break;
2190
2191 default:
2192 return (EINVAL);
2193 }
2194
2195 return (0);
2196 }
2197
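/*
 * Set the value of a guest register, marking the relevant VMCB cache section
 * dirty when the destination resides in the VMCB.
 */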
2198 static int
2199 svm_setreg(void *arg, int vcpu, int ident, uint64_t val)
2200 {
2201 struct svm_softc *sc;
2202 struct vmcb *vmcb;
2203 uint64_t *regp;
2204 uint64_t *fieldp;
2205 uint32_t dirty;
2206 struct vmcb_segment *seg;
2207
2208 sc = arg;
2209 vmcb = svm_get_vmcb(sc, vcpu);
2210
2211 regp = swctx_regptr(svm_get_guest_regctx(sc, vcpu), ident);
2212 if (regp != NULL) {
2213 *regp = val;
2214 return (0);
2215 }
2216
2217 dirty = VMCB_CACHE_NONE;
2218 switch (ident) {
2219 case VM_REG_GUEST_INTR_SHADOW:
2220 vmcb->ctrl.intr_shadow = (val != 0) ? 1 : 0;
2221 break;
2222
2223 case VM_REG_GUEST_EFER:
2224 fieldp = vmcb_regptr(vmcb, ident, &dirty);
2225 /* EFER_SVM must always be set when the guest is executing */
2226 *fieldp = val | EFER_SVM;
2227 dirty |= VMCB_CACHE_CR;
2228 break;
2229
2230 case VM_REG_GUEST_CR0:
2231 svm_set_cr0(sc, vcpu, val, false);
2232 break;
2233 case VM_REG_GUEST_CR2:
2234 case VM_REG_GUEST_CR3:
2235 case VM_REG_GUEST_CR4:
2236 case VM_REG_GUEST_DR6:
2237 case VM_REG_GUEST_DR7:
2238 case VM_REG_GUEST_RAX:
2239 case VM_REG_GUEST_RFLAGS:
2240 case VM_REG_GUEST_RIP:
2241 case VM_REG_GUEST_RSP:
2242 fieldp = vmcb_regptr(vmcb, ident, &dirty);
2243 *fieldp = val;
2244 break;
2245
2246 case VM_REG_GUEST_CS:
2247 case VM_REG_GUEST_DS:
2248 case VM_REG_GUEST_ES:
2249 case VM_REG_GUEST_SS:
2250 case VM_REG_GUEST_FS:
2251 case VM_REG_GUEST_GS:
2252 case VM_REG_GUEST_LDTR:
2253 case VM_REG_GUEST_TR:
2254 dirty |= VMCB_CACHE_SEG;
2255 seg = vmcb_segptr(vmcb, ident);
2256 seg->selector = (uint16_t)val;
2257 break;
2258
2259 case VM_REG_GUEST_GDTR:
2260 case VM_REG_GUEST_IDTR:
2261 /* GDTR and IDTR don't have segment selectors */
2262 return (EINVAL);
2263
2264 case VM_REG_GUEST_PDPTE0:
2265 case VM_REG_GUEST_PDPTE1:
2266 case VM_REG_GUEST_PDPTE2:
2267 case VM_REG_GUEST_PDPTE3:
2268 /*
2269 * PDPEs (AMD's PDPTE) are not cached under SVM, so we can
2270 * ignore attempts to set them. See handler in svm_getreg() for
2271 * more details.
2272 */
2273 break;
2274
2275 default:
2276 return (EINVAL);
2277 }
2278
2279 if (dirty != VMCB_CACHE_NONE) {
2280 svm_set_dirty(sc, vcpu, dirty);
2281 }
2282
2283 /*
2284 * XXX deal with CR3 and invalidate TLB entries tagged with the
2285 * vcpu's ASID. This needs to be treated differently depending on
2286 * whether 'running' is true/false.
2287 */
2288
2289 return (0);
2290 }
2291
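/* Set the base, limit, and attributes of a guest segment descriptor. */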
2292 static int
2293 svm_setdesc(void *arg, int vcpu, int reg, const struct seg_desc *desc)
2294 {
2295 struct vmcb *vmcb;
2296 struct svm_softc *sc;
2297 struct vmcb_segment *seg;
2298
2299 sc = arg;
2300 vmcb = svm_get_vmcb(sc, vcpu);
2301
2302 switch (reg) {
2303 case VM_REG_GUEST_CS:
2304 case VM_REG_GUEST_DS:
2305 case VM_REG_GUEST_ES:
2306 case VM_REG_GUEST_SS:
2307 case VM_REG_GUEST_FS:
2308 case VM_REG_GUEST_GS:
2309 case VM_REG_GUEST_LDTR:
2310 case VM_REG_GUEST_TR:
2311 svm_set_dirty(sc, vcpu, VMCB_CACHE_SEG);
2312 seg = vmcb_segptr(vmcb, reg);
2313 /*
2314 * Map seg_desc access to VMCB attribute format.
2315 *
2316 * SVM uses the 'P' bit in the segment attributes to indicate a
2317 * NULL segment so clear it if the segment is marked unusable.
2318 */
2319 seg->attrib = VMCB_ACCESS2ATTR(desc->access);
2320 if (SEG_DESC_UNUSABLE(desc->access)) {
2321 seg->attrib &= ~0x80;
2322 }
2323 /*
2324 * Keep CPL synced with the DPL specified for %ss.
2325 *
2326 * KVM notes that a SYSRET to non-CPL-3 is possible on AMD
2327 * (unlike Intel), but accepts that deviation, since such a
2328 * SYSRET would be unreasonable behavior for a guest OS in the
2329 * first place; KVM performs the same synchronization.
2330 */
2331 if (reg == VM_REG_GUEST_SS) {
2332 vmcb->state.cpl = SEG_DESC_DPL(desc->access);
2333 }
2334 break;
2335
2336 case VM_REG_GUEST_GDTR:
2337 case VM_REG_GUEST_IDTR:
2338 svm_set_dirty(sc, vcpu, VMCB_CACHE_DT);
2339 seg = vmcb_segptr(vmcb, reg);
2340 break;
2341
2342 default:
2343 return (EINVAL);
2344 }
2345
2346 ASSERT(seg != NULL);
2347 seg->base = desc->base;
2348 seg->limit = desc->limit;
2349
2350 return (0);
2351 }
2352
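/*
 * Read back a guest segment descriptor, presenting its access bits in the
 * VT-x-style format expected by the processor-independent code.
 */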
2353 static int
2354 svm_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
2355 {
2356 struct vmcb *vmcb;
2357 struct svm_softc *sc;
2358 struct vmcb_segment *seg;
2359
2360 sc = arg;
2361 vmcb = svm_get_vmcb(sc, vcpu);
2362
2363 switch (reg) {
2364 case VM_REG_GUEST_DS:
2365 case VM_REG_GUEST_ES:
2366 case VM_REG_GUEST_FS:
2367 case VM_REG_GUEST_GS:
2368 case VM_REG_GUEST_SS:
2369 case VM_REG_GUEST_LDTR:
2370 seg = vmcb_segptr(vmcb, reg);
2371 desc->access = VMCB_ATTR2ACCESS(seg->attrib);
2372 /*
2373 * VT-x uses bit 16 to indicate a segment that has been loaded
2374 * with a NULL selector (aka unusable). The 'desc->access'
2375 * field is interpreted in the VT-x format by the
2376 * processor-independent code.
2377 *
2378 * SVM uses the 'P' bit to convey the same information so
2379 * convert it into the VT-x format. For more details refer to
2380 * section "Segment State in the VMCB" in APMv2.
2381 */
2382 if ((desc->access & 0x80) == 0) {
2383 /* Unusable segment */
2384 desc->access |= 0x10000;
2385 }
2386
2387 /*
2388 * Just as CPL (in the VMCB) is kept synced to SS when the
2389 * segment is written, so too shall the segment sync from CPL
2390 * when it is read.
2391 */
2392 if (reg == VM_REG_GUEST_SS) {
2393 desc->access &=
2394 ~(SEG_DESC_DPL_MASK << SEG_DESC_DPL_SHIFT);
2395 desc->access |=
2396 (vmcb->state.cpl & SEG_DESC_DPL_MASK) <<
2397 SEG_DESC_DPL_SHIFT;
2398 }
2399 break;
2400
2401 case VM_REG_GUEST_CS:
2402 case VM_REG_GUEST_TR:
2403 seg = vmcb_segptr(vmcb, reg);
2404 desc->access = VMCB_ATTR2ACCESS(seg->attrib);
2405 break;
2406
2407 case VM_REG_GUEST_GDTR:
2408 case VM_REG_GUEST_IDTR:
2409 seg = vmcb_segptr(vmcb, reg);
2410 /*
2411 * Since there are no access bits associated with the GDTR or
2412 * the IDTR, zero out the field to ensure it does not contain
2413 * garbage which might confuse the consumer.
2414 */
2415 desc->access = 0;
2416 break;
2417
2418 default:
2419 return (EINVAL);
2420 }
2421
2422 ASSERT(seg != NULL);
2423 desc->base = seg->base;
2424 desc->limit = seg->limit;
2425 return (0);
2426 }
2427
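/* Read an MSR whose value is stored directly in the VMCB. */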
2428 static int
2429 svm_get_msr(void *arg, int vcpu, uint32_t msr, uint64_t *valp)
2430 {
2431 struct svm_softc *sc = arg;
2432 struct vmcb *vmcb = svm_get_vmcb(sc, vcpu);
2433 const uint64_t *msrp = vmcb_msr_ptr(vmcb, msr, NULL);
2434
2435 if (msrp != NULL) {
2436 *valp = *msrp;
2437 return (0);
2438 }
2439
2440 return (EINVAL);
2441 }
2442
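/*
 * Write an MSR whose value is stored directly in the VMCB, applying any
 * required fix-ups (such as keeping EFER_SVM set).
 */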
2443 static int
2444 svm_set_msr(void *arg, int vcpu, uint32_t msr, uint64_t val)
2445 {
2446 struct svm_softc *sc = arg;
2447 struct vmcb *vmcb = svm_get_vmcb(sc, vcpu);
2448
2449 uint32_t dirty = 0;
2450 uint64_t *msrp = vmcb_msr_ptr(vmcb, msr, &dirty);
2451 if (msrp == NULL) {
2452 return (EINVAL);
2453 }
2454 switch (msr) {
2455 case MSR_EFER:
2456 /*
2457 * For now, just clone the logic from
2458 * svm_setreg():
2459 *
2460 * EFER_SVM must always be set when the guest is
2461 * executing
2462 */
2463 *msrp = val | EFER_SVM;
2464 break;
2465 /* TODO: other necessary MSR masking */
2466 default:
2467 *msrp = val;
2468 break;
2469 }
2470 if (dirty != 0) {
2471 svm_set_dirty(sc, vcpu, dirty);
2472 }
2473 return (0);
2474
2475 }
2476
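/* Enable or disable an optional capability (currently HLT and PAUSE exits). */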
2477 static int
2478 svm_setcap(void *arg, int vcpu, int type, int val)
2479 {
2480 struct svm_softc *sc;
2481 int error;
2482
2483 sc = arg;
2484 error = 0;
2485 switch (type) {
2486 case VM_CAP_HALT_EXIT:
2487 svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
2488 VMCB_INTCPT_HLT, val);
2489 break;
2490 case VM_CAP_PAUSE_EXIT:
2491 svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
2492 VMCB_INTCPT_PAUSE, val);
2493 break;
2494 default:
2495 error = ENOENT;
2496 break;
2497 }
2498 return (error);
2499 }
2500
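/* Query the current state of an optional capability. */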
2501 static int
2502 svm_getcap(void *arg, int vcpu, int type, int *retval)
2503 {
2504 struct svm_softc *sc;
2505 int error;
2506
2507 sc = arg;
2508 error = 0;
2509
2510 switch (type) {
2511 case VM_CAP_HALT_EXIT:
2512 *retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
2513 VMCB_INTCPT_HLT);
2514 break;
2515 case VM_CAP_PAUSE_EXIT:
2516 *retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
2517 VMCB_INTCPT_PAUSE);
2518 break;
2519 default:
2520 error = ENOENT;
2521 break;
2522 }
2523 return (error);
2524 }
2525
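/* Allocate and initialize the virtual local APIC backing a vCPU. */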
2526 static struct vlapic *
2527 svm_vlapic_init(void *arg, int vcpuid)
2528 {
2529 struct svm_softc *svm_sc;
2530 struct vlapic *vlapic;
2531
2532 svm_sc = arg;
2533 vlapic = kmem_zalloc(sizeof (struct vlapic), KM_SLEEP);
2534 vlapic->vm = svm_sc->vm;
2535 vlapic->vcpuid = vcpuid;
2536 vlapic->apic_page = (struct LAPIC *)&svm_sc->apic_page[vcpuid];
2537
2538 vlapic_init(vlapic);
2539
2540 return (vlapic);
2541 }
2542
2543 static void
2544 svm_vlapic_cleanup(void *arg, struct vlapic *vlapic)
2545 {
2546 vlapic_cleanup(vlapic);
2547 kmem_free(vlapic, sizeof (struct vlapic));
2548 }
2549
2550 static void
2551 svm_pause(void *arg, int vcpu)
2552 {
2553 struct svm_softc *sc = arg;
2554 struct vmcb_ctrl *ctrl = svm_get_vmcb_ctrl(sc, vcpu);
2555
2556 /*
2557 * If an event is pending injection in the VMCB, stash it in
2558 * exit_intinfo as if it were deferred by an exit from guest context.
2559 */
2560 const uint64_t intinfo = ctrl->eventinj;
2561 if ((intinfo & VMCB_EVENTINJ_VALID) != 0) {
2562 svm_stash_intinfo(sc, vcpu, intinfo);
2563 ctrl->eventinj = 0;
2564 }
2565
2566 /*
2567 * Now that no event is pending injection, interrupt-window exiting and
2568 * NMI-blocking can be disabled. If/when this vCPU is made to run
2569 * again, those conditions will be reinstated when the now-queued events
2570 * are re-injected.
2571 */
2572 svm_disable_intr_window_exiting(sc, vcpu);
2573 svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
2574 }
2575
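/*
 * Save or restore guest MSR state around host thread context switches while
 * the vCPU remains loaded on a CPU.
 */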
2576 static void
2577 svm_savectx(void *arg, int vcpu)
2578 {
2579 struct svm_softc *sc = arg;
2580
2581 if (sc->vcpu[vcpu].loaded) {
2582 svm_msr_guest_exit(sc, vcpu);
2583 }
2584 }
2585
2586 static void
2587 svm_restorectx(void *arg, int vcpu)
2588 {
2589 struct svm_softc *sc = arg;
2590
2591 if (sc->vcpu[vcpu].loaded) {
2592 svm_msr_guest_enter(sc, vcpu);
2593 }
2594 }
2595
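/*
 * Compute the fixed-point TSC frequency multiplier for a guest:host frequency
 * pair. As an illustration (assuming AMD_TSCM_FRAC_SIZE corresponds to the 32
 * fraction bits of the TSC ratio MSR), a 1.5 GHz guest on a 3.0 GHz host
 * would yield a multiplier of 0.5, encoded as 0x0000000080000000.
 */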
2596 static freqratio_res_t
2597 svm_freq_ratio(uint64_t guest_hz, uint64_t host_hz, uint64_t *mult)
2598 {
2599 /*
2600 * Check whether scaling is needed at all before potentially erroring
2601 * out for other reasons.
2602 */
2603 if (guest_hz == host_hz) {
2604 return (FR_SCALING_NOT_NEEDED);
2605 }
2606
2607 /*
2608 * Confirm that scaling is available.
2609 */
2610 if (!has_tsc_freq_ctl()) {
2611 return (FR_SCALING_NOT_SUPPORTED);
2612 }
2613
2614 /*
2615 * Verify the guest_hz is within the supported range.
2616 */
2617 if ((guest_hz < AMD_TSC_MIN_FREQ) ||
2618 (guest_hz >= (host_hz * AMD_TSC_MAX_FREQ_RATIO))) {
2619 return (FR_OUT_OF_RANGE);
2620 }
2621
2622 /* Calculate the multiplier. */
2623 uint64_t m = vmm_calc_freq_multiplier(guest_hz, host_hz,
2624 AMD_TSCM_FRAC_SIZE);
2625 *mult = m;
2626
2627 return (FR_VALID);
2628 }
2629
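/*
 * Dispatch table of SVM-specific entry points consumed by the
 * CPU-vendor-independent portion of the VMM.
 */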
2630 struct vmm_ops vmm_ops_amd = {
2631 .init = svm_init,
2632 .cleanup = svm_cleanup,
2633 .resume = svm_restore,
2634
2635 .vminit = svm_vminit,
2636 .vmrun = svm_vmrun,
2637 .vmcleanup = svm_vmcleanup,
2638 .vmgetreg = svm_getreg,
2639 .vmsetreg = svm_setreg,
2640 .vmgetdesc = svm_getdesc,
2641 .vmsetdesc = svm_setdesc,
2642 .vmgetcap = svm_getcap,
2643 .vmsetcap = svm_setcap,
2644 .vlapic_init = svm_vlapic_init,
2645 .vlapic_cleanup = svm_vlapic_cleanup,
2646 .vmpause = svm_pause,
2647
2648 .vmsavectx = svm_savectx,
2649 .vmrestorectx = svm_restorectx,
2650
2651 .vmgetmsr = svm_get_msr,
2652 .vmsetmsr = svm_set_msr,
2653
2654 .vmfreqratio = svm_freq_ratio,
2655 .fr_intsize = AMD_TSCM_INT_SIZE,
2656 .fr_fracsize = AMD_TSCM_FRAC_SIZE,
2657 };
2658