/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * Copyright 2014 Pluribus Networks Inc.
 * Copyright 2017 Joyent, Inc.
 * Copyright 2020 Oxide Computer Company
 */

#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/systm.h>

#include <machine/specialreg.h>
#include <machine/vmm.h>
#include "vmx.h"

/* Bits 0-30 of VMX_BASIC MSR contain VMCS revision identifier */
#define	VMX_BASIC_REVISION(v)	((v) & 0x7fffffff)

uint32_t
vmcs_field_encoding(int ident)
{
	switch (ident) {
	case VM_REG_GUEST_CR0:
		return (VMCS_GUEST_CR0);
	case VM_REG_GUEST_CR3:
		return (VMCS_GUEST_CR3);
	case VM_REG_GUEST_CR4:
		return (VMCS_GUEST_CR4);
	case VM_REG_GUEST_DR7:
		return (VMCS_GUEST_DR7);
	case VM_REG_GUEST_RSP:
		return (VMCS_GUEST_RSP);
	case VM_REG_GUEST_RIP:
		return (VMCS_GUEST_RIP);
	case VM_REG_GUEST_RFLAGS:
		return (VMCS_GUEST_RFLAGS);
	case VM_REG_GUEST_ES:
		return (VMCS_GUEST_ES_SELECTOR);
	case VM_REG_GUEST_CS:
		return (VMCS_GUEST_CS_SELECTOR);
	case VM_REG_GUEST_SS:
		return (VMCS_GUEST_SS_SELECTOR);
	case VM_REG_GUEST_DS:
		return (VMCS_GUEST_DS_SELECTOR);
	case VM_REG_GUEST_FS:
		return (VMCS_GUEST_FS_SELECTOR);
	case VM_REG_GUEST_GS:
		return (VMCS_GUEST_GS_SELECTOR);
	case VM_REG_GUEST_TR:
		return (VMCS_GUEST_TR_SELECTOR);
	case VM_REG_GUEST_LDTR:
		return (VMCS_GUEST_LDTR_SELECTOR);
	case VM_REG_GUEST_EFER:
		return (VMCS_GUEST_IA32_EFER);
	case VM_REG_GUEST_PDPTE0:
		return (VMCS_GUEST_PDPTE0);
	case VM_REG_GUEST_PDPTE1:
		return (VMCS_GUEST_PDPTE1);
	case VM_REG_GUEST_PDPTE2:
		return (VMCS_GUEST_PDPTE2);
	case VM_REG_GUEST_PDPTE3:
		return (VMCS_GUEST_PDPTE3);
	case VM_REG_GUEST_ENTRY_INST_LENGTH:
		return (VMCS_ENTRY_INST_LENGTH);
	default:
		return (VMCS_INVALID_ENCODING);
	}
}

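/*
 * Illustrative sketch, not part of the original source: a typical caller
 * translates a VM_REG_* identifier through vmcs_field_encoding() and checks
 * for VMCS_INVALID_ENCODING before touching the VMCS.  The helper name is
 * hypothetical, and it assumes the vmcs_read() declaration (pulled in via
 * "vmx.h") is visible here.
 */
static inline int
vmcs_read_reg_example(int ident, uint64_t *valp)
{
	uint32_t encoding = vmcs_field_encoding(ident);

	/* Identifiers with no direct VMCS field yield an invalid encoding */
	if (encoding == VMCS_INVALID_ENCODING)
		return (EINVAL);

	/* Assumes the VMCS of interest is already loaded via vmcs_load() */
	*valp = vmcs_read(encoding);
	return (0);
}
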
void
vmcs_seg_desc_encoding(int seg, uint32_t *base, uint32_t *lim, uint32_t *acc)
{
	switch (seg) {
	case VM_REG_GUEST_ES:
		*base = VMCS_GUEST_ES_BASE;
		*lim = VMCS_GUEST_ES_LIMIT;
		*acc = VMCS_GUEST_ES_ACCESS_RIGHTS;
		break;
	case VM_REG_GUEST_CS:
		*base = VMCS_GUEST_CS_BASE;
		*lim = VMCS_GUEST_CS_LIMIT;
		*acc = VMCS_GUEST_CS_ACCESS_RIGHTS;
		break;
	case VM_REG_GUEST_SS:
		*base = VMCS_GUEST_SS_BASE;
		*lim = VMCS_GUEST_SS_LIMIT;
		*acc = VMCS_GUEST_SS_ACCESS_RIGHTS;
		break;
	case VM_REG_GUEST_DS:
		*base = VMCS_GUEST_DS_BASE;
		*lim = VMCS_GUEST_DS_LIMIT;
		*acc = VMCS_GUEST_DS_ACCESS_RIGHTS;
		break;
	case VM_REG_GUEST_FS:
		*base = VMCS_GUEST_FS_BASE;
		*lim = VMCS_GUEST_FS_LIMIT;
		*acc = VMCS_GUEST_FS_ACCESS_RIGHTS;
		break;
	case VM_REG_GUEST_GS:
		*base = VMCS_GUEST_GS_BASE;
		*lim = VMCS_GUEST_GS_LIMIT;
		*acc = VMCS_GUEST_GS_ACCESS_RIGHTS;
		break;
	case VM_REG_GUEST_TR:
		*base = VMCS_GUEST_TR_BASE;
		*lim = VMCS_GUEST_TR_LIMIT;
		*acc = VMCS_GUEST_TR_ACCESS_RIGHTS;
		break;
	case VM_REG_GUEST_LDTR:
		*base = VMCS_GUEST_LDTR_BASE;
		*lim = VMCS_GUEST_LDTR_LIMIT;
		*acc = VMCS_GUEST_LDTR_ACCESS_RIGHTS;
		break;
	case VM_REG_GUEST_IDTR:
		*base = VMCS_GUEST_IDTR_BASE;
		*lim = VMCS_GUEST_IDTR_LIMIT;
		*acc = VMCS_INVALID_ENCODING;
		break;
	case VM_REG_GUEST_GDTR:
		*base = VMCS_GUEST_GDTR_BASE;
		*lim = VMCS_GUEST_GDTR_LIMIT;
		*acc = VMCS_INVALID_ENCODING;
		break;
	default:
		panic("invalid segment register %d", seg);
	}
}

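/*
 * Illustrative sketch, not part of the original source: fetching a guest
 * segment descriptor through the encodings above.  IDTR and GDTR carry no
 * access-rights field, which vmcs_seg_desc_encoding() signals by setting
 * the access encoding to VMCS_INVALID_ENCODING.  The helper name is
 * hypothetical.
 */
static inline void
vmcs_get_seg_desc_example(int seg, uint64_t *basep, uint32_t *limp,
    uint32_t *accp)
{
	uint32_t base, lim, acc;

	vmcs_seg_desc_encoding(seg, &base, &lim, &acc);

	/* Assumes the relevant VMCS is currently loaded on this CPU */
	*basep = vmcs_read(base);
	*limp = (uint32_t)vmcs_read(lim);
	*accp = (acc != VMCS_INVALID_ENCODING) ?
	    (uint32_t)vmcs_read(acc) : 0;
}
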
uint32_t
vmcs_msr_encoding(uint32_t msr)
{
	switch (msr) {
	case MSR_PAT:
		return (VMCS_GUEST_IA32_PAT);
	case MSR_EFER:
		return (VMCS_GUEST_IA32_EFER);
	case MSR_SYSENTER_CS_MSR:
		return (VMCS_GUEST_IA32_SYSENTER_CS);
	case MSR_SYSENTER_ESP_MSR:
		return (VMCS_GUEST_IA32_SYSENTER_ESP);
	case MSR_SYSENTER_EIP_MSR:
		return (VMCS_GUEST_IA32_SYSENTER_EIP);
	/*
	 * While fsbase and gsbase are expected to be accessed (by the VMM)
	 * via the segment descriptor interfaces, they are also made
	 * available as MSR contents.
	 */
	case MSR_FSBASE:
		return (VMCS_GUEST_FS_BASE);
	case MSR_GSBASE:
		return (VMCS_GUEST_GS_BASE);
	default:
		return (VMCS_INVALID_ENCODING);
	}
}

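/*
 * Illustrative sketch, not part of the original source: MSRs which are
 * shadowed in the VMCS (e.g. MSR_EFER, or MSR_FSBASE via the FS base
 * field) can be read directly from the loaded VMCS rather than through a
 * separate MSR save area.  The helper name is hypothetical.
 */
static inline int
vmcs_msr_is_shadowed_example(uint32_t msr, uint64_t *valp)
{
	uint32_t encoding = vmcs_msr_encoding(msr);

	if (encoding == VMCS_INVALID_ENCODING)
		return (0);
	*valp = vmcs_read(encoding);
	return (1);
}
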
void
vmcs_clear(uintptr_t vmcs_pa)
{
	int err;

	__asm __volatile("vmclear %[addr];"
	    VMX_SET_ERROR_CODE_ASM
	    : [error] "=r" (err)
	    : [addr] "m" (vmcs_pa)
	    : "memory");

	if (err != 0) {
		panic("vmclear(%p) error %d", (void *)vmcs_pa, err);
	}

	/*
	 * A call to critical_enter() was made in vmcs_load() to prevent
	 * preemption. Now that the VMCS is unloaded, it is safe to relax that
	 * restriction.
	 */
	critical_exit();
}

void
vmcs_initialize(struct vmcs *vmcs, uintptr_t vmcs_pa)
{
	int err;

	/* set to VMCS revision */
	vmcs->identifier = VMX_BASIC_REVISION(rdmsr(MSR_VMX_BASIC));

	/*
	 * Perform a vmclear on the VMCS, but without the critical section
	 * manipulation as done by vmcs_clear() above.
	 */
	__asm __volatile("vmclear %[addr];"
	    VMX_SET_ERROR_CODE_ASM
	    : [error] "=r" (err)
	    : [addr] "m" (vmcs_pa)
	    : "memory");

	if (err != 0) {
		panic("vmclear(%p) error %d", (void *)vmcs_pa, err);
	}
}

void
vmcs_load(uintptr_t vmcs_pa)
{
	int err;

	/*
	 * While the VMCS is loaded on the CPU for subsequent operations, it is
	 * important that the thread not be preempted. That is ensured with
	 * critical_enter() here, with a matching critical_exit() call in
	 * vmcs_clear() once the VMCS is unloaded.
	 */
	critical_enter();

	__asm __volatile("vmptrld %[addr];"
	    VMX_SET_ERROR_CODE_ASM
	    : [error] "=r" (err)
	    : [addr] "m" (vmcs_pa)
	    : "memory");

	if (err != 0) {
		panic("vmptrld(%p) error %d", (void *)vmcs_pa, err);
	}
}

uint64_t
vmcs_read(uint32_t encoding)
{
	int error;
	uint64_t val;

	__asm __volatile("vmread %[enc], %[val];"
	    VMX_SET_ERROR_CODE_ASM
	    : [error] "=r" (error), [val] "=r" (val)
	    : [enc] "r" ((uint64_t)encoding)
	    : "memory");

	if (error != 0) {
		panic("vmread(%x) error %d", encoding, error);
	}

	return (val);
}

void
vmcs_write(uint32_t encoding, uint64_t val)
{
	int error;

	__asm __volatile("vmwrite %[val], %[enc];"
	    VMX_SET_ERROR_CODE_ASM
	    : [error] "=r" (error)
	    : [val] "r" (val), [enc] "r" ((uint64_t)encoding)
	    : "memory");

	if (error != 0) {
		panic("vmwrite(%x, %lx) error %d", encoding, val, error);
	}
}

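/*
 * Illustrative usage sketch, not part of the original source: the accessors
 * above are meant to be bracketed by vmcs_load()/vmcs_clear(), which also
 * delimit the critical section that prevents preemption while the VMCS is
 * active on the CPU.  The helper name and the RIP-advance scenario are
 * hypothetical.
 */
static inline void
vmcs_rip_update_example(uintptr_t vmcs_pa, uint64_t instr_len)
{
	uint64_t rip;

	vmcs_load(vmcs_pa);		/* enters the critical section */
	rip = vmcs_read(VMCS_GUEST_RIP);
	vmcs_write(VMCS_GUEST_RIP, rip + instr_len);
	vmcs_clear(vmcs_pa);		/* exits the critical section */
}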