xref: /freebsd/sys/amd64/vmm/amd/vmcb.c (revision 4928135658a9d0eaee37003df6137ab363fcb0b4)
/*-
 * Copyright (c) 2013 Anish Gupta (akgupt3@gmail.com)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>

#include <machine/segments.h>
#include <machine/specialreg.h>
#include <machine/vmm.h>

#include "vmm_ktr.h"

#include "vmcb.h"
#include "svm.h"
#include "svm_softc.h"

/*
 * The VMCB (Virtual Machine Control Block) is a 4KB-aligned page in
 * memory that describes the virtual machine.
 *
 * The VMCB contains:
 * - instructions or events in the guest to intercept
 * - control bits that modify the execution environment of the guest
 * - guest processor state (e.g. general-purpose registers)
 */
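
/*
 * Editor's illustrative sketch (not part of the original driver): the
 * layout described above is modeled by 'struct vmcb' in vmcb.h as a
 * control area followed by a save state area.  The helpers below always
 * reach guest state through the latter, as in this trivial accessor:
 */
static __unused struct vmcb_state *
vmcb_example_state(struct vmcb *vmcb)
{

	return (&vmcb->state);
}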

/*
 * Return the VMCB segment area for the given register type.
 */
static struct vmcb_segment *
vmcb_segptr(struct vmcb *vmcb, int type)
{
	struct vmcb_state *state;
	struct vmcb_segment *seg;

	state = &vmcb->state;

	switch (type) {
	case VM_REG_GUEST_CS:
		seg = &state->cs;
		break;

	case VM_REG_GUEST_DS:
		seg = &state->ds;
		break;

	case VM_REG_GUEST_ES:
		seg = &state->es;
		break;

	case VM_REG_GUEST_FS:
		seg = &state->fs;
		break;

	case VM_REG_GUEST_GS:
		seg = &state->gs;
		break;

	case VM_REG_GUEST_SS:
		seg = &state->ss;
		break;

	case VM_REG_GUEST_GDTR:
		seg = &state->gdt;
		break;

	case VM_REG_GUEST_IDTR:
		seg = &state->idt;
		break;

	case VM_REG_GUEST_LDTR:
		seg = &state->ldt;
		break;

	case VM_REG_GUEST_TR:
		seg = &state->tr;
		break;

	default:
		seg = NULL;
		break;
	}

	return (seg);
}

static int
vmcb_access(struct svm_softc *softc, int vcpu, int write, int ident,
	uint64_t *val)
{
	struct vmcb *vmcb;
	int off, bytes;
	char *ptr;

	vmcb	= svm_get_vmcb(softc, vcpu);
	off	= VMCB_ACCESS_OFFSET(ident);
	bytes	= VMCB_ACCESS_BYTES(ident);

	/* Reject accesses that extend beyond the end of the VMCB. */
	if ((off + bytes) > sizeof (struct vmcb))
		return (EINVAL);

	ptr = (char *)vmcb;

	if (!write)
		*val = 0;

	switch (bytes) {
	case 8:
	case 4:
	case 2:
		if (write)
			memcpy(ptr + off, val, bytes);
		else
			memcpy(val, ptr + off, bytes);
		break;
	default:
		VCPU_CTR1(softc->vm, vcpu,
		    "Invalid size %d for VMCB access", bytes);
		return (EINVAL);
	}

	/* Invalidate all VMCB state cached by h/w. */
	if (write)
		svm_set_dirty(softc, vcpu, 0xffffffff);

	return (0);
}
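
/*
 * Editor's illustrative sketch (not part of the original driver): idents
 * handled by vmcb_access() encode an (offset, size) pair instead of a
 * VM_REG_* value.  Assuming the VMCB_ACCESS(offset, bytes) encoding macro
 * from vmcb.h, a raw read of an 8-byte field at a hypothetical VMCB offset
 * could look like this:
 */
static __unused int
vmcb_example_raw_read(struct svm_softc *sc, int vcpu, uint64_t *valp)
{

	/* Read 8 bytes starting at VMCB offset 0x70 (offset is illustrative). */
	return (vmcb_read(sc, vcpu, VMCB_ACCESS(0x70, 8), valp));
}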

/*
 * Read a segment selector, control register or general-purpose register
 * from the VMCB.
 */
int
vmcb_read(struct svm_softc *sc, int vcpu, int ident, uint64_t *retval)
{
	struct vmcb *vmcb;
	struct vmcb_state *state;
	struct vmcb_segment *seg;
	int err;

	vmcb = svm_get_vmcb(sc, vcpu);
	state = &vmcb->state;
	err = 0;

	if (VMCB_ACCESS_OK(ident))
		return (vmcb_access(sc, vcpu, 0, ident, retval));

	switch (ident) {
	case VM_REG_GUEST_CR0:
		*retval = state->cr0;
		break;

	case VM_REG_GUEST_CR2:
		*retval = state->cr2;
		break;

	case VM_REG_GUEST_CR3:
		*retval = state->cr3;
		break;

	case VM_REG_GUEST_CR4:
		*retval = state->cr4;
		break;

	case VM_REG_GUEST_DR6:
		*retval = state->dr6;
		break;

	case VM_REG_GUEST_DR7:
		*retval = state->dr7;
		break;

	case VM_REG_GUEST_EFER:
		*retval = state->efer;
		break;

	case VM_REG_GUEST_RAX:
		*retval = state->rax;
		break;

	case VM_REG_GUEST_RFLAGS:
		*retval = state->rflags;
		break;

	case VM_REG_GUEST_RIP:
		*retval = state->rip;
		break;

	case VM_REG_GUEST_RSP:
		*retval = state->rsp;
		break;

	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_FS:
	case VM_REG_GUEST_GS:
	case VM_REG_GUEST_SS:
	case VM_REG_GUEST_LDTR:
	case VM_REG_GUEST_TR:
		seg = vmcb_segptr(vmcb, ident);
		KASSERT(seg != NULL, ("%s: unable to get segment %d from VMCB",
		    __func__, ident));
		*retval = seg->selector;
		break;

	case VM_REG_GUEST_GDTR:
	case VM_REG_GUEST_IDTR:
		/* GDTR and IDTR don't have segment selectors */
		err = EINVAL;
		break;
	default:
		err = EINVAL;
		break;
	}

	return (err);
}
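
/*
 * Editor's illustrative sketch (not part of the original driver): typical
 * use of vmcb_read() with a VM_REG_* identifier, here fetching the guest
 * %rip.
 */
static __unused int
vmcb_example_read_rip(struct svm_softc *sc, int vcpu, uint64_t *rip)
{

	return (vmcb_read(sc, vcpu, VM_REG_GUEST_RIP, rip));
}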

/*
 * Write a segment selector, control register or general-purpose register
 * to the VMCB.
 */
int
vmcb_write(struct svm_softc *sc, int vcpu, int ident, uint64_t val)
{
	struct vmcb *vmcb;
	struct vmcb_state *state;
	struct vmcb_segment *seg;
	int err, dirtyseg;

	vmcb = svm_get_vmcb(sc, vcpu);
	state = &vmcb->state;
	dirtyseg = 0;
	err = 0;

	if (VMCB_ACCESS_OK(ident))
		return (vmcb_access(sc, vcpu, 1, ident, &val));

	switch (ident) {
	case VM_REG_GUEST_CR0:
		state->cr0 = val;
		svm_set_dirty(sc, vcpu, VMCB_CACHE_CR);
		break;

	case VM_REG_GUEST_CR2:
		state->cr2 = val;
		svm_set_dirty(sc, vcpu, VMCB_CACHE_CR2);
		break;

	case VM_REG_GUEST_CR3:
		state->cr3 = val;
		svm_set_dirty(sc, vcpu, VMCB_CACHE_CR);
		break;

	case VM_REG_GUEST_CR4:
		state->cr4 = val;
		svm_set_dirty(sc, vcpu, VMCB_CACHE_CR);
		break;

	case VM_REG_GUEST_DR6:
		state->dr6 = val;
		svm_set_dirty(sc, vcpu, VMCB_CACHE_DR);
		break;

	case VM_REG_GUEST_DR7:
		state->dr7 = val;
		svm_set_dirty(sc, vcpu, VMCB_CACHE_DR);
		break;

	case VM_REG_GUEST_EFER:
		/* EFER_SVM must always be set when the guest is executing */
		state->efer = val | EFER_SVM;
		svm_set_dirty(sc, vcpu, VMCB_CACHE_CR);
		break;

	case VM_REG_GUEST_RAX:
		state->rax = val;
		break;

	case VM_REG_GUEST_RFLAGS:
		state->rflags = val;
		break;

	case VM_REG_GUEST_RIP:
		state->rip = val;
		break;

	case VM_REG_GUEST_RSP:
		state->rsp = val;
		break;

	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_SS:
		dirtyseg = 1;		/* FALLTHROUGH */
	case VM_REG_GUEST_FS:
	case VM_REG_GUEST_GS:
	case VM_REG_GUEST_LDTR:
	case VM_REG_GUEST_TR:
		seg = vmcb_segptr(vmcb, ident);
		KASSERT(seg != NULL, ("%s: unable to get segment %d from VMCB",
		    __func__, ident));
		seg->selector = val;
		if (dirtyseg)
			svm_set_dirty(sc, vcpu, VMCB_CACHE_SEG);
		break;

	case VM_REG_GUEST_GDTR:
	case VM_REG_GUEST_IDTR:
		/* GDTR and IDTR don't have segment selectors */
		err = EINVAL;
		break;
	default:
		err = EINVAL;
		break;
	}

	return (err);
}
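
/*
 * Editor's illustrative sketch (not part of the original driver):
 * vmcb_write() forces EFER_SVM on, so a caller cannot clear it while the
 * guest is executing; writing long-mode EFER bits works as expected.
 */
static __unused int
vmcb_example_set_efer(struct svm_softc *sc, int vcpu)
{

	/* EFER_SVM is ORed in by vmcb_write() even though it isn't passed. */
	return (vmcb_write(sc, vcpu, VM_REG_GUEST_EFER, EFER_LME | EFER_LMA));
}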

int
vmcb_seg(struct vmcb *vmcb, int ident, struct vmcb_segment *seg2)
{
	struct vmcb_segment *seg;

	seg = vmcb_segptr(vmcb, ident);
	if (seg != NULL) {
		bcopy(seg, seg2, sizeof(struct vmcb_segment));
		return (0);
	} else {
		return (EINVAL);
	}
}

int
vmcb_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
{
	struct vmcb *vmcb;
	struct svm_softc *sc;
	struct vmcb_segment *seg;
	uint16_t attrib;

	sc = arg;
	vmcb = svm_get_vmcb(sc, vcpu);

	seg = vmcb_segptr(vmcb, reg);
	KASSERT(seg != NULL, ("%s: invalid segment descriptor %d",
	    __func__, reg));

	seg->base = desc->base;
	seg->limit = desc->limit;
	if (reg != VM_REG_GUEST_GDTR && reg != VM_REG_GUEST_IDTR) {
		/*
		 * Map seg_desc access to VMCB attribute format.
		 *
		 * SVM uses the 'P' bit in the segment attributes to indicate a
		 * NULL segment so clear it if the segment is marked unusable.
		 */
		attrib = ((desc->access & 0xF000) >> 4) | (desc->access & 0xFF);
		if (SEG_DESC_UNUSABLE(desc->access)) {
			attrib &= ~0x80;
		}
		seg->attrib = attrib;
	}
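
	/*
	 * Editor's note (worked example, values hypothetical): a present
	 * 32-bit code segment with access word 0xC09B (G and D/B set, type
	 * 0x9B) maps to attrib ((0xC000 >> 4) | 0x9B) == 0xC9B; bits 15:12
	 * of 'access' simply move down to bits 11:8.
	 */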

	VCPU_CTR4(sc->vm, vcpu, "Setting desc %d: base (%#lx), limit (%#x), "
	    "attrib (%#x)", reg, seg->base, seg->limit, seg->attrib);

	switch (reg) {
	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_SS:
		svm_set_dirty(sc, vcpu, VMCB_CACHE_SEG);
		break;
	case VM_REG_GUEST_GDTR:
	case VM_REG_GUEST_IDTR:
		svm_set_dirty(sc, vcpu, VMCB_CACHE_DT);
		break;
	default:
		break;
	}

	return (0);
}

int
vmcb_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
{
	struct vmcb *vmcb;
	struct svm_softc *sc;
	struct vmcb_segment *seg;

	sc = arg;
	vmcb = svm_get_vmcb(sc, vcpu);
	seg = vmcb_segptr(vmcb, reg);
	KASSERT(seg != NULL, ("%s: invalid segment descriptor %d",
	    __func__, reg));

	desc->base = seg->base;
	desc->limit = seg->limit;
	desc->access = 0;

	if (reg != VM_REG_GUEST_GDTR && reg != VM_REG_GUEST_IDTR) {
		/* Map VMCB attribute format to seg_desc access format */
		desc->access = ((seg->attrib & 0xF00) << 4) |
		    (seg->attrib & 0xFF);

		/*
		 * VT-x uses bit 16 to indicate a segment that has been loaded
		 * with a NULL selector (aka unusable). The 'desc->access'
		 * field is interpreted in the VT-x format by the
		 * processor-independent code.
		 *
		 * SVM uses the 'P' bit to convey the same information so
		 * convert it into the VT-x format. For more details refer to
		 * section "Segment State in the VMCB" in APMv2.
		 */
		if (reg != VM_REG_GUEST_CS && reg != VM_REG_GUEST_TR) {
			if ((desc->access & 0x80) == 0)
				desc->access |= 0x10000;  /* Unusable segment */
		}
	}
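
	/*
	 * Editor's note (worked example, values hypothetical): a segment
	 * whose VMCB attrib is 0 (P bit clear) expands to desc->access 0
	 * above; since P is clear, bit 16 is then set, producing 0x10000,
	 * which the VT-x-format consumers read as an unusable segment.
	 */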

	return (0);
}
453