/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013 Anish Gupta (akgupt3@gmail.com)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bhyve_snapshot.h"

#include <sys/param.h>
#include <sys/systm.h>

#include <machine/segments.h>
#include <machine/specialreg.h>
#include <machine/vmm.h>
#include <machine/vmm_snapshot.h>

#include "vmm_ktr.h"

#include "vmcb.h"
#include "svm.h"
#include "svm_softc.h"

/*
 * The VMCB (Virtual Machine Control Block) is a 4KB-aligned page in
 * memory that describes the virtual machine to the processor.
 *
 * The VMCB contains:
 * - the instructions and events in the guest to intercept
 * - control bits that modify the execution environment of the guest
 * - guest processor state (e.g. general purpose registers)
 */

/*
 * Return a pointer to the VMCB segment area for the given register.
 */
static struct vmcb_segment *
vmcb_segptr(struct vmcb *vmcb, int type)
{
	struct vmcb_state *state;
	struct vmcb_segment *seg;

	state = &vmcb->state;

	switch (type) {
	case VM_REG_GUEST_CS:
		seg = &state->cs;
		break;

	case VM_REG_GUEST_DS:
		seg = &state->ds;
		break;

	case VM_REG_GUEST_ES:
		seg = &state->es;
		break;

	case VM_REG_GUEST_FS:
		seg = &state->fs;
		break;

	case VM_REG_GUEST_GS:
		seg = &state->gs;
		break;

	case VM_REG_GUEST_SS:
		seg = &state->ss;
		break;

	case VM_REG_GUEST_GDTR:
		seg = &state->gdt;
		break;

	case VM_REG_GUEST_IDTR:
		seg = &state->idt;
		break;

	case VM_REG_GUEST_LDTR:
		seg = &state->ldt;
		break;

	case VM_REG_GUEST_TR:
		seg = &state->tr;
		break;

	default:
		seg = NULL;
		break;
	}

	return (seg);
}

static int
vmcb_access(struct svm_softc *softc, int vcpu, int write, int ident,
	uint64_t *val)
{
	struct vmcb *vmcb;
	int off, bytes;
	char *ptr;

	vmcb	= svm_get_vmcb(softc, vcpu);
	off	= VMCB_ACCESS_OFFSET(ident);
	bytes	= VMCB_ACCESS_BYTES(ident);

	if ((off + bytes) >= sizeof (struct vmcb))
		return (EINVAL);

	ptr = (char *)vmcb;

	if (!write)
		*val = 0;

	switch (bytes) {
	case 8:
	case 4:
	case 2:
		if (write)
			memcpy(ptr + off, val, bytes);
		else
			memcpy(val, ptr + off, bytes);
		break;
	default:
		VCPU_CTR1(softc->vm, vcpu,
		    "Invalid size %d for VMCB access", bytes);
		return (EINVAL);
	}

	/* Invalidate all VMCB state cached by h/w. */
	if (write)
		svm_set_dirty(softc, vcpu, 0xffffffff);

	return (0);
}
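
/*
 * Illustrative sketch (not part of the original source): idents taken by
 * the raw-access path above pair a byte offset into the VMCB with an
 * access size, using the VMCB_ACCESS() encoding from vmcb.h that the
 * VMCB_ACCESS_OFFSET()/VMCB_ACCESS_BYTES() accessors decode.  A
 * hypothetical 4-byte read at VMCB offset 'off' would look like:
 *
 *	uint64_t v;
 *	error = vmcb_read(sc, vcpu, VMCB_ACCESS(off, 4), &v);
 */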

/*
 * Read a segment selector, control register or general-purpose register
 * from the VMCB.
 */
int
vmcb_read(struct svm_softc *sc, int vcpu, int ident, uint64_t *retval)
{
	struct vmcb *vmcb;
	struct vmcb_state *state;
	struct vmcb_segment *seg;
	int err;

	vmcb = svm_get_vmcb(sc, vcpu);
	state = &vmcb->state;
	err = 0;

	if (VMCB_ACCESS_OK(ident))
		return (vmcb_access(sc, vcpu, 0, ident, retval));

	switch (ident) {
	case VM_REG_GUEST_CR0:
		*retval = state->cr0;
		break;

	case VM_REG_GUEST_CR2:
		*retval = state->cr2;
		break;

	case VM_REG_GUEST_CR3:
		*retval = state->cr3;
		break;

	case VM_REG_GUEST_CR4:
		*retval = state->cr4;
		break;

	case VM_REG_GUEST_DR6:
		*retval = state->dr6;
		break;

	case VM_REG_GUEST_DR7:
		*retval = state->dr7;
		break;

	case VM_REG_GUEST_EFER:
		*retval = state->efer;
		break;

	case VM_REG_GUEST_RAX:
		*retval = state->rax;
		break;

	case VM_REG_GUEST_RFLAGS:
		*retval = state->rflags;
		break;

	case VM_REG_GUEST_RIP:
		*retval = state->rip;
		break;

	case VM_REG_GUEST_RSP:
		*retval = state->rsp;
		break;

	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_FS:
	case VM_REG_GUEST_GS:
	case VM_REG_GUEST_SS:
	case VM_REG_GUEST_LDTR:
	case VM_REG_GUEST_TR:
		seg = vmcb_segptr(vmcb, ident);
		KASSERT(seg != NULL, ("%s: unable to get segment %d from VMCB",
		    __func__, ident));
		*retval = seg->selector;
		break;

	case VM_REG_GUEST_GDTR:
	case VM_REG_GUEST_IDTR:
		/* GDTR and IDTR don't have segment selectors */
		err = EINVAL;
		break;
	default:
		err = EINVAL;
		break;
	}

	return (err);
}
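
/*
 * Illustrative use (a minimal sketch, not from the original source):
 * fetching the guest %rip through the register interface above.
 *
 *	uint64_t rip;
 *	error = vmcb_read(sc, vcpu, VM_REG_GUEST_RIP, &rip);
 */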

/*
 * Write a segment selector, control register or general-purpose register
 * to the VMCB.
 */
int
vmcb_write(struct svm_softc *sc, int vcpu, int ident, uint64_t val)
{
	struct vmcb *vmcb;
	struct vmcb_state *state;
	struct vmcb_segment *seg;
	int err, dirtyseg;

	vmcb = svm_get_vmcb(sc, vcpu);
	state = &vmcb->state;
	dirtyseg = 0;
	err = 0;

	if (VMCB_ACCESS_OK(ident))
		return (vmcb_access(sc, vcpu, 1, ident, &val));

	switch (ident) {
	case VM_REG_GUEST_CR0:
		state->cr0 = val;
		svm_set_dirty(sc, vcpu, VMCB_CACHE_CR);
		break;

	case VM_REG_GUEST_CR2:
		state->cr2 = val;
		svm_set_dirty(sc, vcpu, VMCB_CACHE_CR2);
		break;

	case VM_REG_GUEST_CR3:
		state->cr3 = val;
		svm_set_dirty(sc, vcpu, VMCB_CACHE_CR);
		break;

	case VM_REG_GUEST_CR4:
		state->cr4 = val;
		svm_set_dirty(sc, vcpu, VMCB_CACHE_CR);
		break;

	case VM_REG_GUEST_DR6:
		state->dr6 = val;
		svm_set_dirty(sc, vcpu, VMCB_CACHE_DR);
		break;

	case VM_REG_GUEST_DR7:
		state->dr7 = val;
		svm_set_dirty(sc, vcpu, VMCB_CACHE_DR);
		break;

	case VM_REG_GUEST_EFER:
		/* EFER_SVM must always be set when the guest is executing */
		state->efer = val | EFER_SVM;
		svm_set_dirty(sc, vcpu, VMCB_CACHE_CR);
		break;

	case VM_REG_GUEST_RAX:
		state->rax = val;
		break;

	case VM_REG_GUEST_RFLAGS:
		state->rflags = val;
		break;

	case VM_REG_GUEST_RIP:
		state->rip = val;
		break;

	case VM_REG_GUEST_RSP:
		state->rsp = val;
		break;

	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_SS:
		dirtyseg = 1;		/* FALLTHROUGH */
	case VM_REG_GUEST_FS:
	case VM_REG_GUEST_GS:
	case VM_REG_GUEST_LDTR:
	case VM_REG_GUEST_TR:
		seg = vmcb_segptr(vmcb, ident);
		KASSERT(seg != NULL, ("%s: unable to get segment %d from VMCB",
		    __func__, ident));
		seg->selector = val;
		if (dirtyseg)
			svm_set_dirty(sc, vcpu, VMCB_CACHE_SEG);
		break;

	case VM_REG_GUEST_GDTR:
	case VM_REG_GUEST_IDTR:
		/* GDTR and IDTR don't have segment selectors */
		err = EINVAL;
		break;
	default:
		err = EINVAL;
		break;
	}

	return (err);
}

int
vmcb_seg(struct vmcb *vmcb, int ident, struct vmcb_segment *seg2)
{
	struct vmcb_segment *seg;

	seg = vmcb_segptr(vmcb, ident);
	if (seg != NULL) {
		bcopy(seg, seg2, sizeof(struct vmcb_segment));
		return (0);
	} else {
		return (EINVAL);
	}
}

int
vmcb_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
{
	struct vmcb *vmcb;
	struct svm_softc *sc;
	struct vmcb_segment *seg;
	uint16_t attrib;

	sc = arg;
	vmcb = svm_get_vmcb(sc, vcpu);

	seg = vmcb_segptr(vmcb, reg);
	KASSERT(seg != NULL, ("%s: invalid segment descriptor %d",
	    __func__, reg));

	seg->base = desc->base;
	seg->limit = desc->limit;
	if (reg != VM_REG_GUEST_GDTR && reg != VM_REG_GUEST_IDTR) {
		/*
		 * Map seg_desc access to VMCB attribute format.
		 *
		 * SVM uses the 'P' bit in the segment attributes to indicate a
		 * NULL segment so clear it if the segment is marked unusable.
		 */
		attrib = ((desc->access & 0xF000) >> 4) | (desc->access & 0xFF);
		if (SEG_DESC_UNUSABLE(desc->access)) {
			attrib &= ~0x80;
		}
		seg->attrib = attrib;
	}
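
	/*
	 * Worked example (illustrative, not from the original source): a
	 * long-mode code segment with access = 0xA09B (G=1, L=1, P=1,
	 * DPL=0, type 0xb) maps to attrib = ((0xA000 >> 4) | 0x9B) =
	 * 0xA9B, i.e. bits 15:12 of the Intel-style access word are
	 * packed down into bits 11:8 of the VMCB attribute field.
	 */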

	VCPU_CTR4(sc->vm, vcpu, "Setting desc %d: base (%#lx), limit (%#x), "
	    "attrib (%#x)", reg, seg->base, seg->limit, seg->attrib);

	switch (reg) {
	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_SS:
		svm_set_dirty(sc, vcpu, VMCB_CACHE_SEG);
		break;
	case VM_REG_GUEST_GDTR:
	case VM_REG_GUEST_IDTR:
		svm_set_dirty(sc, vcpu, VMCB_CACHE_DT);
		break;
	default:
		break;
	}

	return (0);
}

int
vmcb_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
{
	struct vmcb *vmcb;
	struct svm_softc *sc;
	struct vmcb_segment *seg;

	sc = arg;
	vmcb = svm_get_vmcb(sc, vcpu);
	seg = vmcb_segptr(vmcb, reg);
	KASSERT(seg != NULL, ("%s: invalid segment descriptor %d",
	    __func__, reg));

	desc->base = seg->base;
	desc->limit = seg->limit;
	desc->access = 0;

	if (reg != VM_REG_GUEST_GDTR && reg != VM_REG_GUEST_IDTR) {
		/* Map VMCB segment attributes back to seg_desc access format */
		desc->access = ((seg->attrib & 0xF00) << 4) |
		    (seg->attrib & 0xFF);

		/*
		 * VT-x uses bit 16 to indicate a segment that has been loaded
		 * with a NULL selector (aka unusable). The 'desc->access'
		 * field is interpreted in the VT-x format by the
		 * processor-independent code.
		 *
		 * SVM uses the 'P' bit to convey the same information so
		 * convert it into the VT-x format. For more details refer to
		 * section "Segment State in the VMCB" in APMv2.
		 */
		if (reg != VM_REG_GUEST_CS && reg != VM_REG_GUEST_TR) {
			if ((desc->access & 0x80) == 0)
				desc->access |= 0x10000;  /* Unusable segment */
		}
	}
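
	/*
	 * Worked example (illustrative, not from the original source): a
	 * data segment that was loaded with a NULL selector has attrib = 0,
	 * so P (bit 7) is clear and the conversion above yields
	 * desc->access = 0x10000, the VT-x "unusable" encoding that the
	 * processor-independent code expects.
	 */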

	return (0);
}

#ifdef BHYVE_SNAPSHOT
int
vmcb_getany(struct svm_softc *sc, int vcpu, int ident, uint64_t *val)
{
	int error = 0;

	if (vcpu < 0 || vcpu >= VM_MAXCPU) {
		error = EINVAL;
		goto err;
	}

	if (ident >= VM_REG_LAST) {
		error = EINVAL;
		goto err;
	}

	error = vm_get_register(sc->vm, vcpu, ident, val);

err:
	return (error);
}

int
vmcb_setany(struct svm_softc *sc, int vcpu, int ident, uint64_t val)
{
	int error = 0;

	if (vcpu < 0 || vcpu >= VM_MAXCPU) {
		error = EINVAL;
		goto err;
	}

	if (ident >= VM_REG_LAST) {
		error = EINVAL;
		goto err;
	}

	error = vm_set_register(sc->vm, vcpu, ident, val);

err:
	return (error);
}

int
vmcb_snapshot_desc(void *arg, int vcpu, int reg, struct vm_snapshot_meta *meta)
{
	int ret;
	struct seg_desc desc;

	if (meta->op == VM_SNAPSHOT_SAVE) {
		ret = vmcb_getdesc(arg, vcpu, reg, &desc);
		if (ret != 0)
			goto done;

		SNAPSHOT_VAR_OR_LEAVE(desc.base, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(desc.limit, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(desc.access, meta, ret, done);
	} else if (meta->op == VM_SNAPSHOT_RESTORE) {
		SNAPSHOT_VAR_OR_LEAVE(desc.base, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(desc.limit, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(desc.access, meta, ret, done);

		ret = vmcb_setdesc(arg, vcpu, reg, &desc);
		if (ret != 0)
			goto done;
	} else {
		ret = EINVAL;
		goto done;
	}

done:
	return (ret);
}
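
/*
 * The SNAPSHOT_VAR_OR_LEAVE() calls above serialize fields into the
 * snapshot stream sequentially, so the save and restore paths must
 * visit desc.base, desc.limit and desc.access in the same order for
 * the stream to stay consistent.
 */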

int
vmcb_snapshot_any(struct svm_softc *sc, int vcpu, int ident,
		  struct vm_snapshot_meta *meta)
{
	int ret;
	uint64_t val;

	if (meta->op == VM_SNAPSHOT_SAVE) {
		ret = vmcb_getany(sc, vcpu, ident, &val);
		if (ret != 0)
			goto done;

		SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done);
	} else if (meta->op == VM_SNAPSHOT_RESTORE) {
		SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done);

		ret = vmcb_setany(sc, vcpu, ident, val);
		if (ret != 0)
			goto done;
	} else {
		ret = EINVAL;
		goto done;
	}

done:
	return (ret);
}
#endif