xref: /freebsd/sys/amd64/vmm/amd/vmcb.c (revision db33c6f3ae9d1231087710068ee4ea5398aacca7)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013 Anish Gupta (akgupt3@gmail.com)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_bhyve_snapshot.h"

#include <sys/param.h>
#include <sys/systm.h>

#include <machine/segments.h>
#include <machine/specialreg.h>
#include <machine/vmm.h>
#include <machine/vmm_snapshot.h>

#include <dev/vmm/vmm_ktr.h>

#include "vlapic.h"
#include "vmcb.h"
#include "svm.h"
#include "svm_softc.h"

/*
 * The VMCB, aka Virtual Machine Control Block, is a 4KB-aligned page
 * in memory that describes the virtual machine.
 *
 * The VMCB contains:
 * - instructions or events in the guest to intercept
 * - control bits that modify the execution environment of the guest
 * - guest processor state (e.g. general purpose registers)
 */
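
/*
 * Illustrative sketch (comment only, not compiled): the accessors below are
 * how the rest of the SVM code reads and modifies guest state.  Assuming a
 * valid 'struct svm_vcpu *vcpu':
 *
 *	uint64_t rip;
 *
 *	(void) vmcb_read(vcpu, VM_REG_GUEST_RIP, &rip);	// fetch guest %rip
 *	(void) vmcb_write(vcpu, VM_REG_GUEST_RAX, 0);	// clear guest %rax
 *
 * Both return 0 on success or EINVAL for an unsupported identifier.
 */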

/*
 * Return a pointer to the requested segment area within the VMCB.
 */
static struct vmcb_segment *
vmcb_segptr(struct vmcb *vmcb, int type)
{
	struct vmcb_state *state;
	struct vmcb_segment *seg;

	state = &vmcb->state;

	switch (type) {
	case VM_REG_GUEST_CS:
		seg = &state->cs;
		break;

	case VM_REG_GUEST_DS:
		seg = &state->ds;
		break;

	case VM_REG_GUEST_ES:
		seg = &state->es;
		break;

	case VM_REG_GUEST_FS:
		seg = &state->fs;
		break;

	case VM_REG_GUEST_GS:
		seg = &state->gs;
		break;

	case VM_REG_GUEST_SS:
		seg = &state->ss;
		break;

	case VM_REG_GUEST_GDTR:
		seg = &state->gdt;
		break;

	case VM_REG_GUEST_IDTR:
		seg = &state->idt;
		break;

	case VM_REG_GUEST_LDTR:
		seg = &state->ldt;
		break;

	case VM_REG_GUEST_TR:
		seg = &state->tr;
		break;

	default:
		seg = NULL;
		break;
	}

	return (seg);
}

static int
vmcb_access(struct svm_vcpu *vcpu, int write, int ident, uint64_t *val)
{
	struct vmcb *vmcb;
	int off, bytes;
	char *ptr;

	vmcb	= svm_get_vmcb(vcpu);
	off	= VMCB_ACCESS_OFFSET(ident);
	bytes	= VMCB_ACCESS_BYTES(ident);

	/* Reject accesses that would run past the end of the VMCB. */
	if ((off + bytes) > sizeof (struct vmcb))
		return (EINVAL);

	ptr = (char *)vmcb;

	if (!write)
		*val = 0;

	switch (bytes) {
	case 8:
	case 4:
	case 2:
	case 1:
		if (write)
			memcpy(ptr + off, val, bytes);
		else
			memcpy(val, ptr + off, bytes);
		break;
	default:
		SVM_CTR1(vcpu, "Invalid size %d for VMCB access", bytes);
		return (EINVAL);
	}

	/* Invalidate all VMCB state cached by h/w. */
	if (write)
		svm_set_dirty(vcpu, 0xffffffff);

	return (0);
}
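
/*
 * Illustrative sketch (comment only, not compiled): callers reach VMCB
 * fields that have no VM_REG_* name by encoding a raw offset/size pair.
 * Assuming the VMCB_ACCESS(offset, bytes) encoding macro and a VMCB_OFF_*
 * offset constant from vmcb.h:
 *
 *	uint64_t info;
 *
 *	// read the 8-byte EXITINFO1 field directly from the control area
 *	(void) vmcb_read(vcpu, VMCB_ACCESS(VMCB_OFF_EXITINFO1, 8), &info);
 *
 * VMCB_ACCESS_OK() recognizes such identifiers, and vmcb_read()/vmcb_write()
 * hand them to vmcb_access() above.
 */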

/*
 * Read a segment selector, control register, or general-purpose register
 * from the VMCB.
 */
int
vmcb_read(struct svm_vcpu *vcpu, int ident, uint64_t *retval)
{
	struct vmcb *vmcb;
	struct vmcb_state *state;
	struct vmcb_segment *seg;
	int err;

	vmcb = svm_get_vmcb(vcpu);
	state = &vmcb->state;
	err = 0;

	if (VMCB_ACCESS_OK(ident))
		return (vmcb_access(vcpu, 0, ident, retval));

	switch (ident) {
	case VM_REG_GUEST_CR0:
		*retval = state->cr0;
		break;

	case VM_REG_GUEST_CR2:
		*retval = state->cr2;
		break;

	case VM_REG_GUEST_CR3:
		*retval = state->cr3;
		break;

	case VM_REG_GUEST_CR4:
		*retval = state->cr4;
		break;

	case VM_REG_GUEST_DR6:
		*retval = state->dr6;
		break;

	case VM_REG_GUEST_DR7:
		*retval = state->dr7;
		break;

	case VM_REG_GUEST_EFER:
		*retval = state->efer;
		break;

	case VM_REG_GUEST_RAX:
		*retval = state->rax;
		break;

	case VM_REG_GUEST_RFLAGS:
		*retval = state->rflags;
		break;

	case VM_REG_GUEST_RIP:
		*retval = state->rip;
		break;

	case VM_REG_GUEST_RSP:
		*retval = state->rsp;
		break;

	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_FS:
	case VM_REG_GUEST_GS:
	case VM_REG_GUEST_SS:
	case VM_REG_GUEST_LDTR:
	case VM_REG_GUEST_TR:
		seg = vmcb_segptr(vmcb, ident);
		KASSERT(seg != NULL, ("%s: unable to get segment %d from VMCB",
		    __func__, ident));
		*retval = seg->selector;
		break;

	case VM_REG_GUEST_FS_BASE:
	case VM_REG_GUEST_GS_BASE:
		seg = vmcb_segptr(vmcb, ident == VM_REG_GUEST_FS_BASE ?
		    VM_REG_GUEST_FS : VM_REG_GUEST_GS);
		KASSERT(seg != NULL, ("%s: unable to get segment %d from VMCB",
		    __func__, ident));
		*retval = seg->base;
		break;

	case VM_REG_GUEST_KGS_BASE:
		*retval = state->kernelgsbase;
		break;

	case VM_REG_GUEST_TPR:
		*retval = vlapic_get_cr8(vm_lapic(vcpu->vcpu));
		break;

	case VM_REG_GUEST_GDTR:
	case VM_REG_GUEST_IDTR:
		/* GDTR and IDTR don't have segment selectors */
		err = EINVAL;
		break;

	default:
		err = EINVAL;
		break;
	}

	return (err);
}

/*
 * Write a segment selector, control register, or general-purpose register
 * to the VMCB.
 */
int
vmcb_write(struct svm_vcpu *vcpu, int ident, uint64_t val)
{
	struct vmcb *vmcb;
	struct vmcb_state *state;
	struct vmcb_segment *seg;
	int err, dirtyseg;

	vmcb = svm_get_vmcb(vcpu);
	state = &vmcb->state;
	dirtyseg = 0;
	err = 0;

	if (VMCB_ACCESS_OK(ident))
		return (vmcb_access(vcpu, 1, ident, &val));

	switch (ident) {
	case VM_REG_GUEST_CR0:
		state->cr0 = val;
		svm_set_dirty(vcpu, VMCB_CACHE_CR);
		break;

	case VM_REG_GUEST_CR2:
		state->cr2 = val;
		svm_set_dirty(vcpu, VMCB_CACHE_CR2);
		break;

	case VM_REG_GUEST_CR3:
		state->cr3 = val;
		svm_set_dirty(vcpu, VMCB_CACHE_CR);
		break;

	case VM_REG_GUEST_CR4:
		state->cr4 = val;
		svm_set_dirty(vcpu, VMCB_CACHE_CR);
		break;

	case VM_REG_GUEST_DR6:
		state->dr6 = val;
		svm_set_dirty(vcpu, VMCB_CACHE_DR);
		break;

	case VM_REG_GUEST_DR7:
		state->dr7 = val;
		svm_set_dirty(vcpu, VMCB_CACHE_DR);
		break;

	case VM_REG_GUEST_EFER:
		/* EFER_SVM must always be set when the guest is executing */
		state->efer = val | EFER_SVM;
		svm_set_dirty(vcpu, VMCB_CACHE_CR);
		break;

	case VM_REG_GUEST_RAX:
		state->rax = val;
		break;

	case VM_REG_GUEST_RFLAGS:
		state->rflags = val;
		break;

	case VM_REG_GUEST_RIP:
		state->rip = val;
		break;

	case VM_REG_GUEST_RSP:
		state->rsp = val;
		break;

	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_SS:
		dirtyseg = 1;		/* FALLTHROUGH */
	case VM_REG_GUEST_FS:
	case VM_REG_GUEST_GS:
	case VM_REG_GUEST_LDTR:
	case VM_REG_GUEST_TR:
		seg = vmcb_segptr(vmcb, ident);
		KASSERT(seg != NULL, ("%s: unable to get segment %d from VMCB",
		    __func__, ident));
		seg->selector = val;
		if (dirtyseg)
			svm_set_dirty(vcpu, VMCB_CACHE_SEG);
		break;

	case VM_REG_GUEST_GDTR:
	case VM_REG_GUEST_IDTR:
		/* GDTR and IDTR don't have segment selectors */
		err = EINVAL;
		break;

	default:
		err = EINVAL;
		break;
	}

	return (err);
}

int
vmcb_seg(struct vmcb *vmcb, int ident, struct vmcb_segment *seg2)
{
	struct vmcb_segment *seg;

	seg = vmcb_segptr(vmcb, ident);
	if (seg != NULL) {
		bcopy(seg, seg2, sizeof(struct vmcb_segment));
		return (0);
	} else {
		return (EINVAL);
	}
}

int
vmcb_setdesc(struct svm_vcpu *vcpu, int reg, struct seg_desc *desc)
{
	struct vmcb *vmcb;
	struct vmcb_segment *seg;
	uint16_t attrib;

	vmcb = svm_get_vmcb(vcpu);

	seg = vmcb_segptr(vmcb, reg);
	KASSERT(seg != NULL, ("%s: invalid segment descriptor %d",
	    __func__, reg));

	seg->base = desc->base;
	seg->limit = desc->limit;
	if (reg != VM_REG_GUEST_GDTR && reg != VM_REG_GUEST_IDTR) {
		/*
		 * Map seg_desc access to VMCB attribute format.
		 *
		 * SVM uses the 'P' bit in the segment attributes to indicate a
		 * NULL segment so clear it if the segment is marked unusable.
		 */
		attrib = ((desc->access & 0xF000) >> 4) | (desc->access & 0xFF);
		if (SEG_DESC_UNUSABLE(desc->access)) {
			attrib &= ~0x80;
		}
		seg->attrib = attrib;
	}
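
	/*
	 * Worked example of the mapping above (illustrative): a long-mode
	 * code segment with access 0xA09B (G=1, L=1, P=1, type 0xB) becomes
	 * attrib 0x0A9B.  The mapping drops the unused bits 11:8 and packs
	 * bits 15:12 down next to bits 7:0, yielding SVM's 12-bit segment
	 * attribute format.
	 */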

	SVM_CTR4(vcpu, "Setting desc %d: base (%#lx), limit (%#x), "
	    "attrib (%#x)", reg, seg->base, seg->limit, seg->attrib);

	switch (reg) {
	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_SS:
		svm_set_dirty(vcpu, VMCB_CACHE_SEG);
		break;
	case VM_REG_GUEST_GDTR:
	case VM_REG_GUEST_IDTR:
		svm_set_dirty(vcpu, VMCB_CACHE_DT);
		break;
	default:
		break;
	}

	return (0);
}

int
vmcb_getdesc(struct svm_vcpu *vcpu, int reg, struct seg_desc *desc)
{
	struct vmcb *vmcb;
	struct vmcb_segment *seg;

	vmcb = svm_get_vmcb(vcpu);
	seg = vmcb_segptr(vmcb, reg);
	KASSERT(seg != NULL, ("%s: invalid segment descriptor %d",
	    __func__, reg));

	desc->base = seg->base;
	desc->limit = seg->limit;
	desc->access = 0;

	if (reg != VM_REG_GUEST_GDTR && reg != VM_REG_GUEST_IDTR) {
		/* Map VMCB attribute format back to seg_desc access format */
		desc->access = ((seg->attrib & 0xF00) << 4) |
		    (seg->attrib & 0xFF);

		/*
		 * VT-x uses bit 16 to indicate a segment that has been loaded
		 * with a NULL selector (aka unusable). The 'desc->access'
		 * field is interpreted in the VT-x format by the
		 * processor-independent code.
		 *
		 * SVM uses the 'P' bit to convey the same information so
		 * convert it into the VT-x format. For more details refer to
		 * section "Segment State in the VMCB" in APMv2.
		 */
		if (reg != VM_REG_GUEST_CS && reg != VM_REG_GUEST_TR) {
			if ((desc->access & 0x80) == 0)
				desc->access |= 0x10000;  /* Unusable segment */
		}
	}
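
	/*
	 * Illustrative example: a segment loaded with a NULL selector comes
	 * back from the VMCB with P=0 in its attributes, so the conversion
	 * above sets bit 16 of 'desc->access', which is exactly what the
	 * SEG_DESC_UNUSABLE() macro in the processor-independent code tests.
	 */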

	return (0);
}

#ifdef BHYVE_SNAPSHOT
int
vmcb_getany(struct svm_vcpu *vcpu, int ident, uint64_t *val)
{
	int error = 0;

	if (ident >= VM_REG_LAST) {
		error = EINVAL;
		goto err;
	}

	error = vmcb_read(vcpu, ident, val);

err:
	return (error);
}

int
vmcb_setany(struct svm_vcpu *vcpu, int ident, uint64_t val)
{
	int error = 0;

	if (ident >= VM_REG_LAST) {
		error = EINVAL;
		goto err;
	}

	error = vmcb_write(vcpu, ident, val);

err:
	return (error);
}

int
vmcb_snapshot_desc(struct svm_vcpu *vcpu, int reg,
    struct vm_snapshot_meta *meta)
{
	int ret;
	struct seg_desc desc;

	if (meta->op == VM_SNAPSHOT_SAVE) {
		ret = vmcb_getdesc(vcpu, reg, &desc);
		if (ret != 0)
			goto done;

		SNAPSHOT_VAR_OR_LEAVE(desc.base, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(desc.limit, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(desc.access, meta, ret, done);
	} else if (meta->op == VM_SNAPSHOT_RESTORE) {
		SNAPSHOT_VAR_OR_LEAVE(desc.base, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(desc.limit, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(desc.access, meta, ret, done);

		ret = vmcb_setdesc(vcpu, reg, &desc);
		if (ret != 0)
			goto done;
	} else {
		ret = EINVAL;
		goto done;
	}

done:
	return (ret);
}
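
/*
 * Illustrative sketch (comment only, not compiled): the snapshot helpers are
 * meant to be driven once per descriptor or register from the SVM vCPU
 * snapshot path, e.g.:
 *
 *	ret = vmcb_snapshot_desc(vcpu, VM_REG_GUEST_CS, meta);
 *	...
 *	ret = vmcb_snapshot_any(vcpu, VM_REG_GUEST_RIP, meta);
 *
 * meta->op selects the direction, so the same call sites serve both
 * VM_SNAPSHOT_SAVE and VM_SNAPSHOT_RESTORE.
 */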

int
vmcb_snapshot_any(struct svm_vcpu *vcpu, int ident,
    struct vm_snapshot_meta *meta)
{
	int ret;
	uint64_t val;

	if (meta->op == VM_SNAPSHOT_SAVE) {
		ret = vmcb_getany(vcpu, ident, &val);
		if (ret != 0)
			goto done;

		SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done);
	} else if (meta->op == VM_SNAPSHOT_RESTORE) {
		SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done);

		ret = vmcb_setany(vcpu, ident, val);
		if (ret != 0)
			goto done;
	} else {
		ret = EINVAL;
		goto done;
	}

done:
	return (ret);
}
#endif