/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013 Anish Gupta (akgupt3@gmail.com)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>

#include <machine/segments.h>
#include <machine/specialreg.h>
#include <machine/vmm.h>

#include "vmm_ktr.h"

#include "vmcb.h"
#include "svm.h"
#include "svm_softc.h"

/*
 * The VMCB aka Virtual Machine Control Block is a 4KB aligned page
 * in memory that describes the virtual machine.
48 * 49 * The VMCB contains: 50 * - instructions or events in the guest to intercept 51 * - control bits that modify execution environment of the guest 52 * - guest processor state (e.g. general purpose registers) 53 */ 54 55 /* 56 * Return VMCB segment area. 57 */ 58 static struct vmcb_segment * 59 vmcb_segptr(struct vmcb *vmcb, int type) 60 { 61 struct vmcb_state *state; 62 struct vmcb_segment *seg; 63 64 state = &vmcb->state; 65 66 switch (type) { 67 case VM_REG_GUEST_CS: 68 seg = &state->cs; 69 break; 70 71 case VM_REG_GUEST_DS: 72 seg = &state->ds; 73 break; 74 75 case VM_REG_GUEST_ES: 76 seg = &state->es; 77 break; 78 79 case VM_REG_GUEST_FS: 80 seg = &state->fs; 81 break; 82 83 case VM_REG_GUEST_GS: 84 seg = &state->gs; 85 break; 86 87 case VM_REG_GUEST_SS: 88 seg = &state->ss; 89 break; 90 91 case VM_REG_GUEST_GDTR: 92 seg = &state->gdt; 93 break; 94 95 case VM_REG_GUEST_IDTR: 96 seg = &state->idt; 97 break; 98 99 case VM_REG_GUEST_LDTR: 100 seg = &state->ldt; 101 break; 102 103 case VM_REG_GUEST_TR: 104 seg = &state->tr; 105 break; 106 107 default: 108 seg = NULL; 109 break; 110 } 111 112 return (seg); 113 } 114 115 static int 116 vmcb_access(struct svm_softc *softc, int vcpu, int write, int ident, 117 uint64_t *val) 118 { 119 struct vmcb *vmcb; 120 int off, bytes; 121 char *ptr; 122 123 vmcb = svm_get_vmcb(softc, vcpu); 124 off = VMCB_ACCESS_OFFSET(ident); 125 bytes = VMCB_ACCESS_BYTES(ident); 126 127 if ((off + bytes) >= sizeof (struct vmcb)) 128 return (EINVAL); 129 130 ptr = (char *)vmcb; 131 132 if (!write) 133 *val = 0; 134 135 switch (bytes) { 136 case 8: 137 case 4: 138 case 2: 139 if (write) 140 memcpy(ptr + off, val, bytes); 141 else 142 memcpy(val, ptr + off, bytes); 143 break; 144 default: 145 VCPU_CTR1(softc->vm, vcpu, 146 "Invalid size %d for VMCB access: %d", bytes); 147 return (EINVAL); 148 } 149 150 /* Invalidate all VMCB state cached by h/w. 
*/ 151 if (write) 152 svm_set_dirty(softc, vcpu, 0xffffffff); 153 154 return (0); 155 } 156 157 /* 158 * Read from segment selector, control and general purpose register of VMCB. 159 */ 160 int 161 vmcb_read(struct svm_softc *sc, int vcpu, int ident, uint64_t *retval) 162 { 163 struct vmcb *vmcb; 164 struct vmcb_state *state; 165 struct vmcb_segment *seg; 166 int err; 167 168 vmcb = svm_get_vmcb(sc, vcpu); 169 state = &vmcb->state; 170 err = 0; 171 172 if (VMCB_ACCESS_OK(ident)) 173 return (vmcb_access(sc, vcpu, 0, ident, retval)); 174 175 switch (ident) { 176 case VM_REG_GUEST_CR0: 177 *retval = state->cr0; 178 break; 179 180 case VM_REG_GUEST_CR2: 181 *retval = state->cr2; 182 break; 183 184 case VM_REG_GUEST_CR3: 185 *retval = state->cr3; 186 break; 187 188 case VM_REG_GUEST_CR4: 189 *retval = state->cr4; 190 break; 191 192 case VM_REG_GUEST_DR6: 193 *retval = state->dr6; 194 break; 195 196 case VM_REG_GUEST_DR7: 197 *retval = state->dr7; 198 break; 199 200 case VM_REG_GUEST_EFER: 201 *retval = state->efer; 202 break; 203 204 case VM_REG_GUEST_RAX: 205 *retval = state->rax; 206 break; 207 208 case VM_REG_GUEST_RFLAGS: 209 *retval = state->rflags; 210 break; 211 212 case VM_REG_GUEST_RIP: 213 *retval = state->rip; 214 break; 215 216 case VM_REG_GUEST_RSP: 217 *retval = state->rsp; 218 break; 219 220 case VM_REG_GUEST_CS: 221 case VM_REG_GUEST_DS: 222 case VM_REG_GUEST_ES: 223 case VM_REG_GUEST_FS: 224 case VM_REG_GUEST_GS: 225 case VM_REG_GUEST_SS: 226 case VM_REG_GUEST_LDTR: 227 case VM_REG_GUEST_TR: 228 seg = vmcb_segptr(vmcb, ident); 229 KASSERT(seg != NULL, ("%s: unable to get segment %d from VMCB", 230 __func__, ident)); 231 *retval = seg->selector; 232 break; 233 234 case VM_REG_GUEST_GDTR: 235 case VM_REG_GUEST_IDTR: 236 /* GDTR and IDTR don't have segment selectors */ 237 err = EINVAL; 238 break; 239 default: 240 err = EINVAL; 241 break; 242 } 243 244 return (err); 245 } 246 247 /* 248 * Write to segment selector, control and general purpose register of 
VMCB. 249 */ 250 int 251 vmcb_write(struct svm_softc *sc, int vcpu, int ident, uint64_t val) 252 { 253 struct vmcb *vmcb; 254 struct vmcb_state *state; 255 struct vmcb_segment *seg; 256 int err, dirtyseg; 257 258 vmcb = svm_get_vmcb(sc, vcpu); 259 state = &vmcb->state; 260 dirtyseg = 0; 261 err = 0; 262 263 if (VMCB_ACCESS_OK(ident)) 264 return (vmcb_access(sc, vcpu, 1, ident, &val)); 265 266 switch (ident) { 267 case VM_REG_GUEST_CR0: 268 state->cr0 = val; 269 svm_set_dirty(sc, vcpu, VMCB_CACHE_CR); 270 break; 271 272 case VM_REG_GUEST_CR2: 273 state->cr2 = val; 274 svm_set_dirty(sc, vcpu, VMCB_CACHE_CR2); 275 break; 276 277 case VM_REG_GUEST_CR3: 278 state->cr3 = val; 279 svm_set_dirty(sc, vcpu, VMCB_CACHE_CR); 280 break; 281 282 case VM_REG_GUEST_CR4: 283 state->cr4 = val; 284 svm_set_dirty(sc, vcpu, VMCB_CACHE_CR); 285 break; 286 287 case VM_REG_GUEST_DR6: 288 state->dr6 = val; 289 svm_set_dirty(sc, vcpu, VMCB_CACHE_DR); 290 break; 291 292 case VM_REG_GUEST_DR7: 293 state->dr7 = val; 294 svm_set_dirty(sc, vcpu, VMCB_CACHE_DR); 295 break; 296 297 case VM_REG_GUEST_EFER: 298 /* EFER_SVM must always be set when the guest is executing */ 299 state->efer = val | EFER_SVM; 300 svm_set_dirty(sc, vcpu, VMCB_CACHE_CR); 301 break; 302 303 case VM_REG_GUEST_RAX: 304 state->rax = val; 305 break; 306 307 case VM_REG_GUEST_RFLAGS: 308 state->rflags = val; 309 break; 310 311 case VM_REG_GUEST_RIP: 312 state->rip = val; 313 break; 314 315 case VM_REG_GUEST_RSP: 316 state->rsp = val; 317 break; 318 319 case VM_REG_GUEST_CS: 320 case VM_REG_GUEST_DS: 321 case VM_REG_GUEST_ES: 322 case VM_REG_GUEST_SS: 323 dirtyseg = 1; /* FALLTHROUGH */ 324 case VM_REG_GUEST_FS: 325 case VM_REG_GUEST_GS: 326 case VM_REG_GUEST_LDTR: 327 case VM_REG_GUEST_TR: 328 seg = vmcb_segptr(vmcb, ident); 329 KASSERT(seg != NULL, ("%s: unable to get segment %d from VMCB", 330 __func__, ident)); 331 seg->selector = val; 332 if (dirtyseg) 333 svm_set_dirty(sc, vcpu, VMCB_CACHE_SEG); 334 break; 335 336 case 
VM_REG_GUEST_GDTR: 337 case VM_REG_GUEST_IDTR: 338 /* GDTR and IDTR don't have segment selectors */ 339 err = EINVAL; 340 break; 341 default: 342 err = EINVAL; 343 break; 344 } 345 346 return (err); 347 } 348 349 int 350 vmcb_seg(struct vmcb *vmcb, int ident, struct vmcb_segment *seg2) 351 { 352 struct vmcb_segment *seg; 353 354 seg = vmcb_segptr(vmcb, ident); 355 if (seg != NULL) { 356 bcopy(seg, seg2, sizeof(struct vmcb_segment)); 357 return (0); 358 } else { 359 return (EINVAL); 360 } 361 } 362 363 int 364 vmcb_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc) 365 { 366 struct vmcb *vmcb; 367 struct svm_softc *sc; 368 struct vmcb_segment *seg; 369 uint16_t attrib; 370 371 sc = arg; 372 vmcb = svm_get_vmcb(sc, vcpu); 373 374 seg = vmcb_segptr(vmcb, reg); 375 KASSERT(seg != NULL, ("%s: invalid segment descriptor %d", 376 __func__, reg)); 377 378 seg->base = desc->base; 379 seg->limit = desc->limit; 380 if (reg != VM_REG_GUEST_GDTR && reg != VM_REG_GUEST_IDTR) { 381 /* 382 * Map seg_desc access to VMCB attribute format. 383 * 384 * SVM uses the 'P' bit in the segment attributes to indicate a 385 * NULL segment so clear it if the segment is marked unusable. 
386 */ 387 attrib = ((desc->access & 0xF000) >> 4) | (desc->access & 0xFF); 388 if (SEG_DESC_UNUSABLE(desc->access)) { 389 attrib &= ~0x80; 390 } 391 seg->attrib = attrib; 392 } 393 394 VCPU_CTR4(sc->vm, vcpu, "Setting desc %d: base (%#lx), limit (%#x), " 395 "attrib (%#x)", reg, seg->base, seg->limit, seg->attrib); 396 397 switch (reg) { 398 case VM_REG_GUEST_CS: 399 case VM_REG_GUEST_DS: 400 case VM_REG_GUEST_ES: 401 case VM_REG_GUEST_SS: 402 svm_set_dirty(sc, vcpu, VMCB_CACHE_SEG); 403 break; 404 case VM_REG_GUEST_GDTR: 405 case VM_REG_GUEST_IDTR: 406 svm_set_dirty(sc, vcpu, VMCB_CACHE_DT); 407 break; 408 default: 409 break; 410 } 411 412 return (0); 413 } 414 415 int 416 vmcb_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc) 417 { 418 struct vmcb *vmcb; 419 struct svm_softc *sc; 420 struct vmcb_segment *seg; 421 422 sc = arg; 423 vmcb = svm_get_vmcb(sc, vcpu); 424 seg = vmcb_segptr(vmcb, reg); 425 KASSERT(seg != NULL, ("%s: invalid segment descriptor %d", 426 __func__, reg)); 427 428 desc->base = seg->base; 429 desc->limit = seg->limit; 430 desc->access = 0; 431 432 if (reg != VM_REG_GUEST_GDTR && reg != VM_REG_GUEST_IDTR) { 433 /* Map seg_desc access to VMCB attribute format */ 434 desc->access = ((seg->attrib & 0xF00) << 4) | 435 (seg->attrib & 0xFF); 436 437 /* 438 * VT-x uses bit 16 to indicate a segment that has been loaded 439 * with a NULL selector (aka unusable). The 'desc->access' 440 * field is interpreted in the VT-x format by the 441 * processor-independent code. 442 * 443 * SVM uses the 'P' bit to convey the same information so 444 * convert it into the VT-x format. For more details refer to 445 * section "Segment State in the VMCB" in APMv2. 446 */ 447 if (reg != VM_REG_GUEST_CS && reg != VM_REG_GUEST_TR) { 448 if ((desc->access & 0x80) == 0) 449 desc->access |= 0x10000; /* Unusable segment */ 450 } 451 } 452 453 return (0); 454 } 455