/*-
 * Copyright (c) 2013 Anish Gupta (akgupt3@gmail.com)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>

#include <machine/segments.h>
#include <machine/specialreg.h>
#include <machine/vmm.h>

#include "vmm_ktr.h"

#include "vmcb.h"
#include "svm.h"
#include "svm_softc.h"

/*
 * The VMCB aka Virtual Machine Control Block is a 4KB aligned page
 * in memory that describes the virtual machine.
 *
 * The VMCB contains:
 * - instructions or events in the guest to intercept
 * - control bits that modify execution environment of the guest
 * - guest processor state (e.g. general purpose registers)
 */
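/*
 * Note: in addition to the named VM_REG_GUEST_* identifiers, vmcb_read()
 * and vmcb_write() below accept a raw-access encoding (see VMCB_ACCESS()
 * in vmcb.h) that packs a byte offset into the VMCB and an access width
 * into 'ident'; vmcb_access() decodes and services those requests
 * directly against the VMCB page.
 */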
/*
 * Return the VMCB segment area for the given segment register.
 */
static struct vmcb_segment *
vmcb_segptr(struct vmcb *vmcb, int type)
{
	struct vmcb_state *state;
	struct vmcb_segment *seg;

	state = &vmcb->state;

	switch (type) {
	case VM_REG_GUEST_CS:
		seg = &state->cs;
		break;

	case VM_REG_GUEST_DS:
		seg = &state->ds;
		break;

	case VM_REG_GUEST_ES:
		seg = &state->es;
		break;

	case VM_REG_GUEST_FS:
		seg = &state->fs;
		break;

	case VM_REG_GUEST_GS:
		seg = &state->gs;
		break;

	case VM_REG_GUEST_SS:
		seg = &state->ss;
		break;

	case VM_REG_GUEST_GDTR:
		seg = &state->gdt;
		break;

	case VM_REG_GUEST_IDTR:
		seg = &state->idt;
		break;

	case VM_REG_GUEST_LDTR:
		seg = &state->ldt;
		break;

	case VM_REG_GUEST_TR:
		seg = &state->tr;
		break;

	default:
		seg = NULL;
		break;
	}

	return (seg);
}

static int
vmcb_access(struct svm_softc *softc, int vcpu, int write, int ident,
    uint64_t *val)
{
	struct vmcb *vmcb;
	int off, bytes;
	char *ptr;

	vmcb = svm_get_vmcb(softc, vcpu);
	off = VMCB_ACCESS_OFFSET(ident);
	bytes = VMCB_ACCESS_BYTES(ident);

	if ((off + bytes) >= sizeof (struct vmcb))
		return (EINVAL);

	ptr = (char *)vmcb;

	if (!write)
		*val = 0;

	switch (bytes) {
	case 8:
	case 4:
	case 2:
		if (write)
			memcpy(ptr + off, val, bytes);
		else
			memcpy(val, ptr + off, bytes);
		break;
	default:
		VCPU_CTR1(softc->vm, vcpu,
		    "Invalid size %d for VMCB access", bytes);
		return (EINVAL);
	}

	/* Invalidate all VMCB state cached by h/w. */
	if (write)
		svm_set_dirty(softc, vcpu, 0xffffffff);

	return (0);
}
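/*
 * Usage sketch (hypothetical caller, for illustration only): reading the
 * guest %rip through the register interface, given a valid softc 'sc' and
 * vcpu id:
 *
 *	uint64_t rip;
 *	int error;
 *
 *	error = vmcb_read(sc, vcpu, VM_REG_GUEST_RIP, &rip);
 *	if (error == 0)
 *		... 'rip' holds the guest's saved instruction pointer ...
 */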
/*
 * Read a segment selector, control register or general purpose
 * register from the VMCB.
 */
int
vmcb_read(struct svm_softc *sc, int vcpu, int ident, uint64_t *retval)
{
	struct vmcb *vmcb;
	struct vmcb_state *state;
	struct vmcb_segment *seg;
	int err;

	vmcb = svm_get_vmcb(sc, vcpu);
	state = &vmcb->state;
	err = 0;

	if (VMCB_ACCESS_OK(ident))
		return (vmcb_access(sc, vcpu, 0, ident, retval));

	switch (ident) {
	case VM_REG_GUEST_CR0:
		*retval = state->cr0;
		break;

	case VM_REG_GUEST_CR2:
		*retval = state->cr2;
		break;

	case VM_REG_GUEST_CR3:
		*retval = state->cr3;
		break;

	case VM_REG_GUEST_CR4:
		*retval = state->cr4;
		break;

	case VM_REG_GUEST_DR7:
		*retval = state->dr7;
		break;

	case VM_REG_GUEST_EFER:
		*retval = state->efer;
		break;

	case VM_REG_GUEST_RAX:
		*retval = state->rax;
		break;

	case VM_REG_GUEST_RFLAGS:
		*retval = state->rflags;
		break;

	case VM_REG_GUEST_RIP:
		*retval = state->rip;
		break;

	case VM_REG_GUEST_RSP:
		*retval = state->rsp;
		break;

	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_FS:
	case VM_REG_GUEST_GS:
	case VM_REG_GUEST_SS:
	case VM_REG_GUEST_LDTR:
	case VM_REG_GUEST_TR:
		seg = vmcb_segptr(vmcb, ident);
		KASSERT(seg != NULL, ("%s: unable to get segment %d from VMCB",
		    __func__, ident));
		*retval = seg->selector;
		break;

	case VM_REG_GUEST_GDTR:
	case VM_REG_GUEST_IDTR:
		/* GDTR and IDTR don't have segment selectors */
		err = EINVAL;
		break;

	default:
		err = EINVAL;
		break;
	}

	return (err);
}

/*
 * Write a segment selector, control register or general purpose
 * register to the VMCB.
 */
int
vmcb_write(struct svm_softc *sc, int vcpu, int ident, uint64_t val)
{
	struct vmcb *vmcb;
	struct vmcb_state *state;
	struct vmcb_segment *seg;
	int err, dirtyseg;

	vmcb = svm_get_vmcb(sc, vcpu);
	state = &vmcb->state;
	dirtyseg = 0;
	err = 0;

	if (VMCB_ACCESS_OK(ident))
		return (vmcb_access(sc, vcpu, 1, ident, &val));

	switch (ident) {
	case VM_REG_GUEST_CR0:
		state->cr0 = val;
		svm_set_dirty(sc, vcpu, VMCB_CACHE_CR);
		break;

	case VM_REG_GUEST_CR2:
		state->cr2 = val;
		svm_set_dirty(sc, vcpu, VMCB_CACHE_CR2);
		break;

	case VM_REG_GUEST_CR3:
		state->cr3 = val;
		svm_set_dirty(sc, vcpu, VMCB_CACHE_CR);
		break;

	case VM_REG_GUEST_CR4:
		state->cr4 = val;
		svm_set_dirty(sc, vcpu, VMCB_CACHE_CR);
		break;

	case VM_REG_GUEST_DR7:
		state->dr7 = val;
		break;

	case VM_REG_GUEST_EFER:
		/* EFER_SVM must always be set when the guest is executing */
		state->efer = val | EFER_SVM;
		svm_set_dirty(sc, vcpu, VMCB_CACHE_CR);
		break;

	case VM_REG_GUEST_RAX:
		state->rax = val;
		break;

	case VM_REG_GUEST_RFLAGS:
		state->rflags = val;
		break;

	case VM_REG_GUEST_RIP:
		state->rip = val;
		break;

	case VM_REG_GUEST_RSP:
		state->rsp = val;
		break;

	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_SS:
		dirtyseg = 1;		/* FALLTHROUGH */
	case VM_REG_GUEST_FS:
	case VM_REG_GUEST_GS:
	case VM_REG_GUEST_LDTR:
	case VM_REG_GUEST_TR:
		seg = vmcb_segptr(vmcb, ident);
		KASSERT(seg != NULL, ("%s: unable to get segment %d from VMCB",
		    __func__, ident));
		seg->selector = val;
		if (dirtyseg)
			svm_set_dirty(sc, vcpu, VMCB_CACHE_SEG);
		break;

	case VM_REG_GUEST_GDTR:
	case VM_REG_GUEST_IDTR:
		/* GDTR and IDTR don't have segment selectors */
		err = EINVAL;
		break;

	default:
		err = EINVAL;
		break;
	}

	return (err);
}

int
vmcb_seg(struct vmcb *vmcb, int ident, struct vmcb_segment *seg2)
{
	struct vmcb_segment *seg;

	seg = vmcb_segptr(vmcb, ident);
	if (seg != NULL) {
		bcopy(seg, seg2, sizeof(struct vmcb_segment));
		return (0);
	} else {
		return (EINVAL);
	}
}
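/*
 * The translation in vmcb_setdesc()/vmcb_getdesc() below converts between
 * the seg_desc 'access' word (VT-x style access rights: type/S/DPL/P in
 * bits 7:0, AVL/L/D-B/G in bits 15:12, with bits 11:8 unused) and the
 * VMCB's 12-bit attribute field, which packs the same fields contiguously
 * into bits 7:0 and 11:8.
 *
 * Worked example (illustrative): a long-mode code segment with access
 * word 0xa09b (G=1, L=1, P=1, DPL=0, type 0xb) maps to VMCB attrib
 * 0xa9b, and vmcb_getdesc() recovers 0xa09b from it.
 */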
int
vmcb_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
{
	struct vmcb *vmcb;
	struct svm_softc *sc;
	struct vmcb_segment *seg;
	uint16_t attrib;

	sc = arg;
	vmcb = svm_get_vmcb(sc, vcpu);

	seg = vmcb_segptr(vmcb, reg);
	KASSERT(seg != NULL, ("%s: invalid segment descriptor %d",
	    __func__, reg));

	seg->base = desc->base;
	seg->limit = desc->limit;
	if (reg != VM_REG_GUEST_GDTR && reg != VM_REG_GUEST_IDTR) {
		/*
		 * Map seg_desc access to VMCB attribute format.
		 *
		 * SVM uses the 'P' bit in the segment attributes to indicate a
		 * NULL segment so clear it if the segment is marked unusable.
		 */
		attrib = ((desc->access & 0xF000) >> 4) | (desc->access & 0xFF);
		if (SEG_DESC_UNUSABLE(desc->access)) {
			attrib &= ~0x80;
		}
		seg->attrib = attrib;
	}

	VCPU_CTR4(sc->vm, vcpu, "Setting desc %d: base (%#lx), limit (%#x), "
	    "attrib (%#x)", reg, seg->base, seg->limit, seg->attrib);

	switch (reg) {
	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_SS:
		svm_set_dirty(sc, vcpu, VMCB_CACHE_SEG);
		break;
	case VM_REG_GUEST_GDTR:
	case VM_REG_GUEST_IDTR:
		svm_set_dirty(sc, vcpu, VMCB_CACHE_DT);
		break;
	default:
		break;
	}

	return (0);
}

int
vmcb_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
{
	struct vmcb *vmcb;
	struct svm_softc *sc;
	struct vmcb_segment *seg;

	sc = arg;
	vmcb = svm_get_vmcb(sc, vcpu);
	seg = vmcb_segptr(vmcb, reg);
	KASSERT(seg != NULL, ("%s: invalid segment descriptor %d",
	    __func__, reg));

	desc->base = seg->base;
	desc->limit = seg->limit;
	desc->access = 0;

	if (reg != VM_REG_GUEST_GDTR && reg != VM_REG_GUEST_IDTR) {
		/* Map VMCB attribute format back to seg_desc access bits */
		desc->access = ((seg->attrib & 0xF00) << 4) |
		    (seg->attrib & 0xFF);

		/*
		 * VT-x uses bit 16 to indicate a segment that has been loaded
		 * with a NULL selector (aka unusable). The 'desc->access'
		 * field is interpreted in the VT-x format by the
		 * processor-independent code.
		 *
		 * SVM uses the 'P' bit to convey the same information so
		 * convert it into the VT-x format. For more details refer to
		 * section "Segment State in the VMCB" in APMv2.
		 */
		if (reg != VM_REG_GUEST_CS && reg != VM_REG_GUEST_TR) {
			if ((desc->access & 0x80) == 0)
				desc->access |= 0x10000;  /* Unusable segment */
		}
	}

	return (0);
}