/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/_iovec.h>
#include <sys/cpuset.h>

#include <machine/specialreg.h>
#include <machine/param.h>

#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

#include <libutil.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>

#include "vmmapi.h"

#define	MB	(1024 * 1024UL)
#define	GB	(1024 * 1024 * 1024UL)

struct vmctx {
	int	fd;
	uint32_t lowmem_limit;
	enum vm_mmap_style vms;
	int	memflags;
	size_t	lowmem;
	char	*lowmem_addr;
	size_t	highmem;
	char	*highmem_addr;
	char	*name;
};

#define	CREATE(x)  sysctlbyname("hw.vmm.create", NULL, NULL, (x), strlen((x)))
#define	DESTROY(x) sysctlbyname("hw.vmm.destroy", NULL, NULL, (x), strlen((x)))

static int
vm_device_open(const char *name)
{
	int fd, len;
	char *vmfile;

	len = strlen("/dev/vmm/") + strlen(name) + 1;
	vmfile = malloc(len);
	assert(vmfile != NULL);
	snprintf(vmfile, len, "/dev/vmm/%s", name);

	/* Open the device file */
	fd = open(vmfile, O_RDWR, 0);

	free(vmfile);
	return (fd);
}

int
vm_create(const char *name)
{

	return (CREATE((char *)name));
}

struct vmctx *
vm_open(const char *name)
{
	struct vmctx *vm;

	vm = malloc(sizeof(struct vmctx) + strlen(name) + 1);
	assert(vm != NULL);

	vm->fd = -1;
	vm->memflags = 0;
	vm->lowmem_limit = 3 * GB;
	vm->name = (char *)(vm + 1);
	strcpy(vm->name, name);

	if ((vm->fd = vm_device_open(vm->name)) < 0)
		goto err;

	return (vm);
err:
	vm_destroy(vm);
	return (NULL);
}

void
vm_destroy(struct vmctx *vm)
{
	assert(vm != NULL);

	if (vm->fd >= 0)
		close(vm->fd);
	DESTROY(vm->name);

	free(vm);
}
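/*
 * Illustrative sketch (not part of the original library): the typical
 * create/open/destroy lifecycle driven through the functions above.
 * Note that vm_destroy() both closes the device fd and destroys the
 * kernel VM via the "hw.vmm.destroy" sysctl.  The VM name "example"
 * and the err(3) error handling are hypothetical.
 *
 *	struct vmctx *ctx;
 *
 *	if (vm_create("example") != 0)
 *		err(1, "vm_create");
 *	if ((ctx = vm_open("example")) == NULL)
 *		errx(1, "vm_open");
 *
 *	... configure memory and vcpus, run the guest ...
 *
 *	vm_destroy(ctx);
 */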
int
vm_parse_memsize(const char *optarg, size_t *ret_memsize)
{
	char *endptr;
	size_t optval;
	int error;

	optval = strtoul(optarg, &endptr, 0);
	if (*optarg != '\0' && *endptr == '\0') {
		/*
		 * For the sake of backward compatibility, if the memory size
		 * specified on the command line is less than a megabyte then
		 * it is interpreted as being in units of MB.
		 */
		if (optval < MB)
			optval *= MB;
		*ret_memsize = optval;
		error = 0;
	} else
		error = expand_number(optarg, ret_memsize);

	return (error);
}
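/*
 * Examples (illustrative): "256" parses fully and is below 1 MB, so it
 * is scaled to 256 MB; "3221225472" parses fully and is taken literally
 * as 3 GB.  An argument that strtoul(3) cannot consume completely, such
 * as "2g" or "512m", falls through to expand_number(3), which accepts
 * K/M/G/T suffixes.
 */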
int
vm_get_memory_seg(struct vmctx *ctx, vm_paddr_t gpa, size_t *ret_len,
		  int *wired)
{
	int error;
	struct vm_memory_segment seg;

	bzero(&seg, sizeof(seg));
	seg.gpa = gpa;
	error = ioctl(ctx->fd, VM_GET_MEMORY_SEG, &seg);
	*ret_len = seg.len;
	if (wired != NULL)
		*wired = seg.wired;
	return (error);
}

uint32_t
vm_get_lowmem_limit(struct vmctx *ctx)
{

	return (ctx->lowmem_limit);
}

void
vm_set_lowmem_limit(struct vmctx *ctx, uint32_t limit)
{

	ctx->lowmem_limit = limit;
}

void
vm_set_memflags(struct vmctx *ctx, int flags)
{

	ctx->memflags = flags;
}

static int
setup_memory_segment(struct vmctx *ctx, vm_paddr_t gpa, size_t len, char **addr)
{
	int error, mmap_flags;
	struct vm_memory_segment seg;

	/*
	 * Create and optionally map 'len' bytes of memory at guest
	 * physical address 'gpa'
	 */
	bzero(&seg, sizeof(seg));
	seg.gpa = gpa;
	seg.len = len;
	error = ioctl(ctx->fd, VM_MAP_MEMORY, &seg);
	if (error == 0 && addr != NULL) {
		mmap_flags = MAP_SHARED;
		if ((ctx->memflags & VM_MEM_F_INCORE) == 0)
			mmap_flags |= MAP_NOCORE;
		*addr = mmap(NULL, len, PROT_READ | PROT_WRITE, mmap_flags,
		    ctx->fd, gpa);
		/* Propagate mmap(2) failure to the caller */
		if (*addr == MAP_FAILED)
			error = -1;
	}
	return (error);
}

int
vm_setup_memory(struct vmctx *ctx, size_t memsize, enum vm_mmap_style vms)
{
	char **addr;
	int error;

	/* XXX VM_MMAP_SPARSE not implemented yet */
	assert(vms == VM_MMAP_NONE || vms == VM_MMAP_ALL);
	ctx->vms = vms;

	/*
	 * If 'memsize' cannot fit entirely in the 'lowmem' segment then
	 * create another 'highmem' segment above 4GB for the remainder.
	 */
	if (memsize > ctx->lowmem_limit) {
		ctx->lowmem = ctx->lowmem_limit;
		ctx->highmem = memsize - ctx->lowmem;
	} else {
		ctx->lowmem = memsize;
		ctx->highmem = 0;
	}

	if (ctx->lowmem > 0) {
		addr = (vms == VM_MMAP_ALL) ? &ctx->lowmem_addr : NULL;
		error = setup_memory_segment(ctx, 0, ctx->lowmem, addr);
		if (error)
			return (error);
	}

	if (ctx->highmem > 0) {
		addr = (vms == VM_MMAP_ALL) ? &ctx->highmem_addr : NULL;
		error = setup_memory_segment(ctx, 4*GB, ctx->highmem, addr);
		if (error)
			return (error);
	}

	return (0);
}

void *
vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len)
{

	/* XXX VM_MMAP_SPARSE not implemented yet */
	assert(ctx->vms == VM_MMAP_ALL);

	if (gaddr < ctx->lowmem && gaddr + len <= ctx->lowmem)
		return ((void *)(ctx->lowmem_addr + gaddr));

	if (gaddr >= 4*GB) {
		gaddr -= 4*GB;
		if (gaddr < ctx->highmem && gaddr + len <= ctx->highmem)
			return ((void *)(ctx->highmem_addr + gaddr));
	}

	return (NULL);
}
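/*
 * Layout note with a worked example (illustrative): with the default
 * 3 GB lowmem limit, vm_setup_memory(ctx, 6 * GB, VM_MMAP_ALL) creates
 * a 3 GB segment at gpa 0 and a 3 GB segment at gpa 4 GB, leaving the
 * [3 GB, 4 GB) hole free for device memory.  vm_map_gpa(ctx, 5 * GB,
 * 4096) then resolves into the highmem mapping at highmem_addr + 1 GB.
 */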
int
vm_set_desc(struct vmctx *ctx, int vcpu, int reg,
	    uint64_t base, uint32_t limit, uint32_t access)
{
	int error;
	struct vm_seg_desc vmsegdesc;

	bzero(&vmsegdesc, sizeof(vmsegdesc));
	vmsegdesc.cpuid = vcpu;
	vmsegdesc.regnum = reg;
	vmsegdesc.desc.base = base;
	vmsegdesc.desc.limit = limit;
	vmsegdesc.desc.access = access;

	error = ioctl(ctx->fd, VM_SET_SEGMENT_DESCRIPTOR, &vmsegdesc);
	return (error);
}

int
vm_get_desc(struct vmctx *ctx, int vcpu, int reg,
	    uint64_t *base, uint32_t *limit, uint32_t *access)
{
	int error;
	struct vm_seg_desc vmsegdesc;

	bzero(&vmsegdesc, sizeof(vmsegdesc));
	vmsegdesc.cpuid = vcpu;
	vmsegdesc.regnum = reg;

	error = ioctl(ctx->fd, VM_GET_SEGMENT_DESCRIPTOR, &vmsegdesc);
	if (error == 0) {
		*base = vmsegdesc.desc.base;
		*limit = vmsegdesc.desc.limit;
		*access = vmsegdesc.desc.access;
	}
	return (error);
}

int
vm_set_register(struct vmctx *ctx, int vcpu, int reg, uint64_t val)
{
	int error;
	struct vm_register vmreg;

	bzero(&vmreg, sizeof(vmreg));
	vmreg.cpuid = vcpu;
	vmreg.regnum = reg;
	vmreg.regval = val;

	error = ioctl(ctx->fd, VM_SET_REGISTER, &vmreg);
	return (error);
}

int
vm_get_register(struct vmctx *ctx, int vcpu, int reg, uint64_t *ret_val)
{
	int error;
	struct vm_register vmreg;

	bzero(&vmreg, sizeof(vmreg));
	vmreg.cpuid = vcpu;
	vmreg.regnum = reg;

	error = ioctl(ctx->fd, VM_GET_REGISTER, &vmreg);
	*ret_val = vmreg.regval;
	return (error);
}

int
vm_run(struct vmctx *ctx, int vcpu, uint64_t rip, struct vm_exit *vmexit)
{
	int error;
	struct vm_run vmrun;

	bzero(&vmrun, sizeof(vmrun));
	vmrun.cpuid = vcpu;
	vmrun.rip = rip;

	error = ioctl(ctx->fd, VM_RUN, &vmrun);
	bcopy(&vmrun.vm_exit, vmexit, sizeof(struct vm_exit));
	return (error);
}

int
vm_suspend(struct vmctx *ctx, enum vm_suspend_how how)
{
	struct vm_suspend vmsuspend;

	bzero(&vmsuspend, sizeof(vmsuspend));
	vmsuspend.how = how;
	return (ioctl(ctx->fd, VM_SUSPEND, &vmsuspend));
}

int
vm_reinit(struct vmctx *ctx)
{

	return (ioctl(ctx->fd, VM_REINIT, 0));
}

static int
vm_inject_exception_real(struct vmctx *ctx, int vcpu, int vector,
			 int error_code, int error_code_valid)
{
	struct vm_exception exc;

	bzero(&exc, sizeof(exc));
	exc.cpuid = vcpu;
	exc.vector = vector;
	exc.error_code = error_code;
	exc.error_code_valid = error_code_valid;

	return (ioctl(ctx->fd, VM_INJECT_EXCEPTION, &exc));
}

int
vm_inject_exception(struct vmctx *ctx, int vcpu, int vector)
{

	return (vm_inject_exception_real(ctx, vcpu, vector, 0, 0));
}

int
vm_inject_exception2(struct vmctx *ctx, int vcpu, int vector, int errcode)
{

	return (vm_inject_exception_real(ctx, vcpu, vector, errcode, 1));
}
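/*
 * Illustrative sketch: a general-protection fault (#GP, vector 13)
 * pushes an error code, so it goes through vm_inject_exception2(),
 * while a divide error (#DE, vector 0) has no error code.  The vcpu
 * id 0 is hypothetical.
 *
 *	vm_inject_exception2(ctx, 0, 13, 0);	(#GP, error code 0)
 *	vm_inject_exception(ctx, 0, 0);		(#DE, no error code)
 */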
int
vm_apicid2vcpu(struct vmctx *ctx, int apicid)
{
	/*
	 * The apic id associated with the 'vcpu' has the same numerical value
	 * as the 'vcpu' itself.
	 */
	return (apicid);
}

int
vm_lapic_irq(struct vmctx *ctx, int vcpu, int vector)
{
	struct vm_lapic_irq vmirq;

	bzero(&vmirq, sizeof(vmirq));
	vmirq.cpuid = vcpu;
	vmirq.vector = vector;

	return (ioctl(ctx->fd, VM_LAPIC_IRQ, &vmirq));
}

int
vm_lapic_local_irq(struct vmctx *ctx, int vcpu, int vector)
{
	struct vm_lapic_irq vmirq;

	bzero(&vmirq, sizeof(vmirq));
	vmirq.cpuid = vcpu;
	vmirq.vector = vector;

	return (ioctl(ctx->fd, VM_LAPIC_LOCAL_IRQ, &vmirq));
}

int
vm_lapic_msi(struct vmctx *ctx, uint64_t addr, uint64_t msg)
{
	struct vm_lapic_msi vmmsi;

	bzero(&vmmsi, sizeof(vmmsi));
	vmmsi.addr = addr;
	vmmsi.msg = msg;

	return (ioctl(ctx->fd, VM_LAPIC_MSI, &vmmsi));
}

int
vm_ioapic_assert_irq(struct vmctx *ctx, int irq)
{
	struct vm_ioapic_irq ioapic_irq;

	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
	ioapic_irq.irq = irq;

	return (ioctl(ctx->fd, VM_IOAPIC_ASSERT_IRQ, &ioapic_irq));
}

int
vm_ioapic_deassert_irq(struct vmctx *ctx, int irq)
{
	struct vm_ioapic_irq ioapic_irq;

	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
	ioapic_irq.irq = irq;

	return (ioctl(ctx->fd, VM_IOAPIC_DEASSERT_IRQ, &ioapic_irq));
}

int
vm_ioapic_pulse_irq(struct vmctx *ctx, int irq)
{
	struct vm_ioapic_irq ioapic_irq;

	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
	ioapic_irq.irq = irq;

	return (ioctl(ctx->fd, VM_IOAPIC_PULSE_IRQ, &ioapic_irq));
}

int
vm_ioapic_pincount(struct vmctx *ctx, int *pincount)
{

	return (ioctl(ctx->fd, VM_IOAPIC_PINCOUNT, pincount));
}

int
vm_isa_assert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
	struct vm_isa_irq isa_irq;

	bzero(&isa_irq, sizeof(struct vm_isa_irq));
	isa_irq.atpic_irq = atpic_irq;
	isa_irq.ioapic_irq = ioapic_irq;

	return (ioctl(ctx->fd, VM_ISA_ASSERT_IRQ, &isa_irq));
}

int
vm_isa_deassert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
	struct vm_isa_irq isa_irq;

	bzero(&isa_irq, sizeof(struct vm_isa_irq));
	isa_irq.atpic_irq = atpic_irq;
	isa_irq.ioapic_irq = ioapic_irq;

	return (ioctl(ctx->fd, VM_ISA_DEASSERT_IRQ, &isa_irq));
}

int
vm_isa_pulse_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
	struct vm_isa_irq isa_irq;

	bzero(&isa_irq, sizeof(struct vm_isa_irq));
	isa_irq.atpic_irq = atpic_irq;
	isa_irq.ioapic_irq = ioapic_irq;

	return (ioctl(ctx->fd, VM_ISA_PULSE_IRQ, &isa_irq));
}

int
vm_isa_set_irq_trigger(struct vmctx *ctx, int atpic_irq,
		       enum vm_intr_trigger trigger)
{
	struct vm_isa_irq_trigger isa_irq_trigger;

	bzero(&isa_irq_trigger, sizeof(struct vm_isa_irq_trigger));
	isa_irq_trigger.atpic_irq = atpic_irq;
	isa_irq_trigger.trigger = trigger;

	return (ioctl(ctx->fd, VM_ISA_SET_IRQ_TRIGGER, &isa_irq_trigger));
}

int
vm_inject_nmi(struct vmctx *ctx, int vcpu)
{
	struct vm_nmi vmnmi;

	bzero(&vmnmi, sizeof(vmnmi));
	vmnmi.cpuid = vcpu;

	return (ioctl(ctx->fd, VM_INJECT_NMI, &vmnmi));
}

static struct {
	const char *name;
	int type;
} capstrmap[] = {
	{ "hlt_exit",		VM_CAP_HALT_EXIT },
	{ "mtrap_exit",		VM_CAP_MTRAP_EXIT },
	{ "pause_exit",		VM_CAP_PAUSE_EXIT },
	{ "unrestricted_guest",	VM_CAP_UNRESTRICTED_GUEST },
	{ "enable_invpcid",	VM_CAP_ENABLE_INVPCID },
	{ 0 }
};

int
vm_capability_name2type(const char *capname)
{
	int i;

	for (i = 0; capstrmap[i].name != NULL && capname != NULL; i++) {
		if (strcmp(capstrmap[i].name, capname) == 0)
			return (capstrmap[i].type);
	}

	return (-1);
}

const char *
vm_capability_type2name(int type)
{
	int i;

	for (i = 0; capstrmap[i].name != NULL; i++) {
		if (capstrmap[i].type == type)
			return (capstrmap[i].name);
	}

	return (NULL);
}

int
vm_get_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap,
		  int *retval)
{
	int error;
	struct vm_capability vmcap;

	bzero(&vmcap, sizeof(vmcap));
	vmcap.cpuid = vcpu;
	vmcap.captype = cap;

	error = ioctl(ctx->fd, VM_GET_CAPABILITY, &vmcap);
	*retval = vmcap.capval;
	return (error);
}

int
vm_set_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap, int val)
{
	struct vm_capability vmcap;

	bzero(&vmcap, sizeof(vmcap));
	vmcap.cpuid = vcpu;
	vmcap.captype = cap;
	vmcap.capval = val;

	return (ioctl(ctx->fd, VM_SET_CAPABILITY, &vmcap));
}
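/*
 * Illustrative sketch: enabling HLT exits by capability name, the way a
 * front end parsing a user-supplied capability string might.  The vcpu
 * id 0 and the string literal are hypothetical.
 *
 *	int type;
 *
 *	if ((type = vm_capability_name2type("hlt_exit")) >= 0)
 *		(void) vm_set_capability(ctx, 0, type, 1);
 */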
int
vm_assign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
{
	struct vm_pptdev pptdev;

	bzero(&pptdev, sizeof(pptdev));
	pptdev.bus = bus;
	pptdev.slot = slot;
	pptdev.func = func;

	return (ioctl(ctx->fd, VM_BIND_PPTDEV, &pptdev));
}

int
vm_unassign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
{
	struct vm_pptdev pptdev;

	bzero(&pptdev, sizeof(pptdev));
	pptdev.bus = bus;
	pptdev.slot = slot;
	pptdev.func = func;

	return (ioctl(ctx->fd, VM_UNBIND_PPTDEV, &pptdev));
}

int
vm_map_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
		   vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	struct vm_pptdev_mmio pptmmio;

	bzero(&pptmmio, sizeof(pptmmio));
	pptmmio.bus = bus;
	pptmmio.slot = slot;
	pptmmio.func = func;
	pptmmio.gpa = gpa;
	pptmmio.len = len;
	pptmmio.hpa = hpa;

	return (ioctl(ctx->fd, VM_MAP_PPTDEV_MMIO, &pptmmio));
}

int
vm_setup_pptdev_msi(struct vmctx *ctx, int vcpu, int bus, int slot, int func,
		    uint64_t addr, uint64_t msg, int numvec)
{
	struct vm_pptdev_msi pptmsi;

	bzero(&pptmsi, sizeof(pptmsi));
	pptmsi.vcpu = vcpu;
	pptmsi.bus = bus;
	pptmsi.slot = slot;
	pptmsi.func = func;
	pptmsi.msg = msg;
	pptmsi.addr = addr;
	pptmsi.numvec = numvec;

	return (ioctl(ctx->fd, VM_PPTDEV_MSI, &pptmsi));
}

int
vm_setup_pptdev_msix(struct vmctx *ctx, int vcpu, int bus, int slot, int func,
		     int idx, uint64_t addr, uint64_t msg,
		     uint32_t vector_control)
{
	struct vm_pptdev_msix pptmsix;

	bzero(&pptmsix, sizeof(pptmsix));
	pptmsix.vcpu = vcpu;
	pptmsix.bus = bus;
	pptmsix.slot = slot;
	pptmsix.func = func;
	pptmsix.idx = idx;
	pptmsix.msg = msg;
	pptmsix.addr = addr;
	pptmsix.vector_control = vector_control;

	return (ioctl(ctx->fd, VM_PPTDEV_MSIX, &pptmsix));
}

uint64_t *
vm_get_stats(struct vmctx *ctx, int vcpu, struct timeval *ret_tv,
	     int *ret_entries)
{
	int error;

	/* Static buffer: overwritten by the next call, not thread-safe. */
	static struct vm_stats vmstats;

	vmstats.cpuid = vcpu;

	error = ioctl(ctx->fd, VM_STATS, &vmstats);
	if (error == 0) {
		if (ret_entries)
			*ret_entries = vmstats.num_entries;
		if (ret_tv)
			*ret_tv = vmstats.tv;
		return (vmstats.statbuf);
	} else
		return (NULL);
}

const char *
vm_get_stat_desc(struct vmctx *ctx, int index)
{
	static struct vm_stat_desc statdesc;

	statdesc.index = index;
	if (ioctl(ctx->fd, VM_STAT_DESC, &statdesc) == 0)
		return (statdesc.desc);
	else
		return (NULL);
}
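/*
 * Illustrative sketch: dumping all stats for vcpu 0.  Remember that
 * vm_get_stats() hands back a static buffer that the next call
 * overwrites.
 *
 *	struct timeval tv;
 *	uint64_t *stats;
 *	int i, num;
 *
 *	stats = vm_get_stats(ctx, 0, &tv, &num);
 *	if (stats != NULL) {
 *		for (i = 0; i < num; i++)
 *			printf("%s: %ju\n", vm_get_stat_desc(ctx, i),
 *			    (uintmax_t)stats[i]);
 *	}
 */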
int
vm_get_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state *state)
{
	int error;
	struct vm_x2apic x2apic;

	bzero(&x2apic, sizeof(x2apic));
	x2apic.cpuid = vcpu;

	error = ioctl(ctx->fd, VM_GET_X2APIC_STATE, &x2apic);
	*state = x2apic.state;
	return (error);
}

int
vm_set_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state state)
{
	int error;
	struct vm_x2apic x2apic;

	bzero(&x2apic, sizeof(x2apic));
	x2apic.cpuid = vcpu;
	x2apic.state = state;

	error = ioctl(ctx->fd, VM_SET_X2APIC_STATE, &x2apic);

	return (error);
}

/*
 * From Intel Vol 3a:
 * Table 9-1. IA-32 Processor States Following Power-up, Reset or INIT
 */
int
vcpu_reset(struct vmctx *vmctx, int vcpu)
{
	int error;
	uint64_t rflags, rip, cr0, cr4, zero, desc_base, rdx;
	uint32_t desc_access, desc_limit;
	uint16_t sel;

	zero = 0;

	rflags = 0x2;
	error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RFLAGS, rflags);
	if (error)
		goto done;

	rip = 0xfff0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RIP, rip)) != 0)
		goto done;

	cr0 = CR0_NE;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR0, cr0)) != 0)
		goto done;

	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR3, zero)) != 0)
		goto done;

	cr4 = 0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR4, cr4)) != 0)
		goto done;

	/*
	 * CS: present, r/w, accessed, 16-bit, byte granularity, usable
	 */
	desc_base = 0xffff0000;
	desc_limit = 0xffff;
	desc_access = 0x0093;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_CS,
	    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	sel = 0xf000;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CS, sel)) != 0)
		goto done;

	/*
	 * SS,DS,ES,FS,GS: present, r/w, accessed, 16-bit, byte granularity
	 */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0x0093;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_SS,
	    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_DS,
	    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_ES,
	    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_FS,
	    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GS,
	    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	sel = 0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_SS, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_DS, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_ES, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_FS, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_GS, sel)) != 0)
		goto done;

	/* General purpose registers */
	rdx = 0xf00;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RAX, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RBX, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RCX, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RDX, rdx)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RSI, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RDI, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RBP, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RSP, zero)) != 0)
		goto done;

	/* GDTR, IDTR */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GDTR,
	    desc_base, desc_limit, desc_access);
	if (error != 0)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_IDTR,
	    desc_base, desc_limit, desc_access);
	if (error != 0)
		goto done;

	/* TR */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0x0000008b;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_TR, desc_base,
	    desc_limit, desc_access);
	if (error)
		goto done;

	sel = 0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_TR, sel)) != 0)
		goto done;

	/* LDTR */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0x00000082;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_LDTR, desc_base,
	    desc_limit, desc_access);
	if (error)
		goto done;

	sel = 0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_LDTR, sel)) != 0)
		goto done;

	/* XXX cr2, debug registers */

	error = 0;
done:
	return (error);
}

int
vm_get_gpa_pmap(struct vmctx *ctx, uint64_t gpa, uint64_t *pte, int *num)
{
	int error, i;
	struct vm_gpa_pte gpapte;

	bzero(&gpapte, sizeof(gpapte));
	gpapte.gpa = gpa;

	error = ioctl(ctx->fd, VM_GET_GPA_PMAP, &gpapte);

	if (error == 0) {
		*num = gpapte.ptenum;
		for (i = 0; i < gpapte.ptenum; i++)
			pte[i] = gpapte.pte[i];
	}

	return (error);
}

int
vm_get_hpet_capabilities(struct vmctx *ctx, uint32_t *capabilities)
{
	int error;
	struct vm_hpet_cap cap;

	bzero(&cap, sizeof(struct vm_hpet_cap));
	error = ioctl(ctx->fd, VM_GET_HPET_CAPABILITIES, &cap);
	if (capabilities != NULL)
		*capabilities = cap.capabilities;
	return (error);
}

static int
gla2gpa(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
	uint64_t gla, int prot, int *fault, uint64_t *gpa)
{
	struct vm_gla2gpa gg;
	int error;

	bzero(&gg, sizeof(struct vm_gla2gpa));
	gg.vcpuid = vcpu;
	gg.prot = prot;
	gg.gla = gla;
	gg.paging = *paging;

	error = ioctl(ctx->fd, VM_GLA2GPA, &gg);
	if (error == 0) {
		*fault = gg.fault;
		*gpa = gg.gpa;
	}
	return (error);
}
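/*
 * vm_gla2gpa() below walks a guest linear range and scatters it into
 * page-granular iovec entries, since a linear range that crosses a page
 * boundary may map to discontiguous guest-physical pages.  Worked
 * example (illustrative): gla 0x1ff8, len 16 with 4 KB pages yields two
 * 8-byte entries, one ending at the first page boundary and one
 * starting at the gpa of the next page.
 */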
#ifndef min
#define	min(a,b)	(((a) < (b)) ? (a) : (b))
#endif

int
vm_gla2gpa(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
	   uint64_t gla, size_t len, int prot, struct iovec *iov, int iovcnt)
{
	uint64_t gpa;
	int error, fault, i, n, off;

	for (i = 0; i < iovcnt; i++) {
		iov[i].iov_base = 0;
		iov[i].iov_len = 0;
	}

	while (len) {
		assert(iovcnt > 0);
		error = gla2gpa(ctx, vcpu, paging, gla, prot, &fault, &gpa);
		if (error)
			return (-1);
		if (fault)
			return (1);

		off = gpa & PAGE_MASK;
		n = min(len, PAGE_SIZE - off);

		/*
		 * Stash the gpa itself, not a host pointer; vm_copyin() and
		 * vm_copyout() resolve it via vm_map_gpa() at copy time.
		 */
		iov->iov_base = (void *)gpa;
		iov->iov_len = n;
		iov++;
		iovcnt--;

		gla += n;
		len -= n;
	}
	return (0);
}

void
vm_copyin(struct vmctx *ctx, int vcpu, struct iovec *iov, void *vp, size_t len)
{
	const char *src;
	char *dst;
	uint64_t gpa;
	size_t n;

	dst = vp;
	while (len) {
		assert(iov->iov_len);
		gpa = (uint64_t)iov->iov_base;
		n = min(len, iov->iov_len);
		src = vm_map_gpa(ctx, gpa, n);
		bcopy(src, dst, n);

		iov++;
		dst += n;
		len -= n;
	}
}

void
vm_copyout(struct vmctx *ctx, int vcpu, const void *vp, struct iovec *iov,
	   size_t len)
{
	const char *src;
	char *dst;
	uint64_t gpa;
	size_t n;

	src = vp;
	while (len) {
		assert(iov->iov_len);
		gpa = (uint64_t)iov->iov_base;
		n = min(len, iov->iov_len);
		dst = vm_map_gpa(ctx, gpa, n);
		bcopy(src, dst, n);

		iov++;
		src += n;
		len -= n;
	}
}

static int
vm_get_cpus(struct vmctx *ctx, int which, cpuset_t *cpus)
{
	struct vm_cpuset vm_cpuset;
	int error;

	bzero(&vm_cpuset, sizeof(struct vm_cpuset));
	vm_cpuset.which = which;
	vm_cpuset.cpusetsize = sizeof(cpuset_t);
	vm_cpuset.cpus = cpus;

	error = ioctl(ctx->fd, VM_GET_CPUS, &vm_cpuset);
	return (error);
}

int
vm_active_cpus(struct vmctx *ctx, cpuset_t *cpus)
{

	return (vm_get_cpus(ctx, VM_ACTIVE_CPUS, cpus));
}

int
vm_suspended_cpus(struct vmctx *ctx, cpuset_t *cpus)
{

	return (vm_get_cpus(ctx, VM_SUSPENDED_CPUS, cpus));
}

int
vm_activate_cpu(struct vmctx *ctx, int vcpu)
{
	struct vm_activate_cpu ac;
	int error;

	bzero(&ac, sizeof(struct vm_activate_cpu));
	ac.vcpuid = vcpu;
	error = ioctl(ctx->fd, VM_ACTIVATE_CPU, &ac);
	return (error);
}
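/*
 * Illustrative end-to-end sketch: translate a guest-linear buffer and
 * copy it into the host, e.g. while emulating an instruction.  The
 * 'paging' setup, 'gla' value, and vcpu id are hypothetical and would
 * come from the vcpu's exit state.
 *
 *	struct iovec iov[2];
 *	char buf[16];
 *
 *	if (vm_gla2gpa(ctx, vcpu, &paging, gla, sizeof(buf), PROT_READ,
 *	    iov, 2) == 0)
 *		vm_copyin(ctx, vcpu, iov, buf, sizeof(buf));
 */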