/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/_iovec.h>
#include <sys/cpuset.h>

#include <x86/segments.h>
#include <machine/specialreg.h>
#include <machine/param.h>

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

#include <libutil.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>

#include "vmmapi.h"

#define MB      (1024 * 1024UL)
#define GB      (1024 * 1024 * 1024UL)

struct vmctx {
        int     fd;
        uint32_t lowmem_limit;
        enum vm_mmap_style vms;
        int     memflags;
        size_t  lowmem;
        char    *lowmem_addr;
        size_t  highmem;
        char    *highmem_addr;
        char    *name;
};

#define CREATE(x)  sysctlbyname("hw.vmm.create", NULL, NULL, (x), strlen((x)))
#define DESTROY(x) sysctlbyname("hw.vmm.destroy", NULL, NULL, (x), strlen((x)))

static int
vm_device_open(const char *name)
{
        int fd, len;
        char *vmfile;

        len = strlen("/dev/vmm/") + strlen(name) + 1;
        vmfile = malloc(len);
        assert(vmfile != NULL);
        snprintf(vmfile, len, "/dev/vmm/%s", name);

        /* Open the device file */
        fd = open(vmfile, O_RDWR, 0);

        free(vmfile);
        return (fd);
}

int
vm_create(const char *name)
{

        return (CREATE((char *)name));
}

struct vmctx *
vm_open(const char *name)
{
        struct vmctx *vm;

        vm = malloc(sizeof(struct vmctx) + strlen(name) + 1);
        assert(vm != NULL);

        vm->fd = -1;
        vm->memflags = 0;
        vm->lowmem_limit = 3 * GB;
        vm->name = (char *)(vm + 1);
        strcpy(vm->name, name);

        if ((vm->fd = vm_device_open(vm->name)) < 0)
                goto err;

        return (vm);
err:
        vm_destroy(vm);
        return (NULL);
}

void
vm_destroy(struct vmctx *vm)
{
        assert(vm != NULL);

        if (vm->fd >= 0)
                close(vm->fd);
        DESTROY(vm->name);

        free(vm);
}
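
/*
 * Illustrative lifecycle sketch (an assumption about a typical caller
 * such as bhyve; "testvm" is a hypothetical name, and this block is
 * not part of the original source):
 *
 *      if (vm_create("testvm") == 0) {
 *              struct vmctx *ctx = vm_open("testvm");
 *              ...
 *              vm_destroy(ctx);
 *      }
 *
 * vm_destroy() both closes the /dev/vmm/testvm descriptor and tears
 * down the in-kernel VM via the hw.vmm.destroy sysctl.
 */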

int
vm_parse_memsize(const char *optarg, size_t *ret_memsize)
{
        char *endptr;
        size_t optval;
        int error;

        optval = strtoul(optarg, &endptr, 0);
        if (*optarg != '\0' && *endptr == '\0') {
                /*
                 * For the sake of backward compatibility if the memory size
                 * specified on the command line is less than a megabyte then
                 * it is interpreted as being in units of MB.
                 */
                if (optval < MB)
                        optval *= MB;
                *ret_memsize = optval;
                error = 0;
        } else
                error = expand_number(optarg, ret_memsize);

        return (error);
}

int
vm_get_memory_seg(struct vmctx *ctx, vm_paddr_t gpa, size_t *ret_len,
                  int *wired)
{
        int error;
        struct vm_memory_segment seg;

        bzero(&seg, sizeof(seg));
        seg.gpa = gpa;
        error = ioctl(ctx->fd, VM_GET_MEMORY_SEG, &seg);
        *ret_len = seg.len;
        if (wired != NULL)
                *wired = seg.wired;
        return (error);
}

uint32_t
vm_get_lowmem_limit(struct vmctx *ctx)
{

        return (ctx->lowmem_limit);
}

void
vm_set_lowmem_limit(struct vmctx *ctx, uint32_t limit)
{

        ctx->lowmem_limit = limit;
}

void
vm_set_memflags(struct vmctx *ctx, int flags)
{

        ctx->memflags = flags;
}

static int
setup_memory_segment(struct vmctx *ctx, vm_paddr_t gpa, size_t len, char **addr)
{
        int error, mmap_flags;
        struct vm_memory_segment seg;

        /*
         * Create and optionally map 'len' bytes of memory at guest
         * physical address 'gpa'
         */
        bzero(&seg, sizeof(seg));
        seg.gpa = gpa;
        seg.len = len;
        error = ioctl(ctx->fd, VM_MAP_MEMORY, &seg);
        if (error == 0 && addr != NULL) {
                mmap_flags = MAP_SHARED;
                if ((ctx->memflags & VM_MEM_F_INCORE) == 0)
                        mmap_flags |= MAP_NOCORE;
                *addr = mmap(NULL, len, PROT_READ | PROT_WRITE, mmap_flags,
                    ctx->fd, gpa);
        }
        return (error);
}
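
/*
 * Worked example for vm_setup_memory() below (illustrative, not part
 * of the original source): with the default 3GB lowmem_limit set in
 * vm_open(), a request for 8GB of guest memory yields a 3GB segment
 * at gpa 0 and a 5GB segment starting at gpa 4GB; the range between
 * lowmem_limit and 4GB is left unmapped (typically reserved for
 * device memory).
 */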

int
vm_setup_memory(struct vmctx *ctx, size_t memsize, enum vm_mmap_style vms)
{
        char **addr;
        int error;

        /* XXX VM_MMAP_SPARSE not implemented yet */
        assert(vms == VM_MMAP_NONE || vms == VM_MMAP_ALL);
        ctx->vms = vms;

        /*
         * If 'memsize' cannot fit entirely in the 'lowmem' segment then
         * create another 'highmem' segment above 4GB for the remainder.
         */
        if (memsize > ctx->lowmem_limit) {
                ctx->lowmem = ctx->lowmem_limit;
                ctx->highmem = memsize - ctx->lowmem;
        } else {
                ctx->lowmem = memsize;
                ctx->highmem = 0;
        }

        if (ctx->lowmem > 0) {
                addr = (vms == VM_MMAP_ALL) ? &ctx->lowmem_addr : NULL;
                error = setup_memory_segment(ctx, 0, ctx->lowmem, addr);
                if (error)
                        return (error);
        }

        if (ctx->highmem > 0) {
                addr = (vms == VM_MMAP_ALL) ? &ctx->highmem_addr : NULL;
                error = setup_memory_segment(ctx, 4*GB, ctx->highmem, addr);
                if (error)
                        return (error);
        }

        return (0);
}

void *
vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len)
{

        /* XXX VM_MMAP_SPARSE not implemented yet */
        assert(ctx->vms == VM_MMAP_ALL);

        if (gaddr < ctx->lowmem && gaddr + len <= ctx->lowmem)
                return ((void *)(ctx->lowmem_addr + gaddr));

        if (gaddr >= 4*GB) {
                gaddr -= 4*GB;
                if (gaddr < ctx->highmem && gaddr + len <= ctx->highmem)
                        return ((void *)(ctx->highmem_addr + gaddr));
        }

        return (NULL);
}

size_t
vm_get_lowmem_size(struct vmctx *ctx)
{

        return (ctx->lowmem);
}

size_t
vm_get_highmem_size(struct vmctx *ctx)
{

        return (ctx->highmem);
}

int
vm_set_desc(struct vmctx *ctx, int vcpu, int reg,
            uint64_t base, uint32_t limit, uint32_t access)
{
        int error;
        struct vm_seg_desc vmsegdesc;

        bzero(&vmsegdesc, sizeof(vmsegdesc));
        vmsegdesc.cpuid = vcpu;
        vmsegdesc.regnum = reg;
        vmsegdesc.desc.base = base;
        vmsegdesc.desc.limit = limit;
        vmsegdesc.desc.access = access;

        error = ioctl(ctx->fd, VM_SET_SEGMENT_DESCRIPTOR, &vmsegdesc);
        return (error);
}

int
vm_get_desc(struct vmctx *ctx, int vcpu, int reg,
            uint64_t *base, uint32_t *limit, uint32_t *access)
{
        int error;
        struct vm_seg_desc vmsegdesc;

        bzero(&vmsegdesc, sizeof(vmsegdesc));
        vmsegdesc.cpuid = vcpu;
        vmsegdesc.regnum = reg;

        error = ioctl(ctx->fd, VM_GET_SEGMENT_DESCRIPTOR, &vmsegdesc);
        if (error == 0) {
                *base = vmsegdesc.desc.base;
                *limit = vmsegdesc.desc.limit;
                *access = vmsegdesc.desc.access;
        }
        return (error);
}

int
vm_get_seg_desc(struct vmctx *ctx, int vcpu, int reg, struct seg_desc *seg_desc)
{
        int error;

        error = vm_get_desc(ctx, vcpu, reg, &seg_desc->base, &seg_desc->limit,
            &seg_desc->access);
        return (error);
}

int
vm_set_register(struct vmctx *ctx, int vcpu, int reg, uint64_t val)
{
        int error;
        struct vm_register vmreg;

        bzero(&vmreg, sizeof(vmreg));
        vmreg.cpuid = vcpu;
        vmreg.regnum = reg;
        vmreg.regval = val;

        error = ioctl(ctx->fd, VM_SET_REGISTER, &vmreg);
        return (error);
}

int
vm_get_register(struct vmctx *ctx, int vcpu, int reg, uint64_t *ret_val)
{
        int error;
        struct vm_register vmreg;

        bzero(&vmreg, sizeof(vmreg));
        vmreg.cpuid = vcpu;
        vmreg.regnum = reg;

        error = ioctl(ctx->fd, VM_GET_REGISTER, &vmreg);
        *ret_val = vmreg.regval;
        return (error);
}

int
vm_run(struct vmctx *ctx, int vcpu, struct vm_exit *vmexit)
{
        int error;
        struct vm_run vmrun;

        bzero(&vmrun, sizeof(vmrun));
        vmrun.cpuid = vcpu;

        error = ioctl(ctx->fd, VM_RUN, &vmrun);
        bcopy(&vmrun.vm_exit, vmexit, sizeof(struct vm_exit));
        return (error);
}

int
vm_suspend(struct vmctx *ctx, enum vm_suspend_how how)
{
        struct vm_suspend vmsuspend;

        bzero(&vmsuspend, sizeof(vmsuspend));
        vmsuspend.how = how;
        return (ioctl(ctx->fd, VM_SUSPEND, &vmsuspend));
}

int
vm_reinit(struct vmctx *ctx)
{

        return (ioctl(ctx->fd, VM_REINIT, 0));
}
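
/*
 * Illustrative vcpu loop built on vm_run() above (a sketch of a
 * typical caller, not part of the original source; VM_EXITCODE_HLT is
 * one of the exit codes defined in <machine/vmm.h>):
 *
 *      struct vm_exit vmexit;
 *
 *      while (vm_run(ctx, vcpu, &vmexit) == 0) {
 *              switch (vmexit.exitcode) {
 *              case VM_EXITCODE_HLT:
 *                      ...
 *              }
 *      }
 */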

int
vm_inject_exception(struct vmctx *ctx, int vcpu, int vector, int errcode_valid,
    uint32_t errcode, int restart_instruction)
{
        struct vm_exception exc;

        exc.cpuid = vcpu;
        exc.vector = vector;
        exc.error_code = errcode;
        exc.error_code_valid = errcode_valid;
        exc.restart_instruction = restart_instruction;

        return (ioctl(ctx->fd, VM_INJECT_EXCEPTION, &exc));
}

int
vm_apicid2vcpu(struct vmctx *ctx, int apicid)
{
        /*
         * The apic id associated with the 'vcpu' has the same numerical value
         * as the 'vcpu' itself.
         */
        return (apicid);
}

int
vm_lapic_irq(struct vmctx *ctx, int vcpu, int vector)
{
        struct vm_lapic_irq vmirq;

        bzero(&vmirq, sizeof(vmirq));
        vmirq.cpuid = vcpu;
        vmirq.vector = vector;

        return (ioctl(ctx->fd, VM_LAPIC_IRQ, &vmirq));
}

int
vm_lapic_local_irq(struct vmctx *ctx, int vcpu, int vector)
{
        struct vm_lapic_irq vmirq;

        bzero(&vmirq, sizeof(vmirq));
        vmirq.cpuid = vcpu;
        vmirq.vector = vector;

        return (ioctl(ctx->fd, VM_LAPIC_LOCAL_IRQ, &vmirq));
}

int
vm_lapic_msi(struct vmctx *ctx, uint64_t addr, uint64_t msg)
{
        struct vm_lapic_msi vmmsi;

        bzero(&vmmsi, sizeof(vmmsi));
        vmmsi.addr = addr;
        vmmsi.msg = msg;

        return (ioctl(ctx->fd, VM_LAPIC_MSI, &vmmsi));
}

int
vm_ioapic_assert_irq(struct vmctx *ctx, int irq)
{
        struct vm_ioapic_irq ioapic_irq;

        bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
        ioapic_irq.irq = irq;

        return (ioctl(ctx->fd, VM_IOAPIC_ASSERT_IRQ, &ioapic_irq));
}

int
vm_ioapic_deassert_irq(struct vmctx *ctx, int irq)
{
        struct vm_ioapic_irq ioapic_irq;

        bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
        ioapic_irq.irq = irq;

        return (ioctl(ctx->fd, VM_IOAPIC_DEASSERT_IRQ, &ioapic_irq));
}

int
vm_ioapic_pulse_irq(struct vmctx *ctx, int irq)
{
        struct vm_ioapic_irq ioapic_irq;

        bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
        ioapic_irq.irq = irq;

        return (ioctl(ctx->fd, VM_IOAPIC_PULSE_IRQ, &ioapic_irq));
}

int
vm_ioapic_pincount(struct vmctx *ctx, int *pincount)
{

        return (ioctl(ctx->fd, VM_IOAPIC_PINCOUNT, pincount));
}

int
vm_isa_assert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
        struct vm_isa_irq isa_irq;

        bzero(&isa_irq, sizeof(struct vm_isa_irq));
        isa_irq.atpic_irq = atpic_irq;
        isa_irq.ioapic_irq = ioapic_irq;

        return (ioctl(ctx->fd, VM_ISA_ASSERT_IRQ, &isa_irq));
}

int
vm_isa_deassert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
        struct vm_isa_irq isa_irq;

        bzero(&isa_irq, sizeof(struct vm_isa_irq));
        isa_irq.atpic_irq = atpic_irq;
        isa_irq.ioapic_irq = ioapic_irq;

        return (ioctl(ctx->fd, VM_ISA_DEASSERT_IRQ, &isa_irq));
}

int
vm_isa_pulse_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
        struct vm_isa_irq isa_irq;

        bzero(&isa_irq, sizeof(struct vm_isa_irq));
        isa_irq.atpic_irq = atpic_irq;
        isa_irq.ioapic_irq = ioapic_irq;

        return (ioctl(ctx->fd, VM_ISA_PULSE_IRQ, &isa_irq));
}

int
vm_isa_set_irq_trigger(struct vmctx *ctx, int atpic_irq,
    enum vm_intr_trigger trigger)
{
        struct vm_isa_irq_trigger isa_irq_trigger;

        bzero(&isa_irq_trigger, sizeof(struct vm_isa_irq_trigger));
        isa_irq_trigger.atpic_irq = atpic_irq;
        isa_irq_trigger.trigger = trigger;

        return (ioctl(ctx->fd, VM_ISA_SET_IRQ_TRIGGER, &isa_irq_trigger));
}
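
/*
 * Illustrative use of the ISA helpers above (an assumption about a
 * typical device model, not part of the original source): raising an
 * edge-triggered interrupt on IRQ 4, routed identically through the
 * ATPIC and the I/O APIC, is a single call:
 *
 *      error = vm_isa_pulse_irq(ctx, 4, 4);
 */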

int
vm_inject_nmi(struct vmctx *ctx, int vcpu)
{
        struct vm_nmi vmnmi;

        bzero(&vmnmi, sizeof(vmnmi));
        vmnmi.cpuid = vcpu;

        return (ioctl(ctx->fd, VM_INJECT_NMI, &vmnmi));
}

static struct {
        const char      *name;
        int             type;
} capstrmap[] = {
        { "hlt_exit",           VM_CAP_HALT_EXIT },
        { "mtrap_exit",         VM_CAP_MTRAP_EXIT },
        { "pause_exit",         VM_CAP_PAUSE_EXIT },
        { "unrestricted_guest", VM_CAP_UNRESTRICTED_GUEST },
        { "enable_invpcid",     VM_CAP_ENABLE_INVPCID },
        { 0 }
};

int
vm_capability_name2type(const char *capname)
{
        int i;

        for (i = 0; capstrmap[i].name != NULL && capname != NULL; i++) {
                if (strcmp(capstrmap[i].name, capname) == 0)
                        return (capstrmap[i].type);
        }

        return (-1);
}

const char *
vm_capability_type2name(int type)
{
        int i;

        for (i = 0; capstrmap[i].name != NULL; i++) {
                if (capstrmap[i].type == type)
                        return (capstrmap[i].name);
        }

        return (NULL);
}

int
vm_get_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap,
                  int *retval)
{
        int error;
        struct vm_capability vmcap;

        bzero(&vmcap, sizeof(vmcap));
        vmcap.cpuid = vcpu;
        vmcap.captype = cap;

        error = ioctl(ctx->fd, VM_GET_CAPABILITY, &vmcap);
        *retval = vmcap.capval;
        return (error);
}

int
vm_set_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap, int val)
{
        struct vm_capability vmcap;

        bzero(&vmcap, sizeof(vmcap));
        vmcap.cpuid = vcpu;
        vmcap.captype = cap;
        vmcap.capval = val;

        return (ioctl(ctx->fd, VM_SET_CAPABILITY, &vmcap));
}
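
/*
 * Illustrative capability toggle using the helpers above (a sketch,
 * not part of the original source): enabling exits on HLT by name,
 * via the capstrmap[] table.
 *
 *      int type = vm_capability_name2type("hlt_exit");
 *
 *      if (type != -1)
 *              error = vm_set_capability(ctx, vcpu, type, 1);
 */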

int
vm_assign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
{
        struct vm_pptdev pptdev;

        bzero(&pptdev, sizeof(pptdev));
        pptdev.bus = bus;
        pptdev.slot = slot;
        pptdev.func = func;

        return (ioctl(ctx->fd, VM_BIND_PPTDEV, &pptdev));
}

int
vm_unassign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
{
        struct vm_pptdev pptdev;

        bzero(&pptdev, sizeof(pptdev));
        pptdev.bus = bus;
        pptdev.slot = slot;
        pptdev.func = func;

        return (ioctl(ctx->fd, VM_UNBIND_PPTDEV, &pptdev));
}

int
vm_map_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
                   vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
        struct vm_pptdev_mmio pptmmio;

        bzero(&pptmmio, sizeof(pptmmio));
        pptmmio.bus = bus;
        pptmmio.slot = slot;
        pptmmio.func = func;
        pptmmio.gpa = gpa;
        pptmmio.len = len;
        pptmmio.hpa = hpa;

        return (ioctl(ctx->fd, VM_MAP_PPTDEV_MMIO, &pptmmio));
}

int
vm_setup_pptdev_msi(struct vmctx *ctx, int vcpu, int bus, int slot, int func,
    uint64_t addr, uint64_t msg, int numvec)
{
        struct vm_pptdev_msi pptmsi;

        bzero(&pptmsi, sizeof(pptmsi));
        pptmsi.vcpu = vcpu;
        pptmsi.bus = bus;
        pptmsi.slot = slot;
        pptmsi.func = func;
        pptmsi.msg = msg;
        pptmsi.addr = addr;
        pptmsi.numvec = numvec;

        return (ioctl(ctx->fd, VM_PPTDEV_MSI, &pptmsi));
}

int
vm_setup_pptdev_msix(struct vmctx *ctx, int vcpu, int bus, int slot, int func,
    int idx, uint64_t addr, uint64_t msg, uint32_t vector_control)
{
        struct vm_pptdev_msix pptmsix;

        bzero(&pptmsix, sizeof(pptmsix));
        pptmsix.vcpu = vcpu;
        pptmsix.bus = bus;
        pptmsix.slot = slot;
        pptmsix.func = func;
        pptmsix.idx = idx;
        pptmsix.msg = msg;
        pptmsix.addr = addr;
        pptmsix.vector_control = vector_control;

        return (ioctl(ctx->fd, VM_PPTDEV_MSIX, &pptmsix));
}

uint64_t *
vm_get_stats(struct vmctx *ctx, int vcpu, struct timeval *ret_tv,
             int *ret_entries)
{
        int error;

        static struct vm_stats vmstats;

        vmstats.cpuid = vcpu;

        error = ioctl(ctx->fd, VM_STATS, &vmstats);
        if (error == 0) {
                if (ret_entries)
                        *ret_entries = vmstats.num_entries;
                if (ret_tv)
                        *ret_tv = vmstats.tv;
                return (vmstats.statbuf);
        } else
                return (NULL);
}

const char *
vm_get_stat_desc(struct vmctx *ctx, int index)
{
        static struct vm_stat_desc statdesc;

        statdesc.index = index;
        if (ioctl(ctx->fd, VM_STAT_DESC, &statdesc) == 0)
                return (statdesc.desc);
        else
                return (NULL);
}
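
/*
 * Illustrative stats dump combining the two helpers above (a sketch,
 * not part of the original source):
 *
 *      struct timeval tv;
 *      uint64_t *stats;
 *      int i, num;
 *
 *      stats = vm_get_stats(ctx, vcpu, &tv, &num);
 *      if (stats != NULL) {
 *              for (i = 0; i < num; i++)
 *                      printf("%-40s %ju\n", vm_get_stat_desc(ctx, i),
 *                          (uintmax_t)stats[i]);
 *      }
 */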

int
vm_get_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state *state)
{
        int error;
        struct vm_x2apic x2apic;

        bzero(&x2apic, sizeof(x2apic));
        x2apic.cpuid = vcpu;

        error = ioctl(ctx->fd, VM_GET_X2APIC_STATE, &x2apic);
        *state = x2apic.state;
        return (error);
}

int
vm_set_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state state)
{
        int error;
        struct vm_x2apic x2apic;

        bzero(&x2apic, sizeof(x2apic));
        x2apic.cpuid = vcpu;
        x2apic.state = state;

        error = ioctl(ctx->fd, VM_SET_X2APIC_STATE, &x2apic);

        return (error);
}

/*
 * From Intel Vol 3a:
 * Table 9-1. IA-32 Processor States Following Power-up, Reset or INIT
 */
int
vcpu_reset(struct vmctx *vmctx, int vcpu)
{
        int error;
        uint64_t rflags, rip, cr0, cr4, zero, desc_base, rdx;
        uint32_t desc_access, desc_limit;
        uint16_t sel;

        zero = 0;

        rflags = 0x2;
        error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RFLAGS, rflags);
        if (error)
                goto done;

        rip = 0xfff0;
        if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RIP, rip)) != 0)
                goto done;

        cr0 = CR0_NE;
        if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR0, cr0)) != 0)
                goto done;

        if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR3, zero)) != 0)
                goto done;

        cr4 = 0;
        if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR4, cr4)) != 0)
                goto done;

        /*
         * CS: present, r/w, accessed, 16-bit, byte granularity, usable
         */
        desc_base = 0xffff0000;
        desc_limit = 0xffff;
        desc_access = 0x0093;
        error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_CS,
            desc_base, desc_limit, desc_access);
        if (error)
                goto done;

        sel = 0xf000;
        if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CS, sel)) != 0)
                goto done;

        /*
         * SS,DS,ES,FS,GS: present, r/w, accessed, 16-bit, byte granularity
         */
        desc_base = 0;
        desc_limit = 0xffff;
        desc_access = 0x0093;
        error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_SS,
            desc_base, desc_limit, desc_access);
        if (error)
                goto done;

        error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_DS,
            desc_base, desc_limit, desc_access);
        if (error)
                goto done;

        error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_ES,
            desc_base, desc_limit, desc_access);
        if (error)
                goto done;

        error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_FS,
            desc_base, desc_limit, desc_access);
        if (error)
                goto done;

        error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GS,
            desc_base, desc_limit, desc_access);
        if (error)
                goto done;

        sel = 0;
        if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_SS, sel)) != 0)
                goto done;
        if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_DS, sel)) != 0)
                goto done;
        if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_ES, sel)) != 0)
                goto done;
        if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_FS, sel)) != 0)
                goto done;
        if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_GS, sel)) != 0)
                goto done;

        /* General purpose registers */
        rdx = 0xf00;
        if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RAX, zero)) != 0)
                goto done;
        if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RBX, zero)) != 0)
                goto done;
        if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RCX, zero)) != 0)
                goto done;
        if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RDX, rdx)) != 0)
                goto done;
        if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RSI, zero)) != 0)
                goto done;
        if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RDI, zero)) != 0)
                goto done;
        if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RBP, zero)) != 0)
                goto done;
        if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RSP, zero)) != 0)
                goto done;

        /* GDTR, IDTR */
        desc_base = 0;
        desc_limit = 0xffff;
        desc_access = 0;
        error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GDTR,
            desc_base, desc_limit, desc_access);
        if (error != 0)
                goto done;

        error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_IDTR,
            desc_base, desc_limit, desc_access);
        if (error != 0)
                goto done;

        /* TR */
        desc_base = 0;
        desc_limit = 0xffff;
        desc_access = 0x0000008b;
        error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_TR, desc_base,
            desc_limit, desc_access);
        if (error)
                goto done;

        sel = 0;
        if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_TR, sel)) != 0)
                goto done;

        /* LDTR */
        desc_base = 0;
        desc_limit = 0xffff;
        desc_access = 0x00000082;
        error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_LDTR, desc_base,
            desc_limit, desc_access);
        if (error)
                goto done;

        sel = 0;
        if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_LDTR, sel)) != 0)
                goto done;

        /* XXX cr2, debug registers */

        error = 0;
done:
        return (error);
}

int
vm_get_gpa_pmap(struct vmctx *ctx, uint64_t gpa, uint64_t *pte, int *num)
{
        int error, i;
        struct vm_gpa_pte gpapte;

        bzero(&gpapte, sizeof(gpapte));
        gpapte.gpa = gpa;

        error = ioctl(ctx->fd, VM_GET_GPA_PMAP, &gpapte);

        if (error == 0) {
                *num = gpapte.ptenum;
                for (i = 0; i < gpapte.ptenum; i++)
                        pte[i] = gpapte.pte[i];
        }

        return (error);
}

int
vm_get_hpet_capabilities(struct vmctx *ctx, uint32_t *capabilities)
{
        int error;
        struct vm_hpet_cap cap;

        bzero(&cap, sizeof(struct vm_hpet_cap));
        error = ioctl(ctx->fd, VM_GET_HPET_CAPABILITIES, &cap);
        if (capabilities != NULL)
                *capabilities = cap.capabilities;
        return (error);
}

int
vm_gla2gpa(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa, int *fault)
{
        struct vm_gla2gpa gg;
        int error;

        bzero(&gg, sizeof(struct vm_gla2gpa));
        gg.vcpuid = vcpu;
        gg.prot = prot;
        gg.gla = gla;
        gg.paging = *paging;

        error = ioctl(ctx->fd, VM_GLA2GPA, &gg);
        if (error == 0) {
                *fault = gg.fault;
                *gpa = gg.gpa;
        }
        return (error);
}
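
/*
 * Illustrative use of the copy helpers below (a sketch, not part of
 * the original source): reading 'len' bytes from guest linear address
 * 'gla' into a local buffer 'buf', assuming 'paging', 'gla', 'len'
 * and 'buf' are supplied by the caller.
 *
 *      struct iovec iov[8];
 *      int error, fault;
 *
 *      error = vm_copy_setup(ctx, vcpu, &paging, gla, len, PROT_READ,
 *          iov, 8, &fault);
 *      if (error == 0 && !fault)
 *              vm_copyin(ctx, vcpu, iov, buf, len);
 */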

#ifndef min
#define min(a,b)        (((a) < (b)) ? (a) : (b))
#endif

int
vm_copy_setup(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
    uint64_t gla, size_t len, int prot, struct iovec *iov, int iovcnt,
    int *fault)
{
        void *va;
        uint64_t gpa;
        int error, i, n, off;

        for (i = 0; i < iovcnt; i++) {
                iov[i].iov_base = 0;
                iov[i].iov_len = 0;
        }

        while (len) {
                assert(iovcnt > 0);
                error = vm_gla2gpa(ctx, vcpu, paging, gla, prot, &gpa, fault);
                if (error || *fault)
                        return (error);

                off = gpa & PAGE_MASK;
                n = min(len, PAGE_SIZE - off);

                va = vm_map_gpa(ctx, gpa, n);
                if (va == NULL)
                        return (EFAULT);

                iov->iov_base = va;
                iov->iov_len = n;
                iov++;
                iovcnt--;

                gla += n;
                len -= n;
        }
        return (0);
}

void
vm_copy_teardown(struct vmctx *ctx, int vcpu, struct iovec *iov, int iovcnt)
{

        return;
}

void
vm_copyin(struct vmctx *ctx, int vcpu, struct iovec *iov, void *vp, size_t len)
{
        const char *src;
        char *dst;
        size_t n;

        dst = vp;
        while (len) {
                assert(iov->iov_len);
                n = min(len, iov->iov_len);
                src = iov->iov_base;
                bcopy(src, dst, n);

                iov++;
                dst += n;
                len -= n;
        }
}

void
vm_copyout(struct vmctx *ctx, int vcpu, const void *vp, struct iovec *iov,
    size_t len)
{
        const char *src;
        char *dst;
        size_t n;

        src = vp;
        while (len) {
                assert(iov->iov_len);
                n = min(len, iov->iov_len);
                dst = iov->iov_base;
                bcopy(src, dst, n);

                iov++;
                src += n;
                len -= n;
        }
}

static int
vm_get_cpus(struct vmctx *ctx, int which, cpuset_t *cpus)
{
        struct vm_cpuset vm_cpuset;
        int error;

        bzero(&vm_cpuset, sizeof(struct vm_cpuset));
        vm_cpuset.which = which;
        vm_cpuset.cpusetsize = sizeof(cpuset_t);
        vm_cpuset.cpus = cpus;

        error = ioctl(ctx->fd, VM_GET_CPUS, &vm_cpuset);
        return (error);
}

int
vm_active_cpus(struct vmctx *ctx, cpuset_t *cpus)
{

        return (vm_get_cpus(ctx, VM_ACTIVE_CPUS, cpus));
}

int
vm_suspended_cpus(struct vmctx *ctx, cpuset_t *cpus)
{

        return (vm_get_cpus(ctx, VM_SUSPENDED_CPUS, cpus));
}

int
vm_activate_cpu(struct vmctx *ctx, int vcpu)
{
        struct vm_activate_cpu ac;
        int error;

        bzero(&ac, sizeof(struct vm_activate_cpu));
        ac.vcpuid = vcpu;
        error = ioctl(ctx->fd, VM_ACTIVATE_CPU, &ac);
        return (error);
}

int
vm_get_intinfo(struct vmctx *ctx, int vcpu, uint64_t *info1, uint64_t *info2)
{
        struct vm_intinfo vmii;
        int error;

        bzero(&vmii, sizeof(struct vm_intinfo));
        vmii.vcpuid = vcpu;
        error = ioctl(ctx->fd, VM_GET_INTINFO, &vmii);
        if (error == 0) {
                *info1 = vmii.info1;
                *info2 = vmii.info2;
        }
        return (error);
}

int
vm_set_intinfo(struct vmctx *ctx, int vcpu, uint64_t info1)
{
        struct vm_intinfo vmii;
        int error;

        bzero(&vmii, sizeof(struct vm_intinfo));
        vmii.vcpuid = vcpu;
        vmii.info1 = info1;
        error = ioctl(ctx->fd, VM_SET_INTINFO, &vmii);
        return (error);
}
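
/*
 * Illustrative RTC initialization (a sketch, not part of the original
 * source): seeding the virtual RTC below with the host's wall-clock
 * time, e.g. at VM startup (time() requires <time.h>).
 *
 *      error = vm_rtc_settime(ctx, time(NULL));
 */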

int
vm_rtc_write(struct vmctx *ctx, int offset, uint8_t value)
{
        struct vm_rtc_data rtcdata;
        int error;

        bzero(&rtcdata, sizeof(struct vm_rtc_data));
        rtcdata.offset = offset;
        rtcdata.value = value;
        error = ioctl(ctx->fd, VM_RTC_WRITE, &rtcdata);
        return (error);
}

int
vm_rtc_read(struct vmctx *ctx, int offset, uint8_t *retval)
{
        struct vm_rtc_data rtcdata;
        int error;

        bzero(&rtcdata, sizeof(struct vm_rtc_data));
        rtcdata.offset = offset;
        error = ioctl(ctx->fd, VM_RTC_READ, &rtcdata);
        if (error == 0)
                *retval = rtcdata.value;
        return (error);
}

int
vm_rtc_settime(struct vmctx *ctx, time_t secs)
{
        struct vm_rtc_time rtctime;
        int error;

        bzero(&rtctime, sizeof(struct vm_rtc_time));
        rtctime.secs = secs;
        error = ioctl(ctx->fd, VM_RTC_SETTIME, &rtctime);
        return (error);
}

int
vm_rtc_gettime(struct vmctx *ctx, time_t *secs)
{
        struct vm_rtc_time rtctime;
        int error;

        bzero(&rtctime, sizeof(struct vm_rtc_time));
        error = ioctl(ctx->fd, VM_RTC_GETTIME, &rtctime);
        if (error == 0)
                *secs = rtctime.secs;
        return (error);
}

int
vm_restart_instruction(void *arg, int vcpu)
{
        struct vmctx *ctx = arg;

        return (ioctl(ctx->fd, VM_RESTART_INSTRUCTION, &vcpu));
}