1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2011 NetApp, Inc. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 * 28 * $FreeBSD$ 29 */ 30 31 #include <sys/cdefs.h> 32 __FBSDID("$FreeBSD$"); 33 34 #include <sys/param.h> 35 #include <sys/capsicum.h> 36 #include <sys/sysctl.h> 37 #include <sys/ioctl.h> 38 #include <sys/linker.h> 39 #include <sys/mman.h> 40 #include <sys/module.h> 41 #include <sys/_iovec.h> 42 #include <sys/cpuset.h> 43 44 #include <capsicum_helpers.h> 45 #include <errno.h> 46 #include <stdbool.h> 47 #include <stdio.h> 48 #include <stdlib.h> 49 #include <assert.h> 50 #include <string.h> 51 #include <fcntl.h> 52 #include <unistd.h> 53 54 #include <libutil.h> 55 56 #include <vm/vm.h> 57 #include <machine/vmm.h> 58 #include <machine/vmm_dev.h> 59 #include <machine/vmm_snapshot.h> 60 61 #include "vmmapi.h" 62 #include "internal.h" 63 64 #define MB (1024 * 1024UL) 65 #define GB (1024 * 1024 * 1024UL) 66 67 /* 68 * Size of the guard region before and after the virtual address space 69 * mapping the guest physical memory. This must be a multiple of the 70 * superpage size for performance reasons. 71 */ 72 #define VM_MMAP_GUARD_SIZE (4 * MB) 73 74 #define PROT_RW (PROT_READ | PROT_WRITE) 75 #define PROT_ALL (PROT_READ | PROT_WRITE | PROT_EXEC) 76 77 struct vmctx { 78 int fd; 79 uint32_t lowmem_limit; 80 int memflags; 81 size_t lowmem; 82 size_t highmem; 83 char *baseaddr; 84 char *name; 85 }; 86 87 #define CREATE(x) sysctlbyname("hw.vmm.create", NULL, NULL, (x), strlen((x))) 88 #define DESTROY(x) sysctlbyname("hw.vmm.destroy", NULL, NULL, (x), strlen((x))) 89 90 static int 91 vm_device_open(const char *name) 92 { 93 int fd, len; 94 char *vmfile; 95 96 len = strlen("/dev/vmm/") + strlen(name) + 1; 97 vmfile = malloc(len); 98 assert(vmfile != NULL); 99 snprintf(vmfile, len, "/dev/vmm/%s", name); 100 101 /* Open the device file */ 102 fd = open(vmfile, O_RDWR, 0); 103 104 free(vmfile); 105 return (fd); 106 } 107 108 int 109 vm_create(const char *name) 110 { 111 /* Try to load vmm(4) module before creating a guest. */ 112 if (modfind("vmm") < 0) 113 kldload("vmm"); 114 return (CREATE(name)); 115 } 116 117 struct vmctx * 118 vm_open(const char *name) 119 { 120 struct vmctx *vm; 121 int saved_errno; 122 123 vm = malloc(sizeof(struct vmctx) + strlen(name) + 1); 124 assert(vm != NULL); 125 126 vm->fd = -1; 127 vm->memflags = 0; 128 vm->lowmem_limit = 3 * GB; 129 vm->name = (char *)(vm + 1); 130 strcpy(vm->name, name); 131 132 if ((vm->fd = vm_device_open(vm->name)) < 0) 133 goto err; 134 135 return (vm); 136 err: 137 saved_errno = errno; 138 free(vm); 139 errno = saved_errno; 140 return (NULL); 141 } 142 143 void 144 vm_close(struct vmctx *vm) 145 { 146 assert(vm != NULL); 147 148 close(vm->fd); 149 free(vm); 150 } 151 152 void 153 vm_destroy(struct vmctx *vm) 154 { 155 assert(vm != NULL); 156 157 if (vm->fd >= 0) 158 close(vm->fd); 159 DESTROY(vm->name); 160 161 free(vm); 162 } 163 164 struct vcpu * 165 vm_vcpu_open(struct vmctx *ctx, int vcpuid) 166 { 167 struct vcpu *vcpu; 168 169 vcpu = malloc(sizeof(*vcpu)); 170 vcpu->ctx = ctx; 171 vcpu->vcpuid = vcpuid; 172 return (vcpu); 173 } 174 175 void 176 vm_vcpu_close(struct vcpu *vcpu) 177 { 178 free(vcpu); 179 } 180 181 int 182 vcpu_id(struct vcpu *vcpu) 183 { 184 return (vcpu->vcpuid); 185 } 186 187 int 188 vm_parse_memsize(const char *opt, size_t *ret_memsize) 189 { 190 char *endptr; 191 size_t optval; 192 int error; 193 194 optval = strtoul(opt, &endptr, 0); 195 if (*opt != '\0' && *endptr == '\0') { 196 /* 197 * For the sake of backward compatibility if the memory size 198 * specified on the command line is less than a megabyte then 199 * it is interpreted as being in units of MB. 200 */ 201 if (optval < MB) 202 optval *= MB; 203 *ret_memsize = optval; 204 error = 0; 205 } else 206 error = expand_number(opt, ret_memsize); 207 208 return (error); 209 } 210 211 uint32_t 212 vm_get_lowmem_limit(struct vmctx *ctx) 213 { 214 215 return (ctx->lowmem_limit); 216 } 217 218 void 219 vm_set_lowmem_limit(struct vmctx *ctx, uint32_t limit) 220 { 221 222 ctx->lowmem_limit = limit; 223 } 224 225 void 226 vm_set_memflags(struct vmctx *ctx, int flags) 227 { 228 229 ctx->memflags = flags; 230 } 231 232 int 233 vm_get_memflags(struct vmctx *ctx) 234 { 235 236 return (ctx->memflags); 237 } 238 239 /* 240 * Map segment 'segid' starting at 'off' into guest address range [gpa,gpa+len). 241 */ 242 int 243 vm_mmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, int segid, vm_ooffset_t off, 244 size_t len, int prot) 245 { 246 struct vm_memmap memmap; 247 int error, flags; 248 249 memmap.gpa = gpa; 250 memmap.segid = segid; 251 memmap.segoff = off; 252 memmap.len = len; 253 memmap.prot = prot; 254 memmap.flags = 0; 255 256 if (ctx->memflags & VM_MEM_F_WIRED) 257 memmap.flags |= VM_MEMMAP_F_WIRED; 258 259 /* 260 * If this mapping already exists then don't create it again. This 261 * is the common case for SYSMEM mappings created by bhyveload(8). 262 */ 263 error = vm_mmap_getnext(ctx, &gpa, &segid, &off, &len, &prot, &flags); 264 if (error == 0 && gpa == memmap.gpa) { 265 if (segid != memmap.segid || off != memmap.segoff || 266 prot != memmap.prot || flags != memmap.flags) { 267 errno = EEXIST; 268 return (-1); 269 } else { 270 return (0); 271 } 272 } 273 274 error = ioctl(ctx->fd, VM_MMAP_MEMSEG, &memmap); 275 return (error); 276 } 277 278 int 279 vm_get_guestmem_from_ctx(struct vmctx *ctx, char **guest_baseaddr, 280 size_t *lowmem_size, size_t *highmem_size) 281 { 282 283 *guest_baseaddr = ctx->baseaddr; 284 *lowmem_size = ctx->lowmem; 285 *highmem_size = ctx->highmem; 286 return (0); 287 } 288 289 int 290 vm_munmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, size_t len) 291 { 292 struct vm_munmap munmap; 293 int error; 294 295 munmap.gpa = gpa; 296 munmap.len = len; 297 298 error = ioctl(ctx->fd, VM_MUNMAP_MEMSEG, &munmap); 299 return (error); 300 } 301 302 int 303 vm_mmap_getnext(struct vmctx *ctx, vm_paddr_t *gpa, int *segid, 304 vm_ooffset_t *segoff, size_t *len, int *prot, int *flags) 305 { 306 struct vm_memmap memmap; 307 int error; 308 309 bzero(&memmap, sizeof(struct vm_memmap)); 310 memmap.gpa = *gpa; 311 error = ioctl(ctx->fd, VM_MMAP_GETNEXT, &memmap); 312 if (error == 0) { 313 *gpa = memmap.gpa; 314 *segid = memmap.segid; 315 *segoff = memmap.segoff; 316 *len = memmap.len; 317 *prot = memmap.prot; 318 *flags = memmap.flags; 319 } 320 return (error); 321 } 322 323 /* 324 * Return 0 if the segments are identical and non-zero otherwise. 325 * 326 * This is slightly complicated by the fact that only device memory segments 327 * are named. 328 */ 329 static int 330 cmpseg(size_t len, const char *str, size_t len2, const char *str2) 331 { 332 333 if (len == len2) { 334 if ((!str && !str2) || (str && str2 && !strcmp(str, str2))) 335 return (0); 336 } 337 return (-1); 338 } 339 340 static int 341 vm_alloc_memseg(struct vmctx *ctx, int segid, size_t len, const char *name) 342 { 343 struct vm_memseg memseg; 344 size_t n; 345 int error; 346 347 /* 348 * If the memory segment has already been created then just return. 349 * This is the usual case for the SYSMEM segment created by userspace 350 * loaders like bhyveload(8). 351 */ 352 error = vm_get_memseg(ctx, segid, &memseg.len, memseg.name, 353 sizeof(memseg.name)); 354 if (error) 355 return (error); 356 357 if (memseg.len != 0) { 358 if (cmpseg(len, name, memseg.len, VM_MEMSEG_NAME(&memseg))) { 359 errno = EINVAL; 360 return (-1); 361 } else { 362 return (0); 363 } 364 } 365 366 bzero(&memseg, sizeof(struct vm_memseg)); 367 memseg.segid = segid; 368 memseg.len = len; 369 if (name != NULL) { 370 n = strlcpy(memseg.name, name, sizeof(memseg.name)); 371 if (n >= sizeof(memseg.name)) { 372 errno = ENAMETOOLONG; 373 return (-1); 374 } 375 } 376 377 error = ioctl(ctx->fd, VM_ALLOC_MEMSEG, &memseg); 378 return (error); 379 } 380 381 int 382 vm_get_memseg(struct vmctx *ctx, int segid, size_t *lenp, char *namebuf, 383 size_t bufsize) 384 { 385 struct vm_memseg memseg; 386 size_t n; 387 int error; 388 389 memseg.segid = segid; 390 error = ioctl(ctx->fd, VM_GET_MEMSEG, &memseg); 391 if (error == 0) { 392 *lenp = memseg.len; 393 n = strlcpy(namebuf, memseg.name, bufsize); 394 if (n >= bufsize) { 395 errno = ENAMETOOLONG; 396 error = -1; 397 } 398 } 399 return (error); 400 } 401 402 static int 403 setup_memory_segment(struct vmctx *ctx, vm_paddr_t gpa, size_t len, char *base) 404 { 405 char *ptr; 406 int error, flags; 407 408 /* Map 'len' bytes starting at 'gpa' in the guest address space */ 409 error = vm_mmap_memseg(ctx, gpa, VM_SYSMEM, gpa, len, PROT_ALL); 410 if (error) 411 return (error); 412 413 flags = MAP_SHARED | MAP_FIXED; 414 if ((ctx->memflags & VM_MEM_F_INCORE) == 0) 415 flags |= MAP_NOCORE; 416 417 /* mmap into the process address space on the host */ 418 ptr = mmap(base + gpa, len, PROT_RW, flags, ctx->fd, gpa); 419 if (ptr == MAP_FAILED) 420 return (-1); 421 422 return (0); 423 } 424 425 int 426 vm_setup_memory(struct vmctx *ctx, size_t memsize, enum vm_mmap_style vms) 427 { 428 size_t objsize, len; 429 vm_paddr_t gpa; 430 char *baseaddr, *ptr; 431 int error; 432 433 assert(vms == VM_MMAP_ALL); 434 435 /* 436 * If 'memsize' cannot fit entirely in the 'lowmem' segment then 437 * create another 'highmem' segment above 4GB for the remainder. 438 */ 439 if (memsize > ctx->lowmem_limit) { 440 ctx->lowmem = ctx->lowmem_limit; 441 ctx->highmem = memsize - ctx->lowmem_limit; 442 objsize = 4*GB + ctx->highmem; 443 } else { 444 ctx->lowmem = memsize; 445 ctx->highmem = 0; 446 objsize = ctx->lowmem; 447 } 448 449 error = vm_alloc_memseg(ctx, VM_SYSMEM, objsize, NULL); 450 if (error) 451 return (error); 452 453 /* 454 * Stake out a contiguous region covering the guest physical memory 455 * and the adjoining guard regions. 456 */ 457 len = VM_MMAP_GUARD_SIZE + objsize + VM_MMAP_GUARD_SIZE; 458 ptr = mmap(NULL, len, PROT_NONE, MAP_GUARD | MAP_ALIGNED_SUPER, -1, 0); 459 if (ptr == MAP_FAILED) 460 return (-1); 461 462 baseaddr = ptr + VM_MMAP_GUARD_SIZE; 463 if (ctx->highmem > 0) { 464 gpa = 4*GB; 465 len = ctx->highmem; 466 error = setup_memory_segment(ctx, gpa, len, baseaddr); 467 if (error) 468 return (error); 469 } 470 471 if (ctx->lowmem > 0) { 472 gpa = 0; 473 len = ctx->lowmem; 474 error = setup_memory_segment(ctx, gpa, len, baseaddr); 475 if (error) 476 return (error); 477 } 478 479 ctx->baseaddr = baseaddr; 480 481 return (0); 482 } 483 484 /* 485 * Returns a non-NULL pointer if [gaddr, gaddr+len) is entirely contained in 486 * the lowmem or highmem regions. 487 * 488 * In particular return NULL if [gaddr, gaddr+len) falls in guest MMIO region. 489 * The instruction emulation code depends on this behavior. 490 */ 491 void * 492 vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len) 493 { 494 495 if (ctx->lowmem > 0) { 496 if (gaddr < ctx->lowmem && len <= ctx->lowmem && 497 gaddr + len <= ctx->lowmem) 498 return (ctx->baseaddr + gaddr); 499 } 500 501 if (ctx->highmem > 0) { 502 if (gaddr >= 4*GB) { 503 if (gaddr < 4*GB + ctx->highmem && 504 len <= ctx->highmem && 505 gaddr + len <= 4*GB + ctx->highmem) 506 return (ctx->baseaddr + gaddr); 507 } 508 } 509 510 return (NULL); 511 } 512 513 vm_paddr_t 514 vm_rev_map_gpa(struct vmctx *ctx, void *addr) 515 { 516 vm_paddr_t offaddr; 517 518 offaddr = (char *)addr - ctx->baseaddr; 519 520 if (ctx->lowmem > 0) 521 if (offaddr <= ctx->lowmem) 522 return (offaddr); 523 524 if (ctx->highmem > 0) 525 if (offaddr >= 4*GB && offaddr < 4*GB + ctx->highmem) 526 return (offaddr); 527 528 return ((vm_paddr_t)-1); 529 } 530 531 const char * 532 vm_get_name(struct vmctx *ctx) 533 { 534 535 return (ctx->name); 536 } 537 538 size_t 539 vm_get_lowmem_size(struct vmctx *ctx) 540 { 541 542 return (ctx->lowmem); 543 } 544 545 size_t 546 vm_get_highmem_size(struct vmctx *ctx) 547 { 548 549 return (ctx->highmem); 550 } 551 552 void * 553 vm_create_devmem(struct vmctx *ctx, int segid, const char *name, size_t len) 554 { 555 char pathname[MAXPATHLEN]; 556 size_t len2; 557 char *base, *ptr; 558 int fd, error, flags; 559 560 fd = -1; 561 ptr = MAP_FAILED; 562 if (name == NULL || strlen(name) == 0) { 563 errno = EINVAL; 564 goto done; 565 } 566 567 error = vm_alloc_memseg(ctx, segid, len, name); 568 if (error) 569 goto done; 570 571 strlcpy(pathname, "/dev/vmm.io/", sizeof(pathname)); 572 strlcat(pathname, ctx->name, sizeof(pathname)); 573 strlcat(pathname, ".", sizeof(pathname)); 574 strlcat(pathname, name, sizeof(pathname)); 575 576 fd = open(pathname, O_RDWR); 577 if (fd < 0) 578 goto done; 579 580 /* 581 * Stake out a contiguous region covering the device memory and the 582 * adjoining guard regions. 583 */ 584 len2 = VM_MMAP_GUARD_SIZE + len + VM_MMAP_GUARD_SIZE; 585 base = mmap(NULL, len2, PROT_NONE, MAP_GUARD | MAP_ALIGNED_SUPER, -1, 586 0); 587 if (base == MAP_FAILED) 588 goto done; 589 590 flags = MAP_SHARED | MAP_FIXED; 591 if ((ctx->memflags & VM_MEM_F_INCORE) == 0) 592 flags |= MAP_NOCORE; 593 594 /* mmap the devmem region in the host address space */ 595 ptr = mmap(base + VM_MMAP_GUARD_SIZE, len, PROT_RW, flags, fd, 0); 596 done: 597 if (fd >= 0) 598 close(fd); 599 return (ptr); 600 } 601 602 static int 603 vcpu_ioctl(struct vcpu *vcpu, u_long cmd, void *arg) 604 { 605 /* 606 * XXX: fragile, handle with care 607 * Assumes that the first field of the ioctl data 608 * is the vcpuid. 609 */ 610 *(int *)arg = vcpu->vcpuid; 611 return (ioctl(vcpu->ctx->fd, cmd, arg)); 612 } 613 614 int 615 vm_set_desc(struct vcpu *vcpu, int reg, 616 uint64_t base, uint32_t limit, uint32_t access) 617 { 618 int error; 619 struct vm_seg_desc vmsegdesc; 620 621 bzero(&vmsegdesc, sizeof(vmsegdesc)); 622 vmsegdesc.regnum = reg; 623 vmsegdesc.desc.base = base; 624 vmsegdesc.desc.limit = limit; 625 vmsegdesc.desc.access = access; 626 627 error = vcpu_ioctl(vcpu, VM_SET_SEGMENT_DESCRIPTOR, &vmsegdesc); 628 return (error); 629 } 630 631 int 632 vm_get_desc(struct vcpu *vcpu, int reg, uint64_t *base, uint32_t *limit, 633 uint32_t *access) 634 { 635 int error; 636 struct vm_seg_desc vmsegdesc; 637 638 bzero(&vmsegdesc, sizeof(vmsegdesc)); 639 vmsegdesc.regnum = reg; 640 641 error = vcpu_ioctl(vcpu, VM_GET_SEGMENT_DESCRIPTOR, &vmsegdesc); 642 if (error == 0) { 643 *base = vmsegdesc.desc.base; 644 *limit = vmsegdesc.desc.limit; 645 *access = vmsegdesc.desc.access; 646 } 647 return (error); 648 } 649 650 int 651 vm_get_seg_desc(struct vcpu *vcpu, int reg, struct seg_desc *seg_desc) 652 { 653 int error; 654 655 error = vm_get_desc(vcpu, reg, &seg_desc->base, &seg_desc->limit, 656 &seg_desc->access); 657 return (error); 658 } 659 660 int 661 vm_set_register(struct vcpu *vcpu, int reg, uint64_t val) 662 { 663 int error; 664 struct vm_register vmreg; 665 666 bzero(&vmreg, sizeof(vmreg)); 667 vmreg.regnum = reg; 668 vmreg.regval = val; 669 670 error = vcpu_ioctl(vcpu, VM_SET_REGISTER, &vmreg); 671 return (error); 672 } 673 674 int 675 vm_get_register(struct vcpu *vcpu, int reg, uint64_t *ret_val) 676 { 677 int error; 678 struct vm_register vmreg; 679 680 bzero(&vmreg, sizeof(vmreg)); 681 vmreg.regnum = reg; 682 683 error = vcpu_ioctl(vcpu, VM_GET_REGISTER, &vmreg); 684 *ret_val = vmreg.regval; 685 return (error); 686 } 687 688 int 689 vm_set_register_set(struct vcpu *vcpu, unsigned int count, 690 const int *regnums, uint64_t *regvals) 691 { 692 int error; 693 struct vm_register_set vmregset; 694 695 bzero(&vmregset, sizeof(vmregset)); 696 vmregset.count = count; 697 vmregset.regnums = regnums; 698 vmregset.regvals = regvals; 699 700 error = vcpu_ioctl(vcpu, VM_SET_REGISTER_SET, &vmregset); 701 return (error); 702 } 703 704 int 705 vm_get_register_set(struct vcpu *vcpu, unsigned int count, 706 const int *regnums, uint64_t *regvals) 707 { 708 int error; 709 struct vm_register_set vmregset; 710 711 bzero(&vmregset, sizeof(vmregset)); 712 vmregset.count = count; 713 vmregset.regnums = regnums; 714 vmregset.regvals = regvals; 715 716 error = vcpu_ioctl(vcpu, VM_GET_REGISTER_SET, &vmregset); 717 return (error); 718 } 719 720 int 721 vm_run(struct vcpu *vcpu, struct vm_run *vmrun) 722 { 723 return (vcpu_ioctl(vcpu, VM_RUN, vmrun)); 724 } 725 726 int 727 vm_suspend(struct vmctx *ctx, enum vm_suspend_how how) 728 { 729 struct vm_suspend vmsuspend; 730 731 bzero(&vmsuspend, sizeof(vmsuspend)); 732 vmsuspend.how = how; 733 return (ioctl(ctx->fd, VM_SUSPEND, &vmsuspend)); 734 } 735 736 int 737 vm_reinit(struct vmctx *ctx) 738 { 739 740 return (ioctl(ctx->fd, VM_REINIT, 0)); 741 } 742 743 int 744 vm_inject_exception(struct vcpu *vcpu, int vector, int errcode_valid, 745 uint32_t errcode, int restart_instruction) 746 { 747 struct vm_exception exc; 748 749 exc.vector = vector; 750 exc.error_code = errcode; 751 exc.error_code_valid = errcode_valid; 752 exc.restart_instruction = restart_instruction; 753 754 return (vcpu_ioctl(vcpu, VM_INJECT_EXCEPTION, &exc)); 755 } 756 757 int 758 vm_apicid2vcpu(struct vmctx *ctx __unused, int apicid) 759 { 760 /* 761 * The apic id associated with the 'vcpu' has the same numerical value 762 * as the 'vcpu' itself. 763 */ 764 return (apicid); 765 } 766 767 int 768 vm_lapic_irq(struct vcpu *vcpu, int vector) 769 { 770 struct vm_lapic_irq vmirq; 771 772 bzero(&vmirq, sizeof(vmirq)); 773 vmirq.vector = vector; 774 775 return (vcpu_ioctl(vcpu, VM_LAPIC_IRQ, &vmirq)); 776 } 777 778 int 779 vm_lapic_local_irq(struct vcpu *vcpu, int vector) 780 { 781 struct vm_lapic_irq vmirq; 782 783 bzero(&vmirq, sizeof(vmirq)); 784 vmirq.vector = vector; 785 786 return (vcpu_ioctl(vcpu, VM_LAPIC_LOCAL_IRQ, &vmirq)); 787 } 788 789 int 790 vm_lapic_msi(struct vmctx *ctx, uint64_t addr, uint64_t msg) 791 { 792 struct vm_lapic_msi vmmsi; 793 794 bzero(&vmmsi, sizeof(vmmsi)); 795 vmmsi.addr = addr; 796 vmmsi.msg = msg; 797 798 return (ioctl(ctx->fd, VM_LAPIC_MSI, &vmmsi)); 799 } 800 801 int 802 vm_ioapic_assert_irq(struct vmctx *ctx, int irq) 803 { 804 struct vm_ioapic_irq ioapic_irq; 805 806 bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq)); 807 ioapic_irq.irq = irq; 808 809 return (ioctl(ctx->fd, VM_IOAPIC_ASSERT_IRQ, &ioapic_irq)); 810 } 811 812 int 813 vm_ioapic_deassert_irq(struct vmctx *ctx, int irq) 814 { 815 struct vm_ioapic_irq ioapic_irq; 816 817 bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq)); 818 ioapic_irq.irq = irq; 819 820 return (ioctl(ctx->fd, VM_IOAPIC_DEASSERT_IRQ, &ioapic_irq)); 821 } 822 823 int 824 vm_ioapic_pulse_irq(struct vmctx *ctx, int irq) 825 { 826 struct vm_ioapic_irq ioapic_irq; 827 828 bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq)); 829 ioapic_irq.irq = irq; 830 831 return (ioctl(ctx->fd, VM_IOAPIC_PULSE_IRQ, &ioapic_irq)); 832 } 833 834 int 835 vm_ioapic_pincount(struct vmctx *ctx, int *pincount) 836 { 837 838 return (ioctl(ctx->fd, VM_IOAPIC_PINCOUNT, pincount)); 839 } 840 841 int 842 vm_readwrite_kernemu_device(struct vcpu *vcpu, vm_paddr_t gpa, 843 bool write, int size, uint64_t *value) 844 { 845 struct vm_readwrite_kernemu_device irp = { 846 .access_width = fls(size) - 1, 847 .gpa = gpa, 848 .value = write ? *value : ~0ul, 849 }; 850 long cmd = (write ? VM_SET_KERNEMU_DEV : VM_GET_KERNEMU_DEV); 851 int rc; 852 853 rc = vcpu_ioctl(vcpu, cmd, &irp); 854 if (rc == 0 && !write) 855 *value = irp.value; 856 return (rc); 857 } 858 859 int 860 vm_isa_assert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq) 861 { 862 struct vm_isa_irq isa_irq; 863 864 bzero(&isa_irq, sizeof(struct vm_isa_irq)); 865 isa_irq.atpic_irq = atpic_irq; 866 isa_irq.ioapic_irq = ioapic_irq; 867 868 return (ioctl(ctx->fd, VM_ISA_ASSERT_IRQ, &isa_irq)); 869 } 870 871 int 872 vm_isa_deassert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq) 873 { 874 struct vm_isa_irq isa_irq; 875 876 bzero(&isa_irq, sizeof(struct vm_isa_irq)); 877 isa_irq.atpic_irq = atpic_irq; 878 isa_irq.ioapic_irq = ioapic_irq; 879 880 return (ioctl(ctx->fd, VM_ISA_DEASSERT_IRQ, &isa_irq)); 881 } 882 883 int 884 vm_isa_pulse_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq) 885 { 886 struct vm_isa_irq isa_irq; 887 888 bzero(&isa_irq, sizeof(struct vm_isa_irq)); 889 isa_irq.atpic_irq = atpic_irq; 890 isa_irq.ioapic_irq = ioapic_irq; 891 892 return (ioctl(ctx->fd, VM_ISA_PULSE_IRQ, &isa_irq)); 893 } 894 895 int 896 vm_isa_set_irq_trigger(struct vmctx *ctx, int atpic_irq, 897 enum vm_intr_trigger trigger) 898 { 899 struct vm_isa_irq_trigger isa_irq_trigger; 900 901 bzero(&isa_irq_trigger, sizeof(struct vm_isa_irq_trigger)); 902 isa_irq_trigger.atpic_irq = atpic_irq; 903 isa_irq_trigger.trigger = trigger; 904 905 return (ioctl(ctx->fd, VM_ISA_SET_IRQ_TRIGGER, &isa_irq_trigger)); 906 } 907 908 int 909 vm_inject_nmi(struct vcpu *vcpu) 910 { 911 struct vm_nmi vmnmi; 912 913 bzero(&vmnmi, sizeof(vmnmi)); 914 915 return (vcpu_ioctl(vcpu, VM_INJECT_NMI, &vmnmi)); 916 } 917 918 static const char *capstrmap[] = { 919 [VM_CAP_HALT_EXIT] = "hlt_exit", 920 [VM_CAP_MTRAP_EXIT] = "mtrap_exit", 921 [VM_CAP_PAUSE_EXIT] = "pause_exit", 922 [VM_CAP_UNRESTRICTED_GUEST] = "unrestricted_guest", 923 [VM_CAP_ENABLE_INVPCID] = "enable_invpcid", 924 [VM_CAP_BPT_EXIT] = "bpt_exit", 925 }; 926 927 int 928 vm_capability_name2type(const char *capname) 929 { 930 int i; 931 932 for (i = 0; i < (int)nitems(capstrmap); i++) { 933 if (strcmp(capstrmap[i], capname) == 0) 934 return (i); 935 } 936 937 return (-1); 938 } 939 940 const char * 941 vm_capability_type2name(int type) 942 { 943 if (type >= 0 && type < (int)nitems(capstrmap)) 944 return (capstrmap[type]); 945 946 return (NULL); 947 } 948 949 int 950 vm_get_capability(struct vcpu *vcpu, enum vm_cap_type cap, int *retval) 951 { 952 int error; 953 struct vm_capability vmcap; 954 955 bzero(&vmcap, sizeof(vmcap)); 956 vmcap.captype = cap; 957 958 error = vcpu_ioctl(vcpu, VM_GET_CAPABILITY, &vmcap); 959 *retval = vmcap.capval; 960 return (error); 961 } 962 963 int 964 vm_set_capability(struct vcpu *vcpu, enum vm_cap_type cap, int val) 965 { 966 struct vm_capability vmcap; 967 968 bzero(&vmcap, sizeof(vmcap)); 969 vmcap.captype = cap; 970 vmcap.capval = val; 971 972 return (vcpu_ioctl(vcpu, VM_SET_CAPABILITY, &vmcap)); 973 } 974 975 int 976 vm_assign_pptdev(struct vmctx *ctx, int bus, int slot, int func) 977 { 978 struct vm_pptdev pptdev; 979 980 bzero(&pptdev, sizeof(pptdev)); 981 pptdev.bus = bus; 982 pptdev.slot = slot; 983 pptdev.func = func; 984 985 return (ioctl(ctx->fd, VM_BIND_PPTDEV, &pptdev)); 986 } 987 988 int 989 vm_unassign_pptdev(struct vmctx *ctx, int bus, int slot, int func) 990 { 991 struct vm_pptdev pptdev; 992 993 bzero(&pptdev, sizeof(pptdev)); 994 pptdev.bus = bus; 995 pptdev.slot = slot; 996 pptdev.func = func; 997 998 return (ioctl(ctx->fd, VM_UNBIND_PPTDEV, &pptdev)); 999 } 1000 1001 int 1002 vm_map_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func, 1003 vm_paddr_t gpa, size_t len, vm_paddr_t hpa) 1004 { 1005 struct vm_pptdev_mmio pptmmio; 1006 1007 bzero(&pptmmio, sizeof(pptmmio)); 1008 pptmmio.bus = bus; 1009 pptmmio.slot = slot; 1010 pptmmio.func = func; 1011 pptmmio.gpa = gpa; 1012 pptmmio.len = len; 1013 pptmmio.hpa = hpa; 1014 1015 return (ioctl(ctx->fd, VM_MAP_PPTDEV_MMIO, &pptmmio)); 1016 } 1017 1018 int 1019 vm_unmap_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func, 1020 vm_paddr_t gpa, size_t len) 1021 { 1022 struct vm_pptdev_mmio pptmmio; 1023 1024 bzero(&pptmmio, sizeof(pptmmio)); 1025 pptmmio.bus = bus; 1026 pptmmio.slot = slot; 1027 pptmmio.func = func; 1028 pptmmio.gpa = gpa; 1029 pptmmio.len = len; 1030 1031 return (ioctl(ctx->fd, VM_UNMAP_PPTDEV_MMIO, &pptmmio)); 1032 } 1033 1034 int 1035 vm_setup_pptdev_msi(struct vmctx *ctx, int bus, int slot, int func, 1036 uint64_t addr, uint64_t msg, int numvec) 1037 { 1038 struct vm_pptdev_msi pptmsi; 1039 1040 bzero(&pptmsi, sizeof(pptmsi)); 1041 pptmsi.bus = bus; 1042 pptmsi.slot = slot; 1043 pptmsi.func = func; 1044 pptmsi.msg = msg; 1045 pptmsi.addr = addr; 1046 pptmsi.numvec = numvec; 1047 1048 return (ioctl(ctx->fd, VM_PPTDEV_MSI, &pptmsi)); 1049 } 1050 1051 int 1052 vm_setup_pptdev_msix(struct vmctx *ctx, int bus, int slot, int func, 1053 int idx, uint64_t addr, uint64_t msg, uint32_t vector_control) 1054 { 1055 struct vm_pptdev_msix pptmsix; 1056 1057 bzero(&pptmsix, sizeof(pptmsix)); 1058 pptmsix.bus = bus; 1059 pptmsix.slot = slot; 1060 pptmsix.func = func; 1061 pptmsix.idx = idx; 1062 pptmsix.msg = msg; 1063 pptmsix.addr = addr; 1064 pptmsix.vector_control = vector_control; 1065 1066 return ioctl(ctx->fd, VM_PPTDEV_MSIX, &pptmsix); 1067 } 1068 1069 int 1070 vm_disable_pptdev_msix(struct vmctx *ctx, int bus, int slot, int func) 1071 { 1072 struct vm_pptdev ppt; 1073 1074 bzero(&ppt, sizeof(ppt)); 1075 ppt.bus = bus; 1076 ppt.slot = slot; 1077 ppt.func = func; 1078 1079 return ioctl(ctx->fd, VM_PPTDEV_DISABLE_MSIX, &ppt); 1080 } 1081 1082 uint64_t * 1083 vm_get_stats(struct vcpu *vcpu, struct timeval *ret_tv, 1084 int *ret_entries) 1085 { 1086 static _Thread_local uint64_t *stats_buf; 1087 static _Thread_local u_int stats_count; 1088 uint64_t *new_stats; 1089 struct vm_stats vmstats; 1090 u_int count, index; 1091 bool have_stats; 1092 1093 have_stats = false; 1094 count = 0; 1095 for (index = 0;; index += nitems(vmstats.statbuf)) { 1096 vmstats.index = index; 1097 if (vcpu_ioctl(vcpu, VM_STATS, &vmstats) != 0) 1098 break; 1099 if (stats_count < index + vmstats.num_entries) { 1100 new_stats = realloc(stats_buf, 1101 (index + vmstats.num_entries) * sizeof(uint64_t)); 1102 if (new_stats == NULL) { 1103 errno = ENOMEM; 1104 return (NULL); 1105 } 1106 stats_count = index + vmstats.num_entries; 1107 stats_buf = new_stats; 1108 } 1109 memcpy(stats_buf + index, vmstats.statbuf, 1110 vmstats.num_entries * sizeof(uint64_t)); 1111 count += vmstats.num_entries; 1112 have_stats = true; 1113 1114 if (vmstats.num_entries != nitems(vmstats.statbuf)) 1115 break; 1116 } 1117 if (have_stats) { 1118 if (ret_entries) 1119 *ret_entries = count; 1120 if (ret_tv) 1121 *ret_tv = vmstats.tv; 1122 return (stats_buf); 1123 } else 1124 return (NULL); 1125 } 1126 1127 const char * 1128 vm_get_stat_desc(struct vmctx *ctx, int index) 1129 { 1130 static struct vm_stat_desc statdesc; 1131 1132 statdesc.index = index; 1133 if (ioctl(ctx->fd, VM_STAT_DESC, &statdesc) == 0) 1134 return (statdesc.desc); 1135 else 1136 return (NULL); 1137 } 1138 1139 int 1140 vm_get_x2apic_state(struct vcpu *vcpu, enum x2apic_state *state) 1141 { 1142 int error; 1143 struct vm_x2apic x2apic; 1144 1145 bzero(&x2apic, sizeof(x2apic)); 1146 1147 error = vcpu_ioctl(vcpu, VM_GET_X2APIC_STATE, &x2apic); 1148 *state = x2apic.state; 1149 return (error); 1150 } 1151 1152 int 1153 vm_set_x2apic_state(struct vcpu *vcpu, enum x2apic_state state) 1154 { 1155 int error; 1156 struct vm_x2apic x2apic; 1157 1158 bzero(&x2apic, sizeof(x2apic)); 1159 x2apic.state = state; 1160 1161 error = vcpu_ioctl(vcpu, VM_SET_X2APIC_STATE, &x2apic); 1162 1163 return (error); 1164 } 1165 1166 /* 1167 * From Intel Vol 3a: 1168 * Table 9-1. IA-32 Processor States Following Power-up, Reset or INIT 1169 */ 1170 int 1171 vcpu_reset(struct vcpu *vcpu) 1172 { 1173 int error; 1174 uint64_t rflags, rip, cr0, cr4, zero, desc_base, rdx; 1175 uint32_t desc_access, desc_limit; 1176 uint16_t sel; 1177 1178 zero = 0; 1179 1180 rflags = 0x2; 1181 error = vm_set_register(vcpu, VM_REG_GUEST_RFLAGS, rflags); 1182 if (error) 1183 goto done; 1184 1185 rip = 0xfff0; 1186 if ((error = vm_set_register(vcpu, VM_REG_GUEST_RIP, rip)) != 0) 1187 goto done; 1188 1189 /* 1190 * According to Intels Software Developer Manual CR0 should be 1191 * initialized with CR0_ET | CR0_NW | CR0_CD but that crashes some 1192 * guests like Windows. 1193 */ 1194 cr0 = CR0_NE; 1195 if ((error = vm_set_register(vcpu, VM_REG_GUEST_CR0, cr0)) != 0) 1196 goto done; 1197 1198 if ((error = vm_set_register(vcpu, VM_REG_GUEST_CR2, zero)) != 0) 1199 goto done; 1200 1201 if ((error = vm_set_register(vcpu, VM_REG_GUEST_CR3, zero)) != 0) 1202 goto done; 1203 1204 cr4 = 0; 1205 if ((error = vm_set_register(vcpu, VM_REG_GUEST_CR4, cr4)) != 0) 1206 goto done; 1207 1208 /* 1209 * CS: present, r/w, accessed, 16-bit, byte granularity, usable 1210 */ 1211 desc_base = 0xffff0000; 1212 desc_limit = 0xffff; 1213 desc_access = 0x0093; 1214 error = vm_set_desc(vcpu, VM_REG_GUEST_CS, 1215 desc_base, desc_limit, desc_access); 1216 if (error) 1217 goto done; 1218 1219 sel = 0xf000; 1220 if ((error = vm_set_register(vcpu, VM_REG_GUEST_CS, sel)) != 0) 1221 goto done; 1222 1223 /* 1224 * SS,DS,ES,FS,GS: present, r/w, accessed, 16-bit, byte granularity 1225 */ 1226 desc_base = 0; 1227 desc_limit = 0xffff; 1228 desc_access = 0x0093; 1229 error = vm_set_desc(vcpu, VM_REG_GUEST_SS, 1230 desc_base, desc_limit, desc_access); 1231 if (error) 1232 goto done; 1233 1234 error = vm_set_desc(vcpu, VM_REG_GUEST_DS, 1235 desc_base, desc_limit, desc_access); 1236 if (error) 1237 goto done; 1238 1239 error = vm_set_desc(vcpu, VM_REG_GUEST_ES, 1240 desc_base, desc_limit, desc_access); 1241 if (error) 1242 goto done; 1243 1244 error = vm_set_desc(vcpu, VM_REG_GUEST_FS, 1245 desc_base, desc_limit, desc_access); 1246 if (error) 1247 goto done; 1248 1249 error = vm_set_desc(vcpu, VM_REG_GUEST_GS, 1250 desc_base, desc_limit, desc_access); 1251 if (error) 1252 goto done; 1253 1254 sel = 0; 1255 if ((error = vm_set_register(vcpu, VM_REG_GUEST_SS, sel)) != 0) 1256 goto done; 1257 if ((error = vm_set_register(vcpu, VM_REG_GUEST_DS, sel)) != 0) 1258 goto done; 1259 if ((error = vm_set_register(vcpu, VM_REG_GUEST_ES, sel)) != 0) 1260 goto done; 1261 if ((error = vm_set_register(vcpu, VM_REG_GUEST_FS, sel)) != 0) 1262 goto done; 1263 if ((error = vm_set_register(vcpu, VM_REG_GUEST_GS, sel)) != 0) 1264 goto done; 1265 1266 if ((error = vm_set_register(vcpu, VM_REG_GUEST_EFER, zero)) != 0) 1267 goto done; 1268 1269 /* General purpose registers */ 1270 rdx = 0xf00; 1271 if ((error = vm_set_register(vcpu, VM_REG_GUEST_RAX, zero)) != 0) 1272 goto done; 1273 if ((error = vm_set_register(vcpu, VM_REG_GUEST_RBX, zero)) != 0) 1274 goto done; 1275 if ((error = vm_set_register(vcpu, VM_REG_GUEST_RCX, zero)) != 0) 1276 goto done; 1277 if ((error = vm_set_register(vcpu, VM_REG_GUEST_RDX, rdx)) != 0) 1278 goto done; 1279 if ((error = vm_set_register(vcpu, VM_REG_GUEST_RSI, zero)) != 0) 1280 goto done; 1281 if ((error = vm_set_register(vcpu, VM_REG_GUEST_RDI, zero)) != 0) 1282 goto done; 1283 if ((error = vm_set_register(vcpu, VM_REG_GUEST_RBP, zero)) != 0) 1284 goto done; 1285 if ((error = vm_set_register(vcpu, VM_REG_GUEST_RSP, zero)) != 0) 1286 goto done; 1287 if ((error = vm_set_register(vcpu, VM_REG_GUEST_R8, zero)) != 0) 1288 goto done; 1289 if ((error = vm_set_register(vcpu, VM_REG_GUEST_R9, zero)) != 0) 1290 goto done; 1291 if ((error = vm_set_register(vcpu, VM_REG_GUEST_R10, zero)) != 0) 1292 goto done; 1293 if ((error = vm_set_register(vcpu, VM_REG_GUEST_R11, zero)) != 0) 1294 goto done; 1295 if ((error = vm_set_register(vcpu, VM_REG_GUEST_R12, zero)) != 0) 1296 goto done; 1297 if ((error = vm_set_register(vcpu, VM_REG_GUEST_R13, zero)) != 0) 1298 goto done; 1299 if ((error = vm_set_register(vcpu, VM_REG_GUEST_R14, zero)) != 0) 1300 goto done; 1301 if ((error = vm_set_register(vcpu, VM_REG_GUEST_R15, zero)) != 0) 1302 goto done; 1303 1304 /* GDTR, IDTR */ 1305 desc_base = 0; 1306 desc_limit = 0xffff; 1307 desc_access = 0; 1308 error = vm_set_desc(vcpu, VM_REG_GUEST_GDTR, 1309 desc_base, desc_limit, desc_access); 1310 if (error != 0) 1311 goto done; 1312 1313 error = vm_set_desc(vcpu, VM_REG_GUEST_IDTR, 1314 desc_base, desc_limit, desc_access); 1315 if (error != 0) 1316 goto done; 1317 1318 /* TR */ 1319 desc_base = 0; 1320 desc_limit = 0xffff; 1321 desc_access = 0x0000008b; 1322 error = vm_set_desc(vcpu, VM_REG_GUEST_TR, 0, 0, desc_access); 1323 if (error) 1324 goto done; 1325 1326 sel = 0; 1327 if ((error = vm_set_register(vcpu, VM_REG_GUEST_TR, sel)) != 0) 1328 goto done; 1329 1330 /* LDTR */ 1331 desc_base = 0; 1332 desc_limit = 0xffff; 1333 desc_access = 0x00000082; 1334 error = vm_set_desc(vcpu, VM_REG_GUEST_LDTR, desc_base, 1335 desc_limit, desc_access); 1336 if (error) 1337 goto done; 1338 1339 sel = 0; 1340 if ((error = vm_set_register(vcpu, VM_REG_GUEST_LDTR, 0)) != 0) 1341 goto done; 1342 1343 if ((error = vm_set_register(vcpu, VM_REG_GUEST_DR6, 1344 0xffff0ff0)) != 0) 1345 goto done; 1346 if ((error = vm_set_register(vcpu, VM_REG_GUEST_DR7, 0x400)) != 1347 0) 1348 goto done; 1349 1350 if ((error = vm_set_register(vcpu, VM_REG_GUEST_INTR_SHADOW, 1351 zero)) != 0) 1352 goto done; 1353 1354 error = 0; 1355 done: 1356 return (error); 1357 } 1358 1359 int 1360 vm_get_gpa_pmap(struct vmctx *ctx, uint64_t gpa, uint64_t *pte, int *num) 1361 { 1362 int error, i; 1363 struct vm_gpa_pte gpapte; 1364 1365 bzero(&gpapte, sizeof(gpapte)); 1366 gpapte.gpa = gpa; 1367 1368 error = ioctl(ctx->fd, VM_GET_GPA_PMAP, &gpapte); 1369 1370 if (error == 0) { 1371 *num = gpapte.ptenum; 1372 for (i = 0; i < gpapte.ptenum; i++) 1373 pte[i] = gpapte.pte[i]; 1374 } 1375 1376 return (error); 1377 } 1378 1379 int 1380 vm_get_hpet_capabilities(struct vmctx *ctx, uint32_t *capabilities) 1381 { 1382 int error; 1383 struct vm_hpet_cap cap; 1384 1385 bzero(&cap, sizeof(struct vm_hpet_cap)); 1386 error = ioctl(ctx->fd, VM_GET_HPET_CAPABILITIES, &cap); 1387 if (capabilities != NULL) 1388 *capabilities = cap.capabilities; 1389 return (error); 1390 } 1391 1392 int 1393 vm_gla2gpa(struct vcpu *vcpu, struct vm_guest_paging *paging, 1394 uint64_t gla, int prot, uint64_t *gpa, int *fault) 1395 { 1396 struct vm_gla2gpa gg; 1397 int error; 1398 1399 bzero(&gg, sizeof(struct vm_gla2gpa)); 1400 gg.prot = prot; 1401 gg.gla = gla; 1402 gg.paging = *paging; 1403 1404 error = vcpu_ioctl(vcpu, VM_GLA2GPA, &gg); 1405 if (error == 0) { 1406 *fault = gg.fault; 1407 *gpa = gg.gpa; 1408 } 1409 return (error); 1410 } 1411 1412 int 1413 vm_gla2gpa_nofault(struct vcpu *vcpu, struct vm_guest_paging *paging, 1414 uint64_t gla, int prot, uint64_t *gpa, int *fault) 1415 { 1416 struct vm_gla2gpa gg; 1417 int error; 1418 1419 bzero(&gg, sizeof(struct vm_gla2gpa)); 1420 gg.prot = prot; 1421 gg.gla = gla; 1422 gg.paging = *paging; 1423 1424 error = vcpu_ioctl(vcpu, VM_GLA2GPA_NOFAULT, &gg); 1425 if (error == 0) { 1426 *fault = gg.fault; 1427 *gpa = gg.gpa; 1428 } 1429 return (error); 1430 } 1431 1432 #ifndef min 1433 #define min(a,b) (((a) < (b)) ? (a) : (b)) 1434 #endif 1435 1436 int 1437 vm_copy_setup(struct vcpu *vcpu, struct vm_guest_paging *paging, 1438 uint64_t gla, size_t len, int prot, struct iovec *iov, int iovcnt, 1439 int *fault) 1440 { 1441 void *va; 1442 uint64_t gpa, off; 1443 int error, i, n; 1444 1445 for (i = 0; i < iovcnt; i++) { 1446 iov[i].iov_base = 0; 1447 iov[i].iov_len = 0; 1448 } 1449 1450 while (len) { 1451 assert(iovcnt > 0); 1452 error = vm_gla2gpa(vcpu, paging, gla, prot, &gpa, fault); 1453 if (error || *fault) 1454 return (error); 1455 1456 off = gpa & PAGE_MASK; 1457 n = MIN(len, PAGE_SIZE - off); 1458 1459 va = vm_map_gpa(vcpu->ctx, gpa, n); 1460 if (va == NULL) 1461 return (EFAULT); 1462 1463 iov->iov_base = va; 1464 iov->iov_len = n; 1465 iov++; 1466 iovcnt--; 1467 1468 gla += n; 1469 len -= n; 1470 } 1471 return (0); 1472 } 1473 1474 void 1475 vm_copy_teardown(struct iovec *iov __unused, int iovcnt __unused) 1476 { 1477 /* 1478 * Intentionally empty. This is used by the instruction 1479 * emulation code shared with the kernel. The in-kernel 1480 * version of this is non-empty. 1481 */ 1482 } 1483 1484 void 1485 vm_copyin(struct iovec *iov, void *vp, size_t len) 1486 { 1487 const char *src; 1488 char *dst; 1489 size_t n; 1490 1491 dst = vp; 1492 while (len) { 1493 assert(iov->iov_len); 1494 n = min(len, iov->iov_len); 1495 src = iov->iov_base; 1496 bcopy(src, dst, n); 1497 1498 iov++; 1499 dst += n; 1500 len -= n; 1501 } 1502 } 1503 1504 void 1505 vm_copyout(const void *vp, struct iovec *iov, size_t len) 1506 { 1507 const char *src; 1508 char *dst; 1509 size_t n; 1510 1511 src = vp; 1512 while (len) { 1513 assert(iov->iov_len); 1514 n = min(len, iov->iov_len); 1515 dst = iov->iov_base; 1516 bcopy(src, dst, n); 1517 1518 iov++; 1519 src += n; 1520 len -= n; 1521 } 1522 } 1523 1524 static int 1525 vm_get_cpus(struct vmctx *ctx, int which, cpuset_t *cpus) 1526 { 1527 struct vm_cpuset vm_cpuset; 1528 int error; 1529 1530 bzero(&vm_cpuset, sizeof(struct vm_cpuset)); 1531 vm_cpuset.which = which; 1532 vm_cpuset.cpusetsize = sizeof(cpuset_t); 1533 vm_cpuset.cpus = cpus; 1534 1535 error = ioctl(ctx->fd, VM_GET_CPUS, &vm_cpuset); 1536 return (error); 1537 } 1538 1539 int 1540 vm_active_cpus(struct vmctx *ctx, cpuset_t *cpus) 1541 { 1542 1543 return (vm_get_cpus(ctx, VM_ACTIVE_CPUS, cpus)); 1544 } 1545 1546 int 1547 vm_suspended_cpus(struct vmctx *ctx, cpuset_t *cpus) 1548 { 1549 1550 return (vm_get_cpus(ctx, VM_SUSPENDED_CPUS, cpus)); 1551 } 1552 1553 int 1554 vm_debug_cpus(struct vmctx *ctx, cpuset_t *cpus) 1555 { 1556 1557 return (vm_get_cpus(ctx, VM_DEBUG_CPUS, cpus)); 1558 } 1559 1560 int 1561 vm_activate_cpu(struct vcpu *vcpu) 1562 { 1563 struct vm_activate_cpu ac; 1564 int error; 1565 1566 bzero(&ac, sizeof(struct vm_activate_cpu)); 1567 error = vcpu_ioctl(vcpu, VM_ACTIVATE_CPU, &ac); 1568 return (error); 1569 } 1570 1571 int 1572 vm_suspend_all_cpus(struct vmctx *ctx) 1573 { 1574 struct vm_activate_cpu ac; 1575 int error; 1576 1577 bzero(&ac, sizeof(struct vm_activate_cpu)); 1578 ac.vcpuid = -1; 1579 error = ioctl(ctx->fd, VM_SUSPEND_CPU, &ac); 1580 return (error); 1581 } 1582 1583 int 1584 vm_suspend_cpu(struct vcpu *vcpu) 1585 { 1586 struct vm_activate_cpu ac; 1587 int error; 1588 1589 bzero(&ac, sizeof(struct vm_activate_cpu)); 1590 error = vcpu_ioctl(vcpu, VM_SUSPEND_CPU, &ac); 1591 return (error); 1592 } 1593 1594 int 1595 vm_resume_cpu(struct vcpu *vcpu) 1596 { 1597 struct vm_activate_cpu ac; 1598 int error; 1599 1600 bzero(&ac, sizeof(struct vm_activate_cpu)); 1601 error = vcpu_ioctl(vcpu, VM_RESUME_CPU, &ac); 1602 return (error); 1603 } 1604 1605 int 1606 vm_resume_all_cpus(struct vmctx *ctx) 1607 { 1608 struct vm_activate_cpu ac; 1609 int error; 1610 1611 bzero(&ac, sizeof(struct vm_activate_cpu)); 1612 ac.vcpuid = -1; 1613 error = ioctl(ctx->fd, VM_RESUME_CPU, &ac); 1614 return (error); 1615 } 1616 1617 int 1618 vm_get_intinfo(struct vcpu *vcpu, uint64_t *info1, uint64_t *info2) 1619 { 1620 struct vm_intinfo vmii; 1621 int error; 1622 1623 bzero(&vmii, sizeof(struct vm_intinfo)); 1624 error = vcpu_ioctl(vcpu, VM_GET_INTINFO, &vmii); 1625 if (error == 0) { 1626 *info1 = vmii.info1; 1627 *info2 = vmii.info2; 1628 } 1629 return (error); 1630 } 1631 1632 int 1633 vm_set_intinfo(struct vcpu *vcpu, uint64_t info1) 1634 { 1635 struct vm_intinfo vmii; 1636 int error; 1637 1638 bzero(&vmii, sizeof(struct vm_intinfo)); 1639 vmii.info1 = info1; 1640 error = vcpu_ioctl(vcpu, VM_SET_INTINFO, &vmii); 1641 return (error); 1642 } 1643 1644 int 1645 vm_rtc_write(struct vmctx *ctx, int offset, uint8_t value) 1646 { 1647 struct vm_rtc_data rtcdata; 1648 int error; 1649 1650 bzero(&rtcdata, sizeof(struct vm_rtc_data)); 1651 rtcdata.offset = offset; 1652 rtcdata.value = value; 1653 error = ioctl(ctx->fd, VM_RTC_WRITE, &rtcdata); 1654 return (error); 1655 } 1656 1657 int 1658 vm_rtc_read(struct vmctx *ctx, int offset, uint8_t *retval) 1659 { 1660 struct vm_rtc_data rtcdata; 1661 int error; 1662 1663 bzero(&rtcdata, sizeof(struct vm_rtc_data)); 1664 rtcdata.offset = offset; 1665 error = ioctl(ctx->fd, VM_RTC_READ, &rtcdata); 1666 if (error == 0) 1667 *retval = rtcdata.value; 1668 return (error); 1669 } 1670 1671 int 1672 vm_rtc_settime(struct vmctx *ctx, time_t secs) 1673 { 1674 struct vm_rtc_time rtctime; 1675 int error; 1676 1677 bzero(&rtctime, sizeof(struct vm_rtc_time)); 1678 rtctime.secs = secs; 1679 error = ioctl(ctx->fd, VM_RTC_SETTIME, &rtctime); 1680 return (error); 1681 } 1682 1683 int 1684 vm_rtc_gettime(struct vmctx *ctx, time_t *secs) 1685 { 1686 struct vm_rtc_time rtctime; 1687 int error; 1688 1689 bzero(&rtctime, sizeof(struct vm_rtc_time)); 1690 error = ioctl(ctx->fd, VM_RTC_GETTIME, &rtctime); 1691 if (error == 0) 1692 *secs = rtctime.secs; 1693 return (error); 1694 } 1695 1696 int 1697 vm_restart_instruction(struct vcpu *vcpu) 1698 { 1699 int arg; 1700 1701 return (vcpu_ioctl(vcpu, VM_RESTART_INSTRUCTION, &arg)); 1702 } 1703 1704 int 1705 vm_snapshot_req(struct vmctx *ctx, struct vm_snapshot_meta *meta) 1706 { 1707 1708 if (ioctl(ctx->fd, VM_SNAPSHOT_REQ, meta) == -1) { 1709 #ifdef SNAPSHOT_DEBUG 1710 fprintf(stderr, "%s: snapshot failed for %s: %d\r\n", 1711 __func__, meta->dev_name, errno); 1712 #endif 1713 return (-1); 1714 } 1715 return (0); 1716 } 1717 1718 int 1719 vm_restore_time(struct vmctx *ctx) 1720 { 1721 int dummy; 1722 1723 dummy = 0; 1724 return (ioctl(ctx->fd, VM_RESTORE_TIME, &dummy)); 1725 } 1726 1727 int 1728 vm_set_topology(struct vmctx *ctx, 1729 uint16_t sockets, uint16_t cores, uint16_t threads, uint16_t maxcpus) 1730 { 1731 struct vm_cpu_topology topology; 1732 1733 bzero(&topology, sizeof (struct vm_cpu_topology)); 1734 topology.sockets = sockets; 1735 topology.cores = cores; 1736 topology.threads = threads; 1737 topology.maxcpus = maxcpus; 1738 return (ioctl(ctx->fd, VM_SET_TOPOLOGY, &topology)); 1739 } 1740 1741 int 1742 vm_get_topology(struct vmctx *ctx, 1743 uint16_t *sockets, uint16_t *cores, uint16_t *threads, uint16_t *maxcpus) 1744 { 1745 struct vm_cpu_topology topology; 1746 int error; 1747 1748 bzero(&topology, sizeof (struct vm_cpu_topology)); 1749 error = ioctl(ctx->fd, VM_GET_TOPOLOGY, &topology); 1750 if (error == 0) { 1751 *sockets = topology.sockets; 1752 *cores = topology.cores; 1753 *threads = topology.threads; 1754 *maxcpus = topology.maxcpus; 1755 } 1756 return (error); 1757 } 1758 1759 /* Keep in sync with machine/vmm_dev.h. */ 1760 static const cap_ioctl_t vm_ioctl_cmds[] = { VM_RUN, VM_SUSPEND, VM_REINIT, 1761 VM_ALLOC_MEMSEG, VM_GET_MEMSEG, VM_MMAP_MEMSEG, VM_MMAP_MEMSEG, 1762 VM_MMAP_GETNEXT, VM_MUNMAP_MEMSEG, VM_SET_REGISTER, VM_GET_REGISTER, 1763 VM_SET_SEGMENT_DESCRIPTOR, VM_GET_SEGMENT_DESCRIPTOR, 1764 VM_SET_REGISTER_SET, VM_GET_REGISTER_SET, 1765 VM_SET_KERNEMU_DEV, VM_GET_KERNEMU_DEV, 1766 VM_INJECT_EXCEPTION, VM_LAPIC_IRQ, VM_LAPIC_LOCAL_IRQ, 1767 VM_LAPIC_MSI, VM_IOAPIC_ASSERT_IRQ, VM_IOAPIC_DEASSERT_IRQ, 1768 VM_IOAPIC_PULSE_IRQ, VM_IOAPIC_PINCOUNT, VM_ISA_ASSERT_IRQ, 1769 VM_ISA_DEASSERT_IRQ, VM_ISA_PULSE_IRQ, VM_ISA_SET_IRQ_TRIGGER, 1770 VM_SET_CAPABILITY, VM_GET_CAPABILITY, VM_BIND_PPTDEV, 1771 VM_UNBIND_PPTDEV, VM_MAP_PPTDEV_MMIO, VM_PPTDEV_MSI, 1772 VM_PPTDEV_MSIX, VM_UNMAP_PPTDEV_MMIO, VM_PPTDEV_DISABLE_MSIX, 1773 VM_INJECT_NMI, VM_STATS, VM_STAT_DESC, 1774 VM_SET_X2APIC_STATE, VM_GET_X2APIC_STATE, 1775 VM_GET_HPET_CAPABILITIES, VM_GET_GPA_PMAP, VM_GLA2GPA, 1776 VM_GLA2GPA_NOFAULT, 1777 VM_ACTIVATE_CPU, VM_GET_CPUS, VM_SUSPEND_CPU, VM_RESUME_CPU, 1778 VM_SET_INTINFO, VM_GET_INTINFO, 1779 VM_RTC_WRITE, VM_RTC_READ, VM_RTC_SETTIME, VM_RTC_GETTIME, 1780 VM_RESTART_INSTRUCTION, VM_SET_TOPOLOGY, VM_GET_TOPOLOGY, 1781 VM_SNAPSHOT_REQ, VM_RESTORE_TIME 1782 }; 1783 1784 int 1785 vm_limit_rights(struct vmctx *ctx) 1786 { 1787 cap_rights_t rights; 1788 size_t ncmds; 1789 1790 cap_rights_init(&rights, CAP_IOCTL, CAP_MMAP_RW); 1791 if (caph_rights_limit(ctx->fd, &rights) != 0) 1792 return (-1); 1793 ncmds = nitems(vm_ioctl_cmds); 1794 if (caph_ioctls_limit(ctx->fd, vm_ioctl_cmds, ncmds) != 0) 1795 return (-1); 1796 return (0); 1797 } 1798 1799 /* 1800 * Avoid using in new code. Operations on the fd should be wrapped here so that 1801 * capability rights can be kept in sync. 1802 */ 1803 int 1804 vm_get_device_fd(struct vmctx *ctx) 1805 { 1806 1807 return (ctx->fd); 1808 } 1809 1810 /* Legacy interface, do not use. */ 1811 const cap_ioctl_t * 1812 vm_get_ioctls(size_t *len) 1813 { 1814 cap_ioctl_t *cmds; 1815 1816 if (len == NULL) { 1817 cmds = malloc(sizeof(vm_ioctl_cmds)); 1818 if (cmds == NULL) 1819 return (NULL); 1820 bcopy(vm_ioctl_cmds, cmds, sizeof(vm_ioctl_cmds)); 1821 return (cmds); 1822 } 1823 1824 *len = nitems(vm_ioctl_cmds); 1825 return (NULL); 1826 } 1827