/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * Copyright (C) 2015 Mihai Carabas <mihai.carabas@gmail.com>
 * All rights reserved.
 */

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/ucred.h>
#include <sys/uio.h>

#include <machine/vmm.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

#include <dev/vmm/vmm_dev.h>
#include <dev/vmm/vmm_stat.h>

#if defined(__amd64__) && defined(COMPAT_FREEBSD12)
struct vm_memseg_12 {
	int		segid;
	size_t		len;
	char		name[64];
};
_Static_assert(sizeof(struct vm_memseg_12) == 80, "COMPAT_FREEBSD12 ABI");

#define	VM_ALLOC_MEMSEG_12	\
	_IOW('v', IOCNUM_ALLOC_MEMSEG, struct vm_memseg_12)
#define	VM_GET_MEMSEG_12	\
	_IOWR('v', IOCNUM_GET_MEMSEG, struct vm_memseg_12)
#endif

struct devmem_softc {
	int	segid;
	char	*name;
	struct cdev *cdev;
	struct vmmdev_softc *sc;
	SLIST_ENTRY(devmem_softc) link;
};

struct vmmdev_softc {
	struct vm	*vm;		/* vm instance cookie */
	struct cdev	*cdev;
	struct ucred	*ucred;
	SLIST_ENTRY(vmmdev_softc) link;
	SLIST_HEAD(, devmem_softc) devmem;
	int		flags;
};

static SLIST_HEAD(, vmmdev_softc) head;

static unsigned pr_allow_flag;
static struct sx vmmdev_mtx;
SX_SYSINIT(vmmdev_mtx, &vmmdev_mtx, "vmm device mutex");

static MALLOC_DEFINE(M_VMMDEV, "vmmdev", "vmmdev");

SYSCTL_DECL(_hw_vmm);

static void devmem_destroy(void *arg);
static int devmem_create_cdev(struct vmmdev_softc *sc, int id, char *devmem);

static int
vmm_priv_check(struct ucred *ucred)
{
	if (jailed(ucred) &&
	    !(ucred->cr_prison->pr_allow & pr_allow_flag))
		return (EPERM);

	return (0);
}

static int
vcpu_lock_one(struct vcpu *vcpu)
{
	return (vcpu_set_state(vcpu, VCPU_FROZEN, true));
}

static void
vcpu_unlock_one(struct vcpu *vcpu)
{
	enum vcpu_state state;

	state = vcpu_get_state(vcpu, NULL);
	if (state != VCPU_FROZEN) {
		panic("vcpu %s(%d) has invalid state %d",
		    vm_name(vcpu_vm(vcpu)), vcpu_vcpuid(vcpu), state);
	}

	vcpu_set_state(vcpu, VCPU_IDLE, false);
}

static int
vcpu_lock_all(struct vmmdev_softc *sc)
{
	struct vcpu *vcpu;
	int error;
	uint16_t i, j, maxcpus;

	error = 0;
	vm_slock_vcpus(sc->vm);
	maxcpus = vm_get_maxcpus(sc->vm);
	for (i = 0; i < maxcpus; i++) {
		vcpu = vm_vcpu(sc->vm, i);
		if (vcpu == NULL)
			continue;
		error = vcpu_lock_one(vcpu);
		if (error)
			break;
	}

	if (error) {
		for (j = 0; j < i; j++) {
			vcpu = vm_vcpu(sc->vm, j);
			if (vcpu == NULL)
				continue;
			vcpu_unlock_one(vcpu);
		}
		vm_unlock_vcpus(sc->vm);
	}

	return (error);
}

static void
vcpu_unlock_all(struct vmmdev_softc *sc)
{
	struct vcpu *vcpu;
	uint16_t i, maxcpus;

	maxcpus = vm_get_maxcpus(sc->vm);
	for (i = 0; i < maxcpus; i++) {
		vcpu = vm_vcpu(sc->vm, i);
		if (vcpu == NULL)
			continue;
		vcpu_unlock_one(vcpu);
	}
	vm_unlock_vcpus(sc->vm);
}

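/*
 * Illustrative sketch, not part of the driver (kept under #if 0; the
 * helper name is hypothetical): the pairing that vmmdev_ioctl() below
 * relies on for handlers tagged VMMDEV_IOCTL_LOCK_ALL_VCPUS. On success
 * vcpu_lock_all() leaves the vCPU list read-locked with every created
 * vCPU frozen; on failure it has already rolled back its partial work,
 * so only the success path needs the matching unlock.
 */
#if 0
static int
example_with_all_vcpus_frozen(struct vmmdev_softc *sc)
{
	int error;

	error = vcpu_lock_all(sc);
	if (error != 0)
		return (error);	/* partially acquired locks already undone */

	/* ... operate on sc->vm while no vCPU can execute ... */

	vcpu_unlock_all(sc);	/* thaws the vCPUs and drops the read lock */
	return (0);
}
#endif
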
static struct vmmdev_softc *
vmmdev_lookup(const char *name, struct ucred *cred)
{
	struct vmmdev_softc *sc;

	sx_assert(&vmmdev_mtx, SA_XLOCKED);

	SLIST_FOREACH(sc, &head, link) {
		if (strcmp(name, vm_name(sc->vm)) == 0)
			break;
	}

	if (sc == NULL)
		return (NULL);

	if (cr_cansee(cred, sc->ucred))
		return (NULL);

	return (sc);
}

static struct vmmdev_softc *
vmmdev_lookup2(struct cdev *cdev)
{
	return (cdev->si_drv1);
}

static int
vmmdev_rw(struct cdev *cdev, struct uio *uio, int flags)
{
	int error, off, c, prot;
	vm_paddr_t gpa, maxaddr;
	void *hpa, *cookie;
	struct vmmdev_softc *sc;

	sc = vmmdev_lookup2(cdev);
	if (sc == NULL)
		return (ENXIO);

	/*
	 * Get a read lock on the guest memory map.
	 */
	vm_slock_memsegs(sc->vm);

	error = 0;
	prot = (uio->uio_rw == UIO_WRITE ? VM_PROT_WRITE : VM_PROT_READ);
	maxaddr = vmm_sysmem_maxaddr(sc->vm);
	while (uio->uio_resid > 0 && error == 0) {
		gpa = uio->uio_offset;
		off = gpa & PAGE_MASK;
		c = min(uio->uio_resid, PAGE_SIZE - off);

		/*
		 * The VM has a hole in its physical memory map. If we want to
		 * use 'dd' to inspect memory beyond the hole we need to
		 * provide bogus data for memory that lies in the hole.
		 *
		 * Since this device does not support lseek(2), dd(1) will
		 * read(2) blocks of data to simulate the lseek(2).
		 */
		hpa = vm_gpa_hold_global(sc->vm, gpa, c, prot, &cookie);
		if (hpa == NULL) {
			if (uio->uio_rw == UIO_READ && gpa < maxaddr)
				error = uiomove(__DECONST(void *, zero_region),
				    c, uio);
			else
				error = EFAULT;
		} else {
			error = uiomove(hpa, c, uio);
			vm_gpa_release(cookie);
		}
	}
	vm_unlock_memsegs(sc->vm);
	return (error);
}

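/*
 * Usage sketch for vmmdev_rw() above ('myvm' is a hypothetical VM name):
 * reads below vmm_sysmem_maxaddr() that land in a hole of the guest
 * physical map return zeroes instead of failing, so a linear reader such
 * as dd(1) can walk all of guest memory, e.g.:
 *
 *	# dd if=/dev/vmm/myvm bs=4k iseek=0x100 count=1 | hexdump -C
 *
 * Writes into a hole, and any access at or above maxaddr, fail with
 * EFAULT.
 */
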
CTASSERT(sizeof(((struct vm_memseg *)0)->name) >= VM_MAX_SUFFIXLEN + 1);

static int
get_memseg(struct vmmdev_softc *sc, struct vm_memseg *mseg, size_t len)
{
	struct devmem_softc *dsc;
	int error;
	bool sysmem;

	error = vm_get_memseg(sc->vm, mseg->segid, &mseg->len, &sysmem, NULL);
	if (error || mseg->len == 0)
		return (error);

	if (!sysmem) {
		SLIST_FOREACH(dsc, &sc->devmem, link) {
			if (dsc->segid == mseg->segid)
				break;
		}
		KASSERT(dsc != NULL, ("%s: devmem segment %d not found",
		    __func__, mseg->segid));
		error = copystr(dsc->name, mseg->name, len, NULL);
	} else {
		bzero(mseg->name, len);
	}

	return (error);
}

static int
alloc_memseg(struct vmmdev_softc *sc, struct vm_memseg *mseg, size_t len)
{
	char *name;
	int error;
	bool sysmem;

	error = 0;
	name = NULL;
	sysmem = true;

	/*
	 * The allocation is lengthened by 1 to hold a terminating NUL. It'll
	 * be stripped off when devfs processes the full string.
	 */
	if (VM_MEMSEG_NAME(mseg)) {
		sysmem = false;
		name = malloc(len, M_VMMDEV, M_WAITOK);
		error = copystr(mseg->name, name, len, NULL);
		if (error)
			goto done;
	}

	error = vm_alloc_memseg(sc->vm, mseg->segid, mseg->len, sysmem);
	if (error)
		goto done;

	if (VM_MEMSEG_NAME(mseg)) {
		error = devmem_create_cdev(sc, mseg->segid, name);
		if (error)
			vm_free_memseg(sc->vm, mseg->segid);
		else
			name = NULL;	/* freed when 'cdev' is destroyed */
	}
done:
	free(name, M_VMMDEV);
	return (error);
}

static int
vm_get_register_set(struct vcpu *vcpu, unsigned int count, int *regnum,
    uint64_t *regval)
{
	int error, i;

	error = 0;
	for (i = 0; i < count; i++) {
		error = vm_get_register(vcpu, regnum[i], &regval[i]);
		if (error)
			break;
	}
	return (error);
}

static int
vm_set_register_set(struct vcpu *vcpu, unsigned int count, int *regnum,
    uint64_t *regval)
{
	int error, i;

	error = 0;
	for (i = 0; i < count; i++) {
		error = vm_set_register(vcpu, regnum[i], regval[i]);
		if (error)
			break;
	}
	return (error);
}

static int
vmmdev_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	int error;

	/*
	 * A jail without vmm access shouldn't be able to access vmm device
	 * files at all, but check here just to be thorough.
	 */
	error = vmm_priv_check(td->td_ucred);
	if (error != 0)
		return (error);

	return (0);
}

static const struct vmmdev_ioctl vmmdev_ioctls[] = {
	VMMDEV_IOCTL(VM_GET_REGISTER, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_SET_REGISTER, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_GET_REGISTER_SET, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_SET_REGISTER_SET, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_GET_CAPABILITY, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_SET_CAPABILITY, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_ACTIVATE_CPU, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_INJECT_EXCEPTION, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_STATS, VMMDEV_IOCTL_LOCK_ONE_VCPU),

#if defined(__amd64__) && defined(COMPAT_FREEBSD12)
	VMMDEV_IOCTL(VM_ALLOC_MEMSEG_12,
	    VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_LOCK_ALL_VCPUS),
#endif
	VMMDEV_IOCTL(VM_ALLOC_MEMSEG,
	    VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_LOCK_ALL_VCPUS),
	VMMDEV_IOCTL(VM_MMAP_MEMSEG,
	    VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_LOCK_ALL_VCPUS),
	VMMDEV_IOCTL(VM_MUNMAP_MEMSEG,
	    VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_LOCK_ALL_VCPUS),
	VMMDEV_IOCTL(VM_REINIT,
	    VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_LOCK_ALL_VCPUS),

#if defined(__amd64__) && defined(COMPAT_FREEBSD12)
	VMMDEV_IOCTL(VM_GET_MEMSEG_12, VMMDEV_IOCTL_SLOCK_MEMSEGS),
#endif
	VMMDEV_IOCTL(VM_GET_MEMSEG, VMMDEV_IOCTL_SLOCK_MEMSEGS),
	VMMDEV_IOCTL(VM_MMAP_GETNEXT, VMMDEV_IOCTL_SLOCK_MEMSEGS),

	VMMDEV_IOCTL(VM_SUSPEND_CPU, VMMDEV_IOCTL_MAYBE_ALLOC_VCPU),
	VMMDEV_IOCTL(VM_RESUME_CPU, VMMDEV_IOCTL_MAYBE_ALLOC_VCPU),

	VMMDEV_IOCTL(VM_SUSPEND, 0),
	VMMDEV_IOCTL(VM_GET_CPUS, 0),
	VMMDEV_IOCTL(VM_GET_TOPOLOGY, 0),
	VMMDEV_IOCTL(VM_SET_TOPOLOGY, 0),
};

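/*
 * Userspace sketch, kept under #if 0 (assumptions: 'vmfd' comes from
 * opening /dev/vmm/<name>, VM_REG_GUEST_RIP is the amd64 register id,
 * and the helper name is illustrative only). Handlers tagged
 * VMMDEV_IOCTL_LOCK_ONE_VCPU expect the vcpuid as the leading int of the
 * argument structure; vmmdev_ioctl() below reads it there to choose
 * which vCPU to freeze before the handler body runs.
 */
#if 0
static int
example_get_rip(int vmfd, int vcpuid, uint64_t *rip)
{
	struct vm_register vmreg;

	vmreg.cpuid = vcpuid;	/* consumed by the vCPU locking preamble */
	vmreg.regnum = VM_REG_GUEST_RIP;
	if (ioctl(vmfd, VM_GET_REGISTER, &vmreg) == -1)
		return (-1);
	*rip = vmreg.regval;
	return (0);
}
#endif
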
static int
vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	struct vmmdev_softc *sc;
	struct vcpu *vcpu;
	const struct vmmdev_ioctl *ioctl;
	int error, vcpuid;

	sc = vmmdev_lookup2(cdev);
	if (sc == NULL)
		return (ENXIO);

	ioctl = NULL;
	for (size_t i = 0; i < nitems(vmmdev_ioctls); i++) {
		if (vmmdev_ioctls[i].cmd == cmd) {
			ioctl = &vmmdev_ioctls[i];
			break;
		}
	}
	if (ioctl == NULL) {
		for (size_t i = 0; i < vmmdev_machdep_ioctl_count; i++) {
			if (vmmdev_machdep_ioctls[i].cmd == cmd) {
				ioctl = &vmmdev_machdep_ioctls[i];
				break;
			}
		}
	}
	if (ioctl == NULL)
		return (ENOTTY);

	if ((ioctl->flags & VMMDEV_IOCTL_XLOCK_MEMSEGS) != 0)
		vm_xlock_memsegs(sc->vm);
	else if ((ioctl->flags & VMMDEV_IOCTL_SLOCK_MEMSEGS) != 0)
		vm_slock_memsegs(sc->vm);

	vcpu = NULL;
	vcpuid = -1;
	if ((ioctl->flags & (VMMDEV_IOCTL_LOCK_ONE_VCPU |
	    VMMDEV_IOCTL_ALLOC_VCPU | VMMDEV_IOCTL_MAYBE_ALLOC_VCPU)) != 0) {
		vcpuid = *(int *)data;
		if (vcpuid == -1) {
			if ((ioctl->flags &
			    VMMDEV_IOCTL_MAYBE_ALLOC_VCPU) == 0) {
				error = EINVAL;
				goto lockfail;
			}
		} else {
			vcpu = vm_alloc_vcpu(sc->vm, vcpuid);
			if (vcpu == NULL) {
				error = EINVAL;
				goto lockfail;
			}
			if ((ioctl->flags & VMMDEV_IOCTL_LOCK_ONE_VCPU) != 0) {
				error = vcpu_lock_one(vcpu);
				if (error)
					goto lockfail;
			}
		}
	}
	if ((ioctl->flags & VMMDEV_IOCTL_LOCK_ALL_VCPUS) != 0) {
		error = vcpu_lock_all(sc);
		if (error)
			goto lockfail;
	}

	switch (cmd) {
	case VM_SUSPEND: {
		struct vm_suspend *vmsuspend;

		vmsuspend = (struct vm_suspend *)data;
		error = vm_suspend(sc->vm, vmsuspend->how);
		break;
	}
	case VM_REINIT:
		error = vm_reinit(sc->vm);
		break;
	case VM_STAT_DESC: {
		struct vm_stat_desc *statdesc;

		statdesc = (struct vm_stat_desc *)data;
		error = vmm_stat_desc_copy(statdesc->index, statdesc->desc,
		    sizeof(statdesc->desc));
		break;
	}
	case VM_STATS: {
		struct vm_stats *vmstats;

		vmstats = (struct vm_stats *)data;
		getmicrotime(&vmstats->tv);
		error = vmm_stat_copy(vcpu, vmstats->index,
		    nitems(vmstats->statbuf), &vmstats->num_entries,
		    vmstats->statbuf);
		break;
	}
	case VM_MMAP_GETNEXT: {
		struct vm_memmap *mm;

		mm = (struct vm_memmap *)data;
		error = vm_mmap_getnext(sc->vm, &mm->gpa, &mm->segid,
		    &mm->segoff, &mm->len, &mm->prot, &mm->flags);
		break;
	}
	case VM_MMAP_MEMSEG: {
		struct vm_memmap *mm;

		mm = (struct vm_memmap *)data;
		error = vm_mmap_memseg(sc->vm, mm->gpa, mm->segid, mm->segoff,
		    mm->len, mm->prot, mm->flags);
		break;
	}
	case VM_MUNMAP_MEMSEG: {
		struct vm_munmap *mu;

		mu = (struct vm_munmap *)data;
		error = vm_munmap_memseg(sc->vm, mu->gpa, mu->len);
		break;
	}
#if defined(__amd64__) && defined(COMPAT_FREEBSD12)
	case VM_ALLOC_MEMSEG_12:
		error = alloc_memseg(sc, (struct vm_memseg *)data,
		    sizeof(((struct vm_memseg_12 *)0)->name));
		break;
	case VM_GET_MEMSEG_12:
		error = get_memseg(sc, (struct vm_memseg *)data,
		    sizeof(((struct vm_memseg_12 *)0)->name));
		break;
#endif
	case VM_ALLOC_MEMSEG:
		error = alloc_memseg(sc, (struct vm_memseg *)data,
		    sizeof(((struct vm_memseg *)0)->name));
		break;
	case VM_GET_MEMSEG:
		error = get_memseg(sc, (struct vm_memseg *)data,
		    sizeof(((struct vm_memseg *)0)->name));
		break;
	case VM_GET_REGISTER: {
		struct vm_register *vmreg;

		vmreg = (struct vm_register *)data;
		error = vm_get_register(vcpu, vmreg->regnum, &vmreg->regval);
		break;
	}
	case VM_SET_REGISTER: {
		struct vm_register *vmreg;

		vmreg = (struct vm_register *)data;
		error = vm_set_register(vcpu, vmreg->regnum, vmreg->regval);
		break;
	}
	case VM_GET_REGISTER_SET: {
		struct vm_register_set *vmregset;
		uint64_t *regvals;
		int *regnums;

		vmregset = (struct vm_register_set *)data;
		if (vmregset->count > VM_REG_LAST) {
			error = EINVAL;
			break;
		}
		regvals = malloc(sizeof(regvals[0]) * vmregset->count, M_VMMDEV,
		    M_WAITOK);
		regnums = malloc(sizeof(regnums[0]) * vmregset->count, M_VMMDEV,
		    M_WAITOK);
		error = copyin(vmregset->regnums, regnums, sizeof(regnums[0]) *
		    vmregset->count);
		if (error == 0)
			error = vm_get_register_set(vcpu,
			    vmregset->count, regnums, regvals);
		if (error == 0)
			error = copyout(regvals, vmregset->regvals,
			    sizeof(regvals[0]) * vmregset->count);
		free(regvals, M_VMMDEV);
		free(regnums, M_VMMDEV);
		break;
	}
	case VM_SET_REGISTER_SET: {
		struct vm_register_set *vmregset;
		uint64_t *regvals;
		int *regnums;

		vmregset = (struct vm_register_set *)data;
		if (vmregset->count > VM_REG_LAST) {
			error = EINVAL;
			break;
		}
		regvals = malloc(sizeof(regvals[0]) * vmregset->count, M_VMMDEV,
		    M_WAITOK);
		regnums = malloc(sizeof(regnums[0]) * vmregset->count, M_VMMDEV,
		    M_WAITOK);
		error = copyin(vmregset->regnums, regnums, sizeof(regnums[0]) *
		    vmregset->count);
		if (error == 0)
			error = copyin(vmregset->regvals, regvals,
			    sizeof(regvals[0]) * vmregset->count);
		if (error == 0)
			error = vm_set_register_set(vcpu,
			    vmregset->count, regnums, regvals);
		free(regvals, M_VMMDEV);
		free(regnums, M_VMMDEV);
		break;
	}
	case VM_GET_CAPABILITY: {
		struct vm_capability *vmcap;

		vmcap = (struct vm_capability *)data;
		error = vm_get_capability(vcpu, vmcap->captype, &vmcap->capval);
		break;
	}
	case VM_SET_CAPABILITY: {
		struct vm_capability *vmcap;

		vmcap = (struct vm_capability *)data;
		error = vm_set_capability(vcpu, vmcap->captype, vmcap->capval);
		break;
	}
	case VM_ACTIVATE_CPU:
		error = vm_activate_cpu(vcpu);
		break;
	case VM_GET_CPUS: {
		struct vm_cpuset *vm_cpuset;
		cpuset_t *cpuset;
		int size;

		error = 0;
		vm_cpuset = (struct vm_cpuset *)data;
		size = vm_cpuset->cpusetsize;
		if (size < 1 || size > CPU_MAXSIZE / NBBY) {
			error = ERANGE;
			break;
		}
		cpuset = malloc(max(size, sizeof(cpuset_t)), M_TEMP,
		    M_WAITOK | M_ZERO);
		if (vm_cpuset->which == VM_ACTIVE_CPUS)
			*cpuset = vm_active_cpus(sc->vm);
		else if (vm_cpuset->which == VM_SUSPENDED_CPUS)
			*cpuset = vm_suspended_cpus(sc->vm);
		else if (vm_cpuset->which == VM_DEBUG_CPUS)
			*cpuset = vm_debug_cpus(sc->vm);
		else
			error = EINVAL;
		if (error == 0 && size < howmany(CPU_FLS(cpuset), NBBY))
			error = ERANGE;
		if (error == 0)
			error = copyout(cpuset, vm_cpuset->cpus, size);
		free(cpuset, M_TEMP);
		break;
	}
	case VM_SUSPEND_CPU:
		error = vm_suspend_cpu(sc->vm, vcpu);
		break;
	case VM_RESUME_CPU:
		error = vm_resume_cpu(sc->vm, vcpu);
		break;
	case VM_SET_TOPOLOGY: {
		struct vm_cpu_topology *topology;

		topology = (struct vm_cpu_topology *)data;
		error = vm_set_topology(sc->vm, topology->sockets,
		    topology->cores, topology->threads, topology->maxcpus);
		break;
	}
	case VM_GET_TOPOLOGY: {
		struct vm_cpu_topology *topology;

		topology = (struct vm_cpu_topology *)data;
		vm_get_topology(sc->vm, &topology->sockets, &topology->cores,
		    &topology->threads, &topology->maxcpus);
		error = 0;
		break;
	}
	default:
		error = vmmdev_machdep_ioctl(sc->vm, vcpu, cmd, data, fflag,
		    td);
		break;
	}

	if ((ioctl->flags &
	    (VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_SLOCK_MEMSEGS)) != 0)
		vm_unlock_memsegs(sc->vm);
	if ((ioctl->flags & VMMDEV_IOCTL_LOCK_ALL_VCPUS) != 0)
		vcpu_unlock_all(sc);
	else if ((ioctl->flags & VMMDEV_IOCTL_LOCK_ONE_VCPU) != 0)
		vcpu_unlock_one(vcpu);

	/*
	 * Make sure that no handler returns a kernel-internal
	 * error value to userspace.
	 */
	KASSERT(error == ERESTART || error >= 0,
	    ("vmmdev_ioctl: invalid error return %d", error));
	return (error);

lockfail:
	if ((ioctl->flags &
	    (VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_SLOCK_MEMSEGS)) != 0)
		vm_unlock_memsegs(sc->vm);
	return (error);
}

static int
vmmdev_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t mapsize,
    struct vm_object **objp, int nprot)
{
	struct vmmdev_softc *sc;
	vm_paddr_t gpa;
	size_t len;
	vm_ooffset_t segoff, first, last;
	int error, found, segid;
	bool sysmem;

	first = *offset;
	last = first + mapsize;
	if ((nprot & PROT_EXEC) || first < 0 || first >= last)
		return (EINVAL);

	sc = vmmdev_lookup2(cdev);
	if (sc == NULL) {
		/* virtual machine is in the process of being created */
		return (EINVAL);
	}

	/*
	 * Get a read lock on the guest memory map.
	 */
	vm_slock_memsegs(sc->vm);

	gpa = 0;
	found = 0;
	while (!found) {
		error = vm_mmap_getnext(sc->vm, &gpa, &segid, &segoff, &len,
		    NULL, NULL);
		if (error)
			break;

		if (first >= gpa && last <= gpa + len)
			found = 1;
		else
			gpa += len;
	}

	if (found) {
		error = vm_get_memseg(sc->vm, segid, &len, &sysmem, objp);
		KASSERT(error == 0 && *objp != NULL,
		    ("%s: invalid memory segment %d", __func__, segid));
		if (sysmem) {
			vm_object_reference(*objp);
			*offset = segoff + (first - gpa);
		} else {
			error = EINVAL;
		}
	}
	vm_unlock_memsegs(sc->vm);
	return (error);
}

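/*
 * Userspace sketch, kept under #if 0 (the helper name is hypothetical):
 * vmmdev_mmap_single() above treats the mmap() file offset as a
 * guest-physical address, rejects PROT_EXEC, and requires
 * [gpa, gpa + len) to fall inside a single sysmem mapping, so mapping
 * guest RAM from a /dev/vmm/<name> descriptor reduces to:
 */
#if 0
static void *
example_map_guest_ram(int vmfd, uint64_t gpa, size_t len)
{
	/* The file offset is the guest-physical address. */
	return (mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
	    vmfd, (off_t)gpa));
}
#endif
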
static void
vmmdev_destroy(struct vmmdev_softc *sc)
{
	struct devmem_softc *dsc;
	int error __diagused;

	KASSERT(sc->cdev == NULL, ("%s: cdev not free", __func__));

	/*
	 * Destroy all cdevs:
	 *
	 * - any new operations on the 'cdev' will return an error (ENXIO).
	 *
	 * - the 'devmem' cdevs are destroyed before the virtual machine 'cdev'
	 */
	SLIST_FOREACH(dsc, &sc->devmem, link) {
		KASSERT(dsc->cdev != NULL, ("devmem cdev already destroyed"));
		devmem_destroy(dsc);
	}

	vm_disable_vcpu_creation(sc->vm);
	error = vcpu_lock_all(sc);
	KASSERT(error == 0, ("%s: error %d freezing vcpus", __func__, error));
	vm_unlock_vcpus(sc->vm);

	while ((dsc = SLIST_FIRST(&sc->devmem)) != NULL) {
		KASSERT(dsc->cdev == NULL, ("%s: devmem not free", __func__));
		SLIST_REMOVE_HEAD(&sc->devmem, link);
		free(dsc->name, M_VMMDEV);
		free(dsc, M_VMMDEV);
	}

	if (sc->vm != NULL)
		vm_destroy(sc->vm);

	if (sc->ucred != NULL)
		crfree(sc->ucred);

	sx_xlock(&vmmdev_mtx);
	SLIST_REMOVE(&head, sc, vmmdev_softc, link);
	sx_xunlock(&vmmdev_mtx);
	free(sc, M_VMMDEV);
}

static int
vmmdev_lookup_and_destroy(const char *name, struct ucred *cred)
{
	struct cdev *cdev;
	struct vmmdev_softc *sc;

	sx_xlock(&vmmdev_mtx);
	sc = vmmdev_lookup(name, cred);
	if (sc == NULL || sc->cdev == NULL) {
		sx_xunlock(&vmmdev_mtx);
		return (EINVAL);
	}

	/*
	 * Setting 'sc->cdev' to NULL is used to indicate that the VM
	 * is scheduled for destruction.
	 */
	cdev = sc->cdev;
	sc->cdev = NULL;
	sx_xunlock(&vmmdev_mtx);

	destroy_dev(cdev);
	vmmdev_destroy(sc);

	return (0);
}

static int
sysctl_vmm_destroy(SYSCTL_HANDLER_ARGS)
{
	char *buf;
	int error, buflen;

	error = vmm_priv_check(req->td->td_ucred);
	if (error)
		return (error);

	buflen = VM_MAX_NAMELEN + 1;
	buf = malloc(buflen, M_VMMDEV, M_WAITOK | M_ZERO);
	strlcpy(buf, "beavis", buflen);
	error = sysctl_handle_string(oidp, buf, buflen, req);
	if (error == 0 && req->newptr != NULL)
		error = vmmdev_lookup_and_destroy(buf, req->td->td_ucred);
	free(buf, M_VMMDEV);
	return (error);
}
SYSCTL_PROC(_hw_vmm, OID_AUTO, destroy,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_PRISON | CTLFLAG_MPSAFE,
    NULL, 0, sysctl_vmm_destroy, "A",
    NULL);

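/*
 * Usage sketch: VM lifetime is driven by these string sysctls (destroy
 * above, create below) rather than by ioctls, e.g. from the host or a
 * jail with allow.vmm set:
 *
 *	# sysctl hw.vmm.create=testvm
 *	# ls /dev/vmm
 *	testvm
 *	# sysctl hw.vmm.destroy=testvm
 *
 * 'testvm' is an arbitrary name of at most VM_MAX_NAMELEN characters.
 */
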
static struct cdevsw vmmdevsw = {
	.d_name		= "vmmdev",
	.d_version	= D_VERSION,
	.d_open		= vmmdev_open,
	.d_ioctl	= vmmdev_ioctl,
	.d_mmap_single	= vmmdev_mmap_single,
	.d_read		= vmmdev_rw,
	.d_write	= vmmdev_rw,
};

static struct vmmdev_softc *
vmmdev_alloc(struct vm *vm, struct ucred *cred)
{
	struct vmmdev_softc *sc;

	sc = malloc(sizeof(*sc), M_VMMDEV, M_WAITOK | M_ZERO);
	SLIST_INIT(&sc->devmem);
	sc->vm = vm;
	sc->ucred = crhold(cred);
	return (sc);
}

static int
vmmdev_create(const char *name, struct ucred *cred)
{
	struct make_dev_args mda;
	struct cdev *cdev;
	struct vmmdev_softc *sc;
	struct vm *vm;
	int error;

	sx_xlock(&vmmdev_mtx);
	sc = vmmdev_lookup(name, cred);
	if (sc != NULL) {
		sx_xunlock(&vmmdev_mtx);
		return (EEXIST);
	}

	error = vm_create(name, &vm);
	if (error != 0) {
		sx_xunlock(&vmmdev_mtx);
		return (error);
	}
	sc = vmmdev_alloc(vm, cred);
	SLIST_INSERT_HEAD(&head, sc, link);

	make_dev_args_init(&mda);
	mda.mda_devsw = &vmmdevsw;
	mda.mda_cr = sc->ucred;
	mda.mda_uid = UID_ROOT;
	mda.mda_gid = GID_WHEEL;
	mda.mda_mode = 0600;
	mda.mda_si_drv1 = sc;
	mda.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK;
	error = make_dev_s(&mda, &cdev, "vmm/%s", name);
	if (error != 0) {
		sx_xunlock(&vmmdev_mtx);
		vmmdev_destroy(sc);
		return (error);
	}
	sc->cdev = cdev;
	sx_xunlock(&vmmdev_mtx);
	return (0);
}

static int
sysctl_vmm_create(SYSCTL_HANDLER_ARGS)
{
	char *buf;
	int error, buflen;

	error = vmm_priv_check(req->td->td_ucred);
	if (error != 0)
		return (error);

	buflen = VM_MAX_NAMELEN + 1;
	buf = malloc(buflen, M_VMMDEV, M_WAITOK | M_ZERO);
	strlcpy(buf, "beavis", buflen);
	error = sysctl_handle_string(oidp, buf, buflen, req);
	if (error == 0 && req->newptr != NULL)
		error = vmmdev_create(buf, req->td->td_ucred);
	free(buf, M_VMMDEV);
	return (error);
}
SYSCTL_PROC(_hw_vmm, OID_AUTO, create,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_PRISON | CTLFLAG_MPSAFE,
    NULL, 0, sysctl_vmm_create, "A",
    NULL);

void
vmmdev_init(void)
{
	pr_allow_flag = prison_add_allow(NULL, "vmm", NULL,
	    "Allow use of vmm in a jail.");
}

int
vmmdev_cleanup(void)
{
	int error;

	if (SLIST_EMPTY(&head))
		error = 0;
	else
		error = EBUSY;

	return (error);
}

static int
devmem_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t len,
    struct vm_object **objp, int nprot)
{
	struct devmem_softc *dsc;
	vm_ooffset_t first, last;
	size_t seglen;
	int error;
	bool sysmem;

	dsc = cdev->si_drv1;
	if (dsc == NULL) {
		/* 'cdev' has been created but is not ready for use */
		return (ENXIO);
	}

	first = *offset;
	last = *offset + len;
	if ((nprot & PROT_EXEC) || first < 0 || first >= last)
		return (EINVAL);

	vm_slock_memsegs(dsc->sc->vm);

	error = vm_get_memseg(dsc->sc->vm, dsc->segid, &seglen, &sysmem, objp);
	KASSERT(error == 0 && !sysmem && *objp != NULL,
	    ("%s: invalid devmem segment %d", __func__, dsc->segid));

	if (seglen >= last)
		vm_object_reference(*objp);
	else
		error = EINVAL;

	vm_unlock_memsegs(dsc->sc->vm);
	return (error);
}

static struct cdevsw devmemsw = {
	.d_name		= "devmem",
	.d_version	= D_VERSION,
	.d_mmap_single	= devmem_mmap_single,
};

static int
devmem_create_cdev(struct vmmdev_softc *sc, int segid, char *devname)
{
	struct make_dev_args mda;
	struct devmem_softc *dsc;
	int error;

	sx_xlock(&vmmdev_mtx);

	dsc = malloc(sizeof(struct devmem_softc), M_VMMDEV, M_WAITOK | M_ZERO);
	dsc->segid = segid;
	dsc->name = devname;
	dsc->sc = sc;
	SLIST_INSERT_HEAD(&sc->devmem, dsc, link);

	make_dev_args_init(&mda);
	mda.mda_devsw = &devmemsw;
	mda.mda_cr = sc->ucred;
	mda.mda_uid = UID_ROOT;
	mda.mda_gid = GID_WHEEL;
	mda.mda_mode = 0600;
	mda.mda_si_drv1 = dsc;
	mda.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK;
	error = make_dev_s(&mda, &dsc->cdev, "vmm.io/%s.%s", vm_name(sc->vm),
	    devname);
	if (error != 0) {
		SLIST_REMOVE(&sc->devmem, dsc, devmem_softc, link);
		free(dsc->name, M_VMMDEV);
		free(dsc, M_VMMDEV);
	}

	sx_xunlock(&vmmdev_mtx);

	return (error);
}

static void
devmem_destroy(void *arg)
{
	struct devmem_softc *dsc = arg;

	destroy_dev(dsc->cdev);
	dsc->cdev = NULL;
	dsc->sc = NULL;
}
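
/*
 * Userspace sketch, kept under #if 0 (the device path and helper name
 * are illustrative): a named segment created through VM_ALLOC_MEMSEG
 * appears as /dev/vmm.io/<vm>.<name> (see the make_dev_s() format in
 * devmem_create_cdev() above) and is mapped with an ordinary mmap();
 * devmem_mmap_single() rejects PROT_EXEC and ranges extending past the
 * end of the segment.
 */
#if 0
static void *
example_map_devmem(const char *path, size_t len)
{
	int fd;
	void *mem;

	fd = open(path, O_RDWR);	/* e.g. "/dev/vmm.io/myvm.bootrom" */
	if (fd == -1)
		return (NULL);
	mem = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	close(fd);			/* the mapping outlives the fd */
	return (mem == MAP_FAILED ? NULL : mem);
}
#endif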