/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * Copyright (C) 2015 Mihai Carabas <mihai.carabas@gmail.com>
 * All rights reserved.
 */

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/ucred.h>
#include <sys/uio.h>

#include <machine/vmm.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

#include <dev/vmm/vmm_dev.h>
#include <dev/vmm/vmm_stat.h>

#if defined(__amd64__) && defined(COMPAT_FREEBSD12)
struct vm_memseg_12 {
	int		segid;
	size_t		len;
	char		name[64];
};
_Static_assert(sizeof(struct vm_memseg_12) == 80, "COMPAT_FREEBSD12 ABI");

#define	VM_ALLOC_MEMSEG_12	\
	_IOW('v', IOCNUM_ALLOC_MEMSEG, struct vm_memseg_12)
#define	VM_GET_MEMSEG_12	\
	_IOWR('v', IOCNUM_GET_MEMSEG, struct vm_memseg_12)
#endif

struct devmem_softc {
	int	segid;
	char	*name;
	struct cdev *cdev;
	struct vmmdev_softc *sc;
	SLIST_ENTRY(devmem_softc) link;
};

struct vmmdev_softc {
	struct vm	*vm;		/* vm instance cookie */
	struct cdev	*cdev;
	struct ucred	*ucred;
	SLIST_ENTRY(vmmdev_softc) link;
	SLIST_HEAD(, devmem_softc) devmem;
	int		flags;
};

static SLIST_HEAD(, vmmdev_softc) head;

static unsigned pr_allow_flag;
static struct sx vmmdev_mtx;
SX_SYSINIT(vmmdev_mtx, &vmmdev_mtx, "vmm device mutex");

static MALLOC_DEFINE(M_VMMDEV, "vmmdev", "vmmdev");

SYSCTL_DECL(_hw_vmm);

static void devmem_destroy(void *arg);
static int devmem_create_cdev(struct vmmdev_softc *sc, int id, char *devmem);

static int
vmm_priv_check(struct ucred *ucred)
{
	if (jailed(ucred) &&
	    !(ucred->cr_prison->pr_allow & pr_allow_flag))
		return (EPERM);

	return (0);
}

static int
vcpu_lock_one(struct vcpu *vcpu)
{
	return (vcpu_set_state(vcpu, VCPU_FROZEN, true));
}

static void
vcpu_unlock_one(struct vcpu *vcpu)
{
	enum vcpu_state state;

	state = vcpu_get_state(vcpu, NULL);
	if (state != VCPU_FROZEN) {
		panic("vcpu %s(%d) has invalid state %d",
		    vm_name(vcpu_vm(vcpu)), vcpu_vcpuid(vcpu), state);
	}

	vcpu_set_state(vcpu, VCPU_IDLE, false);
}

static int
vcpu_lock_all(struct vmmdev_softc *sc)
{
	struct vcpu *vcpu;
	int error;
	uint16_t i, j, maxcpus;

	error = 0;
	vm_slock_vcpus(sc->vm);
	maxcpus = vm_get_maxcpus(sc->vm);
	for (i = 0; i < maxcpus; i++) {
		vcpu = vm_vcpu(sc->vm, i);
		if (vcpu == NULL)
			continue;
		error = vcpu_lock_one(vcpu);
		if (error)
			break;
	}

	if (error) {
		for (j = 0; j < i; j++) {
			vcpu = vm_vcpu(sc->vm, j);
			if (vcpu == NULL)
				continue;
			vcpu_unlock_one(vcpu);
		}
		vm_unlock_vcpus(sc->vm);
	}

	return (error);
}

static void
vcpu_unlock_all(struct vmmdev_softc *sc)
{
	struct vcpu *vcpu;
	uint16_t i, maxcpus;

	maxcpus = vm_get_maxcpus(sc->vm);
	for (i = 0; i < maxcpus; i++) {
		vcpu = vm_vcpu(sc->vm, i);
		if (vcpu == NULL)
			continue;
		vcpu_unlock_one(vcpu);
	}
	vm_unlock_vcpus(sc->vm);
}

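/*
 * Look up the softc for a VM by name.  The caller must hold vmmdev_mtx;
 * cr_cansee() hides VMs whose owning credential is not visible to the
 * caller, e.g. from within a jail.
 */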
static struct vmmdev_softc *
vmmdev_lookup(const char *name, struct ucred *cred)
{
	struct vmmdev_softc *sc;

	sx_assert(&vmmdev_mtx, SA_XLOCKED);

	SLIST_FOREACH(sc, &head, link) {
		if (strcmp(name, vm_name(sc->vm)) == 0)
			break;
	}

	if (sc == NULL)
		return (NULL);

	if (cr_cansee(cred, sc->ucred))
		return (NULL);

	return (sc);
}

static struct vmmdev_softc *
vmmdev_lookup2(struct cdev *cdev)
{
	return (cdev->si_drv1);
}

static int
vmmdev_rw(struct cdev *cdev, struct uio *uio, int flags)
{
	int error, off, c, prot;
	vm_paddr_t gpa, maxaddr;
	void *hpa, *cookie;
	struct vmmdev_softc *sc;

	sc = vmmdev_lookup2(cdev);
	if (sc == NULL)
		return (ENXIO);

	/*
	 * Get a read lock on the guest memory map.
	 */
	vm_slock_memsegs(sc->vm);

	error = 0;
	prot = (uio->uio_rw == UIO_WRITE ? VM_PROT_WRITE : VM_PROT_READ);
	maxaddr = vmm_sysmem_maxaddr(sc->vm);
	while (uio->uio_resid > 0 && error == 0) {
		gpa = uio->uio_offset;
		off = gpa & PAGE_MASK;
		c = min(uio->uio_resid, PAGE_SIZE - off);

		/*
		 * The VM has a hole in its physical memory map. If we want to
		 * use 'dd' to inspect memory beyond the hole we need to
		 * provide bogus data for memory that lies in the hole.
		 *
		 * Since this device does not support lseek(2), dd(1) will
		 * read(2) blocks of data to simulate the lseek(2).
		 */
		hpa = vm_gpa_hold_global(sc->vm, gpa, c, prot, &cookie);
		if (hpa == NULL) {
			if (uio->uio_rw == UIO_READ && gpa < maxaddr)
				error = uiomove(__DECONST(void *, zero_region),
				    c, uio);
			else
				error = EFAULT;
		} else {
			error = uiomove(hpa, c, uio);
			vm_gpa_release(cookie);
		}
	}
	vm_unlock_memsegs(sc->vm);
	return (error);
}

CTASSERT(sizeof(((struct vm_memseg *)0)->name) >= VM_MAX_SUFFIXLEN + 1);

static int
get_memseg(struct vmmdev_softc *sc, struct vm_memseg *mseg, size_t len)
{
	struct devmem_softc *dsc;
	int error;
	bool sysmem;

	error = vm_get_memseg(sc->vm, mseg->segid, &mseg->len, &sysmem, NULL);
	if (error || mseg->len == 0)
		return (error);

	if (!sysmem) {
		SLIST_FOREACH(dsc, &sc->devmem, link) {
			if (dsc->segid == mseg->segid)
				break;
		}
		KASSERT(dsc != NULL, ("%s: devmem segment %d not found",
		    __func__, mseg->segid));
		error = copystr(dsc->name, mseg->name, len, NULL);
	} else {
		bzero(mseg->name, len);
	}

	return (error);
}

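/*
 * Allocate a memory segment for the VM.  A named segment is device memory
 * ("devmem") rather than system memory and is exposed to userland as its own
 * cdev, /dev/vmm.io/<vmname>.<segname>, created by devmem_create_cdev()
 * below.
 */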
static int
alloc_memseg(struct vmmdev_softc *sc, struct vm_memseg *mseg, size_t len)
{
	char *name;
	int error;
	bool sysmem;

	error = 0;
	name = NULL;
	sysmem = true;

	/*
	 * The allocation is lengthened by 1 to hold a terminating NUL.  It'll
	 * be stripped off when devfs processes the full string.
	 */
	if (VM_MEMSEG_NAME(mseg)) {
		sysmem = false;
		name = malloc(len, M_VMMDEV, M_WAITOK);
		error = copystr(mseg->name, name, len, NULL);
		if (error)
			goto done;
	}

	error = vm_alloc_memseg(sc->vm, mseg->segid, mseg->len, sysmem);
	if (error)
		goto done;

	if (VM_MEMSEG_NAME(mseg)) {
		error = devmem_create_cdev(sc, mseg->segid, name);
		if (error)
			vm_free_memseg(sc->vm, mseg->segid);
		else
			name = NULL;	/* freed when 'cdev' is destroyed */
	}
done:
	free(name, M_VMMDEV);
	return (error);
}

static int
vm_get_register_set(struct vcpu *vcpu, unsigned int count, int *regnum,
    uint64_t *regval)
{
	int error, i;

	error = 0;
	for (i = 0; i < count; i++) {
		error = vm_get_register(vcpu, regnum[i], &regval[i]);
		if (error)
			break;
	}
	return (error);
}

static int
vm_set_register_set(struct vcpu *vcpu, unsigned int count, int *regnum,
    uint64_t *regval)
{
	int error, i;

	error = 0;
	for (i = 0; i < count; i++) {
		error = vm_set_register(vcpu, regnum[i], regval[i]);
		if (error)
			break;
	}
	return (error);
}

static int
vmmdev_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct vmmdev_softc *sc __diagused;
	int error;

	sc = vmmdev_lookup2(dev);
	KASSERT(sc != NULL, ("%s: device not found", __func__));

	/*
	 * A jail without vmm access shouldn't be able to access vmm device
	 * files at all, but check here just to be thorough.
	 */
	error = vmm_priv_check(td->td_ucred);
	if (error != 0)
		return (error);

	return (0);
}

static const struct vmmdev_ioctl vmmdev_ioctls[] = {
	VMMDEV_IOCTL(VM_GET_REGISTER, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_SET_REGISTER, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_GET_REGISTER_SET, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_SET_REGISTER_SET, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_GET_CAPABILITY, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_SET_CAPABILITY, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_ACTIVATE_CPU, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_INJECT_EXCEPTION, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	VMMDEV_IOCTL(VM_STATS, VMMDEV_IOCTL_LOCK_ONE_VCPU),

#if defined(__amd64__) && defined(COMPAT_FREEBSD12)
	VMMDEV_IOCTL(VM_ALLOC_MEMSEG_12,
	    VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_LOCK_ALL_VCPUS),
#endif
	VMMDEV_IOCTL(VM_ALLOC_MEMSEG,
	    VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_LOCK_ALL_VCPUS),
	VMMDEV_IOCTL(VM_MMAP_MEMSEG,
	    VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_LOCK_ALL_VCPUS),
	VMMDEV_IOCTL(VM_MUNMAP_MEMSEG,
	    VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_LOCK_ALL_VCPUS),
	VMMDEV_IOCTL(VM_REINIT,
	    VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_LOCK_ALL_VCPUS),

#if defined(__amd64__) && defined(COMPAT_FREEBSD12)
	VMMDEV_IOCTL(VM_GET_MEMSEG_12, VMMDEV_IOCTL_SLOCK_MEMSEGS),
#endif
	VMMDEV_IOCTL(VM_GET_MEMSEG, VMMDEV_IOCTL_SLOCK_MEMSEGS),
	VMMDEV_IOCTL(VM_MMAP_GETNEXT, VMMDEV_IOCTL_SLOCK_MEMSEGS),

	VMMDEV_IOCTL(VM_SUSPEND_CPU, VMMDEV_IOCTL_MAYBE_ALLOC_VCPU),
	VMMDEV_IOCTL(VM_RESUME_CPU, VMMDEV_IOCTL_MAYBE_ALLOC_VCPU),

	VMMDEV_IOCTL(VM_SUSPEND, 0),
	VMMDEV_IOCTL(VM_GET_CPUS, 0),
	VMMDEV_IOCTL(VM_GET_TOPOLOGY, 0),
	VMMDEV_IOCTL(VM_SET_TOPOLOGY, 0),
};

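/*
 * Each entry above pairs an ioctl command with the locking its handler
 * requires; vmmdev_ioctl() acquires those locks before dispatching.  A
 * minimal userland sketch (the VM name and register number are
 * placeholders):
 *
 *	int fd = open("/dev/vmm/vm0", O_RDWR);
 *	struct vm_register vmreg = { .cpuid = 0, .regnum = 0 };
 *	if (ioctl(fd, VM_GET_REGISTER, &vmreg) == 0)
 *		printf("reg 0 = %#lx\n", (unsigned long)vmreg.regval);
 */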
static int
vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	struct vmmdev_softc *sc;
	struct vcpu *vcpu;
	const struct vmmdev_ioctl *ioctl;
	int error, vcpuid;

	sc = vmmdev_lookup2(cdev);
	if (sc == NULL)
		return (ENXIO);

	ioctl = NULL;
	for (size_t i = 0; i < nitems(vmmdev_ioctls); i++) {
		if (vmmdev_ioctls[i].cmd == cmd) {
			ioctl = &vmmdev_ioctls[i];
			break;
		}
	}
	if (ioctl == NULL) {
		for (size_t i = 0; i < vmmdev_machdep_ioctl_count; i++) {
			if (vmmdev_machdep_ioctls[i].cmd == cmd) {
				ioctl = &vmmdev_machdep_ioctls[i];
				break;
			}
		}
	}
	if (ioctl == NULL)
		return (ENOTTY);

	if ((ioctl->flags & VMMDEV_IOCTL_XLOCK_MEMSEGS) != 0)
		vm_xlock_memsegs(sc->vm);
	else if ((ioctl->flags & VMMDEV_IOCTL_SLOCK_MEMSEGS) != 0)
		vm_slock_memsegs(sc->vm);

	vcpu = NULL;
	vcpuid = -1;
	if ((ioctl->flags & (VMMDEV_IOCTL_LOCK_ONE_VCPU |
	    VMMDEV_IOCTL_ALLOC_VCPU | VMMDEV_IOCTL_MAYBE_ALLOC_VCPU)) != 0) {
		vcpuid = *(int *)data;
		if (vcpuid == -1) {
			if ((ioctl->flags &
			    VMMDEV_IOCTL_MAYBE_ALLOC_VCPU) == 0) {
				error = EINVAL;
				goto lockfail;
			}
		} else {
			vcpu = vm_alloc_vcpu(sc->vm, vcpuid);
			if (vcpu == NULL) {
				error = EINVAL;
				goto lockfail;
			}
			if ((ioctl->flags & VMMDEV_IOCTL_LOCK_ONE_VCPU) != 0) {
				error = vcpu_lock_one(vcpu);
				if (error)
					goto lockfail;
			}
		}
	}
	if ((ioctl->flags & VMMDEV_IOCTL_LOCK_ALL_VCPUS) != 0) {
		error = vcpu_lock_all(sc);
		if (error)
			goto lockfail;
	}

	switch (cmd) {
	case VM_SUSPEND: {
		struct vm_suspend *vmsuspend;

		vmsuspend = (struct vm_suspend *)data;
		error = vm_suspend(sc->vm, vmsuspend->how);
		break;
	}
	case VM_REINIT:
		error = vm_reinit(sc->vm);
		break;
	case VM_STAT_DESC: {
		struct vm_stat_desc *statdesc;

		statdesc = (struct vm_stat_desc *)data;
		error = vmm_stat_desc_copy(statdesc->index, statdesc->desc,
		    sizeof(statdesc->desc));
		break;
	}
	case VM_STATS: {
		struct vm_stats *vmstats;

		vmstats = (struct vm_stats *)data;
		getmicrotime(&vmstats->tv);
		error = vmm_stat_copy(vcpu, vmstats->index,
		    nitems(vmstats->statbuf), &vmstats->num_entries,
		    vmstats->statbuf);
		break;
	}
	case VM_MMAP_GETNEXT: {
		struct vm_memmap *mm;

		mm = (struct vm_memmap *)data;
		error = vm_mmap_getnext(sc->vm, &mm->gpa, &mm->segid,
		    &mm->segoff, &mm->len, &mm->prot, &mm->flags);
		break;
	}
	case VM_MMAP_MEMSEG: {
		struct vm_memmap *mm;

		mm = (struct vm_memmap *)data;
		error = vm_mmap_memseg(sc->vm, mm->gpa, mm->segid, mm->segoff,
		    mm->len, mm->prot, mm->flags);
		break;
	}
	case VM_MUNMAP_MEMSEG: {
		struct vm_munmap *mu;

		mu = (struct vm_munmap *)data;
		error = vm_munmap_memseg(sc->vm, mu->gpa, mu->len);
		break;
	}
#if defined(__amd64__) && defined(COMPAT_FREEBSD12)
	case VM_ALLOC_MEMSEG_12:
		error = alloc_memseg(sc, (struct vm_memseg *)data,
		    sizeof(((struct vm_memseg_12 *)0)->name));
		break;
	case VM_GET_MEMSEG_12:
		error = get_memseg(sc, (struct vm_memseg *)data,
		    sizeof(((struct vm_memseg_12 *)0)->name));
		break;
#endif
	case VM_ALLOC_MEMSEG:
		error = alloc_memseg(sc, (struct vm_memseg *)data,
		    sizeof(((struct vm_memseg *)0)->name));
		break;
	case VM_GET_MEMSEG:
		error = get_memseg(sc, (struct vm_memseg *)data,
		    sizeof(((struct vm_memseg *)0)->name));
		break;
	case VM_GET_REGISTER: {
		struct vm_register *vmreg;

		vmreg = (struct vm_register *)data;
		error = vm_get_register(vcpu, vmreg->regnum, &vmreg->regval);
		break;
	}
	case VM_SET_REGISTER: {
		struct vm_register *vmreg;

		vmreg = (struct vm_register *)data;
		error = vm_set_register(vcpu, vmreg->regnum, vmreg->regval);
		break;
	}
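	/*
	 * The *_REGISTER_SET variants operate on parallel arrays of register
	 * numbers and values; 'count' is bounded by VM_REG_LAST so the
	 * copyin/copyout sizes stay sane.
	 */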
	case VM_GET_REGISTER_SET: {
		struct vm_register_set *vmregset;
		uint64_t *regvals;
		int *regnums;

		vmregset = (struct vm_register_set *)data;
		if (vmregset->count > VM_REG_LAST) {
			error = EINVAL;
			break;
		}
		regvals = malloc(sizeof(regvals[0]) * vmregset->count, M_VMMDEV,
		    M_WAITOK);
		regnums = malloc(sizeof(regnums[0]) * vmregset->count, M_VMMDEV,
		    M_WAITOK);
		error = copyin(vmregset->regnums, regnums, sizeof(regnums[0]) *
		    vmregset->count);
		if (error == 0)
			error = vm_get_register_set(vcpu,
			    vmregset->count, regnums, regvals);
		if (error == 0)
			error = copyout(regvals, vmregset->regvals,
			    sizeof(regvals[0]) * vmregset->count);
		free(regvals, M_VMMDEV);
		free(regnums, M_VMMDEV);
		break;
	}
	case VM_SET_REGISTER_SET: {
		struct vm_register_set *vmregset;
		uint64_t *regvals;
		int *regnums;

		vmregset = (struct vm_register_set *)data;
		if (vmregset->count > VM_REG_LAST) {
			error = EINVAL;
			break;
		}
		regvals = malloc(sizeof(regvals[0]) * vmregset->count, M_VMMDEV,
		    M_WAITOK);
		regnums = malloc(sizeof(regnums[0]) * vmregset->count, M_VMMDEV,
		    M_WAITOK);
		error = copyin(vmregset->regnums, regnums, sizeof(regnums[0]) *
		    vmregset->count);
		if (error == 0)
			error = copyin(vmregset->regvals, regvals,
			    sizeof(regvals[0]) * vmregset->count);
		if (error == 0)
			error = vm_set_register_set(vcpu,
			    vmregset->count, regnums, regvals);
		free(regvals, M_VMMDEV);
		free(regnums, M_VMMDEV);
		break;
	}
	case VM_GET_CAPABILITY: {
		struct vm_capability *vmcap;

		vmcap = (struct vm_capability *)data;
		error = vm_get_capability(vcpu, vmcap->captype, &vmcap->capval);
		break;
	}
	case VM_SET_CAPABILITY: {
		struct vm_capability *vmcap;

		vmcap = (struct vm_capability *)data;
		error = vm_set_capability(vcpu, vmcap->captype, vmcap->capval);
		break;
	}
	case VM_ACTIVATE_CPU:
		error = vm_activate_cpu(vcpu);
		break;
	case VM_GET_CPUS: {
		struct vm_cpuset *vm_cpuset;
		cpuset_t *cpuset;
		int size;

		error = 0;
		vm_cpuset = (struct vm_cpuset *)data;
		size = vm_cpuset->cpusetsize;
		if (size < 1 || size > CPU_MAXSIZE / NBBY) {
			error = ERANGE;
			break;
		}
		cpuset = malloc(max(size, sizeof(cpuset_t)), M_TEMP,
		    M_WAITOK | M_ZERO);
		if (vm_cpuset->which == VM_ACTIVE_CPUS)
			*cpuset = vm_active_cpus(sc->vm);
		else if (vm_cpuset->which == VM_SUSPENDED_CPUS)
			*cpuset = vm_suspended_cpus(sc->vm);
		else if (vm_cpuset->which == VM_DEBUG_CPUS)
			*cpuset = vm_debug_cpus(sc->vm);
		else
			error = EINVAL;
		if (error == 0 && size < howmany(CPU_FLS(cpuset), NBBY))
			error = ERANGE;
		if (error == 0)
			error = copyout(cpuset, vm_cpuset->cpus, size);
		free(cpuset, M_TEMP);
		break;
	}
	case VM_SUSPEND_CPU:
		error = vm_suspend_cpu(sc->vm, vcpu);
		break;
	case VM_RESUME_CPU:
		error = vm_resume_cpu(sc->vm, vcpu);
		break;
	case VM_SET_TOPOLOGY: {
		struct vm_cpu_topology *topology;

		topology = (struct vm_cpu_topology *)data;
		error = vm_set_topology(sc->vm, topology->sockets,
		    topology->cores, topology->threads, topology->maxcpus);
		break;
	}
	case VM_GET_TOPOLOGY: {
		struct vm_cpu_topology *topology;

		topology = (struct vm_cpu_topology *)data;
		vm_get_topology(sc->vm, &topology->sockets, &topology->cores,
		    &topology->threads, &topology->maxcpus);
		error = 0;
		break;
	}
	default:
		error = vmmdev_machdep_ioctl(sc->vm, vcpu, cmd, data, fflag,
		    td);
		break;
	}

	if ((ioctl->flags &
	    (VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_SLOCK_MEMSEGS)) != 0)
		vm_unlock_memsegs(sc->vm);
	if ((ioctl->flags & VMMDEV_IOCTL_LOCK_ALL_VCPUS) != 0)
		vcpu_unlock_all(sc);
	else if ((ioctl->flags & VMMDEV_IOCTL_LOCK_ONE_VCPU) != 0)
		vcpu_unlock_one(vcpu);

	/*
	 * Make sure that no handler returns a kernel-internal
	 * error value to userspace.
	 */
	KASSERT(error == ERESTART || error >= 0,
	    ("vmmdev_ioctl: invalid error return %d", error));
	return (error);

lockfail:
	if ((ioctl->flags &
	    (VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_SLOCK_MEMSEGS)) != 0)
		vm_unlock_memsegs(sc->vm);
	return (error);
}

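/*
 * Translate an mmap(2) of /dev/vmm/<name> into a reference on the VM object
 * backing guest system memory.  The file offset is interpreted as a
 * guest-physical address; a userland sketch (hypothetical descriptor and
 * range):
 *
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	    vmfd, gpa);
 *
 * PROT_EXEC mappings are rejected below.
 */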
static int
vmmdev_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t mapsize,
    struct vm_object **objp, int nprot)
{
	struct vmmdev_softc *sc;
	vm_paddr_t gpa;
	size_t len;
	vm_ooffset_t segoff, first, last;
	int error, found, segid;
	bool sysmem;

	first = *offset;
	last = first + mapsize;
	if ((nprot & PROT_EXEC) || first < 0 || first >= last)
		return (EINVAL);

	sc = vmmdev_lookup2(cdev);
	if (sc == NULL) {
		/* virtual machine is in the process of being created */
		return (EINVAL);
	}

	/*
	 * Get a read lock on the guest memory map.
	 */
	vm_slock_memsegs(sc->vm);

	gpa = 0;
	found = 0;
	while (!found) {
		error = vm_mmap_getnext(sc->vm, &gpa, &segid, &segoff, &len,
		    NULL, NULL);
		if (error)
			break;

		if (first >= gpa && last <= gpa + len)
			found = 1;
		else
			gpa += len;
	}

	if (found) {
		error = vm_get_memseg(sc->vm, segid, &len, &sysmem, objp);
		KASSERT(error == 0 && *objp != NULL,
		    ("%s: invalid memory segment %d", __func__, segid));
		if (sysmem) {
			vm_object_reference(*objp);
			*offset = segoff + (first - gpa);
		} else {
			error = EINVAL;
		}
	}
	vm_unlock_memsegs(sc->vm);
	return (error);
}

static void
vmmdev_destroy(struct vmmdev_softc *sc)
{
	struct devmem_softc *dsc;
	int error __diagused;

	KASSERT(sc->cdev == NULL, ("%s: cdev not free", __func__));

	/*
	 * Destroy all cdevs:
	 *
	 * - any new operations on the 'cdev' will return an error (ENXIO).
	 *
	 * - the 'devmem' cdevs are destroyed before the virtual machine 'cdev'
	 */
	SLIST_FOREACH(dsc, &sc->devmem, link) {
		KASSERT(dsc->cdev != NULL, ("devmem cdev already destroyed"));
		devmem_destroy(dsc);
	}

	vm_disable_vcpu_creation(sc->vm);
	error = vcpu_lock_all(sc);
	KASSERT(error == 0, ("%s: error %d freezing vcpus", __func__, error));
	vm_unlock_vcpus(sc->vm);

	while ((dsc = SLIST_FIRST(&sc->devmem)) != NULL) {
		KASSERT(dsc->cdev == NULL, ("%s: devmem not free", __func__));
		SLIST_REMOVE_HEAD(&sc->devmem, link);
		free(dsc->name, M_VMMDEV);
		free(dsc, M_VMMDEV);
	}

	if (sc->vm != NULL)
		vm_destroy(sc->vm);

	if (sc->ucred != NULL)
		crfree(sc->ucred);

	sx_xlock(&vmmdev_mtx);
	SLIST_REMOVE(&head, sc, vmmdev_softc, link);
	sx_xunlock(&vmmdev_mtx);
	free(sc, M_VMMDEV);
}

static int
vmmdev_lookup_and_destroy(const char *name, struct ucred *cred)
{
	struct cdev *cdev;
	struct vmmdev_softc *sc;

	sx_xlock(&vmmdev_mtx);
	sc = vmmdev_lookup(name, cred);
	if (sc == NULL || sc->cdev == NULL) {
		sx_xunlock(&vmmdev_mtx);
		return (EINVAL);
	}

	/*
	 * Setting 'sc->cdev' to NULL is used to indicate that the VM
	 * is scheduled for destruction.
	 */
	cdev = sc->cdev;
	sc->cdev = NULL;
	sx_xunlock(&vmmdev_mtx);

	destroy_dev(cdev);
	vmmdev_destroy(sc);

	return (0);
}

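/*
 * VMs are created and torn down through the hw.vmm.create and
 * hw.vmm.destroy string sysctls, e.g. (hypothetical VM name):
 *
 *	sysctl hw.vmm.create=vm0	# creates /dev/vmm/vm0
 *	sysctl hw.vmm.destroy=vm0
 */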
static int
sysctl_vmm_destroy(SYSCTL_HANDLER_ARGS)
{
	char *buf;
	int error, buflen;

	error = vmm_priv_check(req->td->td_ucred);
	if (error)
		return (error);

	buflen = VM_MAX_NAMELEN + 1;
	buf = malloc(buflen, M_VMMDEV, M_WAITOK | M_ZERO);
	strlcpy(buf, "beavis", buflen);
	error = sysctl_handle_string(oidp, buf, buflen, req);
	if (error == 0 && req->newptr != NULL)
		error = vmmdev_lookup_and_destroy(buf, req->td->td_ucred);
	free(buf, M_VMMDEV);
	return (error);
}
SYSCTL_PROC(_hw_vmm, OID_AUTO, destroy,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_PRISON | CTLFLAG_MPSAFE,
    NULL, 0, sysctl_vmm_destroy, "A",
    NULL);

static struct cdevsw vmmdevsw = {
	.d_name		= "vmmdev",
	.d_version	= D_VERSION,
	.d_open		= vmmdev_open,
	.d_ioctl	= vmmdev_ioctl,
	.d_mmap_single	= vmmdev_mmap_single,
	.d_read		= vmmdev_rw,
	.d_write	= vmmdev_rw,
};

static struct vmmdev_softc *
vmmdev_alloc(struct vm *vm, struct ucred *cred)
{
	struct vmmdev_softc *sc;

	sc = malloc(sizeof(*sc), M_VMMDEV, M_WAITOK | M_ZERO);
	SLIST_INIT(&sc->devmem);
	sc->vm = vm;
	sc->ucred = crhold(cred);
	return (sc);
}

static int
vmmdev_create(const char *name, struct ucred *cred)
{
	struct make_dev_args mda;
	struct cdev *cdev;
	struct vmmdev_softc *sc;
	struct vm *vm;
	int error;

	sx_xlock(&vmmdev_mtx);
	sc = vmmdev_lookup(name, cred);
	if (sc != NULL) {
		sx_xunlock(&vmmdev_mtx);
		return (EEXIST);
	}

	error = vm_create(name, &vm);
	if (error != 0) {
		sx_xunlock(&vmmdev_mtx);
		return (error);
	}
	sc = vmmdev_alloc(vm, cred);
	SLIST_INSERT_HEAD(&head, sc, link);

	make_dev_args_init(&mda);
	mda.mda_devsw = &vmmdevsw;
	mda.mda_cr = sc->ucred;
	mda.mda_uid = UID_ROOT;
	mda.mda_gid = GID_WHEEL;
	mda.mda_mode = 0600;
	mda.mda_si_drv1 = sc;
	mda.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK;
	error = make_dev_s(&mda, &cdev, "vmm/%s", name);
	if (error != 0) {
		sx_xunlock(&vmmdev_mtx);
		vmmdev_destroy(sc);
		return (error);
	}
	sc->cdev = cdev;
	sx_xunlock(&vmmdev_mtx);
	return (0);
}

static int
sysctl_vmm_create(SYSCTL_HANDLER_ARGS)
{
	char *buf;
	int error, buflen;

	error = vmm_priv_check(req->td->td_ucred);
	if (error != 0)
		return (error);

	buflen = VM_MAX_NAMELEN + 1;
	buf = malloc(buflen, M_VMMDEV, M_WAITOK | M_ZERO);
	strlcpy(buf, "beavis", buflen);
	error = sysctl_handle_string(oidp, buf, buflen, req);
	if (error == 0 && req->newptr != NULL)
		error = vmmdev_create(buf, req->td->td_ucred);
	free(buf, M_VMMDEV);
	return (error);
}
SYSCTL_PROC(_hw_vmm, OID_AUTO, create,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_PRISON | CTLFLAG_MPSAFE,
    NULL, 0, sysctl_vmm_create, "A",
    NULL);

void
vmmdev_init(void)
{
	pr_allow_flag = prison_add_allow(NULL, "vmm", NULL,
	    "Allow use of vmm in a jail.");
}

int
vmmdev_cleanup(void)
{
	int error;

	if (SLIST_EMPTY(&head))
		error = 0;
	else
		error = EBUSY;

	return (error);
}

static int
devmem_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t len,
    struct vm_object **objp, int nprot)
{
	struct devmem_softc *dsc;
	vm_ooffset_t first, last;
	size_t seglen;
	int error;
	bool sysmem;

	dsc = cdev->si_drv1;
	if (dsc == NULL) {
		/* 'cdev' has been created but is not ready for use */
		return (ENXIO);
	}

	first = *offset;
	last = *offset + len;
	if ((nprot & PROT_EXEC) || first < 0 || first >= last)
		return (EINVAL);

	vm_slock_memsegs(dsc->sc->vm);

	error = vm_get_memseg(dsc->sc->vm, dsc->segid, &seglen, &sysmem, objp);
	KASSERT(error == 0 && !sysmem && *objp != NULL,
	    ("%s: invalid devmem segment %d", __func__, dsc->segid));

	if (seglen >= last)
		vm_object_reference(*objp);
	else
		error = EINVAL;

	vm_unlock_memsegs(dsc->sc->vm);
	return (error);
}

static struct cdevsw devmemsw = {
	.d_name		= "devmem",
	.d_version	= D_VERSION,
	.d_mmap_single	= devmem_mmap_single,
};

static int
devmem_create_cdev(struct vmmdev_softc *sc, int segid, char *devname)
{
	struct make_dev_args mda;
	struct devmem_softc *dsc;
	int error;

	sx_xlock(&vmmdev_mtx);

	dsc = malloc(sizeof(struct devmem_softc), M_VMMDEV, M_WAITOK | M_ZERO);
	dsc->segid = segid;
	dsc->name = devname;
	dsc->sc = sc;
	SLIST_INSERT_HEAD(&sc->devmem, dsc, link);

	make_dev_args_init(&mda);
	mda.mda_devsw = &devmemsw;
	mda.mda_cr = sc->ucred;
	mda.mda_uid = UID_ROOT;
	mda.mda_gid = GID_WHEEL;
	mda.mda_mode = 0600;
	mda.mda_si_drv1 = dsc;
	mda.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK;
	error = make_dev_s(&mda, &dsc->cdev, "vmm.io/%s.%s", vm_name(sc->vm),
	    devname);
	if (error != 0) {
		SLIST_REMOVE(&sc->devmem, dsc, devmem_softc, link);
		free(dsc->name, M_VMMDEV);
		free(dsc, M_VMMDEV);
	}

	sx_xunlock(&vmmdev_mtx);

	return (error);
}

static void
devmem_destroy(void *arg)
{
	struct devmem_softc *dsc = arg;

	destroy_dev(dsc->cdev);
	dsc->cdev = NULL;
	dsc->sc = NULL;
}