/*-
 * Copyright (c) 1999-2002 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/bio.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/vnode.h>
#include <sys/queue.h>
#include <sys/poll.h>
#include <sys/sx.h>
#include <sys/ctype.h>
#include <sys/ucred.h>
#include <sys/taskqueue.h>
#include <machine/stdarg.h>

#include <fs/devfs/devfs_int.h>
#include <vm/vm.h>

static MALLOC_DEFINE(M_DEVT, "cdev", "cdev storage");

struct mtx devmtx;
static void destroy_devl(struct cdev *dev);
static int destroy_dev_sched_cbl(struct cdev *dev,
    void (*cb)(void *), void *arg);
static int make_dev_credv(int flags, struct cdev **dres, struct cdevsw *devsw,
    int unit, struct ucred *cr, uid_t uid, gid_t gid, int mode, const char *fmt,
    va_list ap);

static struct cdev_priv_list cdevp_free_list =
    TAILQ_HEAD_INITIALIZER(cdevp_free_list);
static SLIST_HEAD(free_cdevsw, cdevsw) cdevsw_gt_post_list =
    SLIST_HEAD_INITIALIZER(cdevsw_gt_post_list);

void
dev_lock(void)
{

	mtx_lock(&devmtx);
}

/*
 * Free all the memory collected while the cdev mutex was
 * locked. Since devmtx is after the system map mutex, free() cannot
 * be called immediately and is postponed until cdev mutex can be
 * dropped.
 */
static void
dev_unlock_and_free(void)
{
	struct cdev_priv_list cdp_free;
	struct free_cdevsw csw_free;
	struct cdev_priv *cdp;
	struct cdevsw *csw;

	mtx_assert(&devmtx, MA_OWNED);

	/*
	 * Make the local copy of the list heads while the dev_mtx is
	 * held. Free it later.
	 */
	TAILQ_INIT(&cdp_free);
	TAILQ_CONCAT(&cdp_free, &cdevp_free_list, cdp_list);
	csw_free = cdevsw_gt_post_list;
	SLIST_INIT(&cdevsw_gt_post_list);

	mtx_unlock(&devmtx);

	while ((cdp = TAILQ_FIRST(&cdp_free)) != NULL) {
		TAILQ_REMOVE(&cdp_free, cdp, cdp_list);
		devfs_free(&cdp->cdp_c);
	}
	while ((csw = SLIST_FIRST(&csw_free)) != NULL) {
		SLIST_REMOVE_HEAD(&csw_free, d_postfree_list);
		free(csw, M_DEVT);
	}
}
static void
dev_free_devlocked(struct cdev *cdev)
{
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_OWNED);
	cdp = cdev2priv(cdev);
	TAILQ_INSERT_HEAD(&cdevp_free_list, cdp, cdp_list);
}

static void
cdevsw_free_devlocked(struct cdevsw *csw)
{

	mtx_assert(&devmtx, MA_OWNED);
	SLIST_INSERT_HEAD(&cdevsw_gt_post_list, csw, d_postfree_list);
}

void
dev_unlock(void)
{

	mtx_unlock(&devmtx);
}

void
dev_ref(struct cdev *dev)
{

	mtx_assert(&devmtx, MA_NOTOWNED);
	mtx_lock(&devmtx);
	dev->si_refcount++;
	mtx_unlock(&devmtx);
}

void
dev_refl(struct cdev *dev)
{

	mtx_assert(&devmtx, MA_OWNED);
	dev->si_refcount++;
}

void
dev_rel(struct cdev *dev)
{
	int flag = 0;

	mtx_assert(&devmtx, MA_NOTOWNED);
	dev_lock();
	dev->si_refcount--;
	KASSERT(dev->si_refcount >= 0,
	    ("dev_rel(%s) gave negative count", devtoname(dev)));
#if 0
	if (dev->si_usecount == 0 &&
	    (dev->si_flags & SI_CHEAPCLONE) && (dev->si_flags & SI_NAMED))
		;
	else
#endif
	if (dev->si_devsw == NULL && dev->si_refcount == 0) {
		LIST_REMOVE(dev, si_list);
		flag = 1;
	}
	dev_unlock();
	if (flag)
		devfs_free(dev);
}

struct cdevsw *
dev_refthread(struct cdev *dev)
{
	struct cdevsw *csw;
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_NOTOWNED);
	dev_lock();
	csw = dev->si_devsw;
	if (csw != NULL) {
		cdp = cdev2priv(dev);
		if ((cdp->cdp_flags & CDP_SCHED_DTR) == 0)
			dev->si_threadcount++;
		else
			csw = NULL;
	}
	dev_unlock();
	return (csw);
}

struct cdevsw *
devvn_refthread(struct vnode *vp, struct cdev **devp)
{
	struct cdevsw *csw;
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_NOTOWNED);
	csw = NULL;
	dev_lock();
	*devp = vp->v_rdev;
	if (*devp != NULL) {
		cdp = cdev2priv(*devp);
		if ((cdp->cdp_flags & CDP_SCHED_DTR) == 0) {
			csw = (*devp)->si_devsw;
			if (csw != NULL)
				(*devp)->si_threadcount++;
		}
	}
	dev_unlock();
	return (csw);
}

void
dev_relthread(struct cdev *dev)
{

	mtx_assert(&devmtx, MA_NOTOWNED);
	dev_lock();
	KASSERT(dev->si_threadcount > 0,
	    ("%s threadcount is wrong", dev->si_name));
	dev->si_threadcount--;
	dev_unlock();
}
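
/*
 * Any caller that wants to invoke a cdevsw method must hold a thread
 * reference across the call so that the driver cannot be torn down
 * underneath it.  The canonical pattern, which the giant_*() trampolines
 * below also follow, looks roughly like this (the d_ioctl call and the
 * "error" variable are illustrative only):
 *
 *	struct cdevsw *dsw;
 *	int error;
 *
 *	dsw = dev_refthread(dev);
 *	if (dsw == NULL)
 *		return (ENXIO);		(device gone, or going away)
 *	error = dsw->d_ioctl(dev, cmd, data, fflag, td);
 *	dev_relthread(dev);
 */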
int
nullop(void)
{

	return (0);
}

int
eopnotsupp(void)
{

	return (EOPNOTSUPP);
}

static int
enxio(void)
{
	return (ENXIO);
}

static int
enodev(void)
{
	return (ENODEV);
}

/* Define a dead_cdevsw for use when devices leave unexpectedly. */

#define dead_open	(d_open_t *)enxio
#define dead_close	(d_close_t *)enxio
#define dead_read	(d_read_t *)enxio
#define dead_write	(d_write_t *)enxio
#define dead_ioctl	(d_ioctl_t *)enxio
#define dead_poll	(d_poll_t *)enodev
#define dead_mmap	(d_mmap_t *)enodev

static void
dead_strategy(struct bio *bp)
{

	biofinish(bp, NULL, ENXIO);
}

#define dead_dump	(dumper_t *)enxio
#define dead_kqfilter	(d_kqfilter_t *)enxio
#define dead_mmap_single (d_mmap_single_t *)enodev

static struct cdevsw dead_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_NEEDGIANT, /* XXX: does dead_strategy need this ? */
	.d_open =	dead_open,
	.d_close =	dead_close,
	.d_read =	dead_read,
	.d_write =	dead_write,
	.d_ioctl =	dead_ioctl,
	.d_poll =	dead_poll,
	.d_mmap =	dead_mmap,
	.d_strategy =	dead_strategy,
	.d_name =	"dead",
	.d_dump =	dead_dump,
	.d_kqfilter =	dead_kqfilter,
	.d_mmap_single = dead_mmap_single
};

/* Default methods if driver does not specify method */

#define null_open	(d_open_t *)nullop
#define null_close	(d_close_t *)nullop
#define no_read		(d_read_t *)enodev
#define no_write	(d_write_t *)enodev
#define no_ioctl	(d_ioctl_t *)enodev
#define no_mmap		(d_mmap_t *)enodev
#define no_kqfilter	(d_kqfilter_t *)enodev
#define no_mmap_single	(d_mmap_single_t *)enodev

static void
no_strategy(struct bio *bp)
{

	biofinish(bp, NULL, ENODEV);
}

static int
no_poll(struct cdev *dev __unused, int events, struct thread *td __unused)
{

	return (poll_no_poll(events));
}

#define no_dump		(dumper_t *)enodev
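
/*
 * Because prep_cdevsw() below substitutes the defaults above for any
 * method left NULL, a driver only has to fill in the methods it actually
 * implements.  A minimal, purely hypothetical cdevsw could therefore look
 * like:
 *
 *	static d_read_t hypo_read;
 *
 *	static struct cdevsw hypo_cdevsw = {
 *		.d_version =	D_VERSION,
 *		.d_read =	hypo_read,
 *		.d_name =	"hypo",
 *	};
 *
 * Every other entry point then resolves to the matching null_*() or
 * no_*() stub when the cdevsw is first prepped.
 */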
static int
giant_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
	struct cdevsw *dsw;
	int retval;

	dsw = dev_refthread(dev);
	if (dsw == NULL)
		return (ENXIO);
	mtx_lock(&Giant);
	retval = dsw->d_gianttrick->d_open(dev, oflags, devtype, td);
	mtx_unlock(&Giant);
	dev_relthread(dev);
	return (retval);
}

static int
giant_fdopen(struct cdev *dev, int oflags, struct thread *td, struct file *fp)
{
	struct cdevsw *dsw;
	int retval;

	dsw = dev_refthread(dev);
	if (dsw == NULL)
		return (ENXIO);
	mtx_lock(&Giant);
	retval = dsw->d_gianttrick->d_fdopen(dev, oflags, td, fp);
	mtx_unlock(&Giant);
	dev_relthread(dev);
	return (retval);
}

static int
giant_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
	struct cdevsw *dsw;
	int retval;

	dsw = dev_refthread(dev);
	if (dsw == NULL)
		return (ENXIO);
	mtx_lock(&Giant);
	retval = dsw->d_gianttrick->d_close(dev, fflag, devtype, td);
	mtx_unlock(&Giant);
	dev_relthread(dev);
	return (retval);
}

static void
giant_strategy(struct bio *bp)
{
	struct cdevsw *dsw;
	struct cdev *dev;

	dev = bp->bio_dev;
	dsw = dev_refthread(dev);
	if (dsw == NULL) {
		biofinish(bp, NULL, ENXIO);
		return;
	}
	mtx_lock(&Giant);
	dsw->d_gianttrick->d_strategy(bp);
	mtx_unlock(&Giant);
	dev_relthread(dev);
}

static int
giant_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
{
	struct cdevsw *dsw;
	int retval;

	dsw = dev_refthread(dev);
	if (dsw == NULL)
		return (ENXIO);
	mtx_lock(&Giant);
	retval = dsw->d_gianttrick->d_ioctl(dev, cmd, data, fflag, td);
	mtx_unlock(&Giant);
	dev_relthread(dev);
	return (retval);
}

static int
giant_read(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct cdevsw *dsw;
	int retval;

	dsw = dev_refthread(dev);
	if (dsw == NULL)
		return (ENXIO);
	mtx_lock(&Giant);
	retval = dsw->d_gianttrick->d_read(dev, uio, ioflag);
	mtx_unlock(&Giant);
	dev_relthread(dev);
	return (retval);
}

static int
giant_write(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct cdevsw *dsw;
	int retval;

	dsw = dev_refthread(dev);
	if (dsw == NULL)
		return (ENXIO);
	mtx_lock(&Giant);
	retval = dsw->d_gianttrick->d_write(dev, uio, ioflag);
	mtx_unlock(&Giant);
	dev_relthread(dev);
	return (retval);
}

static int
giant_poll(struct cdev *dev, int events, struct thread *td)
{
	struct cdevsw *dsw;
	int retval;

	dsw = dev_refthread(dev);
	if (dsw == NULL)
		return (ENXIO);
	mtx_lock(&Giant);
	retval = dsw->d_gianttrick->d_poll(dev, events, td);
	mtx_unlock(&Giant);
	dev_relthread(dev);
	return (retval);
}

static int
giant_kqfilter(struct cdev *dev, struct knote *kn)
{
	struct cdevsw *dsw;
	int retval;

	dsw = dev_refthread(dev);
	if (dsw == NULL)
		return (ENXIO);
	mtx_lock(&Giant);
	retval = dsw->d_gianttrick->d_kqfilter(dev, kn);
	mtx_unlock(&Giant);
	dev_relthread(dev);
	return (retval);
}

static int
giant_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr, int nprot,
    vm_memattr_t *memattr)
{
	struct cdevsw *dsw;
	int retval;

	dsw = dev_refthread(dev);
	if (dsw == NULL)
		return (ENXIO);
	mtx_lock(&Giant);
	retval = dsw->d_gianttrick->d_mmap(dev, offset, paddr, nprot,
	    memattr);
	mtx_unlock(&Giant);
	dev_relthread(dev);
	return (retval);
}

static int
giant_mmap_single(struct cdev *dev, vm_ooffset_t *offset, vm_size_t size,
    vm_object_t *object, int nprot)
{
	struct cdevsw *dsw;
	int retval;

	dsw = dev_refthread(dev);
	if (dsw == NULL)
		return (ENXIO);
	mtx_lock(&Giant);
	retval = dsw->d_gianttrick->d_mmap_single(dev, offset, size, object,
	    nprot);
	mtx_unlock(&Giant);
	dev_relthread(dev);
	return (retval);
}

static void
notify(struct cdev *dev, const char *ev, int flags)
{
	static const char prefix[] = "cdev=";
	char *data;
	int namelen, mflags;

	if (cold)
		return;
	mflags = (flags & MAKEDEV_NOWAIT) ? M_NOWAIT : M_WAITOK;
	namelen = strlen(dev->si_name);
	data = malloc(namelen + sizeof(prefix), M_TEMP, mflags);
	if (data == NULL)
		return;
	memcpy(data, prefix, sizeof(prefix) - 1);
	memcpy(data + sizeof(prefix) - 1, dev->si_name, namelen + 1);
	devctl_notify_f("DEVFS", "CDEV", ev, data, mflags);
	free(data, M_TEMP);
}

static void
notify_create(struct cdev *dev, int flags)
{

	notify(dev, "CREATE", flags);
}

static void
notify_destroy(struct cdev *dev)
{

	notify(dev, "DESTROY", MAKEDEV_WAITOK);
}
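
/*
 * The notifications sent above surface on the devctl(4) device, where
 * devd(8) would see, for a hypothetical cdev named "hypo0", a line of
 * roughly this shape:
 *
 *	!system=DEVFS subsystem=CDEV type=CREATE cdev=hypo0
 */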
static struct cdev *
newdev(struct cdevsw *csw, int unit, struct cdev *si)
{
	struct cdev *si2;

	mtx_assert(&devmtx, MA_OWNED);
	if (csw->d_flags & D_NEEDMINOR) {
		/* We may want to return an existing device */
		LIST_FOREACH(si2, &csw->d_devs, si_list) {
			if (dev2unit(si2) == unit) {
				dev_free_devlocked(si);
				return (si2);
			}
		}
	}
	si->si_drv0 = unit;
	si->si_devsw = csw;
	LIST_INSERT_HEAD(&csw->d_devs, si, si_list);
	return (si);
}

static void
fini_cdevsw(struct cdevsw *devsw)
{
	struct cdevsw *gt;

	if (devsw->d_gianttrick != NULL) {
		gt = devsw->d_gianttrick;
		memcpy(devsw, gt, sizeof *devsw);
		cdevsw_free_devlocked(gt);
		devsw->d_gianttrick = NULL;
	}
	devsw->d_flags &= ~D_INIT;
}

static int
prep_cdevsw(struct cdevsw *devsw, int flags)
{
	struct cdevsw *dsw2;

	mtx_assert(&devmtx, MA_OWNED);
	if (devsw->d_flags & D_INIT)
		return (0);
	if (devsw->d_flags & D_NEEDGIANT) {
		dev_unlock();
		dsw2 = malloc(sizeof *dsw2, M_DEVT,
		    (flags & MAKEDEV_NOWAIT) ? M_NOWAIT : M_WAITOK);
		dev_lock();
		if (dsw2 == NULL && !(devsw->d_flags & D_INIT))
			return (ENOMEM);
	} else
		dsw2 = NULL;
	if (devsw->d_flags & D_INIT) {
		if (dsw2 != NULL)
			cdevsw_free_devlocked(dsw2);
		return (0);
	}

	if (devsw->d_version != D_VERSION_03) {
		printf(
		    "WARNING: Device driver \"%s\" has wrong version %s\n",
		    devsw->d_name == NULL ? "???" : devsw->d_name,
		    "and is disabled. Recompile KLD module.");
		devsw->d_open = dead_open;
		devsw->d_close = dead_close;
		devsw->d_read = dead_read;
		devsw->d_write = dead_write;
		devsw->d_ioctl = dead_ioctl;
		devsw->d_poll = dead_poll;
		devsw->d_mmap = dead_mmap;
		devsw->d_mmap_single = dead_mmap_single;
		devsw->d_strategy = dead_strategy;
		devsw->d_dump = dead_dump;
		devsw->d_kqfilter = dead_kqfilter;
	}

	if (devsw->d_flags & D_NEEDGIANT) {
		if (devsw->d_gianttrick == NULL) {
			memcpy(dsw2, devsw, sizeof *dsw2);
			devsw->d_gianttrick = dsw2;
			dsw2 = NULL;
		}
	}

#define FIXUP(member, noop, giant)				\
	do {							\
		if (devsw->member == NULL) {			\
			devsw->member = noop;			\
		} else if (devsw->d_flags & D_NEEDGIANT)	\
			devsw->member = giant;			\
	}							\
	while (0)

	FIXUP(d_open,		null_open,	giant_open);
	FIXUP(d_fdopen,		NULL,		giant_fdopen);
	FIXUP(d_close,		null_close,	giant_close);
	FIXUP(d_read,		no_read,	giant_read);
	FIXUP(d_write,		no_write,	giant_write);
	FIXUP(d_ioctl,		no_ioctl,	giant_ioctl);
	FIXUP(d_poll,		no_poll,	giant_poll);
	FIXUP(d_mmap,		no_mmap,	giant_mmap);
	FIXUP(d_strategy,	no_strategy,	giant_strategy);
	FIXUP(d_kqfilter,	no_kqfilter,	giant_kqfilter);
	FIXUP(d_mmap_single,	no_mmap_single,	giant_mmap_single);

	if (devsw->d_dump == NULL)
		devsw->d_dump = no_dump;

	LIST_INIT(&devsw->d_devs);

	devsw->d_flags |= D_INIT;

	if (dsw2 != NULL)
		cdevsw_free_devlocked(dsw2);
	return (0);
}

static int
make_dev_credv(int flags, struct cdev **dres, struct cdevsw *devsw, int unit,
    struct ucred *cr, uid_t uid, gid_t gid, int mode, const char *fmt,
    va_list ap)
{
	struct cdev *dev;
	int i, res;

	KASSERT((flags & MAKEDEV_WAITOK) == 0 || (flags & MAKEDEV_NOWAIT) == 0,
	    ("make_dev_credv: both WAITOK and NOWAIT specified"));
	dev = devfs_alloc(flags);
	if (dev == NULL)
		return (ENOMEM);
	dev_lock();
	res = prep_cdevsw(devsw, flags);
	if (res != 0) {
		dev_unlock();
		devfs_free(dev);
		return (res);
	}
	dev = newdev(devsw, unit, dev);
	if (flags & MAKEDEV_REF)
		dev_refl(dev);
	if (dev->si_flags & SI_CHEAPCLONE &&
	    dev->si_flags & SI_NAMED) {
		/*
		 * This is allowed as it removes races and generally
		 * simplifies cloning devices.
		 * XXX: still ??
		 */
		dev_unlock_and_free();
		*dres = dev;
		return (0);
	}
	KASSERT(!(dev->si_flags & SI_NAMED),
	    ("make_dev() by driver %s on pre-existing device (min=%x, name=%s)",
	    devsw->d_name, dev2unit(dev), devtoname(dev)));

	i = vsnrprintf(dev->__si_namebuf, sizeof dev->__si_namebuf, 32, fmt, ap);
	if (i > (sizeof dev->__si_namebuf - 1)) {
		printf("WARNING: Device name truncated! (%s)\n",
		    dev->__si_namebuf);
	}

	dev->si_flags |= SI_NAMED;
	if (cr != NULL)
		dev->si_cred = crhold(cr);
	dev->si_uid = uid;
	dev->si_gid = gid;
	dev->si_mode = mode;

	devfs_create(dev);
	clean_unrhdrl(devfs_inos);
	dev_unlock_and_free();

	notify_create(dev, flags);

	*dres = dev;
	return (0);
}
struct cdev *
make_dev(struct cdevsw *devsw, int unit, uid_t uid, gid_t gid, int mode,
    const char *fmt, ...)
{
	struct cdev *dev;
	va_list ap;
	int res;

	va_start(ap, fmt);
	res = make_dev_credv(0, &dev, devsw, unit, NULL, uid, gid, mode, fmt,
	    ap);
	va_end(ap);
	KASSERT(res == 0 && dev != NULL, ("make_dev: failed make_dev_credv"));
	return (dev);
}

struct cdev *
make_dev_cred(struct cdevsw *devsw, int unit, struct ucred *cr, uid_t uid,
    gid_t gid, int mode, const char *fmt, ...)
{
	struct cdev *dev;
	va_list ap;
	int res;

	va_start(ap, fmt);
	res = make_dev_credv(0, &dev, devsw, unit, cr, uid, gid, mode, fmt, ap);
	va_end(ap);

	KASSERT(res == 0 && dev != NULL,
	    ("make_dev_cred: failed make_dev_credv"));
	return (dev);
}

struct cdev *
make_dev_credf(int flags, struct cdevsw *devsw, int unit, struct ucred *cr,
    uid_t uid, gid_t gid, int mode, const char *fmt, ...)
{
	struct cdev *dev;
	va_list ap;
	int res;

	va_start(ap, fmt);
	res = make_dev_credv(flags, &dev, devsw, unit, cr, uid, gid, mode,
	    fmt, ap);
	va_end(ap);

	KASSERT((flags & MAKEDEV_NOWAIT) != 0 || res == 0,
	    ("make_dev_credf: failed make_dev_credv"));
	return (res == 0 ? dev : NULL);
}

int
make_dev_p(int flags, struct cdev **cdev, struct cdevsw *devsw,
    struct ucred *cr, uid_t uid, gid_t gid, int mode, const char *fmt, ...)
{
	va_list ap;
	int res;

	va_start(ap, fmt);
	res = make_dev_credv(flags, cdev, devsw, 0, cr, uid, gid, mode,
	    fmt, ap);
	va_end(ap);

	KASSERT((flags & MAKEDEV_NOWAIT) != 0 || res == 0,
	    ("make_dev_p: failed make_dev_credv"));
	return (res);
}
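
/*
 * A driver's attach routine would typically create its node with something
 * like the sketch below; the "hypo" names and softc wiring are illustrative,
 * not taken from a real driver:
 *
 *	sc->sc_cdev = make_dev(&hypo_cdevsw, unit, UID_ROOT, GID_WHEEL,
 *	    0600, "hypo%d", unit);
 *	sc->sc_cdev->si_drv1 = sc;
 *
 * make_dev() may sleep and asserts success; callers that cannot sleep, or
 * that want an error code back instead, should use make_dev_credf() or
 * make_dev_p() with MAKEDEV_NOWAIT.
 */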
static void
dev_dependsl(struct cdev *pdev, struct cdev *cdev)
{

	cdev->si_parent = pdev;
	cdev->si_flags |= SI_CHILD;
	LIST_INSERT_HEAD(&pdev->si_children, cdev, si_siblings);
}


void
dev_depends(struct cdev *pdev, struct cdev *cdev)
{

	dev_lock();
	dev_dependsl(pdev, cdev);
	dev_unlock();
}

struct cdev *
make_dev_alias(struct cdev *pdev, const char *fmt, ...)
{
	struct cdev *dev;
	va_list ap;
	int i;

	KASSERT(pdev != NULL, ("NULL pdev"));
	dev = devfs_alloc(MAKEDEV_WAITOK);
	dev_lock();
	dev->si_flags |= SI_ALIAS;
	dev->si_flags |= SI_NAMED;
	va_start(ap, fmt);
	i = vsnrprintf(dev->__si_namebuf, sizeof dev->__si_namebuf, 32, fmt, ap);
	if (i > (sizeof dev->__si_namebuf - 1)) {
		printf("WARNING: Device name truncated! (%s)\n",
		    dev->__si_namebuf);
	}
	va_end(ap);

	devfs_create(dev);
	dev_dependsl(pdev, dev);
	clean_unrhdrl(devfs_inos);
	dev_unlock();

	notify_create(dev, MAKEDEV_WAITOK);

	return (dev);
}

static void
destroy_devl(struct cdev *dev)
{
	struct cdevsw *csw;
	struct cdev_privdata *p, *p1;

	mtx_assert(&devmtx, MA_OWNED);
	KASSERT(dev->si_flags & SI_NAMED,
	    ("WARNING: Driver mistake: destroy_dev on %d\n", dev2unit(dev)));

	devfs_destroy(dev);

	/* Remove name marking */
	dev->si_flags &= ~SI_NAMED;

	/* If we are a child, remove us from the parent's list */
	if (dev->si_flags & SI_CHILD) {
		LIST_REMOVE(dev, si_siblings);
		dev->si_flags &= ~SI_CHILD;
	}

	/* Kill our children */
	while (!LIST_EMPTY(&dev->si_children))
		destroy_devl(LIST_FIRST(&dev->si_children));

	/* Remove from clone list */
	if (dev->si_flags & SI_CLONELIST) {
		LIST_REMOVE(dev, si_clone);
		dev->si_flags &= ~SI_CLONELIST;
	}

	dev->si_refcount++;	/* Avoid race with dev_rel() */
	csw = dev->si_devsw;
	dev->si_devsw = NULL;	/* already NULL for SI_ALIAS */
	while (csw != NULL && csw->d_purge != NULL && dev->si_threadcount) {
		csw->d_purge(dev);
		msleep(csw, &devmtx, PRIBIO, "devprg", hz/10);
		if (dev->si_threadcount)
			printf("Still %lu threads in %s\n",
			    dev->si_threadcount, devtoname(dev));
	}
	while (dev->si_threadcount != 0) {
		/* Use unique dummy wait ident */
		msleep(&csw, &devmtx, PRIBIO, "devdrn", hz / 10);
	}

	dev_unlock();
	notify_destroy(dev);
	mtx_lock(&cdevpriv_mtx);
	LIST_FOREACH_SAFE(p, &cdev2priv(dev)->cdp_fdpriv, cdpd_list, p1) {
		devfs_destroy_cdevpriv(p);
		mtx_lock(&cdevpriv_mtx);
	}
	mtx_unlock(&cdevpriv_mtx);
	dev_lock();

	dev->si_drv1 = 0;
	dev->si_drv2 = 0;
	bzero(&dev->__si_u, sizeof(dev->__si_u));

	if (!(dev->si_flags & SI_ALIAS)) {
		/* Remove from cdevsw list */
		LIST_REMOVE(dev, si_list);

		/* If cdevsw has no more struct cdev *'s, clean it */
		if (LIST_EMPTY(&csw->d_devs)) {
			fini_cdevsw(csw);
			wakeup(&csw->d_devs);
		}
	}
	dev->si_flags &= ~SI_ALIAS;
	dev->si_refcount--;	/* Avoid race with dev_rel() */

	if (dev->si_refcount > 0) {
		LIST_INSERT_HEAD(&dead_cdevsw.d_devs, dev, si_list);
	} else {
		dev_free_devlocked(dev);
	}
}

void
destroy_dev(struct cdev *dev)
{

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "destroy_dev");
	dev_lock();
	destroy_devl(dev);
	dev_unlock_and_free();
}

const char *
devtoname(struct cdev *dev)
{

	return (dev->si_name);
}

int
dev_stdclone(char *name, char **namep, const char *stem, int *unit)
{
	int u, i;

	i = strlen(stem);
	if (bcmp(stem, name, i) != 0)
		return (0);
	if (!isdigit(name[i]))
		return (0);
	u = 0;
	if (name[i] == '0' && isdigit(name[i+1]))
		return (0);
	while (isdigit(name[i])) {
		u *= 10;
		u += name[i++] - '0';
	}
	if (u > 0xffffff)
		return (0);
	*unit = u;
	if (namep)
		*namep = &name[i];
	if (name[i])
		return (2);
	return (1);
}
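
/*
 * Given the parser above, the following (assumed values, not from a
 * regression test) illustrates the three possible outcomes for the
 * stem "ttyp":
 *
 *	dev_stdclone("ttyp5", NULL, "ttyp", &u)   == 1, u = 5
 *	dev_stdclone("ttyp5x", &cp, "ttyp", &u)   == 2, u = 5, cp -> "x"
 *	dev_stdclone("ttyp05", NULL, "ttyp", &u)  == 0 (leading zero)
 *
 * A return of 0 means "not ours", 1 an exact match, and 2 a match with
 * trailing characters left over in *namep.
 */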
/*
 * Helper functions for cloning device drivers.
 *
 * The objective here is to make it unnecessary for the device drivers to
 * use rman or similar to manage their unit number space. Due to the way
 * we do "on-demand" devices, using rman or other "private" methods
 * will be very tricky to lock down properly once we lock down this file.
 *
 * Instead we give the drivers these routines, which put the struct cdev *'s
 * that are to be managed on their own list and give the driver the ability
 * to ask for the first free unit number or for a specific unit number.
 *
 * In addition these routines support paired devices (pty, nmdm and similar)
 * by respecting a number of "flag" bits in the minor number.
 */

struct clonedevs {
	LIST_HEAD(,cdev)	head;
};

void
clone_setup(struct clonedevs **cdp)
{

	*cdp = malloc(sizeof **cdp, M_DEVBUF, M_WAITOK | M_ZERO);
	LIST_INIT(&(*cdp)->head);
}

int
clone_create(struct clonedevs **cdp, struct cdevsw *csw, int *up,
    struct cdev **dp, int extra)
{
	struct clonedevs *cd;
	struct cdev *dev, *ndev, *dl, *de;
	int unit, low, u;

	KASSERT(*cdp != NULL,
	    ("clone_setup() not called in driver \"%s\"", csw->d_name));
	KASSERT(!(extra & CLONE_UNITMASK),
	    ("Illegal extra bits (0x%x) in clone_create", extra));
	KASSERT(*up <= CLONE_UNITMASK,
	    ("Too high unit (0x%x) in clone_create", *up));
	KASSERT(csw->d_flags & D_NEEDMINOR,
	    ("clone_create() on cdevsw without minor numbers"));


	/*
	 * Search the list for several things in one go:
	 * A preexisting match is returned immediately.
	 * The lowest free unit number if we are passed -1, and the place
	 * in the list where we should insert that new element.
	 * The place to insert a specified unit number, which may be
	 * the end of the list.
	 */
	unit = *up;
	ndev = devfs_alloc(MAKEDEV_WAITOK);
	dev_lock();
	prep_cdevsw(csw, MAKEDEV_WAITOK);
	low = extra;
	de = dl = NULL;
	cd = *cdp;
	LIST_FOREACH(dev, &cd->head, si_clone) {
		KASSERT(dev->si_flags & SI_CLONELIST,
		    ("Dev %p(%s) should be on clonelist", dev, dev->si_name));
		u = dev2unit(dev);
		if (u == (unit | extra)) {
			*dp = dev;
			dev_unlock();
			devfs_free(ndev);
			return (0);
		}
		if (unit == -1 && u == low) {
			low++;
			de = dev;
			continue;
		} else if (u < (unit | extra)) {
			de = dev;
			continue;
		} else if (u > (unit | extra)) {
			dl = dev;
			break;
		}
	}
	if (unit == -1)
		unit = low & CLONE_UNITMASK;
	dev = newdev(csw, unit | extra, ndev);
	if (dev->si_flags & SI_CLONELIST) {
		printf("dev %p (%s) is on clonelist\n", dev, dev->si_name);
		printf("unit=%d, low=%d, extra=0x%x\n", unit, low, extra);
		LIST_FOREACH(dev, &cd->head, si_clone) {
			printf("\t%p %s\n", dev, dev->si_name);
		}
		panic("foo");
	}
	KASSERT(!(dev->si_flags & SI_CLONELIST),
	    ("Dev %p(%s) should not be on clonelist", dev, dev->si_name));
	if (dl != NULL)
		LIST_INSERT_BEFORE(dl, dev, si_clone);
	else if (de != NULL)
		LIST_INSERT_AFTER(de, dev, si_clone);
	else
		LIST_INSERT_HEAD(&cd->head, dev, si_clone);
	dev->si_flags |= SI_CLONELIST;
	*up = unit;
	dev_unlock_and_free();
	return (1);
}
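
/*
 * A cloning driver is expected to call clone_create() from its dev_clone
 * event handler, roughly as sketched here (all the "hypo" names are
 * placeholders, not from this file):
 *
 *	int u;
 *	struct cdev *dev;
 *
 *	if (strcmp(name, "hypo") == 0)
 *		u = -1;				(lowest free unit)
 *	else if (dev_stdclone(name, NULL, "hypo", &u) != 1)
 *		return;				(not one of ours)
 *	if (clone_create(&hypo_clones, &hypo_cdevsw, &u, &dev, 0))
 *		dev = make_dev(&hypo_cdevsw, u, UID_ROOT, GID_WHEEL,
 *		    0600, "hypo%d", u);
 *
 * A zero return means an existing device was found and returned in *dp;
 * nonzero means the unit was reserved and the caller should go on to
 * create the struct cdev * with make_dev().
 */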
/*
 * Kill everything still on the list. The driver should already have
 * disposed of any softc hung off the struct cdev *'s at this time.
 */
void
clone_cleanup(struct clonedevs **cdp)
{
	struct cdev *dev;
	struct cdev_priv *cp;
	struct clonedevs *cd;

	cd = *cdp;
	if (cd == NULL)
		return;
	dev_lock();
	while (!LIST_EMPTY(&cd->head)) {
		dev = LIST_FIRST(&cd->head);
		LIST_REMOVE(dev, si_clone);
		KASSERT(dev->si_flags & SI_CLONELIST,
		    ("Dev %p(%s) should be on clonelist", dev, dev->si_name));
		dev->si_flags &= ~SI_CLONELIST;
		cp = cdev2priv(dev);
		if (!(cp->cdp_flags & CDP_SCHED_DTR)) {
			cp->cdp_flags |= CDP_SCHED_DTR;
			KASSERT(dev->si_flags & SI_NAMED,
			    ("Driver has goofed in cloning underways udev %x unit %x",
			    dev2udev(dev), dev2unit(dev)));
			destroy_devl(dev);
		}
	}
	dev_unlock_and_free();
	free(cd, M_DEVBUF);
	*cdp = NULL;
}

static TAILQ_HEAD(, cdev_priv) dev_ddtr =
	TAILQ_HEAD_INITIALIZER(dev_ddtr);
static struct task dev_dtr_task;

static void
destroy_dev_tq(void *ctx, int pending)
{
	struct cdev_priv *cp;
	struct cdev *dev;
	void (*cb)(void *);
	void *cb_arg;

	dev_lock();
	while (!TAILQ_EMPTY(&dev_ddtr)) {
		cp = TAILQ_FIRST(&dev_ddtr);
		dev = &cp->cdp_c;
		KASSERT(cp->cdp_flags & CDP_SCHED_DTR,
		    ("cdev %p in dev_destroy_tq without CDP_SCHED_DTR", cp));
		TAILQ_REMOVE(&dev_ddtr, cp, cdp_dtr_list);
		cb = cp->cdp_dtr_cb;
		cb_arg = cp->cdp_dtr_cb_arg;
		destroy_devl(dev);
		dev_unlock_and_free();
		dev_rel(dev);
		if (cb != NULL)
			cb(cb_arg);
		dev_lock();
	}
	dev_unlock();
}

/*
 * devmtx shall be locked on entry. devmtx will be unlocked after
 * function return.
 */
static int
destroy_dev_sched_cbl(struct cdev *dev, void (*cb)(void *), void *arg)
{
	struct cdev_priv *cp;

	mtx_assert(&devmtx, MA_OWNED);
	cp = cdev2priv(dev);
	if (cp->cdp_flags & CDP_SCHED_DTR) {
		dev_unlock();
		return (0);
	}
	dev_refl(dev);
	cp->cdp_flags |= CDP_SCHED_DTR;
	cp->cdp_dtr_cb = cb;
	cp->cdp_dtr_cb_arg = arg;
	TAILQ_INSERT_TAIL(&dev_ddtr, cp, cdp_dtr_list);
	dev_unlock();
	taskqueue_enqueue(taskqueue_swi_giant, &dev_dtr_task);
	return (1);
}

int
destroy_dev_sched_cb(struct cdev *dev, void (*cb)(void *), void *arg)
{

	dev_lock();
	return (destroy_dev_sched_cbl(dev, cb, arg));
}

int
destroy_dev_sched(struct cdev *dev)
{

	return (destroy_dev_sched_cb(dev, NULL, NULL));
}

void
destroy_dev_drain(struct cdevsw *csw)
{

	dev_lock();
	while (!LIST_EMPTY(&csw->d_devs)) {
		msleep(&csw->d_devs, &devmtx, PRIBIO, "devscd", hz/10);
	}
	dev_unlock();
}

void
drain_dev_clone_events(void)
{

	sx_xlock(&clone_drain_lock);
	sx_xunlock(&clone_drain_lock);
}

static void
devdtr_init(void *dummy __unused)
{

	TASK_INIT(&dev_dtr_task, 0, destroy_dev_tq, NULL);
}

SYSINIT(devdtr, SI_SUB_DEVFS, SI_ORDER_SECOND, devdtr_init, NULL);
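
/*
 * The destroy_dev_sched*() variants above exist for contexts where
 * destroy_dev() cannot be called directly, most notably from within one
 * of the device's own cdevsw methods: destroy_dev() would sleep waiting
 * for si_threadcount to drain and so deadlock on the caller's own thread
 * reference. A d_close method could, hypothetically, finish with:
 *
 *	if (sc->sc_dying)
 *		destroy_dev_sched_cb(dev, hypo_reclaim_softc, sc);
 *
 * where hypo_reclaim_softc() then runs from the taskqueue after the cdev
 * has been destroyed and the reference taken by the scheduler has been
 * dropped.
 */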