1 /*- 2 * Copyright (c) 1999-2002 Poul-Henning Kamp 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/vnode.h>
#include <sys/queue.h>
#include <sys/poll.h>
#include <sys/sx.h>
#include <sys/ctype.h>
#include <sys/tty.h>
#include <sys/ucred.h>
#include <sys/taskqueue.h>
#include <machine/stdarg.h>

#include <fs/devfs/devfs_int.h>

static MALLOC_DEFINE(M_DEVT, "cdev", "cdev storage");

/* Global mutex protecting cdev lists, reference counts and flags. */
struct mtx devmtx;
static void destroy_devl(struct cdev *dev);
static int destroy_dev_sched_cbl(struct cdev *dev,
    void (*cb)(void *), void *arg);
static struct cdev *make_dev_credv(int flags,
    struct cdevsw *devsw, int minornr,
    struct ucred *cr, uid_t uid, gid_t gid, int mode, const char *fmt,
    va_list ap);

/* cdevs queued here are returned to devfs by dev_unlock_and_free(). */
static struct cdev_priv_list cdevp_free_list =
    TAILQ_HEAD_INITIALIZER(cdevp_free_list);

/* Acquire the global cdev mutex. */
void
dev_lock(void)
{

	mtx_lock(&devmtx);
}

/*
 * Drop devmtx, freeing everything queued on cdevp_free_list first.
 * The mutex is released around each devfs_free() call because that
 * function may sleep; the list head is re-read after every reacquire.
 * Must be entered with devmtx held; returns with it released.
 */
static void
dev_unlock_and_free(void)
{
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_OWNED);
	while ((cdp = TAILQ_FIRST(&cdevp_free_list)) != NULL) {
		TAILQ_REMOVE(&cdevp_free_list, cdp, cdp_list);
		mtx_unlock(&devmtx);
		devfs_free(&cdp->cdp_c);
		mtx_lock(&devmtx);
	}
	mtx_unlock(&devmtx);
}

/*
 * Queue a cdev for deferred freeing; the actual devfs_free() happens
 * later in dev_unlock_and_free().  Caller must hold devmtx.
 */
static void
dev_free_devlocked(struct cdev *cdev)
{
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_OWNED);
	cdp = cdev->si_priv;
	TAILQ_INSERT_HEAD(&cdevp_free_list, cdp, cdp_list);
}

/* Release the global cdev mutex. */
void
dev_unlock(void)
{

	mtx_unlock(&devmtx);
}

/* Acquire a reference on a cdev; devmtx must not be held by caller. */
void
dev_ref(struct cdev *dev)
{

	mtx_assert(&devmtx, MA_NOTOWNED);
	mtx_lock(&devmtx);
	dev->si_refcount++;
	mtx_unlock(&devmtx);
}

/* As dev_ref(), but for callers that already hold devmtx. */
void
dev_refl(struct cdev *dev)
{

	mtx_assert(&devmtx, MA_OWNED);
	dev->si_refcount++;
}

/*
 * Drop a reference on a cdev.  If this was the last reference and the
 * device has already been destroyed (si_devsw == NULL), unlink it and
 * hand its storage back to devfs.
 */
void
dev_rel(struct cdev *dev)
{
	int flag = 0;

	mtx_assert(&devmtx, MA_NOTOWNED);
	dev_lock();
	dev->si_refcount--;
	KASSERT(dev->si_refcount >= 0,
	    ("dev_rel(%s) gave negative count", devtoname(dev)));
#if 0
	if (dev->si_usecount == 0 &&
	    (dev->si_flags & SI_CHEAPCLONE) && (dev->si_flags & SI_NAMED))
		;
	else
#endif
	if (dev->si_devsw == NULL && dev->si_refcount == 0) {
		LIST_REMOVE(dev, si_list);
		flag = 1;
	}
	dev_unlock();
	/* Free outside the lock: devfs_free() may sleep. */
	if (flag)
		devfs_free(dev);
}

/*
 * Return the cdevsw of a device while accounting for a thread entering
 * the driver (si_threadcount++).  Returns NULL if the device has no
 * cdevsw or its destruction has been scheduled (CDP_SCHED_DTR), in
 * which case no reference is taken.  Balance with dev_relthread().
 */
struct cdevsw *
dev_refthread(struct cdev *dev)
{
	struct cdevsw *csw;
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_NOTOWNED);
	dev_lock();
	csw = dev->si_devsw;
	if (csw != NULL) {
		cdp = dev->si_priv;
		if ((cdp->cdp_flags & CDP_SCHED_DTR) == 0)
			dev->si_threadcount++;
		else
			csw = NULL;
	}
	dev_unlock();
	return (csw);
}

/*
 * As dev_refthread(), but starting from a vnode: resolve v_rdev under
 * devmtx and return both the cdev (via *devp) and its cdevsw.  *devp
 * is set even when NULL is returned; only a non-NULL return implies
 * that si_threadcount was bumped.
 */
struct cdevsw *
devvn_refthread(struct vnode *vp, struct cdev **devp)
{
	struct cdevsw *csw;
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_NOTOWNED);
	csw = NULL;
	dev_lock();
	*devp = vp->v_rdev;
	if (*devp != NULL) {
		cdp = (*devp)->si_priv;
		if ((cdp->cdp_flags & CDP_SCHED_DTR) == 0) {
			csw = (*devp)->si_devsw;
			if (csw != NULL)
				(*devp)->si_threadcount++;
		}
	}
	dev_unlock();
	return (csw);
}

/* Drop the thread count taken by dev_refthread()/devvn_refthread(). */
void
dev_relthread(struct cdev *dev)
{

	mtx_assert(&devmtx, MA_NOTOWNED);
	dev_lock();
	dev->si_threadcount--;
	dev_unlock();
}

/* Generic always-succeed method. */
int
nullop(void)
{

	return (0);
}

/* Generic "operation not supported" method. */
int
eopnotsupp(void)
{

	return (EOPNOTSUPP);
}

/* "Device not configured" stub, cast into cdevsw method slots below. */
static int
enxio(void)
{
	return (ENXIO);
}

/* "Operation not supported by device" stub for cdevsw method slots. */
static int
enodev(void)
{
	return (ENODEV);
}

/* Define a dead_cdevsw for use when devices leave unexpectedly.
 */

#define dead_open	(d_open_t *)enxio
#define dead_close	(d_close_t *)enxio
#define dead_read	(d_read_t *)enxio
#define dead_write	(d_write_t *)enxio
#define dead_ioctl	(d_ioctl_t *)enxio
#define dead_poll	(d_poll_t *)enodev
#define dead_mmap	(d_mmap_t *)enodev

/* Complete any in-flight I/O with ENXIO. */
static void
dead_strategy(struct bio *bp)
{

	biofinish(bp, NULL, ENXIO);
}

#define dead_dump	(dumper_t *)enxio
#define dead_kqfilter	(d_kqfilter_t *)enxio

/*
 * cdevsw installed on devices whose driver has gone away; every entry
 * point fails cleanly instead of jumping through stale pointers.
 */
static struct cdevsw dead_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_NEEDGIANT, /* XXX: does dead_strategy need this ? */
	.d_open =	dead_open,
	.d_close =	dead_close,
	.d_read =	dead_read,
	.d_write =	dead_write,
	.d_ioctl =	dead_ioctl,
	.d_poll =	dead_poll,
	.d_mmap =	dead_mmap,
	.d_strategy =	dead_strategy,
	.d_name =	"dead",
	.d_dump =	dead_dump,
	.d_kqfilter =	dead_kqfilter
};

/* Default methods if driver does not specify method */

#define null_open	(d_open_t *)nullop
#define null_close	(d_close_t *)nullop
#define no_read		(d_read_t *)enodev
#define no_write	(d_write_t *)enodev
#define no_ioctl	(d_ioctl_t *)enodev
#define no_mmap		(d_mmap_t *)enodev
#define no_kqfilter	(d_kqfilter_t *)enodev

/* Default strategy: complete any I/O with ENODEV. */
static void
no_strategy(struct bio *bp)
{

	biofinish(bp, NULL, ENODEV);
}

/* Default poll method for drivers that do not provide one. */
static int
no_poll(struct cdev *dev __unused, int events, struct thread *td __unused)
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 * Stay in sync with vop_nopoll().
	 */
	if (events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

#define no_dump		(dumper_t *)enodev

/*
 * Giant-locked trampolines for drivers flagged D_NEEDGIANT: each one
 * takes Giant, calls the driver's original method (preserved in the
 * d_gianttrick copy made by prep_cdevsw()) and releases Giant again.
 */
static int
giant_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
	int retval;

	mtx_lock(&Giant);
	retval = dev->si_devsw->d_gianttrick->
	    d_open(dev, oflags, devtype, td);
	mtx_unlock(&Giant);
	return (retval);
}

/* Giant-locked wrapper around the driver's d_fdopen method. */
static int
giant_fdopen(struct cdev *dev, int oflags, struct thread *td, struct file *fp)
{
	int retval;

	mtx_lock(&Giant);
	retval = dev->si_devsw->d_gianttrick->
	    d_fdopen(dev, oflags, td, fp);
	mtx_unlock(&Giant);
	return (retval);
}

/* Giant-locked wrapper around the driver's d_close method. */
static int
giant_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
	int retval;

	mtx_lock(&Giant);
	retval = dev->si_devsw->d_gianttrick->
	    d_close(dev, fflag, devtype, td);
	mtx_unlock(&Giant);
	return (retval);
}

/* Giant-locked wrapper around the driver's d_strategy method. */
static void
giant_strategy(struct bio *bp)
{

	mtx_lock(&Giant);
	bp->bio_dev->si_devsw->d_gianttrick->
	    d_strategy(bp);
	mtx_unlock(&Giant);
}

/* Giant-locked wrapper around the driver's d_ioctl method. */
static int
giant_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
{
	int retval;

	mtx_lock(&Giant);
	retval = dev->si_devsw->d_gianttrick->
	    d_ioctl(dev, cmd, data, fflag, td);
	mtx_unlock(&Giant);
	return (retval);
}

/* Giant-locked wrapper around the driver's d_read method. */
static int
giant_read(struct cdev *dev, struct uio *uio, int ioflag)
{
	int retval;

	mtx_lock(&Giant);
	retval = dev->si_devsw->d_gianttrick->
	    d_read(dev, uio, ioflag);
	mtx_unlock(&Giant);
	return (retval);
}

/* Giant-locked wrapper around the driver's d_write method. */
static int
giant_write(struct cdev *dev, struct uio *uio, int ioflag)
{
	int retval;

	mtx_lock(&Giant);
	retval = dev->si_devsw->d_gianttrick->
	    d_write(dev, uio, ioflag);
	mtx_unlock(&Giant);
	return (retval);
}

/* Giant-locked wrapper around the driver's d_poll method. */
static int
giant_poll(struct cdev *dev, int events, struct thread *td)
{
	int retval;

	mtx_lock(&Giant);
	retval = dev->si_devsw->d_gianttrick->
	    d_poll(dev, events, td);
	mtx_unlock(&Giant);
	return (retval);
}

/* Giant-locked wrapper around the driver's d_kqfilter method. */
static int
giant_kqfilter(struct cdev *dev, struct knote *kn)
{
	int retval;

	mtx_lock(&Giant);
	retval = dev->si_devsw->d_gianttrick->
	    d_kqfilter(dev, kn);
	mtx_unlock(&Giant);
	return (retval);
}

/* Giant-locked wrapper around the driver's d_mmap method. */
static int
giant_mmap(struct cdev *dev, vm_offset_t offset, vm_paddr_t *paddr, int nprot)
{
	int retval;

	mtx_lock(&Giant);
	retval = dev->si_devsw->d_gianttrick->
	    d_mmap(dev, offset, paddr, nprot);
	mtx_unlock(&Giant);
	return (retval);
}


/*
 * struct cdev * and u_dev_t primitives
 */

/* Extract the minor number stored in si_drv0; NODEV for a NULL cdev. */
int
minor(struct cdev *x)
{
	if (x == NULL)
		return NODEV;
	return(x->si_drv0 & MAXMINOR);
}

/* Map a cdev to its unit number; NODEV for a NULL cdev. */
int
dev2unit(struct cdev *x)
{

	if (x == NULL)
		return NODEV;
	return (minor2unit(minor(x)));
}

/*
 * Convert a minor number to a unit number by collapsing the 8-bit
 * gap that the historic major field occupies inside the minor.
 */
u_int
minor2unit(u_int _minor)
{

	KASSERT((_minor & ~MAXMINOR) == 0, ("Illegal minor %x", _minor));
	return ((_minor & 0xff) | ((_minor >> 8) & 0xffff00));
}

/* Inverse of minor2unit(): spread a unit number into minor format. */
int
unit2minor(int unit)
{

	KASSERT(unit <= 0xffffff, ("Invalid unit (%d) in unit2minor", unit));
	return ((unit & 0xff) | ((unit << 8) & ~0xffff));
}

/*
 * Return the cdev for minor 'y' on cdevsw 'csw', reusing 'si' (a fresh
 * allocation from devfs_alloc()) unless a cdev with the same si_drv0
 * already exists, in which case 'si' is queued for freeing and the
 * existing cdev is returned.  Caller must hold devmtx.
 */
static struct cdev *
newdev(struct cdevsw *csw, int y, struct cdev *si)
{
	struct cdev *si2;
	dev_t udev;

	mtx_assert(&devmtx, MA_OWNED);
	udev = y;
	LIST_FOREACH(si2, &csw->d_devs, si_list) {
		if (si2->si_drv0 == udev) {
			dev_free_devlocked(si);
			return (si2);
		}
	}
	si->si_drv0 = udev;
	si->si_devsw = csw;
	LIST_INSERT_HEAD(&csw->d_devs, si, si_list);
	return (si);
}

/* Extract the minor part of a dev_t. */
int
uminor(dev_t dev)
{
	return (dev & MAXMINOR);
}

/* Extract the major part of a dev_t. */
int
umajor(dev_t dev)
{
	return ((dev & ~MAXMINOR) >> 8);
}

/*
 * Undo prep_cdevsw(): restore the driver's original methods from the
 * d_gianttrick copy (if any) and clear D_INIT so the cdevsw may be
 * prepped again on next use.
 */
static void
fini_cdevsw(struct cdevsw *devsw)
{
	struct cdevsw *gt;

	if (devsw->d_gianttrick != NULL) {
		gt = devsw->d_gianttrick;
		memcpy(devsw, gt, sizeof *devsw);
		free(gt, M_DEVT);
		devsw->d_gianttrick = NULL;
	}
	devsw->d_flags &= ~D_INIT;
}

/*
 * One-time preparation of a driver's cdevsw: disable drivers built
 * against a wrong D_VERSION, fill in tty/default methods for missing
 * entries and, for D_NEEDGIANT drivers, save the original methods in
 * d_gianttrick and install the giant_* trampolines.  The possible
 * malloc is done before taking devmtx since M_WAITOK may sleep.
 */
static void
prep_cdevsw(struct cdevsw *devsw)
{
	struct cdevsw *dsw2;

	if (devsw->d_flags & D_NEEDGIANT)
		dsw2 = malloc(sizeof *dsw2, M_DEVT, M_WAITOK);
	else
		dsw2 = NULL;
	dev_lock();

	if (devsw->d_version != D_VERSION_01) {
		printf(
		    "WARNING: Device driver \"%s\" has wrong version %s\n",
		    devsw->d_name == NULL ? "???" : devsw->d_name,
		    "and is disabled. Recompile KLD module.");
		devsw->d_open = dead_open;
		devsw->d_close = dead_close;
		devsw->d_read = dead_read;
		devsw->d_write = dead_write;
		devsw->d_ioctl = dead_ioctl;
		devsw->d_poll = dead_poll;
		devsw->d_mmap = dead_mmap;
		devsw->d_strategy = dead_strategy;
		devsw->d_dump = dead_dump;
		devsw->d_kqfilter = dead_kqfilter;
	}

	if (devsw->d_flags & D_TTY) {
		if (devsw->d_ioctl == NULL)	devsw->d_ioctl = ttyioctl;
		if (devsw->d_read == NULL)	devsw->d_read = ttyread;
		if (devsw->d_write == NULL)	devsw->d_write = ttywrite;
		if (devsw->d_kqfilter == NULL)	devsw->d_kqfilter = ttykqfilter;
		if (devsw->d_poll == NULL)	devsw->d_poll = ttypoll;
	}

	if (devsw->d_flags & D_NEEDGIANT) {
		if (devsw->d_gianttrick == NULL) {
			memcpy(dsw2, devsw, sizeof *dsw2);
			devsw->d_gianttrick = dsw2;
		} else
			free(dsw2, M_DEVT);
	}

/*
 * Fill in a default for a missing method, or install the Giant
 * trampoline when the driver supplied one but needs Giant.
 */
#define FIXUP(member, noop, giant) 				\
	do {							\
		if (devsw->member == NULL) {			\
			devsw->member = noop;			\
		} else if (devsw->d_flags & D_NEEDGIANT)	\
			devsw->member = giant;			\
	}							\
	while (0)

	FIXUP(d_open,		null_open,	giant_open);
	FIXUP(d_fdopen,		NULL,		giant_fdopen);
	FIXUP(d_close,		null_close,	giant_close);
	FIXUP(d_read,		no_read,	giant_read);
	FIXUP(d_write,		no_write,	giant_write);
	FIXUP(d_ioctl,		no_ioctl,	giant_ioctl);
	FIXUP(d_poll,		no_poll,	giant_poll);
	FIXUP(d_mmap,		no_mmap,	giant_mmap);
	FIXUP(d_strategy,	no_strategy,	giant_strategy);
	FIXUP(d_kqfilter,	no_kqfilter,	giant_kqfilter);

	if (devsw->d_dump == NULL)	devsw->d_dump = no_dump;

	LIST_INIT(&devsw->d_devs);

	devsw->d_flags |= D_INIT;

	dev_unlock();
}

/*
 * Common backend for make_dev()/make_dev_cred()/make_dev_credf():
 * create (or find) the cdev for 'minornr', name it from fmt/ap and
 * register it with devfs.  MAKEDEV_REF in flags takes an extra
 * reference on the returned cdev.  Note: the forward declaration
 * above gives this definition internal linkage.
 */
struct cdev *
make_dev_credv(int flags, struct cdevsw *devsw, int minornr,
    struct ucred *cr, uid_t uid,
    gid_t gid, int mode, const char *fmt, va_list ap)
{
	struct cdev *dev;
	int i;

	KASSERT((minornr & ~MAXMINOR) == 0,
	    ("Invalid minor (0x%x) in make_dev", minornr));

	if (!(devsw->d_flags & D_INIT))
		prep_cdevsw(devsw);
	dev = devfs_alloc();
	dev_lock();
	dev = newdev(devsw, minornr, dev);
	if (flags & MAKEDEV_REF)
		dev_refl(dev);
	if (dev->si_flags & SI_CHEAPCLONE &&
	    dev->si_flags & SI_NAMED) {
		/*
		 * This is allowed as it removes races and generally
		 * simplifies cloning devices.
		 * XXX: still ??
		 */
		dev_unlock_and_free();
		return (dev);
	}
	KASSERT(!(dev->si_flags & SI_NAMED),
	    ("make_dev() by driver %s on pre-existing device (min=%x, name=%s)",
	    devsw->d_name, minor(dev), devtoname(dev)));

	i = vsnrprintf(dev->__si_namebuf, sizeof dev->__si_namebuf, 32, fmt, ap);
	if (i > (sizeof dev->__si_namebuf - 1)) {
		printf("WARNING: Device name truncated! (%s)\n",
		    dev->__si_namebuf);
	}

	dev->si_flags |= SI_NAMED;
	if (cr != NULL)
		dev->si_cred = crhold(cr);
	else
		dev->si_cred = NULL;
	dev->si_uid = uid;
	dev->si_gid = gid;
	dev->si_mode = mode;

	devfs_create(dev);
	clean_unrhdrl(devfs_inos);
	dev_unlock();
	return (dev);
}

/* Create a named device node; see make_dev_credv() for the work. */
struct cdev *
make_dev(struct cdevsw *devsw, int minornr, uid_t uid, gid_t gid, int mode,
    const char *fmt, ...)
{
	struct cdev *dev;
	va_list ap;

	va_start(ap, fmt);
	dev = make_dev_credv(0, devsw, minornr, NULL, uid, gid, mode, fmt, ap);
	va_end(ap);
	return (dev);
}

/* As make_dev(), but also attach the given credential to the cdev. */
struct cdev *
make_dev_cred(struct cdevsw *devsw, int minornr, struct ucred *cr, uid_t uid,
    gid_t gid, int mode, const char *fmt, ...)
{
	struct cdev *dev;
	va_list ap;

	va_start(ap, fmt);
	dev = make_dev_credv(0, devsw, minornr, cr, uid, gid, mode, fmt, ap);
	va_end(ap);

	return (dev);
}

/* As make_dev_cred(), with MAKEDEV_* flags (e.g. MAKEDEV_REF). */
struct cdev *
make_dev_credf(int flags, struct cdevsw *devsw, int minornr,
    struct ucred *cr, uid_t uid,
    gid_t gid, int mode, const char *fmt, ...)
{
	struct cdev *dev;
	va_list ap;

	va_start(ap, fmt);
	dev = make_dev_credv(flags, devsw, minornr, cr, uid, gid, mode,
	    fmt, ap);
	va_end(ap);

	return (dev);
}

/* Link cdev as a child of pdev; caller must hold devmtx. */
static void
dev_dependsl(struct cdev *pdev, struct cdev *cdev)
{

	cdev->si_parent = pdev;
	cdev->si_flags |= SI_CHILD;
	LIST_INSERT_HEAD(&pdev->si_children, cdev, si_siblings);
}


/*
 * Make cdev a child of pdev so that destroying pdev also destroys
 * cdev (see destroy_devl()).
 */
void
dev_depends(struct cdev *pdev, struct cdev *cdev)
{

	dev_lock();
	dev_dependsl(pdev, cdev);
	dev_unlock();
}

/*
 * Create an alias node for an existing device.  The alias has no
 * cdevsw of its own (SI_ALIAS) and is made a child of pdev so that it
 * disappears together with it.
 */
struct cdev *
make_dev_alias(struct cdev *pdev, const char *fmt, ...)
{
	struct cdev *dev;
	va_list ap;
	int i;

	dev = devfs_alloc();
	dev_lock();
	dev->si_flags |= SI_ALIAS;
	dev->si_flags |= SI_NAMED;
	va_start(ap, fmt);
	i = vsnrprintf(dev->__si_namebuf, sizeof dev->__si_namebuf, 32, fmt, ap);
	if (i > (sizeof dev->__si_namebuf - 1)) {
		printf("WARNING: Device name truncated! (%s)\n",
		    dev->__si_namebuf);
	}
	va_end(ap);

	devfs_create(dev);
	clean_unrhdrl(devfs_inos);
	dev_unlock();
	dev_depends(pdev, dev);
	return (dev);
}

/*
 * Destroy a device: remove it from devfs, unhook it from its parent
 * and children, drain all threads still inside the driver and either
 * free it or (if references remain) park it on dead_cdevsw's list
 * until dev_rel() drops the last reference.  Caller must hold devmtx;
 * note that the msleep() calls drop and reacquire it.
 */
static void
destroy_devl(struct cdev *dev)
{
	struct cdevsw *csw;

	mtx_assert(&devmtx, MA_OWNED);
	KASSERT(dev->si_flags & SI_NAMED,
	    ("WARNING: Driver mistake: destroy_dev on %d\n", minor(dev)));

	devfs_destroy(dev);

	/* Remove name marking */
	dev->si_flags &= ~SI_NAMED;

	/* If we are a child, remove us from the parents list */
	if (dev->si_flags & SI_CHILD) {
		LIST_REMOVE(dev, si_siblings);
		dev->si_flags &= ~SI_CHILD;
	}

	/* Kill our children */
	while (!LIST_EMPTY(&dev->si_children))
		destroy_devl(LIST_FIRST(&dev->si_children));

	/* Remove from clone list */
	if (dev->si_flags & SI_CLONELIST) {
		LIST_REMOVE(dev, si_clone);
		dev->si_flags &= ~SI_CLONELIST;
	}

	dev->si_refcount++;	/* Avoid race with dev_rel() */
	csw = dev->si_devsw;
	dev->si_devsw = NULL;	/* already NULL for SI_ALIAS */
	/* Ask the driver to purge, then wait for threads to drain. */
	while (csw != NULL && csw->d_purge != NULL && dev->si_threadcount) {
		csw->d_purge(dev);
		msleep(csw, &devmtx, PRIBIO, "devprg", hz/10);
		if (dev->si_threadcount)
			printf("Still %lu threads in %s\n",
			    dev->si_threadcount, devtoname(dev));
	}
	while (dev->si_threadcount != 0) {
		/* Use unique dummy wait ident */
		msleep(&csw, &devmtx, PRIBIO, "devdrn", hz / 10);
	}

	dev->si_drv1 = 0;
	dev->si_drv2 = 0;
	bzero(&dev->__si_u, sizeof(dev->__si_u));

	if (!(dev->si_flags & SI_ALIAS)) {
		/* Remove from cdevsw list */
		LIST_REMOVE(dev, si_list);

		/* If cdevsw has no more struct cdev *'s, clean it */
		if (LIST_EMPTY(&csw->d_devs)) {
			fini_cdevsw(csw);
			/* Wake destroy_dev_drain() waiters. */
			wakeup(&csw->d_devs);
		}
	}
	dev->si_flags &= ~SI_ALIAS;
	dev->si_refcount--;	/* Avoid race with dev_rel() */

	if (dev->si_refcount > 0) {
		LIST_INSERT_HEAD(&dead_cdevsw.d_devs, dev, si_list);
	} else {
		dev_free_devlocked(dev);
	}
}

/* Public entry point: destroy a device synchronously. */
void
destroy_dev(struct cdev *dev)
{

	dev_lock();
	destroy_devl(dev);
	dev_unlock_and_free();
}

/*
 * Return a printable name for the device, synthesizing one of the
 * form "(drivername)/minor" into si_name for unnamed devices.
 */
const char *
devtoname(struct cdev *dev)
{
	char *p;
	struct cdevsw *csw;
	int mynor;

	if (dev->si_name[0] == '#' || dev->si_name[0] == '\0') {
		p = dev->si_name;
		csw = dev_refthread(dev);
		if (csw != NULL) {
			sprintf(p, "(%s)", csw->d_name);
			dev_relthread(dev);
		}
		p += strlen(p);
		mynor = minor(dev);
		if (mynor < 0 || mynor > 255)
			sprintf(p, "/%#x", (u_int)mynor);
		else
			sprintf(p, "/%d", mynor);
	}
	return (dev->si_name);
}

/*
 * Parse a clone device name of the form "<stem><unit>[rest]".
 * Returns 0 on no match (wrong stem, missing digits, leading zero or
 * unit > 0xffffff), 1 for an exact "<stem><unit>" match, 2 if there
 * is a suffix after the unit.  The unit goes into *unit and, if
 * namep is non-NULL, *namep points at the suffix.
 */
int
dev_stdclone(char *name, char **namep, const char *stem, int *unit)
{
	int u, i;

	i = strlen(stem);
	if (bcmp(stem, name, i) != 0)
		return (0);
	if (!isdigit(name[i]))
		return (0);
	u = 0;
	if (name[i] == '0' && isdigit(name[i+1]))
		return (0);
	while (isdigit(name[i])) {
		u *= 10;
		u += name[i++] - '0';
	}
	if (u > 0xffffff)
		return (0);
	*unit = u;
	if (namep)
		*namep = &name[i];
	if (name[i])
		return (2);
	return (1);
}

/*
 * Helper functions for cloning device drivers.
 *
 * The objective here is to make it unnecessary for the device drivers to
 * use rman or similar to manage their unit number space. Due to the way
 * we do "on-demand" devices, using rman or other "private" methods
 * will be very tricky to lock down properly once we lock down this file.
 *
 * Instead we give the drivers these routines which puts the struct cdev *'s
 * that are to be managed on their own list, and gives the driver the ability
 * to ask for the first free unit number or a given specified unit number.
 *
 * In addition these routines support paired devices (pty, nmdm and similar)
 * by respecting a number of "flag" bits in the minor number.
 *
 */

/* Per-driver list of cloned devices, kept sorted by unit number. */
struct clonedevs {
	LIST_HEAD(,cdev)	head;
};

/* Allocate and initialize a driver's clone list. */
void
clone_setup(struct clonedevs **cdp)
{

	*cdp = malloc(sizeof **cdp, M_DEVBUF, M_WAITOK | M_ZERO);
	LIST_INIT(&(*cdp)->head);
}

/*
 * Find or create the clone device for unit *up (or the lowest free
 * unit if *up == -1), with 'extra' flag bits folded into the minor.
 * Returns 0 and sets *dp if the device already existed, 1 if a new
 * cdev was created; *up is updated with the unit actually used.
 */
int
clone_create(struct clonedevs **cdp, struct cdevsw *csw, int *up, struct cdev **dp, int extra)
{
	struct clonedevs *cd;
	struct cdev *dev, *ndev, *dl, *de;
	int unit, low, u;

	KASSERT(*cdp != NULL,
	    ("clone_setup() not called in driver \"%s\"", csw->d_name));
	KASSERT(!(extra & CLONE_UNITMASK),
	    ("Illegal extra bits (0x%x) in clone_create", extra));
	KASSERT(*up <= CLONE_UNITMASK,
	    ("Too high unit (0x%x) in clone_create", *up));

	if (!(csw->d_flags & D_INIT))
		prep_cdevsw(csw);

	/*
	 * Search the list for a lot of things in one go:
	 * A preexisting match is returned immediately.
	 * The lowest free unit number if we are passed -1, and the place
	 * in the list where we should insert that new element.
	 * The place to insert a specified unit number, if applicable
	 * the end of the list.
	 */
	unit = *up;
	/* Allocate before taking devmtx; devfs_alloc() may sleep. */
	ndev = devfs_alloc();
	dev_lock();
	low = extra;
	de = dl = NULL;
	cd = *cdp;
	LIST_FOREACH(dev, &cd->head, si_clone) {
		KASSERT(dev->si_flags & SI_CLONELIST,
		    ("Dev %p(%s) should be on clonelist", dev, dev->si_name));
		u = dev2unit(dev);
		if (u == (unit | extra)) {
			/* Exact match: return the existing device. */
			*dp = dev;
			dev_unlock();
			devfs_free(ndev);
			return (0);
		}
		if (unit == -1 && u == low) {
			/* Unit still in use; keep scanning upwards. */
			low++;
			de = dev;
			continue;
		} else if (u < (unit | extra)) {
			de = dev;	/* Insert after this one. */
			continue;
		} else if (u > (unit | extra)) {
			dl = dev;	/* Insert before this one. */
			break;
		}
	}
	if (unit == -1)
		unit = low & CLONE_UNITMASK;
	dev = newdev(csw, unit2minor(unit | extra), ndev);
	if (dev->si_flags & SI_CLONELIST) {
		/* Should be impossible: dump state before panicking. */
		printf("dev %p (%s) is on clonelist\n", dev, dev->si_name);
		printf("unit=%d, low=%d, extra=0x%x\n", unit, low, extra);
		LIST_FOREACH(dev, &cd->head, si_clone) {
			printf("\t%p %s\n", dev, dev->si_name);
		}
		panic("foo");
	}
	KASSERT(!(dev->si_flags & SI_CLONELIST),
	    ("Dev %p(%s) should not be on clonelist", dev, dev->si_name));
	/* Link the new cdev into the sorted clone list. */
	if (dl != NULL)
		LIST_INSERT_BEFORE(dl, dev, si_clone);
	else if (de != NULL)
		LIST_INSERT_AFTER(de, dev, si_clone);
	else
		LIST_INSERT_HEAD(&cd->head, dev, si_clone);
	dev->si_flags |= SI_CLONELIST;
	*up = unit;
	dev_unlock_and_free();
	return (1);
}

/*
 * Kill everything still on the list. The driver should already have
 * disposed of any softc hung of the struct cdev *'s at this time.
 */
void
clone_cleanup(struct clonedevs **cdp)
{
	struct cdev *dev;
	struct cdev_priv *cp;
	struct clonedevs *cd;

	cd = *cdp;
	if (cd == NULL)
		return;
	dev_lock();
	while (!LIST_EMPTY(&cd->head)) {
		dev = LIST_FIRST(&cd->head);
		LIST_REMOVE(dev, si_clone);
		KASSERT(dev->si_flags & SI_CLONELIST,
		    ("Dev %p(%s) should be on clonelist", dev, dev->si_name));
		dev->si_flags &= ~SI_CLONELIST;
		cp = dev->si_priv;
		/* Skip devices whose destruction is already scheduled. */
		if (!(cp->cdp_flags & CDP_SCHED_DTR)) {
			cp->cdp_flags |= CDP_SCHED_DTR;
			KASSERT(dev->si_flags & SI_NAMED,
			    ("Driver has goofed in cloning underways udev %x", dev->si_drv0));
			destroy_devl(dev);
		}
	}
	dev_unlock();
	free(cd, M_DEVBUF);
	*cdp = NULL;
}

/* Devices queued for deferred (taskqueue-driven) destruction. */
static TAILQ_HEAD(, cdev_priv) dev_ddtr =
    TAILQ_HEAD_INITIALIZER(dev_ddtr);
static struct task dev_dtr_task;

/*
 * Taskqueue handler: destroy every device queued on dev_ddtr, drop
 * the reference taken by destroy_dev_sched_cbl() and run the caller's
 * callback (outside devmtx) if one was supplied.
 */
static void
destroy_dev_tq(void *ctx, int pending)
{
	struct cdev_priv *cp;
	struct cdev *dev;
	void (*cb)(void *);
	void *cb_arg;

	dev_lock();
	while (!TAILQ_EMPTY(&dev_ddtr)) {
		cp = TAILQ_FIRST(&dev_ddtr);
		dev = &cp->cdp_c;
		KASSERT(cp->cdp_flags & CDP_SCHED_DTR,
		    ("cdev %p in dev_destroy_tq without CDP_SCHED_DTR", cp));
		TAILQ_REMOVE(&dev_ddtr, cp, cdp_dtr_list);
		cb = cp->cdp_dtr_cb;
		cb_arg = cp->cdp_dtr_cb_arg;
		destroy_devl(dev);
		dev_unlock();
		dev_rel(dev);
		if (cb != NULL)
			cb(cb_arg);
		dev_lock();
	}
	dev_unlock();
}

/*
 * devmtx shall be locked on entry. devmtx will be unlocked after
 * function return.
 */
static int
destroy_dev_sched_cbl(struct cdev *dev, void (*cb)(void *), void *arg)
{
	struct cdev_priv *cp;

	mtx_assert(&devmtx, MA_OWNED);
	cp = dev->si_priv;
	if (cp->cdp_flags & CDP_SCHED_DTR) {
		/* Already scheduled; nothing to do. */
		dev_unlock();
		return (0);
	}
	/* Hold a reference until destroy_dev_tq() processes the entry. */
	dev_refl(dev);
	cp->cdp_flags |= CDP_SCHED_DTR;
	cp->cdp_dtr_cb = cb;
	cp->cdp_dtr_cb_arg = arg;
	TAILQ_INSERT_TAIL(&dev_ddtr, cp, cdp_dtr_list);
	dev_unlock();
	taskqueue_enqueue(taskqueue_swi_giant, &dev_dtr_task);
	return (1);
}

/*
 * Schedule asynchronous destruction of a device with a completion
 * callback.  Returns 1 if scheduled, 0 if already pending.
 */
int
destroy_dev_sched_cb(struct cdev *dev, void (*cb)(void *), void *arg)
{
	dev_lock();
	return (destroy_dev_sched_cbl(dev, cb, arg));
}

/* Schedule asynchronous destruction of a device, no callback. */
int
destroy_dev_sched(struct cdev *dev)
{
	return (destroy_dev_sched_cb(dev, NULL, NULL));
}

/*
 * Wait until every cdev of the given cdevsw has been destroyed.
 * Woken by the wakeup(&csw->d_devs) in destroy_devl(); polls with a
 * timeout in case the wakeup is missed.
 */
void
destroy_dev_drain(struct cdevsw *csw)
{

	dev_lock();
	while (!LIST_EMPTY(&csw->d_devs)) {
		msleep(&csw->d_devs, &devmtx, PRIBIO, "devscd", hz/10);
	}
	dev_unlock();
}

/* Wait for any in-flight clone events to finish (lock/unlock barrier). */
void
drain_dev_clone_events(void)
{

	sx_xlock(&clone_drain_lock);
	sx_xunlock(&clone_drain_lock);
}

/* SYSINIT hook: set up the deferred-destruction task. */
static void
devdtr_init(void *dummy __unused)
{

	TASK_INIT(&dev_dtr_task, 0, destroy_dev_tq, NULL);
}

SYSINIT(devdtr, SI_SUB_DEVFS, SI_ORDER_SECOND, devdtr_init, NULL);