/*-
 * Copyright (c) 1999-2002 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/vnode.h>
#include <sys/queue.h>
#include <sys/poll.h>
#include <sys/sx.h>
#include <sys/ctype.h>
#include <sys/tty.h>
#include <sys/ucred.h>
#include <sys/taskqueue.h>
#include <machine/stdarg.h>

#include <fs/devfs/devfs_int.h>

static MALLOC_DEFINE(M_DEVT, "cdev", "cdev storage");

struct mtx devmtx;
static void destroy_devl(struct cdev *dev);
static int destroy_dev_sched_cbl(struct cdev *dev,
    void (*cb)(void *), void *arg);
static struct cdev *make_dev_credv(int flags,
    struct cdevsw *devsw, int minornr,
    struct ucred *cr, uid_t uid, gid_t gid, int mode, const char *fmt,
    va_list ap);

static struct cdev_priv_list cdevp_free_list =
    TAILQ_HEAD_INITIALIZER(cdevp_free_list);
static SLIST_HEAD(free_cdevsw, cdevsw) cdevsw_gt_post_list =
    SLIST_HEAD_INITIALIZER(cdevsw_gt_post_list);

void
dev_lock(void)
{

	mtx_lock(&devmtx);
}

/*
 * Free all the memory collected while the cdev mutex was
 * locked. Since devmtx is after the system map mutex, free() cannot
 * be called immediately and is postponed until cdev mutex can be
 * dropped.
 */
static void
dev_unlock_and_free(void)
{
	struct cdev_priv_list cdp_free;
	struct free_cdevsw csw_free;
	struct cdev_priv *cdp;
	struct cdevsw *csw;

	mtx_assert(&devmtx, MA_OWNED);

	/*
	 * Make the local copy of the list heads while the dev_mtx is
	 * held. Free it later.
	 */
	TAILQ_INIT(&cdp_free);
	TAILQ_CONCAT(&cdp_free, &cdevp_free_list, cdp_list);
	csw_free = cdevsw_gt_post_list;
	SLIST_INIT(&cdevsw_gt_post_list);

	mtx_unlock(&devmtx);

	while ((cdp = TAILQ_FIRST(&cdp_free)) != NULL) {
		TAILQ_REMOVE(&cdp_free, cdp, cdp_list);
		devfs_free(&cdp->cdp_c);
	}
	while ((csw = SLIST_FIRST(&csw_free)) != NULL) {
		SLIST_REMOVE_HEAD(&csw_free, d_postfree_list);
		free(csw, M_DEVT);
	}
}

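/*
 * Queue a cdev or a cdevsw on the deferred-free lists.  Both helpers
 * require the cdev mutex; the memory is actually released by the next
 * call to dev_unlock_and_free().
 */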
static void
dev_free_devlocked(struct cdev *cdev)
{
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_OWNED);
	cdp = cdev->si_priv;
	TAILQ_INSERT_HEAD(&cdevp_free_list, cdp, cdp_list);
}

static void
cdevsw_free_devlocked(struct cdevsw *csw)
{

	mtx_assert(&devmtx, MA_OWNED);
	SLIST_INSERT_HEAD(&cdevsw_gt_post_list, csw, d_postfree_list);
}

void
dev_unlock(void)
{

	mtx_unlock(&devmtx);
}

void
dev_ref(struct cdev *dev)
{

	mtx_assert(&devmtx, MA_NOTOWNED);
	mtx_lock(&devmtx);
	dev->si_refcount++;
	mtx_unlock(&devmtx);
}

void
dev_refl(struct cdev *dev)
{

	mtx_assert(&devmtx, MA_OWNED);
	dev->si_refcount++;
}

void
dev_rel(struct cdev *dev)
{
	int flag = 0;

	mtx_assert(&devmtx, MA_NOTOWNED);
	dev_lock();
	dev->si_refcount--;
	KASSERT(dev->si_refcount >= 0,
	    ("dev_rel(%s) gave negative count", devtoname(dev)));
#if 0
	if (dev->si_usecount == 0 &&
	    (dev->si_flags & SI_CHEAPCLONE) && (dev->si_flags & SI_NAMED))
		;
	else
#endif
	if (dev->si_devsw == NULL && dev->si_refcount == 0) {
		LIST_REMOVE(dev, si_list);
		flag = 1;
	}
	dev_unlock();
	if (flag)
		devfs_free(dev);
}

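/*
 * dev_refthread() pins the driver for the duration of a single cdevsw
 * method call by bumping si_threadcount; dev_relthread() drops that
 * reference.  An illustrative calling sequence (a sketch, not quoted
 * from a particular caller):
 *
 *	csw = dev_refthread(dev);
 *	if (csw == NULL)
 *		return (ENXIO);
 *	error = csw->d_ioctl(dev, cmd, data, fflag, td);
 *	dev_relthread(dev);
 *
 * destroy_devl() waits for si_threadcount to drain before the device
 * goes away, so a method never runs on a fully destroyed cdev.
 */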
struct cdevsw *
dev_refthread(struct cdev *dev)
{
	struct cdevsw *csw;
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_NOTOWNED);
	dev_lock();
	csw = dev->si_devsw;
	if (csw != NULL) {
		cdp = dev->si_priv;
		if ((cdp->cdp_flags & CDP_SCHED_DTR) == 0)
			dev->si_threadcount++;
		else
			csw = NULL;
	}
	dev_unlock();
	return (csw);
}

struct cdevsw *
devvn_refthread(struct vnode *vp, struct cdev **devp)
{
	struct cdevsw *csw;
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_NOTOWNED);
	csw = NULL;
	dev_lock();
	*devp = vp->v_rdev;
	if (*devp != NULL) {
		cdp = (*devp)->si_priv;
		if ((cdp->cdp_flags & CDP_SCHED_DTR) == 0) {
			csw = (*devp)->si_devsw;
			if (csw != NULL)
				(*devp)->si_threadcount++;
		}
	}
	dev_unlock();
	return (csw);
}

void
dev_relthread(struct cdev *dev)
{

	mtx_assert(&devmtx, MA_NOTOWNED);
	dev_lock();
	dev->si_threadcount--;
	dev_unlock();
}

int
nullop(void)
{

	return (0);
}

int
eopnotsupp(void)
{

	return (EOPNOTSUPP);
}

static int
enxio(void)
{
	return (ENXIO);
}

static int
enodev(void)
{
	return (ENODEV);
}

/* Define a dead_cdevsw for use when devices leave unexpectedly. */

#define dead_open	(d_open_t *)enxio
#define dead_close	(d_close_t *)enxio
#define dead_read	(d_read_t *)enxio
#define dead_write	(d_write_t *)enxio
#define dead_ioctl	(d_ioctl_t *)enxio
#define dead_poll	(d_poll_t *)enodev
#define dead_mmap	(d_mmap_t *)enodev

static void
dead_strategy(struct bio *bp)
{

	biofinish(bp, NULL, ENXIO);
}

#define dead_dump	(dumper_t *)enxio
#define dead_kqfilter	(d_kqfilter_t *)enxio

static struct cdevsw dead_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_NEEDGIANT, /* XXX: does dead_strategy need this ? */
	.d_open =	dead_open,
	.d_close =	dead_close,
	.d_read =	dead_read,
	.d_write =	dead_write,
	.d_ioctl =	dead_ioctl,
	.d_poll =	dead_poll,
	.d_mmap =	dead_mmap,
	.d_strategy =	dead_strategy,
	.d_name =	"dead",
	.d_dump =	dead_dump,
	.d_kqfilter =	dead_kqfilter
};

/* Default methods if driver does not specify method */

#define null_open	(d_open_t *)nullop
#define null_close	(d_close_t *)nullop
#define no_read		(d_read_t *)enodev
#define no_write	(d_write_t *)enodev
#define no_ioctl	(d_ioctl_t *)enodev
#define no_mmap		(d_mmap_t *)enodev
#define no_kqfilter	(d_kqfilter_t *)enodev

static void
no_strategy(struct bio *bp)
{

	biofinish(bp, NULL, ENODEV);
}

static int
no_poll(struct cdev *dev __unused, int events, struct thread *td __unused)
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 * Stay in sync with vop_nopoll().
	 */
	if (events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

#define no_dump		(dumper_t *)enodev

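/*
 * Giant compatibility wrappers.  For a driver marked D_NEEDGIANT,
 * prep_cdevsw() stashes a copy of the original cdevsw in d_gianttrick
 * and points the visible methods at the giant_*() functions below,
 * each of which acquires Giant around the real driver entry point.
 */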
static int
giant_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
	struct cdevsw *dsw;
	int retval;

	dsw = dev_refthread(dev);
	if (dsw == NULL)
		return (ENXIO);
	mtx_lock(&Giant);
	retval = dsw->d_gianttrick->d_open(dev, oflags, devtype, td);
	mtx_unlock(&Giant);
	dev_relthread(dev);
	return (retval);
}

static int
giant_fdopen(struct cdev *dev, int oflags, struct thread *td, struct file *fp)
{
	struct cdevsw *dsw;
	int retval;

	dsw = dev_refthread(dev);
	if (dsw == NULL)
		return (ENXIO);
	mtx_lock(&Giant);
	retval = dsw->d_gianttrick->d_fdopen(dev, oflags, td, fp);
	mtx_unlock(&Giant);
	dev_relthread(dev);
	return (retval);
}

static int
giant_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
	struct cdevsw *dsw;
	int retval;

	dsw = dev_refthread(dev);
	if (dsw == NULL)
		return (ENXIO);
	mtx_lock(&Giant);
	retval = dsw->d_gianttrick->d_close(dev, fflag, devtype, td);
	mtx_unlock(&Giant);
	dev_relthread(dev);
	return (retval);
}

static void
giant_strategy(struct bio *bp)
{
	struct cdevsw *dsw;
	struct cdev *dev;

	dev = bp->bio_dev;
	dsw = dev_refthread(dev);
	if (dsw == NULL) {
		biofinish(bp, NULL, ENXIO);
		return;
	}
	mtx_lock(&Giant);
	dsw->d_gianttrick->d_strategy(bp);
	mtx_unlock(&Giant);
	dev_relthread(dev);
}

static int
giant_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
{
	struct cdevsw *dsw;
	int retval;

	dsw = dev_refthread(dev);
	if (dsw == NULL)
		return (ENXIO);
	mtx_lock(&Giant);
	/* Use the cdevsw returned by dev_refthread(); si_devsw may be
	   cleared by destroy_devl() while we hold only a threadcount. */
	retval = dsw->d_gianttrick->d_ioctl(dev, cmd, data, fflag, td);
	mtx_unlock(&Giant);
	dev_relthread(dev);
	return (retval);
}

static int
giant_read(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct cdevsw *dsw;
	int retval;

	dsw = dev_refthread(dev);
	if (dsw == NULL)
		return (ENXIO);
	mtx_lock(&Giant);
	/* As in giant_ioctl(), go through dsw rather than si_devsw. */
	retval = dsw->d_gianttrick->d_read(dev, uio, ioflag);
	mtx_unlock(&Giant);
	dev_relthread(dev);
	return (retval);
}

static int
giant_write(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct cdevsw *dsw;
	int retval;

	dsw = dev_refthread(dev);
	if (dsw == NULL)
		return (ENXIO);
	mtx_lock(&Giant);
	retval = dsw->d_gianttrick->d_write(dev, uio, ioflag);
	mtx_unlock(&Giant);
	dev_relthread(dev);
	return (retval);
}

static int
giant_poll(struct cdev *dev, int events, struct thread *td)
{
	struct cdevsw *dsw;
	int retval;

	dsw = dev_refthread(dev);
	if (dsw == NULL)
		return (ENXIO);
	mtx_lock(&Giant);
	retval = dsw->d_gianttrick->d_poll(dev, events, td);
	mtx_unlock(&Giant);
	dev_relthread(dev);
	return (retval);
}

static int
giant_kqfilter(struct cdev *dev, struct knote *kn)
{
	struct cdevsw *dsw;
	int retval;

	dsw = dev_refthread(dev);
	if (dsw == NULL)
		return (ENXIO);
	mtx_lock(&Giant);
	retval = dsw->d_gianttrick->d_kqfilter(dev, kn);
	mtx_unlock(&Giant);
	dev_relthread(dev);
	return (retval);
}

static int
giant_mmap(struct cdev *dev, vm_offset_t offset, vm_paddr_t *paddr, int nprot)
{
	struct cdevsw *dsw;
	int retval;

	dsw = dev_refthread(dev);
	if (dsw == NULL)
		return (ENXIO);
	mtx_lock(&Giant);
	retval = dsw->d_gianttrick->d_mmap(dev, offset, paddr, nprot);
	mtx_unlock(&Giant);
	dev_relthread(dev);
	return (retval);
}


/*
 * struct cdev * and u_dev_t primitives
 */

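/*
 * The unit number is stored as a split bitfield inside the minor
 * number: unit bits 0-7 map to minor bits 0-7, and the remaining unit
 * bits map to minor bits 16 and up.  Minor bits 8-15 stay clear
 * because they carry the major number in the packed dev_t (cf.
 * umajor() below).  A worked example: unit2minor(0x123) == 0x10023,
 * and minor2unit(0x10023) == 0x123.
 */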
int
minor(struct cdev *x)
{
	if (x == NULL)
		return (NODEV);
	return (x->si_drv0 & MAXMINOR);
}

int
dev2unit(struct cdev *x)
{

	if (x == NULL)
		return (NODEV);
	return (minor2unit(minor(x)));
}

u_int
minor2unit(u_int _minor)
{

	KASSERT((_minor & ~MAXMINOR) == 0, ("Illegal minor %x", _minor));
	return ((_minor & 0xff) | ((_minor >> 8) & 0xffff00));
}

int
unit2minor(int unit)
{

	KASSERT(unit <= 0xffffff, ("Invalid unit (%d) in unit2minor", unit));
	return ((unit & 0xff) | ((unit << 8) & ~0xffff));
}

static struct cdev *
newdev(struct cdevsw *csw, int y, struct cdev *si)
{
	struct cdev *si2;
	dev_t udev;

	mtx_assert(&devmtx, MA_OWNED);
	udev = y;
	LIST_FOREACH(si2, &csw->d_devs, si_list) {
		if (si2->si_drv0 == udev) {
			dev_free_devlocked(si);
			return (si2);
		}
	}
	si->si_drv0 = udev;
	si->si_devsw = csw;
	LIST_INSERT_HEAD(&csw->d_devs, si, si_list);
	return (si);
}

int
uminor(dev_t dev)
{
	return (dev & MAXMINOR);
}

int
umajor(dev_t dev)
{
	return ((dev & ~MAXMINOR) >> 8);
}

static void
fini_cdevsw(struct cdevsw *devsw)
{
	struct cdevsw *gt;

	if (devsw->d_gianttrick != NULL) {
		gt = devsw->d_gianttrick;
		memcpy(devsw, gt, sizeof *devsw);
		cdevsw_free_devlocked(gt);
		devsw->d_gianttrick = NULL;
	}
	devsw->d_flags &= ~D_INIT;
}

static void
prep_cdevsw(struct cdevsw *devsw)
{
	struct cdevsw *dsw2;

	mtx_assert(&devmtx, MA_OWNED);
	if (devsw->d_flags & D_INIT)
		return;
	if (devsw->d_flags & D_NEEDGIANT) {
		dev_unlock();
		dsw2 = malloc(sizeof *dsw2, M_DEVT, M_WAITOK);
		dev_lock();
	} else
		dsw2 = NULL;
	if (devsw->d_flags & D_INIT) {
		if (dsw2 != NULL)
			cdevsw_free_devlocked(dsw2);
		return;
	}

	if (devsw->d_version != D_VERSION_01) {
		printf(
		    "WARNING: Device driver \"%s\" has wrong version %s\n",
		    devsw->d_name == NULL ? "???" : devsw->d_name,
		    "and is disabled.  Recompile KLD module.");
		devsw->d_open = dead_open;
		devsw->d_close = dead_close;
		devsw->d_read = dead_read;
		devsw->d_write = dead_write;
		devsw->d_ioctl = dead_ioctl;
		devsw->d_poll = dead_poll;
		devsw->d_mmap = dead_mmap;
		devsw->d_strategy = dead_strategy;
		devsw->d_dump = dead_dump;
		devsw->d_kqfilter = dead_kqfilter;
	}

	if (devsw->d_flags & D_TTY) {
		if (devsw->d_ioctl == NULL)	devsw->d_ioctl = ttyioctl;
		if (devsw->d_read == NULL)	devsw->d_read = ttyread;
		if (devsw->d_write == NULL)	devsw->d_write = ttywrite;
		if (devsw->d_kqfilter == NULL)	devsw->d_kqfilter = ttykqfilter;
		if (devsw->d_poll == NULL)	devsw->d_poll = ttypoll;
	}

	if (devsw->d_flags & D_NEEDGIANT) {
		if (devsw->d_gianttrick == NULL) {
			memcpy(dsw2, devsw, sizeof *dsw2);
			devsw->d_gianttrick = dsw2;
			dsw2 = NULL;
		}
	}

#define FIXUP(member, noop, giant) 				\
	do {							\
		if (devsw->member == NULL) {			\
			devsw->member = noop;			\
		} else if (devsw->d_flags & D_NEEDGIANT)	\
			devsw->member = giant;			\
	}							\
	while (0)

	FIXUP(d_open,		null_open,	giant_open);
	FIXUP(d_fdopen,		NULL,		giant_fdopen);
	FIXUP(d_close,		null_close,	giant_close);
	FIXUP(d_read,		no_read,	giant_read);
	FIXUP(d_write,		no_write,	giant_write);
	FIXUP(d_ioctl,		no_ioctl,	giant_ioctl);
	FIXUP(d_poll,		no_poll,	giant_poll);
	FIXUP(d_mmap,		no_mmap,	giant_mmap);
	FIXUP(d_strategy,	no_strategy,	giant_strategy);
	FIXUP(d_kqfilter,	no_kqfilter,	giant_kqfilter);

	if (devsw->d_dump == NULL)	devsw->d_dump = no_dump;

	LIST_INIT(&devsw->d_devs);

	devsw->d_flags |= D_INIT;

	if (dsw2 != NULL)
		cdevsw_free_devlocked(dsw2);
}

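/*
 * Common worker for the make_dev*() front ends below: it allocates
 * the cdev, initializes the cdevsw via prep_cdevsw(), formats the
 * node name and hands the result to devfs_create().
 */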
static struct cdev *
make_dev_credv(int flags, struct cdevsw *devsw, int minornr,
    struct ucred *cr, uid_t uid,
    gid_t gid, int mode, const char *fmt, va_list ap)
{
	struct cdev *dev;
	int i;

	KASSERT((minornr & ~MAXMINOR) == 0,
	    ("Invalid minor (0x%x) in make_dev", minornr));

	dev = devfs_alloc();
	dev_lock();
	prep_cdevsw(devsw);
	dev = newdev(devsw, minornr, dev);
	if (flags & MAKEDEV_REF)
		dev_refl(dev);
	if (dev->si_flags & SI_CHEAPCLONE &&
	    dev->si_flags & SI_NAMED) {
		/*
		 * This is allowed as it removes races and generally
		 * simplifies cloning devices.
		 * XXX: still ??
		 */
		dev_unlock_and_free();
		return (dev);
	}
	KASSERT(!(dev->si_flags & SI_NAMED),
	    ("make_dev() by driver %s on pre-existing device (min=%x, name=%s)",
	    devsw->d_name, minor(dev), devtoname(dev)));

	i = vsnrprintf(dev->__si_namebuf, sizeof dev->__si_namebuf, 32, fmt, ap);
	if (i > (sizeof dev->__si_namebuf - 1)) {
		printf("WARNING: Device name truncated! (%s)\n",
		    dev->__si_namebuf);
	}

	dev->si_flags |= SI_NAMED;
#ifdef MAC
	if (cr != NULL)
		dev->si_cred = crhold(cr);
	else
#endif
		dev->si_cred = NULL;
	dev->si_uid = uid;
	dev->si_gid = gid;
	dev->si_mode = mode;

	devfs_create(dev);
	clean_unrhdrl(devfs_inos);
	dev_unlock_and_free();
	return (dev);
}

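/*
 * A minimal driver-side sketch of the public interface; the "foo"
 * names are hypothetical:
 *
 *	static struct cdevsw foo_cdevsw = {
 *		.d_version =	D_VERSION,
 *		.d_open =	foo_open,
 *		.d_read =	foo_read,
 *		.d_name =	"foo",
 *	};
 *
 *	foo_dev = make_dev(&foo_cdevsw, unit2minor(unit),
 *	    UID_ROOT, GID_WHEEL, 0600, "foo%d", unit);
 *	...
 *	destroy_dev(foo_dev);
 */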
struct cdev *
make_dev(struct cdevsw *devsw, int minornr, uid_t uid, gid_t gid, int mode,
    const char *fmt, ...)
{
	struct cdev *dev;
	va_list ap;

	va_start(ap, fmt);
	dev = make_dev_credv(0, devsw, minornr, NULL, uid, gid, mode, fmt, ap);
	va_end(ap);
	return (dev);
}

struct cdev *
make_dev_cred(struct cdevsw *devsw, int minornr, struct ucred *cr, uid_t uid,
    gid_t gid, int mode, const char *fmt, ...)
{
	struct cdev *dev;
	va_list ap;

	va_start(ap, fmt);
	dev = make_dev_credv(0, devsw, minornr, cr, uid, gid, mode, fmt, ap);
	va_end(ap);

	return (dev);
}

struct cdev *
make_dev_credf(int flags, struct cdevsw *devsw, int minornr,
    struct ucred *cr, uid_t uid,
    gid_t gid, int mode, const char *fmt, ...)
{
	struct cdev *dev;
	va_list ap;

	va_start(ap, fmt);
	dev = make_dev_credv(flags, devsw, minornr, cr, uid, gid, mode,
	    fmt, ap);
	va_end(ap);

	return (dev);
}

static void
dev_dependsl(struct cdev *pdev, struct cdev *cdev)
{

	cdev->si_parent = pdev;
	cdev->si_flags |= SI_CHILD;
	LIST_INSERT_HEAD(&pdev->si_children, cdev, si_siblings);
}


void
dev_depends(struct cdev *pdev, struct cdev *cdev)
{

	dev_lock();
	dev_dependsl(pdev, cdev);
	dev_unlock();
}

struct cdev *
make_dev_alias(struct cdev *pdev, const char *fmt, ...)
{
	struct cdev *dev;
	va_list ap;
	int i;

	dev = devfs_alloc();
	dev_lock();
	dev->si_flags |= SI_ALIAS;
	dev->si_flags |= SI_NAMED;
	va_start(ap, fmt);
	i = vsnrprintf(dev->__si_namebuf, sizeof dev->__si_namebuf, 32, fmt, ap);
	if (i > (sizeof dev->__si_namebuf - 1)) {
		printf("WARNING: Device name truncated! (%s)\n",
		    dev->__si_namebuf);
	}
	va_end(ap);

	devfs_create(dev);
	clean_unrhdrl(devfs_inos);
	dev_unlock();
	dev_depends(pdev, dev);
	return (dev);
}

static void
destroy_devl(struct cdev *dev)
{
	struct cdevsw *csw;

	mtx_assert(&devmtx, MA_OWNED);
	KASSERT(dev->si_flags & SI_NAMED,
	    ("WARNING: Driver mistake: destroy_dev on %d\n", minor(dev)));

	devfs_destroy(dev);

	/* Remove name marking */
	dev->si_flags &= ~SI_NAMED;

	/* If we are a child, remove us from the parent's list */
	if (dev->si_flags & SI_CHILD) {
		LIST_REMOVE(dev, si_siblings);
		dev->si_flags &= ~SI_CHILD;
	}

	/* Kill our children */
	while (!LIST_EMPTY(&dev->si_children))
		destroy_devl(LIST_FIRST(&dev->si_children));

	/* Remove from clone list */
	if (dev->si_flags & SI_CLONELIST) {
		LIST_REMOVE(dev, si_clone);
		dev->si_flags &= ~SI_CLONELIST;
	}

	dev->si_refcount++;	/* Avoid race with dev_rel() */
	csw = dev->si_devsw;
	dev->si_devsw = NULL;	/* already NULL for SI_ALIAS */
	while (csw != NULL && csw->d_purge != NULL && dev->si_threadcount) {
		csw->d_purge(dev);
		msleep(csw, &devmtx, PRIBIO, "devprg", hz/10);
		if (dev->si_threadcount)
			printf("Still %lu threads in %s\n",
			    dev->si_threadcount, devtoname(dev));
	}
	while (dev->si_threadcount != 0) {
		/* Use unique dummy wait ident */
		msleep(&csw, &devmtx, PRIBIO, "devdrn", hz / 10);
	}

	dev->si_drv1 = 0;
	dev->si_drv2 = 0;
	bzero(&dev->__si_u, sizeof(dev->__si_u));

	if (!(dev->si_flags & SI_ALIAS)) {
		/* Remove from cdevsw list */
		LIST_REMOVE(dev, si_list);

		/* If cdevsw has no more struct cdev *'s, clean it */
		if (LIST_EMPTY(&csw->d_devs)) {
			fini_cdevsw(csw);
			wakeup(&csw->d_devs);
		}
	}
	dev->si_flags &= ~SI_ALIAS;
	dev->si_refcount--;	/* Avoid race with dev_rel() */

	if (dev->si_refcount > 0) {
		LIST_INSERT_HEAD(&dead_cdevsw.d_devs, dev, si_list);
	} else {
		dev_free_devlocked(dev);
	}
}

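/*
 * destroy_dev() takes the cdev mutex and runs the full teardown: the
 * devfs node is removed first, then child and clone linkage is
 * undone, and the function sleeps until every thread holding a
 * dev_refthread() reference has drained before the cdev is freed.
 */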
void
destroy_dev(struct cdev *dev)
{

	dev_lock();
	destroy_devl(dev);
	dev_unlock_and_free();
}

const char *
devtoname(struct cdev *dev)
{
	char *p;
	struct cdevsw *csw;
	int mynor;

	if (dev->si_name[0] == '#' || dev->si_name[0] == '\0') {
		p = dev->si_name;
		csw = dev_refthread(dev);
		if (csw != NULL) {
			sprintf(p, "(%s)", csw->d_name);
			dev_relthread(dev);
		}
		p += strlen(p);
		mynor = minor(dev);
		if (mynor < 0 || mynor > 255)
			sprintf(p, "/%#x", (u_int)mynor);
		else
			sprintf(p, "/%d", mynor);
	}
	return (dev->si_name);
}

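/*
 * Parse a name of the form "<stem><decimal unit>[<rest>]".  Returns 0
 * on mismatch, 1 when the unit number terminates the name, and 2 when
 * extra characters follow (with *namep pointing at them).  For
 * example, dev_stdclone("tap5", &np, "tap", &u) returns 1 with u == 5.
 */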
int
dev_stdclone(char *name, char **namep, const char *stem, int *unit)
{
	int u, i;

	i = strlen(stem);
	if (bcmp(stem, name, i) != 0)
		return (0);
	if (!isdigit(name[i]))
		return (0);
	u = 0;
	if (name[i] == '0' && isdigit(name[i+1]))
		return (0);
	while (isdigit(name[i])) {
		u *= 10;
		u += name[i++] - '0';
	}
	if (u > 0xffffff)
		return (0);
	*unit = u;
	if (namep)
		*namep = &name[i];
	if (name[i])
		return (2);
	return (1);
}

/*
 * Helper functions for cloning device drivers.
 *
 * The objective here is to make it unnecessary for the device drivers to
 * use rman or similar to manage their unit number space. Due to the way
 * we do "on-demand" devices, using rman or other "private" methods
 * will be very tricky to lock down properly once we lock down this file.
 *
 * Instead we give the drivers these routines which puts the struct cdev *'s
 * that are to be managed on their own list, and gives the driver the ability
 * to ask for the first free unit number or a given specified unit number.
 *
 * In addition these routines support paired devices (pty, nmdm and similar)
 * by respecting a number of "flag" bits in the minor number.
 */

struct clonedevs {
	LIST_HEAD(,cdev)	head;
};

void
clone_setup(struct clonedevs **cdp)
{

	*cdp = malloc(sizeof **cdp, M_DEVBUF, M_WAITOK | M_ZERO);
	LIST_INIT(&(*cdp)->head);
}

int
clone_create(struct clonedevs **cdp, struct cdevsw *csw, int *up, struct cdev **dp, int extra)
{
	struct clonedevs *cd;
	struct cdev *dev, *ndev, *dl, *de;
	int unit, low, u;

	KASSERT(*cdp != NULL,
	    ("clone_setup() not called in driver \"%s\"", csw->d_name));
	KASSERT(!(extra & CLONE_UNITMASK),
	    ("Illegal extra bits (0x%x) in clone_create", extra));
	KASSERT(*up <= CLONE_UNITMASK,
	    ("Too high unit (0x%x) in clone_create", *up));


	/*
	 * Search the list for a lot of things in one go:
	 *   A preexisting match is returned immediately.
	 *   The lowest free unit number if we are passed -1, and the place
	 *	 in the list where we should insert that new element.
	 *   The place to insert a specified unit number, if applicable
	 *	 the end of the list.
	 */
	unit = *up;
	ndev = devfs_alloc();
	dev_lock();
	prep_cdevsw(csw);
	low = extra;
	de = dl = NULL;
	cd = *cdp;
	LIST_FOREACH(dev, &cd->head, si_clone) {
		KASSERT(dev->si_flags & SI_CLONELIST,
		    ("Dev %p(%s) should be on clonelist", dev, dev->si_name));
		u = dev2unit(dev);
		if (u == (unit | extra)) {
			*dp = dev;
			dev_unlock();
			devfs_free(ndev);
			return (0);
		}
		if (unit == -1 && u == low) {
			low++;
			de = dev;
			continue;
		} else if (u < (unit | extra)) {
			de = dev;
			continue;
		} else if (u > (unit | extra)) {
			dl = dev;
			break;
		}
	}
	if (unit == -1)
		unit = low & CLONE_UNITMASK;
	dev = newdev(csw, unit2minor(unit | extra), ndev);
	if (dev->si_flags & SI_CLONELIST) {
		printf("dev %p (%s) is on clonelist\n", dev, dev->si_name);
		printf("unit=%d, low=%d, extra=0x%x\n", unit, low, extra);
		LIST_FOREACH(dev, &cd->head, si_clone) {
			printf("\t%p %s\n", dev, dev->si_name);
		}
		panic("foo");
	}
	KASSERT(!(dev->si_flags & SI_CLONELIST),
	    ("Dev %p(%s) should not be on clonelist", dev, dev->si_name));
	if (dl != NULL)
		LIST_INSERT_BEFORE(dl, dev, si_clone);
	else if (de != NULL)
		LIST_INSERT_AFTER(de, dev, si_clone);
	else
		LIST_INSERT_HEAD(&cd->head, dev, si_clone);
	dev->si_flags |= SI_CLONELIST;
	*up = unit;
	dev_unlock_and_free();
	return (1);
}

/*
 * Kill everything still on the list. The driver should already have
 * disposed of any softc hung off the struct cdev *'s at this time.
 */
void
clone_cleanup(struct clonedevs **cdp)
{
	struct cdev *dev;
	struct cdev_priv *cp;
	struct clonedevs *cd;

	cd = *cdp;
	if (cd == NULL)
		return;
	dev_lock();
	while (!LIST_EMPTY(&cd->head)) {
		dev = LIST_FIRST(&cd->head);
		LIST_REMOVE(dev, si_clone);
		KASSERT(dev->si_flags & SI_CLONELIST,
		    ("Dev %p(%s) should be on clonelist", dev, dev->si_name));
		dev->si_flags &= ~SI_CLONELIST;
		cp = dev->si_priv;
		if (!(cp->cdp_flags & CDP_SCHED_DTR)) {
			cp->cdp_flags |= CDP_SCHED_DTR;
			KASSERT(dev->si_flags & SI_NAMED,
			    ("Driver has goofed in cloning underways udev %x", dev->si_drv0));
			destroy_devl(dev);
		}
	}
	dev_unlock_and_free();
	free(cd, M_DEVBUF);
	*cdp = NULL;
}

static TAILQ_HEAD(, cdev_priv) dev_ddtr =
    TAILQ_HEAD_INITIALIZER(dev_ddtr);
static struct task dev_dtr_task;

static void
destroy_dev_tq(void *ctx, int pending)
{
	struct cdev_priv *cp;
	struct cdev *dev;
	void (*cb)(void *);
	void *cb_arg;

	dev_lock();
	while (!TAILQ_EMPTY(&dev_ddtr)) {
		cp = TAILQ_FIRST(&dev_ddtr);
		dev = &cp->cdp_c;
		KASSERT(cp->cdp_flags & CDP_SCHED_DTR,
		    ("cdev %p in dev_destroy_tq without CDP_SCHED_DTR", cp));
		TAILQ_REMOVE(&dev_ddtr, cp, cdp_dtr_list);
		cb = cp->cdp_dtr_cb;
		cb_arg = cp->cdp_dtr_cb_arg;
		destroy_devl(dev);
		dev_unlock_and_free();
		dev_rel(dev);
		if (cb != NULL)
			cb(cb_arg);
		dev_lock();
	}
	dev_unlock();
}

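/*
 * Deferred destruction.  destroy_dev_sched*() queue the cdev on
 * dev_ddtr and let the taskqueue above do the actual teardown, so a
 * driver may schedule destruction from contexts (e.g. one of its own
 * cdevsw methods) where calling destroy_dev() directly could not
 * safely sleep.  The optional callback runs after the device is gone.
 */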
/*
 * devmtx shall be locked on entry. devmtx will be unlocked after
 * function return.
 */
static int
destroy_dev_sched_cbl(struct cdev *dev, void (*cb)(void *), void *arg)
{
	struct cdev_priv *cp;

	mtx_assert(&devmtx, MA_OWNED);
	cp = dev->si_priv;
	if (cp->cdp_flags & CDP_SCHED_DTR) {
		dev_unlock();
		return (0);
	}
	dev_refl(dev);
	cp->cdp_flags |= CDP_SCHED_DTR;
	cp->cdp_dtr_cb = cb;
	cp->cdp_dtr_cb_arg = arg;
	TAILQ_INSERT_TAIL(&dev_ddtr, cp, cdp_dtr_list);
	dev_unlock();
	taskqueue_enqueue(taskqueue_swi_giant, &dev_dtr_task);
	return (1);
}

int
destroy_dev_sched_cb(struct cdev *dev, void (*cb)(void *), void *arg)
{
	dev_lock();
	return (destroy_dev_sched_cbl(dev, cb, arg));
}

int
destroy_dev_sched(struct cdev *dev)
{
	return (destroy_dev_sched_cb(dev, NULL, NULL));
}

void
destroy_dev_drain(struct cdevsw *csw)
{

	dev_lock();
	while (!LIST_EMPTY(&csw->d_devs)) {
		msleep(&csw->d_devs, &devmtx, PRIBIO, "devscd", hz/10);
	}
	dev_unlock();
}

void
drain_dev_clone_events(void)
{

	sx_xlock(&clone_drain_lock);
	sx_xunlock(&clone_drain_lock);
}

static void
devdtr_init(void *dummy __unused)
{

	TASK_INIT(&dev_dtr_task, 0, destroy_dev_tq, NULL);
}

SYSINIT(devdtr, SI_SUB_DEVFS, SI_ORDER_SECOND, devdtr_init, NULL);