/*-
 * Copyright (c) 1999-2002 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/bio.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/vnode.h>
#include <sys/queue.h>
#include <sys/poll.h>
#include <sys/sx.h>
#include <sys/ctype.h>
#include <sys/ucred.h>
#include <sys/taskqueue.h>
#include <machine/stdarg.h>

#include <fs/devfs/devfs_int.h>

static MALLOC_DEFINE(M_DEVT, "cdev", "cdev storage");

struct mtx devmtx;
static void destroy_devl(struct cdev *dev);
static int destroy_dev_sched_cbl(struct cdev *dev,
    void (*cb)(void *), void *arg);
static struct cdev *make_dev_credv(int flags,
    struct cdevsw *devsw, int unit,
    struct ucred *cr, uid_t uid, gid_t gid, int mode, const char *fmt,
    va_list ap);

static struct cdev_priv_list cdevp_free_list =
    TAILQ_HEAD_INITIALIZER(cdevp_free_list);
static SLIST_HEAD(free_cdevsw, cdevsw) cdevsw_gt_post_list =
    SLIST_HEAD_INITIALIZER(cdevsw_gt_post_list);

void
dev_lock(void)
{

        mtx_lock(&devmtx);
}

/*
 * Free all the memory collected while the cdev mutex was held.  Since
 * devmtx comes after the system map mutex in the lock order, free()
 * cannot be called while devmtx is held; freeing is postponed until
 * the cdev mutex can be dropped.
 */
static void
dev_unlock_and_free(void)
{
        struct cdev_priv_list cdp_free;
        struct free_cdevsw csw_free;
        struct cdev_priv *cdp;
        struct cdevsw *csw;

        mtx_assert(&devmtx, MA_OWNED);

        /*
         * Make local copies of the list heads while dev_mtx is held;
         * the elements are freed once it has been dropped.
         */
        TAILQ_INIT(&cdp_free);
        TAILQ_CONCAT(&cdp_free, &cdevp_free_list, cdp_list);
        csw_free = cdevsw_gt_post_list;
        SLIST_INIT(&cdevsw_gt_post_list);

        mtx_unlock(&devmtx);

        while ((cdp = TAILQ_FIRST(&cdp_free)) != NULL) {
                TAILQ_REMOVE(&cdp_free, cdp, cdp_list);
                devfs_free(&cdp->cdp_c);
        }
        while ((csw = SLIST_FIRST(&csw_free)) != NULL) {
                SLIST_REMOVE_HEAD(&csw_free, d_postfree_list);
                free(csw, M_DEVT);
        }
}
static void
dev_free_devlocked(struct cdev *cdev)
{
        struct cdev_priv *cdp;

        mtx_assert(&devmtx, MA_OWNED);
        cdp = cdev2priv(cdev);
        TAILQ_INSERT_HEAD(&cdevp_free_list, cdp, cdp_list);
}

static void
cdevsw_free_devlocked(struct cdevsw *csw)
{

        mtx_assert(&devmtx, MA_OWNED);
        SLIST_INSERT_HEAD(&cdevsw_gt_post_list, csw, d_postfree_list);
}

void
dev_unlock(void)
{

        mtx_unlock(&devmtx);
}

void
dev_ref(struct cdev *dev)
{

        mtx_assert(&devmtx, MA_NOTOWNED);
        mtx_lock(&devmtx);
        dev->si_refcount++;
        mtx_unlock(&devmtx);
}

void
dev_refl(struct cdev *dev)
{

        mtx_assert(&devmtx, MA_OWNED);
        dev->si_refcount++;
}

void
dev_rel(struct cdev *dev)
{
        int flag = 0;

        mtx_assert(&devmtx, MA_NOTOWNED);
        dev_lock();
        dev->si_refcount--;
        KASSERT(dev->si_refcount >= 0,
            ("dev_rel(%s) gave negative count", devtoname(dev)));
#if 0
        if (dev->si_usecount == 0 &&
            (dev->si_flags & SI_CHEAPCLONE) && (dev->si_flags & SI_NAMED))
                ;
        else
#endif
        if (dev->si_devsw == NULL && dev->si_refcount == 0) {
                LIST_REMOVE(dev, si_list);
                flag = 1;
        }
        dev_unlock();
        if (flag)
                devfs_free(dev);
}

struct cdevsw *
dev_refthread(struct cdev *dev)
{
        struct cdevsw *csw;
        struct cdev_priv *cdp;

        mtx_assert(&devmtx, MA_NOTOWNED);
        dev_lock();
        csw = dev->si_devsw;
        if (csw != NULL) {
                cdp = cdev2priv(dev);
                if ((cdp->cdp_flags & CDP_SCHED_DTR) == 0)
                        dev->si_threadcount++;
                else
                        csw = NULL;
        }
        dev_unlock();
        return (csw);
}

struct cdevsw *
devvn_refthread(struct vnode *vp, struct cdev **devp)
{
        struct cdevsw *csw;
        struct cdev_priv *cdp;

        mtx_assert(&devmtx, MA_NOTOWNED);
        csw = NULL;
        dev_lock();
        *devp = vp->v_rdev;
        if (*devp != NULL) {
                cdp = cdev2priv(*devp);
                if ((cdp->cdp_flags & CDP_SCHED_DTR) == 0) {
                        csw = (*devp)->si_devsw;
                        if (csw != NULL)
                                (*devp)->si_threadcount++;
                }
        }
        dev_unlock();
        return (csw);
}

void
dev_relthread(struct cdev *dev)
{

        mtx_assert(&devmtx, MA_NOTOWNED);
        dev_lock();
        KASSERT(dev->si_threadcount > 0,
            ("%s threadcount is wrong", dev->si_name));
        dev->si_threadcount--;
        dev_unlock();
}

int
nullop(void)
{

        return (0);
}

int
eopnotsupp(void)
{

        return (EOPNOTSUPP);
}

static int
enxio(void)
{
        return (ENXIO);
}

static int
enodev(void)
{
        return (ENODEV);
}
/* Define a dead_cdevsw for use when devices leave unexpectedly. */

#define dead_open       (d_open_t *)enxio
#define dead_close      (d_close_t *)enxio
#define dead_read       (d_read_t *)enxio
#define dead_write      (d_write_t *)enxio
#define dead_ioctl      (d_ioctl_t *)enxio
#define dead_poll       (d_poll_t *)enodev
#define dead_mmap       (d_mmap_t *)enodev

static void
dead_strategy(struct bio *bp)
{

        biofinish(bp, NULL, ENXIO);
}

#define dead_dump       (dumper_t *)enxio
#define dead_kqfilter   (d_kqfilter_t *)enxio

static struct cdevsw dead_cdevsw = {
        .d_version =    D_VERSION,
        .d_flags =      D_NEEDGIANT, /* XXX: does dead_strategy need this ? */
        .d_open =       dead_open,
        .d_close =      dead_close,
        .d_read =       dead_read,
        .d_write =      dead_write,
        .d_ioctl =      dead_ioctl,
        .d_poll =       dead_poll,
        .d_mmap =       dead_mmap,
        .d_strategy =   dead_strategy,
        .d_name =       "dead",
        .d_dump =       dead_dump,
        .d_kqfilter =   dead_kqfilter
};

/* Default methods if driver does not specify method */

#define null_open       (d_open_t *)nullop
#define null_close      (d_close_t *)nullop
#define no_read         (d_read_t *)enodev
#define no_write        (d_write_t *)enodev
#define no_ioctl        (d_ioctl_t *)enodev
#define no_mmap         (d_mmap_t *)enodev
#define no_kqfilter     (d_kqfilter_t *)enodev

static void
no_strategy(struct bio *bp)
{

        biofinish(bp, NULL, ENODEV);
}
static int
no_poll(struct cdev *dev __unused, int events, struct thread *td __unused)
{
        /*
         * Return true for read/write.  If the user asked for something
         * special, return POLLNVAL, so that clients have a way of
         * determining reliably whether or not the extended
         * functionality is present without hard-coding knowledge
         * of specific filesystem implementations.
         * Stay in sync with vop_nopoll().
         */
        if (events & ~POLLSTANDARD)
                return (POLLNVAL);

        return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

#define no_dump         (dumper_t *)enodev
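/*
 * Illustrative sketch (not compiled): what the no_poll() semantics look
 * like from userland.  Polling a device whose driver provides no d_poll
 * method always reports the standard read/write events as ready, while
 * asking for any non-standard bit (e.g. the extended POLLEXTEND) yields
 * POLLNVAL in revents.  The device path below is hypothetical.
 */
#if 0
#include <sys/poll.h>
#include <fcntl.h>
#include <stdio.h>

int
main(void)
{
        struct pollfd pfd;

        pfd.fd = open("/dev/mydev0", O_RDWR);  /* hypothetical device */
        if (pfd.fd < 0)
                return (1);
        pfd.events = POLLIN | POLLEXTEND;      /* POLLEXTEND is non-standard */
        if (poll(&pfd, 1, 0) > 0 && (pfd.revents & POLLNVAL))
                printf("driver has no extended poll support\n");
        return (0);
}
#endif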
static int
giant_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
        struct cdevsw *dsw;
        int retval;

        dsw = dev_refthread(dev);
        if (dsw == NULL)
                return (ENXIO);
        mtx_lock(&Giant);
        retval = dsw->d_gianttrick->d_open(dev, oflags, devtype, td);
        mtx_unlock(&Giant);
        dev_relthread(dev);
        return (retval);
}

static int
giant_fdopen(struct cdev *dev, int oflags, struct thread *td, struct file *fp)
{
        struct cdevsw *dsw;
        int retval;

        dsw = dev_refthread(dev);
        if (dsw == NULL)
                return (ENXIO);
        mtx_lock(&Giant);
        retval = dsw->d_gianttrick->d_fdopen(dev, oflags, td, fp);
        mtx_unlock(&Giant);
        dev_relthread(dev);
        return (retval);
}

static int
giant_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
        struct cdevsw *dsw;
        int retval;

        dsw = dev_refthread(dev);
        if (dsw == NULL)
                return (ENXIO);
        mtx_lock(&Giant);
        retval = dsw->d_gianttrick->d_close(dev, fflag, devtype, td);
        mtx_unlock(&Giant);
        dev_relthread(dev);
        return (retval);
}

static void
giant_strategy(struct bio *bp)
{
        struct cdevsw *dsw;
        struct cdev *dev;

        dev = bp->bio_dev;
        dsw = dev_refthread(dev);
        if (dsw == NULL) {
                biofinish(bp, NULL, ENXIO);
                return;
        }
        mtx_lock(&Giant);
        dsw->d_gianttrick->d_strategy(bp);
        mtx_unlock(&Giant);
        dev_relthread(dev);
}

static int
giant_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
        struct cdevsw *dsw;
        int retval;

        dsw = dev_refthread(dev);
        if (dsw == NULL)
                return (ENXIO);
        mtx_lock(&Giant);
        retval = dsw->d_gianttrick->d_ioctl(dev, cmd, data, fflag, td);
        mtx_unlock(&Giant);
        dev_relthread(dev);
        return (retval);
}

static int
giant_read(struct cdev *dev, struct uio *uio, int ioflag)
{
        struct cdevsw *dsw;
        int retval;

        dsw = dev_refthread(dev);
        if (dsw == NULL)
                return (ENXIO);
        mtx_lock(&Giant);
        retval = dsw->d_gianttrick->d_read(dev, uio, ioflag);
        mtx_unlock(&Giant);
        dev_relthread(dev);
        return (retval);
}

static int
giant_write(struct cdev *dev, struct uio *uio, int ioflag)
{
        struct cdevsw *dsw;
        int retval;

        dsw = dev_refthread(dev);
        if (dsw == NULL)
                return (ENXIO);
        mtx_lock(&Giant);
        retval = dsw->d_gianttrick->d_write(dev, uio, ioflag);
        mtx_unlock(&Giant);
        dev_relthread(dev);
        return (retval);
}

static int
giant_poll(struct cdev *dev, int events, struct thread *td)
{
        struct cdevsw *dsw;
        int retval;

        dsw = dev_refthread(dev);
        if (dsw == NULL)
                return (ENXIO);
        mtx_lock(&Giant);
        retval = dsw->d_gianttrick->d_poll(dev, events, td);
        mtx_unlock(&Giant);
        dev_relthread(dev);
        return (retval);
}

static int
giant_kqfilter(struct cdev *dev, struct knote *kn)
{
        struct cdevsw *dsw;
        int retval;

        dsw = dev_refthread(dev);
        if (dsw == NULL)
                return (ENXIO);
        mtx_lock(&Giant);
        retval = dsw->d_gianttrick->d_kqfilter(dev, kn);
        mtx_unlock(&Giant);
        dev_relthread(dev);
        return (retval);
}

static int
giant_mmap(struct cdev *dev, vm_offset_t offset, vm_paddr_t *paddr,
    int nprot)
{
        struct cdevsw *dsw;
        int retval;

        dsw = dev_refthread(dev);
        if (dsw == NULL)
                return (ENXIO);
        mtx_lock(&Giant);
        retval = dsw->d_gianttrick->d_mmap(dev, offset, paddr, nprot);
        mtx_unlock(&Giant);
        dev_relthread(dev);
        return (retval);
}

static void
notify(struct cdev *dev, const char *ev)
{
        static const char prefix[] = "cdev=";
        char *data;
        int namelen;

        if (cold)
                return;
        namelen = strlen(dev->si_name);
        data = malloc(namelen + sizeof(prefix), M_TEMP, M_WAITOK);
        memcpy(data, prefix, sizeof(prefix) - 1);
        memcpy(data + sizeof(prefix) - 1, dev->si_name, namelen + 1);
        devctl_notify("DEVFS", "CDEV", ev, data);
        free(data, M_TEMP);
}

static void
notify_create(struct cdev *dev)
{

        notify(dev, "CREATE");
}

static void
notify_destroy(struct cdev *dev)
{

        notify(dev, "DESTROY");
}

static struct cdev *
newdev(struct cdevsw *csw, int y, struct cdev *si)
{
        struct cdev *si2;
        dev_t udev;

        mtx_assert(&devmtx, MA_OWNED);
        udev = y;
        if (csw->d_flags & D_NEEDMINOR) {
                /* We may want to return an existing device */
                LIST_FOREACH(si2, &csw->d_devs, si_list) {
                        if (si2->si_drv0 == udev) {
                                dev_free_devlocked(si);
                                return (si2);
                        }
                }
        }
        si->si_drv0 = udev;
        si->si_devsw = csw;
        LIST_INSERT_HEAD(&csw->d_devs, si, si_list);
        return (si);
}

static void
fini_cdevsw(struct cdevsw *devsw)
{
        struct cdevsw *gt;

        if (devsw->d_gianttrick != NULL) {
                gt = devsw->d_gianttrick;
                memcpy(devsw, gt, sizeof *devsw);
                cdevsw_free_devlocked(gt);
                devsw->d_gianttrick = NULL;
        }
        devsw->d_flags &= ~D_INIT;
}
static void
prep_cdevsw(struct cdevsw *devsw)
{
        struct cdevsw *dsw2;

        mtx_assert(&devmtx, MA_OWNED);
        if (devsw->d_flags & D_INIT)
                return;
        if (devsw->d_flags & D_NEEDGIANT) {
                dev_unlock();
                dsw2 = malloc(sizeof *dsw2, M_DEVT, M_WAITOK);
                dev_lock();
        } else
                dsw2 = NULL;
        if (devsw->d_flags & D_INIT) {
                if (dsw2 != NULL)
                        cdevsw_free_devlocked(dsw2);
                return;
        }

        if (devsw->d_version != D_VERSION_01) {
                printf(
                    "WARNING: Device driver \"%s\" has wrong version %s\n",
                    devsw->d_name == NULL ? "???" : devsw->d_name,
                    "and is disabled.  Recompile KLD module.");
                devsw->d_open = dead_open;
                devsw->d_close = dead_close;
                devsw->d_read = dead_read;
                devsw->d_write = dead_write;
                devsw->d_ioctl = dead_ioctl;
                devsw->d_poll = dead_poll;
                devsw->d_mmap = dead_mmap;
                devsw->d_strategy = dead_strategy;
                devsw->d_dump = dead_dump;
                devsw->d_kqfilter = dead_kqfilter;
        }

        if (devsw->d_flags & D_NEEDGIANT) {
                if (devsw->d_gianttrick == NULL) {
                        memcpy(dsw2, devsw, sizeof *dsw2);
                        devsw->d_gianttrick = dsw2;
                        dsw2 = NULL;
                }
        }

#define FIXUP(member, noop, giant)                              \
        do {                                                    \
                if (devsw->member == NULL) {                    \
                        devsw->member = noop;                   \
                } else if (devsw->d_flags & D_NEEDGIANT)        \
                        devsw->member = giant;                  \
        }                                                       \
        while (0)

        FIXUP(d_open,           null_open,      giant_open);
        FIXUP(d_fdopen,         NULL,           giant_fdopen);
        FIXUP(d_close,          null_close,     giant_close);
        FIXUP(d_read,           no_read,        giant_read);
        FIXUP(d_write,          no_write,       giant_write);
        FIXUP(d_ioctl,          no_ioctl,       giant_ioctl);
        FIXUP(d_poll,           no_poll,        giant_poll);
        FIXUP(d_mmap,           no_mmap,        giant_mmap);
        FIXUP(d_strategy,       no_strategy,    giant_strategy);
        FIXUP(d_kqfilter,       no_kqfilter,    giant_kqfilter);

        if (devsw->d_dump == NULL)
                devsw->d_dump = no_dump;

        LIST_INIT(&devsw->d_devs);

        devsw->d_flags |= D_INIT;

        if (dsw2 != NULL)
                cdevsw_free_devlocked(dsw2);
}

static struct cdev *
make_dev_credv(int flags, struct cdevsw *devsw, int unit,
    struct ucred *cr, uid_t uid,
    gid_t gid, int mode, const char *fmt, va_list ap)
{
        struct cdev *dev;
        int i;

        dev = devfs_alloc();
        dev_lock();
        prep_cdevsw(devsw);
        dev = newdev(devsw, unit, dev);
        if (flags & MAKEDEV_REF)
                dev_refl(dev);
        if (dev->si_flags & SI_CHEAPCLONE &&
            dev->si_flags & SI_NAMED) {
                /*
                 * This is allowed as it removes races and generally
                 * simplifies cloning devices.
                 * XXX: still ??
                 */
                dev_unlock_and_free();
                return (dev);
        }
        KASSERT(!(dev->si_flags & SI_NAMED),
            ("make_dev() by driver %s on pre-existing device (min=%x, name=%s)",
            devsw->d_name, dev2unit(dev), devtoname(dev)));

        i = vsnrprintf(dev->__si_namebuf, sizeof dev->__si_namebuf, 32, fmt, ap);
        if (i > (sizeof dev->__si_namebuf - 1)) {
                printf("WARNING: Device name truncated! (%s)\n",
                    dev->__si_namebuf);
        }

        dev->si_flags |= SI_NAMED;
        if (cr != NULL)
                dev->si_cred = crhold(cr);
        else
                dev->si_cred = NULL;
        dev->si_uid = uid;
        dev->si_gid = gid;
        dev->si_mode = mode;

        devfs_create(dev);
        clean_unrhdrl(devfs_inos);
        dev_unlock_and_free();

        notify_create(dev);

        return (dev);
}

struct cdev *
make_dev(struct cdevsw *devsw, int unit, uid_t uid, gid_t gid, int mode,
    const char *fmt, ...)
{
        struct cdev *dev;
        va_list ap;

        va_start(ap, fmt);
        dev = make_dev_credv(0, devsw, unit, NULL, uid, gid, mode, fmt, ap);
        va_end(ap);
        return (dev);
}

struct cdev *
make_dev_cred(struct cdevsw *devsw, int unit, struct ucred *cr, uid_t uid,
    gid_t gid, int mode, const char *fmt, ...)
{
        struct cdev *dev;
        va_list ap;

        va_start(ap, fmt);
        dev = make_dev_credv(0, devsw, unit, cr, uid, gid, mode, fmt, ap);
        va_end(ap);

        return (dev);
}

struct cdev *
make_dev_credf(int flags, struct cdevsw *devsw, int unit,
    struct ucred *cr, uid_t uid,
    gid_t gid, int mode, const char *fmt, ...)
{
        struct cdev *dev;
        va_list ap;

        va_start(ap, fmt);
        dev = make_dev_credv(flags, devsw, unit, cr, uid, gid, mode,
            fmt, ap);
        va_end(ap);

        return (dev);
}
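/*
 * Illustrative sketch (not compiled): typical driver-side use of the
 * make_dev() family.  The "echo" cdevsw, device name and load/unload
 * hooks below are hypothetical; a real driver would also provide
 * d_read/d_write methods and call destroy_dev() (or destroy_dev_sched())
 * on detach.
 */
#if 0
static int
echo_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{

        return (0);
}

static struct cdevsw echo_cdevsw = {
        .d_version =    D_VERSION,
        .d_open =       echo_open,
        .d_name =       "echo",
};

static struct cdev *echo_dev;

static int
echo_load(void)
{

        /* Creates /dev/echo0, owned by root:wheel, mode 0600. */
        echo_dev = make_dev(&echo_cdevsw, 0, UID_ROOT, GID_WHEEL,
            0600, "echo%d", 0);
        return (0);
}

static void
echo_unload(void)
{

        destroy_dev(echo_dev);
}
#endif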
static void
dev_dependsl(struct cdev *pdev, struct cdev *cdev)
{

        cdev->si_parent = pdev;
        cdev->si_flags |= SI_CHILD;
        LIST_INSERT_HEAD(&pdev->si_children, cdev, si_siblings);
}

void
dev_depends(struct cdev *pdev, struct cdev *cdev)
{

        dev_lock();
        dev_dependsl(pdev, cdev);
        dev_unlock();
}

struct cdev *
make_dev_alias(struct cdev *pdev, const char *fmt, ...)
{
        struct cdev *dev;
        va_list ap;
        int i;

        KASSERT(pdev != NULL, ("NULL pdev"));
        dev = devfs_alloc();
        dev_lock();
        dev->si_flags |= SI_ALIAS;
        dev->si_flags |= SI_NAMED;
        va_start(ap, fmt);
        i = vsnrprintf(dev->__si_namebuf, sizeof dev->__si_namebuf, 32, fmt, ap);
        if (i > (sizeof dev->__si_namebuf - 1)) {
                printf("WARNING: Device name truncated! (%s)\n",
                    dev->__si_namebuf);
        }
        va_end(ap);

        devfs_create(dev);
        dev_dependsl(pdev, dev);
        clean_unrhdrl(devfs_inos);
        dev_unlock();

        notify_create(dev);

        return (dev);
}

static void
destroy_devl(struct cdev *dev)
{
        struct cdevsw *csw;
        struct cdev_privdata *p, *p1;

        mtx_assert(&devmtx, MA_OWNED);
        KASSERT(dev->si_flags & SI_NAMED,
            ("WARNING: Driver mistake: destroy_dev on %d\n", dev2unit(dev)));

        devfs_destroy(dev);

        /* Remove name marking */
        dev->si_flags &= ~SI_NAMED;

        /* If we are a child, remove us from the parent's list */
        if (dev->si_flags & SI_CHILD) {
                LIST_REMOVE(dev, si_siblings);
                dev->si_flags &= ~SI_CHILD;
        }

        /* Kill our children */
        while (!LIST_EMPTY(&dev->si_children))
                destroy_devl(LIST_FIRST(&dev->si_children));

        /* Remove from clone list */
        if (dev->si_flags & SI_CLONELIST) {
                LIST_REMOVE(dev, si_clone);
                dev->si_flags &= ~SI_CLONELIST;
        }

        dev->si_refcount++;     /* Avoid race with dev_rel() */
        csw = dev->si_devsw;
        dev->si_devsw = NULL;   /* already NULL for SI_ALIAS */
        while (csw != NULL && csw->d_purge != NULL && dev->si_threadcount) {
                csw->d_purge(dev);
                msleep(csw, &devmtx, PRIBIO, "devprg", hz/10);
                if (dev->si_threadcount)
                        printf("Still %lu threads in %s\n",
                            dev->si_threadcount, devtoname(dev));
        }
        while (dev->si_threadcount != 0) {
                /* Use unique dummy wait ident */
                msleep(&csw, &devmtx, PRIBIO, "devdrn", hz / 10);
        }

        dev_unlock();
        notify_destroy(dev);
        mtx_lock(&cdevpriv_mtx);
        LIST_FOREACH_SAFE(p, &cdev2priv(dev)->cdp_fdpriv, cdpd_list, p1) {
                devfs_destroy_cdevpriv(p);
                mtx_lock(&cdevpriv_mtx);
        }
        mtx_unlock(&cdevpriv_mtx);
        dev_lock();

        dev->si_drv1 = 0;
        dev->si_drv2 = 0;
        bzero(&dev->__si_u, sizeof(dev->__si_u));

        if (!(dev->si_flags & SI_ALIAS)) {
                /* Remove from cdevsw list */
                LIST_REMOVE(dev, si_list);

                /* If cdevsw has no more struct cdev *'s, clean it up */
                if (LIST_EMPTY(&csw->d_devs)) {
                        fini_cdevsw(csw);
                        wakeup(&csw->d_devs);
                }
        }
        dev->si_flags &= ~SI_ALIAS;
        dev->si_refcount--;     /* Avoid race with dev_rel() */

        if (dev->si_refcount > 0) {
                LIST_INSERT_HEAD(&dead_cdevsw.d_devs, dev, si_list);
        } else {
                dev_free_devlocked(dev);
        }
}

void
destroy_dev(struct cdev *dev)
{

        dev_lock();
        destroy_devl(dev);
        dev_unlock_and_free();
}
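/*
 * Illustrative sketch (not compiled): dev_depends() and make_dev_alias()
 * tie one node's lifetime to another's, so destroying the parent also
 * destroys every SI_CHILD node hanging off it.  The "foo" cdevsw names
 * and softc below are hypothetical.
 */
#if 0
struct foo_softc {
        struct cdev *dev;
        struct cdev *ctldev;
};

static void
foo_attach(struct foo_softc *sc)
{

        sc->dev = make_dev(&foo_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600,
            "foo%d", 0);
        sc->ctldev = make_dev(&fooctl_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600,
            "foo%d.ctl", 0);
        /* Destroying sc->dev now also destroys sc->ctldev... */
        dev_depends(sc->dev, sc->ctldev);
        /* ...and this alias, which becomes a child automatically. */
        (void)make_dev_alias(sc->dev, "foo");
}

static void
foo_detach(struct foo_softc *sc)
{

        /* One call tears down /dev/foo0, /dev/foo0.ctl and /dev/foo. */
        destroy_dev(sc->dev);
}
#endif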
const char *
devtoname(struct cdev *dev)
{
        char *p;
        struct cdevsw *csw;
        int mynor;

        if (dev->si_name[0] == '#' || dev->si_name[0] == '\0') {
                p = dev->si_name;
                csw = dev_refthread(dev);
                if (csw != NULL) {
                        sprintf(p, "(%s)", csw->d_name);
                        dev_relthread(dev);
                }
                p += strlen(p);
                mynor = dev2unit(dev);
                if (mynor < 0 || mynor > 255)
                        sprintf(p, "/%#x", (u_int)mynor);
                else
                        sprintf(p, "/%d", mynor);
        }
        return (dev->si_name);
}

int
dev_stdclone(char *name, char **namep, const char *stem, int *unit)
{
        int u, i;

        i = strlen(stem);
        if (bcmp(stem, name, i) != 0)
                return (0);
        if (!isdigit(name[i]))
                return (0);
        u = 0;
        if (name[i] == '0' && isdigit(name[i+1]))
                return (0);
        while (isdigit(name[i])) {
                u *= 10;
                u += name[i++] - '0';
        }
        if (u > 0xffffff)
                return (0);
        *unit = u;
        if (namep)
                *namep = &name[i];
        if (name[i])
                return (2);
        return (1);
}

/*
 * Helper functions for cloning device drivers.
 *
 * The objective here is to make it unnecessary for the device drivers to
 * use rman or similar to manage their unit number space.  Due to the way
 * we do "on-demand" devices, using rman or other "private" methods
 * would be very tricky to lock down properly once we lock down this file.
 *
 * Instead we give the drivers these routines, which put the struct cdev *'s
 * that are to be managed on their own list and give the driver the ability
 * to ask for the first free unit number or for a specific unit number.
 *
 * In addition, these routines support paired devices (pty, nmdm and
 * similar) by respecting a number of "flag" bits in the minor number.
 */

struct clonedevs {
        LIST_HEAD(,cdev) head;
};

void
clone_setup(struct clonedevs **cdp)
{

        *cdp = malloc(sizeof **cdp, M_DEVBUF, M_WAITOK | M_ZERO);
        LIST_INIT(&(*cdp)->head);
}
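/*
 * Illustrative sketch (not compiled): how a cloning driver typically
 * drives these helpers from a dev_clone event handler (registered with
 * EVENTHANDLER_REGISTER(dev_clone, ...) after calling clone_setup()).
 * The "foo" names are hypothetical; note that clone_create() requires
 * D_NEEDMINOR in the cdevsw flags.
 */
#if 0
static struct clonedevs *foo_clones;

static struct cdevsw foo_cdevsw = {
        .d_version =    D_VERSION,
        .d_flags =      D_NEEDMINOR,    /* required by clone_create() */
        .d_name =       "foo",
};

static void
foo_clone(void *arg, struct ucred *cred, char *name, int namelen,
    struct cdev **dev)
{
        int u;

        if (*dev != NULL)
                return;
        /* Accept "foo" (pick the lowest free unit) or "foo<unit>". */
        if (strcmp(name, "foo") == 0)
                u = -1;
        else if (dev_stdclone(name, NULL, "foo", &u) != 1)
                return;
        /* clone_create() returns 0 for an existing unit, 1 for a new one. */
        if (clone_create(&foo_clones, &foo_cdevsw, &u, dev, 0)) {
                *dev = make_dev_credf(MAKEDEV_REF, &foo_cdevsw, u,
                    cred, UID_ROOT, GID_WHEEL, 0600, "foo%d", u);
                if (*dev != NULL)
                        (*dev)->si_flags |= SI_CHEAPCLONE;
        }
}
#endif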
int
clone_create(struct clonedevs **cdp, struct cdevsw *csw, int *up,
    struct cdev **dp, int extra)
{
        struct clonedevs *cd;
        struct cdev *dev, *ndev, *dl, *de;
        int unit, low, u;

        KASSERT(*cdp != NULL,
            ("clone_setup() not called in driver \"%s\"", csw->d_name));
        KASSERT(!(extra & CLONE_UNITMASK),
            ("Illegal extra bits (0x%x) in clone_create", extra));
        KASSERT(*up <= CLONE_UNITMASK,
            ("Too high unit (0x%x) in clone_create", *up));
        KASSERT(csw->d_flags & D_NEEDMINOR,
            ("clone_create() on cdevsw without minor numbers"));

        /*
         * Search the list for several things in one pass:
         *   - a preexisting match, which is returned immediately;
         *   - the lowest free unit number, if we are passed -1, and the
         *     place in the list where the new element should be inserted;
         *   - the place to insert a specified unit number, possibly at
         *     the end of the list.
         */
        unit = *up;
        ndev = devfs_alloc();
        dev_lock();
        prep_cdevsw(csw);
        low = extra;
        de = dl = NULL;
        cd = *cdp;
        LIST_FOREACH(dev, &cd->head, si_clone) {
                KASSERT(dev->si_flags & SI_CLONELIST,
                    ("Dev %p(%s) should be on clonelist", dev, dev->si_name));
                u = dev2unit(dev);
                if (u == (unit | extra)) {
                        *dp = dev;
                        dev_unlock();
                        devfs_free(ndev);
                        return (0);
                }
                if (unit == -1 && u == low) {
                        low++;
                        de = dev;
                        continue;
                } else if (u < (unit | extra)) {
                        de = dev;
                        continue;
                } else if (u > (unit | extra)) {
                        dl = dev;
                        break;
                }
        }
        if (unit == -1)
                unit = low & CLONE_UNITMASK;
        dev = newdev(csw, unit | extra, ndev);
        if (dev->si_flags & SI_CLONELIST) {
                printf("dev %p (%s) is on clonelist\n", dev, dev->si_name);
                printf("unit=%d, low=%d, extra=0x%x\n", unit, low, extra);
                LIST_FOREACH(dev, &cd->head, si_clone) {
                        printf("\t%p %s\n", dev, dev->si_name);
                }
                panic("foo");
        }
        KASSERT(!(dev->si_flags & SI_CLONELIST),
            ("Dev %p(%s) should not be on clonelist", dev, dev->si_name));
        if (dl != NULL)
                LIST_INSERT_BEFORE(dl, dev, si_clone);
        else if (de != NULL)
                LIST_INSERT_AFTER(de, dev, si_clone);
        else
                LIST_INSERT_HEAD(&cd->head, dev, si_clone);
        dev->si_flags |= SI_CLONELIST;
        *up = unit;
        dev_unlock_and_free();
        return (1);
}

/*
 * Kill everything still on the list.  The driver should already have
 * disposed of any softc hung off the struct cdev *'s at this time.
 */
void
clone_cleanup(struct clonedevs **cdp)
{
        struct cdev *dev;
        struct cdev_priv *cp;
        struct clonedevs *cd;

        cd = *cdp;
        if (cd == NULL)
                return;
        dev_lock();
        while (!LIST_EMPTY(&cd->head)) {
                dev = LIST_FIRST(&cd->head);
                LIST_REMOVE(dev, si_clone);
                KASSERT(dev->si_flags & SI_CLONELIST,
                    ("Dev %p(%s) should be on clonelist", dev, dev->si_name));
                dev->si_flags &= ~SI_CLONELIST;
                cp = cdev2priv(dev);
                if (!(cp->cdp_flags & CDP_SCHED_DTR)) {
                        cp->cdp_flags |= CDP_SCHED_DTR;
                        KASSERT(dev->si_flags & SI_NAMED,
                            ("Driver has goofed in cloning underways udev %x",
                            dev->si_drv0));
                        destroy_devl(dev);
                }
        }
        dev_unlock_and_free();
        free(cd, M_DEVBUF);
        *cdp = NULL;
}

static TAILQ_HEAD(, cdev_priv) dev_ddtr =
    TAILQ_HEAD_INITIALIZER(dev_ddtr);
static struct task dev_dtr_task;

static void
destroy_dev_tq(void *ctx, int pending)
{
        struct cdev_priv *cp;
        struct cdev *dev;
        void (*cb)(void *);
        void *cb_arg;

        dev_lock();
        while (!TAILQ_EMPTY(&dev_ddtr)) {
                cp = TAILQ_FIRST(&dev_ddtr);
                dev = &cp->cdp_c;
                KASSERT(cp->cdp_flags & CDP_SCHED_DTR,
                    ("cdev %p in dev_destroy_tq without CDP_SCHED_DTR", cp));
                TAILQ_REMOVE(&dev_ddtr, cp, cdp_dtr_list);
                cb = cp->cdp_dtr_cb;
                cb_arg = cp->cdp_dtr_cb_arg;
                destroy_devl(dev);
                dev_unlock_and_free();
                dev_rel(dev);
                if (cb != NULL)
                        cb(cb_arg);
                dev_lock();
        }
        dev_unlock();
}
/*
 * devmtx shall be locked on entry.  devmtx will be unlocked after
 * function return.
 */
static int
destroy_dev_sched_cbl(struct cdev *dev, void (*cb)(void *), void *arg)
{
        struct cdev_priv *cp;

        mtx_assert(&devmtx, MA_OWNED);
        cp = cdev2priv(dev);
        if (cp->cdp_flags & CDP_SCHED_DTR) {
                dev_unlock();
                return (0);
        }
        dev_refl(dev);
        cp->cdp_flags |= CDP_SCHED_DTR;
        cp->cdp_dtr_cb = cb;
        cp->cdp_dtr_cb_arg = arg;
        TAILQ_INSERT_TAIL(&dev_ddtr, cp, cdp_dtr_list);
        dev_unlock();
        taskqueue_enqueue(taskqueue_swi_giant, &dev_dtr_task);
        return (1);
}

int
destroy_dev_sched_cb(struct cdev *dev, void (*cb)(void *), void *arg)
{
        dev_lock();
        return (destroy_dev_sched_cbl(dev, cb, arg));
}

int
destroy_dev_sched(struct cdev *dev)
{
        return (destroy_dev_sched_cb(dev, NULL, NULL));
}

void
destroy_dev_drain(struct cdevsw *csw)
{

        dev_lock();
        while (!LIST_EMPTY(&csw->d_devs)) {
                msleep(&csw->d_devs, &devmtx, PRIBIO, "devscd", hz/10);
        }
        dev_unlock();
}

void
drain_dev_clone_events(void)
{

        sx_xlock(&clone_drain_lock);
        sx_xunlock(&clone_drain_lock);
}

static void
devdtr_init(void *dummy __unused)
{

        TASK_INIT(&dev_dtr_task, 0, destroy_dev_tq, NULL);
}

SYSINIT(devdtr, SI_SUB_DEVFS, SI_ORDER_SECOND, devdtr_init, NULL);
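/*
 * Illustrative sketch (not compiled): deferred destruction from a context
 * that may itself be running a cdevsw method.  A plain destroy_dev() here
 * would sleep waiting for si_threadcount to drain, which includes the
 * calling thread, so the driver schedules the destruction and gets a
 * callback from the taskqueue once it completes.  The softc, ioctl
 * command and error policy below are hypothetical.
 */
#if 0
#define FOO_SELFDESTRUCT        _IO('f', 1)     /* hypothetical ioctl */

static void
foo_dtr_cb(void *arg)
{
        struct foo_softc *sc = arg;

        /* Safe: no cdevsw method can still be running on this cdev. */
        free(sc, M_DEVBUF);
}

static int
foo_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{

        if (cmd == FOO_SELFDESTRUCT)
                /* Returns 1 if scheduled, 0 if already pending. */
                return (destroy_dev_sched_cb(dev, foo_dtr_cb,
                    dev->si_drv1) ? 0 : EBUSY);
        return (ENOTTY);
}
#endif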