/*-
 * Copyright (c) 1999-2002 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/bio.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/vnode.h>
#include <sys/queue.h>
#include <sys/poll.h>
#include <sys/sx.h>
#include <sys/ctype.h>
#include <sys/ucred.h>
#include <sys/taskqueue.h>
#include <machine/stdarg.h>

#include <fs/devfs/devfs_int.h>

static MALLOC_DEFINE(M_DEVT, "cdev", "cdev storage");

struct mtx devmtx;
static void destroy_devl(struct cdev *dev);
static int destroy_dev_sched_cbl(struct cdev *dev,
    void (*cb)(void *), void *arg);
static struct cdev *make_dev_credv(int flags, struct cdevsw *devsw, int unit,
    struct ucred *cr, uid_t uid, gid_t gid, int mode, const char *fmt,
    va_list ap);

static struct cdev_priv_list cdevp_free_list =
    TAILQ_HEAD_INITIALIZER(cdevp_free_list);
static SLIST_HEAD(free_cdevsw, cdevsw) cdevsw_gt_post_list =
    SLIST_HEAD_INITIALIZER(cdevsw_gt_post_list);

void
dev_lock(void)
{

	mtx_lock(&devmtx);
}
/*
 * Free all the memory collected while the cdev mutex was
 * locked.  Since devmtx is after the system map mutex, free() cannot
 * be called immediately and is postponed until the cdev mutex can be
 * dropped.
 */
static void
dev_unlock_and_free(void)
{
	struct cdev_priv_list cdp_free;
	struct free_cdevsw csw_free;
	struct cdev_priv *cdp;
	struct cdevsw *csw;

	mtx_assert(&devmtx, MA_OWNED);

	/*
	 * Make local copies of the list heads while the dev_mtx is
	 * held.  Free them later.
	 */
	TAILQ_INIT(&cdp_free);
	TAILQ_CONCAT(&cdp_free, &cdevp_free_list, cdp_list);
	csw_free = cdevsw_gt_post_list;
	SLIST_INIT(&cdevsw_gt_post_list);

	mtx_unlock(&devmtx);

	while ((cdp = TAILQ_FIRST(&cdp_free)) != NULL) {
		TAILQ_REMOVE(&cdp_free, cdp, cdp_list);
		devfs_free(&cdp->cdp_c);
	}
	while ((csw = SLIST_FIRST(&csw_free)) != NULL) {
		SLIST_REMOVE_HEAD(&csw_free, d_postfree_list);
		free(csw, M_DEVT);
	}
}

static void
dev_free_devlocked(struct cdev *cdev)
{
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_OWNED);
	cdp = cdev2priv(cdev);
	TAILQ_INSERT_HEAD(&cdevp_free_list, cdp, cdp_list);
}

static void
cdevsw_free_devlocked(struct cdevsw *csw)
{

	mtx_assert(&devmtx, MA_OWNED);
	SLIST_INSERT_HEAD(&cdevsw_gt_post_list, csw, d_postfree_list);
}

void
dev_unlock(void)
{

	mtx_unlock(&devmtx);
}

void
dev_ref(struct cdev *dev)
{

	mtx_assert(&devmtx, MA_NOTOWNED);
	mtx_lock(&devmtx);
	dev->si_refcount++;
	mtx_unlock(&devmtx);
}

void
dev_refl(struct cdev *dev)
{

	mtx_assert(&devmtx, MA_OWNED);
	dev->si_refcount++;
}

void
dev_rel(struct cdev *dev)
{
	int flag = 0;

	mtx_assert(&devmtx, MA_NOTOWNED);
	dev_lock();
	dev->si_refcount--;
	KASSERT(dev->si_refcount >= 0,
	    ("dev_rel(%s) gave negative count", devtoname(dev)));
#if 0
	if (dev->si_usecount == 0 &&
	    (dev->si_flags & SI_CHEAPCLONE) && (dev->si_flags & SI_NAMED))
		;
	else
#endif
	if (dev->si_devsw == NULL && dev->si_refcount == 0) {
		LIST_REMOVE(dev, si_list);
		flag = 1;
	}
	dev_unlock();
	if (flag)
		devfs_free(dev);
}

struct cdevsw *
dev_refthread(struct cdev *dev)
{
	struct cdevsw *csw;
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_NOTOWNED);
	dev_lock();
	csw = dev->si_devsw;
	if (csw != NULL) {
		cdp = cdev2priv(dev);
		if ((cdp->cdp_flags & CDP_SCHED_DTR) == 0)
			dev->si_threadcount++;
		else
			csw = NULL;
	}
	dev_unlock();
	return (csw);
}

struct cdevsw *
devvn_refthread(struct vnode *vp, struct cdev **devp)
{
	struct cdevsw *csw;
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_NOTOWNED);
	csw = NULL;
	dev_lock();
	*devp = vp->v_rdev;
	if (*devp != NULL) {
		cdp = cdev2priv(*devp);
		if ((cdp->cdp_flags & CDP_SCHED_DTR) == 0) {
			csw = (*devp)->si_devsw;
			if (csw != NULL)
				(*devp)->si_threadcount++;
		}
	}
	dev_unlock();
	return (csw);
}

void
dev_relthread(struct cdev *dev)
{

	mtx_assert(&devmtx, MA_NOTOWNED);
	dev_lock();
	KASSERT(dev->si_threadcount > 0,
	    ("%s threadcount is wrong", dev->si_name));
	dev->si_threadcount--;
	dev_unlock();
}
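/*
 * Illustrative sketch (not part of the original file): any caller that
 * invokes a cdevsw method directly is expected to bracket the call with
 * dev_refthread()/dev_relthread(), exactly as the giant_*() wrappers
 * below do, so that destroy_dev() can drain the device safely:
 *
 *	struct cdevsw *dsw;
 *	int error;
 *
 *	dsw = dev_refthread(dev);
 *	if (dsw == NULL)
 *		return (ENXIO);		-- device gone or being destroyed
 *	error = dsw->d_ioctl(dev, cmd, data, fflag, td);
 *	dev_relthread(dev);
 *	return (error);
 */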
int
nullop(void)
{

	return (0);
}

int
eopnotsupp(void)
{

	return (EOPNOTSUPP);
}

static int
enxio(void)
{
	return (ENXIO);
}

static int
enodev(void)
{
	return (ENODEV);
}

/* Define a dead_cdevsw for use when devices leave unexpectedly. */

#define dead_open	(d_open_t *)enxio
#define dead_close	(d_close_t *)enxio
#define dead_read	(d_read_t *)enxio
#define dead_write	(d_write_t *)enxio
#define dead_ioctl	(d_ioctl_t *)enxio
#define dead_poll	(d_poll_t *)enodev
#define dead_mmap	(d_mmap_t *)enodev

static void
dead_strategy(struct bio *bp)
{

	biofinish(bp, NULL, ENXIO);
}

#define dead_dump	(dumper_t *)enxio
#define dead_kqfilter	(d_kqfilter_t *)enxio

static struct cdevsw dead_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_NEEDGIANT, /* XXX: does dead_strategy need this ? */
	.d_open =	dead_open,
	.d_close =	dead_close,
	.d_read =	dead_read,
	.d_write =	dead_write,
	.d_ioctl =	dead_ioctl,
	.d_poll =	dead_poll,
	.d_mmap =	dead_mmap,
	.d_strategy =	dead_strategy,
	.d_name =	"dead",
	.d_dump =	dead_dump,
	.d_kqfilter =	dead_kqfilter
};

/* Default methods, used when the driver does not specify its own. */

#define null_open	(d_open_t *)nullop
#define null_close	(d_close_t *)nullop
#define no_read		(d_read_t *)enodev
#define no_write	(d_write_t *)enodev
#define no_ioctl	(d_ioctl_t *)enodev
#define no_mmap		(d_mmap_t *)enodev
#define no_kqfilter	(d_kqfilter_t *)enodev

static void
no_strategy(struct bio *bp)
{

	biofinish(bp, NULL, ENODEV);
}

static int
no_poll(struct cdev *dev __unused, int events, struct thread *td __unused)
{

	return (poll_no_poll(events));
}

#define no_dump		(dumper_t *)enodev
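/*
 * Illustrative sketch (assumption, not part of the original file): a driver
 * may leave most cdevsw slots empty and rely on prep_cdevsw() below to fill
 * them with the defaults above.  For a hypothetical read-only device:
 *
 *	static struct cdevsw bar_cdevsw = {
 *		.d_version =	D_VERSION,
 *		.d_read =	bar_read,	-- hypothetical d_read_t
 *		.d_name =	"bar",
 *	};
 *
 * After prep_cdevsw() runs, open and close succeed as no-ops (null_open,
 * null_close), while write, ioctl, mmap etc. fail with ENODEV (no_write,
 * no_ioctl, no_mmap, ...).
 */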
static int
giant_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
	struct cdevsw *dsw;
	int retval;

	dsw = dev_refthread(dev);
	if (dsw == NULL)
		return (ENXIO);
	mtx_lock(&Giant);
	retval = dsw->d_gianttrick->d_open(dev, oflags, devtype, td);
	mtx_unlock(&Giant);
	dev_relthread(dev);
	return (retval);
}

static int
giant_fdopen(struct cdev *dev, int oflags, struct thread *td, struct file *fp)
{
	struct cdevsw *dsw;
	int retval;

	dsw = dev_refthread(dev);
	if (dsw == NULL)
		return (ENXIO);
	mtx_lock(&Giant);
	retval = dsw->d_gianttrick->d_fdopen(dev, oflags, td, fp);
	mtx_unlock(&Giant);
	dev_relthread(dev);
	return (retval);
}

static int
giant_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
	struct cdevsw *dsw;
	int retval;

	dsw = dev_refthread(dev);
	if (dsw == NULL)
		return (ENXIO);
	mtx_lock(&Giant);
	retval = dsw->d_gianttrick->d_close(dev, fflag, devtype, td);
	mtx_unlock(&Giant);
	dev_relthread(dev);
	return (retval);
}

static void
giant_strategy(struct bio *bp)
{
	struct cdevsw *dsw;
	struct cdev *dev;

	dev = bp->bio_dev;
	dsw = dev_refthread(dev);
	if (dsw == NULL) {
		biofinish(bp, NULL, ENXIO);
		return;
	}
	mtx_lock(&Giant);
	dsw->d_gianttrick->d_strategy(bp);
	mtx_unlock(&Giant);
	dev_relthread(dev);
}

static int
giant_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
{
	struct cdevsw *dsw;
	int retval;

	dsw = dev_refthread(dev);
	if (dsw == NULL)
		return (ENXIO);
	mtx_lock(&Giant);
	retval = dsw->d_gianttrick->d_ioctl(dev, cmd, data, fflag, td);
	mtx_unlock(&Giant);
	dev_relthread(dev);
	return (retval);
}

static int
giant_read(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct cdevsw *dsw;
	int retval;

	dsw = dev_refthread(dev);
	if (dsw == NULL)
		return (ENXIO);
	mtx_lock(&Giant);
	retval = dsw->d_gianttrick->d_read(dev, uio, ioflag);
	mtx_unlock(&Giant);
	dev_relthread(dev);
	return (retval);
}

static int
giant_write(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct cdevsw *dsw;
	int retval;

	dsw = dev_refthread(dev);
	if (dsw == NULL)
		return (ENXIO);
	mtx_lock(&Giant);
	retval = dsw->d_gianttrick->d_write(dev, uio, ioflag);
	mtx_unlock(&Giant);
	dev_relthread(dev);
	return (retval);
}

static int
giant_poll(struct cdev *dev, int events, struct thread *td)
{
	struct cdevsw *dsw;
	int retval;

	dsw = dev_refthread(dev);
	if (dsw == NULL)
		return (ENXIO);
	mtx_lock(&Giant);
	retval = dsw->d_gianttrick->d_poll(dev, events, td);
	mtx_unlock(&Giant);
	dev_relthread(dev);
	return (retval);
}

static int
giant_kqfilter(struct cdev *dev, struct knote *kn)
{
	struct cdevsw *dsw;
	int retval;

	dsw = dev_refthread(dev);
	if (dsw == NULL)
		return (ENXIO);
	mtx_lock(&Giant);
	retval = dsw->d_gianttrick->d_kqfilter(dev, kn);
	mtx_unlock(&Giant);
	dev_relthread(dev);
	return (retval);
}

static int
giant_mmap(struct cdev *dev, vm_offset_t offset, vm_paddr_t *paddr, int nprot)
{
	struct cdevsw *dsw;
	int retval;

	dsw = dev_refthread(dev);
	if (dsw == NULL)
		return (ENXIO);
	mtx_lock(&Giant);
	retval = dsw->d_gianttrick->d_mmap(dev, offset, paddr, nprot);
	mtx_unlock(&Giant);
	dev_relthread(dev);
	return (retval);
}
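/*
 * Illustrative sketch (assumption, not part of the original file): an old
 * Giant-locked driver opts into the wrappers above simply by setting
 * D_NEEDGIANT; the foo_* names are hypothetical.
 *
 *	static struct cdevsw foo_cdevsw = {
 *		.d_version =	D_VERSION,
 *		.d_flags =	D_NEEDGIANT,
 *		.d_open =	foo_open,
 *		.d_ioctl =	foo_ioctl,
 *		.d_name =	"foo",
 *	};
 *
 * prep_cdevsw() below saves a copy of this table in d_gianttrick and points
 * the public slots at giant_open(), giant_ioctl(), etc., so foo_open() and
 * foo_ioctl() are always entered with Giant held.
 */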
static void
notify(struct cdev *dev, const char *ev)
{
	static const char prefix[] = "cdev=";
	char *data;
	int namelen;

	if (cold)
		return;
	namelen = strlen(dev->si_name);
	data = malloc(namelen + sizeof(prefix), M_TEMP, M_WAITOK);
	memcpy(data, prefix, sizeof(prefix) - 1);
	memcpy(data + sizeof(prefix) - 1, dev->si_name, namelen + 1);
	devctl_notify("DEVFS", "CDEV", ev, data);
	free(data, M_TEMP);
}

static void
notify_create(struct cdev *dev)
{

	notify(dev, "CREATE");
}

static void
notify_destroy(struct cdev *dev)
{

	notify(dev, "DESTROY");
}

static struct cdev *
newdev(struct cdevsw *csw, int unit, struct cdev *si)
{
	struct cdev *si2;

	mtx_assert(&devmtx, MA_OWNED);
	if (csw->d_flags & D_NEEDMINOR) {
		/* We may want to return an existing device */
		LIST_FOREACH(si2, &csw->d_devs, si_list) {
			if (dev2unit(si2) == unit) {
				dev_free_devlocked(si);
				return (si2);
			}
		}
	}
	si->si_drv0 = unit;
	si->si_devsw = csw;
	LIST_INSERT_HEAD(&csw->d_devs, si, si_list);
	return (si);
}

static void
fini_cdevsw(struct cdevsw *devsw)
{
	struct cdevsw *gt;

	if (devsw->d_gianttrick != NULL) {
		gt = devsw->d_gianttrick;
		memcpy(devsw, gt, sizeof *devsw);
		cdevsw_free_devlocked(gt);
		devsw->d_gianttrick = NULL;
	}
	devsw->d_flags &= ~D_INIT;
}

static void
prep_cdevsw(struct cdevsw *devsw)
{
	struct cdevsw *dsw2;

	mtx_assert(&devmtx, MA_OWNED);
	if (devsw->d_flags & D_INIT)
		return;
	if (devsw->d_flags & D_NEEDGIANT) {
		dev_unlock();
		dsw2 = malloc(sizeof *dsw2, M_DEVT, M_WAITOK);
		dev_lock();
	} else
		dsw2 = NULL;
	if (devsw->d_flags & D_INIT) {
		if (dsw2 != NULL)
			cdevsw_free_devlocked(dsw2);
		return;
	}

	if (devsw->d_version != D_VERSION_01) {
		printf(
		    "WARNING: Device driver \"%s\" has wrong version %s\n",
		    devsw->d_name == NULL ? "???" : devsw->d_name,
		    "and is disabled.  Recompile KLD module.");
		devsw->d_open = dead_open;
		devsw->d_close = dead_close;
		devsw->d_read = dead_read;
		devsw->d_write = dead_write;
		devsw->d_ioctl = dead_ioctl;
		devsw->d_poll = dead_poll;
		devsw->d_mmap = dead_mmap;
		devsw->d_strategy = dead_strategy;
		devsw->d_dump = dead_dump;
		devsw->d_kqfilter = dead_kqfilter;
	}

	if (devsw->d_flags & D_NEEDGIANT) {
		if (devsw->d_gianttrick == NULL) {
			memcpy(dsw2, devsw, sizeof *dsw2);
			devsw->d_gianttrick = dsw2;
			dsw2 = NULL;
		}
	}

#define FIXUP(member, noop, giant) 				\
	do {							\
		if (devsw->member == NULL) {			\
			devsw->member = noop;			\
		} else if (devsw->d_flags & D_NEEDGIANT)	\
			devsw->member = giant;			\
	}							\
	while (0)

	FIXUP(d_open,		null_open,	giant_open);
	FIXUP(d_fdopen,		NULL,		giant_fdopen);
	FIXUP(d_close,		null_close,	giant_close);
	FIXUP(d_read,		no_read,	giant_read);
	FIXUP(d_write,		no_write,	giant_write);
	FIXUP(d_ioctl,		no_ioctl,	giant_ioctl);
	FIXUP(d_poll,		no_poll,	giant_poll);
	FIXUP(d_mmap,		no_mmap,	giant_mmap);
	FIXUP(d_strategy,	no_strategy,	giant_strategy);
	FIXUP(d_kqfilter,	no_kqfilter,	giant_kqfilter);

	if (devsw->d_dump == NULL)
		devsw->d_dump = no_dump;

	LIST_INIT(&devsw->d_devs);

	devsw->d_flags |= D_INIT;

	if (dsw2 != NULL)
		cdevsw_free_devlocked(dsw2);
}

static struct cdev *
make_dev_credv(int flags, struct cdevsw *devsw, int unit,
    struct ucred *cr, uid_t uid, gid_t gid, int mode, const char *fmt,
    va_list ap)
{
	struct cdev *dev;
	int i;

	dev = devfs_alloc();
	dev_lock();
	prep_cdevsw(devsw);
	dev = newdev(devsw, unit, dev);
	if (flags & MAKEDEV_REF)
		dev_refl(dev);
	if (dev->si_flags & SI_CHEAPCLONE &&
	    dev->si_flags & SI_NAMED) {
		/*
		 * This is allowed as it removes races and generally
		 * simplifies cloning devices.
		 * XXX: still ??
		 */
		dev_unlock_and_free();
		return (dev);
	}
	KASSERT(!(dev->si_flags & SI_NAMED),
	    ("make_dev() by driver %s on pre-existing device (min=%x, name=%s)",
	    devsw->d_name, dev2unit(dev), devtoname(dev)));

	i = vsnrprintf(dev->__si_namebuf, sizeof dev->__si_namebuf, 32, fmt, ap);
	if (i > (sizeof dev->__si_namebuf - 1)) {
		printf("WARNING: Device name truncated! (%s)\n",
		    dev->__si_namebuf);
	}

	dev->si_flags |= SI_NAMED;
	if (cr != NULL)
		dev->si_cred = crhold(cr);
	else
		dev->si_cred = NULL;
	dev->si_uid = uid;
	dev->si_gid = gid;
	dev->si_mode = mode;

	devfs_create(dev);
	clean_unrhdrl(devfs_inos);
	dev_unlock_and_free();

	notify_create(dev);

	return (dev);
}

struct cdev *
make_dev(struct cdevsw *devsw, int unit, uid_t uid, gid_t gid, int mode,
    const char *fmt, ...)
{
	struct cdev *dev;
	va_list ap;

	va_start(ap, fmt);
	dev = make_dev_credv(0, devsw, unit, NULL, uid, gid, mode, fmt, ap);
	va_end(ap);
	return (dev);
}
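/*
 * Illustrative sketch (assumption, not part of the original file): typical
 * attach code creates its node with make_dev(); foo_cdevsw, sc and unit are
 * hypothetical, while UID_ROOT and GID_WHEEL come from <sys/conf.h>.
 *
 *	struct cdev *dev;
 *
 *	dev = make_dev(&foo_cdevsw, unit, UID_ROOT, GID_WHEEL, 0600,
 *	    "foo%d", unit);
 *	dev->si_drv1 = sc;		-- hang the softc off the cdev
 *
 * make_dev_cred() additionally records a credential on the cdev, and
 * make_dev_credf() takes flags, e.g. MAKEDEV_REF to return the cdev with a
 * reference already held (see make_dev_credv() above).
 */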
struct cdev *
make_dev_cred(struct cdevsw *devsw, int unit, struct ucred *cr, uid_t uid,
    gid_t gid, int mode, const char *fmt, ...)
{
	struct cdev *dev;
	va_list ap;

	va_start(ap, fmt);
	dev = make_dev_credv(0, devsw, unit, cr, uid, gid, mode, fmt, ap);
	va_end(ap);

	return (dev);
}

struct cdev *
make_dev_credf(int flags, struct cdevsw *devsw, int unit,
    struct ucred *cr, uid_t uid,
    gid_t gid, int mode, const char *fmt, ...)
{
	struct cdev *dev;
	va_list ap;

	va_start(ap, fmt);
	dev = make_dev_credv(flags, devsw, unit, cr, uid, gid, mode,
	    fmt, ap);
	va_end(ap);

	return (dev);
}

static void
dev_dependsl(struct cdev *pdev, struct cdev *cdev)
{

	cdev->si_parent = pdev;
	cdev->si_flags |= SI_CHILD;
	LIST_INSERT_HEAD(&pdev->si_children, cdev, si_siblings);
}

void
dev_depends(struct cdev *pdev, struct cdev *cdev)
{

	dev_lock();
	dev_dependsl(pdev, cdev);
	dev_unlock();
}

struct cdev *
make_dev_alias(struct cdev *pdev, const char *fmt, ...)
{
	struct cdev *dev;
	va_list ap;
	int i;

	KASSERT(pdev != NULL, ("NULL pdev"));
	dev = devfs_alloc();
	dev_lock();
	dev->si_flags |= SI_ALIAS;
	dev->si_flags |= SI_NAMED;
	va_start(ap, fmt);
	i = vsnrprintf(dev->__si_namebuf, sizeof dev->__si_namebuf, 32, fmt, ap);
	if (i > (sizeof dev->__si_namebuf - 1)) {
		printf("WARNING: Device name truncated! (%s)\n",
		    dev->__si_namebuf);
	}
	va_end(ap);

	devfs_create(dev);
	dev_dependsl(pdev, dev);
	clean_unrhdrl(devfs_inos);
	dev_unlock();

	notify_create(dev);

	return (dev);
}
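/*
 * Illustrative sketch (assumption, not part of the original file): an alias
 * is a second name for an existing cdev, and dev_depends() ties the two
 * lifetimes together:
 *
 *	alias = make_dev_alias(dev, "foo");	-- legacy name, hypothetical
 *
 * Destroying dev now also destroys the alias, because destroy_devl() below
 * recursively kills every SI_CHILD entry on the parent's si_children list.
 */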
static void
destroy_devl(struct cdev *dev)
{
	struct cdevsw *csw;
	struct cdev_privdata *p, *p1;

	mtx_assert(&devmtx, MA_OWNED);
	KASSERT(dev->si_flags & SI_NAMED,
	    ("WARNING: Driver mistake: destroy_dev on %d\n", dev2unit(dev)));

	devfs_destroy(dev);

	/* Remove name marking */
	dev->si_flags &= ~SI_NAMED;

	/* If we are a child, remove us from the parent's list */
	if (dev->si_flags & SI_CHILD) {
		LIST_REMOVE(dev, si_siblings);
		dev->si_flags &= ~SI_CHILD;
	}

	/* Kill our children */
	while (!LIST_EMPTY(&dev->si_children))
		destroy_devl(LIST_FIRST(&dev->si_children));

	/* Remove from clone list */
	if (dev->si_flags & SI_CLONELIST) {
		LIST_REMOVE(dev, si_clone);
		dev->si_flags &= ~SI_CLONELIST;
	}

	dev->si_refcount++;	/* Avoid race with dev_rel() */
	csw = dev->si_devsw;
	dev->si_devsw = NULL;	/* already NULL for SI_ALIAS */
	while (csw != NULL && csw->d_purge != NULL && dev->si_threadcount) {
		csw->d_purge(dev);
		msleep(csw, &devmtx, PRIBIO, "devprg", hz/10);
		if (dev->si_threadcount)
			printf("Still %lu threads in %s\n",
			    dev->si_threadcount, devtoname(dev));
	}
	while (dev->si_threadcount != 0) {
		/* Use unique dummy wait ident */
		msleep(&csw, &devmtx, PRIBIO, "devdrn", hz / 10);
	}

	dev_unlock();
	notify_destroy(dev);
	mtx_lock(&cdevpriv_mtx);
	LIST_FOREACH_SAFE(p, &cdev2priv(dev)->cdp_fdpriv, cdpd_list, p1) {
		devfs_destroy_cdevpriv(p);
		mtx_lock(&cdevpriv_mtx);
	}
	mtx_unlock(&cdevpriv_mtx);
	dev_lock();

	dev->si_drv1 = 0;
	dev->si_drv2 = 0;
	bzero(&dev->__si_u, sizeof(dev->__si_u));

	if (!(dev->si_flags & SI_ALIAS)) {
		/* Remove from cdevsw list */
		LIST_REMOVE(dev, si_list);

		/* If cdevsw has no more struct cdev *'s, clean it */
		if (LIST_EMPTY(&csw->d_devs)) {
			fini_cdevsw(csw);
			wakeup(&csw->d_devs);
		}
	}
	dev->si_flags &= ~SI_ALIAS;
	dev->si_refcount--;	/* Avoid race with dev_rel() */

	if (dev->si_refcount > 0) {
		LIST_INSERT_HEAD(&dead_cdevsw.d_devs, dev, si_list);
	} else {
		dev_free_devlocked(dev);
	}
}

void
destroy_dev(struct cdev *dev)
{

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "destroy_dev");
	dev_lock();
	destroy_devl(dev);
	dev_unlock_and_free();
}

const char *
devtoname(struct cdev *dev)
{

	return (dev->si_name);
}

int
dev_stdclone(char *name, char **namep, const char *stem, int *unit)
{
	int u, i;

	i = strlen(stem);
	if (bcmp(stem, name, i) != 0)
		return (0);
	if (!isdigit(name[i]))
		return (0);
	u = 0;
	if (name[i] == '0' && isdigit(name[i+1]))
		return (0);
	while (isdigit(name[i])) {
		u *= 10;
		u += name[i++] - '0';
	}
	if (u > 0xffffff)
		return (0);
	*unit = u;
	if (namep)
		*namep = &name[i];
	if (name[i])
		return (2);
	return (1);
}
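/*
 * Illustrative sketch (assumption, not part of the original file): how a
 * clone handler might use dev_stdclone() to parse "<stem><unit>" names:
 *
 *	int unit;
 *	char *rest;
 *
 *	switch (dev_stdclone(name, &rest, "foo", &unit)) {
 *	case 1:		-- exact match, e.g. "foo3": unit == 3
 *		...
 *	case 2:		-- prefix match, e.g. "foo3x": rest points at "x"
 *		...
 *	default:	-- 0: wrong stem, no digits, or a leading zero
 *		...
 *	}
 */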
/*
 * Helper functions for cloning device drivers.
 *
 * The objective here is to make it unnecessary for the device drivers to
 * use rman or similar to manage their unit number space.  Due to the way
 * we do "on-demand" devices, using rman or other "private" methods
 * will be very tricky to lock down properly once we lock down this file.
 *
 * Instead we give the drivers these routines, which put the struct cdev *'s
 * that are to be managed on their own list, and give the driver the ability
 * to ask for the first free unit number or a specific unit number.
 *
 * In addition these routines support paired devices (pty, nmdm and similar)
 * by respecting a number of "flag" bits in the minor number.
 */

struct clonedevs {
	LIST_HEAD(,cdev)	head;
};

void
clone_setup(struct clonedevs **cdp)
{

	*cdp = malloc(sizeof **cdp, M_DEVBUF, M_WAITOK | M_ZERO);
	LIST_INIT(&(*cdp)->head);
}

int
clone_create(struct clonedevs **cdp, struct cdevsw *csw, int *up, struct cdev **dp, int extra)
{
	struct clonedevs *cd;
	struct cdev *dev, *ndev, *dl, *de;
	int unit, low, u;

	KASSERT(*cdp != NULL,
	    ("clone_setup() not called in driver \"%s\"", csw->d_name));
	KASSERT(!(extra & CLONE_UNITMASK),
	    ("Illegal extra bits (0x%x) in clone_create", extra));
	KASSERT(*up <= CLONE_UNITMASK,
	    ("Too high unit (0x%x) in clone_create", *up));
	KASSERT(csw->d_flags & D_NEEDMINOR,
	    ("clone_create() on cdevsw without minor numbers"));

	/*
	 * Search the list for a lot of things in one go:
	 *   A preexisting match is returned immediately.
	 *   The lowest free unit number if we are passed -1, and the place
	 *     in the list where we should insert that new element.
	 *   The place to insert a specified unit number, if applicable
	 *     the end of the list.
	 */
	unit = *up;
	ndev = devfs_alloc();
	dev_lock();
	prep_cdevsw(csw);
	low = extra;
	de = dl = NULL;
	cd = *cdp;
	LIST_FOREACH(dev, &cd->head, si_clone) {
		KASSERT(dev->si_flags & SI_CLONELIST,
		    ("Dev %p(%s) should be on clonelist", dev, dev->si_name));
		u = dev2unit(dev);
		if (u == (unit | extra)) {
			*dp = dev;
			dev_unlock();
			devfs_free(ndev);
			return (0);
		}
		if (unit == -1 && u == low) {
			low++;
			de = dev;
			continue;
		} else if (u < (unit | extra)) {
			de = dev;
			continue;
		} else if (u > (unit | extra)) {
			dl = dev;
			break;
		}
	}
	if (unit == -1)
		unit = low & CLONE_UNITMASK;
	dev = newdev(csw, unit | extra, ndev);
	if (dev->si_flags & SI_CLONELIST) {
		printf("dev %p (%s) is on clonelist\n", dev, dev->si_name);
		printf("unit=%d, low=%d, extra=0x%x\n", unit, low, extra);
		LIST_FOREACH(dev, &cd->head, si_clone) {
			printf("\t%p %s\n", dev, dev->si_name);
		}
		panic("foo");
	}
	KASSERT(!(dev->si_flags & SI_CLONELIST),
	    ("Dev %p(%s) should not be on clonelist", dev, dev->si_name));
	if (dl != NULL)
		LIST_INSERT_BEFORE(dl, dev, si_clone);
	else if (de != NULL)
		LIST_INSERT_AFTER(de, dev, si_clone);
	else
		LIST_INSERT_HEAD(&cd->head, dev, si_clone);
	dev->si_flags |= SI_CLONELIST;
	*up = unit;
	dev_unlock_and_free();
	return (1);
}

/*
 * Kill everything still on the list.  The driver should already have
 * disposed of any softc hung off the struct cdev *'s at this time.
 */
void
clone_cleanup(struct clonedevs **cdp)
{
	struct cdev *dev;
	struct cdev_priv *cp;
	struct clonedevs *cd;

	cd = *cdp;
	if (cd == NULL)
		return;
	dev_lock();
	while (!LIST_EMPTY(&cd->head)) {
		dev = LIST_FIRST(&cd->head);
		LIST_REMOVE(dev, si_clone);
		KASSERT(dev->si_flags & SI_CLONELIST,
		    ("Dev %p(%s) should be on clonelist", dev, dev->si_name));
		dev->si_flags &= ~SI_CLONELIST;
		cp = cdev2priv(dev);
		if (!(cp->cdp_flags & CDP_SCHED_DTR)) {
			cp->cdp_flags |= CDP_SCHED_DTR;
			KASSERT(dev->si_flags & SI_NAMED,
			    ("Driver has goofed in cloning underways udev %x unit %x",
			    dev2udev(dev), dev2unit(dev)));
			destroy_devl(dev);
		}
	}
	dev_unlock_and_free();
	free(cd, M_DEVBUF);
	*cdp = NULL;
}
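/*
 * Illustrative sketch (assumption, not part of the original file): a cloning
 * driver keeps one struct clonedevs, set up once with clone_setup(), and
 * consults it whenever a new instance is wanted; the foo_* names are
 * hypothetical.
 *
 *	u = -1;		-- first free unit; or a unit from dev_stdclone()
 *	if (clone_create(&foo_clones, &foo_cdevsw, &u, &dev, 0) != 0)
 *		dev = make_dev_credf(MAKEDEV_REF, &foo_cdevsw, u, NULL,
 *		    UID_ROOT, GID_WHEEL, 0600, "foo%d", u);
 *
 * A return of 0 means an existing cdev was found and returned in dev; 1
 * means the unit was reserved and the driver must create the node itself.
 * clone_cleanup(&foo_clones) in the unload path destroys whatever is left.
 */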
static TAILQ_HEAD(, cdev_priv) dev_ddtr =
    TAILQ_HEAD_INITIALIZER(dev_ddtr);
static struct task dev_dtr_task;

static void
destroy_dev_tq(void *ctx, int pending)
{
	struct cdev_priv *cp;
	struct cdev *dev;
	void (*cb)(void *);
	void *cb_arg;

	dev_lock();
	while (!TAILQ_EMPTY(&dev_ddtr)) {
		cp = TAILQ_FIRST(&dev_ddtr);
		dev = &cp->cdp_c;
		KASSERT(cp->cdp_flags & CDP_SCHED_DTR,
		    ("cdev %p in dev_destroy_tq without CDP_SCHED_DTR", cp));
		TAILQ_REMOVE(&dev_ddtr, cp, cdp_dtr_list);
		cb = cp->cdp_dtr_cb;
		cb_arg = cp->cdp_dtr_cb_arg;
		destroy_devl(dev);
		dev_unlock_and_free();
		dev_rel(dev);
		if (cb != NULL)
			cb(cb_arg);
		dev_lock();
	}
	dev_unlock();
}

/*
 * devmtx shall be locked on entry.  devmtx will be unlocked after
 * function return.
 */
static int
destroy_dev_sched_cbl(struct cdev *dev, void (*cb)(void *), void *arg)
{
	struct cdev_priv *cp;

	mtx_assert(&devmtx, MA_OWNED);
	cp = cdev2priv(dev);
	if (cp->cdp_flags & CDP_SCHED_DTR) {
		dev_unlock();
		return (0);
	}
	dev_refl(dev);
	cp->cdp_flags |= CDP_SCHED_DTR;
	cp->cdp_dtr_cb = cb;
	cp->cdp_dtr_cb_arg = arg;
	TAILQ_INSERT_TAIL(&dev_ddtr, cp, cdp_dtr_list);
	dev_unlock();
	taskqueue_enqueue(taskqueue_swi_giant, &dev_dtr_task);
	return (1);
}

int
destroy_dev_sched_cb(struct cdev *dev, void (*cb)(void *), void *arg)
{

	dev_lock();
	return (destroy_dev_sched_cbl(dev, cb, arg));
}

int
destroy_dev_sched(struct cdev *dev)
{

	return (destroy_dev_sched_cb(dev, NULL, NULL));
}

void
destroy_dev_drain(struct cdevsw *csw)
{

	dev_lock();
	while (!LIST_EMPTY(&csw->d_devs)) {
		msleep(&csw->d_devs, &devmtx, PRIBIO, "devscd", hz/10);
	}
	dev_unlock();
}

void
drain_dev_clone_events(void)
{

	sx_xlock(&clone_drain_lock);
	sx_xunlock(&clone_drain_lock);
}

static void
devdtr_init(void *dummy __unused)
{

	TASK_INIT(&dev_dtr_task, 0, destroy_dev_tq, NULL);
}

SYSINIT(devdtr, SI_SUB_DEVFS, SI_ORDER_SECOND, devdtr_init, NULL);
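/*
 * Illustrative sketch (assumption, not part of the original file): a driver
 * that must get rid of a device from a context where destroy_dev() may not
 * sleep, e.g. from its own d_close method (where the calling thread still
 * holds a threadcount reference that destroy_dev() would wait for), defers
 * the destruction to the taskqueue above; the foo_* names are hypothetical.
 *
 *	static void
 *	foo_dtr(void *arg)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		free(sc, M_FOO);	-- cdev is fully gone at this point
 *	}
 *
 *	destroy_dev_sched_cb(sc->dev, foo_dtr, sc);
 *
 * A detach or unload path can then use destroy_dev_drain(&foo_cdevsw) to
 * wait until all cdevs of that cdevsw have actually been destroyed.
 */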