1 /*- 2 * Copyright (c) 1999-2002 Poul-Henning Kamp 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 #include <sys/param.h> 31 #include <sys/kernel.h> 32 #include <sys/systm.h> 33 #include <sys/bus.h> 34 #include <sys/bio.h> 35 #include <sys/lock.h> 36 #include <sys/mutex.h> 37 #include <sys/module.h> 38 #include <sys/malloc.h> 39 #include <sys/conf.h> 40 #include <sys/vnode.h> 41 #include <sys/queue.h> 42 #include <sys/poll.h> 43 #include <sys/sx.h> 44 #include <sys/ctype.h> 45 #include <sys/ucred.h> 46 #include <sys/taskqueue.h> 47 #include <machine/stdarg.h> 48 49 #include <fs/devfs/devfs_int.h> 50 #include <vm/vm.h> 51 52 static MALLOC_DEFINE(M_DEVT, "cdev", "cdev storage"); 53 54 struct mtx devmtx; 55 static void destroy_devl(struct cdev *dev); 56 static int destroy_dev_sched_cbl(struct cdev *dev, 57 void (*cb)(void *), void *arg); 58 static struct cdev *make_dev_credv(int flags, 59 struct cdevsw *devsw, int unit, 60 struct ucred *cr, uid_t uid, gid_t gid, int mode, const char *fmt, 61 va_list ap); 62 63 static struct cdev_priv_list cdevp_free_list = 64 TAILQ_HEAD_INITIALIZER(cdevp_free_list); 65 static SLIST_HEAD(free_cdevsw, cdevsw) cdevsw_gt_post_list = 66 SLIST_HEAD_INITIALIZER(); 67 68 void 69 dev_lock(void) 70 { 71 72 mtx_lock(&devmtx); 73 } 74 75 /* 76 * Free all the memory collected while the cdev mutex was 77 * locked. Since devmtx is after the system map mutex, free() cannot 78 * be called immediately and is postponed until cdev mutex can be 79 * dropped. 80 */ 81 static void 82 dev_unlock_and_free(void) 83 { 84 struct cdev_priv_list cdp_free; 85 struct free_cdevsw csw_free; 86 struct cdev_priv *cdp; 87 struct cdevsw *csw; 88 89 mtx_assert(&devmtx, MA_OWNED); 90 91 /* 92 * Make the local copy of the list heads while the dev_mtx is 93 * held. Free it later. 94 */ 95 TAILQ_INIT(&cdp_free); 96 TAILQ_CONCAT(&cdp_free, &cdevp_free_list, cdp_list); 97 csw_free = cdevsw_gt_post_list; 98 SLIST_INIT(&cdevsw_gt_post_list); 99 100 mtx_unlock(&devmtx); 101 102 while ((cdp = TAILQ_FIRST(&cdp_free)) != NULL) { 103 TAILQ_REMOVE(&cdp_free, cdp, cdp_list); 104 devfs_free(&cdp->cdp_c); 105 } 106 while ((csw = SLIST_FIRST(&csw_free)) != NULL) { 107 SLIST_REMOVE_HEAD(&csw_free, d_postfree_list); 108 free(csw, M_DEVT); 109 } 110 } 111 112 static void 113 dev_free_devlocked(struct cdev *cdev) 114 { 115 struct cdev_priv *cdp; 116 117 mtx_assert(&devmtx, MA_OWNED); 118 cdp = cdev2priv(cdev); 119 TAILQ_INSERT_HEAD(&cdevp_free_list, cdp, cdp_list); 120 } 121 122 static void 123 cdevsw_free_devlocked(struct cdevsw *csw) 124 { 125 126 mtx_assert(&devmtx, MA_OWNED); 127 SLIST_INSERT_HEAD(&cdevsw_gt_post_list, csw, d_postfree_list); 128 } 129 130 void 131 dev_unlock(void) 132 { 133 134 mtx_unlock(&devmtx); 135 } 136 137 void 138 dev_ref(struct cdev *dev) 139 { 140 141 mtx_assert(&devmtx, MA_NOTOWNED); 142 mtx_lock(&devmtx); 143 dev->si_refcount++; 144 mtx_unlock(&devmtx); 145 } 146 147 void 148 dev_refl(struct cdev *dev) 149 { 150 151 mtx_assert(&devmtx, MA_OWNED); 152 dev->si_refcount++; 153 } 154 155 void 156 dev_rel(struct cdev *dev) 157 { 158 int flag = 0; 159 160 mtx_assert(&devmtx, MA_NOTOWNED); 161 dev_lock(); 162 dev->si_refcount--; 163 KASSERT(dev->si_refcount >= 0, 164 ("dev_rel(%s) gave negative count", devtoname(dev))); 165 #if 0 166 if (dev->si_usecount == 0 && 167 (dev->si_flags & SI_CHEAPCLONE) && (dev->si_flags & SI_NAMED)) 168 ; 169 else 170 #endif 171 if (dev->si_devsw == NULL && dev->si_refcount == 0) { 172 LIST_REMOVE(dev, si_list); 173 flag = 1; 174 } 175 dev_unlock(); 176 if (flag) 177 devfs_free(dev); 178 } 179 180 struct cdevsw * 181 dev_refthread(struct cdev *dev) 182 { 183 struct cdevsw *csw; 184 struct cdev_priv *cdp; 185 186 mtx_assert(&devmtx, MA_NOTOWNED); 187 dev_lock(); 188 csw = dev->si_devsw; 189 if (csw != NULL) { 190 cdp = cdev2priv(dev); 191 if ((cdp->cdp_flags & CDP_SCHED_DTR) == 0) 192 dev->si_threadcount++; 193 else 194 csw = NULL; 195 } 196 dev_unlock(); 197 return (csw); 198 } 199 200 struct cdevsw * 201 devvn_refthread(struct vnode *vp, struct cdev **devp) 202 { 203 struct cdevsw *csw; 204 struct cdev_priv *cdp; 205 206 mtx_assert(&devmtx, MA_NOTOWNED); 207 csw = NULL; 208 dev_lock(); 209 *devp = vp->v_rdev; 210 if (*devp != NULL) { 211 cdp = cdev2priv(*devp); 212 if ((cdp->cdp_flags & CDP_SCHED_DTR) == 0) { 213 csw = (*devp)->si_devsw; 214 if (csw != NULL) 215 (*devp)->si_threadcount++; 216 } 217 } 218 dev_unlock(); 219 return (csw); 220 } 221 222 void 223 dev_relthread(struct cdev *dev) 224 { 225 226 mtx_assert(&devmtx, MA_NOTOWNED); 227 dev_lock(); 228 KASSERT(dev->si_threadcount > 0, 229 ("%s threadcount is wrong", dev->si_name)); 230 dev->si_threadcount--; 231 dev_unlock(); 232 } 233 234 int 235 nullop(void) 236 { 237 238 return (0); 239 } 240 241 int 242 eopnotsupp(void) 243 { 244 245 return (EOPNOTSUPP); 246 } 247 248 static int 249 enxio(void) 250 { 251 return (ENXIO); 252 } 253 254 static int 255 enodev(void) 256 { 257 return (ENODEV); 258 } 259 260 /* Define a dead_cdevsw for use when devices leave unexpectedly. */ 261 262 #define dead_open (d_open_t *)enxio 263 #define dead_close (d_close_t *)enxio 264 #define dead_read (d_read_t *)enxio 265 #define dead_write (d_write_t *)enxio 266 #define dead_ioctl (d_ioctl_t *)enxio 267 #define dead_poll (d_poll_t *)enodev 268 #define dead_mmap (d_mmap_t *)enodev 269 270 static void 271 dead_strategy(struct bio *bp) 272 { 273 274 biofinish(bp, NULL, ENXIO); 275 } 276 277 #define dead_dump (dumper_t *)enxio 278 #define dead_kqfilter (d_kqfilter_t *)enxio 279 #define dead_mmap_single (d_mmap_single_t *)enodev 280 281 static struct cdevsw dead_cdevsw = { 282 .d_version = D_VERSION, 283 .d_flags = D_NEEDGIANT, /* XXX: does dead_strategy need this ? */ 284 .d_open = dead_open, 285 .d_close = dead_close, 286 .d_read = dead_read, 287 .d_write = dead_write, 288 .d_ioctl = dead_ioctl, 289 .d_poll = dead_poll, 290 .d_mmap = dead_mmap, 291 .d_strategy = dead_strategy, 292 .d_name = "dead", 293 .d_dump = dead_dump, 294 .d_kqfilter = dead_kqfilter, 295 .d_mmap_single = dead_mmap_single 296 }; 297 298 /* Default methods if driver does not specify method */ 299 300 #define null_open (d_open_t *)nullop 301 #define null_close (d_close_t *)nullop 302 #define no_read (d_read_t *)enodev 303 #define no_write (d_write_t *)enodev 304 #define no_ioctl (d_ioctl_t *)enodev 305 #define no_mmap (d_mmap_t *)enodev 306 #define no_kqfilter (d_kqfilter_t *)enodev 307 #define no_mmap_single (d_mmap_single_t *)enodev 308 309 static void 310 no_strategy(struct bio *bp) 311 { 312 313 biofinish(bp, NULL, ENODEV); 314 } 315 316 static int 317 no_poll(struct cdev *dev __unused, int events, struct thread *td __unused) 318 { 319 320 return (poll_no_poll(events)); 321 } 322 323 #define no_dump (dumper_t *)enodev 324 325 static int 326 giant_open(struct cdev *dev, int oflags, int devtype, struct thread *td) 327 { 328 struct cdevsw *dsw; 329 int retval; 330 331 dsw = dev_refthread(dev); 332 if (dsw == NULL) 333 return (ENXIO); 334 mtx_lock(&Giant); 335 retval = dsw->d_gianttrick->d_open(dev, oflags, devtype, td); 336 mtx_unlock(&Giant); 337 dev_relthread(dev); 338 return (retval); 339 } 340 341 static int 342 giant_fdopen(struct cdev *dev, int oflags, struct thread *td, struct file *fp) 343 { 344 struct cdevsw *dsw; 345 int retval; 346 347 dsw = dev_refthread(dev); 348 if (dsw == NULL) 349 return (ENXIO); 350 mtx_lock(&Giant); 351 retval = dsw->d_gianttrick->d_fdopen(dev, oflags, td, fp); 352 mtx_unlock(&Giant); 353 dev_relthread(dev); 354 return (retval); 355 } 356 357 static int 358 giant_close(struct cdev *dev, int fflag, int devtype, struct thread *td) 359 { 360 struct cdevsw *dsw; 361 int retval; 362 363 dsw = dev_refthread(dev); 364 if (dsw == NULL) 365 return (ENXIO); 366 mtx_lock(&Giant); 367 retval = dsw->d_gianttrick->d_close(dev, fflag, devtype, td); 368 mtx_unlock(&Giant); 369 dev_relthread(dev); 370 return (retval); 371 } 372 373 static void 374 giant_strategy(struct bio *bp) 375 { 376 struct cdevsw *dsw; 377 struct cdev *dev; 378 379 dev = bp->bio_dev; 380 dsw = dev_refthread(dev); 381 if (dsw == NULL) { 382 biofinish(bp, NULL, ENXIO); 383 return; 384 } 385 mtx_lock(&Giant); 386 dsw->d_gianttrick->d_strategy(bp); 387 mtx_unlock(&Giant); 388 dev_relthread(dev); 389 } 390 391 static int 392 giant_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) 393 { 394 struct cdevsw *dsw; 395 int retval; 396 397 dsw = dev_refthread(dev); 398 if (dsw == NULL) 399 return (ENXIO); 400 mtx_lock(&Giant); 401 retval = dsw->d_gianttrick->d_ioctl(dev, cmd, data, fflag, td); 402 mtx_unlock(&Giant); 403 dev_relthread(dev); 404 return (retval); 405 } 406 407 static int 408 giant_read(struct cdev *dev, struct uio *uio, int ioflag) 409 { 410 struct cdevsw *dsw; 411 int retval; 412 413 dsw = dev_refthread(dev); 414 if (dsw == NULL) 415 return (ENXIO); 416 mtx_lock(&Giant); 417 retval = dsw->d_gianttrick->d_read(dev, uio, ioflag); 418 mtx_unlock(&Giant); 419 dev_relthread(dev); 420 return (retval); 421 } 422 423 static int 424 giant_write(struct cdev *dev, struct uio *uio, int ioflag) 425 { 426 struct cdevsw *dsw; 427 int retval; 428 429 dsw = dev_refthread(dev); 430 if (dsw == NULL) 431 return (ENXIO); 432 mtx_lock(&Giant); 433 retval = dsw->d_gianttrick->d_write(dev, uio, ioflag); 434 mtx_unlock(&Giant); 435 dev_relthread(dev); 436 return (retval); 437 } 438 439 static int 440 giant_poll(struct cdev *dev, int events, struct thread *td) 441 { 442 struct cdevsw *dsw; 443 int retval; 444 445 dsw = dev_refthread(dev); 446 if (dsw == NULL) 447 return (ENXIO); 448 mtx_lock(&Giant); 449 retval = dsw->d_gianttrick->d_poll(dev, events, td); 450 mtx_unlock(&Giant); 451 dev_relthread(dev); 452 return (retval); 453 } 454 455 static int 456 giant_kqfilter(struct cdev *dev, struct knote *kn) 457 { 458 struct cdevsw *dsw; 459 int retval; 460 461 dsw = dev_refthread(dev); 462 if (dsw == NULL) 463 return (ENXIO); 464 mtx_lock(&Giant); 465 retval = dsw->d_gianttrick->d_kqfilter(dev, kn); 466 mtx_unlock(&Giant); 467 dev_relthread(dev); 468 return (retval); 469 } 470 471 static int 472 giant_mmap(struct cdev *dev, vm_offset_t offset, vm_paddr_t *paddr, int nprot) 473 { 474 struct cdevsw *dsw; 475 int retval; 476 477 dsw = dev_refthread(dev); 478 if (dsw == NULL) 479 return (ENXIO); 480 mtx_lock(&Giant); 481 retval = dsw->d_gianttrick->d_mmap(dev, offset, paddr, nprot); 482 mtx_unlock(&Giant); 483 dev_relthread(dev); 484 return (retval); 485 } 486 487 static int 488 giant_mmap_single(struct cdev *dev, vm_ooffset_t *offset, vm_size_t size, 489 vm_object_t *object, int nprot) 490 { 491 struct cdevsw *dsw; 492 int retval; 493 494 dsw = dev_refthread(dev); 495 if (dsw == NULL) 496 return (ENXIO); 497 mtx_lock(&Giant); 498 retval = dsw->d_gianttrick->d_mmap_single(dev, offset, size, object, 499 nprot); 500 mtx_unlock(&Giant); 501 dev_relthread(dev); 502 return (retval); 503 } 504 505 static void 506 notify(struct cdev *dev, const char *ev) 507 { 508 static const char prefix[] = "cdev="; 509 char *data; 510 int namelen; 511 512 if (cold) 513 return; 514 namelen = strlen(dev->si_name); 515 data = malloc(namelen + sizeof(prefix), M_TEMP, M_NOWAIT); 516 if (data == NULL) 517 return; 518 memcpy(data, prefix, sizeof(prefix) - 1); 519 memcpy(data + sizeof(prefix) - 1, dev->si_name, namelen + 1); 520 devctl_notify("DEVFS", "CDEV", ev, data); 521 free(data, M_TEMP); 522 } 523 524 static void 525 notify_create(struct cdev *dev) 526 { 527 528 notify(dev, "CREATE"); 529 } 530 531 static void 532 notify_destroy(struct cdev *dev) 533 { 534 535 notify(dev, "DESTROY"); 536 } 537 538 static struct cdev * 539 newdev(struct cdevsw *csw, int unit, struct cdev *si) 540 { 541 struct cdev *si2; 542 543 mtx_assert(&devmtx, MA_OWNED); 544 if (csw->d_flags & D_NEEDMINOR) { 545 /* We may want to return an existing device */ 546 LIST_FOREACH(si2, &csw->d_devs, si_list) { 547 if (dev2unit(si2) == unit) { 548 dev_free_devlocked(si); 549 return (si2); 550 } 551 } 552 } 553 si->si_drv0 = unit; 554 si->si_devsw = csw; 555 LIST_INSERT_HEAD(&csw->d_devs, si, si_list); 556 return (si); 557 } 558 559 static void 560 fini_cdevsw(struct cdevsw *devsw) 561 { 562 struct cdevsw *gt; 563 564 if (devsw->d_gianttrick != NULL) { 565 gt = devsw->d_gianttrick; 566 memcpy(devsw, gt, sizeof *devsw); 567 cdevsw_free_devlocked(gt); 568 devsw->d_gianttrick = NULL; 569 } 570 devsw->d_flags &= ~D_INIT; 571 } 572 573 static void 574 prep_cdevsw(struct cdevsw *devsw) 575 { 576 struct cdevsw *dsw2; 577 578 mtx_assert(&devmtx, MA_OWNED); 579 if (devsw->d_flags & D_INIT) 580 return; 581 if (devsw->d_flags & D_NEEDGIANT) { 582 dev_unlock(); 583 dsw2 = malloc(sizeof *dsw2, M_DEVT, M_WAITOK); 584 dev_lock(); 585 } else 586 dsw2 = NULL; 587 if (devsw->d_flags & D_INIT) { 588 if (dsw2 != NULL) 589 cdevsw_free_devlocked(dsw2); 590 return; 591 } 592 593 if (devsw->d_version != D_VERSION_01 && 594 devsw->d_version != D_VERSION_02) { 595 printf( 596 "WARNING: Device driver \"%s\" has wrong version %s\n", 597 devsw->d_name == NULL ? "???" : devsw->d_name, 598 "and is disabled. Recompile KLD module."); 599 devsw->d_open = dead_open; 600 devsw->d_close = dead_close; 601 devsw->d_read = dead_read; 602 devsw->d_write = dead_write; 603 devsw->d_ioctl = dead_ioctl; 604 devsw->d_poll = dead_poll; 605 devsw->d_mmap = dead_mmap; 606 devsw->d_strategy = dead_strategy; 607 devsw->d_dump = dead_dump; 608 devsw->d_kqfilter = dead_kqfilter; 609 } 610 if (devsw->d_version == D_VERSION_01) 611 devsw->d_mmap_single = NULL; 612 613 if (devsw->d_flags & D_NEEDGIANT) { 614 if (devsw->d_gianttrick == NULL) { 615 memcpy(dsw2, devsw, sizeof *dsw2); 616 devsw->d_gianttrick = dsw2; 617 dsw2 = NULL; 618 } 619 } 620 621 #define FIXUP(member, noop, giant) \ 622 do { \ 623 if (devsw->member == NULL) { \ 624 devsw->member = noop; \ 625 } else if (devsw->d_flags & D_NEEDGIANT) \ 626 devsw->member = giant; \ 627 } \ 628 while (0) 629 630 FIXUP(d_open, null_open, giant_open); 631 FIXUP(d_fdopen, NULL, giant_fdopen); 632 FIXUP(d_close, null_close, giant_close); 633 FIXUP(d_read, no_read, giant_read); 634 FIXUP(d_write, no_write, giant_write); 635 FIXUP(d_ioctl, no_ioctl, giant_ioctl); 636 FIXUP(d_poll, no_poll, giant_poll); 637 FIXUP(d_mmap, no_mmap, giant_mmap); 638 FIXUP(d_strategy, no_strategy, giant_strategy); 639 FIXUP(d_kqfilter, no_kqfilter, giant_kqfilter); 640 FIXUP(d_mmap_single, no_mmap_single, giant_mmap_single); 641 642 if (devsw->d_dump == NULL) devsw->d_dump = no_dump; 643 644 LIST_INIT(&devsw->d_devs); 645 646 devsw->d_flags |= D_INIT; 647 648 if (dsw2 != NULL) 649 cdevsw_free_devlocked(dsw2); 650 } 651 652 struct cdev * 653 make_dev_credv(int flags, struct cdevsw *devsw, int unit, 654 struct ucred *cr, uid_t uid, 655 gid_t gid, int mode, const char *fmt, va_list ap) 656 { 657 struct cdev *dev; 658 int i; 659 660 dev = devfs_alloc(); 661 dev_lock(); 662 prep_cdevsw(devsw); 663 dev = newdev(devsw, unit, dev); 664 if (flags & MAKEDEV_REF) 665 dev_refl(dev); 666 if (dev->si_flags & SI_CHEAPCLONE && 667 dev->si_flags & SI_NAMED) { 668 /* 669 * This is allowed as it removes races and generally 670 * simplifies cloning devices. 671 * XXX: still ?? 672 */ 673 dev_unlock_and_free(); 674 return (dev); 675 } 676 KASSERT(!(dev->si_flags & SI_NAMED), 677 ("make_dev() by driver %s on pre-existing device (min=%x, name=%s)", 678 devsw->d_name, dev2unit(dev), devtoname(dev))); 679 680 i = vsnrprintf(dev->__si_namebuf, sizeof dev->__si_namebuf, 32, fmt, ap); 681 if (i > (sizeof dev->__si_namebuf - 1)) { 682 printf("WARNING: Device name truncated! (%s)\n", 683 dev->__si_namebuf); 684 } 685 686 dev->si_flags |= SI_NAMED; 687 if (cr != NULL) 688 dev->si_cred = crhold(cr); 689 else 690 dev->si_cred = NULL; 691 dev->si_uid = uid; 692 dev->si_gid = gid; 693 dev->si_mode = mode; 694 695 devfs_create(dev); 696 clean_unrhdrl(devfs_inos); 697 dev_unlock_and_free(); 698 699 notify_create(dev); 700 701 return (dev); 702 } 703 704 struct cdev * 705 make_dev(struct cdevsw *devsw, int unit, uid_t uid, gid_t gid, int mode, 706 const char *fmt, ...) 707 { 708 struct cdev *dev; 709 va_list ap; 710 711 va_start(ap, fmt); 712 dev = make_dev_credv(0, devsw, unit, NULL, uid, gid, mode, fmt, ap); 713 va_end(ap); 714 return (dev); 715 } 716 717 struct cdev * 718 make_dev_cred(struct cdevsw *devsw, int unit, struct ucred *cr, uid_t uid, 719 gid_t gid, int mode, const char *fmt, ...) 720 { 721 struct cdev *dev; 722 va_list ap; 723 724 va_start(ap, fmt); 725 dev = make_dev_credv(0, devsw, unit, cr, uid, gid, mode, fmt, ap); 726 va_end(ap); 727 728 return (dev); 729 } 730 731 struct cdev * 732 make_dev_credf(int flags, struct cdevsw *devsw, int unit, 733 struct ucred *cr, uid_t uid, 734 gid_t gid, int mode, const char *fmt, ...) 735 { 736 struct cdev *dev; 737 va_list ap; 738 739 va_start(ap, fmt); 740 dev = make_dev_credv(flags, devsw, unit, cr, uid, gid, mode, 741 fmt, ap); 742 va_end(ap); 743 744 return (dev); 745 } 746 747 static void 748 dev_dependsl(struct cdev *pdev, struct cdev *cdev) 749 { 750 751 cdev->si_parent = pdev; 752 cdev->si_flags |= SI_CHILD; 753 LIST_INSERT_HEAD(&pdev->si_children, cdev, si_siblings); 754 } 755 756 757 void 758 dev_depends(struct cdev *pdev, struct cdev *cdev) 759 { 760 761 dev_lock(); 762 dev_dependsl(pdev, cdev); 763 dev_unlock(); 764 } 765 766 struct cdev * 767 make_dev_alias(struct cdev *pdev, const char *fmt, ...) 768 { 769 struct cdev *dev; 770 va_list ap; 771 int i; 772 773 KASSERT(pdev != NULL, ("NULL pdev")); 774 dev = devfs_alloc(); 775 dev_lock(); 776 dev->si_flags |= SI_ALIAS; 777 dev->si_flags |= SI_NAMED; 778 va_start(ap, fmt); 779 i = vsnrprintf(dev->__si_namebuf, sizeof dev->__si_namebuf, 32, fmt, ap); 780 if (i > (sizeof dev->__si_namebuf - 1)) { 781 printf("WARNING: Device name truncated! (%s)\n", 782 dev->__si_namebuf); 783 } 784 va_end(ap); 785 786 devfs_create(dev); 787 dev_dependsl(pdev, dev); 788 clean_unrhdrl(devfs_inos); 789 dev_unlock(); 790 791 notify_create(dev); 792 793 return (dev); 794 } 795 796 static void 797 destroy_devl(struct cdev *dev) 798 { 799 struct cdevsw *csw; 800 struct cdev_privdata *p, *p1; 801 802 mtx_assert(&devmtx, MA_OWNED); 803 KASSERT(dev->si_flags & SI_NAMED, 804 ("WARNING: Driver mistake: destroy_dev on %d\n", dev2unit(dev))); 805 806 devfs_destroy(dev); 807 808 /* Remove name marking */ 809 dev->si_flags &= ~SI_NAMED; 810 811 /* If we are a child, remove us from the parents list */ 812 if (dev->si_flags & SI_CHILD) { 813 LIST_REMOVE(dev, si_siblings); 814 dev->si_flags &= ~SI_CHILD; 815 } 816 817 /* Kill our children */ 818 while (!LIST_EMPTY(&dev->si_children)) 819 destroy_devl(LIST_FIRST(&dev->si_children)); 820 821 /* Remove from clone list */ 822 if (dev->si_flags & SI_CLONELIST) { 823 LIST_REMOVE(dev, si_clone); 824 dev->si_flags &= ~SI_CLONELIST; 825 } 826 827 dev->si_refcount++; /* Avoid race with dev_rel() */ 828 csw = dev->si_devsw; 829 dev->si_devsw = NULL; /* already NULL for SI_ALIAS */ 830 while (csw != NULL && csw->d_purge != NULL && dev->si_threadcount) { 831 csw->d_purge(dev); 832 msleep(csw, &devmtx, PRIBIO, "devprg", hz/10); 833 if (dev->si_threadcount) 834 printf("Still %lu threads in %s\n", 835 dev->si_threadcount, devtoname(dev)); 836 } 837 while (dev->si_threadcount != 0) { 838 /* Use unique dummy wait ident */ 839 msleep(&csw, &devmtx, PRIBIO, "devdrn", hz / 10); 840 } 841 842 dev_unlock(); 843 notify_destroy(dev); 844 mtx_lock(&cdevpriv_mtx); 845 LIST_FOREACH_SAFE(p, &cdev2priv(dev)->cdp_fdpriv, cdpd_list, p1) { 846 devfs_destroy_cdevpriv(p); 847 mtx_lock(&cdevpriv_mtx); 848 } 849 mtx_unlock(&cdevpriv_mtx); 850 dev_lock(); 851 852 dev->si_drv1 = 0; 853 dev->si_drv2 = 0; 854 bzero(&dev->__si_u, sizeof(dev->__si_u)); 855 856 if (!(dev->si_flags & SI_ALIAS)) { 857 /* Remove from cdevsw list */ 858 LIST_REMOVE(dev, si_list); 859 860 /* If cdevsw has no more struct cdev *'s, clean it */ 861 if (LIST_EMPTY(&csw->d_devs)) { 862 fini_cdevsw(csw); 863 wakeup(&csw->d_devs); 864 } 865 } 866 dev->si_flags &= ~SI_ALIAS; 867 dev->si_refcount--; /* Avoid race with dev_rel() */ 868 869 if (dev->si_refcount > 0) { 870 LIST_INSERT_HEAD(&dead_cdevsw.d_devs, dev, si_list); 871 } else { 872 dev_free_devlocked(dev); 873 } 874 } 875 876 void 877 destroy_dev(struct cdev *dev) 878 { 879 880 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "destroy_dev"); 881 dev_lock(); 882 destroy_devl(dev); 883 dev_unlock_and_free(); 884 } 885 886 const char * 887 devtoname(struct cdev *dev) 888 { 889 890 return (dev->si_name); 891 } 892 893 int 894 dev_stdclone(char *name, char **namep, const char *stem, int *unit) 895 { 896 int u, i; 897 898 i = strlen(stem); 899 if (bcmp(stem, name, i) != 0) 900 return (0); 901 if (!isdigit(name[i])) 902 return (0); 903 u = 0; 904 if (name[i] == '0' && isdigit(name[i+1])) 905 return (0); 906 while (isdigit(name[i])) { 907 u *= 10; 908 u += name[i++] - '0'; 909 } 910 if (u > 0xffffff) 911 return (0); 912 *unit = u; 913 if (namep) 914 *namep = &name[i]; 915 if (name[i]) 916 return (2); 917 return (1); 918 } 919 920 /* 921 * Helper functions for cloning device drivers. 922 * 923 * The objective here is to make it unnecessary for the device drivers to 924 * use rman or similar to manage their unit number space. Due to the way 925 * we do "on-demand" devices, using rman or other "private" methods 926 * will be very tricky to lock down properly once we lock down this file. 927 * 928 * Instead we give the drivers these routines which puts the struct cdev *'s 929 * that are to be managed on their own list, and gives the driver the ability 930 * to ask for the first free unit number or a given specified unit number. 931 * 932 * In addition these routines support paired devices (pty, nmdm and similar) 933 * by respecting a number of "flag" bits in the minor number. 934 * 935 */ 936 937 struct clonedevs { 938 LIST_HEAD(,cdev) head; 939 }; 940 941 void 942 clone_setup(struct clonedevs **cdp) 943 { 944 945 *cdp = malloc(sizeof **cdp, M_DEVBUF, M_WAITOK | M_ZERO); 946 LIST_INIT(&(*cdp)->head); 947 } 948 949 int 950 clone_create(struct clonedevs **cdp, struct cdevsw *csw, int *up, struct cdev **dp, int extra) 951 { 952 struct clonedevs *cd; 953 struct cdev *dev, *ndev, *dl, *de; 954 int unit, low, u; 955 956 KASSERT(*cdp != NULL, 957 ("clone_setup() not called in driver \"%s\"", csw->d_name)); 958 KASSERT(!(extra & CLONE_UNITMASK), 959 ("Illegal extra bits (0x%x) in clone_create", extra)); 960 KASSERT(*up <= CLONE_UNITMASK, 961 ("Too high unit (0x%x) in clone_create", *up)); 962 KASSERT(csw->d_flags & D_NEEDMINOR, 963 ("clone_create() on cdevsw without minor numbers")); 964 965 966 /* 967 * Search the list for a lot of things in one go: 968 * A preexisting match is returned immediately. 969 * The lowest free unit number if we are passed -1, and the place 970 * in the list where we should insert that new element. 971 * The place to insert a specified unit number, if applicable 972 * the end of the list. 973 */ 974 unit = *up; 975 ndev = devfs_alloc(); 976 dev_lock(); 977 prep_cdevsw(csw); 978 low = extra; 979 de = dl = NULL; 980 cd = *cdp; 981 LIST_FOREACH(dev, &cd->head, si_clone) { 982 KASSERT(dev->si_flags & SI_CLONELIST, 983 ("Dev %p(%s) should be on clonelist", dev, dev->si_name)); 984 u = dev2unit(dev); 985 if (u == (unit | extra)) { 986 *dp = dev; 987 dev_unlock(); 988 devfs_free(ndev); 989 return (0); 990 } 991 if (unit == -1 && u == low) { 992 low++; 993 de = dev; 994 continue; 995 } else if (u < (unit | extra)) { 996 de = dev; 997 continue; 998 } else if (u > (unit | extra)) { 999 dl = dev; 1000 break; 1001 } 1002 } 1003 if (unit == -1) 1004 unit = low & CLONE_UNITMASK; 1005 dev = newdev(csw, unit | extra, ndev); 1006 if (dev->si_flags & SI_CLONELIST) { 1007 printf("dev %p (%s) is on clonelist\n", dev, dev->si_name); 1008 printf("unit=%d, low=%d, extra=0x%x\n", unit, low, extra); 1009 LIST_FOREACH(dev, &cd->head, si_clone) { 1010 printf("\t%p %s\n", dev, dev->si_name); 1011 } 1012 panic("foo"); 1013 } 1014 KASSERT(!(dev->si_flags & SI_CLONELIST), 1015 ("Dev %p(%s) should not be on clonelist", dev, dev->si_name)); 1016 if (dl != NULL) 1017 LIST_INSERT_BEFORE(dl, dev, si_clone); 1018 else if (de != NULL) 1019 LIST_INSERT_AFTER(de, dev, si_clone); 1020 else 1021 LIST_INSERT_HEAD(&cd->head, dev, si_clone); 1022 dev->si_flags |= SI_CLONELIST; 1023 *up = unit; 1024 dev_unlock_and_free(); 1025 return (1); 1026 } 1027 1028 /* 1029 * Kill everything still on the list. The driver should already have 1030 * disposed of any softc hung of the struct cdev *'s at this time. 1031 */ 1032 void 1033 clone_cleanup(struct clonedevs **cdp) 1034 { 1035 struct cdev *dev; 1036 struct cdev_priv *cp; 1037 struct clonedevs *cd; 1038 1039 cd = *cdp; 1040 if (cd == NULL) 1041 return; 1042 dev_lock(); 1043 while (!LIST_EMPTY(&cd->head)) { 1044 dev = LIST_FIRST(&cd->head); 1045 LIST_REMOVE(dev, si_clone); 1046 KASSERT(dev->si_flags & SI_CLONELIST, 1047 ("Dev %p(%s) should be on clonelist", dev, dev->si_name)); 1048 dev->si_flags &= ~SI_CLONELIST; 1049 cp = cdev2priv(dev); 1050 if (!(cp->cdp_flags & CDP_SCHED_DTR)) { 1051 cp->cdp_flags |= CDP_SCHED_DTR; 1052 KASSERT(dev->si_flags & SI_NAMED, 1053 ("Driver has goofed in cloning underways udev %x unit %x", dev2udev(dev), dev2unit(dev))); 1054 destroy_devl(dev); 1055 } 1056 } 1057 dev_unlock_and_free(); 1058 free(cd, M_DEVBUF); 1059 *cdp = NULL; 1060 } 1061 1062 static TAILQ_HEAD(, cdev_priv) dev_ddtr = 1063 TAILQ_HEAD_INITIALIZER(dev_ddtr); 1064 static struct task dev_dtr_task; 1065 1066 static void 1067 destroy_dev_tq(void *ctx, int pending) 1068 { 1069 struct cdev_priv *cp; 1070 struct cdev *dev; 1071 void (*cb)(void *); 1072 void *cb_arg; 1073 1074 dev_lock(); 1075 while (!TAILQ_EMPTY(&dev_ddtr)) { 1076 cp = TAILQ_FIRST(&dev_ddtr); 1077 dev = &cp->cdp_c; 1078 KASSERT(cp->cdp_flags & CDP_SCHED_DTR, 1079 ("cdev %p in dev_destroy_tq without CDP_SCHED_DTR", cp)); 1080 TAILQ_REMOVE(&dev_ddtr, cp, cdp_dtr_list); 1081 cb = cp->cdp_dtr_cb; 1082 cb_arg = cp->cdp_dtr_cb_arg; 1083 destroy_devl(dev); 1084 dev_unlock_and_free(); 1085 dev_rel(dev); 1086 if (cb != NULL) 1087 cb(cb_arg); 1088 dev_lock(); 1089 } 1090 dev_unlock(); 1091 } 1092 1093 /* 1094 * devmtx shall be locked on entry. devmtx will be unlocked after 1095 * function return. 1096 */ 1097 static int 1098 destroy_dev_sched_cbl(struct cdev *dev, void (*cb)(void *), void *arg) 1099 { 1100 struct cdev_priv *cp; 1101 1102 mtx_assert(&devmtx, MA_OWNED); 1103 cp = cdev2priv(dev); 1104 if (cp->cdp_flags & CDP_SCHED_DTR) { 1105 dev_unlock(); 1106 return (0); 1107 } 1108 dev_refl(dev); 1109 cp->cdp_flags |= CDP_SCHED_DTR; 1110 cp->cdp_dtr_cb = cb; 1111 cp->cdp_dtr_cb_arg = arg; 1112 TAILQ_INSERT_TAIL(&dev_ddtr, cp, cdp_dtr_list); 1113 dev_unlock(); 1114 taskqueue_enqueue(taskqueue_swi_giant, &dev_dtr_task); 1115 return (1); 1116 } 1117 1118 int 1119 destroy_dev_sched_cb(struct cdev *dev, void (*cb)(void *), void *arg) 1120 { 1121 dev_lock(); 1122 return (destroy_dev_sched_cbl(dev, cb, arg)); 1123 } 1124 1125 int 1126 destroy_dev_sched(struct cdev *dev) 1127 { 1128 return (destroy_dev_sched_cb(dev, NULL, NULL)); 1129 } 1130 1131 void 1132 destroy_dev_drain(struct cdevsw *csw) 1133 { 1134 1135 dev_lock(); 1136 while (!LIST_EMPTY(&csw->d_devs)) { 1137 msleep(&csw->d_devs, &devmtx, PRIBIO, "devscd", hz/10); 1138 } 1139 dev_unlock(); 1140 } 1141 1142 void 1143 drain_dev_clone_events(void) 1144 { 1145 1146 sx_xlock(&clone_drain_lock); 1147 sx_xunlock(&clone_drain_lock); 1148 } 1149 1150 static void 1151 devdtr_init(void *dummy __unused) 1152 { 1153 1154 TASK_INIT(&dev_dtr_task, 0, destroy_dev_tq, NULL); 1155 } 1156 1157 SYSINIT(devdtr, SI_SUB_DEVFS, SI_ORDER_SECOND, devdtr_init, NULL); 1158