1 /*- 2 * Copyright (c) 1999-2002 Poul-Henning Kamp 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 #include <sys/param.h> 31 #include <sys/kernel.h> 32 #include <sys/systm.h> 33 #include <sys/bus.h> 34 #include <sys/bio.h> 35 #include <sys/lock.h> 36 #include <sys/mutex.h> 37 #include <sys/module.h> 38 #include <sys/malloc.h> 39 #include <sys/conf.h> 40 #include <sys/vnode.h> 41 #include <sys/queue.h> 42 #include <sys/poll.h> 43 #include <sys/sx.h> 44 #include <sys/ctype.h> 45 #include <sys/tty.h> 46 #include <sys/ucred.h> 47 #include <sys/taskqueue.h> 48 #include <machine/stdarg.h> 49 50 #include <fs/devfs/devfs_int.h> 51 52 static MALLOC_DEFINE(M_DEVT, "cdev", "cdev storage"); 53 54 struct mtx devmtx; 55 static void destroy_devl(struct cdev *dev); 56 static int destroy_dev_sched_cbl(struct cdev *dev, 57 void (*cb)(void *), void *arg); 58 static struct cdev *make_dev_credv(int flags, 59 struct cdevsw *devsw, int minornr, 60 struct ucred *cr, uid_t uid, gid_t gid, int mode, const char *fmt, 61 va_list ap); 62 63 static struct cdev_priv_list cdevp_free_list = 64 TAILQ_HEAD_INITIALIZER(cdevp_free_list); 65 static SLIST_HEAD(free_cdevsw, cdevsw) cdevsw_gt_post_list = 66 SLIST_HEAD_INITIALIZER(); 67 68 void 69 dev_lock(void) 70 { 71 72 mtx_lock(&devmtx); 73 } 74 75 /* 76 * Free all the memory collected while the cdev mutex was 77 * locked. Since devmtx is after the system map mutex, free() cannot 78 * be called immediately and is postponed until cdev mutex can be 79 * dropped. 80 */ 81 static void 82 dev_unlock_and_free(void) 83 { 84 struct cdev_priv_list cdp_free; 85 struct free_cdevsw csw_free; 86 struct cdev_priv *cdp; 87 struct cdevsw *csw; 88 89 mtx_assert(&devmtx, MA_OWNED); 90 91 /* 92 * Make the local copy of the list heads while the dev_mtx is 93 * held. Free it later. 94 */ 95 TAILQ_INIT(&cdp_free); 96 TAILQ_CONCAT(&cdp_free, &cdevp_free_list, cdp_list); 97 csw_free = cdevsw_gt_post_list; 98 SLIST_INIT(&cdevsw_gt_post_list); 99 100 mtx_unlock(&devmtx); 101 102 while ((cdp = TAILQ_FIRST(&cdp_free)) != NULL) { 103 TAILQ_REMOVE(&cdp_free, cdp, cdp_list); 104 devfs_free(&cdp->cdp_c); 105 } 106 while ((csw = SLIST_FIRST(&csw_free)) != NULL) { 107 SLIST_REMOVE_HEAD(&csw_free, d_postfree_list); 108 free(csw, M_DEVT); 109 } 110 } 111 112 static void 113 dev_free_devlocked(struct cdev *cdev) 114 { 115 struct cdev_priv *cdp; 116 117 mtx_assert(&devmtx, MA_OWNED); 118 cdp = cdev2priv(cdev); 119 TAILQ_INSERT_HEAD(&cdevp_free_list, cdp, cdp_list); 120 } 121 122 static void 123 cdevsw_free_devlocked(struct cdevsw *csw) 124 { 125 126 mtx_assert(&devmtx, MA_OWNED); 127 SLIST_INSERT_HEAD(&cdevsw_gt_post_list, csw, d_postfree_list); 128 } 129 130 void 131 dev_unlock(void) 132 { 133 134 mtx_unlock(&devmtx); 135 } 136 137 void 138 dev_ref(struct cdev *dev) 139 { 140 141 mtx_assert(&devmtx, MA_NOTOWNED); 142 mtx_lock(&devmtx); 143 dev->si_refcount++; 144 mtx_unlock(&devmtx); 145 } 146 147 void 148 dev_refl(struct cdev *dev) 149 { 150 151 mtx_assert(&devmtx, MA_OWNED); 152 dev->si_refcount++; 153 } 154 155 void 156 dev_rel(struct cdev *dev) 157 { 158 int flag = 0; 159 160 mtx_assert(&devmtx, MA_NOTOWNED); 161 dev_lock(); 162 dev->si_refcount--; 163 KASSERT(dev->si_refcount >= 0, 164 ("dev_rel(%s) gave negative count", devtoname(dev))); 165 #if 0 166 if (dev->si_usecount == 0 && 167 (dev->si_flags & SI_CHEAPCLONE) && (dev->si_flags & SI_NAMED)) 168 ; 169 else 170 #endif 171 if (dev->si_devsw == NULL && dev->si_refcount == 0) { 172 LIST_REMOVE(dev, si_list); 173 flag = 1; 174 } 175 dev_unlock(); 176 if (flag) 177 devfs_free(dev); 178 } 179 180 struct cdevsw * 181 dev_refthread(struct cdev *dev) 182 { 183 struct cdevsw *csw; 184 struct cdev_priv *cdp; 185 186 mtx_assert(&devmtx, MA_NOTOWNED); 187 dev_lock(); 188 csw = dev->si_devsw; 189 if (csw != NULL) { 190 cdp = cdev2priv(dev); 191 if ((cdp->cdp_flags & CDP_SCHED_DTR) == 0) 192 dev->si_threadcount++; 193 else 194 csw = NULL; 195 } 196 dev_unlock(); 197 return (csw); 198 } 199 200 struct cdevsw * 201 devvn_refthread(struct vnode *vp, struct cdev **devp) 202 { 203 struct cdevsw *csw; 204 struct cdev_priv *cdp; 205 206 mtx_assert(&devmtx, MA_NOTOWNED); 207 csw = NULL; 208 dev_lock(); 209 *devp = vp->v_rdev; 210 if (*devp != NULL) { 211 cdp = cdev2priv(*devp); 212 if ((cdp->cdp_flags & CDP_SCHED_DTR) == 0) { 213 csw = (*devp)->si_devsw; 214 if (csw != NULL) 215 (*devp)->si_threadcount++; 216 } 217 } 218 dev_unlock(); 219 return (csw); 220 } 221 222 void 223 dev_relthread(struct cdev *dev) 224 { 225 226 mtx_assert(&devmtx, MA_NOTOWNED); 227 dev_lock(); 228 KASSERT(dev->si_threadcount > 0, 229 ("%s threadcount is wrong", dev->si_name)); 230 dev->si_threadcount--; 231 dev_unlock(); 232 } 233 234 int 235 nullop(void) 236 { 237 238 return (0); 239 } 240 241 int 242 eopnotsupp(void) 243 { 244 245 return (EOPNOTSUPP); 246 } 247 248 static int 249 enxio(void) 250 { 251 return (ENXIO); 252 } 253 254 static int 255 enodev(void) 256 { 257 return (ENODEV); 258 } 259 260 /* Define a dead_cdevsw for use when devices leave unexpectedly. */ 261 262 #define dead_open (d_open_t *)enxio 263 #define dead_close (d_close_t *)enxio 264 #define dead_read (d_read_t *)enxio 265 #define dead_write (d_write_t *)enxio 266 #define dead_ioctl (d_ioctl_t *)enxio 267 #define dead_poll (d_poll_t *)enodev 268 #define dead_mmap (d_mmap_t *)enodev 269 270 static void 271 dead_strategy(struct bio *bp) 272 { 273 274 biofinish(bp, NULL, ENXIO); 275 } 276 277 #define dead_dump (dumper_t *)enxio 278 #define dead_kqfilter (d_kqfilter_t *)enxio 279 280 static struct cdevsw dead_cdevsw = { 281 .d_version = D_VERSION, 282 .d_flags = D_NEEDGIANT, /* XXX: does dead_strategy need this ? */ 283 .d_open = dead_open, 284 .d_close = dead_close, 285 .d_read = dead_read, 286 .d_write = dead_write, 287 .d_ioctl = dead_ioctl, 288 .d_poll = dead_poll, 289 .d_mmap = dead_mmap, 290 .d_strategy = dead_strategy, 291 .d_name = "dead", 292 .d_dump = dead_dump, 293 .d_kqfilter = dead_kqfilter 294 }; 295 296 /* Default methods if driver does not specify method */ 297 298 #define null_open (d_open_t *)nullop 299 #define null_close (d_close_t *)nullop 300 #define no_read (d_read_t *)enodev 301 #define no_write (d_write_t *)enodev 302 #define no_ioctl (d_ioctl_t *)enodev 303 #define no_mmap (d_mmap_t *)enodev 304 #define no_kqfilter (d_kqfilter_t *)enodev 305 306 static void 307 no_strategy(struct bio *bp) 308 { 309 310 biofinish(bp, NULL, ENODEV); 311 } 312 313 static int 314 no_poll(struct cdev *dev __unused, int events, struct thread *td __unused) 315 { 316 /* 317 * Return true for read/write. If the user asked for something 318 * special, return POLLNVAL, so that clients have a way of 319 * determining reliably whether or not the extended 320 * functionality is present without hard-coding knowledge 321 * of specific filesystem implementations. 322 * Stay in sync with vop_nopoll(). 323 */ 324 if (events & ~POLLSTANDARD) 325 return (POLLNVAL); 326 327 return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)); 328 } 329 330 #define no_dump (dumper_t *)enodev 331 332 static int 333 giant_open(struct cdev *dev, int oflags, int devtype, struct thread *td) 334 { 335 struct cdevsw *dsw; 336 int retval; 337 338 dsw = dev_refthread(dev); 339 if (dsw == NULL) 340 return (ENXIO); 341 mtx_lock(&Giant); 342 retval = dsw->d_gianttrick->d_open(dev, oflags, devtype, td); 343 mtx_unlock(&Giant); 344 dev_relthread(dev); 345 return (retval); 346 } 347 348 static int 349 giant_fdopen(struct cdev *dev, int oflags, struct thread *td, struct file *fp) 350 { 351 struct cdevsw *dsw; 352 int retval; 353 354 dsw = dev_refthread(dev); 355 if (dsw == NULL) 356 return (ENXIO); 357 mtx_lock(&Giant); 358 retval = dsw->d_gianttrick->d_fdopen(dev, oflags, td, fp); 359 mtx_unlock(&Giant); 360 dev_relthread(dev); 361 return (retval); 362 } 363 364 static int 365 giant_close(struct cdev *dev, int fflag, int devtype, struct thread *td) 366 { 367 struct cdevsw *dsw; 368 int retval; 369 370 dsw = dev_refthread(dev); 371 if (dsw == NULL) 372 return (ENXIO); 373 mtx_lock(&Giant); 374 retval = dsw->d_gianttrick->d_close(dev, fflag, devtype, td); 375 mtx_unlock(&Giant); 376 dev_relthread(dev); 377 return (retval); 378 } 379 380 static void 381 giant_strategy(struct bio *bp) 382 { 383 struct cdevsw *dsw; 384 struct cdev *dev; 385 386 dev = bp->bio_dev; 387 dsw = dev_refthread(dev); 388 if (dsw == NULL) { 389 biofinish(bp, NULL, ENXIO); 390 return; 391 } 392 mtx_lock(&Giant); 393 dsw->d_gianttrick->d_strategy(bp); 394 mtx_unlock(&Giant); 395 dev_relthread(dev); 396 } 397 398 static int 399 giant_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) 400 { 401 struct cdevsw *dsw; 402 int retval; 403 404 dsw = dev_refthread(dev); 405 if (dsw == NULL) 406 return (ENXIO); 407 mtx_lock(&Giant); 408 retval = dsw->d_gianttrick->d_ioctl(dev, cmd, data, fflag, td); 409 mtx_unlock(&Giant); 410 dev_relthread(dev); 411 return (retval); 412 } 413 414 static int 415 giant_read(struct cdev *dev, struct uio *uio, int ioflag) 416 { 417 struct cdevsw *dsw; 418 int retval; 419 420 dsw = dev_refthread(dev); 421 if (dsw == NULL) 422 return (ENXIO); 423 mtx_lock(&Giant); 424 retval = dsw->d_gianttrick->d_read(dev, uio, ioflag); 425 mtx_unlock(&Giant); 426 dev_relthread(dev); 427 return (retval); 428 } 429 430 static int 431 giant_write(struct cdev *dev, struct uio *uio, int ioflag) 432 { 433 struct cdevsw *dsw; 434 int retval; 435 436 dsw = dev_refthread(dev); 437 if (dsw == NULL) 438 return (ENXIO); 439 mtx_lock(&Giant); 440 retval = dsw->d_gianttrick->d_write(dev, uio, ioflag); 441 mtx_unlock(&Giant); 442 dev_relthread(dev); 443 return (retval); 444 } 445 446 static int 447 giant_poll(struct cdev *dev, int events, struct thread *td) 448 { 449 struct cdevsw *dsw; 450 int retval; 451 452 dsw = dev_refthread(dev); 453 if (dsw == NULL) 454 return (ENXIO); 455 mtx_lock(&Giant); 456 retval = dsw->d_gianttrick->d_poll(dev, events, td); 457 mtx_unlock(&Giant); 458 dev_relthread(dev); 459 return (retval); 460 } 461 462 static int 463 giant_kqfilter(struct cdev *dev, struct knote *kn) 464 { 465 struct cdevsw *dsw; 466 int retval; 467 468 dsw = dev_refthread(dev); 469 if (dsw == NULL) 470 return (ENXIO); 471 mtx_lock(&Giant); 472 retval = dsw->d_gianttrick->d_kqfilter(dev, kn); 473 mtx_unlock(&Giant); 474 dev_relthread(dev); 475 return (retval); 476 } 477 478 static int 479 giant_mmap(struct cdev *dev, vm_offset_t offset, vm_paddr_t *paddr, int nprot) 480 { 481 struct cdevsw *dsw; 482 int retval; 483 484 dsw = dev_refthread(dev); 485 if (dsw == NULL) 486 return (ENXIO); 487 mtx_lock(&Giant); 488 retval = dsw->d_gianttrick->d_mmap(dev, offset, paddr, nprot); 489 mtx_unlock(&Giant); 490 dev_relthread(dev); 491 return (retval); 492 } 493 494 495 static void 496 notify(struct cdev *dev, const char *ev) 497 { 498 static const char prefix[] = "cdev="; 499 char *data; 500 int namelen; 501 502 if (cold) 503 return; 504 namelen = strlen(dev->si_name); 505 data = malloc(namelen + sizeof(prefix), M_TEMP, M_WAITOK); 506 memcpy(data, prefix, sizeof(prefix) - 1); 507 memcpy(data + sizeof(prefix) - 1, dev->si_name, namelen + 1); 508 devctl_notify("DEVFS", "CDEV", ev, data); 509 free(data, M_TEMP); 510 } 511 512 static void 513 notify_create(struct cdev *dev) 514 { 515 516 notify(dev, "CREATE"); 517 } 518 519 static void 520 notify_destroy(struct cdev *dev) 521 { 522 523 notify(dev, "DESTROY"); 524 } 525 526 static struct cdev * 527 newdev(struct cdevsw *csw, int y, struct cdev *si) 528 { 529 struct cdev *si2; 530 dev_t udev; 531 532 mtx_assert(&devmtx, MA_OWNED); 533 udev = y; 534 if (csw->d_flags & D_NEEDMINOR) { 535 /* We may want to return an existing device */ 536 LIST_FOREACH(si2, &csw->d_devs, si_list) { 537 if (si2->si_drv0 == udev) { 538 dev_free_devlocked(si); 539 return (si2); 540 } 541 } 542 } 543 si->si_drv0 = udev; 544 si->si_devsw = csw; 545 LIST_INSERT_HEAD(&csw->d_devs, si, si_list); 546 return (si); 547 } 548 549 #define UMINORMASK 0xffff00ffU 550 551 int 552 uminor(dev_t dev) 553 { 554 return (dev & UMINORMASK); 555 } 556 557 int 558 umajor(dev_t dev) 559 { 560 return ((dev & ~UMINORMASK) >> 8); 561 } 562 563 static void 564 fini_cdevsw(struct cdevsw *devsw) 565 { 566 struct cdevsw *gt; 567 568 if (devsw->d_gianttrick != NULL) { 569 gt = devsw->d_gianttrick; 570 memcpy(devsw, gt, sizeof *devsw); 571 cdevsw_free_devlocked(gt); 572 devsw->d_gianttrick = NULL; 573 } 574 devsw->d_flags &= ~D_INIT; 575 } 576 577 static void 578 prep_cdevsw(struct cdevsw *devsw) 579 { 580 struct cdevsw *dsw2; 581 582 mtx_assert(&devmtx, MA_OWNED); 583 if (devsw->d_flags & D_INIT) 584 return; 585 if (devsw->d_flags & D_NEEDGIANT) { 586 dev_unlock(); 587 dsw2 = malloc(sizeof *dsw2, M_DEVT, M_WAITOK); 588 dev_lock(); 589 } else 590 dsw2 = NULL; 591 if (devsw->d_flags & D_INIT) { 592 if (dsw2 != NULL) 593 cdevsw_free_devlocked(dsw2); 594 return; 595 } 596 597 if (devsw->d_version != D_VERSION_01) { 598 printf( 599 "WARNING: Device driver \"%s\" has wrong version %s\n", 600 devsw->d_name == NULL ? "???" : devsw->d_name, 601 "and is disabled. Recompile KLD module."); 602 devsw->d_open = dead_open; 603 devsw->d_close = dead_close; 604 devsw->d_read = dead_read; 605 devsw->d_write = dead_write; 606 devsw->d_ioctl = dead_ioctl; 607 devsw->d_poll = dead_poll; 608 devsw->d_mmap = dead_mmap; 609 devsw->d_strategy = dead_strategy; 610 devsw->d_dump = dead_dump; 611 devsw->d_kqfilter = dead_kqfilter; 612 } 613 614 if (devsw->d_flags & D_TTY) { 615 if (devsw->d_ioctl == NULL) devsw->d_ioctl = ttyioctl; 616 if (devsw->d_read == NULL) devsw->d_read = ttyread; 617 if (devsw->d_write == NULL) devsw->d_write = ttywrite; 618 if (devsw->d_kqfilter == NULL) devsw->d_kqfilter = ttykqfilter; 619 if (devsw->d_poll == NULL) devsw->d_poll = ttypoll; 620 } 621 622 if (devsw->d_flags & D_NEEDGIANT) { 623 if (devsw->d_gianttrick == NULL) { 624 memcpy(dsw2, devsw, sizeof *dsw2); 625 devsw->d_gianttrick = dsw2; 626 dsw2 = NULL; 627 } 628 } 629 630 #define FIXUP(member, noop, giant) \ 631 do { \ 632 if (devsw->member == NULL) { \ 633 devsw->member = noop; \ 634 } else if (devsw->d_flags & D_NEEDGIANT) \ 635 devsw->member = giant; \ 636 } \ 637 while (0) 638 639 FIXUP(d_open, null_open, giant_open); 640 FIXUP(d_fdopen, NULL, giant_fdopen); 641 FIXUP(d_close, null_close, giant_close); 642 FIXUP(d_read, no_read, giant_read); 643 FIXUP(d_write, no_write, giant_write); 644 FIXUP(d_ioctl, no_ioctl, giant_ioctl); 645 FIXUP(d_poll, no_poll, giant_poll); 646 FIXUP(d_mmap, no_mmap, giant_mmap); 647 FIXUP(d_strategy, no_strategy, giant_strategy); 648 FIXUP(d_kqfilter, no_kqfilter, giant_kqfilter); 649 650 if (devsw->d_dump == NULL) devsw->d_dump = no_dump; 651 652 LIST_INIT(&devsw->d_devs); 653 654 devsw->d_flags |= D_INIT; 655 656 if (dsw2 != NULL) 657 cdevsw_free_devlocked(dsw2); 658 } 659 660 struct cdev * 661 make_dev_credv(int flags, struct cdevsw *devsw, int minornr, 662 struct ucred *cr, uid_t uid, 663 gid_t gid, int mode, const char *fmt, va_list ap) 664 { 665 struct cdev *dev; 666 int i; 667 668 dev = devfs_alloc(); 669 dev_lock(); 670 prep_cdevsw(devsw); 671 dev = newdev(devsw, minornr, dev); 672 if (flags & MAKEDEV_REF) 673 dev_refl(dev); 674 if (dev->si_flags & SI_CHEAPCLONE && 675 dev->si_flags & SI_NAMED) { 676 /* 677 * This is allowed as it removes races and generally 678 * simplifies cloning devices. 679 * XXX: still ?? 680 */ 681 dev_unlock_and_free(); 682 return (dev); 683 } 684 KASSERT(!(dev->si_flags & SI_NAMED), 685 ("make_dev() by driver %s on pre-existing device (min=%x, name=%s)", 686 devsw->d_name, minor(dev), devtoname(dev))); 687 688 i = vsnrprintf(dev->__si_namebuf, sizeof dev->__si_namebuf, 32, fmt, ap); 689 if (i > (sizeof dev->__si_namebuf - 1)) { 690 printf("WARNING: Device name truncated! (%s)\n", 691 dev->__si_namebuf); 692 } 693 694 dev->si_flags |= SI_NAMED; 695 #ifdef MAC 696 if (cr != NULL) 697 dev->si_cred = crhold(cr); 698 else 699 #endif 700 dev->si_cred = NULL; 701 dev->si_uid = uid; 702 dev->si_gid = gid; 703 dev->si_mode = mode; 704 705 devfs_create(dev); 706 clean_unrhdrl(devfs_inos); 707 dev_unlock_and_free(); 708 709 notify_create(dev); 710 711 return (dev); 712 } 713 714 struct cdev * 715 make_dev(struct cdevsw *devsw, int minornr, uid_t uid, gid_t gid, int mode, 716 const char *fmt, ...) 717 { 718 struct cdev *dev; 719 va_list ap; 720 721 va_start(ap, fmt); 722 dev = make_dev_credv(0, devsw, minornr, NULL, uid, gid, mode, fmt, ap); 723 va_end(ap); 724 return (dev); 725 } 726 727 struct cdev * 728 make_dev_cred(struct cdevsw *devsw, int minornr, struct ucred *cr, uid_t uid, 729 gid_t gid, int mode, const char *fmt, ...) 730 { 731 struct cdev *dev; 732 va_list ap; 733 734 va_start(ap, fmt); 735 dev = make_dev_credv(0, devsw, minornr, cr, uid, gid, mode, fmt, ap); 736 va_end(ap); 737 738 return (dev); 739 } 740 741 struct cdev * 742 make_dev_credf(int flags, struct cdevsw *devsw, int minornr, 743 struct ucred *cr, uid_t uid, 744 gid_t gid, int mode, const char *fmt, ...) 745 { 746 struct cdev *dev; 747 va_list ap; 748 749 va_start(ap, fmt); 750 dev = make_dev_credv(flags, devsw, minornr, cr, uid, gid, mode, 751 fmt, ap); 752 va_end(ap); 753 754 return (dev); 755 } 756 757 static void 758 dev_dependsl(struct cdev *pdev, struct cdev *cdev) 759 { 760 761 cdev->si_parent = pdev; 762 cdev->si_flags |= SI_CHILD; 763 LIST_INSERT_HEAD(&pdev->si_children, cdev, si_siblings); 764 } 765 766 767 void 768 dev_depends(struct cdev *pdev, struct cdev *cdev) 769 { 770 771 dev_lock(); 772 dev_dependsl(pdev, cdev); 773 dev_unlock(); 774 } 775 776 struct cdev * 777 make_dev_alias(struct cdev *pdev, const char *fmt, ...) 778 { 779 struct cdev *dev; 780 va_list ap; 781 int i; 782 783 KASSERT(pdev != NULL, ("NULL pdev")); 784 dev = devfs_alloc(); 785 dev_lock(); 786 dev->si_flags |= SI_ALIAS; 787 dev->si_flags |= SI_NAMED; 788 va_start(ap, fmt); 789 i = vsnrprintf(dev->__si_namebuf, sizeof dev->__si_namebuf, 32, fmt, ap); 790 if (i > (sizeof dev->__si_namebuf - 1)) { 791 printf("WARNING: Device name truncated! (%s)\n", 792 dev->__si_namebuf); 793 } 794 va_end(ap); 795 796 devfs_create(dev); 797 dev_dependsl(pdev, dev); 798 clean_unrhdrl(devfs_inos); 799 dev_unlock(); 800 801 notify_create(dev); 802 803 return (dev); 804 } 805 806 static void 807 destroy_devl(struct cdev *dev) 808 { 809 struct cdevsw *csw; 810 struct cdev_privdata *p, *p1; 811 812 mtx_assert(&devmtx, MA_OWNED); 813 KASSERT(dev->si_flags & SI_NAMED, 814 ("WARNING: Driver mistake: destroy_dev on %d\n", minor(dev))); 815 816 devfs_destroy(dev); 817 818 /* Remove name marking */ 819 dev->si_flags &= ~SI_NAMED; 820 821 /* If we are a child, remove us from the parents list */ 822 if (dev->si_flags & SI_CHILD) { 823 LIST_REMOVE(dev, si_siblings); 824 dev->si_flags &= ~SI_CHILD; 825 } 826 827 /* Kill our children */ 828 while (!LIST_EMPTY(&dev->si_children)) 829 destroy_devl(LIST_FIRST(&dev->si_children)); 830 831 /* Remove from clone list */ 832 if (dev->si_flags & SI_CLONELIST) { 833 LIST_REMOVE(dev, si_clone); 834 dev->si_flags &= ~SI_CLONELIST; 835 } 836 837 dev->si_refcount++; /* Avoid race with dev_rel() */ 838 csw = dev->si_devsw; 839 dev->si_devsw = NULL; /* already NULL for SI_ALIAS */ 840 while (csw != NULL && csw->d_purge != NULL && dev->si_threadcount) { 841 csw->d_purge(dev); 842 msleep(csw, &devmtx, PRIBIO, "devprg", hz/10); 843 if (dev->si_threadcount) 844 printf("Still %lu threads in %s\n", 845 dev->si_threadcount, devtoname(dev)); 846 } 847 while (dev->si_threadcount != 0) { 848 /* Use unique dummy wait ident */ 849 msleep(&csw, &devmtx, PRIBIO, "devdrn", hz / 10); 850 } 851 852 dev_unlock(); 853 notify_destroy(dev); 854 mtx_lock(&cdevpriv_mtx); 855 LIST_FOREACH_SAFE(p, &cdev2priv(dev)->cdp_fdpriv, cdpd_list, p1) { 856 devfs_destroy_cdevpriv(p); 857 mtx_lock(&cdevpriv_mtx); 858 } 859 mtx_unlock(&cdevpriv_mtx); 860 dev_lock(); 861 862 dev->si_drv1 = 0; 863 dev->si_drv2 = 0; 864 bzero(&dev->__si_u, sizeof(dev->__si_u)); 865 866 if (!(dev->si_flags & SI_ALIAS)) { 867 /* Remove from cdevsw list */ 868 LIST_REMOVE(dev, si_list); 869 870 /* If cdevsw has no more struct cdev *'s, clean it */ 871 if (LIST_EMPTY(&csw->d_devs)) { 872 fini_cdevsw(csw); 873 wakeup(&csw->d_devs); 874 } 875 } 876 dev->si_flags &= ~SI_ALIAS; 877 dev->si_refcount--; /* Avoid race with dev_rel() */ 878 879 if (dev->si_refcount > 0) { 880 LIST_INSERT_HEAD(&dead_cdevsw.d_devs, dev, si_list); 881 } else { 882 dev_free_devlocked(dev); 883 } 884 } 885 886 void 887 destroy_dev(struct cdev *dev) 888 { 889 890 dev_lock(); 891 destroy_devl(dev); 892 dev_unlock_and_free(); 893 } 894 895 const char * 896 devtoname(struct cdev *dev) 897 { 898 char *p; 899 struct cdevsw *csw; 900 int mynor; 901 902 if (dev->si_name[0] == '#' || dev->si_name[0] == '\0') { 903 p = dev->si_name; 904 csw = dev_refthread(dev); 905 if (csw != NULL) { 906 sprintf(p, "(%s)", csw->d_name); 907 dev_relthread(dev); 908 } 909 p += strlen(p); 910 mynor = minor(dev); 911 if (mynor < 0 || mynor > 255) 912 sprintf(p, "/%#x", (u_int)mynor); 913 else 914 sprintf(p, "/%d", mynor); 915 } 916 return (dev->si_name); 917 } 918 919 int 920 dev_stdclone(char *name, char **namep, const char *stem, int *unit) 921 { 922 int u, i; 923 924 i = strlen(stem); 925 if (bcmp(stem, name, i) != 0) 926 return (0); 927 if (!isdigit(name[i])) 928 return (0); 929 u = 0; 930 if (name[i] == '0' && isdigit(name[i+1])) 931 return (0); 932 while (isdigit(name[i])) { 933 u *= 10; 934 u += name[i++] - '0'; 935 } 936 if (u > 0xffffff) 937 return (0); 938 *unit = u; 939 if (namep) 940 *namep = &name[i]; 941 if (name[i]) 942 return (2); 943 return (1); 944 } 945 946 /* 947 * Helper functions for cloning device drivers. 948 * 949 * The objective here is to make it unnecessary for the device drivers to 950 * use rman or similar to manage their unit number space. Due to the way 951 * we do "on-demand" devices, using rman or other "private" methods 952 * will be very tricky to lock down properly once we lock down this file. 953 * 954 * Instead we give the drivers these routines which puts the struct cdev *'s 955 * that are to be managed on their own list, and gives the driver the ability 956 * to ask for the first free unit number or a given specified unit number. 957 * 958 * In addition these routines support paired devices (pty, nmdm and similar) 959 * by respecting a number of "flag" bits in the minor number. 960 * 961 */ 962 963 struct clonedevs { 964 LIST_HEAD(,cdev) head; 965 }; 966 967 void 968 clone_setup(struct clonedevs **cdp) 969 { 970 971 *cdp = malloc(sizeof **cdp, M_DEVBUF, M_WAITOK | M_ZERO); 972 LIST_INIT(&(*cdp)->head); 973 } 974 975 int 976 clone_create(struct clonedevs **cdp, struct cdevsw *csw, int *up, struct cdev **dp, int extra) 977 { 978 struct clonedevs *cd; 979 struct cdev *dev, *ndev, *dl, *de; 980 int unit, low, u; 981 982 KASSERT(*cdp != NULL, 983 ("clone_setup() not called in driver \"%s\"", csw->d_name)); 984 KASSERT(!(extra & CLONE_UNITMASK), 985 ("Illegal extra bits (0x%x) in clone_create", extra)); 986 KASSERT(*up <= CLONE_UNITMASK, 987 ("Too high unit (0x%x) in clone_create", *up)); 988 KASSERT(csw->d_flags & D_NEEDMINOR, 989 ("clone_create() on cdevsw without minor numbers")); 990 991 992 /* 993 * Search the list for a lot of things in one go: 994 * A preexisting match is returned immediately. 995 * The lowest free unit number if we are passed -1, and the place 996 * in the list where we should insert that new element. 997 * The place to insert a specified unit number, if applicable 998 * the end of the list. 999 */ 1000 unit = *up; 1001 ndev = devfs_alloc(); 1002 dev_lock(); 1003 prep_cdevsw(csw); 1004 low = extra; 1005 de = dl = NULL; 1006 cd = *cdp; 1007 LIST_FOREACH(dev, &cd->head, si_clone) { 1008 KASSERT(dev->si_flags & SI_CLONELIST, 1009 ("Dev %p(%s) should be on clonelist", dev, dev->si_name)); 1010 u = dev2unit(dev); 1011 if (u == (unit | extra)) { 1012 *dp = dev; 1013 dev_unlock(); 1014 devfs_free(ndev); 1015 return (0); 1016 } 1017 if (unit == -1 && u == low) { 1018 low++; 1019 de = dev; 1020 continue; 1021 } else if (u < (unit | extra)) { 1022 de = dev; 1023 continue; 1024 } else if (u > (unit | extra)) { 1025 dl = dev; 1026 break; 1027 } 1028 } 1029 if (unit == -1) 1030 unit = low & CLONE_UNITMASK; 1031 dev = newdev(csw, unit2minor(unit | extra), ndev); 1032 if (dev->si_flags & SI_CLONELIST) { 1033 printf("dev %p (%s) is on clonelist\n", dev, dev->si_name); 1034 printf("unit=%d, low=%d, extra=0x%x\n", unit, low, extra); 1035 LIST_FOREACH(dev, &cd->head, si_clone) { 1036 printf("\t%p %s\n", dev, dev->si_name); 1037 } 1038 panic("foo"); 1039 } 1040 KASSERT(!(dev->si_flags & SI_CLONELIST), 1041 ("Dev %p(%s) should not be on clonelist", dev, dev->si_name)); 1042 if (dl != NULL) 1043 LIST_INSERT_BEFORE(dl, dev, si_clone); 1044 else if (de != NULL) 1045 LIST_INSERT_AFTER(de, dev, si_clone); 1046 else 1047 LIST_INSERT_HEAD(&cd->head, dev, si_clone); 1048 dev->si_flags |= SI_CLONELIST; 1049 *up = unit; 1050 dev_unlock_and_free(); 1051 return (1); 1052 } 1053 1054 /* 1055 * Kill everything still on the list. The driver should already have 1056 * disposed of any softc hung of the struct cdev *'s at this time. 1057 */ 1058 void 1059 clone_cleanup(struct clonedevs **cdp) 1060 { 1061 struct cdev *dev; 1062 struct cdev_priv *cp; 1063 struct clonedevs *cd; 1064 1065 cd = *cdp; 1066 if (cd == NULL) 1067 return; 1068 dev_lock(); 1069 while (!LIST_EMPTY(&cd->head)) { 1070 dev = LIST_FIRST(&cd->head); 1071 LIST_REMOVE(dev, si_clone); 1072 KASSERT(dev->si_flags & SI_CLONELIST, 1073 ("Dev %p(%s) should be on clonelist", dev, dev->si_name)); 1074 dev->si_flags &= ~SI_CLONELIST; 1075 cp = cdev2priv(dev); 1076 if (!(cp->cdp_flags & CDP_SCHED_DTR)) { 1077 cp->cdp_flags |= CDP_SCHED_DTR; 1078 KASSERT(dev->si_flags & SI_NAMED, 1079 ("Driver has goofed in cloning underways udev %x", dev->si_drv0)); 1080 destroy_devl(dev); 1081 } 1082 } 1083 dev_unlock_and_free(); 1084 free(cd, M_DEVBUF); 1085 *cdp = NULL; 1086 } 1087 1088 static TAILQ_HEAD(, cdev_priv) dev_ddtr = 1089 TAILQ_HEAD_INITIALIZER(dev_ddtr); 1090 static struct task dev_dtr_task; 1091 1092 static void 1093 destroy_dev_tq(void *ctx, int pending) 1094 { 1095 struct cdev_priv *cp; 1096 struct cdev *dev; 1097 void (*cb)(void *); 1098 void *cb_arg; 1099 1100 dev_lock(); 1101 while (!TAILQ_EMPTY(&dev_ddtr)) { 1102 cp = TAILQ_FIRST(&dev_ddtr); 1103 dev = &cp->cdp_c; 1104 KASSERT(cp->cdp_flags & CDP_SCHED_DTR, 1105 ("cdev %p in dev_destroy_tq without CDP_SCHED_DTR", cp)); 1106 TAILQ_REMOVE(&dev_ddtr, cp, cdp_dtr_list); 1107 cb = cp->cdp_dtr_cb; 1108 cb_arg = cp->cdp_dtr_cb_arg; 1109 destroy_devl(dev); 1110 dev_unlock_and_free(); 1111 dev_rel(dev); 1112 if (cb != NULL) 1113 cb(cb_arg); 1114 dev_lock(); 1115 } 1116 dev_unlock(); 1117 } 1118 1119 /* 1120 * devmtx shall be locked on entry. devmtx will be unlocked after 1121 * function return. 1122 */ 1123 static int 1124 destroy_dev_sched_cbl(struct cdev *dev, void (*cb)(void *), void *arg) 1125 { 1126 struct cdev_priv *cp; 1127 1128 mtx_assert(&devmtx, MA_OWNED); 1129 cp = cdev2priv(dev); 1130 if (cp->cdp_flags & CDP_SCHED_DTR) { 1131 dev_unlock(); 1132 return (0); 1133 } 1134 dev_refl(dev); 1135 cp->cdp_flags |= CDP_SCHED_DTR; 1136 cp->cdp_dtr_cb = cb; 1137 cp->cdp_dtr_cb_arg = arg; 1138 TAILQ_INSERT_TAIL(&dev_ddtr, cp, cdp_dtr_list); 1139 dev_unlock(); 1140 taskqueue_enqueue(taskqueue_swi_giant, &dev_dtr_task); 1141 return (1); 1142 } 1143 1144 int 1145 destroy_dev_sched_cb(struct cdev *dev, void (*cb)(void *), void *arg) 1146 { 1147 dev_lock(); 1148 return (destroy_dev_sched_cbl(dev, cb, arg)); 1149 } 1150 1151 int 1152 destroy_dev_sched(struct cdev *dev) 1153 { 1154 return (destroy_dev_sched_cb(dev, NULL, NULL)); 1155 } 1156 1157 void 1158 destroy_dev_drain(struct cdevsw *csw) 1159 { 1160 1161 dev_lock(); 1162 while (!LIST_EMPTY(&csw->d_devs)) { 1163 msleep(&csw->d_devs, &devmtx, PRIBIO, "devscd", hz/10); 1164 } 1165 dev_unlock(); 1166 } 1167 1168 void 1169 drain_dev_clone_events(void) 1170 { 1171 1172 sx_xlock(&clone_drain_lock); 1173 sx_xunlock(&clone_drain_lock); 1174 } 1175 1176 static void 1177 devdtr_init(void *dummy __unused) 1178 { 1179 1180 TASK_INIT(&dev_dtr_task, 0, destroy_dev_tq, NULL); 1181 } 1182 1183 SYSINIT(devdtr, SI_SUB_DEVFS, SI_ORDER_SECOND, devdtr_init, NULL); 1184