1 /*- 2 * Copyright (c) 2000,2004 3 * Poul-Henning Kamp. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Neither the name of the University nor the names of its contributors 11 * may be used to endorse or promote products derived from this software 12 * without specific prior written permission. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 * 26 * From: FreeBSD: src/sys/miscfs/kernfs/kernfs_vfsops.c 1.36 27 * 28 * $FreeBSD$ 29 */ 30 31 #include <sys/param.h> 32 #include <sys/systm.h> 33 #include <sys/conf.h> 34 #include <sys/dirent.h> 35 #include <sys/kernel.h> 36 #include <sys/limits.h> 37 #include <sys/lock.h> 38 #include <sys/malloc.h> 39 #include <sys/proc.h> 40 #include <sys/sx.h> 41 #include <sys/sysctl.h> 42 #include <sys/vnode.h> 43 44 #include <sys/kdb.h> 45 46 #include <fs/devfs/devfs.h> 47 #include <fs/devfs/devfs_int.h> 48 49 #include <security/mac/mac_framework.h> 50 51 /* 52 * The one true (but secret) list of active devices in the system. 53 * Locked by dev_lock()/devmtx 54 */ 55 struct cdev_priv_list cdevp_list = TAILQ_HEAD_INITIALIZER(cdevp_list); 56 57 struct unrhdr *devfs_inos; 58 59 60 static MALLOC_DEFINE(M_DEVFS2, "DEVFS2", "DEVFS data 2"); 61 static MALLOC_DEFINE(M_DEVFS3, "DEVFS3", "DEVFS data 3"); 62 static MALLOC_DEFINE(M_CDEVP, "DEVFS1", "DEVFS cdev_priv storage"); 63 64 static SYSCTL_NODE(_vfs, OID_AUTO, devfs, CTLFLAG_RW, 0, "DEVFS filesystem"); 65 66 static unsigned devfs_generation; 67 SYSCTL_UINT(_vfs_devfs, OID_AUTO, generation, CTLFLAG_RD, 68 &devfs_generation, 0, "DEVFS generation number"); 69 70 unsigned devfs_rule_depth = 1; 71 SYSCTL_UINT(_vfs_devfs, OID_AUTO, rule_depth, CTLFLAG_RW, 72 &devfs_rule_depth, 0, "Max depth of ruleset include"); 73 74 /* 75 * Helper sysctl for devname(3). We're given a dev_t and return the 76 * name, if any, registered by the device driver. 77 */ 78 static int 79 sysctl_devname(SYSCTL_HANDLER_ARGS) 80 { 81 int error; 82 dev_t ud; 83 struct cdev_priv *cdp; 84 struct cdev *dev; 85 86 error = SYSCTL_IN(req, &ud, sizeof (ud)); 87 if (error) 88 return (error); 89 if (ud == NODEV) 90 return (EINVAL); 91 dev = NULL; 92 dev_lock(); 93 TAILQ_FOREACH(cdp, &cdevp_list, cdp_list) 94 if (cdp->cdp_inode == ud) { 95 dev = &cdp->cdp_c; 96 dev_refl(dev); 97 break; 98 } 99 dev_unlock(); 100 if (dev == NULL) 101 return (ENOENT); 102 error = SYSCTL_OUT(req, dev->si_name, strlen(dev->si_name) + 1); 103 dev_rel(dev); 104 return (error); 105 } 106 107 SYSCTL_PROC(_kern, OID_AUTO, devname, 108 CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_ANYBODY|CTLFLAG_MPSAFE, 109 NULL, 0, sysctl_devname, "", "devname(3) handler"); 110 111 SYSCTL_INT(_debug_sizeof, OID_AUTO, cdev, CTLFLAG_RD, 112 SYSCTL_NULL_INT_PTR, sizeof(struct cdev), "sizeof(struct cdev)"); 113 114 SYSCTL_INT(_debug_sizeof, OID_AUTO, cdev_priv, CTLFLAG_RD, 115 SYSCTL_NULL_INT_PTR, sizeof(struct cdev_priv), "sizeof(struct cdev_priv)"); 116 117 struct cdev * 118 devfs_alloc(int flags) 119 { 120 struct cdev_priv *cdp; 121 struct cdev *cdev; 122 struct timespec ts; 123 124 cdp = malloc(sizeof *cdp, M_CDEVP, M_ZERO | 125 ((flags & MAKEDEV_NOWAIT) ? M_NOWAIT : M_WAITOK)); 126 if (cdp == NULL) 127 return (NULL); 128 129 cdp->cdp_dirents = &cdp->cdp_dirent0; 130 cdp->cdp_dirent0 = NULL; 131 cdp->cdp_maxdirent = 0; 132 cdp->cdp_inode = 0; 133 134 cdev = &cdp->cdp_c; 135 136 LIST_INIT(&cdev->si_children); 137 vfs_timestamp(&ts); 138 cdev->si_atime = cdev->si_mtime = cdev->si_ctime = ts; 139 cdev->si_cred = NULL; 140 141 return (cdev); 142 } 143 144 int 145 devfs_dev_exists(const char *name) 146 { 147 struct cdev_priv *cdp; 148 149 mtx_assert(&devmtx, MA_OWNED); 150 151 TAILQ_FOREACH(cdp, &cdevp_list, cdp_list) { 152 if ((cdp->cdp_flags & CDP_ACTIVE) == 0) 153 continue; 154 if (devfs_pathpath(cdp->cdp_c.si_name, name) != 0) 155 return (1); 156 if (devfs_pathpath(name, cdp->cdp_c.si_name) != 0) 157 return (1); 158 } 159 if (devfs_dir_find(name) != 0) 160 return (1); 161 162 return (0); 163 } 164 165 void 166 devfs_free(struct cdev *cdev) 167 { 168 struct cdev_priv *cdp; 169 170 cdp = cdev2priv(cdev); 171 if (cdev->si_cred != NULL) 172 crfree(cdev->si_cred); 173 devfs_free_cdp_inode(cdp->cdp_inode); 174 if (cdp->cdp_maxdirent > 0) 175 free(cdp->cdp_dirents, M_DEVFS2); 176 free(cdp, M_CDEVP); 177 } 178 179 struct devfs_dirent * 180 devfs_find(struct devfs_dirent *dd, const char *name, int namelen, int type) 181 { 182 struct devfs_dirent *de; 183 184 TAILQ_FOREACH(de, &dd->de_dlist, de_list) { 185 if (namelen != de->de_dirent->d_namlen) 186 continue; 187 if (type != 0 && type != de->de_dirent->d_type) 188 continue; 189 if (bcmp(name, de->de_dirent->d_name, namelen) != 0) 190 continue; 191 break; 192 } 193 KASSERT(de == NULL || (de->de_flags & DE_DOOMED) == 0, 194 ("devfs_find: returning a doomed entry")); 195 return (de); 196 } 197 198 struct devfs_dirent * 199 devfs_newdirent(char *name, int namelen) 200 { 201 int i; 202 struct devfs_dirent *de; 203 struct dirent d; 204 205 d.d_namlen = namelen; 206 i = sizeof (*de) + GENERIC_DIRSIZ(&d); 207 de = malloc(i, M_DEVFS3, M_WAITOK | M_ZERO); 208 de->de_dirent = (struct dirent *)(de + 1); 209 de->de_dirent->d_namlen = namelen; 210 de->de_dirent->d_reclen = GENERIC_DIRSIZ(&d); 211 bcopy(name, de->de_dirent->d_name, namelen); 212 de->de_dirent->d_name[namelen] = '\0'; 213 vfs_timestamp(&de->de_ctime); 214 de->de_mtime = de->de_atime = de->de_ctime; 215 de->de_links = 1; 216 de->de_holdcnt = 1; 217 #ifdef MAC 218 mac_devfs_init(de); 219 #endif 220 return (de); 221 } 222 223 struct devfs_dirent * 224 devfs_parent_dirent(struct devfs_dirent *de) 225 { 226 227 if (de->de_dirent->d_type != DT_DIR) 228 return (de->de_dir); 229 230 if (de->de_flags & (DE_DOT | DE_DOTDOT)) 231 return (NULL); 232 233 de = TAILQ_FIRST(&de->de_dlist); /* "." */ 234 if (de == NULL) 235 return (NULL); 236 de = TAILQ_NEXT(de, de_list); /* ".." */ 237 if (de == NULL) 238 return (NULL); 239 240 return (de->de_dir); 241 } 242 243 struct devfs_dirent * 244 devfs_vmkdir(struct devfs_mount *dmp, char *name, int namelen, struct devfs_dirent *dotdot, u_int inode) 245 { 246 struct devfs_dirent *dd; 247 struct devfs_dirent *de; 248 249 /* Create the new directory */ 250 dd = devfs_newdirent(name, namelen); 251 TAILQ_INIT(&dd->de_dlist); 252 dd->de_dirent->d_type = DT_DIR; 253 dd->de_mode = 0555; 254 dd->de_links = 2; 255 dd->de_dir = dd; 256 if (inode != 0) 257 dd->de_inode = inode; 258 else 259 dd->de_inode = alloc_unr(devfs_inos); 260 261 /* 262 * "." and ".." are always the two first entries in the 263 * de_dlist list. 264 * 265 * Create the "." entry in the new directory. 266 */ 267 de = devfs_newdirent(".", 1); 268 de->de_dirent->d_type = DT_DIR; 269 de->de_flags |= DE_DOT; 270 TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list); 271 de->de_dir = dd; 272 273 /* Create the ".." entry in the new directory. */ 274 de = devfs_newdirent("..", 2); 275 de->de_dirent->d_type = DT_DIR; 276 de->de_flags |= DE_DOTDOT; 277 TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list); 278 if (dotdot == NULL) { 279 de->de_dir = dd; 280 } else { 281 de->de_dir = dotdot; 282 sx_assert(&dmp->dm_lock, SX_XLOCKED); 283 TAILQ_INSERT_TAIL(&dotdot->de_dlist, dd, de_list); 284 dotdot->de_links++; 285 devfs_rules_apply(dmp, dd); 286 } 287 288 #ifdef MAC 289 mac_devfs_create_directory(dmp->dm_mount, name, namelen, dd); 290 #endif 291 return (dd); 292 } 293 294 void 295 devfs_dirent_free(struct devfs_dirent *de) 296 { 297 free(de, M_DEVFS3); 298 } 299 300 /* 301 * Removes a directory if it is empty. Also empty parent directories are 302 * removed recursively. 303 */ 304 static void 305 devfs_rmdir_empty(struct devfs_mount *dm, struct devfs_dirent *de) 306 { 307 struct devfs_dirent *dd, *de_dot, *de_dotdot; 308 309 sx_assert(&dm->dm_lock, SX_XLOCKED); 310 311 for (;;) { 312 KASSERT(de->de_dirent->d_type == DT_DIR, 313 ("devfs_rmdir_empty: de is not a directory")); 314 315 if ((de->de_flags & DE_DOOMED) != 0 || de == dm->dm_rootdir) 316 return; 317 318 de_dot = TAILQ_FIRST(&de->de_dlist); 319 KASSERT(de_dot != NULL, ("devfs_rmdir_empty: . missing")); 320 de_dotdot = TAILQ_NEXT(de_dot, de_list); 321 KASSERT(de_dotdot != NULL, ("devfs_rmdir_empty: .. missing")); 322 /* Return if the directory is not empty. */ 323 if (TAILQ_NEXT(de_dotdot, de_list) != NULL) 324 return; 325 326 dd = devfs_parent_dirent(de); 327 KASSERT(dd != NULL, ("devfs_rmdir_empty: NULL dd")); 328 TAILQ_REMOVE(&de->de_dlist, de_dot, de_list); 329 TAILQ_REMOVE(&de->de_dlist, de_dotdot, de_list); 330 TAILQ_REMOVE(&dd->de_dlist, de, de_list); 331 DEVFS_DE_HOLD(dd); 332 devfs_delete(dm, de, DEVFS_DEL_NORECURSE); 333 devfs_delete(dm, de_dot, DEVFS_DEL_NORECURSE); 334 devfs_delete(dm, de_dotdot, DEVFS_DEL_NORECURSE); 335 if (DEVFS_DE_DROP(dd)) { 336 devfs_dirent_free(dd); 337 return; 338 } 339 340 de = dd; 341 } 342 } 343 344 /* 345 * The caller needs to hold the dm for the duration of the call since 346 * dm->dm_lock may be temporary dropped. 347 */ 348 void 349 devfs_delete(struct devfs_mount *dm, struct devfs_dirent *de, int flags) 350 { 351 struct devfs_dirent *dd; 352 struct vnode *vp; 353 354 KASSERT((de->de_flags & DE_DOOMED) == 0, 355 ("devfs_delete doomed dirent")); 356 de->de_flags |= DE_DOOMED; 357 358 if ((flags & DEVFS_DEL_NORECURSE) == 0) { 359 dd = devfs_parent_dirent(de); 360 if (dd != NULL) 361 DEVFS_DE_HOLD(dd); 362 if (de->de_flags & DE_USER) { 363 KASSERT(dd != NULL, ("devfs_delete: NULL dd")); 364 devfs_dir_unref_de(dm, dd); 365 } 366 } else 367 dd = NULL; 368 369 mtx_lock(&devfs_de_interlock); 370 vp = de->de_vnode; 371 if (vp != NULL) { 372 VI_LOCK(vp); 373 mtx_unlock(&devfs_de_interlock); 374 vholdl(vp); 375 sx_unlock(&dm->dm_lock); 376 if ((flags & DEVFS_DEL_VNLOCKED) == 0) 377 vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK | LK_RETRY); 378 else 379 VI_UNLOCK(vp); 380 vgone(vp); 381 if ((flags & DEVFS_DEL_VNLOCKED) == 0) 382 VOP_UNLOCK(vp, 0); 383 vdrop(vp); 384 sx_xlock(&dm->dm_lock); 385 } else 386 mtx_unlock(&devfs_de_interlock); 387 if (de->de_symlink) { 388 free(de->de_symlink, M_DEVFS); 389 de->de_symlink = NULL; 390 } 391 #ifdef MAC 392 mac_devfs_destroy(de); 393 #endif 394 if (de->de_inode > DEVFS_ROOTINO) { 395 devfs_free_cdp_inode(de->de_inode); 396 de->de_inode = 0; 397 } 398 if (DEVFS_DE_DROP(de)) 399 devfs_dirent_free(de); 400 401 if (dd != NULL) { 402 if (DEVFS_DE_DROP(dd)) 403 devfs_dirent_free(dd); 404 else 405 devfs_rmdir_empty(dm, dd); 406 } 407 } 408 409 /* 410 * Called on unmount. 411 * Recursively removes the entire tree. 412 * The caller needs to hold the dm for the duration of the call. 413 */ 414 415 static void 416 devfs_purge(struct devfs_mount *dm, struct devfs_dirent *dd) 417 { 418 struct devfs_dirent *de; 419 420 sx_assert(&dm->dm_lock, SX_XLOCKED); 421 422 DEVFS_DE_HOLD(dd); 423 for (;;) { 424 /* 425 * Use TAILQ_LAST() to remove "." and ".." last. 426 * We might need ".." to resolve a path in 427 * devfs_dir_unref_de(). 428 */ 429 de = TAILQ_LAST(&dd->de_dlist, devfs_dlist_head); 430 if (de == NULL) 431 break; 432 TAILQ_REMOVE(&dd->de_dlist, de, de_list); 433 if (de->de_flags & DE_USER) 434 devfs_dir_unref_de(dm, dd); 435 if (de->de_flags & (DE_DOT | DE_DOTDOT)) 436 devfs_delete(dm, de, DEVFS_DEL_NORECURSE); 437 else if (de->de_dirent->d_type == DT_DIR) 438 devfs_purge(dm, de); 439 else 440 devfs_delete(dm, de, DEVFS_DEL_NORECURSE); 441 } 442 if (DEVFS_DE_DROP(dd)) 443 devfs_dirent_free(dd); 444 else if ((dd->de_flags & DE_DOOMED) == 0) 445 devfs_delete(dm, dd, DEVFS_DEL_NORECURSE); 446 } 447 448 /* 449 * Each cdev_priv has an array of pointers to devfs_dirent which is indexed 450 * by the mount points dm_idx. 451 * This function extends the array when necessary, taking into account that 452 * the default array is 1 element and not malloc'ed. 453 */ 454 static void 455 devfs_metoo(struct cdev_priv *cdp, struct devfs_mount *dm) 456 { 457 struct devfs_dirent **dep; 458 int siz; 459 460 siz = (dm->dm_idx + 1) * sizeof *dep; 461 dep = malloc(siz, M_DEVFS2, M_WAITOK | M_ZERO); 462 dev_lock(); 463 if (dm->dm_idx <= cdp->cdp_maxdirent) { 464 /* We got raced */ 465 dev_unlock(); 466 free(dep, M_DEVFS2); 467 return; 468 } 469 memcpy(dep, cdp->cdp_dirents, (cdp->cdp_maxdirent + 1) * sizeof *dep); 470 if (cdp->cdp_maxdirent > 0) 471 free(cdp->cdp_dirents, M_DEVFS2); 472 cdp->cdp_dirents = dep; 473 /* 474 * XXX: if malloc told us how much we actually got this could 475 * XXX: be optimized. 476 */ 477 cdp->cdp_maxdirent = dm->dm_idx; 478 dev_unlock(); 479 } 480 481 /* 482 * The caller needs to hold the dm for the duration of the call. 483 */ 484 static int 485 devfs_populate_loop(struct devfs_mount *dm, int cleanup) 486 { 487 struct cdev_priv *cdp; 488 struct devfs_dirent *de; 489 struct devfs_dirent *dd, *dt; 490 struct cdev *pdev; 491 int de_flags, depth, j; 492 char *q, *s; 493 494 sx_assert(&dm->dm_lock, SX_XLOCKED); 495 dev_lock(); 496 TAILQ_FOREACH(cdp, &cdevp_list, cdp_list) { 497 498 KASSERT(cdp->cdp_dirents != NULL, ("NULL cdp_dirents")); 499 500 /* 501 * If we are unmounting, or the device has been destroyed, 502 * clean up our dirent. 503 */ 504 if ((cleanup || !(cdp->cdp_flags & CDP_ACTIVE)) && 505 dm->dm_idx <= cdp->cdp_maxdirent && 506 cdp->cdp_dirents[dm->dm_idx] != NULL) { 507 de = cdp->cdp_dirents[dm->dm_idx]; 508 cdp->cdp_dirents[dm->dm_idx] = NULL; 509 KASSERT(cdp == de->de_cdp, 510 ("%s %d %s %p %p", __func__, __LINE__, 511 cdp->cdp_c.si_name, cdp, de->de_cdp)); 512 KASSERT(de->de_dir != NULL, ("Null de->de_dir")); 513 dev_unlock(); 514 515 TAILQ_REMOVE(&de->de_dir->de_dlist, de, de_list); 516 de->de_cdp = NULL; 517 de->de_inode = 0; 518 devfs_delete(dm, de, 0); 519 dev_lock(); 520 cdp->cdp_inuse--; 521 dev_unlock(); 522 return (1); 523 } 524 /* 525 * GC any lingering devices 526 */ 527 if (!(cdp->cdp_flags & CDP_ACTIVE)) { 528 if (cdp->cdp_inuse > 0) 529 continue; 530 TAILQ_REMOVE(&cdevp_list, cdp, cdp_list); 531 dev_unlock(); 532 dev_rel(&cdp->cdp_c); 533 return (1); 534 } 535 /* 536 * Don't create any new dirents if we are unmounting 537 */ 538 if (cleanup) 539 continue; 540 KASSERT((cdp->cdp_flags & CDP_ACTIVE), ("Bogons, I tell ya'!")); 541 542 if (dm->dm_idx <= cdp->cdp_maxdirent && 543 cdp->cdp_dirents[dm->dm_idx] != NULL) { 544 de = cdp->cdp_dirents[dm->dm_idx]; 545 KASSERT(cdp == de->de_cdp, ("inconsistent cdp")); 546 continue; 547 } 548 549 550 cdp->cdp_inuse++; 551 dev_unlock(); 552 553 if (dm->dm_idx > cdp->cdp_maxdirent) 554 devfs_metoo(cdp, dm); 555 556 dd = dm->dm_rootdir; 557 s = cdp->cdp_c.si_name; 558 for (;;) { 559 for (q = s; *q != '/' && *q != '\0'; q++) 560 continue; 561 if (*q != '/') 562 break; 563 de = devfs_find(dd, s, q - s, 0); 564 if (de == NULL) 565 de = devfs_vmkdir(dm, s, q - s, dd, 0); 566 else if (de->de_dirent->d_type == DT_LNK) { 567 de = devfs_find(dd, s, q - s, DT_DIR); 568 if (de == NULL) 569 de = devfs_vmkdir(dm, s, q - s, dd, 0); 570 de->de_flags |= DE_COVERED; 571 } 572 s = q + 1; 573 dd = de; 574 KASSERT(dd->de_dirent->d_type == DT_DIR && 575 (dd->de_flags & (DE_DOT | DE_DOTDOT)) == 0, 576 ("%s: invalid directory (si_name=%s)", 577 __func__, cdp->cdp_c.si_name)); 578 579 } 580 de_flags = 0; 581 de = devfs_find(dd, s, q - s, DT_LNK); 582 if (de != NULL) 583 de_flags |= DE_COVERED; 584 585 de = devfs_newdirent(s, q - s); 586 if (cdp->cdp_c.si_flags & SI_ALIAS) { 587 de->de_uid = 0; 588 de->de_gid = 0; 589 de->de_mode = 0755; 590 de->de_dirent->d_type = DT_LNK; 591 pdev = cdp->cdp_c.si_parent; 592 dt = dd; 593 depth = 0; 594 while (dt != dm->dm_rootdir && 595 (dt = devfs_parent_dirent(dt)) != NULL) 596 depth++; 597 j = depth * 3 + strlen(pdev->si_name) + 1; 598 de->de_symlink = malloc(j, M_DEVFS, M_WAITOK); 599 de->de_symlink[0] = 0; 600 while (depth-- > 0) 601 strcat(de->de_symlink, "../"); 602 strcat(de->de_symlink, pdev->si_name); 603 } else { 604 de->de_uid = cdp->cdp_c.si_uid; 605 de->de_gid = cdp->cdp_c.si_gid; 606 de->de_mode = cdp->cdp_c.si_mode; 607 de->de_dirent->d_type = DT_CHR; 608 } 609 de->de_flags |= de_flags; 610 de->de_inode = cdp->cdp_inode; 611 de->de_cdp = cdp; 612 #ifdef MAC 613 mac_devfs_create_device(cdp->cdp_c.si_cred, dm->dm_mount, 614 &cdp->cdp_c, de); 615 #endif 616 de->de_dir = dd; 617 TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list); 618 devfs_rules_apply(dm, de); 619 dev_lock(); 620 /* XXX: could check that cdp is still active here */ 621 KASSERT(cdp->cdp_dirents[dm->dm_idx] == NULL, 622 ("%s %d\n", __func__, __LINE__)); 623 cdp->cdp_dirents[dm->dm_idx] = de; 624 KASSERT(de->de_cdp != (void *)0xdeadc0de, 625 ("%s %d\n", __func__, __LINE__)); 626 dev_unlock(); 627 return (1); 628 } 629 dev_unlock(); 630 return (0); 631 } 632 633 /* 634 * The caller needs to hold the dm for the duration of the call. 635 */ 636 void 637 devfs_populate(struct devfs_mount *dm) 638 { 639 unsigned gen; 640 641 sx_assert(&dm->dm_lock, SX_XLOCKED); 642 gen = devfs_generation; 643 if (dm->dm_generation == gen) 644 return; 645 while (devfs_populate_loop(dm, 0)) 646 continue; 647 dm->dm_generation = gen; 648 } 649 650 /* 651 * The caller needs to hold the dm for the duration of the call. 652 */ 653 void 654 devfs_cleanup(struct devfs_mount *dm) 655 { 656 657 sx_assert(&dm->dm_lock, SX_XLOCKED); 658 while (devfs_populate_loop(dm, 1)) 659 continue; 660 devfs_purge(dm, dm->dm_rootdir); 661 } 662 663 /* 664 * devfs_create() and devfs_destroy() are called from kern_conf.c and 665 * in both cases the devlock() mutex is held, so no further locking 666 * is necessary and no sleeping allowed. 667 */ 668 669 void 670 devfs_create(struct cdev *dev) 671 { 672 struct cdev_priv *cdp; 673 674 mtx_assert(&devmtx, MA_OWNED); 675 cdp = cdev2priv(dev); 676 cdp->cdp_flags |= CDP_ACTIVE; 677 cdp->cdp_inode = alloc_unrl(devfs_inos); 678 dev_refl(dev); 679 TAILQ_INSERT_TAIL(&cdevp_list, cdp, cdp_list); 680 devfs_generation++; 681 } 682 683 void 684 devfs_destroy(struct cdev *dev) 685 { 686 struct cdev_priv *cdp; 687 688 mtx_assert(&devmtx, MA_OWNED); 689 cdp = cdev2priv(dev); 690 cdp->cdp_flags &= ~CDP_ACTIVE; 691 devfs_generation++; 692 } 693 694 ino_t 695 devfs_alloc_cdp_inode(void) 696 { 697 698 return (alloc_unr(devfs_inos)); 699 } 700 701 void 702 devfs_free_cdp_inode(ino_t ino) 703 { 704 705 if (ino > 0) 706 free_unr(devfs_inos, ino); 707 } 708 709 static void 710 devfs_devs_init(void *junk __unused) 711 { 712 713 devfs_inos = new_unrhdr(DEVFS_ROOTINO + 1, INT_MAX, &devmtx); 714 } 715 716 SYSINIT(devfs_devs, SI_SUB_DEVFS, SI_ORDER_FIRST, devfs_devs_init, NULL); 717