/*-
 * Copyright (c) 2000,2004
 *	Poul-Henning Kamp.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From: FreeBSD: src/sys/miscfs/kernfs/kernfs_vfsops.c 1.36
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <sys/kdb.h>

#include <fs/devfs/devfs.h>
#include <fs/devfs/devfs_int.h>

#include <security/mac/mac_framework.h>

/*
 * The one true (but secret) list of active devices in the system.
 * Locked by dev_lock()/devmtx
 */
struct cdev_priv_list cdevp_list = TAILQ_HEAD_INITIALIZER(cdevp_list);

struct unrhdr *devfs_inos;

static MALLOC_DEFINE(M_DEVFS2, "DEVFS2", "DEVFS data 2");
static MALLOC_DEFINE(M_DEVFS3, "DEVFS3", "DEVFS data 3");
static MALLOC_DEFINE(M_CDEVP, "DEVFS1", "DEVFS cdev_priv storage");

SYSCTL_NODE(_vfs, OID_AUTO, devfs, CTLFLAG_RW, 0, "DEVFS filesystem");

static unsigned devfs_generation;
SYSCTL_UINT(_vfs_devfs, OID_AUTO, generation, CTLFLAG_RD,
    &devfs_generation, 0, "DEVFS generation number");

unsigned devfs_rule_depth = 1;
SYSCTL_UINT(_vfs_devfs, OID_AUTO, rule_depth, CTLFLAG_RW,
    &devfs_rule_depth, 0, "Max depth of ruleset include");

/*
 * Helper sysctl for devname(3).  We're given a dev_t and return the
 * name, if any, registered by the device driver.
 */
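/*
 * Userland sketch (not part of this file; the actual devname(3)
 * implementation in libc may differ): the dev_t is passed in as the
 * "new value" and the name comes back as the "old value" of the
 * kern.devname sysctl, roughly:
 *
 *	char buf[SPECNAMELEN + 1];
 *	size_t len = sizeof(buf);
 *	dev_t ud = st.st_rdev;		(st from a prior stat(2))
 *	if (sysctlbyname("kern.devname", buf, &len, &ud, sizeof(ud)) == 0)
 *		printf("/dev/%s\n", buf);
 */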
static int
sysctl_devname(SYSCTL_HANDLER_ARGS)
{
	int error;
	dev_t ud;
	struct cdev_priv *cdp;
	struct cdev *dev;

	error = SYSCTL_IN(req, &ud, sizeof (ud));
	if (error)
		return (error);
	if (ud == NODEV)
		return (EINVAL);
	dev = NULL;
	dev_lock();
	TAILQ_FOREACH(cdp, &cdevp_list, cdp_list)
		if (cdp->cdp_inode == ud) {
			dev = &cdp->cdp_c;
			dev_refl(dev);
			break;
		}
	dev_unlock();
	if (dev == NULL)
		return (ENOENT);
	error = SYSCTL_OUT(req, dev->si_name, strlen(dev->si_name) + 1);
	dev_rel(dev);
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, devname,
    CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_ANYBODY|CTLFLAG_MPSAFE,
    NULL, 0, sysctl_devname, "", "devname(3) handler");

SYSCTL_INT(_debug_sizeof, OID_AUTO, cdev, CTLFLAG_RD,
    SYSCTL_NULL_INT_PTR, sizeof(struct cdev), "sizeof(struct cdev)");

SYSCTL_INT(_debug_sizeof, OID_AUTO, cdev_priv, CTLFLAG_RD,
    SYSCTL_NULL_INT_PTR, sizeof(struct cdev_priv), "sizeof(struct cdev_priv)");

/*
 * Allocate a struct cdev, embedded in its devfs-private struct cdev_priv.
 * Sleeps for memory unless MAKEDEV_NOWAIT is passed, in which case it may
 * return NULL.
 */
struct cdev *
devfs_alloc(int flags)
{
	struct cdev_priv *cdp;
	struct cdev *cdev;
	struct timespec ts;

	cdp = malloc(sizeof *cdp, M_CDEVP, M_ZERO |
	    ((flags & MAKEDEV_NOWAIT) ? M_NOWAIT : M_WAITOK));
	if (cdp == NULL)
		return (NULL);

	cdp->cdp_dirents = &cdp->cdp_dirent0;

	cdev = &cdp->cdp_c;
	LIST_INIT(&cdev->si_children);
	vfs_timestamp(&ts);
	cdev->si_atime = cdev->si_mtime = cdev->si_ctime = ts;

	return (cdev);
}

/*
 * Return non-zero if the given name is already in use by, or conflicts
 * with the path of, an active device or an existing directory.  The
 * devmtx must be held.
 */
int
devfs_dev_exists(const char *name)
{
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_OWNED);

	TAILQ_FOREACH(cdp, &cdevp_list, cdp_list) {
		if ((cdp->cdp_flags & CDP_ACTIVE) == 0)
			continue;
		if (devfs_pathpath(cdp->cdp_c.si_name, name) != 0)
			return (1);
		if (devfs_pathpath(name, cdp->cdp_c.si_name) != 0)
			return (1);
	}
	if (devfs_dir_find(name) != 0)
		return (1);

	return (0);
}

/*
 * Release a cdev allocated by devfs_alloc(), along with its credential
 * reference, inode number and (if extended) dirent pointer array.
 */
void
devfs_free(struct cdev *cdev)
{
	struct cdev_priv *cdp;

	cdp = cdev2priv(cdev);
	if (cdev->si_cred != NULL)
		crfree(cdev->si_cred);
	devfs_free_cdp_inode(cdp->cdp_inode);
	if (cdp->cdp_maxdirent > 0)
		free(cdp->cdp_dirents, M_DEVFS2);
	free(cdp, M_CDEVP);
}

/*
 * Look up a name of length namelen in the directory dd.  If type is
 * non-zero, only entries of that dirent type match; inactive device
 * nodes are skipped.
 */
struct devfs_dirent *
devfs_find(struct devfs_dirent *dd, const char *name, int namelen, int type)
{
	struct devfs_dirent *de;

	TAILQ_FOREACH(de, &dd->de_dlist, de_list) {
		if (namelen != de->de_dirent->d_namlen)
			continue;
		if (type != 0 && type != de->de_dirent->d_type)
			continue;

		/*
		 * The race with finding a non-active name is not
		 * completely closed by this check, but, as in
		 * devfs_allocv(), it is made unlikely enough.
		 */
		if (de->de_dirent->d_type == DT_CHR &&
		    (de->de_cdp->cdp_flags & CDP_ACTIVE) == 0)
			continue;

		if (bcmp(name, de->de_dirent->d_name, namelen) != 0)
			continue;
		break;
	}
	KASSERT(de == NULL || (de->de_flags & DE_DOOMED) == 0,
	    ("devfs_find: returning a doomed entry"));
	return (de);
}

/*
 * Allocate and initialize a new devfs_dirent.  The struct dirent it
 * exports is carved out of the same allocation, directly after the
 * devfs_dirent itself.
 */
struct devfs_dirent *
devfs_newdirent(char *name, int namelen)
{
	int i;
	struct devfs_dirent *de;
	struct dirent d;

	d.d_namlen = namelen;
	i = sizeof(*de) + GENERIC_DIRSIZ(&d);
	de = malloc(i, M_DEVFS3, M_WAITOK | M_ZERO);
	de->de_dirent = (struct dirent *)(de + 1);
	de->de_dirent->d_namlen = namelen;
	de->de_dirent->d_reclen = GENERIC_DIRSIZ(&d);
	bcopy(name, de->de_dirent->d_name, namelen);
	de->de_dirent->d_name[namelen] = '\0';
	vfs_timestamp(&de->de_ctime);
	de->de_mtime = de->de_atime = de->de_ctime;
	de->de_links = 1;
	de->de_holdcnt = 1;
#ifdef MAC
	mac_devfs_init(de);
#endif
	return (de);
}

/*
 * Return the parent directory of a dirent, or NULL for the root and for
 * "." and ".." entries.  A directory's de_dir points at itself, so its
 * parent is found through its ".." entry instead.
 */
struct devfs_dirent *
devfs_parent_dirent(struct devfs_dirent *de)
{

	if (de->de_dirent->d_type != DT_DIR)
		return (de->de_dir);

	if (de->de_flags & (DE_DOT | DE_DOTDOT))
		return (NULL);

	de = TAILQ_FIRST(&de->de_dlist);	/* "." */
	if (de == NULL)
		return (NULL);
	de = TAILQ_NEXT(de, de_list);		/* ".." */
	if (de == NULL)
		return (NULL);

	return (de->de_dir);
}

/*
 * Create a new directory dirent, complete with "." and ".." entries, and
 * insert it into the directory dotdot when one is given.
 */
struct devfs_dirent *
devfs_vmkdir(struct devfs_mount *dmp, char *name, int namelen,
    struct devfs_dirent *dotdot, u_int inode)
{
	struct devfs_dirent *dd;
	struct devfs_dirent *de;

	/* Create the new directory */
	dd = devfs_newdirent(name, namelen);
	TAILQ_INIT(&dd->de_dlist);
	dd->de_dirent->d_type = DT_DIR;
	dd->de_mode = 0555;
	dd->de_links = 2;
	dd->de_dir = dd;
	if (inode != 0)
		dd->de_inode = inode;
	else
		dd->de_inode = alloc_unr(devfs_inos);

	/*
	 * "." and ".." are always the two first entries in the
	 * de_dlist list.
	 *
	 * Create the "." entry in the new directory.
	 */
	de = devfs_newdirent(".", 1);
	de->de_dirent->d_type = DT_DIR;
	de->de_flags |= DE_DOT;
	TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
	de->de_dir = dd;

	/* Create the ".." entry in the new directory. */
	de = devfs_newdirent("..", 2);
	de->de_dirent->d_type = DT_DIR;
	de->de_flags |= DE_DOTDOT;
	TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
	if (dotdot == NULL) {
		de->de_dir = dd;
	} else {
		de->de_dir = dotdot;
		sx_assert(&dmp->dm_lock, SX_XLOCKED);
		TAILQ_INSERT_TAIL(&dotdot->de_dlist, dd, de_list);
		dotdot->de_links++;
		devfs_rules_apply(dmp, dd);
	}

#ifdef MAC
	mac_devfs_create_directory(dmp->dm_mount, name, namelen, dd);
#endif
	return (dd);
}

/*
 * Free a dirent, detaching it from any vnode that still points at it.
 */
void
devfs_dirent_free(struct devfs_dirent *de)
{
	struct vnode *vp;

	vp = de->de_vnode;
	mtx_lock(&devfs_de_interlock);
	if (vp != NULL && vp->v_data == de)
		vp->v_data = NULL;
	mtx_unlock(&devfs_de_interlock);
	free(de, M_DEVFS3);
}

/*
 * Removes a directory if it is empty.  Any parent directories that become
 * empty as a result are removed recursively as well.
 */
static void
devfs_rmdir_empty(struct devfs_mount *dm, struct devfs_dirent *de)
{
	struct devfs_dirent *dd, *de_dot, *de_dotdot;

	sx_assert(&dm->dm_lock, SX_XLOCKED);

	for (;;) {
		KASSERT(de->de_dirent->d_type == DT_DIR,
		    ("devfs_rmdir_empty: de is not a directory"));

		if ((de->de_flags & DE_DOOMED) != 0 || de == dm->dm_rootdir)
			return;

		de_dot = TAILQ_FIRST(&de->de_dlist);
		KASSERT(de_dot != NULL, ("devfs_rmdir_empty: . missing"));
		de_dotdot = TAILQ_NEXT(de_dot, de_list);
		KASSERT(de_dotdot != NULL, ("devfs_rmdir_empty: .. missing"));
		/* Return if the directory is not empty. */
		if (TAILQ_NEXT(de_dotdot, de_list) != NULL)
			return;

		dd = devfs_parent_dirent(de);
		KASSERT(dd != NULL, ("devfs_rmdir_empty: NULL dd"));
		TAILQ_REMOVE(&de->de_dlist, de_dot, de_list);
		TAILQ_REMOVE(&de->de_dlist, de_dotdot, de_list);
		TAILQ_REMOVE(&dd->de_dlist, de, de_list);
		DEVFS_DE_HOLD(dd);
		devfs_delete(dm, de, DEVFS_DEL_NORECURSE);
		devfs_delete(dm, de_dot, DEVFS_DEL_NORECURSE);
		devfs_delete(dm, de_dotdot, DEVFS_DEL_NORECURSE);
		if (DEVFS_DE_DROP(dd)) {
			devfs_dirent_free(dd);
			return;
		}

		de = dd;
	}
}

/*
 * The caller needs to hold the dm for the duration of the call since
 * dm->dm_lock may be temporarily dropped.
 */
void
devfs_delete(struct devfs_mount *dm, struct devfs_dirent *de, int flags)
{
	struct devfs_dirent *dd;
	struct vnode *vp;

	KASSERT((de->de_flags & DE_DOOMED) == 0,
	    ("devfs_delete doomed dirent"));
	de->de_flags |= DE_DOOMED;

	if ((flags & DEVFS_DEL_NORECURSE) == 0) {
		dd = devfs_parent_dirent(de);
		if (dd != NULL)
			DEVFS_DE_HOLD(dd);
		if (de->de_flags & DE_USER) {
			KASSERT(dd != NULL, ("devfs_delete: NULL dd"));
			devfs_dir_unref_de(dm, dd);
		}
	} else
		dd = NULL;

	mtx_lock(&devfs_de_interlock);
	vp = de->de_vnode;
	if (vp != NULL) {
		VI_LOCK(vp);
		mtx_unlock(&devfs_de_interlock);
		vholdl(vp);
		sx_unlock(&dm->dm_lock);
		if ((flags & DEVFS_DEL_VNLOCKED) == 0)
			vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK | LK_RETRY);
		else
			VI_UNLOCK(vp);
		vgone(vp);
		if ((flags & DEVFS_DEL_VNLOCKED) == 0)
			VOP_UNLOCK(vp, 0);
		vdrop(vp);
		sx_xlock(&dm->dm_lock);
	} else
		mtx_unlock(&devfs_de_interlock);
	if (de->de_symlink) {
		free(de->de_symlink, M_DEVFS);
		de->de_symlink = NULL;
	}
#ifdef MAC
	mac_devfs_destroy(de);
#endif
	if (de->de_inode > DEVFS_ROOTINO) {
		devfs_free_cdp_inode(de->de_inode);
		de->de_inode = 0;
	}
	if (DEVFS_DE_DROP(de))
		devfs_dirent_free(de);

	if (dd != NULL) {
		if (DEVFS_DE_DROP(dd))
			devfs_dirent_free(dd);
		else
			devfs_rmdir_empty(dm, dd);
	}
}

/*
 * Called on unmount.
 * Recursively removes the entire tree.
 * The caller needs to hold the dm for the duration of the call.
 */
static void
devfs_purge(struct devfs_mount *dm, struct devfs_dirent *dd)
{
	struct devfs_dirent *de;

	sx_assert(&dm->dm_lock, SX_XLOCKED);

	DEVFS_DE_HOLD(dd);
	for (;;) {
		/*
		 * Use TAILQ_LAST() to remove "." and ".." last.
		 * We might need ".." to resolve a path in
		 * devfs_dir_unref_de().
		 */
		de = TAILQ_LAST(&dd->de_dlist, devfs_dlist_head);
		if (de == NULL)
			break;
		TAILQ_REMOVE(&dd->de_dlist, de, de_list);
		if (de->de_flags & DE_USER)
			devfs_dir_unref_de(dm, dd);
		if (de->de_flags & (DE_DOT | DE_DOTDOT))
			devfs_delete(dm, de, DEVFS_DEL_NORECURSE);
		else if (de->de_dirent->d_type == DT_DIR)
			devfs_purge(dm, de);
		else
			devfs_delete(dm, de, DEVFS_DEL_NORECURSE);
	}
	if (DEVFS_DE_DROP(dd))
		devfs_dirent_free(dd);
	else if ((dd->de_flags & DE_DOOMED) == 0)
		devfs_delete(dm, dd, DEVFS_DEL_NORECURSE);
}

/*
 * Each cdev_priv has an array of pointers to devfs_dirent which is indexed
 * by the mount point's dm_idx.
 * This function extends the array when necessary, taking into account that
 * the default array is a single element and not malloc'ed.
 */
static void
devfs_metoo(struct cdev_priv *cdp, struct devfs_mount *dm)
{
	struct devfs_dirent **dep;
	int siz;

	siz = (dm->dm_idx + 1) * sizeof *dep;
	dep = malloc(siz, M_DEVFS2, M_WAITOK | M_ZERO);
	dev_lock();
	if (dm->dm_idx <= cdp->cdp_maxdirent) {
		/* We got raced */
		dev_unlock();
		free(dep, M_DEVFS2);
		return;
	}
	memcpy(dep, cdp->cdp_dirents, (cdp->cdp_maxdirent + 1) * sizeof *dep);
	if (cdp->cdp_maxdirent > 0)
		free(cdp->cdp_dirents, M_DEVFS2);
	cdp->cdp_dirents = dep;
	/*
	 * XXX: if malloc told us how much we actually got this could
	 * XXX: be optimized.
	 */
	cdp->cdp_maxdirent = dm->dm_idx;
	dev_unlock();
}

/*
 * The caller needs to hold the dm for the duration of the call.
 *
 * Processes at most one change per call, returning 1 after a change was
 * made (the locks may have been dropped and reacquired in the process)
 * and 0 once there is nothing left to do.
 */
static int
devfs_populate_loop(struct devfs_mount *dm, int cleanup)
{
	struct cdev_priv *cdp;
	struct devfs_dirent *de;
	struct devfs_dirent *dd, *dt;
	struct cdev *pdev;
	int de_flags, depth, j;
	char *q, *s;

	sx_assert(&dm->dm_lock, SX_XLOCKED);
	dev_lock();
	TAILQ_FOREACH(cdp, &cdevp_list, cdp_list) {

		KASSERT(cdp->cdp_dirents != NULL, ("NULL cdp_dirents"));

		/*
		 * If we are unmounting, or the device has been destroyed,
		 * clean up our dirent.
		 */
		if ((cleanup || !(cdp->cdp_flags & CDP_ACTIVE)) &&
		    dm->dm_idx <= cdp->cdp_maxdirent &&
		    cdp->cdp_dirents[dm->dm_idx] != NULL) {
			de = cdp->cdp_dirents[dm->dm_idx];
			cdp->cdp_dirents[dm->dm_idx] = NULL;
			KASSERT(cdp == de->de_cdp,
			    ("%s %d %s %p %p", __func__, __LINE__,
			    cdp->cdp_c.si_name, cdp, de->de_cdp));
			KASSERT(de->de_dir != NULL, ("Null de->de_dir"));
			dev_unlock();

			TAILQ_REMOVE(&de->de_dir->de_dlist, de, de_list);
			de->de_cdp = NULL;
			de->de_inode = 0;
			devfs_delete(dm, de, 0);
			dev_lock();
			cdp->cdp_inuse--;
			dev_unlock();
			return (1);
		}
		/*
		 * GC any lingering devices
		 */
		if (!(cdp->cdp_flags & CDP_ACTIVE)) {
			if (cdp->cdp_inuse > 0)
				continue;
			TAILQ_REMOVE(&cdevp_list, cdp, cdp_list);
			dev_unlock();
			dev_rel(&cdp->cdp_c);
			return (1);
		}
		/*
		 * Don't create any new dirents if we are unmounting
		 */
		if (cleanup)
			continue;
		KASSERT((cdp->cdp_flags & CDP_ACTIVE), ("Bogons, I tell ya'!"));

		if (dm->dm_idx <= cdp->cdp_maxdirent &&
		    cdp->cdp_dirents[dm->dm_idx] != NULL) {
			de = cdp->cdp_dirents[dm->dm_idx];
			KASSERT(cdp == de->de_cdp, ("inconsistent cdp"));
			continue;
		}

		cdp->cdp_inuse++;
		dev_unlock();

		if (dm->dm_idx > cdp->cdp_maxdirent)
			devfs_metoo(cdp, dm);

		dd = dm->dm_rootdir;
		s = cdp->cdp_c.si_name;
		for (;;) {
			for (q = s; *q != '/' && *q != '\0'; q++)
				continue;
			if (*q != '/')
				break;
			de = devfs_find(dd, s, q - s, 0);
			if (de == NULL)
				de = devfs_vmkdir(dm, s, q - s, dd, 0);
			else if (de->de_dirent->d_type == DT_LNK) {
				de = devfs_find(dd, s, q - s, DT_DIR);
				if (de == NULL)
					de = devfs_vmkdir(dm, s, q - s, dd, 0);
				de->de_flags |= DE_COVERED;
			}
			s = q + 1;
			dd = de;
			KASSERT(dd->de_dirent->d_type == DT_DIR &&
			    (dd->de_flags & (DE_DOT | DE_DOTDOT)) == 0,
			    ("%s: invalid directory (si_name=%s)",
			    __func__, cdp->cdp_c.si_name));
		}
		de_flags = 0;
		de = devfs_find(dd, s, q - s, DT_LNK);
		if (de != NULL)
			de_flags |= DE_COVERED;

		de = devfs_newdirent(s, q - s);
		if (cdp->cdp_c.si_flags & SI_ALIAS) {
			de->de_uid = 0;
			de->de_gid = 0;
			de->de_mode = 0755;
			de->de_dirent->d_type = DT_LNK;
			pdev = cdp->cdp_c.si_parent;
			dt = dd;
			depth = 0;
			while (dt != dm->dm_rootdir &&
			    (dt = devfs_parent_dirent(dt)) != NULL)
				depth++;
			j = depth * 3 + strlen(pdev->si_name) + 1;
			de->de_symlink = malloc(j, M_DEVFS, M_WAITOK);
			de->de_symlink[0] = 0;
			while (depth-- > 0)
				strcat(de->de_symlink, "../");
			strcat(de->de_symlink, pdev->si_name);
		} else {
			de->de_uid = cdp->cdp_c.si_uid;
			de->de_gid = cdp->cdp_c.si_gid;
			de->de_mode = cdp->cdp_c.si_mode;
			de->de_dirent->d_type = DT_CHR;
		}
		de->de_flags |= de_flags;
		de->de_inode = cdp->cdp_inode;
		de->de_cdp = cdp;
#ifdef MAC
		mac_devfs_create_device(cdp->cdp_c.si_cred, dm->dm_mount,
		    &cdp->cdp_c, de);
#endif
		de->de_dir = dd;
		TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
		devfs_rules_apply(dm, de);
		dev_lock();
		/* XXX: could check that cdp is still active here */
		KASSERT(cdp->cdp_dirents[dm->dm_idx] == NULL,
		    ("%s %d\n", __func__, __LINE__));
		cdp->cdp_dirents[dm->dm_idx] = de;
		KASSERT(de->de_cdp != (void *)0xdeadc0de,
		    ("%s %d\n", __func__, __LINE__));
		dev_unlock();
		return (1);
	}
	dev_unlock();
	return (0);
}
/*
 * The caller needs to hold the dm for the duration of the call.
 */
void
devfs_populate(struct devfs_mount *dm)
{
	unsigned gen;

	sx_assert(&dm->dm_lock, SX_XLOCKED);
	gen = devfs_generation;
	if (dm->dm_generation == gen)
		return;
	while (devfs_populate_loop(dm, 0))
		continue;
	dm->dm_generation = gen;
}

/*
 * The caller needs to hold the dm for the duration of the call.
 */
void
devfs_cleanup(struct devfs_mount *dm)
{

	sx_assert(&dm->dm_lock, SX_XLOCKED);
	while (devfs_populate_loop(dm, 1))
		continue;
	devfs_purge(dm, dm->dm_rootdir);
}

/*
 * devfs_create() and devfs_destroy() are called from kern_conf.c and
 * in both cases the devlock() mutex is held, so no further locking
 * is necessary and no sleeping is allowed.
 */
void
devfs_create(struct cdev *dev)
{
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_OWNED);
	cdp = cdev2priv(dev);
	cdp->cdp_flags |= CDP_ACTIVE;
	cdp->cdp_inode = alloc_unrl(devfs_inos);
	dev_refl(dev);
	TAILQ_INSERT_TAIL(&cdevp_list, cdp, cdp_list);
	devfs_generation++;
}

void
devfs_destroy(struct cdev *dev)
{
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_OWNED);
	cdp = cdev2priv(dev);
	cdp->cdp_flags &= ~CDP_ACTIVE;
	devfs_generation++;
}

ino_t
devfs_alloc_cdp_inode(void)
{

	return (alloc_unr(devfs_inos));
}

void
devfs_free_cdp_inode(ino_t ino)
{

	if (ino > 0)
		free_unr(devfs_inos, ino);
}

static void
devfs_devs_init(void *junk __unused)
{

	devfs_inos = new_unrhdr(DEVFS_ROOTINO + 1, INT_MAX, &devmtx);
}

SYSINIT(devfs_devs, SI_SUB_DEVFS, SI_ORDER_FIRST, devfs_devs_init, NULL);
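/*
 * Rough lifecycle sketch, for orientation only; kern_conf.c and
 * devfs_vnops.c are the authoritative callers, and the exact call paths
 * may differ between versions:
 *
 *	make_dev(9)	-> devfs_alloc()	allocate cdev + cdev_priv
 *			-> devfs_create()	mark CDP_ACTIVE, bump devfs_generation
 *	devfs lookup/readdir
 *			-> devfs_populate()	sync this mount's dirents with cdevp_list
 *	destroy_dev(9)	-> devfs_destroy()	clear CDP_ACTIVE, bump devfs_generation
 *			-> devfs_populate_loop() later GCs the dirent and drops the cdev
 */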