/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2000,2004
 *	Poul-Henning Kamp. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From: FreeBSD: src/sys/miscfs/kernfs/kernfs_vfsops.c 1.36
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <sys/kdb.h>

#include <fs/devfs/devfs.h>
#include <fs/devfs/devfs_int.h>

#include <security/mac/mac_framework.h>

/*
 * The one true (but secret) list of active devices in the system.
 * Locked by dev_lock()/devmtx
 */
struct cdev_priv_list cdevp_list = TAILQ_HEAD_INITIALIZER(cdevp_list);

struct unrhdr *devfs_inos;

static MALLOC_DEFINE(M_DEVFS2, "DEVFS2", "DEVFS data 2");
static MALLOC_DEFINE(M_DEVFS3, "DEVFS3", "DEVFS data 3");
static MALLOC_DEFINE(M_CDEVP, "DEVFS1", "DEVFS cdev_priv storage");

SYSCTL_NODE(_vfs, OID_AUTO, devfs, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "DEVFS filesystem");

static unsigned devfs_generation;
SYSCTL_UINT(_vfs_devfs, OID_AUTO, generation, CTLFLAG_RD,
    &devfs_generation, 0, "DEVFS generation number");

unsigned devfs_rule_depth = 1;
SYSCTL_UINT(_vfs_devfs, OID_AUTO, rule_depth, CTLFLAG_RW,
    &devfs_rule_depth, 0, "Max depth of ruleset include");

/*
 * Helper sysctl for devname(3).  We're given a dev_t and return the
 * name, if any, registered by the device driver.
 */
static int
sysctl_devname(SYSCTL_HANDLER_ARGS)
{
	int error;
	dev_t ud;
#ifdef COMPAT_FREEBSD11
	uint32_t ud_compat;
#endif
	struct cdev_priv *cdp;
	struct cdev *dev;

#ifdef COMPAT_FREEBSD11
	if (req->newlen == sizeof(ud_compat)) {
		error = SYSCTL_IN(req, &ud_compat, sizeof(ud_compat));
		if (error == 0)
			ud = ud_compat == (uint32_t)NODEV ?
			    NODEV : ud_compat;
	} else
#endif
		error = SYSCTL_IN(req, &ud, sizeof (ud));
	if (error)
		return (error);
	if (ud == NODEV)
		return (EINVAL);
	dev = NULL;
	dev_lock();
	TAILQ_FOREACH(cdp, &cdevp_list, cdp_list)
		if (cdp->cdp_inode == ud) {
			dev = &cdp->cdp_c;
			dev_refl(dev);
			break;
		}
	dev_unlock();
	if (dev == NULL)
		return (ENOENT);
	error = SYSCTL_OUT(req, dev->si_name, strlen(dev->si_name) + 1);
	dev_rel(dev);
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, devname,
    CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_ANYBODY|CTLFLAG_MPSAFE,
    NULL, 0, sysctl_devname, "", "devname(3) handler");

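/*
 * Userland sketch (illustrative only, not part of this file): the
 * kern.devname handler above is what devname(3)-style lookups end up
 * driving.  A hypothetical caller passes the dev_t as "new" data and
 * reads the registered name back as "old" data:
 *
 *	#include <sys/param.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	static void
 *	print_devname(dev_t dev)
 *	{
 *		char name[SPECNAMELEN + 1];
 *		size_t len = sizeof(name);
 *
 *		// Returns ENOENT if no active cdev has this inode/dev_t.
 *		if (sysctlbyname("kern.devname", name, &len, &dev,
 *		    sizeof(dev)) == 0)
 *			printf("%s\n", name);
 *	}
 */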

SYSCTL_INT(_debug_sizeof, OID_AUTO, cdev, CTLFLAG_RD,
    SYSCTL_NULL_INT_PTR, sizeof(struct cdev), "sizeof(struct cdev)");

SYSCTL_INT(_debug_sizeof, OID_AUTO, cdev_priv, CTLFLAG_RD,
    SYSCTL_NULL_INT_PTR, sizeof(struct cdev_priv), "sizeof(struct cdev_priv)");

struct cdev *
devfs_alloc(int flags)
{
	struct cdev_priv *cdp;
	struct cdev *cdev;
	struct timespec ts;

	cdp = malloc(sizeof *cdp, M_CDEVP, M_ZERO |
	    ((flags & MAKEDEV_NOWAIT) ? M_NOWAIT : M_WAITOK));
	if (cdp == NULL)
		return (NULL);

	mtx_init(&cdp->cdp_threadlock, "devthrd", NULL, MTX_DEF);

	cdp->cdp_dirents = &cdp->cdp_dirent0;

	cdev = &cdp->cdp_c;
	LIST_INIT(&cdev->si_children);
	vfs_timestamp(&ts);
	cdev->si_atime = cdev->si_mtime = cdev->si_ctime = ts;

	return (cdev);
}

int
devfs_dev_exists(const char *name)
{
	struct cdev_priv *cdp;

	dev_lock_assert_locked();

	TAILQ_FOREACH(cdp, &cdevp_list, cdp_list) {
		if ((cdp->cdp_flags & CDP_ACTIVE) == 0)
			continue;
		if (devfs_pathpath(cdp->cdp_c.si_name, name) != 0)
			return (1);
		if (devfs_pathpath(name, cdp->cdp_c.si_name) != 0)
			return (1);
	}
	if (devfs_dir_find(name) != 0)
		return (1);

	return (0);
}

void
devfs_free(struct cdev *cdev)
{
	struct cdev_priv *cdp;

	cdp = cdev2priv(cdev);
	if (cdev->si_cred != NULL)
		crfree(cdev->si_cred);
	devfs_free_cdp_inode(cdp->cdp_inode);
	if (cdp->cdp_maxdirent > 0)
		free(cdp->cdp_dirents, M_DEVFS2);
	mtx_destroy(&cdp->cdp_threadlock);
	free(cdp, M_CDEVP);
}

struct devfs_dirent *
devfs_find(struct devfs_dirent *dd, const char *name, int namelen, int type)
{
	struct devfs_dirent *de;

	TAILQ_FOREACH(de, &dd->de_dlist, de_list) {
		if (namelen != de->de_dirent->d_namlen)
			continue;
		if (type != 0 && type != de->de_dirent->d_type)
			continue;

		/*
		 * The race with finding a non-active name is not
		 * completely closed by the check, but like the check
		 * in devfs_allocv() it makes the race unlikely enough.
		 */
		if (de->de_dirent->d_type == DT_CHR &&
		    (de->de_cdp->cdp_flags & CDP_ACTIVE) == 0)
			continue;

		if (bcmp(name, de->de_dirent->d_name, namelen) != 0)
			continue;
		break;
	}
	KASSERT(de == NULL || (de->de_flags & DE_DOOMED) == 0,
	    ("devfs_find: returning a doomed entry"));
	return (de);
}

struct devfs_dirent *
devfs_newdirent(char *name, int namelen)
{
	int i;
	struct devfs_dirent *de;
	struct dirent d;

	d.d_namlen = namelen;
	i = sizeof(*de) + GENERIC_DIRSIZ(&d);
	de = malloc(i, M_DEVFS3, M_WAITOK | M_ZERO);
	de->de_dirent = (struct dirent *)(de + 1);
	de->de_dirent->d_namlen = namelen;
	de->de_dirent->d_reclen = GENERIC_DIRSIZ(&d);
	bcopy(name, de->de_dirent->d_name, namelen);
	dirent_terminate(de->de_dirent);
	vfs_timestamp(&de->de_ctime);
	de->de_mtime = de->de_atime = de->de_ctime;
	de->de_links = 1;
	de->de_holdcnt = 1;
#ifdef MAC
	mac_devfs_init(de);
#endif
	return (de);
}

struct devfs_dirent *
devfs_parent_dirent(struct devfs_dirent *de)
{

	if (de->de_dirent->d_type != DT_DIR)
		return (de->de_dir);

	if (de->de_flags & (DE_DOT | DE_DOTDOT))
		return (NULL);

	de = TAILQ_FIRST(&de->de_dlist);	/* "." */
	if (de == NULL)
		return (NULL);
	de = TAILQ_NEXT(de, de_list);		/* ".." */
	if (de == NULL)
		return (NULL);

	return (de->de_dir);
}

struct devfs_dirent *
devfs_vmkdir(struct devfs_mount *dmp, char *name, int namelen,
    struct devfs_dirent *dotdot, u_int inode)
{
	struct devfs_dirent *dd;
	struct devfs_dirent *de;

	/* Create the new directory */
	dd = devfs_newdirent(name, namelen);
	TAILQ_INIT(&dd->de_dlist);
	dd->de_dirent->d_type = DT_DIR;
	dd->de_mode = 0555;
	dd->de_links = 2;
	dd->de_dir = dd;
	if (inode != 0)
		dd->de_inode = inode;
	else
		dd->de_inode = alloc_unr(devfs_inos);

	/*
	 * "." and ".." are always the first two entries in the
	 * de_dlist list.
	 *
	 * Create the "." entry in the new directory.
	 */
	de = devfs_newdirent(".", 1);
	de->de_dirent->d_type = DT_DIR;
	de->de_flags |= DE_DOT;
	TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
	de->de_dir = dd;

	/* Create the ".." entry in the new directory. */
	de = devfs_newdirent("..", 2);
	de->de_dirent->d_type = DT_DIR;
	de->de_flags |= DE_DOTDOT;
	TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
	if (dotdot == NULL) {
		de->de_dir = dd;
	} else {
		de->de_dir = dotdot;
		sx_assert(&dmp->dm_lock, SX_XLOCKED);
		TAILQ_INSERT_TAIL(&dotdot->de_dlist, dd, de_list);
		dotdot->de_links++;
		devfs_rules_apply(dmp, dd);
	}

#ifdef MAC
	mac_devfs_create_directory(dmp->dm_mount, name, namelen, dd);
#endif
	return (dd);
}

void
devfs_dirent_free(struct devfs_dirent *de)
{
	struct vnode *vp;

	vp = de->de_vnode;
	mtx_lock(&devfs_de_interlock);
	if (vp != NULL && vp->v_data == de)
		vp->v_data = NULL;
	mtx_unlock(&devfs_de_interlock);
	free(de, M_DEVFS3);
}

/*
 * Removes a directory if it is empty.  Empty parent directories are
 * removed recursively as well.
 */
static void
devfs_rmdir_empty(struct devfs_mount *dm, struct devfs_dirent *de)
{
	struct devfs_dirent *dd, *de_dot, *de_dotdot;

	sx_assert(&dm->dm_lock, SX_XLOCKED);

	for (;;) {
		KASSERT(de->de_dirent->d_type == DT_DIR,
		    ("devfs_rmdir_empty: de is not a directory"));

		if ((de->de_flags & DE_DOOMED) != 0 || de == dm->dm_rootdir)
			return;

		de_dot = TAILQ_FIRST(&de->de_dlist);
		KASSERT(de_dot != NULL, ("devfs_rmdir_empty: . missing"));
		de_dotdot = TAILQ_NEXT(de_dot, de_list);
		KASSERT(de_dotdot != NULL, ("devfs_rmdir_empty: .. missing"));
		/* Return if the directory is not empty. */
		if (TAILQ_NEXT(de_dotdot, de_list) != NULL)
			return;

		dd = devfs_parent_dirent(de);
		KASSERT(dd != NULL, ("devfs_rmdir_empty: NULL dd"));
		TAILQ_REMOVE(&de->de_dlist, de_dot, de_list);
		TAILQ_REMOVE(&de->de_dlist, de_dotdot, de_list);
		TAILQ_REMOVE(&dd->de_dlist, de, de_list);
		DEVFS_DE_HOLD(dd);
		devfs_delete(dm, de, DEVFS_DEL_NORECURSE);
		devfs_delete(dm, de_dot, DEVFS_DEL_NORECURSE);
		devfs_delete(dm, de_dotdot, DEVFS_DEL_NORECURSE);
		if (DEVFS_DE_DROP(dd)) {
			devfs_dirent_free(dd);
			return;
		}

		de = dd;
	}
}

/*
 * The caller needs to hold the dm for the duration of the call since
 * dm->dm_lock may be temporarily dropped.
 */
void
devfs_delete(struct devfs_mount *dm, struct devfs_dirent *de, int flags)
{
	struct devfs_dirent *dd;
	struct vnode *vp;

	KASSERT((de->de_flags & DE_DOOMED) == 0,
	    ("devfs_delete doomed dirent"));
	de->de_flags |= DE_DOOMED;

	if ((flags & DEVFS_DEL_NORECURSE) == 0) {
		dd = devfs_parent_dirent(de);
		if (dd != NULL)
			DEVFS_DE_HOLD(dd);
		if (de->de_flags & DE_USER) {
			KASSERT(dd != NULL, ("devfs_delete: NULL dd"));
			devfs_dir_unref_de(dm, dd);
		}
	} else
		dd = NULL;

	mtx_lock(&devfs_de_interlock);
	vp = de->de_vnode;
	if (vp != NULL) {
		VI_LOCK(vp);
		mtx_unlock(&devfs_de_interlock);
		vholdl(vp);
		sx_unlock(&dm->dm_lock);
		if ((flags & DEVFS_DEL_VNLOCKED) == 0)
			vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK | LK_RETRY);
		else
			VI_UNLOCK(vp);
		vgone(vp);
		if ((flags & DEVFS_DEL_VNLOCKED) == 0)
			VOP_UNLOCK(vp);
		vdrop(vp);
		sx_xlock(&dm->dm_lock);
	} else
		mtx_unlock(&devfs_de_interlock);
	if (de->de_symlink) {
		free(de->de_symlink, M_DEVFS);
		de->de_symlink = NULL;
	}
#ifdef MAC
	mac_devfs_destroy(de);
#endif
	if (de->de_inode > DEVFS_ROOTINO) {
		devfs_free_cdp_inode(de->de_inode);
		de->de_inode = 0;
	}
	if (DEVFS_DE_DROP(de))
		devfs_dirent_free(de);

	if (dd != NULL) {
		if (DEVFS_DE_DROP(dd))
			devfs_dirent_free(dd);
		else
			devfs_rmdir_empty(dm, dd);
	}
}

/*
 * Called on unmount.
 * Recursively removes the entire tree.
 * The caller needs to hold the dm for the duration of the call.
 */

static void
devfs_purge(struct devfs_mount *dm, struct devfs_dirent *dd)
{
	struct devfs_dirent *de;

	sx_assert(&dm->dm_lock, SX_XLOCKED);

	DEVFS_DE_HOLD(dd);
	for (;;) {
		/*
		 * Use TAILQ_LAST() to remove "." and ".." last.
		 * We might need ".." to resolve a path in
		 * devfs_dir_unref_de().
		 */
		de = TAILQ_LAST(&dd->de_dlist, devfs_dlist_head);
		if (de == NULL)
			break;
		TAILQ_REMOVE(&dd->de_dlist, de, de_list);
		if (de->de_flags & DE_USER)
			devfs_dir_unref_de(dm, dd);
		if (de->de_flags & (DE_DOT | DE_DOTDOT))
			devfs_delete(dm, de, DEVFS_DEL_NORECURSE);
		else if (de->de_dirent->d_type == DT_DIR)
			devfs_purge(dm, de);
		else
			devfs_delete(dm, de, DEVFS_DEL_NORECURSE);
	}
	if (DEVFS_DE_DROP(dd))
		devfs_dirent_free(dd);
	else if ((dd->de_flags & DE_DOOMED) == 0)
		devfs_delete(dm, dd, DEVFS_DEL_NORECURSE);
}

/*
 * Each cdev_priv has an array of pointers to devfs_dirent which is indexed
 * by the mount point's dm_idx.
 * This function extends the array when necessary, taking into account that
 * the default array is 1 element and not malloc'ed.
 */
static void
devfs_metoo(struct cdev_priv *cdp, struct devfs_mount *dm)
{
	struct devfs_dirent **dep, **olddep;
	int siz;

	siz = (dm->dm_idx + 1) * sizeof *dep;
	dep = malloc(siz, M_DEVFS2, M_WAITOK | M_ZERO);
	dev_lock();
	if (dm->dm_idx <= cdp->cdp_maxdirent) {
		/* We got raced */
		dev_unlock();
		free(dep, M_DEVFS2);
		return;
	}
	memcpy(dep, cdp->cdp_dirents, (cdp->cdp_maxdirent + 1) * sizeof *dep);
	olddep = cdp->cdp_maxdirent > 0 ? cdp->cdp_dirents : NULL;
	cdp->cdp_dirents = dep;
	/*
	 * XXX: if malloc told us how much we actually got this could
	 * XXX: be optimized.
	 */
	cdp->cdp_maxdirent = dm->dm_idx;
	dev_unlock();
	free(olddep, M_DEVFS2);
}

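/*
 * The populate loop below walks the slash-separated components of
 * si_name (drivers may pass names containing '/' to make_dev(9), e.g.
 * "usb/0.1.0") and creates each intermediate directory before the leaf
 * entry.  A minimal userland sketch of the same component walk, using a
 * hypothetical walk_components() helper (illustrative only, not part of
 * this file):
 *
 *	#include <stdio.h>
 *
 *	static void
 *	walk_components(const char *s)
 *	{
 *		const char *q;
 *
 *		for (;;) {
 *			for (q = s; *q != '/' && *q != '\0'; q++)
 *				continue;
 *			if (*q != '/')
 *				break;
 *			printf("directory: %.*s\n", (int)(q - s), s);
 *			s = q + 1;
 *		}
 *		printf("leaf: %s\n", s);
 *	}
 *
 * walk_components("usb/0.1.0") prints "directory: usb" followed by
 * "leaf: 0.1.0".
 */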

/*
 * The caller needs to hold the dm for the duration of the call.
 */
static int
devfs_populate_loop(struct devfs_mount *dm, int cleanup)
{
	struct cdev_priv *cdp;
	struct devfs_dirent *de;
	struct devfs_dirent *dd, *dt;
	struct cdev *pdev;
	int de_flags, depth, j;
	char *q, *s;

	sx_assert(&dm->dm_lock, SX_XLOCKED);
	dev_lock();
	TAILQ_FOREACH(cdp, &cdevp_list, cdp_list) {
		KASSERT(cdp->cdp_dirents != NULL, ("NULL cdp_dirents"));

		/*
		 * If we are unmounting, or the device has been destroyed,
		 * clean up our dirent.
		 */
		if ((cleanup || !(cdp->cdp_flags & CDP_ACTIVE)) &&
		    dm->dm_idx <= cdp->cdp_maxdirent &&
		    cdp->cdp_dirents[dm->dm_idx] != NULL) {
			de = cdp->cdp_dirents[dm->dm_idx];
			cdp->cdp_dirents[dm->dm_idx] = NULL;
			KASSERT(cdp == de->de_cdp,
			    ("%s %d %s %p %p", __func__, __LINE__,
			    cdp->cdp_c.si_name, cdp, de->de_cdp));
			KASSERT(de->de_dir != NULL, ("Null de->de_dir"));
			dev_unlock();

			TAILQ_REMOVE(&de->de_dir->de_dlist, de, de_list);
			de->de_cdp = NULL;
			de->de_inode = 0;
			devfs_delete(dm, de, 0);
			dev_lock();
			cdp->cdp_inuse--;
			dev_unlock();
			return (1);
		}
		/*
		 * GC any lingering devices
		 */
		if (!(cdp->cdp_flags & CDP_ACTIVE)) {
			if (cdp->cdp_inuse > 0)
				continue;
			TAILQ_REMOVE(&cdevp_list, cdp, cdp_list);
			dev_unlock();
			dev_rel(&cdp->cdp_c);
			return (1);
		}
		/*
		 * Don't create any new dirents if we are unmounting
		 */
		if (cleanup)
			continue;
		KASSERT((cdp->cdp_flags & CDP_ACTIVE), ("Bogons, I tell ya'!"));

		if (dm->dm_idx <= cdp->cdp_maxdirent &&
		    cdp->cdp_dirents[dm->dm_idx] != NULL) {
			de = cdp->cdp_dirents[dm->dm_idx];
			KASSERT(cdp == de->de_cdp, ("inconsistent cdp"));
			continue;
		}

		cdp->cdp_inuse++;
		dev_unlock();

		if (dm->dm_idx > cdp->cdp_maxdirent)
			devfs_metoo(cdp, dm);

		dd = dm->dm_rootdir;
		s = cdp->cdp_c.si_name;
		for (;;) {
			for (q = s; *q != '/' && *q != '\0'; q++)
				continue;
			if (*q != '/')
				break;
			de = devfs_find(dd, s, q - s, 0);
			if (de == NULL)
				de = devfs_vmkdir(dm, s, q - s, dd, 0);
			else if (de->de_dirent->d_type == DT_LNK) {
				de = devfs_find(dd, s, q - s, DT_DIR);
				if (de == NULL)
					de = devfs_vmkdir(dm, s, q - s, dd, 0);
				de->de_flags |= DE_COVERED;
			}
			s = q + 1;
			dd = de;
			KASSERT(dd->de_dirent->d_type == DT_DIR &&
			    (dd->de_flags & (DE_DOT | DE_DOTDOT)) == 0,
			    ("%s: invalid directory (si_name=%s)",
			    __func__, cdp->cdp_c.si_name));
		}
		de_flags = 0;
		de = devfs_find(dd, s, q - s, DT_LNK);
		if (de != NULL)
			de_flags |= DE_COVERED;

		de = devfs_newdirent(s, q - s);
		if (cdp->cdp_c.si_flags & SI_ALIAS) {
			de->de_uid = 0;
			de->de_gid = 0;
			de->de_mode = 0755;
			de->de_dirent->d_type = DT_LNK;
			pdev = cdp->cdp_c.si_parent;
			dt = dd;
			depth = 0;
			while (dt != dm->dm_rootdir &&
			    (dt = devfs_parent_dirent(dt)) != NULL)
				depth++;
			j = depth * 3 + strlen(pdev->si_name) + 1;
			de->de_symlink = malloc(j, M_DEVFS, M_WAITOK);
			de->de_symlink[0] = 0;
			while (depth-- > 0)
				strcat(de->de_symlink, "../");
			strcat(de->de_symlink, pdev->si_name);
		} else {
			de->de_uid = cdp->cdp_c.si_uid;
			de->de_gid = cdp->cdp_c.si_gid;
			de->de_mode = cdp->cdp_c.si_mode;
			de->de_dirent->d_type = DT_CHR;
		}
		de->de_flags |= de_flags;
		de->de_inode = cdp->cdp_inode;
		de->de_cdp = cdp;
#ifdef MAC
		mac_devfs_create_device(cdp->cdp_c.si_cred, dm->dm_mount,
		    &cdp->cdp_c, de);
#endif
		de->de_dir = dd;
		TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
		devfs_rules_apply(dm, de);
		dev_lock();
		/* XXX: could check that cdp is still active here */
		KASSERT(cdp->cdp_dirents[dm->dm_idx] == NULL,
		    ("%s %d\n", __func__, __LINE__));
		cdp->cdp_dirents[dm->dm_idx] = de;
		KASSERT(de->de_cdp != (void *)0xdeadc0de,
		    ("%s %d\n", __func__, __LINE__));
		dev_unlock();
		return (1);
	}
	dev_unlock();
	return (0);
}

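/*
 * Worked example for the SI_ALIAS branch above (illustrative only): an
 * alias entry created two directories below the mount root ends up with
 * depth == 2, so for a parent device named "da0" the loop builds
 * de_symlink = "../../da0".  The allocation of depth * 3 +
 * strlen(pdev->si_name) + 1 bytes covers exactly the "../" prefixes, the
 * parent name and the terminating NUL.  Keeping the target relative to
 * the alias's own directory means the link resolves within the same
 * devfs instance no matter where that instance is mounted.
 */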

int
devfs_populate_needed(struct devfs_mount *dm)
{

	return (dm->dm_generation != devfs_generation);
}

/*
 * The caller needs to hold the dm for the duration of the call.
 */
void
devfs_populate(struct devfs_mount *dm)
{
	unsigned gen;

	sx_assert(&dm->dm_lock, SX_XLOCKED);
	if (!devfs_populate_needed(dm))
		return;
	gen = devfs_generation;
	while (devfs_populate_loop(dm, 0))
		continue;
	dm->dm_generation = gen;
}

/*
 * The caller needs to hold the dm for the duration of the call.
 */
void
devfs_cleanup(struct devfs_mount *dm)
{

	sx_assert(&dm->dm_lock, SX_XLOCKED);
	while (devfs_populate_loop(dm, 1))
		continue;
	devfs_purge(dm, dm->dm_rootdir);
}

/*
 * devfs_create() and devfs_destroy() are called from kern_conf.c and
 * in both cases the dev_lock() mutex is held, so no further locking
 * is necessary and no sleeping is allowed.
 */

void
devfs_create(struct cdev *dev)
{
	struct cdev_priv *cdp;

	dev_lock_assert_locked();
	cdp = cdev2priv(dev);
	cdp->cdp_flags |= CDP_ACTIVE;
	cdp->cdp_inode = alloc_unrl(devfs_inos);
	dev_refl(dev);
	TAILQ_INSERT_TAIL(&cdevp_list, cdp, cdp_list);
	devfs_generation++;
}

void
devfs_destroy(struct cdev *dev)
{
	struct cdev_priv *cdp;

	dev_lock_assert_locked();
	cdp = cdev2priv(dev);
	cdp->cdp_flags &= ~CDP_ACTIVE;
	devfs_generation++;
}

ino_t
devfs_alloc_cdp_inode(void)
{

	return (alloc_unr(devfs_inos));
}

void
devfs_free_cdp_inode(ino_t ino)
{

	if (ino > 0)
		free_unr(devfs_inos, ino);
}

static void
devfs_devs_init(void *junk __unused)
{

	devfs_inos = new_unrhdr(DEVFS_ROOTINO + 1, INT_MAX, &devmtx);
}

SYSINIT(devfs_devs, SI_SUB_DEVFS, SI_ORDER_FIRST, devfs_devs_init, NULL);
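
/*
 * Driver-side sketch (illustrative only, not part of this file): the
 * devfs_create()/devfs_destroy() hooks above are reached through the
 * make_dev(9) family in kern_conf.c.  A hypothetical minimal driver
 * fragment:
 *
 *	static struct cdev *foo_dev;
 *
 *	static struct cdevsw foo_cdevsw = {
 *		.d_version =	D_VERSION,
 *		.d_name =	"foo",
 *	};
 *
 *	// Attach: registering the cdev ends up in devfs_create(), which
 *	// marks it CDP_ACTIVE and bumps devfs_generation, so /dev/foo
 *	// appears on the next devfs_populate() of each mount.
 *	foo_dev = make_dev(&foo_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "foo");
 *
 *	// Detach: devfs_destroy() clears CDP_ACTIVE and the node is
 *	// garbage-collected by devfs_populate_loop().
 *	destroy_dev(foo_dev);
 */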