/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2000,2004
 *	Poul-Henning Kamp.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From: FreeBSD: src/sys/miscfs/kernfs/kernfs_vfsops.c 1.36
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <sys/kdb.h>

#include <fs/devfs/devfs.h>
#include <fs/devfs/devfs_int.h>

#include <security/mac/mac_framework.h>

/*
 * The one true (but secret) list of active devices in the system.
 * Locked by dev_lock()/devmtx.
 */
struct cdev_priv_list cdevp_list = TAILQ_HEAD_INITIALIZER(cdevp_list);

struct unrhdr *devfs_inos;

static MALLOC_DEFINE(M_DEVFS2, "DEVFS2", "DEVFS data 2");
static MALLOC_DEFINE(M_DEVFS3, "DEVFS3", "DEVFS data 3");
static MALLOC_DEFINE(M_CDEVP, "DEVFS1", "DEVFS cdev_priv storage");

SYSCTL_NODE(_vfs, OID_AUTO, devfs, CTLFLAG_RW, 0, "DEVFS filesystem");

static unsigned devfs_generation;
SYSCTL_UINT(_vfs_devfs, OID_AUTO, generation, CTLFLAG_RD,
    &devfs_generation, 0, "DEVFS generation number");

unsigned devfs_rule_depth = 1;
SYSCTL_UINT(_vfs_devfs, OID_AUTO, rule_depth, CTLFLAG_RW,
    &devfs_rule_depth, 0, "Max depth of ruleset include");

/*
 * Helper sysctl for devname(3).  We're given a dev_t and return the
 * name, if any, registered by the device driver.
 */
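/*
 * A userland consumer passes the dev_t as the sysctl's "new" value and
 * reads the name back as the "old" value; devname(3) in libc does
 * roughly the following (sketch, error handling omitted):
 *
 *	char buf[SPECNAMELEN + 1];
 *	size_t len = sizeof(buf);
 *	dev_t dev = st.st_rdev;
 *
 *	sysctlbyname("kern.devname", buf, &len, &dev, sizeof(dev));
 */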
static int
sysctl_devname(SYSCTL_HANDLER_ARGS)
{
	int error;
	dev_t ud;
#ifdef COMPAT_FREEBSD11
	uint32_t ud_compat;
#endif
	struct cdev_priv *cdp;
	struct cdev *dev;

#ifdef COMPAT_FREEBSD11
	if (req->newlen == sizeof(ud_compat)) {
		error = SYSCTL_IN(req, &ud_compat, sizeof(ud_compat));
		if (error == 0)
			ud = ud_compat == (uint32_t)NODEV ?
			    NODEV : ud_compat;
	} else
#endif
		error = SYSCTL_IN(req, &ud, sizeof (ud));
	if (error)
		return (error);
	if (ud == NODEV)
		return (EINVAL);
	dev = NULL;
	dev_lock();
	TAILQ_FOREACH(cdp, &cdevp_list, cdp_list)
		if (cdp->cdp_inode == ud) {
			dev = &cdp->cdp_c;
			dev_refl(dev);
			break;
		}
	dev_unlock();
	if (dev == NULL)
		return (ENOENT);
	error = SYSCTL_OUT(req, dev->si_name, strlen(dev->si_name) + 1);
	dev_rel(dev);
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, devname,
    CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_ANYBODY|CTLFLAG_MPSAFE,
    NULL, 0, sysctl_devname, "", "devname(3) handler");

SYSCTL_INT(_debug_sizeof, OID_AUTO, cdev, CTLFLAG_RD,
    SYSCTL_NULL_INT_PTR, sizeof(struct cdev), "sizeof(struct cdev)");

SYSCTL_INT(_debug_sizeof, OID_AUTO, cdev_priv, CTLFLAG_RD,
    SYSCTL_NULL_INT_PTR, sizeof(struct cdev_priv), "sizeof(struct cdev_priv)");

struct cdev *
devfs_alloc(int flags)
{
	struct cdev_priv *cdp;
	struct cdev *cdev;
	struct timespec ts;

	cdp = malloc(sizeof *cdp, M_CDEVP, M_ZERO |
	    ((flags & MAKEDEV_NOWAIT) ? M_NOWAIT : M_WAITOK));
	if (cdp == NULL)
		return (NULL);

	cdp->cdp_dirents = &cdp->cdp_dirent0;

	cdev = &cdp->cdp_c;
	LIST_INIT(&cdev->si_children);
	vfs_timestamp(&ts);
	cdev->si_atime = cdev->si_mtime = cdev->si_ctime = ts;

	return (cdev);
}

int
devfs_dev_exists(const char *name)
{
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_OWNED);

	TAILQ_FOREACH(cdp, &cdevp_list, cdp_list) {
		if ((cdp->cdp_flags & CDP_ACTIVE) == 0)
			continue;
		if (devfs_pathpath(cdp->cdp_c.si_name, name) != 0)
			return (1);
		if (devfs_pathpath(name, cdp->cdp_c.si_name) != 0)
			return (1);
	}
	if (devfs_dir_find(name) != 0)
		return (1);

	return (0);
}

void
devfs_free(struct cdev *cdev)
{
	struct cdev_priv *cdp;

	cdp = cdev2priv(cdev);
	if (cdev->si_cred != NULL)
		crfree(cdev->si_cred);
	devfs_free_cdp_inode(cdp->cdp_inode);
	if (cdp->cdp_maxdirent > 0)
		free(cdp->cdp_dirents, M_DEVFS2);
	free(cdp, M_CDEVP);
}
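/*
 * Look up the directory entry "name" (of length "namelen") in the
 * directory "dd".  A non-zero "type" restricts the match to entries of
 * that dirent type.  Character-device entries whose cdev has been
 * destroyed are skipped, so only NULL or a live, non-doomed entry is
 * returned.
 */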
struct devfs_dirent *
devfs_find(struct devfs_dirent *dd, const char *name, int namelen, int type)
{
	struct devfs_dirent *de;

	TAILQ_FOREACH(de, &dd->de_dlist, de_list) {
		if (namelen != de->de_dirent->d_namlen)
			continue;
		if (type != 0 && type != de->de_dirent->d_type)
			continue;

		/*
		 * The race with finding a non-active name is not
		 * completely closed by this check, but, as in
		 * devfs_allocv(), it makes the window small enough.
		 */
		if (de->de_dirent->d_type == DT_CHR &&
		    (de->de_cdp->cdp_flags & CDP_ACTIVE) == 0)
			continue;

		if (bcmp(name, de->de_dirent->d_name, namelen) != 0)
			continue;
		break;
	}
	KASSERT(de == NULL || (de->de_flags & DE_DOOMED) == 0,
	    ("devfs_find: returning a doomed entry"));
	return (de);
}

struct devfs_dirent *
devfs_newdirent(char *name, int namelen)
{
	int i;
	struct devfs_dirent *de;
	struct dirent d;

	d.d_namlen = namelen;
	i = sizeof(*de) + GENERIC_DIRSIZ(&d);
	de = malloc(i, M_DEVFS3, M_WAITOK | M_ZERO);
	de->de_dirent = (struct dirent *)(de + 1);
	de->de_dirent->d_namlen = namelen;
	de->de_dirent->d_reclen = GENERIC_DIRSIZ(&d);
	bcopy(name, de->de_dirent->d_name, namelen);
	dirent_terminate(de->de_dirent);
	vfs_timestamp(&de->de_ctime);
	de->de_mtime = de->de_atime = de->de_ctime;
	de->de_links = 1;
	de->de_holdcnt = 1;
#ifdef MAC
	mac_devfs_init(de);
#endif
	return (de);
}
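/*
 * Return the parent directory entry of "de".  For leaf entries this is
 * simply de->de_dir.  For a directory the parent is reached through its
 * ".." entry, which is always the second element of de_dlist (see
 * devfs_vmkdir() below); "." and ".." entries themselves have no
 * meaningful parent and yield NULL.
 */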
struct devfs_dirent *
devfs_parent_dirent(struct devfs_dirent *de)
{

	if (de->de_dirent->d_type != DT_DIR)
		return (de->de_dir);

	if (de->de_flags & (DE_DOT | DE_DOTDOT))
		return (NULL);

	de = TAILQ_FIRST(&de->de_dlist);	/* "." */
	if (de == NULL)
		return (NULL);
	de = TAILQ_NEXT(de, de_list);		/* ".." */
	if (de == NULL)
		return (NULL);

	return (de->de_dir);
}

struct devfs_dirent *
devfs_vmkdir(struct devfs_mount *dmp, char *name, int namelen,
    struct devfs_dirent *dotdot, u_int inode)
{
	struct devfs_dirent *dd;
	struct devfs_dirent *de;

	/* Create the new directory. */
	dd = devfs_newdirent(name, namelen);
	TAILQ_INIT(&dd->de_dlist);
	dd->de_dirent->d_type = DT_DIR;
	dd->de_mode = 0555;
	dd->de_links = 2;
	dd->de_dir = dd;
	if (inode != 0)
		dd->de_inode = inode;
	else
		dd->de_inode = alloc_unr(devfs_inos);

	/*
	 * "." and ".." are always the first two entries in the
	 * de_dlist list.
	 *
	 * Create the "." entry in the new directory.
	 */
	de = devfs_newdirent(".", 1);
	de->de_dirent->d_type = DT_DIR;
	de->de_flags |= DE_DOT;
	TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
	de->de_dir = dd;

	/* Create the ".." entry in the new directory. */
	de = devfs_newdirent("..", 2);
	de->de_dirent->d_type = DT_DIR;
	de->de_flags |= DE_DOTDOT;
	TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
	if (dotdot == NULL) {
		de->de_dir = dd;
	} else {
		de->de_dir = dotdot;
		sx_assert(&dmp->dm_lock, SX_XLOCKED);
		TAILQ_INSERT_TAIL(&dotdot->de_dlist, dd, de_list);
		dotdot->de_links++;
		devfs_rules_apply(dmp, dd);
	}

#ifdef MAC
	mac_devfs_create_directory(dmp->dm_mount, name, namelen, dd);
#endif
	return (dd);
}

void
devfs_dirent_free(struct devfs_dirent *de)
{
	struct vnode *vp;

	vp = de->de_vnode;
	mtx_lock(&devfs_de_interlock);
	if (vp != NULL && vp->v_data == de)
		vp->v_data = NULL;
	mtx_unlock(&devfs_de_interlock);
	free(de, M_DEVFS3);
}

/*
 * Removes a directory if it is empty; empty parent directories are
 * then removed recursively as well.
 */
static void
devfs_rmdir_empty(struct devfs_mount *dm, struct devfs_dirent *de)
{
	struct devfs_dirent *dd, *de_dot, *de_dotdot;

	sx_assert(&dm->dm_lock, SX_XLOCKED);

	for (;;) {
		KASSERT(de->de_dirent->d_type == DT_DIR,
		    ("devfs_rmdir_empty: de is not a directory"));

		if ((de->de_flags & DE_DOOMED) != 0 || de == dm->dm_rootdir)
			return;

		de_dot = TAILQ_FIRST(&de->de_dlist);
		KASSERT(de_dot != NULL, ("devfs_rmdir_empty: . missing"));
		de_dotdot = TAILQ_NEXT(de_dot, de_list);
		KASSERT(de_dotdot != NULL, ("devfs_rmdir_empty: .. missing"));
		/* Return if the directory is not empty. */
		if (TAILQ_NEXT(de_dotdot, de_list) != NULL)
			return;

		dd = devfs_parent_dirent(de);
		KASSERT(dd != NULL, ("devfs_rmdir_empty: NULL dd"));
		TAILQ_REMOVE(&de->de_dlist, de_dot, de_list);
		TAILQ_REMOVE(&de->de_dlist, de_dotdot, de_list);
		TAILQ_REMOVE(&dd->de_dlist, de, de_list);
		DEVFS_DE_HOLD(dd);
		devfs_delete(dm, de, DEVFS_DEL_NORECURSE);
		devfs_delete(dm, de_dot, DEVFS_DEL_NORECURSE);
		devfs_delete(dm, de_dotdot, DEVFS_DEL_NORECURSE);
		if (DEVFS_DE_DROP(dd)) {
			devfs_dirent_free(dd);
			return;
		}

		de = dd;
	}
}

/*
 * The caller needs to hold the dm for the duration of the call since
 * dm->dm_lock may be temporarily dropped.
 */
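/*
 * Flag summary: DEVFS_DEL_NORECURSE skips the parent-directory
 * bookkeeping (used by devfs_rmdir_empty() and devfs_purge(), which
 * handle the parent themselves), and DEVFS_DEL_VNLOCKED indicates that
 * the caller already holds the vnode lock, so it is neither taken nor
 * released here.
 */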
void
devfs_delete(struct devfs_mount *dm, struct devfs_dirent *de, int flags)
{
	struct devfs_dirent *dd;
	struct vnode *vp;

	KASSERT((de->de_flags & DE_DOOMED) == 0,
	    ("devfs_delete doomed dirent"));
	de->de_flags |= DE_DOOMED;

	if ((flags & DEVFS_DEL_NORECURSE) == 0) {
		dd = devfs_parent_dirent(de);
		if (dd != NULL)
			DEVFS_DE_HOLD(dd);
		if (de->de_flags & DE_USER) {
			KASSERT(dd != NULL, ("devfs_delete: NULL dd"));
			devfs_dir_unref_de(dm, dd);
		}
	} else
		dd = NULL;

	mtx_lock(&devfs_de_interlock);
	vp = de->de_vnode;
	if (vp != NULL) {
		VI_LOCK(vp);
		mtx_unlock(&devfs_de_interlock);
		vholdl(vp);
		sx_unlock(&dm->dm_lock);
		if ((flags & DEVFS_DEL_VNLOCKED) == 0)
			vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK | LK_RETRY);
		else
			VI_UNLOCK(vp);
		vgone(vp);
		if ((flags & DEVFS_DEL_VNLOCKED) == 0)
			VOP_UNLOCK(vp, 0);
		vdrop(vp);
		sx_xlock(&dm->dm_lock);
	} else
		mtx_unlock(&devfs_de_interlock);
	if (de->de_symlink) {
		free(de->de_symlink, M_DEVFS);
		de->de_symlink = NULL;
	}
#ifdef MAC
	mac_devfs_destroy(de);
#endif
	if (de->de_inode > DEVFS_ROOTINO) {
		devfs_free_cdp_inode(de->de_inode);
		de->de_inode = 0;
	}
	if (DEVFS_DE_DROP(de))
		devfs_dirent_free(de);

	if (dd != NULL) {
		if (DEVFS_DE_DROP(dd))
			devfs_dirent_free(dd);
		else
			devfs_rmdir_empty(dm, dd);
	}
}

/*
 * Called on unmount.
 * Recursively removes the entire tree.
 * The caller needs to hold the dm for the duration of the call.
 */
static void
devfs_purge(struct devfs_mount *dm, struct devfs_dirent *dd)
{
	struct devfs_dirent *de;

	sx_assert(&dm->dm_lock, SX_XLOCKED);

	DEVFS_DE_HOLD(dd);
	for (;;) {
		/*
		 * Use TAILQ_LAST() to remove "." and ".." last.
		 * We might need ".." to resolve a path in
		 * devfs_dir_unref_de().
		 */
		de = TAILQ_LAST(&dd->de_dlist, devfs_dlist_head);
		if (de == NULL)
			break;
		TAILQ_REMOVE(&dd->de_dlist, de, de_list);
		if (de->de_flags & DE_USER)
			devfs_dir_unref_de(dm, dd);
		if (de->de_flags & (DE_DOT | DE_DOTDOT))
			devfs_delete(dm, de, DEVFS_DEL_NORECURSE);
		else if (de->de_dirent->d_type == DT_DIR)
			devfs_purge(dm, de);
		else
			devfs_delete(dm, de, DEVFS_DEL_NORECURSE);
	}
	if (DEVFS_DE_DROP(dd))
		devfs_dirent_free(dd);
	else if ((dd->de_flags & DE_DOOMED) == 0)
		devfs_delete(dm, dd, DEVFS_DEL_NORECURSE);
}

/*
 * Each cdev_priv has an array of pointers to devfs_dirent which is indexed
 * by the mount point's dm_idx.
 * This function extends the array when necessary, taking into account that
 * the default array is a single element and not malloc'ed.
 */
static void
devfs_metoo(struct cdev_priv *cdp, struct devfs_mount *dm)
{
	struct devfs_dirent **dep;
	int siz;

	siz = (dm->dm_idx + 1) * sizeof *dep;
	dep = malloc(siz, M_DEVFS2, M_WAITOK | M_ZERO);
	dev_lock();
	if (dm->dm_idx <= cdp->cdp_maxdirent) {
		/* We got raced. */
		dev_unlock();
		free(dep, M_DEVFS2);
		return;
	}
	memcpy(dep, cdp->cdp_dirents, (cdp->cdp_maxdirent + 1) * sizeof *dep);
	if (cdp->cdp_maxdirent > 0)
		free(cdp->cdp_dirents, M_DEVFS2);
	cdp->cdp_dirents = dep;
	/*
	 * XXX: if malloc told us how much we actually got this could
	 * XXX: be optimized.
	 */
	cdp->cdp_maxdirent = dm->dm_idx;
	dev_unlock();
}

/*
 * The caller needs to hold the dm for the duration of the call.
 */
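/*
 * One pass over the global cdev list: delete a stale dirent, garbage
 * collect a destroyed cdev, or create a missing dirent, whichever is
 * found first.  Returns 1 if a change was made (locks may have been
 * dropped, so the caller has to rescan) and 0 once the mount is in
 * sync with the cdev list.
 */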
static int
devfs_populate_loop(struct devfs_mount *dm, int cleanup)
{
	struct cdev_priv *cdp;
	struct devfs_dirent *de;
	struct devfs_dirent *dd, *dt;
	struct cdev *pdev;
	int de_flags, depth, j;
	char *q, *s;

	sx_assert(&dm->dm_lock, SX_XLOCKED);
	dev_lock();
	TAILQ_FOREACH(cdp, &cdevp_list, cdp_list) {
		KASSERT(cdp->cdp_dirents != NULL, ("NULL cdp_dirents"));

		/*
		 * If we are unmounting, or the device has been destroyed,
		 * clean up our dirent.
		 */
		if ((cleanup || !(cdp->cdp_flags & CDP_ACTIVE)) &&
		    dm->dm_idx <= cdp->cdp_maxdirent &&
		    cdp->cdp_dirents[dm->dm_idx] != NULL) {
			de = cdp->cdp_dirents[dm->dm_idx];
			cdp->cdp_dirents[dm->dm_idx] = NULL;
			KASSERT(cdp == de->de_cdp,
			    ("%s %d %s %p %p", __func__, __LINE__,
			    cdp->cdp_c.si_name, cdp, de->de_cdp));
			KASSERT(de->de_dir != NULL, ("Null de->de_dir"));
			dev_unlock();

			TAILQ_REMOVE(&de->de_dir->de_dlist, de, de_list);
			de->de_cdp = NULL;
			de->de_inode = 0;
			devfs_delete(dm, de, 0);
			dev_lock();
			cdp->cdp_inuse--;
			dev_unlock();
			return (1);
		}

		/*
		 * GC any lingering devices.
		 */
		if (!(cdp->cdp_flags & CDP_ACTIVE)) {
			if (cdp->cdp_inuse > 0)
				continue;
			TAILQ_REMOVE(&cdevp_list, cdp, cdp_list);
			dev_unlock();
			dev_rel(&cdp->cdp_c);
			return (1);
		}

		/*
		 * Don't create any new dirents if we are unmounting.
		 */
		if (cleanup)
			continue;
		KASSERT((cdp->cdp_flags & CDP_ACTIVE), ("Bogons, I tell ya'!"));

		if (dm->dm_idx <= cdp->cdp_maxdirent &&
		    cdp->cdp_dirents[dm->dm_idx] != NULL) {
			de = cdp->cdp_dirents[dm->dm_idx];
			KASSERT(cdp == de->de_cdp, ("inconsistent cdp"));
			continue;
		}

		cdp->cdp_inuse++;
		dev_unlock();

		if (dm->dm_idx > cdp->cdp_maxdirent)
			devfs_metoo(cdp, dm);

		dd = dm->dm_rootdir;
		s = cdp->cdp_c.si_name;
		for (;;) {
			for (q = s; *q != '/' && *q != '\0'; q++)
				continue;
			if (*q != '/')
				break;
			de = devfs_find(dd, s, q - s, 0);
			if (de == NULL)
				de = devfs_vmkdir(dm, s, q - s, dd, 0);
			else if (de->de_dirent->d_type == DT_LNK) {
				de = devfs_find(dd, s, q - s, DT_DIR);
				if (de == NULL)
					de = devfs_vmkdir(dm, s, q - s, dd, 0);
				de->de_flags |= DE_COVERED;
			}
			s = q + 1;
			dd = de;
			KASSERT(dd->de_dirent->d_type == DT_DIR &&
			    (dd->de_flags & (DE_DOT | DE_DOTDOT)) == 0,
			    ("%s: invalid directory (si_name=%s)",
			    __func__, cdp->cdp_c.si_name));
		}
		de_flags = 0;
		de = devfs_find(dd, s, q - s, DT_LNK);
		if (de != NULL)
			de_flags |= DE_COVERED;

		de = devfs_newdirent(s, q - s);
		if (cdp->cdp_c.si_flags & SI_ALIAS) {
			de->de_uid = 0;
			de->de_gid = 0;
			de->de_mode = 0755;
			de->de_dirent->d_type = DT_LNK;
			pdev = cdp->cdp_c.si_parent;
			dt = dd;
			depth = 0;
			while (dt != dm->dm_rootdir &&
			    (dt = devfs_parent_dirent(dt)) != NULL)
				depth++;
			j = depth * 3 + strlen(pdev->si_name) + 1;
			de->de_symlink = malloc(j, M_DEVFS, M_WAITOK);
			de->de_symlink[0] = 0;
			while (depth-- > 0)
				strcat(de->de_symlink, "../");
			strcat(de->de_symlink, pdev->si_name);
		} else {
			de->de_uid = cdp->cdp_c.si_uid;
			de->de_gid = cdp->cdp_c.si_gid;
			de->de_mode = cdp->cdp_c.si_mode;
			de->de_dirent->d_type = DT_CHR;
		}
		de->de_flags |= de_flags;
		de->de_inode = cdp->cdp_inode;
		de->de_cdp = cdp;
#ifdef MAC
		mac_devfs_create_device(cdp->cdp_c.si_cred, dm->dm_mount,
		    &cdp->cdp_c, de);
#endif
		de->de_dir = dd;
		TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
		devfs_rules_apply(dm, de);
		dev_lock();
		/* XXX: could check that cdp is still active here */
		KASSERT(cdp->cdp_dirents[dm->dm_idx] == NULL,
		    ("%s %d\n", __func__, __LINE__));
		cdp->cdp_dirents[dm->dm_idx] = de;
		KASSERT(de->de_cdp != (void *)0xdeadc0de,
		    ("%s %d\n", __func__, __LINE__));
		dev_unlock();
		return (1);
	}
	dev_unlock();
	return (0);
}

/*
 * The caller needs to hold the dm for the duration of the call.
 */
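/*
 * A caller typically holds the per-mount lock exclusively and refreshes
 * the tree right before using it, roughly (sketch):
 *
 *	sx_xlock(&dmp->dm_lock);
 *	devfs_populate(dmp);
 *	... lookup / readdir work ...
 *	sx_xunlock(&dmp->dm_lock);
 *
 * The generation check below makes the common "nothing has changed"
 * case cheap.
 */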
void
devfs_populate(struct devfs_mount *dm)
{
	unsigned gen;

	sx_assert(&dm->dm_lock, SX_XLOCKED);
	gen = devfs_generation;
	if (dm->dm_generation == gen)
		return;
	while (devfs_populate_loop(dm, 0))
		continue;
	dm->dm_generation = gen;
}

/*
 * The caller needs to hold the dm for the duration of the call.
 */
void
devfs_cleanup(struct devfs_mount *dm)
{

	sx_assert(&dm->dm_lock, SX_XLOCKED);
	while (devfs_populate_loop(dm, 1))
		continue;
	devfs_purge(dm, dm->dm_rootdir);
}

/*
 * devfs_create() and devfs_destroy() are called from kern_conf.c and
 * in both cases the devlock() mutex is held, so no further locking
 * is necessary and no sleeping allowed.
 */
void
devfs_create(struct cdev *dev)
{
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_OWNED);
	cdp = cdev2priv(dev);
	cdp->cdp_flags |= CDP_ACTIVE;
	cdp->cdp_inode = alloc_unrl(devfs_inos);
	dev_refl(dev);
	TAILQ_INSERT_TAIL(&cdevp_list, cdp, cdp_list);
	devfs_generation++;
}

void
devfs_destroy(struct cdev *dev)
{
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_OWNED);
	cdp = cdev2priv(dev);
	cdp->cdp_flags &= ~CDP_ACTIVE;
	devfs_generation++;
}

ino_t
devfs_alloc_cdp_inode(void)
{

	return (alloc_unr(devfs_inos));
}

void
devfs_free_cdp_inode(ino_t ino)
{

	if (ino > 0)
		free_unr(devfs_inos, ino);
}

static void
devfs_devs_init(void *junk __unused)
{

	devfs_inos = new_unrhdr(DEVFS_ROOTINO + 1, INT_MAX, &devmtx);
}

SYSINIT(devfs_devs, SI_SUB_DEVFS, SI_ORDER_FIRST, devfs_devs_init, NULL);