/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2000,2004
 *	Poul-Henning Kamp.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From: FreeBSD: src/sys/miscfs/kernfs/kernfs_vfsops.c 1.36
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <sys/kdb.h>

#include <fs/devfs/devfs.h>
#include <fs/devfs/devfs_int.h>

#include <security/mac/mac_framework.h>

/*
 * The one true (but secret) list of active devices in the system.
 * Locked by dev_lock()/devmtx
 */
struct cdev_priv_list cdevp_list = TAILQ_HEAD_INITIALIZER(cdevp_list);

struct unrhdr *devfs_inos;

static MALLOC_DEFINE(M_DEVFS2, "DEVFS2", "DEVFS data 2");
static MALLOC_DEFINE(M_DEVFS3, "DEVFS3", "DEVFS data 3");
static MALLOC_DEFINE(M_CDEVP, "DEVFS1", "DEVFS cdev_priv storage");

SYSCTL_NODE(_vfs, OID_AUTO, devfs, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "DEVFS filesystem");

static unsigned devfs_generation;
SYSCTL_UINT(_vfs_devfs, OID_AUTO, generation, CTLFLAG_RD,
    &devfs_generation, 0, "DEVFS generation number");

unsigned devfs_rule_depth = 1;
SYSCTL_UINT(_vfs_devfs, OID_AUTO, rule_depth, CTLFLAG_RW,
    &devfs_rule_depth, 0, "Max depth of ruleset include");

/*
 * Helper sysctl for devname(3).  We're given a dev_t and return the
 * name, if any, registered by the device driver.
 */
static int
sysctl_devname(SYSCTL_HANDLER_ARGS)
{
	int error;
	dev_t ud;
#ifdef COMPAT_FREEBSD11
	uint32_t ud_compat;
#endif
	struct cdev_priv *cdp;
	struct cdev *dev;

#ifdef COMPAT_FREEBSD11
	if (req->newlen == sizeof(ud_compat)) {
		error = SYSCTL_IN(req, &ud_compat, sizeof(ud_compat));
		if (error == 0)
			ud = ud_compat == (uint32_t)NODEV ? NODEV : ud_compat;
	} else
#endif
		error = SYSCTL_IN(req, &ud, sizeof (ud));
	if (error)
		return (error);
	if (ud == NODEV)
		return (EINVAL);
	dev = NULL;
	dev_lock();
	TAILQ_FOREACH(cdp, &cdevp_list, cdp_list)
		if (cdp->cdp_inode == ud) {
			dev = &cdp->cdp_c;
			dev_refl(dev);
			break;
		}
	dev_unlock();
	if (dev == NULL)
		return (ENOENT);
	error = SYSCTL_OUT(req, dev->si_name, strlen(dev->si_name) + 1);
	dev_rel(dev);
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, devname,
    CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_ANYBODY|CTLFLAG_MPSAFE,
    NULL, 0, sysctl_devname, "", "devname(3) handler");

SYSCTL_INT(_debug_sizeof, OID_AUTO, cdev, CTLFLAG_RD,
    SYSCTL_NULL_INT_PTR, sizeof(struct cdev), "sizeof(struct cdev)");

SYSCTL_INT(_debug_sizeof, OID_AUTO, cdev_priv, CTLFLAG_RD,
    SYSCTL_NULL_INT_PTR, sizeof(struct cdev_priv), "sizeof(struct cdev_priv)");

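/*
 * Allocate the combined cdev_priv/cdev for a new device node.  Unless
 * MAKEDEV_NOWAIT is passed the allocation may sleep; with MAKEDEV_NOWAIT
 * it may instead fail and return NULL.
 */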
struct cdev *
devfs_alloc(int flags)
{
	struct cdev_priv *cdp;
	struct cdev *cdev;
	struct timespec ts;

	cdp = malloc(sizeof *cdp, M_CDEVP, M_ZERO |
	    ((flags & MAKEDEV_NOWAIT) ? M_NOWAIT : M_WAITOK));
	if (cdp == NULL)
		return (NULL);

	mtx_init(&cdp->cdp_threadlock, "devthrd", NULL, MTX_DEF);

	cdp->cdp_dirents = &cdp->cdp_dirent0;

	cdev = &cdp->cdp_c;
	LIST_INIT(&cdev->si_children);
	vfs_timestamp(&ts);
	cdev->si_atime = cdev->si_mtime = cdev->si_ctime = ts;

	return (cdev);
}

int
devfs_dev_exists(const char *name)
{
	struct cdev_priv *cdp;

	dev_lock_assert_locked();

	TAILQ_FOREACH(cdp, &cdevp_list, cdp_list) {
		if ((cdp->cdp_flags & CDP_ACTIVE) == 0)
			continue;
		if (devfs_pathpath(cdp->cdp_c.si_name, name) != 0)
			return (1);
		if (devfs_pathpath(name, cdp->cdp_c.si_name) != 0)
			return (1);
	}
	if (devfs_dir_find(name) != 0)
		return (1);

	return (0);
}

void
devfs_free(struct cdev *cdev)
{
	struct cdev_priv *cdp;

	cdp = cdev2priv(cdev);
	if (cdev->si_cred != NULL)
		crfree(cdev->si_cred);
	devfs_free_cdp_inode(cdp->cdp_inode);
	if (cdp->cdp_maxdirent > 0)
		free(cdp->cdp_dirents, M_DEVFS2);
	mtx_destroy(&cdp->cdp_threadlock);
	free(cdp, M_CDEVP);
}

struct devfs_dirent *
devfs_find(struct devfs_dirent *dd, const char *name, int namelen, int type)
{
	struct devfs_dirent *de;

	TAILQ_FOREACH(de, &dd->de_dlist, de_list) {
		if (namelen != de->de_dirent->d_namlen)
			continue;
		if (type != 0 && type != de->de_dirent->d_type)
			continue;

		/*
		 * The race with finding non-active name is not
		 * completely closed by the check, but it is similar
		 * to the devfs_allocv() in making it unlikely enough.
		 */
		if (de->de_dirent->d_type == DT_CHR &&
		    (de->de_cdp->cdp_flags & CDP_ACTIVE) == 0)
			continue;

		if (bcmp(name, de->de_dirent->d_name, namelen) != 0)
			continue;
		break;
	}
	KASSERT(de == NULL || (de->de_flags & DE_DOOMED) == 0,
	    ("devfs_find: returning a doomed entry"));
	return (de);
}

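/*
 * Allocate a new directory entry.  The struct dirent for the name is
 * placed in the same allocation, directly after the devfs_dirent, and
 * de_dirent is pointed at it.
 */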
struct devfs_dirent *
devfs_newdirent(char *name, int namelen)
{
	int i;
	struct devfs_dirent *de;
	struct dirent d;

	d.d_namlen = namelen;
	i = sizeof(*de) + GENERIC_DIRSIZ(&d);
	de = malloc(i, M_DEVFS3, M_WAITOK | M_ZERO);
	de->de_dirent = (struct dirent *)(de + 1);
	de->de_dirent->d_namlen = namelen;
	de->de_dirent->d_reclen = GENERIC_DIRSIZ(&d);
	bcopy(name, de->de_dirent->d_name, namelen);
	dirent_terminate(de->de_dirent);
	vfs_timestamp(&de->de_ctime);
	de->de_mtime = de->de_atime = de->de_ctime;
	de->de_links = 1;
	de->de_holdcnt = 1;
#ifdef MAC
	mac_devfs_init(de);
#endif
	return (de);
}

struct devfs_dirent *
devfs_parent_dirent(struct devfs_dirent *de)
{

	if (de->de_dirent->d_type != DT_DIR)
		return (de->de_dir);

	if (de->de_flags & (DE_DOT | DE_DOTDOT))
		return (NULL);

	de = TAILQ_FIRST(&de->de_dlist);	/* "." */
	if (de == NULL)
		return (NULL);
	de = TAILQ_NEXT(de, de_list);		/* ".." */
	if (de == NULL)
		return (NULL);

	return (de->de_dir);
}

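/*
 * Create a new directory entry named "name", complete with "." and ".."
 * entries.  If dotdot is non-NULL the new directory is linked into it,
 * otherwise a detached (root) directory is returned.
 */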
struct devfs_dirent *
devfs_vmkdir(struct devfs_mount *dmp, char *name, int namelen,
    struct devfs_dirent *dotdot, u_int inode)
{
	struct devfs_dirent *dd;
	struct devfs_dirent *de;

	/* Create the new directory */
	dd = devfs_newdirent(name, namelen);
	TAILQ_INIT(&dd->de_dlist);
	dd->de_dirent->d_type = DT_DIR;
	dd->de_mode = 0555;
	dd->de_links = 2;
	dd->de_dir = dd;
	if (inode != 0)
		dd->de_inode = inode;
	else
		dd->de_inode = alloc_unr(devfs_inos);

	/*
	 * "." and ".." are always the two first entries in the
	 * de_dlist list.
	 *
	 * Create the "." entry in the new directory.
	 */
	de = devfs_newdirent(".", 1);
	de->de_dirent->d_type = DT_DIR;
	de->de_flags |= DE_DOT;
	TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
	de->de_dir = dd;

	/* Create the ".." entry in the new directory. */
	de = devfs_newdirent("..", 2);
	de->de_dirent->d_type = DT_DIR;
	de->de_flags |= DE_DOTDOT;
	TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
	if (dotdot == NULL) {
		de->de_dir = dd;
	} else {
		de->de_dir = dotdot;
		sx_assert(&dmp->dm_lock, SX_XLOCKED);
		TAILQ_INSERT_TAIL(&dotdot->de_dlist, dd, de_list);
		dotdot->de_links++;
		devfs_rules_apply(dmp, dd);
	}

#ifdef MAC
	mac_devfs_create_directory(dmp->dm_mount, name, namelen, dd);
#endif
	return (dd);
}

void
devfs_dirent_free(struct devfs_dirent *de)
{
	struct vnode *vp;

	vp = de->de_vnode;
	mtx_lock(&devfs_de_interlock);
	if (vp != NULL && vp->v_data == de)
		vp->v_data = NULL;
	mtx_unlock(&devfs_de_interlock);
	free(de, M_DEVFS3);
}

/*
 * Removes a directory if it is empty.  Also, empty parent directories are
 * removed recursively.
 */
static void
devfs_rmdir_empty(struct devfs_mount *dm, struct devfs_dirent *de)
{
	struct devfs_dirent *dd, *de_dot, *de_dotdot;

	sx_assert(&dm->dm_lock, SX_XLOCKED);

	for (;;) {
		KASSERT(de->de_dirent->d_type == DT_DIR,
		    ("devfs_rmdir_empty: de is not a directory"));

		if ((de->de_flags & DE_DOOMED) != 0 || de == dm->dm_rootdir)
			return;

		de_dot = TAILQ_FIRST(&de->de_dlist);
		KASSERT(de_dot != NULL, ("devfs_rmdir_empty: . missing"));
		de_dotdot = TAILQ_NEXT(de_dot, de_list);
		KASSERT(de_dotdot != NULL, ("devfs_rmdir_empty: .. missing"));
		/* Return if the directory is not empty. */
		if (TAILQ_NEXT(de_dotdot, de_list) != NULL)
			return;

		dd = devfs_parent_dirent(de);
		KASSERT(dd != NULL, ("devfs_rmdir_empty: NULL dd"));
		TAILQ_REMOVE(&de->de_dlist, de_dot, de_list);
		TAILQ_REMOVE(&de->de_dlist, de_dotdot, de_list);
		TAILQ_REMOVE(&dd->de_dlist, de, de_list);
		DEVFS_DE_HOLD(dd);
		devfs_delete(dm, de, DEVFS_DEL_NORECURSE);
		devfs_delete(dm, de_dot, DEVFS_DEL_NORECURSE);
		devfs_delete(dm, de_dotdot, DEVFS_DEL_NORECURSE);
		if (DEVFS_DE_DROP(dd)) {
			devfs_dirent_free(dd);
			return;
		}

		de = dd;
	}
}

/*
 * The caller needs to hold the dm for the duration of the call since
 * dm->dm_lock may be temporarily dropped.
 */
void
devfs_delete(struct devfs_mount *dm, struct devfs_dirent *de, int flags)
{
	struct devfs_dirent *dd;
	struct vnode *vp;

	KASSERT((de->de_flags & DE_DOOMED) == 0,
	    ("devfs_delete doomed dirent"));
	de->de_flags |= DE_DOOMED;

	if ((flags & DEVFS_DEL_NORECURSE) == 0) {
		dd = devfs_parent_dirent(de);
		if (dd != NULL)
			DEVFS_DE_HOLD(dd);
		if (de->de_flags & DE_USER) {
			KASSERT(dd != NULL, ("devfs_delete: NULL dd"));
			devfs_dir_unref_de(dm, dd);
		}
	} else
		dd = NULL;

	mtx_lock(&devfs_de_interlock);
	vp = de->de_vnode;
	if (vp != NULL) {
		vhold(vp);
		mtx_unlock(&devfs_de_interlock);
		sx_unlock(&dm->dm_lock);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		vgone(vp);
		VOP_UNLOCK(vp);
		vdrop(vp);
		sx_xlock(&dm->dm_lock);
	} else
		mtx_unlock(&devfs_de_interlock);
	if (de->de_symlink) {
		free(de->de_symlink, M_DEVFS);
		de->de_symlink = NULL;
	}
#ifdef MAC
	mac_devfs_destroy(de);
#endif
	if (de->de_inode > DEVFS_ROOTINO) {
		devfs_free_cdp_inode(de->de_inode);
		de->de_inode = 0;
	}
	if (DEVFS_DE_DROP(de))
		devfs_dirent_free(de);

	if (dd != NULL) {
		if (DEVFS_DE_DROP(dd))
			devfs_dirent_free(dd);
		else
			devfs_rmdir_empty(dm, dd);
	}
}

/*
 * Called on unmount.
 * Recursively removes the entire tree.
 * The caller needs to hold the dm for the duration of the call.
 */

static void
devfs_purge(struct devfs_mount *dm, struct devfs_dirent *dd)
{
	struct devfs_dirent *de;

	sx_assert(&dm->dm_lock, SX_XLOCKED);

	DEVFS_DE_HOLD(dd);
	for (;;) {
		/*
		 * Use TAILQ_LAST() to remove "." and ".." last.
		 * We might need ".." to resolve a path in
		 * devfs_dir_unref_de().
		 */
		de = TAILQ_LAST(&dd->de_dlist, devfs_dlist_head);
		if (de == NULL)
			break;
		TAILQ_REMOVE(&dd->de_dlist, de, de_list);
		if (de->de_flags & DE_USER)
			devfs_dir_unref_de(dm, dd);
		if (de->de_flags & (DE_DOT | DE_DOTDOT))
			devfs_delete(dm, de, DEVFS_DEL_NORECURSE);
		else if (de->de_dirent->d_type == DT_DIR)
			devfs_purge(dm, de);
		else
			devfs_delete(dm, de, DEVFS_DEL_NORECURSE);
	}
	if (DEVFS_DE_DROP(dd))
		devfs_dirent_free(dd);
	else if ((dd->de_flags & DE_DOOMED) == 0)
		devfs_delete(dm, dd, DEVFS_DEL_NORECURSE);
}

/*
 * Each cdev_priv has an array of pointers to devfs_dirent which is indexed
 * by the mount point's dm_idx.
 * This function extends the array when necessary, taking into account that
 * the default array is 1 element and not malloc'ed.
 */
static void
devfs_metoo(struct cdev_priv *cdp, struct devfs_mount *dm)
{
	struct devfs_dirent **dep, **olddep;
	int siz;

	siz = (dm->dm_idx + 1) * sizeof *dep;
	dep = malloc(siz, M_DEVFS2, M_WAITOK | M_ZERO);
	dev_lock();
	if (dm->dm_idx <= cdp->cdp_maxdirent) {
		/* We got raced */
		dev_unlock();
		free(dep, M_DEVFS2);
		return;
	}
	memcpy(dep, cdp->cdp_dirents, (cdp->cdp_maxdirent + 1) * sizeof *dep);
	olddep = cdp->cdp_maxdirent > 0 ? cdp->cdp_dirents : NULL;
	cdp->cdp_dirents = dep;
	/*
	 * XXX: if malloc told us how much we actually got this could
	 * XXX: be optimized.
	 */
	cdp->cdp_maxdirent = dm->dm_idx;
	dev_unlock();
	free(olddep, M_DEVFS2);
}

/*
 * The caller needs to hold the dm for the duration of the call.
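 * Each call performs at most one change to the directory tree and returns
 * 1 when it did something; the caller loops until 0 is returned, i.e. until
 * the mount is in sync with the device list.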
 */
static int
devfs_populate_loop(struct devfs_mount *dm, int cleanup)
{
	struct cdev_priv *cdp;
	struct devfs_dirent *de;
	struct devfs_dirent *dd, *dt;
	struct cdev *pdev;
	int de_flags, depth, j;
	char *q, *s;

	sx_assert(&dm->dm_lock, SX_XLOCKED);
	dev_lock();
	TAILQ_FOREACH(cdp, &cdevp_list, cdp_list) {
		KASSERT(cdp->cdp_dirents != NULL, ("NULL cdp_dirents"));

		/*
		 * If we are unmounting, or the device has been destroyed,
		 * clean up our dirent.
		 */
		if ((cleanup || !(cdp->cdp_flags & CDP_ACTIVE)) &&
		    dm->dm_idx <= cdp->cdp_maxdirent &&
		    cdp->cdp_dirents[dm->dm_idx] != NULL) {
			de = cdp->cdp_dirents[dm->dm_idx];
			cdp->cdp_dirents[dm->dm_idx] = NULL;
			KASSERT(cdp == de->de_cdp,
			    ("%s %d %s %p %p", __func__, __LINE__,
			    cdp->cdp_c.si_name, cdp, de->de_cdp));
			KASSERT(de->de_dir != NULL, ("Null de->de_dir"));
			dev_unlock();

			TAILQ_REMOVE(&de->de_dir->de_dlist, de, de_list);
			de->de_cdp = NULL;
			de->de_inode = 0;
			devfs_delete(dm, de, 0);
			dev_lock();
			cdp->cdp_inuse--;
			dev_unlock();
			return (1);
		}
		/*
		 * GC any lingering devices
		 */
		if (!(cdp->cdp_flags & CDP_ACTIVE)) {
			if (cdp->cdp_inuse > 0)
				continue;
			TAILQ_REMOVE(&cdevp_list, cdp, cdp_list);
			dev_unlock();
			dev_rel(&cdp->cdp_c);
			return (1);
		}
		/*
		 * Don't create any new dirents if we are unmounting
		 */
		if (cleanup)
			continue;
		KASSERT((cdp->cdp_flags & CDP_ACTIVE), ("Bogons, I tell ya'!"));

		if (dm->dm_idx <= cdp->cdp_maxdirent &&
		    cdp->cdp_dirents[dm->dm_idx] != NULL) {
			de = cdp->cdp_dirents[dm->dm_idx];
			KASSERT(cdp == de->de_cdp, ("inconsistent cdp"));
			continue;
		}

		cdp->cdp_inuse++;
		dev_unlock();

		if (dm->dm_idx > cdp->cdp_maxdirent)
			devfs_metoo(cdp, dm);

		dd = dm->dm_rootdir;
		s = cdp->cdp_c.si_name;
		for (;;) {
			for (q = s; *q != '/' && *q != '\0'; q++)
				continue;
			if (*q != '/')
				break;
			de = devfs_find(dd, s, q - s, 0);
			if (de == NULL)
				de = devfs_vmkdir(dm, s, q - s, dd, 0);
			else if (de->de_dirent->d_type == DT_LNK) {
				de = devfs_find(dd, s, q - s, DT_DIR);
				if (de == NULL)
					de = devfs_vmkdir(dm, s, q - s, dd, 0);
				de->de_flags |= DE_COVERED;
			}
			s = q + 1;
			dd = de;
			KASSERT(dd->de_dirent->d_type == DT_DIR &&
			    (dd->de_flags & (DE_DOT | DE_DOTDOT)) == 0,
			    ("%s: invalid directory (si_name=%s)",
			    __func__, cdp->cdp_c.si_name));
		}
		de_flags = 0;
		de = devfs_find(dd, s, q - s, DT_LNK);
		if (de != NULL)
			de_flags |= DE_COVERED;

		de = devfs_newdirent(s, q - s);
		if (cdp->cdp_c.si_flags & SI_ALIAS) {
			de->de_uid = 0;
			de->de_gid = 0;
			de->de_mode = 0755;
			de->de_dirent->d_type = DT_LNK;
			pdev = cdp->cdp_c.si_parent;
			dt = dd;
			depth = 0;
			while (dt != dm->dm_rootdir &&
			    (dt = devfs_parent_dirent(dt)) != NULL)
				depth++;
			j = depth * 3 + strlen(pdev->si_name) + 1;
			de->de_symlink = malloc(j, M_DEVFS, M_WAITOK);
			de->de_symlink[0] = 0;
			while (depth-- > 0)
				strcat(de->de_symlink, "../");
			strcat(de->de_symlink, pdev->si_name);
		} else {
			de->de_uid = cdp->cdp_c.si_uid;
			de->de_gid = cdp->cdp_c.si_gid;
			de->de_mode = cdp->cdp_c.si_mode;
			de->de_dirent->d_type = DT_CHR;
		}
		de->de_flags |= de_flags;
		de->de_inode = cdp->cdp_inode;
		de->de_cdp = cdp;
#ifdef MAC
		mac_devfs_create_device(cdp->cdp_c.si_cred, dm->dm_mount,
		    &cdp->cdp_c, de);
#endif
		de->de_dir = dd;
		TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
		devfs_rules_apply(dm, de);
		dev_lock();
		/* XXX: could check that cdp is still active here */
		KASSERT(cdp->cdp_dirents[dm->dm_idx] == NULL,
		    ("%s %d\n", __func__, __LINE__));
		cdp->cdp_dirents[dm->dm_idx] = de;
		KASSERT(de->de_cdp != (void *)0xdeadc0de,
		    ("%s %d\n", __func__, __LINE__));
		dev_unlock();
		return (1);
	}
	dev_unlock();
	return (0);
}

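/*
 * Report whether the mount is out of date with respect to the global
 * device list, i.e. whether devfs_populate() has work to do.
 */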
int
devfs_populate_needed(struct devfs_mount *dm)
{

	return (dm->dm_generation != devfs_generation);
}

/*
 * The caller needs to hold the dm for the duration of the call.
 */
void
devfs_populate(struct devfs_mount *dm)
{
	unsigned gen;

	sx_assert(&dm->dm_lock, SX_XLOCKED);
	if (!devfs_populate_needed(dm))
		return;
	gen = devfs_generation;
	while (devfs_populate_loop(dm, 0))
		continue;
	dm->dm_generation = gen;
}

/*
 * The caller needs to hold the dm for the duration of the call.
 */
void
devfs_cleanup(struct devfs_mount *dm)
{

	sx_assert(&dm->dm_lock, SX_XLOCKED);
	while (devfs_populate_loop(dm, 1))
		continue;
	devfs_purge(dm, dm->dm_rootdir);
}

/*
 * devfs_create() and devfs_destroy() are called from kern_conf.c and
 * in both cases the devlock() mutex is held, so no further locking
 * is necessary and no sleeping allowed.
 */

void
devfs_create(struct cdev *dev)
{
	struct cdev_priv *cdp;

	dev_lock_assert_locked();
	cdp = cdev2priv(dev);
	cdp->cdp_flags |= CDP_ACTIVE;
	cdp->cdp_inode = alloc_unrl(devfs_inos);
	dev_refl(dev);
	TAILQ_INSERT_TAIL(&cdevp_list, cdp, cdp_list);
	devfs_generation++;
}

void
devfs_destroy(struct cdev *dev)
{
	struct cdev_priv *cdp;

	dev_lock_assert_locked();
	cdp = cdev2priv(dev);
	cdp->cdp_flags &= ~CDP_ACTIVE;
	devfs_generation++;
}

ino_t
devfs_alloc_cdp_inode(void)
{

	return (alloc_unr(devfs_inos));
}

void
devfs_free_cdp_inode(ino_t ino)
{

	if (ino > 0)
		free_unr(devfs_inos, ino);
}

static void
devfs_devs_init(void *junk __unused)
{

	devfs_inos = new_unrhdr(DEVFS_ROOTINO + 1, INT_MAX, &devmtx);
}

SYSINIT(devfs_devs, SI_SUB_DEVFS, SI_ORDER_FIRST, devfs_devs_init, NULL);