/*-
 * Copyright (c) 2000,2004
 *	Poul-Henning Kamp.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From: FreeBSD: src/sys/miscfs/kernfs/kernfs_vfsops.c 1.36
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <sys/kdb.h>

#include <fs/devfs/devfs.h>
#include <fs/devfs/devfs_int.h>

#include <security/mac/mac_framework.h>

/*
 * The one true (but secret) list of active devices in the system.
 * Locked by dev_lock()/devmtx
 */
struct cdev_priv_list cdevp_list = TAILQ_HEAD_INITIALIZER(cdevp_list);

struct unrhdr *devfs_inos;

static MALLOC_DEFINE(M_DEVFS2, "DEVFS2", "DEVFS data 2");
static MALLOC_DEFINE(M_DEVFS3, "DEVFS3", "DEVFS data 3");
static MALLOC_DEFINE(M_CDEVP, "DEVFS1", "DEVFS cdev_priv storage");

static SYSCTL_NODE(_vfs, OID_AUTO, devfs, CTLFLAG_RW, 0, "DEVFS filesystem");

static unsigned devfs_generation;
SYSCTL_UINT(_vfs_devfs, OID_AUTO, generation, CTLFLAG_RD,
    &devfs_generation, 0, "DEVFS generation number");

unsigned devfs_rule_depth = 1;
SYSCTL_UINT(_vfs_devfs, OID_AUTO, rule_depth, CTLFLAG_RW,
    &devfs_rule_depth, 0, "Max depth of ruleset include");
/*
 * Helper sysctl for devname(3).  We're given a dev_t and return the
 * name, if any, registered by the device driver.
 */
static int
sysctl_devname(SYSCTL_HANDLER_ARGS)
{
	int error;
	dev_t ud;
	struct cdev_priv *cdp;
	struct cdev *dev;

	error = SYSCTL_IN(req, &ud, sizeof (ud));
	if (error)
		return (error);
	if (ud == NODEV)
		return (EINVAL);
	dev = NULL;
	dev_lock();
	TAILQ_FOREACH(cdp, &cdevp_list, cdp_list)
		if (cdp->cdp_inode == ud) {
			dev = &cdp->cdp_c;
			dev_refl(dev);
			break;
		}
	dev_unlock();
	if (dev == NULL)
		return (ENOENT);
	error = SYSCTL_OUT(req, dev->si_name, strlen(dev->si_name) + 1);
	dev_rel(dev);
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, devname,
    CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_ANYBODY|CTLFLAG_MPSAFE,
    NULL, 0, sysctl_devname, "", "devname(3) handler");

SYSCTL_INT(_debug_sizeof, OID_AUTO, cdev, CTLFLAG_RD,
    0, sizeof(struct cdev), "sizeof(struct cdev)");

SYSCTL_INT(_debug_sizeof, OID_AUTO, cdev_priv, CTLFLAG_RD,
    0, sizeof(struct cdev_priv), "sizeof(struct cdev_priv)");

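/*
 * Illustrative sketch only: one way a userland consumer such as
 * devname(3) could query the handler above.  The variables "name" and
 * "dev" are hypothetical and the actual libc implementation may differ;
 * the point is that the dev_t goes in as the "new" value and the
 * registered name comes back as the "old" value.
 *
 *	char name[SPECNAMELEN + 1];
 *	size_t len = sizeof(name);
 *	dev_t dev = st.st_rdev;		// st from a stat(2) of the node
 *
 *	if (sysctlbyname("kern.devname", name, &len, &dev,
 *	    sizeof(dev)) == 0)
 *		printf("%s\n", name);
 */
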
struct cdev *
devfs_alloc(void)
{
	struct cdev_priv *cdp;
	struct cdev *cdev;
	struct timespec ts;

	cdp = malloc(sizeof *cdp, M_CDEVP, M_USE_RESERVE | M_ZERO | M_WAITOK);

	cdp->cdp_dirents = &cdp->cdp_dirent0;
	cdp->cdp_dirent0 = NULL;
	cdp->cdp_maxdirent = 0;

	cdev = &cdp->cdp_c;

	cdev->si_name = cdev->__si_namebuf;
	LIST_INIT(&cdev->si_children);
	vfs_timestamp(&ts);
	cdev->si_atime = cdev->si_mtime = cdev->si_ctime = ts;

	return (cdev);
}

void
devfs_free(struct cdev *cdev)
{
	struct cdev_priv *cdp;

	cdp = cdev2priv(cdev);
	if (cdev->si_cred != NULL)
		crfree(cdev->si_cred);
	if (cdp->cdp_inode > 0)
		free_unr(devfs_inos, cdp->cdp_inode);
	if (cdp->cdp_maxdirent > 0)
		free(cdp->cdp_dirents, M_DEVFS2);
	free(cdp, M_CDEVP);
}

struct devfs_dirent *
devfs_find(struct devfs_dirent *dd, const char *name, int namelen)
{
	struct devfs_dirent *de;

	TAILQ_FOREACH(de, &dd->de_dlist, de_list) {
		if (namelen != de->de_dirent->d_namlen)
			continue;
		if (bcmp(name, de->de_dirent->d_name, namelen) != 0)
			continue;
		break;
	}
	return (de);
}

struct devfs_dirent *
devfs_newdirent(char *name, int namelen)
{
	int i;
	struct devfs_dirent *de;
	struct dirent d;

	d.d_namlen = namelen;
	i = sizeof (*de) + GENERIC_DIRSIZ(&d);
	de = malloc(i, M_DEVFS3, M_WAITOK | M_ZERO);
	de->de_dirent = (struct dirent *)(de + 1);
	de->de_dirent->d_namlen = namelen;
	de->de_dirent->d_reclen = GENERIC_DIRSIZ(&d);
	bcopy(name, de->de_dirent->d_name, namelen);
	de->de_dirent->d_name[namelen] = '\0';
	vfs_timestamp(&de->de_ctime);
	de->de_mtime = de->de_atime = de->de_ctime;
	de->de_links = 1;
	de->de_holdcnt = 1;
#ifdef MAC
	mac_devfs_init(de);
#endif
	return (de);
}

struct devfs_dirent *
devfs_vmkdir(struct devfs_mount *dmp, char *name, int namelen, struct devfs_dirent *dotdot, u_int inode)
{
	struct devfs_dirent *dd;
	struct devfs_dirent *de;

	/* Create the new directory */
	dd = devfs_newdirent(name, namelen);
	TAILQ_INIT(&dd->de_dlist);
	dd->de_dirent->d_type = DT_DIR;
	dd->de_mode = 0555;
	dd->de_links = 2;
	dd->de_dir = dd;
	if (inode != 0)
		dd->de_inode = inode;
	else
		dd->de_inode = alloc_unr(devfs_inos);

	/* Create the "." entry in the new directory */
	de = devfs_newdirent(".", 1);
	de->de_dirent->d_type = DT_DIR;
	de->de_flags |= DE_DOT;
	TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
	de->de_dir = dd;

	/* Create the ".." entry in the new directory */
	de = devfs_newdirent("..", 2);
	de->de_dirent->d_type = DT_DIR;
	de->de_flags |= DE_DOTDOT;
	TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
	if (dotdot == NULL) {
		de->de_dir = dd;
	} else {
		de->de_dir = dotdot;
		TAILQ_INSERT_TAIL(&dotdot->de_dlist, dd, de_list);
		dotdot->de_links++;
	}

#ifdef MAC
	mac_devfs_create_directory(dmp->dm_mount, name, namelen, dd);
#endif
	return (dd);
}

void
devfs_dirent_free(struct devfs_dirent *de)
{
	free(de, M_DEVFS3);
}

/*
 * The caller needs to hold the dm for the duration of the call since
 * dm->dm_lock may be temporarily dropped.
 */
void
devfs_delete(struct devfs_mount *dm, struct devfs_dirent *de, int vp_locked)
{
	struct vnode *vp;

	KASSERT((de->de_flags & DE_DOOMED) == 0,
	    ("devfs_delete doomed dirent"));
	de->de_flags |= DE_DOOMED;
	mtx_lock(&devfs_de_interlock);
	vp = de->de_vnode;
	if (vp != NULL) {
		VI_LOCK(vp);
		mtx_unlock(&devfs_de_interlock);
		vholdl(vp);
		sx_unlock(&dm->dm_lock);
		if (!vp_locked)
			vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK | LK_RETRY);
		else
			VI_UNLOCK(vp);
		vgone(vp);
		if (!vp_locked)
			VOP_UNLOCK(vp, 0);
		vdrop(vp);
		sx_xlock(&dm->dm_lock);
	} else
		mtx_unlock(&devfs_de_interlock);
	if (de->de_symlink) {
		free(de->de_symlink, M_DEVFS);
		de->de_symlink = NULL;
	}
#ifdef MAC
	mac_devfs_destroy(de);
#endif
	if (de->de_inode > DEVFS_ROOTINO) {
		free_unr(devfs_inos, de->de_inode);
		de->de_inode = 0;
	}
	if (DEVFS_DE_DROP(de))
		devfs_dirent_free(de);
}

/*
 * Called on unmount.
 * Recursively removes the entire tree.
 * The caller needs to hold the dm for the duration of the call.
 */
static void
devfs_purge(struct devfs_mount *dm, struct devfs_dirent *dd)
{
	struct devfs_dirent *de;

	sx_assert(&dm->dm_lock, SX_XLOCKED);
	for (;;) {
		de = TAILQ_FIRST(&dd->de_dlist);
		if (de == NULL)
			break;
		TAILQ_REMOVE(&dd->de_dlist, de, de_list);
		if (de->de_flags & (DE_DOT|DE_DOTDOT))
			devfs_delete(dm, de, 0);
		else if (de->de_dirent->d_type == DT_DIR)
			devfs_purge(dm, de);
		else
			devfs_delete(dm, de, 0);
	}
	devfs_delete(dm, dd, 0);
}

/*
 * Each cdev_priv has an array of pointers to devfs_dirent which is indexed
 * by the mount point's dm_idx.
 * This function extends the array when necessary, taking into account that
 * the default array is 1 element and not malloc'ed.
 */
static void
devfs_metoo(struct cdev_priv *cdp, struct devfs_mount *dm)
{
	struct devfs_dirent **dep;
	int siz;

	siz = (dm->dm_idx + 1) * sizeof *dep;
	dep = malloc(siz, M_DEVFS2, M_WAITOK | M_ZERO);
	dev_lock();
	if (dm->dm_idx <= cdp->cdp_maxdirent) {
		/* We got raced */
		dev_unlock();
		free(dep, M_DEVFS2);
		return;
	}
	memcpy(dep, cdp->cdp_dirents, (cdp->cdp_maxdirent + 1) * sizeof *dep);
	if (cdp->cdp_maxdirent > 0)
		free(cdp->cdp_dirents, M_DEVFS2);
	cdp->cdp_dirents = dep;
	/*
	 * XXX: if malloc told us how much we actually got this could
	 * XXX: be optimized.
	 */
	cdp->cdp_maxdirent = dm->dm_idx;
	dev_unlock();
}

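/*
 * For illustration (the indices are hypothetical): with two devfs
 * instances mounted, one with dm_idx 0 and one with dm_idx 1, a single
 * device ends up with
 *
 *	cdp->cdp_dirents[0]	-> its dirent in the first mount (or NULL)
 *	cdp->cdp_dirents[1]	-> its dirent in the second mount (or NULL)
 *
 * devfs_metoo() grows the array from the embedded one-element
 * cdp_dirent0 to a malloc'ed array large enough for dm_idx, and
 * cdp_maxdirent records the highest valid index.
 */
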
/*
 * The caller needs to hold the dm for the duration of the call.
 */
static int
devfs_populate_loop(struct devfs_mount *dm, int cleanup)
{
	struct cdev_priv *cdp;
	struct devfs_dirent *de;
	struct devfs_dirent *dd;
	struct cdev *pdev;
	int j;
	char *q, *s;

	sx_assert(&dm->dm_lock, SX_XLOCKED);
	dev_lock();
	TAILQ_FOREACH(cdp, &cdevp_list, cdp_list) {

		KASSERT(cdp->cdp_dirents != NULL, ("NULL cdp_dirents"));

		/*
		 * If we are unmounting, or the device has been destroyed,
		 * clean up our dirent.
		 */
		if ((cleanup || !(cdp->cdp_flags & CDP_ACTIVE)) &&
		    dm->dm_idx <= cdp->cdp_maxdirent &&
		    cdp->cdp_dirents[dm->dm_idx] != NULL) {
			de = cdp->cdp_dirents[dm->dm_idx];
			cdp->cdp_dirents[dm->dm_idx] = NULL;
			KASSERT(cdp == de->de_cdp,
			    ("%s %d %s %p %p", __func__, __LINE__,
			    cdp->cdp_c.si_name, cdp, de->de_cdp));
			KASSERT(de->de_dir != NULL, ("Null de->de_dir"));
			dev_unlock();

			TAILQ_REMOVE(&de->de_dir->de_dlist, de, de_list);
			de->de_cdp = NULL;
			de->de_inode = 0;
			devfs_delete(dm, de, 0);
			dev_lock();
			cdp->cdp_inuse--;
			dev_unlock();
			return (1);
		}
		/*
		 * GC any lingering devices
		 */
		if (!(cdp->cdp_flags & CDP_ACTIVE)) {
			if (cdp->cdp_inuse > 0)
				continue;
			TAILQ_REMOVE(&cdevp_list, cdp, cdp_list);
			dev_unlock();
			dev_rel(&cdp->cdp_c);
			return (1);
		}
		/*
		 * Don't create any new dirents if we are unmounting
		 */
		if (cleanup)
			continue;
		KASSERT((cdp->cdp_flags & CDP_ACTIVE), ("Bogons, I tell ya'!"));

		if (dm->dm_idx <= cdp->cdp_maxdirent &&
		    cdp->cdp_dirents[dm->dm_idx] != NULL) {
			de = cdp->cdp_dirents[dm->dm_idx];
			KASSERT(cdp == de->de_cdp, ("inconsistent cdp"));
			continue;
		}

		cdp->cdp_inuse++;
		dev_unlock();

		if (dm->dm_idx > cdp->cdp_maxdirent)
			devfs_metoo(cdp, dm);

		dd = dm->dm_rootdir;
		s = cdp->cdp_c.si_name;
		for (;;) {
			for (q = s; *q != '/' && *q != '\0'; q++)
				continue;
			if (*q != '/')
				break;
			de = devfs_find(dd, s, q - s);
			if (de == NULL)
				de = devfs_vmkdir(dm, s, q - s, dd, 0);
			s = q + 1;
			dd = de;
		}

		de = devfs_newdirent(s, q - s);
		if (cdp->cdp_c.si_flags & SI_ALIAS) {
			de->de_uid = 0;
			de->de_gid = 0;
			de->de_mode = 0755;
			de->de_dirent->d_type = DT_LNK;
			pdev = cdp->cdp_c.si_parent;
			j = strlen(pdev->si_name) + 1;
			de->de_symlink = malloc(j, M_DEVFS, M_WAITOK);
			bcopy(pdev->si_name, de->de_symlink, j);
		} else {
			de->de_uid = cdp->cdp_c.si_uid;
			de->de_gid = cdp->cdp_c.si_gid;
			de->de_mode = cdp->cdp_c.si_mode;
			de->de_dirent->d_type = DT_CHR;
		}
		de->de_inode = cdp->cdp_inode;
		de->de_cdp = cdp;
#ifdef MAC
		mac_devfs_create_device(cdp->cdp_c.si_cred, dm->dm_mount,
		    &cdp->cdp_c, de);
#endif
		de->de_dir = dd;
		TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
		devfs_rules_apply(dm, de);
		dev_lock();
		/* XXX: could check that cdp is still active here */
		KASSERT(cdp->cdp_dirents[dm->dm_idx] == NULL,
		    ("%s %d\n", __func__, __LINE__));
		cdp->cdp_dirents[dm->dm_idx] = de;
		KASSERT(de->de_cdp != (void *)0xdeadc0de,
		    ("%s %d\n", __func__, __LINE__));
		dev_unlock();
		return (1);
	}
	dev_unlock();
	return (0);
}

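/*
 * Worked example (the device names are hypothetical): for a cdev named
 * "usb/ugen0.1" the loop above first finds or creates the subdirectory
 * "usb" under dm_rootdir via devfs_vmkdir(), then creates the DT_CHR
 * entry "ugen0.1" inside it.  For an alias made with make_dev_alias(9),
 * a DT_LNK entry is created instead, whose de_symlink points at the
 * parent device's si_name.
 */
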
/*
 * The caller needs to hold the dm for the duration of the call.
 */
void
devfs_populate(struct devfs_mount *dm)
{

	sx_assert(&dm->dm_lock, SX_XLOCKED);
	if (dm->dm_generation == devfs_generation)
		return;
	while (devfs_populate_loop(dm, 0))
		continue;
	dm->dm_generation = devfs_generation;
}

/*
 * The caller needs to hold the dm for the duration of the call.
 */
void
devfs_cleanup(struct devfs_mount *dm)
{

	sx_assert(&dm->dm_lock, SX_XLOCKED);
	while (devfs_populate_loop(dm, 1))
		continue;
	devfs_purge(dm, dm->dm_rootdir);
}

/*
 * devfs_create() and devfs_destroy() are called from kern_conf.c and
 * in both cases the devlock() mutex is held, so no further locking
 * is necessary and no sleeping allowed.
 */
void
devfs_create(struct cdev *dev)
{
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_OWNED);
	cdp = cdev2priv(dev);
	cdp->cdp_flags |= CDP_ACTIVE;
	cdp->cdp_inode = alloc_unrl(devfs_inos);
	dev_refl(dev);
	TAILQ_INSERT_TAIL(&cdevp_list, cdp, cdp_list);
	devfs_generation++;
}

void
devfs_destroy(struct cdev *dev)
{
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_OWNED);
	cdp = cdev2priv(dev);
	cdp->cdp_flags &= ~CDP_ACTIVE;
	devfs_generation++;
}

static void
devfs_devs_init(void *junk __unused)
{

	devfs_inos = new_unrhdr(DEVFS_ROOTINO + 1, INT_MAX, &devmtx);
}

SYSINIT(devfs_devs, SI_SUB_DEVFS, SI_ORDER_FIRST, devfs_devs_init, NULL);
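
/*
 * For reference, a rough sketch of the producer side as seen from a
 * driver (the "foo" driver and its cdevsw are hypothetical).  make_dev(9)
 * in kern_conf.c obtains the cdev from devfs_alloc() and, with devmtx
 * held, registers it through devfs_create() above; each mounted devfs
 * instance then picks it up on its next devfs_populate().
 *
 *	static struct cdevsw foo_cdevsw = {
 *		.d_version =	D_VERSION,
 *		.d_name =	"foo",
 *		.d_open =	foo_open,
 *		.d_read =	foo_read,
 *	};
 *
 *	foo_dev = make_dev(&foo_cdevsw, 0, UID_ROOT, GID_WHEEL,
 *	    0600, "foo%d", 0);		// appears as /dev/foo0
 *	...
 *	destroy_dev(foo_dev);		// ends up in devfs_destroy()
 */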