/*-
 * Copyright (c) 2000,2004
 *	Poul-Henning Kamp.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From: FreeBSD: src/sys/miscfs/kernfs/kernfs_vfsops.c 1.36
 *
 * $FreeBSD$
 */

#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <sys/kdb.h>

#include <fs/devfs/devfs.h>
#include <fs/devfs/devfs_int.h>

#include <security/mac/mac_framework.h>

/*
 * The one true (but secret) list of active devices in the system.
 * Locked by dev_lock()/devmtx
 */
struct cdev_priv_list cdevp_list = TAILQ_HEAD_INITIALIZER(cdevp_list);

struct unrhdr *devfs_inos;

static MALLOC_DEFINE(M_DEVFS2, "DEVFS2", "DEVFS data 2");
static MALLOC_DEFINE(M_DEVFS3, "DEVFS3", "DEVFS data 3");
static MALLOC_DEFINE(M_CDEVP, "DEVFS1", "DEVFS cdev_priv storage");

static SYSCTL_NODE(_vfs, OID_AUTO, devfs, CTLFLAG_RW, 0, "DEVFS filesystem");

static unsigned devfs_generation;
SYSCTL_UINT(_vfs_devfs, OID_AUTO, generation, CTLFLAG_RD,
	&devfs_generation, 0, "DEVFS generation number");

unsigned devfs_rule_depth = 1;
SYSCTL_UINT(_vfs_devfs, OID_AUTO, rule_depth, CTLFLAG_RW,
	&devfs_rule_depth, 0, "Max depth of ruleset include");

/*
 * Helper sysctl for devname(3).  We're given a dev_t and return
 * the name, if any, registered by the device driver.
 */
static int
sysctl_devname(SYSCTL_HANDLER_ARGS)
{
	int error;
	dev_t ud;
	struct cdev_priv *cdp;

	error = SYSCTL_IN(req, &ud, sizeof (ud));
	if (error)
		return (error);
	if (ud == NODEV)
		return (EINVAL);
	/*
	ud ^ devfs_random();
	*/
	dev_lock();
	TAILQ_FOREACH(cdp, &cdevp_list, cdp_list)
		if (cdp->cdp_inode == ud)
			break;
	dev_unlock();
	if (cdp == NULL)
		return (ENOENT);
	error = SYSCTL_OUT(req, cdp->cdp_c.si_name,
	    strlen(cdp->cdp_c.si_name) + 1);
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, devname, CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_ANYBODY,
    NULL, 0, sysctl_devname, "", "devname(3) handler");

SYSCTL_INT(_debug_sizeof, OID_AUTO, cdev, CTLFLAG_RD,
    0, sizeof(struct cdev), "sizeof(struct cdev)");

SYSCTL_INT(_debug_sizeof, OID_AUTO, cdev_priv, CTLFLAG_RD,
    0, sizeof(struct cdev_priv), "sizeof(struct cdev_priv)");
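
/*
 * For reference, a minimal userland sketch (not compiled here) of how the
 * kern.devname node above is consumed; devname(3) in libc performs roughly
 * this lookup.  The function name and buffer size below are made up for
 * the example, and it assumes userland and kernel agree on sizeof(dev_t).
 */
#if 0
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <stdio.h>

static void
print_devname(const char *path)
{
	struct stat st;
	char name[256];			/* arbitrary, fits any devfs name */
	size_t len = sizeof(name);

	if (stat(path, &st) != 0 || !S_ISCHR(st.st_mode))
		return;
	/* The dev_t goes in as "new" data, the registered name comes back. */
	if (sysctlbyname("kern.devname", name, &len,
	    &st.st_rdev, sizeof(st.st_rdev)) == 0)
		printf("%s -> %s\n", path, name);
}
#endif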

struct cdev *
devfs_alloc(void)
{
	struct cdev_priv *cdp;
	struct cdev *cdev;
	struct timespec ts;

	cdp = malloc(sizeof *cdp, M_CDEVP, M_USE_RESERVE | M_ZERO | M_WAITOK);

	cdp->cdp_dirents = &cdp->cdp_dirent0;
	cdp->cdp_dirent0 = NULL;
	cdp->cdp_maxdirent = 0;

	cdev = &cdp->cdp_c;

	cdev->si_name = cdev->__si_namebuf;
	LIST_INIT(&cdev->si_children);
	vfs_timestamp(&ts);
	cdev->si_atime = cdev->si_mtime = cdev->si_ctime = ts;

	return (cdev);
}

void
devfs_free(struct cdev *cdev)
{
	struct cdev_priv *cdp;

	cdp = cdev2priv(cdev);
	if (cdev->si_cred != NULL)
		crfree(cdev->si_cred);
	if (cdp->cdp_inode > 0)
		free_unr(devfs_inos, cdp->cdp_inode);
	if (cdp->cdp_maxdirent > 0)
		free(cdp->cdp_dirents, M_DEVFS2);
	free(cdp, M_CDEVP);
}

struct devfs_dirent *
devfs_find(struct devfs_dirent *dd, const char *name, int namelen)
{
	struct devfs_dirent *de;

	TAILQ_FOREACH(de, &dd->de_dlist, de_list) {
		if (namelen != de->de_dirent->d_namlen)
			continue;
		if (bcmp(name, de->de_dirent->d_name, namelen) != 0)
			continue;
		break;
	}
	return (de);
}

struct devfs_dirent *
devfs_newdirent(char *name, int namelen)
{
	int i;
	struct devfs_dirent *de;
	struct dirent d;

	d.d_namlen = namelen;
	i = sizeof (*de) + GENERIC_DIRSIZ(&d);
	de = malloc(i, M_DEVFS3, M_WAITOK | M_ZERO);
	de->de_dirent = (struct dirent *)(de + 1);
	de->de_dirent->d_namlen = namelen;
	de->de_dirent->d_reclen = GENERIC_DIRSIZ(&d);
	bcopy(name, de->de_dirent->d_name, namelen);
	de->de_dirent->d_name[namelen] = '\0';
	vfs_timestamp(&de->de_ctime);
	de->de_mtime = de->de_atime = de->de_ctime;
	de->de_links = 1;
	de->de_holdcnt = 1;
#ifdef MAC
	mac_devfs_init(de);
#endif
	return (de);
}

struct devfs_dirent *
devfs_vmkdir(struct devfs_mount *dmp, char *name, int namelen,
    struct devfs_dirent *dotdot, u_int inode)
{
	struct devfs_dirent *dd;
	struct devfs_dirent *de;

	/* Create the new directory */
	dd = devfs_newdirent(name, namelen);
	TAILQ_INIT(&dd->de_dlist);
	dd->de_dirent->d_type = DT_DIR;
	dd->de_mode = 0555;
	dd->de_links = 2;
	dd->de_dir = dd;
	if (inode != 0)
		dd->de_inode = inode;
	else
		dd->de_inode = alloc_unr(devfs_inos);

	/* Create the "." entry in the new directory */
	de = devfs_newdirent(".", 1);
	de->de_dirent->d_type = DT_DIR;
	de->de_flags |= DE_DOT;
	TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
	de->de_dir = dd;

	/* Create the ".." entry in the new directory */
	de = devfs_newdirent("..", 2);
	de->de_dirent->d_type = DT_DIR;
	de->de_flags |= DE_DOTDOT;
	TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
	if (dotdot == NULL) {
		de->de_dir = dd;
	} else {
		de->de_dir = dotdot;
		TAILQ_INSERT_TAIL(&dotdot->de_dlist, dd, de_list);
		dotdot->de_links++;
	}

#ifdef MAC
	mac_devfs_create_directory(dmp->dm_mount, name, namelen, dd);
#endif
	return (dd);
}

void
devfs_dirent_free(struct devfs_dirent *de)
{
	free(de, M_DEVFS3);
}

/*
 * The caller needs to hold the dm for the duration of the call since
 * dm->dm_lock may be temporarily dropped.
 */
void
devfs_delete(struct devfs_mount *dm, struct devfs_dirent *de, int vp_locked)
{
	struct vnode *vp;

	KASSERT((de->de_flags & DE_DOOMED) == 0,
	    ("devfs_delete doomed dirent"));
	de->de_flags |= DE_DOOMED;
	mtx_lock(&devfs_de_interlock);
	vp = de->de_vnode;
	if (vp != NULL) {
		VI_LOCK(vp);
		mtx_unlock(&devfs_de_interlock);
		vholdl(vp);
		sx_unlock(&dm->dm_lock);
		if (!vp_locked)
			vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK | LK_RETRY);
		else
			VI_UNLOCK(vp);
		vgone(vp);
		if (!vp_locked)
			VOP_UNLOCK(vp, 0);
		vdrop(vp);
		sx_xlock(&dm->dm_lock);
	} else
		mtx_unlock(&devfs_de_interlock);
	if (de->de_symlink) {
		free(de->de_symlink, M_DEVFS);
		de->de_symlink = NULL;
	}
#ifdef MAC
	mac_devfs_destroy(de);
#endif
	if (de->de_inode > DEVFS_ROOTINO) {
		free_unr(devfs_inos, de->de_inode);
		de->de_inode = 0;
	}
	if (DEVFS_DE_DROP(de))
		devfs_dirent_free(de);
}
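
/*
 * An illustrative, non-compiled sketch of the caller-side contract stated
 * above devfs_delete(): the devfs_mount stays referenced by the caller,
 * dm_lock is held exclusively across the call, and the caller must expect
 * devfs_delete() to drop and re-acquire dm_lock while it kills a vnode.
 * The name being removed is a placeholder.
 */
#if 0
	struct devfs_mount *dmp;	/* assumed held by the caller */
	struct devfs_dirent *dd, *de;

	sx_xlock(&dmp->dm_lock);
	dd = dmp->dm_rootdir;
	de = devfs_find(dd, "example", 7);
	if (de != NULL) {
		TAILQ_REMOVE(&dd->de_dlist, de, de_list);
		devfs_delete(dmp, de, 0);	/* may drop/retake dm_lock */
	}
	sx_xunlock(&dmp->dm_lock);
#endif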

/*
 * Called on unmount.
 * Recursively removes the entire tree.
 * The caller needs to hold the dm for the duration of the call.
 */

static void
devfs_purge(struct devfs_mount *dm, struct devfs_dirent *dd)
{
	struct devfs_dirent *de;

	sx_assert(&dm->dm_lock, SX_XLOCKED);
	for (;;) {
		de = TAILQ_FIRST(&dd->de_dlist);
		if (de == NULL)
			break;
		TAILQ_REMOVE(&dd->de_dlist, de, de_list);
		if (de->de_flags & (DE_DOT|DE_DOTDOT))
			devfs_delete(dm, de, 0);
		else if (de->de_dirent->d_type == DT_DIR)
			devfs_purge(dm, de);
		else
			devfs_delete(dm, de, 0);
	}
	devfs_delete(dm, dd, 0);
}

/*
 * Each cdev_priv has an array of pointers to devfs_dirent which is indexed
 * by the mount point's dm_idx.
 * This function extends the array when necessary, taking into account that
 * the default array is 1 element and not malloc'ed.
 */
static void
devfs_metoo(struct cdev_priv *cdp, struct devfs_mount *dm)
{
	struct devfs_dirent **dep;
	int siz;

	siz = (dm->dm_idx + 1) * sizeof *dep;
	dep = malloc(siz, M_DEVFS2, M_WAITOK | M_ZERO);
	dev_lock();
	if (dm->dm_idx <= cdp->cdp_maxdirent) {
		/* We got raced */
		dev_unlock();
		free(dep, M_DEVFS2);
		return;
	}
	memcpy(dep, cdp->cdp_dirents, (cdp->cdp_maxdirent + 1) * sizeof *dep);
	if (cdp->cdp_maxdirent > 0)
		free(cdp->cdp_dirents, M_DEVFS2);
	cdp->cdp_dirents = dep;
	/*
	 * XXX: if malloc told us how much we actually got this could
	 * XXX: be optimized.
	 */
	cdp->cdp_maxdirent = dm->dm_idx;
	dev_unlock();
}

/*
 * The caller needs to hold the dm for the duration of the call.
 */
static int
devfs_populate_loop(struct devfs_mount *dm, int cleanup)
{
	struct cdev_priv *cdp;
	struct devfs_dirent *de;
	struct devfs_dirent *dd;
	struct cdev *pdev;
	int j;
	char *q, *s;

	sx_assert(&dm->dm_lock, SX_XLOCKED);
	dev_lock();
	TAILQ_FOREACH(cdp, &cdevp_list, cdp_list) {

		KASSERT(cdp->cdp_dirents != NULL, ("NULL cdp_dirents"));

		/*
		 * If we are unmounting, or the device has been destroyed,
		 * clean up our dirent.
		 */
		if ((cleanup || !(cdp->cdp_flags & CDP_ACTIVE)) &&
		    dm->dm_idx <= cdp->cdp_maxdirent &&
		    cdp->cdp_dirents[dm->dm_idx] != NULL) {
			de = cdp->cdp_dirents[dm->dm_idx];
			cdp->cdp_dirents[dm->dm_idx] = NULL;
			KASSERT(cdp == de->de_cdp,
			    ("%s %d %s %p %p", __func__, __LINE__,
			    cdp->cdp_c.si_name, cdp, de->de_cdp));
			KASSERT(de->de_dir != NULL, ("Null de->de_dir"));
			dev_unlock();

			TAILQ_REMOVE(&de->de_dir->de_dlist, de, de_list);
			de->de_cdp = NULL;
			de->de_inode = 0;
			devfs_delete(dm, de, 0);
			dev_lock();
			cdp->cdp_inuse--;
			dev_unlock();
			return (1);
		}
		/*
		 * GC any lingering devices
		 */
		if (!(cdp->cdp_flags & CDP_ACTIVE)) {
			if (cdp->cdp_inuse > 0)
				continue;
			TAILQ_REMOVE(&cdevp_list, cdp, cdp_list);
			dev_unlock();
			dev_rel(&cdp->cdp_c);
			return (1);
		}
		/*
		 * Don't create any new dirents if we are unmounting
		 */
		if (cleanup)
			continue;
		KASSERT((cdp->cdp_flags & CDP_ACTIVE), ("Bogons, I tell ya'!"));

		if (dm->dm_idx <= cdp->cdp_maxdirent &&
		    cdp->cdp_dirents[dm->dm_idx] != NULL) {
			de = cdp->cdp_dirents[dm->dm_idx];
			KASSERT(cdp == de->de_cdp, ("inconsistent cdp"));
			continue;
		}

		cdp->cdp_inuse++;
		dev_unlock();

		if (dm->dm_idx > cdp->cdp_maxdirent)
			devfs_metoo(cdp, dm);

		dd = dm->dm_rootdir;
		s = cdp->cdp_c.si_name;
		for (;;) {
			for (q = s; *q != '/' && *q != '\0'; q++)
				continue;
			if (*q != '/')
				break;
			de = devfs_find(dd, s, q - s);
			if (de == NULL)
				de = devfs_vmkdir(dm, s, q - s, dd, 0);
			s = q + 1;
			dd = de;
		}

		de = devfs_newdirent(s, q - s);
		if (cdp->cdp_c.si_flags & SI_ALIAS) {
			de->de_uid = 0;
			de->de_gid = 0;
			de->de_mode = 0755;
			de->de_dirent->d_type = DT_LNK;
			pdev = cdp->cdp_c.si_parent;
			j = strlen(pdev->si_name) + 1;
			de->de_symlink = malloc(j, M_DEVFS, M_WAITOK);
			bcopy(pdev->si_name, de->de_symlink, j);
		} else {
			de->de_uid = cdp->cdp_c.si_uid;
			de->de_gid = cdp->cdp_c.si_gid;
			de->de_mode = cdp->cdp_c.si_mode;
			de->de_dirent->d_type = DT_CHR;
		}
		de->de_inode = cdp->cdp_inode;
		de->de_cdp = cdp;
#ifdef MAC
		mac_devfs_create_device(cdp->cdp_c.si_cred, dm->dm_mount,
		    &cdp->cdp_c, de);
#endif
		de->de_dir = dd;
		TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
		devfs_rules_apply(dm, de);
		dev_lock();
		/* XXX: could check that cdp is still active here */
		KASSERT(cdp->cdp_dirents[dm->dm_idx] == NULL,
		    ("%s %d\n", __func__, __LINE__));
		cdp->cdp_dirents[dm->dm_idx] = de;
		KASSERT(de->de_cdp != (void *)0xdeadc0de,
		    ("%s %d\n", __func__, __LINE__));
		dev_unlock();
		return (1);
	}
	dev_unlock();
	return (0);
}
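
/*
 * The component walk above is what lets a driver register a name containing
 * '/' and have the intermediate directories show up automatically.  A
 * minimal, illustrative driver-side sketch follows; the cdevsw and unit
 * number are made up for the example.
 */
#if 0
	struct cdev *dev;

	/*
	 * devfs_populate_loop() splits "net/tun0" on '/': the "net"
	 * directory is created with devfs_vmkdir() if it does not exist
	 * yet, and the "tun0" dirent is created inside it.
	 */
	dev = make_dev(&example_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600,
	    "net/tun%d", 0);

	/* Later, on detach: */
	destroy_dev(dev);
#endif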

/*
 * The caller needs to hold the dm for the duration of the call.
 */
void
devfs_populate(struct devfs_mount *dm)
{

	sx_assert(&dm->dm_lock, SX_XLOCKED);
	if (dm->dm_generation == devfs_generation)
		return;
	while (devfs_populate_loop(dm, 0))
		continue;
	dm->dm_generation = devfs_generation;
}

/*
 * The caller needs to hold the dm for the duration of the call.
 */
void
devfs_cleanup(struct devfs_mount *dm)
{

	sx_assert(&dm->dm_lock, SX_XLOCKED);
	while (devfs_populate_loop(dm, 1))
		continue;
	devfs_purge(dm, dm->dm_rootdir);
}

/*
 * devfs_create() and devfs_destroy() are called from kern_conf.c and
 * in both cases the devlock() mutex is held, so no further locking
 * is necessary and no sleeping allowed.
 */

void
devfs_create(struct cdev *dev)
{
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_OWNED);
	cdp = cdev2priv(dev);
	cdp->cdp_flags |= CDP_ACTIVE;
	cdp->cdp_inode = alloc_unrl(devfs_inos);
	dev_refl(dev);
	TAILQ_INSERT_TAIL(&cdevp_list, cdp, cdp_list);
	devfs_generation++;
}

void
devfs_destroy(struct cdev *dev)
{
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_OWNED);
	cdp = cdev2priv(dev);
	cdp->cdp_flags &= ~CDP_ACTIVE;
	devfs_generation++;
}

static void
devfs_devs_init(void *junk __unused)
{

	devfs_inos = new_unrhdr(DEVFS_ROOTINO + 1, INT_MAX, &devmtx);
}

SYSINIT(devfs_devs, SI_SUB_DEVFS, SI_ORDER_FIRST, devfs_devs_init, NULL);
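
/*
 * devfs_inos is a unit-number allocator from unr(9); inode numbers handed
 * out above via alloc_unr()/alloc_unrl() go back to it with free_unr().
 * Below is a minimal, non-compiled sketch of that allocator pattern; the
 * allocator name and bounds are made up for illustration, and the
 * declarations come from the <sys/systm.h> already included in this file.
 */
#if 0
	struct unrhdr *example_unr;
	int item;

	/* NULL mutex: let unr(9) lock internally, unlike devmtx above. */
	example_unr = new_unrhdr(1, 1000, NULL);
	item = alloc_unr(example_unr);	/* lowest free number, -1 if none */
	/* ... use "item" as an inode or unit number ... */
	free_unr(example_unr, item);	/* return it to the pool */
	delete_unrhdr(example_unr);
#endif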