/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2000,2004
 *	Poul-Henning Kamp.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From: FreeBSD: src/sys/miscfs/kernfs/kernfs_vfsops.c 1.36
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <sys/kdb.h>

#include <fs/devfs/devfs.h>
#include <fs/devfs/devfs_int.h>

#include <security/mac/mac_framework.h>

/*
 * The one true (but secret) list of active devices in the system.
 * Locked by dev_lock()/devmtx
 */
struct cdev_priv_list cdevp_list = TAILQ_HEAD_INITIALIZER(cdevp_list);

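/* Allocator for devfs inode numbers; initialized in devfs_devs_init(). */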
struct unrhdr *devfs_inos;

static MALLOC_DEFINE(M_DEVFS2, "DEVFS2", "DEVFS data 2");
static MALLOC_DEFINE(M_DEVFS3, "DEVFS3", "DEVFS data 3");
static MALLOC_DEFINE(M_CDEVP, "DEVFS1", "DEVFS cdev_priv storage");

SYSCTL_NODE(_vfs, OID_AUTO, devfs, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "DEVFS filesystem");

static unsigned devfs_generation;
SYSCTL_UINT(_vfs_devfs, OID_AUTO, generation, CTLFLAG_RD,
    &devfs_generation, 0, "DEVFS generation number");

unsigned devfs_rule_depth = 1;
SYSCTL_UINT(_vfs_devfs, OID_AUTO, rule_depth, CTLFLAG_RW,
    &devfs_rule_depth, 0, "Max depth of ruleset include");

/*
 * Helper sysctl for devname(3).  We're given a dev_t and return the
 * name, if any, registered by the device driver.
 */
static int
sysctl_devname(SYSCTL_HANDLER_ARGS)
{
	int error;
	dev_t ud;
#ifdef COMPAT_FREEBSD11
	uint32_t ud_compat;
#endif
	struct cdev_priv *cdp;
	struct cdev *dev;

	if (req->newptr == NULL)
		return (EINVAL);

#ifdef COMPAT_FREEBSD11
	if (req->newlen == sizeof(ud_compat)) {
		error = SYSCTL_IN(req, &ud_compat, sizeof(ud_compat));
		if (error == 0)
			ud = ud_compat == (uint32_t)NODEV ? NODEV : ud_compat;
	} else
#endif
		error = SYSCTL_IN(req, &ud, sizeof (ud));
	if (error)
		return (error);
	if (ud == NODEV)
		return (EINVAL);
	dev = NULL;
	dev_lock();
	TAILQ_FOREACH(cdp, &cdevp_list, cdp_list)
		if (cdp->cdp_inode == ud) {
			dev = &cdp->cdp_c;
			dev_refl(dev);
			break;
		}
	dev_unlock();
	if (dev == NULL)
		return (ENOENT);
	error = SYSCTL_OUT(req, dev->si_name, strlen(dev->si_name) + 1);
	dev_rel(dev);
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, devname,
    CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_ANYBODY|CTLFLAG_MPSAFE,
    NULL, 0, sysctl_devname, "", "devname(3) handler");

SYSCTL_INT(_debug_sizeof, OID_AUTO, cdev, CTLFLAG_RD,
    SYSCTL_NULL_INT_PTR, sizeof(struct cdev), "sizeof(struct cdev)");

SYSCTL_INT(_debug_sizeof, OID_AUTO, cdev_priv, CTLFLAG_RD,
    SYSCTL_NULL_INT_PTR, sizeof(struct cdev_priv), "sizeof(struct cdev_priv)");

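/*
 * Allocate a cdev inside its cdev_priv container.  Returns NULL only if
 * MAKEDEV_NOWAIT was requested and memory could not be allocated without
 * sleeping.
 */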
struct cdev *
devfs_alloc(int flags)
{
	struct cdev_priv *cdp;
	struct cdev *cdev;
	struct timespec ts;

	cdp = malloc(sizeof *cdp, M_CDEVP, M_ZERO |
	    ((flags & MAKEDEV_NOWAIT) ? M_NOWAIT : M_WAITOK));
	if (cdp == NULL)
		return (NULL);

	mtx_init(&cdp->cdp_threadlock, "devthrd", NULL, MTX_DEF);

	cdp->cdp_dirents = &cdp->cdp_dirent0;

	cdev = &cdp->cdp_c;
	LIST_INIT(&cdev->si_children);
	vfs_timestamp(&ts);
	cdev->si_atime = cdev->si_mtime = cdev->si_ctime = ts;

	return (cdev);
}

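/*
 * Return 1 if "name" collides with the name of an existing active device
 * or with an existing directory.  devfs_pathpath() is checked in both
 * directions so that neither name may be a path prefix of the other.
 */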
int
devfs_dev_exists(const char *name)
{
	struct cdev_priv *cdp;

	dev_lock_assert_locked();

	TAILQ_FOREACH(cdp, &cdevp_list, cdp_list) {
		if ((cdp->cdp_flags & CDP_ACTIVE) == 0)
			continue;
		if (devfs_pathpath(cdp->cdp_c.si_name, name) != 0)
			return (1);
		if (devfs_pathpath(name, cdp->cdp_c.si_name) != 0)
			return (1);
	}
	if (devfs_dir_find(name) != 0)
		return (1);

	return (0);
}

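/*
 * Release a cdev allocated by devfs_alloc().  The device must no longer
 * be on the active device list.
 */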
void
devfs_free(struct cdev *cdev)
{
	struct cdev_priv *cdp;

	cdp = cdev2priv(cdev);
	KASSERT((cdp->cdp_flags & (CDP_ACTIVE | CDP_ON_ACTIVE_LIST)) == 0,
	    ("%s: cdp %p (%s) still on active list",
	    __func__, cdp, cdev->si_name));
	if (cdev->si_cred != NULL)
		crfree(cdev->si_cred);
	devfs_free_cdp_inode(cdp->cdp_inode);
	if (cdp->cdp_maxdirent > 0)
		free(cdp->cdp_dirents, M_DEVFS2);
	mtx_destroy(&cdp->cdp_threadlock);
	free(cdp, M_CDEVP);
}

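/*
 * Look up an entry of the given name length (and, if type is non-zero,
 * of the given dirent type) in directory dd.  Returns NULL if no match
 * is found.
 */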
struct devfs_dirent *
devfs_find(struct devfs_dirent *dd, const char *name, int namelen, int type)
{
	struct devfs_dirent *de;

	TAILQ_FOREACH(de, &dd->de_dlist, de_list) {
		if (namelen != de->de_dirent->d_namlen)
			continue;
		if (type != 0 && type != de->de_dirent->d_type)
			continue;

		/*
		 * The race with finding a non-active name is not
		 * completely closed by this check, but as in
		 * devfs_allocv() it makes the race unlikely enough.
		 */
		if (de->de_dirent->d_type == DT_CHR &&
		    (de->de_cdp->cdp_flags & CDP_ACTIVE) == 0)
			continue;

		if (bcmp(name, de->de_dirent->d_name, namelen) != 0)
			continue;
		break;
	}
	KASSERT(de == NULL || (de->de_flags & DE_DOOMED) == 0,
	    ("devfs_find: returning a doomed entry"));
	return (de);
}

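/*
 * Allocate a new directory entry.  The struct dirent is carved out of the
 * same allocation, immediately after the devfs_dirent itself.
 */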
struct devfs_dirent *
devfs_newdirent(char *name, int namelen)
{
	int i;
	struct devfs_dirent *de;
	struct dirent d;

	d.d_namlen = namelen;
	i = sizeof(*de) + GENERIC_DIRSIZ(&d);
	de = malloc(i, M_DEVFS3, M_WAITOK | M_ZERO);
	de->de_dirent = (struct dirent *)(de + 1);
	de->de_dirent->d_namlen = namelen;
	de->de_dirent->d_reclen = GENERIC_DIRSIZ(&d);
	bcopy(name, de->de_dirent->d_name, namelen);
	dirent_terminate(de->de_dirent);
	vfs_timestamp(&de->de_ctime);
	de->de_mtime = de->de_atime = de->de_ctime;
	de->de_links = 1;
	de->de_holdcnt = 1;
#ifdef MAC
	mac_devfs_init(de);
#endif
	return (de);
}

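/*
 * Return the directory that contains a directory entry.  For
 * non-directories this is de_dir; for directories it is found through the
 * ".." entry, which is always the second element of de_dlist.  "." and
 * ".." entries themselves have no parent and yield NULL.
 */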
struct devfs_dirent *
devfs_parent_dirent(struct devfs_dirent *de)
{

	if (de->de_dirent->d_type != DT_DIR)
		return (de->de_dir);

	if (de->de_flags & (DE_DOT | DE_DOTDOT))
		return (NULL);

	de = TAILQ_FIRST(&de->de_dlist);	/* "." */
	if (de == NULL)
		return (NULL);
	de = TAILQ_NEXT(de, de_list);		/* ".." */
	if (de == NULL)
		return (NULL);

	return (de->de_dir);
}

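/*
 * Create a new directory entry of type DT_DIR, together with its "." and
 * ".." entries, and link it under dotdot unless dotdot is NULL (the root
 * directory case).
 */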
struct devfs_dirent *
devfs_vmkdir(struct devfs_mount *dmp, char *name, int namelen,
    struct devfs_dirent *dotdot, u_int inode)
{
	struct devfs_dirent *dd;
	struct devfs_dirent *de;

	/* Create the new directory */
	dd = devfs_newdirent(name, namelen);
	TAILQ_INIT(&dd->de_dlist);
	dd->de_dirent->d_type = DT_DIR;
	dd->de_mode = 0555;
	dd->de_links = 2;
	dd->de_dir = dd;
	if (inode != 0)
		dd->de_inode = inode;
	else
		dd->de_inode = alloc_unr(devfs_inos);

	/*
	 * "." and ".." are always the two first entries in the
	 * de_dlist list.
	 *
	 * Create the "." entry in the new directory.
	 */
	de = devfs_newdirent(".", 1);
	de->de_dirent->d_type = DT_DIR;
	de->de_flags |= DE_DOT;
	TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
	de->de_dir = dd;

	/* Create the ".." entry in the new directory. */
	de = devfs_newdirent("..", 2);
	de->de_dirent->d_type = DT_DIR;
	de->de_flags |= DE_DOTDOT;
	TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
	if (dotdot == NULL) {
		de->de_dir = dd;
	} else {
		de->de_dir = dotdot;
		sx_assert(&dmp->dm_lock, SX_XLOCKED);
		TAILQ_INSERT_TAIL(&dotdot->de_dlist, dd, de_list);
		dotdot->de_links++;
		devfs_rules_apply(dmp, dd);
	}

#ifdef MAC
	mac_devfs_create_directory(dmp->dm_mount, name, namelen, dd);
#endif
	return (dd);
}

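/*
 * Free a directory entry, detaching it from its vnode (if any) first.
 */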
void
devfs_dirent_free(struct devfs_dirent *de)
{
	struct vnode *vp;

	vp = de->de_vnode;
	mtx_lock(&devfs_de_interlock);
	if (vp != NULL && vp->v_data == de)
		vp->v_data = NULL;
	mtx_unlock(&devfs_de_interlock);
	free(de, M_DEVFS3);
}

/*
 * Removes a directory if it is empty.  Empty parent directories are
 * removed recursively as well.
 */
static void
devfs_rmdir_empty(struct devfs_mount *dm, struct devfs_dirent *de)
{
	struct devfs_dirent *dd, *de_dot, *de_dotdot;

	sx_assert(&dm->dm_lock, SX_XLOCKED);

	for (;;) {
		KASSERT(de->de_dirent->d_type == DT_DIR,
		    ("devfs_rmdir_empty: de is not a directory"));

		if ((de->de_flags & DE_DOOMED) != 0 || de == dm->dm_rootdir)
			return;

		de_dot = TAILQ_FIRST(&de->de_dlist);
		KASSERT(de_dot != NULL, ("devfs_rmdir_empty: . missing"));
		de_dotdot = TAILQ_NEXT(de_dot, de_list);
		KASSERT(de_dotdot != NULL, ("devfs_rmdir_empty: .. missing"));
		/* Return if the directory is not empty. */
		if (TAILQ_NEXT(de_dotdot, de_list) != NULL)
			return;

		dd = devfs_parent_dirent(de);
		KASSERT(dd != NULL, ("devfs_rmdir_empty: NULL dd"));
		TAILQ_REMOVE(&de->de_dlist, de_dot, de_list);
		TAILQ_REMOVE(&de->de_dlist, de_dotdot, de_list);
		TAILQ_REMOVE(&dd->de_dlist, de, de_list);
		DEVFS_DE_HOLD(dd);
		devfs_delete(dm, de, DEVFS_DEL_NORECURSE);
		devfs_delete(dm, de_dot, DEVFS_DEL_NORECURSE);
		devfs_delete(dm, de_dotdot, DEVFS_DEL_NORECURSE);
		if (DEVFS_DE_DROP(dd)) {
			devfs_dirent_free(dd);
			return;
		}

		de = dd;
	}
}

/*
 * The caller needs to hold the dm for the duration of the call since
 * dm->dm_lock may be temporarily dropped.
 */
void
devfs_delete(struct devfs_mount *dm, struct devfs_dirent *de, int flags)
{
	struct devfs_dirent *dd;
	struct vnode *vp;

	KASSERT((de->de_flags & DE_DOOMED) == 0,
	    ("devfs_delete doomed dirent"));
	de->de_flags |= DE_DOOMED;

	if ((flags & DEVFS_DEL_NORECURSE) == 0) {
		dd = devfs_parent_dirent(de);
		if (dd != NULL)
			DEVFS_DE_HOLD(dd);
		if (de->de_flags & DE_USER) {
			KASSERT(dd != NULL, ("devfs_delete: NULL dd"));
			devfs_dir_unref_de(dm, dd);
		}
	} else
		dd = NULL;

	mtx_lock(&devfs_de_interlock);
	vp = de->de_vnode;
	if (vp != NULL) {
		vhold(vp);
		mtx_unlock(&devfs_de_interlock);
		sx_unlock(&dm->dm_lock);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		vgone(vp);
		VOP_UNLOCK(vp);
		vdrop(vp);
		sx_xlock(&dm->dm_lock);
	} else
		mtx_unlock(&devfs_de_interlock);
	if (de->de_symlink) {
		free(de->de_symlink, M_DEVFS);
		de->de_symlink = NULL;
	}
#ifdef MAC
	mac_devfs_destroy(de);
#endif
	if (de->de_inode > DEVFS_ROOTINO) {
		devfs_free_cdp_inode(de->de_inode);
		de->de_inode = 0;
	}
	if (DEVFS_DE_DROP(de))
		devfs_dirent_free(de);

	if (dd != NULL) {
		if (DEVFS_DE_DROP(dd))
			devfs_dirent_free(dd);
		else
			devfs_rmdir_empty(dm, dd);
	}
}

/*
 * Called on unmount.
 * Recursively removes the entire tree.
 * The caller needs to hold the dm for the duration of the call.
 */

static void
devfs_purge(struct devfs_mount *dm, struct devfs_dirent *dd)
{
	struct devfs_dirent *de;

	sx_assert(&dm->dm_lock, SX_XLOCKED);

	DEVFS_DE_HOLD(dd);
	for (;;) {
		/*
		 * Use TAILQ_LAST() to remove "." and ".." last.
		 * We might need ".." to resolve a path in
		 * devfs_dir_unref_de().
		 */
		de = TAILQ_LAST(&dd->de_dlist, devfs_dlist_head);
		if (de == NULL)
			break;
		TAILQ_REMOVE(&dd->de_dlist, de, de_list);
		if (de->de_flags & DE_USER)
			devfs_dir_unref_de(dm, dd);
		if (de->de_flags & (DE_DOT | DE_DOTDOT))
			devfs_delete(dm, de, DEVFS_DEL_NORECURSE);
		else if (de->de_dirent->d_type == DT_DIR)
			devfs_purge(dm, de);
		else
			devfs_delete(dm, de, DEVFS_DEL_NORECURSE);
	}
	if (DEVFS_DE_DROP(dd))
		devfs_dirent_free(dd);
	else if ((dd->de_flags & DE_DOOMED) == 0)
		devfs_delete(dm, dd, DEVFS_DEL_NORECURSE);
}

/*
 * Each cdev_priv has an array of pointers to devfs_dirent which is indexed
 * by the mount point's dm_idx.
 * This function extends the array when necessary, taking into account that
 * the default array is 1 element and not malloc'ed.
 */
static void
devfs_metoo(struct cdev_priv *cdp, struct devfs_mount *dm)
{
	struct devfs_dirent **dep, **olddep;
	int siz;

	siz = (dm->dm_idx + 1) * sizeof *dep;
	dep = malloc(siz, M_DEVFS2, M_WAITOK | M_ZERO);
	dev_lock();
	if (dm->dm_idx <= cdp->cdp_maxdirent) {
		/* We got raced */
		dev_unlock();
		free(dep, M_DEVFS2);
		return;
	}
	memcpy(dep, cdp->cdp_dirents, (cdp->cdp_maxdirent + 1) * sizeof *dep);
	olddep = cdp->cdp_maxdirent > 0 ? cdp->cdp_dirents : NULL;
	cdp->cdp_dirents = dep;
	/*
	 * XXX: if malloc told us how much we actually got this could
	 * XXX: be optimized.
	 */
	cdp->cdp_maxdirent = dm->dm_idx;
	dev_unlock();
	free(olddep, M_DEVFS2);
}

/*
 * The caller needs to hold the dm for the duration of the call.
 */
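/*
 * Bring this mount one step closer in sync with the global cdev list:
 * remove the dirent for a destroyed device (or any dirent on unmount),
 * garbage collect a destroyed device with no remaining dirents, or create
 * the dirent (and any missing intermediate directories) for a new device.
 * Returns 1 if something was changed and the caller should iterate again,
 * 0 once the mount is in sync.
 */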
static int
devfs_populate_loop(struct devfs_mount *dm, int cleanup)
{
	struct cdev_priv *cdp;
	struct devfs_dirent *de;
	struct devfs_dirent *dd, *dt;
	struct cdev *pdev;
	int de_flags, depth, j;
	char *q, *s;

	sx_assert(&dm->dm_lock, SX_XLOCKED);
	dev_lock();
	TAILQ_FOREACH(cdp, &cdevp_list, cdp_list) {
		KASSERT(cdp->cdp_dirents != NULL, ("NULL cdp_dirents"));
		KASSERT((cdp->cdp_flags & CDP_ON_ACTIVE_LIST) != 0,
		    ("%s: cdp %p (%s) should not be on active list",
		    __func__, cdp, cdp->cdp_c.si_name));

		/*
		 * If we are unmounting, or the device has been destroyed,
		 * clean up our dirent.
		 */
		if ((cleanup || !(cdp->cdp_flags & CDP_ACTIVE)) &&
		    dm->dm_idx <= cdp->cdp_maxdirent &&
		    cdp->cdp_dirents[dm->dm_idx] != NULL) {
			de = cdp->cdp_dirents[dm->dm_idx];
			cdp->cdp_dirents[dm->dm_idx] = NULL;
			KASSERT(cdp == de->de_cdp,
			    ("%s %d %s %p %p", __func__, __LINE__,
			    cdp->cdp_c.si_name, cdp, de->de_cdp));
			KASSERT(de->de_dir != NULL, ("Null de->de_dir"));
			dev_unlock();

			TAILQ_REMOVE(&de->de_dir->de_dlist, de, de_list);
			de->de_cdp = NULL;
			de->de_inode = 0;
			devfs_delete(dm, de, 0);
			dev_lock();
			cdp->cdp_inuse--;
			dev_unlock();
			return (1);
		}
		/*
		 * GC any lingering devices
		 */
		if (!(cdp->cdp_flags & CDP_ACTIVE)) {
			if (cdp->cdp_inuse > 0)
				continue;
			cdp->cdp_flags &= ~CDP_ON_ACTIVE_LIST;
			TAILQ_REMOVE(&cdevp_list, cdp, cdp_list);
			dev_unlock();
			dev_rel(&cdp->cdp_c);
			return (1);
		}
		/*
		 * Don't create any new dirents if we are unmounting
		 */
		if (cleanup)
			continue;
		KASSERT((cdp->cdp_flags & CDP_ACTIVE), ("Bogons, I tell ya'!"));

		if (dm->dm_idx <= cdp->cdp_maxdirent &&
		    cdp->cdp_dirents[dm->dm_idx] != NULL) {
			de = cdp->cdp_dirents[dm->dm_idx];
			KASSERT(cdp == de->de_cdp, ("inconsistent cdp"));
			continue;
		}

		cdp->cdp_inuse++;
		dev_unlock();

		if (dm->dm_idx > cdp->cdp_maxdirent)
			devfs_metoo(cdp, dm);

		dd = dm->dm_rootdir;
		s = cdp->cdp_c.si_name;
		for (;;) {
			for (q = s; *q != '/' && *q != '\0'; q++)
				continue;
			if (*q != '/')
				break;
			de = devfs_find(dd, s, q - s, 0);
			if (de == NULL)
				de = devfs_vmkdir(dm, s, q - s, dd, 0);
			else if (de->de_dirent->d_type == DT_LNK) {
				de = devfs_find(dd, s, q - s, DT_DIR);
				if (de == NULL)
					de = devfs_vmkdir(dm, s, q - s, dd, 0);
				de->de_flags |= DE_COVERED;
			}
			s = q + 1;
			dd = de;
			KASSERT(dd->de_dirent->d_type == DT_DIR &&
			    (dd->de_flags & (DE_DOT | DE_DOTDOT)) == 0,
			    ("%s: invalid directory (si_name=%s)",
			    __func__, cdp->cdp_c.si_name));
		}
		de_flags = 0;
		de = devfs_find(dd, s, q - s, DT_LNK);
		if (de != NULL)
			de_flags |= DE_COVERED;

		de = devfs_newdirent(s, q - s);
		if (cdp->cdp_c.si_flags & SI_ALIAS) {
			de->de_uid = 0;
			de->de_gid = 0;
			de->de_mode = 0755;
			de->de_dirent->d_type = DT_LNK;
			pdev = cdp->cdp_c.si_parent;
			dt = dd;
			depth = 0;
			while (dt != dm->dm_rootdir &&
			    (dt = devfs_parent_dirent(dt)) != NULL)
				depth++;
			j = depth * 3 + strlen(pdev->si_name) + 1;
			de->de_symlink = malloc(j, M_DEVFS, M_WAITOK);
			de->de_symlink[0] = 0;
			while (depth-- > 0)
				strcat(de->de_symlink, "../");
			strcat(de->de_symlink, pdev->si_name);
		} else {
			de->de_uid = cdp->cdp_c.si_uid;
			de->de_gid = cdp->cdp_c.si_gid;
			de->de_mode = cdp->cdp_c.si_mode;
			de->de_dirent->d_type = DT_CHR;
		}
		de->de_flags |= de_flags;
		de->de_inode = cdp->cdp_inode;
		de->de_cdp = cdp;
#ifdef MAC
		mac_devfs_create_device(cdp->cdp_c.si_cred, dm->dm_mount,
		    &cdp->cdp_c, de);
#endif
		de->de_dir = dd;
		TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
		devfs_rules_apply(dm, de);
		dev_lock();
		/* XXX: could check that cdp is still active here */
		KASSERT(cdp->cdp_dirents[dm->dm_idx] == NULL,
		    ("%s %d\n", __func__, __LINE__));
		cdp->cdp_dirents[dm->dm_idx] = de;
		KASSERT(de->de_cdp != (void *)0xdeadc0de,
		    ("%s %d\n", __func__, __LINE__));
		dev_unlock();
		return (1);
	}
	dev_unlock();
	return (0);
}

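/*
 * Return non-zero if the mount is out of date with respect to the global
 * device list and devfs_populate() needs to run.
 */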
int
devfs_populate_needed(struct devfs_mount *dm)
{

	return (dm->dm_generation != devfs_generation);
}

/*
 * The caller needs to hold the dm for the duration of the call.
 */
void
devfs_populate(struct devfs_mount *dm)
{
	unsigned gen;

	sx_assert(&dm->dm_lock, SX_XLOCKED);
	if (!devfs_populate_needed(dm))
		return;
	gen = devfs_generation;
	while (devfs_populate_loop(dm, 0))
		continue;
	dm->dm_generation = gen;
}

/*
 * The caller needs to hold the dm for the duration of the call.
 */
void
devfs_cleanup(struct devfs_mount *dm)
{

	sx_assert(&dm->dm_lock, SX_XLOCKED);
	while (devfs_populate_loop(dm, 1))
		continue;
	devfs_purge(dm, dm->dm_rootdir);
}

/*
 * devfs_create() and devfs_destroy() are called from kern_conf.c and
 * in both cases the devlock() mutex is held, so no further locking
 * is necessary and no sleeping allowed.
 */

void
devfs_create(struct cdev *dev)
{
	struct cdev_priv *cdp;

	dev_lock_assert_locked();
	cdp = cdev2priv(dev);
	KASSERT((cdp->cdp_flags & CDP_ON_ACTIVE_LIST) == 0,
	    ("%s: cdp %p (%s) already on active list",
	    __func__, cdp, dev->si_name));
	cdp->cdp_flags |= (CDP_ACTIVE | CDP_ON_ACTIVE_LIST);
	cdp->cdp_inode = alloc_unrl(devfs_inos);
	dev_refl(dev);
	TAILQ_INSERT_TAIL(&cdevp_list, cdp, cdp_list);
	devfs_generation++;
}

void
devfs_destroy(struct cdev *dev)
{
	struct cdev_priv *cdp;

	dev_lock_assert_locked();
	cdp = cdev2priv(dev);
	cdp->cdp_flags &= ~CDP_ACTIVE;
	devfs_generation++;
}

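/*
 * Allocate an inode number for a cdev from the global devfs inode pool.
 */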
ino_t
devfs_alloc_cdp_inode(void)
{

	return (alloc_unr(devfs_inos));
}

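/*
 * Return an inode number to the pool; an inode number of 0 means it was
 * never allocated and is ignored.
 */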
void
devfs_free_cdp_inode(ino_t ino)
{

	if (ino > 0)
		free_unr(devfs_inos, ino);
}

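/*
 * Set up the inode number allocator at boot; the allocator shares devmtx
 * with the rest of the device layer.
 */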
static void
devfs_devs_init(void *junk __unused)
{

	devfs_inos = new_unrhdr(DEVFS_ROOTINO + 1, INT_MAX, &devmtx);
}

SYSINIT(devfs_devs, SI_SUB_DEVFS, SI_ORDER_FIRST, devfs_devs_init, NULL);