/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2000,2004
 *	Poul-Henning Kamp. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From: FreeBSD: src/sys/miscfs/kernfs/kernfs_vfsops.c 1.36
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <sys/kdb.h>

#include <fs/devfs/devfs.h>
#include <fs/devfs/devfs_int.h>

#include <security/mac/mac_framework.h>

/*
 * The one true (but secret) list of active devices in the system.
 * Locked by dev_lock()/devmtx
 */
struct cdev_priv_list cdevp_list = TAILQ_HEAD_INITIALIZER(cdevp_list);

struct unrhdr *devfs_inos;

static MALLOC_DEFINE(M_DEVFS2, "DEVFS2", "DEVFS data 2");
static MALLOC_DEFINE(M_DEVFS3, "DEVFS3", "DEVFS data 3");
static MALLOC_DEFINE(M_CDEVP, "DEVFS1", "DEVFS cdev_priv storage");

SYSCTL_NODE(_vfs, OID_AUTO, devfs, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "DEVFS filesystem");

static unsigned devfs_generation;
SYSCTL_UINT(_vfs_devfs, OID_AUTO, generation, CTLFLAG_RD,
    &devfs_generation, 0, "DEVFS generation number");

unsigned devfs_rule_depth = 1;
SYSCTL_UINT(_vfs_devfs, OID_AUTO, rule_depth, CTLFLAG_RW,
    &devfs_rule_depth, 0, "Max depth of ruleset include");

/*
 * Helper sysctl for devname(3). We're given a dev_t and return the
 * name, if any, registered by the device driver.
 */
static int
sysctl_devname(SYSCTL_HANDLER_ARGS)
{
	int error;
	dev_t ud;
#ifdef COMPAT_FREEBSD11
	uint32_t ud_compat;
#endif
	struct cdev_priv *cdp;
	struct cdev *dev;

#ifdef COMPAT_FREEBSD11
	if (req->newlen == sizeof(ud_compat)) {
		error = SYSCTL_IN(req, &ud_compat, sizeof(ud_compat));
		if (error == 0)
			ud = ud_compat == (uint32_t)NODEV ? NODEV : ud_compat;
	} else
#endif
		error = SYSCTL_IN(req, &ud, sizeof (ud));
	if (error)
		return (error);
	if (ud == NODEV)
		return (EINVAL);
	dev = NULL;
	dev_lock();
	TAILQ_FOREACH(cdp, &cdevp_list, cdp_list)
		if (cdp->cdp_inode == ud) {
			dev = &cdp->cdp_c;
			dev_refl(dev);
			break;
		}
	dev_unlock();
	if (dev == NULL)
		return (ENOENT);
	error = SYSCTL_OUT(req, dev->si_name, strlen(dev->si_name) + 1);
	dev_rel(dev);
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, devname,
    CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_ANYBODY|CTLFLAG_MPSAFE,
    NULL, 0, sysctl_devname, "", "devname(3) handler");
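
/*
 * Illustrative userland sketch (not part of this file): devname(3)
 * resolves a dev_t by writing it into kern.devname and reading the
 * registered name back, roughly:
 *
 *	dev_t ud = st.st_rdev;			(from a prior stat())
 *	char buf[SPECNAMELEN + 1];
 *	size_t len = sizeof(buf);
 *	if (sysctlbyname("kern.devname", buf, &len, &ud, sizeof(ud)) == 0)
 *		printf("%s\n", buf);
 */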

SYSCTL_INT(_debug_sizeof, OID_AUTO, cdev, CTLFLAG_RD,
    SYSCTL_NULL_INT_PTR, sizeof(struct cdev), "sizeof(struct cdev)");

SYSCTL_INT(_debug_sizeof, OID_AUTO, cdev_priv, CTLFLAG_RD,
    SYSCTL_NULL_INT_PTR, sizeof(struct cdev_priv), "sizeof(struct cdev_priv)");

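/*
 * Allocate a struct cdev and its private companion struct cdev_priv.
 * With MAKEDEV_NOWAIT the allocation may fail and NULL is returned;
 * otherwise the call sleeps until memory is available.
 */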
struct cdev *
devfs_alloc(int flags)
{
	struct cdev_priv *cdp;
	struct cdev *cdev;
	struct timespec ts;

	cdp = malloc(sizeof *cdp, M_CDEVP, M_ZERO |
	    ((flags & MAKEDEV_NOWAIT) ? M_NOWAIT : M_WAITOK));
	if (cdp == NULL)
		return (NULL);

	mtx_init(&cdp->cdp_threadlock, "devthrd", NULL, MTX_DEF);

	cdp->cdp_dirents = &cdp->cdp_dirent0;

	cdev = &cdp->cdp_c;
	LIST_INIT(&cdev->si_children);
	vfs_timestamp(&ts);
	cdev->si_atime = cdev->si_mtime = cdev->si_ctime = ts;

	return (cdev);
}

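/*
 * Check whether the given name collides with the name of an existing,
 * active device or with a directory known to devfs; the two
 * devfs_pathpath() calls check the relationship in both directions.
 * The caller must hold dev_lock().  Returns non-zero on a collision.
 */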
int
devfs_dev_exists(const char *name)
{
	struct cdev_priv *cdp;

	dev_lock_assert_locked();

	TAILQ_FOREACH(cdp, &cdevp_list, cdp_list) {
		if ((cdp->cdp_flags & CDP_ACTIVE) == 0)
			continue;
		if (devfs_pathpath(cdp->cdp_c.si_name, name) != 0)
			return (1);
		if (devfs_pathpath(name, cdp->cdp_c.si_name) != 0)
			return (1);
	}
	if (devfs_dir_find(name) != 0)
		return (1);

	return (0);
}

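/*
 * Release a cdev obtained from devfs_alloc().  The device must already
 * have been taken off the active list (see the KASSERT below).
 */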
void
devfs_free(struct cdev *cdev)
{
	struct cdev_priv *cdp;

	cdp = cdev2priv(cdev);
	KASSERT((cdp->cdp_flags & (CDP_ACTIVE | CDP_ON_ACTIVE_LIST)) == 0,
	    ("%s: cdp %p (%s) still on active list",
	    __func__, cdp, cdev->si_name));
	if (cdev->si_cred != NULL)
		crfree(cdev->si_cred);
	devfs_free_cdp_inode(cdp->cdp_inode);
	if (cdp->cdp_maxdirent > 0)
		free(cdp->cdp_dirents, M_DEVFS2);
	mtx_destroy(&cdp->cdp_threadlock);
	free(cdp, M_CDEVP);
}

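/*
 * Look up a name of the given length in directory dd.  A type of 0
 * matches any entry type; inactive device nodes are skipped.
 */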
struct devfs_dirent *
devfs_find(struct devfs_dirent *dd, const char *name, int namelen, int type)
{
	struct devfs_dirent *de;

	TAILQ_FOREACH(de, &dd->de_dlist, de_list) {
		if (namelen != de->de_dirent->d_namlen)
			continue;
		if (type != 0 && type != de->de_dirent->d_type)
			continue;

		/*
		 * The race with finding a non-active name is not
		 * completely closed by this check, but, as in
		 * devfs_allocv(), it makes the race unlikely enough.
		 */
		if (de->de_dirent->d_type == DT_CHR &&
		    (de->de_cdp->cdp_flags & CDP_ACTIVE) == 0)
			continue;

		if (bcmp(name, de->de_dirent->d_name, namelen) != 0)
			continue;
		break;
	}
	KASSERT(de == NULL || (de->de_flags & DE_DOOMED) == 0,
	    ("devfs_find: returning a doomed entry"));
	return (de);
}

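/*
 * Allocate a new directory entry with room for the name appended to
 * the structure; the entry starts out with a single link and hold.
 */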
struct devfs_dirent *
devfs_newdirent(char *name, int namelen)
{
	int i;
	struct devfs_dirent *de;
	struct dirent d;

	d.d_namlen = namelen;
	i = sizeof(*de) + GENERIC_DIRSIZ(&d);
	de = malloc(i, M_DEVFS3, M_WAITOK | M_ZERO);
	de->de_dirent = (struct dirent *)(de + 1);
	de->de_dirent->d_namlen = namelen;
	de->de_dirent->d_reclen = GENERIC_DIRSIZ(&d);
	bcopy(name, de->de_dirent->d_name, namelen);
	dirent_terminate(de->de_dirent);
	vfs_timestamp(&de->de_ctime);
	de->de_mtime = de->de_atime = de->de_ctime;
	de->de_links = 1;
	de->de_holdcnt = 1;
#ifdef MAC
	mac_devfs_init(de);
#endif
	return (de);
}

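/*
 * Return the parent of a directory entry.  For directories this relies
 * on "." and ".." always being the first two entries on de_dlist, as
 * arranged by devfs_vmkdir().
 */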
struct devfs_dirent *
devfs_parent_dirent(struct devfs_dirent *de)
{

	if (de->de_dirent->d_type != DT_DIR)
		return (de->de_dir);

	if (de->de_flags & (DE_DOT | DE_DOTDOT))
		return (NULL);

	de = TAILQ_FIRST(&de->de_dlist);	/* "." */
	if (de == NULL)
		return (NULL);
	de = TAILQ_NEXT(de, de_list);		/* ".." */
	if (de == NULL)
		return (NULL);

	return (de->de_dir);
}

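/*
 * Create a new directory entry of type DT_DIR, populate it with "."
 * and "..", and, if a parent is given, hook it into the parent and
 * apply the mount's rules to it.
 */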
struct devfs_dirent *
devfs_vmkdir(struct devfs_mount *dmp, char *name, int namelen,
    struct devfs_dirent *dotdot, u_int inode)
{
	struct devfs_dirent *dd;
	struct devfs_dirent *de;

	/* Create the new directory */
	dd = devfs_newdirent(name, namelen);
	TAILQ_INIT(&dd->de_dlist);
	dd->de_dirent->d_type = DT_DIR;
	dd->de_mode = 0555;
	dd->de_links = 2;
	dd->de_dir = dd;
	if (inode != 0)
		dd->de_inode = inode;
	else
		dd->de_inode = alloc_unr(devfs_inos);

	/*
	 * "." and ".." are always the first two entries in the
	 * de_dlist list.
	 *
	 * Create the "." entry in the new directory.
	 */
	de = devfs_newdirent(".", 1);
	de->de_dirent->d_type = DT_DIR;
	de->de_flags |= DE_DOT;
	TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
	de->de_dir = dd;

	/* Create the ".." entry in the new directory. */
	de = devfs_newdirent("..", 2);
	de->de_dirent->d_type = DT_DIR;
	de->de_flags |= DE_DOTDOT;
	TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
	if (dotdot == NULL) {
		de->de_dir = dd;
	} else {
		de->de_dir = dotdot;
		sx_assert(&dmp->dm_lock, SX_XLOCKED);
		TAILQ_INSERT_TAIL(&dotdot->de_dlist, dd, de_list);
		dotdot->de_links++;
		devfs_rules_apply(dmp, dd);
	}

#ifdef MAC
	mac_devfs_create_directory(dmp->dm_mount, name, namelen, dd);
#endif
	return (dd);
}

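/*
 * Free a directory entry, detaching it from its vnode first if one is
 * still pointing at it.
 */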
void
devfs_dirent_free(struct devfs_dirent *de)
{
	struct vnode *vp;

	vp = de->de_vnode;
	mtx_lock(&devfs_de_interlock);
	if (vp != NULL && vp->v_data == de)
		vp->v_data = NULL;
	mtx_unlock(&devfs_de_interlock);
	free(de, M_DEVFS3);
}

/*
 * Remove a directory if it is empty.  Empty parent directories are
 * removed as well, walking up toward the devfs root.
 */
static void
devfs_rmdir_empty(struct devfs_mount *dm, struct devfs_dirent *de)
{
	struct devfs_dirent *dd, *de_dot, *de_dotdot;

	sx_assert(&dm->dm_lock, SX_XLOCKED);

	for (;;) {
		KASSERT(de->de_dirent->d_type == DT_DIR,
		    ("devfs_rmdir_empty: de is not a directory"));

		if ((de->de_flags & DE_DOOMED) != 0 || de == dm->dm_rootdir)
			return;

		de_dot = TAILQ_FIRST(&de->de_dlist);
		KASSERT(de_dot != NULL, ("devfs_rmdir_empty: . missing"));
		de_dotdot = TAILQ_NEXT(de_dot, de_list);
		KASSERT(de_dotdot != NULL, ("devfs_rmdir_empty: .. missing"));
		/* Return if the directory is not empty. */
		if (TAILQ_NEXT(de_dotdot, de_list) != NULL)
			return;

		dd = devfs_parent_dirent(de);
		KASSERT(dd != NULL, ("devfs_rmdir_empty: NULL dd"));
		TAILQ_REMOVE(&de->de_dlist, de_dot, de_list);
		TAILQ_REMOVE(&de->de_dlist, de_dotdot, de_list);
		TAILQ_REMOVE(&dd->de_dlist, de, de_list);
		DEVFS_DE_HOLD(dd);
		devfs_delete(dm, de, DEVFS_DEL_NORECURSE);
		devfs_delete(dm, de_dot, DEVFS_DEL_NORECURSE);
		devfs_delete(dm, de_dotdot, DEVFS_DEL_NORECURSE);
		if (DEVFS_DE_DROP(dd)) {
			devfs_dirent_free(dd);
			return;
		}

		de = dd;
	}
}

/*
 * The caller needs to hold the dm for the duration of the call since
 * dm->dm_lock may be temporarily dropped.
 */
void
devfs_delete(struct devfs_mount *dm, struct devfs_dirent *de, int flags)
{
	struct devfs_dirent *dd;
	struct vnode *vp;

	KASSERT((de->de_flags & DE_DOOMED) == 0,
	    ("devfs_delete doomed dirent"));
	de->de_flags |= DE_DOOMED;

	if ((flags & DEVFS_DEL_NORECURSE) == 0) {
		dd = devfs_parent_dirent(de);
		if (dd != NULL)
			DEVFS_DE_HOLD(dd);
		if (de->de_flags & DE_USER) {
			KASSERT(dd != NULL, ("devfs_delete: NULL dd"));
			devfs_dir_unref_de(dm, dd);
		}
	} else
		dd = NULL;

	mtx_lock(&devfs_de_interlock);
	vp = de->de_vnode;
	if (vp != NULL) {
		vhold(vp);
		mtx_unlock(&devfs_de_interlock);
		sx_unlock(&dm->dm_lock);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		vgone(vp);
		VOP_UNLOCK(vp);
		vdrop(vp);
		sx_xlock(&dm->dm_lock);
	} else
		mtx_unlock(&devfs_de_interlock);
	if (de->de_symlink) {
		free(de->de_symlink, M_DEVFS);
		de->de_symlink = NULL;
	}
#ifdef MAC
	mac_devfs_destroy(de);
#endif
	if (de->de_inode > DEVFS_ROOTINO) {
		devfs_free_cdp_inode(de->de_inode);
		de->de_inode = 0;
	}
	if (DEVFS_DE_DROP(de))
		devfs_dirent_free(de);

	if (dd != NULL) {
		if (DEVFS_DE_DROP(dd))
			devfs_dirent_free(dd);
		else
			devfs_rmdir_empty(dm, dd);
	}
}

/*
 * Called on unmount.
 * Recursively removes the entire tree.
 * The caller needs to hold the dm for the duration of the call.
 */

static void
devfs_purge(struct devfs_mount *dm, struct devfs_dirent *dd)
{
	struct devfs_dirent *de;

	sx_assert(&dm->dm_lock, SX_XLOCKED);

	DEVFS_DE_HOLD(dd);
	for (;;) {
		/*
		 * Use TAILQ_LAST() to remove "." and ".." last.
		 * We might need ".." to resolve a path in
		 * devfs_dir_unref_de().
		 */
		de = TAILQ_LAST(&dd->de_dlist, devfs_dlist_head);
		if (de == NULL)
			break;
		TAILQ_REMOVE(&dd->de_dlist, de, de_list);
		if (de->de_flags & DE_USER)
			devfs_dir_unref_de(dm, dd);
		if (de->de_flags & (DE_DOT | DE_DOTDOT))
			devfs_delete(dm, de, DEVFS_DEL_NORECURSE);
		else if (de->de_dirent->d_type == DT_DIR)
			devfs_purge(dm, de);
		else
			devfs_delete(dm, de, DEVFS_DEL_NORECURSE);
	}
	if (DEVFS_DE_DROP(dd))
		devfs_dirent_free(dd);
	else if ((dd->de_flags & DE_DOOMED) == 0)
		devfs_delete(dm, dd, DEVFS_DEL_NORECURSE);
}

/*
 * Each cdev_priv has an array of pointers to devfs_dirent which is indexed
 * by the mount point's dm_idx.
 * This function extends the array when necessary, taking into account that
 * the default array is 1 element and not malloc'ed.
 */
static void
devfs_metoo(struct cdev_priv *cdp, struct devfs_mount *dm)
{
	struct devfs_dirent **dep, **olddep;
	int siz;

	siz = (dm->dm_idx + 1) * sizeof *dep;
	dep = malloc(siz, M_DEVFS2, M_WAITOK | M_ZERO);
	dev_lock();
	if (dm->dm_idx <= cdp->cdp_maxdirent) {
		/* We got raced */
		dev_unlock();
		free(dep, M_DEVFS2);
		return;
	}
	memcpy(dep, cdp->cdp_dirents, (cdp->cdp_maxdirent + 1) * sizeof *dep);
	olddep = cdp->cdp_maxdirent > 0 ? cdp->cdp_dirents : NULL;
	cdp->cdp_dirents = dep;
	/*
	 * XXX: if malloc told us how much we actually got this could
	 * XXX: be optimized.
	 */
	cdp->cdp_maxdirent = dm->dm_idx;
	dev_unlock();
	free(olddep, M_DEVFS2);
}

/*
 * The caller needs to hold the dm for the duration of the call.
 */
static int
devfs_populate_loop(struct devfs_mount *dm, int cleanup)
{
	struct cdev_priv *cdp;
	struct devfs_dirent *de;
	struct devfs_dirent *dd, *dt;
	struct cdev *pdev;
	int de_flags, depth, j;
	char *q, *s;

	sx_assert(&dm->dm_lock, SX_XLOCKED);
	dev_lock();
	TAILQ_FOREACH(cdp, &cdevp_list, cdp_list) {
		KASSERT(cdp->cdp_dirents != NULL, ("NULL cdp_dirents"));
		KASSERT((cdp->cdp_flags & CDP_ON_ACTIVE_LIST) != 0,
		    ("%s: cdp %p (%s) should not be on active list",
		    __func__, cdp, cdp->cdp_c.si_name));

		/*
		 * If we are unmounting, or the device has been destroyed,
		 * clean up our dirent.
		 */
		if ((cleanup || !(cdp->cdp_flags & CDP_ACTIVE)) &&
		    dm->dm_idx <= cdp->cdp_maxdirent &&
		    cdp->cdp_dirents[dm->dm_idx] != NULL) {
			de = cdp->cdp_dirents[dm->dm_idx];
			cdp->cdp_dirents[dm->dm_idx] = NULL;
			KASSERT(cdp == de->de_cdp,
			    ("%s %d %s %p %p", __func__, __LINE__,
			    cdp->cdp_c.si_name, cdp, de->de_cdp));
			KASSERT(de->de_dir != NULL, ("Null de->de_dir"));
			dev_unlock();

			TAILQ_REMOVE(&de->de_dir->de_dlist, de, de_list);
			de->de_cdp = NULL;
			de->de_inode = 0;
			devfs_delete(dm, de, 0);
			dev_lock();
			cdp->cdp_inuse--;
			dev_unlock();
			return (1);
		}
		/*
		 * GC any lingering devices
		 */
		if (!(cdp->cdp_flags & CDP_ACTIVE)) {
			if (cdp->cdp_inuse > 0)
				continue;
			cdp->cdp_flags &= ~CDP_ON_ACTIVE_LIST;
			TAILQ_REMOVE(&cdevp_list, cdp, cdp_list);
			dev_unlock();
			dev_rel(&cdp->cdp_c);
			return (1);
		}
		/*
		 * Don't create any new dirents if we are unmounting
		 */
		if (cleanup)
			continue;
		KASSERT((cdp->cdp_flags & CDP_ACTIVE), ("Bogons, I tell ya'!"));

		if (dm->dm_idx <= cdp->cdp_maxdirent &&
		    cdp->cdp_dirents[dm->dm_idx] != NULL) {
			de = cdp->cdp_dirents[dm->dm_idx];
			KASSERT(cdp == de->de_cdp, ("inconsistent cdp"));
			continue;
		}

		cdp->cdp_inuse++;
		dev_unlock();

		if (dm->dm_idx > cdp->cdp_maxdirent)
			devfs_metoo(cdp, dm);

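		/*
		 * Walk the '/'-separated components of si_name, looking up
		 * or creating each intermediate directory under the mount's
		 * root; dd ends up as the directory that will hold the
		 * device node and s points at the final component.
		 */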
		dd = dm->dm_rootdir;
		s = cdp->cdp_c.si_name;
		for (;;) {
			for (q = s; *q != '/' && *q != '\0'; q++)
				continue;
			if (*q != '/')
				break;
			de = devfs_find(dd, s, q - s, 0);
			if (de == NULL)
				de = devfs_vmkdir(dm, s, q - s, dd, 0);
			else if (de->de_dirent->d_type == DT_LNK) {
				de = devfs_find(dd, s, q - s, DT_DIR);
				if (de == NULL)
					de = devfs_vmkdir(dm, s, q - s, dd, 0);
				de->de_flags |= DE_COVERED;
			}
			s = q + 1;
			dd = de;
			KASSERT(dd->de_dirent->d_type == DT_DIR &&
			    (dd->de_flags & (DE_DOT | DE_DOTDOT)) == 0,
			    ("%s: invalid directory (si_name=%s)",
			    __func__, cdp->cdp_c.si_name));
		}
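		/*
		 * If a symlink with the same name already exists, remember
		 * that so the new device dirent created below is marked
		 * DE_COVERED.
		 */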
		de_flags = 0;
		de = devfs_find(dd, s, q - s, DT_LNK);
		if (de != NULL)
			de_flags |= DE_COVERED;

		de = devfs_newdirent(s, q - s);
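		/*
		 * Aliases become symlinks pointing at the parent device's
		 * name, prefixed with one "../" per directory level between
		 * dd and the devfs root; real devices get DT_CHR nodes
		 * carrying the driver-supplied uid/gid/mode.
		 */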
		if (cdp->cdp_c.si_flags & SI_ALIAS) {
			de->de_uid = 0;
			de->de_gid = 0;
			de->de_mode = 0755;
			de->de_dirent->d_type = DT_LNK;
			pdev = cdp->cdp_c.si_parent;
			dt = dd;
			depth = 0;
			while (dt != dm->dm_rootdir &&
			    (dt = devfs_parent_dirent(dt)) != NULL)
				depth++;
			j = depth * 3 + strlen(pdev->si_name) + 1;
			de->de_symlink = malloc(j, M_DEVFS, M_WAITOK);
			de->de_symlink[0] = 0;
			while (depth-- > 0)
				strcat(de->de_symlink, "../");
			strcat(de->de_symlink, pdev->si_name);
		} else {
			de->de_uid = cdp->cdp_c.si_uid;
			de->de_gid = cdp->cdp_c.si_gid;
			de->de_mode = cdp->cdp_c.si_mode;
			de->de_dirent->d_type = DT_CHR;
		}
		de->de_flags |= de_flags;
		de->de_inode = cdp->cdp_inode;
		de->de_cdp = cdp;
#ifdef MAC
		mac_devfs_create_device(cdp->cdp_c.si_cred, dm->dm_mount,
		    &cdp->cdp_c, de);
#endif
		de->de_dir = dd;
		TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
		devfs_rules_apply(dm, de);
		dev_lock();
		/* XXX: could check that cdp is still active here */
		KASSERT(cdp->cdp_dirents[dm->dm_idx] == NULL,
		    ("%s %d\n", __func__, __LINE__));
		cdp->cdp_dirents[dm->dm_idx] = de;
		KASSERT(de->de_cdp != (void *)0xdeadc0de,
		    ("%s %d\n", __func__, __LINE__));
		dev_unlock();
		return (1);
	}
	dev_unlock();
	return (0);
}

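/*
 * A mount is out of date when its dm_generation no longer matches the
 * global devfs_generation, which is bumped by devfs_create() and
 * devfs_destroy() below.
 */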
int
devfs_populate_needed(struct devfs_mount *dm)
{

	return (dm->dm_generation != devfs_generation);
}

/*
 * The caller needs to hold the dm for the duration of the call.
 */
void
devfs_populate(struct devfs_mount *dm)
{
	unsigned gen;

	sx_assert(&dm->dm_lock, SX_XLOCKED);
	if (!devfs_populate_needed(dm))
		return;
	gen = devfs_generation;
	while (devfs_populate_loop(dm, 0))
		continue;
	dm->dm_generation = gen;
}

/*
 * The caller needs to hold the dm for the duration of the call.
 */
void
devfs_cleanup(struct devfs_mount *dm)
{

	sx_assert(&dm->dm_lock, SX_XLOCKED);
	while (devfs_populate_loop(dm, 1))
		continue;
	devfs_purge(dm, dm->dm_rootdir);
}

/*
 * devfs_create() and devfs_destroy() are called from kern_conf.c and
 * in both cases the devlock() mutex is held, so no further locking
 * is necessary and no sleeping allowed.
 */

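/*
 * Typical flow (sketch): make_dev(9) and friends in kern_conf.c call
 * devfs_create(), which puts the device on cdevp_list and bumps
 * devfs_generation; each devfs mount then materializes or removes the
 * corresponding node the next time devfs_populate() runs for it.
 */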
void
devfs_create(struct cdev *dev)
{
	struct cdev_priv *cdp;

	dev_lock_assert_locked();
	cdp = cdev2priv(dev);
	KASSERT((cdp->cdp_flags & CDP_ON_ACTIVE_LIST) == 0,
	    ("%s: cdp %p (%s) already on active list",
	    __func__, cdp, dev->si_name));
	cdp->cdp_flags |= (CDP_ACTIVE | CDP_ON_ACTIVE_LIST);
	cdp->cdp_inode = alloc_unrl(devfs_inos);
	dev_refl(dev);
	TAILQ_INSERT_TAIL(&cdevp_list, cdp, cdp_list);
	devfs_generation++;
}

void
devfs_destroy(struct cdev *dev)
{
	struct cdev_priv *cdp;

	dev_lock_assert_locked();
	cdp = cdev2priv(dev);
	cdp->cdp_flags &= ~CDP_ACTIVE;
	devfs_generation++;
}

ino_t
devfs_alloc_cdp_inode(void)
{

	return (alloc_unr(devfs_inos));
}

void
devfs_free_cdp_inode(ino_t ino)
{

	if (ino > 0)
		free_unr(devfs_inos, ino);
}

static void
devfs_devs_init(void *junk __unused)
{

	devfs_inos = new_unrhdr(DEVFS_ROOTINO + 1, INT_MAX, &devmtx);
}

SYSINIT(devfs_devs, SI_SUB_DEVFS, SI_ORDER_FIRST, devfs_devs_init, NULL);
