xref: /freebsd/sys/fs/devfs/devfs_vnops.c (revision 3df058ffaf72b8715c9a5a6a4cbaf1eac1910e43)
1 /*-
2  * Copyright (c) 2000-2004
3  *	Poul-Henning Kamp.  All rights reserved.
4  * Copyright (c) 1989, 1992-1993, 1995
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * This code is derived from software donated to Berkeley by
8  * Jan-Simon Pendry.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Neither the name of the University nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  *
31  *	@(#)kernfs_vnops.c	8.15 (Berkeley) 5/21/95
32  * From: FreeBSD: src/sys/miscfs/kernfs/kernfs_vnops.c 1.43
33  *
34  * $FreeBSD$
35  */
36 
37 /*
38  * TODO:
39  *	mkdir: want it ?
40  */
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/conf.h>
45 #include <sys/dirent.h>
46 #include <sys/fcntl.h>
47 #include <sys/file.h>
48 #include <sys/filedesc.h>
49 #include <sys/filio.h>
50 #include <sys/jail.h>
51 #include <sys/kernel.h>
52 #include <sys/lock.h>
53 #include <sys/malloc.h>
54 #include <sys/mman.h>
55 #include <sys/mount.h>
56 #include <sys/namei.h>
57 #include <sys/priv.h>
58 #include <sys/proc.h>
59 #include <sys/stat.h>
60 #include <sys/sx.h>
61 #include <sys/sysctl.h>
62 #include <sys/time.h>
63 #include <sys/ttycom.h>
64 #include <sys/unistd.h>
65 #include <sys/vnode.h>
66 
67 static struct vop_vector devfs_vnodeops;
68 static struct fileops devfs_ops_f;
69 
70 #include <fs/devfs/devfs.h>
71 #include <fs/devfs/devfs_int.h>
72 
73 #include <security/mac/mac_framework.h>
74 
75 #include <vm/vm.h>
76 #include <vm/vm_extern.h>
77 #include <vm/vm_object.h>
78 
79 static MALLOC_DEFINE(M_CDEVPDATA, "DEVFSP", "Metainfo for cdev-fp data");
80 
81 struct mtx	devfs_de_interlock;
82 MTX_SYSINIT(devfs_de_interlock, &devfs_de_interlock, "devfs interlock", MTX_DEF);
83 struct sx	clone_drain_lock;
84 SX_SYSINIT(clone_drain_lock, &clone_drain_lock, "clone events drain lock");
85 struct mtx	cdevpriv_mtx;
86 MTX_SYSINIT(cdevpriv_mtx, &cdevpriv_mtx, "cdevpriv lock", MTX_DEF);
87 
88 SYSCTL_DECL(_vfs_devfs);
89 
90 static int devfs_dotimes;
91 SYSCTL_INT(_vfs_devfs, OID_AUTO, dotimes, CTLFLAG_RW,
92     &devfs_dotimes, 0, "Update timestamps on DEVFS with default precision");
93 
94 /*
95  * Update devfs node timestamp.  Note that updates are unlocked and
96  * stat(2) could see partially updated times.
97  */
98 static void
99 devfs_timestamp(struct timespec *tsp)
100 {
101 	time_t ts;
102 
103 	if (devfs_dotimes) {
104 		vfs_timestamp(tsp);
105 	} else {
106 		ts = time_second;
107 		if (tsp->tv_sec != ts) {
108 			tsp->tv_sec = ts;
109 			tsp->tv_nsec = 0;
110 		}
111 	}
112 }
113 
/*
 * Resolve the cdev and cdevsw backing an open devfs file and acquire a
 * per-thread reference on the device (index returned in *ref).  On
 * success curthread->td_fpop is set to fp so the cdevpriv API can find
 * the file; the caller must later call dev_relthread() and restore
 * td_fpop.  Returns ENXIO when the vnode no longer matches fp->f_data
 * (device revoked/replaced) or when the device has no cdevsw.
 */
static int
devfs_fp_check(struct file *fp, struct cdev **devp, struct cdevsw **dswp,
    int *ref)
{

	*dswp = devvn_refthread(fp->f_vnode, devp, ref);
	if (*devp != fp->f_data) {
		/* Vnode resolves to a different device than the file. */
		if (*dswp != NULL)
			dev_relthread(*devp, *ref);
		return (ENXIO);
	}
	KASSERT((*devp)->si_refcount > 0,
	    ("devfs: un-referenced struct cdev *(%s)", devtoname(*devp)));
	if (*dswp == NULL)
		return (ENXIO);
	curthread->td_fpop = fp;
	return (0);
}
132 
133 int
134 devfs_get_cdevpriv(void **datap)
135 {
136 	struct file *fp;
137 	struct cdev_privdata *p;
138 	int error;
139 
140 	fp = curthread->td_fpop;
141 	if (fp == NULL)
142 		return (EBADF);
143 	p = fp->f_cdevpriv;
144 	if (p != NULL) {
145 		error = 0;
146 		*datap = p->cdpd_data;
147 	} else
148 		error = ENOENT;
149 	return (error);
150 }
151 
152 int
153 devfs_set_cdevpriv(void *priv, cdevpriv_dtr_t priv_dtr)
154 {
155 	struct file *fp;
156 	struct cdev_priv *cdp;
157 	struct cdev_privdata *p;
158 	int error;
159 
160 	fp = curthread->td_fpop;
161 	if (fp == NULL)
162 		return (ENOENT);
163 	cdp = cdev2priv((struct cdev *)fp->f_data);
164 	p = malloc(sizeof(struct cdev_privdata), M_CDEVPDATA, M_WAITOK);
165 	p->cdpd_data = priv;
166 	p->cdpd_dtr = priv_dtr;
167 	p->cdpd_fp = fp;
168 	mtx_lock(&cdevpriv_mtx);
169 	if (fp->f_cdevpriv == NULL) {
170 		LIST_INSERT_HEAD(&cdp->cdp_fdpriv, p, cdpd_list);
171 		fp->f_cdevpriv = p;
172 		mtx_unlock(&cdevpriv_mtx);
173 		error = 0;
174 	} else {
175 		mtx_unlock(&cdevpriv_mtx);
176 		free(p, M_CDEVPDATA);
177 		error = EBUSY;
178 	}
179 	return (error);
180 }
181 
/*
 * Tear down one cdevpriv record: unlink it from the file and the
 * per-device list, run the destructor, and free it.  Must be entered
 * with cdevpriv_mtx held; the mutex is dropped before the destructor
 * runs (which may sleep) and is NOT re-acquired.
 */
void
devfs_destroy_cdevpriv(struct cdev_privdata *p)
{

	mtx_assert(&cdevpriv_mtx, MA_OWNED);
	p->cdpd_fp->f_cdevpriv = NULL;
	LIST_REMOVE(p, cdpd_list);
	mtx_unlock(&cdevpriv_mtx);
	(p->cdpd_dtr)(p->cdpd_data);
	free(p, M_CDEVPDATA);
}
193 
/*
 * Drop the cdevpriv data attached to a file, if any.  Acquires
 * cdevpriv_mtx and hands it, still locked, to
 * devfs_destroy_cdevpriv(), which releases it.
 */
void
devfs_fpdrop(struct file *fp)
{
	struct cdev_privdata *p;

	mtx_lock(&cdevpriv_mtx);
	if ((p = fp->f_cdevpriv) == NULL) {
		mtx_unlock(&cdevpriv_mtx);
		return;
	}
	/* devfs_destroy_cdevpriv() unlocks cdevpriv_mtx for us. */
	devfs_destroy_cdevpriv(p);
}
206 
207 void
208 devfs_clear_cdevpriv(void)
209 {
210 	struct file *fp;
211 
212 	fp = curthread->td_fpop;
213 	if (fp == NULL)
214 		return;
215 	devfs_fpdrop(fp);
216 }
217 
/*
 * On success devfs_populate_vp() returns with dmp->dm_lock held.
 *
 * Synchronize the devfs mount with pending cdev creation/destruction
 * and revalidate the vnode afterwards.  Called with vp locked; the
 * vnode lock is dropped around devfs_populate() (which must not run
 * with it held) and re-taken in the caller's original mode.  Returns
 * EBADF with dm_lock released when the mount was torn down, the vnode
 * was reclaimed, or the dirent was doomed while we were unlocked.
 */
static int
devfs_populate_vp(struct vnode *vp)
{
	struct devfs_dirent *de;
	struct devfs_mount *dmp;
	int locked;

	ASSERT_VOP_LOCKED(vp, "devfs_populate_vp");

	dmp = VFSTODEVFS(vp->v_mount);
	/* Remember the lock mode so it can be restored below. */
	locked = VOP_ISLOCKED(vp);

	sx_xlock(&dmp->dm_lock);
	DEVFS_DMP_HOLD(dmp);

	/* Can't call devfs_populate() with the vnode lock held. */
	VOP_UNLOCK(vp, 0);
	devfs_populate(dmp);

	sx_xunlock(&dmp->dm_lock);
	vn_lock(vp, locked | LK_RETRY);
	sx_xlock(&dmp->dm_lock);
	if (DEVFS_DMP_DROP(dmp)) {
		/* We held the last reference: finish the unmount. */
		sx_xunlock(&dmp->dm_lock);
		devfs_unmount_final(dmp);
		return (EBADF);
	}
	if ((vp->v_iflag & VI_DOOMED) != 0) {
		/* Vnode was reclaimed while unlocked. */
		sx_xunlock(&dmp->dm_lock);
		return (EBADF);
	}
	de = vp->v_data;
	KASSERT(de != NULL,
	    ("devfs_populate_vp: vp->v_data == NULL but vnode not doomed"));
	if ((de->de_flags & DE_DOOMED) != 0) {
		/* The directory entry itself was deleted meanwhile. */
		sx_xunlock(&dmp->dm_lock);
		return (EBADF);
	}

	return (0);
}
262 
/*
 * Translate a devfs vnode into its name component and parent vnode for
 * the reverse-lookup (vn_fullpath) machinery.  The name is written at
 * the tail of buf and *buflen is updated to its starting offset.
 * devfs_populate_vp() leaves dm_lock held; it is kept until the
 * "finished" label.
 */
static int
devfs_vptocnp(struct vop_vptocnp_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode **dvp = ap->a_vpp;
	struct devfs_mount *dmp;
	char *buf = ap->a_buf;
	int *buflen = ap->a_buflen;
	struct devfs_dirent *dd, *de;
	int i, error;

	dmp = VFSTODEVFS(vp->v_mount);

	error = devfs_populate_vp(vp);
	if (error != 0)
		return (error);

	i = *buflen;
	dd = vp->v_data;

	if (vp->v_type == VCHR) {
		/* Character device: use the cdev name, parent is de_dir. */
		i -= strlen(dd->de_cdp->cdp_c.si_name);
		if (i < 0) {
			error = ENOMEM;
			goto finished;
		}
		bcopy(dd->de_cdp->cdp_c.si_name, buf + i,
		    strlen(dd->de_cdp->cdp_c.si_name));
		de = dd->de_dir;
	} else if (vp->v_type == VDIR) {
		if (dd == dmp->dm_rootdir) {
			/* Mount root: report itself as its own parent. */
			*dvp = vp;
			vref(*dvp);
			goto finished;
		}
		i -= dd->de_dirent->d_namlen;
		if (i < 0) {
			error = ENOMEM;
			goto finished;
		}
		bcopy(dd->de_dirent->d_name, buf + i,
		    dd->de_dirent->d_namlen);
		de = dd;
	} else {
		error = ENOENT;
		goto finished;
	}
	*buflen = i;
	de = devfs_parent_dirent(de);
	if (de == NULL) {
		error = ENOENT;
		goto finished;
	}
	mtx_lock(&devfs_de_interlock);
	*dvp = de->de_vnode;
	if (*dvp != NULL) {
		/*
		 * Take a hold under the interlock so the vnode cannot be
		 * freed, then upgrade the hold to a real reference.
		 */
		VI_LOCK(*dvp);
		mtx_unlock(&devfs_de_interlock);
		vholdl(*dvp);
		VI_UNLOCK(*dvp);
		vref(*dvp);
		vdrop(*dvp);
	} else {
		mtx_unlock(&devfs_de_interlock);
		error = ENOENT;
	}
finished:
	sx_xunlock(&dmp->dm_lock);
	return (error);
}
333 
/*
 * Construct the fully qualified path name relative to the mountpoint.
 * If a NULL cnp is provided, no '/' is appended to the resulting path.
 * The path is assembled backwards into buf (SPECNAMELEN + 1 bytes);
 * returns a pointer to its first character, or NULL when the name
 * does not fit or a parent link is missing.
 */
char *
devfs_fqpn(char *buf, struct devfs_mount *dmp, struct devfs_dirent *dd,
    struct componentname *cnp)
{
	int i;
	struct devfs_dirent *de;

	sx_assert(&dmp->dm_lock, SA_LOCKED);

	/* Start from the end of the buffer and work towards the front. */
	i = SPECNAMELEN;
	buf[i] = '\0';
	if (cnp != NULL)
		i -= cnp->cn_namelen;
	if (i < 0)
		 return (NULL);
	if (cnp != NULL)
		bcopy(cnp->cn_nameptr, buf + i, cnp->cn_namelen);
	de = dd;
	while (de != dmp->dm_rootdir) {
		/*
		 * Emit a separator, except before the final (deepest)
		 * component when no cnp was supplied — in that case
		 * nothing has been written yet (i == SPECNAMELEN).
		 */
		if (cnp != NULL || i < SPECNAMELEN) {
			i--;
			if (i < 0)
				 return (NULL);
			buf[i] = '/';
		}
		i -= de->de_dirent->d_namlen;
		if (i < 0)
			 return (NULL);
		bcopy(de->de_dirent->d_name, buf + i,
		    de->de_dirent->d_namlen);
		de = devfs_parent_dirent(de);
		if (de == NULL)
			return (NULL);
	}
	return (buf + i);
}
374 
/*
 * Drop the dirent and mount references taken by devfs_allocv() and
 * report the outcome: 0 when the entry is still alive, 1 when the
 * dirent was doomed (and possibly freed here), 2 when the whole mount
 * was destroyed (dm_lock no longer exists).  dm_lock is released when
 * drop_dm_lock is requested or when the entry/mount went away.
 */
static int
devfs_allocv_drop_refs(int drop_dm_lock, struct devfs_mount *dmp,
	struct devfs_dirent *de)
{
	int not_found;

	not_found = 0;
	if (de->de_flags & DE_DOOMED)
		not_found = 1;
	if (DEVFS_DE_DROP(de)) {
		/* We held the last reference to a doomed dirent. */
		KASSERT(not_found == 1, ("DEVFS de dropped but not doomed"));
		devfs_dirent_free(de);
	}
	if (DEVFS_DMP_DROP(dmp)) {
		KASSERT(not_found == 1,
			("DEVFS mount struct freed before dirent"));
		not_found = 2;
		sx_xunlock(&dmp->dm_lock);
		devfs_unmount_final(dmp);
	}
	/* not_found == 2 means dm_lock is already gone: don't touch it. */
	if (not_found == 1 || (drop_dm_lock && not_found != 2))
		sx_unlock(&dmp->dm_lock);
	return (not_found);
}
399 
/*
 * Destructor callback for insmntque1(): invoked when the new vnode
 * could not be placed on the mount's vnode list.  Severs the
 * vnode<->dirent linkage under the interlock, then disposes of the
 * half-constructed vnode.
 */
static void
devfs_insmntque_dtr(struct vnode *vp, void *arg)
{
	struct devfs_dirent *de;

	de = (struct devfs_dirent *)arg;
	mtx_lock(&devfs_de_interlock);
	vp->v_data = NULL;
	de->de_vnode = NULL;
	mtx_unlock(&devfs_de_interlock);
	vgone(vp);
	vput(vp);
}
413 
/*
 * devfs_allocv shall be entered with dmp->dm_lock held, and it drops
 * it on return.
 *
 * Return the vnode for dirent 'de' in *vpp, creating one if needed.
 * An existing vnode is acquired with vget(); if it turns out doomed it
 * is detached from the dirent and the lookup retried.  A new vnode is
 * typed from the dirent (VCHR/VDIR/VLNK, otherwise VBAD), wired to the
 * cdev for character devices, and inserted via insmntque1().
 */
int
devfs_allocv(struct devfs_dirent *de, struct mount *mp, int lockmode,
    struct vnode **vpp)
{
	int error;
	struct vnode *vp;
	struct cdev *dev;
	struct devfs_mount *dmp;
	struct cdevsw *dsw;

	dmp = VFSTODEVFS(mp);
	if (de->de_flags & DE_DOOMED) {
		sx_xunlock(&dmp->dm_lock);
		return (ENOENT);
	}
loop:
	/* Hold the dirent and mount across the lock juggling below. */
	DEVFS_DE_HOLD(de);
	DEVFS_DMP_HOLD(dmp);
	mtx_lock(&devfs_de_interlock);
	vp = de->de_vnode;
	if (vp != NULL) {
		/*
		 * A vnode already exists.  dm_lock must be dropped around
		 * vget() since it may sleep.
		 */
		VI_LOCK(vp);
		mtx_unlock(&devfs_de_interlock);
		sx_xunlock(&dmp->dm_lock);
		vget(vp, lockmode | LK_INTERLOCK | LK_RETRY, curthread);
		sx_xlock(&dmp->dm_lock);
		if (devfs_allocv_drop_refs(0, dmp, de)) {
			vput(vp);
			return (ENOENT);
		}
		else if ((vp->v_iflag & VI_DOOMED) != 0) {
			/*
			 * The vnode was reclaimed while we slept; unhook
			 * it from the dirent and start over.
			 */
			mtx_lock(&devfs_de_interlock);
			if (de->de_vnode == vp) {
				de->de_vnode = NULL;
				vp->v_data = NULL;
			}
			mtx_unlock(&devfs_de_interlock);
			vput(vp);
			goto loop;
		}
		sx_xunlock(&dmp->dm_lock);
		*vpp = vp;
		return (0);
	}
	mtx_unlock(&devfs_de_interlock);
	if (de->de_dirent->d_type == DT_CHR) {
		/* Refuse to create vnodes for destroyed devices. */
		if (!(de->de_cdp->cdp_flags & CDP_ACTIVE)) {
			devfs_allocv_drop_refs(1, dmp, de);
			return (ENOENT);
		}
		dev = &de->de_cdp->cdp_c;
	} else {
		dev = NULL;
	}
	error = getnewvnode("devfs", mp, &devfs_vnodeops, &vp);
	if (error != 0) {
		devfs_allocv_drop_refs(1, dmp, de);
		printf("devfs_allocv: failed to allocate new vnode\n");
		return (error);
	}

	if (de->de_dirent->d_type == DT_CHR) {
		vp->v_type = VCHR;
		VI_LOCK(vp);
		dev_lock();
		dev_refl(dev);
		/* XXX: v_rdev should be protect by vnode lock */
		vp->v_rdev = dev;
		KASSERT(vp->v_usecount == 1,
		    ("%s %d (%d)\n", __func__, __LINE__, vp->v_usecount));
		dev->si_usecount += vp->v_usecount;
		/* Special casing of ttys for deadfs.  Probably redundant. */
		dsw = dev->si_devsw;
		if (dsw != NULL && (dsw->d_flags & D_TTY) != 0)
			vp->v_vflag |= VV_ISTTY;
		dev_unlock();
		VI_UNLOCK(vp);
		if ((dev->si_flags & SI_ETERNAL) != 0)
			vp->v_vflag |= VV_ETERNALDEV;
		vp->v_op = &devfs_specops;
	} else if (de->de_dirent->d_type == DT_DIR) {
		vp->v_type = VDIR;
	} else if (de->de_dirent->d_type == DT_LNK) {
		vp->v_type = VLNK;
	} else {
		vp->v_type = VBAD;
	}
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWITNESS);
	VN_LOCK_ASHARE(vp);
	mtx_lock(&devfs_de_interlock);
	vp->v_data = de;
	de->de_vnode = vp;
	mtx_unlock(&devfs_de_interlock);
	error = insmntque1(vp, mp, devfs_insmntque_dtr, de);
	if (error != 0) {
		/* devfs_insmntque_dtr() already disposed of the vnode. */
		(void) devfs_allocv_drop_refs(1, dmp, de);
		return (error);
	}
	if (devfs_allocv_drop_refs(0, dmp, de)) {
		vput(vp);
		return (ENOENT);
	}
#ifdef MAC
	mac_devfs_vnode_associate(mp, de, vp);
#endif
	sx_xunlock(&dmp->dm_lock);
	*vpp = vp;
	return (0);
}
527 
/*
 * VOP_ACCESS for devfs.  Runs the standard vaccess() check against the
 * dirent's owner/group/mode; an EACCES result is overridden when the
 * node is the calling process' controlling terminal.
 */
static int
devfs_access(struct vop_access_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct devfs_dirent *de;
	struct proc *p;
	int error;

	de = vp->v_data;
	/* Directory attributes live in the '.' entry. */
	if (vp->v_type == VDIR)
		de = de->de_dir;

	error = vaccess(vp->v_type, de->de_mode, de->de_uid, de->de_gid,
	    ap->a_accmode, ap->a_cred, NULL);
	if (error == 0)
		return (0);
	if (error != EACCES)
		return (error);
	p = ap->a_td->td_proc;
	/* We do, however, allow access to the controlling terminal */
	PROC_LOCK(p);
	if (!(p->p_flag & P_CONTROLT)) {
		PROC_UNLOCK(p);
		return (error);
	}
	if (p->p_session->s_ttydp == de->de_cdp)
		error = 0;
	PROC_UNLOCK(p);
	return (error);
}
558 
/*
 * VOP_CLOSE for character devices.  Releases the session's reference
 * when the closing process' controlling terminal goes away, then calls
 * d_close() only on last close — or always for D_TRACKCLOSE devices,
 * or when the vnode is being forcibly reclaimed.
 */
/* ARGSUSED */
static int
devfs_close(struct vop_close_args *ap)
{
	struct vnode *vp = ap->a_vp, *oldvp;
	struct thread *td = ap->a_td;
	struct proc *p;
	struct cdev *dev = vp->v_rdev;
	struct cdevsw *dsw;
	int vp_locked, error, ref;

	/*
	 * XXX: Don't call d_close() if we were called because of
	 * XXX: insmntque1() failure.
	 */
	if (vp->v_data == NULL)
		return (0);

	/*
	 * Hack: a tty device that is a controlling terminal
	 * has a reference from the session structure.
	 * We cannot easily tell that a character device is
	 * a controlling terminal, unless it is the closing
	 * process' controlling terminal.  In that case,
	 * if the reference count is 2 (this last descriptor
	 * plus the session), release the reference from the session.
	 */
	if (td != NULL) {
		p = td->td_proc;
		PROC_LOCK(p);
		if (vp == p->p_session->s_ttyvp) {
			PROC_UNLOCK(p);
			oldvp = NULL;
			sx_xlock(&proctree_lock);
			/* Re-check after dropping and retaking locks. */
			if (vp == p->p_session->s_ttyvp) {
				SESS_LOCK(p->p_session);
				VI_LOCK(vp);
				if (count_dev(dev) == 2 &&
				    (vp->v_iflag & VI_DOOMED) == 0) {
					p->p_session->s_ttyvp = NULL;
					p->p_session->s_ttydp = NULL;
					oldvp = vp;
				}
				VI_UNLOCK(vp);
				SESS_UNLOCK(p->p_session);
			}
			sx_xunlock(&proctree_lock);
			/* Release outside of all the locks above. */
			if (oldvp != NULL)
				vrele(oldvp);
		} else
			PROC_UNLOCK(p);
	}
	/*
	 * We do not want to really close the device if it
	 * is still in use unless we are trying to close it
	 * forcibly. Since every use (buffer, vnode, swap, cmap)
	 * holds a reference to the vnode, and because we mark
	 * any other vnodes that alias this device, when the
	 * sum of the reference counts on all the aliased
	 * vnodes descends to one, we are on last close.
	 */
	dsw = dev_refthread(dev, &ref);
	if (dsw == NULL)
		return (ENXIO);
	VI_LOCK(vp);
	if (vp->v_iflag & VI_DOOMED) {
		/* Forced close. */
	} else if (dsw->d_flags & D_TRACKCLOSE) {
		/* Keep device updated on status. */
	} else if (count_dev(dev) > 1) {
		VI_UNLOCK(vp);
		dev_relthread(dev, ref);
		return (0);
	}
	/* Hold the vnode; d_close() runs with the vnode lock dropped. */
	vholdl(vp);
	VI_UNLOCK(vp);
	vp_locked = VOP_ISLOCKED(vp);
	VOP_UNLOCK(vp, 0);
	KASSERT(dev->si_refcount > 0,
	    ("devfs_close() on un-referenced struct cdev *(%s)", devtoname(dev)));
	error = dsw->d_close(dev, ap->a_fflag, S_IFCHR, td);
	dev_relthread(dev, ref);
	vn_lock(vp, vp_locked | LK_RETRY);
	vdrop(vp);
	return (error);
}
645 
/*
 * fileops close for devfs files.  Wraps the generic vnode close with
 * td_fpop pointing at this file so the driver's d_close() can reach
 * it, then tears down any remaining cdevpriv data.
 */
static int
devfs_close_f(struct file *fp, struct thread *td)
{
	int error;
	struct file *fpop;

	/*
	 * NB: td may be NULL if this descriptor is closed due to
	 * garbage collection from a closed UNIX domain socket.
	 */
	fpop = curthread->td_fpop;
	curthread->td_fpop = fp;
	error = vnops.fo_close(fp, td);
	curthread->td_fpop = fpop;

	/*
	 * The f_cdevpriv cannot be assigned non-NULL value while we
	 * are destroying the file.
	 */
	if (fp->f_cdevpriv != NULL)
		devfs_fpdrop(fp);
	return (error);
}
669 
/*
 * VOP_FSYNC for devfs.  A live disk device is flushed with
 * vop_stdfsync().  If the device has disappeared (vn_isdisk() reports
 * ENXIO) while dirty buffers remain, warn about the data loss and
 * flush anyway — any buffer still dirty afterwards is fatal.  Other
 * non-disk cases have nothing to sync and return 0.
 */
static int
devfs_fsync(struct vop_fsync_args *ap)
{
	int error;
	struct bufobj *bo;
	struct devfs_dirent *de;

	if (!vn_isdisk(ap->a_vp, &error)) {
		bo = &ap->a_vp->v_bufobj;
		de = ap->a_vp->v_data;
		if (error == ENXIO && bo->bo_dirty.bv_cnt > 0) {
			printf("Device %s went missing before all of the data "
			    "could be written to it; expect data loss.\n",
			    de->de_dirent->d_name);

			error = vop_stdfsync(ap);
			if (bo->bo_dirty.bv_cnt != 0 || error != 0)
				panic("devfs_fsync: vop_stdfsync failed.");
		}

		return (0);
	}

	return (vop_stdfsync(ap));
}
695 
/*
 * VOP_GETATTR for devfs.  Attributes come from the devfs dirent for
 * directories and symlinks, and from the cdev itself for character
 * devices.  devfs_populate_vp() syncs the tree first; the dm_lock it
 * leaves held is dropped right away since only stable fields are read.
 */
static int
devfs_getattr(struct vop_getattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;
	int error;
	struct devfs_dirent *de;
	struct devfs_mount *dmp;
	struct cdev *dev;

	error = devfs_populate_vp(vp);
	if (error != 0)
		return (error);

	dmp = VFSTODEVFS(vp->v_mount);
	sx_xunlock(&dmp->dm_lock);

	de = vp->v_data;
	KASSERT(de != NULL, ("Null dirent in devfs_getattr vp=%p", vp));
	if (vp->v_type == VDIR) {
		/* Directory attributes live in the '.' entry. */
		de = de->de_dir;
		KASSERT(de != NULL,
		    ("Null dir dirent in devfs_getattr vp=%p", vp));
	}
	vap->va_uid = de->de_uid;
	vap->va_gid = de->de_gid;
	vap->va_mode = de->de_mode;
	if (vp->v_type == VLNK)
		vap->va_size = strlen(de->de_symlink);
	else if (vp->v_type == VDIR)
		vap->va_size = vap->va_bytes = DEV_BSIZE;
	else
		vap->va_size = 0;
	if (vp->v_type != VDIR)
		vap->va_bytes = 0;
	vap->va_blocksize = DEV_BSIZE;
	vap->va_type = vp->v_type;

	/*
	 * Timestamps still holding a near-zero value (within the first
	 * hour of the epoch) are presented as the boot time instead.
	 */
#define fix(aa)							\
	do {							\
		if ((aa).tv_sec <= 3600) {			\
			(aa).tv_sec = boottime.tv_sec;		\
			(aa).tv_nsec = boottime.tv_usec * 1000; \
		}						\
	} while (0)

	if (vp->v_type != VCHR)  {
		fix(de->de_atime);
		vap->va_atime = de->de_atime;
		fix(de->de_mtime);
		vap->va_mtime = de->de_mtime;
		fix(de->de_ctime);
		vap->va_ctime = de->de_ctime;
	} else {
		dev = vp->v_rdev;
		fix(dev->si_atime);
		vap->va_atime = dev->si_atime;
		fix(dev->si_mtime);
		vap->va_mtime = dev->si_mtime;
		fix(dev->si_ctime);
		vap->va_ctime = dev->si_ctime;

		vap->va_rdev = cdev2priv(dev)->cdp_inode;
	}
	vap->va_gen = 0;
	vap->va_flags = 0;
	vap->va_filerev = 0;
	vap->va_nlink = de->de_links;
	vap->va_fileid = de->de_inode;

	return (error);
}
768 
/*
 * fileops ioctl for character devices.  FIODTYPE and FIODGNAME are
 * answered here from generic device state; everything else goes to
 * d_ioctl().  A successful TIOCSCTTY additionally re-points the
 * session's controlling-terminal vnode to this one.
 */
/* ARGSUSED */
static int
devfs_ioctl_f(struct file *fp, u_long com, void *data, struct ucred *cred, struct thread *td)
{
	struct cdev *dev;
	struct cdevsw *dsw;
	struct vnode *vp;
	struct vnode *vpold;
	int error, i, ref;
	const char *p;
	struct fiodgname_arg *fgn;
	struct file *fpop;

	fpop = td->td_fpop;
	error = devfs_fp_check(fp, &dev, &dsw, &ref);
	if (error != 0) {
		/* Device is gone: fall back to the generic vnode path. */
		error = vnops.fo_ioctl(fp, com, data, cred, td);
		return (error);
	}

	if (com == FIODTYPE) {
		*(int *)data = dsw->d_flags & D_TYPEMASK;
		td->td_fpop = fpop;
		dev_relthread(dev, ref);
		return (0);
	} else if (com == FIODGNAME) {
		fgn = data;
		p = devtoname(dev);
		i = strlen(p) + 1;
		if (i > fgn->len)
			error = EINVAL;
		else
			error = copyout(p, fgn->buf, i);
		td->td_fpop = fpop;
		dev_relthread(dev, ref);
		return (error);
	}
	error = dsw->d_ioctl(dev, com, data, fp->f_flag, td);
	/*
	 * NOTE(review): td_fpop is cleared here rather than restored
	 * to fpop as the branches above do — confirm the asymmetry is
	 * intentional.
	 */
	td->td_fpop = NULL;
	dev_relthread(dev, ref);
	if (error == ENOIOCTL)
		error = ENOTTY;
	if (error == 0 && com == TIOCSCTTY) {
		vp = fp->f_vnode;

		/* Do nothing if reassigning same control tty */
		sx_slock(&proctree_lock);
		if (td->td_proc->p_session->s_ttyvp == vp) {
			sx_sunlock(&proctree_lock);
			return (0);
		}

		vpold = td->td_proc->p_session->s_ttyvp;
		VREF(vp);
		SESS_LOCK(td->td_proc->p_session);
		td->td_proc->p_session->s_ttyvp = vp;
		td->td_proc->p_session->s_ttydp = cdev2priv(dev);
		SESS_UNLOCK(td->td_proc->p_session);

		sx_sunlock(&proctree_lock);

		/* Get rid of reference to old control tty */
		if (vpold)
			vrele(vpold);
	}
	return (error);
}
836 
837 /* ARGSUSED */
838 static int
839 devfs_kqfilter_f(struct file *fp, struct knote *kn)
840 {
841 	struct cdev *dev;
842 	struct cdevsw *dsw;
843 	int error, ref;
844 	struct file *fpop;
845 	struct thread *td;
846 
847 	td = curthread;
848 	fpop = td->td_fpop;
849 	error = devfs_fp_check(fp, &dev, &dsw, &ref);
850 	if (error)
851 		return (error);
852 	error = dsw->d_kqfilter(dev, kn);
853 	td->td_fpop = fpop;
854 	dev_relthread(dev, ref);
855 	return (error);
856 }
857 
858 static inline int
859 devfs_prison_check(struct devfs_dirent *de, struct thread *td)
860 {
861 	struct cdev_priv *cdp;
862 	struct ucred *dcr;
863 	struct proc *p;
864 	int error;
865 
866 	cdp = de->de_cdp;
867 	if (cdp == NULL)
868 		return (0);
869 	dcr = cdp->cdp_c.si_cred;
870 	if (dcr == NULL)
871 		return (0);
872 
873 	error = prison_check(td->td_ucred, dcr);
874 	if (error == 0)
875 		return (0);
876 	/* We do, however, allow access to the controlling terminal */
877 	p = td->td_proc;
878 	PROC_LOCK(p);
879 	if (!(p->p_flag & P_CONTROLT)) {
880 		PROC_UNLOCK(p);
881 		return (error);
882 	}
883 	if (p->p_session->s_ttydp == cdp)
884 		error = 0;
885 	PROC_UNLOCK(p);
886 	return (error);
887 }
888 
/*
 * The workhorse for devfs_lookup().  Handles ".", "..", plain hits,
 * and on-demand device creation via the dev_clone eventhandler.
 * Entered with dmp->dm_lock held; *dm_unlock is cleared whenever that
 * lock was already released (devfs_allocv() consumes it) so the caller
 * knows not to unlock again.
 */
static int
devfs_lookupx(struct vop_lookup_args *ap, int *dm_unlock)
{
	struct componentname *cnp;
	struct vnode *dvp, **vpp;
	struct thread *td;
	struct devfs_dirent *de, *dd;
	struct devfs_dirent **dde;
	struct devfs_mount *dmp;
	struct cdev *cdev;
	int error, flags, nameiop, dvplocked;
	char specname[SPECNAMELEN + 1], *pname;

	cnp = ap->a_cnp;
	vpp = ap->a_vpp;
	dvp = ap->a_dvp;
	pname = cnp->cn_nameptr;
	td = cnp->cn_thread;
	flags = cnp->cn_flags;
	nameiop = cnp->cn_nameiop;
	dmp = VFSTODEVFS(dvp->v_mount);
	dd = dvp->v_data;
	*vpp = NULLVP;

	if ((flags & ISLASTCN) && nameiop == RENAME)
		return (EOPNOTSUPP);

	if (dvp->v_type != VDIR)
		return (ENOTDIR);

	if ((flags & ISDOTDOT) && (dvp->v_vflag & VV_ROOT))
		return (EIO);

	error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, td);
	if (error)
		return (error);

	/* "." is the directory itself. */
	if (cnp->cn_namelen == 1 && *pname == '.') {
		if ((flags & ISLASTCN) && nameiop != LOOKUP)
			return (EINVAL);
		*vpp = dvp;
		VREF(dvp);
		return (0);
	}

	if (flags & ISDOTDOT) {
		if ((flags & ISLASTCN) && nameiop != LOOKUP)
			return (EINVAL);
		de = devfs_parent_dirent(dd);
		if (de == NULL)
			return (ENOENT);
		/* Unlock the child before locking the parent (lock order). */
		dvplocked = VOP_ISLOCKED(dvp);
		VOP_UNLOCK(dvp, 0);
		error = devfs_allocv(de, dvp->v_mount,
		    cnp->cn_lkflags & LK_TYPE_MASK, vpp);
		*dm_unlock = 0;
		vn_lock(dvp, dvplocked | LK_RETRY);
		return (error);
	}

	dd = dvp->v_data;
	de = devfs_find(dd, cnp->cn_nameptr, cnp->cn_namelen, 0);
	while (de == NULL) {	/* While(...) so we can use break */

		if (nameiop == DELETE)
			return (ENOENT);

		/*
		 * OK, we didn't have an entry for the name we were asked for
		 * so we try to see if anybody can create it on demand.
		 */
		pname = devfs_fqpn(specname, dmp, dd, cnp);
		if (pname == NULL)
			break;

		cdev = NULL;
		DEVFS_DMP_HOLD(dmp);
		/* dm_lock must not be held across the clone handlers. */
		sx_xunlock(&dmp->dm_lock);
		sx_slock(&clone_drain_lock);
		EVENTHANDLER_INVOKE(dev_clone,
		    td->td_ucred, pname, strlen(pname), &cdev);
		sx_sunlock(&clone_drain_lock);

		if (cdev == NULL)
			sx_xlock(&dmp->dm_lock);
		else if (devfs_populate_vp(dvp) != 0) {
			/* Directory vnode went bad while we were unlocked. */
			*dm_unlock = 0;
			sx_xlock(&dmp->dm_lock);
			if (DEVFS_DMP_DROP(dmp)) {
				sx_xunlock(&dmp->dm_lock);
				devfs_unmount_final(dmp);
			} else
				sx_xunlock(&dmp->dm_lock);
			dev_rel(cdev);
			return (ENOENT);
		}
		if (DEVFS_DMP_DROP(dmp)) {
			/* Mount was destroyed while the clone ran. */
			*dm_unlock = 0;
			sx_xunlock(&dmp->dm_lock);
			devfs_unmount_final(dmp);
			if (cdev != NULL)
				dev_rel(cdev);
			return (ENOENT);
		}

		if (cdev == NULL)
			break;

		/* Map the cloned cdev back to this mount's dirent. */
		dev_lock();
		dde = &cdev2priv(cdev)->cdp_dirents[dmp->dm_idx];
		if (dde != NULL && *dde != NULL)
			de = *dde;
		dev_unlock();
		dev_rel(cdev);
		break;
	}

	if (de == NULL || de->de_flags & DE_WHITEOUT) {
		/* Let create/rename of the last component proceed. */
		if ((nameiop == CREATE || nameiop == RENAME) &&
		    (flags & (LOCKPARENT | WANTPARENT)) && (flags & ISLASTCN)) {
			cnp->cn_flags |= SAVENAME;
			return (EJUSTRETURN);
		}
		return (ENOENT);
	}

	if (devfs_prison_check(de, td))
		return (ENOENT);

	if ((cnp->cn_nameiop == DELETE) && (flags & ISLASTCN)) {
		error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred, td);
		if (error)
			return (error);
		if (*vpp == dvp) {
			VREF(dvp);
			*vpp = dvp;
			return (0);
		}
	}
	error = devfs_allocv(de, dvp->v_mount, cnp->cn_lkflags & LK_TYPE_MASK,
	    vpp);
	*dm_unlock = 0;
	return (error);
}
1033 
1034 static int
1035 devfs_lookup(struct vop_lookup_args *ap)
1036 {
1037 	int j;
1038 	struct devfs_mount *dmp;
1039 	int dm_unlock;
1040 
1041 	if (devfs_populate_vp(ap->a_dvp) != 0)
1042 		return (ENOTDIR);
1043 
1044 	dmp = VFSTODEVFS(ap->a_dvp->v_mount);
1045 	dm_unlock = 1;
1046 	j = devfs_lookupx(ap, &dm_unlock);
1047 	if (dm_unlock == 1)
1048 		sx_xunlock(&dmp->dm_lock);
1049 	return (j);
1050 }
1051 
/*
 * VOP_MKNOD for devfs: new device nodes cannot be fabricated here; the
 * only supported operation is reviving an existing character device
 * entry hidden with DE_WHITEOUT (e.g. after a user deleted it).  On
 * the success path devfs_allocv() consumes dm_lock for us.
 */
static int
devfs_mknod(struct vop_mknod_args *ap)
{
	struct componentname *cnp;
	struct vnode *dvp, **vpp;
	struct devfs_dirent *dd, *de;
	struct devfs_mount *dmp;
	int error;

	/*
	 * The only type of node we should be creating here is a
	 * character device, for anything else return EOPNOTSUPP.
	 */
	if (ap->a_vap->va_type != VCHR)
		return (EOPNOTSUPP);
	dvp = ap->a_dvp;
	dmp = VFSTODEVFS(dvp->v_mount);

	cnp = ap->a_cnp;
	vpp = ap->a_vpp;
	dd = dvp->v_data;

	error = ENOENT;
	sx_xlock(&dmp->dm_lock);
	TAILQ_FOREACH(de, &dd->de_dlist, de_list) {
		if (cnp->cn_namelen != de->de_dirent->d_namlen)
			continue;
		/* Skip entries of destroyed devices. */
		if (de->de_dirent->d_type == DT_CHR &&
		    (de->de_cdp->cdp_flags & CDP_ACTIVE) == 0)
			continue;
		if (bcmp(cnp->cn_nameptr, de->de_dirent->d_name,
		    de->de_dirent->d_namlen) != 0)
			continue;
		/* Name matches: revivable only if whited out. */
		if (de->de_flags & DE_WHITEOUT)
			break;
		goto notfound;
	}
	if (de == NULL)
		goto notfound;
	de->de_flags &= ~DE_WHITEOUT;
	/* devfs_allocv() drops dm_lock. */
	error = devfs_allocv(de, dvp->v_mount, LK_EXCLUSIVE, vpp);
	return (error);
notfound:
	sx_xunlock(&dmp->dm_lock);
	return (error);
}
1098 
/*
 * VOP_OPEN for character devices: call the driver's d_fdopen()/d_open()
 * with the vnode lock dropped, wire the struct file to the cdev, and
 * switch its fileops to devfs_ops_f so later operations bypass the
 * vnode layer.
 */
/* ARGSUSED */
static int
devfs_open(struct vop_open_args *ap)
{
	struct thread *td = ap->a_td;
	struct vnode *vp = ap->a_vp;
	struct cdev *dev = vp->v_rdev;
	struct file *fp = ap->a_fp;
	int error, ref, vlocked;
	struct cdevsw *dsw;
	struct file *fpop;
	struct mtx *mtxp;

	if (vp->v_type == VBLK)
		return (ENXIO);

	if (dev == NULL)
		return (ENXIO);

	/* Make this field valid before any I/O in d_open. */
	if (dev->si_iosize_max == 0)
		dev->si_iosize_max = DFLTPHYS;

	dsw = dev_refthread(dev, &ref);
	if (dsw == NULL)
		return (ENXIO);
	if (fp == NULL && dsw->d_fdopen != NULL) {
		/* d_fdopen needs a struct file to operate on. */
		dev_relthread(dev, ref);
		return (ENXIO);
	}

	/* Drop the vnode lock: d_open may sleep or re-enter devfs. */
	vlocked = VOP_ISLOCKED(vp);
	VOP_UNLOCK(vp, 0);

	fpop = td->td_fpop;
	td->td_fpop = fp;
	if (fp != NULL) {
		fp->f_data = dev;
		fp->f_vnode = vp;
	}
	if (dsw->d_fdopen != NULL)
		error = dsw->d_fdopen(dev, ap->a_mode, td, fp);
	else
		error = dsw->d_open(dev, ap->a_mode, S_IFCHR, td);
	/* cleanup any cdevpriv upon error */
	if (error != 0)
		devfs_clear_cdevpriv();
	td->td_fpop = fpop;

	vn_lock(vp, vlocked | LK_RETRY);
	dev_relthread(dev, ref);
	if (error != 0) {
		if (error == ERESTART)
			error = EINTR;
		return (error);
	}

#if 0	/* /dev/console */
	KASSERT(fp != NULL, ("Could not vnode bypass device on NULL fp"));
#else
	if (fp == NULL)
		return (error);
#endif
	if (fp->f_ops == &badfileops)
		finit(fp, fp->f_flag, DTYPE_VNODE, dev, &devfs_ops_f);
	mtxp = mtx_pool_find(mtxpool_sleep, fp);

	/*
	 * Hint to the dofilewrite() to not force the buffer draining
	 * on the writer to the file.  Most likely, the write would
	 * not need normal buffers.
	 */
	mtx_lock(mtxp);
	fp->f_vnread_flags |= FDEVFS_VNODE;
	mtx_unlock(mtxp);
	return (error);
}
1176 
/*
 * Report configurable filesystem parameters.  devfs only answers
 * _PC_MAC_PRESENT itself and defers everything else to
 * vop_stdpathconf().
 */
static int
devfs_pathconf(struct vop_pathconf_args *ap)
{

	switch (ap->a_name) {
	case _PC_MAC_PRESENT:
#ifdef MAC
		/*
		 * If MAC is enabled, devfs automatically supports
		 * trivial non-persistent label storage.
		 */
		*ap->a_retval = 1;
#else
		*ap->a_retval = 0;
#endif
		return (0);
	default:
		return (vop_stdpathconf(ap));
	}
	/* NOTREACHED */
}
1198 
/*
 * Fileops poll for devfs-backed files: hand the request to the
 * driver's d_poll, falling back to the generic vnode poll when
 * devfs_fp_check() fails.
 */
/* ARGSUSED */
static int
devfs_poll_f(struct file *fp, int events, struct ucred *cred, struct thread *td)
{
	struct cdev *dev;
	struct cdevsw *dsw;
	int error, ref;
	struct file *fpop;

	fpop = td->td_fpop;
	error = devfs_fp_check(fp, &dev, &dsw, &ref);
	if (error != 0) {
		error = vnops.fo_poll(fp, events, cred, td);
		return (error);
	}
	error = dsw->d_poll(dev, events, td);
	/* Restore the td_fpop saved on entry. */
	td->td_fpop = fpop;
	dev_relthread(dev, ref);
	return(error);
}
1219 
1220 /*
1221  * Print out the contents of a special device vnode.
1222  */
1223 static int
1224 devfs_print(struct vop_print_args *ap)
1225 {
1226 
1227 	printf("\tdev %s\n", devtoname(ap->a_vp->v_rdev));
1228 	return (0);
1229 }
1230 
/*
 * Fileops read for devfs-backed files: pass the request on to the
 * driver's d_read, falling back to the generic vnode read when
 * devfs_fp_check() fails.
 */
static int
devfs_read_f(struct file *fp, struct uio *uio, struct ucred *cred,
    int flags, struct thread *td)
{
	struct cdev *dev;
	int ioflag, error, ref;
	ssize_t resid;
	struct cdevsw *dsw;
	struct file *fpop;

	/* Refuse transfers larger than the devfs I/O size cap. */
	if (uio->uio_resid > DEVFS_IOSIZE_MAX)
		return (EINVAL);
	fpop = td->td_fpop;
	error = devfs_fp_check(fp, &dev, &dsw, &ref);
	if (error != 0) {
		error = vnops.fo_read(fp, uio, cred, flags, td);
		return (error);
	}
	resid = uio->uio_resid;
	/* Translate the fcntl.h flags into the I/O flags drivers expect. */
	ioflag = fp->f_flag & (O_NONBLOCK | O_DIRECT);
	if (ioflag & O_DIRECT)
		ioflag |= IO_DIRECT;

	foffset_lock_uio(fp, uio, flags | FOF_NOLOCK);
	error = dsw->d_read(dev, uio, ioflag);
	/* Update the access time if any data was transferred. */
	if (uio->uio_resid != resid || (error == 0 && resid != 0))
		devfs_timestamp(&dev->si_atime);
	/* Restore the td_fpop saved on entry. */
	td->td_fpop = fpop;
	dev_relthread(dev, ref);

	foffset_unlock_uio(fp, uio, flags | FOF_NOLOCK | FOF_NEXTOFF);
	return (error);
}
1264 
/*
 * Return directory entries for a devfs directory.  The devfs tree is
 * repopulated first; covered, whited-out and jail-invisible entries
 * are skipped.
 */
static int
devfs_readdir(struct vop_readdir_args *ap)
{
	int error;
	struct uio *uio;
	struct dirent *dp;
	struct devfs_dirent *dd;
	struct devfs_dirent *de;
	struct devfs_mount *dmp;
	off_t off;
	int *tmp_ncookies = NULL;

	if (ap->a_vp->v_type != VDIR)
		return (ENOTDIR);

	uio = ap->a_uio;
	if (uio->uio_offset < 0)
		return (EINVAL);

	/*
	 * XXX: This is a temporary hack to get around this filesystem not
	 * supporting cookies. We store the location of the ncookies pointer
	 * in a temporary variable before calling vfs_subr.c:vfs_read_dirent()
	 * and set the number of cookies to 0. We then set the pointer to
	 * NULL so that vfs_read_dirent doesn't try to call realloc() on
	 * ap->a_cookies. Later in this function, we restore the ap->a_ncookies
	 * pointer to its original location before returning to the caller.
	 */
	if (ap->a_ncookies != NULL) {
		tmp_ncookies = ap->a_ncookies;
		*ap->a_ncookies = 0;
		ap->a_ncookies = NULL;
	}

	dmp = VFSTODEVFS(ap->a_vp->v_mount);
	/* On success devfs_populate_vp() leaves dm_lock held (dropped below). */
	if (devfs_populate_vp(ap->a_vp) != 0) {
		if (tmp_ncookies != NULL)
			ap->a_ncookies = tmp_ncookies;
		return (EIO);
	}
	error = 0;
	de = ap->a_vp->v_data;
	off = 0;
	TAILQ_FOREACH(dd, &de->de_dlist, de_list) {
		/* Debug check against a freed (0xdeadc0de-poisoned) dirent. */
		KASSERT(dd->de_cdp != (void *)0xdeadc0de, ("%s %d\n", __func__, __LINE__));
		if (dd->de_flags & (DE_COVERED | DE_WHITEOUT))
			continue;
		/* Skip entries the caller's jail may not see. */
		if (devfs_prison_check(dd, uio->uio_td))
			continue;
		/* Subdirectories report the inode of the directory node. */
		if (dd->de_dirent->d_type == DT_DIR)
			de = dd->de_dir;
		else
			de = dd;
		dp = dd->de_dirent;
		if (dp->d_reclen > uio->uio_resid)
			break;
		dp->d_fileno = de->de_inode;
		/* Only copy out entries at or past the requested offset. */
		if (off >= uio->uio_offset) {
			error = vfs_read_dirent(ap, dp, off);
			if (error)
				break;
		}
		off += dp->d_reclen;
	}
	sx_xunlock(&dmp->dm_lock);
	uio->uio_offset = off;

	/*
	 * Restore ap->a_ncookies if it wasn't originally NULL in the first
	 * place.
	 */
	if (tmp_ncookies != NULL)
		ap->a_ncookies = tmp_ncookies;

	return (error);
}
1341 
1342 static int
1343 devfs_readlink(struct vop_readlink_args *ap)
1344 {
1345 	struct devfs_dirent *de;
1346 
1347 	de = ap->a_vp->v_data;
1348 	return (uiomove(de->de_symlink, strlen(de->de_symlink), ap->a_uio));
1349 }
1350 
/*
 * Reclaim a devfs vnode: detach it from its directory entry and from
 * the cdev, and drop the vnode's contribution to the device counts.
 */
static int
devfs_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct devfs_dirent *de;
	struct cdev *dev;

	/* Break the dirent <-> vnode linkage under the interlock. */
	mtx_lock(&devfs_de_interlock);
	de = vp->v_data;
	if (de != NULL) {
		de->de_vnode = NULL;
		vp->v_data = NULL;
	}
	mtx_unlock(&devfs_de_interlock);

	vnode_destroy_vobject(vp);

	VI_LOCK(vp);
	dev_lock();
	dev = vp->v_rdev;
	vp->v_rdev = NULL;

	/* Nothing more to do for non-device nodes. */
	if (dev == NULL) {
		dev_unlock();
		VI_UNLOCK(vp);
		return (0);
	}

	/* The device loses this vnode's usecount contribution. */
	dev->si_usecount -= vp->v_usecount;
	dev_unlock();
	VI_UNLOCK(vp);
	dev_rel(dev);
	return (0);
}
1385 
/*
 * Remove a directory entry.  Entries without a backing cdev
 * (de_cdp == NULL, i.e. user-created entries) are deleted outright;
 * device-backed entries are only marked with a whiteout so the
 * device is hidden rather than destroyed.
 */
static int
devfs_remove(struct vop_remove_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct vnode *vp = ap->a_vp;
	struct devfs_dirent *dd;
	struct devfs_dirent *de, *de_covered;
	struct devfs_mount *dmp = VFSTODEVFS(vp->v_mount);

	ASSERT_VOP_ELOCKED(dvp, "devfs_remove");
	ASSERT_VOP_ELOCKED(vp, "devfs_remove");

	sx_xlock(&dmp->dm_lock);
	dd = ap->a_dvp->v_data;
	de = vp->v_data;
	if (de->de_cdp == NULL) {
		TAILQ_REMOVE(&dd->de_dlist, de, de_list);
		if (de->de_dirent->d_type == DT_LNK) {
			/* Removing a user link uncovers any hidden entry. */
			de_covered = devfs_find(dd, de->de_dirent->d_name,
			    de->de_dirent->d_namlen, 0);
			if (de_covered != NULL)
				de_covered->de_flags &= ~DE_COVERED;
		}
		/*
		 * We need to unlock the vnodes because devfs_delete()
		 * may lock dvp; both are relocked before returning.
		 */
		VOP_UNLOCK(vp, 0);
		if (dvp != vp)
			VOP_UNLOCK(dvp, 0);
		devfs_delete(dmp, de, 0);
		sx_xunlock(&dmp->dm_lock);
		if (dvp != vp)
			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	} else {
		de->de_flags |= DE_WHITEOUT;
		sx_xunlock(&dmp->dm_lock);
	}
	return (0);
}
1424 
1425 /*
1426  * Revoke is called on a tty when a terminal session ends.  The vnode
1427  * is orphaned by setting v_op to deadfs so we need to let go of it
1428  * as well so that we create a new one next time around.
1429  *
1430  */
1431 static int
1432 devfs_revoke(struct vop_revoke_args *ap)
1433 {
1434 	struct vnode *vp = ap->a_vp, *vp2;
1435 	struct cdev *dev;
1436 	struct cdev_priv *cdp;
1437 	struct devfs_dirent *de;
1438 	int i;
1439 
1440 	KASSERT((ap->a_flags & REVOKEALL) != 0, ("devfs_revoke !REVOKEALL"));
1441 
1442 	dev = vp->v_rdev;
1443 	cdp = cdev2priv(dev);
1444 
1445 	dev_lock();
1446 	cdp->cdp_inuse++;
1447 	dev_unlock();
1448 
1449 	vhold(vp);
1450 	vgone(vp);
1451 	vdrop(vp);
1452 
1453 	VOP_UNLOCK(vp,0);
1454  loop:
1455 	for (;;) {
1456 		mtx_lock(&devfs_de_interlock);
1457 		dev_lock();
1458 		vp2 = NULL;
1459 		for (i = 0; i <= cdp->cdp_maxdirent; i++) {
1460 			de = cdp->cdp_dirents[i];
1461 			if (de == NULL)
1462 				continue;
1463 
1464 			vp2 = de->de_vnode;
1465 			if (vp2 != NULL) {
1466 				dev_unlock();
1467 				VI_LOCK(vp2);
1468 				mtx_unlock(&devfs_de_interlock);
1469 				if (vget(vp2, LK_EXCLUSIVE | LK_INTERLOCK,
1470 				    curthread))
1471 					goto loop;
1472 				vhold(vp2);
1473 				vgone(vp2);
1474 				vdrop(vp2);
1475 				vput(vp2);
1476 				break;
1477 			}
1478 		}
1479 		if (vp2 != NULL) {
1480 			continue;
1481 		}
1482 		dev_unlock();
1483 		mtx_unlock(&devfs_de_interlock);
1484 		break;
1485 	}
1486 	dev_lock();
1487 	cdp->cdp_inuse--;
1488 	if (!(cdp->cdp_flags & CDP_ACTIVE) && cdp->cdp_inuse == 0) {
1489 		TAILQ_REMOVE(&cdevp_list, cdp, cdp_list);
1490 		dev_unlock();
1491 		dev_rel(&cdp->cdp_c);
1492 	} else
1493 		dev_unlock();
1494 
1495 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1496 	return (0);
1497 }
1498 
/*
 * VOP_IOCTL for plain (non-device) devfs vnodes: repopulate the tree
 * and forward the command to the devfs rule machinery.
 */
static int
devfs_rioctl(struct vop_ioctl_args *ap)
{
	struct vnode *vp;
	struct devfs_mount *dmp;
	int error;

	vp = ap->a_vp;
	vn_lock(vp, LK_SHARED | LK_RETRY);
	if (vp->v_iflag & VI_DOOMED) {
		/* The vnode was reclaimed while we waited for the lock. */
		VOP_UNLOCK(vp, 0);
		return (EBADF);
	}
	dmp = VFSTODEVFS(vp->v_mount);
	sx_xlock(&dmp->dm_lock);
	VOP_UNLOCK(vp, 0);
	/* Keep dmp alive across devfs_populate(); an unmount may race. */
	DEVFS_DMP_HOLD(dmp);
	devfs_populate(dmp);
	if (DEVFS_DMP_DROP(dmp)) {
		/* We held the last reference; finish the unmount. */
		sx_xunlock(&dmp->dm_lock);
		devfs_unmount_final(dmp);
		return (ENOENT);
	}
	error = devfs_rules_ioctl(dmp, ap->a_command, ap->a_data, ap->a_td);
	sx_xunlock(&dmp->dm_lock);
	return (error);
}
1526 
1527 static int
1528 devfs_rread(struct vop_read_args *ap)
1529 {
1530 
1531 	if (ap->a_vp->v_type != VDIR)
1532 		return (EINVAL);
1533 	return (VOP_READDIR(ap->a_vp, ap->a_uio, ap->a_cred, NULL, NULL, NULL));
1534 }
1535 
/*
 * Set attributes on a devfs node.  Only owner, group, mode and times
 * may be changed; requests touching anything else get EINVAL.
 */
static int
devfs_setattr(struct vop_setattr_args *ap)
{
	struct devfs_dirent *de;
	struct vattr *vap;
	struct vnode *vp;
	struct thread *td;
	int c, error;
	uid_t uid;
	gid_t gid;

	vap = ap->a_vap;
	vp = ap->a_vp;
	td = curthread;
	/* All unsupported attributes must be left unset (VNOVAL). */
	if ((vap->va_type != VNON) ||
	    (vap->va_nlink != VNOVAL) ||
	    (vap->va_fsid != VNOVAL) ||
	    (vap->va_fileid != VNOVAL) ||
	    (vap->va_blocksize != VNOVAL) ||
	    (vap->va_flags != VNOVAL && vap->va_flags != 0) ||
	    (vap->va_rdev != VNOVAL) ||
	    ((int)vap->va_bytes != VNOVAL) ||
	    (vap->va_gen != VNOVAL)) {
		return (EINVAL);
	}

	de = vp->v_data;
	/* For a directory, operate on the directory node itself. */
	if (vp->v_type == VDIR)
		de = de->de_dir;

	error = c = 0;	/* 'c' tracks whether anything changed. */
	if (vap->va_uid == (uid_t)VNOVAL)
		uid = de->de_uid;
	else
		uid = vap->va_uid;
	if (vap->va_gid == (gid_t)VNOVAL)
		gid = de->de_gid;
	else
		gid = vap->va_gid;
	if (uid != de->de_uid || gid != de->de_gid) {
		/*
		 * Without PRIV_VFS_CHOWN the caller must own the node,
		 * keep the uid unchanged and only move the gid to a
		 * group it is a member of.
		 */
		if ((ap->a_cred->cr_uid != de->de_uid) || uid != de->de_uid ||
		    (gid != de->de_gid && !groupmember(gid, ap->a_cred))) {
			error = priv_check(td, PRIV_VFS_CHOWN);
			if (error)
				return (error);
		}
		de->de_uid = uid;
		de->de_gid = gid;
		c = 1;
	}

	if (vap->va_mode != (mode_t)VNOVAL) {
		/* Non-owners need PRIV_VFS_ADMIN to chmod. */
		if (ap->a_cred->cr_uid != de->de_uid) {
			error = priv_check(td, PRIV_VFS_ADMIN);
			if (error)
				return (error);
		}
		de->de_mode = vap->va_mode;
		c = 1;
	}

	if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) {
		error = vn_utimes_perm(vp, vap, ap->a_cred, td);
		if (error != 0)
			return (error);
		/* Device nodes keep times on the cdev, others on the dirent. */
		if (vap->va_atime.tv_sec != VNOVAL) {
			if (vp->v_type == VCHR)
				vp->v_rdev->si_atime = vap->va_atime;
			else
				de->de_atime = vap->va_atime;
		}
		if (vap->va_mtime.tv_sec != VNOVAL) {
			if (vp->v_type == VCHR)
				vp->v_rdev->si_mtime = vap->va_mtime;
			else
				de->de_mtime = vap->va_mtime;
		}
		c = 1;
	}

	if (c) {
		/*
		 * Something changed: bump the change time.  NOTE(review):
		 * for non-device nodes this stamps de_mtime; the dirent
		 * appears to have no separate ctime field — confirm.
		 */
		if (vp->v_type == VCHR)
			vfs_timestamp(&vp->v_rdev->si_ctime);
		else
			vfs_timestamp(&de->de_mtime);
	}
	return (0);
}
1624 
#ifdef MAC
/*
 * Update the MAC label on a devfs vnode and propagate the change
 * into the backing directory entry.
 */
static int
devfs_setlabel(struct vop_setlabel_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct devfs_dirent *de = vp->v_data;

	mac_vnode_relabel(ap->a_cred, vp, ap->a_label);
	mac_devfs_update(vp->v_mount, de, vp);
	return (0);
}
#endif
1641 
1642 static int
1643 devfs_stat_f(struct file *fp, struct stat *sb, struct ucred *cred, struct thread *td)
1644 {
1645 
1646 	return (vnops.fo_stat(fp, sb, cred, td));
1647 }
1648 
/*
 * Create a user symlink in devfs.  Requires PRIV_DEVFS_SYMLINK; a
 * same-named kernel-provided entry, if any, becomes covered.
 */
static int
devfs_symlink(struct vop_symlink_args *ap)
{
	int i, error;
	struct devfs_dirent *dd;
	struct devfs_dirent *de, *de_covered, *de_dotdot;
	struct devfs_mount *dmp;

	error = priv_check(curthread, PRIV_DEVFS_SYMLINK);
	if (error)
		return(error);
	dmp = VFSTODEVFS(ap->a_dvp->v_mount);
	/* On success devfs_populate_vp() leaves dm_lock held. */
	if (devfs_populate_vp(ap->a_dvp) != 0)
		return (ENOENT);

	dd = ap->a_dvp->v_data;
	de = devfs_newdirent(ap->a_cnp->cn_nameptr, ap->a_cnp->cn_namelen);
	de->de_flags = DE_USER;
	de->de_uid = 0;
	de->de_gid = 0;
	de->de_mode = 0755;
	de->de_inode = alloc_unr(devfs_inos);
	de->de_dir = dd;
	de->de_dirent->d_type = DT_LNK;
	i = strlen(ap->a_target) + 1;	/* include the terminating NUL */
	de->de_symlink = malloc(i, M_DEVFS, M_WAITOK);
	bcopy(ap->a_target, de->de_symlink, i);
#ifdef MAC
	mac_devfs_create_symlink(ap->a_cnp->cn_cred, dmp->dm_mount, dd, de);
#endif
	/*
	 * A same-named entry may already exist: another user entry is
	 * EEXIST, a kernel-provided one gets hidden (DE_COVERED).
	 */
	de_covered = devfs_find(dd, de->de_dirent->d_name,
	    de->de_dirent->d_namlen, 0);
	if (de_covered != NULL) {
		if ((de_covered->de_flags & DE_USER) != 0) {
			devfs_delete(dmp, de, DEVFS_DEL_NORECURSE);
			sx_xunlock(&dmp->dm_lock);
			return (EEXIST);
		}
		KASSERT((de_covered->de_flags & DE_COVERED) == 0,
		    ("devfs_symlink: entry %p already covered", de_covered));
		de_covered->de_flags |= DE_COVERED;
	}

	de_dotdot = TAILQ_FIRST(&dd->de_dlist);		/* "." */
	de_dotdot = TAILQ_NEXT(de_dotdot, de_list);	/* ".." */
	TAILQ_INSERT_AFTER(&dd->de_dlist, de_dotdot, de, de_list);
	devfs_dir_ref_de(dmp, dd);
	devfs_rules_apply(dmp, de);

	/* NOTE(review): dm_lock is presumably released by devfs_allocv(). */
	return (devfs_allocv(de, ap->a_dvp->v_mount, LK_EXCLUSIVE, ap->a_vpp));
}
1700 
1701 static int
1702 devfs_truncate_f(struct file *fp, off_t length, struct ucred *cred, struct thread *td)
1703 {
1704 
1705 	return (vnops.fo_truncate(fp, length, cred, td));
1706 }
1707 
/*
 * Fileops write for devfs-backed files: pass the request on to the
 * driver's d_write, falling back to the generic vnode write when
 * devfs_fp_check() fails.
 */
static int
devfs_write_f(struct file *fp, struct uio *uio, struct ucred *cred,
    int flags, struct thread *td)
{
	struct cdev *dev;
	int error, ioflag, ref;
	ssize_t resid;
	struct cdevsw *dsw;
	struct file *fpop;

	/* Refuse transfers larger than the devfs I/O size cap. */
	if (uio->uio_resid > DEVFS_IOSIZE_MAX)
		return (EINVAL);
	fpop = td->td_fpop;
	error = devfs_fp_check(fp, &dev, &dsw, &ref);
	if (error != 0) {
		error = vnops.fo_write(fp, uio, cred, flags, td);
		return (error);
	}
	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p", uio->uio_td, td));
	/* Translate the fcntl.h flags into the I/O flags drivers expect. */
	ioflag = fp->f_flag & (O_NONBLOCK | O_DIRECT | O_FSYNC);
	if (ioflag & O_DIRECT)
		ioflag |= IO_DIRECT;
	foffset_lock_uio(fp, uio, flags | FOF_NOLOCK);

	resid = uio->uio_resid;

	error = dsw->d_write(dev, uio, ioflag);
	/* Update the modification/change times if any data moved. */
	if (uio->uio_resid != resid || (error == 0 && resid != 0)) {
		devfs_timestamp(&dev->si_ctime);
		dev->si_mtime = dev->si_ctime;
	}
	/* Restore the td_fpop saved on entry. */
	td->td_fpop = fpop;
	dev_relthread(dev, ref);

	foffset_unlock_uio(fp, uio, flags | FOF_NOLOCK | FOF_NEXTOFF);
	return (error);
}
1745 
/*
 * Fileops mmap for devfs device files: validate the requested
 * protections against the file's open mode, then hand off to
 * vm_mmap_cdev()/vm_mmap_object().
 */
static int
devfs_mmap_f(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size,
    vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff,
    struct thread *td)
{
	struct cdev *dev;
	struct cdevsw *dsw;
	struct mount *mp;
	struct vnode *vp;
	struct file *fpop;
	vm_object_t object;
	vm_prot_t maxprot;
	int error, ref;

	vp = fp->f_vnode;

	/*
	 * Ensure that file and memory protections are
	 * compatible.
	 */
	mp = vp->v_mount;
	/* Start from EXECUTE unless the mount is noexec. */
	if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0)
		maxprot = VM_PROT_NONE;
	else
		maxprot = VM_PROT_EXECUTE;
	if ((fp->f_flag & FREAD) != 0)
		maxprot |= VM_PROT_READ;
	else if ((prot & VM_PROT_READ) != 0)
		return (EACCES);

	/*
	 * If we are sharing potential changes via MAP_SHARED and we
	 * are trying to get write permission although we opened it
	 * without asking for it, bail out.
	 *
	 * Note that most character devices always share mappings.
	 * The one exception is that D_MMAP_ANON devices
	 * (i.e. /dev/zero) permit private writable mappings.
	 *
	 * Rely on vm_mmap_cdev() to fail invalid MAP_PRIVATE requests
	 * as well as updating maxprot to permit writing for
	 * D_MMAP_ANON devices rather than doing that here.
	 */
	if ((flags & MAP_SHARED) != 0) {
		if ((fp->f_flag & FWRITE) != 0)
			maxprot |= VM_PROT_WRITE;
		else if ((prot & VM_PROT_WRITE) != 0)
			return (EACCES);
	}
	/* Clamp to the capability rights on the descriptor. */
	maxprot &= cap_maxprot;

	fpop = td->td_fpop;
	error = devfs_fp_check(fp, &dev, &dsw, &ref);
	if (error != 0)
		return (error);

	error = vm_mmap_cdev(td, size, prot, &maxprot, &flags, dev, dsw, &foff,
	    &object);
	/* Restore td_fpop and drop the device reference before mapping. */
	td->td_fpop = fpop;
	dev_relthread(dev, ref);
	if (error != 0)
		return (error);

	error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object,
	    foff, FALSE, td);
	/* On failure drop the object reference obtained above. */
	if (error != 0)
		vm_object_deallocate(object);
	return (error);
}
1815 
1816 dev_t
1817 dev2udev(struct cdev *x)
1818 {
1819 	if (x == NULL)
1820 		return (NODEV);
1821 	return (cdev2priv(x)->cdp_inode);
1822 }
1823 
/*
 * File operations installed by devfs_open() (via finit()) when a
 * device file uses the vnode bypass; I/O goes straight to the
 * driver, with fallbacks to the generic vnode fileops.
 */
static struct fileops devfs_ops_f = {
	.fo_read =	devfs_read_f,
	.fo_write =	devfs_write_f,
	.fo_truncate =	devfs_truncate_f,
	.fo_ioctl =	devfs_ioctl_f,
	.fo_poll =	devfs_poll_f,
	.fo_kqfilter =	devfs_kqfilter_f,
	.fo_stat =	devfs_stat_f,
	.fo_close =	devfs_close_f,
	.fo_chmod =	vn_chmod,
	.fo_chown =	vn_chown,
	.fo_sendfile =	vn_sendfile,
	.fo_seek =	vn_seek,
	.fo_fill_kinfo = vn_fill_kinfo,
	.fo_mmap =	devfs_mmap_f,
	.fo_flags =	DFLAG_PASSABLE | DFLAG_SEEKABLE
};
1841 
/*
 * Vnode operations for devfs nodes that are not device special files
 * (directories, symlinks, the rules ioctl); device special files use
 * devfs_specops below.
 */
static struct vop_vector devfs_vnodeops = {
	.vop_default =		&default_vnodeops,

	.vop_access =		devfs_access,
	.vop_getattr =		devfs_getattr,
	.vop_ioctl =		devfs_rioctl,
	.vop_lookup =		devfs_lookup,
	.vop_mknod =		devfs_mknod,
	.vop_pathconf =		devfs_pathconf,
	.vop_read =		devfs_rread,
	.vop_readdir =		devfs_readdir,
	.vop_readlink =		devfs_readlink,
	.vop_reclaim =		devfs_reclaim,
	.vop_remove =		devfs_remove,
	.vop_revoke =		devfs_revoke,
	.vop_setattr =		devfs_setattr,
#ifdef MAC
	.vop_setlabel =		devfs_setlabel,
#endif
	.vop_symlink =		devfs_symlink,
	.vop_vptocnp =		devfs_vptocnp,
};
1864 
/*
 * Vnode operations for device special files.  Namespace-changing
 * operations panic; read/write/poll use the dead_* handlers since
 * actual I/O goes through devfs_ops_f via the vnode bypass set up
 * in devfs_open().
 */
struct vop_vector devfs_specops = {
	.vop_default =		&default_vnodeops,

	.vop_access =		devfs_access,
	.vop_bmap =		VOP_PANIC,
	.vop_close =		devfs_close,
	.vop_create =		VOP_PANIC,
	.vop_fsync =		devfs_fsync,
	.vop_getattr =		devfs_getattr,
	.vop_link =		VOP_PANIC,
	.vop_mkdir =		VOP_PANIC,
	.vop_mknod =		VOP_PANIC,
	.vop_open =		devfs_open,
	.vop_pathconf =		devfs_pathconf,
	.vop_poll =		dead_poll,
	.vop_print =		devfs_print,
	.vop_read =		dead_read,
	.vop_readdir =		VOP_PANIC,
	.vop_readlink =		VOP_PANIC,
	.vop_reallocblks =	VOP_PANIC,
	.vop_reclaim =		devfs_reclaim,
	.vop_remove =		devfs_remove,
	.vop_rename =		VOP_PANIC,
	.vop_revoke =		devfs_revoke,
	.vop_rmdir =		VOP_PANIC,
	.vop_setattr =		devfs_setattr,
#ifdef MAC
	.vop_setlabel =		devfs_setlabel,
#endif
	.vop_strategy =		VOP_PANIC,
	.vop_symlink =		VOP_PANIC,
	.vop_vptocnp =		devfs_vptocnp,
	.vop_write =		dead_write,
};
1899 
1900 /*
1901  * Our calling convention to the device drivers used to be that we passed
1902  * vnode.h IO_* flags to read()/write(), but we're moving to fcntl.h O_
1903  * flags instead since that's what open(), close() and ioctl() takes and
1904  * we don't really want vnode.h in device drivers.
1905  * We solved the source compatibility by redefining some vnode flags to
1906  * be the same as the fcntl ones and by sending down the bitwise OR of
1907  * the respective fcntl/vnode flags.  These CTASSERTS make sure nobody
1908  * pulls the rug out under this.
1909  */
1910 CTASSERT(O_NONBLOCK == IO_NDELAY);
1911 CTASSERT(O_FSYNC == IO_SYNC);
1912