xref: /freebsd/sys/fs/devfs/devfs_vnops.c (revision f7c4bd95ba735bd6a5454b4953945a99cefbb80c)
1 /*-
2  * Copyright (c) 2000-2004
3  *	Poul-Henning Kamp.  All rights reserved.
4  * Copyright (c) 1989, 1992-1993, 1995
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * This code is derived from software donated to Berkeley by
8  * Jan-Simon Pendry.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Neither the name of the University nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  *
31  *	@(#)kernfs_vnops.c	8.15 (Berkeley) 5/21/95
32  * From: FreeBSD: src/sys/miscfs/kernfs/kernfs_vnops.c 1.43
33  *
34  * $FreeBSD$
35  */
36 
37 /*
38  * TODO:
39  *	remove empty directories
40  *	mkdir: do we want it?
41  */
42 
43 #include "opt_mac.h"
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/conf.h>
48 #include <sys/dirent.h>
49 #include <sys/fcntl.h>
50 #include <sys/file.h>
51 #include <sys/filedesc.h>
52 #include <sys/filio.h>
53 #include <sys/kernel.h>
54 #include <sys/lock.h>
55 #include <sys/malloc.h>
56 #include <sys/mount.h>
57 #include <sys/namei.h>
58 #include <sys/priv.h>
59 #include <sys/proc.h>
60 #include <sys/stat.h>
61 #include <sys/sx.h>
62 #include <sys/time.h>
63 #include <sys/ttycom.h>
64 #include <sys/unistd.h>
65 #include <sys/vnode.h>
66 
67 static struct vop_vector devfs_vnodeops;
68 static struct vop_vector devfs_specops;
69 static struct fileops devfs_ops_f;
70 
71 #include <fs/devfs/devfs.h>
72 #include <fs/devfs/devfs_int.h>
73 
74 #include <security/mac/mac_framework.h>
75 
76 static MALLOC_DEFINE(M_CDEVPDATA, "DEVFSP", "Metainfo for cdev-fp data");
77 
78 struct mtx	devfs_de_interlock;
79 MTX_SYSINIT(devfs_de_interlock, &devfs_de_interlock, "devfs interlock", MTX_DEF);
80 struct sx	clone_drain_lock;
81 SX_SYSINIT(clone_drain_lock, &clone_drain_lock, "clone events drain lock");
82 struct mtx	cdevpriv_mtx;
83 MTX_SYSINIT(cdevpriv_mtx, &cdevpriv_mtx, "cdevpriv lock", MTX_DEF);
84 
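/*
 * Resolve the struct cdev and cdevsw behind an open devfs file and take
 * a thread reference on the device.  ENXIO is returned if the vnode no
 * longer refers to the device the file was opened on, or if the driver
 * has gone away.  On success curthread->td_fpop is set so the driver
 * can find the file (and its cdevpriv data); the caller must clear it
 * and call dev_relthread() when done.
 */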
85 static int
86 devfs_fp_check(struct file *fp, struct cdev **devp, struct cdevsw **dswp)
87 {
88 
89 	*dswp = devvn_refthread(fp->f_vnode, devp);
90 	if (*devp != fp->f_data) {
91 		if (*dswp != NULL)
92 			dev_relthread(*devp);
93 		return (ENXIO);
94 	}
95 	KASSERT((*devp)->si_refcount > 0,
96 	    ("devfs: un-referenced struct cdev *(%s)", devtoname(*devp)));
97 	if (*dswp == NULL)
98 		return (ENXIO);
99 	curthread->td_fpop = fp;
100 	return (0);
101 }
102 
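/*
 * The cdevpriv functions below let a driver attach per-open (per struct
 * file) private data from within its cdevsw methods, where
 * curthread->td_fpop identifies the file being operated on.  The
 * destructor given to devfs_set_cdevpriv() runs when the data is torn
 * down, either via devfs_clear_cdevpriv() or when the file itself is
 * dropped (devfs_fpdrop()).
 */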
103 int
104 devfs_get_cdevpriv(void **datap)
105 {
106 	struct file *fp;
107 	struct cdev_privdata *p;
108 	int error;
109 
110 	fp = curthread->td_fpop;
111 	if (fp == NULL)
112 		return (EBADF);
113 	mtx_lock(&cdevpriv_mtx);
114 	p = fp->f_cdevpriv;
115 	mtx_unlock(&cdevpriv_mtx);
116 	if (p != NULL) {
117 		error = 0;
118 		*datap = p->cdpd_data;
119 	} else
120 		error = ENOENT;
121 	return (error);
122 }
123 
124 int
125 devfs_set_cdevpriv(void *priv, cdevpriv_dtr_t priv_dtr)
126 {
127 	struct file *fp;
128 	struct cdev_priv *cdp;
129 	struct cdev_privdata *p;
130 	int error;
131 
132 	fp = curthread->td_fpop;
133 	if (fp == NULL)
134 		return (ENOENT);
135 	cdp = cdev2priv((struct cdev *)fp->f_data);
136 	p = malloc(sizeof(struct cdev_privdata), M_CDEVPDATA, M_WAITOK);
137 	p->cdpd_data = priv;
138 	p->cdpd_dtr = priv_dtr;
139 	p->cdpd_fp = fp;
140 	mtx_lock(&cdevpriv_mtx);
141 	if (fp->f_cdevpriv == NULL) {
142 		LIST_INSERT_HEAD(&cdp->cdp_fdpriv, p, cdpd_list);
143 		fp->f_cdevpriv = p;
144 		mtx_unlock(&cdevpriv_mtx);
145 		error = 0;
146 	} else {
147 		mtx_unlock(&cdevpriv_mtx);
148 		free(p, M_CDEVPDATA);
149 		error = EBUSY;
150 	}
151 	return (error);
152 }
153 
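/*
 * Called with cdevpriv_mtx held; unhooks the private data from its file
 * and the per-device list, drops the mutex and runs the destructor
 * before freeing it.
 */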
154 void
155 devfs_destroy_cdevpriv(struct cdev_privdata *p)
156 {
157 
158 	mtx_assert(&cdevpriv_mtx, MA_OWNED);
159 	p->cdpd_fp->f_cdevpriv = NULL;
160 	LIST_REMOVE(p, cdpd_list);
161 	mtx_unlock(&cdevpriv_mtx);
162 	(p->cdpd_dtr)(p->cdpd_data);
163 	free(p, M_CDEVPDATA);
164 }
165 
166 void
167 devfs_fpdrop(struct file *fp)
168 {
169 	struct cdev_privdata *p;
170 
171 	mtx_lock(&cdevpriv_mtx);
172 	if ((p = fp->f_cdevpriv) == NULL) {
173 		mtx_unlock(&cdevpriv_mtx);
174 		return;
175 	}
176 	devfs_destroy_cdevpriv(p);
177 }
178 
179 void
180 devfs_clear_cdevpriv(void)
181 {
182 	struct file *fp;
183 
184 	fp = curthread->td_fpop;
185 	if (fp == NULL)
186 		return;
187 	devfs_fpdrop(fp);
188 }
189 
190 /*
191  * Construct the fully qualified path name relative to the mountpoint
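 * The name is assembled backwards into the caller-supplied buffer, which
 * must hold at least SPECNAMELEN + 1 bytes.  A pointer into the buffer
 * is returned, or NULL if the name does not fit.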
192  */
193 static char *
194 devfs_fqpn(char *buf, struct vnode *dvp, struct componentname *cnp)
195 {
196 	int i;
197 	struct devfs_dirent *de, *dd;
198 	struct devfs_mount *dmp;
199 
200 	dmp = VFSTODEVFS(dvp->v_mount);
201 	dd = dvp->v_data;
202 	i = SPECNAMELEN;
203 	buf[i] = '\0';
204 	i -= cnp->cn_namelen;
205 	if (i < 0)
206 		 return (NULL);
207 	bcopy(cnp->cn_nameptr, buf + i, cnp->cn_namelen);
208 	de = dd;
209 	while (de != dmp->dm_rootdir) {
210 		i--;
211 		if (i < 0)
212 			 return (NULL);
213 		buf[i] = '/';
214 		i -= de->de_dirent->d_namlen;
215 		if (i < 0)
216 			 return (NULL);
217 		bcopy(de->de_dirent->d_name, buf + i,
218 		    de->de_dirent->d_namlen);
219 		de = TAILQ_FIRST(&de->de_dlist);	/* "." */
220 		de = TAILQ_NEXT(de, de_list);		/* ".." */
221 		de = de->de_dir;
222 	}
223 	return (buf + i);
224 }
225 
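/*
 * Drop the hold counts that devfs_allocv() put on the dirent and the
 * mount.  A non-zero return means the dirent was doomed in the
 * meantime; in that case, or when drop_dm_lock is set, dm_lock is no
 * longer held on return.
 */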
226 static int
227 devfs_allocv_drop_refs(int drop_dm_lock, struct devfs_mount *dmp,
228 	struct devfs_dirent *de)
229 {
230 	int not_found;
231 
232 	not_found = 0;
233 	if (de->de_flags & DE_DOOMED)
234 		not_found = 1;
235 	if (DEVFS_DE_DROP(de)) {
236 		KASSERT(not_found == 1, ("DEVFS de dropped but not doomed"));
237 		devfs_dirent_free(de);
238 	}
239 	if (DEVFS_DMP_DROP(dmp)) {
240 		KASSERT(not_found == 1,
241 			("DEVFS mount struct freed before dirent"));
242 		not_found = 2;
243 		sx_xunlock(&dmp->dm_lock);
244 		devfs_unmount_final(dmp);
245 	}
246 	if (not_found == 1 || (drop_dm_lock && not_found != 2))
247 		sx_unlock(&dmp->dm_lock);
248 	return (not_found);
249 }
250 
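/*
 * Destructor passed to insmntque1(): if the vnode cannot be attached to
 * the mount point, detach it from its dirent again and dispose of it.
 */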
251 static void
252 devfs_insmntque_dtr(struct vnode *vp, void *arg)
253 {
254 	struct devfs_dirent *de;
255 
256 	de = (struct devfs_dirent *)arg;
257 	mtx_lock(&devfs_de_interlock);
258 	vp->v_data = NULL;
259 	de->de_vnode = NULL;
260 	mtx_unlock(&devfs_de_interlock);
261 	vgone(vp);
262 	vput(vp);
263 }
264 
265 /*
266  * devfs_allocv shall be entered with dmp->dm_lock held, and it drops
267  * it on return.
268  */
269 int
270 devfs_allocv(struct devfs_dirent *de, struct mount *mp, struct vnode **vpp, struct thread *td)
271 {
272 	int error;
273 	struct vnode *vp;
274 	struct cdev *dev;
275 	struct devfs_mount *dmp;
276 
277 	KASSERT(td == curthread, ("devfs_allocv: td != curthread"));
278 	dmp = VFSTODEVFS(mp);
279 	if (de->de_flags & DE_DOOMED) {
280 		sx_xunlock(&dmp->dm_lock);
281 		return (ENOENT);
282 	}
283 	DEVFS_DE_HOLD(de);
284 	DEVFS_DMP_HOLD(dmp);
285 	mtx_lock(&devfs_de_interlock);
286 	vp = de->de_vnode;
287 	if (vp != NULL) {
288 		VI_LOCK(vp);
289 		mtx_unlock(&devfs_de_interlock);
290 		sx_xunlock(&dmp->dm_lock);
291 		error = vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td);
292 		sx_xlock(&dmp->dm_lock);
293 		if (devfs_allocv_drop_refs(0, dmp, de)) {
294 			if (error == 0)
295 				vput(vp);
296 			return (ENOENT);
297 		}
298 		else if (error) {
299 			sx_xunlock(&dmp->dm_lock);
300 			return (error);
301 		}
302 		sx_xunlock(&dmp->dm_lock);
303 		*vpp = vp;
304 		return (0);
305 	}
306 	mtx_unlock(&devfs_de_interlock);
307 	if (de->de_dirent->d_type == DT_CHR) {
308 		if (!(de->de_cdp->cdp_flags & CDP_ACTIVE)) {
309 			devfs_allocv_drop_refs(1, dmp, de);
310 			return (ENOENT);
311 		}
312 		dev = &de->de_cdp->cdp_c;
313 	} else {
314 		dev = NULL;
315 	}
316 	error = getnewvnode("devfs", mp, &devfs_vnodeops, &vp);
317 	if (error != 0) {
318 		devfs_allocv_drop_refs(1, dmp, de);
319 		printf("devfs_allocv: failed to allocate new vnode\n");
320 		return (error);
321 	}
322 
323 	if (de->de_dirent->d_type == DT_CHR) {
324 		vp->v_type = VCHR;
325 		VI_LOCK(vp);
326 		dev_lock();
327 		dev_refl(dev);
328 		/* XXX: v_rdev should be protected by the vnode lock */
329 		vp->v_rdev = dev;
330 		KASSERT(vp->v_usecount == 1,
331 		    ("%s %d (%d)\n", __func__, __LINE__, vp->v_usecount));
332 		dev->si_usecount += vp->v_usecount;
333 		dev_unlock();
334 		VI_UNLOCK(vp);
335 		vp->v_op = &devfs_specops;
336 	} else if (de->de_dirent->d_type == DT_DIR) {
337 		vp->v_type = VDIR;
338 	} else if (de->de_dirent->d_type == DT_LNK) {
339 		vp->v_type = VLNK;
340 	} else {
341 		vp->v_type = VBAD;
342 	}
343 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWITNESS);
344 	mtx_lock(&devfs_de_interlock);
345 	vp->v_data = de;
346 	de->de_vnode = vp;
347 	mtx_unlock(&devfs_de_interlock);
348 	error = insmntque1(vp, mp, devfs_insmntque_dtr, de);
349 	if (error != 0) {
350 		(void) devfs_allocv_drop_refs(1, dmp, de);
351 		return (error);
352 	}
353 	if (devfs_allocv_drop_refs(0, dmp, de)) {
354 		vput(vp);
355 		return (ENOENT);
356 	}
357 #ifdef MAC
358 	mac_devfs_vnode_associate(mp, de, vp);
359 #endif
360 	sx_xunlock(&dmp->dm_lock);
361 	*vpp = vp;
362 	return (0);
363 }
364 
365 static int
366 devfs_access(struct vop_access_args *ap)
367 {
368 	struct vnode *vp = ap->a_vp;
369 	struct devfs_dirent *de;
370 	int error;
371 
372 	de = vp->v_data;
373 	if (vp->v_type == VDIR)
374 		de = de->de_dir;
375 
376 	error = vaccess(vp->v_type, de->de_mode, de->de_uid, de->de_gid,
377 	    ap->a_mode, ap->a_cred, NULL);
378 	if (!error)
379 		return (error);
380 	if (error != EACCES)
381 		return (error);
382 	/* We do, however, allow access to the controlling terminal */
383 	if (!(ap->a_td->td_proc->p_flag & P_CONTROLT))
384 		return (error);
385 	if (ap->a_td->td_proc->p_session->s_ttyvp == de->de_vnode)
386 		return (0);
387 	return (error);
388 }
389 
390 /* ARGSUSED */
391 static int
392 devfs_advlock(struct vop_advlock_args *ap)
393 {
394 
395 	return (ap->a_flags & F_FLOCK ? EOPNOTSUPP : EINVAL);
396 }
397 
398 /* ARGSUSED */
399 static int
400 devfs_close(struct vop_close_args *ap)
401 {
402 	struct vnode *vp = ap->a_vp, *oldvp;
403 	struct thread *td = ap->a_td;
404 	struct cdev *dev = vp->v_rdev;
405 	struct cdevsw *dsw;
406 	int vp_locked, error;
407 
408 	/*
409 	 * Hack: a tty device that is a controlling terminal
410 	 * has a reference from the session structure.
411 	 * We cannot easily tell that a character device is
412 	 * a controlling terminal, unless it is the closing
413 	 * process' controlling terminal.  In that case,
414 	 * if the reference count is 2 (this last descriptor
415 	 * plus the session), release the reference from the session.
416 	 */
417 	oldvp = NULL;
418 	sx_xlock(&proctree_lock);
419 	if (td && vp == td->td_proc->p_session->s_ttyvp) {
420 		SESS_LOCK(td->td_proc->p_session);
421 		VI_LOCK(vp);
422 		if (count_dev(dev) == 2 && (vp->v_iflag & VI_DOOMED) == 0) {
423 			td->td_proc->p_session->s_ttyvp = NULL;
424 			oldvp = vp;
425 		}
426 		VI_UNLOCK(vp);
427 		SESS_UNLOCK(td->td_proc->p_session);
428 	}
429 	sx_xunlock(&proctree_lock);
430 	if (oldvp != NULL)
431 		vrele(oldvp);
432 	/*
433 	 * We do not want to really close the device if it
434 	 * is still in use unless we are trying to close it
435 	 * forcibly. Since every use (buffer, vnode, swap, cmap)
436 	 * holds a reference to the vnode, and because we mark
437 	 * any other vnodes that alias this device, when the
438 	 * sum of the reference counts on all the aliased
439  * vnodes descends to one, we are performing the last close.
440 	 */
441 	dsw = dev_refthread(dev);
442 	if (dsw == NULL)
443 		return (ENXIO);
444 	VI_LOCK(vp);
445 	if (vp->v_iflag & VI_DOOMED) {
446 		/* Forced close. */
447 	} else if (dsw->d_flags & D_TRACKCLOSE) {
448 		/* Keep device updated on status. */
449 	} else if (count_dev(dev) > 1) {
450 		VI_UNLOCK(vp);
451 		dev_relthread(dev);
452 		return (0);
453 	}
454 	vholdl(vp);
455 	VI_UNLOCK(vp);
456 	vp_locked = VOP_ISLOCKED(vp);
457 	VOP_UNLOCK(vp, 0);
458 	KASSERT(dev->si_refcount > 0,
459 	    ("devfs_close() on un-referenced struct cdev *(%s)", devtoname(dev)));
460 	error = dsw->d_close(dev, ap->a_fflag, S_IFCHR, td);
461 	dev_relthread(dev);
462 	vn_lock(vp, vp_locked | LK_RETRY);
463 	vdrop(vp);
464 	return (error);
465 }
466 
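/*
 * fileops close routine: set td_fpop around the generic vnode close so
 * the driver's d_close method can still find the struct file and its
 * cdevpriv data.
 */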
467 static int
468 devfs_close_f(struct file *fp, struct thread *td)
469 {
470 	int error;
471 
472 	curthread->td_fpop = fp;
473 	error = vnops.fo_close(fp, td);
474 	curthread->td_fpop = NULL;
475 	return (error);
476 }
477 
478 /* ARGSUSED */
479 static int
480 devfs_fsync(struct vop_fsync_args *ap)
481 {
482 	if (!vn_isdisk(ap->a_vp, NULL))
483 		return (0);
484 
485 	return (vop_stdfsync(ap));
486 }
487 
488 static int
489 devfs_getattr(struct vop_getattr_args *ap)
490 {
491 	struct vnode *vp = ap->a_vp;
492 	struct vattr *vap = ap->a_vap;
493 	int error = 0;
494 	struct devfs_dirent *de;
495 	struct cdev *dev;
496 
497 	de = vp->v_data;
498 	KASSERT(de != NULL, ("Null dirent in devfs_getattr vp=%p", vp));
499 	if (vp->v_type == VDIR) {
500 		de = de->de_dir;
501 		KASSERT(de != NULL,
502 		    ("Null dir dirent in devfs_getattr vp=%p", vp));
503 	}
504 	bzero((caddr_t) vap, sizeof(*vap));
505 	vattr_null(vap);
506 	vap->va_uid = de->de_uid;
507 	vap->va_gid = de->de_gid;
508 	vap->va_mode = de->de_mode;
509 	if (vp->v_type == VLNK)
510 		vap->va_size = strlen(de->de_symlink);
511 	else if (vp->v_type == VDIR)
512 		vap->va_size = vap->va_bytes = DEV_BSIZE;
513 	else
514 		vap->va_size = 0;
515 	if (vp->v_type != VDIR)
516 		vap->va_bytes = 0;
517 	vap->va_blocksize = DEV_BSIZE;
518 	vap->va_type = vp->v_type;
519 
520 #define fix(aa)							\
521 	do {							\
522 		if ((aa).tv_sec <= 3600) {			\
523 			(aa).tv_sec = boottime.tv_sec;		\
524 			(aa).tv_nsec = boottime.tv_usec * 1000; \
525 		}						\
526 	} while (0)
527 
528 	if (vp->v_type != VCHR)  {
529 		fix(de->de_atime);
530 		vap->va_atime = de->de_atime;
531 		fix(de->de_mtime);
532 		vap->va_mtime = de->de_mtime;
533 		fix(de->de_ctime);
534 		vap->va_ctime = de->de_ctime;
535 	} else {
536 		dev = vp->v_rdev;
537 		fix(dev->si_atime);
538 		vap->va_atime = dev->si_atime;
539 		fix(dev->si_mtime);
540 		vap->va_mtime = dev->si_mtime;
541 		fix(dev->si_ctime);
542 		vap->va_ctime = dev->si_ctime;
543 
544 		vap->va_rdev = cdev2priv(dev)->cdp_inode;
545 	}
546 	vap->va_gen = 0;
547 	vap->va_flags = 0;
548 	vap->va_nlink = de->de_links;
549 	vap->va_fileid = de->de_inode;
550 
551 	return (error);
552 }
553 
554 /* ARGSUSED */
555 static int
556 devfs_ioctl_f(struct file *fp, u_long com, void *data, struct ucred *cred, struct thread *td)
557 {
558 	struct cdev *dev;
559 	struct cdevsw *dsw;
560 	struct vnode *vp;
561 	struct vnode *vpold;
562 	int error, i;
563 	const char *p;
564 	struct fiodgname_arg *fgn;
565 
566 	error = devfs_fp_check(fp, &dev, &dsw);
567 	if (error)
568 		return (error);
569 
570 	if (com == FIODTYPE) {
571 		*(int *)data = dsw->d_flags & D_TYPEMASK;
572 		td->td_fpop = NULL;
573 		dev_relthread(dev);
574 		return (0);
575 	} else if (com == FIODGNAME) {
576 		fgn = data;
577 		p = devtoname(dev);
578 		i = strlen(p) + 1;
579 		if (i > fgn->len)
580 			error = EINVAL;
581 		else
582 			error = copyout(p, fgn->buf, i);
583 		td->td_fpop = NULL;
584 		dev_relthread(dev);
585 		return (error);
586 	}
587 	error = dsw->d_ioctl(dev, com, data, fp->f_flag, td);
588 	td->td_fpop = NULL;
589 	dev_relthread(dev);
590 	if (error == ENOIOCTL)
591 		error = ENOTTY;
592 	if (error == 0 && com == TIOCSCTTY) {
593 		vp = fp->f_vnode;
594 
595 		/* Do nothing if reassigning same control tty */
596 		sx_slock(&proctree_lock);
597 		if (td->td_proc->p_session->s_ttyvp == vp) {
598 			sx_sunlock(&proctree_lock);
599 			return (0);
600 		}
601 
602 		mtx_lock(&Giant);	/* XXX TTY */
603 
604 		vpold = td->td_proc->p_session->s_ttyvp;
605 		VREF(vp);
606 		SESS_LOCK(td->td_proc->p_session);
607 		td->td_proc->p_session->s_ttyvp = vp;
608 		SESS_UNLOCK(td->td_proc->p_session);
609 
610 		sx_sunlock(&proctree_lock);
611 
612 		/* Get rid of reference to old control tty */
613 		if (vpold)
614 			vrele(vpold);
615 		mtx_unlock(&Giant);	/* XXX TTY */
616 	}
617 	return (error);
618 }
619 
620 /* ARGSUSED */
621 static int
622 devfs_kqfilter_f(struct file *fp, struct knote *kn)
623 {
624 	struct cdev *dev;
625 	struct cdevsw *dsw;
626 	int error;
627 
628 	error = devfs_fp_check(fp, &dev, &dsw);
629 	if (error)
630 		return (error);
631 	error = dsw->d_kqfilter(dev, kn);
632 	curthread->td_fpop = NULL;
633 	dev_relthread(dev);
634 	return (error);
635 }
636 
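/*
 * Lookup worker, entered with dmp->dm_lock held.  *dm_unlock is cleared
 * whenever the lock has already been dropped on our behalf (by
 * devfs_allocv() or a racing unmount), so devfs_lookup() knows not to
 * unlock it again.
 */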
637 static int
638 devfs_lookupx(struct vop_lookup_args *ap, int *dm_unlock)
639 {
640 	struct componentname *cnp;
641 	struct vnode *dvp, **vpp;
642 	struct thread *td;
643 	struct devfs_dirent *de, *dd;
644 	struct devfs_dirent **dde;
645 	struct devfs_mount *dmp;
646 	struct cdev *cdev;
647 	int error, flags, nameiop;
648 	char specname[SPECNAMELEN + 1], *pname;
649 
650 	cnp = ap->a_cnp;
651 	vpp = ap->a_vpp;
652 	dvp = ap->a_dvp;
653 	pname = cnp->cn_nameptr;
654 	td = cnp->cn_thread;
655 	flags = cnp->cn_flags;
656 	nameiop = cnp->cn_nameiop;
657 	dmp = VFSTODEVFS(dvp->v_mount);
658 	dd = dvp->v_data;
659 	*vpp = NULLVP;
660 
661 	if ((flags & ISLASTCN) && nameiop == RENAME)
662 		return (EOPNOTSUPP);
663 
664 	if (dvp->v_type != VDIR)
665 		return (ENOTDIR);
666 
667 	if ((flags & ISDOTDOT) && (dvp->v_vflag & VV_ROOT))
668 		return (EIO);
669 
670 	error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, td);
671 	if (error)
672 		return (error);
673 
674 	if (cnp->cn_namelen == 1 && *pname == '.') {
675 		if ((flags & ISLASTCN) && nameiop != LOOKUP)
676 			return (EINVAL);
677 		*vpp = dvp;
678 		VREF(dvp);
679 		return (0);
680 	}
681 
682 	if (flags & ISDOTDOT) {
683 		if ((flags & ISLASTCN) && nameiop != LOOKUP)
684 			return (EINVAL);
685 		VOP_UNLOCK(dvp, 0);
686 		de = TAILQ_FIRST(&dd->de_dlist);	/* "." */
687 		de = TAILQ_NEXT(de, de_list);		/* ".." */
688 		de = de->de_dir;
689 		error = devfs_allocv(de, dvp->v_mount, vpp, td);
690 		*dm_unlock = 0;
691 		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
692 		return (error);
693 	}
694 
695 	DEVFS_DMP_HOLD(dmp);
696 	devfs_populate(dmp);
697 	if (DEVFS_DMP_DROP(dmp)) {
698 		*dm_unlock = 0;
699 		sx_xunlock(&dmp->dm_lock);
700 		devfs_unmount_final(dmp);
701 		return (ENOENT);
702 	}
703 	dd = dvp->v_data;
704 	de = devfs_find(dd, cnp->cn_nameptr, cnp->cn_namelen);
705 	while (de == NULL) {	/* While(...) so we can use break */
706 
707 		if (nameiop == DELETE)
708 			return (ENOENT);
709 
710 		/*
711 		 * OK, we didn't have an entry for the name we were asked for
712 		 * so we try to see if anybody can create it on demand.
713 		 */
714 		pname = devfs_fqpn(specname, dvp, cnp);
715 		if (pname == NULL)
716 			break;
717 
718 		cdev = NULL;
719 		DEVFS_DMP_HOLD(dmp);
720 		sx_xunlock(&dmp->dm_lock);
721 		sx_slock(&clone_drain_lock);
722 		EVENTHANDLER_INVOKE(dev_clone,
723 		    td->td_ucred, pname, strlen(pname), &cdev);
724 		sx_sunlock(&clone_drain_lock);
725 		sx_xlock(&dmp->dm_lock);
726 		if (DEVFS_DMP_DROP(dmp)) {
727 			*dm_unlock = 0;
728 			sx_xunlock(&dmp->dm_lock);
729 			devfs_unmount_final(dmp);
730 			return (ENOENT);
731 		}
732 		if (cdev == NULL)
733 			break;
734 
735 		DEVFS_DMP_HOLD(dmp);
736 		devfs_populate(dmp);
737 		if (DEVFS_DMP_DROP(dmp)) {
738 			*dm_unlock = 0;
739 			sx_xunlock(&dmp->dm_lock);
740 			devfs_unmount_final(dmp);
741 			return (ENOENT);
742 		}
743 
744 		dev_lock();
745 		dde = &cdev2priv(cdev)->cdp_dirents[dmp->dm_idx];
746 		if (dde != NULL && *dde != NULL)
747 			de = *dde;
748 		dev_unlock();
749 		dev_rel(cdev);
750 		break;
751 	}
752 
753 	if (de == NULL || de->de_flags & DE_WHITEOUT) {
754 		if ((nameiop == CREATE || nameiop == RENAME) &&
755 		    (flags & (LOCKPARENT | WANTPARENT)) && (flags & ISLASTCN)) {
756 			cnp->cn_flags |= SAVENAME;
757 			return (EJUSTRETURN);
758 		}
759 		return (ENOENT);
760 	}
761 
762 	if ((cnp->cn_nameiop == DELETE) && (flags & ISLASTCN)) {
763 		error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred, td);
764 		if (error)
765 			return (error);
766 		if (*vpp == dvp) {
767 			VREF(dvp);
768 			*vpp = dvp;
769 			return (0);
770 		}
771 	}
772 	error = devfs_allocv(de, dvp->v_mount, vpp, td);
773 	*dm_unlock = 0;
774 	return (error);
775 }
776 
777 static int
778 devfs_lookup(struct vop_lookup_args *ap)
779 {
780 	int j;
781 	struct devfs_mount *dmp;
782 	int dm_unlock;
783 
784 	dmp = VFSTODEVFS(ap->a_dvp->v_mount);
785 	dm_unlock = 1;
786 	sx_xlock(&dmp->dm_lock);
787 	j = devfs_lookupx(ap, &dm_unlock);
788 	if (dm_unlock == 1)
789 		sx_xunlock(&dmp->dm_lock);
790 	return (j);
791 }
792 
793 static int
794 devfs_mknod(struct vop_mknod_args *ap)
795 {
796 	struct componentname *cnp;
797 	struct vnode *dvp, **vpp;
798 	struct thread *td;
799 	struct devfs_dirent *dd, *de;
800 	struct devfs_mount *dmp;
801 	int error;
802 
803 	/*
804 	 * The only type of node we should be creating here is a
805 	 * character device, for anything else return EOPNOTSUPP.
806 	 */
807 	if (ap->a_vap->va_type != VCHR)
808 		return (EOPNOTSUPP);
809 	dvp = ap->a_dvp;
810 	dmp = VFSTODEVFS(dvp->v_mount);
811 
812 	cnp = ap->a_cnp;
813 	vpp = ap->a_vpp;
814 	td = cnp->cn_thread;
815 	dd = dvp->v_data;
816 
817 	error = ENOENT;
818 	sx_xlock(&dmp->dm_lock);
819 	TAILQ_FOREACH(de, &dd->de_dlist, de_list) {
820 		if (cnp->cn_namelen != de->de_dirent->d_namlen)
821 			continue;
822 		if (bcmp(cnp->cn_nameptr, de->de_dirent->d_name,
823 		    de->de_dirent->d_namlen) != 0)
824 			continue;
825 		if (de->de_flags & DE_WHITEOUT)
826 			break;
827 		goto notfound;
828 	}
829 	if (de == NULL)
830 		goto notfound;
831 	de->de_flags &= ~DE_WHITEOUT;
832 	error = devfs_allocv(de, dvp->v_mount, vpp, td);
833 	return (error);
834 notfound:
835 	sx_xunlock(&dmp->dm_lock);
836 	return (error);
837 }
838 
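/*
 * VOP_OPEN for devfs: the vnode lock is dropped around the driver's
 * d_fdopen()/d_open() call, and when a struct file is supplied its
 * fileops are switched to devfs_ops_f so that subsequent I/O on the
 * descriptor bypasses the vnode layer.
 */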
839 /* ARGSUSED */
840 static int
841 devfs_open(struct vop_open_args *ap)
842 {
843 	struct thread *td = ap->a_td;
844 	struct vnode *vp = ap->a_vp;
845 	struct cdev *dev = vp->v_rdev;
846 	struct file *fp = ap->a_fp;
847 	int error;
848 	struct cdevsw *dsw;
849 
850 	if (vp->v_type == VBLK)
851 		return (ENXIO);
852 
853 	if (dev == NULL)
854 		return (ENXIO);
855 
856 	/* Make this field valid before any I/O in d_open. */
857 	if (dev->si_iosize_max == 0)
858 		dev->si_iosize_max = DFLTPHYS;
859 
860 	dsw = dev_refthread(dev);
861 	if (dsw == NULL)
862 		return (ENXIO);
863 
864 	/* XXX: Special casing of ttys for deadfs.  Probably redundant. */
865 	if (dsw->d_flags & D_TTY)
866 		vp->v_vflag |= VV_ISTTY;
867 
868 	VOP_UNLOCK(vp, 0);
869 
870 	if (fp != NULL) {
871 		td->td_fpop = fp;
872 		fp->f_data = dev;
873 	}
874 	if (dsw->d_fdopen != NULL)
875 		error = dsw->d_fdopen(dev, ap->a_mode, td, fp);
876 	else
877 		error = dsw->d_open(dev, ap->a_mode, S_IFCHR, td);
878 	td->td_fpop = NULL;
879 
880 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
881 
882 	dev_relthread(dev);
883 
884 	if (error)
885 		return (error);
886 
887 #if 0	/* /dev/console */
888 	KASSERT(fp != NULL,
889 	     ("Could not vnode bypass device on NULL fp"));
890 #else
891 	if (fp == NULL)
892 		return (error);
893 #endif
894 	if (fp->f_ops == &badfileops)
895 		finit(fp, fp->f_flag, DTYPE_VNODE, dev, &devfs_ops_f);
896 	return (error);
897 }
898 
899 static int
900 devfs_pathconf(struct vop_pathconf_args *ap)
901 {
902 
903 	switch (ap->a_name) {
904 	case _PC_MAC_PRESENT:
905 #ifdef MAC
906 		/*
907 		 * If MAC is enabled, devfs automatically supports
908 		 * trivial non-persistent label storage.
909 		 */
910 		*ap->a_retval = 1;
911 #else
912 		*ap->a_retval = 0;
913 #endif
914 		return (0);
915 	default:
916 		return (vop_stdpathconf(ap));
917 	}
918 	/* NOTREACHED */
919 }
920 
921 /* ARGSUSED */
922 static int
923 devfs_poll_f(struct file *fp, int events, struct ucred *cred, struct thread *td)
924 {
925 	struct cdev *dev;
926 	struct cdevsw *dsw;
927 	int error;
928 
929 	error = devfs_fp_check(fp, &dev, &dsw);
930 	if (error)
931 		return (error);
932 	error = dsw->d_poll(dev, events, td);
933 	curthread->td_fpop = NULL;
934 	dev_relthread(dev);
935 	return(error);
936 }
937 
938 /*
939  * Print out the contents of a special device vnode.
940  */
941 static int
942 devfs_print(struct vop_print_args *ap)
943 {
944 
945 	printf("\tdev %s\n", devtoname(ap->a_vp->v_rdev));
946 	return (0);
947 }
948 
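/*
 * fileops read routine: translate the fcntl-style flags on the file
 * into ioflags (see the CTASSERTs at the end of this file), hand the
 * request to the driver's d_read and update the device atime when a
 * transfer took place.
 */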
949 /* ARGSUSED */
950 static int
951 devfs_read_f(struct file *fp, struct uio *uio, struct ucred *cred, int flags, struct thread *td)
952 {
953 	struct cdev *dev;
954 	int ioflag, error, resid;
955 	struct cdevsw *dsw;
956 
957 	error = devfs_fp_check(fp, &dev, &dsw);
958 	if (error)
959 		return (error);
960 	resid = uio->uio_resid;
961 	ioflag = fp->f_flag & (O_NONBLOCK | O_DIRECT);
962 	if (ioflag & O_DIRECT)
963 		ioflag |= IO_DIRECT;
964 
965 	if ((flags & FOF_OFFSET) == 0)
966 		uio->uio_offset = fp->f_offset;
967 
968 	error = dsw->d_read(dev, uio, ioflag);
969 	if (uio->uio_resid != resid || (error == 0 && resid != 0))
970 		vfs_timestamp(&dev->si_atime);
971 	curthread->td_fpop = NULL;
972 	dev_relthread(dev);
973 
974 	if ((flags & FOF_OFFSET) == 0)
975 		fp->f_offset = uio->uio_offset;
976 	fp->f_nextoff = uio->uio_offset;
977 	return (error);
978 }
979 
980 static int
981 devfs_readdir(struct vop_readdir_args *ap)
982 {
983 	int error;
984 	struct uio *uio;
985 	struct dirent *dp;
986 	struct devfs_dirent *dd;
987 	struct devfs_dirent *de;
988 	struct devfs_mount *dmp;
989 	off_t off, oldoff;
990 	int *tmp_ncookies = NULL;
991 
992 	if (ap->a_vp->v_type != VDIR)
993 		return (ENOTDIR);
994 
995 	uio = ap->a_uio;
996 	if (uio->uio_offset < 0)
997 		return (EINVAL);
998 
999 	/*
1000 	 * XXX: This is a temporary hack to get around this filesystem not
1001 	 * supporting cookies. We store the location of the ncookies pointer
1002 	 * in a temporary variable before calling vfs_subr.c:vfs_read_dirent()
1003 	 * and set the number of cookies to 0. We then set the pointer to
1004 	 * NULL so that vfs_read_dirent doesn't try to call realloc() on
1005 	 * ap->a_cookies. Later in this function, we restore the ap->a_ncookies
1006 	 * pointer to its original location before returning to the caller.
1007 	 */
1008 	if (ap->a_ncookies != NULL) {
1009 		tmp_ncookies = ap->a_ncookies;
1010 		*ap->a_ncookies = 0;
1011 		ap->a_ncookies = NULL;
1012 	}
1013 
1014 	dmp = VFSTODEVFS(ap->a_vp->v_mount);
1015 	sx_xlock(&dmp->dm_lock);
1016 	DEVFS_DMP_HOLD(dmp);
1017 	devfs_populate(dmp);
1018 	if (DEVFS_DMP_DROP(dmp)) {
1019 		sx_xunlock(&dmp->dm_lock);
1020 		devfs_unmount_final(dmp);
1021 		if (tmp_ncookies != NULL)
1022 			ap->a_ncookies = tmp_ncookies;
1023 		return (EIO);
1024 	}
1025 	error = 0;
1026 	de = ap->a_vp->v_data;
1027 	off = 0;
1028 	oldoff = uio->uio_offset;
1029 	TAILQ_FOREACH(dd, &de->de_dlist, de_list) {
1030 		KASSERT(dd->de_cdp != (void *)0xdeadc0de, ("%s %d\n", __func__, __LINE__));
1031 		if (dd->de_flags & DE_WHITEOUT)
1032 			continue;
1033 		if (dd->de_dirent->d_type == DT_DIR)
1034 			de = dd->de_dir;
1035 		else
1036 			de = dd;
1037 		dp = dd->de_dirent;
1038 		if (dp->d_reclen > uio->uio_resid)
1039 			break;
1040 		dp->d_fileno = de->de_inode;
1041 		if (off >= uio->uio_offset) {
1042 			error = vfs_read_dirent(ap, dp, off);
1043 			if (error)
1044 				break;
1045 		}
1046 		off += dp->d_reclen;
1047 	}
1048 	sx_xunlock(&dmp->dm_lock);
1049 	uio->uio_offset = off;
1050 
1051 	/*
1052 	 * Restore ap->a_ncookies if it wasn't NULL in the first
1053 	 * place.
1054 	 */
1055 	if (tmp_ncookies != NULL)
1056 		ap->a_ncookies = tmp_ncookies;
1057 
1058 	return (error);
1059 }
1060 
1061 static int
1062 devfs_readlink(struct vop_readlink_args *ap)
1063 {
1064 	struct devfs_dirent *de;
1065 
1066 	de = ap->a_vp->v_data;
1067 	return (uiomove(de->de_symlink, strlen(de->de_symlink), ap->a_uio));
1068 }
1069 
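/*
 * VOP_RECLAIM: break the linkage between the vnode and its dirent and,
 * for device vnodes, subtract this vnode's usecount from si_usecount
 * and drop the device reference.
 */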
1070 static int
1071 devfs_reclaim(struct vop_reclaim_args *ap)
1072 {
1073 	struct vnode *vp = ap->a_vp;
1074 	struct devfs_dirent *de;
1075 	struct cdev *dev;
1076 
1077 	mtx_lock(&devfs_de_interlock);
1078 	de = vp->v_data;
1079 	if (de != NULL) {
1080 		de->de_vnode = NULL;
1081 		vp->v_data = NULL;
1082 	}
1083 	mtx_unlock(&devfs_de_interlock);
1084 
1085 	vnode_destroy_vobject(vp);
1086 
1087 	VI_LOCK(vp);
1088 	dev_lock();
1089 	dev = vp->v_rdev;
1090 	vp->v_rdev = NULL;
1091 
1092 	if (dev == NULL) {
1093 		dev_unlock();
1094 		VI_UNLOCK(vp);
1095 		return (0);
1096 	}
1097 
1098 	dev->si_usecount -= vp->v_usecount;
1099 	dev_unlock();
1100 	VI_UNLOCK(vp);
1101 	dev_rel(dev);
1102 	return (0);
1103 }
1104 
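/*
 * VOP_REMOVE: an entry without a backing cdev (e.g. a user-created
 * symlink) is deleted outright; an entry backed by a cdev is only
 * marked DE_WHITEOUT so the name stays hidden until it is revived by
 * devfs_mknod().
 */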
1105 static int
1106 devfs_remove(struct vop_remove_args *ap)
1107 {
1108 	struct vnode *vp = ap->a_vp;
1109 	struct devfs_dirent *dd;
1110 	struct devfs_dirent *de;
1111 	struct devfs_mount *dmp = VFSTODEVFS(vp->v_mount);
1112 
1113 	sx_xlock(&dmp->dm_lock);
1114 	dd = ap->a_dvp->v_data;
1115 	de = vp->v_data;
1116 	if (de->de_cdp == NULL) {
1117 		TAILQ_REMOVE(&dd->de_dlist, de, de_list);
1118 		devfs_delete(dmp, de, 1);
1119 	} else {
1120 		de->de_flags |= DE_WHITEOUT;
1121 	}
1122 	sx_xunlock(&dmp->dm_lock);
1123 	return (0);
1124 }
1125 
1126 /*
1127  * Revoke is called on a tty when a terminal session ends.  The vnode
1128  * is orphaned by setting v_op to deadfs, so we must let go of it as
1129  * well so that a new one is created next time around.
1130  *
1131  */
1132 static int
1133 devfs_revoke(struct vop_revoke_args *ap)
1134 {
1135 	struct vnode *vp = ap->a_vp, *vp2;
1136 	struct cdev *dev;
1137 	struct cdev_priv *cdp;
1138 	struct devfs_dirent *de;
1139 	int i;
1140 
1141 	KASSERT((ap->a_flags & REVOKEALL) != 0, ("devfs_revoke !REVOKEALL"));
1142 
1143 	dev = vp->v_rdev;
1144 	cdp = cdev2priv(dev);
1145 
1146 	dev_lock();
1147 	cdp->cdp_inuse++;
1148 	dev_unlock();
1149 
1150 	vhold(vp);
1151 	vgone(vp);
1152 	vdrop(vp);
1153 
1154 	VOP_UNLOCK(vp,0);
1155  loop:
1156 	for (;;) {
1157 		mtx_lock(&devfs_de_interlock);
1158 		dev_lock();
1159 		vp2 = NULL;
1160 		for (i = 0; i <= cdp->cdp_maxdirent; i++) {
1161 			de = cdp->cdp_dirents[i];
1162 			if (de == NULL)
1163 				continue;
1164 
1165 			vp2 = de->de_vnode;
1166 			if (vp2 != NULL) {
1167 				dev_unlock();
1168 				VI_LOCK(vp2);
1169 				mtx_unlock(&devfs_de_interlock);
1170 				if (vget(vp2, LK_EXCLUSIVE | LK_INTERLOCK,
1171 				    curthread))
1172 					goto loop;
1173 				vhold(vp2);
1174 				vgone(vp2);
1175 				vdrop(vp2);
1176 				vput(vp2);
1177 				break;
1178 			}
1179 		}
1180 		if (vp2 != NULL) {
1181 			continue;
1182 		}
1183 		dev_unlock();
1184 		mtx_unlock(&devfs_de_interlock);
1185 		break;
1186 	}
1187 	dev_lock();
1188 	cdp->cdp_inuse--;
1189 	if (!(cdp->cdp_flags & CDP_ACTIVE) && cdp->cdp_inuse == 0) {
1190 		TAILQ_REMOVE(&cdevp_list, cdp, cdp_list);
1191 		dev_unlock();
1192 		dev_rel(&cdp->cdp_c);
1193 	} else
1194 		dev_unlock();
1195 
1196 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1197 	return (0);
1198 }
1199 
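/*
 * ioctl on a plain (non-device) devfs vnode: repopulate the mount and
 * pass the request on to the devfs rule machinery.
 */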
1200 static int
1201 devfs_rioctl(struct vop_ioctl_args *ap)
1202 {
1203 	int error;
1204 	struct devfs_mount *dmp;
1205 
1206 	dmp = VFSTODEVFS(ap->a_vp->v_mount);
1207 	sx_xlock(&dmp->dm_lock);
1208 	DEVFS_DMP_HOLD(dmp);
1209 	devfs_populate(dmp);
1210 	if (DEVFS_DMP_DROP(dmp)) {
1211 		sx_xunlock(&dmp->dm_lock);
1212 		devfs_unmount_final(dmp);
1213 		return (ENOENT);
1214 	}
1215 	error = devfs_rules_ioctl(dmp, ap->a_command, ap->a_data, ap->a_td);
1216 	sx_xunlock(&dmp->dm_lock);
1217 	return (error);
1218 }
1219 
1220 static int
1221 devfs_rread(struct vop_read_args *ap)
1222 {
1223 
1224 	if (ap->a_vp->v_type != VDIR)
1225 		return (EINVAL);
1226 	return (VOP_READDIR(ap->a_vp, ap->a_uio, ap->a_cred, NULL, NULL, NULL));
1227 }
1228 
1229 static int
1230 devfs_setattr(struct vop_setattr_args *ap)
1231 {
1232 	struct devfs_dirent *de;
1233 	struct vattr *vap;
1234 	struct vnode *vp;
1235 	int c, error;
1236 	uid_t uid;
1237 	gid_t gid;
1238 
1239 	vap = ap->a_vap;
1240 	vp = ap->a_vp;
1241 	if ((vap->va_type != VNON) ||
1242 	    (vap->va_nlink != VNOVAL) ||
1243 	    (vap->va_fsid != VNOVAL) ||
1244 	    (vap->va_fileid != VNOVAL) ||
1245 	    (vap->va_blocksize != VNOVAL) ||
1246 	    (vap->va_flags != VNOVAL && vap->va_flags != 0) ||
1247 	    (vap->va_rdev != VNOVAL) ||
1248 	    ((int)vap->va_bytes != VNOVAL) ||
1249 	    (vap->va_gen != VNOVAL)) {
1250 		return (EINVAL);
1251 	}
1252 
1253 	de = vp->v_data;
1254 	if (vp->v_type == VDIR)
1255 		de = de->de_dir;
1256 
1257 	error = c = 0;
1258 	if (vap->va_uid == (uid_t)VNOVAL)
1259 		uid = de->de_uid;
1260 	else
1261 		uid = vap->va_uid;
1262 	if (vap->va_gid == (gid_t)VNOVAL)
1263 		gid = de->de_gid;
1264 	else
1265 		gid = vap->va_gid;
1266 	if (uid != de->de_uid || gid != de->de_gid) {
1267 		if ((ap->a_cred->cr_uid != de->de_uid) || uid != de->de_uid ||
1268 		    (gid != de->de_gid && !groupmember(gid, ap->a_cred))) {
1269 			error = priv_check(ap->a_td, PRIV_VFS_CHOWN);
1270 			if (error)
1271 				return (error);
1272 		}
1273 		de->de_uid = uid;
1274 		de->de_gid = gid;
1275 		c = 1;
1276 	}
1277 
1278 	if (vap->va_mode != (mode_t)VNOVAL) {
1279 		if (ap->a_cred->cr_uid != de->de_uid) {
1280 			error = priv_check(ap->a_td, PRIV_VFS_ADMIN);
1281 			if (error)
1282 				return (error);
1283 		}
1284 		de->de_mode = vap->va_mode;
1285 		c = 1;
1286 	}
1287 
1288 	if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) {
1289 		/* See the comment in ufs_vnops::ufs_setattr(). */
1290 		if ((error = VOP_ACCESS(vp, VADMIN, ap->a_cred, ap->a_td)) &&
1291 		    ((vap->va_vaflags & VA_UTIMES_NULL) == 0 ||
1292 		    (error = VOP_ACCESS(vp, VWRITE, ap->a_cred, ap->a_td))))
1293 			return (error);
1294 		if (vap->va_atime.tv_sec != VNOVAL) {
1295 			if (vp->v_type == VCHR)
1296 				vp->v_rdev->si_atime = vap->va_atime;
1297 			else
1298 				de->de_atime = vap->va_atime;
1299 		}
1300 		if (vap->va_mtime.tv_sec != VNOVAL) {
1301 			if (vp->v_type == VCHR)
1302 				vp->v_rdev->si_mtime = vap->va_mtime;
1303 			else
1304 				de->de_mtime = vap->va_mtime;
1305 		}
1306 		c = 1;
1307 	}
1308 
1309 	if (c) {
1310 		if (vp->v_type == VCHR)
1311 			vfs_timestamp(&vp->v_rdev->si_ctime);
1312 		else
1313 			vfs_timestamp(&de->de_mtime);
1314 	}
1315 	return (0);
1316 }
1317 
1318 #ifdef MAC
1319 static int
1320 devfs_setlabel(struct vop_setlabel_args *ap)
1321 {
1322 	struct vnode *vp;
1323 	struct devfs_dirent *de;
1324 
1325 	vp = ap->a_vp;
1326 	de = vp->v_data;
1327 
1328 	mac_vnode_relabel(ap->a_cred, vp, ap->a_label);
1329 	mac_devfs_update(vp->v_mount, de, vp);
1330 
1331 	return (0);
1332 }
1333 #endif
1334 
1335 static int
1336 devfs_stat_f(struct file *fp, struct stat *sb, struct ucred *cred, struct thread *td)
1337 {
1338 
1339 	return (vnops.fo_stat(fp, sb, cred, td));
1340 }
1341 
1342 static int
1343 devfs_symlink(struct vop_symlink_args *ap)
1344 {
1345 	int i, error;
1346 	struct devfs_dirent *dd;
1347 	struct devfs_dirent *de;
1348 	struct devfs_mount *dmp;
1349 	struct thread *td;
1350 
1351 	td = ap->a_cnp->cn_thread;
1352 	KASSERT(td == curthread, ("devfs_symlink: td != curthread"));
1353 
1354 	error = priv_check(td, PRIV_DEVFS_SYMLINK);
1355 	if (error)
1356 		return(error);
1357 	dmp = VFSTODEVFS(ap->a_dvp->v_mount);
1358 	dd = ap->a_dvp->v_data;
1359 	de = devfs_newdirent(ap->a_cnp->cn_nameptr, ap->a_cnp->cn_namelen);
1360 	de->de_uid = 0;
1361 	de->de_gid = 0;
1362 	de->de_mode = 0755;
1363 	de->de_inode = alloc_unr(devfs_inos);
1364 	de->de_dirent->d_type = DT_LNK;
1365 	i = strlen(ap->a_target) + 1;
1366 	de->de_symlink = malloc(i, M_DEVFS, M_WAITOK);
1367 	bcopy(ap->a_target, de->de_symlink, i);
1368 	sx_xlock(&dmp->dm_lock);
1369 #ifdef MAC
1370 	mac_devfs_create_symlink(ap->a_cnp->cn_cred, dmp->dm_mount, dd, de);
1371 #endif
1372 	TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
1373 	return (devfs_allocv(de, ap->a_dvp->v_mount, ap->a_vpp, td));
1374 }
1375 
1376 static int
1377 devfs_truncate_f(struct file *fp, off_t length, struct ucred *cred, struct thread *td)
1378 {
1379 
1380 	return (vnops.fo_truncate(fp, length, cred, td));
1381 }
1382 
1383 /* ARGSUSED */
1384 static int
1385 devfs_write_f(struct file *fp, struct uio *uio, struct ucred *cred, int flags, struct thread *td)
1386 {
1387 	struct cdev *dev;
1388 	int error, ioflag, resid;
1389 	struct cdevsw *dsw;
1390 
1391 	error = devfs_fp_check(fp, &dev, &dsw);
1392 	if (error)
1393 		return (error);
1394 	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p", uio->uio_td, td));
1395 	ioflag = fp->f_flag & (O_NONBLOCK | O_DIRECT | O_FSYNC);
1396 	if (ioflag & O_DIRECT)
1397 		ioflag |= IO_DIRECT;
1398 	if ((flags & FOF_OFFSET) == 0)
1399 		uio->uio_offset = fp->f_offset;
1400 
1401 	resid = uio->uio_resid;
1402 
1403 	error = dsw->d_write(dev, uio, ioflag);
1404 	if (uio->uio_resid != resid || (error == 0 && resid != 0)) {
1405 		vfs_timestamp(&dev->si_ctime);
1406 		dev->si_mtime = dev->si_ctime;
1407 	}
1408 	curthread->td_fpop = NULL;
1409 	dev_relthread(dev);
1410 
1411 	if ((flags & FOF_OFFSET) == 0)
1412 		fp->f_offset = uio->uio_offset;
1413 	fp->f_nextoff = uio->uio_offset;
1414 	return (error);
1415 }
1416 
1417 dev_t
1418 dev2udev(struct cdev *x)
1419 {
1420 	if (x == NULL)
1421 		return (NODEV);
1422 	return (cdev2priv(x)->cdp_inode);
1423 }
1424 
1425 static struct fileops devfs_ops_f = {
1426 	.fo_read =	devfs_read_f,
1427 	.fo_write =	devfs_write_f,
1428 	.fo_truncate =	devfs_truncate_f,
1429 	.fo_ioctl =	devfs_ioctl_f,
1430 	.fo_poll =	devfs_poll_f,
1431 	.fo_kqfilter =	devfs_kqfilter_f,
1432 	.fo_stat =	devfs_stat_f,
1433 	.fo_close =	devfs_close_f,
1434 	.fo_flags =	DFLAG_PASSABLE | DFLAG_SEEKABLE
1435 };
1436 
1437 static struct vop_vector devfs_vnodeops = {
1438 	.vop_default =		&default_vnodeops,
1439 
1440 	.vop_access =		devfs_access,
1441 	.vop_getattr =		devfs_getattr,
1442 	.vop_ioctl =		devfs_rioctl,
1443 	.vop_lookup =		devfs_lookup,
1444 	.vop_mknod =		devfs_mknod,
1445 	.vop_pathconf =		devfs_pathconf,
1446 	.vop_read =		devfs_rread,
1447 	.vop_readdir =		devfs_readdir,
1448 	.vop_readlink =		devfs_readlink,
1449 	.vop_reclaim =		devfs_reclaim,
1450 	.vop_remove =		devfs_remove,
1451 	.vop_revoke =		devfs_revoke,
1452 	.vop_setattr =		devfs_setattr,
1453 #ifdef MAC
1454 	.vop_setlabel =		devfs_setlabel,
1455 #endif
1456 	.vop_symlink =		devfs_symlink,
1457 };
1458 
1459 static struct vop_vector devfs_specops = {
1460 	.vop_default =		&default_vnodeops,
1461 
1462 	.vop_access =		devfs_access,
1463 	.vop_advlock =		devfs_advlock,
1464 	.vop_bmap =		VOP_PANIC,
1465 	.vop_close =		devfs_close,
1466 	.vop_create =		VOP_PANIC,
1467 	.vop_fsync =		devfs_fsync,
1468 	.vop_getattr =		devfs_getattr,
1469 	.vop_lease =		VOP_NULL,
1470 	.vop_link =		VOP_PANIC,
1471 	.vop_mkdir =		VOP_PANIC,
1472 	.vop_mknod =		VOP_PANIC,
1473 	.vop_open =		devfs_open,
1474 	.vop_pathconf =		devfs_pathconf,
1475 	.vop_print =		devfs_print,
1476 	.vop_read =		VOP_PANIC,
1477 	.vop_readdir =		VOP_PANIC,
1478 	.vop_readlink =		VOP_PANIC,
1479 	.vop_reallocblks =	VOP_PANIC,
1480 	.vop_reclaim =		devfs_reclaim,
1481 	.vop_remove =		devfs_remove,
1482 	.vop_rename =		VOP_PANIC,
1483 	.vop_revoke =		devfs_revoke,
1484 	.vop_rmdir =		VOP_PANIC,
1485 	.vop_setattr =		devfs_setattr,
1486 #ifdef MAC
1487 	.vop_setlabel =		devfs_setlabel,
1488 #endif
1489 	.vop_strategy =		VOP_PANIC,
1490 	.vop_symlink =		VOP_PANIC,
1491 	.vop_write =		VOP_PANIC,
1492 };
1493 
1494 /*
1495  * Our calling convention to the device drivers used to be that we passed
1496  * vnode.h IO_* flags to read()/write(), but we're moving to fcntl.h O_
1497  * flags instead, since that's what open(), close() and ioctl() take and
1498  * we don't really want vnode.h in device drivers.
1499  * We preserved source compatibility by redefining some vnode flags to
1500  * be the same as the fcntl ones and by sending down the bitwise OR of
1501  * the respective fcntl/vnode flags.  These CTASSERTs make sure nobody
1502  * pulls the rug out from under this.
1503  */
1504 CTASSERT(O_NONBLOCK == IO_NDELAY);
1505 CTASSERT(O_FSYNC == IO_SYNC);
1506