xref: /freebsd/sys/kern/vfs_vnops.c (revision adeb92a24c57f97d5cd3c3c45be239cbb23aed68)
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/syslog.h>

#include <machine/limits.h>

static int vn_closefile __P((struct file *fp, struct thread *td));
static int vn_ioctl __P((struct file *fp, u_long com, caddr_t data,
		struct thread *td));
static int vn_read __P((struct file *fp, struct uio *uio,
		struct ucred *cred, int flags, struct thread *td));
static int vn_poll __P((struct file *fp, int events, struct ucred *cred,
		struct thread *td));
static int vn_kqfilter __P((struct file *fp, struct knote *kn));
static int vn_statfile __P((struct file *fp, struct stat *sb, struct thread *td));
static int vn_write __P((struct file *fp, struct uio *uio,
		struct ucred *cred, int flags, struct thread *td));

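/*
 * File operations vector for vnode-backed files.
 */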
struct 	fileops vnops = {
	vn_read, vn_write, vn_ioctl, vn_poll, vn_kqfilter,
	vn_statfile, vn_closefile
};

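/*
 * Vnode open call using the credential of the process associated with
 * the nameidata's thread; the work is done by vn_open_cred().
 */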
int
vn_open(ndp, flagp, cmode)
	register struct nameidata *ndp;
	int *flagp, cmode;
{
	struct thread *td = ndp->ni_cnd.cn_thread;

	return (vn_open_cred(ndp, flagp, cmode, td->td_proc->p_ucred));
}

/*
 * Common code for vnode open operations.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 *
 * Note that this does NOT free nameidata for the successful case,
 * due to the NDINIT being done elsewhere.
 */
int
vn_open_cred(ndp, flagp, cmode, cred)
	register struct nameidata *ndp;
	int *flagp, cmode;
	struct ucred *cred;
{
	struct vnode *vp;
	struct mount *mp;
	struct thread *td = ndp->ni_cnd.cn_thread;
	struct vattr vat;
	struct vattr *vap = &vat;
	int mode, fmode, error;

restart:
	fmode = *flagp;
	if (fmode & O_CREAT) {
		ndp->ni_cnd.cn_nameiop = CREATE;
		ndp->ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF;
		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
			ndp->ni_cnd.cn_flags |= FOLLOW;
		bwillwrite();
		if ((error = namei(ndp)) != 0)
			return (error);
		if (ndp->ni_vp == NULL) {
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) {
				NDFREE(ndp, NDF_ONLY_PNBUF);
				vput(ndp->ni_dvp);
				if ((error = vn_start_write(NULL, &mp,
				    V_XSLEEP | PCATCH)) != 0)
					return (error);
				goto restart;
			}
			VOP_LEASE(ndp->ni_dvp, td, cred, LEASE_WRITE);
			error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
					   &ndp->ni_cnd, vap);
			vput(ndp->ni_dvp);
			vn_finished_write(mp);
			if (error) {
				NDFREE(ndp, NDF_ONLY_PNBUF);
				return (error);
			}
			ASSERT_VOP_UNLOCKED(ndp->ni_dvp, "create");
			ASSERT_VOP_LOCKED(ndp->ni_vp, "create");
			fmode &= ~O_TRUNC;
			vp = ndp->ni_vp;
		} else {
			if (ndp->ni_dvp == ndp->ni_vp)
				vrele(ndp->ni_dvp);
			else
				vput(ndp->ni_dvp);
			ndp->ni_dvp = NULL;
			vp = ndp->ni_vp;
			if (fmode & O_EXCL) {
				error = EEXIST;
				goto bad;
			}
			fmode &= ~O_CREAT;
		}
	} else {
		ndp->ni_cnd.cn_nameiop = LOOKUP;
		ndp->ni_cnd.cn_flags =
		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) | LOCKLEAF;
		if ((error = namei(ndp)) != 0)
			return (error);
		vp = ndp->ni_vp;
	}
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		mode = 0;
		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			error = vn_writechk(vp);
			if (error)
				goto bad;
			mode |= VWRITE;
		}
		if (fmode & FREAD)
			mode |= VREAD;
		if (mode) {
			error = VOP_ACCESS(vp, mode, cred, td);
			if (error)
				goto bad;
		}
	}
	if ((error = VOP_OPEN(vp, fmode, cred, td)) != 0)
		goto bad;
	/*
	 * Make sure that a VM object is created for VMIO support.
	 */
	if (vn_canvmio(vp) == TRUE) {
		if ((error = vfs_object_create(vp, td, cred)) != 0)
			/* XXX: Should VOP_CLOSE() again here. */
			goto bad;
	}

	if (fmode & FWRITE)
		vp->v_writecount++;
	*flagp = fmode;
	return (0);
bad:
	NDFREE(ndp, NDF_ONLY_PNBUF);
	vput(vp);
	*flagp = fmode;
	return (error);
}

/*
 * Check for write permissions on the specified vnode.
 * Prototype text segments cannot be written.
 */
int
vn_writechk(vp)
	register struct vnode *vp;
{

	/*
	 * If there's shared text associated with
	 * the vnode, we can't allow writing.
	 */
	if (vp->v_flag & VTEXT)
		return (ETXTBSY);
	return (0);
}

/*
 * Vnode close call
 */
int
vn_close(vp, flags, cred, td)
	register struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct thread *td;
{
	int error;

	if (flags & FWRITE)
		vp->v_writecount--;
	error = VOP_CLOSE(vp, flags, cred, td);
	/*
	 * XXX - In certain instances VOP_CLOSE has to do the vrele
	 * itself. If the vrele has been done, it will return EAGAIN
	 * to indicate that the vrele should not be done again. When
	 * this happens, we just return success. The correct thing to
	 * do would be to have all VOP_CLOSE instances do the vrele.
	 */
	if (error == EAGAIN)
		return (0);
	vrele(vp);
	return (error);
}

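/*
 * Estimate how sequential the current I/O pattern is and return a
 * read-ahead/clustering hint, encoded as f_seqcount shifted into the
 * upper bits of the ioflag word passed to VOP_READ()/VOP_WRITE().
 */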
static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sequential heuristic - detect sequential operation
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		/*
		 * XXX we assume that the filesystem block size is
		 * the default.  Not true, but still gives us a pretty
		 * good indicator of how sequential the read operations
		 * are.
		 */
		fp->f_seqcount += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
		if (fp->f_seqcount >= 127)
			fp->f_seqcount = 127;
		return (fp->f_seqcount << 16);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return (0);
}

/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, td)
	enum uio_rw rw;
	struct vnode *vp;
	caddr_t base;
	int len;
	off_t offset;
	enum uio_seg segflg;
	int ioflg;
	struct ucred *cred;
	int *aresid;
	struct thread *td;
{
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		mp = NULL;
		if (rw == UIO_WRITE &&
		    vp->v_type != VCHR &&
		    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	}
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = td;
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0, td);
	}
	return (error);
}

/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks, and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * call bwillwrite() before each chunk is written via vn_rdwr().  We also
 * call uio_yield() to give other processes a chance to lock the vnode
 * (either other processes core'ing the same binary, or unrelated processes
 * scanning the directory).
 */
int
vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, td)
	enum uio_rw rw;
	struct vnode *vp;
	caddr_t base;
	int len;
	off_t offset;
	enum uio_seg segflg;
	int ioflg;
	struct ucred *cred;
	int *aresid;
	struct thread *td;
{
	int error = 0;

	do {
		int chunk = (len > MAXBSIZE) ? MAXBSIZE : len;

		if (rw != UIO_READ && vp->v_type == VREG)
			bwillwrite();
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
		    ioflg, cred, aresid, td);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base += chunk;
		uio_yield();
	} while (len);
	if (aresid)
		*aresid += len;
	return (error);
}

/*
 * File table vnode read routine.
 */
static int
vn_read(fp, uio, cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *cred;
	struct thread *td;
	int flags;
{
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	vp = (struct vnode *)fp->f_data;
	ioflag = 0;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	VOP_LEASE(vp, td, cred, LEASE_READ);
	vn_lock(vp, LK_SHARED | LK_NOPAUSE | LK_RETRY, td);
	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag |= sequential_heuristic(uio, fp);

	error = VOP_READ(vp, uio, ioflag, cred);
	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0, td);
	return (error);
}

/*
 * File table vnode write routine.
 */
static int
vn_write(fp, uio, cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *cred;
	struct thread *td;
	int flags;
{
	struct vnode *vp;
	struct mount *mp;
	int error, ioflag;

	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	vp = (struct vnode *)fp->f_data;
	if (vp->v_type == VREG)
		bwillwrite();
	vp = (struct vnode *)fp->f_data;	/* XXX needed? */
	ioflag = IO_UNIT;
	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
		ioflag |= IO_APPEND;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	if ((fp->f_flag & O_FSYNC) ||
	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
		ioflag |= IO_SYNC;
	mp = NULL;
	if (vp->v_type != VCHR &&
	    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
		return (error);
	VOP_LEASE(vp, td, cred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = fp->f_offset;
	ioflag |= sequential_heuristic(uio, fp);
	error = VOP_WRITE(vp, uio, ioflag, cred);
	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0, td);
	vn_finished_write(mp);
	return (error);
}

/*
 * File table vnode stat routine.
 */
static int
vn_statfile(fp, sb, td)
	struct file *fp;
	struct stat *sb;
	struct thread *td;
{
	struct vnode *vp = (struct vnode *)fp->f_data;

	return (vn_stat(vp, sb, td));
}

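/*
 * Fill in a stat structure from a vnode's attributes.
 */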
int
vn_stat(vp, sb, td)
	struct vnode *vp;
	register struct stat *sb;
	struct thread *td;
{
	struct vattr vattr;
	register struct vattr *vap;
	int error;
	u_short mode;

	vap = &vattr;
	error = VOP_GETATTR(vp, vap, td->td_proc->p_ucred, td);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	sb->st_lspare = 0;
	sb->st_qspare[0] = 0;
	sb->st_qspare[1] = 0;

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		/*
		 * Cosmetic only: symlinks do not really have a mode.
		 * Adjust the local 'mode' (not sb->st_mode, which is
		 * not assigned until after this switch).
		 */
		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
			mode &= ~ACCESSPERMS;	/* 0000 */
		else
			mode |= ACCESSPERMS;	/* 0777 */
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = vap->va_rdev;
	if (vap->va_size > OFF_MAX)
		return (EOVERFLOW);
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 *   "a filesystem-specific preferred I/O block size for this
	 *    object.  In some filesystem types, this may vary from file
	 *    to file"
	 * Default to zero to catch bogus uses of this field.
	 */

	if (vap->va_type == VREG) {
		sb->st_blksize = vap->va_blocksize;
	} else if (vn_isdisk(vp, NULL)) {
		sb->st_blksize = vp->v_rdev->si_bsize_best;
		if (sb->st_blksize < vp->v_rdev->si_bsize_phys)
			sb->st_blksize = vp->v_rdev->si_bsize_phys;
		if (sb->st_blksize < BLKDEV_IOSIZE)
			sb->st_blksize = BLKDEV_IOSIZE;
	} else {
		sb->st_blksize = 0;
	}

	sb->st_flags = vap->va_flags;
	if (suser_xxx(td->td_proc->p_ucred, 0, 0))
		sb->st_gen = 0;
	else
		sb->st_gen = vap->va_gen;

#if (S_BLKSIZE == 512)
	/* Optimize this case */
	sb->st_blocks = vap->va_bytes >> 9;
#else
	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
#endif
	return (0);
}

/*
 * File table vnode ioctl routine.
 */
static int
vn_ioctl(fp, com, data, td)
	struct file *fp;
	u_long com;
	caddr_t data;
	struct thread *td;
{
	register struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vattr vattr;
	int error;

	switch (vp->v_type) {

	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			error = VOP_GETATTR(vp, &vattr, td->td_proc->p_ucred, td);
			if (error)
				return (error);
			*(int *)data = vattr.va_size - fp->f_offset;
			return (0);
		}
		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
			return (0);			/* XXX */
		/* fall into ... */

	default:
#if 0
		return (ENOTTY);
#endif
	case VFIFO:
	case VCHR:
	case VBLK:
		if (com == FIODTYPE) {
			if (vp->v_type != VCHR && vp->v_type != VBLK)
				return (ENOTTY);
			*(int *)data = devsw(vp->v_rdev)->d_flags & D_TYPEMASK;
			return (0);
		}
		error = VOP_IOCTL(vp, com, data, fp->f_flag, td->td_proc->p_ucred, td);
		if (error == 0 && com == TIOCSCTTY) {

			/* Do nothing if reassigning same control tty */
			if (td->td_proc->p_session->s_ttyvp == vp)
				return (0);

			/* Get rid of reference to old control tty */
			if (td->td_proc->p_session->s_ttyvp)
				vrele(td->td_proc->p_session->s_ttyvp);

			td->td_proc->p_session->s_ttyvp = vp;
			VREF(vp);
		}
		return (error);
	}
}

/*
 * File table vnode poll routine.
 */
static int
vn_poll(fp, events, cred, td)
	struct file *fp;
	int events;
	struct ucred *cred;
	struct thread *td;
{

	return (VOP_POLL(((struct vnode *)fp->f_data), events, cred, td));
}

/*
 * Check that the vnode is still valid, and if so
 * acquire requested lock.
 */
int
#ifndef	DEBUG_LOCKS
vn_lock(vp, flags, td)
#else
debug_vn_lock(vp, flags, td, filename, line)
#endif
	struct vnode *vp;
	int flags;
	struct thread *td;
#ifdef	DEBUG_LOCKS
	const char *filename;
	int line;
#endif
{
	int error;

	do {
		if ((flags & LK_INTERLOCK) == 0)
			mtx_lock(&vp->v_interlock);
		if ((vp->v_flag & VXLOCK) && vp->v_vxproc != curthread) {
			vp->v_flag |= VXWANT;
			msleep(vp, &vp->v_interlock, PINOD | PDROP,
			    "vn_lock", 0);
			error = ENOENT;
		} else {
			if (vp->v_vxproc != NULL)
				log(LOG_INFO, "VXLOCK interlock avoided in vn_lock\n");
#ifdef	DEBUG_LOCKS
			vp->filename = filename;
			vp->line = line;
#endif
			error = VOP_LOCK(vp,
				    flags | LK_NOPAUSE | LK_INTERLOCK, td);
			if (error == 0)
				return (error);
		}
		flags &= ~LK_INTERLOCK;
	} while (flags & LK_RETRY);
	return (error);
}

/*
 * File table vnode close routine.
 */
static int
vn_closefile(fp, td)
	struct file *fp;
	struct thread *td;
{

	fp->f_ops = &badfileops;
	return (vn_close(((struct vnode *)fp->f_data), fp->f_flag,
		fp->f_cred, td));
}

/*
 * Prepare to start a filesystem write operation. If the operation is
 * permitted, then we bump the count of operations in progress and
 * proceed. If a suspend request is in progress, we wait until the
 * suspension is over, and then proceed.
 */
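/*
 * A sketch of the typical caller-side pattern (vn_rdwr() below is a
 * live example in this file):
 *
 *	if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
 *		return (error);
 *	... perform the write-side VOP calls ...
 *	vn_finished_write(mp);
 */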
int
vn_start_write(vp, mpp, flags)
	struct vnode *vp;
	struct mount **mpp;
	int flags;
{
	struct mount *mp;
	int error;

	/*
	 * If a vnode is provided, get and return the mount point to
	 * which it will write.
	 */
	if (vp != NULL) {
		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
			*mpp = NULL;
			if (error != EOPNOTSUPP)
				return (error);
			return (0);
		}
	}
	if ((mp = *mpp) == NULL)
		return (0);
	/*
	 * Check on status of suspension.
	 */
	while ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
		if (flags & V_NOWAIT)
			return (EWOULDBLOCK);
		error = tsleep(&mp->mnt_flag, (PUSER - 1) | (flags & PCATCH),
		    "suspfs", 0);
		if (error)
			return (error);
	}
	if (flags & V_XSLEEP)
		return (0);
	mp->mnt_writeopcount++;
	return (0);
}

/*
 * Secondary suspension. Used by operations such as vop_inactive
 * routines that are needed by the higher level functions. These
 * are allowed to proceed until all the higher level functions have
 * completed (indicated by mnt_writeopcount dropping to zero). At that
 * time, these operations are halted until the suspension is over.
 */
int
vn_write_suspend_wait(vp, mp, flags)
	struct vnode *vp;
	struct mount *mp;
	int flags;
{
	int error;

	if (vp != NULL) {
		if ((error = VOP_GETWRITEMOUNT(vp, &mp)) != 0) {
			if (error != EOPNOTSUPP)
				return (error);
			return (0);
		}
	}
	/*
	 * If we are not suspended or have not yet reached suspended
	 * mode, then let the operation proceed.
	 */
	if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPENDED) == 0)
		return (0);
	if (flags & V_NOWAIT)
		return (EWOULDBLOCK);
	/*
	 * Wait for the suspension to finish.
	 */
	return (tsleep(&mp->mnt_flag, (PUSER - 1) | (flags & PCATCH),
	    "suspfs", 0));
}

/*
 * Filesystem write operation has completed. If we are suspending and this
 * operation is the last one, notify the suspender that the suspension is
 * now in effect.
 */
void
vn_finished_write(mp)
	struct mount *mp;
{

	if (mp == NULL)
		return;
	mp->mnt_writeopcount--;
	if (mp->mnt_writeopcount < 0)
		panic("vn_finished_write: neg cnt");
	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
	    mp->mnt_writeopcount <= 0)
		wakeup(&mp->mnt_writeopcount);
}

/*
 * Request a filesystem to suspend write operations.
 */
void
vfs_write_suspend(mp)
	struct mount *mp;
{
	struct thread *td = curthread;

	if (mp->mnt_kern_flag & MNTK_SUSPEND)
		return;
	mp->mnt_kern_flag |= MNTK_SUSPEND;
	if (mp->mnt_writeopcount > 0)
		(void) tsleep(&mp->mnt_writeopcount, PUSER - 1, "suspwt", 0);
	VFS_SYNC(mp, MNT_WAIT, td->td_proc->p_ucred, td);
	mp->mnt_kern_flag |= MNTK_SUSPENDED;
}

/*
 * Request a filesystem to resume write operations.
 */
void
vfs_write_resume(mp)
	struct mount *mp;
{

	if ((mp->mnt_kern_flag & MNTK_SUSPEND) == 0)
		return;
	mp->mnt_kern_flag &= ~(MNTK_SUSPEND | MNTK_SUSPENDED);
	wakeup(&mp->mnt_writeopcount);
	wakeup(&mp->mnt_flag);
}

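/*
 * File table vnode kqfilter routine.
 */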
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{

	return (VOP_KQFILTER(((struct vnode *)fp->f_data), kn));
}

/*
 * Simplified in-kernel wrapper calls for extended attribute access.
 * These calls all pass in a NULL credential, authorizing as "kernel" access.
 * Set IO_NODELOCKED in ioflg if the vnode is already locked.
 */
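/*
 * Usage sketch (hypothetical caller; the namespace constant and the
 * attribute name are only illustrative): with the vnode already locked,
 * an attribute could be fetched roughly as follows.
 *
 *	char buf[128];
 *	int buflen = sizeof(buf);
 *
 *	error = vn_extattr_get(vp, IO_NODELOCKED, EXTATTR_NAMESPACE_SYSTEM,
 *	    "posix1e.acl_access", &buflen, buf, td);
 *	if (error == 0)
 *		... buflen now holds the number of bytes returned ...
 */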
int
vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int *buflen, char *buf, struct thread *td)
{
	struct uio	auio;
	struct iovec	iov;
	int	error;

	iov.iov_len = *buflen;
	iov.iov_base = buf;

	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;
	auio.uio_offset = 0;
	auio.uio_resid = *buflen;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);

	/* authorize attribute retrieval as kernel */
	error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td);

	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_UNLOCK(vp, 0, td);

	if (error == 0) {
		*buflen = *buflen - auio.uio_resid;
	}

	return (error);
}

/*
 * XXX failure mode if partially written?
 */
int
vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int buflen, char *buf, struct thread *td)
{
	struct uio	auio;
	struct iovec	iov;
	struct mount	*mp;
	int	error;

	iov.iov_len = buflen;
	iov.iov_base = buf;

	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;
	auio.uio_offset = 0;
	auio.uio_resid = buflen;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	}

	/* authorize attribute setting as kernel */
	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0, td);
	}

	return (error);
}

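/*
 * Remove an extended attribute; passing a NULL uio to VOP_SETEXTATTR
 * requests deletion of the attribute.
 */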
int
vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, struct thread *td)
{
	struct mount	*mp;
	int	error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	}

	/* authorize attribute removal as kernel */
	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL, NULL, td);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0, td);
	}

	return (error);
}