xref: /freebsd/sys/kern/vfs_vnops.c (revision 70fe064ad7cab6c0444b91622f60ec6a462f308a)
1 /*
2  * Copyright (c) 1982, 1986, 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the University of
21  *	California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
39  * $FreeBSD$
40  */
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/fcntl.h>
45 #include <sys/file.h>
46 #include <sys/stat.h>
47 #include <sys/proc.h>
48 #include <sys/lock.h>
49 #include <sys/mount.h>
50 #include <sys/mutex.h>
51 #include <sys/namei.h>
52 #include <sys/vnode.h>
53 #include <sys/bio.h>
54 #include <sys/buf.h>
55 #include <sys/filio.h>
56 #include <sys/ttycom.h>
57 #include <sys/conf.h>
58 #include <sys/syslog.h>
59 
60 #include <machine/limits.h>
61 
62 static int vn_closefile __P((struct file *fp, struct thread *td));
63 static int vn_ioctl __P((struct file *fp, u_long com, caddr_t data,
64 		struct thread *td));
65 static int vn_read __P((struct file *fp, struct uio *uio,
66 		struct ucred *cred, int flags, struct thread *td));
67 static int vn_poll __P((struct file *fp, int events, struct ucred *cred,
68 		struct thread *td));
69 static int vn_kqfilter __P((struct file *fp, struct knote *kn));
70 static int vn_statfile __P((struct file *fp, struct stat *sb, struct thread *td));
71 static int vn_write __P((struct file *fp, struct uio *uio,
72 		struct ucred *cred, int flags, struct thread *td));
73 
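/*
 * File operations vector for vnode-backed file descriptors; the entries
 * below are, in order: read, write, ioctl, poll, kqfilter, stat and close.
 */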
74 struct 	fileops vnops = {
75 	vn_read, vn_write, vn_ioctl, vn_poll, vn_kqfilter,
76 	vn_statfile, vn_closefile
77 };
78 
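/*
 * Convenience wrapper around vn_open_cred() that uses the credentials
 * of the thread recorded in the nameidata.
 */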
79 int
80 vn_open(ndp, flagp, cmode)
81 	register struct nameidata *ndp;
82 	int *flagp, cmode;
83 {
84 	struct thread *td = ndp->ni_cnd.cn_thread;
85 
86 	return (vn_open_cred(ndp, flagp, cmode, td->td_proc->p_ucred));
87 }
88 
89 /*
90  * Common code for vnode open operations.
91  * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
92  *
93  * Note that this does NOT free the nameidata in the successful case,
94  * because the NDINIT was done by the caller.
95  */
96 int
97 vn_open_cred(ndp, flagp, cmode, cred)
98 	register struct nameidata *ndp;
99 	int *flagp, cmode;
100 	struct ucred *cred;
101 {
102 	struct vnode *vp;
103 	struct mount *mp;
104 	struct thread *td = ndp->ni_cnd.cn_thread;
105 	struct vattr vat;
106 	struct vattr *vap = &vat;
107 	int mode, fmode, error;
108 
109 restart:
110 	fmode = *flagp;
111 	if (fmode & O_CREAT) {
112 		ndp->ni_cnd.cn_nameiop = CREATE;
113 		ndp->ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF;
114 		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
115 			ndp->ni_cnd.cn_flags |= FOLLOW;
116 		bwillwrite();
117 		if ((error = namei(ndp)) != 0)
118 			return (error);
119 		if (ndp->ni_vp == NULL) {
120 			VATTR_NULL(vap);
121 			vap->va_type = VREG;
122 			vap->va_mode = cmode;
123 			if (fmode & O_EXCL)
124 				vap->va_vaflags |= VA_EXCLUSIVE;
125 			if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) {
126 				NDFREE(ndp, NDF_ONLY_PNBUF);
127 				vput(ndp->ni_dvp);
128 				if ((error = vn_start_write(NULL, &mp,
129 				    V_XSLEEP | PCATCH)) != 0)
130 					return (error);
131 				goto restart;
132 			}
133 			VOP_LEASE(ndp->ni_dvp, td, cred, LEASE_WRITE);
134 			error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
135 					   &ndp->ni_cnd, vap);
136 			vput(ndp->ni_dvp);
137 			vn_finished_write(mp);
138 			if (error) {
139 				NDFREE(ndp, NDF_ONLY_PNBUF);
140 				return (error);
141 			}
142 			ASSERT_VOP_UNLOCKED(ndp->ni_dvp, "create");
143 			ASSERT_VOP_LOCKED(ndp->ni_vp, "create");
144 			fmode &= ~O_TRUNC;
145 			vp = ndp->ni_vp;
146 		} else {
147 			if (ndp->ni_dvp == ndp->ni_vp)
148 				vrele(ndp->ni_dvp);
149 			else
150 				vput(ndp->ni_dvp);
151 			ndp->ni_dvp = NULL;
152 			vp = ndp->ni_vp;
153 			if (fmode & O_EXCL) {
154 				error = EEXIST;
155 				goto bad;
156 			}
157 			fmode &= ~O_CREAT;
158 		}
159 	} else {
160 		ndp->ni_cnd.cn_nameiop = LOOKUP;
161 		ndp->ni_cnd.cn_flags =
162 		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) | LOCKLEAF;
163 		if ((error = namei(ndp)) != 0)
164 			return (error);
165 		vp = ndp->ni_vp;
166 	}
167 	if (vp->v_type == VLNK) {
168 		error = EMLINK;
169 		goto bad;
170 	}
171 	if (vp->v_type == VSOCK) {
172 		error = EOPNOTSUPP;
173 		goto bad;
174 	}
175 	if ((fmode & O_CREAT) == 0) {
176 		mode = 0;
177 		if (fmode & (FWRITE | O_TRUNC)) {
178 			if (vp->v_type == VDIR) {
179 				error = EISDIR;
180 				goto bad;
181 			}
182 			error = vn_writechk(vp);
183 			if (error)
184 				goto bad;
185 			mode |= VWRITE;
186 		}
187 		if (fmode & FREAD)
188 			mode |= VREAD;
189 		if (mode) {
190 			error = VOP_ACCESS(vp, mode, cred, td);
191 			if (error)
192 				goto bad;
193 		}
194 	}
195 	if ((error = VOP_OPEN(vp, fmode, cred, td)) != 0)
196 		goto bad;
197 	/*
198 	 * Make sure that a VM object is created for VMIO support.
199 	 */
200 	if (vn_canvmio(vp) == TRUE) {
201 		if ((error = vfs_object_create(vp, td, cred)) != 0)
202 			/* XXX: Should VOP_CLOSE() again here. */
203 			goto bad;
204 	}
205 
206 	if (fmode & FWRITE)
207 		vp->v_writecount++;
208 	*flagp = fmode;
209 	return (0);
210 bad:
211 	NDFREE(ndp, NDF_ONLY_PNBUF);
212 	vput(vp);
213 	*flagp = fmode;
214 	return (error);
215 }
216 
217 /*
218  * Check for write permissions on the specified vnode.
219  * Prototype text segments cannot be written.
220  */
221 int
222 vn_writechk(vp)
223 	register struct vnode *vp;
224 {
225 
226 	/*
227 	 * If there is shared text associated with
228 	 * the vnode (VTEXT is set), we cannot allow
229 	 * writing: the text segment is busy.
230 	 */
231 	if (vp->v_flag & VTEXT)
232 		return (ETXTBSY);
233 	return (0);
234 }
235 
236 /*
237  * Vnode close call
238  */
239 int
240 vn_close(vp, flags, cred, td)
241 	register struct vnode *vp;
242 	int flags;
243 	struct ucred *cred;
244 	struct thread *td;
245 {
246 	int error;
247 
248 	if (flags & FWRITE)
249 		vp->v_writecount--;
250 	error = VOP_CLOSE(vp, flags, cred, td);
251 	/*
252 	 * XXX - In certain instances VOP_CLOSE has to do the vrele
253 	 * itself. If the vrele has been done, it will return EAGAIN
254 	 * to indicate that the vrele should not be done again. When
255 	 * this happens, we just return success. The correct thing to
256 	 * do would be to have all VOP_CLOSE instances do the vrele.
257 	 */
258 	if (error == EAGAIN)
259 		return (0);
260 	vrele(vp);
261 	return (error);
262 }
263 
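/*
 * Estimate how sequential this transfer is relative to the previous one
 * on the same file and return a hint, encoded as f_seqcount shifted into
 * the high bits of the ioflag word, for the filesystem's read-ahead and
 * clustering code.
 */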
264 static __inline
265 int
266 sequential_heuristic(struct uio *uio, struct file *fp)
267 {
268 
269 	/*
270 	 * Sequential heuristic - detect sequential operation
271 	 */
272 	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
273 	    uio->uio_offset == fp->f_nextoff) {
274 		/*
275 		 * XXX we assume that the filesystem block size is
276 		 * the default.  Not true, but still gives us a pretty
277 		 * good indicator of how sequential the read operations
278 		 * are.
279 		 */
280 		fp->f_seqcount += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
281 		if (fp->f_seqcount >= 127)
282 			fp->f_seqcount = 127;
283 		return(fp->f_seqcount << 16);
284 	}
285 
286 	/*
287 	 * Not sequential; quickly draw down the seqcount.
288 	 */
289 	if (fp->f_seqcount > 1)
290 		fp->f_seqcount = 1;
291 	else
292 		fp->f_seqcount = 0;
293 	return(0);
294 }
295 
296 /*
297  * Package up an I/O request on a vnode into a uio and do it.
298  */
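/*
 * A hypothetical usage sketch (the names buf, resid, cred and td are the
 * caller's): read the first bytes of an already-resolved vnode into a
 * kernel buffer, letting vn_rdwr() handle the locking.
 *
 *	error = vn_rdwr(UIO_READ, vp, (caddr_t)buf, sizeof(buf), (off_t)0,
 *	    UIO_SYSSPACE, 0, cred, &resid, td);
 */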
299 int
300 vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, td)
301 	enum uio_rw rw;
302 	struct vnode *vp;
303 	caddr_t base;
304 	int len;
305 	off_t offset;
306 	enum uio_seg segflg;
307 	int ioflg;
308 	struct ucred *cred;
309 	int *aresid;
310 	struct thread *td;
311 {
312 	struct uio auio;
313 	struct iovec aiov;
314 	struct mount *mp;
315 	int error;
316 
317 	if ((ioflg & IO_NODELOCKED) == 0) {
318 		mp = NULL;
319 		if (rw == UIO_WRITE &&
320 		    vp->v_type != VCHR &&
321 		    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
322 			return (error);
323 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
324 	}
325 	auio.uio_iov = &aiov;
326 	auio.uio_iovcnt = 1;
327 	aiov.iov_base = base;
328 	aiov.iov_len = len;
329 	auio.uio_resid = len;
330 	auio.uio_offset = offset;
331 	auio.uio_segflg = segflg;
332 	auio.uio_rw = rw;
333 	auio.uio_td = td;
334 	if (rw == UIO_READ) {
335 		error = VOP_READ(vp, &auio, ioflg, cred);
336 	} else {
337 		error = VOP_WRITE(vp, &auio, ioflg, cred);
338 	}
339 	if (aresid)
340 		*aresid = auio.uio_resid;
341 	else
342 		if (auio.uio_resid && error == 0)
343 			error = EIO;
344 	if ((ioflg & IO_NODELOCKED) == 0) {
345 		vn_finished_write(mp);
346 		VOP_UNLOCK(vp, 0, td);
347 	}
348 	return (error);
349 }
350 
351 /*
352  * Package up an I/O request on a vnode into a uio and do it.  The I/O
353  * request is split up into smaller chunks and we try to avoid saturating
354  * the buffer cache while potentially holding a vnode locked, so we
355  * call bwillwrite() before each vn_rdwr() call.  We also call uio_yield()
356  * to give other processes a chance to lock the vnode (either other processes
357  * core'ing the same binary, or unrelated processes scanning the directory).
358  */
359 int
360 vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, td)
361 	enum uio_rw rw;
362 	struct vnode *vp;
363 	caddr_t base;
364 	int len;
365 	off_t offset;
366 	enum uio_seg segflg;
367 	int ioflg;
368 	struct ucred *cred;
369 	int *aresid;
370 	struct thread *td;
371 {
372 	int error = 0;
373 
374 	do {
375 		int chunk = (len > MAXBSIZE) ? MAXBSIZE : len;
376 
377 		if (rw != UIO_READ && vp->v_type == VREG)
378 			bwillwrite();
379 		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
380 		    ioflg, cred, aresid, td);
381 		len -= chunk;	/* aresid calc already includes length */
382 		if (error)
383 			break;
384 		offset += chunk;
385 		base += chunk;
386 		uio_yield();
387 	} while (len);
388 	if (aresid)
389 		*aresid += len;
390 	return (error);
391 }
392 
393 /*
394  * File table vnode read routine.
395  */
396 static int
397 vn_read(fp, uio, cred, flags, td)
398 	struct file *fp;
399 	struct uio *uio;
400 	struct ucred *cred;
401 	struct thread *td;
402 	int flags;
403 {
404 	struct vnode *vp;
405 	int error, ioflag;
406 
407 	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
408 	    uio->uio_td, td));
409 	vp = (struct vnode *)fp->f_data;
410 	ioflag = 0;
411 	if (fp->f_flag & FNONBLOCK)
412 		ioflag |= IO_NDELAY;
413 	if (fp->f_flag & O_DIRECT)
414 		ioflag |= IO_DIRECT;
415 	VOP_LEASE(vp, td, cred, LEASE_READ);
416 	vn_lock(vp, LK_SHARED | LK_NOPAUSE | LK_RETRY, td);
417 	if ((flags & FOF_OFFSET) == 0)
418 		uio->uio_offset = fp->f_offset;
419 
420 	ioflag |= sequential_heuristic(uio, fp);
421 
422 	error = VOP_READ(vp, uio, ioflag, cred);
423 	if ((flags & FOF_OFFSET) == 0)
424 		fp->f_offset = uio->uio_offset;
425 	fp->f_nextoff = uio->uio_offset;
426 	VOP_UNLOCK(vp, 0, td);
427 	return (error);
428 }
429 
430 /*
431  * File table vnode write routine.
432  */
433 static int
434 vn_write(fp, uio, cred, flags, td)
435 	struct file *fp;
436 	struct uio *uio;
437 	struct ucred *cred;
438 	struct thread *td;
439 	int flags;
440 {
441 	struct vnode *vp;
442 	struct mount *mp;
443 	int error, ioflag;
444 
445 	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
446 	    uio->uio_td, td));
447 	vp = (struct vnode *)fp->f_data;
448 	if (vp->v_type == VREG)
449 		bwillwrite();
450 	ioflag = IO_UNIT;
451 	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
452 		ioflag |= IO_APPEND;
453 	if (fp->f_flag & FNONBLOCK)
454 		ioflag |= IO_NDELAY;
455 	if (fp->f_flag & O_DIRECT)
456 		ioflag |= IO_DIRECT;
457 	if ((fp->f_flag & O_FSYNC) ||
458 	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
459 		ioflag |= IO_SYNC;
460 	mp = NULL;
461 	if (vp->v_type != VCHR &&
462 	    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
463 		return (error);
464 	VOP_LEASE(vp, td, cred, LEASE_WRITE);
465 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
466 	if ((flags & FOF_OFFSET) == 0)
467 		uio->uio_offset = fp->f_offset;
468 	ioflag |= sequential_heuristic(uio, fp);
469 	error = VOP_WRITE(vp, uio, ioflag, cred);
470 	if ((flags & FOF_OFFSET) == 0)
471 		fp->f_offset = uio->uio_offset;
472 	fp->f_nextoff = uio->uio_offset;
473 	VOP_UNLOCK(vp, 0, td);
474 	vn_finished_write(mp);
475 	return (error);
476 }
477 
478 /*
479  * File table vnode stat routine.
480  */
481 static int
482 vn_statfile(fp, sb, td)
483 	struct file *fp;
484 	struct stat *sb;
485 	struct thread *td;
486 {
487 	struct vnode *vp = (struct vnode *)fp->f_data;
488 
489 	return vn_stat(vp, sb, td);
490 }
491 
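/*
 * Fill in a stat structure from the attributes of a vnode.
 */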
492 int
493 vn_stat(vp, sb, td)
494 	struct vnode *vp;
495 	register struct stat *sb;
496 	struct thread *td;
497 {
498 	struct vattr vattr;
499 	register struct vattr *vap;
500 	int error;
501 	u_short mode;
502 
503 	vap = &vattr;
504 	error = VOP_GETATTR(vp, vap, td->td_proc->p_ucred, td);
505 	if (error)
506 		return (error);
507 
508 	/*
509 	 * Zero the spare stat fields
510 	 */
511 	sb->st_lspare = 0;
512 	sb->st_qspare[0] = 0;
513 	sb->st_qspare[1] = 0;
514 
515 	/*
516 	 * Copy from vattr table
517 	 */
518 	if (vap->va_fsid != VNOVAL)
519 		sb->st_dev = vap->va_fsid;
520 	else
521 		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
522 	sb->st_ino = vap->va_fileid;
523 	mode = vap->va_mode;
524 	switch (vap->va_type) {
525 	case VREG:
526 		mode |= S_IFREG;
527 		break;
528 	case VDIR:
529 		mode |= S_IFDIR;
530 		break;
531 	case VBLK:
532 		mode |= S_IFBLK;
533 		break;
534 	case VCHR:
535 		mode |= S_IFCHR;
536 		break;
537 	case VLNK:
538 		mode |= S_IFLNK;
539 		/* This is a cosmetic change; symlinks do not have a mode. */
540 		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
541 			mode &= ~ACCESSPERMS;	/* 0000 */
542 		else
543 			mode |= ACCESSPERMS;	/* 0777 */
544 		break;
545 	case VSOCK:
546 		mode |= S_IFSOCK;
547 		break;
548 	case VFIFO:
549 		mode |= S_IFIFO;
550 		break;
551 	default:
552 		return (EBADF);
553 	}
554 	sb->st_mode = mode;
555 	sb->st_nlink = vap->va_nlink;
556 	sb->st_uid = vap->va_uid;
557 	sb->st_gid = vap->va_gid;
558 	sb->st_rdev = vap->va_rdev;
559 	if (vap->va_size > OFF_MAX)
560 		return (EOVERFLOW);
561 	sb->st_size = vap->va_size;
562 	sb->st_atimespec = vap->va_atime;
563 	sb->st_mtimespec = vap->va_mtime;
564 	sb->st_ctimespec = vap->va_ctime;
565 
566 	/*
567 	 * According to www.opengroup.org, the meaning of st_blksize is
568 	 *   "a filesystem-specific preferred I/O block size for this
569 	 *    object.  In some filesystem types, this may vary from file
570 	 *    to file"
571 	 * Default to PAGE_SIZE after much discussion.
572 	 */
573 
574 	if (vap->va_type == VREG) {
575 		sb->st_blksize = vap->va_blocksize;
576 	} else if (vn_isdisk(vp, NULL)) {
577 		sb->st_blksize = vp->v_rdev->si_bsize_best;
578 		if (sb->st_blksize < vp->v_rdev->si_bsize_phys)
579 			sb->st_blksize = vp->v_rdev->si_bsize_phys;
580 		if (sb->st_blksize < BLKDEV_IOSIZE)
581 			sb->st_blksize = BLKDEV_IOSIZE;
582 	} else {
583 		sb->st_blksize = PAGE_SIZE;
584 	}
585 
586 	sb->st_flags = vap->va_flags;
587 	if (suser_xxx(td->td_proc->p_ucred, 0, 0))
588 		sb->st_gen = 0;
589 	else
590 		sb->st_gen = vap->va_gen;
591 
592 #if (S_BLKSIZE == 512)
593 	/* Optimize this case */
594 	sb->st_blocks = vap->va_bytes >> 9;
595 #else
596 	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
597 #endif
598 	return (0);
599 }
600 
601 /*
602  * File table vnode ioctl routine.
603  */
604 static int
605 vn_ioctl(fp, com, data, td)
606 	struct file *fp;
607 	u_long com;
608 	caddr_t data;
609 	struct thread *td;
610 {
611 	register struct vnode *vp = ((struct vnode *)fp->f_data);
612 	struct vattr vattr;
613 	int error;
614 
615 	switch (vp->v_type) {
616 
617 	case VREG:
618 	case VDIR:
619 		if (com == FIONREAD) {
620 			error = VOP_GETATTR(vp, &vattr, td->td_proc->p_ucred, td);
621 			if (error)
622 				return (error);
623 			*(int *)data = vattr.va_size - fp->f_offset;
624 			return (0);
625 		}
626 		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
627 			return (0);			/* XXX */
628 		/* fall into ... */
629 
630 	default:
631 #if 0
632 		return (ENOTTY);
633 #endif
634 	case VFIFO:
635 	case VCHR:
636 	case VBLK:
637 		if (com == FIODTYPE) {
638 			if (vp->v_type != VCHR && vp->v_type != VBLK)
639 				return (ENOTTY);
640 			*(int *)data = devsw(vp->v_rdev)->d_flags & D_TYPEMASK;
641 			return (0);
642 		}
643 		error = VOP_IOCTL(vp, com, data, fp->f_flag, td->td_proc->p_ucred, td);
644 		if (error == 0 && com == TIOCSCTTY) {
645 
646 			/* Do nothing if reassigning same control tty */
647 			if (td->td_proc->p_session->s_ttyvp == vp)
648 				return (0);
649 
650 			/* Get rid of reference to old control tty */
651 			if (td->td_proc->p_session->s_ttyvp)
652 				vrele(td->td_proc->p_session->s_ttyvp);
653 
654 			td->td_proc->p_session->s_ttyvp = vp;
655 			VREF(vp);
656 		}
657 		return (error);
658 	}
659 }
660 
661 /*
662  * File table vnode poll routine.
663  */
664 static int
665 vn_poll(fp, events, cred, td)
666 	struct file *fp;
667 	int events;
668 	struct ucred *cred;
669 	struct thread *td;
670 {
671 
672 	return (VOP_POLL(((struct vnode *)fp->f_data), events, cred, td));
673 }
674 
675 /*
676  * Check that the vnode is still valid, and if so
677  * acquire the requested lock.
678  */
679 int
680 #ifndef	DEBUG_LOCKS
681 vn_lock(vp, flags, td)
682 #else
683 debug_vn_lock(vp, flags, td, filename, line)
684 #endif
685 	struct vnode *vp;
686 	int flags;
687 	struct thread *td;
688 #ifdef	DEBUG_LOCKS
689 	const char *filename;
690 	int line;
691 #endif
692 {
693 	int error;
694 
695 	do {
696 		if ((flags & LK_INTERLOCK) == 0)
697 			mtx_lock(&vp->v_interlock);
698 		if ((vp->v_flag & VXLOCK) && vp->v_vxproc != curthread) {
699 			vp->v_flag |= VXWANT;
700 			msleep(vp, &vp->v_interlock, PINOD | PDROP,
701 			    "vn_lock", 0);
702 			error = ENOENT;
703 		} else {
704 #if 0
705 			/* this can now occur in normal operation */
706 			if (vp->v_vxproc != NULL)
707 				log(LOG_INFO, "VXLOCK interlock avoided in vn_lock\n");
708 #endif
709 #ifdef	DEBUG_LOCKS
710 			vp->filename = filename;
711 			vp->line = line;
712 #endif
713 			error = VOP_LOCK(vp,
714 				    flags | LK_NOPAUSE | LK_INTERLOCK, td);
715 			if (error == 0)
716 				return (error);
717 		}
718 		flags &= ~LK_INTERLOCK;
719 	} while (flags & LK_RETRY);
720 	return (error);
721 }
722 
723 /*
724  * File table vnode close routine.
725  */
726 static int
727 vn_closefile(fp, td)
728 	struct file *fp;
729 	struct thread *td;
730 {
731 
732 	fp->f_ops = &badfileops;
733 	return (vn_close(((struct vnode *)fp->f_data), fp->f_flag,
734 		fp->f_cred, td));
735 }
736 
737 /*
738  * Prepare to start a filesystem write operation. If the operation is
739  * permitted, then we bump the count of operations in progress and
740  * proceed. If a suspend request is in progress, we wait until the
741  * suspension is over, and then proceed.
742  */
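/*
 * Simplified sketch of the usual suspend-aware write pattern (compare
 * vn_write() above); error handling and the VCHR special case omitted:
 *
 *	mp = NULL;
 *	if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
 *		return (error);
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
 *	error = VOP_WRITE(vp, uio, ioflag, cred);
 *	VOP_UNLOCK(vp, 0, td);
 *	vn_finished_write(mp);
 */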
743 int
744 vn_start_write(vp, mpp, flags)
745 	struct vnode *vp;
746 	struct mount **mpp;
747 	int flags;
748 {
749 	struct mount *mp;
750 	int error;
751 
752 	/*
753 	 * If a vnode is provided, get and return the mount point to
754 	 * which it will write.
755 	 */
756 	if (vp != NULL) {
757 		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
758 			*mpp = NULL;
759 			if (error != EOPNOTSUPP)
760 				return (error);
761 			return (0);
762 		}
763 	}
764 	if ((mp = *mpp) == NULL)
765 		return (0);
766 	/*
767 	 * Check on status of suspension.
768 	 */
769 	while ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
770 		if (flags & V_NOWAIT)
771 			return (EWOULDBLOCK);
772 		error = tsleep(&mp->mnt_flag, (PUSER - 1) | (flags & PCATCH),
773 		    "suspfs", 0);
774 		if (error)
775 			return (error);
776 	}
777 	if (flags & V_XSLEEP)
778 		return (0);
779 	mp->mnt_writeopcount++;
780 	return (0);
781 }
782 
783 /*
784  * Secondary suspension. Used by operations such as vop_inactive
785  * routines that are needed by the higher level functions. These
786  * are allowed to proceed until all the higher level functions have
787  * completed (indicated by mnt_writeopcount dropping to zero). At that
788  * time, these operations are halted until the suspension is over.
789  */
790 int
791 vn_write_suspend_wait(vp, mp, flags)
792 	struct vnode *vp;
793 	struct mount *mp;
794 	int flags;
795 {
796 	int error;
797 
798 	if (vp != NULL) {
799 		if ((error = VOP_GETWRITEMOUNT(vp, &mp)) != 0) {
800 			if (error != EOPNOTSUPP)
801 				return (error);
802 			return (0);
803 		}
804 	}
805 	/*
806 	 * If we are not suspended or have not yet reached suspended
807 	 * mode, then let the operation proceed.
808 	 */
809 	if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPENDED) == 0)
810 		return (0);
811 	if (flags & V_NOWAIT)
812 		return (EWOULDBLOCK);
813 	/*
814 	 * Wait for the suspension to finish.
815 	 */
816 	return (tsleep(&mp->mnt_flag, (PUSER - 1) | (flags & PCATCH),
817 	    "suspfs", 0));
818 }
819 
820 /*
821  * Filesystem write operation has completed. If we are suspending and this
822  * operation is the last one, notify the suspender that the suspension is
823  * now in effect.
824  */
825 void
826 vn_finished_write(mp)
827 	struct mount *mp;
828 {
829 
830 	if (mp == NULL)
831 		return;
832 	mp->mnt_writeopcount--;
833 	if (mp->mnt_writeopcount < 0)
834 		panic("vn_finished_write: neg cnt");
835 	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
836 	    mp->mnt_writeopcount <= 0)
837 		wakeup(&mp->mnt_writeopcount);
838 }
839 
840 /*
841  * Request a filesystem to suspend write operations.
842  */
843 void
844 vfs_write_suspend(mp)
845 	struct mount *mp;
846 {
847 	struct thread *td = curthread;
848 
849 	if (mp->mnt_kern_flag & MNTK_SUSPEND)
850 		return;
851 	mp->mnt_kern_flag |= MNTK_SUSPEND;
852 	if (mp->mnt_writeopcount > 0)
853 		(void) tsleep(&mp->mnt_writeopcount, PUSER - 1, "suspwt", 0);
854 	VFS_SYNC(mp, MNT_WAIT, td->td_proc->p_ucred, td);
855 	mp->mnt_kern_flag |= MNTK_SUSPENDED;
856 }
857 
858 /*
859  * Request a filesystem to resume write operations.
860  */
861 void
862 vfs_write_resume(mp)
863 	struct mount *mp;
864 {
865 
866 	if ((mp->mnt_kern_flag & MNTK_SUSPEND) == 0)
867 		return;
868 	mp->mnt_kern_flag &= ~(MNTK_SUSPEND | MNTK_SUSPENDED);
869 	wakeup(&mp->mnt_writeopcount);
870 	wakeup(&mp->mnt_flag);
871 }
872 
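/*
 * File table vnode kqueue filter routine.
 */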
873 static int
874 vn_kqfilter(struct file *fp, struct knote *kn)
875 {
876 
877 	return (VOP_KQFILTER(((struct vnode *)fp->f_data), kn));
878 }
879 
880 /*
881  * Simplified in-kernel wrapper calls for extended attribute access.
882  * These calls pass in a NULL credential, authorizing as "kernel" access.
883  * Set IO_NODELOCKED in ioflg if the vnode is already locked.
884  */
885 int
886 vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
887     const char *attrname, int *buflen, char *buf, struct thread *td)
888 {
889 	struct uio	auio;
890 	struct iovec	iov;
891 	int	error;
892 
893 	iov.iov_len = *buflen;
894 	iov.iov_base = buf;
895 
896 	auio.uio_iov = &iov;
897 	auio.uio_iovcnt = 1;
898 	auio.uio_rw = UIO_READ;
899 	auio.uio_segflg = UIO_SYSSPACE;
900 	auio.uio_td = td;
901 	auio.uio_offset = 0;
902 	auio.uio_resid = *buflen;
903 
904 	if ((ioflg & IO_NODELOCKED) == 0)
905 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
906 
907 	/* authorize attribute retrieval as kernel */
908 	error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td);
909 
910 	if ((ioflg & IO_NODELOCKED) == 0)
911 		VOP_UNLOCK(vp, 0, td);
912 
913 	if (error == 0) {
914 		*buflen = *buflen - auio.uio_resid;
915 	}
916 
917 	return (error);
918 }
919 
920 /*
921  * XXX failure mode if partially written?
922  */
923 int
924 vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
925     const char *attrname, int buflen, char *buf, struct thread *td)
926 {
927 	struct uio	auio;
928 	struct iovec	iov;
929 	struct mount	*mp;
930 	int	error;
931 
932 	iov.iov_len = buflen;
933 	iov.iov_base = buf;
934 
935 	auio.uio_iov = &iov;
936 	auio.uio_iovcnt = 1;
937 	auio.uio_rw = UIO_WRITE;
938 	auio.uio_segflg = UIO_SYSSPACE;
939 	auio.uio_td = td;
940 	auio.uio_offset = 0;
941 	auio.uio_resid = buflen;
942 
943 	if ((ioflg & IO_NODELOCKED) == 0) {
944 		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
945 			return (error);
946 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
947 	}
948 
949 	/* authorize attribute setting as kernel */
950 	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td);
951 
952 	if ((ioflg & IO_NODELOCKED) == 0) {
953 		vn_finished_write(mp);
954 		VOP_UNLOCK(vp, 0, td);
955 	}
956 
957 	return (error);
958 }
959 
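/*
 * Remove an extended attribute; implemented by passing a NULL uio to
 * VOP_SETEXTATTR().
 */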
960 int
961 vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
962     const char *attrname, struct thread *td)
963 {
964 	struct mount	*mp;
965 	int	error;
966 
967 	if ((ioflg & IO_NODELOCKED) == 0) {
968 		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
969 			return (error);
970 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
971 	}
972 
973 	/* authorize attribute removal as kernel */
974 	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL, NULL, td);
975 
976 	if ((ioflg & IO_NODELOCKED) == 0) {
977 		vn_finished_write(mp);
978 		VOP_UNLOCK(vp, 0, td);
979 	}
980 
981 	return (error);
982 }
983