xref: /freebsd/sys/kern/vfs_vnops.c (revision cec50dea12481dc578c0805c887ab2097e1c06c5)
1 /*
2  * Copyright (c) 1982, 1986, 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
35  */
36 
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD$");
39 
40 #include "opt_mac.h"
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/fcntl.h>
45 #include <sys/file.h>
46 #include <sys/kdb.h>
47 #include <sys/stat.h>
48 #include <sys/proc.h>
49 #include <sys/limits.h>
50 #include <sys/lock.h>
51 #include <sys/mac.h>
52 #include <sys/mount.h>
53 #include <sys/mutex.h>
54 #include <sys/namei.h>
55 #include <sys/vnode.h>
56 #include <sys/bio.h>
57 #include <sys/buf.h>
58 #include <sys/filio.h>
59 #include <sys/sx.h>
60 #include <sys/ttycom.h>
61 #include <sys/conf.h>
62 #include <sys/syslog.h>
63 #include <sys/unistd.h>
64 
65 static fo_rdwr_t	vn_read;
66 static fo_rdwr_t	vn_write;
67 static fo_ioctl_t	vn_ioctl;
68 static fo_poll_t	vn_poll;
69 static fo_kqfilter_t	vn_kqfilter;
70 static fo_stat_t	vn_statfile;
71 static fo_close_t	vn_closefile;
72 
73 struct 	fileops vnops = {
74 	.fo_read = vn_read,
75 	.fo_write = vn_write,
76 	.fo_ioctl = vn_ioctl,
77 	.fo_poll = vn_poll,
78 	.fo_kqfilter = vn_kqfilter,
79 	.fo_stat = vn_statfile,
80 	.fo_close = vn_closefile,
81 	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
82 };
83 
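/*
 * Convenience wrapper around vn_open_cred(): open or create the file named
 * by the nameidata using the credentials of the thread embedded in it.
 */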
84 int
85 vn_open(ndp, flagp, cmode, fdidx)
86 	struct nameidata *ndp;
87 	int *flagp, cmode, fdidx;
88 {
89 	struct thread *td = ndp->ni_cnd.cn_thread;
90 
91 	return (vn_open_cred(ndp, flagp, cmode, td->td_ucred, fdidx));
92 }
93 
94 /*
95  * Common code for vnode open operations.
96  * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
97  *
98  * Note that this does NOT free nameidata for the successful case,
99  * due to the NDINIT being done elsewhere.
100  */
101 int
102 vn_open_cred(ndp, flagp, cmode, cred, fdidx)
103 	struct nameidata *ndp;
104 	int *flagp, cmode;
105 	struct ucred *cred;
106 	int fdidx;
107 {
108 	struct vnode *vp;
109 	struct mount *mp;
110 	struct thread *td = ndp->ni_cnd.cn_thread;
111 	struct vattr vat;
112 	struct vattr *vap = &vat;
113 	int mode, fmode, error;
114 #ifdef LOOKUP_SHARED
115 	int exclusive;	/* The current intended lock state */
116 
117 	exclusive = 0;
118 #endif
119 
120 	GIANT_REQUIRED;
121 
122 restart:
123 	fmode = *flagp;
124 	if (fmode & O_CREAT) {
125 		ndp->ni_cnd.cn_nameiop = CREATE;
126 		ndp->ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF;
127 		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
128 			ndp->ni_cnd.cn_flags |= FOLLOW;
129 		bwillwrite();
130 		if ((error = namei(ndp)) != 0)
131 			return (error);
132 		if (ndp->ni_vp == NULL) {
133 			VATTR_NULL(vap);
134 			vap->va_type = VREG;
135 			vap->va_mode = cmode;
136 			if (fmode & O_EXCL)
137 				vap->va_vaflags |= VA_EXCLUSIVE;
138 			if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) {
139 				NDFREE(ndp, NDF_ONLY_PNBUF);
140 				vput(ndp->ni_dvp);
141 				if ((error = vn_start_write(NULL, &mp,
142 				    V_XSLEEP | PCATCH)) != 0)
143 					return (error);
144 				goto restart;
145 			}
146 #ifdef MAC
147 			error = mac_check_vnode_create(cred, ndp->ni_dvp,
148 			    &ndp->ni_cnd, vap);
149 			if (error == 0) {
150 #endif
151 				VOP_LEASE(ndp->ni_dvp, td, cred, LEASE_WRITE);
152 				error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
153 						   &ndp->ni_cnd, vap);
154 #ifdef MAC
155 			}
156 #endif
157 			vput(ndp->ni_dvp);
158 			vn_finished_write(mp);
159 			if (error) {
160 				NDFREE(ndp, NDF_ONLY_PNBUF);
161 				return (error);
162 			}
163 			ASSERT_VOP_UNLOCKED(ndp->ni_dvp, "create");
164 			ASSERT_VOP_LOCKED(ndp->ni_vp, "create");
165 			fmode &= ~O_TRUNC;
166 			vp = ndp->ni_vp;
167 #ifdef LOOKUP_SHARED
168 			exclusive = 1;
169 #endif
170 		} else {
171 			if (ndp->ni_dvp == ndp->ni_vp)
172 				vrele(ndp->ni_dvp);
173 			else
174 				vput(ndp->ni_dvp);
175 			ndp->ni_dvp = NULL;
176 			vp = ndp->ni_vp;
177 			if (fmode & O_EXCL) {
178 				error = EEXIST;
179 				goto bad;
180 			}
181 			fmode &= ~O_CREAT;
182 		}
183 	} else {
184 		ndp->ni_cnd.cn_nameiop = LOOKUP;
185 #ifdef LOOKUP_SHARED
186 		ndp->ni_cnd.cn_flags =
187 		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) |
188 		    LOCKSHARED | LOCKLEAF;
189 #else
190 		ndp->ni_cnd.cn_flags =
191 		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) | LOCKLEAF;
192 #endif
193 		if ((error = namei(ndp)) != 0)
194 			return (error);
195 		vp = ndp->ni_vp;
196 	}
197 	if (vp->v_type == VLNK) {
198 		error = EMLINK;
199 		goto bad;
200 	}
201 	if (vp->v_type == VSOCK) {
202 		error = EOPNOTSUPP;
203 		goto bad;
204 	}
205 	mode = 0;
206 	if (fmode & (FWRITE | O_TRUNC)) {
207 		if (vp->v_type == VDIR) {
208 			error = EISDIR;
209 			goto bad;
210 		}
211 		mode |= VWRITE;
212 	}
213 	if (fmode & FREAD)
214 		mode |= VREAD;
215 	if (fmode & O_APPEND)
216 		mode |= VAPPEND;
217 #ifdef MAC
218 	error = mac_check_vnode_open(cred, vp, mode);
219 	if (error)
220 		goto bad;
221 #endif
222 	if ((fmode & O_CREAT) == 0) {
223 		if (mode & VWRITE) {
224 			error = vn_writechk(vp);
225 			if (error)
226 				goto bad;
227 		}
228 		if (mode) {
229 			error = VOP_ACCESS(vp, mode, cred, td);
230 			if (error)
231 				goto bad;
232 		}
233 	}
234 	if ((error = VOP_GETATTR(vp, vap, cred, td)) == 0) {
235 		vp->v_cachedfs = vap->va_fsid;
236 		vp->v_cachedid = vap->va_fileid;
237 	}
238 	if ((error = VOP_OPEN(vp, fmode, cred, td, fdidx)) != 0)
239 		goto bad;
240 	/*
241 	 * Make sure that a VM object is created for VMIO support.
242 	 */
243 	if (vn_canvmio(vp) == TRUE) {
244 #ifdef LOOKUP_SHARED
245 		int flock;
246 
247 		if (!exclusive && VOP_GETVOBJECT(vp, NULL) != 0)
248 			VOP_LOCK(vp, LK_UPGRADE, td);
249 		/*
250 		 * In cases where the object is marked as dead, object_create
251 		 * will unlock and relock exclusive.  It is safe to call in
252 		 * here with a shared lock because we only examine fields that
253 		 * the shared lock guarantees will be stable.  In the UPGRADE
254 		 * case it is not likely that anyone has used this vnode yet
255 		 * so there will be no contention.  The logic after this call
256 		 * restores the requested locking state.
257 		 */
258 #endif
259 		if ((error = vfs_object_create(vp, td, cred)) != 0) {
260 			VOP_UNLOCK(vp, 0, td);
261 			VOP_CLOSE(vp, fmode, cred, td);
262 			NDFREE(ndp, NDF_ONLY_PNBUF);
263 			vrele(vp);
264 			*flagp = fmode;
265 			return (error);
266 		}
267 #ifdef LOOKUP_SHARED
268 		flock = VOP_ISLOCKED(vp, td);
269 		if (!exclusive && flock == LK_EXCLUSIVE)
270 			VOP_LOCK(vp, LK_DOWNGRADE, td);
271 #endif
272 	}
273 
274 	if (fmode & FWRITE)
275 		vp->v_writecount++;
276 	*flagp = fmode;
277 	ASSERT_VOP_LOCKED(vp, "vn_open_cred");
278 	return (0);
279 bad:
280 	NDFREE(ndp, NDF_ONLY_PNBUF);
281 	vput(vp);
282 	*flagp = fmode;
283 	ndp->ni_vp = NULL;
284 	return (error);
285 }
286 
287 /*
288  * Check for write permissions on the specified vnode.
289  * Prototype text segments cannot be written.
290  */
291 int
292 vn_writechk(vp)
293 	register struct vnode *vp;
294 {
295 
296 	ASSERT_VOP_LOCKED(vp, "vn_writechk");
297 	/*
298 	 * If there's shared text associated with
299 	 * the vnode, try to free it up once.  If
300 	 * we fail, we can't allow writing.
301 	 */
302 	if (vp->v_vflag & VV_TEXT)
303 		return (ETXTBSY);
304 
305 	return (0);
306 }
307 
308 /*
309  * Vnode close call
310  */
311 int
312 vn_close(vp, flags, file_cred, td)
313 	register struct vnode *vp;
314 	int flags;
315 	struct ucred *file_cred;
316 	struct thread *td;
317 {
318 	int error;
319 
320 	GIANT_REQUIRED;
321 
322 	if (flags & FWRITE)
323 		vp->v_writecount--;
324 	error = VOP_CLOSE(vp, flags, file_cred, td);
325 	/*
326 	 * XXX - In certain instances VOP_CLOSE has to do the vrele
327 	 * itself. If the vrele has been done, it will return EAGAIN
328 	 * to indicate that the vrele should not be done again. When
329 	 * this happens, we just return success. The correct thing to
330 	 * do would be to have all VOP_CLOSE instances do the vrele.
331 	 */
332 	if (error == EAGAIN)
333 		return (0);
334 	vrele(vp);
335 	return (error);
336 }
337 
338 /*
339  * Sequential heuristic - detect sequential operation
340  */
341 static __inline
342 int
343 sequential_heuristic(struct uio *uio, struct file *fp)
344 {
345 
346 	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
347 	    uio->uio_offset == fp->f_nextoff) {
348 		/*
349 		 * XXX we assume that the filesystem block size is
350 		 * the default.  Not true, but still gives us a pretty
351 		 * good indicator of how sequential the read operations
352 		 * are.
353 		 */
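		/*
		 * Illustrative arithmetic (an editorial sketch, assuming the
		 * common BKVASIZE of 16K): a sequential 64K read adds 4 to
		 * f_seqcount, so sustained sequential I/O quickly saturates
		 * at IO_SEQMAX and yields the largest read-ahead hint below.
		 */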
354 		fp->f_seqcount += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
355 		if (fp->f_seqcount > IO_SEQMAX)
356 			fp->f_seqcount = IO_SEQMAX;
357 		return (fp->f_seqcount << IO_SEQSHIFT);
358 	}
359 
360 	/*
361 	 * Not sequential, quick draw-down of seqcount
362 	 */
363 	if (fp->f_seqcount > 1)
364 		fp->f_seqcount = 1;
365 	else
366 		fp->f_seqcount = 0;
367 	return (0);
368 }
369 
370 /*
371  * Package up an I/O request on a vnode into a uio and do it.
372  */
373 int
374 vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, active_cred, file_cred,
375     aresid, td)
376 	enum uio_rw rw;
377 	struct vnode *vp;
378 	caddr_t base;
379 	int len;
380 	off_t offset;
381 	enum uio_seg segflg;
382 	int ioflg;
383 	struct ucred *active_cred;
384 	struct ucred *file_cred;
385 	int *aresid;
386 	struct thread *td;
387 {
388 	struct uio auio;
389 	struct iovec aiov;
390 	struct mount *mp;
391 	struct ucred *cred;
392 	int error;
393 
394 	GIANT_REQUIRED;
395 
396 	if ((ioflg & IO_NODELOCKED) == 0) {
397 		mp = NULL;
398 		if (rw == UIO_WRITE) {
399 			if (vp->v_type != VCHR &&
400 			    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH))
401 			    != 0)
402 				return (error);
403 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
404 		} else {
405 			/*
406 			 * XXX This should be LK_SHARED but I don't trust VFS
407 			 * enough to leave it like that until it has been
408 			 * reviewed further.
409 			 */
410 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
411 		}
412 
413 	}
414 	auio.uio_iov = &aiov;
415 	auio.uio_iovcnt = 1;
416 	aiov.iov_base = base;
417 	aiov.iov_len = len;
418 	auio.uio_resid = len;
419 	auio.uio_offset = offset;
420 	auio.uio_segflg = segflg;
421 	auio.uio_rw = rw;
422 	auio.uio_td = td;
423 	error = 0;
424 #ifdef MAC
425 	if ((ioflg & IO_NOMACCHECK) == 0) {
426 		if (rw == UIO_READ)
427 			error = mac_check_vnode_read(active_cred, file_cred,
428 			    vp);
429 		else
430 			error = mac_check_vnode_write(active_cred, file_cred,
431 			    vp);
432 	}
433 #endif
434 	if (error == 0) {
435 		if (file_cred)
436 			cred = file_cred;
437 		else
438 			cred = active_cred;
439 		if (rw == UIO_READ)
440 			error = VOP_READ(vp, &auio, ioflg, cred);
441 		else
442 			error = VOP_WRITE(vp, &auio, ioflg, cred);
443 	}
444 	if (aresid)
445 		*aresid = auio.uio_resid;
446 	else
447 		if (auio.uio_resid && error == 0)
448 			error = EIO;
449 	if ((ioflg & IO_NODELOCKED) == 0) {
450 		if (rw == UIO_WRITE)
451 			vn_finished_write(mp);
452 		VOP_UNLOCK(vp, 0, td);
453 	}
454 	return (error);
455 }
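/*
 * Illustrative call (an editorial sketch, not code from this file): read
 * the first 512 bytes of an already-locked vnode into a kernel buffer,
 * charging the thread's credential and requiring the full length:
 *
 *	char buf[512];
 *	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), (off_t)0,
 *	    UIO_SYSSPACE, IO_NODELOCKED, td->td_ucred, NOCRED, NULL, td);
 */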
456 
457 /*
458  * Package up an I/O request on a vnode into a uio and do it.  The I/O
459  * request is split up into smaller chunks and we try to avoid saturating
460  * the buffer cache while potentially holding a vnode locked, so we
461  * check bwillwrite() before calling vn_rdwr().  We also call uio_yield()
462  * to give other processes a chance to lock the vnode (either other processes
463  * core'ing the same binary, or unrelated processes scanning the directory).
464  */
465 int
466 vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, active_cred,
467     file_cred, aresid, td)
468 	enum uio_rw rw;
469 	struct vnode *vp;
470 	caddr_t base;
471 	size_t len;
472 	off_t offset;
473 	enum uio_seg segflg;
474 	int ioflg;
475 	struct ucred *active_cred;
476 	struct ucred *file_cred;
477 	size_t *aresid;
478 	struct thread *td;
479 {
480 	int error = 0;
481 	int iaresid;
482 
483 	GIANT_REQUIRED;
484 
485 	do {
486 		int chunk;
487 
488 		/*
489 		 * Force `offset' to a multiple of MAXBSIZE except possibly
490 		 * for the first chunk, so that filesystems only need to
491 		 * write full blocks except possibly for the first and last
492 		 * chunks.
493 		 */
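		/*
		 * Worked example (editorial; assumes the historical MAXBSIZE
		 * of 65536): with offset 10000 the first chunk is 55536
		 * bytes, leaving offset at 65536 so that every following
		 * chunk except possibly the last is a full 65536 bytes.
		 */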
494 		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;
495 
496 		if (chunk > len)
497 			chunk = len;
498 		if (rw != UIO_READ && vp->v_type == VREG)
499 			bwillwrite();
500 		iaresid = 0;
501 		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
502 		    ioflg, active_cred, file_cred, &iaresid, td);
503 		len -= chunk;	/* aresid calc already includes length */
504 		if (error)
505 			break;
506 		offset += chunk;
507 		base += chunk;
508 		uio_yield();
509 	} while (len);
510 	if (aresid)
511 		*aresid = len + iaresid;
512 	return (error);
513 }
514 
515 /*
516  * File table vnode read routine.
517  */
518 static int
519 vn_read(fp, uio, active_cred, flags, td)
520 	struct file *fp;
521 	struct uio *uio;
522 	struct ucred *active_cred;
523 	struct thread *td;
524 	int flags;
525 {
526 	struct vnode *vp;
527 	int error, ioflag;
528 
529 	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
530 	    uio->uio_td, td));
531 	vp = fp->f_vnode;
532 	ioflag = 0;
533 	if (fp->f_flag & FNONBLOCK)
534 		ioflag |= IO_NDELAY;
535 	if (fp->f_flag & O_DIRECT)
536 		ioflag |= IO_DIRECT;
537 	mtx_lock(&Giant);
538 	VOP_LEASE(vp, td, fp->f_cred, LEASE_READ);
539 	/*
540 	 * According to McKusick the vn lock is protecting f_offset here.
541 	 * Once this field has its own lock we can acquire the vnode lock shared.
542 	 */
543 	if ((flags & FOF_OFFSET) == 0) {
544 		vn_lock(vp, LK_EXCLUSIVE | LK_NOPAUSE | LK_RETRY, td);
545 		uio->uio_offset = fp->f_offset;
546 	} else
547 		vn_lock(vp, LK_SHARED | LK_NOPAUSE | LK_RETRY, td);
548 
549 	ioflag |= sequential_heuristic(uio, fp);
550 
551 #ifdef MAC
552 	error = mac_check_vnode_read(active_cred, fp->f_cred, vp);
553 	if (error == 0)
554 #endif
555 		error = VOP_READ(vp, uio, ioflag, fp->f_cred);
556 	if ((flags & FOF_OFFSET) == 0)
557 		fp->f_offset = uio->uio_offset;
558 	fp->f_nextoff = uio->uio_offset;
559 	VOP_UNLOCK(vp, 0, td);
560 	mtx_unlock(&Giant);
561 	return (error);
562 }
563 
564 /*
565  * File table vnode write routine.
566  */
567 static int
568 vn_write(fp, uio, active_cred, flags, td)
569 	struct file *fp;
570 	struct uio *uio;
571 	struct ucred *active_cred;
572 	struct thread *td;
573 	int flags;
574 {
575 	struct vnode *vp;
576 	struct mount *mp;
577 	int error, ioflag;
578 
579 	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
580 	    uio->uio_td, td));
581 	vp = fp->f_vnode;
582 	mtx_lock(&Giant);
583 	if (vp->v_type == VREG)
584 		bwillwrite();
585 	ioflag = IO_UNIT;
586 	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
587 		ioflag |= IO_APPEND;
588 	if (fp->f_flag & FNONBLOCK)
589 		ioflag |= IO_NDELAY;
590 	if (fp->f_flag & O_DIRECT)
591 		ioflag |= IO_DIRECT;
592 	if ((fp->f_flag & O_FSYNC) ||
593 	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
594 		ioflag |= IO_SYNC;
595 	mp = NULL;
596 	if (vp->v_type != VCHR &&
597 	    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0) {
598 		mtx_unlock(&Giant);
599 		return (error);
600 	}
601 	VOP_LEASE(vp, td, fp->f_cred, LEASE_WRITE);
602 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
603 	if ((flags & FOF_OFFSET) == 0)
604 		uio->uio_offset = fp->f_offset;
605 	ioflag |= sequential_heuristic(uio, fp);
606 #ifdef MAC
607 	error = mac_check_vnode_write(active_cred, fp->f_cred, vp);
608 	if (error == 0)
609 #endif
610 		error = VOP_WRITE(vp, uio, ioflag, fp->f_cred);
611 	if ((flags & FOF_OFFSET) == 0)
612 		fp->f_offset = uio->uio_offset;
613 	fp->f_nextoff = uio->uio_offset;
614 	VOP_UNLOCK(vp, 0, td);
615 	vn_finished_write(mp);
616 	mtx_unlock(&Giant);
617 	return (error);
618 }
619 
620 /*
621  * File table vnode stat routine.
622  */
623 static int
624 vn_statfile(fp, sb, active_cred, td)
625 	struct file *fp;
626 	struct stat *sb;
627 	struct ucred *active_cred;
628 	struct thread *td;
629 {
630 	struct vnode *vp = fp->f_vnode;
631 	int error;
632 
633 	mtx_lock(&Giant);
634 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
635 	error = vn_stat(vp, sb, active_cred, fp->f_cred, td);
636 	VOP_UNLOCK(vp, 0, td);
637 	mtx_unlock(&Giant);
638 
639 	return (error);
640 }
641 
642 /*
643  * Stat a vnode; implementation for the stat syscall
644  */
645 int
646 vn_stat(vp, sb, active_cred, file_cred, td)
647 	struct vnode *vp;
648 	register struct stat *sb;
649 	struct ucred *active_cred;
650 	struct ucred *file_cred;
651 	struct thread *td;
652 {
653 	struct vattr vattr;
654 	register struct vattr *vap;
655 	int error;
656 	u_short mode;
657 
658 	GIANT_REQUIRED;
659 
660 #ifdef MAC
661 	error = mac_check_vnode_stat(active_cred, file_cred, vp);
662 	if (error)
663 		return (error);
664 #endif
665 
666 	vap = &vattr;
667 	error = VOP_GETATTR(vp, vap, active_cred, td);
668 	if (error)
669 		return (error);
670 
671 	vp->v_cachedfs = vap->va_fsid;
672 	vp->v_cachedid = vap->va_fileid;
673 
674 	/*
675 	 * Zero the spare stat fields
676 	 */
677 	bzero(sb, sizeof *sb);
678 
679 	/*
680 	 * Copy from vattr table
681 	 */
682 	if (vap->va_fsid != VNOVAL)
683 		sb->st_dev = vap->va_fsid;
684 	else
685 		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
686 	sb->st_ino = vap->va_fileid;
687 	mode = vap->va_mode;
688 	switch (vap->va_type) {
689 	case VREG:
690 		mode |= S_IFREG;
691 		break;
692 	case VDIR:
693 		mode |= S_IFDIR;
694 		break;
695 	case VBLK:
696 		mode |= S_IFBLK;
697 		break;
698 	case VCHR:
699 		mode |= S_IFCHR;
700 		break;
701 	case VLNK:
702 		mode |= S_IFLNK;
703 		/* This is a cosmetic change; symlinks do not have a mode. */
704 		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
705 			mode &= ~ACCESSPERMS;	/* 0000 */
706 		else
707 			mode |= ACCESSPERMS;	/* 0777 */
708 		break;
709 	case VSOCK:
710 		mode |= S_IFSOCK;
711 		break;
712 	case VFIFO:
713 		mode |= S_IFIFO;
714 		break;
715 	default:
716 		return (EBADF);
717 	}
718 	sb->st_mode = mode;
719 	sb->st_nlink = vap->va_nlink;
720 	sb->st_uid = vap->va_uid;
721 	sb->st_gid = vap->va_gid;
722 	sb->st_rdev = vap->va_rdev;
723 	if (vap->va_size > OFF_MAX)
724 		return (EOVERFLOW);
725 	sb->st_size = vap->va_size;
726 	sb->st_atimespec = vap->va_atime;
727 	sb->st_mtimespec = vap->va_mtime;
728 	sb->st_ctimespec = vap->va_ctime;
729 	sb->st_birthtimespec = vap->va_birthtime;
730 
731 	/*
732 	 * According to www.opengroup.org, the meaning of st_blksize is
733 	 *   "a filesystem-specific preferred I/O block size for this
734 	 *    object.  In some filesystem types, this may vary from file
735 	 *    to file"
736 	 * Default to PAGE_SIZE after much discussion.
737 	 */
738 
739 	if (vap->va_type == VREG) {
740 		sb->st_blksize = vap->va_blocksize;
741 	} else if (vn_isdisk(vp, NULL)) {
742 		sb->st_blksize = vp->v_rdev->si_bsize_best;
743 		if (sb->st_blksize < vp->v_rdev->si_bsize_phys)
744 			sb->st_blksize = vp->v_rdev->si_bsize_phys;
745 		if (sb->st_blksize < BLKDEV_IOSIZE)
746 			sb->st_blksize = BLKDEV_IOSIZE;
747 	} else {
748 		sb->st_blksize = PAGE_SIZE;
749 	}
750 
751 	sb->st_flags = vap->va_flags;
752 	if (suser(td))
753 		sb->st_gen = 0;
754 	else
755 		sb->st_gen = vap->va_gen;
756 
757 #if (S_BLKSIZE == 512)
758 	/* Optimize this case */
759 	sb->st_blocks = vap->va_bytes >> 9;
760 #else
761 	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
762 #endif
763 	return (0);
764 }
765 
766 /*
767  * File table vnode ioctl routine.
768  */
769 static int
770 vn_ioctl(fp, com, data, active_cred, td)
771 	struct file *fp;
772 	u_long com;
773 	void *data;
774 	struct ucred *active_cred;
775 	struct thread *td;
776 {
777 	struct vnode *vp = fp->f_vnode;
778 	struct vnode *vpold;
779 	struct vattr vattr;
780 	int error;
781 
782 	GIANT_REQUIRED;
783 
784 	switch (vp->v_type) {
785 
786 	case VREG:
787 	case VDIR:
788 		if (com == FIONREAD) {
789 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
790 			error = VOP_GETATTR(vp, &vattr, active_cred, td);
791 			VOP_UNLOCK(vp, 0, td);
792 			if (error)
793 				return (error);
794 			*(int *)data = vattr.va_size - fp->f_offset;
795 			return (0);
796 		}
797 		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
798 			return (0);			/* XXX */
799 		/* FALLTHROUGH */
800 
801 	default:
802 #if 0
803 		return (ENOTTY);
804 #endif
805 	case VFIFO:
806 	case VCHR:
807 	case VBLK:
808 		if (com == FIODTYPE) {
809 			if (vp->v_type != VCHR && vp->v_type != VBLK)
810 				return (ENOTTY);
811 			*(int *)data = devsw(vp->v_rdev)->d_flags & D_TYPEMASK;
812 			return (0);
813 		}
814 		error = VOP_IOCTL(vp, com, data, fp->f_flag, active_cred, td);
815 		if (error == ENOIOCTL) {
816 #ifdef DIAGNOSTIC
817 			kdb_enter("ENOIOCTL leaked through");
818 #endif
819 			error = ENOTTY;
820 		}
821 		if (error == 0 && com == TIOCSCTTY) {
822 
823 			/* Do nothing if reassigning same control tty */
824 			sx_slock(&proctree_lock);
825 			if (td->td_proc->p_session->s_ttyvp == vp) {
826 				sx_sunlock(&proctree_lock);
827 				return (0);
828 			}
829 
830 			vpold = td->td_proc->p_session->s_ttyvp;
831 			VREF(vp);
832 			SESS_LOCK(td->td_proc->p_session);
833 			td->td_proc->p_session->s_ttyvp = vp;
834 			SESS_UNLOCK(td->td_proc->p_session);
835 
836 			sx_sunlock(&proctree_lock);
837 
838 			/* Get rid of reference to old control tty */
839 			if (vpold)
840 				vrele(vpold);
841 		}
842 		return (error);
843 	}
844 }
845 
846 /*
847  * File table vnode poll routine.
848  */
849 static int
850 vn_poll(fp, events, active_cred, td)
851 	struct file *fp;
852 	int events;
853 	struct ucred *active_cred;
854 	struct thread *td;
855 {
856 	struct vnode *vp;
857 #ifdef MAC
858 	int error;
859 #endif
860 
861 	GIANT_REQUIRED;
862 
863 	vp = fp->f_vnode;
864 #ifdef MAC
865 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
866 	error = mac_check_vnode_poll(active_cred, fp->f_cred, vp);
867 	VOP_UNLOCK(vp, 0, td);
868 	if (error)
869 		return (error);
870 #endif
871 
872 	return (VOP_POLL(vp, events, fp->f_cred, td));
873 }
874 
875 /*
876  * Check that the vnode is still valid, and if so
877  * acquire requested lock.
878  */
879 int
880 #ifndef	DEBUG_LOCKS
881 vn_lock(vp, flags, td)
882 #else
883 debug_vn_lock(vp, flags, td, filename, line)
884 #endif
885 	struct vnode *vp;
886 	int flags;
887 	struct thread *td;
888 #ifdef	DEBUG_LOCKS
889 	const char *filename;
890 	int line;
891 #endif
892 {
893 	int error;
894 
895 	do {
896 		if ((flags & LK_INTERLOCK) == 0)
897 			VI_LOCK(vp);
898 		if ((vp->v_iflag & VI_XLOCK) && vp->v_vxthread != curthread) {
899 			if ((flags & LK_NOWAIT) != 0) {
900 				VI_UNLOCK(vp);
901 				return (ENOENT);
902 			}
903 			vp->v_iflag |= VI_XWANT;
904 			msleep(vp, VI_MTX(vp), PINOD, "vn_lock", 0);
905 			if ((flags & LK_RETRY) == 0) {
906 				VI_UNLOCK(vp);
907 				return (ENOENT);
908 			}
909 		}
910 #ifdef	DEBUG_LOCKS
911 		vp->filename = filename;
912 		vp->line = line;
913 #endif
914 		/*
915 		 * lockmgr drops interlock before it will return for
916 		 * any reason.  So force the code above to relock it.
917 		 */
918 		error = VOP_LOCK(vp, flags | LK_NOPAUSE | LK_INTERLOCK, td);
919 		flags &= ~LK_INTERLOCK;
920 	} while (flags & LK_RETRY && error != 0);
921 	return (error);
922 }
923 
924 /*
925  * File table vnode close routine.
926  */
927 static int
928 vn_closefile(fp, td)
929 	struct file *fp;
930 	struct thread *td;
931 {
932 	struct vnode *vp;
933 	struct flock lf;
934 	int error;
935 
936 	vp = fp->f_vnode;
937 
938 	mtx_lock(&Giant);
939 	if (fp->f_type == DTYPE_VNODE && fp->f_flag & FHASLOCK) {
940 		lf.l_whence = SEEK_SET;
941 		lf.l_start = 0;
942 		lf.l_len = 0;
943 		lf.l_type = F_UNLCK;
944 		(void) VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, F_FLOCK);
945 	}
946 
947 	fp->f_ops = &badfileops;
948 
949 	error = vn_close(vp, fp->f_flag, fp->f_cred, td);
950 	mtx_unlock(&Giant);
951 	return (error);
952 }
953 
954 /*
955  * Preparing to start a filesystem write operation. If the operation is
956  * permitted, then we bump the count of operations in progress and
957  * proceed. If a suspend request is in progress, we wait until the
958  * suspension is over, and then proceed.
959  */
960 int
961 vn_start_write(vp, mpp, flags)
962 	struct vnode *vp;
963 	struct mount **mpp;
964 	int flags;
965 {
966 	struct mount *mp;
967 	int error;
968 
969 	GIANT_REQUIRED;
970 
971 	/*
972 	 * If a vnode is provided, get and return the mount point to which
973 	 * it will write.
974 	 */
975 	if (vp != NULL) {
976 		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
977 			*mpp = NULL;
978 			if (error != EOPNOTSUPP)
979 				return (error);
980 			return (0);
981 		}
982 	}
983 	if ((mp = *mpp) == NULL)
984 		return (0);
985 	/*
986 	 * Check on status of suspension.
987 	 */
988 	while ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
989 		if (flags & V_NOWAIT)
990 			return (EWOULDBLOCK);
991 		error = tsleep(&mp->mnt_flag, (PUSER - 1) | (flags & PCATCH),
992 		    "suspfs", 0);
993 		if (error)
994 			return (error);
995 	}
996 	if (flags & V_XSLEEP)
997 		return (0);
998 	mp->mnt_writeopcount++;
999 	return (0);
1000 }
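/*
 * Typical pairing (an editorial sketch mirroring vn_write() above):
 *
 *	if (vp->v_type != VCHR &&
 *	    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
 *		return (error);
 *	...perform the write-side VOP...
 *	vn_finished_write(mp);
 */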
1001 
1002 /*
1003  * Secondary suspension. Used by operations such as vop_inactive
1004  * routines that are needed by the higher level functions. These
1005  * are allowed to proceed until all the higher level functions have
1006  * completed (indicated by mnt_writeopcount dropping to zero). At that
1007  * time, these operations are halted until the suspension is over.
1008  */
1009 int
1010 vn_write_suspend_wait(vp, mp, flags)
1011 	struct vnode *vp;
1012 	struct mount *mp;
1013 	int flags;
1014 {
1015 	int error;
1016 
1017 	GIANT_REQUIRED;
1018 
1019 	if (vp != NULL) {
1020 		if ((error = VOP_GETWRITEMOUNT(vp, &mp)) != 0) {
1021 			if (error != EOPNOTSUPP)
1022 				return (error);
1023 			return (0);
1024 		}
1025 	}
1026 	/*
1027 	 * If we are not suspended or have not yet reached suspended
1028 	 * mode, then let the operation proceed.
1029 	 */
1030 	if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPENDED) == 0)
1031 		return (0);
1032 	if (flags & V_NOWAIT)
1033 		return (EWOULDBLOCK);
1034 	/*
1035 	 * Wait for the suspension to finish.
1036 	 */
1037 	return (tsleep(&mp->mnt_flag, (PUSER - 1) | (flags & PCATCH),
1038 	    "suspfs", 0));
1039 }
1040 
1041 /*
1042  * Filesystem write operation has completed. If we are suspending and this
1043  * operation is the last one, notify the suspender that the suspension is
1044  * now in effect.
1045  */
1046 void
1047 vn_finished_write(mp)
1048 	struct mount *mp;
1049 {
1050 
1051 	GIANT_REQUIRED;
1052 
1053 	if (mp == NULL)
1054 		return;
1055 	mp->mnt_writeopcount--;
1056 	if (mp->mnt_writeopcount < 0)
1057 		panic("vn_finished_write: neg cnt");
1058 	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
1059 	    mp->mnt_writeopcount <= 0)
1060 		wakeup(&mp->mnt_writeopcount);
1061 }
1062 
1063 /*
1064  * Request a filesystem to suspend write operations.
1065  */
1066 int
1067 vfs_write_suspend(mp)
1068 	struct mount *mp;
1069 {
1070 	struct thread *td = curthread;
1071 	int error;
1072 
1073 	GIANT_REQUIRED;
1074 
1075 	if (mp->mnt_kern_flag & MNTK_SUSPEND)
1076 		return (0);
1077 	mp->mnt_kern_flag |= MNTK_SUSPEND;
1078 	if (mp->mnt_writeopcount > 0)
1079 		(void) tsleep(&mp->mnt_writeopcount, PUSER - 1, "suspwt", 0);
1080 	if ((error = VFS_SYNC(mp, MNT_WAIT, td->td_ucred, td)) != 0) {
1081 		vfs_write_resume(mp);
1082 		return (error);
1083 	}
1084 	mp->mnt_kern_flag |= MNTK_SUSPENDED;
1085 	return (0);
1086 }
1087 
1088 /*
1089  * Request a filesystem to resume write operations.
1090  */
1091 void
1092 vfs_write_resume(mp)
1093 	struct mount *mp;
1094 {
1095 
1096 	GIANT_REQUIRED;
1097 
1098 	if ((mp->mnt_kern_flag & MNTK_SUSPEND) == 0)
1099 		return;
1100 	mp->mnt_kern_flag &= ~(MNTK_SUSPEND | MNTK_SUSPENDED);
1101 	wakeup(&mp->mnt_writeopcount);
1102 	wakeup(&mp->mnt_flag);
1103 }
1104 
1105 /*
1106  * Implement kqueues for files by translating them to vnode operations.
1107  */
1108 static int
1109 vn_kqfilter(struct file *fp, struct knote *kn)
1110 {
1111 	int error;
1112 
1113 	mtx_lock(&Giant);
1114 	error = VOP_KQFILTER(fp->f_vnode, kn);
1115 	mtx_unlock(&Giant);
1116 
1117 	return (error);
1118 }
1119 
1120 /*
1121  * Simplified in-kernel wrapper calls for extended attribute access.
1122  * These calls pass in a NULL credential, authorizing as "kernel" access.
1123  * Set IO_NODELOCKED in ioflg if the vnode is already locked.
1124  */
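/*
 * Illustrative retrieval (an editorial sketch; the attribute name and
 * buffer are hypothetical):
 *
 *	char buf[128];
 *	int buflen = sizeof(buf);
 *	error = vn_extattr_get(vp, IO_NODELOCKED, EXTATTR_NAMESPACE_SYSTEM,
 *	    "example", &buflen, buf, td);
 *
 * On success buflen is updated to the number of bytes actually copied out.
 */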
1125 int
1126 vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
1127     const char *attrname, int *buflen, char *buf, struct thread *td)
1128 {
1129 	struct uio	auio;
1130 	struct iovec	iov;
1131 	int	error;
1132 
1133 	iov.iov_len = *buflen;
1134 	iov.iov_base = buf;
1135 
1136 	auio.uio_iov = &iov;
1137 	auio.uio_iovcnt = 1;
1138 	auio.uio_rw = UIO_READ;
1139 	auio.uio_segflg = UIO_SYSSPACE;
1140 	auio.uio_td = td;
1141 	auio.uio_offset = 0;
1142 	auio.uio_resid = *buflen;
1143 
1144 	if ((ioflg & IO_NODELOCKED) == 0)
1145 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
1146 
1147 	/* authorize attribute retrieval as kernel */
1148 	error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL,
1149 	    td);
1150 
1151 	if ((ioflg & IO_NODELOCKED) == 0)
1152 		VOP_UNLOCK(vp, 0, td);
1153 
1154 	if (error == 0) {
1155 		*buflen = *buflen - auio.uio_resid;
1156 	}
1157 
1158 	return (error);
1159 }
1160 
1161 /*
1162  * XXX failure mode if partially written?
1163  */
1164 int
1165 vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
1166     const char *attrname, int buflen, char *buf, struct thread *td)
1167 {
1168 	struct uio	auio;
1169 	struct iovec	iov;
1170 	struct mount	*mp;
1171 	int	error;
1172 
1173 	iov.iov_len = buflen;
1174 	iov.iov_base = buf;
1175 
1176 	auio.uio_iov = &iov;
1177 	auio.uio_iovcnt = 1;
1178 	auio.uio_rw = UIO_WRITE;
1179 	auio.uio_segflg = UIO_SYSSPACE;
1180 	auio.uio_td = td;
1181 	auio.uio_offset = 0;
1182 	auio.uio_resid = buflen;
1183 
1184 	if ((ioflg & IO_NODELOCKED) == 0) {
1185 		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
1186 			return (error);
1187 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
1188 	}
1189 
1190 	/* authorize attribute setting as kernel */
1191 	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td);
1192 
1193 	if ((ioflg & IO_NODELOCKED) == 0) {
1194 		vn_finished_write(mp);
1195 		VOP_UNLOCK(vp, 0, td);
1196 	}
1197 
1198 	return (error);
1199 }
1200 
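/*
 * Remove a named extended attribute, again authorizing as the kernel.
 * Falls back to VOP_SETEXTATTR() with a NULL uio for filesystems that
 * implement deletion that way.
 */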
1201 int
1202 vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
1203     const char *attrname, struct thread *td)
1204 {
1205 	struct mount	*mp;
1206 	int	error;
1207 
1208 	if ((ioflg & IO_NODELOCKED) == 0) {
1209 		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
1210 			return (error);
1211 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
1212 	}
1213 
1214 	/* authorize attribute removal as kernel */
1215 	error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NULL, td);
1216 	if (error == EOPNOTSUPP)
1217 		error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
1218 		    NULL, td);
1219 
1220 	if ((ioflg & IO_NODELOCKED) == 0) {
1221 		vn_finished_write(mp);
1222 		VOP_UNLOCK(vp, 0, td);
1223 	}
1224 
1225 	return (error);
1226 }
1227