xref: /freebsd/sys/kern/vfs_vnops.c (revision 7562eaabc01a48e6b11d5b558c41e3b92dae5c2d)
1 /*
2  * Copyright (c) 1982, 1986, 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 4. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
35  */
36 
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD$");
39 
40 #include "opt_mac.h"
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/fcntl.h>
45 #include <sys/file.h>
46 #include <sys/kdb.h>
47 #include <sys/stat.h>
48 #include <sys/proc.h>
49 #include <sys/limits.h>
50 #include <sys/lock.h>
51 #include <sys/mac.h>
52 #include <sys/mount.h>
53 #include <sys/mutex.h>
54 #include <sys/namei.h>
55 #include <sys/vnode.h>
56 #include <sys/bio.h>
57 #include <sys/buf.h>
58 #include <sys/filio.h>
59 #include <sys/sx.h>
60 #include <sys/ttycom.h>
61 #include <sys/conf.h>
62 #include <sys/syslog.h>
63 #include <sys/unistd.h>
64 
65 static fo_rdwr_t	vn_read;
66 static fo_rdwr_t	vn_write;
67 static fo_ioctl_t	vn_ioctl;
68 static fo_poll_t	vn_poll;
69 static fo_kqfilter_t	vn_kqfilter;
70 static fo_stat_t	vn_statfile;
71 static fo_close_t	vn_closefile;
72 
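/*
 * File operations vector installed on file descriptors that refer to vnodes.
 */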
73 struct fileops vnops = {
74 	.fo_read = vn_read,
75 	.fo_write = vn_write,
76 	.fo_ioctl = vn_ioctl,
77 	.fo_poll = vn_poll,
78 	.fo_kqfilter = vn_kqfilter,
79 	.fo_stat = vn_statfile,
80 	.fo_close = vn_closefile,
81 	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
82 };
83 
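/*
 * Open a vnode on behalf of the thread recorded in the nameidata, using
 * that thread's credential; a thin wrapper around vn_open_cred().
 *
 * Typical usage (illustrative sketch only): the caller prepares the
 * nameidata with NDINIT() and, on success, is left with a locked,
 * referenced vnode in ndp->ni_vp:
 *
 *	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, path, td);
 *	flags = FREAD;
 *	if ((error = vn_open(&nd, &flags, 0, -1)) != 0)
 *		return (error);
 *	NDFREE(&nd, NDF_ONLY_PNBUF);
 *	vp = nd.ni_vp;
 */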
84 int
85 vn_open(ndp, flagp, cmode, fdidx)
86 	struct nameidata *ndp;
87 	int *flagp, cmode, fdidx;
88 {
89 	struct thread *td = ndp->ni_cnd.cn_thread;
90 
91 	return (vn_open_cred(ndp, flagp, cmode, td->td_ucred, fdidx));
92 }
93 
94 /*
95  * Common code for vnode open operations.
96  * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
97  *
98  * Note that this does NOT free nameidata for the successful case,
99  * due to the NDINIT being done elsewhere.
100  */
101 int
102 vn_open_cred(ndp, flagp, cmode, cred, fdidx)
103 	struct nameidata *ndp;
104 	int *flagp, cmode;
105 	struct ucred *cred;
106 	int fdidx;
107 {
108 	struct vnode *vp;
109 	struct mount *mp;
110 	struct thread *td = ndp->ni_cnd.cn_thread;
111 	struct vattr vat;
112 	struct vattr *vap = &vat;
113 	int mode, fmode, error;
114 #ifdef LOOKUP_SHARED
115 	int exclusive;	/* The current intended lock state */
116 
117 	exclusive = 0;
118 #endif
119 
120 	GIANT_REQUIRED;
121 
122 restart:
123 	fmode = *flagp;
124 	if (fmode & O_CREAT) {
125 		ndp->ni_cnd.cn_nameiop = CREATE;
126 		ndp->ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF;
127 		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
128 			ndp->ni_cnd.cn_flags |= FOLLOW;
129 		bwillwrite();
130 		if ((error = namei(ndp)) != 0)
131 			return (error);
132 		if (ndp->ni_vp == NULL) {
133 			VATTR_NULL(vap);
134 			vap->va_type = VREG;
135 			vap->va_mode = cmode;
136 			if (fmode & O_EXCL)
137 				vap->va_vaflags |= VA_EXCLUSIVE;
138 			if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) {
139 				NDFREE(ndp, NDF_ONLY_PNBUF);
140 				vput(ndp->ni_dvp);
141 				if ((error = vn_start_write(NULL, &mp,
142 				    V_XSLEEP | PCATCH)) != 0)
143 					return (error);
144 				goto restart;
145 			}
146 #ifdef MAC
147 			error = mac_check_vnode_create(cred, ndp->ni_dvp,
148 			    &ndp->ni_cnd, vap);
149 			if (error == 0) {
150 #endif
151 				VOP_LEASE(ndp->ni_dvp, td, cred, LEASE_WRITE);
152 				error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
153 						   &ndp->ni_cnd, vap);
154 #ifdef MAC
155 			}
156 #endif
157 			vput(ndp->ni_dvp);
158 			vn_finished_write(mp);
159 			if (error) {
160 				NDFREE(ndp, NDF_ONLY_PNBUF);
161 				return (error);
162 			}
163 			ASSERT_VOP_UNLOCKED(ndp->ni_dvp, "create");
164 			ASSERT_VOP_LOCKED(ndp->ni_vp, "create");
165 			fmode &= ~O_TRUNC;
166 			vp = ndp->ni_vp;
167 #ifdef LOOKUP_SHARED
168 			exclusive = 1;
169 #endif
170 		} else {
171 			if (ndp->ni_dvp == ndp->ni_vp)
172 				vrele(ndp->ni_dvp);
173 			else
174 				vput(ndp->ni_dvp);
175 			ndp->ni_dvp = NULL;
176 			vp = ndp->ni_vp;
177 			if (fmode & O_EXCL) {
178 				error = EEXIST;
179 				goto bad;
180 			}
181 			fmode &= ~O_CREAT;
182 		}
183 	} else {
184 		ndp->ni_cnd.cn_nameiop = LOOKUP;
185 #ifdef LOOKUP_SHARED
186 		ndp->ni_cnd.cn_flags =
187 		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) |
188 		    LOCKSHARED | LOCKLEAF;
189 #else
190 		ndp->ni_cnd.cn_flags =
191 		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) | LOCKLEAF;
192 #endif
193 		if ((error = namei(ndp)) != 0)
194 			return (error);
195 		vp = ndp->ni_vp;
196 	}
197 	if (vp->v_type == VLNK) {
198 		error = EMLINK;
199 		goto bad;
200 	}
201 	if (vp->v_type == VSOCK) {
202 		error = EOPNOTSUPP;
203 		goto bad;
204 	}
205 	mode = 0;
206 	if (fmode & (FWRITE | O_TRUNC)) {
207 		if (vp->v_type == VDIR) {
208 			error = EISDIR;
209 			goto bad;
210 		}
211 		mode |= VWRITE;
212 	}
213 	if (fmode & FREAD)
214 		mode |= VREAD;
215 	if (fmode & O_APPEND)
216 		mode |= VAPPEND;
217 #ifdef MAC
218 	error = mac_check_vnode_open(cred, vp, mode);
219 	if (error)
220 		goto bad;
221 #endif
222 	if ((fmode & O_CREAT) == 0) {
223 		if (mode & VWRITE) {
224 			error = vn_writechk(vp);
225 			if (error)
226 				goto bad;
227 		}
228 		if (mode) {
229 			error = VOP_ACCESS(vp, mode, cred, td);
230 			if (error)
231 				goto bad;
232 		}
233 	}
234 	if ((error = VOP_GETATTR(vp, vap, cred, td)) == 0) {
235 		vp->v_cachedfs = vap->va_fsid;
236 		vp->v_cachedid = vap->va_fileid;
237 	}
238 	if ((error = VOP_OPEN(vp, fmode, cred, td, fdidx)) != 0)
239 		goto bad;
240 	/*
241 	 * Make sure that a VM object is created for VMIO support.
242 	 */
243 	if (vn_canvmio(vp) == TRUE) {
244 #ifdef LOOKUP_SHARED
245 		int flock;
246 
247 		if (!exclusive && VOP_GETVOBJECT(vp, NULL) != 0)
248 			VOP_LOCK(vp, LK_UPGRADE, td);
249 		/*
250 		 * In cases where the object is marked as dead, object_create
251 		 * will unlock and relock exclusive.  It is safe to call in
252 		 * here with a shared lock because we only examine fields that
253 		 * the shared lock guarantees will be stable.  In the UPGRADE
254 		 * case it is not likely that anyone has used this vnode yet
255 		 * so there will be no contention.  The logic after this call
256 		 * restores the requested locking state.
257 		 */
258 #endif
259 		if ((error = vfs_object_create(vp, td, cred)) != 0) {
260 			VOP_UNLOCK(vp, 0, td);
261 			VOP_CLOSE(vp, fmode, cred, td);
262 			NDFREE(ndp, NDF_ONLY_PNBUF);
263 			vrele(vp);
264 			*flagp = fmode;
265 			return (error);
266 		}
267 #ifdef LOOKUP_SHARED
268 		flock = VOP_ISLOCKED(vp, td);
269 		if (!exclusive && flock == LK_EXCLUSIVE)
270 			VOP_LOCK(vp, LK_DOWNGRADE, td);
271 #endif
272 	}
273 
274 	if (fmode & FWRITE)
275 		vp->v_writecount++;
276 	*flagp = fmode;
277 	ASSERT_VOP_LOCKED(vp, "vn_open_cred");
278 	return (0);
279 bad:
280 	NDFREE(ndp, NDF_ONLY_PNBUF);
281 	vput(vp);
282 	*flagp = fmode;
283 	ndp->ni_vp = NULL;
284 	return (error);
285 }
286 
287 /*
288  * Check for write permissions on the specified vnode.
289  * Prototype text segments cannot be written.
290  */
291 int
292 vn_writechk(vp)
293 	register struct vnode *vp;
294 {
295 
296 	ASSERT_VOP_LOCKED(vp, "vn_writechk");
297 	/*
298 	 * If there's shared text associated with
299 	 * the vnode, try to free it up once.  If
300 	 * we fail, we can't allow writing.
301 	 */
302 	if (vp->v_vflag & VV_TEXT)
303 		return (ETXTBSY);
304 
305 	return (0);
306 }
307 
308 /*
309  * Vnode close call
310  */
311 int
312 vn_close(vp, flags, file_cred, td)
313 	register struct vnode *vp;
314 	int flags;
315 	struct ucred *file_cred;
316 	struct thread *td;
317 {
318 	int error;
319 
320 	GIANT_REQUIRED;
321 
322 	if (flags & FWRITE)
323 		vp->v_writecount--;
324 	error = VOP_CLOSE(vp, flags, file_cred, td);
325 	/*
326 	 * XXX - In certain instances VOP_CLOSE has to do the vrele
327 	 * itself. If the vrele has been done, it will return EAGAIN
328 	 * to indicate that the vrele should not be done again. When
329 	 * this happens, we just return success. The correct thing to
330 	 * do would be to have all VOP_CLOSE instances do the vrele.
331 	 */
332 	if (error == EAGAIN)
333 		return (0);
334 	vrele(vp);
335 	return (error);
336 }
337 
338 /*
339  * Sequential heuristic - detect sequential operation
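 *
 * The return value is a read-ahead hint, f_seqcount shifted left by
 * IO_SEQSHIFT, which vn_read() and vn_write() OR into the ioflag handed
 * down to VOP_READ()/VOP_WRITE().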
340  */
341 static __inline
342 int
343 sequential_heuristic(struct uio *uio, struct file *fp)
344 {
345 
346 	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
347 	    uio->uio_offset == fp->f_nextoff) {
348 		/*
349 		 * XXX we assume that the filesystem block size is
350 		 * the default.  Not true, but still gives us a pretty
351 		 * good indicator of how sequential the read operations
352 		 * are.
353 		 */
354 		fp->f_seqcount += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
355 		if (fp->f_seqcount > IO_SEQMAX)
356 			fp->f_seqcount = IO_SEQMAX;
357 		return(fp->f_seqcount << IO_SEQSHIFT);
358 	}
359 
360 	/*
361 	 * Not sequential; quickly draw down the seqcount.
362 	 */
363 	if (fp->f_seqcount > 1)
364 		fp->f_seqcount = 1;
365 	else
366 		fp->f_seqcount = 0;
367 	return(0);
368 }
369 
370 /*
371  * Package up an I/O request on a vnode into a uio and do it.
372  */
373 int
374 vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, active_cred, file_cred,
375     aresid, td)
376 	enum uio_rw rw;
377 	struct vnode *vp;
378 	caddr_t base;
379 	int len;
380 	off_t offset;
381 	enum uio_seg segflg;
382 	int ioflg;
383 	struct ucred *active_cred;
384 	struct ucred *file_cred;
385 	int *aresid;
386 	struct thread *td;
387 {
388 	struct uio auio;
389 	struct iovec aiov;
390 	struct mount *mp;
391 	struct ucred *cred;
392 	int error;
393 
394 	GIANT_REQUIRED;
395 
396 	if ((ioflg & IO_NODELOCKED) == 0) {
397 		mp = NULL;
398 		if (rw == UIO_WRITE) {
399 			if (vp->v_type != VCHR &&
400 			    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH))
401 			    != 0)
402 				return (error);
403 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
404 		} else {
405 			/*
406 			 * XXX This should be LK_SHARED but I don't trust VFS
407 			 * enough to leave it like that until it has been
408 			 * reviewed further.
409 			 */
410 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
411 		}
412 
413 	}
414 	auio.uio_iov = &aiov;
415 	auio.uio_iovcnt = 1;
416 	aiov.iov_base = base;
417 	aiov.iov_len = len;
418 	auio.uio_resid = len;
419 	auio.uio_offset = offset;
420 	auio.uio_segflg = segflg;
421 	auio.uio_rw = rw;
422 	auio.uio_td = td;
423 	error = 0;
424 #ifdef MAC
425 	if ((ioflg & IO_NOMACCHECK) == 0) {
426 		if (rw == UIO_READ)
427 			error = mac_check_vnode_read(active_cred, file_cred,
428 			    vp);
429 		else
430 			error = mac_check_vnode_write(active_cred, file_cred,
431 			    vp);
432 	}
433 #endif
434 	if (error == 0) {
435 		if (file_cred)
436 			cred = file_cred;
437 		else
438 			cred = active_cred;
439 		if (rw == UIO_READ)
440 			error = VOP_READ(vp, &auio, ioflg, cred);
441 		else
442 			error = VOP_WRITE(vp, &auio, ioflg, cred);
443 	}
444 	if (aresid)
445 		*aresid = auio.uio_resid;
446 	else
447 		if (auio.uio_resid && error == 0)
448 			error = EIO;
449 	if ((ioflg & IO_NODELOCKED) == 0) {
450 		if (rw == UIO_WRITE)
451 			vn_finished_write(mp);
452 		VOP_UNLOCK(vp, 0, td);
453 	}
454 	return (error);
455 }
456 
457 /*
458  * Package up an I/O request on a vnode into a uio and do it.  The I/O
459  * request is split up into smaller chunks and we try to avoid saturating
460  * the buffer cache while potentially holding a vnode locked, so we
461  * check bwillwrite() before calling vn_rdwr().  We also call uio_yield()
462  * call bwillwrite() before calling vn_rdwr().  We also call uio_yield()
463  * core'ing the same binary, or unrelated processes scanning the directory).
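 *
 * For example (assuming the usual 64KB MAXBSIZE), a request starting at
 * offset 1000 gets a first chunk of 64536 bytes, so every later chunk
 * begins on a MAXBSIZE boundary.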
464  */
465 int
466 vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, active_cred,
467     file_cred, aresid, td)
468 	enum uio_rw rw;
469 	struct vnode *vp;
470 	caddr_t base;
471 	size_t len;
472 	off_t offset;
473 	enum uio_seg segflg;
474 	int ioflg;
475 	struct ucred *active_cred;
476 	struct ucred *file_cred;
477 	size_t *aresid;
478 	struct thread *td;
479 {
480 	int error = 0;
481 	int iaresid;
482 
483 	GIANT_REQUIRED;
484 
485 	do {
486 		int chunk;
487 
488 		/*
489 		 * Force `offset' to a multiple of MAXBSIZE except possibly
490 		 * for the first chunk, so that filesystems only need to
491 		 * write full blocks except possibly for the first and last
492 		 * chunks.
493 		 */
494 		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;
495 
496 		if (chunk > len)
497 			chunk = len;
498 		if (rw != UIO_READ && vp->v_type == VREG)
499 			bwillwrite();
500 		iaresid = 0;
501 		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
502 		    ioflg, active_cred, file_cred, &iaresid, td);
503 		len -= chunk;	/* aresid calc already includes length */
504 		if (error)
505 			break;
506 		offset += chunk;
507 		base += chunk;
508 		uio_yield();
509 	} while (len);
510 	if (aresid)
511 		*aresid = len + iaresid;
512 	return (error);
513 }
514 
515 /*
516  * File table vnode read routine.
517  */
518 static int
519 vn_read(fp, uio, active_cred, flags, td)
520 	struct file *fp;
521 	struct uio *uio;
522 	struct ucred *active_cred;
523 	struct thread *td;
524 	int flags;
525 {
526 	struct vnode *vp;
527 	int error, ioflag;
528 
529 	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
530 	    uio->uio_td, td));
531 	vp = fp->f_vnode;
532 	ioflag = 0;
533 	if (fp->f_flag & FNONBLOCK)
534 		ioflag |= IO_NDELAY;
535 	if (fp->f_flag & O_DIRECT)
536 		ioflag |= IO_DIRECT;
537 	mtx_lock(&Giant);
538 	VOP_LEASE(vp, td, fp->f_cred, LEASE_READ);
539 	/*
540 	 * According to McKusick the vn lock is protecting f_offset here.
541 	 * Once this field has its own lock we can acquire it shared.
542 	 */
543 	if ((flags & FOF_OFFSET) == 0) {
544 		vn_lock(vp, LK_EXCLUSIVE | LK_NOPAUSE | LK_RETRY, td);
545 		uio->uio_offset = fp->f_offset;
546 	} else
547 		vn_lock(vp, LK_SHARED | LK_NOPAUSE | LK_RETRY, td);
548 
549 	ioflag |= sequential_heuristic(uio, fp);
550 
551 #ifdef MAC
552 	error = mac_check_vnode_read(active_cred, fp->f_cred, vp);
553 	if (error == 0)
554 #endif
555 		error = VOP_READ(vp, uio, ioflag, fp->f_cred);
556 	if ((flags & FOF_OFFSET) == 0)
557 		fp->f_offset = uio->uio_offset;
558 	fp->f_nextoff = uio->uio_offset;
559 	VOP_UNLOCK(vp, 0, td);
560 	mtx_unlock(&Giant);
561 	return (error);
562 }
563 
564 /*
565  * File table vnode write routine.
566  */
567 static int
568 vn_write(fp, uio, active_cred, flags, td)
569 	struct file *fp;
570 	struct uio *uio;
571 	struct ucred *active_cred;
572 	struct thread *td;
573 	int flags;
574 {
575 	struct vnode *vp;
576 	struct mount *mp;
577 	int error, ioflag;
578 
579 	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
580 	    uio->uio_td, td));
581 	vp = fp->f_vnode;
582 	mtx_lock(&Giant);
583 	if (vp->v_type == VREG)
584 		bwillwrite();
585 	ioflag = IO_UNIT;
586 	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
587 		ioflag |= IO_APPEND;
588 	if (fp->f_flag & FNONBLOCK)
589 		ioflag |= IO_NDELAY;
590 	if (fp->f_flag & O_DIRECT)
591 		ioflag |= IO_DIRECT;
592 	if ((fp->f_flag & O_FSYNC) ||
593 	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
594 		ioflag |= IO_SYNC;
595 	mp = NULL;
596 	if (vp->v_type != VCHR &&
597 	    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0) {
598 		mtx_unlock(&Giant);
599 		return (error);
600 	}
601 	VOP_LEASE(vp, td, fp->f_cred, LEASE_WRITE);
602 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
603 	if ((flags & FOF_OFFSET) == 0)
604 		uio->uio_offset = fp->f_offset;
605 	ioflag |= sequential_heuristic(uio, fp);
606 #ifdef MAC
607 	error = mac_check_vnode_write(active_cred, fp->f_cred, vp);
608 	if (error == 0)
609 #endif
610 		error = VOP_WRITE(vp, uio, ioflag, fp->f_cred);
611 	if ((flags & FOF_OFFSET) == 0)
612 		fp->f_offset = uio->uio_offset;
613 	fp->f_nextoff = uio->uio_offset;
614 	VOP_UNLOCK(vp, 0, td);
615 	vn_finished_write(mp);
616 	mtx_unlock(&Giant);
617 	return (error);
618 }
619 
620 /*
621  * File table vnode stat routine.
622  */
623 static int
624 vn_statfile(fp, sb, active_cred, td)
625 	struct file *fp;
626 	struct stat *sb;
627 	struct ucred *active_cred;
628 	struct thread *td;
629 {
630 	struct vnode *vp = fp->f_vnode;
631 	int error;
632 
633 	mtx_lock(&Giant);
634 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
635 	error = vn_stat(vp, sb, active_cred, fp->f_cred, td);
636 	VOP_UNLOCK(vp, 0, td);
637 	mtx_unlock(&Giant);
638 
639 	return (error);
640 }
641 
642 /*
643  * Stat a vnode; implementation for the stat syscall
644  */
645 int
646 vn_stat(vp, sb, active_cred, file_cred, td)
647 	struct vnode *vp;
648 	register struct stat *sb;
649 	struct ucred *active_cred;
650 	struct ucred *file_cred;
651 	struct thread *td;
652 {
653 	struct vattr vattr;
654 	register struct vattr *vap;
655 	int error;
656 	u_short mode;
657 
658 	GIANT_REQUIRED;
659 
660 #ifdef MAC
661 	error = mac_check_vnode_stat(active_cred, file_cred, vp);
662 	if (error)
663 		return (error);
664 #endif
665 
666 	vap = &vattr;
667 	error = VOP_GETATTR(vp, vap, active_cred, td);
668 	if (error)
669 		return (error);
670 
671 	vp->v_cachedfs = vap->va_fsid;
672 	vp->v_cachedid = vap->va_fileid;
673 
674 	/*
675 	 * Zero the spare stat fields
676 	 */
677 	bzero(sb, sizeof *sb);
678 
679 	/*
680 	 * Copy from vattr table
681 	 */
682 	if (vap->va_fsid != VNOVAL)
683 		sb->st_dev = vap->va_fsid;
684 	else
685 		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
686 	sb->st_ino = vap->va_fileid;
687 	mode = vap->va_mode;
688 	switch (vap->va_type) {
689 	case VREG:
690 		mode |= S_IFREG;
691 		break;
692 	case VDIR:
693 		mode |= S_IFDIR;
694 		break;
695 	case VBLK:
696 		mode |= S_IFBLK;
697 		break;
698 	case VCHR:
699 		mode |= S_IFCHR;
700 		break;
701 	case VLNK:
702 		mode |= S_IFLNK;
703 		/* This is a cosmetic change; symlinks do not have a mode. */
704 		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
705 			mode &= ~ACCESSPERMS;	/* 0000 */
706 		else
707 			mode |= ACCESSPERMS;	/* 0777 */
708 		break;
709 	case VSOCK:
710 		mode |= S_IFSOCK;
711 		break;
712 	case VFIFO:
713 		mode |= S_IFIFO;
714 		break;
715 	default:
716 		return (EBADF);
717 	}
718 	sb->st_mode = mode;
719 	sb->st_nlink = vap->va_nlink;
720 	sb->st_uid = vap->va_uid;
721 	sb->st_gid = vap->va_gid;
722 	sb->st_rdev = vap->va_rdev;
723 	if (vap->va_size > OFF_MAX)
724 		return (EOVERFLOW);
725 	sb->st_size = vap->va_size;
726 	sb->st_atimespec = vap->va_atime;
727 	sb->st_mtimespec = vap->va_mtime;
728 	sb->st_ctimespec = vap->va_ctime;
729 	sb->st_birthtimespec = vap->va_birthtime;
730 
731         /*
732 	 * According to www.opengroup.org, the meaning of st_blksize is
733 	 *   "a filesystem-specific preferred I/O block size for this
734 	 *    object.  In some filesystem types, this may vary from file
735 	 *    to file"
736 	 * Default to PAGE_SIZE after much discussion.
737 	 * XXX: min(PAGE_SIZE, vp->v_bufobj.bo_bsize) may be more correct.
738 	 */
739 
740 	sb->st_blksize = PAGE_SIZE;
741 
742 	sb->st_flags = vap->va_flags;
743 	if (suser(td))
744 		sb->st_gen = 0;
745 	else
746 		sb->st_gen = vap->va_gen;
747 
748 #if (S_BLKSIZE == 512)
749 	/* Optimize this case */
750 	sb->st_blocks = vap->va_bytes >> 9;
751 #else
752 	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
753 #endif
754 	return (0);
755 }
756 
757 /*
758  * File table vnode ioctl routine.
759  */
760 static int
761 vn_ioctl(fp, com, data, active_cred, td)
762 	struct file *fp;
763 	u_long com;
764 	void *data;
765 	struct ucred *active_cred;
766 	struct thread *td;
767 {
768 	struct vnode *vp = fp->f_vnode;
769 	struct vnode *vpold;
770 	struct vattr vattr;
771 	int error;
772 
773 	GIANT_REQUIRED;
774 
775 	switch (vp->v_type) {
776 
777 	case VREG:
778 	case VDIR:
779 		if (com == FIONREAD) {
780 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
781 			error = VOP_GETATTR(vp, &vattr, active_cred, td);
782 			VOP_UNLOCK(vp, 0, td);
783 			if (error)
784 				return (error);
785 			*(int *)data = vattr.va_size - fp->f_offset;
786 			return (0);
787 		}
788 		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
789 			return (0);			/* XXX */
790 		/* FALLTHROUGH */
791 
792 	default:
793 #if 0
794 		return (ENOTTY);
795 #endif
796 	case VFIFO:
797 	case VCHR:
798 	case VBLK:
799 		if (com == FIODTYPE) {
800 			dev_lock();
801 			if (vp->v_type != VCHR && vp->v_type != VBLK)
802 				error = ENOTTY;
803 			else if (vp->v_rdev == NULL)
804 				error = ENXIO;
805 			else if (vp->v_rdev->si_devsw == NULL)
806 				error = ENXIO;
807 			else {
808 				error = 0;
809 				*(int *)data =
810 				    vp->v_rdev->si_devsw->d_flags & D_TYPEMASK;
811 			}
812 			dev_unlock();
813 			return (error);
814 		}
815 		error = VOP_IOCTL(vp, com, data, fp->f_flag, active_cred, td);
816 		if (error == ENOIOCTL) {
817 #ifdef DIAGNOSTIC
818 			kdb_enter("ENOIOCTL leaked through");
819 #endif
820 			error = ENOTTY;
821 		}
822 		if (error == 0 && com == TIOCSCTTY) {
823 
824 			/* Do nothing if reassigning same control tty */
825 			sx_slock(&proctree_lock);
826 			if (td->td_proc->p_session->s_ttyvp == vp) {
827 				sx_sunlock(&proctree_lock);
828 				return (0);
829 			}
830 
831 			vpold = td->td_proc->p_session->s_ttyvp;
832 			VREF(vp);
833 			SESS_LOCK(td->td_proc->p_session);
834 			td->td_proc->p_session->s_ttyvp = vp;
835 			SESS_UNLOCK(td->td_proc->p_session);
836 
837 			sx_sunlock(&proctree_lock);
838 
839 			/* Get rid of reference to old control tty */
840 			if (vpold)
841 				vrele(vpold);
842 		}
843 		return (error);
844 	}
845 }
846 
847 /*
848  * File table vnode poll routine.
849  */
850 static int
851 vn_poll(fp, events, active_cred, td)
852 	struct file *fp;
853 	int events;
854 	struct ucred *active_cred;
855 	struct thread *td;
856 {
857 	struct vnode *vp;
858 #ifdef MAC
859 	int error;
860 #endif
861 
862 	GIANT_REQUIRED;
863 
864 	vp = fp->f_vnode;
865 #ifdef MAC
866 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
867 	error = mac_check_vnode_poll(active_cred, fp->f_cred, vp);
868 	VOP_UNLOCK(vp, 0, td);
869 	if (error)
870 		return (error);
871 #endif
872 
873 	return (VOP_POLL(vp, events, fp->f_cred, td));
874 }
875 
876 /*
877  * Check that the vnode is still valid, and if so
878  * acquire requested lock.
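 *
 * If the vnode is being recycled by another thread (VI_XLOCK is set), fail
 * with ENOENT when LK_NOWAIT is given; otherwise sleep until the recycling
 * finishes and, unless LK_RETRY was requested, still return ENOENT.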
879  */
880 int
881 #ifndef	DEBUG_LOCKS
882 vn_lock(vp, flags, td)
883 #else
884 debug_vn_lock(vp, flags, td, filename, line)
885 #endif
886 	struct vnode *vp;
887 	int flags;
888 	struct thread *td;
889 #ifdef	DEBUG_LOCKS
890 	const char *filename;
891 	int line;
892 #endif
893 {
894 	int error;
895 
896 	do {
897 		if ((flags & LK_INTERLOCK) == 0)
898 			VI_LOCK(vp);
899 		if ((vp->v_iflag & VI_XLOCK) && vp->v_vxthread != curthread) {
900 			if ((flags & LK_NOWAIT) != 0) {
901 				VI_UNLOCK(vp);
902 				return (ENOENT);
903 			}
904 			vp->v_iflag |= VI_XWANT;
905 			msleep(vp, VI_MTX(vp), PINOD, "vn_lock", 0);
906 			if ((flags & LK_RETRY) == 0) {
907 				VI_UNLOCK(vp);
908 				return (ENOENT);
909 			}
910 		}
911 #ifdef	DEBUG_LOCKS
912 		vp->filename = filename;
913 		vp->line = line;
914 #endif
915 		/*
916 		 * lockmgr drops interlock before it will return for
917 		 * any reason.  So force the code above to relock it.
918 		 */
919 		error = VOP_LOCK(vp, flags | LK_NOPAUSE | LK_INTERLOCK, td);
920 		flags &= ~LK_INTERLOCK;
921 	} while (flags & LK_RETRY && error != 0);
922 	return (error);
923 }
924 
925 /*
926  * File table vnode close routine.
927  */
928 static int
929 vn_closefile(fp, td)
930 	struct file *fp;
931 	struct thread *td;
932 {
933 	struct vnode *vp;
934 	struct flock lf;
935 	int error;
936 
937 	vp = fp->f_vnode;
938 
939 	mtx_lock(&Giant);
940 	if (fp->f_type == DTYPE_VNODE && fp->f_flag & FHASLOCK) {
941 		lf.l_whence = SEEK_SET;
942 		lf.l_start = 0;
943 		lf.l_len = 0;
944 		lf.l_type = F_UNLCK;
945 		(void) VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, F_FLOCK);
946 	}
947 
948 	fp->f_ops = &badfileops;
949 
950 	error = vn_close(vp, fp->f_flag, fp->f_cred, td);
951 	mtx_unlock(&Giant);
952 	return (error);
953 }
954 
955 /*
956  * Preparing to start a filesystem write operation. If the operation is
957  * permitted, then we bump the count of operations in progress and
958  * proceed. If a suspend request is in progress, we wait until the
959  * suspension is over, and then proceed.
960  */
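/*
 * Typical usage (illustrative sketch, mirroring vn_write() above): a write
 * is bracketed by vn_start_write()/vn_finished_write() around the locked
 * VOP_WRITE() call:
 *
 *	if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
 *		return (error);
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
 *	error = VOP_WRITE(vp, uio, ioflag, cred);
 *	VOP_UNLOCK(vp, 0, td);
 *	vn_finished_write(mp);
 */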
961 int
962 vn_start_write(vp, mpp, flags)
963 	struct vnode *vp;
964 	struct mount **mpp;
965 	int flags;
966 {
967 	struct mount *mp;
968 	int error;
969 
970 	GIANT_REQUIRED;
971 
972 	/*
973 	 * If a vnode is provided, get and return the mount point
974 	 * to which it will write.
975 	 */
976 	if (vp != NULL) {
977 		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
978 			*mpp = NULL;
979 			if (error != EOPNOTSUPP)
980 				return (error);
981 			return (0);
982 		}
983 	}
984 	if ((mp = *mpp) == NULL)
985 		return (0);
986 	/*
987 	 * Check on status of suspension.
988 	 */
989 	while ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
990 		if (flags & V_NOWAIT)
991 			return (EWOULDBLOCK);
992 		error = tsleep(&mp->mnt_flag, (PUSER - 1) | (flags & PCATCH),
993 		    "suspfs", 0);
994 		if (error)
995 			return (error);
996 	}
997 	if (flags & V_XSLEEP)
998 		return (0);
999 	mp->mnt_writeopcount++;
1000 	return (0);
1001 }
1002 
1003 /*
1004  * Secondary suspension. Used by operations such as vop_inactive
1005  * routines that are needed by the higher level functions. These
1006  * are allowed to proceed until all the higher level functions have
1007  * completed (indicated by mnt_writeopcount dropping to zero). At that
1008  * time, these operations are halted until the suspension is over.
1009  */
1010 int
1011 vn_write_suspend_wait(vp, mp, flags)
1012 	struct vnode *vp;
1013 	struct mount *mp;
1014 	int flags;
1015 {
1016 	int error;
1017 
1018 	GIANT_REQUIRED;
1019 
1020 	if (vp != NULL) {
1021 		if ((error = VOP_GETWRITEMOUNT(vp, &mp)) != 0) {
1022 			if (error != EOPNOTSUPP)
1023 				return (error);
1024 			return (0);
1025 		}
1026 	}
1027 	/*
1028 	 * If we are not suspended or have not yet reached suspended
1029 	 * mode, then let the operation proceed.
1030 	 */
1031 	if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPENDED) == 0)
1032 		return (0);
1033 	if (flags & V_NOWAIT)
1034 		return (EWOULDBLOCK);
1035 	/*
1036 	 * Wait for the suspension to finish.
1037 	 */
1038 	return (tsleep(&mp->mnt_flag, (PUSER - 1) | (flags & PCATCH),
1039 	    "suspfs", 0));
1040 }
1041 
1042 /*
1043  * Filesystem write operation has completed. If we are suspending and this
1044  * operation is the last one, notify the suspender that the suspension is
1045  * now in effect.
1046  */
1047 void
1048 vn_finished_write(mp)
1049 	struct mount *mp;
1050 {
1051 
1052 	GIANT_REQUIRED;
1053 
1054 	if (mp == NULL)
1055 		return;
1056 	mp->mnt_writeopcount--;
1057 	if (mp->mnt_writeopcount < 0)
1058 		panic("vn_finished_write: neg cnt");
1059 	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
1060 	    mp->mnt_writeopcount <= 0)
1061 		wakeup(&mp->mnt_writeopcount);
1062 }
1063 
1064 /*
1065  * Request a filesystem to suspend write operations.
1066  */
1067 int
1068 vfs_write_suspend(mp)
1069 	struct mount *mp;
1070 {
1071 	struct thread *td = curthread;
1072 	int error;
1073 
1074 	GIANT_REQUIRED;
1075 
1076 	if (mp->mnt_kern_flag & MNTK_SUSPEND)
1077 		return (0);
1078 	mp->mnt_kern_flag |= MNTK_SUSPEND;
1079 	if (mp->mnt_writeopcount > 0)
1080 		(void) tsleep(&mp->mnt_writeopcount, PUSER - 1, "suspwt", 0);
1081 	if ((error = VFS_SYNC(mp, MNT_WAIT, td->td_ucred, td)) != 0) {
1082 		vfs_write_resume(mp);
1083 		return (error);
1084 	}
1085 	mp->mnt_kern_flag |= MNTK_SUSPENDED;
1086 	return (0);
1087 }
1088 
1089 /*
1090  * Request a filesystem to resume write operations.
1091  */
1092 void
1093 vfs_write_resume(mp)
1094 	struct mount *mp;
1095 {
1096 
1097 	GIANT_REQUIRED;
1098 
1099 	if ((mp->mnt_kern_flag & MNTK_SUSPEND) == 0)
1100 		return;
1101 	mp->mnt_kern_flag &= ~(MNTK_SUSPEND | MNTK_SUSPENDED);
1102 	wakeup(&mp->mnt_writeopcount);
1103 	wakeup(&mp->mnt_flag);
1104 }
1105 
1106 /*
1107  * Implement kqueues for files by translating the request into a vnode operation.
1108  */
1109 static int
1110 vn_kqfilter(struct file *fp, struct knote *kn)
1111 {
1112 	int error;
1113 
1114 	mtx_lock(&Giant);
1115 	error = VOP_KQFILTER(fp->f_vnode, kn);
1116 	mtx_unlock(&Giant);
1117 
1118 	return (error);
1119 }
1120 
1121 /*
1122  * Simplified in-kernel wrapper calls for extended attribute access.
1123  * These calls pass in a NULL credential, authorizing as "kernel" access.
1124  * Set IO_NODELOCKED in ioflg if the vnode is already locked.
1125  */
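/*
 * Example (illustrative sketch; the attribute name below is hypothetical):
 * fetch a system-namespace attribute into a local buffer while already
 * holding the vnode lock:
 *
 *	char buf[128];
 *	int buflen = sizeof(buf);
 *
 *	error = vn_extattr_get(vp, IO_NODELOCKED, EXTATTR_NAMESPACE_SYSTEM,
 *	    "myattr", &buflen, buf, td);
 *	if (error == 0)
 *		... buflen now holds the number of bytes returned ...
 */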
1126 int
1127 vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
1128     const char *attrname, int *buflen, char *buf, struct thread *td)
1129 {
1130 	struct uio	auio;
1131 	struct iovec	iov;
1132 	int	error;
1133 
1134 	iov.iov_len = *buflen;
1135 	iov.iov_base = buf;
1136 
1137 	auio.uio_iov = &iov;
1138 	auio.uio_iovcnt = 1;
1139 	auio.uio_rw = UIO_READ;
1140 	auio.uio_segflg = UIO_SYSSPACE;
1141 	auio.uio_td = td;
1142 	auio.uio_offset = 0;
1143 	auio.uio_resid = *buflen;
1144 
1145 	if ((ioflg & IO_NODELOCKED) == 0)
1146 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
1147 
1148 	/* authorize attribute retrieval as kernel */
1149 	error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL,
1150 	    td);
1151 
1152 	if ((ioflg & IO_NODELOCKED) == 0)
1153 		VOP_UNLOCK(vp, 0, td);
1154 
1155 	if (error == 0) {
1156 		*buflen = *buflen - auio.uio_resid;
1157 	}
1158 
1159 	return (error);
1160 }
1161 
1162 /*
1163  * XXX failure mode if partially written?
1164  */
1165 int
1166 vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
1167     const char *attrname, int buflen, char *buf, struct thread *td)
1168 {
1169 	struct uio	auio;
1170 	struct iovec	iov;
1171 	struct mount	*mp;
1172 	int	error;
1173 
1174 	iov.iov_len = buflen;
1175 	iov.iov_base = buf;
1176 
1177 	auio.uio_iov = &iov;
1178 	auio.uio_iovcnt = 1;
1179 	auio.uio_rw = UIO_WRITE;
1180 	auio.uio_segflg = UIO_SYSSPACE;
1181 	auio.uio_td = td;
1182 	auio.uio_offset = 0;
1183 	auio.uio_resid = buflen;
1184 
1185 	if ((ioflg & IO_NODELOCKED) == 0) {
1186 		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
1187 			return (error);
1188 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
1189 	}
1190 
1191 	/* authorize attribute setting as kernel */
1192 	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td);
1193 
1194 	if ((ioflg & IO_NODELOCKED) == 0) {
1195 		vn_finished_write(mp);
1196 		VOP_UNLOCK(vp, 0, td);
1197 	}
1198 
1199 	return (error);
1200 }
1201 
1202 int
1203 vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
1204     const char *attrname, struct thread *td)
1205 {
1206 	struct mount	*mp;
1207 	int	error;
1208 
1209 	if ((ioflg & IO_NODELOCKED) == 0) {
1210 		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
1211 			return (error);
1212 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
1213 	}
1214 
1215 	/* authorize attribute removal as kernel */
1216 	error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NULL, td);
1217 	if (error == EOPNOTSUPP)
1218 		error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
1219 		    NULL, td);
1220 
1221 	if ((ioflg & IO_NODELOCKED) == 0) {
1222 		vn_finished_write(mp);
1223 		VOP_UNLOCK(vp, 0, td);
1224 	}
1225 
1226 	return (error);
1227 }
1228