xref: /freebsd/sys/kern/vfs_vnops.c (revision 63f9a4cb2684a303e3eb2ffed39c03a2e2b28ae0)
1 /*
2  * Copyright (c) 1982, 1986, 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 4. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
35  */
36 
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD$");
39 
40 #include "opt_mac.h"
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/fcntl.h>
45 #include <sys/file.h>
46 #include <sys/kdb.h>
47 #include <sys/stat.h>
48 #include <sys/proc.h>
49 #include <sys/limits.h>
50 #include <sys/lock.h>
51 #include <sys/mac.h>
52 #include <sys/mount.h>
53 #include <sys/mutex.h>
54 #include <sys/namei.h>
55 #include <sys/vnode.h>
56 #include <sys/bio.h>
57 #include <sys/buf.h>
58 #include <sys/filio.h>
59 #include <sys/sx.h>
60 #include <sys/ttycom.h>
61 #include <sys/conf.h>
62 #include <sys/syslog.h>
63 #include <sys/unistd.h>
64 
65 static fo_rdwr_t	vn_read;
66 static fo_rdwr_t	vn_write;
67 static fo_ioctl_t	vn_ioctl;
68 static fo_poll_t	vn_poll;
69 static fo_kqfilter_t	vn_kqfilter;
70 static fo_stat_t	vn_statfile;
71 static fo_close_t	vn_closefile;
72 
73 struct 	fileops vnops = {
74 	.fo_read = vn_read,
75 	.fo_write = vn_write,
76 	.fo_ioctl = vn_ioctl,
77 	.fo_poll = vn_poll,
78 	.fo_kqfilter = vn_kqfilter,
79 	.fo_stat = vn_statfile,
80 	.fo_close = vn_closefile,
81 	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
82 };
83 
84 int
85 vn_open(ndp, flagp, cmode, fdidx)
86 	struct nameidata *ndp;
87 	int *flagp, cmode, fdidx;
88 {
89 	struct thread *td = ndp->ni_cnd.cn_thread;
90 
91 	return (vn_open_cred(ndp, flagp, cmode, td->td_ucred, fdidx));
92 }
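
/*
 * Illustrative sketch (not from the original file): a typical in-kernel
 * caller initializes a nameidata with NDINIT() and hands it to vn_open().
 * The path, open flags, and cleanup shown here are assumptions chosen for
 * the example.  Giant must be held, as vn_open() asserts.
 */
#if 0	/* example only */
	struct nameidata nd;
	int flags, error;

	flags = FREAD;
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, "/etc/motd", td);
	if ((error = vn_open(&nd, &flags, 0, -1)) != 0)
		return (error);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	/* nd.ni_vp comes back locked; unlock it for longer-term use. */
	VOP_UNLOCK(nd.ni_vp, 0, td);
	/* ... read from nd.ni_vp ... */
	vn_close(nd.ni_vp, FREAD, td->td_ucred, td);
#endif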
93 
94 /*
95  * Common code for vnode open operations.
96  * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
97  *
98  * Note that this does NOT free the nameidata in the successful case;
99  * the caller that did the NDINIT is responsible for freeing it.
100  */
101 int
102 vn_open_cred(ndp, flagp, cmode, cred, fdidx)
103 	struct nameidata *ndp;
104 	int *flagp, cmode;
105 	struct ucred *cred;
106 	int fdidx;
107 {
108 	struct vnode *vp;
109 	struct mount *mp;
110 	struct thread *td = ndp->ni_cnd.cn_thread;
111 	struct vattr vat;
112 	struct vattr *vap = &vat;
113 	int mode, fmode, error;
114 #ifdef LOOKUP_SHARED
115 	int exclusive;	/* The current intended lock state */
116 
117 	exclusive = 0;
118 #endif
119 
120 	GIANT_REQUIRED;
121 
122 restart:
123 	fmode = *flagp;
124 	if (fmode & O_CREAT) {
125 		ndp->ni_cnd.cn_nameiop = CREATE;
126 		ndp->ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF;
127 		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
128 			ndp->ni_cnd.cn_flags |= FOLLOW;
129 		bwillwrite();
130 		if ((error = namei(ndp)) != 0)
131 			return (error);
132 		if (ndp->ni_vp == NULL) {
133 			VATTR_NULL(vap);
134 			vap->va_type = VREG;
135 			vap->va_mode = cmode;
136 			if (fmode & O_EXCL)
137 				vap->va_vaflags |= VA_EXCLUSIVE;
138 			if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) {
139 				NDFREE(ndp, NDF_ONLY_PNBUF);
140 				vput(ndp->ni_dvp);
141 				if ((error = vn_start_write(NULL, &mp,
142 				    V_XSLEEP | PCATCH)) != 0)
143 					return (error);
144 				goto restart;
145 			}
146 #ifdef MAC
147 			error = mac_check_vnode_create(cred, ndp->ni_dvp,
148 			    &ndp->ni_cnd, vap);
149 			if (error == 0) {
150 #endif
151 				VOP_LEASE(ndp->ni_dvp, td, cred, LEASE_WRITE);
152 				error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
153 						   &ndp->ni_cnd, vap);
154 #ifdef MAC
155 			}
156 #endif
157 			vput(ndp->ni_dvp);
158 			vn_finished_write(mp);
159 			if (error) {
160 				NDFREE(ndp, NDF_ONLY_PNBUF);
161 				return (error);
162 			}
163 			ASSERT_VOP_UNLOCKED(ndp->ni_dvp, "create");
164 			ASSERT_VOP_LOCKED(ndp->ni_vp, "create");
165 			fmode &= ~O_TRUNC;
166 			vp = ndp->ni_vp;
167 #ifdef LOOKUP_SHARED
168 			exclusive = 1;
169 #endif
170 		} else {
171 			if (ndp->ni_dvp == ndp->ni_vp)
172 				vrele(ndp->ni_dvp);
173 			else
174 				vput(ndp->ni_dvp);
175 			ndp->ni_dvp = NULL;
176 			vp = ndp->ni_vp;
177 			if (fmode & O_EXCL) {
178 				error = EEXIST;
179 				goto bad;
180 			}
181 			fmode &= ~O_CREAT;
182 		}
183 	} else {
184 		ndp->ni_cnd.cn_nameiop = LOOKUP;
185 #ifdef LOOKUP_SHARED
186 		ndp->ni_cnd.cn_flags =
187 		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) |
188 		    LOCKSHARED | LOCKLEAF;
189 #else
190 		ndp->ni_cnd.cn_flags =
191 		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) | LOCKLEAF;
192 #endif
193 		if ((error = namei(ndp)) != 0)
194 			return (error);
195 		vp = ndp->ni_vp;
196 	}
197 	if (vp->v_type == VLNK) {
198 		error = EMLINK;
199 		goto bad;
200 	}
201 	if (vp->v_type == VSOCK) {
202 		error = EOPNOTSUPP;
203 		goto bad;
204 	}
205 	mode = 0;
206 	if (fmode & (FWRITE | O_TRUNC)) {
207 		if (vp->v_type == VDIR) {
208 			error = EISDIR;
209 			goto bad;
210 		}
211 		mode |= VWRITE;
212 	}
213 	if (fmode & FREAD)
214 		mode |= VREAD;
215 	if (fmode & O_APPEND)
216 		mode |= VAPPEND;
217 #ifdef MAC
218 	error = mac_check_vnode_open(cred, vp, mode);
219 	if (error)
220 		goto bad;
221 #endif
222 	if ((fmode & O_CREAT) == 0) {
223 		if (mode & VWRITE) {
224 			error = vn_writechk(vp);
225 			if (error)
226 				goto bad;
227 		}
228 		if (mode) {
229 			error = VOP_ACCESS(vp, mode, cred, td);
230 			if (error)
231 				goto bad;
232 		}
233 	}
234 	if ((error = VOP_OPEN(vp, fmode, cred, td, fdidx)) != 0)
235 		goto bad;
236 	/*
237 	 * Make sure that a VM object is created for VMIO support.
238 	 */
239 	if (vn_canvmio(vp) == TRUE) {
240 #ifdef LOOKUP_SHARED
241 		int flock;
242 
243 		if (!exclusive && VOP_GETVOBJECT(vp, NULL) != 0)
244 			VOP_LOCK(vp, LK_UPGRADE, td);
245 		/*
246 		 * In cases where the object is marked as dead, object_create
247 		 * will unlock and relock exclusive.  It is safe to call in
248 		 * here with a shared lock because we only examine fields that
249 		 * the shared lock guarantees will be stable.  In the UPGRADE
250 		 * case it is not likely that anyone has used this vnode yet
251 		 * so there will be no contention.  The logic after this call
252 		 * restores the requested locking state.
253 		 */
254 #endif
255 		if ((error = vfs_object_create(vp, td, cred)) != 0) {
256 			VOP_UNLOCK(vp, 0, td);
257 			VOP_CLOSE(vp, fmode, cred, td);
258 			NDFREE(ndp, NDF_ONLY_PNBUF);
259 			vrele(vp);
260 			*flagp = fmode;
261 			return (error);
262 		}
263 #ifdef LOOKUP_SHARED
264 		flock = VOP_ISLOCKED(vp, td);
265 		if (!exclusive && flock == LK_EXCLUSIVE)
266 			VOP_LOCK(vp, LK_DOWNGRADE, td);
267 #endif
268 	}
269 
270 	if (fmode & FWRITE)
271 		vp->v_writecount++;
272 	*flagp = fmode;
273 	ASSERT_VOP_LOCKED(vp, "vn_open_cred");
274 	return (0);
275 bad:
276 	NDFREE(ndp, NDF_ONLY_PNBUF);
277 	vput(vp);
278 	*flagp = fmode;
279 	ndp->ni_vp = NULL;
280 	return (error);
281 }
282 
283 /*
284  * Check for write permissions on the specified vnode.
285  * Files in use as an executable's text segment cannot be written.
286  */
287 int
288 vn_writechk(vp)
289 	register struct vnode *vp;
290 {
291 
292 	ASSERT_VOP_LOCKED(vp, "vn_writechk");
293 	/*
294 	 * If there is shared text associated with the vnode
295 	 * (VV_TEXT), we cannot allow writing: the file backs a
296 	 * running executable, so fail with ETXTBSY.
297 	 */
298 	if (vp->v_vflag & VV_TEXT)
299 		return (ETXTBSY);
300 
301 	return (0);
302 }
303 
304 /*
305  * Vnode close call
306  */
307 int
308 vn_close(vp, flags, file_cred, td)
309 	register struct vnode *vp;
310 	int flags;
311 	struct ucred *file_cred;
312 	struct thread *td;
313 {
314 	int error;
315 
316 	GIANT_REQUIRED;
317 
318 	if (flags & FWRITE)
319 		vp->v_writecount--;
320 	error = VOP_CLOSE(vp, flags, file_cred, td);
321 	/*
322 	 * XXX - In certain instances VOP_CLOSE has to do the vrele
323 	 * itself. If the vrele has been done, it will return EAGAIN
324 	 * to indicate that the vrele should not be done again. When
325 	 * this happens, we just return success. The correct thing to
326 	 * do would be to have all VOP_CLOSE instances do the vrele.
327 	 */
328 	if (error == EAGAIN)
329 		return (0);
330 	vrele(vp);
331 	return (error);
332 }
333 
334 /*
335  * Sequential heuristic - detect sequential I/O and derive a read-ahead hint
336  */
337 static __inline
338 int
339 sequential_heuristic(struct uio *uio, struct file *fp)
340 {
341 
342 	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
343 	    uio->uio_offset == fp->f_nextoff) {
344 		/*
345 		 * XXX we assume that the filesystem block size is
346 		 * the default.  Not true, but still gives us a pretty
347 		 * good indicator of how sequential the read operations
348 		 * are.
349 		 */
350 		fp->f_seqcount += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
351 		if (fp->f_seqcount > IO_SEQMAX)
352 			fp->f_seqcount = IO_SEQMAX;
353 		return (fp->f_seqcount << IO_SEQSHIFT);
354 	}
355 
356 	/*
357 	 * Not sequential, quick draw-down of seqcount
358 	 */
359 	if (fp->f_seqcount > 1)
360 		fp->f_seqcount = 1;
361 	else
362 		fp->f_seqcount = 0;
363 	return (0);
364 }
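
/*
 * Worked example (illustrative): with BKVASIZE at its usual 16K, a
 * sequential 64K read adds (65536 + 16383) / 16384 = 4 to f_seqcount.
 * The count saturates at IO_SEQMAX and is returned shifted left by
 * IO_SEQSHIFT, so filesystems can take it out of the ioflag bits as a
 * read-ahead hint.
 */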
365 
366 /*
367  * Package up an I/O request on a vnode into a uio and do it.
368  */
369 int
370 vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, active_cred, file_cred,
371     aresid, td)
372 	enum uio_rw rw;
373 	struct vnode *vp;
374 	caddr_t base;
375 	int len;
376 	off_t offset;
377 	enum uio_seg segflg;
378 	int ioflg;
379 	struct ucred *active_cred;
380 	struct ucred *file_cred;
381 	int *aresid;
382 	struct thread *td;
383 {
384 	struct uio auio;
385 	struct iovec aiov;
386 	struct mount *mp;
387 	struct ucred *cred;
388 	int error;
389 
390 	GIANT_REQUIRED;
391 
392 	if ((ioflg & IO_NODELOCKED) == 0) {
393 		mp = NULL;
394 		if (rw == UIO_WRITE) {
395 			if (vp->v_type != VCHR &&
396 			    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH))
397 			    != 0)
398 				return (error);
399 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
400 		} else {
401 			/*
402 			 * XXX This should be LK_SHARED but I don't trust VFS
403 			 * enough to leave it like that until it has been
404 			 * reviewed further.
405 			 */
406 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
407 		}
408 
409 	}
410 	auio.uio_iov = &aiov;
411 	auio.uio_iovcnt = 1;
412 	aiov.iov_base = base;
413 	aiov.iov_len = len;
414 	auio.uio_resid = len;
415 	auio.uio_offset = offset;
416 	auio.uio_segflg = segflg;
417 	auio.uio_rw = rw;
418 	auio.uio_td = td;
419 	error = 0;
420 #ifdef MAC
421 	if ((ioflg & IO_NOMACCHECK) == 0) {
422 		if (rw == UIO_READ)
423 			error = mac_check_vnode_read(active_cred, file_cred,
424 			    vp);
425 		else
426 			error = mac_check_vnode_write(active_cred, file_cred,
427 			    vp);
428 	}
429 #endif
430 	if (error == 0) {
431 		if (file_cred)
432 			cred = file_cred;
433 		else
434 			cred = active_cred;
435 		if (rw == UIO_READ)
436 			error = VOP_READ(vp, &auio, ioflg, cred);
437 		else
438 			error = VOP_WRITE(vp, &auio, ioflg, cred);
439 	}
440 	if (aresid)
441 		*aresid = auio.uio_resid;
442 	else
443 		if (auio.uio_resid && error == 0)
444 			error = EIO;
445 	if ((ioflg & IO_NODELOCKED) == 0) {
446 		if (rw == UIO_WRITE)
447 			vn_finished_write(mp);
448 		VOP_UNLOCK(vp, 0, td);
449 	}
450 	return (error);
451 }
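
/*
 * Illustrative sketch (not from the original file): reading the first
 * 512 bytes of a vnode into a kernel buffer.  The buffer and credential
 * choices are assumptions made for the example; vn_rdwr() does its own
 * locking here because IO_NODELOCKED is not passed.
 */
#if 0	/* example only */
	char buf[512];
	int resid, error;

	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), (off_t)0,
	    UIO_SYSSPACE, 0, td->td_ucred, NOCRED, &resid, td);
	if (error == 0 && resid > 0) {
		/* short read: only sizeof(buf) - resid bytes were filled */
	}
#endif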
452 
453 /*
454  * Package up an I/O request on a vnode into a uio and do it.  The I/O
455  * request is split up into smaller chunks and we try to avoid saturating
456  * the buffer cache while potentially holding a vnode locked, so we
457  * call bwillwrite() before calling vn_rdwr().  We also call uio_yield()
458  * to give other processes a chance to lock the vnode (either other processes
459  * core'ing the same binary, or unrelated processes scanning the directory).
460  */
461 int
462 vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, active_cred,
463     file_cred, aresid, td)
464 	enum uio_rw rw;
465 	struct vnode *vp;
466 	caddr_t base;
467 	size_t len;
468 	off_t offset;
469 	enum uio_seg segflg;
470 	int ioflg;
471 	struct ucred *active_cred;
472 	struct ucred *file_cred;
473 	size_t *aresid;
474 	struct thread *td;
475 {
476 	int error = 0;
477 	int iaresid;
478 
479 	GIANT_REQUIRED;
480 
481 	do {
482 		int chunk;
483 
484 		/*
485 		 * Force `offset' to a multiple of MAXBSIZE except possibly
486 		 * for the first chunk, so that filesystems only need to
487 		 * write full blocks except possibly for the first and last
488 		 * chunks.
489 		 */
490 		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;
491 
492 		if (chunk > len)
493 			chunk = len;
494 		if (rw != UIO_READ && vp->v_type == VREG)
495 			bwillwrite();
496 		iaresid = 0;
497 		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
498 		    ioflg, active_cred, file_cred, &iaresid, td);
499 		len -= chunk;	/* aresid calc already includes length */
500 		if (error)
501 			break;
502 		offset += chunk;
503 		base += chunk;
504 		uio_yield();
505 	} while (len);
506 	if (aresid)
507 		*aresid = len + iaresid;
508 	return (error);
509 }
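
/*
 * Worked example (illustrative): with MAXBSIZE at its usual 64K, a
 * 200000 byte transfer starting at offset 100000 is issued as chunks of
 * 31072 (bringing the offset to the 131072 boundary), 65536, 65536 and
 * 37856 bytes, so only the first and last chunks can touch partial
 * blocks.
 */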
510 
511 /*
512  * File table vnode read routine.
513  */
514 static int
515 vn_read(fp, uio, active_cred, flags, td)
516 	struct file *fp;
517 	struct uio *uio;
518 	struct ucred *active_cred;
519 	struct thread *td;
520 	int flags;
521 {
522 	struct vnode *vp;
523 	int error, ioflag;
524 
525 	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
526 	    uio->uio_td, td));
527 	vp = fp->f_vnode;
528 	ioflag = 0;
529 	if (fp->f_flag & FNONBLOCK)
530 		ioflag |= IO_NDELAY;
531 	if (fp->f_flag & O_DIRECT)
532 		ioflag |= IO_DIRECT;
533 	mtx_lock(&Giant);
534 	VOP_LEASE(vp, td, fp->f_cred, LEASE_READ);
535 	/*
536 	 * According to McKusick the vnode lock protects f_offset here.
537 	 * Once this field has its own lock we can take the vnode lock shared.
538 	 */
539 	if ((flags & FOF_OFFSET) == 0) {
540 		vn_lock(vp, LK_EXCLUSIVE | LK_NOPAUSE | LK_RETRY, td);
541 		uio->uio_offset = fp->f_offset;
542 	} else
543 		vn_lock(vp, LK_SHARED | LK_NOPAUSE | LK_RETRY, td);
544 
545 	ioflag |= sequential_heuristic(uio, fp);
546 
547 #ifdef MAC
548 	error = mac_check_vnode_read(active_cred, fp->f_cred, vp);
549 	if (error == 0)
550 #endif
551 		error = VOP_READ(vp, uio, ioflag, fp->f_cred);
552 	if ((flags & FOF_OFFSET) == 0)
553 		fp->f_offset = uio->uio_offset;
554 	fp->f_nextoff = uio->uio_offset;
555 	VOP_UNLOCK(vp, 0, td);
556 	mtx_unlock(&Giant);
557 	return (error);
558 }
559 
560 /*
561  * File table vnode write routine.
562  */
563 static int
564 vn_write(fp, uio, active_cred, flags, td)
565 	struct file *fp;
566 	struct uio *uio;
567 	struct ucred *active_cred;
568 	struct thread *td;
569 	int flags;
570 {
571 	struct vnode *vp;
572 	struct mount *mp;
573 	int error, ioflag;
574 
575 	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
576 	    uio->uio_td, td));
577 	vp = fp->f_vnode;
578 	mtx_lock(&Giant);
579 	if (vp->v_type == VREG)
580 		bwillwrite();
581 	ioflag = IO_UNIT;
582 	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
583 		ioflag |= IO_APPEND;
584 	if (fp->f_flag & FNONBLOCK)
585 		ioflag |= IO_NDELAY;
586 	if (fp->f_flag & O_DIRECT)
587 		ioflag |= IO_DIRECT;
588 	if ((fp->f_flag & O_FSYNC) ||
589 	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
590 		ioflag |= IO_SYNC;
591 	mp = NULL;
592 	if (vp->v_type != VCHR &&
593 	    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0) {
594 		mtx_unlock(&Giant);
595 		return (error);
596 	}
597 	VOP_LEASE(vp, td, fp->f_cred, LEASE_WRITE);
598 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
599 	if ((flags & FOF_OFFSET) == 0)
600 		uio->uio_offset = fp->f_offset;
601 	ioflag |= sequential_heuristic(uio, fp);
602 #ifdef MAC
603 	error = mac_check_vnode_write(active_cred, fp->f_cred, vp);
604 	if (error == 0)
605 #endif
606 		error = VOP_WRITE(vp, uio, ioflag, fp->f_cred);
607 	if ((flags & FOF_OFFSET) == 0)
608 		fp->f_offset = uio->uio_offset;
609 	fp->f_nextoff = uio->uio_offset;
610 	VOP_UNLOCK(vp, 0, td);
611 	vn_finished_write(mp);
612 	mtx_unlock(&Giant);
613 	return (error);
614 }
615 
616 /*
617  * File table vnode stat routine.
618  */
619 static int
620 vn_statfile(fp, sb, active_cred, td)
621 	struct file *fp;
622 	struct stat *sb;
623 	struct ucred *active_cred;
624 	struct thread *td;
625 {
626 	struct vnode *vp = fp->f_vnode;
627 	int error;
628 
629 	mtx_lock(&Giant);
630 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
631 	error = vn_stat(vp, sb, active_cred, fp->f_cred, td);
632 	VOP_UNLOCK(vp, 0, td);
633 	mtx_unlock(&Giant);
634 
635 	return (error);
636 }
637 
638 /*
639  * Stat a vnode; implementation for the stat syscall
640  */
641 int
642 vn_stat(vp, sb, active_cred, file_cred, td)
643 	struct vnode *vp;
644 	register struct stat *sb;
645 	struct ucred *active_cred;
646 	struct ucred *file_cred;
647 	struct thread *td;
648 {
649 	struct vattr vattr;
650 	register struct vattr *vap;
651 	int error;
652 	u_short mode;
653 
654 	GIANT_REQUIRED;
655 
656 #ifdef MAC
657 	error = mac_check_vnode_stat(active_cred, file_cred, vp);
658 	if (error)
659 		return (error);
660 #endif
661 
662 	vap = &vattr;
663 	error = VOP_GETATTR(vp, vap, active_cred, td);
664 	if (error)
665 		return (error);
666 
667 	/*
668 	 * Zero the spare stat fields
669 	 */
670 	bzero(sb, sizeof *sb);
671 
672 	/*
673 	 * Copy from vattr table
674 	 */
675 	if (vap->va_fsid != VNOVAL)
676 		sb->st_dev = vap->va_fsid;
677 	else
678 		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
679 	sb->st_ino = vap->va_fileid;
680 	mode = vap->va_mode;
681 	switch (vap->va_type) {
682 	case VREG:
683 		mode |= S_IFREG;
684 		break;
685 	case VDIR:
686 		mode |= S_IFDIR;
687 		break;
688 	case VBLK:
689 		mode |= S_IFBLK;
690 		break;
691 	case VCHR:
692 		mode |= S_IFCHR;
693 		break;
694 	case VLNK:
695 		mode |= S_IFLNK;
696 		/* This is a cosmetic change; symlinks do not have a mode. */
697 		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
698 			mode &= ~ACCESSPERMS;	/* 0000 */
699 		else
700 			mode |= ACCESSPERMS;	/* 0777 */
701 		break;
702 	case VSOCK:
703 		mode |= S_IFSOCK;
704 		break;
705 	case VFIFO:
706 		mode |= S_IFIFO;
707 		break;
708 	default:
709 		return (EBADF);
710 	}
711 	sb->st_mode = mode;
712 	sb->st_nlink = vap->va_nlink;
713 	sb->st_uid = vap->va_uid;
714 	sb->st_gid = vap->va_gid;
715 	sb->st_rdev = vap->va_rdev;
716 	if (vap->va_size > OFF_MAX)
717 		return (EOVERFLOW);
718 	sb->st_size = vap->va_size;
719 	sb->st_atimespec = vap->va_atime;
720 	sb->st_mtimespec = vap->va_mtime;
721 	sb->st_ctimespec = vap->va_ctime;
722 	sb->st_birthtimespec = vap->va_birthtime;
723 
724 	/*
725 	 * According to www.opengroup.org, the meaning of st_blksize is
726 	 *   "a filesystem-specific preferred I/O block size for this
727 	 *    object.  In some filesystem types, this may vary from file
728 	 *    to file"
729 	 * Default to PAGE_SIZE after much discussion.
730 	 * XXX: min(PAGE_SIZE, vp->v_bufobj.bo_bsize) may be more correct.
731 	 */
732 
733 	sb->st_blksize = PAGE_SIZE;
734 
735 	sb->st_flags = vap->va_flags;
736 	if (suser(td))
737 		sb->st_gen = 0;
738 	else
739 		sb->st_gen = vap->va_gen;
740 
741 #if (S_BLKSIZE == 512)
742 	/* Optimize this case */
743 	sb->st_blocks = vap->va_bytes >> 9;
744 #else
745 	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
746 #endif
747 	return (0);
748 }
749 
750 /*
751  * File table vnode ioctl routine.
752  */
753 static int
754 vn_ioctl(fp, com, data, active_cred, td)
755 	struct file *fp;
756 	u_long com;
757 	void *data;
758 	struct ucred *active_cred;
759 	struct thread *td;
760 {
761 	struct vnode *vp = fp->f_vnode;
762 	struct vattr vattr;
763 	int error;
764 
765 	mtx_lock(&Giant);
766 	error = ENOTTY;
767 	switch (vp->v_type) {
768 	case VREG:
769 	case VDIR:
770 		if (com == FIONREAD) {
771 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
772 			error = VOP_GETATTR(vp, &vattr, active_cred, td);
773 			VOP_UNLOCK(vp, 0, td);
774 			if (!error)
775 				*(int *)data = vattr.va_size - fp->f_offset;
776 		}
777 		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
778 			error = 0;
779 		else
780 			error = VOP_IOCTL(vp, com, data, fp->f_flag,
781 			    active_cred, td);
782 		break;
783 
784 	default:
785 		break;
786 	}
787 	mtx_unlock(&Giant);
788 	return (error);
789 }
790 
791 /*
792  * File table vnode poll routine.
793  */
794 static int
795 vn_poll(fp, events, active_cred, td)
796 	struct file *fp;
797 	int events;
798 	struct ucred *active_cred;
799 	struct thread *td;
800 {
801 	struct vnode *vp;
802 	int error;
803 
804 	mtx_lock(&Giant);
805 
806 	vp = fp->f_vnode;
807 #ifdef MAC
808 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
809 	error = mac_check_vnode_poll(active_cred, fp->f_cred, vp);
810 	VOP_UNLOCK(vp, 0, td);
811 	if (!error)
812 #endif
813 
814 	error = VOP_POLL(vp, events, fp->f_cred, td);
815 	mtx_unlock(&Giant);
816 	return (error);
817 }
818 
819 /*
820  * Check that the vnode is still valid, and if so
821  * acquire requested lock.
822  */
823 int
824 #ifndef	DEBUG_LOCKS
825 vn_lock(vp, flags, td)
826 #else
827 debug_vn_lock(vp, flags, td, filename, line)
828 #endif
829 	struct vnode *vp;
830 	int flags;
831 	struct thread *td;
832 #ifdef	DEBUG_LOCKS
833 	const char *filename;
834 	int line;
835 #endif
836 {
837 	int error;
838 
839 	do {
840 		if ((flags & LK_INTERLOCK) == 0)
841 			VI_LOCK(vp);
842 		if ((vp->v_iflag & VI_XLOCK) && vp->v_vxthread != curthread) {
843 			if ((flags & LK_NOWAIT) != 0) {
844 				VI_UNLOCK(vp);
845 				return (ENOENT);
846 			}
847 			vp->v_iflag |= VI_XWANT;
848 			msleep(vp, VI_MTX(vp), PINOD, "vn_lock", 0);
849 			if ((flags & LK_RETRY) == 0) {
850 				VI_UNLOCK(vp);
851 				return (ENOENT);
852 			}
853 		}
854 #ifdef	DEBUG_LOCKS
855 		vp->filename = filename;
856 		vp->line = line;
857 #endif
858 		/*
859 		 * lockmgr drops the interlock before it returns for any
860 		 * reason, so force the code above to relock it on retry.
861 		 */
862 		error = VOP_LOCK(vp, flags | LK_NOPAUSE | LK_INTERLOCK, td);
863 		flags &= ~LK_INTERLOCK;
864 	} while (flags & LK_RETRY && error != 0);
865 	return (error);
866 }
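
/*
 * Illustrative sketch (not from the original file): the usual pairing.
 * With LK_RETRY the loop above keeps retrying until the lock is held,
 * so callers commonly ignore the return value; without LK_RETRY the
 * caller must be prepared for ENOENT from a vnode being reclaimed.
 */
#if 0	/* example only */
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	/* ... operate on the locked vnode ... */
	VOP_UNLOCK(vp, 0, td);
#endif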
867 
868 /*
869  * File table vnode close routine.
870  */
871 static int
872 vn_closefile(fp, td)
873 	struct file *fp;
874 	struct thread *td;
875 {
876 	struct vnode *vp;
877 	struct flock lf;
878 	int error;
879 
880 	vp = fp->f_vnode;
881 
882 	mtx_lock(&Giant);
883 	if (fp->f_type == DTYPE_VNODE && fp->f_flag & FHASLOCK) {
884 		lf.l_whence = SEEK_SET;
885 		lf.l_start = 0;
886 		lf.l_len = 0;
887 		lf.l_type = F_UNLCK;
888 		(void) VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, F_FLOCK);
889 	}
890 
891 	fp->f_ops = &badfileops;
892 
893 	error = vn_close(vp, fp->f_flag, fp->f_cred, td);
894 	mtx_unlock(&Giant);
895 	return (error);
896 }
897 
898 /*
899  * Prepare to start a filesystem write operation. If the operation is
900  * permitted, then we bump the count of operations in progress and
901  * proceed. If a suspend request is in progress, we wait until the
902  * suspension is over, and then proceed.
903  */
904 int
905 vn_start_write(vp, mpp, flags)
906 	struct vnode *vp;
907 	struct mount **mpp;
908 	int flags;
909 {
910 	struct mount *mp;
911 	int error;
912 
913 	GIANT_REQUIRED;
914 
915 	/*
916 	 * If a vnode is provided, get and return the mount point to
917 	 * which it will write.
918 	 */
919 	if (vp != NULL) {
920 		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
921 			*mpp = NULL;
922 			if (error != EOPNOTSUPP)
923 				return (error);
924 			return (0);
925 		}
926 	}
927 	if ((mp = *mpp) == NULL)
928 		return (0);
929 	/*
930 	 * Check on status of suspension.
931 	 */
932 	while ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
933 		if (flags & V_NOWAIT)
934 			return (EWOULDBLOCK);
935 		error = tsleep(&mp->mnt_flag, (PUSER - 1) | (flags & PCATCH),
936 		    "suspfs", 0);
937 		if (error)
938 			return (error);
939 	}
940 	if (flags & V_XSLEEP)
941 		return (0);
942 	mp->mnt_writeopcount++;
943 	return (0);
944 }
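
/*
 * Illustrative sketch (not from the original file): the canonical
 * bracket around a write-type operation, as used elsewhere in this
 * file.  The vnode, thread, and the VOP_SETATTR() placeholder are
 * assumptions for the example.
 */
#if 0	/* example only */
	struct mount *mp;
	int error;

	if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
		return (error);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	/* ... modify the vnode, e.g. VOP_SETATTR(vp, &vattr, cred, td) ... */
	VOP_UNLOCK(vp, 0, td);
	vn_finished_write(mp);
#endif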
945 
946 /*
947  * Secondary suspension. Used by operations such as vop_inactive
948  * routines that are needed by the higher level functions. These
949  * are allowed to proceed until all the higher level functions have
950  * completed (indicated by mnt_writeopcount dropping to zero). At that
951  * time, these operations are halted until the suspension is over.
952  */
953 int
954 vn_write_suspend_wait(vp, mp, flags)
955 	struct vnode *vp;
956 	struct mount *mp;
957 	int flags;
958 {
959 	int error;
960 
961 	GIANT_REQUIRED;
962 
963 	if (vp != NULL) {
964 		if ((error = VOP_GETWRITEMOUNT(vp, &mp)) != 0) {
965 			if (error != EOPNOTSUPP)
966 				return (error);
967 			return (0);
968 		}
969 	}
970 	/*
971 	 * If we are not suspended or have not yet reached suspended
972 	 * mode, then let the operation proceed.
973 	 */
974 	if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPENDED) == 0)
975 		return (0);
976 	if (flags & V_NOWAIT)
977 		return (EWOULDBLOCK);
978 	/*
979 	 * Wait for the suspension to finish.
980 	 */
981 	return (tsleep(&mp->mnt_flag, (PUSER - 1) | (flags & PCATCH),
982 	    "suspfs", 0));
983 }
984 
985 /*
986  * Filesystem write operation has completed. If we are suspending and this
987  * operation is the last one, notify the suspender that the suspension is
988  * now in effect.
989  */
990 void
991 vn_finished_write(mp)
992 	struct mount *mp;
993 {
994 
995 	GIANT_REQUIRED;
996 
997 	if (mp == NULL)
998 		return;
999 	mp->mnt_writeopcount--;
1000 	if (mp->mnt_writeopcount < 0)
1001 		panic("vn_finished_write: neg cnt");
1002 	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
1003 	    mp->mnt_writeopcount <= 0)
1004 		wakeup(&mp->mnt_writeopcount);
1005 }
1006 
1007 /*
1008  * Request a filesystem to suspend write operations.
1009  */
1010 int
1011 vfs_write_suspend(mp)
1012 	struct mount *mp;
1013 {
1014 	struct thread *td = curthread;
1015 	int error;
1016 
1017 	GIANT_REQUIRED;
1018 
1019 	if (mp->mnt_kern_flag & MNTK_SUSPEND)
1020 		return (0);
1021 	mp->mnt_kern_flag |= MNTK_SUSPEND;
1022 	if (mp->mnt_writeopcount > 0)
1023 		(void) tsleep(&mp->mnt_writeopcount, PUSER - 1, "suspwt", 0);
1024 	if ((error = VFS_SYNC(mp, MNT_WAIT, td->td_ucred, td)) != 0) {
1025 		vfs_write_resume(mp);
1026 		return (error);
1027 	}
1028 	mp->mnt_kern_flag |= MNTK_SUSPENDED;
1029 	return (0);
1030 }
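
/*
 * Illustrative sketch (not from the original file): a consumer such as
 * a snapshot routine brackets its critical section with the suspend
 * and resume calls.
 */
#if 0	/* example only */
	if ((error = vfs_write_suspend(mp)) != 0)
		return (error);
	/* ... the filesystem is synced and quiescent here ... */
	vfs_write_resume(mp);
#endif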
1031 
1032 /*
1033  * Request a filesystem to resume write operations.
1034  */
1035 void
1036 vfs_write_resume(mp)
1037 	struct mount *mp;
1038 {
1039 
1040 	GIANT_REQUIRED;
1041 
1042 	if ((mp->mnt_kern_flag & MNTK_SUSPEND) == 0)
1043 		return;
1044 	mp->mnt_kern_flag &= ~(MNTK_SUSPEND | MNTK_SUSPENDED);
1045 	wakeup(&mp->mnt_writeopcount);
1046 	wakeup(&mp->mnt_flag);
1047 }
1048 
1049 /*
1050  * Implement kqueues for files by translating the request into the corresponding vnode operation.
1051  */
1052 static int
1053 vn_kqfilter(struct file *fp, struct knote *kn)
1054 {
1055 	int error;
1056 
1057 	mtx_lock(&Giant);
1058 	error = VOP_KQFILTER(fp->f_vnode, kn);
1059 	mtx_unlock(&Giant);
1060 
1061 	return (error);
1062 }
1063 
1064 /*
1065  * Simplified in-kernel wrapper calls for extended attribute access.
1066  * These calls pass in a NULL credential, authorizing as "kernel" access.
1067  * Set IO_NODELOCKED in ioflg if the vnode is already locked.
1068  */
1069 int
1070 vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
1071     const char *attrname, int *buflen, char *buf, struct thread *td)
1072 {
1073 	struct uio	auio;
1074 	struct iovec	iov;
1075 	int	error;
1076 
1077 	iov.iov_len = *buflen;
1078 	iov.iov_base = buf;
1079 
1080 	auio.uio_iov = &iov;
1081 	auio.uio_iovcnt = 1;
1082 	auio.uio_rw = UIO_READ;
1083 	auio.uio_segflg = UIO_SYSSPACE;
1084 	auio.uio_td = td;
1085 	auio.uio_offset = 0;
1086 	auio.uio_resid = *buflen;
1087 
1088 	if ((ioflg & IO_NODELOCKED) == 0)
1089 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
1090 
1091 	/* authorize attribute retrieval as kernel */
1092 	error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL,
1093 	    td);
1094 
1095 	if ((ioflg & IO_NODELOCKED) == 0)
1096 		VOP_UNLOCK(vp, 0, td);
1097 
1098 	if (error == 0) {
1099 		*buflen = *buflen - auio.uio_resid;
1100 	}
1101 
1102 	return (error);
1103 }
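
/*
 * Illustrative sketch (not from the original file): fetching a
 * system-namespace attribute into a fixed buffer.  The attribute name
 * is an assumption for the example; IO_NODELOCKED is passed on the
 * assumption that the caller already holds the vnode lock.
 */
#if 0	/* example only */
	char data[128];
	int buflen, error;

	buflen = sizeof(data);
	error = vn_extattr_get(vp, IO_NODELOCKED, EXTATTR_NAMESPACE_SYSTEM,
	    "posix1e.acl_access", &buflen, data, td);
	/* on success, buflen now holds the number of bytes copied out */
#endif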
1104 
1105 /*
1106  * XXX failure mode if partially written?
1107  */
1108 int
1109 vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
1110     const char *attrname, int buflen, char *buf, struct thread *td)
1111 {
1112 	struct uio	auio;
1113 	struct iovec	iov;
1114 	struct mount	*mp;
1115 	int	error;
1116 
1117 	iov.iov_len = buflen;
1118 	iov.iov_base = buf;
1119 
1120 	auio.uio_iov = &iov;
1121 	auio.uio_iovcnt = 1;
1122 	auio.uio_rw = UIO_WRITE;
1123 	auio.uio_segflg = UIO_SYSSPACE;
1124 	auio.uio_td = td;
1125 	auio.uio_offset = 0;
1126 	auio.uio_resid = buflen;
1127 
1128 	if ((ioflg & IO_NODELOCKED) == 0) {
1129 		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
1130 			return (error);
1131 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
1132 	}
1133 
1134 	/* authorize attribute setting as kernel */
1135 	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td);
1136 
1137 	if ((ioflg & IO_NODELOCKED) == 0) {
1138 		vn_finished_write(mp);
1139 		VOP_UNLOCK(vp, 0, td);
1140 	}
1141 
1142 	return (error);
1143 }
1144 
1145 int
1146 vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
1147     const char *attrname, struct thread *td)
1148 {
1149 	struct mount	*mp;
1150 	int	error;
1151 
1152 	if ((ioflg & IO_NODELOCKED) == 0) {
1153 		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
1154 			return (error);
1155 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
1156 	}
1157 
1158 	/* authorize attribute removal as kernel */
1159 	error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NULL, td);
1160 	if (error == EOPNOTSUPP)
1161 		error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
1162 		    NULL, td);
1163 
1164 	if ((ioflg & IO_NODELOCKED) == 0) {
1165 		vn_finished_write(mp);
1166 		VOP_UNLOCK(vp, 0, td);
1167 	}
1168 
1169 	return (error);
1170 }
1171