xref: /freebsd/sys/kern/vfs_vnops.c (revision 6c6c03be2ddb04c54e455122799923deaefa4114)
1 /*-
2  * Copyright (c) 1982, 1986, 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 4. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
35  */
36 
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD$");
39 
40 #include "opt_mac.h"
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/fcntl.h>
45 #include <sys/file.h>
46 #include <sys/kdb.h>
47 #include <sys/stat.h>
48 #include <sys/priv.h>
49 #include <sys/proc.h>
50 #include <sys/limits.h>
51 #include <sys/lock.h>
52 #include <sys/mount.h>
53 #include <sys/mutex.h>
54 #include <sys/namei.h>
55 #include <sys/vnode.h>
56 #include <sys/bio.h>
57 #include <sys/buf.h>
58 #include <sys/filio.h>
59 #include <sys/sx.h>
60 #include <sys/ttycom.h>
61 #include <sys/conf.h>
62 #include <sys/syslog.h>
63 #include <sys/unistd.h>
64 
65 #include <security/mac/mac_framework.h>
66 
67 static fo_rdwr_t	vn_read;
68 static fo_rdwr_t	vn_write;
69 static fo_truncate_t	vn_truncate;
70 static fo_ioctl_t	vn_ioctl;
71 static fo_poll_t	vn_poll;
72 static fo_kqfilter_t	vn_kqfilter;
73 static fo_stat_t	vn_statfile;
74 static fo_close_t	vn_closefile;
75 
76 struct fileops vnops = {
77 	.fo_read = vn_read,
78 	.fo_write = vn_write,
79 	.fo_truncate = vn_truncate,
80 	.fo_ioctl = vn_ioctl,
81 	.fo_poll = vn_poll,
82 	.fo_kqfilter = vn_kqfilter,
83 	.fo_stat = vn_statfile,
84 	.fo_close = vn_closefile,
85 	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
86 };
87 
88 int
89 vn_open(ndp, flagp, cmode, fp)
90 	struct nameidata *ndp;
91 	int *flagp, cmode;
92 	struct file *fp;
93 {
94 	struct thread *td = ndp->ni_cnd.cn_thread;
95 
96 	return (vn_open_cred(ndp, flagp, cmode, td->td_ucred, fp));
97 }
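
/*
 * Example (a sketch only; error paths and Giant handling are trimmed): an
 * in-kernel caller typically pairs NDINIT() with vn_open() and is then
 * responsible for unlocking and closing the vnode:
 *
 *	struct nameidata nd;
 *	int flags = FREAD;
 *
 *	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, path, td);
 *	error = vn_open(&nd, &flags, 0, NULL);
 *	if (error == 0) {
 *		NDFREE(&nd, NDF_ONLY_PNBUF);
 *		... use nd.ni_vp, which is returned locked ...
 *		VOP_UNLOCK(nd.ni_vp, 0);
 *		vn_close(nd.ni_vp, FREAD, td->td_ucred, td);
 *	}
 */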
98 
99 /*
100  * Common code for vnode open operations.
101  * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
102  *
103  * Note that this does NOT free nameidata for the successful case,
104  * due to the NDINIT being done elsewhere.
105  */
106 int
107 vn_open_cred(ndp, flagp, cmode, cred, fp)
108 	struct nameidata *ndp;
109 	int *flagp, cmode;
110 	struct ucred *cred;
111 	struct file *fp;
112 {
113 	struct vnode *vp;
114 	struct mount *mp;
115 	struct thread *td = ndp->ni_cnd.cn_thread;
116 	struct vattr vat;
117 	struct vattr *vap = &vat;
118 	int mode, fmode, error;
119 	int vfslocked, mpsafe;
120 
121 	mpsafe = ndp->ni_cnd.cn_flags & MPSAFE;
122 restart:
123 	vfslocked = 0;
124 	fmode = *flagp;
125 	if (fmode & O_CREAT) {
126 		ndp->ni_cnd.cn_nameiop = CREATE;
127 		ndp->ni_cnd.cn_flags = ISOPEN | LOCKPARENT | LOCKLEAF |
128 		    MPSAFE | AUDITVNODE1;
129 		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
130 			ndp->ni_cnd.cn_flags |= FOLLOW;
131 		bwillwrite();
132 		if ((error = namei(ndp)) != 0)
133 			return (error);
134 		vfslocked = NDHASGIANT(ndp);
135 		if (!mpsafe)
136 			ndp->ni_cnd.cn_flags &= ~MPSAFE;
137 		if (ndp->ni_vp == NULL) {
138 			VATTR_NULL(vap);
139 			vap->va_type = VREG;
140 			vap->va_mode = cmode;
141 			if (fmode & O_EXCL)
142 				vap->va_vaflags |= VA_EXCLUSIVE;
143 			if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) {
144 				NDFREE(ndp, NDF_ONLY_PNBUF);
145 				vput(ndp->ni_dvp);
146 				VFS_UNLOCK_GIANT(vfslocked);
147 				if ((error = vn_start_write(NULL, &mp,
148 				    V_XSLEEP | PCATCH)) != 0)
149 					return (error);
150 				goto restart;
151 			}
152 #ifdef MAC
153 			error = mac_vnode_check_create(cred, ndp->ni_dvp,
154 			    &ndp->ni_cnd, vap);
155 			if (error == 0) {
156 #endif
157 				VOP_LEASE(ndp->ni_dvp, td, cred, LEASE_WRITE);
158 				error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
159 						   &ndp->ni_cnd, vap);
160 #ifdef MAC
161 			}
162 #endif
163 			vput(ndp->ni_dvp);
164 			vn_finished_write(mp);
165 			if (error) {
166 				VFS_UNLOCK_GIANT(vfslocked);
167 				NDFREE(ndp, NDF_ONLY_PNBUF);
168 				return (error);
169 			}
170 			fmode &= ~O_TRUNC;
171 			vp = ndp->ni_vp;
172 		} else {
173 			if (ndp->ni_dvp == ndp->ni_vp)
174 				vrele(ndp->ni_dvp);
175 			else
176 				vput(ndp->ni_dvp);
177 			ndp->ni_dvp = NULL;
178 			vp = ndp->ni_vp;
179 			if (fmode & O_EXCL) {
180 				error = EEXIST;
181 				goto bad;
182 			}
183 			fmode &= ~O_CREAT;
184 		}
185 	} else {
186 		ndp->ni_cnd.cn_nameiop = LOOKUP;
187 		ndp->ni_cnd.cn_flags = ISOPEN |
188 		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) |
189 		    LOCKLEAF | MPSAFE | AUDITVNODE1;
190 		if ((error = namei(ndp)) != 0)
191 			return (error);
192 		if (!mpsafe)
193 			ndp->ni_cnd.cn_flags &= ~MPSAFE;
194 		vfslocked = NDHASGIANT(ndp);
195 		vp = ndp->ni_vp;
196 	}
197 	if (vp->v_type == VLNK) {
198 		error = EMLINK;
199 		goto bad;
200 	}
201 	if (vp->v_type == VSOCK) {
202 		error = EOPNOTSUPP;
203 		goto bad;
204 	}
205 	mode = 0;
206 	if (fmode & (FWRITE | O_TRUNC)) {
207 		if (vp->v_type == VDIR) {
208 			error = EISDIR;
209 			goto bad;
210 		}
211 		mode |= VWRITE;
212 	}
213 	if (fmode & FREAD)
214 		mode |= VREAD;
215 	if (fmode & FEXEC)
216 		mode |= VEXEC;
217 	if (fmode & O_APPEND)
218 		mode |= VAPPEND;
219 #ifdef MAC
220 	error = mac_vnode_check_open(cred, vp, mode);
221 	if (error)
222 		goto bad;
223 #endif
224 	if ((fmode & O_CREAT) == 0) {
225 		if (mode & VWRITE) {
226 			error = vn_writechk(vp);
227 			if (error)
228 				goto bad;
229 		}
230 		if (mode) {
231 			error = VOP_ACCESS(vp, mode, cred, td);
232 			if (error)
233 				goto bad;
234 		}
235 	}
236 	if ((error = VOP_OPEN(vp, fmode, cred, td, fp)) != 0)
237 		goto bad;
238 
239 	if (fmode & FWRITE)
240 		vp->v_writecount++;
241 	*flagp = fmode;
242 	ASSERT_VOP_ELOCKED(vp, "vn_open_cred");
243 	if (!mpsafe)
244 		VFS_UNLOCK_GIANT(vfslocked);
245 	return (0);
246 bad:
247 	NDFREE(ndp, NDF_ONLY_PNBUF);
248 	vput(vp);
249 	VFS_UNLOCK_GIANT(vfslocked);
250 	*flagp = fmode;
251 	ndp->ni_vp = NULL;
252 	return (error);
253 }
254 
255 /*
256  * Check for write permissions on the specified vnode.
257  * Prototype text segments cannot be written.
258  */
259 int
260 vn_writechk(vp)
261 	register struct vnode *vp;
262 {
263 
264 	ASSERT_VOP_LOCKED(vp, "vn_writechk");
265 	/*
266 	 * If there is shared text associated with the vnode (it is
267 	 * backing a running executable), disallow writing; the text
268 	 * reference must go away before the vnode may be written.
269 	 */
270 	if (vp->v_vflag & VV_TEXT)
271 		return (ETXTBSY);
272 
273 	return (0);
274 }
275 
276 /*
277  * Vnode close call
278  */
279 int
280 vn_close(vp, flags, file_cred, td)
281 	register struct vnode *vp;
282 	int flags;
283 	struct ucred *file_cred;
284 	struct thread *td;
285 {
286 	struct mount *mp;
287 	int error;
288 
289 	VFS_ASSERT_GIANT(vp->v_mount);
290 
291 	vn_start_write(vp, &mp, V_WAIT);
292 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
293 	if (flags & FWRITE) {
294 		VNASSERT(vp->v_writecount > 0, vp,
295 		    ("vn_close: negative writecount"));
296 		vp->v_writecount--;
297 	}
298 	error = VOP_CLOSE(vp, flags, file_cred, td);
299 	vput(vp);
300 	vn_finished_write(mp);
301 	return (error);
302 }
303 
304 /*
305  * Heuristic to detect sequential operation.
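 *
 * The value returned is already shifted by IO_SEQSHIFT so that vn_read()
 * and vn_write() can simply OR it into the ioflag they pass down;
 * filesystems (FFS, for example) recover the hint with
 * (ioflag >> IO_SEQSHIFT) to size read-ahead and clustering.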
306  */
307 static int
308 sequential_heuristic(struct uio *uio, struct file *fp)
309 {
310 
311 	/*
312 	 * Offset 0 is handled specially.  open() sets f_seqcount to 1 so
313 	 * that the first I/O is normally considered to be slightly
314 	 * sequential.  Seeking to offset 0 doesn't change sequentiality
315 	 * unless previous seeks have reduced f_seqcount to 0, in which
316 	 * case offset 0 is not special.
317 	 */
318 	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
319 	    uio->uio_offset == fp->f_nextoff) {
320 		/*
321 		 * f_seqcount is in units of fixed-size blocks so that it
322 		 * depends mainly on the amount of sequential I/O and not
323 		 * much on the number of sequential I/O's.  The fixed size
324 		 * of 16384 is hard-coded here since it is (not quite) just
325 		 * a magic size that works well here.  This size is more
326 		 * closely related to the best I/O size for real disks than
327 		 * to any block size used by software.
328 		 */
329 		fp->f_seqcount += howmany(uio->uio_resid, 16384);
330 		if (fp->f_seqcount > IO_SEQMAX)
331 			fp->f_seqcount = IO_SEQMAX;
332 		return (fp->f_seqcount << IO_SEQSHIFT);
333 	}
334 
335 	/* Not sequential.  Quickly draw down sequentiality. */
336 	if (fp->f_seqcount > 1)
337 		fp->f_seqcount = 1;
338 	else
339 		fp->f_seqcount = 0;
340 	return (0);
341 }
342 
343 /*
344  * Package up an I/O request on a vnode into a uio and do it.
345  */
346 int
347 vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, active_cred, file_cred,
348     aresid, td)
349 	enum uio_rw rw;
350 	struct vnode *vp;
351 	void *base;
352 	int len;
353 	off_t offset;
354 	enum uio_seg segflg;
355 	int ioflg;
356 	struct ucred *active_cred;
357 	struct ucred *file_cred;
358 	int *aresid;
359 	struct thread *td;
360 {
361 	struct uio auio;
362 	struct iovec aiov;
363 	struct mount *mp;
364 	struct ucred *cred;
365 	int error;
366 
367 	VFS_ASSERT_GIANT(vp->v_mount);
368 
369 	if ((ioflg & IO_NODELOCKED) == 0) {
370 		mp = NULL;
371 		if (rw == UIO_WRITE) {
372 			if (vp->v_type != VCHR &&
373 			    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH))
374 			    != 0)
375 				return (error);
376 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
377 		} else {
378 			/*
379 			 * XXX This should be LK_SHARED but I don't trust VFS
380 			 * enough to leave it like that until it has been
381 			 * reviewed further.
382 			 */
383 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
384 		}
385 
386 	}
387 	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
388 	auio.uio_iov = &aiov;
389 	auio.uio_iovcnt = 1;
390 	aiov.iov_base = base;
391 	aiov.iov_len = len;
392 	auio.uio_resid = len;
393 	auio.uio_offset = offset;
394 	auio.uio_segflg = segflg;
395 	auio.uio_rw = rw;
396 	auio.uio_td = td;
397 	error = 0;
398 #ifdef MAC
399 	if ((ioflg & IO_NOMACCHECK) == 0) {
400 		if (rw == UIO_READ)
401 			error = mac_vnode_check_read(active_cred, file_cred,
402 			    vp);
403 		else
404 			error = mac_vnode_check_write(active_cred, file_cred,
405 			    vp);
406 	}
407 #endif
408 	if (error == 0) {
409 		if (file_cred)
410 			cred = file_cred;
411 		else
412 			cred = active_cred;
413 		if (rw == UIO_READ)
414 			error = VOP_READ(vp, &auio, ioflg, cred);
415 		else
416 			error = VOP_WRITE(vp, &auio, ioflg, cred);
417 	}
418 	if (aresid)
419 		*aresid = auio.uio_resid;
420 	else
421 		if (auio.uio_resid && error == 0)
422 			error = EIO;
423 	if ((ioflg & IO_NODELOCKED) == 0) {
424 		if (rw == UIO_WRITE && vp->v_type != VCHR)
425 			vn_finished_write(mp);
426 		VOP_UNLOCK(vp, 0);
427 	}
428 	return (error);
429 }
430 
431 /*
432  * Package up an I/O request on a vnode into a uio and do it.  The I/O
433  * request is split up into smaller chunks and we try to avoid saturating
434  * the buffer cache while potentially holding a vnode locked, so we
435  * call bwillwrite() before calling vn_rdwr().  We also call uio_yield()
436  * to give other processes a chance to lock the vnode (either other processes
437  * core'ing the same binary, or unrelated processes scanning the directory).
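 * On error or a short transfer, the residual returned through `aresid'
 * is the untransferred part of the current chunk plus all of the chunks
 * not yet attempted.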
438  */
439 int
440 vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, active_cred,
441     file_cred, aresid, td)
442 	enum uio_rw rw;
443 	struct vnode *vp;
444 	void *base;
445 	size_t len;
446 	off_t offset;
447 	enum uio_seg segflg;
448 	int ioflg;
449 	struct ucred *active_cred;
450 	struct ucred *file_cred;
451 	size_t *aresid;
452 	struct thread *td;
453 {
454 	int error = 0;
455 	int iaresid;
456 
457 	VFS_ASSERT_GIANT(vp->v_mount);
458 
459 	do {
460 		int chunk;
461 
462 		/*
463 		 * Force `offset' to a multiple of MAXBSIZE except possibly
464 		 * for the first chunk, so that filesystems only need to
465 		 * write full blocks except possibly for the first and last
466 		 * chunks.
467 		 */
468 		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;
469 
470 		if (chunk > len)
471 			chunk = len;
472 		if (rw != UIO_READ && vp->v_type == VREG)
473 			bwillwrite();
474 		iaresid = 0;
475 		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
476 		    ioflg, active_cred, file_cred, &iaresid, td);
477 		len -= chunk;	/* aresid calc already includes length */
478 		if (error)
479 			break;
480 		offset += chunk;
481 		base = (char *)base + chunk;
482 		uio_yield();
483 	} while (len);
484 	if (aresid)
485 		*aresid = len + iaresid;
486 	return (error);
487 }
488 
489 /*
490  * File table vnode read routine.
491  */
492 static int
493 vn_read(fp, uio, active_cred, flags, td)
494 	struct file *fp;
495 	struct uio *uio;
496 	struct ucred *active_cred;
497 	struct thread *td;
498 	int flags;
499 {
500 	struct vnode *vp;
501 	int error, ioflag;
502 	struct mtx *mtxp;
503 	int vfslocked;
504 
505 	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
506 	    uio->uio_td, td));
507 	mtxp = NULL;
508 	vp = fp->f_vnode;
509 	ioflag = 0;
510 	if (fp->f_flag & FNONBLOCK)
511 		ioflag |= IO_NDELAY;
512 	if (fp->f_flag & O_DIRECT)
513 		ioflag |= IO_DIRECT;
514 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
515 	VOP_LEASE(vp, td, fp->f_cred, LEASE_READ);
516 	/*
517 	 * According to McKusick the vn lock was protecting f_offset here.
518 	 * It is now protected by the FOFFSET_LOCKED flag.
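	 *
	 * The protocol: take the pool mutex, sleep while another reader
	 * holds FOFFSET_LOCKED (setting FOFFSET_LOCK_WAITING so that the
	 * holder knows to wake us), then claim the flag ourselves.  The
	 * matching release and wakeup happen after VOP_READ() below.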
519 	 */
520 	if ((flags & FOF_OFFSET) == 0) {
521 		mtxp = mtx_pool_find(mtxpool_sleep, fp);
522 		mtx_lock(mtxp);
523 		while (fp->f_vnread_flags & FOFFSET_LOCKED) {
524 			fp->f_vnread_flags |= FOFFSET_LOCK_WAITING;
525 			msleep(&fp->f_vnread_flags, mtxp, PUSER - 1,
526 			    "vnread offlock", 0);
527 		}
528 		fp->f_vnread_flags |= FOFFSET_LOCKED;
529 		mtx_unlock(mtxp);
530 		vn_lock(vp, LK_SHARED | LK_RETRY);
531 		uio->uio_offset = fp->f_offset;
532 	} else
533 		vn_lock(vp, LK_SHARED | LK_RETRY);
534 
535 	ioflag |= sequential_heuristic(uio, fp);
536 
537 #ifdef MAC
538 	error = mac_vnode_check_read(active_cred, fp->f_cred, vp);
539 	if (error == 0)
540 #endif
541 		error = VOP_READ(vp, uio, ioflag, fp->f_cred);
542 	if ((flags & FOF_OFFSET) == 0) {
543 		fp->f_offset = uio->uio_offset;
544 		mtx_lock(mtxp);
545 		if (fp->f_vnread_flags & FOFFSET_LOCK_WAITING)
546 			wakeup(&fp->f_vnread_flags);
547 		fp->f_vnread_flags = 0;
548 		mtx_unlock(mtxp);
549 	}
550 	fp->f_nextoff = uio->uio_offset;
551 	VOP_UNLOCK(vp, 0);
552 	VFS_UNLOCK_GIANT(vfslocked);
553 	return (error);
554 }
555 
556 /*
557  * File table vnode write routine.
558  */
559 static int
560 vn_write(fp, uio, active_cred, flags, td)
561 	struct file *fp;
562 	struct uio *uio;
563 	struct ucred *active_cred;
564 	struct thread *td;
565 	int flags;
566 {
567 	struct vnode *vp;
568 	struct mount *mp;
569 	int error, ioflag;
570 	int vfslocked;
571 
572 	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
573 	    uio->uio_td, td));
574 	vp = fp->f_vnode;
575 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
576 	if (vp->v_type == VREG)
577 		bwillwrite();
578 	ioflag = IO_UNIT;
579 	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
580 		ioflag |= IO_APPEND;
581 	if (fp->f_flag & FNONBLOCK)
582 		ioflag |= IO_NDELAY;
583 	if (fp->f_flag & O_DIRECT)
584 		ioflag |= IO_DIRECT;
585 	if ((fp->f_flag & O_FSYNC) ||
586 	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
587 		ioflag |= IO_SYNC;
588 	mp = NULL;
589 	if (vp->v_type != VCHR &&
590 	    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
591 		goto unlock;
592 	VOP_LEASE(vp, td, fp->f_cred, LEASE_WRITE);
593 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
594 	if ((flags & FOF_OFFSET) == 0)
595 		uio->uio_offset = fp->f_offset;
596 	ioflag |= sequential_heuristic(uio, fp);
597 #ifdef MAC
598 	error = mac_vnode_check_write(active_cred, fp->f_cred, vp);
599 	if (error == 0)
600 #endif
601 		error = VOP_WRITE(vp, uio, ioflag, fp->f_cred);
602 	if ((flags & FOF_OFFSET) == 0)
603 		fp->f_offset = uio->uio_offset;
604 	fp->f_nextoff = uio->uio_offset;
605 	VOP_UNLOCK(vp, 0);
606 	if (vp->v_type != VCHR)
607 		vn_finished_write(mp);
608 unlock:
609 	VFS_UNLOCK_GIANT(vfslocked);
610 	return (error);
611 }
612 
613 /*
614  * File table truncate routine.
615  */
616 static int
617 vn_truncate(fp, length, active_cred, td)
618 	struct file *fp;
619 	off_t length;
620 	struct ucred *active_cred;
621 	struct thread *td;
622 {
623 	struct vattr vattr;
624 	struct mount *mp;
625 	struct vnode *vp;
626 	int vfslocked;
627 	int error;
628 
629 	vp = fp->f_vnode;
630 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
631 	error = vn_start_write(vp, &mp, V_WAIT | PCATCH);
632 	if (error) {
633 		VFS_UNLOCK_GIANT(vfslocked);
634 		return (error);
635 	}
636 	VOP_LEASE(vp, td, active_cred, LEASE_WRITE);
637 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
638 	if (vp->v_type == VDIR) {
639 		error = EISDIR;
640 		goto out;
641 	}
642 #ifdef MAC
643 	error = mac_vnode_check_write(active_cred, fp->f_cred, vp);
644 	if (error)
645 		goto out;
646 #endif
647 	error = vn_writechk(vp);
648 	if (error == 0) {
649 		VATTR_NULL(&vattr);
650 		vattr.va_size = length;
651 		error = VOP_SETATTR(vp, &vattr, fp->f_cred);
652 	}
653 out:
654 	VOP_UNLOCK(vp, 0);
655 	vn_finished_write(mp);
656 	VFS_UNLOCK_GIANT(vfslocked);
657 	return (error);
658 }
659 
660 /*
661  * File table vnode stat routine.
662  */
663 static int
664 vn_statfile(fp, sb, active_cred, td)
665 	struct file *fp;
666 	struct stat *sb;
667 	struct ucred *active_cred;
668 	struct thread *td;
669 {
670 	struct vnode *vp = fp->f_vnode;
671 	int vfslocked;
672 	int error;
673 
674 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
675 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
676 	error = vn_stat(vp, sb, active_cred, fp->f_cred, td);
677 	VOP_UNLOCK(vp, 0);
678 	VFS_UNLOCK_GIANT(vfslocked);
679 
680 	return (error);
681 }
682 
683 /*
684  * Stat a vnode; implementation for the stat syscall
685  */
686 int
687 vn_stat(vp, sb, active_cred, file_cred, td)
688 	struct vnode *vp;
689 	register struct stat *sb;
690 	struct ucred *active_cred;
691 	struct ucred *file_cred;
692 	struct thread *td;
693 {
694 	struct vattr vattr;
695 	register struct vattr *vap;
696 	int error;
697 	u_short mode;
698 
699 #ifdef MAC
700 	error = mac_vnode_check_stat(active_cred, file_cred, vp);
701 	if (error)
702 		return (error);
703 #endif
704 
705 	vap = &vattr;
706 
707 	/*
708 	 * Initialize defaults for new and unusual fields, so that file
709 	 * systems which don't support these fields don't need to know
710 	 * about them.
711 	 */
712 	vap->va_birthtime.tv_sec = -1;
713 	vap->va_birthtime.tv_nsec = 0;
714 	vap->va_fsid = VNOVAL;
715 	vap->va_rdev = NODEV;
716 
717 	error = VOP_GETATTR(vp, vap, active_cred);
718 	if (error)
719 		return (error);
720 
721 	/*
722 	 * Zero the spare stat fields
723 	 */
724 	bzero(sb, sizeof *sb);
725 
726 	/*
727 	 * Copy from vattr table
728 	 */
729 	if (vap->va_fsid != VNOVAL)
730 		sb->st_dev = vap->va_fsid;
731 	else
732 		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
733 	sb->st_ino = vap->va_fileid;
734 	mode = vap->va_mode;
735 	switch (vap->va_type) {
736 	case VREG:
737 		mode |= S_IFREG;
738 		break;
739 	case VDIR:
740 		mode |= S_IFDIR;
741 		break;
742 	case VBLK:
743 		mode |= S_IFBLK;
744 		break;
745 	case VCHR:
746 		mode |= S_IFCHR;
747 		break;
748 	case VLNK:
749 		mode |= S_IFLNK;
750 		break;
751 	case VSOCK:
752 		mode |= S_IFSOCK;
753 		break;
754 	case VFIFO:
755 		mode |= S_IFIFO;
756 		break;
757 	default:
758 		return (EBADF);
759 	}
760 	sb->st_mode = mode;
761 	sb->st_nlink = vap->va_nlink;
762 	sb->st_uid = vap->va_uid;
763 	sb->st_gid = vap->va_gid;
764 	sb->st_rdev = vap->va_rdev;
765 	if (vap->va_size > OFF_MAX)
766 		return (EOVERFLOW);
767 	sb->st_size = vap->va_size;
768 	sb->st_atimespec = vap->va_atime;
769 	sb->st_mtimespec = vap->va_mtime;
770 	sb->st_ctimespec = vap->va_ctime;
771 	sb->st_birthtimespec = vap->va_birthtime;
772 
773 	/*
774 	 * According to www.opengroup.org, the meaning of st_blksize is
775 	 *   "a filesystem-specific preferred I/O block size for this
776 	 *    object.  In some filesystem types, this may vary from file
777 	 *    to file"
778 	 * Default to PAGE_SIZE after much discussion.
779 	 * XXX: min(PAGE_SIZE, vp->v_bufobj.bo_bsize) may be more correct.
780 	 */
781 
782 	sb->st_blksize = PAGE_SIZE;
783 
784 	sb->st_flags = vap->va_flags;
785 	if (priv_check(td, PRIV_VFS_GENERATION))
786 		sb->st_gen = 0;
787 	else
788 		sb->st_gen = vap->va_gen;
789 
790 	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
791 	return (0);
792 }
793 
794 /*
795  * File table vnode ioctl routine.
796  */
797 static int
798 vn_ioctl(fp, com, data, active_cred, td)
799 	struct file *fp;
800 	u_long com;
801 	void *data;
802 	struct ucred *active_cred;
803 	struct thread *td;
804 {
805 	struct vnode *vp = fp->f_vnode;
806 	struct vattr vattr;
807 	int vfslocked;
808 	int error;
809 
810 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
811 	error = ENOTTY;
812 	switch (vp->v_type) {
813 	case VREG:
814 	case VDIR:
815 		if (com == FIONREAD) {
816 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
817 			error = VOP_GETATTR(vp, &vattr, active_cred);
818 			VOP_UNLOCK(vp, 0);
819 			if (!error)
820 				*(int *)data = vattr.va_size - fp->f_offset;
821 		}
822 		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
823 			error = 0;
824 		else
825 			error = VOP_IOCTL(vp, com, data, fp->f_flag,
826 			    active_cred, td);
827 		break;
828 
829 	default:
830 		break;
831 	}
832 	VFS_UNLOCK_GIANT(vfslocked);
833 	return (error);
834 }
835 
836 /*
837  * File table vnode poll routine.
838  */
839 static int
840 vn_poll(fp, events, active_cred, td)
841 	struct file *fp;
842 	int events;
843 	struct ucred *active_cred;
844 	struct thread *td;
845 {
846 	struct vnode *vp;
847 	int vfslocked;
848 	int error;
849 
850 	vp = fp->f_vnode;
851 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
852 #ifdef MAC
853 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
854 	error = mac_vnode_check_poll(active_cred, fp->f_cred, vp);
855 	VOP_UNLOCK(vp, 0);
856 	if (!error)
857 #endif
858 
859 	error = VOP_POLL(vp, events, fp->f_cred, td);
860 	VFS_UNLOCK_GIANT(vfslocked);
861 	return (error);
862 }
863 
864 /*
865  * Acquire the requested lock and then check for validity.  LK_RETRY
866  * permits vn_lock to return doomed vnodes.
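 *
 * Callers normally reach this through the vn_lock() macro, which supplies
 * the call site's file and line for lock debugging.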
867  */
868 int
869 _vn_lock(struct vnode *vp, int flags, char *file, int line)
870 {
871 	int error;
872 
873 	VNASSERT((flags & LK_TYPE_MASK) != 0, vp,
874 	    ("vn_lock called with no locktype."));
875 	do {
876 #ifdef DEBUG_VFS_LOCKS
877 		KASSERT(vp->v_holdcnt != 0,
878 		    ("vn_lock %p: zero hold count", vp));
879 #endif
880 		error = VOP_LOCK1(vp, flags, file, line);
881 		flags &= ~LK_INTERLOCK;	/* Interlock is always dropped. */
882 		KASSERT((flags & LK_RETRY) == 0 || error == 0,
883 		    ("LK_RETRY set with incompatible flags %d\n", flags));
884 		/*
885 		 * Callers specify LK_RETRY if they wish to get dead vnodes.
886 		 * If RETRY is not set, we return ENOENT instead.
887 		 */
888 		if (error == 0 && vp->v_iflag & VI_DOOMED &&
889 		    (flags & LK_RETRY) == 0) {
890 			VOP_UNLOCK(vp, 0);
891 			error = ENOENT;
892 			break;
893 		}
894 	} while (flags & LK_RETRY && error != 0);
895 	return (error);
896 }
897 
898 /*
899  * File table vnode close routine.
900  */
901 static int
902 vn_closefile(fp, td)
903 	struct file *fp;
904 	struct thread *td;
905 {
906 	struct vnode *vp;
907 	struct flock lf;
908 	int vfslocked;
909 	int error;
910 
911 	vp = fp->f_vnode;
912 
913 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
914 	if (fp->f_type == DTYPE_VNODE && fp->f_flag & FHASLOCK) {
915 		lf.l_whence = SEEK_SET;
916 		lf.l_start = 0;
917 		lf.l_len = 0;
918 		lf.l_type = F_UNLCK;
919 		(void) VOP_ADVLOCK(vp, fp, F_UNLCK, &lf, F_FLOCK);
920 	}
921 
922 	fp->f_ops = &badfileops;
923 
924 	error = vn_close(vp, fp->f_flag, fp->f_cred, td);
925 	VFS_UNLOCK_GIANT(vfslocked);
926 	return (error);
927 }
928 
929 /*
930  * Prepare to start a filesystem write operation. If the operation is
931  * permitted, then we bump the count of operations in progress and
932  * proceed. If a suspend request is in progress, we wait until the
933  * suspension is over, and then proceed.
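 *
 * The usual calling sequence is vn_start_write(vp, &mp, V_WAIT), then the
 * vnode-locked operation itself, then vn_finished_write(mp) once the vnode
 * lock has been dropped; vn_close() above is an in-file example.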
934  */
935 int
936 vn_start_write(vp, mpp, flags)
937 	struct vnode *vp;
938 	struct mount **mpp;
939 	int flags;
940 {
941 	struct mount *mp;
942 	int error;
943 
944 	error = 0;
945 	/*
946 	 * If a vnode is provided, get and return the mount point to
947 	 * which it will write.
948 	 */
949 	if (vp != NULL) {
950 		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
951 			*mpp = NULL;
952 			if (error != EOPNOTSUPP)
953 				return (error);
954 			return (0);
955 		}
956 	}
957 	if ((mp = *mpp) == NULL)
958 		return (0);
959 	MNT_ILOCK(mp);
960 	if (vp == NULL)
961 		MNT_REF(mp);
962 	/*
963 	 * Check on status of suspension.
964 	 */
965 	if ((curthread->td_pflags & TDP_IGNSUSP) == 0 ||
966 	    mp->mnt_susp_owner != curthread) {
967 		while ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
968 			if (flags & V_NOWAIT) {
969 				error = EWOULDBLOCK;
970 				if (vp != NULL)
971 					*mpp = NULL;
972 				goto unlock;
973 			}
974 			error = msleep(&mp->mnt_flag, MNT_MTX(mp),
975 			    (PUSER - 1) | (flags & PCATCH), "suspfs", 0);
976 			if (error) {
977 				if (vp != NULL)
978 					*mpp = NULL;
979 				goto unlock;
980 			}
981 		}
982 	}
983 	if (flags & V_XSLEEP)
984 		goto unlock;
985 	mp->mnt_writeopcount++;
986 unlock:
987 	MNT_REL(mp);
988 	MNT_IUNLOCK(mp);
989 	return (error);
990 }
991 
992 /*
993  * Secondary suspension. Used by operations such as vop_inactive
994  * routines that are needed by the higher level functions. These
995  * are allowed to proceed until all the higher level functions have
996  * completed (indicated by mnt_writeopcount dropping to zero). At that
997  * time, these operations are halted until the suspension is over.
998  */
999 int
1000 vn_start_secondary_write(vp, mpp, flags)
1001 	struct vnode *vp;
1002 	struct mount **mpp;
1003 	int flags;
1004 {
1005 	struct mount *mp;
1006 	int error;
1007 
1008  retry:
1009 	if (vp != NULL) {
1010 		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
1011 			*mpp = NULL;
1012 			if (error != EOPNOTSUPP)
1013 				return (error);
1014 			return (0);
1015 		}
1016 	}
1017 	/*
1018 	 * If we are not suspended or have not yet reached suspended
1019 	 * mode, then let the operation proceed.
1020 	 */
1021 	if ((mp = *mpp) == NULL)
1022 		return (0);
1023 	MNT_ILOCK(mp);
1024 	if (vp == NULL)
1025 		MNT_REF(mp);
1026 	if ((mp->mnt_kern_flag & (MNTK_SUSPENDED | MNTK_SUSPEND2)) == 0) {
1027 		mp->mnt_secondary_writes++;
1028 		mp->mnt_secondary_accwrites++;
1029 		MNT_REL(mp);
1030 		MNT_IUNLOCK(mp);
1031 		return (0);
1032 	}
1033 	if (flags & V_NOWAIT) {
1034 		MNT_REL(mp);
1035 		MNT_IUNLOCK(mp);
1036 		if (vp != NULL)
1037 			*mpp = NULL;
1038 		return (EWOULDBLOCK);
1039 	}
1040 	/*
1041 	 * Wait for the suspension to finish.
1042 	 */
1043 	error = msleep(&mp->mnt_flag, MNT_MTX(mp),
1044 		       (PUSER - 1) | (flags & PCATCH) | PDROP, "suspfs", 0);
1045 	vfs_rel(mp);
1046 	if (error == 0)
1047 		goto retry;
1048 	if (vp != NULL)
1049 		*mpp = NULL;
1050 	return (error);
1051 }
1052 
1053 /*
1054  * Filesystem write operation has completed. If we are suspending and this
1055  * operation is the last one, notify the suspender that the suspension is
1056  * now in effect.
1057  */
1058 void
1059 vn_finished_write(mp)
1060 	struct mount *mp;
1061 {
1062 	if (mp == NULL)
1063 		return;
1064 	MNT_ILOCK(mp);
1065 	mp->mnt_writeopcount--;
1066 	if (mp->mnt_writeopcount < 0)
1067 		panic("vn_finished_write: neg cnt");
1068 	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
1069 	    mp->mnt_writeopcount <= 0)
1070 		wakeup(&mp->mnt_writeopcount);
1071 	MNT_IUNLOCK(mp);
1072 }
1073 
1074 
1075 /*
1076  * Filesystem secondary write operation has completed. If we are
1077  * suspending and this operation is the last one, notify the suspender
1078  * that the suspension is now in effect.
1079  */
1080 void
1081 vn_finished_secondary_write(mp)
1082 	struct mount *mp;
1083 {
1084 	if (mp == NULL)
1085 		return;
1086 	MNT_ILOCK(mp);
1087 	mp->mnt_secondary_writes--;
1088 	if (mp->mnt_secondary_writes < 0)
1089 		panic("vn_finished_secondary_write: neg cnt");
1090 	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
1091 	    mp->mnt_secondary_writes <= 0)
1092 		wakeup(&mp->mnt_secondary_writes);
1093 	MNT_IUNLOCK(mp);
1094 }
1095 
1096 
1097 
1098 /*
1099  * Request a filesystem to suspend write operations.
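 *
 * The caller becomes the suspension owner: MNTK_SUSPEND is set, primary
 * writes already in progress are drained, and VFS_SYNC(mp, MNT_SUSPEND)
 * is then asked to flush the filesystem and mark it fully suspended.
 * vfs_write_resume() undoes all of this.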
1100  */
1101 int
1102 vfs_write_suspend(mp)
1103 	struct mount *mp;
1104 {
1105 	struct thread *td = curthread;
1106 	int error;
1107 
1108 	MNT_ILOCK(mp);
1109 	if (mp->mnt_susp_owner == curthread) {
1110 		MNT_IUNLOCK(mp);
1111 		return (EALREADY);
1112 	}
1113 	while (mp->mnt_kern_flag & MNTK_SUSPEND)
1114 		msleep(&mp->mnt_flag, MNT_MTX(mp), PUSER - 1, "wsuspfs", 0);
1115 	mp->mnt_kern_flag |= MNTK_SUSPEND;
1116 	mp->mnt_susp_owner = curthread;
1117 	if (mp->mnt_writeopcount > 0)
1118 		(void) msleep(&mp->mnt_writeopcount,
1119 		    MNT_MTX(mp), (PUSER - 1)|PDROP, "suspwt", 0);
1120 	else
1121 		MNT_IUNLOCK(mp);
1122 	if ((error = VFS_SYNC(mp, MNT_SUSPEND, td)) != 0)
1123 		vfs_write_resume(mp);
1124 	return (error);
1125 }
1126 
1127 /*
1128  * Request a filesystem to resume write operations.
1129  */
1130 void
1131 vfs_write_resume(mp)
1132 	struct mount *mp;
1133 {
1134 
1135 	MNT_ILOCK(mp);
1136 	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
1137 		KASSERT(mp->mnt_susp_owner == curthread, ("mnt_susp_owner"));
1138 		mp->mnt_kern_flag &= ~(MNTK_SUSPEND | MNTK_SUSPEND2 |
1139 				       MNTK_SUSPENDED);
1140 		mp->mnt_susp_owner = NULL;
1141 		wakeup(&mp->mnt_writeopcount);
1142 		wakeup(&mp->mnt_flag);
1143 		curthread->td_pflags &= ~TDP_IGNSUSP;
1144 		MNT_IUNLOCK(mp);
1145 		VFS_SUSP_CLEAN(mp);
1146 	} else
1147 		MNT_IUNLOCK(mp);
1148 }
1149 
1150 /*
1151  * Implement kqueues for files by translating them into vnode operations.
1152  */
1153 static int
1154 vn_kqfilter(struct file *fp, struct knote *kn)
1155 {
1156 	int vfslocked;
1157 	int error;
1158 
1159 	vfslocked = VFS_LOCK_GIANT(fp->f_vnode->v_mount);
1160 	error = VOP_KQFILTER(fp->f_vnode, kn);
1161 	VFS_UNLOCK_GIANT(vfslocked);
1162 
1163 	return (error);
1164 }
1165 
1166 /*
1167  * Simplified in-kernel wrapper calls for extended attribute access.
1168  * Both calls pass in a NULL credential, authorizing as "kernel" access.
1169  * Set IO_NODELOCKED in ioflg if the vnode is already locked.
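 *
 * For example (a sketch only), a caller already holding vp locked might
 * fetch a system-namespace attribute with:
 *
 *	len = sizeof(buf);
 *	error = vn_extattr_get(vp, IO_NODELOCKED, EXTATTR_NAMESPACE_SYSTEM,
 *	    "posix1e.acl_access", &len, buf, td);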
1170  */
1171 int
1172 vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
1173     const char *attrname, int *buflen, char *buf, struct thread *td)
1174 {
1175 	struct uio	auio;
1176 	struct iovec	iov;
1177 	int	error;
1178 
1179 	iov.iov_len = *buflen;
1180 	iov.iov_base = buf;
1181 
1182 	auio.uio_iov = &iov;
1183 	auio.uio_iovcnt = 1;
1184 	auio.uio_rw = UIO_READ;
1185 	auio.uio_segflg = UIO_SYSSPACE;
1186 	auio.uio_td = td;
1187 	auio.uio_offset = 0;
1188 	auio.uio_resid = *buflen;
1189 
1190 	if ((ioflg & IO_NODELOCKED) == 0)
1191 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1192 
1193 	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
1194 
1195 	/* authorize attribute retrieval as kernel */
1196 	error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL,
1197 	    td);
1198 
1199 	if ((ioflg & IO_NODELOCKED) == 0)
1200 		VOP_UNLOCK(vp, 0);
1201 
1202 	if (error == 0) {
1203 		*buflen = *buflen - auio.uio_resid;
1204 	}
1205 
1206 	return (error);
1207 }
1208 
1209 /*
1210  * XXX failure mode if partially written?
1211  */
1212 int
1213 vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
1214     const char *attrname, int buflen, char *buf, struct thread *td)
1215 {
1216 	struct uio	auio;
1217 	struct iovec	iov;
1218 	struct mount	*mp;
1219 	int	error;
1220 
1221 	iov.iov_len = buflen;
1222 	iov.iov_base = buf;
1223 
1224 	auio.uio_iov = &iov;
1225 	auio.uio_iovcnt = 1;
1226 	auio.uio_rw = UIO_WRITE;
1227 	auio.uio_segflg = UIO_SYSSPACE;
1228 	auio.uio_td = td;
1229 	auio.uio_offset = 0;
1230 	auio.uio_resid = buflen;
1231 
1232 	if ((ioflg & IO_NODELOCKED) == 0) {
1233 		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
1234 			return (error);
1235 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1236 	}
1237 
1238 	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
1239 
1240 	/* authorize attribute setting as kernel */
1241 	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td);
1242 
1243 	if ((ioflg & IO_NODELOCKED) == 0) {
1244 		vn_finished_write(mp);
1245 		VOP_UNLOCK(vp, 0);
1246 	}
1247 
1248 	return (error);
1249 }
1250 
1251 int
1252 vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
1253     const char *attrname, struct thread *td)
1254 {
1255 	struct mount	*mp;
1256 	int	error;
1257 
1258 	if ((ioflg & IO_NODELOCKED) == 0) {
1259 		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
1260 			return (error);
1261 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1262 	}
1263 
1264 	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
1265 
1266 	/* authorize attribute removal as kernel */
1267 	error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NULL, td);
1268 	if (error == EOPNOTSUPP)
1269 		error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
1270 		    NULL, td);
1271 
1272 	if ((ioflg & IO_NODELOCKED) == 0) {
1273 		vn_finished_write(mp);
1274 		VOP_UNLOCK(vp, 0);
1275 	}
1276 
1277 	return (error);
1278 }
1279