xref: /freebsd/sys/kern/vfs_vnops.c (revision c0020399a650364d0134f79f3fa319f84064372d)
1 /*-
2  * Copyright (c) 1982, 1986, 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 4. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
35  */
36 
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD$");
39 
40 #include "opt_mac.h"
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/fcntl.h>
45 #include <sys/file.h>
46 #include <sys/kdb.h>
47 #include <sys/stat.h>
48 #include <sys/priv.h>
49 #include <sys/proc.h>
50 #include <sys/limits.h>
51 #include <sys/lock.h>
52 #include <sys/mount.h>
53 #include <sys/mutex.h>
54 #include <sys/namei.h>
55 #include <sys/vnode.h>
56 #include <sys/bio.h>
57 #include <sys/buf.h>
58 #include <sys/filio.h>
59 #include <sys/sx.h>
60 #include <sys/ttycom.h>
61 #include <sys/conf.h>
62 #include <sys/syslog.h>
63 #include <sys/unistd.h>
64 
65 #include <security/mac/mac_framework.h>
66 
67 static fo_rdwr_t	vn_read;
68 static fo_rdwr_t	vn_write;
69 static fo_truncate_t	vn_truncate;
70 static fo_ioctl_t	vn_ioctl;
71 static fo_poll_t	vn_poll;
72 static fo_kqfilter_t	vn_kqfilter;
73 static fo_stat_t	vn_statfile;
74 static fo_close_t	vn_closefile;
75 
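/*
 * File operations vector used for vnode-backed descriptors.
 * DFLAG_PASSABLE marks them as passable over unix(4) domain sockets;
 * DFLAG_SEEKABLE marks them as supporting seek offsets.
 */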
76 struct 	fileops vnops = {
77 	.fo_read = vn_read,
78 	.fo_write = vn_write,
79 	.fo_truncate = vn_truncate,
80 	.fo_ioctl = vn_ioctl,
81 	.fo_poll = vn_poll,
82 	.fo_kqfilter = vn_kqfilter,
83 	.fo_stat = vn_statfile,
84 	.fo_close = vn_closefile,
85 	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
86 };
87 
88 int
89 vn_open(ndp, flagp, cmode, fp)
90 	struct nameidata *ndp;
91 	int *flagp, cmode;
92 	struct file *fp;
93 {
94 	struct thread *td = ndp->ni_cnd.cn_thread;
95 
96 	return (vn_open_cred(ndp, flagp, cmode, td->td_ucred, fp));
97 }
98 
99 /*
100  * Common code for vnode open operations.
101  * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
102  *
103  * Note that this does NOT free nameidata for the successful case,
104  * due to the NDINIT being done elsewhere.
105  */
106 int
107 vn_open_cred(ndp, flagp, cmode, cred, fp)
108 	struct nameidata *ndp;
109 	int *flagp, cmode;
110 	struct ucred *cred;
111 	struct file *fp;
112 {
113 	struct vnode *vp;
114 	struct mount *mp;
115 	struct thread *td = ndp->ni_cnd.cn_thread;
116 	struct vattr vat;
117 	struct vattr *vap = &vat;
118 	int fmode, error;
119 	accmode_t accmode;
120 	int vfslocked, mpsafe;
121 
122 	mpsafe = ndp->ni_cnd.cn_flags & MPSAFE;
123 restart:
124 	vfslocked = 0;
125 	fmode = *flagp;
126 	if (fmode & O_CREAT) {
127 		ndp->ni_cnd.cn_nameiop = CREATE;
128 		ndp->ni_cnd.cn_flags = ISOPEN | LOCKPARENT | LOCKLEAF |
129 		    MPSAFE | AUDITVNODE1;
130 		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
131 			ndp->ni_cnd.cn_flags |= FOLLOW;
132 		bwillwrite();
133 		if ((error = namei(ndp)) != 0)
134 			return (error);
135 		vfslocked = NDHASGIANT(ndp);
136 		if (!mpsafe)
137 			ndp->ni_cnd.cn_flags &= ~MPSAFE;
138 		if (ndp->ni_vp == NULL) {
139 			VATTR_NULL(vap);
140 			vap->va_type = VREG;
141 			vap->va_mode = cmode;
142 			if (fmode & O_EXCL)
143 				vap->va_vaflags |= VA_EXCLUSIVE;
144 			if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) {
145 				NDFREE(ndp, NDF_ONLY_PNBUF);
146 				vput(ndp->ni_dvp);
147 				VFS_UNLOCK_GIANT(vfslocked);
148 				if ((error = vn_start_write(NULL, &mp,
149 				    V_XSLEEP | PCATCH)) != 0)
150 					return (error);
151 				goto restart;
152 			}
153 #ifdef MAC
154 			error = mac_vnode_check_create(cred, ndp->ni_dvp,
155 			    &ndp->ni_cnd, vap);
156 			if (error == 0)
157 #endif
158 				error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
159 						   &ndp->ni_cnd, vap);
160 			vput(ndp->ni_dvp);
161 			vn_finished_write(mp);
162 			if (error) {
163 				VFS_UNLOCK_GIANT(vfslocked);
164 				NDFREE(ndp, NDF_ONLY_PNBUF);
165 				return (error);
166 			}
167 			fmode &= ~O_TRUNC;
168 			vp = ndp->ni_vp;
169 		} else {
170 			if (ndp->ni_dvp == ndp->ni_vp)
171 				vrele(ndp->ni_dvp);
172 			else
173 				vput(ndp->ni_dvp);
174 			ndp->ni_dvp = NULL;
175 			vp = ndp->ni_vp;
176 			if (fmode & O_EXCL) {
177 				error = EEXIST;
178 				goto bad;
179 			}
180 			fmode &= ~O_CREAT;
181 		}
182 	} else {
183 		ndp->ni_cnd.cn_nameiop = LOOKUP;
184 		ndp->ni_cnd.cn_flags = ISOPEN |
185 		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) |
186 		    LOCKLEAF | MPSAFE | AUDITVNODE1;
187 		if (!(fmode & FWRITE))
188 			ndp->ni_cnd.cn_flags |= LOCKSHARED;
189 		if ((error = namei(ndp)) != 0)
190 			return (error);
191 		if (!mpsafe)
192 			ndp->ni_cnd.cn_flags &= ~MPSAFE;
193 		vfslocked = NDHASGIANT(ndp);
194 		vp = ndp->ni_vp;
195 	}
196 	if (vp->v_type == VLNK) {
197 		error = EMLINK;
198 		goto bad;
199 	}
200 	if (vp->v_type == VSOCK) {
201 		error = EOPNOTSUPP;
202 		goto bad;
203 	}
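	/*
	 * Build the access-mode mask checked by the MAC framework and
	 * VOP_ACCESS() below.
	 */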
204 	accmode = 0;
205 	if (fmode & (FWRITE | O_TRUNC)) {
206 		if (vp->v_type == VDIR) {
207 			error = EISDIR;
208 			goto bad;
209 		}
210 		accmode |= VWRITE;
211 	}
212 	if (fmode & FREAD)
213 		accmode |= VREAD;
214 	if (fmode & FEXEC)
215 		accmode |= VEXEC;
216 	if (fmode & O_APPEND)
217 		accmode |= VAPPEND;
218 #ifdef MAC
219 	error = mac_vnode_check_open(cred, vp, accmode);
220 	if (error)
221 		goto bad;
222 #endif
223 	if ((fmode & O_CREAT) == 0) {
224 		if (accmode & VWRITE) {
225 			error = vn_writechk(vp);
226 			if (error)
227 				goto bad;
228 		}
229 		if (accmode) {
230 			error = VOP_ACCESS(vp, accmode, cred, td);
231 			if (error)
232 				goto bad;
233 		}
234 	}
235 	if ((error = VOP_OPEN(vp, fmode, cred, td, fp)) != 0)
236 		goto bad;
237 
238 	if (fmode & FWRITE)
239 		vp->v_writecount++;
240 	*flagp = fmode;
241 	ASSERT_VOP_LOCKED(vp, "vn_open_cred");
242 	if (!mpsafe)
243 		VFS_UNLOCK_GIANT(vfslocked);
244 	return (0);
245 bad:
246 	NDFREE(ndp, NDF_ONLY_PNBUF);
247 	vput(vp);
248 	VFS_UNLOCK_GIANT(vfslocked);
249 	*flagp = fmode;
250 	ndp->ni_vp = NULL;
251 	return (error);
252 }
253 
254 /*
255  * Check for write permissions on the specified vnode.
256  * Prototype text segments cannot be written.
257  */
258 int
259 vn_writechk(vp)
260 	register struct vnode *vp;
261 {
262 
263 	ASSERT_VOP_LOCKED(vp, "vn_writechk");
264 	/*
265 	 * If there's shared text associated with
266 	 * the vnode, the file is being executed and
267 	 * writing is not allowed.
268 	 */
269 	if (vp->v_vflag & VV_TEXT)
270 		return (ETXTBSY);
271 
272 	return (0);
273 }
274 
275 /*
276  * Vnode close call
277  */
278 int
279 vn_close(vp, flags, file_cred, td)
280 	register struct vnode *vp;
281 	int flags;
282 	struct ucred *file_cred;
283 	struct thread *td;
284 {
285 	struct mount *mp;
286 	int error, lock_flags;
287 
288 	if (!(flags & FWRITE) && vp->v_mount != NULL &&
289 	    vp->v_mount->mnt_kern_flag & MNTK_EXTENDED_SHARED)
290 		lock_flags = LK_SHARED;
291 	else
292 		lock_flags = LK_EXCLUSIVE;
293 
294 	VFS_ASSERT_GIANT(vp->v_mount);
295 
296 	vn_start_write(vp, &mp, V_WAIT);
297 	vn_lock(vp, lock_flags | LK_RETRY);
298 	if (flags & FWRITE) {
299 		VNASSERT(vp->v_writecount > 0, vp,
300 		    ("vn_close: negative writecount"));
301 		vp->v_writecount--;
302 	}
303 	error = VOP_CLOSE(vp, flags, file_cred, td);
304 	vput(vp);
305 	vn_finished_write(mp);
306 	return (error);
307 }
308 
309 /*
310  * Heuristic to detect sequential operation.
311  */
312 static int
313 sequential_heuristic(struct uio *uio, struct file *fp)
314 {
315 
316 	/*
317 	 * Offset 0 is handled specially.  open() sets f_seqcount to 1 so
318 	 * that the first I/O is normally considered to be slightly
319 	 * sequential.  Seeking to offset 0 doesn't change sequentiality
320 	 * unless previous seeks have reduced f_seqcount to 0, in which
321 	 * case offset 0 is not special.
322 	 */
323 	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
324 	    uio->uio_offset == fp->f_nextoff) {
325 		/*
326 		 * f_seqcount is in units of fixed-size blocks so that it
327 		 * depends mainly on the amount of sequential I/O and not
328 		 * much on the number of sequential I/O's.  The fixed size
329 		 * of 16384 is hard-coded here since it is (not quite) just
330 		 * a magic size that works well here.  This size is more
331 		 * closely related to the best I/O size for real disks than
332 		 * to any block size used by software.
333 		 */
334 		fp->f_seqcount += howmany(uio->uio_resid, 16384);
335 		if (fp->f_seqcount > IO_SEQMAX)
336 			fp->f_seqcount = IO_SEQMAX;
337 		return (fp->f_seqcount << IO_SEQSHIFT);
338 	}
339 
340 	/* Not sequential.  Quickly draw-down sequentiality. */
341 	if (fp->f_seqcount > 1)
342 		fp->f_seqcount = 1;
343 	else
344 		fp->f_seqcount = 0;
345 	return (0);
346 }
347 
348 /*
349  * Package up an I/O request on a vnode into a uio and do it.
350  */
351 int
352 vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, active_cred, file_cred,
353     aresid, td)
354 	enum uio_rw rw;
355 	struct vnode *vp;
356 	void *base;
357 	int len;
358 	off_t offset;
359 	enum uio_seg segflg;
360 	int ioflg;
361 	struct ucred *active_cred;
362 	struct ucred *file_cred;
363 	int *aresid;
364 	struct thread *td;
365 {
366 	struct uio auio;
367 	struct iovec aiov;
368 	struct mount *mp;
369 	struct ucred *cred;
370 	int error;
371 
372 	VFS_ASSERT_GIANT(vp->v_mount);
373 
374 	if ((ioflg & IO_NODELOCKED) == 0) {
375 		mp = NULL;
376 		if (rw == UIO_WRITE) {
377 			if (vp->v_type != VCHR &&
378 			    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH))
379 			    != 0)
380 				return (error);
381 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
382 		} else
383 			vn_lock(vp, LK_SHARED | LK_RETRY);
384 
385 	}
386 	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
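	/* Describe the caller's buffer with a single-segment uio. */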
387 	auio.uio_iov = &aiov;
388 	auio.uio_iovcnt = 1;
389 	aiov.iov_base = base;
390 	aiov.iov_len = len;
391 	auio.uio_resid = len;
392 	auio.uio_offset = offset;
393 	auio.uio_segflg = segflg;
394 	auio.uio_rw = rw;
395 	auio.uio_td = td;
396 	error = 0;
397 #ifdef MAC
398 	if ((ioflg & IO_NOMACCHECK) == 0) {
399 		if (rw == UIO_READ)
400 			error = mac_vnode_check_read(active_cred, file_cred,
401 			    vp);
402 		else
403 			error = mac_vnode_check_write(active_cred, file_cred,
404 			    vp);
405 	}
406 #endif
407 	if (error == 0) {
408 		if (file_cred)
409 			cred = file_cred;
410 		else
411 			cred = active_cred;
412 		if (rw == UIO_READ)
413 			error = VOP_READ(vp, &auio, ioflg, cred);
414 		else
415 			error = VOP_WRITE(vp, &auio, ioflg, cred);
416 	}
417 	if (aresid)
418 		*aresid = auio.uio_resid;
419 	else
420 		if (auio.uio_resid && error == 0)
421 			error = EIO;
422 	if ((ioflg & IO_NODELOCKED) == 0) {
423 		if (rw == UIO_WRITE && vp->v_type != VCHR)
424 			vn_finished_write(mp);
425 		VOP_UNLOCK(vp, 0);
426 	}
427 	return (error);
428 }
429 
430 /*
431  * Package up an I/O request on a vnode into a uio and do it.  The I/O
432  * request is split up into smaller chunks and we try to avoid saturating
433  * the buffer cache while potentially holding a vnode locked, so we
434  * call bwillwrite() before each vn_rdwr().  We also call uio_yield()
435  * to give other processes a chance to lock the vnode (either other processes
436  * core'ing the same binary, or unrelated processes scanning the directory).
437  */
438 int
439 vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, active_cred,
440     file_cred, aresid, td)
441 	enum uio_rw rw;
442 	struct vnode *vp;
443 	void *base;
444 	size_t len;
445 	off_t offset;
446 	enum uio_seg segflg;
447 	int ioflg;
448 	struct ucred *active_cred;
449 	struct ucred *file_cred;
450 	size_t *aresid;
451 	struct thread *td;
452 {
453 	int error = 0;
454 	int iaresid;
455 
456 	VFS_ASSERT_GIANT(vp->v_mount);
457 
458 	do {
459 		int chunk;
460 
461 		/*
462 		 * Force `offset' to a multiple of MAXBSIZE except possibly
463 		 * for the first chunk, so that filesystems only need to
464 		 * write full blocks except possibly for the first and last
465 		 * chunks.
466 		 */
467 		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;
468 
469 		if (chunk > len)
470 			chunk = len;
471 		if (rw != UIO_READ && vp->v_type == VREG)
472 			bwillwrite();
473 		iaresid = 0;
474 		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
475 		    ioflg, active_cred, file_cred, &iaresid, td);
476 		len -= chunk;	/* aresid calc already includes length */
477 		if (error)
478 			break;
479 		offset += chunk;
480 		base = (char *)base + chunk;
481 		uio_yield();
482 	} while (len);
483 	if (aresid)
484 		*aresid = len + iaresid;
485 	return (error);
486 }
487 
488 /*
489  * File table vnode read routine.
490  */
491 static int
492 vn_read(fp, uio, active_cred, flags, td)
493 	struct file *fp;
494 	struct uio *uio;
495 	struct ucred *active_cred;
496 	struct thread *td;
497 	int flags;
498 {
499 	struct vnode *vp;
500 	int error, ioflag;
501 	struct mtx *mtxp;
502 	int vfslocked;
503 
504 	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
505 	    uio->uio_td, td));
506 	mtxp = NULL;
507 	vp = fp->f_vnode;
508 	ioflag = 0;
509 	if (fp->f_flag & FNONBLOCK)
510 		ioflag |= IO_NDELAY;
511 	if (fp->f_flag & O_DIRECT)
512 		ioflag |= IO_DIRECT;
513 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
514 	/*
515 	 * According to McKusick the vn lock was protecting f_offset here.
516 	 * It is now protected by the FOFFSET_LOCKED flag.
517 	 */
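	/*
	 * For implicit-offset I/O, take the FOFFSET_LOCKED "lock" under
	 * a pool mutex keyed by the file: wait until no other reader
	 * holds it, mark it held, and release it (waking any waiters)
	 * once f_offset has been updated below.
	 */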
518 	if ((flags & FOF_OFFSET) == 0) {
519 		mtxp = mtx_pool_find(mtxpool_sleep, fp);
520 		mtx_lock(mtxp);
521 		while (fp->f_vnread_flags & FOFFSET_LOCKED) {
522 			fp->f_vnread_flags |= FOFFSET_LOCK_WAITING;
523 			msleep(&fp->f_vnread_flags, mtxp, PUSER - 1,
524 			    "vnread offlock", 0);
525 		}
526 		fp->f_vnread_flags |= FOFFSET_LOCKED;
527 		mtx_unlock(mtxp);
528 		vn_lock(vp, LK_SHARED | LK_RETRY);
529 		uio->uio_offset = fp->f_offset;
530 	} else
531 		vn_lock(vp, LK_SHARED | LK_RETRY);
532 
533 	ioflag |= sequential_heuristic(uio, fp);
534 
535 #ifdef MAC
536 	error = mac_vnode_check_read(active_cred, fp->f_cred, vp);
537 	if (error == 0)
538 #endif
539 		error = VOP_READ(vp, uio, ioflag, fp->f_cred);
540 	if ((flags & FOF_OFFSET) == 0) {
541 		fp->f_offset = uio->uio_offset;
542 		mtx_lock(mtxp);
543 		if (fp->f_vnread_flags & FOFFSET_LOCK_WAITING)
544 			wakeup(&fp->f_vnread_flags);
545 		fp->f_vnread_flags = 0;
546 		mtx_unlock(mtxp);
547 	}
548 	fp->f_nextoff = uio->uio_offset;
549 	VOP_UNLOCK(vp, 0);
550 	VFS_UNLOCK_GIANT(vfslocked);
551 	return (error);
552 }
553 
554 /*
555  * File table vnode write routine.
556  */
557 static int
558 vn_write(fp, uio, active_cred, flags, td)
559 	struct file *fp;
560 	struct uio *uio;
561 	struct ucred *active_cred;
562 	struct thread *td;
563 	int flags;
564 {
565 	struct vnode *vp;
566 	struct mount *mp;
567 	int error, ioflag;
568 	int vfslocked;
569 
570 	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
571 	    uio->uio_td, td));
572 	vp = fp->f_vnode;
573 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
574 	if (vp->v_type == VREG)
575 		bwillwrite();
576 	ioflag = IO_UNIT;
577 	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
578 		ioflag |= IO_APPEND;
579 	if (fp->f_flag & FNONBLOCK)
580 		ioflag |= IO_NDELAY;
581 	if (fp->f_flag & O_DIRECT)
582 		ioflag |= IO_DIRECT;
583 	if ((fp->f_flag & O_FSYNC) ||
584 	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
585 		ioflag |= IO_SYNC;
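	/*
	 * Character devices bypass the write-suspension accounting done
	 * by vn_start_write() and vn_finished_write().
	 */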
586 	mp = NULL;
587 	if (vp->v_type != VCHR &&
588 	    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
589 		goto unlock;
590 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
591 	if ((flags & FOF_OFFSET) == 0)
592 		uio->uio_offset = fp->f_offset;
593 	ioflag |= sequential_heuristic(uio, fp);
594 #ifdef MAC
595 	error = mac_vnode_check_write(active_cred, fp->f_cred, vp);
596 	if (error == 0)
597 #endif
598 		error = VOP_WRITE(vp, uio, ioflag, fp->f_cred);
599 	if ((flags & FOF_OFFSET) == 0)
600 		fp->f_offset = uio->uio_offset;
601 	fp->f_nextoff = uio->uio_offset;
602 	VOP_UNLOCK(vp, 0);
603 	if (vp->v_type != VCHR)
604 		vn_finished_write(mp);
605 unlock:
606 	VFS_UNLOCK_GIANT(vfslocked);
607 	return (error);
608 }
609 
610 /*
611  * File table truncate routine.
612  */
613 static int
614 vn_truncate(fp, length, active_cred, td)
615 	struct file *fp;
616 	off_t length;
617 	struct ucred *active_cred;
618 	struct thread *td;
619 {
620 	struct vattr vattr;
621 	struct mount *mp;
622 	struct vnode *vp;
623 	int vfslocked;
624 	int error;
625 
626 	vp = fp->f_vnode;
627 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
628 	error = vn_start_write(vp, &mp, V_WAIT | PCATCH);
629 	if (error) {
630 		VFS_UNLOCK_GIANT(vfslocked);
631 		return (error);
632 	}
633 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
634 	if (vp->v_type == VDIR) {
635 		error = EISDIR;
636 		goto out;
637 	}
638 #ifdef MAC
639 	error = mac_vnode_check_write(active_cred, fp->f_cred, vp);
640 	if (error)
641 		goto out;
642 #endif
643 	error = vn_writechk(vp);
644 	if (error == 0) {
645 		VATTR_NULL(&vattr);
646 		vattr.va_size = length;
647 		error = VOP_SETATTR(vp, &vattr, fp->f_cred);
648 	}
649 out:
650 	VOP_UNLOCK(vp, 0);
651 	vn_finished_write(mp);
652 	VFS_UNLOCK_GIANT(vfslocked);
653 	return (error);
654 }
655 
656 /*
657  * File table vnode stat routine.
658  */
659 static int
660 vn_statfile(fp, sb, active_cred, td)
661 	struct file *fp;
662 	struct stat *sb;
663 	struct ucred *active_cred;
664 	struct thread *td;
665 {
666 	struct vnode *vp = fp->f_vnode;
667 	int vfslocked;
668 	int error;
669 
670 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
671 	vn_lock(vp, LK_SHARED | LK_RETRY);
672 	error = vn_stat(vp, sb, active_cred, fp->f_cred, td);
673 	VOP_UNLOCK(vp, 0);
674 	VFS_UNLOCK_GIANT(vfslocked);
675 
676 	return (error);
677 }
678 
679 /*
680  * Stat a vnode; implementation for the stat syscall
681  */
682 int
683 vn_stat(vp, sb, active_cred, file_cred, td)
684 	struct vnode *vp;
685 	register struct stat *sb;
686 	struct ucred *active_cred;
687 	struct ucred *file_cred;
688 	struct thread *td;
689 {
690 	struct vattr vattr;
691 	register struct vattr *vap;
692 	int error;
693 	u_short mode;
694 
695 #ifdef MAC
696 	error = mac_vnode_check_stat(active_cred, file_cred, vp);
697 	if (error)
698 		return (error);
699 #endif
700 
701 	vap = &vattr;
702 
703 	/*
704 	 * Initialize defaults for new and unusual fields, so that file
705 	 * systems which don't support these fields don't need to know
706 	 * about them.
707 	 */
708 	vap->va_birthtime.tv_sec = -1;
709 	vap->va_birthtime.tv_nsec = 0;
710 	vap->va_fsid = VNOVAL;
711 	vap->va_rdev = NODEV;
712 
713 	error = VOP_GETATTR(vp, vap, active_cred);
714 	if (error)
715 		return (error);
716 
717 	/*
718 	 * Zero the spare stat fields
719 	 */
720 	bzero(sb, sizeof *sb);
721 
722 	/*
723 	 * Copy from vattr table
724 	 */
725 	if (vap->va_fsid != VNOVAL)
726 		sb->st_dev = vap->va_fsid;
727 	else
728 		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
729 	sb->st_ino = vap->va_fileid;
730 	mode = vap->va_mode;
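	/* Map the vnode type to the S_IF* file-type bits of st_mode. */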
731 	switch (vap->va_type) {
732 	case VREG:
733 		mode |= S_IFREG;
734 		break;
735 	case VDIR:
736 		mode |= S_IFDIR;
737 		break;
738 	case VBLK:
739 		mode |= S_IFBLK;
740 		break;
741 	case VCHR:
742 		mode |= S_IFCHR;
743 		break;
744 	case VLNK:
745 		mode |= S_IFLNK;
746 		break;
747 	case VSOCK:
748 		mode |= S_IFSOCK;
749 		break;
750 	case VFIFO:
751 		mode |= S_IFIFO;
752 		break;
753 	default:
754 		return (EBADF);
755 	}
756 	sb->st_mode = mode;
757 	sb->st_nlink = vap->va_nlink;
758 	sb->st_uid = vap->va_uid;
759 	sb->st_gid = vap->va_gid;
760 	sb->st_rdev = vap->va_rdev;
761 	if (vap->va_size > OFF_MAX)
762 		return (EOVERFLOW);
763 	sb->st_size = vap->va_size;
764 	sb->st_atimespec = vap->va_atime;
765 	sb->st_mtimespec = vap->va_mtime;
766 	sb->st_ctimespec = vap->va_ctime;
767 	sb->st_birthtimespec = vap->va_birthtime;
768 
769 	/*
770 	 * According to www.opengroup.org, the meaning of st_blksize is
771 	 *   "a filesystem-specific preferred I/O block size for this
772 	 *    object.  In some filesystem types, this may vary from file
773 	 *    to file"
774 	 * Default to PAGE_SIZE after much discussion.
775 	 * XXX: min(PAGE_SIZE, vp->v_bufobj.bo_bsize) may be more correct.
776 	 */
777 
778 	sb->st_blksize = PAGE_SIZE;
779 
780 	sb->st_flags = vap->va_flags;
781 	if (priv_check(td, PRIV_VFS_GENERATION))
782 		sb->st_gen = 0;
783 	else
784 		sb->st_gen = vap->va_gen;
785 
786 	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
787 	return (0);
788 }
789 
790 /*
791  * File table vnode ioctl routine.
792  */
793 static int
794 vn_ioctl(fp, com, data, active_cred, td)
795 	struct file *fp;
796 	u_long com;
797 	void *data;
798 	struct ucred *active_cred;
799 	struct thread *td;
800 {
801 	struct vnode *vp = fp->f_vnode;
802 	struct vattr vattr;
803 	int vfslocked;
804 	int error;
805 
806 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
807 	error = ENOTTY;
808 	switch (vp->v_type) {
809 	case VREG:
810 	case VDIR:
811 		if (com == FIONREAD) {
812 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
813 			error = VOP_GETATTR(vp, &vattr, active_cred);
814 			VOP_UNLOCK(vp, 0);
815 			if (!error)
816 				*(int *)data = vattr.va_size - fp->f_offset;
817 		} else if (com == FIONBIO || com == FIOASYNC)	/* XXX */
819 			error = 0;
820 		else
821 			error = VOP_IOCTL(vp, com, data, fp->f_flag,
822 			    active_cred, td);
823 		break;
824 
825 	default:
826 		break;
827 	}
828 	VFS_UNLOCK_GIANT(vfslocked);
829 	return (error);
830 }
831 
832 /*
833  * File table vnode poll routine.
834  */
835 static int
836 vn_poll(fp, events, active_cred, td)
837 	struct file *fp;
838 	int events;
839 	struct ucred *active_cred;
840 	struct thread *td;
841 {
842 	struct vnode *vp;
843 	int vfslocked;
844 	int error;
845 
846 	vp = fp->f_vnode;
847 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
848 #ifdef MAC
849 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
850 	error = mac_vnode_check_poll(active_cred, fp->f_cred, vp);
851 	VOP_UNLOCK(vp, 0);
852 	if (!error)
853 #endif
854 		error = VOP_POLL(vp, events, fp->f_cred, td);
856 	VFS_UNLOCK_GIANT(vfslocked);
857 	return (error);
858 }
859 
860 /*
861  * Acquire the requested lock and then check for validity.  LK_RETRY
862  * permits vn_lock to return doomed vnodes.
863  */
864 int
865 _vn_lock(struct vnode *vp, int flags, char *file, int line)
866 {
867 	int error;
868 
869 	VNASSERT((flags & LK_TYPE_MASK) != 0, vp,
870 	    ("vn_lock called with no locktype."));
871 	do {
872 #ifdef DEBUG_VFS_LOCKS
873 		KASSERT(vp->v_holdcnt != 0,
874 		    ("vn_lock %p: zero hold count", vp));
875 #endif
876 		error = VOP_LOCK1(vp, flags, file, line);
877 		flags &= ~LK_INTERLOCK;	/* Interlock is always dropped. */
878 		KASSERT((flags & LK_RETRY) == 0 || error == 0,
879 		    ("LK_RETRY set with incompatible flags (0x%x) or an error occurred (%d)",
880 		    flags, error));
881 		/*
882 		 * Callers specify LK_RETRY if they wish to get dead vnodes.
883 		 * If RETRY is not set, we return ENOENT instead.
884 		 */
885 		if (error == 0 && vp->v_iflag & VI_DOOMED &&
886 		    (flags & LK_RETRY) == 0) {
887 			VOP_UNLOCK(vp, 0);
888 			error = ENOENT;
889 			break;
890 		}
891 	} while (flags & LK_RETRY && error != 0);
892 	return (error);
893 }
894 
895 /*
896  * File table vnode close routine.
897  */
898 static int
899 vn_closefile(fp, td)
900 	struct file *fp;
901 	struct thread *td;
902 {
903 	struct vnode *vp;
904 	struct flock lf;
905 	int vfslocked;
906 	int error;
907 
908 	vp = fp->f_vnode;
909 
910 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
911 	if (fp->f_type == DTYPE_VNODE && fp->f_flag & FHASLOCK) {
912 		lf.l_whence = SEEK_SET;
913 		lf.l_start = 0;
914 		lf.l_len = 0;
915 		lf.l_type = F_UNLCK;
916 		(void) VOP_ADVLOCK(vp, fp, F_UNLCK, &lf, F_FLOCK);
917 	}
918 
919 	fp->f_ops = &badfileops;
920 
921 	error = vn_close(vp, fp->f_flag, fp->f_cred, td);
922 	VFS_UNLOCK_GIANT(vfslocked);
923 	return (error);
924 }
925 
926 /*
927  * Prepare to start a filesystem write operation. If the operation is
928  * permitted, then we bump the count of operations in progress and
929  * proceed. If a suspend request is in progress, we wait until the
930  * suspension is over, and then proceed.
931  */
932 int
933 vn_start_write(vp, mpp, flags)
934 	struct vnode *vp;
935 	struct mount **mpp;
936 	int flags;
937 {
938 	struct mount *mp;
939 	int error;
940 
941 	error = 0;
942 	/*
943 	 * If a vnode is provided, get and return the mount point to
944 	 * which it will write.
945 	 */
946 	if (vp != NULL) {
947 		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
948 			*mpp = NULL;
949 			if (error != EOPNOTSUPP)
950 				return (error);
951 			return (0);
952 		}
953 	}
954 	if ((mp = *mpp) == NULL)
955 		return (0);
956 
957 	/*
958 	 * VOP_GETWRITEMOUNT() returns with the mp refcount held through
959 	 * a vfs_ref().
960 	 * If no vnode was provided, acquire a reference on the
961 	 * caller-supplied mount point ourselves in order to emulate
962 	 * a vfs_ref().
963 	 */
964 	MNT_ILOCK(mp);
965 	if (vp == NULL)
966 		MNT_REF(mp);
967 
968 	/*
969 	 * Check on status of suspension.
970 	 */
971 	if ((curthread->td_pflags & TDP_IGNSUSP) == 0 ||
972 	    mp->mnt_susp_owner != curthread) {
973 		while ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
974 			if (flags & V_NOWAIT) {
975 				error = EWOULDBLOCK;
976 				goto unlock;
977 			}
978 			error = msleep(&mp->mnt_flag, MNT_MTX(mp),
979 			    (PUSER - 1) | (flags & PCATCH), "suspfs", 0);
980 			if (error)
981 				goto unlock;
982 		}
983 	}
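	/*
	 * V_XSLEEP callers only wanted to wait out a pending suspension;
	 * do not account a write operation for them.
	 */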
984 	if (flags & V_XSLEEP)
985 		goto unlock;
986 	mp->mnt_writeopcount++;
987 unlock:
988 	MNT_REL(mp);
989 	MNT_IUNLOCK(mp);
990 	return (error);
991 }
992 
993 /*
994  * Secondary suspension. Used by operations such as vop_inactive
995  * routines that are needed by the higher level functions. These
996  * are allowed to proceed until all the higher level functions have
997  * completed (indicated by mnt_writeopcount dropping to zero). At that
998  * time, these operations are halted until the suspension is over.
999  */
1000 int
1001 vn_start_secondary_write(vp, mpp, flags)
1002 	struct vnode *vp;
1003 	struct mount **mpp;
1004 	int flags;
1005 {
1006 	struct mount *mp;
1007 	int error;
1008 
1009  retry:
1010 	if (vp != NULL) {
1011 		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
1012 			*mpp = NULL;
1013 			if (error != EOPNOTSUPP)
1014 				return (error);
1015 			return (0);
1016 		}
1017 	}
1018 	/*
1019 	 * If we are not suspended or have not yet reached suspended
1020 	 * mode, then let the operation proceed.
1021 	 */
1022 	if ((mp = *mpp) == NULL)
1023 		return (0);
1024 
1025 	/*
1026 	 * VOP_GETWRITEMOUNT() returns with the mp refcount held through
1027 	 * a vfs_ref().
1028 	 * If no vnode was provided, acquire a reference on the
1029 	 * caller-supplied mount point ourselves in order to emulate
1030 	 * a vfs_ref().
1031 	 */
1032 	MNT_ILOCK(mp);
1033 	if (vp == NULL)
1034 		MNT_REF(mp);
1035 	if ((mp->mnt_kern_flag & (MNTK_SUSPENDED | MNTK_SUSPEND2)) == 0) {
1036 		mp->mnt_secondary_writes++;
1037 		mp->mnt_secondary_accwrites++;
1038 		MNT_REL(mp);
1039 		MNT_IUNLOCK(mp);
1040 		return (0);
1041 	}
1042 	if (flags & V_NOWAIT) {
1043 		MNT_REL(mp);
1044 		MNT_IUNLOCK(mp);
1045 		return (EWOULDBLOCK);
1046 	}
1047 	/*
1048 	 * Wait for the suspension to finish.
1049 	 */
1050 	error = msleep(&mp->mnt_flag, MNT_MTX(mp),
1051 		       (PUSER - 1) | (flags & PCATCH) | PDROP, "suspfs", 0);
1052 	vfs_rel(mp);
1053 	if (error == 0)
1054 		goto retry;
1055 	return (error);
1056 }
1057 
1058 /*
1059  * Filesystem write operation has completed. If we are suspending and this
1060  * operation is the last one, notify the suspender that the suspension is
1061  * now in effect.
1062  */
1063 void
1064 vn_finished_write(mp)
1065 	struct mount *mp;
1066 {
1067 	if (mp == NULL)
1068 		return;
1069 	MNT_ILOCK(mp);
1070 	mp->mnt_writeopcount--;
1071 	if (mp->mnt_writeopcount < 0)
1072 		panic("vn_finished_write: neg cnt");
1073 	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
1074 	    mp->mnt_writeopcount <= 0)
1075 		wakeup(&mp->mnt_writeopcount);
1076 	MNT_IUNLOCK(mp);
1077 }
1078 
1079 
1080 /*
1081  * Filesystem secondary write operation has completed. If we are
1082  * suspending and this operation is the last one, notify the suspender
1083  * that the suspension is now in effect.
1084  */
1085 void
1086 vn_finished_secondary_write(mp)
1087 	struct mount *mp;
1088 {
1089 	if (mp == NULL)
1090 		return;
1091 	MNT_ILOCK(mp);
1092 	mp->mnt_secondary_writes--;
1093 	if (mp->mnt_secondary_writes < 0)
1094 		panic("vn_finished_secondary_write: neg cnt");
1095 	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
1096 	    mp->mnt_secondary_writes <= 0)
1097 		wakeup(&mp->mnt_secondary_writes);
1098 	MNT_IUNLOCK(mp);
1099 }
1100 
1101 
1102 
1103 /*
1104  * Request a filesystem to suspend write operations.
1105  */
1106 int
1107 vfs_write_suspend(mp)
1108 	struct mount *mp;
1109 {
1110 	struct thread *td = curthread;
1111 	int error;
1112 
1113 	MNT_ILOCK(mp);
1114 	if (mp->mnt_susp_owner == curthread) {
1115 		MNT_IUNLOCK(mp);
1116 		return (EALREADY);
1117 	}
1118 	while (mp->mnt_kern_flag & MNTK_SUSPEND)
1119 		msleep(&mp->mnt_flag, MNT_MTX(mp), PUSER - 1, "wsuspfs", 0);
1120 	mp->mnt_kern_flag |= MNTK_SUSPEND;
1121 	mp->mnt_susp_owner = curthread;
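	/*
	 * Wait for writes already started via vn_start_write() to drain
	 * before asking the filesystem to sync and complete the
	 * suspension.
	 */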
1122 	if (mp->mnt_writeopcount > 0)
1123 		(void) msleep(&mp->mnt_writeopcount,
1124 		    MNT_MTX(mp), (PUSER - 1)|PDROP, "suspwt", 0);
1125 	else
1126 		MNT_IUNLOCK(mp);
1127 	if ((error = VFS_SYNC(mp, MNT_SUSPEND, td)) != 0)
1128 		vfs_write_resume(mp);
1129 	return (error);
1130 }
1131 
1132 /*
1133  * Request a filesystem to resume write operations.
1134  */
1135 void
1136 vfs_write_resume(mp)
1137 	struct mount *mp;
1138 {
1139 
1140 	MNT_ILOCK(mp);
1141 	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
1142 		KASSERT(mp->mnt_susp_owner == curthread, ("mnt_susp_owner"));
1143 		mp->mnt_kern_flag &= ~(MNTK_SUSPEND | MNTK_SUSPEND2 |
1144 				       MNTK_SUSPENDED);
1145 		mp->mnt_susp_owner = NULL;
1146 		wakeup(&mp->mnt_writeopcount);
1147 		wakeup(&mp->mnt_flag);
1148 		curthread->td_pflags &= ~TDP_IGNSUSP;
1149 		MNT_IUNLOCK(mp);
1150 		VFS_SUSP_CLEAN(mp);
1151 	} else
1152 		MNT_IUNLOCK(mp);
1153 }
1154 
1155 /*
1156  * Implement kqueues for files by translating it to vnode operation.
1157  * Implement kqueues for files by translating them into vnode operations.
1158 static int
1159 vn_kqfilter(struct file *fp, struct knote *kn)
1160 {
1161 	int vfslocked;
1162 	int error;
1163 
1164 	vfslocked = VFS_LOCK_GIANT(fp->f_vnode->v_mount);
1165 	error = VOP_KQFILTER(fp->f_vnode, kn);
1166 	VFS_UNLOCK_GIANT(vfslocked);
1167 
1168 	return (error);
1169 }
1170 
1171 /*
1172  * Simplified in-kernel wrapper calls for extended attribute access.
1173  * Both calls pass in a NULL credential, authorizing as "kernel" access.
1174  * Set IO_NODELOCKED in ioflg if the vnode is already locked.
1175  */
1176 int
1177 vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
1178     const char *attrname, int *buflen, char *buf, struct thread *td)
1179 {
1180 	struct uio	auio;
1181 	struct iovec	iov;
1182 	int	error;
1183 
1184 	iov.iov_len = *buflen;
1185 	iov.iov_base = buf;
1186 
1187 	auio.uio_iov = &iov;
1188 	auio.uio_iovcnt = 1;
1189 	auio.uio_rw = UIO_READ;
1190 	auio.uio_segflg = UIO_SYSSPACE;
1191 	auio.uio_td = td;
1192 	auio.uio_offset = 0;
1193 	auio.uio_resid = *buflen;
1194 
1195 	if ((ioflg & IO_NODELOCKED) == 0)
1196 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1197 
1198 	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
1199 
1200 	/* authorize attribute retrieval as kernel */
1201 	error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL,
1202 	    td);
1203 
1204 	if ((ioflg & IO_NODELOCKED) == 0)
1205 		VOP_UNLOCK(vp, 0);
1206 
1207 	if (error == 0) {
1208 		*buflen = *buflen - auio.uio_resid;
1209 	}
1210 
1211 	return (error);
1212 }
1213 
1214 /*
1215  * XXX failure mode if partially written?
1216  */
1217 int
1218 vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
1219     const char *attrname, int buflen, char *buf, struct thread *td)
1220 {
1221 	struct uio	auio;
1222 	struct iovec	iov;
1223 	struct mount	*mp;
1224 	int	error;
1225 
1226 	iov.iov_len = buflen;
1227 	iov.iov_base = buf;
1228 
1229 	auio.uio_iov = &iov;
1230 	auio.uio_iovcnt = 1;
1231 	auio.uio_rw = UIO_WRITE;
1232 	auio.uio_segflg = UIO_SYSSPACE;
1233 	auio.uio_td = td;
1234 	auio.uio_offset = 0;
1235 	auio.uio_resid = buflen;
1236 
1237 	if ((ioflg & IO_NODELOCKED) == 0) {
1238 		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
1239 			return (error);
1240 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1241 	}
1242 
1243 	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
1244 
1245 	/* authorize attribute setting as kernel */
1246 	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td);
1247 
1248 	if ((ioflg & IO_NODELOCKED) == 0) {
1249 		vn_finished_write(mp);
1250 		VOP_UNLOCK(vp, 0);
1251 	}
1252 
1253 	return (error);
1254 }
1255 
1256 int
1257 vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
1258     const char *attrname, struct thread *td)
1259 {
1260 	struct mount	*mp;
1261 	int	error;
1262 
1263 	if ((ioflg & IO_NODELOCKED) == 0) {
1264 		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
1265 			return (error);
1266 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1267 	}
1268 
1269 	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
1270 
1271 	/* authorize attribute removal as kernel */
1272 	error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NULL, td);
1273 	if (error == EOPNOTSUPP)
1274 		error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
1275 		    NULL, td);
1276 
1277 	if ((ioflg & IO_NODELOCKED) == 0) {
1278 		vn_finished_write(mp);
1279 		VOP_UNLOCK(vp, 0);
1280 	}
1281 
1282 	return (error);
1283 }
1284 
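/*
 * Look up the vnode with inode number 'ino' on the mount point of the
 * locked vnode 'vp'.  The lock on 'vp' is dropped around vfs_busy()
 * and VFS_VGET() to avoid deadlocking against a concurrent unmount;
 * ENOENT is returned (and *rvp released) if 'vp' is doomed while it
 * was unlocked.
 */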
1285 int
1286 vn_vget_ino(struct vnode *vp, ino_t ino, int lkflags, struct vnode **rvp)
1287 {
1288 	struct mount *mp;
1289 	int ltype, error;
1290 
1291 	mp = vp->v_mount;
1292 	ltype = VOP_ISLOCKED(vp);
1293 	KASSERT(ltype == LK_EXCLUSIVE || ltype == LK_SHARED,
1294 	    ("vn_vget_ino: vp not locked"));
1295 	for (;;) {
1296 		error = vfs_busy(mp, MBF_NOWAIT);
1297 		if (error == 0)
1298 			break;
1299 		VOP_UNLOCK(vp, 0);
1300 		pause("vn_vget", 1);
1301 		vn_lock(vp, ltype | LK_RETRY);
1302 		if (vp->v_iflag & VI_DOOMED)
1303 			return (ENOENT);
1304 	}
1305 	VOP_UNLOCK(vp, 0);
1306 	error = VFS_VGET(mp, ino, lkflags, rvp);
1307 	vfs_unbusy(mp);
1308 	vn_lock(vp, ltype | LK_RETRY);
1309 	if (vp->v_iflag & VI_DOOMED) {
1310 		if (error == 0)
1311 			vput(*rvp);
1312 		error = ENOENT;
1313 	}
1314 	return (error);
1315 }
1316