xref: /freebsd/sys/kern/vfs_vnops.c (revision d056fa046c6a91b90cd98165face0e42a33a5173)
1 /*-
2  * Copyright (c) 1982, 1986, 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 4. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
35  */
36 
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD$");
39 
40 #include "opt_mac.h"
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/fcntl.h>
45 #include <sys/file.h>
46 #include <sys/kdb.h>
47 #include <sys/stat.h>
48 #include <sys/proc.h>
49 #include <sys/limits.h>
50 #include <sys/lock.h>
51 #include <sys/mac.h>
52 #include <sys/mount.h>
53 #include <sys/mutex.h>
54 #include <sys/namei.h>
55 #include <sys/vnode.h>
56 #include <sys/bio.h>
57 #include <sys/buf.h>
58 #include <sys/filio.h>
59 #include <sys/sx.h>
60 #include <sys/ttycom.h>
61 #include <sys/conf.h>
62 #include <sys/syslog.h>
63 #include <sys/unistd.h>
64 
65 static fo_rdwr_t	vn_read;
66 static fo_rdwr_t	vn_write;
67 static fo_ioctl_t	vn_ioctl;
68 static fo_poll_t	vn_poll;
69 static fo_kqfilter_t	vn_kqfilter;
70 static fo_stat_t	vn_statfile;
71 static fo_close_t	vn_closefile;
72 
73 struct 	fileops vnops = {
74 	.fo_read = vn_read,
75 	.fo_write = vn_write,
76 	.fo_ioctl = vn_ioctl,
77 	.fo_poll = vn_poll,
78 	.fo_kqfilter = vn_kqfilter,
79 	.fo_stat = vn_statfile,
80 	.fo_close = vn_closefile,
81 	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
82 };
83 
84 int
85 vn_open(ndp, flagp, cmode, fdidx)
86 	struct nameidata *ndp;
87 	int *flagp, cmode, fdidx;
88 {
89 	struct thread *td = ndp->ni_cnd.cn_thread;
90 
91 	return (vn_open_cred(ndp, flagp, cmode, td->td_ucred, fdidx));
92 }
93 
94 /*
95  * Common code for vnode open operations.
96  * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
97  *
98  * Note that this does NOT free nameidata for the successful case,
99  * due to the NDINIT being done elsewhere.
100  */
101 int
102 vn_open_cred(ndp, flagp, cmode, cred, fdidx)
103 	struct nameidata *ndp;
104 	int *flagp, cmode;
105 	struct ucred *cred;
106 	int fdidx;
107 {
108 	struct vnode *vp;
109 	struct mount *mp;
110 	struct thread *td = ndp->ni_cnd.cn_thread;
111 	struct vattr vat;
112 	struct vattr *vap = &vat;
113 	int mode, fmode, error;
114 	int vfslocked, mpsafe;
115 
116 	mpsafe = ndp->ni_cnd.cn_flags & MPSAFE;
117 restart:
118 	vfslocked = 0;
119 	fmode = *flagp;
120 	if (fmode & O_CREAT) {
121 		ndp->ni_cnd.cn_nameiop = CREATE;
122 		ndp->ni_cnd.cn_flags = ISOPEN | LOCKPARENT | LOCKLEAF |
123 		    MPSAFE | AUDITVNODE1;
124 		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
125 			ndp->ni_cnd.cn_flags |= FOLLOW;
126 		bwillwrite();
127 		if ((error = namei(ndp)) != 0)
128 			return (error);
129 		vfslocked = NDHASGIANT(ndp);
130 		if (!mpsafe)
131 			ndp->ni_cnd.cn_flags &= ~MPSAFE;
132 		if (ndp->ni_vp == NULL) {
133 			VATTR_NULL(vap);
134 			vap->va_type = VREG;
135 			vap->va_mode = cmode;
136 			if (fmode & O_EXCL)
137 				vap->va_vaflags |= VA_EXCLUSIVE;
138 			if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) {
139 				NDFREE(ndp, NDF_ONLY_PNBUF);
140 				vput(ndp->ni_dvp);
141 				VFS_UNLOCK_GIANT(vfslocked);
142 				if ((error = vn_start_write(NULL, &mp,
143 				    V_XSLEEP | PCATCH)) != 0)
144 					return (error);
145 				goto restart;
146 			}
147 #ifdef MAC
148 			error = mac_check_vnode_create(cred, ndp->ni_dvp,
149 			    &ndp->ni_cnd, vap);
150 			if (error == 0) {
151 #endif
152 				VOP_LEASE(ndp->ni_dvp, td, cred, LEASE_WRITE);
153 				error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
154 						   &ndp->ni_cnd, vap);
155 #ifdef MAC
156 			}
157 #endif
158 			vput(ndp->ni_dvp);
159 			vn_finished_write(mp);
160 			if (error) {
161 				VFS_UNLOCK_GIANT(vfslocked);
162 				NDFREE(ndp, NDF_ONLY_PNBUF);
163 				return (error);
164 			}
165 			fmode &= ~O_TRUNC;
166 			vp = ndp->ni_vp;
167 		} else {
168 			if (ndp->ni_dvp == ndp->ni_vp)
169 				vrele(ndp->ni_dvp);
170 			else
171 				vput(ndp->ni_dvp);
172 			ndp->ni_dvp = NULL;
173 			vp = ndp->ni_vp;
174 			if (fmode & O_EXCL) {
175 				error = EEXIST;
176 				goto bad;
177 			}
178 			fmode &= ~O_CREAT;
179 		}
180 	} else {
181 		ndp->ni_cnd.cn_nameiop = LOOKUP;
182 		ndp->ni_cnd.cn_flags = ISOPEN |
183 		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) |
184 		    LOCKSHARED | LOCKLEAF | MPSAFE | AUDITVNODE1;
185 		if ((error = namei(ndp)) != 0)
186 			return (error);
187 		if (!mpsafe)
188 			ndp->ni_cnd.cn_flags &= ~MPSAFE;
189 		vfslocked = NDHASGIANT(ndp);
190 		vp = ndp->ni_vp;
191 	}
192 	if (vp->v_type == VLNK) {
193 		error = EMLINK;
194 		goto bad;
195 	}
196 	if (vp->v_type == VSOCK) {
197 		error = EOPNOTSUPP;
198 		goto bad;
199 	}
200 	mode = 0;
201 	if (fmode & (FWRITE | O_TRUNC)) {
202 		if (vp->v_type == VDIR) {
203 			error = EISDIR;
204 			goto bad;
205 		}
206 		mode |= VWRITE;
207 	}
208 	if (fmode & FREAD)
209 		mode |= VREAD;
210 	if (fmode & O_APPEND)
211 		mode |= VAPPEND;
212 #ifdef MAC
213 	error = mac_check_vnode_open(cred, vp, mode);
214 	if (error)
215 		goto bad;
216 #endif
217 	if ((fmode & O_CREAT) == 0) {
218 		if (mode & VWRITE) {
219 			error = vn_writechk(vp);
220 			if (error)
221 				goto bad;
222 		}
223 		if (mode) {
224 			error = VOP_ACCESS(vp, mode, cred, td);
225 			if (error)
226 				goto bad;
227 		}
228 	}
229 	if ((error = VOP_OPEN(vp, fmode, cred, td, fdidx)) != 0)
230 		goto bad;
231 
232 	if (fmode & FWRITE)
233 		vp->v_writecount++;
234 	*flagp = fmode;
235 	ASSERT_VOP_LOCKED(vp, "vn_open_cred");
236 	if (!mpsafe)
237 		VFS_UNLOCK_GIANT(vfslocked);
238 	return (0);
239 bad:
240 	NDFREE(ndp, NDF_ONLY_PNBUF);
241 	vput(vp);
242 	VFS_UNLOCK_GIANT(vfslocked);
243 	*flagp = fmode;
244 	ndp->ni_vp = NULL;
245 	return (error);
246 }
247 
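/*
 * Illustrative sketch, not used elsewhere in this file: an in-kernel
 * caller of vn_open() typically fills in a nameidata with NDINIT(),
 * frees the pathname buffer and drops the vnode lock on success, and
 * later calls vn_close().  The path, the FREAD-only open and the -1
 * file descriptor index are assumptions for the example; Giant
 * handling via MPSAFE/NDHASGIANT() is omitted for brevity.
 *
 *	struct nameidata nd;
 *	int flags, error;
 *
 *	flags = FREAD;
 *	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, "/some/file", td);
 *	error = vn_open(&nd, &flags, 0, -1);
 *	if (error == 0) {
 *		NDFREE(&nd, NDF_ONLY_PNBUF);
 *		VOP_UNLOCK(nd.ni_vp, 0, td);
 *		... use nd.ni_vp ...
 *		error = vn_close(nd.ni_vp, FREAD, td->td_ucred, td);
 *	}
 */
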
248 /*
249  * Check for write permissions on the specified vnode.
250  * Prototype text segments cannot be written.
251  */
252 int
253 vn_writechk(vp)
254 	register struct vnode *vp;
255 {
256 
257 	ASSERT_VOP_LOCKED(vp, "vn_writechk");
258 	/*
259 	 * If there's shared text associated with
260 	 * the vnode, the file is being executed
261 	 * and we can't allow writing to it.
262 	 */
263 	if (vp->v_vflag & VV_TEXT)
264 		return (ETXTBSY);
265 
266 	return (0);
267 }
268 
269 /*
270  * Vnode close call
271  */
272 int
273 vn_close(vp, flags, file_cred, td)
274 	register struct vnode *vp;
275 	int flags;
276 	struct ucred *file_cred;
277 	struct thread *td;
278 {
279 	struct mount *mp;
280 	int error;
281 
282 	VFS_ASSERT_GIANT(vp->v_mount);
283 
284 	vn_start_write(vp, &mp, V_WAIT);
285 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
286 	if (flags & FWRITE)
287 		vp->v_writecount--;
288 	error = VOP_CLOSE(vp, flags, file_cred, td);
289 	vput(vp);
290 	vn_finished_write(mp);
291 	return (error);
292 }
293 
294 /*
295  * Sequential heuristic - detect sequential operation
296  */
297 static __inline
298 int
299 sequential_heuristic(struct uio *uio, struct file *fp)
300 {
301 
302 	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
303 	    uio->uio_offset == fp->f_nextoff) {
304 		/*
305 		 * XXX we assume that the filesystem block size is
306 		 * the default.  Not true, but still gives us a pretty
307 		 * good indicator of how sequential the read operations
308 		 * are.
309 		 */
310 		fp->f_seqcount += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
311 		if (fp->f_seqcount > IO_SEQMAX)
312 			fp->f_seqcount = IO_SEQMAX;
313 		return(fp->f_seqcount << IO_SEQSHIFT);
314 	}
315 
316 	/*
317 	 * Not sequential, quick draw-down of seqcount
318 	 */
319 	if (fp->f_seqcount > 1)
320 		fp->f_seqcount = 1;
321 	else
322 		fp->f_seqcount = 0;
323 	return(0);
324 }
325 
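/*
 * Worked example, assuming the common 16KB BKVASIZE: a 64KB read that
 * begins exactly at f_nextoff adds (65536 + 16383) / 16384 = 4 to
 * f_seqcount (capped at IO_SEQMAX), and the value handed back to the
 * caller is f_seqcount << IO_SEQSHIFT, which VOP_READ/VOP_WRITE
 * implementations may use as a read-ahead/clustering hint.
 */
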
326 /*
327  * Package up an I/O request on a vnode into a uio and do it.
328  */
329 int
330 vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, active_cred, file_cred,
331     aresid, td)
332 	enum uio_rw rw;
333 	struct vnode *vp;
334 	void *base;
335 	int len;
336 	off_t offset;
337 	enum uio_seg segflg;
338 	int ioflg;
339 	struct ucred *active_cred;
340 	struct ucred *file_cred;
341 	int *aresid;
342 	struct thread *td;
343 {
344 	struct uio auio;
345 	struct iovec aiov;
346 	struct mount *mp;
347 	struct ucred *cred;
348 	int error;
349 
350 	VFS_ASSERT_GIANT(vp->v_mount);
351 
352 	if ((ioflg & IO_NODELOCKED) == 0) {
353 		mp = NULL;
354 		if (rw == UIO_WRITE) {
355 			if (vp->v_type != VCHR &&
356 			    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH))
357 			    != 0)
358 				return (error);
359 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
360 		} else {
361 			/*
362 			 * XXX This should be LK_SHARED but I don't trust VFS
363 			 * enough to leave it like that until it has been
364 			 * reviewed further.
365 			 */
366 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
367 		}
368 
369 	}
370 	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
371 	auio.uio_iov = &aiov;
372 	auio.uio_iovcnt = 1;
373 	aiov.iov_base = base;
374 	aiov.iov_len = len;
375 	auio.uio_resid = len;
376 	auio.uio_offset = offset;
377 	auio.uio_segflg = segflg;
378 	auio.uio_rw = rw;
379 	auio.uio_td = td;
380 	error = 0;
381 #ifdef MAC
382 	if ((ioflg & IO_NOMACCHECK) == 0) {
383 		if (rw == UIO_READ)
384 			error = mac_check_vnode_read(active_cred, file_cred,
385 			    vp);
386 		else
387 			error = mac_check_vnode_write(active_cred, file_cred,
388 			    vp);
389 	}
390 #endif
391 	if (error == 0) {
392 		if (file_cred)
393 			cred = file_cred;
394 		else
395 			cred = active_cred;
396 		if (rw == UIO_READ)
397 			error = VOP_READ(vp, &auio, ioflg, cred);
398 		else
399 			error = VOP_WRITE(vp, &auio, ioflg, cred);
400 	}
401 	if (aresid)
402 		*aresid = auio.uio_resid;
403 	else
404 		if (auio.uio_resid && error == 0)
405 			error = EIO;
406 	if ((ioflg & IO_NODELOCKED) == 0) {
407 		if (rw == UIO_WRITE && vp->v_type != VCHR)
408 			vn_finished_write(mp);
409 		VOP_UNLOCK(vp, 0, td);
410 	}
411 	return (error);
412 }
413 
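/*
 * Illustrative sketch, not used elsewhere in this file: reading the
 * first 512 bytes of a vnode that the caller already holds locked
 * might look as follows.  The buffer size and the use of NOCRED for
 * file_cred are assumptions for the example.
 *
 *	char buf[512];
 *	int resid, error;
 *
 *	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), (off_t)0,
 *	    UIO_SYSSPACE, IO_NODELOCKED, td->td_ucred, NOCRED,
 *	    &resid, td);
 */
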
414 /*
415  * Package up an I/O request on a vnode into a uio and do it.  The I/O
416  * request is split up into smaller chunks and we try to avoid saturating
417  * the buffer cache while potentially holding a vnode locked, so we
418  * call bwillwrite() before calling vn_rdwr().  We also call uio_yield()
419  * to give other processes a chance to lock the vnode (either other processes
420  * core'ing the same binary, or unrelated processes scanning the directory).
421  */
422 int
423 vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, active_cred,
424     file_cred, aresid, td)
425 	enum uio_rw rw;
426 	struct vnode *vp;
427 	void *base;
428 	size_t len;
429 	off_t offset;
430 	enum uio_seg segflg;
431 	int ioflg;
432 	struct ucred *active_cred;
433 	struct ucred *file_cred;
434 	size_t *aresid;
435 	struct thread *td;
436 {
437 	int error = 0;
438 	int iaresid;
439 
440 	VFS_ASSERT_GIANT(vp->v_mount);
441 
442 	do {
443 		int chunk;
444 
445 		/*
446 		 * Force `offset' to a multiple of MAXBSIZE except possibly
447 		 * for the first chunk, so that filesystems only need to
448 		 * write full blocks except possibly for the first and last
449 		 * chunks.
450 		 */
451 		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;
452 
453 		if (chunk > len)
454 			chunk = len;
455 		if (rw != UIO_READ && vp->v_type == VREG)
456 			bwillwrite();
457 		iaresid = 0;
458 		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
459 		    ioflg, active_cred, file_cred, &iaresid, td);
460 		len -= chunk;	/* aresid calc already includes length */
461 		if (error)
462 			break;
463 		offset += chunk;
464 		base = (char *)base + chunk;
465 		uio_yield();
466 	} while (len);
467 	if (aresid)
468 		*aresid = len + iaresid;
469 	return (error);
470 }
471 
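/*
 * Worked example of the chunking above, assuming the usual 64KB
 * MAXBSIZE: for offset 100000 the first chunk is
 * 65536 - (100000 % 65536) = 31072 bytes, which advances the offset
 * to 131072, a multiple of MAXBSIZE, so every later chunk except the
 * last is a full MAXBSIZE and ends on a block boundary.
 */
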
472 /*
473  * File table vnode read routine.
474  */
475 static int
476 vn_read(fp, uio, active_cred, flags, td)
477 	struct file *fp;
478 	struct uio *uio;
479 	struct ucred *active_cred;
480 	struct thread *td;
481 	int flags;
482 {
483 	struct vnode *vp;
484 	int error, ioflag;
485 	int vfslocked;
486 
487 	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
488 	    uio->uio_td, td));
489 	vp = fp->f_vnode;
490 	ioflag = 0;
491 	if (fp->f_flag & FNONBLOCK)
492 		ioflag |= IO_NDELAY;
493 	if (fp->f_flag & O_DIRECT)
494 		ioflag |= IO_DIRECT;
495 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
496 	VOP_LEASE(vp, td, fp->f_cred, LEASE_READ);
497 	/*
498 	 * According to McKusick the vn lock was protecting f_offset here.
499 	 * It is now protected by the FOFFSET_LOCKED flag.
500 	 */
501 	if ((flags & FOF_OFFSET) == 0) {
502 		FILE_LOCK(fp);
503 		while (fp->f_vnread_flags & FOFFSET_LOCKED) {
504 			fp->f_vnread_flags |= FOFFSET_LOCK_WAITING;
505 			msleep(&fp->f_vnread_flags, fp->f_mtxp, PUSER - 1, "vnread offlock", 0);
506 		}
507 		fp->f_vnread_flags |= FOFFSET_LOCKED;
508 		FILE_UNLOCK(fp);
509 		vn_lock(vp, LK_SHARED | LK_RETRY, td);
510 		uio->uio_offset = fp->f_offset;
511 	} else
512 		vn_lock(vp, LK_SHARED | LK_RETRY, td);
513 
514 	ioflag |= sequential_heuristic(uio, fp);
515 
516 #ifdef MAC
517 	error = mac_check_vnode_read(active_cred, fp->f_cred, vp);
518 	if (error == 0)
519 #endif
520 		error = VOP_READ(vp, uio, ioflag, fp->f_cred);
521 	if ((flags & FOF_OFFSET) == 0) {
522 		fp->f_offset = uio->uio_offset;
523 		FILE_LOCK(fp);
524 		if (fp->f_vnread_flags & FOFFSET_LOCK_WAITING)
525 			wakeup(&fp->f_vnread_flags);
526 		fp->f_vnread_flags = 0;
527 		FILE_UNLOCK(fp);
528 	}
529 	fp->f_nextoff = uio->uio_offset;
530 	VOP_UNLOCK(vp, 0, td);
531 	VFS_UNLOCK_GIANT(vfslocked);
532 	return (error);
533 }
534 
535 /*
536  * File table vnode write routine.
537  */
538 static int
539 vn_write(fp, uio, active_cred, flags, td)
540 	struct file *fp;
541 	struct uio *uio;
542 	struct ucred *active_cred;
543 	struct thread *td;
544 	int flags;
545 {
546 	struct vnode *vp;
547 	struct mount *mp;
548 	int error, ioflag;
549 	int vfslocked;
550 
551 	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
552 	    uio->uio_td, td));
553 	vp = fp->f_vnode;
554 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
555 	if (vp->v_type == VREG)
556 		bwillwrite();
557 	ioflag = IO_UNIT;
558 	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
559 		ioflag |= IO_APPEND;
560 	if (fp->f_flag & FNONBLOCK)
561 		ioflag |= IO_NDELAY;
562 	if (fp->f_flag & O_DIRECT)
563 		ioflag |= IO_DIRECT;
564 	if ((fp->f_flag & O_FSYNC) ||
565 	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
566 		ioflag |= IO_SYNC;
567 	mp = NULL;
568 	if (vp->v_type != VCHR &&
569 	    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
570 		goto unlock;
571 	VOP_LEASE(vp, td, fp->f_cred, LEASE_WRITE);
572 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
573 	if ((flags & FOF_OFFSET) == 0)
574 		uio->uio_offset = fp->f_offset;
575 	ioflag |= sequential_heuristic(uio, fp);
576 #ifdef MAC
577 	error = mac_check_vnode_write(active_cred, fp->f_cred, vp);
578 	if (error == 0)
579 #endif
580 		error = VOP_WRITE(vp, uio, ioflag, fp->f_cred);
581 	if ((flags & FOF_OFFSET) == 0)
582 		fp->f_offset = uio->uio_offset;
583 	fp->f_nextoff = uio->uio_offset;
584 	VOP_UNLOCK(vp, 0, td);
585 	if (vp->v_type != VCHR)
586 		vn_finished_write(mp);
587 unlock:
588 	VFS_UNLOCK_GIANT(vfslocked);
589 	return (error);
590 }
591 
592 /*
593  * File table vnode stat routine.
594  */
595 static int
596 vn_statfile(fp, sb, active_cred, td)
597 	struct file *fp;
598 	struct stat *sb;
599 	struct ucred *active_cred;
600 	struct thread *td;
601 {
602 	struct vnode *vp = fp->f_vnode;
603 	int vfslocked;
604 	int error;
605 
606 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
607 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
608 	error = vn_stat(vp, sb, active_cred, fp->f_cred, td);
609 	VOP_UNLOCK(vp, 0, td);
610 	VFS_UNLOCK_GIANT(vfslocked);
611 
612 	return (error);
613 }
614 
615 /*
616  * Stat a vnode; implementation for the stat syscall
617  */
618 int
619 vn_stat(vp, sb, active_cred, file_cred, td)
620 	struct vnode *vp;
621 	register struct stat *sb;
622 	struct ucred *active_cred;
623 	struct ucred *file_cred;
624 	struct thread *td;
625 {
626 	struct vattr vattr;
627 	register struct vattr *vap;
628 	int error;
629 	u_short mode;
630 
631 #ifdef MAC
632 	error = mac_check_vnode_stat(active_cred, file_cred, vp);
633 	if (error)
634 		return (error);
635 #endif
636 
637 	vap = &vattr;
638 	error = VOP_GETATTR(vp, vap, active_cred, td);
639 	if (error)
640 		return (error);
641 
642 	/*
643 	 * Zero the spare stat fields
644 	 */
645 	bzero(sb, sizeof *sb);
646 
647 	/*
648 	 * Copy from vattr table
649 	 */
650 	if (vap->va_fsid != VNOVAL)
651 		sb->st_dev = vap->va_fsid;
652 	else
653 		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
654 	sb->st_ino = vap->va_fileid;
655 	mode = vap->va_mode;
656 	switch (vap->va_type) {
657 	case VREG:
658 		mode |= S_IFREG;
659 		break;
660 	case VDIR:
661 		mode |= S_IFDIR;
662 		break;
663 	case VBLK:
664 		mode |= S_IFBLK;
665 		break;
666 	case VCHR:
667 		mode |= S_IFCHR;
668 		break;
669 	case VLNK:
670 		mode |= S_IFLNK;
671 		/* This is a cosmetic change, symlinks do not have a mode. */
672 		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
673 			sb->st_mode &= ~ACCESSPERMS;	/* 0000 */
674 		else
675 			sb->st_mode |= ACCESSPERMS;	/* 0777 */
676 		break;
677 	case VSOCK:
678 		mode |= S_IFSOCK;
679 		break;
680 	case VFIFO:
681 		mode |= S_IFIFO;
682 		break;
683 	default:
684 		return (EBADF);
685 	}
686 	sb->st_mode = mode;
687 	sb->st_nlink = vap->va_nlink;
688 	sb->st_uid = vap->va_uid;
689 	sb->st_gid = vap->va_gid;
690 	sb->st_rdev = vap->va_rdev;
691 	if (vap->va_size > OFF_MAX)
692 		return (EOVERFLOW);
693 	sb->st_size = vap->va_size;
694 	sb->st_atimespec = vap->va_atime;
695 	sb->st_mtimespec = vap->va_mtime;
696 	sb->st_ctimespec = vap->va_ctime;
697 	sb->st_birthtimespec = vap->va_birthtime;
698 
699 	/*
700 	 * According to www.opengroup.org, the meaning of st_blksize is
701 	 *   "a filesystem-specific preferred I/O block size for this
702 	 *    object.  In some filesystem types, this may vary from file
703 	 *    to file"
704 	 * Default to PAGE_SIZE after much discussion.
705 	 * XXX: min(PAGE_SIZE, vp->v_bufobj.bo_bsize) may be more correct.
706 	 */
707 
708 	sb->st_blksize = PAGE_SIZE;
709 
710 	sb->st_flags = vap->va_flags;
711 	if (suser(td))
712 		sb->st_gen = 0;
713 	else
714 		sb->st_gen = vap->va_gen;
715 
716 #if (S_BLKSIZE == 512)
717 	/* Optimize this case */
718 	sb->st_blocks = vap->va_bytes >> 9;
719 #else
720 	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
721 #endif
722 	return (0);
723 }
724 
725 /*
726  * File table vnode ioctl routine.
727  */
728 static int
729 vn_ioctl(fp, com, data, active_cred, td)
730 	struct file *fp;
731 	u_long com;
732 	void *data;
733 	struct ucred *active_cred;
734 	struct thread *td;
735 {
736 	struct vnode *vp = fp->f_vnode;
737 	struct vattr vattr;
738 	int vfslocked;
739 	int error;
740 
741 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
742 	error = ENOTTY;
743 	switch (vp->v_type) {
744 	case VREG:
745 	case VDIR:
746 		if (com == FIONREAD) {
747 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
748 			error = VOP_GETATTR(vp, &vattr, active_cred, td);
749 			VOP_UNLOCK(vp, 0, td);
750 			if (!error)
751 				*(int *)data = vattr.va_size - fp->f_offset;
752 		}
753 		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
754 			error = 0;
755 		else
756 			error = VOP_IOCTL(vp, com, data, fp->f_flag,
757 			    active_cred, td);
758 		break;
759 
760 	default:
761 		break;
762 	}
763 	VFS_UNLOCK_GIANT(vfslocked);
764 	return (error);
765 }
766 
767 /*
768  * File table vnode poll routine.
769  */
770 static int
771 vn_poll(fp, events, active_cred, td)
772 	struct file *fp;
773 	int events;
774 	struct ucred *active_cred;
775 	struct thread *td;
776 {
777 	struct vnode *vp;
778 	int vfslocked;
779 	int error;
780 
781 	vp = fp->f_vnode;
782 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
783 #ifdef MAC
784 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
785 	error = mac_check_vnode_poll(active_cred, fp->f_cred, vp);
786 	VOP_UNLOCK(vp, 0, td);
787 	if (!error)
788 #endif
789 
790 	error = VOP_POLL(vp, events, fp->f_cred, td);
791 	VFS_UNLOCK_GIANT(vfslocked);
792 	return (error);
793 }
794 
795 /*
796  * Check that the vnode is still valid, and if so
797  * acquire requested lock.
798  */
799 int
800 vn_lock(vp, flags, td)
801 	struct vnode *vp;
802 	int flags;
803 	struct thread *td;
804 {
805 	int error;
806 
807 	do {
808 		if ((flags & LK_INTERLOCK) == 0)
809 			VI_LOCK(vp);
810 		if ((flags & LK_NOWAIT || (flags & LK_TYPE_MASK) == 0) &&
811 		    vp->v_iflag & VI_DOOMED) {
812 			VI_UNLOCK(vp);
813 			return (ENOENT);
814 		}
815 		/*
816 		 * Just polling to check validity.
817 		 */
818 		if ((flags & LK_TYPE_MASK) == 0) {
819 			VI_UNLOCK(vp);
820 			return (0);
821 		}
822 		/*
823 		 * lockmgr drops interlock before it will return for
824 		 * any reason.  So force the code above to relock it.
825 		 */
826 		error = VOP_LOCK(vp, flags | LK_INTERLOCK, td);
827 		flags &= ~LK_INTERLOCK;
828 		KASSERT((flags & LK_RETRY) == 0 || error == 0,
829 		    ("LK_RETRY set with incompatible flags %d\n", flags));
830 		/*
831 		 * Callers specify LK_RETRY if they wish to get dead vnodes.
832 		 * If RETRY is not set, we return ENOENT instead.
833 		 */
834 		if (error == 0 && vp->v_iflag & VI_DOOMED &&
835 		    (flags & LK_RETRY) == 0) {
836 			VOP_UNLOCK(vp, 0, td);
837 			error = ENOENT;
838 			break;
839 		}
840 	} while (flags & LK_RETRY && error != 0);
841 	return (error);
842 }
843 
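/*
 * Illustrative sketch, not used elsewhere in this file: callers
 * normally pair vn_lock() with VOP_UNLOCK().  Passing LK_RETRY asks
 * for the lock even on a doomed vnode; omitting it lets the caller
 * observe ENOENT for a dead vnode instead.
 *
 *	if (vn_lock(vp, LK_EXCLUSIVE, td) == 0) {
 *		... operate on the locked vnode ...
 *		VOP_UNLOCK(vp, 0, td);
 *	}
 */
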
844 /*
845  * File table vnode close routine.
846  */
847 static int
848 vn_closefile(fp, td)
849 	struct file *fp;
850 	struct thread *td;
851 {
852 	struct vnode *vp;
853 	struct flock lf;
854 	int vfslocked;
855 	int error;
856 
857 	vp = fp->f_vnode;
858 
859 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
860 	if (fp->f_type == DTYPE_VNODE && fp->f_flag & FHASLOCK) {
861 		lf.l_whence = SEEK_SET;
862 		lf.l_start = 0;
863 		lf.l_len = 0;
864 		lf.l_type = F_UNLCK;
865 		(void) VOP_ADVLOCK(vp, fp, F_UNLCK, &lf, F_FLOCK);
866 	}
867 
868 	fp->f_ops = &badfileops;
869 
870 	error = vn_close(vp, fp->f_flag, fp->f_cred, td);
871 	VFS_UNLOCK_GIANT(vfslocked);
872 	return (error);
873 }
874 
875 /*
876  * Preparing to start a filesystem write operation. If the operation is
877  * permitted, then we bump the count of operations in progress and
878  * proceed. If a suspend request is in progress, we wait until the
879  * suspension is over, and then proceed.
880  */
881 int
882 vn_start_write(vp, mpp, flags)
883 	struct vnode *vp;
884 	struct mount **mpp;
885 	int flags;
886 {
887 	struct mount *mp;
888 	int error;
889 
890 	error = 0;
891 	/*
892 	 * If a vnode is provided, get and return the mount point to
893 	 * which it will write.
894 	 */
895 	if (vp != NULL) {
896 		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
897 			*mpp = NULL;
898 			if (error != EOPNOTSUPP)
899 				return (error);
900 			return (0);
901 		}
902 	}
903 	if ((mp = *mpp) == NULL)
904 		return (0);
905 	MNT_ILOCK(mp);
906 	if (vp == NULL)
907 		MNT_REF(mp);
908 	/*
909 	 * Check on status of suspension.
910 	 */
911 	while ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
912 		if (flags & V_NOWAIT) {
913 			error = EWOULDBLOCK;
914 			goto unlock;
915 		}
916 		error = msleep(&mp->mnt_flag, MNT_MTX(mp),
917 		    (PUSER - 1) | (flags & PCATCH), "suspfs", 0);
918 		if (error)
919 			goto unlock;
920 	}
921 	if (flags & V_XSLEEP)
922 		goto unlock;
923 	mp->mnt_writeopcount++;
924 unlock:
925 	MNT_REL(mp);
926 	MNT_IUNLOCK(mp);
927 	return (error);
928 }
929 
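/*
 * Illustrative sketch, not used elsewhere in this file: write paths
 * bracket the actual modification between vn_start_write() and
 * vn_finished_write(), as vn_write() and vn_rdwr() above do; the VCHR
 * special case and error handling are elided here.
 *
 *	struct mount *mp;
 *
 *	if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
 *		return (error);
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
 *	... modify the file ...
 *	VOP_UNLOCK(vp, 0, td);
 *	vn_finished_write(mp);
 */
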
930 /*
931  * Secondary suspension. Used by operations such as vop_inactive
932  * routines that are needed by the higher level functions. These
933  * are allowed to proceed until all the higher level functions have
934  * completed (indicated by mnt_writeopcount dropping to zero). At that
935  * time, these operations are halted until the suspension is over.
936  */
937 int
938 vn_write_suspend_wait(vp, mp, flags)
939 	struct vnode *vp;
940 	struct mount *mp;
941 	int flags;
942 {
943 	int error;
944 
945 	if (vp != NULL) {
946 		if ((error = VOP_GETWRITEMOUNT(vp, &mp)) != 0) {
947 			if (error != EOPNOTSUPP)
948 				return (error);
949 			return (0);
950 		}
951 	}
952 	/*
953 	 * If we are not suspended or have not yet reached suspended
954 	 * mode, then let the operation proceed.
955 	 */
956 	if (mp == NULL)
957 		return (0);
958 	MNT_ILOCK(mp);
959 	if (vp == NULL)
960 		MNT_REF(mp);
961 	if ((mp->mnt_kern_flag & MNTK_SUSPENDED) == 0) {
962 		MNT_REL(mp);
963 		MNT_IUNLOCK(mp);
964 		return (0);
965 	}
966 	if (flags & V_NOWAIT) {
967 		MNT_REL(mp);
968 		MNT_IUNLOCK(mp);
969 		return (EWOULDBLOCK);
970 	}
971 	/*
972 	 * Wait for the suspension to finish.
973 	 */
974 	error = msleep(&mp->mnt_flag, MNT_MTX(mp),
975 	    (PUSER - 1) | (flags & PCATCH) | PDROP, "suspfs", 0);
976 	vfs_rel(mp);
977 	return (error);
978 }
979 
980 /*
981  * Secondary suspension. Used by operations such as vop_inactive
982  * routines that are needed by the higher level functions. These
983  * are allowed to proceed until all the higher level functions have
984  * completed (indicated by mnt_writeopcount dropping to zero). At that
985  * time, these operations are halted until the suspension is over.
986  */
987 int
988 vn_start_secondary_write(vp, mpp, flags)
989 	struct vnode *vp;
990 	struct mount **mpp;
991 	int flags;
992 {
993 	struct mount *mp;
994 	int error;
995 
996  retry:
997 	if (vp != NULL) {
998 		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
999 			*mpp = NULL;
1000 			if (error != EOPNOTSUPP)
1001 				return (error);
1002 			return (0);
1003 		}
1004 	}
1005 	/*
1006 	 * If we are not suspended or have not yet reached suspended
1007 	 * mode, then let the operation proceed.
1008 	 */
1009 	if ((mp = *mpp) == NULL)
1010 		return (0);
1011 	MNT_ILOCK(mp);
1012 	if (vp == NULL)
1013 		MNT_REF(mp);
1014 	if ((mp->mnt_kern_flag & (MNTK_SUSPENDED | MNTK_SUSPEND2)) == 0) {
1015 		mp->mnt_secondary_writes++;
1016 		mp->mnt_secondary_accwrites++;
1017 		MNT_REL(mp);
1018 		MNT_IUNLOCK(mp);
1019 		return (0);
1020 	}
1021 	if (flags & V_NOWAIT) {
1022 		MNT_REL(mp);
1023 		MNT_IUNLOCK(mp);
1024 		return (EWOULDBLOCK);
1025 	}
1026 	/*
1027 	 * Wait for the suspension to finish.
1028 	 */
1029 	error = msleep(&mp->mnt_flag, MNT_MTX(mp),
1030 		       (PUSER - 1) | (flags & PCATCH) | PDROP, "suspfs", 0);
1031 	vfs_rel(mp);
1032 	if (error == 0)
1033 		goto retry;
1034 	return (error);
1035 }
1036 
1037 /*
1038  * Filesystem write operation has completed. If we are suspending and this
1039  * operation is the last one, notify the suspender that the suspension is
1040  * now in effect.
1041  */
1042 void
1043 vn_finished_write(mp)
1044 	struct mount *mp;
1045 {
1046 	if (mp == NULL)
1047 		return;
1048 	MNT_ILOCK(mp);
1049 	mp->mnt_writeopcount--;
1050 	if (mp->mnt_writeopcount < 0)
1051 		panic("vn_finished_write: neg cnt");
1052 	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
1053 	    mp->mnt_writeopcount <= 0)
1054 		wakeup(&mp->mnt_writeopcount);
1055 	MNT_IUNLOCK(mp);
1056 }
1057 
1058 
1059 /*
1060  * Filesystem secondary write operation has completed. If we are
1061  * suspending and this operation is the last one, notify the suspender
1062  * that the suspension is now in effect.
1063  */
1064 void
1065 vn_finished_secondary_write(mp)
1066 	struct mount *mp;
1067 {
1068 	if (mp == NULL)
1069 		return;
1070 	MNT_ILOCK(mp);
1071 	mp->mnt_secondary_writes--;
1072 	if (mp->mnt_secondary_writes < 0)
1073 		panic("vn_finished_secondary_write: neg cnt");
1074 	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
1075 	    mp->mnt_secondary_writes <= 0)
1076 		wakeup(&mp->mnt_secondary_writes);
1077 	MNT_IUNLOCK(mp);
1078 }
1079 
1080 
1081 
1082 /*
1083  * Request a filesystem to suspend write operations.
1084  */
1085 int
1086 vfs_write_suspend(mp)
1087 	struct mount *mp;
1088 {
1089 	struct thread *td = curthread;
1090 	int error;
1091 
1092 	MNT_ILOCK(mp);
1093 	if (mp->mnt_kern_flag & MNTK_SUSPEND) {
1094 		MNT_IUNLOCK(mp);
1095 		return (0);
1096 	}
1097 	mp->mnt_kern_flag |= MNTK_SUSPEND;
1098 	if (mp->mnt_writeopcount > 0)
1099 		(void) msleep(&mp->mnt_writeopcount,
1100 		    MNT_MTX(mp), (PUSER - 1)|PDROP, "suspwt", 0);
1101 	else
1102 		MNT_IUNLOCK(mp);
1103 	if ((error = VFS_SYNC(mp, MNT_SUSPEND, td)) != 0)
1104 		vfs_write_resume(mp);
1105 	return (error);
1106 }
1107 
1108 /*
1109  * Request a filesystem to resume write operations.
1110  */
1111 void
1112 vfs_write_resume(mp)
1113 	struct mount *mp;
1114 {
1115 
1116 	MNT_ILOCK(mp);
1117 	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
1118 		mp->mnt_kern_flag &= ~(MNTK_SUSPEND | MNTK_SUSPEND2 |
1119 				       MNTK_SUSPENDED);
1120 		wakeup(&mp->mnt_writeopcount);
1121 		wakeup(&mp->mnt_flag);
1122 	}
1123 	MNT_IUNLOCK(mp);
1124 }
1125 
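/*
 * Illustrative sketch, not used elsewhere in this file: a consumer
 * such as snapshot creation suspends writes around its critical
 * section and resumes them afterwards; the error handling shown is an
 * assumption for the example.
 *
 *	if ((error = vfs_write_suspend(mp)) != 0)
 *		return (error);
 *	... capture the now-quiescent filesystem state ...
 *	vfs_write_resume(mp);
 */
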
1126 /*
1127  * Implement kqueues for files by translating them into vnode operations.
1128  */
1129 static int
1130 vn_kqfilter(struct file *fp, struct knote *kn)
1131 {
1132 	int vfslocked;
1133 	int error;
1134 
1135 	vfslocked = VFS_LOCK_GIANT(fp->f_vnode->v_mount);
1136 	error = VOP_KQFILTER(fp->f_vnode, kn);
1137 	VFS_UNLOCK_GIANT(vfslocked);
1138 
1139 	return (error);
1140 }
1141 
1142 /*
1143  * Simplified in-kernel wrapper calls for extended attribute access.
1144  * Both calls pass in a NULL credential, authorizing as "kernel" access.
1145  * Set IO_NODELOCKED in ioflg if the vnode is already locked.
1146  */
1147 int
1148 vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
1149     const char *attrname, int *buflen, char *buf, struct thread *td)
1150 {
1151 	struct uio	auio;
1152 	struct iovec	iov;
1153 	int	error;
1154 
1155 	iov.iov_len = *buflen;
1156 	iov.iov_base = buf;
1157 
1158 	auio.uio_iov = &iov;
1159 	auio.uio_iovcnt = 1;
1160 	auio.uio_rw = UIO_READ;
1161 	auio.uio_segflg = UIO_SYSSPACE;
1162 	auio.uio_td = td;
1163 	auio.uio_offset = 0;
1164 	auio.uio_resid = *buflen;
1165 
1166 	if ((ioflg & IO_NODELOCKED) == 0)
1167 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
1168 
1169 	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
1170 
1171 	/* authorize attribute retrieval as kernel */
1172 	error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL,
1173 	    td);
1174 
1175 	if ((ioflg & IO_NODELOCKED) == 0)
1176 		VOP_UNLOCK(vp, 0, td);
1177 
1178 	if (error == 0) {
1179 		*buflen = *buflen - auio.uio_resid;
1180 	}
1181 
1182 	return (error);
1183 }
1184 
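/*
 * Illustrative sketch, not used elsewhere in this file: a caller that
 * does not already hold the vnode lock could fetch a system-namespace
 * attribute as follows.  The attribute name and buffer size are
 * placeholders for the example.
 *
 *	char buf[128];
 *	int buflen, error;
 *
 *	buflen = sizeof(buf);
 *	error = vn_extattr_get(vp, 0, EXTATTR_NAMESPACE_SYSTEM,
 *	    "example.attr", &buflen, buf, td);
 */
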
1185 /*
1186  * XXX failure mode if partially written?
1187  */
1188 int
1189 vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
1190     const char *attrname, int buflen, char *buf, struct thread *td)
1191 {
1192 	struct uio	auio;
1193 	struct iovec	iov;
1194 	struct mount	*mp;
1195 	int	error;
1196 
1197 	iov.iov_len = buflen;
1198 	iov.iov_base = buf;
1199 
1200 	auio.uio_iov = &iov;
1201 	auio.uio_iovcnt = 1;
1202 	auio.uio_rw = UIO_WRITE;
1203 	auio.uio_segflg = UIO_SYSSPACE;
1204 	auio.uio_td = td;
1205 	auio.uio_offset = 0;
1206 	auio.uio_resid = buflen;
1207 
1208 	if ((ioflg & IO_NODELOCKED) == 0) {
1209 		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
1210 			return (error);
1211 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
1212 	}
1213 
1214 	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
1215 
1216 	/* authorize attribute setting as kernel */
1217 	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td);
1218 
1219 	if ((ioflg & IO_NODELOCKED) == 0) {
1220 		vn_finished_write(mp);
1221 		VOP_UNLOCK(vp, 0, td);
1222 	}
1223 
1224 	return (error);
1225 }
1226 
1227 int
1228 vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
1229     const char *attrname, struct thread *td)
1230 {
1231 	struct mount	*mp;
1232 	int	error;
1233 
1234 	if ((ioflg & IO_NODELOCKED) == 0) {
1235 		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
1236 			return (error);
1237 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
1238 	}
1239 
1240 	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
1241 
1242 	/* authorize attribute removal as kernel */
1243 	error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NULL, td);
1244 	if (error == EOPNOTSUPP)
1245 		error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
1246 		    NULL, td);
1247 
1248 	if ((ioflg & IO_NODELOCKED) == 0) {
1249 		vn_finished_write(mp);
1250 		VOP_UNLOCK(vp, 0, td);
1251 	}
1252 
1253 	return (error);
1254 }
1255