xref: /freebsd/sys/kern/vfs_vnops.c (revision d8b878873e7aa8df1972cc6a642804b17eb61087)
/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/kdb.h>
#include <sys/stat.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/sx.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/syslog.h>
#include <sys/unistd.h>

#include <security/mac/mac_framework.h>

static fo_rdwr_t	vn_read;
static fo_rdwr_t	vn_write;
static fo_truncate_t	vn_truncate;
static fo_ioctl_t	vn_ioctl;
static fo_poll_t	vn_poll;
static fo_kqfilter_t	vn_kqfilter;
static fo_stat_t	vn_statfile;
static fo_close_t	vn_closefile;

struct fileops vnops = {
	.fo_read = vn_read,
	.fo_write = vn_write,
	.fo_truncate = vn_truncate,
	.fo_ioctl = vn_ioctl,
	.fo_poll = vn_poll,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
};

int
vn_open(ndp, flagp, cmode, fp)
	struct nameidata *ndp;
	int *flagp, cmode;
	struct file *fp;
{
	struct thread *td = ndp->ni_cnd.cn_thread;

	return (vn_open_cred(ndp, flagp, cmode, 0, td->td_ucred, fp));
}

/*
 * Common code for vnode open operations.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 *
 * Note that this does NOT free the nameidata in the successful case,
 * since the NDINIT was done by the caller.
 */
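/*
 * Illustrative usage only (a sketch, not lifted from any particular
 * caller): the caller sets up the nameidata with NDINIT() beforehand
 * and owns the pathname buffer afterwards, roughly:
 *
 *	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, path, td);
 *	error = vn_open_cred(&nd, &flags, 0644, 0, cred, fp);
 *	if (error == 0) {
 *		NDFREE(&nd, NDF_ONLY_PNBUF);
 *		...use the locked vnode nd.ni_vp, later vn_close() it...
 *	}
 */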
int
vn_open_cred(struct nameidata *ndp, int *flagp, int cmode, u_int vn_open_flags,
    struct ucred *cred, struct file *fp)
{
	struct vnode *vp;
	struct mount *mp;
	struct thread *td = ndp->ni_cnd.cn_thread;
	struct vattr vat;
	struct vattr *vap = &vat;
	int fmode, error;
	accmode_t accmode;
	int vfslocked, mpsafe;

	mpsafe = ndp->ni_cnd.cn_flags & MPSAFE;
restart:
	vfslocked = 0;
	fmode = *flagp;
	if (fmode & O_CREAT) {
		ndp->ni_cnd.cn_nameiop = CREATE;
		ndp->ni_cnd.cn_flags = ISOPEN | LOCKPARENT | LOCKLEAF |
		    MPSAFE;
		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
			ndp->ni_cnd.cn_flags |= FOLLOW;
		if (!(vn_open_flags & VN_OPEN_NOAUDIT))
			ndp->ni_cnd.cn_flags |= AUDITVNODE1;
		bwillwrite();
		if ((error = namei(ndp)) != 0)
			return (error);
		vfslocked = NDHASGIANT(ndp);
		if (!mpsafe)
			ndp->ni_cnd.cn_flags &= ~MPSAFE;
		if (ndp->ni_vp == NULL) {
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) {
				NDFREE(ndp, NDF_ONLY_PNBUF);
				vput(ndp->ni_dvp);
				VFS_UNLOCK_GIANT(vfslocked);
				if ((error = vn_start_write(NULL, &mp,
				    V_XSLEEP | PCATCH)) != 0)
					return (error);
				goto restart;
			}
#ifdef MAC
			error = mac_vnode_check_create(cred, ndp->ni_dvp,
			    &ndp->ni_cnd, vap);
			if (error == 0)
#endif
				error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
				    &ndp->ni_cnd, vap);
			vput(ndp->ni_dvp);
			vn_finished_write(mp);
			if (error) {
				VFS_UNLOCK_GIANT(vfslocked);
				NDFREE(ndp, NDF_ONLY_PNBUF);
				return (error);
			}
			fmode &= ~O_TRUNC;
			vp = ndp->ni_vp;
		} else {
			if (ndp->ni_dvp == ndp->ni_vp)
				vrele(ndp->ni_dvp);
			else
				vput(ndp->ni_dvp);
			ndp->ni_dvp = NULL;
			vp = ndp->ni_vp;
			if (fmode & O_EXCL) {
				error = EEXIST;
				goto bad;
			}
			fmode &= ~O_CREAT;
		}
	} else {
		ndp->ni_cnd.cn_nameiop = LOOKUP;
		ndp->ni_cnd.cn_flags = ISOPEN |
		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) |
		    LOCKLEAF | MPSAFE;
		if (!(fmode & FWRITE))
			ndp->ni_cnd.cn_flags |= LOCKSHARED;
		if (!(vn_open_flags & VN_OPEN_NOAUDIT))
			ndp->ni_cnd.cn_flags |= AUDITVNODE1;
		if ((error = namei(ndp)) != 0)
			return (error);
		if (!mpsafe)
			ndp->ni_cnd.cn_flags &= ~MPSAFE;
		vfslocked = NDHASGIANT(ndp);
		vp = ndp->ni_vp;
	}
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if (vp->v_type != VDIR && fmode & O_DIRECTORY) {
		error = ENOTDIR;
		goto bad;
	}
	accmode = 0;
	if (fmode & (FWRITE | O_TRUNC)) {
		if (vp->v_type == VDIR) {
			error = EISDIR;
			goto bad;
		}
		accmode |= VWRITE;
	}
	if (fmode & FREAD)
		accmode |= VREAD;
	if (fmode & FEXEC)
		accmode |= VEXEC;
	if ((fmode & O_APPEND) && (fmode & FWRITE))
		accmode |= VAPPEND;
#ifdef MAC
	error = mac_vnode_check_open(cred, vp, accmode);
	if (error)
		goto bad;
#endif
	if ((fmode & O_CREAT) == 0) {
		if (accmode & VWRITE) {
			error = vn_writechk(vp);
			if (error)
				goto bad;
		}
		if (accmode) {
			error = VOP_ACCESS(vp, accmode, cred, td);
			if (error)
				goto bad;
		}
	}
	if ((error = VOP_OPEN(vp, fmode, cred, td, fp)) != 0)
		goto bad;

	if (fmode & FWRITE)
		vp->v_writecount++;
	*flagp = fmode;
	ASSERT_VOP_LOCKED(vp, "vn_open_cred");
	if (!mpsafe)
		VFS_UNLOCK_GIANT(vfslocked);
	return (0);
bad:
	NDFREE(ndp, NDF_ONLY_PNBUF);
	vput(vp);
	VFS_UNLOCK_GIANT(vfslocked);
	*flagp = fmode;
	ndp->ni_vp = NULL;
	return (error);
}

/*
 * Check for write permissions on the specified vnode.
 * Prototype text segments cannot be written.
 */
int
vn_writechk(vp)
	register struct vnode *vp;
{

	ASSERT_VOP_LOCKED(vp, "vn_writechk");
	/*
	 * If there's shared text associated with
	 * the vnode, try to free it up once.  If
	 * we fail, we can't allow writing.
	 */
	if (vp->v_vflag & VV_TEXT)
		return (ETXTBSY);

	return (0);
}

/*
 * Vnode close call
 */
int
vn_close(vp, flags, file_cred, td)
	register struct vnode *vp;
	int flags;
	struct ucred *file_cred;
	struct thread *td;
{
	struct mount *mp;
	int error, lock_flags;

	if (!(flags & FWRITE) && vp->v_mount != NULL &&
	    vp->v_mount->mnt_kern_flag & MNTK_EXTENDED_SHARED)
		lock_flags = LK_SHARED;
	else
		lock_flags = LK_EXCLUSIVE;

	VFS_ASSERT_GIANT(vp->v_mount);

	vn_start_write(vp, &mp, V_WAIT);
	vn_lock(vp, lock_flags | LK_RETRY);
	if (flags & FWRITE) {
		VNASSERT(vp->v_writecount > 0, vp,
		    ("vn_close: negative writecount"));
		vp->v_writecount--;
	}
	error = VOP_CLOSE(vp, flags, file_cred, td);
	vput(vp);
	vn_finished_write(mp);
	return (error);
}

/*
 * Heuristic to detect sequential operation.
 */
static int
sequential_heuristic(struct uio *uio, struct file *fp)
{

	if (atomic_load_acq_int(&(fp->f_flag)) & FRDAHEAD)
		return (fp->f_seqcount << IO_SEQSHIFT);

	/*
	 * Offset 0 is handled specially.  open() sets f_seqcount to 1 so
	 * that the first I/O is normally considered to be slightly
	 * sequential.  Seeking to offset 0 doesn't change sequentiality
	 * unless previous seeks have reduced f_seqcount to 0, in which
	 * case offset 0 is not special.
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		/*
		 * f_seqcount is in units of fixed-size blocks so that it
		 * depends mainly on the amount of sequential I/O and not
		 * much on the number of sequential I/O's.  The fixed size
		 * of 16384 is hard-coded here since it is (not quite) just
		 * a magic size that works well here.  This size is more
		 * closely related to the best I/O size for real disks than
		 * to any block size used by software.
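		 *
		 * As an illustrative data point, a single sequential 64k
		 * request adds howmany(65536, 16384) == 4 to f_seqcount,
		 * which the code below then caps at IO_SEQMAX.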
		 */
		fp->f_seqcount += howmany(uio->uio_resid, 16384);
		if (fp->f_seqcount > IO_SEQMAX)
			fp->f_seqcount = IO_SEQMAX;
		return (fp->f_seqcount << IO_SEQSHIFT);
	}

	/* Not sequential.  Quickly draw down sequentiality. */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return (0);
}

/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
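/*
 * Illustrative only: reading into a kernel buffer from a vnode that
 * the caller has already locked might look roughly like
 *
 *	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), (off_t)0,
 *	    UIO_SYSSPACE, IO_NODELOCKED, cred, NOCRED, &resid, td);
 *
 * where a NULL file_cred (NOCRED) leaves active_cred authorizing
 * the I/O.
 */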
int
vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, active_cred, file_cred,
    aresid, td)
	enum uio_rw rw;
	struct vnode *vp;
	void *base;
	int len;
	off_t offset;
	enum uio_seg segflg;
	int ioflg;
	struct ucred *active_cred;
	struct ucred *file_cred;
	int *aresid;
	struct thread *td;
{
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;
	struct ucred *cred;
	int error, lock_flags;

	VFS_ASSERT_GIANT(vp->v_mount);

	if ((ioflg & IO_NODELOCKED) == 0) {
		mp = NULL;
		if (rw == UIO_WRITE) {
			if (vp->v_type != VCHR &&
			    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH))
			    != 0)
				return (error);
			if (MNT_SHARED_WRITES(mp) ||
			    ((mp == NULL) && MNT_SHARED_WRITES(vp->v_mount))) {
				lock_flags = LK_SHARED;
			} else {
				lock_flags = LK_EXCLUSIVE;
			}
			vn_lock(vp, lock_flags | LK_RETRY);
		} else
			vn_lock(vp, LK_SHARED | LK_RETRY);
	}
	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = td;
	error = 0;
#ifdef MAC
	if ((ioflg & IO_NOMACCHECK) == 0) {
		if (rw == UIO_READ)
			error = mac_vnode_check_read(active_cred, file_cred,
			    vp);
		else
			error = mac_vnode_check_write(active_cred, file_cred,
			    vp);
	}
#endif
	if (error == 0) {
		if (file_cred)
			cred = file_cred;
		else
			cred = active_cred;
		if (rw == UIO_READ)
			error = VOP_READ(vp, &auio, ioflg, cred);
		else
			error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else if (auio.uio_resid && error == 0)
		error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0) {
		if (rw == UIO_WRITE && vp->v_type != VCHR)
			vn_finished_write(mp);
		VOP_UNLOCK(vp, 0);
	}
	return (error);
}

/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * check bwillwrite() before calling vn_rdwr().  We also call uio_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
 */
int
vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, active_cred,
    file_cred, aresid, td)
	enum uio_rw rw;
	struct vnode *vp;
	void *base;
	size_t len;
	off_t offset;
	enum uio_seg segflg;
	int ioflg;
	struct ucred *active_cred;
	struct ucred *file_cred;
	size_t *aresid;
	struct thread *td;
{
	int error = 0;
	int iaresid;

	VFS_ASSERT_GIANT(vp->v_mount);

	do {
		int chunk;

		/*
		 * Force `offset' to a multiple of MAXBSIZE except possibly
		 * for the first chunk, so that filesystems only need to
		 * write full blocks except possibly for the first and last
		 * chunks.
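		 *
		 * For example (illustrative, with MAXBSIZE of 65536):
		 * starting at offset 1000, the first chunk is
		 * 65536 - 1000 = 64536 bytes, after which every later
		 * chunk begins on a 64k boundary.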
		 */
		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;
		if (chunk > len)
			chunk = len;
		if (rw != UIO_READ && vp->v_type == VREG)
			bwillwrite();
		iaresid = 0;
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
		    ioflg, active_cred, file_cred, &iaresid, td);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base = (char *)base + chunk;
		uio_yield();
	} while (len);
	if (aresid)
		*aresid = len + iaresid;
	return (error);
}

/*
 * File table vnode read routine.
 */
static int
vn_read(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	int flags;
	struct thread *td;
{
	struct vnode *vp;
	int error, ioflag;
	struct mtx *mtxp;
	int vfslocked;

	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	mtxp = NULL;
	vp = fp->f_vnode;
	ioflag = 0;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	/*
	 * According to McKusick the vn lock was protecting f_offset here.
	 * It is now protected by the FOFFSET_LOCKED flag.
	 */
	if ((flags & FOF_OFFSET) == 0) {
		mtxp = mtx_pool_find(mtxpool_sleep, fp);
		mtx_lock(mtxp);
		while (fp->f_vnread_flags & FOFFSET_LOCKED) {
			fp->f_vnread_flags |= FOFFSET_LOCK_WAITING;
			msleep(&fp->f_vnread_flags, mtxp, PUSER - 1,
			    "vnread offlock", 0);
		}
		fp->f_vnread_flags |= FOFFSET_LOCKED;
		mtx_unlock(mtxp);
		vn_lock(vp, LK_SHARED | LK_RETRY);
		uio->uio_offset = fp->f_offset;
	} else
		vn_lock(vp, LK_SHARED | LK_RETRY);

	ioflag |= sequential_heuristic(uio, fp);

#ifdef MAC
	error = mac_vnode_check_read(active_cred, fp->f_cred, vp);
	if (error == 0)
#endif
		error = VOP_READ(vp, uio, ioflag, fp->f_cred);
	if ((flags & FOF_OFFSET) == 0) {
		fp->f_offset = uio->uio_offset;
		mtx_lock(mtxp);
		if (fp->f_vnread_flags & FOFFSET_LOCK_WAITING)
			wakeup(&fp->f_vnread_flags);
		fp->f_vnread_flags = 0;
		mtx_unlock(mtxp);
	}
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0);
	VFS_UNLOCK_GIANT(vfslocked);
	return (error);
}

/*
 * File table vnode write routine.
 */
static int
vn_write(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	int flags;
	struct thread *td;
{
	struct vnode *vp;
	struct mount *mp;
	int error, ioflag, lock_flags;
	int vfslocked;

	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	vp = fp->f_vnode;
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	if (vp->v_type == VREG)
		bwillwrite();
	ioflag = IO_UNIT;
	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
		ioflag |= IO_APPEND;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	if ((fp->f_flag & O_FSYNC) ||
	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
		ioflag |= IO_SYNC;
	mp = NULL;
	if (vp->v_type != VCHR &&
	    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
		goto unlock;

	if ((MNT_SHARED_WRITES(mp) ||
	    ((mp == NULL) && MNT_SHARED_WRITES(vp->v_mount))) &&
	    (flags & FOF_OFFSET) != 0) {
		lock_flags = LK_SHARED;
	} else {
		lock_flags = LK_EXCLUSIVE;
	}

	vn_lock(vp, lock_flags | LK_RETRY);
	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = fp->f_offset;
	ioflag |= sequential_heuristic(uio, fp);
#ifdef MAC
	error = mac_vnode_check_write(active_cred, fp->f_cred, vp);
	if (error == 0)
#endif
		error = VOP_WRITE(vp, uio, ioflag, fp->f_cred);
	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0);
	if (vp->v_type != VCHR)
		vn_finished_write(mp);
unlock:
	VFS_UNLOCK_GIANT(vfslocked);
	return (error);
}

/*
 * File table truncate routine.
 */
static int
vn_truncate(fp, length, active_cred, td)
	struct file *fp;
	off_t length;
	struct ucred *active_cred;
	struct thread *td;
{
	struct vattr vattr;
	struct mount *mp;
	struct vnode *vp;
	int vfslocked;
	int error;

	vp = fp->f_vnode;
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	error = vn_start_write(vp, &mp, V_WAIT | PCATCH);
	if (error) {
		VFS_UNLOCK_GIANT(vfslocked);
		return (error);
	}
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (vp->v_type == VDIR) {
		error = EISDIR;
		goto out;
	}
#ifdef MAC
	error = mac_vnode_check_write(active_cred, fp->f_cred, vp);
	if (error)
		goto out;
#endif
	error = vn_writechk(vp);
	if (error == 0) {
		VATTR_NULL(&vattr);
		vattr.va_size = length;
		error = VOP_SETATTR(vp, &vattr, fp->f_cred);
	}
out:
	VOP_UNLOCK(vp, 0);
	vn_finished_write(mp);
	VFS_UNLOCK_GIANT(vfslocked);
	return (error);
}

/*
 * File table vnode stat routine.
 */
static int
vn_statfile(fp, sb, active_cred, td)
	struct file *fp;
	struct stat *sb;
	struct ucred *active_cred;
	struct thread *td;
{
	struct vnode *vp = fp->f_vnode;
	int vfslocked;
	int error;

	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = vn_stat(vp, sb, active_cred, fp->f_cred, td);
	VOP_UNLOCK(vp, 0);
	VFS_UNLOCK_GIANT(vfslocked);

	return (error);
}

/*
 * Stat a vnode; implementation for the stat syscall
 */
int
vn_stat(vp, sb, active_cred, file_cred, td)
	struct vnode *vp;
	register struct stat *sb;
	struct ucred *active_cred;
	struct ucred *file_cred;
	struct thread *td;
{
	struct vattr vattr;
	register struct vattr *vap;
	int error;
	u_short mode;

#ifdef MAC
	error = mac_vnode_check_stat(active_cred, file_cred, vp);
	if (error)
		return (error);
#endif

	vap = &vattr;

	/*
	 * Initialize defaults for new and unusual fields, so that file
	 * systems which don't support these fields don't need to know
	 * about them.
	 */
	vap->va_birthtime.tv_sec = -1;
	vap->va_birthtime.tv_nsec = 0;
	vap->va_fsid = VNOVAL;
	vap->va_rdev = NODEV;

	error = VOP_GETATTR(vp, vap, active_cred);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	bzero(sb, sizeof *sb);

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = vap->va_rdev;
	if (vap->va_size > OFF_MAX)
		return (EOVERFLOW);
	sb->st_size = vap->va_size;
	sb->st_atim = vap->va_atime;
	sb->st_mtim = vap->va_mtime;
	sb->st_ctim = vap->va_ctime;
	sb->st_birthtim = vap->va_birthtime;

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 *   "a filesystem-specific preferred I/O block size for this
	 *    object.  In some filesystem types, this may vary from file
	 *    to file"
	 * Use minimum/default of PAGE_SIZE (e.g. for VCHR).
	 */

	sb->st_blksize = max(PAGE_SIZE, vap->va_blocksize);

	sb->st_flags = vap->va_flags;
	if (priv_check(td, PRIV_VFS_GENERATION))
		sb->st_gen = 0;
	else
		sb->st_gen = vap->va_gen;

	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
	return (0);
}

/*
 * File table vnode ioctl routine.
 */
static int
vn_ioctl(fp, com, data, active_cred, td)
	struct file *fp;
	u_long com;
	void *data;
	struct ucred *active_cred;
	struct thread *td;
{
	struct vnode *vp = fp->f_vnode;
	struct vattr vattr;
	int vfslocked;
	int error;

	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	error = ENOTTY;
	switch (vp->v_type) {
	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
			error = VOP_GETATTR(vp, &vattr, active_cred);
			VOP_UNLOCK(vp, 0);
			if (!error)
				*(int *)data = vattr.va_size - fp->f_offset;
		}
		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
			error = 0;
		else
			error = VOP_IOCTL(vp, com, data, fp->f_flag,
			    active_cred, td);
		break;

	default:
		break;
	}
	VFS_UNLOCK_GIANT(vfslocked);
	return (error);
}

/*
 * File table vnode poll routine.
 */
static int
vn_poll(fp, events, active_cred, td)
	struct file *fp;
	int events;
	struct ucred *active_cred;
	struct thread *td;
{
	struct vnode *vp;
	int vfslocked;
	int error;

	vp = fp->f_vnode;
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
#ifdef MAC
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = mac_vnode_check_poll(active_cred, fp->f_cred, vp);
	VOP_UNLOCK(vp, 0);
	if (!error)
#endif
	error = VOP_POLL(vp, events, fp->f_cred, td);
	VFS_UNLOCK_GIANT(vfslocked);
	return (error);
}

/*
 * Acquire the requested lock and then check for validity.  LK_RETRY
 * permits vn_lock to return doomed vnodes.
 */
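/*
 * For instance, vn_lock(vp, LK_EXCLUSIVE | LK_RETRY) always returns
 * with the vnode locked, even if it was doomed while we slept, while
 * the same call without LK_RETRY fails with ENOENT on a doomed vnode.
 */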
int
_vn_lock(struct vnode *vp, int flags, char *file, int line)
{
	int error;

	VNASSERT((flags & LK_TYPE_MASK) != 0, vp,
	    ("vn_lock called with no locktype."));
	do {
#ifdef DEBUG_VFS_LOCKS
		KASSERT(vp->v_holdcnt != 0,
		    ("vn_lock %p: zero hold count", vp));
#endif
		error = VOP_LOCK1(vp, flags, file, line);
		flags &= ~LK_INTERLOCK;	/* Interlock is always dropped. */
		KASSERT((flags & LK_RETRY) == 0 || error == 0,
		    ("LK_RETRY set with incompatible flags (0x%x) or an error occurred (%d)",
		    flags, error));
		/*
		 * Callers specify LK_RETRY if they wish to get dead vnodes.
		 * If RETRY is not set, we return ENOENT instead.
		 */
		if (error == 0 && vp->v_iflag & VI_DOOMED &&
		    (flags & LK_RETRY) == 0) {
			VOP_UNLOCK(vp, 0);
			error = ENOENT;
			break;
		}
	} while (flags & LK_RETRY && error != 0);
	return (error);
}

/*
 * File table vnode close routine.
 */
static int
vn_closefile(fp, td)
	struct file *fp;
	struct thread *td;
{
	struct vnode *vp;
	struct flock lf;
	int vfslocked;
	int error;

	vp = fp->f_vnode;

	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	if (fp->f_type == DTYPE_VNODE && fp->f_flag & FHASLOCK) {
		lf.l_whence = SEEK_SET;
		lf.l_start = 0;
		lf.l_len = 0;
		lf.l_type = F_UNLCK;
		(void) VOP_ADVLOCK(vp, fp, F_UNLCK, &lf, F_FLOCK);
	}

	fp->f_ops = &badfileops;

	error = vn_close(vp, fp->f_flag, fp->f_cred, td);
	VFS_UNLOCK_GIANT(vfslocked);
	return (error);
}

/*
 * Preparing to start a filesystem write operation. If the operation is
 * permitted, then we bump the count of operations in progress and
 * proceed. If a suspend request is in progress, we wait until the
 * suspension is over, and then proceed.
 */
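/*
 * The usual calling pattern (a sketch; vn_write() above is a real
 * instance) brackets the modifying operation:
 *
 *	if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
 *		return (error);
 *	...modify the filesystem...
 *	vn_finished_write(mp);
 */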
int
vn_start_write(vp, mpp, flags)
	struct vnode *vp;
	struct mount **mpp;
	int flags;
{
	struct mount *mp;
	int error;

	error = 0;
	/*
	 * If a vnode is provided, get and return the mount point to
	 * which it will write.
	 */
	if (vp != NULL) {
		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
			*mpp = NULL;
			if (error != EOPNOTSUPP)
				return (error);
			return (0);
		}
	}
	if ((mp = *mpp) == NULL)
		return (0);

	/*
	 * VOP_GETWRITEMOUNT() returns with the mp refcount held through
	 * a vfs_ref().
	 * When a vnode is not provided we need to acquire a refcount
	 * for the provided mountpoint too, in order to emulate a
	 * vfs_ref().
	 */
	MNT_ILOCK(mp);
	if (vp == NULL)
		MNT_REF(mp);

	/*
	 * Check on status of suspension.
	 */
	if ((curthread->td_pflags & TDP_IGNSUSP) == 0 ||
	    mp->mnt_susp_owner != curthread) {
		while ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
			if (flags & V_NOWAIT) {
				error = EWOULDBLOCK;
				goto unlock;
			}
			error = msleep(&mp->mnt_flag, MNT_MTX(mp),
			    (PUSER - 1) | (flags & PCATCH), "suspfs", 0);
			if (error)
				goto unlock;
		}
	}
	if (flags & V_XSLEEP)
		goto unlock;
	mp->mnt_writeopcount++;
unlock:
	if (error != 0 || (flags & V_XSLEEP) != 0)
		MNT_REL(mp);
	MNT_IUNLOCK(mp);
	return (error);
}

/*
 * Secondary suspension. Used by operations such as vop_inactive
 * routines that are needed by the higher level functions. These
 * are allowed to proceed until all the higher level functions have
 * completed (indicated by mnt_writeopcount dropping to zero). At that
 * time, these operations are halted until the suspension is over.
 */
int
vn_start_secondary_write(vp, mpp, flags)
	struct vnode *vp;
	struct mount **mpp;
	int flags;
{
	struct mount *mp;
	int error;

 retry:
	if (vp != NULL) {
		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
			*mpp = NULL;
			if (error != EOPNOTSUPP)
				return (error);
			return (0);
		}
	}
	/*
	 * If we are not suspended or have not yet reached suspended
	 * mode, then let the operation proceed.
	 */
	if ((mp = *mpp) == NULL)
		return (0);

	/*
	 * VOP_GETWRITEMOUNT() returns with the mp refcount held through
	 * a vfs_ref().
	 * When a vnode is not provided we need to acquire a refcount
	 * for the provided mountpoint too, in order to emulate a
	 * vfs_ref().
	 */
	MNT_ILOCK(mp);
	if (vp == NULL)
		MNT_REF(mp);
	if ((mp->mnt_kern_flag & (MNTK_SUSPENDED | MNTK_SUSPEND2)) == 0) {
		mp->mnt_secondary_writes++;
		mp->mnt_secondary_accwrites++;
		MNT_IUNLOCK(mp);
		return (0);
	}
	if (flags & V_NOWAIT) {
		MNT_REL(mp);
		MNT_IUNLOCK(mp);
		return (EWOULDBLOCK);
	}
	/*
	 * Wait for the suspension to finish.
	 */
	error = msleep(&mp->mnt_flag, MNT_MTX(mp),
	    (PUSER - 1) | (flags & PCATCH) | PDROP, "suspfs", 0);
	vfs_rel(mp);
	if (error == 0)
		goto retry;
	return (error);
}

/*
 * Filesystem write operation has completed. If we are suspending and this
 * operation is the last one, notify the suspender that the suspension is
 * now in effect.
 */
void
vn_finished_write(mp)
	struct mount *mp;
{
	if (mp == NULL)
		return;
	MNT_ILOCK(mp);
	MNT_REL(mp);
	mp->mnt_writeopcount--;
	if (mp->mnt_writeopcount < 0)
		panic("vn_finished_write: neg cnt");
	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
	    mp->mnt_writeopcount <= 0)
		wakeup(&mp->mnt_writeopcount);
	MNT_IUNLOCK(mp);
}

/*
 * Filesystem secondary write operation has completed. If we are
 * suspending and this operation is the last one, notify the suspender
 * that the suspension is now in effect.
 */
void
vn_finished_secondary_write(mp)
	struct mount *mp;
{
	if (mp == NULL)
		return;
	MNT_ILOCK(mp);
	MNT_REL(mp);
	mp->mnt_secondary_writes--;
	if (mp->mnt_secondary_writes < 0)
		panic("vn_finished_secondary_write: neg cnt");
	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
	    mp->mnt_secondary_writes <= 0)
		wakeup(&mp->mnt_secondary_writes);
	MNT_IUNLOCK(mp);
}

/*
 * Request a filesystem to suspend write operations.
 */
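/*
 * A suspension is normally bracketed as (sketch):
 *
 *	error = vfs_write_suspend(mp);
 *	if (error == 0) {
 *		...operate on the quiesced filesystem...
 *		vfs_write_resume(mp);
 *	}
 */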
int
vfs_write_suspend(mp)
	struct mount *mp;
{
	int error;

	MNT_ILOCK(mp);
	if (mp->mnt_susp_owner == curthread) {
		MNT_IUNLOCK(mp);
		return (EALREADY);
	}
	while (mp->mnt_kern_flag & MNTK_SUSPEND)
		msleep(&mp->mnt_flag, MNT_MTX(mp), PUSER - 1, "wsuspfs", 0);
	mp->mnt_kern_flag |= MNTK_SUSPEND;
	mp->mnt_susp_owner = curthread;
	if (mp->mnt_writeopcount > 0)
		(void) msleep(&mp->mnt_writeopcount,
		    MNT_MTX(mp), (PUSER - 1) | PDROP, "suspwt", 0);
	else
		MNT_IUNLOCK(mp);
	if ((error = VFS_SYNC(mp, MNT_SUSPEND)) != 0)
		vfs_write_resume(mp);
	return (error);
}

/*
 * Request a filesystem to resume write operations.
 */
void
vfs_write_resume(mp)
	struct mount *mp;
{

	MNT_ILOCK(mp);
	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
		KASSERT(mp->mnt_susp_owner == curthread, ("mnt_susp_owner"));
		mp->mnt_kern_flag &= ~(MNTK_SUSPEND | MNTK_SUSPEND2 |
		    MNTK_SUSPENDED);
		mp->mnt_susp_owner = NULL;
		wakeup(&mp->mnt_writeopcount);
		wakeup(&mp->mnt_flag);
		curthread->td_pflags &= ~TDP_IGNSUSP;
		MNT_IUNLOCK(mp);
		VFS_SUSP_CLEAN(mp);
	} else
		MNT_IUNLOCK(mp);
}

/*
 * Implement kqueues for files by translating them to the vnode operation.
 */
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{
	int vfslocked;
	int error;

	vfslocked = VFS_LOCK_GIANT(fp->f_vnode->v_mount);
	error = VOP_KQFILTER(fp->f_vnode, kn);
	VFS_UNLOCK_GIANT(vfslocked);

	return (error);
}

/*
 * Simplified in-kernel wrapper calls for extended attribute access.
 * Both calls pass in a NULL credential, authorizing as "kernel" access.
 * Set IO_NODELOCKED in ioflg if the vnode is already locked.
 */
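/*
 * Illustrative only: fetching a system-namespace attribute into a
 * local buffer might look roughly like
 *
 *	buflen = sizeof(buf);
 *	error = vn_extattr_get(vp, IO_NODELOCKED,
 *	    EXTATTR_NAMESPACE_SYSTEM, "myattr", &buflen, buf, td);
 *
 * with *buflen updated on success to the number of bytes actually
 * read ("myattr" is a hypothetical attribute name).
 */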
int
vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int *buflen, char *buf, struct thread *td)
{
	struct uio	auio;
	struct iovec	iov;
	int	error;

	iov.iov_len = *buflen;
	iov.iov_base = buf;

	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;
	auio.uio_offset = 0;
	auio.uio_resid = *buflen;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");

	/* authorize attribute retrieval as kernel */
	error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL,
	    td);

	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_UNLOCK(vp, 0);

	if (error == 0)
		*buflen = *buflen - auio.uio_resid;

	return (error);
}

/*
 * XXX failure mode if partially written?
 */
int
vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int buflen, char *buf, struct thread *td)
{
	struct uio	auio;
	struct iovec	iov;
	struct mount	*mp;
	int	error;

	iov.iov_len = buflen;
	iov.iov_base = buf;

	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;
	auio.uio_offset = 0;
	auio.uio_resid = buflen;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}

	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");

	/* authorize attribute setting as kernel */
	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0);
	}

	return (error);
}

int
vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, struct thread *td)
{
	struct mount	*mp;
	int	error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}

	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");

	/* authorize attribute removal as kernel */
	error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NULL, td);
	if (error == EOPNOTSUPP)
		error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
		    NULL, td);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0);
	}

	return (error);
}

int
vn_vget_ino(struct vnode *vp, ino_t ino, int lkflags, struct vnode **rvp)
{
	struct mount *mp;
	int ltype, error;

	mp = vp->v_mount;
	ltype = VOP_ISLOCKED(vp);
	KASSERT(ltype == LK_EXCLUSIVE || ltype == LK_SHARED,
	    ("vn_vget_ino: vp not locked"));
	error = vfs_busy(mp, MBF_NOWAIT);
	if (error != 0) {
		vfs_ref(mp);
		VOP_UNLOCK(vp, 0);
		error = vfs_busy(mp, 0);
		vn_lock(vp, ltype | LK_RETRY);
		vfs_rel(mp);
		if (error != 0)
			return (ENOENT);
		if (vp->v_iflag & VI_DOOMED) {
			vfs_unbusy(mp);
			return (ENOENT);
		}
	}
	VOP_UNLOCK(vp, 0);
	error = VFS_VGET(mp, ino, lkflags, rvp);
	vfs_unbusy(mp);
	vn_lock(vp, ltype | LK_RETRY);
	if (vp->v_iflag & VI_DOOMED) {
		if (error == 0)
			vput(*rvp);
		error = ENOENT;
	}
	return (error);
}