xref: /freebsd/sys/kern/vfs_vnops.c (revision dc60165b73e4c4d829a2cb9fed5cce585e93d9a9)
1 /*-
2  * Copyright (c) 1982, 1986, 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 4. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
35  */
36 
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD$");
39 
40 #include "opt_mac.h"
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/fcntl.h>
45 #include <sys/file.h>
46 #include <sys/kdb.h>
47 #include <sys/stat.h>
48 #include <sys/priv.h>
49 #include <sys/proc.h>
50 #include <sys/limits.h>
51 #include <sys/lock.h>
52 #include <sys/mount.h>
53 #include <sys/mutex.h>
54 #include <sys/namei.h>
55 #include <sys/vnode.h>
56 #include <sys/bio.h>
57 #include <sys/buf.h>
58 #include <sys/filio.h>
59 #include <sys/sx.h>
60 #include <sys/ttycom.h>
61 #include <sys/conf.h>
62 #include <sys/syslog.h>
63 #include <sys/unistd.h>
64 
65 #include <security/mac/mac_framework.h>
66 
67 static fo_rdwr_t	vn_read;
68 static fo_rdwr_t	vn_write;
69 static fo_truncate_t	vn_truncate;
70 static fo_ioctl_t	vn_ioctl;
71 static fo_poll_t	vn_poll;
72 static fo_kqfilter_t	vn_kqfilter;
73 static fo_stat_t	vn_statfile;
74 static fo_close_t	vn_closefile;
75 
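/*
 * Operations vector for vnode-backed file descriptors.  The open path
 * points fp->f_ops at this table, so the generic read(2)/write(2)/
 * ioctl(2)/etc. syscalls dispatch to the vn_* routines below.
 */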
76 struct fileops vnops = {
77 	.fo_read = vn_read,
78 	.fo_write = vn_write,
79 	.fo_truncate = vn_truncate,
80 	.fo_ioctl = vn_ioctl,
81 	.fo_poll = vn_poll,
82 	.fo_kqfilter = vn_kqfilter,
83 	.fo_stat = vn_statfile,
84 	.fo_close = vn_closefile,
85 	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
86 };
87 
88 int
89 vn_open(ndp, flagp, cmode, fp)
90 	struct nameidata *ndp;
91 	int *flagp, cmode;
92 	struct file *fp;
93 {
94 	struct thread *td = ndp->ni_cnd.cn_thread;
95 
96 	return (vn_open_cred(ndp, flagp, cmode, td->td_ucred, fp));
97 }
98 
99 /*
100  * Common code for vnode open operations.
101  * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
102  *
103  * Note that this does NOT free nameidata for the successful case,
104  * due to the NDINIT being done elsewhere.
105  */
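/*
 * Illustrative call pattern (a sketch only; the exact NDINIT flags and
 * error handling vary by caller -- see the open(2) implementation):
 *
 *	NDINIT(&nd, LOOKUP, FOLLOW | MPSAFE | AUDITVNODE1, UIO_USERSPACE,
 *	    path, td);
 *	if ((error = vn_open(&nd, &flags, cmode, fp)) != 0)
 *		return (error);
 *	NDFREE(&nd, NDF_ONLY_PNBUF);
 *
 * On success ndp->ni_vp is returned locked; the caller is responsible
 * for freeing the nameidata and for the eventual vn_close().
 */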
106 int
107 vn_open_cred(ndp, flagp, cmode, cred, fp)
108 	struct nameidata *ndp;
109 	int *flagp, cmode;
110 	struct ucred *cred;
111 	struct file *fp;
112 {
113 	struct vnode *vp;
114 	struct mount *mp;
115 	struct thread *td = ndp->ni_cnd.cn_thread;
116 	struct vattr vat;
117 	struct vattr *vap = &vat;
118 	int fmode, error;
119 	accmode_t accmode;
120 	int vfslocked, mpsafe;
121 
122 	mpsafe = ndp->ni_cnd.cn_flags & MPSAFE;
123 restart:
124 	vfslocked = 0;
125 	fmode = *flagp;
126 	if (fmode & O_CREAT) {
127 		ndp->ni_cnd.cn_nameiop = CREATE;
128 		ndp->ni_cnd.cn_flags = ISOPEN | LOCKPARENT | LOCKLEAF |
129 		    MPSAFE | AUDITVNODE1;
130 		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
131 			ndp->ni_cnd.cn_flags |= FOLLOW;
132 		bwillwrite();
133 		if ((error = namei(ndp)) != 0)
134 			return (error);
135 		vfslocked = NDHASGIANT(ndp);
136 		if (!mpsafe)
137 			ndp->ni_cnd.cn_flags &= ~MPSAFE;
138 		if (ndp->ni_vp == NULL) {
139 			VATTR_NULL(vap);
140 			vap->va_type = VREG;
141 			vap->va_mode = cmode;
142 			if (fmode & O_EXCL)
143 				vap->va_vaflags |= VA_EXCLUSIVE;
144 			if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) {
145 				NDFREE(ndp, NDF_ONLY_PNBUF);
146 				vput(ndp->ni_dvp);
147 				VFS_UNLOCK_GIANT(vfslocked);
148 				if ((error = vn_start_write(NULL, &mp,
149 				    V_XSLEEP | PCATCH)) != 0)
150 					return (error);
151 				goto restart;
152 			}
153 #ifdef MAC
154 			error = mac_vnode_check_create(cred, ndp->ni_dvp,
155 			    &ndp->ni_cnd, vap);
156 			if (error == 0) {
157 #endif
158 				VOP_LEASE(ndp->ni_dvp, td, cred, LEASE_WRITE);
159 				error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
160 						   &ndp->ni_cnd, vap);
161 #ifdef MAC
162 			}
163 #endif
164 			vput(ndp->ni_dvp);
165 			vn_finished_write(mp);
166 			if (error) {
167 				VFS_UNLOCK_GIANT(vfslocked);
168 				NDFREE(ndp, NDF_ONLY_PNBUF);
169 				return (error);
170 			}
171 			fmode &= ~O_TRUNC;
172 			vp = ndp->ni_vp;
173 		} else {
174 			if (ndp->ni_dvp == ndp->ni_vp)
175 				vrele(ndp->ni_dvp);
176 			else
177 				vput(ndp->ni_dvp);
178 			ndp->ni_dvp = NULL;
179 			vp = ndp->ni_vp;
180 			if (fmode & O_EXCL) {
181 				error = EEXIST;
182 				goto bad;
183 			}
184 			fmode &= ~O_CREAT;
185 		}
186 	} else {
187 		ndp->ni_cnd.cn_nameiop = LOOKUP;
188 		ndp->ni_cnd.cn_flags = ISOPEN |
189 		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) |
190 		    LOCKLEAF | MPSAFE | AUDITVNODE1;
191 		if (!(fmode & FWRITE))
192 			ndp->ni_cnd.cn_flags |= LOCKSHARED;
193 		if ((error = namei(ndp)) != 0)
194 			return (error);
195 		if (!mpsafe)
196 			ndp->ni_cnd.cn_flags &= ~MPSAFE;
197 		vfslocked = NDHASGIANT(ndp);
198 		vp = ndp->ni_vp;
199 	}
200 	if (vp->v_type == VLNK) {
201 		error = EMLINK;
202 		goto bad;
203 	}
204 	if (vp->v_type == VSOCK) {
205 		error = EOPNOTSUPP;
206 		goto bad;
207 	}
208 	accmode = 0;
209 	if (fmode & (FWRITE | O_TRUNC)) {
210 		if (vp->v_type == VDIR) {
211 			error = EISDIR;
212 			goto bad;
213 		}
214 		accmode |= VWRITE;
215 	}
216 	if (fmode & FREAD)
217 		accmode |= VREAD;
218 	if (fmode & FEXEC)
219 		accmode |= VEXEC;
220 	if (fmode & O_APPEND)
221 		accmode |= VAPPEND;
222 #ifdef MAC
223 	error = mac_vnode_check_open(cred, vp, accmode);
224 	if (error)
225 		goto bad;
226 #endif
227 	if ((fmode & O_CREAT) == 0) {
228 		if (accmode & VWRITE) {
229 			error = vn_writechk(vp);
230 			if (error)
231 				goto bad;
232 		}
233 		if (accmode) {
234 		        error = VOP_ACCESS(vp, accmode, cred, td);
235 			if (error)
236 				goto bad;
237 		}
238 	}
239 	if ((error = VOP_OPEN(vp, fmode, cred, td, fp)) != 0)
240 		goto bad;
241 
242 	if (fmode & FWRITE)
243 		vp->v_writecount++;
244 	*flagp = fmode;
245 	ASSERT_VOP_LOCKED(vp, "vn_open_cred");
246 	if (!mpsafe)
247 		VFS_UNLOCK_GIANT(vfslocked);
248 	return (0);
249 bad:
250 	NDFREE(ndp, NDF_ONLY_PNBUF);
251 	vput(vp);
252 	VFS_UNLOCK_GIANT(vfslocked);
253 	*flagp = fmode;
254 	ndp->ni_vp = NULL;
255 	return (error);
256 }
257 
258 /*
259  * Check for write permissions on the specified vnode.
260  * Prototype text segments cannot be written.
261  */
262 int
263 vn_writechk(vp)
264 	register struct vnode *vp;
265 {
266 
267 	ASSERT_VOP_LOCKED(vp, "vn_writechk");
268 	/*
269 	 * If there's shared text associated with
270 	 * the vnode (it is mapped as the running text of
271 	 * some process), we can't allow writing.
272 	 */
273 	if (vp->v_vflag & VV_TEXT)
274 		return (ETXTBSY);
275 
276 	return (0);
277 }
278 
279 /*
280  * Vnode close call
281  */
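/*
 * Note that vn_close() consumes the caller's reference on the vnode
 * (via vput()) and, for descriptors opened for writing, drops the
 * v_writecount taken in vn_open_cred().
 */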
282 int
283 vn_close(vp, flags, file_cred, td)
284 	register struct vnode *vp;
285 	int flags;
286 	struct ucred *file_cred;
287 	struct thread *td;
288 {
289 	struct mount *mp;
290 	int error, lock_flags;
291 
292 	if (!(flags & FWRITE) && vp->v_mount != NULL &&
293 	    vp->v_mount->mnt_kern_flag & MNTK_EXTENDED_SHARED)
294 		lock_flags = LK_SHARED;
295 	else
296 		lock_flags = LK_EXCLUSIVE;
297 
298 	VFS_ASSERT_GIANT(vp->v_mount);
299 
300 	vn_start_write(vp, &mp, V_WAIT);
301 	vn_lock(vp, lock_flags | LK_RETRY);
302 	if (flags & FWRITE) {
303 		VNASSERT(vp->v_writecount > 0, vp,
304 		    ("vn_close: negative writecount"));
305 		vp->v_writecount--;
306 	}
307 	error = VOP_CLOSE(vp, flags, file_cred, td);
308 	vput(vp);
309 	vn_finished_write(mp);
310 	return (error);
311 }
312 
313 /*
314  * Heuristic to detect sequential operation.
315  */
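/*
 * Worked example (illustrative): a 64KB read that starts where the
 * previous I/O ended (uio_offset == f_nextoff) adds
 * howmany(65536, 16384) = 4 to f_seqcount, capped at IO_SEQMAX, and the
 * returned value f_seqcount << IO_SEQSHIFT is merged into the ioflag
 * bits as a read-ahead/clustering hint for the filesystem.
 */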
316 static int
317 sequential_heuristic(struct uio *uio, struct file *fp)
318 {
319 
320 	/*
321 	 * Offset 0 is handled specially.  open() sets f_seqcount to 1 so
322 	 * that the first I/O is normally considered to be slightly
323 	 * sequential.  Seeking to offset 0 doesn't change sequentiality
324 	 * unless previous seeks have reduced f_seqcount to 0, in which
325 	 * case offset 0 is not special.
326 	 */
327 	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
328 	    uio->uio_offset == fp->f_nextoff) {
329 		/*
330 		 * f_seqcount is in units of fixed-size blocks so that it
331 		 * depends mainly on the amount of sequential I/O and not
332 		 * much on the number of sequential I/O's.  The fixed size
333 		 * of 16384 is hard-coded here since it is (not quite) just
334 	 * a magic size that works well in practice.  This size is more
335 		 * closely related to the best I/O size for real disks than
336 		 * to any block size used by software.
337 		 */
338 		fp->f_seqcount += howmany(uio->uio_resid, 16384);
339 		if (fp->f_seqcount > IO_SEQMAX)
340 			fp->f_seqcount = IO_SEQMAX;
341 		return (fp->f_seqcount << IO_SEQSHIFT);
342 	}
343 
344 	/* Not sequential.  Quickly draw down sequentiality. */
345 	if (fp->f_seqcount > 1)
346 		fp->f_seqcount = 1;
347 	else
348 		fp->f_seqcount = 0;
349 	return (0);
350 }
351 
352 /*
353  * Package up an I/O request on a vnode into a uio and do it.
354  */
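/*
 * Typical in-kernel usage (a sketch; credentials, flags and residual
 * handling vary by caller):
 *
 *	error = vn_rdwr(UIO_READ, vp, buf, len, off, UIO_SYSSPACE,
 *	    IO_NODELOCKED, td->td_ucred, NOCRED, &resid, td);
 *
 * With IO_NODELOCKED the caller must already hold the vnode lock;
 * otherwise vn_rdwr() acquires and drops it around the VOP call.
 */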
355 int
356 vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, active_cred, file_cred,
357     aresid, td)
358 	enum uio_rw rw;
359 	struct vnode *vp;
360 	void *base;
361 	int len;
362 	off_t offset;
363 	enum uio_seg segflg;
364 	int ioflg;
365 	struct ucred *active_cred;
366 	struct ucred *file_cred;
367 	int *aresid;
368 	struct thread *td;
369 {
370 	struct uio auio;
371 	struct iovec aiov;
372 	struct mount *mp;
373 	struct ucred *cred;
374 	int error;
375 
376 	VFS_ASSERT_GIANT(vp->v_mount);
377 
378 	if ((ioflg & IO_NODELOCKED) == 0) {
379 		mp = NULL;
380 		if (rw == UIO_WRITE) {
381 			if (vp->v_type != VCHR &&
382 			    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH))
383 			    != 0)
384 				return (error);
385 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
386 		} else {
387 			/*
388 			 * XXX This should be LK_SHARED but I don't trust VFS
389 			 * enough to leave it like that until it has been
390 			 * reviewed further.
391 			 */
392 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
393 		}
394 
395 	}
396 	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
397 	auio.uio_iov = &aiov;
398 	auio.uio_iovcnt = 1;
399 	aiov.iov_base = base;
400 	aiov.iov_len = len;
401 	auio.uio_resid = len;
402 	auio.uio_offset = offset;
403 	auio.uio_segflg = segflg;
404 	auio.uio_rw = rw;
405 	auio.uio_td = td;
406 	error = 0;
407 #ifdef MAC
408 	if ((ioflg & IO_NOMACCHECK) == 0) {
409 		if (rw == UIO_READ)
410 			error = mac_vnode_check_read(active_cred, file_cred,
411 			    vp);
412 		else
413 			error = mac_vnode_check_write(active_cred, file_cred,
414 			    vp);
415 	}
416 #endif
417 	if (error == 0) {
418 		if (file_cred)
419 			cred = file_cred;
420 		else
421 			cred = active_cred;
422 		if (rw == UIO_READ)
423 			error = VOP_READ(vp, &auio, ioflg, cred);
424 		else
425 			error = VOP_WRITE(vp, &auio, ioflg, cred);
426 	}
427 	if (aresid)
428 		*aresid = auio.uio_resid;
429 	else if (auio.uio_resid && error == 0)
430 		error = EIO;
432 	if ((ioflg & IO_NODELOCKED) == 0) {
433 		if (rw == UIO_WRITE && vp->v_type != VCHR)
434 			vn_finished_write(mp);
435 		VOP_UNLOCK(vp, 0);
436 	}
437 	return (error);
438 }
439 
440 /*
441  * Package up an I/O request on a vnode into a uio and do it.  The I/O
442  * request is split up into smaller chunks and we try to avoid saturating
443  * the buffer cache while potentially holding a vnode locked, so we
444  * call bwillwrite() before each vn_rdwr() call.  We also call uio_yield()
445  * to give other processes a chance to lock the vnode (either other processes
446  * core'ing the same binary, or unrelated processes scanning the directory).
447  */
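/*
 * Chunking example (illustrative, assuming MAXBSIZE is 65536): a write
 * of 200000 bytes starting at offset 1000 is issued as chunks of 64536,
 * 65536, 65536 and 4392 bytes, so every chunk after the first begins on
 * a MAXBSIZE boundary.
 */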
448 int
449 vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, active_cred,
450     file_cred, aresid, td)
451 	enum uio_rw rw;
452 	struct vnode *vp;
453 	void *base;
454 	size_t len;
455 	off_t offset;
456 	enum uio_seg segflg;
457 	int ioflg;
458 	struct ucred *active_cred;
459 	struct ucred *file_cred;
460 	size_t *aresid;
461 	struct thread *td;
462 {
463 	int error = 0;
464 	int iaresid;
465 
466 	VFS_ASSERT_GIANT(vp->v_mount);
467 
468 	do {
469 		int chunk;
470 
471 		/*
472 		 * Force `offset' to a multiple of MAXBSIZE except possibly
473 		 * for the first chunk, so that filesystems only need to
474 		 * write full blocks except possibly for the first and last
475 		 * chunks.
476 		 */
477 		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;
478 
479 		if (chunk > len)
480 			chunk = len;
481 		if (rw != UIO_READ && vp->v_type == VREG)
482 			bwillwrite();
483 		iaresid = 0;
484 		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
485 		    ioflg, active_cred, file_cred, &iaresid, td);
486 		len -= chunk;	/* aresid calc already includes length */
487 		if (error)
488 			break;
489 		offset += chunk;
490 		base = (char *)base + chunk;
491 		uio_yield();
492 	} while (len);
493 	if (aresid)
494 		*aresid = len + iaresid;
495 	return (error);
496 }
497 
498 /*
499  * File table vnode read routine.
500  */
501 static int
502 vn_read(fp, uio, active_cred, flags, td)
503 	struct file *fp;
504 	struct uio *uio;
505 	struct ucred *active_cred;
506 	struct thread *td;
507 	int flags;
508 {
509 	struct vnode *vp;
510 	int error, ioflag;
511 	struct mtx *mtxp;
512 	int vfslocked;
513 
514 	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
515 	    uio->uio_td, td));
516 	mtxp = NULL;
517 	vp = fp->f_vnode;
518 	ioflag = 0;
519 	if (fp->f_flag & FNONBLOCK)
520 		ioflag |= IO_NDELAY;
521 	if (fp->f_flag & O_DIRECT)
522 		ioflag |= IO_DIRECT;
523 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
524 	VOP_LEASE(vp, td, fp->f_cred, LEASE_READ);
525 	/*
526 	 * According to McKusick the vn lock was protecting f_offset here.
527 	 * It is now protected by the FOFFSET_LOCKED flag.
528 	 */
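	/*
	 * The f_vnread_flags bits, protected by a pool mutex, implement a
	 * small sleepable lock on f_offset so that concurrent read(2)
	 * calls on the same struct file serialize their offset updates
	 * without requiring an exclusive vnode lock.
	 */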
529 	if ((flags & FOF_OFFSET) == 0) {
530 		mtxp = mtx_pool_find(mtxpool_sleep, fp);
531 		mtx_lock(mtxp);
532 		while (fp->f_vnread_flags & FOFFSET_LOCKED) {
533 			fp->f_vnread_flags |= FOFFSET_LOCK_WAITING;
534 			msleep(&fp->f_vnread_flags, mtxp, PUSER - 1,
535 			    "vnread offlock", 0);
536 		}
537 		fp->f_vnread_flags |= FOFFSET_LOCKED;
538 		mtx_unlock(mtxp);
539 		vn_lock(vp, LK_SHARED | LK_RETRY);
540 		uio->uio_offset = fp->f_offset;
541 	} else
542 		vn_lock(vp, LK_SHARED | LK_RETRY);
543 
544 	ioflag |= sequential_heuristic(uio, fp);
545 
546 #ifdef MAC
547 	error = mac_vnode_check_read(active_cred, fp->f_cred, vp);
548 	if (error == 0)
549 #endif
550 		error = VOP_READ(vp, uio, ioflag, fp->f_cred);
551 	if ((flags & FOF_OFFSET) == 0) {
552 		fp->f_offset = uio->uio_offset;
553 		mtx_lock(mtxp);
554 		if (fp->f_vnread_flags & FOFFSET_LOCK_WAITING)
555 			wakeup(&fp->f_vnread_flags);
556 		fp->f_vnread_flags = 0;
557 		mtx_unlock(mtxp);
558 	}
559 	fp->f_nextoff = uio->uio_offset;
560 	VOP_UNLOCK(vp, 0);
561 	VFS_UNLOCK_GIANT(vfslocked);
562 	return (error);
563 }
564 
565 /*
566  * File table vnode write routine.
567  */
568 static int
569 vn_write(fp, uio, active_cred, flags, td)
570 	struct file *fp;
571 	struct uio *uio;
572 	struct ucred *active_cred;
573 	struct thread *td;
574 	int flags;
575 {
576 	struct vnode *vp;
577 	struct mount *mp;
578 	int error, ioflag;
579 	int vfslocked;
580 
581 	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
582 	    uio->uio_td, td));
583 	vp = fp->f_vnode;
584 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
585 	if (vp->v_type == VREG)
586 		bwillwrite();
587 	ioflag = IO_UNIT;
588 	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
589 		ioflag |= IO_APPEND;
590 	if (fp->f_flag & FNONBLOCK)
591 		ioflag |= IO_NDELAY;
592 	if (fp->f_flag & O_DIRECT)
593 		ioflag |= IO_DIRECT;
594 	if ((fp->f_flag & O_FSYNC) ||
595 	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
596 		ioflag |= IO_SYNC;
597 	mp = NULL;
598 	if (vp->v_type != VCHR &&
599 	    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
600 		goto unlock;
601 	VOP_LEASE(vp, td, fp->f_cred, LEASE_WRITE);
602 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
603 	if ((flags & FOF_OFFSET) == 0)
604 		uio->uio_offset = fp->f_offset;
605 	ioflag |= sequential_heuristic(uio, fp);
606 #ifdef MAC
607 	error = mac_vnode_check_write(active_cred, fp->f_cred, vp);
608 	if (error == 0)
609 #endif
610 		error = VOP_WRITE(vp, uio, ioflag, fp->f_cred);
611 	if ((flags & FOF_OFFSET) == 0)
612 		fp->f_offset = uio->uio_offset;
613 	fp->f_nextoff = uio->uio_offset;
614 	VOP_UNLOCK(vp, 0);
615 	if (vp->v_type != VCHR)
616 		vn_finished_write(mp);
617 unlock:
618 	VFS_UNLOCK_GIANT(vfslocked);
619 	return (error);
620 }
621 
622 /*
623  * File table truncate routine.
624  */
625 static int
626 vn_truncate(fp, length, active_cred, td)
627 	struct file *fp;
628 	off_t length;
629 	struct ucred *active_cred;
630 	struct thread *td;
631 {
632 	struct vattr vattr;
633 	struct mount *mp;
634 	struct vnode *vp;
635 	int vfslocked;
636 	int error;
637 
638 	vp = fp->f_vnode;
639 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
640 	error = vn_start_write(vp, &mp, V_WAIT | PCATCH);
641 	if (error) {
642 		VFS_UNLOCK_GIANT(vfslocked);
643 		return (error);
644 	}
645 	VOP_LEASE(vp, td, active_cred, LEASE_WRITE);
646 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
647 	if (vp->v_type == VDIR) {
648 		error = EISDIR;
649 		goto out;
650 	}
651 #ifdef MAC
652 	error = mac_vnode_check_write(active_cred, fp->f_cred, vp);
653 	if (error)
654 		goto out;
655 #endif
656 	error = vn_writechk(vp);
657 	if (error == 0) {
658 		VATTR_NULL(&vattr);
659 		vattr.va_size = length;
660 		error = VOP_SETATTR(vp, &vattr, fp->f_cred);
661 	}
662 out:
663 	VOP_UNLOCK(vp, 0);
664 	vn_finished_write(mp);
665 	VFS_UNLOCK_GIANT(vfslocked);
666 	return (error);
667 }
668 
669 /*
670  * File table vnode stat routine.
671  */
672 static int
673 vn_statfile(fp, sb, active_cred, td)
674 	struct file *fp;
675 	struct stat *sb;
676 	struct ucred *active_cred;
677 	struct thread *td;
678 {
679 	struct vnode *vp = fp->f_vnode;
680 	int vfslocked;
681 	int error;
682 
683 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
684 	vn_lock(vp, LK_SHARED | LK_RETRY);
685 	error = vn_stat(vp, sb, active_cred, fp->f_cred, td);
686 	VOP_UNLOCK(vp, 0);
687 	VFS_UNLOCK_GIANT(vfslocked);
688 
689 	return (error);
690 }
691 
692 /*
693  * Stat a vnode; implementation for the stat syscall
694  */
695 int
696 vn_stat(vp, sb, active_cred, file_cred, td)
697 	struct vnode *vp;
698 	register struct stat *sb;
699 	struct ucred *active_cred;
700 	struct ucred *file_cred;
701 	struct thread *td;
702 {
703 	struct vattr vattr;
704 	register struct vattr *vap;
705 	int error;
706 	u_short mode;
707 
708 #ifdef MAC
709 	error = mac_vnode_check_stat(active_cred, file_cred, vp);
710 	if (error)
711 		return (error);
712 #endif
713 
714 	vap = &vattr;
715 
716 	/*
717 	 * Initialize defaults for new and unusual fields, so that file
718 	 * systems which don't support these fields don't need to know
719 	 * about them.
720 	 */
721 	vap->va_birthtime.tv_sec = -1;
722 	vap->va_birthtime.tv_nsec = 0;
723 	vap->va_fsid = VNOVAL;
724 	vap->va_rdev = NODEV;
725 
726 	error = VOP_GETATTR(vp, vap, active_cred);
727 	if (error)
728 		return (error);
729 
730 	/*
731 	 * Zero the entire structure so spare and unsupported fields read as zero
732 	 */
733 	bzero(sb, sizeof *sb);
734 
735 	/*
736 	 * Copy from vattr table
737 	 */
738 	if (vap->va_fsid != VNOVAL)
739 		sb->st_dev = vap->va_fsid;
740 	else
741 		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
742 	sb->st_ino = vap->va_fileid;
743 	mode = vap->va_mode;
744 	switch (vap->va_type) {
745 	case VREG:
746 		mode |= S_IFREG;
747 		break;
748 	case VDIR:
749 		mode |= S_IFDIR;
750 		break;
751 	case VBLK:
752 		mode |= S_IFBLK;
753 		break;
754 	case VCHR:
755 		mode |= S_IFCHR;
756 		break;
757 	case VLNK:
758 		mode |= S_IFLNK;
759 		break;
760 	case VSOCK:
761 		mode |= S_IFSOCK;
762 		break;
763 	case VFIFO:
764 		mode |= S_IFIFO;
765 		break;
766 	default:
767 		return (EBADF);
768 	}
769 	sb->st_mode = mode;
770 	sb->st_nlink = vap->va_nlink;
771 	sb->st_uid = vap->va_uid;
772 	sb->st_gid = vap->va_gid;
773 	sb->st_rdev = vap->va_rdev;
774 	if (vap->va_size > OFF_MAX)
775 		return (EOVERFLOW);
776 	sb->st_size = vap->va_size;
777 	sb->st_atimespec = vap->va_atime;
778 	sb->st_mtimespec = vap->va_mtime;
779 	sb->st_ctimespec = vap->va_ctime;
780 	sb->st_birthtimespec = vap->va_birthtime;
781 
782 	/*
783 	 * According to www.opengroup.org, the meaning of st_blksize is
784 	 *   "a filesystem-specific preferred I/O block size for this
785 	 *    object.  In some filesystem types, this may vary from file
786 	 *    to file"
787 	 * Default to PAGE_SIZE after much discussion.
788 	 * XXX: min(PAGE_SIZE, vp->v_bufobj.bo_bsize) may be more correct.
789 	 */
790 
791 	sb->st_blksize = PAGE_SIZE;
792 
793 	sb->st_flags = vap->va_flags;
794 	if (priv_check(td, PRIV_VFS_GENERATION))
795 		sb->st_gen = 0;
796 	else
797 		sb->st_gen = vap->va_gen;
798 
799 	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
800 	return (0);
801 }
802 
803 /*
804  * File table vnode ioctl routine.
805  */
806 static int
807 vn_ioctl(fp, com, data, active_cred, td)
808 	struct file *fp;
809 	u_long com;
810 	void *data;
811 	struct ucred *active_cred;
812 	struct thread *td;
813 {
814 	struct vnode *vp = fp->f_vnode;
815 	struct vattr vattr;
816 	int vfslocked;
817 	int error;
818 
819 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
820 	error = ENOTTY;
821 	switch (vp->v_type) {
822 	case VREG:
823 	case VDIR:
824 		if (com == FIONREAD) {
825 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
826 			error = VOP_GETATTR(vp, &vattr, active_cred);
827 			VOP_UNLOCK(vp, 0);
828 			if (!error)
829 				*(int *)data = vattr.va_size - fp->f_offset;
830 		}
831 		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
832 			error = 0;
833 		else
834 			error = VOP_IOCTL(vp, com, data, fp->f_flag,
835 			    active_cred, td);
836 		break;
837 
838 	default:
839 		break;
840 	}
841 	VFS_UNLOCK_GIANT(vfslocked);
842 	return (error);
843 }
844 
845 /*
846  * File table vnode poll routine.
847  */
848 static int
849 vn_poll(fp, events, active_cred, td)
850 	struct file *fp;
851 	int events;
852 	struct ucred *active_cred;
853 	struct thread *td;
854 {
855 	struct vnode *vp;
856 	int vfslocked;
857 	int error;
858 
859 	vp = fp->f_vnode;
860 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
861 #ifdef MAC
862 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
863 	error = mac_vnode_check_poll(active_cred, fp->f_cred, vp);
864 	VOP_UNLOCK(vp, 0);
865 	if (!error)
866 #endif
868 		error = VOP_POLL(vp, events, fp->f_cred, td);
869 	VFS_UNLOCK_GIANT(vfslocked);
870 	return (error);
871 }
872 
873 /*
874  * Acquire the requested lock and then check for validity.  LK_RETRY
875  * permits vn_lock to return doomed vnodes.
876  */
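/*
 * Callers normally go through the vn_lock() macro (sys/vnode.h), which
 * supplies the file and line used by the lock diagnostics.  Callers that
 * pass LK_RETRY must be prepared to be handed a doomed (VI_DOOMED)
 * vnode.
 */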
877 int
878 _vn_lock(struct vnode *vp, int flags, char *file, int line)
879 {
880 	int error;
881 
882 	VNASSERT((flags & LK_TYPE_MASK) != 0, vp,
883 	    ("vn_lock called with no locktype."));
884 	do {
885 #ifdef DEBUG_VFS_LOCKS
886 		KASSERT(vp->v_holdcnt != 0,
887 		    ("vn_lock %p: zero hold count", vp));
888 #endif
889 		error = VOP_LOCK1(vp, flags, file, line);
890 		flags &= ~LK_INTERLOCK;	/* Interlock is always dropped. */
891 		KASSERT((flags & LK_RETRY) == 0 || error == 0,
892 		    ("LK_RETRY set with incompatible flags (0x%x) or an error occurred (%d)",
893 		    flags, error));
894 		/*
895 		 * Callers specify LK_RETRY if they wish to get dead vnodes.
896 		 * If LK_RETRY is not set, we return ENOENT instead.
897 		 */
898 		if (error == 0 && vp->v_iflag & VI_DOOMED &&
899 		    (flags & LK_RETRY) == 0) {
900 			VOP_UNLOCK(vp, 0);
901 			error = ENOENT;
902 			break;
903 		}
904 	} while (flags & LK_RETRY && error != 0);
905 	return (error);
906 }
907 
908 /*
909  * File table vnode close routine.
910  */
911 static int
912 vn_closefile(fp, td)
913 	struct file *fp;
914 	struct thread *td;
915 {
916 	struct vnode *vp;
917 	struct flock lf;
918 	int vfslocked;
919 	int error;
920 
921 	vp = fp->f_vnode;
922 
923 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
924 	if (fp->f_type == DTYPE_VNODE && fp->f_flag & FHASLOCK) {
925 		lf.l_whence = SEEK_SET;
926 		lf.l_start = 0;
927 		lf.l_len = 0;
928 		lf.l_type = F_UNLCK;
929 		(void) VOP_ADVLOCK(vp, fp, F_UNLCK, &lf, F_FLOCK);
930 	}
931 
932 	fp->f_ops = &badfileops;
933 
934 	error = vn_close(vp, fp->f_flag, fp->f_cred, td);
935 	VFS_UNLOCK_GIANT(vfslocked);
936 	return (error);
937 }
938 
939 /*
940  * Prepare to start a filesystem write operation. If the operation is
941  * permitted, then we bump the count of operations in progress and
942  * proceed. If a suspend request is in progress, we wait until the
943  * suspension is over, and then proceed.
944  */
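/*
 * Typical usage, as elsewhere in this file:
 *
 *	if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
 *		return (error);
 *	... perform the write, locking the vnode as needed ...
 *	vn_finished_write(mp);
 */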
945 int
946 vn_start_write(vp, mpp, flags)
947 	struct vnode *vp;
948 	struct mount **mpp;
949 	int flags;
950 {
951 	struct mount *mp;
952 	int error;
953 
954 	error = 0;
955 	/*
956 	 * If a vnode is provided, get and return the mount point to
957 	 * which it will write.
958 	 */
959 	if (vp != NULL) {
960 		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
961 			*mpp = NULL;
962 			if (error != EOPNOTSUPP)
963 				return (error);
964 			return (0);
965 		}
966 	}
967 	if ((mp = *mpp) == NULL)
968 		return (0);
969 
970 	/*
971 	 * VOP_GETWRITEMOUNT() returns with the mp refcount held through
972 	 * a vfs_ref().
973 	 * As long as a vnode is not provided we need to acquire a
974 	 * refcount for the provided mountpoint too, in order to
975 	 * emulate a vfs_ref().
976 	 */
977 	MNT_ILOCK(mp);
978 	if (vp == NULL)
979 		MNT_REF(mp);
980 
981 	/*
982 	 * Check on status of suspension.
983 	 */
984 	if ((curthread->td_pflags & TDP_IGNSUSP) == 0 ||
985 	    mp->mnt_susp_owner != curthread) {
986 		while ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
987 			if (flags & V_NOWAIT) {
988 				error = EWOULDBLOCK;
989 				goto unlock;
990 			}
991 			error = msleep(&mp->mnt_flag, MNT_MTX(mp),
992 			    (PUSER - 1) | (flags & PCATCH), "suspfs", 0);
993 			if (error)
994 				goto unlock;
995 		}
996 	}
997 	if (flags & V_XSLEEP)
998 		goto unlock;
999 	mp->mnt_writeopcount++;
1000 unlock:
1001 	MNT_REL(mp);
1002 	MNT_IUNLOCK(mp);
1003 	return (error);
1004 }
1005 
1006 /*
1007  * Secondary suspension. Used by operations such as vop_inactive
1008  * routines that are needed by the higher level functions. These
1009  * are allowed to proceed until all the higher level functions have
1010  * completed (indicated by mnt_writeopcount dropping to zero). At that
1011  * time, these operations are halted until the suspension is over.
1012  */
1013 int
1014 vn_start_secondary_write(vp, mpp, flags)
1015 	struct vnode *vp;
1016 	struct mount **mpp;
1017 	int flags;
1018 {
1019 	struct mount *mp;
1020 	int error;
1021 
1022  retry:
1023 	if (vp != NULL) {
1024 		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
1025 			*mpp = NULL;
1026 			if (error != EOPNOTSUPP)
1027 				return (error);
1028 			return (0);
1029 		}
1030 	}
1031 	/*
1032 	 * If we are not suspended or have not yet reached suspended
1033 	 * mode, then let the operation proceed.
1034 	 */
1035 	if ((mp = *mpp) == NULL)
1036 		return (0);
1037 
1038 	/*
1039 	 * VOP_GETWRITEMOUNT() returns with the mp refcount held through
1040 	 * a vfs_ref().
1041 	 * As long as a vnode is not provided we need to acquire a
1042 	 * refcount for the provided mountpoint too, in order to
1043 	 * emulate a vfs_ref().
1044 	 */
1045 	MNT_ILOCK(mp);
1046 	if (vp == NULL)
1047 		MNT_REF(mp);
1048 	if ((mp->mnt_kern_flag & (MNTK_SUSPENDED | MNTK_SUSPEND2)) == 0) {
1049 		mp->mnt_secondary_writes++;
1050 		mp->mnt_secondary_accwrites++;
1051 		MNT_REL(mp);
1052 		MNT_IUNLOCK(mp);
1053 		return (0);
1054 	}
1055 	if (flags & V_NOWAIT) {
1056 		MNT_REL(mp);
1057 		MNT_IUNLOCK(mp);
1058 		return (EWOULDBLOCK);
1059 	}
1060 	/*
1061 	 * Wait for the suspension to finish.
1062 	 */
1063 	error = msleep(&mp->mnt_flag, MNT_MTX(mp),
1064 		       (PUSER - 1) | (flags & PCATCH) | PDROP, "suspfs", 0);
1065 	vfs_rel(mp);
1066 	if (error == 0)
1067 		goto retry;
1068 	return (error);
1069 }
1070 
1071 /*
1072  * Filesystem write operation has completed. If we are suspending and this
1073  * operation is the last one, notify the suspender that the suspension is
1074  * now in effect.
1075  */
1076 void
1077 vn_finished_write(mp)
1078 	struct mount *mp;
1079 {
1080 	if (mp == NULL)
1081 		return;
1082 	MNT_ILOCK(mp);
1083 	mp->mnt_writeopcount--;
1084 	if (mp->mnt_writeopcount < 0)
1085 		panic("vn_finished_write: neg cnt");
1086 	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
1087 	    mp->mnt_writeopcount <= 0)
1088 		wakeup(&mp->mnt_writeopcount);
1089 	MNT_IUNLOCK(mp);
1090 }
1091 
1092 
1093 /*
1094  * Filesystem secondary write operation has completed. If we are
1095  * suspending and this operation is the last one, notify the suspender
1096  * that the suspension is now in effect.
1097  */
1098 void
1099 vn_finished_secondary_write(mp)
1100 	struct mount *mp;
1101 {
1102 	if (mp == NULL)
1103 		return;
1104 	MNT_ILOCK(mp);
1105 	mp->mnt_secondary_writes--;
1106 	if (mp->mnt_secondary_writes < 0)
1107 		panic("vn_finished_secondary_write: neg cnt");
1108 	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
1109 	    mp->mnt_secondary_writes <= 0)
1110 		wakeup(&mp->mnt_secondary_writes);
1111 	MNT_IUNLOCK(mp);
1112 }
1113 
1116 /*
1117  * Request a filesystem to suspend write operations.
1118  */
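/*
 * On success the filesystem is left suspended (MNTK_SUSPEND set and the
 * calling thread recorded as mnt_susp_owner) and must later be resumed
 * with vfs_write_resume(); if the final VFS_SYNC() fails the suspension
 * is undone here and the error returned.
 */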
1119 int
1120 vfs_write_suspend(mp)
1121 	struct mount *mp;
1122 {
1123 	struct thread *td = curthread;
1124 	int error;
1125 
1126 	MNT_ILOCK(mp);
1127 	if (mp->mnt_susp_owner == curthread) {
1128 		MNT_IUNLOCK(mp);
1129 		return (EALREADY);
1130 	}
1131 	while (mp->mnt_kern_flag & MNTK_SUSPEND)
1132 		msleep(&mp->mnt_flag, MNT_MTX(mp), PUSER - 1, "wsuspfs", 0);
1133 	mp->mnt_kern_flag |= MNTK_SUSPEND;
1134 	mp->mnt_susp_owner = curthread;
1135 	if (mp->mnt_writeopcount > 0)
1136 		(void) msleep(&mp->mnt_writeopcount,
1137 		    MNT_MTX(mp), (PUSER - 1)|PDROP, "suspwt", 0);
1138 	else
1139 		MNT_IUNLOCK(mp);
1140 	if ((error = VFS_SYNC(mp, MNT_SUSPEND, td)) != 0)
1141 		vfs_write_resume(mp);
1142 	return (error);
1143 }
1144 
1145 /*
1146  * Request a filesystem to resume write operations.
1147  */
1148 void
1149 vfs_write_resume(mp)
1150 	struct mount *mp;
1151 {
1152 
1153 	MNT_ILOCK(mp);
1154 	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
1155 		KASSERT(mp->mnt_susp_owner == curthread, ("mnt_susp_owner"));
1156 		mp->mnt_kern_flag &= ~(MNTK_SUSPEND | MNTK_SUSPEND2 |
1157 				       MNTK_SUSPENDED);
1158 		mp->mnt_susp_owner = NULL;
1159 		wakeup(&mp->mnt_writeopcount);
1160 		wakeup(&mp->mnt_flag);
1161 		curthread->td_pflags &= ~TDP_IGNSUSP;
1162 		MNT_IUNLOCK(mp);
1163 		VFS_SUSP_CLEAN(mp);
1164 	} else
1165 		MNT_IUNLOCK(mp);
1166 }
1167 
1168 /*
1169  * Implement kqueue events for files by translating them into vnode operations.
1170  */
1171 static int
1172 vn_kqfilter(struct file *fp, struct knote *kn)
1173 {
1174 	int vfslocked;
1175 	int error;
1176 
1177 	vfslocked = VFS_LOCK_GIANT(fp->f_vnode->v_mount);
1178 	error = VOP_KQFILTER(fp->f_vnode, kn);
1179 	VFS_UNLOCK_GIANT(vfslocked);
1180 
1181 	return (error);
1182 }
1183 
1184 /*
1185  * Simplified in-kernel wrapper calls for extended attribute access.
1186  * Both calls pass in a NULL credential, authorizing as "kernel" access.
1187  * Set IO_NODELOCKED in ioflg if the vnode is already locked.
1188  */
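/*
 * Illustrative use of vn_extattr_get() (the attribute name and buffer
 * size are examples only):
 *
 *	char buf[128];
 *	int buflen = sizeof(buf);
 *	error = vn_extattr_get(locked_vp, IO_NODELOCKED,
 *	    EXTATTR_NAMESPACE_SYSTEM, "posix1e.acl_access", &buflen, buf,
 *	    td);
 *
 * On return *buflen holds the number of bytes actually copied out.
 */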
1189 int
1190 vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
1191     const char *attrname, int *buflen, char *buf, struct thread *td)
1192 {
1193 	struct uio	auio;
1194 	struct iovec	iov;
1195 	int	error;
1196 
1197 	iov.iov_len = *buflen;
1198 	iov.iov_base = buf;
1199 
1200 	auio.uio_iov = &iov;
1201 	auio.uio_iovcnt = 1;
1202 	auio.uio_rw = UIO_READ;
1203 	auio.uio_segflg = UIO_SYSSPACE;
1204 	auio.uio_td = td;
1205 	auio.uio_offset = 0;
1206 	auio.uio_resid = *buflen;
1207 
1208 	if ((ioflg & IO_NODELOCKED) == 0)
1209 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1210 
1211 	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
1212 
1213 	/* authorize attribute retrieval as kernel */
1214 	error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL,
1215 	    td);
1216 
1217 	if ((ioflg & IO_NODELOCKED) == 0)
1218 		VOP_UNLOCK(vp, 0);
1219 
1220 	if (error == 0) {
1221 		*buflen = *buflen - auio.uio_resid;
1222 	}
1223 
1224 	return (error);
1225 }
1226 
1227 /*
1228  * XXX failure mode if partially written?
1229  */
1230 int
1231 vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
1232     const char *attrname, int buflen, char *buf, struct thread *td)
1233 {
1234 	struct uio	auio;
1235 	struct iovec	iov;
1236 	struct mount	*mp;
1237 	int	error;
1238 
1239 	iov.iov_len = buflen;
1240 	iov.iov_base = buf;
1241 
1242 	auio.uio_iov = &iov;
1243 	auio.uio_iovcnt = 1;
1244 	auio.uio_rw = UIO_WRITE;
1245 	auio.uio_segflg = UIO_SYSSPACE;
1246 	auio.uio_td = td;
1247 	auio.uio_offset = 0;
1248 	auio.uio_resid = buflen;
1249 
1250 	if ((ioflg & IO_NODELOCKED) == 0) {
1251 		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
1252 			return (error);
1253 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1254 	}
1255 
1256 	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
1257 
1258 	/* authorize attribute setting as kernel */
1259 	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td);
1260 
1261 	if ((ioflg & IO_NODELOCKED) == 0) {
1262 		vn_finished_write(mp);
1263 		VOP_UNLOCK(vp, 0);
1264 	}
1265 
1266 	return (error);
1267 }
1268 
1269 int
1270 vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
1271     const char *attrname, struct thread *td)
1272 {
1273 	struct mount	*mp;
1274 	int	error;
1275 
1276 	if ((ioflg & IO_NODELOCKED) == 0) {
1277 		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
1278 			return (error);
1279 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1280 	}
1281 
1282 	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
1283 
1284 	/* authorize attribute removal as kernel */
1285 	error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NULL, td);
1286 	if (error == EOPNOTSUPP)
1287 		error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
1288 		    NULL, td);
1289 
1290 	if ((ioflg & IO_NODELOCKED) == 0) {
1291 		vn_finished_write(mp);
1292 		VOP_UNLOCK(vp, 0);
1293 	}
1294 
1295 	return (error);
1296 }
1297 
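/*
 * Look up another vnode, by inode number, on the mount point of the
 * locked vnode vp.  The lock on vp is dropped around the VFS_VGET() call
 * and reacquired with its original lock type; vfs_busy() keeps the
 * filesystem from being unmounted in the window.  Returns ENOENT (and
 * releases *rvp) if vp was doomed while unlocked.
 */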
1298 int
1299 vn_vget_ino(struct vnode *vp, ino_t ino, int lkflags, struct vnode **rvp)
1300 {
1301 	struct mount *mp;
1302 	int ltype, error;
1303 
1304 	mp = vp->v_mount;
1305 	ltype = VOP_ISLOCKED(vp);
1306 	KASSERT(ltype == LK_EXCLUSIVE || ltype == LK_SHARED,
1307 	    ("vn_vget_ino: vp not locked"));
1308 	for (;;) {
1309 		error = vfs_busy(mp, MBF_NOWAIT);
1310 		if (error == 0)
1311 			break;
1312 		VOP_UNLOCK(vp, 0);
1313 		pause("vn_vget", 1);
1314 		vn_lock(vp, ltype | LK_RETRY);
1315 		if (vp->v_iflag & VI_DOOMED)
1316 			return (ENOENT);
1317 	}
1318 	VOP_UNLOCK(vp, 0);
1319 	error = VFS_VGET(mp, ino, lkflags, rvp);
1320 	vfs_unbusy(mp);
1321 	vn_lock(vp, ltype | LK_RETRY);
1322 	if (vp->v_iflag & VI_DOOMED) {
1323 		if (error == 0)
1324 			vput(*rvp);
1325 		error = ENOENT;
1326 	}
1327 	return (error);
1328 }
1329