xref: /freebsd/sys/kern/vfs_vnops.c (revision 908e960ea6343acd9515d89d5d5696f9d8bf090c)
1 /*-
2  * Copyright (c) 1982, 1986, 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 4. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
35  */
36 
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD$");
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/fcntl.h>
43 #include <sys/file.h>
44 #include <sys/kdb.h>
45 #include <sys/stat.h>
46 #include <sys/priv.h>
47 #include <sys/proc.h>
48 #include <sys/limits.h>
49 #include <sys/lock.h>
50 #include <sys/mount.h>
51 #include <sys/mutex.h>
52 #include <sys/namei.h>
53 #include <sys/vnode.h>
54 #include <sys/bio.h>
55 #include <sys/buf.h>
56 #include <sys/filio.h>
57 #include <sys/sx.h>
58 #include <sys/ttycom.h>
59 #include <sys/conf.h>
60 #include <sys/syslog.h>
61 #include <sys/unistd.h>
62 
63 #include <security/mac/mac_framework.h>
64 
65 static fo_rdwr_t	vn_read;
66 static fo_rdwr_t	vn_write;
67 static fo_truncate_t	vn_truncate;
68 static fo_ioctl_t	vn_ioctl;
69 static fo_poll_t	vn_poll;
70 static fo_kqfilter_t	vn_kqfilter;
71 static fo_stat_t	vn_statfile;
72 static fo_close_t	vn_closefile;
73 
74 struct 	fileops vnops = {
75 	.fo_read = vn_read,
76 	.fo_write = vn_write,
77 	.fo_truncate = vn_truncate,
78 	.fo_ioctl = vn_ioctl,
79 	.fo_poll = vn_poll,
80 	.fo_kqfilter = vn_kqfilter,
81 	.fo_stat = vn_statfile,
82 	.fo_close = vn_closefile,
83 	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
84 };
85 
86 int
87 vn_open(ndp, flagp, cmode, fp)
88 	struct nameidata *ndp;
89 	int *flagp, cmode;
90 	struct file *fp;
91 {
92 	struct thread *td = ndp->ni_cnd.cn_thread;
93 
94 	return (vn_open_cred(ndp, flagp, cmode, td->td_ucred, fp));
95 }
96 
97 /*
98  * Common code for vnode open operations.
99  * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
100  *
101  * Note that this does NOT free the nameidata on success, since the
102  * NDINIT was done elsewhere (typically by the caller).
103  */
104 int
105 vn_open_cred(ndp, flagp, cmode, cred, fp)
106 	struct nameidata *ndp;
107 	int *flagp, cmode;
108 	struct ucred *cred;
109 	struct file *fp;
110 {
111 	struct vnode *vp;
112 	struct mount *mp;
113 	struct thread *td = ndp->ni_cnd.cn_thread;
114 	struct vattr vat;
115 	struct vattr *vap = &vat;
116 	int fmode, error;
117 	accmode_t accmode;
118 	int vfslocked, mpsafe;
119 
120 	mpsafe = ndp->ni_cnd.cn_flags & MPSAFE;
121 restart:
122 	vfslocked = 0;
123 	fmode = *flagp;
124 	if (fmode & O_CREAT) {
125 		ndp->ni_cnd.cn_nameiop = CREATE;
126 		ndp->ni_cnd.cn_flags = ISOPEN | LOCKPARENT | LOCKLEAF |
127 		    MPSAFE | AUDITVNODE1;
128 		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
129 			ndp->ni_cnd.cn_flags |= FOLLOW;
130 		bwillwrite();
131 		if ((error = namei(ndp)) != 0)
132 			return (error);
133 		vfslocked = NDHASGIANT(ndp);
134 		if (!mpsafe)
135 			ndp->ni_cnd.cn_flags &= ~MPSAFE;
136 		if (ndp->ni_vp == NULL) {
137 			VATTR_NULL(vap);
138 			vap->va_type = VREG;
139 			vap->va_mode = cmode;
140 			if (fmode & O_EXCL)
141 				vap->va_vaflags |= VA_EXCLUSIVE;
142 			if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) {
143 				NDFREE(ndp, NDF_ONLY_PNBUF);
144 				vput(ndp->ni_dvp);
145 				VFS_UNLOCK_GIANT(vfslocked);
146 				if ((error = vn_start_write(NULL, &mp,
147 				    V_XSLEEP | PCATCH)) != 0)
148 					return (error);
149 				goto restart;
150 			}
151 #ifdef MAC
152 			error = mac_vnode_check_create(cred, ndp->ni_dvp,
153 			    &ndp->ni_cnd, vap);
154 			if (error == 0)
155 #endif
156 				error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
157 						   &ndp->ni_cnd, vap);
158 			vput(ndp->ni_dvp);
159 			vn_finished_write(mp);
160 			if (error) {
161 				VFS_UNLOCK_GIANT(vfslocked);
162 				NDFREE(ndp, NDF_ONLY_PNBUF);
163 				return (error);
164 			}
165 			fmode &= ~O_TRUNC;
166 			vp = ndp->ni_vp;
167 		} else {
168 			if (ndp->ni_dvp == ndp->ni_vp)
169 				vrele(ndp->ni_dvp);
170 			else
171 				vput(ndp->ni_dvp);
172 			ndp->ni_dvp = NULL;
173 			vp = ndp->ni_vp;
174 			if (fmode & O_EXCL) {
175 				error = EEXIST;
176 				goto bad;
177 			}
178 			fmode &= ~O_CREAT;
179 		}
180 	} else {
181 		ndp->ni_cnd.cn_nameiop = LOOKUP;
182 		ndp->ni_cnd.cn_flags = ISOPEN |
183 		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) |
184 		    LOCKLEAF | MPSAFE | AUDITVNODE1;
185 		if (!(fmode & FWRITE))
186 			ndp->ni_cnd.cn_flags |= LOCKSHARED;
187 		if ((error = namei(ndp)) != 0)
188 			return (error);
189 		if (!mpsafe)
190 			ndp->ni_cnd.cn_flags &= ~MPSAFE;
191 		vfslocked = NDHASGIANT(ndp);
192 		vp = ndp->ni_vp;
193 	}
194 	if (vp->v_type == VLNK) {
195 		error = EMLINK;
196 		goto bad;
197 	}
198 	if (vp->v_type == VSOCK) {
199 		error = EOPNOTSUPP;
200 		goto bad;
201 	}
202 	accmode = 0;
203 	if (fmode & (FWRITE | O_TRUNC)) {
204 		if (vp->v_type == VDIR) {
205 			error = EISDIR;
206 			goto bad;
207 		}
208 		accmode |= VWRITE;
209 	}
210 	if (fmode & FREAD)
211 		accmode |= VREAD;
212 	if (fmode & FEXEC)
213 		accmode |= VEXEC;
214 	if (fmode & O_APPEND)
215 		accmode |= VAPPEND;
216 #ifdef MAC
217 	error = mac_vnode_check_open(cred, vp, accmode);
218 	if (error)
219 		goto bad;
220 #endif
221 	if ((fmode & O_CREAT) == 0) {
222 		if (accmode & VWRITE) {
223 			error = vn_writechk(vp);
224 			if (error)
225 				goto bad;
226 		}
227 		if (accmode) {
228 		        error = VOP_ACCESS(vp, accmode, cred, td);
229 			if (error)
230 				goto bad;
231 		}
232 	}
233 	if ((error = VOP_OPEN(vp, fmode, cred, td, fp)) != 0)
234 		goto bad;
235 
236 	if (fmode & FWRITE)
237 		vp->v_writecount++;
238 	*flagp = fmode;
239 	ASSERT_VOP_LOCKED(vp, "vn_open_cred");
240 	if (!mpsafe)
241 		VFS_UNLOCK_GIANT(vfslocked);
242 	return (0);
243 bad:
244 	NDFREE(ndp, NDF_ONLY_PNBUF);
245 	vput(vp);
246 	VFS_UNLOCK_GIANT(vfslocked);
247 	*flagp = fmode;
248 	ndp->ni_vp = NULL;
249 	return (error);
250 }
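
/*
 * Example (illustrative sketch, not part of the original file): opening a
 * file by path from kernel code.  The path and open mode are assumptions
 * for the sketch; error handling and further use of the vnode are elided.
 *
 *	struct nameidata nd;
 *	int flags, vfslocked, error;
 *
 *	flags = FREAD;
 *	NDINIT(&nd, LOOKUP, FOLLOW | MPSAFE, UIO_SYSSPACE, "/etc/motd", td);
 *	error = vn_open(&nd, &flags, 0, NULL);
 *	if (error == 0) {
 *		vfslocked = NDHASGIANT(&nd);
 *		NDFREE(&nd, NDF_ONLY_PNBUF);
 *		... use nd.ni_vp, which is returned locked ...
 *		VOP_UNLOCK(nd.ni_vp, 0);
 *		vn_close(nd.ni_vp, FREAD, td->td_ucred, td);
 *		VFS_UNLOCK_GIANT(vfslocked);
 *	}
 */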
251 
252 /*
253  * Check for write permissions on the specified vnode.
254  * Prototype text segments cannot be written.
255  */
256 int
257 vn_writechk(vp)
258 	register struct vnode *vp;
259 {
260 
261 	ASSERT_VOP_LOCKED(vp, "vn_writechk");
262 	/*
263 	 * If there's shared text associated with
264 	 * the vnode, try to free it up once.  If
265 	 * we fail, we can't allow writing.
266 	 */
267 	if (vp->v_vflag & VV_TEXT)
268 		return (ETXTBSY);
269 
270 	return (0);
271 }
272 
273 /*
274  * Vnode close call
275  */
276 int
277 vn_close(vp, flags, file_cred, td)
278 	register struct vnode *vp;
279 	int flags;
280 	struct ucred *file_cred;
281 	struct thread *td;
282 {
283 	struct mount *mp;
284 	int error, lock_flags;
285 
286 	if (!(flags & FWRITE) && vp->v_mount != NULL &&
287 	    vp->v_mount->mnt_kern_flag & MNTK_EXTENDED_SHARED)
288 		lock_flags = LK_SHARED;
289 	else
290 		lock_flags = LK_EXCLUSIVE;
291 
292 	VFS_ASSERT_GIANT(vp->v_mount);
293 
294 	vn_start_write(vp, &mp, V_WAIT);
295 	vn_lock(vp, lock_flags | LK_RETRY);
296 	if (flags & FWRITE) {
297 		VNASSERT(vp->v_writecount > 0, vp,
298 		    ("vn_close: negative writecount"));
299 		vp->v_writecount--;
300 	}
301 	error = VOP_CLOSE(vp, flags, file_cred, td);
302 	vput(vp);
303 	vn_finished_write(mp);
304 	return (error);
305 }
306 
307 /*
308  * Heuristic to detect sequential operation.
309  */
310 static int
311 sequential_heuristic(struct uio *uio, struct file *fp)
312 {
313 
314 	/*
315 	 * Offset 0 is handled specially.  open() sets f_seqcount to 1 so
316 	 * that the first I/O is normally considered to be slightly
317 	 * sequential.  Seeking to offset 0 doesn't change sequentiality
318 	 * unless previous seeks have reduced f_seqcount to 0, in which
319 	 * case offset 0 is not special.
320 	 */
321 	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
322 	    uio->uio_offset == fp->f_nextoff) {
323 		/*
324 		 * f_seqcount is in units of fixed-size blocks so that it
325 		 * depends mainly on the amount of sequential I/O and not
326 		 * much on the number of sequential I/O's.  The fixed size
327 		 * of 16384 is hard-coded here since it is (not quite) just
328 		 * a magic size that works well here.  This size is more
329 		 * closely related to the best I/O size for real disks than
330 		 * to any block size used by software.
331 		 */
332 		fp->f_seqcount += howmany(uio->uio_resid, 16384);
333 		if (fp->f_seqcount > IO_SEQMAX)
334 			fp->f_seqcount = IO_SEQMAX;
335 		return (fp->f_seqcount << IO_SEQSHIFT);
336 	}
337 
338 	/* Not sequential.  Quickly draw down sequentiality. */
339 	if (fp->f_seqcount > 1)
340 		fp->f_seqcount = 1;
341 	else
342 		fp->f_seqcount = 0;
343 	return (0);
344 }
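
/*
 * Worked example (illustrative, not part of the original file): a 64 KB
 * read that starts where the previous one ended adds
 * howmany(65536, 16384) = 4 to f_seqcount.  The resulting count, capped at
 * IO_SEQMAX, is returned shifted left by IO_SEQSHIFT so that it lands in
 * the ioflag bits that filesystems interpret as a read-ahead hint.
 */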
345 
346 /*
347  * Package up an I/O request on a vnode into a uio and do it.
348  */
349 int
350 vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, active_cred, file_cred,
351     aresid, td)
352 	enum uio_rw rw;
353 	struct vnode *vp;
354 	void *base;
355 	int len;
356 	off_t offset;
357 	enum uio_seg segflg;
358 	int ioflg;
359 	struct ucred *active_cred;
360 	struct ucred *file_cred;
361 	int *aresid;
362 	struct thread *td;
363 {
364 	struct uio auio;
365 	struct iovec aiov;
366 	struct mount *mp;
367 	struct ucred *cred;
368 	int error, lock_flags;
369 
370 	VFS_ASSERT_GIANT(vp->v_mount);
371 
372 	if ((ioflg & IO_NODELOCKED) == 0) {
373 		mp = NULL;
374 		if (rw == UIO_WRITE) {
375 			if (vp->v_type != VCHR &&
376 			    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH))
377 			    != 0)
378 				return (error);
379 			if (mp != NULL &&
380 			    (mp->mnt_kern_flag & MNTK_SHARED_WRITES)) {
381 				lock_flags = LK_SHARED;
382 			} else {
383 				lock_flags = LK_EXCLUSIVE;
384 			}
385 			vn_lock(vp, lock_flags | LK_RETRY);
386 		} else
387 			vn_lock(vp, LK_SHARED | LK_RETRY);
388 
389 	}
390 	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
391 	auio.uio_iov = &aiov;
392 	auio.uio_iovcnt = 1;
393 	aiov.iov_base = base;
394 	aiov.iov_len = len;
395 	auio.uio_resid = len;
396 	auio.uio_offset = offset;
397 	auio.uio_segflg = segflg;
398 	auio.uio_rw = rw;
399 	auio.uio_td = td;
400 	error = 0;
401 #ifdef MAC
402 	if ((ioflg & IO_NOMACCHECK) == 0) {
403 		if (rw == UIO_READ)
404 			error = mac_vnode_check_read(active_cred, file_cred,
405 			    vp);
406 		else
407 			error = mac_vnode_check_write(active_cred, file_cred,
408 			    vp);
409 	}
410 #endif
411 	if (error == 0) {
412 		if (file_cred)
413 			cred = file_cred;
414 		else
415 			cred = active_cred;
416 		if (rw == UIO_READ)
417 			error = VOP_READ(vp, &auio, ioflg, cred);
418 		else
419 			error = VOP_WRITE(vp, &auio, ioflg, cred);
420 	}
421 	if (aresid)
422 		*aresid = auio.uio_resid;
423 	else
424 		if (auio.uio_resid && error == 0)
425 			error = EIO;
426 	if ((ioflg & IO_NODELOCKED) == 0) {
427 		if (rw == UIO_WRITE && vp->v_type != VCHR)
428 			vn_finished_write(mp);
429 		VOP_UNLOCK(vp, 0);
430 	}
431 	return (error);
432 }
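
/*
 * Example (illustrative sketch, not part of the original file): reading the
 * first bytes of a file into a kernel buffer.  Here "vp" is assumed to be a
 * referenced, unlocked vnode and "td" the current thread; Giant handling
 * for non-MPSAFE filesystems is elided.
 *
 *	char buf[128];
 *	int resid, error;
 *
 *	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), (off_t)0,
 *	    UIO_SYSSPACE, 0, td->td_ucred, NOCRED, &resid, td);
 *	if (error == 0 && resid != 0)
 *		... short read: EOF was reached before the buffer filled ...
 */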
433 
434 /*
435  * Package up an I/O request on a vnode into a uio and do it.  The I/O
436  * request is split up into smaller chunks and we try to avoid saturating
437  * the buffer cache while potentially holding a vnode locked, so we
438  * call bwillwrite() before each vn_rdwr() call.  We also call uio_yield()
439  * to give other processes a chance to lock the vnode (either other processes
440  * core'ing the same binary, or unrelated processes scanning the directory).
441  */
442 int
443 vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, active_cred,
444     file_cred, aresid, td)
445 	enum uio_rw rw;
446 	struct vnode *vp;
447 	void *base;
448 	size_t len;
449 	off_t offset;
450 	enum uio_seg segflg;
451 	int ioflg;
452 	struct ucred *active_cred;
453 	struct ucred *file_cred;
454 	size_t *aresid;
455 	struct thread *td;
456 {
457 	int error = 0;
458 	int iaresid;
459 
460 	VFS_ASSERT_GIANT(vp->v_mount);
461 
462 	do {
463 		int chunk;
464 
465 		/*
466 		 * Force `offset' to a multiple of MAXBSIZE except possibly
467 		 * for the first chunk, so that filesystems only need to
468 		 * write full blocks except possibly for the first and last
469 		 * chunks.
470 		 */
471 		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;
472 
473 		if (chunk > len)
474 			chunk = len;
475 		if (rw != UIO_READ && vp->v_type == VREG)
476 			bwillwrite();
477 		iaresid = 0;
478 		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
479 		    ioflg, active_cred, file_cred, &iaresid, td);
480 		len -= chunk;	/* aresid calc already includes length */
481 		if (error)
482 			break;
483 		offset += chunk;
484 		base = (char *)base + chunk;
485 		uio_yield();
486 	} while (len);
487 	if (aresid)
488 		*aresid = len + iaresid;
489 	return (error);
490 }
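
/*
 * Worked example (illustrative, not part of the original file): with the
 * usual MAXBSIZE of 64 KB, a request starting at offset 1000 gets a first
 * chunk of 65536 - 1000 = 64536 bytes.  Every subsequent chunk then starts
 * on a MAXBSIZE boundary and is MAXBSIZE long, except possibly a shorter
 * final chunk.
 */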
491 
492 /*
493  * File table vnode read routine.
494  */
495 static int
496 vn_read(fp, uio, active_cred, flags, td)
497 	struct file *fp;
498 	struct uio *uio;
499 	struct ucred *active_cred;
500 	struct thread *td;
501 	int flags;
502 {
503 	struct vnode *vp;
504 	int error, ioflag;
505 	struct mtx *mtxp;
506 	int vfslocked;
507 
508 	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
509 	    uio->uio_td, td));
510 	mtxp = NULL;
511 	vp = fp->f_vnode;
512 	ioflag = 0;
513 	if (fp->f_flag & FNONBLOCK)
514 		ioflag |= IO_NDELAY;
515 	if (fp->f_flag & O_DIRECT)
516 		ioflag |= IO_DIRECT;
517 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
518 	/*
519 	 * According to McKusick the vn lock was protecting f_offset here.
520 	 * It is now protected by the FOFFSET_LOCKED flag.
521 	 */
522 	if ((flags & FOF_OFFSET) == 0) {
523 		mtxp = mtx_pool_find(mtxpool_sleep, fp);
524 		mtx_lock(mtxp);
525 		while (fp->f_vnread_flags & FOFFSET_LOCKED) {
526 			fp->f_vnread_flags |= FOFFSET_LOCK_WAITING;
527 			msleep(&fp->f_vnread_flags, mtxp, PUSER - 1,
528 			    "vnread offlock", 0);
529 		}
530 		fp->f_vnread_flags |= FOFFSET_LOCKED;
531 		mtx_unlock(mtxp);
532 		vn_lock(vp, LK_SHARED | LK_RETRY);
533 		uio->uio_offset = fp->f_offset;
534 	} else
535 		vn_lock(vp, LK_SHARED | LK_RETRY);
536 
537 	ioflag |= sequential_heuristic(uio, fp);
538 
539 #ifdef MAC
540 	error = mac_vnode_check_read(active_cred, fp->f_cred, vp);
541 	if (error == 0)
542 #endif
543 		error = VOP_READ(vp, uio, ioflag, fp->f_cred);
544 	if ((flags & FOF_OFFSET) == 0) {
545 		fp->f_offset = uio->uio_offset;
546 		mtx_lock(mtxp);
547 		if (fp->f_vnread_flags & FOFFSET_LOCK_WAITING)
548 			wakeup(&fp->f_vnread_flags);
549 		fp->f_vnread_flags = 0;
550 		mtx_unlock(mtxp);
551 	}
552 	fp->f_nextoff = uio->uio_offset;
553 	VOP_UNLOCK(vp, 0);
554 	VFS_UNLOCK_GIANT(vfslocked);
555 	return (error);
556 }
557 
558 /*
559  * File table vnode write routine.
560  */
561 static int
562 vn_write(fp, uio, active_cred, flags, td)
563 	struct file *fp;
564 	struct uio *uio;
565 	struct ucred *active_cred;
566 	struct thread *td;
567 	int flags;
568 {
569 	struct vnode *vp;
570 	struct mount *mp;
571 	int error, ioflag, lock_flags;
572 	int vfslocked;
573 
574 	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
575 	    uio->uio_td, td));
576 	vp = fp->f_vnode;
577 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
578 	if (vp->v_type == VREG)
579 		bwillwrite();
580 	ioflag = IO_UNIT;
581 	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
582 		ioflag |= IO_APPEND;
583 	if (fp->f_flag & FNONBLOCK)
584 		ioflag |= IO_NDELAY;
585 	if (fp->f_flag & O_DIRECT)
586 		ioflag |= IO_DIRECT;
587 	if ((fp->f_flag & O_FSYNC) ||
588 	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
589 		ioflag |= IO_SYNC;
590 	mp = NULL;
591 	if (vp->v_type != VCHR &&
592 	    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
593 		goto unlock;
594 
595 	if (mp != NULL && (mp->mnt_kern_flag & MNTK_SHARED_WRITES) &&
596 	    (flags & FOF_OFFSET) != 0) {
597 		lock_flags = LK_SHARED;
598 	} else {
599 		lock_flags = LK_EXCLUSIVE;
600 	}
601 
602 	vn_lock(vp, lock_flags | LK_RETRY);
603 	if ((flags & FOF_OFFSET) == 0)
604 		uio->uio_offset = fp->f_offset;
605 	ioflag |= sequential_heuristic(uio, fp);
606 #ifdef MAC
607 	error = mac_vnode_check_write(active_cred, fp->f_cred, vp);
608 	if (error == 0)
609 #endif
610 		error = VOP_WRITE(vp, uio, ioflag, fp->f_cred);
611 	if ((flags & FOF_OFFSET) == 0)
612 		fp->f_offset = uio->uio_offset;
613 	fp->f_nextoff = uio->uio_offset;
614 	VOP_UNLOCK(vp, 0);
615 	if (vp->v_type != VCHR)
616 		vn_finished_write(mp);
617 unlock:
618 	VFS_UNLOCK_GIANT(vfslocked);
619 	return (error);
620 }
621 
622 /*
623  * File table truncate routine.
624  */
625 static int
626 vn_truncate(fp, length, active_cred, td)
627 	struct file *fp;
628 	off_t length;
629 	struct ucred *active_cred;
630 	struct thread *td;
631 {
632 	struct vattr vattr;
633 	struct mount *mp;
634 	struct vnode *vp;
635 	int vfslocked;
636 	int error;
637 
638 	vp = fp->f_vnode;
639 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
640 	error = vn_start_write(vp, &mp, V_WAIT | PCATCH);
641 	if (error) {
642 		VFS_UNLOCK_GIANT(vfslocked);
643 		return (error);
644 	}
645 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
646 	if (vp->v_type == VDIR) {
647 		error = EISDIR;
648 		goto out;
649 	}
650 #ifdef MAC
651 	error = mac_vnode_check_write(active_cred, fp->f_cred, vp);
652 	if (error)
653 		goto out;
654 #endif
655 	error = vn_writechk(vp);
656 	if (error == 0) {
657 		VATTR_NULL(&vattr);
658 		vattr.va_size = length;
659 		error = VOP_SETATTR(vp, &vattr, fp->f_cred);
660 	}
661 out:
662 	VOP_UNLOCK(vp, 0);
663 	vn_finished_write(mp);
664 	VFS_UNLOCK_GIANT(vfslocked);
665 	return (error);
666 }
667 
668 /*
669  * File table vnode stat routine.
670  */
671 static int
672 vn_statfile(fp, sb, active_cred, td)
673 	struct file *fp;
674 	struct stat *sb;
675 	struct ucred *active_cred;
676 	struct thread *td;
677 {
678 	struct vnode *vp = fp->f_vnode;
679 	int vfslocked;
680 	int error;
681 
682 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
683 	vn_lock(vp, LK_SHARED | LK_RETRY);
684 	error = vn_stat(vp, sb, active_cred, fp->f_cred, td);
685 	VOP_UNLOCK(vp, 0);
686 	VFS_UNLOCK_GIANT(vfslocked);
687 
688 	return (error);
689 }
690 
691 /*
692  * Stat a vnode; implementation for the stat syscall
693  */
694 int
695 vn_stat(vp, sb, active_cred, file_cred, td)
696 	struct vnode *vp;
697 	register struct stat *sb;
698 	struct ucred *active_cred;
699 	struct ucred *file_cred;
700 	struct thread *td;
701 {
702 	struct vattr vattr;
703 	register struct vattr *vap;
704 	int error;
705 	u_short mode;
706 
707 #ifdef MAC
708 	error = mac_vnode_check_stat(active_cred, file_cred, vp);
709 	if (error)
710 		return (error);
711 #endif
712 
713 	vap = &vattr;
714 
715 	/*
716 	 * Initialize defaults for new and unusual fields, so that file
717 	 * systems which don't support these fields don't need to know
718 	 * about them.
719 	 */
720 	vap->va_birthtime.tv_sec = -1;
721 	vap->va_birthtime.tv_nsec = 0;
722 	vap->va_fsid = VNOVAL;
723 	vap->va_rdev = NODEV;
724 
725 	error = VOP_GETATTR(vp, vap, active_cred);
726 	if (error)
727 		return (error);
728 
729 	/*
730 	 * Zero the spare stat fields
731 	 */
732 	bzero(sb, sizeof *sb);
733 
734 	/*
735 	 * Copy from vattr table
736 	 */
737 	if (vap->va_fsid != VNOVAL)
738 		sb->st_dev = vap->va_fsid;
739 	else
740 		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
741 	sb->st_ino = vap->va_fileid;
742 	mode = vap->va_mode;
743 	switch (vap->va_type) {
744 	case VREG:
745 		mode |= S_IFREG;
746 		break;
747 	case VDIR:
748 		mode |= S_IFDIR;
749 		break;
750 	case VBLK:
751 		mode |= S_IFBLK;
752 		break;
753 	case VCHR:
754 		mode |= S_IFCHR;
755 		break;
756 	case VLNK:
757 		mode |= S_IFLNK;
758 		break;
759 	case VSOCK:
760 		mode |= S_IFSOCK;
761 		break;
762 	case VFIFO:
763 		mode |= S_IFIFO;
764 		break;
765 	default:
766 		return (EBADF);
767 	}
768 	sb->st_mode = mode;
769 	sb->st_nlink = vap->va_nlink;
770 	sb->st_uid = vap->va_uid;
771 	sb->st_gid = vap->va_gid;
772 	sb->st_rdev = vap->va_rdev;
773 	if (vap->va_size > OFF_MAX)
774 		return (EOVERFLOW);
775 	sb->st_size = vap->va_size;
776 	sb->st_atimespec = vap->va_atime;
777 	sb->st_mtimespec = vap->va_mtime;
778 	sb->st_ctimespec = vap->va_ctime;
779 	sb->st_birthtimespec = vap->va_birthtime;
780 
781         /*
782 	 * According to www.opengroup.org, the meaning of st_blksize is
783 	 *   "a filesystem-specific preferred I/O block size for this
784 	 *    object.  In some filesystem types, this may vary from file
785 	 *    to file"
786 	 * Default to PAGE_SIZE after much discussion.
787 	 * XXX: min(PAGE_SIZE, vp->v_bufobj.bo_bsize) may be more correct.
788 	 */
789 
790 	sb->st_blksize = PAGE_SIZE;
791 
792 	sb->st_flags = vap->va_flags;
793 	if (priv_check(td, PRIV_VFS_GENERATION))
794 		sb->st_gen = 0;
795 	else
796 		sb->st_gen = vap->va_gen;
797 
798 	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
799 	return (0);
800 }
801 
802 /*
803  * File table vnode ioctl routine.
804  */
805 static int
806 vn_ioctl(fp, com, data, active_cred, td)
807 	struct file *fp;
808 	u_long com;
809 	void *data;
810 	struct ucred *active_cred;
811 	struct thread *td;
812 {
813 	struct vnode *vp = fp->f_vnode;
814 	struct vattr vattr;
815 	int vfslocked;
816 	int error;
817 
818 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
819 	error = ENOTTY;
820 	switch (vp->v_type) {
821 	case VREG:
822 	case VDIR:
823 		if (com == FIONREAD) {
824 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
825 			error = VOP_GETATTR(vp, &vattr, active_cred);
826 			VOP_UNLOCK(vp, 0);
827 			if (!error)
828 				*(int *)data = vattr.va_size - fp->f_offset;
829 		}
830 		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
831 			error = 0;
832 		else
833 			error = VOP_IOCTL(vp, com, data, fp->f_flag,
834 			    active_cred, td);
835 		break;
836 
837 	default:
838 		break;
839 	}
840 	VFS_UNLOCK_GIANT(vfslocked);
841 	return (error);
842 }
843 
844 /*
845  * File table vnode poll routine.
846  */
847 static int
848 vn_poll(fp, events, active_cred, td)
849 	struct file *fp;
850 	int events;
851 	struct ucred *active_cred;
852 	struct thread *td;
853 {
854 	struct vnode *vp;
855 	int vfslocked;
856 	int error;
857 
858 	vp = fp->f_vnode;
859 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
860 #ifdef MAC
861 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
862 	error = mac_vnode_check_poll(active_cred, fp->f_cred, vp);
863 	VOP_UNLOCK(vp, 0);
864 	if (!error)
865 #endif
866 
867 	error = VOP_POLL(vp, events, fp->f_cred, td);
868 	VFS_UNLOCK_GIANT(vfslocked);
869 	return (error);
870 }
871 
872 /*
873  * Acquire the requested lock and then check for validity.  LK_RETRY
874  * permits vn_lock to return doomed vnodes.
875  */
876 int
877 _vn_lock(struct vnode *vp, int flags, char *file, int line)
878 {
879 	int error;
880 
881 	VNASSERT((flags & LK_TYPE_MASK) != 0, vp,
882 	    ("vn_lock called with no locktype."));
883 	do {
884 #ifdef DEBUG_VFS_LOCKS
885 		KASSERT(vp->v_holdcnt != 0,
886 		    ("vn_lock %p: zero hold count", vp));
887 #endif
888 		error = VOP_LOCK1(vp, flags, file, line);
889 		flags &= ~LK_INTERLOCK;	/* Interlock is always dropped. */
890 		KASSERT((flags & LK_RETRY) == 0 || error == 0,
891 		    ("LK_RETRY set with incompatible flags (0x%x) or an error occurred (%d)",
892 		    flags, error));
893 		/*
894 		 * Callers specify LK_RETRY if they wish to get dead vnodes.
895 		 * If RETRY is not set, we return ENOENT instead.
896 		 */
897 		if (error == 0 && vp->v_iflag & VI_DOOMED &&
898 		    (flags & LK_RETRY) == 0) {
899 			VOP_UNLOCK(vp, 0);
900 			error = ENOENT;
901 			break;
902 		}
903 	} while (flags & LK_RETRY && error != 0);
904 	return (error);
905 }
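
/*
 * Usage note (illustrative, not part of the original file): callers use the
 * vn_lock() macro, which supplies the file and line arguments.  With
 * LK_RETRY the lock request does not fail and doomed vnodes are returned;
 * without it, a doomed vnode yields ENOENT:
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *
 *	error = vn_lock(vp, LK_EXCLUSIVE);
 *	if (error != 0)
 *		return (error);		the vnode was forcibly unmounted
 */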
906 
907 /*
908  * File table vnode close routine.
909  */
910 static int
911 vn_closefile(fp, td)
912 	struct file *fp;
913 	struct thread *td;
914 {
915 	struct vnode *vp;
916 	struct flock lf;
917 	int vfslocked;
918 	int error;
919 
920 	vp = fp->f_vnode;
921 
922 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
923 	if (fp->f_type == DTYPE_VNODE && fp->f_flag & FHASLOCK) {
924 		lf.l_whence = SEEK_SET;
925 		lf.l_start = 0;
926 		lf.l_len = 0;
927 		lf.l_type = F_UNLCK;
928 		(void) VOP_ADVLOCK(vp, fp, F_UNLCK, &lf, F_FLOCK);
929 	}
930 
931 	fp->f_ops = &badfileops;
932 
933 	error = vn_close(vp, fp->f_flag, fp->f_cred, td);
934 	VFS_UNLOCK_GIANT(vfslocked);
935 	return (error);
936 }
937 
938 /*
939  * Prepare to start a filesystem write operation. If the operation is
940  * permitted, then we bump the count of operations in progress and
941  * proceed. If a suspend request is in progress, we wait until the
942  * suspension is over, and then proceed.
943  */
944 int
945 vn_start_write(vp, mpp, flags)
946 	struct vnode *vp;
947 	struct mount **mpp;
948 	int flags;
949 {
950 	struct mount *mp;
951 	int error;
952 
953 	error = 0;
954 	/*
955 	 * If a vnode is provided, get and return the mount point to
956 	 * which it will write.
957 	 */
958 	if (vp != NULL) {
959 		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
960 			*mpp = NULL;
961 			if (error != EOPNOTSUPP)
962 				return (error);
963 			return (0);
964 		}
965 	}
966 	if ((mp = *mpp) == NULL)
967 		return (0);
968 
969 	/*
970 	 * VOP_GETWRITEMOUNT() returns with the mp refcount held through
971 	 * a vfs_ref().
972 	 * When no vnode is provided, we must acquire a reference on the
973 	 * passed-in mount point ourselves, in order to emulate a
974 	 * vfs_ref().
975 	 */
976 	MNT_ILOCK(mp);
977 	if (vp == NULL)
978 		MNT_REF(mp);
979 
980 	/*
981 	 * Check on status of suspension.
982 	 */
983 	if ((curthread->td_pflags & TDP_IGNSUSP) == 0 ||
984 	    mp->mnt_susp_owner != curthread) {
985 		while ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
986 			if (flags & V_NOWAIT) {
987 				error = EWOULDBLOCK;
988 				goto unlock;
989 			}
990 			error = msleep(&mp->mnt_flag, MNT_MTX(mp),
991 			    (PUSER - 1) | (flags & PCATCH), "suspfs", 0);
992 			if (error)
993 				goto unlock;
994 		}
995 	}
996 	if (flags & V_XSLEEP)
997 		goto unlock;
998 	mp->mnt_writeopcount++;
999 unlock:
1000 	MNT_REL(mp);
1001 	MNT_IUNLOCK(mp);
1002 	return (error);
1003 }
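
/*
 * Example (illustrative sketch, not part of the original file): the usual
 * bracketing of a modifying vnode operation, matching what vn_truncate()
 * above does.  Here "vp" is assumed to be a referenced, unlocked vnode.
 *
 *	error = vn_start_write(vp, &mp, V_WAIT | PCATCH);
 *	if (error != 0)
 *		return (error);
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	... VOP_SETATTR() or another modifying VOP ...
 *	VOP_UNLOCK(vp, 0);
 *	vn_finished_write(mp);
 */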
1004 
1005 /*
1006  * Secondary suspension. Used by operations such as vop_inactive
1007  * routines that are needed by the higher level functions. These
1008  * are allowed to proceed until all the higher level functions have
1009  * completed (indicated by mnt_writeopcount dropping to zero). At that
1010  * time, these operations are halted until the suspension is over.
1011  */
1012 int
1013 vn_start_secondary_write(vp, mpp, flags)
1014 	struct vnode *vp;
1015 	struct mount **mpp;
1016 	int flags;
1017 {
1018 	struct mount *mp;
1019 	int error;
1020 
1021  retry:
1022 	if (vp != NULL) {
1023 		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
1024 			*mpp = NULL;
1025 			if (error != EOPNOTSUPP)
1026 				return (error);
1027 			return (0);
1028 		}
1029 	}
1030 	/*
1031 	 * If we are not suspended or have not yet reached suspended
1032 	 * mode, then let the operation proceed.
1033 	 */
1034 	if ((mp = *mpp) == NULL)
1035 		return (0);
1036 
1037 	/*
1038 	 * VOP_GETWRITEMOUNT() returns with the mp refcount held through
1039 	 * a vfs_ref().
1040 	 * When no vnode is provided, we must acquire a reference on the
1041 	 * passed-in mount point ourselves, in order to emulate a
1042 	 * vfs_ref().
1043 	 */
1044 	MNT_ILOCK(mp);
1045 	if (vp == NULL)
1046 		MNT_REF(mp);
1047 	if ((mp->mnt_kern_flag & (MNTK_SUSPENDED | MNTK_SUSPEND2)) == 0) {
1048 		mp->mnt_secondary_writes++;
1049 		mp->mnt_secondary_accwrites++;
1050 		MNT_REL(mp);
1051 		MNT_IUNLOCK(mp);
1052 		return (0);
1053 	}
1054 	if (flags & V_NOWAIT) {
1055 		MNT_REL(mp);
1056 		MNT_IUNLOCK(mp);
1057 		return (EWOULDBLOCK);
1058 	}
1059 	/*
1060 	 * Wait for the suspension to finish.
1061 	 */
1062 	error = msleep(&mp->mnt_flag, MNT_MTX(mp),
1063 		       (PUSER - 1) | (flags & PCATCH) | PDROP, "suspfs", 0);
1064 	vfs_rel(mp);
1065 	if (error == 0)
1066 		goto retry;
1067 	return (error);
1068 }
1069 
1070 /*
1071  * Filesystem write operation has completed. If we are suspending and this
1072  * operation is the last one, notify the suspender that the suspension is
1073  * now in effect.
1074  */
1075 void
1076 vn_finished_write(mp)
1077 	struct mount *mp;
1078 {
1079 	if (mp == NULL)
1080 		return;
1081 	MNT_ILOCK(mp);
1082 	mp->mnt_writeopcount--;
1083 	if (mp->mnt_writeopcount < 0)
1084 		panic("vn_finished_write: neg cnt");
1085 	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
1086 	    mp->mnt_writeopcount <= 0)
1087 		wakeup(&mp->mnt_writeopcount);
1088 	MNT_IUNLOCK(mp);
1089 }
1090 
1091 
1092 /*
1093  * Filesystem secondary write operation has completed. If we are
1094  * suspending and this operation is the last one, notify the suspender
1095  * that the suspension is now in effect.
1096  */
1097 void
1098 vn_finished_secondary_write(mp)
1099 	struct mount *mp;
1100 {
1101 	if (mp == NULL)
1102 		return;
1103 	MNT_ILOCK(mp);
1104 	mp->mnt_secondary_writes--;
1105 	if (mp->mnt_secondary_writes < 0)
1106 		panic("vn_finished_secondary_write: neg cnt");
1107 	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
1108 	    mp->mnt_secondary_writes <= 0)
1109 		wakeup(&mp->mnt_secondary_writes);
1110 	MNT_IUNLOCK(mp);
1111 }
1112 
1113 
1114 
1115 /*
1116  * Request a filesystem to suspend write operations.
1117  */
1118 int
1119 vfs_write_suspend(mp)
1120 	struct mount *mp;
1121 {
1122 	int error;
1123 
1124 	MNT_ILOCK(mp);
1125 	if (mp->mnt_susp_owner == curthread) {
1126 		MNT_IUNLOCK(mp);
1127 		return (EALREADY);
1128 	}
1129 	while (mp->mnt_kern_flag & MNTK_SUSPEND)
1130 		msleep(&mp->mnt_flag, MNT_MTX(mp), PUSER - 1, "wsuspfs", 0);
1131 	mp->mnt_kern_flag |= MNTK_SUSPEND;
1132 	mp->mnt_susp_owner = curthread;
1133 	if (mp->mnt_writeopcount > 0)
1134 		(void) msleep(&mp->mnt_writeopcount,
1135 		    MNT_MTX(mp), (PUSER - 1)|PDROP, "suspwt", 0);
1136 	else
1137 		MNT_IUNLOCK(mp);
1138 	if ((error = VFS_SYNC(mp, MNT_SUSPEND)) != 0)
1139 		vfs_write_resume(mp);
1140 	return (error);
1141 }
1142 
1143 /*
1144  * Request a filesystem to resume write operations.
1145  */
1146 void
1147 vfs_write_resume(mp)
1148 	struct mount *mp;
1149 {
1150 
1151 	MNT_ILOCK(mp);
1152 	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
1153 		KASSERT(mp->mnt_susp_owner == curthread, ("mnt_susp_owner"));
1154 		mp->mnt_kern_flag &= ~(MNTK_SUSPEND | MNTK_SUSPEND2 |
1155 				       MNTK_SUSPENDED);
1156 		mp->mnt_susp_owner = NULL;
1157 		wakeup(&mp->mnt_writeopcount);
1158 		wakeup(&mp->mnt_flag);
1159 		curthread->td_pflags &= ~TDP_IGNSUSP;
1160 		MNT_IUNLOCK(mp);
1161 		VFS_SUSP_CLEAN(mp);
1162 	} else
1163 		MNT_IUNLOCK(mp);
1164 }
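
/*
 * Example (illustrative sketch, not part of the original file): suspending
 * writes around work that needs a quiescent filesystem, such as snapshot
 * creation.  Note that vfs_write_suspend() already resumes the filesystem
 * itself if its final VFS_SYNC() fails.
 *
 *	error = vfs_write_suspend(mp);
 *	if (error == 0) {
 *		... no new write operations can start here ...
 *		vfs_write_resume(mp);
 *	}
 */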
1165 
1166 /*
1167  * Implement kqueues for files by translating them into vnode operations.
1168  */
1169 static int
1170 vn_kqfilter(struct file *fp, struct knote *kn)
1171 {
1172 	int vfslocked;
1173 	int error;
1174 
1175 	vfslocked = VFS_LOCK_GIANT(fp->f_vnode->v_mount);
1176 	error = VOP_KQFILTER(fp->f_vnode, kn);
1177 	VFS_UNLOCK_GIANT(vfslocked);
1178 
1179 	return error;
1180 }
1181 
1182 /*
1183  * Simplified in-kernel wrapper calls for extended attribute access.
1184  * Both calls pass in a NULL credential, authorizing as "kernel" access.
1185  * Set IO_NODELOCKED in ioflg if the vnode is already locked.
1186  */
1187 int
1188 vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
1189     const char *attrname, int *buflen, char *buf, struct thread *td)
1190 {
1191 	struct uio	auio;
1192 	struct iovec	iov;
1193 	int	error;
1194 
1195 	iov.iov_len = *buflen;
1196 	iov.iov_base = buf;
1197 
1198 	auio.uio_iov = &iov;
1199 	auio.uio_iovcnt = 1;
1200 	auio.uio_rw = UIO_READ;
1201 	auio.uio_segflg = UIO_SYSSPACE;
1202 	auio.uio_td = td;
1203 	auio.uio_offset = 0;
1204 	auio.uio_resid = *buflen;
1205 
1206 	if ((ioflg & IO_NODELOCKED) == 0)
1207 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1208 
1209 	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
1210 
1211 	/* authorize attribute retrieval as kernel */
1212 	error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL,
1213 	    td);
1214 
1215 	if ((ioflg & IO_NODELOCKED) == 0)
1216 		VOP_UNLOCK(vp, 0);
1217 
1218 	if (error == 0) {
1219 		*buflen = *buflen - auio.uio_resid;
1220 	}
1221 
1222 	return (error);
1223 }
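
/*
 * Example (illustrative sketch, not part of the original file): fetching an
 * extended attribute into a stack buffer.  The namespace and attribute name
 * are hypothetical; "vp" is assumed to be locked by the caller, hence
 * IO_NODELOCKED.
 *
 *	char buf[64];
 *	int buflen, error;
 *
 *	buflen = sizeof(buf);
 *	error = vn_extattr_get(vp, IO_NODELOCKED, EXTATTR_NAMESPACE_SYSTEM,
 *	    "example_attr", &buflen, buf, td);
 *	if (error == 0)
 *		... buflen now holds the number of bytes returned ...
 */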
1224 
1225 /*
1226  * XXX failure mode if partially written?
1227  */
1228 int
1229 vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
1230     const char *attrname, int buflen, char *buf, struct thread *td)
1231 {
1232 	struct uio	auio;
1233 	struct iovec	iov;
1234 	struct mount	*mp;
1235 	int	error;
1236 
1237 	iov.iov_len = buflen;
1238 	iov.iov_base = buf;
1239 
1240 	auio.uio_iov = &iov;
1241 	auio.uio_iovcnt = 1;
1242 	auio.uio_rw = UIO_WRITE;
1243 	auio.uio_segflg = UIO_SYSSPACE;
1244 	auio.uio_td = td;
1245 	auio.uio_offset = 0;
1246 	auio.uio_resid = buflen;
1247 
1248 	if ((ioflg & IO_NODELOCKED) == 0) {
1249 		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
1250 			return (error);
1251 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1252 	}
1253 
1254 	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
1255 
1256 	/* authorize attribute setting as kernel */
1257 	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td);
1258 
1259 	if ((ioflg & IO_NODELOCKED) == 0) {
1260 		vn_finished_write(mp);
1261 		VOP_UNLOCK(vp, 0);
1262 	}
1263 
1264 	return (error);
1265 }
1266 
1267 int
1268 vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
1269     const char *attrname, struct thread *td)
1270 {
1271 	struct mount	*mp;
1272 	int	error;
1273 
1274 	if ((ioflg & IO_NODELOCKED) == 0) {
1275 		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
1276 			return (error);
1277 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1278 	}
1279 
1280 	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
1281 
1282 	/* authorize attribute removal as kernel */
1283 	error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NULL, td);
1284 	if (error == EOPNOTSUPP)
1285 		error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
1286 		    NULL, td);
1287 
1288 	if ((ioflg & IO_NODELOCKED) == 0) {
1289 		vn_finished_write(mp);
1290 		VOP_UNLOCK(vp, 0);
1291 	}
1292 
1293 	return (error);
1294 }
1295 
1296 int
1297 vn_vget_ino(struct vnode *vp, ino_t ino, int lkflags, struct vnode **rvp)
1298 {
1299 	struct mount *mp;
1300 	int ltype, error;
1301 
1302 	mp = vp->v_mount;
1303 	ltype = VOP_ISLOCKED(vp);
1304 	KASSERT(ltype == LK_EXCLUSIVE || ltype == LK_SHARED,
1305 	    ("vn_vget_ino: vp not locked"));
1306 	error = vfs_busy(mp, MBF_NOWAIT);
1307 	if (error != 0) {
1308 		VOP_UNLOCK(vp, 0);
1309 		error = vfs_busy(mp, 0);
1310 		vn_lock(vp, ltype | LK_RETRY);
1311 		if (error != 0)
1312 			return (ENOENT);
1313 		if (vp->v_iflag & VI_DOOMED) {
1314 			vfs_unbusy(mp);
1315 			return (ENOENT);
1316 		}
1317 	}
1318 	VOP_UNLOCK(vp, 0);
1319 	error = VFS_VGET(mp, ino, lkflags, rvp);
1320 	vfs_unbusy(mp);
1321 	vn_lock(vp, ltype | LK_RETRY);
1322 	if (vp->v_iflag & VI_DOOMED) {
1323 		if (error == 0)
1324 			vput(*rvp);
1325 		error = ENOENT;
1326 	}
1327 	return (error);
1328 }
1329